1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97
98 #include <prng/random.h>
99
100 #include <vm/pmap.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_kern.h>
103 #include <vm/vm_page.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */
106
107 #include <pexpert/pexpert.h>
108
109 #include <machine/machparam.h>
110 #include <machine/machine_routines.h> /* ml_cpu_get_info */
111
112 #include <os/atomic.h>
113
114 #include <libkern/OSDebug.h>
115 #include <libkern/OSAtomic.h>
116 #include <libkern/section_keywords.h>
117 #include <sys/kdebug.h>
118 #include <sys/code_signing.h>
119
120 #include <san/kasan.h>
121 #include <libsa/stdlib.h>
122 #include <sys/errno.h>
123
124 #include <IOKit/IOBSD.h>
125 #include <arm64/amcc_rorgn.h>
126
127 #if DEBUG
128 #define z_debug_assert(expr) assert(expr)
129 #else
130 #define z_debug_assert(expr) (void)(expr)
131 #endif
132
133 /* Returns pid of the task with the largest number of VM map entries. */
134 extern pid_t find_largest_process_vm_map_entries(void);
135
136 /*
137 * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
138 * For any other pid we try to kill that process synchronously.
139 */
140 extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);
141
142 extern zone_t vm_object_zone;
143 extern zone_t ipc_service_port_label_zone;
144
145 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
146 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
147
148 #if KASAN_TBI
149 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
150 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
151 #else /* KASAN_TBI */
152 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
153 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
154 #endif /* KASAN_TBI */
155
156 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
157 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
158 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
159 #define ZONE_GUARD_DENSE (32 * 1024)
160 #define ZONE_GUARD_SPARSE (64 * 1024)
161 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
162
163 #if XNU_PLATFORM_MacOSX
164 #define ZONE_MAP_MAX (32ULL << 30)
165 #define ZONE_MAP_VA_SIZE (128ULL << 30)
166 #else /* XNU_PLATFORM_MacOSX */
167 #define ZONE_MAP_MAX (8ULL << 30)
168 #define ZONE_MAP_VA_SIZE (24ULL << 30)
169 #endif /* !XNU_PLATFORM_MacOSX */
170
171 __enum_closed_decl(zm_len_t, uint16_t, {
172 ZM_CHUNK_FREE = 0x0,
173 /* 1 through 8 are valid lengths */
174 ZM_CHUNK_LEN_MAX = 0x8,
175
176 /* PGZ magical values */
177 ZM_PGZ_FREE = 0x0,
178 ZM_PGZ_ALLOCATED = 0xa, /* [a]llocated */
179 ZM_PGZ_GUARD = 0xb, /* oo[b] */
180 ZM_PGZ_DOUBLE_FREE = 0xd, /* [d]ouble_free */
181
182 /* secondary page markers */
183 ZM_SECONDARY_PAGE = 0xe,
184 ZM_SECONDARY_PCPU_PAGE = 0xf,
185 });
186
187 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
188
189 struct zone_page_metadata {
190 union {
191 struct {
192 /* The index of the zone this metadata page belongs to */
193 zone_id_t zm_index : 10;
194
195 /*
196 * This chunk ends with a guard page.
197 */
198 uint16_t zm_guarded : 1;
199
200 /*
201 * Whether `zm_bitmap` is an inline bitmap
202 * or a packed bitmap reference
203 */
204 uint16_t zm_inline_bitmap : 1;
205
206 /*
207 * Zones allocate in "chunks" of zone_t::z_chunk_pages
208 * consecutive pages, or zpercpu_count() pages if the
209 * zone is percpu.
210 *
211 * The first page of it has its metadata set with:
212 * - 0 if none of the pages are currently wired
213 * - the number of wired pages in the chunk
214 * (not scaled for percpu).
215 *
216 * Other pages in the chunk have their zm_chunk_len set
217 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
218 * depending on whether the zone is percpu or not.
219 * For those, zm_page_index holds the index of that page
220 * in the run, and zm_subchunk_len the remaining length
221 * within the chunk.
222 *
223 * Metadata used for PGZ pages can have 3 values:
224 * - ZM_PGZ_FREE: slot is free
225 * - ZM_PGZ_ALLOCATED: slot holds an allocated element
226 * at offset (zm_pgz_orig_addr & PAGE_MASK)
227 * - ZM_PGZ_DOUBLE_FREE: slot detected a double free
228 * (will panic).
229 */
230 zm_len_t zm_chunk_len : 4;
231 };
232 uint16_t zm_bits;
233 };
234
235 union {
236 #define ZM_ALLOC_SIZE_LOCK 1u
237 uint16_t zm_alloc_size; /* first page only */
238 struct {
239 uint8_t zm_page_index; /* secondary pages only */
240 uint8_t zm_subchunk_len; /* secondary pages only */
241 };
242 uint16_t zm_oob_offs; /* in guard pages */
243 };
244 union {
245 uint32_t zm_bitmap; /* most zones */
246 uint32_t zm_bump; /* permanent zones */
247 };
248
249 union {
250 struct {
251 zone_pva_t zm_page_next;
252 zone_pva_t zm_page_prev;
253 };
254 vm_offset_t zm_pgz_orig_addr;
255 struct zone_page_metadata *zm_pgz_slot_next;
256 };
257 };
258 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
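/*
 * Illustrative layout (a sketch, not authoritative): a fully wired,
 * non-percpu chunk spanning 3 pages is described by 3 consecutive
 * metadata entries:
 *
 *   meta[0]: zm_chunk_len = 3, owns zm_alloc_size / zm_bitmap for the chunk
 *   meta[1]: zm_chunk_len = ZM_SECONDARY_PAGE, zm_page_index = 1, zm_subchunk_len = 2
 *   meta[2]: zm_chunk_len = ZM_SECONDARY_PAGE, zm_page_index = 2, zm_subchunk_len = 1
 *
 * Per-cpu chunks use ZM_SECONDARY_PCPU_PAGE for their secondary pages instead.
 */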
259
260 /*!
261 * @typedef zone_magazine_t
262 *
263 * @brief
264 * Magazine of cached allocations.
265 *
266 * @field zm_next linkage used by magazine depots.
267 * @field zm_elems an array of @c zc_mag_size() elements.
268 */
269 struct zone_magazine {
270 zone_magazine_t zm_next;
271 smr_seq_t zm_seq;
272 vm_offset_t zm_elems[0];
273 };
274
275 /*!
276 * @typedef zone_cache_t
277 *
278 * @brief
279  * Per-CPU cache of zone allocations.
280 *
281 * @discussion
282 * Below is a diagram of the caching system. This design is inspired by the
283 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
284 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
285 * zone allocator (itself derived from this seminal work).
286 *
287 * It is divided into 3 layers:
288 * - the per-cpu layer,
289 * - the recirculation depot layer,
290 * - the Zone Allocator.
291 *
292 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
293 * which are stacks of up to @c zc_mag_size() elements.
294 *
295 * <h2>CPU layer</h2>
296 *
297 * The CPU layer (@c zone_cache_t) looks like this:
298 *
299 * ╭─ a ─ f ─┬───────── zm_depot ──────────╮
300 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
301 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
302 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
303 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
304 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
305 * ╰─────────┴─────────────────────────────╯
306 *
307 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
308 * or free to. Serialization is achieved through disabling preemption, and only
309  * the current CPU can access those allocations. This is represented on the left
310 * hand side of the diagram above.
311 *
312 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
313 * full magazines, and is protected by the @c zm_depot_lock for access.
314 * The lock is expected to absolutely never be contended, as only the local CPU
315 * tends to access the local per-cpu depot in regular operation mode.
316 *
317 * However unlike UMA, our implementation allows for the zone GC to reclaim
318  * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
319 *
320 *
321 * <h2>Recirculation Depot</h2>
322 *
323 * The recirculation depot layer is a list similar to the per-cpu depot,
324 * however it is different in two fundamental ways:
325 *
326 * - it is protected by the regular zone lock,
327 * - elements referenced by the magazines in that layer appear free
328 * to the zone layer.
329 *
330 *
331 * <h2>Magazine circulation and sizing</h2>
332 *
333 * The caching system sizes itself dynamically. Operations that allocate/free
334 * a single element call @c zone_lock_nopreempt_check_contention() which records
335 * contention on the lock by doing a trylock and recording its success.
336 *
337 * This information is stored in the @c z_recirc_cont_cur field of the zone,
338 * and a windowed moving average is maintained in @c z_contention_wma.
339 * The periodically run function @c compute_zone_working_set_size() will then
340  * take this into account and decide whether to grow or shrink the number of
341  * buckets allowed in the depot, based on the @c zc_grow_level and @c zc_shrink_level
342 * thresholds.
343 *
344 * The per-cpu layer will attempt to work with its depot, finding both full and
345 * empty magazines cached there. If it can't get what it needs, then it will
346 * mediate with the zone recirculation layer. Such recirculation is done in
347 * batches in order to amortize lock holds.
348 * (See @c {zalloc,zfree}_cached_depot_recirculate()).
349 *
350  * The recirculation layer keeps track of the minimum number of magazines
351  * it held over time for each of the full and empty queues. This allows
352 * @c compute_zone_working_set_size() to return memory to the system when a zone
353 * stops being used as much.
354 *
355 * <h2>Security considerations</h2>
356 *
357 * The zone caching layer has been designed to avoid returning elements in
358 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
359 * and @c zfree() free to the (f) magazine, and only swap them when the
360 * requested operation cannot be fulfilled.
361 *
362 * The per-cpu overflow depot or the recirculation depots are similarly used
363 * in FIFO order.
364 *
365 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
366 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
367 * @field zc_free_cur denormalized number of elements in the (f) magazine
368 * @field zc_alloc_elems a pointer to the array of elements in (a)
369 * @field zc_free_elems a pointer to the array of elements in (f)
370 *
371 * @field zc_depot a list of @c zc_depot_cur full magazines
372 */
373 typedef struct zone_cache {
374 hw_lck_ticket_t zc_depot_lock;
375 uint16_t zc_alloc_cur;
376 uint16_t zc_free_cur;
377 vm_offset_t *zc_alloc_elems;
378 vm_offset_t *zc_free_elems;
379 struct zone_depot zc_depot;
380 smr_t zc_smr;
381 zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
382 } __attribute__((aligned(64))) * zone_cache_t;
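/*
 * Sketch of the allocation fast path implied by the layering described above
 * (illustrative only, not the exact code):
 *
 *   if (zc_alloc_cur != 0)         pop from the (a) magazine;
 *   else if (zc_free_cur != 0)     swap (a) and (f), then pop;
 *   else if (zc_depot not empty)   refill (a) from the per-cpu depot;
 *   else                           recirculate with the zone depot / zone lock.
 *
 * zfree() mirrors this with the roles of (a) and (f) exchanged.
 */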
383
384 #if !__x86_64__
385 static
386 #endif
387 __security_const_late struct {
388 struct mach_vm_range zi_map_range; /* all zone submaps */
389 struct mach_vm_range zi_ro_range; /* read-only range */
390 struct mach_vm_range zi_meta_range; /* debugging only */
391 struct mach_vm_range zi_bits_range; /* bits buddy allocator */
392 struct mach_vm_range zi_xtra_range; /* vm tracking metadata */
393 struct mach_vm_range zi_pgz_range;
394 struct zone_page_metadata *zi_pgz_meta;
395
396 /*
397 * The metadata lives within the zi_meta_range address range.
398 *
399 * The correct formula to find a metadata index is:
400 * absolute_page_index - page_index(zi_map_range.min_address)
401 *
402 * And then this index is used to dereference zi_meta_range.min_address
403 * as a `struct zone_page_metadata` array.
404 *
405  * To avoid doing that subtraction all the time in the various fast-paths,
406  * zi_meta_base is pre-offset by that minimum page index, so the math
407  * doesn't have to be redone on every lookup.
408 */
409 struct zone_page_metadata *zi_meta_base;
410 } zone_info;
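/*
 * For example (illustrative): because zi_meta_base is pre-biased, resolving
 * the metadata for an address in the zone map is conceptually just
 *
 *   meta = &zone_info.zi_meta_base[atop(addr)];
 *
 * The actual helpers (zone_pva_from_addr() / zone_pva_to_meta() below) only
 * differ in that they keep the sign bit of the page index intact.
 */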
411
412 __startup_data static struct mach_vm_range zone_map_range;
413 __startup_data static vm_map_size_t zone_meta_size;
414 __startup_data static vm_map_size_t zone_bits_size;
415 __startup_data static vm_map_size_t zone_xtra_size;
416
417 /*
418 * Initial array of metadata for stolen memory.
419 *
420 * The numbers here have to be kept in sync with vm_map_steal_memory()
421 * so that we have reserved enough metadata.
422 *
423 * After zone_init() has run (which happens while the kernel is still single
424 * threaded), the metadata is moved to its final dynamic location, and
425 * this array is unmapped with the rest of __startup_data at lockdown.
426 */
427 #define ZONE_EARLY_META_INLINE_COUNT 64
428 __startup_data
429 static struct zone_page_metadata
430 zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
431
432 #if __x86_64__
433 /*
434 * On Intel we can't "free" pmap stolen pages,
435 * so instead we use a static array in __KLDDATA
436 * which gets reclaimed at lockdown time.
437 */
438 __startup_data __attribute__((aligned(PAGE_SIZE)))
439 static uint8_t zone_early_pages_to_cram[PAGE_SIZE * 16];
440 #endif
441
442 /*
443 * The zone_locks_grp allows for collecting lock statistics.
444  * All locks are associated with this group in zinit.
445 * Look at tools/lockstat for debugging lock contention.
446 */
447 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
448 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
449
450 /*
451 * The zone metadata lock protects:
452 * - metadata faulting,
453 * - VM submap VA allocations,
454 * - early gap page queue list
455 */
456 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
457 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
458
459 /*
460 * Exclude more than one concurrent garbage collection
461 */
462 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
463 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
464 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
465
466 /*
467 * Panic logging metadata
468 */
469 bool panic_include_zprint = false;
470 bool panic_include_kalloc_types = false;
471 zone_t kalloc_type_src_zone = ZONE_NULL;
472 zone_t kalloc_type_dst_zone = ZONE_NULL;
473 mach_memory_info_t *panic_kext_memory_info = NULL;
474 vm_size_t panic_kext_memory_size = 0;
475 vm_offset_t panic_fault_address = 0;
476
477 /*
478 * Protects zone_array, num_zones, num_zones_in_use, and
479 * zone_destroyed_bitmap
480 */
481 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
482 static zone_id_t num_zones_in_use;
483 zone_id_t _Atomic num_zones;
484 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
485
486 /*
487 * Initial globals for zone stats until we can allocate the real ones.
488 * Those get migrated inside the per-CPU ones during zone_init() and
489 * this array is unmapped with the rest of __startup_data at lockdown.
490 */
491
492 /* zone to allocate zone_magazine structs from */
493 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
494 /*
495  * Zone caching stays off until pid1 is made and
496  * compute_zone_working_set_size() has run for the first time.
497 *
498 * -1 represents the "never enabled yet" value.
499 */
500 static int8_t zone_caching_disabled = -1;
501
502 __startup_data
503 static struct zone_stats zone_stats_startup[MAX_ZONES];
504 struct zone zone_array[MAX_ZONES];
505 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
506 [0 ... MAX_ZONES - 1] = {
507 .z_kheap_id = KHEAP_ID_NONE,
508 .z_noencrypt = false,
509 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
510 .z_kalloc_type = false,
511 .z_sig_eq = 0
512 },
513 };
514 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
515 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
516
517 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
518 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
519
520 /* Used to keep track of destroyed slots in the zone_array */
521 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
522
523 /* number of zone mapped pages used by all zones */
524 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
525 size_t zone_pages_wired;
526 size_t zone_guard_pages;
527
528 /* Time (in ms) after which we panic for zone exhaustions */
529 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
530 static bool zone_share_always = true;
531 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
532
533 #if VM_TAG_SIZECLASSES
534 /*
535 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
536 * zones only.
537 *
538 * There are 3 kinds of tags that can be used:
539 * - pre-registered VM_KERN_MEMORY_*
540 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
541 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
542 *
543 * The VM tracks the statistics in lazily allocated structures.
544 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
545 *
546 * If for some reason the requested tag cannot be accounted for,
547 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
548 *
549 * Each allocated element also remembers the tag it was assigned,
550 * which lets zalloc/zfree update statistics correctly.
551 */
552
553 /* enable tags for zones that ask for it */
554 static TUNABLE(bool, zone_tagging_on, "-zt", false);
555
556 /*
557 * Array of all sizeclasses used by kalloc variants so that we can
558 * have accounting per size class for each kalloc callsite
559 */
560 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
561 #endif /* VM_TAG_SIZECLASSES */
562
563 #if DEBUG || DEVELOPMENT
564 static int zalloc_simulate_vm_pressure;
565 #endif /* DEBUG || DEVELOPMENT */
566
567 #define Z_TUNABLE(t, n, d) \
568 TUNABLE(t, _##n, #n, d); \
569 __pure2 static inline t n(void) { return _##n; }
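/*
 * For example, Z_TUNABLE(uint16_t, zc_mag_size, 8) below expands to:
 *
 *   TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *   __pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 *
 * so the boot-arg backed storage stays private and is read through the
 * inline accessor.
 */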
570
571 /*
572 * Zone caching tunables
573 *
574 * zc_mag_size():
575  * size of magazines; larger values reduce contention at the expense of memory
576 *
577 * zc_enable_level
578 * number of contentions per second after which zone caching engages
579 * automatically.
580 *
581 * 0 to disable.
582 *
583 * zc_grow_level
584 * number of contentions per second x cpu after which the number of magazines
585 * allowed in the depot can grow. (in "Z_WMA_UNIT" units).
586 *
587 * zc_shrink_level
588 * number of contentions per second x cpu below which the number of magazines
589 * allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
590 *
591 * zc_pcpu_max
592 * maximum memory size in bytes that can hang from a CPU,
593 * which will affect how many magazines are allowed in the depot.
594 *
595 * The alloc/free magazines are assumed to be on average half-empty
596 * and to count for "1" unit of magazines.
597 *
598 * zc_autotrim_size
599 * Size allowed to hang extra from the recirculation depot before
600 * auto-trim kicks in.
601 *
602 * zc_autotrim_buckets
603 *
604 * How many buckets in excess of the working-set are allowed
605 * before auto-trim kicks in for empty buckets.
606 *
607 * zc_free_batch_size
608 * The size of batches of frees/reclaim that can be done keeping
609 * the zone lock held (and preemption disabled).
610 */
611 Z_TUNABLE(uint16_t, zc_mag_size, 8);
612 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
613 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
614 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
615 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
616 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
617 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
618 static Z_TUNABLE(uint32_t, zc_free_batch_size, 256);
619
620 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
621 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
622 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
623 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
624 [Z_SUBMAP_IDX_VM] = "VM",
625 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
626 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
627 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
628 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
629 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
630 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
631 #else
632 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
633 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
634 [Z_SUBMAP_IDX_DATA] = "DATA",
635 };
636
637 #if __x86_64__
638 #define ZONE_ENTROPY_CNT 8
639 #else
640 #define ZONE_ENTROPY_CNT 2
641 #endif
642 static struct zone_bool_gen {
643 struct bool_gen zbg_bg;
644 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
645 } zone_bool_gen[MAX_CPUS];
646
647 #if CONFIG_PROB_GZALLOC
648 /*
649 * Probabilistic gzalloc
650 * =====================
651 *
652 *
653 * Probabilistic guard zalloc samples allocations and will protect them by
654 * double-mapping the page holding them and returning the secondary virtual
655 * address to its callers.
656 *
657 * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
658 * are set.
659 *
660 *
661 * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
662 * most zalloc/kalloc features:
663 * - zone_require is functional
664 * - zone caching or zone tagging is compatible
665  * - non-blocking allocations work (they will always return NULL with gzalloc).
666 *
667 * PGZ limitations:
668 * - VA sequestering isn't respected, as the slots (which are in limited
669  * quantity) will be reused for any type; however, the PGZ quarantine
670 * somewhat mitigates the impact.
671 * - zones with elements larger than a page cannot be protected.
672 *
673 *
674 * Tunables:
675 * --------
676 *
677 * pgz=1:
678 * Turn on probabilistic guard malloc for all zones
679 *
680 * (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
681 *
682 * pgz_sample_rate=0 to 2^31
683 * average sample rate between two guarded allocations.
684 * 0 means every allocation.
685 *
686 * The default is a random number between 1000 and 10,000
687 *
688 * pgz_slots
689 * how many allocations to protect.
690 *
691 * Each costs:
692 * - a PTE in the pmap (when allocated)
693 * - 2 zone page meta's (every other page is a "guard" one, 32B total)
694  * - 64 bytes per backtrace.
695 * On LP64 this is <16K per 100 slots.
696 *
697 * The default is ~200 slots per G of physical ram (32k / G)
698 *
699 * TODO:
700 * - try harder to allocate elements at the "end" to catch OOB more reliably.
701 *
702 * pgz_quarantine
703 * how many slots should be free at any given time.
704 *
705 * PGZ will round robin through free slots to be reused, but free slots are
706 * important to detect use-after-free by acting as a quarantine.
707 *
708 * By default, PGZ will keep 33% of the slots around at all time.
709 *
710 * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
711 * Specific zones for which to enable probabilistic guard malloc.
712 * There must be no numbering gap (names after the gap will be ignored).
713 */
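/*
 * Rough cost example derived from the numbers above: with the default of
 * ~200 slots per GB, an 8 GB device would get ~1600 slots, costing on the
 * order of 256KB of metadata/backtrace storage (32k per GB), plus one PTE
 * per currently allocated slot.
 */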
714 #if DEBUG || DEVELOPMENT
715 static TUNABLE(bool, pgz_all, "pgz", true);
716 #else
717 static TUNABLE(bool, pgz_all, "pgz", false);
718 #endif
719 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
720 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
721 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
722 #endif /* CONFIG_PROB_GZALLOC */
723
724 static zone_t zone_find_largest(uint64_t *zone_size);
725
726 #endif /* !ZALLOC_TEST */
727 #pragma mark Zone metadata
728 #if !ZALLOC_TEST
729
730 static inline bool
731 zone_has_index(zone_t z, zone_id_t zid)
732 {
733 return zone_array + zid == z;
734 }
735
736 __abortlike
737 void
738 zone_invalid_panic(zone_t zone)
739 {
740 panic("zone %p isn't in the zone_array", zone);
741 }
742
743 __abortlike
744 static void
745 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
746 const char *kind)
747 {
748 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
749 kind, meta, zone_heap_name(zone), zone->z_name);
750 }
751
752 __abortlike
753 static void
754 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
755 {
756 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
757 (void *)addr, zone_heap_name(zone), zone->z_name);
758 }
759
760 __abortlike
761 static void
762 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
763 struct zone_page_metadata *meta)
764 {
765 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
766 zone_id_t zidx;
767 zone_t src_zone;
768
769 if (zsflags.z_kalloc_type) {
770 panic_include_kalloc_types = true;
771 kalloc_type_dst_zone = zone;
772 }
773
774 zidx = meta->zm_index;
775 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
776 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
777 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
778 zidx);
779 }
780
781 src_zone = &zone_array[zidx];
782 src_zsflags = zone_security_array[zidx];
783 if (src_zsflags.z_kalloc_type) {
784 panic_include_kalloc_types = true;
785 kalloc_type_src_zone = src_zone;
786 }
787
788 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
789 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
790 zone_heap_name(src_zone), src_zone->z_name, zidx);
791 }
792
793 __abortlike
794 static void
795 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
796 {
797 panic("metadata list corruption through element %p detected in zone %s%s",
798 meta, zone_heap_name(zone), zone->z_name);
799 }
800
801 __abortlike
802 static void
803 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
804 const char *kind)
805 {
806 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
807 zone_heap_name(zone), zone->z_name, meta);
808 }
809
810 __abortlike
811 static void
812 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
813 {
814 panic("%s: double free of %p to zone %s%s", caller,
815 (void *)addr, zone_heap_name(zone), zone->z_name);
816 }
817
818 __abortlike
819 static void
820 zone_accounting_panic(zone_t zone, const char *kind)
821 {
822 panic("accounting mismatch (%s) for zone %s%s", kind,
823 zone_heap_name(zone), zone->z_name);
824 }
825
826 #define zone_counter_sub(z, stat, value) ({ \
827 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
828 zone_accounting_panic(z, #stat " wrap-around"); \
829 } \
830 (z)->stat; \
831 })
832
833 static inline uint16_t
834 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
835 vm_offset_t esize)
836 {
837 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
838 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
839 }
840 return m->zm_alloc_size;
841 }
842
843 static inline uint16_t
844 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
845 vm_offset_t esize)
846 {
847 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
848 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
849 }
850 return m->zm_alloc_size;
851 }
852
853 __abortlike
854 static void
855 zone_nofail_panic(zone_t zone)
856 {
857 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
858 zone_heap_name(zone), zone->z_name);
859 }
860
861 __header_always_inline bool
862 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
863 {
864 const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
865 struct mach_vm_range r = { addr_start, addr_end };
866
867 return mach_vm_range_intersects(ro_r, &r);
868 }
869
870 #define from_range(r, addr, size) \
871 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
872 mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
873 mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))
874
875 #define from_ro_map(addr, size) \
876 from_range(&zone_info.zi_ro_range, addr, size)
877
878 #define from_zone_map(addr, size) \
879 from_range(&zone_info.zi_map_range, addr, size)
880
881 __header_always_inline bool
882 zone_pva_is_null(zone_pva_t page)
883 {
884 return page.packed_address == 0;
885 }
886
887 __header_always_inline bool
888 zone_pva_is_queue(zone_pva_t page)
889 {
890 // actual kernel pages have the top bit set
891 return (int32_t)page.packed_address > 0;
892 }
893
894 __header_always_inline bool
895 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
896 {
897 return pva1.packed_address == pva2.packed_address;
898 }
899
900 __header_always_inline zone_pva_t *
901 zone_pageq_base(void)
902 {
903 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
904
905 /*
906 * `-1` so that if the first __DATA variable is a page queue,
907  * it gets a non-zero index
908 */
909 return data_seg_start - 1;
910 }
911
912 __header_always_inline void
913 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
914 struct zone_page_metadata *meta)
915 {
916 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
917
918 if (!zone_pva_is_equal(*queue_head, oldv)) {
919 zone_page_metadata_list_corruption(z, meta);
920 }
921 *queue_head = meta->zm_page_next;
922 }
923
924 __header_always_inline zone_pva_t
925 zone_queue_encode(zone_pva_t *headp)
926 {
927 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
928 }
929
930 __header_always_inline zone_pva_t
931 zone_pva_from_addr(vm_address_t addr)
932 {
933 // cannot use atop() because we want to maintain the sign bit
934 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
935 }
936
937 __header_always_inline vm_address_t
938 zone_pva_to_addr(zone_pva_t page)
939 {
940 // cause sign extension so that we end up with the right address
941 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
942 }
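/*
 * Worked example (assuming 4K pages for illustration): packing the kernel
 * address 0xFFFFFF8000400000 keeps only the low 32 bits of its page index,
 * 0xF8000400, which is negative as an int32_t. Unpacking sign-extends it back
 * to 0xFFFFFFFFF8000400 before the shift, recovering the original address,
 * while the small positive indices used for queue heads are left untouched.
 */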
943
944 __header_always_inline struct zone_page_metadata *
945 zone_pva_to_meta(zone_pva_t page)
946 {
947 return &zone_info.zi_meta_base[page.packed_address];
948 }
949
950 __header_always_inline zone_pva_t
951 zone_pva_from_meta(struct zone_page_metadata *meta)
952 {
953 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
954 }
955
956 __header_always_inline struct zone_page_metadata *
957 zone_meta_from_addr(vm_offset_t addr)
958 {
959 return zone_pva_to_meta(zone_pva_from_addr(addr));
960 }
961
962 __header_always_inline zone_id_t
963 zone_index_from_ptr(const void *ptr)
964 {
965 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
966 }
967
968 __header_always_inline vm_offset_t
969 zone_meta_to_addr(struct zone_page_metadata *meta)
970 {
971 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
972 }
973
974 __attribute__((overloadable))
975 __header_always_inline void
976 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
977 {
978 if (!zone_has_index(z, meta->zm_index)) {
979 zone_page_metadata_index_confusion_panic(z, addr, meta);
980 }
981 }
982
983 __attribute__((overloadable))
984 __header_always_inline void
985 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
986 {
987 zone_meta_validate(z, meta, zone_meta_to_addr(meta));
988 }
989
990 __header_always_inline void
991 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
992 struct zone_page_metadata *meta)
993 {
994 zone_pva_t head = *headp;
995 zone_pva_t queue_pva = zone_queue_encode(headp);
996 struct zone_page_metadata *tmp;
997
998 meta->zm_page_next = head;
999 if (!zone_pva_is_null(head)) {
1000 tmp = zone_pva_to_meta(head);
1001 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
1002 zone_page_metadata_list_corruption(z, meta);
1003 }
1004 tmp->zm_page_prev = zone_pva_from_meta(meta);
1005 }
1006 meta->zm_page_prev = queue_pva;
1007 *headp = zone_pva_from_meta(meta);
1008 }
1009
1010 __header_always_inline struct zone_page_metadata *
1011 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1012 {
1013 zone_pva_t head = *headp;
1014 struct zone_page_metadata *meta = zone_pva_to_meta(head);
1015 struct zone_page_metadata *tmp;
1016
1017 zone_meta_validate(z, meta);
1018
1019 if (!zone_pva_is_null(meta->zm_page_next)) {
1020 tmp = zone_pva_to_meta(meta->zm_page_next);
1021 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1022 zone_page_metadata_list_corruption(z, meta);
1023 }
1024 tmp->zm_page_prev = meta->zm_page_prev;
1025 }
1026 *headp = meta->zm_page_next;
1027
1028 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1029
1030 return meta;
1031 }
1032
1033 __header_always_inline void
1034 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1035 {
1036 zone_pva_t meta_pva = zone_pva_from_meta(meta);
1037 struct zone_page_metadata *tmp;
1038
1039 if (!zone_pva_is_null(meta->zm_page_next)) {
1040 tmp = zone_pva_to_meta(meta->zm_page_next);
1041 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1042 zone_page_metadata_list_corruption(z, meta);
1043 }
1044 tmp->zm_page_prev = meta->zm_page_prev;
1045 }
1046 if (zone_pva_is_queue(meta->zm_page_prev)) {
1047 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1048 } else {
1049 tmp = zone_pva_to_meta(meta->zm_page_prev);
1050 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1051 zone_page_metadata_list_corruption(z, meta);
1052 }
1053 tmp->zm_page_next = meta->zm_page_next;
1054 }
1055
1056 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1057 }
1058
1059 __header_always_inline void
1060 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1061 struct zone_page_metadata *meta)
1062 {
1063 zone_meta_remqueue(z, meta);
1064 zone_meta_queue_push(z, headp, meta);
1065 }
1066
1067 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1068 static inline void
1069 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1070 {
1071 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1072
1073 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1074 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1075 zone_meta_requeue(z, &z->z_pageq_partial, m);
1076 zone_counter_sub(z, z_wired_empty, len);
1077 }
1078 }
1079
1080 /* allows a given metadata to reach the z_pageq_empty queue again */
1081 static inline void
1082 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1083 {
1084 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1085
1086 assert(new_size % sizeof(vm_offset_t) == 0);
1087 if (new_size == 0) {
1088 zone_meta_requeue(z, &z->z_pageq_empty, m);
1089 z->z_wired_empty += len;
1090 }
1091 }
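/*
 * Note: the ZM_ALLOC_SIZE_LOCK bias used above relies on element sizes being
 * pointer aligned (see the asserts): the real byte count in zm_alloc_size is
 * always a multiple of sizeof(vm_offset_t), so adding the odd bias of 1 keeps
 * the metadata out of z_pageq_empty without ever being mistaken for an actual
 * allocated element.
 */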
1092
1093 /*
1094 * Routine to populate a page backing metadata in the zone_metadata_region.
1095 * Must be called without the zone lock held as it might potentially block.
1096 */
1097 static void
1098 zone_meta_populate(vm_offset_t base, vm_size_t size)
1099 {
1100 struct zone_page_metadata *from = zone_meta_from_addr(base);
1101 struct zone_page_metadata *to = from + atop(size);
1102 vm_offset_t page_addr = trunc_page(from);
1103
1104 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1105 #if !KASAN
1106 /*
1107 * This can race with another thread doing a populate on the same metadata
1108 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1109 * fault in the shadow when we first access the metadata page. Avoid this
1110 * by always synchronizing on the zone_metadata_region lock with KASan.
1111 */
1112 if (pmap_find_phys(kernel_pmap, page_addr)) {
1113 continue;
1114 }
1115 #endif
1116
1117 for (;;) {
1118 kern_return_t ret = KERN_SUCCESS;
1119
1120 /*
1121 * All updates to the zone_metadata_region are done
1122 * under the zone_metadata_region_lck
1123 */
1124 zone_meta_lock();
1125 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1126 ret = kernel_memory_populate(page_addr,
1127 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1128 VM_KERN_MEMORY_OSFMK);
1129 }
1130 zone_meta_unlock();
1131
1132 if (ret == KERN_SUCCESS) {
1133 break;
1134 }
1135
1136 /*
1137  * We can't block waiting for pages while holding a global lock as it
1138  * leads to bad system deadlocks (hence KMA_NOPAGEWAIT above), so if the
1139  * populate failed, we need to do the VM_PAGE_WAIT() outside of the lock.
1140 */
1141 VM_PAGE_WAIT();
1142 }
1143 }
1144 }
1145
1146 __abortlike
1147 static void
1148 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1149 {
1150 struct zone_page_metadata *meta;
1151 const char *from_cache = "";
1152 vm_offset_t page;
1153
1154 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1155 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1156 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1157 }
1158 page = trunc_page(addr);
1159 meta = zone_meta_from_addr(addr);
1160
1161 if (!zone_has_index(zone, meta->zm_index)) {
1162 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1163 }
1164
1165 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1166 panic("metadata %p corresponding to addr %p being freed to "
1167 "zone %s%s%s, is marked as secondary per cpu page",
1168 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1169 from_cache);
1170 }
1171 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1172 page -= ptoa(meta->zm_page_index);
1173 meta -= meta->zm_page_index;
1174 }
1175
1176 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1177 panic("metadata %p corresponding to addr %p being freed to "
1178 "zone %s%s%s, has chunk len greater than max",
1179 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1180 from_cache);
1181 }
1182
1183 if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1184 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1185 "zone element size", (void *)addr, zone_heap_name(zone),
1186 zone->z_name, from_cache);
1187 }
1188
1189 zone_invalid_element_addr_panic(zone, addr);
1190 }
1191
1192 __attribute__((always_inline))
1193 static struct zone_page_metadata *
1194 zone_element_resolve(
1195 zone_t zone,
1196 vm_offset_t addr,
1197 vm_offset_t *idx)
1198 {
1199 struct zone_page_metadata *meta;
1200 vm_offset_t offs, eidx;
1201
1202 meta = zone_meta_from_addr(addr);
1203 if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1204 zone_invalid_element_panic(zone, addr);
1205 }
1206
1207 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1208 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1209 offs += ptoa(meta->zm_page_index);
1210 meta -= meta->zm_page_index;
1211 }
1212
1213 eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1214 if (eidx * zone_elem_outer_size(zone) != offs) {
1215 zone_invalid_element_panic(zone, addr);
1216 }
1217
1218 *idx = eidx;
1219 return meta;
1220 }
1221
1222 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1223 void *
1224 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1225 {
1226 vm_offset_t addr = (vm_offset_t)ptr;
1227 vm_offset_t end = addr + elem_size;
1228 vm_offset_t offs;
1229
1230 /*
1231 * 0-sized allocations in a KALLOC_MINSIZE bucket
1232 * would be offset to the next allocation which is incorrect.
1233 */
1234 req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1235
1236 /*
1237 * Given how chunks work, for a zone with PGZ guards on,
1238 * there's a single element which ends precisely
1239 * at the page boundary: the last one.
1240 */
1241 if (req_size == elem_size ||
1242 (end & PAGE_MASK) ||
1243 !zone_meta_from_addr(addr)->zm_guarded) {
1244 return ptr;
1245 }
1246
1247 offs = elem_size - req_size;
1248 zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1249
1250 return (char *)addr + offs;
1251 }
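/*
 * Worked example (assuming KALLOC_MINALIGN == 16 for illustration): a 100
 * byte request served from a 128 byte element that ends exactly at a guarded
 * page boundary rounds req_size up to 112, records zm_oob_offs = 16 in the
 * guard page's metadata, and returns ptr + 16, so the usable bytes end flush
 * against the guard page and linear overflows fault immediately.
 */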
1252 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1253
1254 __abortlike
1255 static void
1256 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1257 {
1258 struct zone_page_metadata *meta;
1259 vm_offset_t offs, size, page;
1260 zone_t zone;
1261
1262 page = trunc_page(addr);
1263 meta = zone_meta_from_addr(addr);
1264 zone = &zone_array[meta->zm_index];
1265
1266 if (zone->z_percpu) {
1267 panic("zone bound checks: address %p is a per-cpu allocation",
1268 (void *)addr);
1269 }
1270
1271 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1272 page -= ptoa(meta->zm_page_index);
1273 meta -= meta->zm_page_index;
1274 }
1275
1276 size = zone_elem_outer_size(zone);
1277 offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1278 zone->z_quo_magic, size);
1279 panic("zone bound checks: buffer %p of length %zd overflows "
1280 "object %p of size %zd in zone %p[%s%s]",
1281 (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1282 zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1283 }
1284
1285 void
1286 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1287 {
1288 struct zone_page_metadata *meta;
1289 vm_offset_t offs, size;
1290 zone_t zone;
1291
1292 if (!from_zone_map(addr, 1)) {
1293 return;
1294 }
1295
1296 #if CONFIG_PROB_GZALLOC
1297 if (__improbable(pgz_owned(addr))) {
1298 meta = zone_meta_from_addr(addr);
1299 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1300 }
1301 #endif /* CONFIG_PROB_GZALLOC */
1302 meta = zone_meta_from_addr(addr);
1303 zone = zone_by_id(meta->zm_index);
1304
1305 if (zone->z_percpu) {
1306 zone_element_bounds_check_panic(addr, len);
1307 }
1308
1309 if (zone->z_permanent) {
1310 /* We don't know bounds for those */
1311 return;
1312 }
1313
1314 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1315 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1316 offs += ptoa(meta->zm_page_index);
1317 }
1318 size = zone_elem_outer_size(zone);
1319 offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1320 if (len + zone_elem_redzone(zone) > size - offs) {
1321 zone_element_bounds_check_panic(addr, len);
1322 }
1323 }
1324
1325 /*
1326 * Routine to get the size of a zone allocated address.
1327  * If the address doesn't belong to the zone maps, returns 0.
1328 */
1329 vm_size_t
1330 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1331 {
1332 vm_address_t addr = (vm_address_t)elem;
1333 struct zone_page_metadata *meta;
1334 vm_size_t esize, offs, end;
1335 zone_t zone;
1336
1337 if (from_zone_map(addr, sizeof(void *))) {
1338 meta = zone_meta_from_addr(addr);
1339 zone = zone_by_id(meta->zm_index);
1340 esize = zone_elem_inner_size(zone);
1341 end = addr + esize;
1342 offs = 0;
1343
1344 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1345 /*
1346 * If the chunk uses guards, and that (addr + esize)
1347 * either crosses a page boundary or is at the boundary,
1348 * we need to look harder.
1349 */
1350 if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1351 /*
1352 * Because in the vast majority of cases the element
1353 * size is sub-page, and that meta[1] must be faulted,
1354 * we can quickly peek at whether it's a guard.
1355 *
1356 * For elements larger than a page, finding the guard
1357 * page requires a little more effort.
1358 */
1359 if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1360 offs = meta[1].zm_oob_offs;
1361 if (clear_oob) {
1362 meta[1].zm_oob_offs = 0;
1363 }
1364 } else if (esize > PAGE_SIZE) {
1365 struct zone_page_metadata *gmeta;
1366
1367 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1368 gmeta = meta + meta->zm_subchunk_len;
1369 } else {
1370 gmeta = meta + zone->z_chunk_pages;
1371 }
1372 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1373
1374 if (end >= zone_meta_to_addr(gmeta)) {
1375 offs = gmeta->zm_oob_offs;
1376 if (clear_oob) {
1377 gmeta->zm_oob_offs = 0;
1378 }
1379 }
1380 }
1381 }
1382 #else
1383 #pragma unused(end, clear_oob)
1384 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1385
1386 if (oob_offs) {
1387 *oob_offs = offs;
1388 }
1389 if (z) {
1390 *z = zone;
1391 }
1392 return esize;
1393 }
1394
1395 if (oob_offs) {
1396 *oob_offs = 0;
1397 }
1398
1399 return 0;
1400 }
1401
1402 zone_id_t
1403 zone_id_for_element(void *addr, vm_size_t esize)
1404 {
1405 zone_id_t zid = ZONE_ID_INVALID;
1406 if (from_zone_map(addr, esize)) {
1407 zid = zone_index_from_ptr(addr);
1408 __builtin_assume(zid != ZONE_ID_INVALID);
1409 }
1410 return zid;
1411 }
1412
1413 /* This function just formats the reason for the panics by redoing the checks */
1414 __abortlike
1415 static void
1416 zone_require_panic(zone_t zone, void *addr)
1417 {
1418 uint32_t zindex;
1419 zone_t other;
1420
1421 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1422 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1423 }
1424
1425 zindex = zone_index_from_ptr(addr);
1426 other = &zone_array[zindex];
1427 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1428 panic("zone_require failed: invalid zone index %d "
1429 "(addr: %p, expected: %s%s)", zindex,
1430 addr, zone_heap_name(zone), zone->z_name);
1431 } else {
1432 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1433 "(addr: %p, expected: %s%s)",
1434 zindex, zone_heap_name(other), other->z_name,
1435 addr, zone_heap_name(zone), zone->z_name);
1436 }
1437 }
1438
1439 __abortlike
1440 static void
1441 zone_id_require_panic(zone_id_t zid, void *addr)
1442 {
1443 zone_require_panic(&zone_array[zid], addr);
1444 }
1445
1446 /*
1447 * Routines to panic if a pointer is not mapped to an expected zone.
1448 * This can be used as a means of pinning an object to the zone it is expected
1449 * to be a part of. Causes a panic if the address does not belong to any
1450 * specified zone, does not belong to any zone, has been freed and therefore
1451 * unmapped from the zone, or the pointer contains an uninitialized value that
1452 * does not belong to any zone.
1453 */
1454 void
1455 zone_require(zone_t zone, void *addr)
1456 {
1457 vm_size_t esize = zone_elem_inner_size(zone);
1458
1459 if (from_zone_map(addr, esize) &&
1460 zone_has_index(zone, zone_index_from_ptr(addr))) {
1461 return;
1462 }
1463 zone_require_panic(zone, addr);
1464 }
1465
1466 void
1467 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1468 {
1469 if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1470 return;
1471 }
1472 zone_id_require_panic(zid, addr);
1473 }
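/*
 * Typical usage sketch (the zone id and type here are illustrative): a
 * subsystem handing out pointers to fixed-size objects can pin them before
 * trusting them, e.g.
 *
 *   zone_id_require(ZONE_ID_TASK, sizeof(struct task), task_ptr);
 *
 * which panics unless task_ptr was allocated from that zone.
 */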
1474
1475 bool
1476 zone_owns(zone_t zone, void *addr)
1477 {
1478 vm_size_t esize = zone_elem_inner_size(zone);
1479
1480 if (from_zone_map(addr, esize)) {
1481 return zone_has_index(zone, zone_index_from_ptr(addr));
1482 }
1483 return false;
1484 }
1485
1486 static inline struct mach_vm_range
1487 zone_kmem_suballoc(
1488 mach_vm_offset_t addr,
1489 vm_size_t size,
1490 int flags,
1491 vm_tag_t tag,
1492 vm_map_t *new_map)
1493 {
1494 struct mach_vm_range r;
1495
1496 *new_map = kmem_suballoc(kernel_map, &addr, size,
1497 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1498 flags, KMS_PERMANENT | KMS_NOFAIL, tag).kmr_submap;
1499
1500 r.min_address = addr;
1501 r.max_address = addr + size;
1502 return r;
1503 }
1504
1505 #endif /* !ZALLOC_TEST */
1506 #pragma mark Zone bits allocator
1507
1508 /*!
1509 * @defgroup Zone Bitmap allocator
1510 * @{
1511 *
1512 * @brief
1513 * Functions implementing the zone bitmap allocator
1514 *
1515 * @discussion
1516 * The zone allocator maintains which elements are allocated or free in bitmaps.
1517 *
1518 * When the number of elements per page is smaller than 32, it is stored inline
1519 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1520 * and @c zm_bitmap used for storage).
1521 *
1522 * When the number of elements is larger, then a bitmap is allocated from
1523  * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1524 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1525 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1526 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1527 * cannot be larger than 1024 bytes (8192 bits).
1528 *
1529  * This buddy allocator can actually accommodate allocations as large
1530 * as 8k on 16k systems and 2k on 4k systems.
1531 *
1532 * Note: @c zba_* functions are implementation details not meant to be used
1533  * outside of the implementation of the allocator itself. Interfaces to the rest of
1534 * the zone allocator are documented and not @c zba_* prefixed.
1535 */
1536
1537 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1538 #define ZBA_GRANULE sizeof(uint64_t)
1539 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1540 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1541 #define ZBA_MAX_ALLOC_ORDER 7
1542 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1543 #define ZBA_HEADS_COUNT (ZBA_MAX_ORDER + 1)
1544 #define ZBA_PTR_MASK 0x0fffffff
1545 #define ZBA_ORDER_SHIFT 29
1546 #define ZBA_HAS_EXTRA_BIT 0x10000000
1547
1548 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1549 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
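/*
 * Size arithmetic sketch: an order-k block spans (ZBA_GRANULE << k) bytes,
 * i.e. (ZBA_GRANULE_BITS << k) bits. ZBA_MAX_ALLOC_ORDER == 7 thus yields
 * 8 << 7 = 1024 bytes (8192 bits), the packed-reference limit described
 * above, and two order-ZBA_MAX_ORDER buddies exactly fill one ZBA_CHUNK_SIZE
 * chunk, which is what the first static_assert checks.
 */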
1550
1551 struct zone_bits_chain {
1552 uint32_t zbc_next;
1553 uint32_t zbc_prev;
1554 } __attribute__((aligned(ZBA_GRANULE)));
1555
1556 struct zone_bits_head {
1557 uint32_t zbh_next;
1558 uint32_t zbh_unused;
1559 } __attribute__((aligned(ZBA_GRANULE)));
1560
1561 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1562 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1563
1564 struct zone_bits_allocator_meta {
1565 uint32_t zbam_left;
1566 uint32_t zbam_right;
1567 struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1568 struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1569 };
1570
1571 struct zone_bits_allocator_header {
1572 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1573 };
1574
1575 #if ZALLOC_TEST
1576 static struct zalloc_bits_allocator_test_setup {
1577 vm_offset_t zbats_base;
1578 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1579 } zba_test_info;
1580
1581 static struct zone_bits_allocator_header *
1582 zba_base_header(void)
1583 {
1584 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1585 }
1586
1587 static kern_return_t
1588 zba_populate(uint32_t n, bool with_extra __unused)
1589 {
1590 vm_address_t base = zba_test_info.zbats_base;
1591 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1592
1593 return KERN_SUCCESS;
1594 }
1595 #else
1596 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1597 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1598
1599 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1600 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1601
1602 static struct zone_bits_allocator_header *
1603 zba_base_header(void)
1604 {
1605 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1606 }
1607
1608 static void
1609 zba_lock(void)
1610 {
1611 lck_mtx_lock(&zba_mtx);
1612 }
1613
1614 static void
1615 zba_unlock(void)
1616 {
1617 lck_mtx_unlock(&zba_mtx);
1618 }
1619
1620 __abortlike
1621 static void
1622 zba_memory_exhausted(void)
1623 {
1624 uint64_t zsize = 0;
1625 zone_t z = zone_find_largest(&zsize);
1626 panic("zba_populate: out of bitmap space, "
1627 "likely due to memory leak in zone [%s%s] "
1628 "(%u%c, %d elements allocated)",
1629 zone_heap_name(z), zone_name(z),
1630 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1631 zone_count_allocated(z));
1632 }
1633
1634
1635 static kern_return_t
1636 zba_populate(uint32_t n, bool with_extra)
1637 {
1638 vm_size_t bits_size = ZBA_CHUNK_SIZE;
1639 vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1640 vm_address_t bits_addr;
1641 vm_address_t xtra_addr;
1642 kern_return_t kr;
1643
1644 bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1645 xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1646
1647 kr = kernel_memory_populate(bits_addr, bits_size,
1648 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1649 VM_KERN_MEMORY_OSFMK);
1650 if (kr != KERN_SUCCESS) {
1651 return kr;
1652 }
1653
1654
1655 if (with_extra) {
1656 kr = kernel_memory_populate(xtra_addr, xtra_size,
1657 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1658 VM_KERN_MEMORY_OSFMK);
1659 if (kr != KERN_SUCCESS) {
1660 kernel_memory_depopulate(bits_addr, bits_size,
1661 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1662 VM_KERN_MEMORY_OSFMK);
1663 }
1664 }
1665
1666 return kr;
1667 }
1668 #endif
1669
1670 __pure2
1671 static struct zone_bits_allocator_meta *
1672 zba_meta(void)
1673 {
1674 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1675 }
1676
1677 __pure2
1678 static uint64_t *
1679 zba_slot_base(void)
1680 {
1681 return (uint64_t *)zba_base_header();
1682 }
1683
1684 __pure2
1685 static struct zone_bits_head *
1686 zba_head(uint32_t order, bool with_extra)
1687 {
1688 if (with_extra) {
1689 return &zba_meta()->zbam_lists_with_extra[order];
1690 } else {
1691 return &zba_meta()->zbam_lists[order];
1692 }
1693 }
1694
1695 __pure2
1696 static uint32_t
1697 zba_head_index(struct zone_bits_head *hd)
1698 {
1699 return (uint32_t)((uint64_t *)hd - zba_slot_base());
1700 }
1701
1702 __pure2
1703 static struct zone_bits_chain *
1704 zba_chain_for_index(uint32_t index)
1705 {
1706 return (struct zone_bits_chain *)(zba_slot_base() + index);
1707 }
1708
1709 __pure2
1710 static uint32_t
1711 zba_chain_to_index(const struct zone_bits_chain *zbc)
1712 {
1713 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1714 }
1715
1716 __abortlike
1717 static void
1718 zba_head_corruption_panic(uint32_t order, bool with_extra)
1719 {
1720 panic("zone bits allocator head[%d:%d:%p] is corrupt",
1721 order, with_extra, zba_head(order, with_extra));
1722 }
1723
1724 __abortlike
1725 static void
1726 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1727 {
1728 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1729 }
1730
1731 static void
1732 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1733 {
1734 struct zone_bits_head *hd = zba_head(order, with_extra);
1735 uint32_t hd_index = zba_head_index(hd);
1736 uint32_t index = zba_chain_to_index(zbc);
1737 struct zone_bits_chain *next;
1738
1739 if (hd->zbh_next) {
1740 next = zba_chain_for_index(hd->zbh_next);
1741 if (next->zbc_prev != hd_index) {
1742 zba_head_corruption_panic(order, with_extra);
1743 }
1744 next->zbc_prev = index;
1745 }
1746 zbc->zbc_next = hd->zbh_next;
1747 zbc->zbc_prev = hd_index;
1748 hd->zbh_next = index;
1749 }
1750
1751 static void
1752 zba_remove_block(struct zone_bits_chain *zbc)
1753 {
1754 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1755 uint32_t index = zba_chain_to_index(zbc);
1756
1757 if (prev->zbc_next != index) {
1758 zba_chain_corruption_panic(prev, zbc);
1759 }
1760 if ((prev->zbc_next = zbc->zbc_next)) {
1761 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1762 if (next->zbc_prev != index) {
1763 zba_chain_corruption_panic(zbc, next);
1764 }
1765 next->zbc_prev = zbc->zbc_prev;
1766 }
1767 }
1768
1769 static vm_address_t
1770 zba_try_pop_block(uint32_t order, bool with_extra)
1771 {
1772 struct zone_bits_head *hd = zba_head(order, with_extra);
1773 struct zone_bits_chain *zbc;
1774
1775 if (hd->zbh_next == 0) {
1776 return 0;
1777 }
1778
1779 zbc = zba_chain_for_index(hd->zbh_next);
1780 zba_remove_block(zbc);
1781 return (vm_address_t)zbc;
1782 }
1783
1784 static struct zone_bits_allocator_header *
1785 zba_header(vm_offset_t addr)
1786 {
1787 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1788 return (struct zone_bits_allocator_header *)addr;
1789 }
1790
1791 static size_t
1792 zba_node_parent(size_t node)
1793 {
1794 return (node - 1) / 2;
1795 }
1796
1797 static size_t
1798 zba_node_left_child(size_t node)
1799 {
1800 return node * 2 + 1;
1801 }
1802
1803 static size_t
1804 zba_node_buddy(size_t node)
1805 {
1806 return ((node - 1) ^ 1) + 1;
1807 }
1808
1809 static size_t
1810 zba_node(vm_offset_t addr, uint32_t order)
1811 {
1812 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1813 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1814 }
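/*
 * Illustrative sketch of the implicit tree indexing above (assuming 16k
 * chunks, i.e. ZBA_MAX_ORDER == 10): the two order-10 halves of a chunk
 * are nodes 1 and 2, the four order-9 blocks are nodes 3..6, and so on.
 * For an address 8k into a chunk (granule offset 1024) at order 9,
 * zba_node() yields (1024 >> 9) + (1 << 2) - 1 == 5, whose buddy is
 * zba_node_buddy(5) == 6 and whose parent is zba_node_parent(5) == 2.
 */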
1815
1816 static struct zone_bits_chain *
1817 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1818 {
1819 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1820 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1821 }
1822
1823 static void
1824 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1825 {
1826 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1827 }
1828
1829 static bool
1830 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1831 {
1832 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1833 }
1834
1835 static void
1836 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1837 {
1838 struct zone_bits_allocator_header *zbah = zba_header(addr);
1839 struct zone_bits_chain *zbc;
1840 size_t node = zba_node(addr, order);
1841
1842 while (node) {
1843 size_t parent = zba_node_parent(node);
1844
1845 zba_node_flip_split(zbah, parent);
1846 if (zba_node_is_split(zbah, parent)) {
1847 break;
1848 }
1849
1850 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1851 zba_remove_block(zbc);
1852 order++;
1853 node = parent;
1854 }
1855
1856 zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1857 }
1858
1859 static vm_size_t
1860 zba_chunk_header_size(uint32_t n)
1861 {
1862 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1863 if (n == 0) {
1864 hdr_size += sizeof(struct zone_bits_allocator_meta);
1865 }
1866 return hdr_size;
1867 }
1868
1869 static void
1870 zba_init_chunk(uint32_t n, bool with_extra)
1871 {
1872 vm_size_t hdr_size = zba_chunk_header_size(n);
1873 vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1874 struct zone_bits_allocator_header *zbah = zba_header(page);
1875 vm_size_t size = ZBA_CHUNK_SIZE;
1876 size_t node;
1877
1878 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1879 if (size < hdr_size + (ZBA_GRANULE << o)) {
1880 continue;
1881 }
1882 size -= ZBA_GRANULE << o;
1883 node = zba_node(page + size, o);
1884 zba_node_flip_split(zbah, zba_node_parent(node));
1885 zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1886 }
1887 }
1888
1889 __attribute__((noinline))
1890 static void
1891 zba_grow(bool with_extra)
1892 {
1893 struct zone_bits_allocator_meta *meta = zba_meta();
1894 kern_return_t kr = KERN_SUCCESS;
1895 uint32_t chunk;
1896
1897 #if !ZALLOC_TEST
1898 if (meta->zbam_left >= meta->zbam_right) {
1899 zba_memory_exhausted();
1900 }
1901 #endif
1902
1903 if (with_extra) {
1904 chunk = meta->zbam_right - 1;
1905 } else {
1906 chunk = meta->zbam_left;
1907 }
1908
1909 kr = zba_populate(chunk, with_extra);
1910 if (kr == KERN_SUCCESS) {
1911 if (with_extra) {
1912 meta->zbam_right -= 1;
1913 } else {
1914 meta->zbam_left += 1;
1915 }
1916
1917 zba_init_chunk(chunk, with_extra);
1918 #if !ZALLOC_TEST
1919 } else {
1920 /*
1921 * zba_populate() has to be allowed to fail populating:
1922 * since we are under a global lock, we need to do the
1923 * VM_PAGE_WAIT() outside of the lock.
1924 */
1925 assert(kr == KERN_RESOURCE_SHORTAGE);
1926 zba_unlock();
1927 VM_PAGE_WAIT();
1928 zba_lock();
1929 #endif
1930 }
1931 }
1932
1933 static vm_offset_t
1934 zba_alloc(uint32_t order, bool with_extra)
1935 {
1936 struct zone_bits_allocator_header *zbah;
1937 uint32_t cur = order;
1938 vm_address_t addr;
1939 size_t node;
1940
1941 while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1942 if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1943 zba_grow(with_extra);
1944 cur = order;
1945 }
1946 }
1947
1948 zbah = zba_header(addr);
1949 node = zba_node(addr, cur);
1950 zba_node_flip_split(zbah, zba_node_parent(node));
1951 while (cur > order) {
1952 cur--;
1953 zba_node_flip_split(zbah, node);
1954 node = zba_node_left_child(node);
1955 zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1956 cur, with_extra);
1957 }
1958
1959 return addr;
1960 }
1961
1962 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
1963 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
1964 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
1965 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
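/*
 * For example (illustrative only): zba_map_bit(uint32_t, 5) is 0x20,
 * zba_map_mask_lt(uint32_t, 5) is 0x1f (bits below 5), and
 * zba_map_mask_ge(uint32_t, 5) is 0xffffffe0 (bit 5 and above).
 */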
1966
1967 #if !ZALLOC_TEST
1968 #if VM_TAG_SIZECLASSES
1969
1970 static void *
1971 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1972 {
1973 vm_offset_t base = zone_info.zi_xtra_range.min_address;
1974 vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1975
1976 return (void *)(base + ((offs + idx) << zba_xtra_shift));
1977 }
1978
1979 #endif /* VM_TAG_SIZECLASSES */
1980
1981 static uint32_t
1982 zba_bits_ref_order(uint32_t bref)
1983 {
1984 return bref >> ZBA_ORDER_SHIFT;
1985 }
1986
1987 static bitmap_t *
1988 zba_bits_ref_ptr(uint32_t bref)
1989 {
1990 return zba_slot_base() + (bref & ZBA_PTR_MASK);
1991 }
1992
1993 static vm_offset_t
1994 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
1995 zalloc_flags_t flags, vm_offset_t eidx)
1996 {
1997 size_t i = eidx / 32;
1998 uint32_t map;
1999
2000 if (eidx % 32) {
2001 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
2002 if (map) {
2003 eidx = __builtin_ctz(map);
2004 meta[i].zm_bitmap ^= 1u << eidx;
2005 return i * 32 + eidx;
2006 }
2007 i++;
2008 }
2009
2010 uint32_t chunk_len = meta->zm_chunk_len;
2011 if (flags & Z_PCPU) {
2012 chunk_len = zpercpu_count();
2013 }
2014 for (int j = 0; j < chunk_len; j++, i++) {
2015 if (i >= chunk_len) {
2016 i = 0;
2017 }
2018 if (__probable(map = meta[i].zm_bitmap)) {
2019 meta[i].zm_bitmap &= map - 1;
2020 return i * 32 + __builtin_ctz(map);
2021 }
2022 }
2023
2024 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2025 }
2026
2027 static vm_offset_t
2028 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2029 vm_offset_t eidx)
2030 {
2031 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2032 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2033 size_t i = eidx / 64;
2034 uint64_t map;
2035
2036 if (eidx % 64) {
2037 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2038 if (map) {
2039 eidx = __builtin_ctzll(map);
2040 bits[i] ^= 1ull << eidx;
2041 return i * 64 + eidx;
2042 }
2043 i++;
2044 }
2045
2046 for (int j = 0; j < bits_size; i++, j++) {
2047 if (i >= bits_size) {
2048 i = 0;
2049 }
2050 if (__probable(map = bits[i])) {
2051 bits[i] &= map - 1;
2052 return i * 64 + __builtin_ctzll(map);
2053 }
2054 }
2055
2056 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2057 }
2058
2059 /*!
2060 * @function zone_meta_find_and_clear_bit
2061 *
2062 * @brief
2063 * The core of the bitmap allocator: find a bit set in the bitmaps.
2064 *
2065 * @discussion
2066 * This method will round robin through available allocations,
2067 * with a per-CPU memory of the last allocated element index.
2068 *
2069 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2070 * double-free bugs way too practical.
2071 *
2072 * @param zone The zone we're allocating from.
2073 * @param meta The main metadata for the chunk being allocated from.
2074 * @param flags the alloc flags (for @c Z_PCPU).
2075 */
2076 static vm_offset_t
2077 zone_meta_find_and_clear_bit(
2078 zone_t zone,
2079 zone_stats_t zs,
2080 struct zone_page_metadata *meta,
2081 zalloc_flags_t flags)
2082 {
2083 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2084
2085 if (meta->zm_inline_bitmap) {
2086 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2087 } else {
2088 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2089 }
2090 zs->zs_alloc_rr = (uint16_t)eidx;
2091 return eidx;
2092 }
2093
2094 /*!
2095 * @function zone_meta_bits_init_inline
2096 *
2097 * @brief
2098 * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2099 *
2100 * @param meta The main metadata for the initialized chunk.
2101 * @param count The number of elements the chunk can hold
2102 * (which might be partial for partially populated chunks).
2103 */
2104 static void
2105 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2106 {
2107 /*
2108 * We're called with the metadata zm_bitmap fields already zeroed out.
2109 */
2110 for (size_t i = 0; i < count / 32; i++) {
2111 meta[i].zm_bitmap = ~0u;
2112 }
2113 if (count % 32) {
2114 meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2115 }
2116 }
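/*
 * For instance (illustrative only): with count == 40, meta[0].zm_bitmap
 * becomes ~0u (elements 0..31 free) and meta[1].zm_bitmap becomes 0xff
 * (elements 32..39 free), since zba_map_mask_lt(uint32_t, 40) == 0xff.
 */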
2117
2118 /*!
2119 * @function zone_meta_bits_alloc_init
2120 *
2121 * @brief
2122 * Allocates a zm_bitmap field for a newly assigned chunk.
2123 *
2124 * @param count The number of elements the chunk can hold
2125 * (which might be partial for partially populated chunks).
2126 * @param nbits The maximum number of bits that will be used.
2127 * @param with_extra Whether "VM Tracking" metadata needs to be allocated.
2128 */
2129 static uint32_t
2130 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2131 {
2132 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2133 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2134
2135 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2136 uint64_t *bits;
2137 size_t i = 0;
2138
2139 assert(order <= ZBA_MAX_ALLOC_ORDER);
2140 assert(count <= ZBA_GRANULE_BITS << order);
2141
2142 zba_lock();
2143 bits = (uint64_t *)zba_alloc(order, with_extra);
2144 zba_unlock();
2145
2146 while (i < count / 64) {
2147 bits[i++] = ~0ull;
2148 }
2149 if (count % 64) {
2150 bits[i++] = zba_map_mask_lt(uint64_t, count);
2151 }
2152 while (i < 1u << order) {
2153 bits[i++] = 0;
2154 }
2155
2156 return (uint32_t)(bits - zba_slot_base()) +
2157 (order << ZBA_ORDER_SHIFT) +
2158 (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2159 }
2160
2161 /*!
2162 * @function zone_meta_bits_merge
2163 *
2164 * @brief
2165 * Adds elements <code>[start, end)</code> to a chunk being extended.
2166 *
2167 * @param meta The main metadata for the extended chunk.
2168 * @param start The index of the first element to add to the chunk.
2169 * @param end The index of the last (exclusive) element to add.
2170 */
2171 static void
2172 zone_meta_bits_merge(struct zone_page_metadata *meta,
2173 uint32_t start, uint32_t end)
2174 {
2175 if (meta->zm_inline_bitmap) {
2176 while (start < end) {
2177 size_t s_i = start / 32;
2178 size_t s_e = end / 32;
2179
2180 if (s_i == s_e) {
2181 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2182 zba_map_mask_ge(uint32_t, start);
2183 break;
2184 }
2185
2186 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2187 start += 32 - (start % 32);
2188 }
2189 } else {
2190 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2191
2192 while (start < end) {
2193 size_t s_i = start / 64;
2194 size_t s_e = end / 64;
2195
2196 if (s_i == s_e) {
2197 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2198 zba_map_mask_ge(uint64_t, start);
2199 break;
2200 }
2201 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2202 start += 64 - (start % 64);
2203 }
2204 }
2205 }
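/*
 * For instance (illustrative only): merging [10, 40) into an inline bitmap
 * sets bits 10..31 of meta[0].zm_bitmap (mask 0xfffffc00) on the first
 * pass, then bits 0..7 of meta[1].zm_bitmap (mask 0xff) on the second.
 */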
2206
2207 /*!
2208 * @function zone_bits_free
2209 *
2210 * @brief
2211 * Frees a bitmap to the zone bitmap allocator.
2212 *
2213 * @param bref
2214 * A bitmap reference set by @c zone_meta_bits_init() in a @c zm_bitmap field.
2215 */
2216 static void
2217 zone_bits_free(uint32_t bref)
2218 {
2219 zba_lock();
2220 zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2221 zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2222 zba_unlock();
2223 }
2224
2225 /*!
2226 * @function zone_meta_is_free
2227 *
2228 * @brief
2229 * Returns whether a given element appears free.
2230 */
2231 static bool
2232 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2233 {
2234 if (meta->zm_inline_bitmap) {
2235 uint32_t bit = zba_map_bit(uint32_t, eidx);
2236 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2237 } else {
2238 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2239 uint64_t bit = zba_map_bit(uint64_t, eidx);
2240 return bits[zba_map_index(uint64_t, eidx)] & bit;
2241 }
2242 }
2243
2244 /*!
2245 * @function zone_meta_mark_free
2246 *
2247 * @brief
2248 * Marks an element as free and returns whether it was marked as used.
2249 */
2250 static bool
2251 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2252 {
2253 if (meta->zm_inline_bitmap) {
2254 uint32_t bit = zba_map_bit(uint32_t, eidx);
2255 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2256 return false;
2257 }
2258 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2259 } else {
2260 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2261 uint64_t bit = zba_map_bit(uint64_t, eidx);
2262 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2263 return false;
2264 }
2265 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2266 }
2267 return true;
2268 }
2269
2270 #if VM_TAG_SIZECLASSES
2271
2272 __startup_func
2273 void
2274 __zone_site_register(vm_allocation_site_t *site)
2275 {
2276 if (zone_tagging_on) {
2277 vm_tag_alloc(site);
2278 }
2279 }
2280
2281 uint16_t
2282 zone_index_from_tag_index(uint32_t sizeclass_idx)
2283 {
2284 return zone_tags_sizeclasses[sizeclass_idx];
2285 }
2286
2287 #endif /* VM_TAG_SIZECLASSES */
2288 #endif /* !ZALLOC_TEST */
2289 /*! @} */
2290 #pragma mark zalloc helpers
2291 #if !ZALLOC_TEST
2292
2293 static inline void *
2294 zstack_tbi_fix(vm_offset_t elem)
2295 {
2296 #if KASAN_TBI
2297 elem = kasan_tbi_fix_address_tag(elem);
2298 #endif
2299 return (void *)elem;
2300 }
2301
2302 static inline vm_offset_t
2303 zstack_tbi_fill(void *addr)
2304 {
2305 vm_offset_t elem = (vm_offset_t)addr;
2306
2307 #if KASAN_TBI
2308 elem = VM_KERNEL_TBI_FILL(elem);
2309 #endif
2310 return elem;
2311 }
2312
2313 __attribute__((always_inline))
2314 static inline void
2315 zstack_push_no_delta(zstack_t *stack, void *addr)
2316 {
2317 vm_offset_t elem = zstack_tbi_fill(addr);
2318
2319 *(vm_offset_t *)addr = stack->z_head - elem;
2320 stack->z_head = elem;
2321 }
2322
2323 __attribute__((always_inline))
2324 void
2325 zstack_push(zstack_t *stack, void *addr)
2326 {
2327 zstack_push_no_delta(stack, addr);
2328 stack->z_count++;
2329 }
2330
2331 __attribute__((always_inline))
2332 static inline void *
2333 zstack_pop_no_delta(zstack_t *stack)
2334 {
2335 void *addr = zstack_tbi_fix(stack->z_head);
2336
2337 stack->z_head += *(vm_offset_t *)addr;
2338 *(vm_offset_t *)addr = 0;
2339
2340 return addr;
2341 }
2342
2343 __attribute__((always_inline))
2344 void *
2345 zstack_pop(zstack_t *stack)
2346 {
2347 stack->z_count--;
2348 return zstack_pop_no_delta(stack);
2349 }
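/*
 * Sketch of the delta encoding used above (illustrative, ignoring the
 * KASAN_TBI tag fixups): a pushed element stores the delta to the previous
 * head rather than a raw pointer.  After zstack_push(&stack, a) then
 * zstack_push(&stack, b), *(vm_offset_t *)b holds (a - b), and popping b
 * restores the head to b + (a - b) == a.
 */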
2350
2351 static inline void
2352 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2353 {
2354 uint32_t ticket;
2355
2356 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2357 &ticket, &zone_locks_grp))) {
2358 return;
2359 }
2360
2361 hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2362
2363 /*
2364 * If zone caching has been disabled due to memory pressure,
2365 * then recording contention is not useful, give the system
2366 * time to recover.
2367 */
2368 if (__probable(!zone_caching_disabled)) {
2369 zone->z_recirc_cont_cur++;
2370 }
2371 }
2372
2373 static inline void
2374 zone_recirc_lock_nopreempt(zone_t zone)
2375 {
2376 hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2377 }
2378
2379 static inline void
2380 zone_recirc_unlock_nopreempt(zone_t zone)
2381 {
2382 hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2383 }
2384
2385 static inline void
2386 zone_lock_nopreempt_check_contention(zone_t zone)
2387 {
2388 uint32_t ticket;
2389 #if KASAN_FAKESTACK
2390 spl_t s = 0;
2391 if (zone->z_kasan_fakestacks) {
2392 s = splsched();
2393 }
2394 #endif /* KASAN_FAKESTACK */
2395
2396 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2397 &zone_locks_grp))) {
2398 #if KASAN_FAKESTACK
2399 zone->z_kasan_spl = s;
2400 #endif /* KASAN_FAKESTACK */
2401 return;
2402 }
2403
2404 hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2405 #if KASAN_FAKESTACK
2406 zone->z_kasan_spl = s;
2407 #endif /* KASAN_FAKESTACK */
2408
2409 /*
2410 * If zone caching has been disabled due to memory pressure,
2411 * then recording contention is not useful, give the system
2412 * time to recover.
2413 */
2414 if (__probable(!zone_caching_disabled && !zone->z_pcpu_cache)) {
2415 zone->z_recirc_cont_cur++;
2416 }
2417 }
2418
2419 static inline void
2420 zone_lock_nopreempt(zone_t zone)
2421 {
2422 #if KASAN_FAKESTACK
2423 spl_t s = 0;
2424 if (zone->z_kasan_fakestacks) {
2425 s = splsched();
2426 }
2427 #endif /* KASAN_FAKESTACK */
2428 hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2429 #if KASAN_FAKESTACK
2430 zone->z_kasan_spl = s;
2431 #endif /* KASAN_FAKESTACK */
2432 }
2433
2434 static inline void
2435 zone_unlock_nopreempt(zone_t zone)
2436 {
2437 #if KASAN_FAKESTACK
2438 spl_t s = zone->z_kasan_spl;
2439 zone->z_kasan_spl = 0;
2440 #endif /* KASAN_FAKESTACK */
2441 hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2442 #if KASAN_FAKESTACK
2443 if (zone->z_kasan_fakestacks) {
2444 splx(s);
2445 }
2446 #endif /* KASAN_FAKESTACK */
2447 }
2448
2449 static inline void
2450 zone_depot_lock_nopreempt(zone_cache_t zc)
2451 {
2452 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2453 }
2454
2455 static inline void
2456 zone_depot_unlock_nopreempt(zone_cache_t zc)
2457 {
2458 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2459 }
2460
2461 static inline void
2462 zone_depot_lock(zone_cache_t zc)
2463 {
2464 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2465 }
2466
2467 static inline void
2468 zone_depot_unlock(zone_cache_t zc)
2469 {
2470 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2471 }
2472
2473 zone_t
2474 zone_by_id(size_t zid)
2475 {
2476 return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2477 }
2478
2479 static inline bool
2480 zone_supports_vm(zone_t z)
2481 {
2482 /*
2483 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2484 * to overcommit because they're used to reclaim memory
2485 * (VM support).
2486 */
2487 return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2488 z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2489 }
2490
2491 const char *
2492 zone_name(zone_t z)
2493 {
2494 return z->z_name;
2495 }
2496
2497 const char *
2498 zone_heap_name(zone_t z)
2499 {
2500 zone_security_flags_t zsflags = zone_security_config(z);
2501 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2502 return kalloc_heap_names[zsflags.z_kheap_id];
2503 }
2504 return "invalid";
2505 }
2506
2507 static uint32_t
2508 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2509 {
2510 vm_size_t elem_count, chunks;
2511
2512 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2513 zone_elem_outer_size(z);
2514 chunks = (max_elems + elem_count - 1) / elem_count;
2515
2516 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2517 }
2518
2519 static inline vm_size_t
2520 zone_submaps_approx_size(void)
2521 {
2522 vm_size_t size = 0;
2523
2524 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2525 if (zone_submaps[idx] != VM_MAP_NULL) {
2526 size += zone_submaps[idx]->size;
2527 }
2528 }
2529
2530 return size;
2531 }
2532
2533 static inline void
2534 zone_depot_init(struct zone_depot *zd)
2535 {
2536 *zd = (struct zone_depot){
2537 .zd_tail = &zd->zd_head,
2538 };
2539 }
2540
2541 static inline void
2542 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2543 {
2544 if (zd->zd_full++ == 0) {
2545 zd->zd_tail = &mag->zm_next;
2546 }
2547 mag->zm_next = zd->zd_head;
2548 zd->zd_head = mag;
2549 }
2550
2551 static inline void
2552 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2553 {
2554 zd->zd_full++;
2555 mag->zm_next = *zd->zd_tail;
2556 *zd->zd_tail = mag;
2557 zd->zd_tail = &mag->zm_next;
2558 }
2559
2560 static inline void
2561 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2562 {
2563 zd->zd_empty++;
2564 mag->zm_next = *zd->zd_tail;
2565 *zd->zd_tail = mag;
2566 }
2567
2568 static inline zone_magazine_t
2569 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2570 {
2571 zone_magazine_t mag = zd->zd_head;
2572
2573 assert(zd->zd_full);
2574
2575 zd->zd_full--;
2576 if (z && z->z_recirc_full_min > zd->zd_full) {
2577 z->z_recirc_full_min = zd->zd_full;
2578 }
2579 zd->zd_head = mag->zm_next;
2580 if (zd->zd_full == 0) {
2581 zd->zd_tail = &zd->zd_head;
2582 }
2583
2584 mag->zm_next = NULL;
2585 return mag;
2586 }
2587
2588 static inline zone_magazine_t
2589 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2590 {
2591 zone_magazine_t mag = *zd->zd_tail;
2592
2593 assert(zd->zd_empty);
2594
2595 zd->zd_empty--;
2596 if (z && z->z_recirc_empty_min > zd->zd_empty) {
2597 z->z_recirc_empty_min = zd->zd_empty;
2598 }
2599 *zd->zd_tail = mag->zm_next;
2600
2601 mag->zm_next = NULL;
2602 return mag;
2603 }
2604
2605 static inline smr_seq_t
2606 zone_depot_move_full(
2607 struct zone_depot *dst,
2608 struct zone_depot *src,
2609 uint32_t n,
2610 zone_t z)
2611 {
2612 zone_magazine_t head, last;
2613
2614 assert(n);
2615 assert(src->zd_full >= n);
2616
2617 src->zd_full -= n;
2618 if (z && z->z_recirc_full_min > src->zd_full) {
2619 z->z_recirc_full_min = src->zd_full;
2620 }
2621 head = last = src->zd_head;
2622 for (uint32_t i = n; i-- > 1;) {
2623 last = last->zm_next;
2624 }
2625
2626 src->zd_head = last->zm_next;
2627 if (src->zd_full == 0) {
2628 src->zd_tail = &src->zd_head;
2629 }
2630
2631 if (z && zone_security_array[zone_index(z)].z_lifo) {
2632 if (dst->zd_full == 0) {
2633 dst->zd_tail = &last->zm_next;
2634 }
2635 last->zm_next = dst->zd_head;
2636 dst->zd_head = head;
2637 } else {
2638 last->zm_next = *dst->zd_tail;
2639 *dst->zd_tail = head;
2640 dst->zd_tail = &last->zm_next;
2641 }
2642 dst->zd_full += n;
2643
2644 return last->zm_seq;
2645 }
2646
2647 static inline void
2648 zone_depot_move_empty(
2649 struct zone_depot *dst,
2650 struct zone_depot *src,
2651 uint32_t n,
2652 zone_t z)
2653 {
2654 zone_magazine_t head, last;
2655
2656 assert(n);
2657 assert(src->zd_empty >= n);
2658
2659 src->zd_empty -= n;
2660 if (z && z->z_recirc_empty_min > src->zd_empty) {
2661 z->z_recirc_empty_min = src->zd_empty;
2662 }
2663 head = last = *src->zd_tail;
2664 for (uint32_t i = n; i-- > 1;) {
2665 last = last->zm_next;
2666 }
2667
2668 *src->zd_tail = last->zm_next;
2669
2670 dst->zd_empty += n;
2671 last->zm_next = *dst->zd_tail;
2672 *dst->zd_tail = head;
2673 }
2674
2675 static inline bool
2676 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2677 {
2678 if (depot->zd_full == 0) {
2679 return false;
2680 }
2681
2682 return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2683 }
2684
2685 static void
2686 zone_cache_swap_magazines(zone_cache_t cache)
2687 {
2688 uint16_t count_a = cache->zc_alloc_cur;
2689 uint16_t count_f = cache->zc_free_cur;
2690 vm_offset_t *elems_a = cache->zc_alloc_elems;
2691 vm_offset_t *elems_f = cache->zc_free_elems;
2692
2693 z_debug_assert(count_a <= zc_mag_size());
2694 z_debug_assert(count_f <= zc_mag_size());
2695
2696 cache->zc_alloc_cur = count_f;
2697 cache->zc_free_cur = count_a;
2698 cache->zc_alloc_elems = elems_f;
2699 cache->zc_free_elems = elems_a;
2700 }
2701
2702 __pure2
2703 static smr_t
2704 zone_cache_smr(zone_cache_t cache)
2705 {
2706 return cache->zc_smr;
2707 }
2708
2709 /*!
2710 * @function zone_magazine_replace
2711 *
2712 * @brief
2713 * Unload a magazine and load a new one instead.
2714 */
2715 static zone_magazine_t
2716 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2717 {
2718 zone_magazine_t old;
2719 vm_offset_t **elems;
2720
2721 mag->zm_seq = SMR_SEQ_INVALID;
2722
2723 if (empty) {
2724 elems = &zc->zc_free_elems;
2725 zc->zc_free_cur = 0;
2726 } else {
2727 elems = &zc->zc_alloc_elems;
2728 zc->zc_alloc_cur = zc_mag_size();
2729 }
2730 old = (zone_magazine_t)((uintptr_t)*elems -
2731 offsetof(struct zone_magazine, zm_elems));
2732 *elems = mag->zm_elems;
2733
2734 return old;
2735 }
2736
2737 static zone_magazine_t
2738 zone_magazine_alloc(zalloc_flags_t flags)
2739 {
2740 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2741 }
2742
2743 static void
2744 zone_magazine_free(zone_magazine_t mag)
2745 {
2746 (zfree)(zc_magazine_zone, mag);
2747 }
2748
2749 static void
2750 zone_magazine_free_list(struct zone_depot *zd)
2751 {
2752 zone_magazine_t tmp, mag = *zd->zd_tail;
2753
2754 while (mag) {
2755 tmp = mag->zm_next;
2756 zone_magazine_free(mag);
2757 mag = tmp;
2758 }
2759
2760 *zd->zd_tail = NULL;
2761 zd->zd_empty = 0;
2762 }
2763
2764 void
2765 zone_enable_caching(zone_t zone)
2766 {
2767 size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2768 zone_cache_t caches;
2769 size_t depot_limit;
2770
2771 depot_limit = zc_pcpu_max() / size_per_mag;
2772 zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2773
2774 caches = zalloc_percpu_permanent_type(struct zone_cache);
2775 zpercpu_foreach(zc, caches) {
2776 zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2777 zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2778 zone_depot_init(&zc->zc_depot);
2779 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2780 }
2781
2782 zone_lock(zone);
2783 assert(zone->z_pcpu_cache == NULL);
2784 zone->z_pcpu_cache = caches;
2785 zone->z_recirc_cont_cur = 0;
2786 zone->z_recirc_cont_wma = 0;
2787 zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2788 zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2789 zone_unlock(zone);
2790 }
2791
2792 bool
2793 zone_maps_owned(vm_address_t addr, vm_size_t size)
2794 {
2795 return from_zone_map(addr, size);
2796 }
2797
2798 #if KASAN_LIGHT
2799 bool
2800 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2801 {
2802 return from_zone_map(addr, size) ||
2803 mach_vm_range_size(&zone_info.zi_map_range) == 0;
2804 }
2805 #endif /* KASAN_LIGHT */
2806
2807 void
2808 zone_map_sizes(
2809 vm_map_size_t *psize,
2810 vm_map_size_t *pfree,
2811 vm_map_size_t *plargest_free)
2812 {
2813 vm_map_size_t size, free, largest;
2814
2815 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2816
2817 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2818 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2819 *psize += size;
2820 *pfree += free;
2821 *plargest_free = MAX(*plargest_free, largest);
2822 }
2823 }
2824
2825 __attribute__((always_inline))
2826 vm_map_t
2827 zone_submap(zone_security_flags_t zsflags)
2828 {
2829 return zone_submaps[zsflags.z_submap_idx];
2830 }
2831
2832 unsigned
2833 zpercpu_count(void)
2834 {
2835 return zpercpu_early_count;
2836 }
2837
2838 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2839 /*
2840 * Returns a random number of a given bit-width.
2841 *
2842 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2843 *
2844 * This uses Intel's rdrand because random() uses FP registers,
2845 * which causes FP faults and allocations, neither of which
2846 * zalloc itself can tolerate due to reentrancy problems.
2847 *
2848 * For pre-rdrand machines, we fall back to a biased random
2849 * generator that doesn't use FP. Such HW is no longer supported,
2850 * but VMs of newer OSes on older bare metal are made to limp
2851 * along (with reduced security) this way.
2852 */
2853 static uint64_t
2854 zalloc_random_mask64(uint32_t bits)
2855 {
2856 uint64_t mask = ~0ull >> (64 - bits);
2857 uint64_t v;
2858
2859 #if __x86_64__
2860 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2861 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2862 v &= mask;
2863 } else {
2864 disable_preemption();
2865 int cpu = cpu_number();
2866 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2867 zone_bool_gen[cpu].zbg_entropy,
2868 ZONE_ENTROPY_CNT, bits);
2869 enable_preemption();
2870 }
2871 #else
2872 v = early_random() & mask;
2873 #endif
2874
2875 return v;
2876 }
2877
2878 /*
2879 * Returns a random number within [bound_min, bound_max)
2880 *
2881 * This isn't _exactly_ uniform, but the skew is small enough
2882 * not to matter for the consumers of this interface.
2883 *
2884 * Values within [bound_min, bound_min + 2^64 % (bound_max - bound_min))
2885 * will be returned about (bound_max - bound_min) / 2^64 more often
2886 * than values within [bound_min + 2^64 % (bound_max - bound_min), bound_max).
2887 */
2888 static uint32_t
2889 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2890 {
2891 uint64_t delta = bound_max - bound_min;
2892
2893 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2894 }
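/*
 * Example of the skew above (illustrative only): for bound_min == 0 and
 * bound_max == 10, 2^64 % 10 == 6, so values 0..5 are returned with
 * probability (q + 1) / 2^64 and values 6..9 with probability q / 2^64,
 * where q == 2^64 / 10 (integer division); a relative bias of roughly
 * 2^-61, far too small to matter for these consumers.
 */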
2895
2896 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2897 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2898 /*
2899 * Track all kalloc zones of the specified size for zlog names
2900 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2901 *
2902 * Additionally track all shared kalloc zones with shared.kalloc
2903 */
2904 static bool
2905 track_kalloc_zones(zone_t z, const char *logname)
2906 {
2907 const char *prefix;
2908 size_t len;
2909 zone_security_flags_t zsflags = zone_security_config(z);
2910
2911 prefix = "kalloc.type.var.";
2912 len = strlen(prefix);
2913 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2914 strncmp(logname, prefix, len) == 0) {
2915 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2916
2917 return zone_elem_inner_size(z) == sizeclass;
2918 }
2919
2920 prefix = "kalloc.type.";
2921 len = strlen(prefix);
2922 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2923 strncmp(logname, prefix, len) == 0) {
2924 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2925
2926 return zone_elem_inner_size(z) == sizeclass;
2927 }
2928
2929 prefix = "kalloc.";
2930 len = strlen(prefix);
2931 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2932 strncmp(logname, prefix, len) == 0) {
2933 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2934
2935 return zone_elem_inner_size(z) == sizeclass;
2936 }
2937
2938 prefix = "shared.kalloc";
2939 if ((zsflags.z_kheap_id == KHEAP_ID_SHARED) &&
2940 (strcmp(logname, prefix) == 0)) {
2941 return true;
2942 }
2943
2944 return false;
2945 }
2946 #endif
2947
2948 int
2949 track_this_zone(const char *zonename, const char *logname)
2950 {
2951 unsigned int len;
2952 const char *zc = zonename;
2953 const char *lc = logname;
2954
2955 /*
2956 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
2957 */
2958
2959 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2960 /*
2961 * If the current characters don't match, check for a space in
2962 * the zone name and a corresponding period in the log name.
2963 * If that's not there, then the strings don't match.
2964 */
2965
2966 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2967 break;
2968 }
2969
2970 /*
2971 * The strings are equal so far. If we're at the end, then it's a match.
2972 */
2973
2974 if (*zc == '\0') {
2975 return TRUE;
2976 }
2977 }
2978
2979 return FALSE;
2980 }
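/*
 * For instance (hypothetical zone name): a zone named "vm objects" is
 * matched by a boot-arg value of either "vm objects" or "vm.objects",
 * since a '.' in the log name is accepted wherever the zone name has a
 * space.
 */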
2981
2982 #if DEBUG || DEVELOPMENT
2983
2984 vm_size_t
2985 zone_element_info(void *addr, vm_tag_t * ptag)
2986 {
2987 vm_size_t size = 0;
2988 vm_tag_t tag = VM_KERN_MEMORY_NONE;
2989 struct zone *src_zone;
2990
2991 if (from_zone_map(addr, sizeof(void *))) {
2992 src_zone = zone_by_id(zone_index_from_ptr(addr));
2993 size = zone_elem_inner_size(src_zone);
2994 #if VM_TAG_SIZECLASSES
2995 if (__improbable(src_zone->z_uses_tags)) {
2996 struct zone_page_metadata *meta;
2997 vm_offset_t eidx;
2998 vm_tag_t *slot;
2999
3000 meta = zone_element_resolve(src_zone,
3001 (vm_offset_t)addr, &eidx);
3002 slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
3003 tag = *slot;
3004 }
3005 #endif /* VM_TAG_SIZECLASSES */
3006 }
3007
3008 *ptag = tag;
3009 return size;
3010 }
3011
3012 #endif /* DEBUG || DEVELOPMENT */
3013 #if KASAN_CLASSIC
3014
3015 vm_size_t
3016 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
3017 {
3018 zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
3019
3020 *zonep = zone;
3021 return zone_elem_inner_size(zone);
3022 }
3023
3024 #endif /* KASAN_CLASSIC */
3025 #endif /* !ZALLOC_TEST */
3026 #pragma mark Zone zeroing and early random
3027 #if !ZALLOC_TEST
3028
3029 /*
3030 * Zone zeroing
3031 *
3032 * All allocations from zones are zeroed on free and are additionally
3033 * checked to still be zero on alloc. The check is
3034 * always on for embedded devices. A perf regression was detected
3035 * on Intel because we can't use the vectorized implementation of
3036 * memcmp_zero_ptr_aligned due to cyclic dependencies between
3037 * initialization and allocation. Therefore we perform the check
3038 * on only 20% of the allocations there.
3039 */
3040 #if ZALLOC_ENABLE_ZERO_CHECK
3041 #if defined(__x86_64__)
3042 /*
3043 * Perform zero validation on every 5th allocation
3044 */
3045 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3046 static uint32_t PERCPU_DATA(zzc_decrementer);
3047 #endif /* defined(__x86_64__) */
3048
3049 /*
3050 * Determine if zero validation for allocation should be skipped
3051 */
3052 static bool
3053 zalloc_skip_zero_check(void)
3054 {
3055 #if defined(__x86_64__)
3056 uint32_t *counterp, cnt;
3057
3058 counterp = PERCPU_GET(zzc_decrementer);
3059 cnt = *counterp;
3060 if (__probable(cnt > 0)) {
3061 *counterp = cnt - 1;
3062 return true;
3063 }
3064 *counterp = zzc_rate - 1;
3065 #endif /* defined(__x86_64__) */
3066 return false;
3067 }
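/*
 * In other words (illustrative): on x86_64 with the default zzc_rate of 5,
 * each CPU skips four allocations and validates the fifth, i.e. roughly
 * 20% of allocations are checked; on other platforms every allocation is
 * validated.
 */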
3068
3069 __abortlike
3070 static void
3071 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3072 {
3073 uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3074 uint32_t first_offs = ~0u;
3075 uintptr_t first_bits = 0, v;
3076 char buf[1024];
3077 int pos = 0;
3078
3079 buf[0] = '\0';
3080
3081 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3082 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3083 continue;
3084 }
3085 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3086 "%5d: 0x%016lx", o, v);
3087 if (first_offs > o) {
3088 first_offs = o;
3089 first_bits = v;
3090 }
3091 }
3092
3093 (panic)("[%s%s]: element modified after free "
3094 "(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3095 zone_heap_name(z), zone_name(z),
3096 first_offs, first_bits, esize, (void *)elem, buf);
3097 }
3098
3099 static void
3100 zalloc_validate_element(
3101 zone_t zone,
3102 vm_offset_t elem,
3103 vm_size_t size,
3104 zalloc_flags_t flags)
3105 {
3106 if (flags & Z_NOZZC) {
3107 return;
3108 }
3109 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3110 zalloc_uaf_panic(zone, elem, size);
3111 }
3112 if (flags & Z_PCPU) {
3113 for (size_t i = zpercpu_count(); --i > 0;) {
3114 elem += PAGE_SIZE;
3115 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3116 zalloc_uaf_panic(zone, elem, size);
3117 }
3118 }
3119 }
3120 }
3121
3122 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3123
3124 __attribute__((noinline))
3125 static void
3126 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3127 {
3128 #if KASAN_FAKESTACK
3129 /*
3130 * This can cause re-entrancy with kasan fakestacks
3131 */
3132 #pragma unused(zone, cpu, zs)
3133 #else
3134 uint32_t bits;
3135
3136 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3137 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3138
3139 zs->zs_alloc_rr += bits;
3140 zs->zs_alloc_rr %= zone->z_chunk_elems;
3141 #endif
3142 }
3143
3144 #endif /* !ZALLOC_TEST */
3145 #pragma mark Zone Leak Detection
3146 #if !ZALLOC_TEST
3147 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3148
3149 /*
3150 * Zone leak debugging code
3151 *
3152 * When enabled, this code keeps a log to track allocations to a particular
3153 * zone that have not yet been freed.
3154 *
3155 * Examining this log will reveal the source of a zone leak.
3156 *
3157 * The log is allocated only when logging is enabled (it is off by default),
3158 * so there is no effect on the system when it's turned off.
3159 *
3160 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3161 * zone name to log, with n starting at 1.
3162 *
3163 * Leaks debugging utilizes 2 tunables:
3164 * - zlsize (in kB) which describes how much "size" the record covers
3165 * (zones with smaller elements get more records, default is 4M).
3166 *
3167 * - zlfreq (in kB) which describes a sample rate in cumulative allocation
3168 * size at which automatic leak detection will sample allocations.
3169 * (default is 16k)
3170 *
3171 *
3172 * Zone corruption logging
3173 *
3174 * Logging can also be used to help identify the source of a zone corruption.
3175 *
3176 * First, identify the zone that is being corrupted,
3177 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3178 *
3179 * When -zc is used in conjunction with zlog,
3180 * it changes the logging style to track both allocations and frees to the zone.
3181 *
3182 * When the corruption is detected, examining the log will show you the stack
3183 * traces of the callers who last allocated and freed any particular element in
3184 * the zone.
3185 *
3186 * Corruption debugging logs will have zrecs records
3187 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3188 */
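/*
 * Example boot-args (hypothetical zone name): "zlog1=vm.objects zlsize=8192"
 * enables leak logging for that zone with records sized to cover 8M of
 * live allocations; adding "-zc" to the boot-args switches the zone to the
 * corruption-style log that records both allocations and frees.
 */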
3189
3190 #define ZRECORDS_MAX (256u << 10)
3191 #define ZRECORDS_DEFAULT (16u << 10)
3192 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3193 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3194 static TUNABLE(uint32_t, zlfreq, "zlfreq", 16);
3195
3196 __startup_func
3197 static void
3198 zone_leaks_init_zrecs(void)
3199 {
3200 /*
3201 * Don't allow more than ZRECORDS_MAX records,
3202 * even if the user asked for more.
3203 *
3204 * This prevents accidentally hogging too much kernel memory
3205 * and making the system unusable.
3206 */
3207 if (zrecs == 0) {
3208 zrecs = ZRECORDS_DEFAULT *
3209 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3210 }
3211 if (zrecs > ZRECORDS_MAX) {
3212 zrecs = ZRECORDS_MAX;
3213 }
3214 }
3215 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3216
3217 static uint32_t
3218 zone_leaks_record_count(zone_t z)
3219 {
3220 uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3221
3222 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3223 }
3224
3225 static uint32_t
3226 zone_leaks_sample_rate(zone_t z)
3227 {
3228 return (zlfreq << 10) / zone_elem_inner_size(z);
3229 }
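/*
 * Worked example (illustrative only): for a zone with 64 byte elements and
 * the default tunables, zone_leaks_record_count() is (4096 << 10) / 64 ==
 * 65536 records (clamped to [ZRECORDS_DEFAULT, ZRECORDS_MAX]) and
 * zone_leaks_sample_rate() is (16 << 10) / 64 == 256, i.e. roughly one
 * sampled backtrace per 256 allocations.
 */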
3230
3231 #if ZALLOC_ENABLE_LOGGING
3232 /* Log allocations and frees to help debug a zone element corruption */
3233 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3234
3235 /*
3236 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3237 */
3238 #define MAX_ZONES_LOG_REQUESTS 10
3239
3240 /**
3241 * @function zone_setup_logging
3242 *
3243 * @abstract
3244 * Optionally sets up a zone for logging.
3245 *
3246 * @discussion
3247 * We recognize the following boot-args:
3248 *
3249 * zlog=<zone_to_log>
3250 * zrecs=<num_records_in_log>
3251 * zlsize=<memory to cover for leaks>
3252 *
3253 * The zlog arg is used to specify the zone name that should be logged,
3254 * and zrecs/zlsize is used to control the size of the log.
3255 */
3256 static void
3257 zone_setup_logging(zone_t z)
3258 {
3259 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3260 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3261 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3262 bool logging_on = false;
3263
3264 /*
3265 * Append kalloc heap name to zone name (if zone is used by kalloc)
3266 */
3267 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3268
3269 /* zlog0 isn't allowed. */
3270 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3271 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3272
3273 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3274 if (track_this_zone(zone_name, zlog_val) ||
3275 track_kalloc_zones(z, zlog_val)) {
3276 logging_on = true;
3277 break;
3278 }
3279 }
3280 }
3281
3282 /*
3283 * Backwards compat. with the old boot-arg used to specify single zone
3284 * logging, i.e. zlog. This needs to happen after the newer zlog<n> checks
3285 * because the "zlog" prefix will match all of the zlog<n>
3286 * boot-args.
3287 */
3288 if (!logging_on &&
3289 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3290 if (track_this_zone(zone_name, zlog_val) ||
3291 track_kalloc_zones(z, zlog_val)) {
3292 logging_on = true;
3293 }
3294 }
3295
3296 /*
3297 * If we want to log a zone, see if we need to allocate buffer space for
3298 * the log.
3299 *
3300 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3301 * we have to defer allocation in that case.
3302 *
3303 * zone_init() will finish the job.
3304 *
3305 * If we want to log one of the VM related zones that's set up early on,
3306 * we will skip allocation of the log until zinit is called again later
3307 * on some other zone.
3308 */
3309 if (logging_on) {
3310 if (corruption_debug_flag) {
3311 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3312 } else {
3313 z->z_btlog = btlog_create(BTLOG_HASH,
3314 zone_leaks_record_count(z), 0);
3315 }
3316 if (z->z_btlog) {
3317 z->z_log_on = true;
3318 printf("zone[%s%s]: logging enabled\n",
3319 zone_heap_name(z), z->z_name);
3320 } else {
3321 printf("zone[%s%s]: failed to enable logging\n",
3322 zone_heap_name(z), z->z_name);
3323 }
3324 }
3325 }
3326
3327 #endif /* ZALLOC_ENABLE_LOGGING */
3328 #if KASAN_TBI
3329 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3330
3331 __startup_func
3332 static void
3333 kasan_tbi_init_zrecs(void)
3334 {
3335 /*
3336 * Don't allow more than ZRECORDS_MAX records,
3337 * even if the user asked for more.
3338 *
3339 * This prevents accidentally hogging too much kernel memory
3340 * and making the system unusable.
3341 */
3342 if (kasan_zrecs == 0) {
3343 kasan_zrecs = ZRECORDS_DEFAULT *
3344 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3345 }
3346 if (kasan_zrecs > ZRECORDS_MAX) {
3347 kasan_zrecs = ZRECORDS_MAX;
3348 }
3349 }
3350 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3351
3352 static void
3353 zone_setup_kasan_logging(zone_t z)
3354 {
3355 if (!z->z_tbi_tag) {
3356 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3357 zone_heap_name(z), z->z_name);
3358 return;
3359 }
3360
3361 z->z_log_on = true;
3362 z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3363 if (!z->z_btlog) {
3364 printf("zone[%s%s]: failed to enable kasan logging\n",
3365 zone_heap_name(z), z->z_name);
3366 }
3367 }
3368
3369 #endif /* KASAN_TBI */
3370 #if CONFIG_ZLEAKS
3371
3372 static thread_call_data_t zone_leaks_callout;
3373
3374 /*
3375 * The zone leak detector, abbreviated 'zleak', keeps track
3376 * of a subset of the currently outstanding allocations
3377 * made by the zone allocator.
3378 *
3379 * It will engage itself automatically if the zone map usage
3380 * goes above zleak_pages_global_wired_threshold pages.
3381 *
3382 * When that threshold is reached, zones who use more than
3383 * zleak_pages_per_zone_wired_threshold pages will get
3384 * a BTLOG_HASH btlog with sampling to minimize perf impact,
3385 * yet receive statistical data about the backtrace that is
3386 * the most likely to cause the leak.
3387 *
3388 * If the zone goes under the threshold enough, then the log
3389 * is disabled and backtraces freed. Data can be collected
3390 * from userspace with the zlog(1) command.
3391 */
3392
3393 /* whether the zleaks subsystem thinks the map is under pressure */
3394 uint32_t zleak_active;
3395 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3396
3397 /* Size of zone map at which to start collecting data */
3398 static size_t zleak_pages_global_wired_threshold = ~0;
3399 vm_size_t zleak_global_tracking_threshold = ~0;
3400
3401 /* Size a zone will have before we will collect data on it */
3402 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3403 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3404
3405 static inline bool
3406 zleak_should_enable_for_zone(zone_t z)
3407 {
3408 if (z->z_log_on) {
3409 return false;
3410 }
3411 if (z->z_btlog) {
3412 return false;
3413 }
3414 if (!zleak_active) {
3415 return false;
3416 }
3417 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3418 }
3419
3420 static inline bool
3421 zleak_should_disable_for_zone(zone_t z)
3422 {
3423 if (z->z_log_on) {
3424 return false;
3425 }
3426 if (!z->z_btlog) {
3427 return false;
3428 }
3429 if (!zleak_active) {
3430 return true;
3431 }
3432 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3433 }
3434
3435 static inline bool
3436 zleak_should_activate(size_t pages)
3437 {
3438 return !zleak_active && pages >= zleak_pages_global_wired_threshold;
3439 }
3440
3441 static inline bool
3442 zleak_should_deactivate(size_t pages)
3443 {
3444 return zleak_active && pages < zleak_pages_global_wired_threshold / 2;
3445 }
3446
3447 static void
3448 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3449 {
3450 size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
3451 btlog_t log;
3452
3453 if (zleak_should_activate(pages)) {
3454 zleak_active = 1;
3455 } else if (zleak_should_deactivate(pages)) {
3456 zleak_active = 0;
3457 }
3458
3459 zone_foreach(z) {
3460 if (zleak_should_disable_for_zone(z)) {
3461 log = z->z_btlog;
3462 z->z_btlog = NULL;
3463 assert(z->z_btlog_disabled == NULL);
3464 btlog_disable(log);
3465 z->z_btlog_disabled = log;
3466 }
3467
3468 if (zleak_should_enable_for_zone(z)) {
3469 log = z->z_btlog_disabled;
3470 if (log == NULL) {
3471 log = btlog_create(BTLOG_HASH,
3472 zone_leaks_record_count(z),
3473 zone_leaks_sample_rate(z));
3474 } else if (btlog_enable(log) == KERN_SUCCESS) {
3475 z->z_btlog_disabled = NULL;
3476 } else {
3477 log = NULL;
3478 }
3479 os_atomic_store(&z->z_btlog, log, release);
3480 }
3481 }
3482 }
3483
3484 __startup_func
3485 static void
3486 zleak_init(void)
3487 {
3488 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3489
3490 zleak_update_threshold(&zleak_global_tracking_threshold,
3491 zleak_max_zonemap_size / 2);
3492 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3493 zleak_global_tracking_threshold / 8);
3494
3495 thread_call_setup_with_options(&zone_leaks_callout,
3496 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3497 THREAD_CALL_OPTIONS_ONCE);
3498 }
3499 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3500
3501 kern_return_t
3502 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3503 {
3504 if (value >= zleak_max_zonemap_size) {
3505 return KERN_INVALID_VALUE;
3506 }
3507
3508 if (arg == &zleak_global_tracking_threshold) {
3509 zleak_global_tracking_threshold = (vm_size_t)value;
3510 zleak_pages_global_wired_threshold = atop(value);
3511 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3512 thread_call_enter(&zone_leaks_callout);
3513 }
3514 return KERN_SUCCESS;
3515 }
3516
3517 if (arg == &zleak_per_zone_tracking_threshold) {
3518 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3519 zleak_pages_per_zone_wired_threshold = atop(value);
3520 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3521 thread_call_enter(&zone_leaks_callout);
3522 }
3523 return KERN_SUCCESS;
3524 }
3525
3526 return KERN_INVALID_ARGUMENT;
3527 }
3528
3529 static void
3530 panic_display_zleaks(bool has_syms)
3531 {
3532 bool did_header = false;
3533 vm_address_t bt[BTLOG_MAX_DEPTH];
3534 uint32_t len, count;
3535
3536 zone_foreach(z) {
3537 btlog_t log = z->z_btlog;
3538
3539 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3540 continue;
3541 }
3542
3543 count = btlog_guess_top(log, bt, &len);
3544 if (count == 0) {
3545 continue;
3546 }
3547
3548 if (!did_header) {
3549 paniclog_append_noflush("Zone (suspected) leak report:\n");
3550 did_header = true;
3551 }
3552
3553 paniclog_append_noflush(" Zone: %s%s\n",
3554 zone_heap_name(z), zone_name(z));
3555 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3556 (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3557 paniclog_append_noflush(" Size: %ld\n",
3558 (long)zone_size_wired(z));
3559 paniclog_append_noflush(" Top backtrace:\n");
3560 for (uint32_t i = 0; i < len; i++) {
3561 if (has_syms) {
3562 paniclog_append_noflush(" %p ", (void *)bt[i]);
3563 panic_print_symbol_name(bt[i]);
3564 paniclog_append_noflush("\n");
3565 } else {
3566 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3567 }
3568 }
3569
3570 kmod_panic_dump(bt, len);
3571 paniclog_append_noflush("\n");
3572 }
3573 }
3574 #endif /* CONFIG_ZLEAKS */
3575
3576 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3577 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3578
3579 #if !KASAN_TBI
3580 __cold
3581 #endif
3582 static void
3583 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3584 {
3585 btlog_t log = zone->z_btlog;
3586 btref_get_flags_t flags = 0;
3587 btref_t ref;
3588
3589 #if !KASAN_TBI
3590 if (!log || !btlog_sample(log)) {
3591 return;
3592 }
3593 #endif
3594 if (get_preemption_level() || zone_supports_vm(zone)) {
3595 /*
3596 * VM zones can be used by btlog, avoid reentrancy issues.
3597 */
3598 flags = BTREF_GET_NOWAIT;
3599 }
3600
3601 ref = btref_get(fp, flags);
3602 while (count-- > 0) {
3603 if (count) {
3604 btref_retain(ref);
3605 }
3606 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3607 addr += *(vm_offset_t *)addr;
3608 }
3609 }
3610
3611 #define ZALLOC_LOG(zone, addr, count) ({ \
3612 if ((zone)->z_btlog) { \
3613 zalloc_log(zone, addr, count, __builtin_frame_address(0)); \
3614 } \
3615 })
3616
3617 #if !KASAN_TBI
3618 __cold
3619 #endif
3620 static void
3621 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3622 {
3623 btlog_t log = zone->z_btlog;
3624 btref_get_flags_t flags = 0;
3625 btref_t ref;
3626
3627 #if !KASAN_TBI
3628 if (!log) {
3629 return;
3630 }
3631 #endif
3632
3633 /*
3634 * See if we're doing logging on this zone.
3635 *
3636 * There are two styles of logging used depending on
3637 * whether we're trying to catch a leak or corruption.
3638 */
3639 #if !KASAN_TBI
3640 if (btlog_get_type(log) == BTLOG_HASH) {
3641 /*
3642 * We're logging to catch a leak.
3643 *
3644 * Remove any record we might have for this element
3645 * since it's being freed. Note that we may not find it
3646 * if the buffer overflowed and that's OK.
3647 *
3648 * Since the log is of a limited size, old records get
3649 * overwritten if there are more zallocs than zfrees.
3650 */
3651 while (count-- > 0) {
3652 btlog_erase(log, (void *)addr);
3653 addr += *(vm_offset_t *)addr;
3654 }
3655 return;
3656 }
3657 #endif /* !KASAN_TBI */
3658
3659 if (get_preemption_level() || zone_supports_vm(zone)) {
3660 /*
3661 * VM zones can be used by btlog, avoid reentrancy issues.
3662 */
3663 flags = BTREF_GET_NOWAIT;
3664 }
3665
3666 ref = btref_get(fp, flags);
3667 while (count-- > 0) {
3668 if (count) {
3669 btref_retain(ref);
3670 }
3671 btlog_record(log, (void *)addr, ZOP_FREE, ref);
3672 addr += *(vm_offset_t *)addr;
3673 }
3674 }
3675
3676 #define ZFREE_LOG(zone, addr, count) ({ \
3677 if ((zone)->z_btlog) { \
3678 zfree_log(zone, addr, count, __builtin_frame_address(0)); \
3679 } \
3680 })
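/*
 * Illustrative sketch (hypothetical call sites, not part of the build):
 * the allocation and free paths are expected to invoke these hooks with
 * the element address and a run length of 1, e.g.:
 *
 *     vm_offset_t addr = ...;        // element just allocated from `zone`
 *     ZALLOC_LOG(zone, addr, 1);
 *
 *     ...
 *
 *     ZFREE_LOG(zone, addr, 1);      // element about to be freed to `zone`
 *
 * For runs longer than 1, elements must be chained by storing the offset
 * to the next element in the first word of each element, since
 * zalloc_log() / zfree_log() advance with `addr += *(vm_offset_t *)addr`.
 */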
3681
3682 #else
3683 #define ZALLOC_LOG(...) ((void)0)
3684 #define ZFREE_LOG(...) ((void)0)
3685 #endif /* ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3686 #endif /* !ZALLOC_TEST */
3687 #pragma mark zone (re)fill
3688 #if !ZALLOC_TEST
3689
3690 /*!
3691 * @defgroup Zone Refill
3692 * @{
3693 *
3694 * @brief
3695 * Functions handling the zone refill machinery.
3696 *
3697 * @discussion
3698 * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3699 *
3700 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3701 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3702 *
3703 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3704 *
3705 * - wake up the asynchronous expansion thread call if the caller is not allowed
3706 * to block, or if the reserve becomes depleted.
3707 *
3708 *
3709 * <h2>Synchronous expansion</h2>
3710 *
3711 * This mechanism is actually the only one that may refill a zone, and all the
3712 * other ones funnel through this one eventually.
3713 *
3714 * @c zone_expand_locked() implements the core of the expansion mechanism,
3715 * and will do so while a caller specified predicate is true.
3716 *
3717 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3718 * - one VM privileged thread,
3719 * - one regular thread.
3720 *
3721 * Regular threads that refill will put down their identity in @c z_expander,
3722 * so that priority inversion avoidance can be implemented.
3723 *
3724 * However, VM privileged threads are allowed to use VM page reserves,
3725 * which allows for the system to recover from extreme memory pressure
3726 * situations, allowing for the few allocations that @c zone_gc() or
3727 * killing processes require.
3728 *
3729 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3730 * is set. @c z_expander is not necessarily the identity of this VM privileged
3731 * thread (it is if the VM privileged thread came in first, but otherwise it
3732 * wouldn't be, and could even be @c THREAD_NULL).
3733 *
3734 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3735 * spending a whole pointer on priority inheritance for VM privileged threads
3736 * (and other issues related to having two owners), we use the rwlock boost as
3737 * a stop gap to avoid priority inversions.
3738 *
3739 *
3740 * <h2>Chunk wiring policies</h2>
3741 *
3742 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3743 * to try to minimize fragmentation relative to element sizes not aligning with
3744 * a chunk size well. However, this can grow large and be hard to fulfill on
3745 * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3746 * 4k page systems).
3747 *
3748 * This is why, when under memory pressure, the system allows chunks to be
3749 * partially populated. The metadata of the first page in the chunk maintains
3750 * the count of actually populated pages.
3751 *
3752 * The metadata for addresses assigned to a zone is found on one of 4 queues:
3753 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3754 * elements (those can be targeted by @c zone_gc()),
3755 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3756 * used,
3757 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3758 * left,
3759 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3760 * the zone forever, or the first secondary metadata for a chunk whose
3761 * corresponding page is not populated in the chunk.
3762 *
3763 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3764 * queues are preferred.
3765 *
3766 *
3767 * <h2>Asynchronous expansion</h2>
3768 *
3769 * This mechanism allows for refilling zones used mostly by non-blocking
3770 * callers. It relies on a thread call (@c zone_expand_callout) which will
3771 * iterate all zones and refill the ones marked with @c z_async_refilling.
3772 *
3773 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3774 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3775 *
3776 *
3777 * <h2>Dealing with zone allocations from the mach VM code</h2>
3778 *
3779 * The implementation of the mach VM itself uses the zone allocator
3780 * for things like the vm_map_entry data structure. In order to prevent
3781 * a recursion problem when adding more pages to a zone, the VM zones
3782 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3783 * or any VM map functions to allocate.
3784 *
3785 * Instead, a really simple coalescing first-fit allocator is used
3786 * for this submap, and nothing other than zalloc can allocate from it.
3787 *
3788 * Memory is directly populated which doesn't require allocation of
3789 * VM map entries, and avoids recursion. The cost of this scheme however,
3790 * is that `vm_map_lookup_entry` will not function on those addresses
3791 * (nor any API relying on it).
3792 */
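/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 * zone_expand_locked() is driven by a caller supplied predicate and is
 * entered (and left) with the zone lock held, the way the async callout
 * below refills a zone:
 *
 *     static bool
 *     my_zone_needs_more(zone_t z)           // hypothetical predicate
 *     {
 *         return z->z_elems_free <= z->z_elems_rsv;
 *     }
 *
 *     zone_lock(z);
 *     zone_expand_locked(z, Z_WAITOK, my_zone_needs_more);
 *     zone_unlock(z);
 *
 * Passing a NULL predicate means "try to add one chunk, just once".
 */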
3793
3794 static thread_call_data_t zone_expand_callout;
3795
3796 __attribute__((overloadable))
3797 static inline bool
3798 zone_submap_is_sequestered(zone_submap_idx_t idx)
3799 {
3800 return idx != Z_SUBMAP_IDX_DATA;
3801 }
3802
3803 __attribute__((overloadable))
3804 static inline bool
3805 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3806 {
3807 return zone_submap_is_sequestered(zsflags.z_submap_idx);
3808 }
3809
3810 static inline kma_flags_t
3811 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3812 {
3813 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3814
3815 if (zsflags.z_noencrypt) {
3816 kmaflags |= KMA_NOENCRYPT;
3817 }
3818 if (flags & Z_NOPAGEWAIT) {
3819 kmaflags |= KMA_NOPAGEWAIT;
3820 }
3821 if (z->z_permanent || (!z->z_destructible &&
3822 zone_submap_is_sequestered(zsflags))) {
3823 kmaflags |= KMA_PERMANENT;
3824 }
3825 if (zsflags.z_submap_from_end) {
3826 kmaflags |= KMA_LAST_FREE;
3827 }
3828
3829 return kmaflags;
3830 }
3831
3832 static inline void
3833 zone_add_wired_pages(uint32_t pages)
3834 {
3835 size_t count = os_atomic_add(&zone_pages_wired, pages, relaxed);
3836
3837 #if CONFIG_ZLEAKS
3838 if (__improbable(zleak_should_activate(count) &&
3839 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3840 thread_call_enter(&zone_leaks_callout);
3841 }
3842 #else
3843 (void)count;
3844 #endif
3845 }
3846
3847 static inline void
3848 zone_remove_wired_pages(uint32_t pages)
3849 {
3850 size_t count = os_atomic_sub(&zone_pages_wired, pages, relaxed);
3851
3852 #if CONFIG_ZLEAKS
3853 if (__improbable(zleak_should_deactivate(count) &&
3854 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3855 thread_call_enter(&zone_leaks_callout);
3856 }
3857 #else
3858 (void)count;
3859 #endif
3860 }
3861
3862 /*!
3863 * @function zcram_and_lock()
3864 *
3865 * @brief
3866 * Prepare some memory for being usable for allocation purposes.
3867 *
3868 * @discussion
3869 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3870 * to be usable in the zone.
3871 *
3872 * This function assumes the metadata is already populated for the range.
3873 *
3874 * Calling this function with @c pg_start being 0 means that the memory
3875 * is either a partial chunk, or a full chunk, that isn't published anywhere
3876 * and the initialization can happen without locks held.
3877 *
3878 * Calling this function with a non zero @c pg_start means that we are extending
3879 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>
3880 * is already usable and published in the zone, so extending it requires holding
3881 * the zone lock.
3882 *
3883 * @param zone The zone to cram new populated pages into
3884 * @param addr The base address for the chunk(s)
3885 * @param pg_va_new The number of virtual pages newly assigned to the zone
3886 * @param pg_start The first newly populated page relative to @a addr.
3887 * @param pg_end The after-last newly populated page relative to @a addr.
3888 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3889 */
3890 static void
3891 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3892 uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3893 {
3894 zone_id_t zindex = zone_index(zone);
3895 vm_offset_t elem_size = zone_elem_outer_size(zone);
3896 uint32_t free_start = 0, free_end = 0;
3897 uint32_t oob_offs = zone_elem_outer_offs(zone);
3898
3899 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3900 uint32_t chunk_pages = zone->z_chunk_pages;
3901 bool guarded = meta->zm_guarded;
3902
3903 assert(pg_start < pg_end && pg_end <= chunk_pages);
3904
3905 if (pg_start == 0) {
3906 uint16_t chunk_len = (uint16_t)pg_end;
3907 uint16_t secondary_len = ZM_SECONDARY_PAGE;
3908 bool inline_bitmap = false;
3909
3910 if (zone->z_percpu) {
3911 chunk_len = 1;
3912 secondary_len = ZM_SECONDARY_PCPU_PAGE;
3913 assert(pg_end == zpercpu_count());
3914 }
3915 if (!zone->z_permanent && !zone->z_uses_tags) {
3916 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3917 }
3918
3919 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3920
3921 meta[0] = (struct zone_page_metadata){
3922 .zm_index = zindex,
3923 .zm_guarded = guarded,
3924 .zm_inline_bitmap = inline_bitmap,
3925 .zm_chunk_len = chunk_len,
3926 .zm_alloc_size = lock,
3927 };
3928
3929 if (!zone->z_permanent && !inline_bitmap) {
3930 meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3931 zone->z_chunk_elems, zone->z_uses_tags);
3932 }
3933
3934 for (uint16_t i = 1; i < chunk_pages; i++) {
3935 meta[i] = (struct zone_page_metadata){
3936 .zm_index = zindex,
3937 .zm_guarded = guarded,
3938 .zm_inline_bitmap = inline_bitmap,
3939 .zm_chunk_len = secondary_len,
3940 .zm_page_index = (uint8_t)i,
3941 .zm_bitmap = meta[0].zm_bitmap,
3942 .zm_subchunk_len = (uint8_t)(chunk_pages - i),
3943 };
3944 }
3945
3946 if (inline_bitmap) {
3947 zone_meta_bits_init_inline(meta, free_end);
3948 }
3949 } else {
3950 assert(!zone->z_percpu && !zone->z_permanent);
3951
3952 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
3953 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
3954 }
3955
3956 #if KASAN_CLASSIC
3957 assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
3958 if (zone->z_permanent) {
3959 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
3960 } else if (zone->z_percpu) {
3961 for (uint32_t i = 0; i < pg_end; i++) {
3962 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
3963 zone_elem_outer_size(zone),
3964 zone_elem_outer_offs(zone),
3965 zone_elem_redzone(zone));
3966 }
3967 } else {
3968 kasan_zmem_add(addr, ptoa(pg_end),
3969 zone_elem_outer_size(zone),
3970 zone_elem_outer_offs(zone),
3971 zone_elem_redzone(zone));
3972 }
3973 #endif /* KASAN_CLASSIC */
3974
3975 /*
3976 * Insert the initialized pages / metadatas into the right lists.
3977 */
3978
3979 zone_lock(zone);
3980 assert(zone->z_self == zone);
3981
3982 if (pg_start != 0) {
3983 assert(meta->zm_chunk_len == pg_start);
3984
3985 zone_meta_bits_merge(meta, free_start, free_end);
3986 meta->zm_chunk_len = (uint16_t)pg_end;
3987
3988 /*
3989 * consume the zone_meta_lock_in_partial()
3990 * done in zone_expand_locked()
3991 */
3992 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
3993 zone_meta_remqueue(zone, meta);
3994 }
3995
3996 if (zone->z_permanent || meta->zm_alloc_size) {
3997 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
3998 } else {
3999 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4000 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4001 }
4002 if (pg_end < chunk_pages) {
4003 /* push any non populated residual VA on z_pageq_va */
4004 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4005 }
4006
4007 zone->z_elems_free += free_end - free_start;
4008 zone->z_elems_avail += free_end - free_start;
4009 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
4010 if (pg_va_new) {
4011 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4012 }
4013 if (zone->z_wired_hwm < zone->z_wired_cur) {
4014 zone->z_wired_hwm = zone->z_wired_cur;
4015 }
4016
4017 #if CONFIG_ZLEAKS
4018 if (__improbable(zleak_should_enable_for_zone(zone) &&
4019 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4020 thread_call_enter(&zone_leaks_callout);
4021 }
4022 #endif /* CONFIG_ZLEAKS */
4023
4024 zone_add_wired_pages(pg_end - pg_start);
4025 }
4026
4027 static void
4028 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4029 {
4030 uint32_t chunk_pages = zone->z_chunk_pages;
4031
4032 assert(pages % chunk_pages == 0);
4033 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4034 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4035 zone_unlock(zone);
4036 }
4037 }
4038
4039 __startup_func
4040 void
4041 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4042 {
4043 uint32_t pages = (uint32_t)atop(size);
4044
4045 assert(from_zone_map(newmem, size));
4046 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4047 assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4048
4049 /*
4050 * The early pages we move at the pmap layer can't be "depopulated"
4051 * because there's no vm_page_t for them.
4052 *
4053 * "Lock" them so that they never hit z_pageq_empty.
4054 */
4055 bzero((void *)newmem, size);
4056 zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4057 }
4058
4059 /*!
4060 * @function zone_submap_alloc_sequestered_va
4061 *
4062 * @brief
4063 * Allocates VA without using vm_find_space().
4064 *
4065 * @discussion
4066 * Allocate VA quickly without using the slower vm_find_space() for cases
4067 * when the submaps are fully sequestered.
4068 *
4069 * The VM submap is used to implement the VM itself so it is always sequestered,
4070 * as it can't use kmem_alloc(), which always needs to allocate VM map entries.
4071 * However, it can use vm_map_enter(), which tries to coalesce entries; since
4072 * that always works here, the VM map only ever needs 2 entries (one for each end).
4073 *
4074 * The RO submap is similarly always sequestered if it exists (as a non
4075 * sequestered RO submap makes very little sense).
4076 *
4077 * The allocator is a very simple bump-allocator
4078 * that allocates from either end.
4079 */
4080 static kern_return_t
4081 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4082 vm_offset_t *addrp)
4083 {
4084 vm_size_t size = ptoa(pages);
4085 vm_map_t map = zone_submap(zsflags);
4086 vm_map_entry_t first, last;
4087 vm_map_offset_t addr;
4088
4089 vm_map_lock(map);
4090
4091 first = vm_map_first_entry(map);
4092 last = vm_map_last_entry(map);
4093
4094 if (first->vme_end + size > last->vme_start) {
4095 vm_map_unlock(map);
4096 return KERN_NO_SPACE;
4097 }
4098
4099 if (zsflags.z_submap_from_end) {
4100 last->vme_start -= size;
4101 addr = last->vme_start;
4102 VME_OFFSET_SET(last, addr);
4103 } else {
4104 addr = first->vme_end;
4105 first->vme_end += size;
4106 }
4107 map->size += size;
4108
4109 vm_map_unlock(map);
4110
4111 *addrp = addr;
4112 return KERN_SUCCESS;
4113 }
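/*
 * Worked example (illustrative): with a sequestered submap spanning [A, B),
 * the two boundary entries bracket the free range:
 *
 *     first: [A, first->vme_end)      grows upward for front allocations
 *     last:  [last->vme_start, B)     grows downward when z_submap_from_end
 *
 * A 4-page front allocation simply bumps first->vme_end by ptoa(4) and
 * returns the old end; the map never gains new entries, which is what lets
 * the VM submap avoid recursing into kmem_alloc().
 */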
4114
4115 void
4116 zone_fill_initially(zone_t zone, vm_size_t nelems)
4117 {
4118 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4119 kern_return_t kr;
4120 vm_offset_t addr;
4121 uint32_t pages;
4122 zone_security_flags_t zsflags = zone_security_config(zone);
4123
4124 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4125 assert(zone->z_elems_avail == 0);
4126
4127 kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4128 pages = zone_alloc_pages_for_nelems(zone, nelems);
4129 if (zone_submap_is_sequestered(zsflags)) {
4130 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4131 if (kr != KERN_SUCCESS) {
4132 panic("zone_submap_alloc_sequestered_va() "
4133 "of %u pages failed", pages);
4134 }
4135 kernel_memory_populate(addr, ptoa(pages),
4136 kmaflags, VM_KERN_MEMORY_ZONE);
4137 } else {
4138 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4139 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4140 kmaflags, VM_KERN_MEMORY_ZONE);
4141 }
4142
4143 zone_meta_populate(addr, ptoa(pages));
4144 zcram(zone, addr, pages, 0);
4145 }
4146
4147 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4148 __attribute__((noinline))
4149 static void
4150 zone_scramble_va_and_unlock(
4151 zone_t z,
4152 struct zone_page_metadata *meta,
4153 uint32_t runs,
4154 uint32_t pages,
4155 uint32_t chunk_pages,
4156 uint64_t guard_mask)
4157 {
4158 struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];
4159
4160 for (uint32_t run = 0, n = 0; run < runs; run++) {
4161 arr[run] = meta + n;
4162 n += chunk_pages + ((guard_mask >> run) & 1);
4163 }
4164
4165 /*
4166 * Fisher–Yates shuffle, for an array with indices [0, n)
4167 *
4168 * for i from n−1 downto 1 do
4169 * j ← random integer such that 0 ≤ j ≤ i
4170 * exchange a[j] and a[i]
4171 *
4172 * The point here is that early allocations aren't at a fixed
4173 * distance from each other.
4174 */
4175 for (uint32_t i = runs - 1; i > 0; i--) {
4176 uint32_t j = zalloc_random_uniform32(0, i + 1);
4177
4178 meta = arr[j];
4179 arr[j] = arr[i];
4180 arr[i] = meta;
4181 }
4182
4183 zone_lock(z);
4184
4185 for (uint32_t i = 0; i < runs; i++) {
4186 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4187 }
4188 z->z_va_cur += z->z_percpu ? runs : pages;
4189 }
4190
4191 static inline uint32_t
4192 dist_u32(uint32_t a, uint32_t b)
4193 {
4194 return a < b ? b - a : a - b;
4195 }
4196
4197 static uint64_t
4198 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4199 {
4200 for (; n-- > 0; pop--) {
4201 uint32_t bit = zalloc_random_uniform32(0, pop);
4202 uint64_t m = mask;
4203
4204 for (; bit; bit--) {
4205 m &= m - 1;
4206 }
4207
4208 mask ^= 1ull << __builtin_ctzll(m);
4209 }
4210
4211 return mask;
4212 }
4213
4214 /**
4215 * @function zalloc_random_bits
4216 *
4217 * @brief
4218 * Compute a random number with a specified number of bit set in a given width.
4219 *
4220 * @discussion
4221 * This function generates a "uniform" distribution of sets of bits set in
4222 * a given width, with typically less than width/4 calls to random.
4223 *
4224 * @param pop the target number of bits set.
4225 * @param width the number of bits in the random integer to generate.
4226 */
4227 static uint64_t
4228 zalloc_random_bits(uint32_t pop, uint32_t width)
4229 {
4230 uint64_t w_mask = (1ull << width) - 1;
4231 uint64_t mask;
4232 uint32_t cur;
4233
4234 if (3 * width / 4 <= pop) {
4235 mask = w_mask;
4236 cur = width;
4237 } else if (pop <= width / 4) {
4238 mask = 0;
4239 cur = 0;
4240 } else {
4241 /*
4242 * Choosing a random number this way will overwhelmingly
4243 * have about `width / 2` bits set, +/- a few.
4244 */
4245 mask = zalloc_random_mask64(width);
4246 cur = __builtin_popcountll(mask);
4247
4248 if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4249 /*
4250 * If the opposite mask has a closer popcount,
4251 * then start with that one as the seed.
4252 */
4253 cur = width - cur;
4254 mask ^= w_mask;
4255 }
4256 }
4257
4258 if (cur < pop) {
4259 /*
4260 * Setting `pop - cur` bits is really clearing that many from
4261 * the opposite mask.
4262 */
4263 mask ^= w_mask;
4264 mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4265 mask ^= w_mask;
4266 } else if (pop < cur) {
4267 mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4268 }
4269
4270 return mask;
4271 }
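/*
 * Worked example (illustrative): zalloc_random_bits(2, 6) returns a 6-bit
 * mask with exactly 2 bits set, e.g. 0b010010. Since 2 lies strictly
 * between width/4 (= 1) and 3*width/4 (= 4), the function seeds from
 * zalloc_random_mask64(6) (popcount ~3) and then clears or sets bits one
 * at a time until the popcount is exactly 2.
 */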
4272 #endif
4273
4274 static void
4275 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4276 {
4277 zone_security_flags_t zsflags = zone_security_config(z);
4278 struct zone_page_metadata *meta;
4279 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4280 uint32_t chunk_pages = z->z_chunk_pages;
4281 uint32_t runs, pages, guards, rnum;
4282 uint64_t guard_mask = 0;
4283 bool lead_guard = false;
4284 kern_return_t kr;
4285 vm_offset_t addr;
4286
4287 zone_unlock(z);
4288
4289 /*
4290 * A lot of OOB exploitation techniques rely on precise placement
4291 * and interleaving of zone pages. The layout that is sought
4292 * by attackers will be C/P/T types, where:
4293 * - (C)ompromised is the type for which attackers have a bug,
4294 * - (P)adding is used to pad memory,
4295 * - (T)arget is the type that the attacker will attempt to corrupt
4296 * by exploiting (C).
4297 *
4298 * Note that in some cases C==T and P isn't needed.
4299 *
4300 * In order to make those placement games much harder,
4301 * we grow zones by random runs of memory, up to 256k.
4302 * This makes predicting the precise layout of the heap
4303 * considerably more complicated.
4304 *
4305 * Note: this function makes a very heavy use of random,
4306 * however, it is mostly limited to sequestered zones,
4307 * and eventually the layout will be fixed,
4308 * and the usage of random vastly reduced.
4309 *
4310 * For non-sequestered zones, there's a single call
4311 * to random in order to decide whether we want
4312 * a guard page or not.
4313 */
4314 pages = chunk_pages;
4315 guards = 0;
4316 runs = 1;
4317 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4318 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4319 pages = atop(ZONE_CHUNK_ALLOC_SIZE);
4320 runs = (pages + chunk_pages - 1) / chunk_pages;
4321 runs = zalloc_random_uniform32(1, runs + 1);
4322 pages = runs * chunk_pages;
4323 }
4324 static_assert(ZONE_CHUNK_ALLOC_SIZE / 4096 <= 64,
4325 "make sure that `runs` will never be larger than 64");
4326 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4327
4328 /*
4329 * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4330 * guards might be added after each chunk.
4331 *
4332 * Those guard pages are marked with the ZM_PGZ_GUARD
4333 * magical chunk len, and their zm_oob_offs field
4334 * is used to remember optional shift applied
4335 * to returned elements, in order to right-align-them
4336 * as much as possible.
4337 *
4338 * In an adversarial context, while guard pages
4339 * are extremely effective against linear overflow,
4340 * using a predictable density of guard pages feels like
4341 * a missed opportunity, which is why we chose to insert
4342 * one guard page for about 32k of memory, and place it
4343 * randomly.
4344 */
4345 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4346 if (z->z_percpu) {
4347 /*
4348 * For per-cpu runs, have a 75% chance to have a guard.
4349 */
4350 rnum = zalloc_random_uniform32(0, 4 * 128);
4351 guards = rnum >= 128;
4352 } else if (!zsflags.z_pgz_use_guards && !z->z_pgz_use_guards) {
4353 vm_offset_t rest;
4354
4355 /*
4356 * For types that are less susceptible to OOBs,
4357 * have a density of 1 guard every 64k, with a uniform
4358 * distribution.
4359 */
4360 rnum = zalloc_random_uniform32(0, ZONE_GUARD_SPARSE);
4361 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE;
4362 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE;
4363 guards += rnum < rest;
4364 } else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) {
4365 /*
4366 * For chunks >= 32k, have a 75% chance of guard pages
4367 * between chunks.
4368 */
4369 rnum = zalloc_random_uniform32(65, 129);
4370 guards = runs * rnum / 128;
4371 } else {
4372 vm_offset_t rest;
4373
4374 /*
4375 * Otherwise, aim at 1 guard every 32k,
4376 * with a uniform distribution.
4377 */
4378 rnum = zalloc_random_uniform32(0, ZONE_GUARD_DENSE);
4379 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;
4380 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;
4381 guards += rnum < rest;
4382 }
4383 assert3u(guards, <=, runs);
4384
4385 guard_mask = 0;
4386
4387 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4388 uint32_t g = 0;
4389
4390 /*
4391 * Several exploitation strategies rely on a C/T (compromised
4392 * then target types) ordering of pages with a sub-page reach
4393 * from C into T.
4394 *
4395 * We want to reliably thwart such exploitations
4396 * and hence force a guard page between alternating
4397 * memory types.
4398 */
4399 guard_mask |= 1ull << (runs - 1);
4400 g++;
4401
4402 /*
4403 * While we randomize the chunks lengths, an attacker with
4404 * precise timing control can guess when overflows happen,
4405 * and "measure" the runs, which gives them an indication
4406 * of where the next run starts.
4407 *
4408 * In order to make this knowledge unusable, add a guard page
4409 * _before_ the new run with a 25% probability, regardless
4410 * of whether we had enough guard pages.
4411 */
4412 if ((rnum & 3) == 0) {
4413 lead_guard = true;
4414 g++;
4415 }
4416 if (guards > g) {
4417 guard_mask |= zalloc_random_bits(guards - g, runs - 1);
4418 } else {
4419 guards = g;
4420 }
4421 } else {
4422 assert3u(runs, ==, 1);
4423 assert3u(guards, <=, 1);
4424 guard_mask = guards << (runs - 1);
4425 }
4426 #else
4427 (void)rnum;
4428 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4429
4430 if (zone_submap_is_sequestered(zsflags)) {
4431 kr = zone_submap_alloc_sequestered_va(zsflags,
4432 pages + guards, &addr);
4433 } else {
4434 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4435 kr = kmem_alloc(zone_submap(zsflags), &addr,
4436 ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE);
4437 }
4438
4439 if (kr != KERN_SUCCESS) {
4440 uint64_t zone_size = 0;
4441 zone_t zone_largest = zone_find_largest(&zone_size);
4442 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4443 "likely due to memory leak in zone [%s%s] "
4444 "(%u%c, %d elements allocated)",
4445 kr, zone_heap_name(z), zone_name(z),
4446 zone_heap_name(zone_largest), zone_name(zone_largest),
4447 mach_vm_size_pretty(zone_size),
4448 mach_vm_size_unit(zone_size),
4449 zone_count_allocated(zone_largest));
4450 }
4451
4452 meta = zone_meta_from_addr(addr);
4453 zone_meta_populate(addr, ptoa(pages + guards));
4454
4455 /*
4456 * Handle the leading guard page if any
4457 */
4458 if (lead_guard) {
4459 meta[0].zm_index = zone_index(z);
4460 meta[0].zm_chunk_len = ZM_PGZ_GUARD;
4461 meta[0].zm_guarded = true;
4462 meta++;
4463 }
4464
4465 for (uint32_t run = 0, n = 0; run < runs; run++) {
4466 bool guarded = (guard_mask >> run) & 1;
4467
4468 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4469 meta[n].zm_index = zone_index(z);
4470 meta[n].zm_guarded = guarded;
4471 }
4472 if (guarded) {
4473 meta[n].zm_index = zone_index(z);
4474 meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4475 n++;
4476 }
4477 }
4478 if (guards) {
4479 os_atomic_add(&zone_guard_pages, guards, relaxed);
4480 }
4481
4482 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4483 if (__improbable(zone_caching_disabled < 0)) {
4484 return zone_scramble_va_and_unlock(z, meta, runs, pages,
4485 chunk_pages, guard_mask);
4486 }
4487 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4488
4489 zone_lock(z);
4490
4491 for (uint32_t run = 0, n = 0; run < runs; run++) {
4492 zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4493 n += chunk_pages + ((guard_mask >> run) & 1);
4494 }
4495 z->z_va_cur += z->z_percpu ? runs : pages;
4496 }
4497
4498 static bool
4499 zone_expand_pred_nope(__unused zone_t z)
4500 {
4501 return false;
4502 }
4503
4504 static inline void
4505 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4506 {
4507 #if DEBUG || DEVELOPMENT
4508 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
4509 size, 0, 0, 0);
4510 #else
4511 (void)size;
4512 #endif
4513 }
4514
4515 static inline void
4516 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4517 {
4518 #if DEBUG || DEVELOPMENT
4519 task_t task = current_task_early();
4520 if (pages && task) {
4521 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4522 }
4523 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
4524 pages, 0, 0, 0);
4525 #else
4526 (void)pages;
4527 #endif
4528 }
4529
4530 __attribute__((noinline))
4531 static void
4532 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4533 {
4534 uint64_t wait_start = 0;
4535 long mapped;
4536
4537 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4538
4539 if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4540 return;
4541 }
4542
4543 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4544
4545 /*
4546 * If the zone map is really exhausted, wait on the GC thread,
4547 * donating our priority (which is important because the GC
4548 * thread is at a rather low priority).
4549 */
4550 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4551 uint32_t wait_ms = n * (n + 1) / 2;
4552 uint64_t interval;
4553
4554 if (n == 1) {
4555 wait_start = mach_absolute_time();
4556 } else {
4557 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4558 }
4559 if (zone_exhausted_timeout > 0 &&
4560 wait_ms > zone_exhausted_timeout) {
4561 panic("zone map exhaustion: waited for %dms "
4562 "(pages: %ld, max: %ld, wanted: %d)",
4563 wait_ms, mapped, zone_pages_wired_max, pgs);
4564 }
4565
4566 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4567 &interval);
4568
4569 lck_spin_lock(&zone_exhausted_lock);
4570 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4571 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4572 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4573
4574 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4575 }
4576 }
4577
4578 static bool
4579 zone_expand_wait_for_pages(bool waited)
4580 {
4581 if (waited) {
4582 return false;
4583 }
4584 #if DEBUG || DEVELOPMENT
4585 if (zalloc_simulate_vm_pressure) {
4586 return false;
4587 }
4588 #endif /* DEBUG || DEVELOPMENT */
4589 return !vm_pool_low();
4590 }
4591
4592 static inline void
4593 zone_expand_async_schedule_if_allowed(zone_t zone)
4594 {
4595 if (zone->z_async_refilling || zone->no_callout) {
4596 return;
4597 }
4598
4599 if (zone->exhaustible && zone->z_wired_cur >= zone->z_wired_max) {
4600 return;
4601 }
4602
4603 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4604 return;
4605 }
4606
4607 if (!vm_pool_low() || zone_supports_vm(zone)) {
4608 zone->z_async_refilling = true;
4609 thread_call_enter(&zone_expand_callout);
4610 }
4611 }
4612
4613 static void
4614 zone_expand_locked(zone_t z, zalloc_flags_t flags, bool (*pred)(zone_t))
4615 {
4616 zone_security_flags_t zsflags = zone_security_config(z);
4617 struct zone_expand ze = {
4618 .ze_thread = current_thread(),
4619 };
4620
4621 if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4622 ze.ze_thread->options |= TH_OPT_VMPRIV;
4623 ze.ze_clear_priv = true;
4624 }
4625
4626 if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4627 /*
4628 * When the thread is VM privileged,
4629 * vm_page_grab() will call VM_PAGE_WAIT()
4630 * without our knowledge, so we must unfortunately
4631 * assume it's being called.
4632 *
4633 * In practice it's not a big deal because
4634 * Z_NOPAGEWAIT is not really used on zones
4635 * that VM privileged threads are going to expand.
4636 */
4637 ze.ze_pg_wait = true;
4638 ze.ze_vm_priv = true;
4639 }
4640
4641 for (;;) {
4642 if (!pred) {
4643 /* NULL pred means "try just once" */
4644 pred = zone_expand_pred_nope;
4645 } else if (!pred(z)) {
4646 goto out;
4647 }
4648
4649 if (z->z_expander == NULL) {
4650 z->z_expander = &ze;
4651 break;
4652 }
4653
4654 if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4655 change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4656 ze.ze_next = z->z_expander;
4657 z->z_expander = &ze;
4658 break;
4659 }
4660
4661 if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4662 goto out;
4663 }
4664
4665 z->z_expanding_wait = true;
4666 hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4667 LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4668 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4669 }
4670
4671 do {
4672 struct zone_page_metadata *meta = NULL;
4673 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4674 vm_page_t page_list = NULL;
4675 vm_offset_t addr = 0;
4676 int waited = 0;
4677
4678 /*
4679 * While we hold the zone lock, look if there's VA we can:
4680 * - complete from partial pages,
4681 * - reuse from the sequester list.
4682 *
4683 * When the page is being populated we pretend we allocated
4684 * an extra element so that zone_gc() can't attempt to free
4685 * the chunk (as it could become empty while we wait for pages).
4686 */
4687 if (zone_pva_is_null(z->z_pageq_va)) {
4688 zone_allocate_va_locked(z, flags);
4689 }
4690
4691 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4692 addr = zone_meta_to_addr(meta);
4693 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4694 cur_pages = meta->zm_page_index;
4695 meta -= cur_pages;
4696 addr -= ptoa(cur_pages);
4697 zone_meta_lock_in_partial(z, meta, cur_pages);
4698 }
4699 zone_unlock(z);
4700
4701 /*
4702 * And now allocate pages to populate our VA.
4703 */
4704 min_pages = z->z_chunk_pages;
4705 #if !KASAN_CLASSIC
4706 if (!z->z_percpu) {
4707 min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4708 zone_elem_outer_size(z)));
4709 }
4710 #endif /* !KASAN_CLASSIC */
4711
4712 /*
4713 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
4714 * if we're running out of zone memory
4715 */
4716 if (__improbable(zone_map_nearing_exhaustion())) {
4717 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4718 }
4719
4720 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4721
4722 while (pages < z->z_chunk_pages - cur_pages) {
4723 vm_page_t m = vm_page_grab();
4724
4725 if (m) {
4726 pages++;
4727 m->vmp_snext = page_list;
4728 page_list = m;
4729 vm_page_zero_fill(m);
4730 continue;
4731 }
4732
4733 if (pages >= min_pages &&
4734 !zone_expand_wait_for_pages(waited)) {
4735 break;
4736 }
4737
4738 if ((flags & Z_NOPAGEWAIT) == 0) {
4739 /*
4740 * The first time we're about to wait for pages,
4741 * mention that to waiters and wake them all.
4742 *
4743 * Set `ze_pg_wait` in our zone_expand context
4744 * so that waiters who care do not wait again.
4745 */
4746 if (!ze.ze_pg_wait) {
4747 zone_lock(z);
4748 if (z->z_expanding_wait) {
4749 z->z_expanding_wait = false;
4750 wakeup_all_with_inheritor(&z->z_expander,
4751 THREAD_AWAKENED);
4752 }
4753 ze.ze_pg_wait = true;
4754 zone_unlock(z);
4755 }
4756
4757 waited++;
4758 VM_PAGE_WAIT();
4759 continue;
4760 }
4761
4762 /*
4763 * Undo everything and bail out:
4764 *
4765 * - free pages
4766 * - undo the fake allocation if any
4767 * - put the VA back on the VA page queue.
4768 */
4769 vm_page_free_list(page_list, FALSE);
4770 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4771
4772 zone_lock(z);
4773
4774 zone_expand_async_schedule_if_allowed(z);
4775
4776 if (cur_pages) {
4777 zone_meta_unlock_from_partial(z, meta, cur_pages);
4778 }
4779 if (meta) {
4780 zone_meta_queue_push(z, &z->z_pageq_va,
4781 meta + cur_pages);
4782 }
4783 goto page_shortage;
4784 }
4785
4786 vm_object_lock(kernel_object);
4787 kernel_memory_populate_object_and_unlock(kernel_object,
4788 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4789 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4790 (zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY)
4791 ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE);
4792
4793 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4794
4795 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4796
4797 if (z->z_wired_cur == z->z_wired_max) {
4798 zone_unlock(z);
4799 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z);
4800 zone_lock(z);
4801 }
4802 } while (pred(z));
4803
4804 page_shortage:
4805 if (z->z_expander == &ze) {
4806 z->z_expander = ze.ze_next;
4807 } else {
4808 assert(z->z_expander->ze_next == &ze);
4809 z->z_expander->ze_next = NULL;
4810 }
4811 if (z->z_expanding_wait) {
4812 z->z_expanding_wait = false;
4813 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4814 }
4815 out:
4816 if (ze.ze_clear_priv) {
4817 ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4818 }
4819 }
4820
4821 static bool
4822 zalloc_needs_refill(zone_t zone)
4823 {
4824 if (zone->z_elems_free > zone->z_elems_rsv) {
4825 return false;
4826 }
4827 if (zone->z_wired_cur < zone->z_wired_max) {
4828 return true;
4829 }
4830 return !zone->exhaustible;
4831 }
4832
4833 static void
4834 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4835 {
4836 zone_foreach(z) {
4837 if (z->no_callout) {
4838 /* z_async_refilling will never be set */
4839 continue;
4840 }
4841
4842 if (!z->z_async_refilling) {
4843 /*
4844 * avoid locking all zones, because the one(s)
4845 * we're looking for have been set _before_
4846 * thread_call_enter() was called; if we fail
4847 * to observe the bit, it means the thread-call
4848 * has been "dinged" again and we'll notice it then.
4849 */
4850 continue;
4851 }
4852
4853 zone_lock(z);
4854 if (z->z_self && z->z_async_refilling) {
4855 zone_expand_locked(z, Z_WAITOK, zalloc_needs_refill);
4856 /*
4857 * clearing _after_ we grow is important,
4858 * so that we avoid waking up the thread call
4859 * while we grow and cause to run a second time.
4860 */
4861 z->z_async_refilling = false;
4862 }
4863 zone_unlock(z);
4864 }
4865 }
4866
4867 #endif /* !ZALLOC_TEST */
4868 #pragma mark zone jetsam integration
4869 #if !ZALLOC_TEST
4870
4871 /*
4872 * We're being very conservative here and picking a value of 95%. We might need to lower this if
4873 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4874 */
4875 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4876
4877 /*
4878 * Threshold above which largest zones should be included in the panic log
4879 */
4880 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4881
4882 /*
4883 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4884 * where X=zone_map_jetsam_limit.
4885 *
4886 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4887 */
4888 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
4889 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
4890
4891 kern_return_t
4892 zone_map_jetsam_set_limit(uint32_t value)
4893 {
4894 if (value <= 0 || value > 100) {
4895 return KERN_INVALID_VALUE;
4896 }
4897
4898 zone_map_jetsam_limit = value;
4899 os_atomic_store(&zone_pages_jetsam_threshold,
4900 zone_pages_wired_max * value / 100, relaxed);
4901 return KERN_SUCCESS;
4902 }
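/*
 * Worked example (illustrative): with a zone map capacity of 1,000,000
 * wired pages, zone_map_jetsam_set_limit(90) stores a jetsam threshold of
 * 900,000 pages, so zone_map_nearing_exhaustion() starts returning true
 * (which is what gates the jetsam path) once 90% of the map is wired.
 * The same knob can be set at boot via the "zone_map_jetsam_limit"
 * boot-arg declared above.
 */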
4903
4904 void
4905 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
4906 {
4907 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4908 *current_size = ptoa_64(phys_pages);
4909 *capacity = ptoa_64(zone_pages_wired_max);
4910 }
4911
4912 void
4913 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
4914 {
4915 zone_t largest_zone = zone_find_largest(zone_size);
4916
4917 /*
4918 * Append kalloc heap name to zone name (if zone is used by kalloc)
4919 */
4920 snprintf(zone_name, zone_name_len, "%s%s",
4921 zone_heap_name(largest_zone), largest_zone->z_name);
4922 }
4923
4924 static bool
4925 zone_map_nearing_threshold(unsigned int threshold)
4926 {
4927 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4928 return phys_pages * 100 > zone_pages_wired_max * threshold;
4929 }
4930
4931 bool
4932 zone_map_nearing_exhaustion(void)
4933 {
4934 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
4935
4936 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
4937 }
4938
4939
4940 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
4941
4942 /*
4943 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
4944 * to walk through the jetsam priority bands and kill processes.
4945 */
4946 static zone_t
4947 kill_process_in_largest_zone(void)
4948 {
4949 pid_t pid = -1;
4950 uint64_t zone_size = 0;
4951 zone_t largest_zone = zone_find_largest(&zone_size);
4952
4953 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
4954 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
4955 ptoa_64(zone_pages_wired_max),
4956 (uint64_t)zone_submaps_approx_size(),
4957 (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
4958 zone_map_jetsam_limit);
4959 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
4960 largest_zone->z_name, (uintptr_t)zone_size);
4961
4962 /*
4963 * We want to make sure we don't call this function from userspace.
4964 * Otherwise we could end up trying to synchronously kill the process
4965 * whose context we're in, causing the system to hang.
4966 */
4967 assert(current_task() == kernel_task);
4968
4969 /*
4970 * If vm_object_zone is the largest, check to see if the number of
4971 * elements in vm_map_entry_zone is comparable.
4972 *
4973 * If so, consider vm_map_entry_zone as the largest. This lets us target
4974 * a specific process to jetsam to quickly recover from the zone map
4975 * bloat.
4976 */
4977 if (largest_zone == vm_object_zone) {
4978 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
4979 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
4980 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
4981 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
4982 largest_zone = vm_map_entry_zone;
4983 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
4984 (uintptr_t)zone_size_wired(largest_zone));
4985 }
4986 }
4987
4988 /* TODO: Extend this to check for the largest process in other zones as well. */
4989 if (largest_zone == vm_map_entry_zone) {
4990 pid = find_largest_process_vm_map_entries();
4991 } else {
4992 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
4993 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
4994 largest_zone->z_name);
4995 }
4996 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
4997 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
4998 }
4999
5000 return largest_zone;
5001 }
5002
5003 #endif /* !ZALLOC_TEST */
5004 #pragma mark probabilistic gzalloc
5005 #if !ZALLOC_TEST
5006 #if CONFIG_PROB_GZALLOC
5007
5008 extern uint32_t random(void);
5009 struct pgz_backtrace {
5010 uint32_t pgz_depth;
5011 int32_t pgz_bt[MAX_ZTRACE_DEPTH];
5012 };
5013
5014 static int32_t PERCPU_DATA(pgz_sample_counter);
5015 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
5016 static uint32_t pgz_uses; /* number of zones using PGZ */
5017 static int32_t pgz_slot_avail;
5018 #if OS_ATOMIC_HAS_LLSC
5019 struct zone_page_metadata *pgz_slot_head;
5020 #else
5021 static struct pgz_slot_head {
5022 uint32_t psh_count;
5023 uint32_t psh_slot;
5024 } pgz_slot_head;
5025 #endif
5026 struct zone_page_metadata *pgz_slot_tail;
5027 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5028
5029 static struct zone_page_metadata *
5030 pgz_meta(uint32_t index)
5031 {
5032 return &zone_info.zi_pgz_meta[2 * index + 1];
5033 }
5034
5035 static struct pgz_backtrace *
5036 pgz_bt(uint32_t slot, bool free)
5037 {
5038 return &pgz_backtraces[2 * slot + free];
5039 }
5040
5041 static void
5042 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5043 {
5044 struct backtrace_control ctl = {
5045 .btc_frame_addr = (uintptr_t)fp,
5046 };
5047
5048 bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5049 (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5050 }
5051
5052 static uint32_t
5053 pgz_slot(vm_offset_t addr)
5054 {
5055 return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5056 }
5057
5058 static vm_offset_t
5059 pgz_addr(uint32_t slot)
5060 {
5061 return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5062 }
5063
5064 static bool
5065 pgz_sample(vm_offset_t addr, vm_size_t esize)
5066 {
5067 int32_t *counterp, cnt;
5068
5069 if (zone_addr_size_crosses_page(addr, esize)) {
5070 return false;
5071 }
5072
5073 /*
5074 * Note: accessing pgz_sample_counter is racy but this is
5075 * kind of acceptable given that this is not
5076 * a security load-bearing feature.
5077 */
5078
5079 counterp = PERCPU_GET(pgz_sample_counter);
5080 cnt = *counterp;
5081 if (__probable(cnt > 0)) {
5082 *counterp = cnt - 1;
5083 return false;
5084 }
5085
5086 if (pgz_slot_avail <= 0) {
5087 return false;
5088 }
5089
5090 /*
5091 * zalloc_random_uniform() might block, so when preemption is disabled,
5092 * set the counter to `-1` which will cause the next allocation
5093 * that can block to generate a new random value.
5094 *
5095 * No allocation on this CPU will sample until then.
5096 */
5097 if (get_preemption_level()) {
5098 *counterp = -1;
5099 } else {
5100 *counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5101 }
5102
5103 return cnt == 0;
5104 }
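/*
 * Illustrative sketch (hypothetical caller, not part of the build):
 * the allocation path is expected to consult pgz_sample() and, only when
 * it fires, attempt to move the element behind a guarded double-mapping:
 *
 *     if (zone->z_pgz_tracked &&
 *         pgz_sample(addr, zone_elem_inner_size(zone))) {
 *         addr = pgz_protect(zone, addr, __builtin_frame_address(0));
 *     }
 *
 * pgz_protect() below falls back to returning `addr` unchanged when no
 * slot is available or the double-mapping cannot be entered.
 */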
5105
5106 static inline bool
5107 pgz_slot_alloc(uint32_t *slot)
5108 {
5109 struct zone_page_metadata *m;
5110 uint32_t tries = 100;
5111
5112 disable_preemption();
5113
5114 #if OS_ATOMIC_USE_LLSC
5115 int32_t ov, nv;
5116 os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5117 if (__improbable(ov <= 0)) {
5118 os_atomic_rmw_loop_give_up({
5119 enable_preemption();
5120 return false;
5121 });
5122 }
5123 nv = ov - 1;
5124 });
5125 #else
5126 if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5127 os_atomic_inc(&pgz_slot_avail, relaxed);
5128 enable_preemption();
5129 return false;
5130 }
5131 #endif
5132
5133 again:
5134 if (__improbable(tries-- == 0)) {
5135 /*
5136 * Too much contention,
5137 * extremely unlikely but do not stay stuck.
5138 */
5139 os_atomic_inc(&pgz_slot_avail, relaxed);
5140 enable_preemption();
5141 return false;
5142 }
5143
5144 #if OS_ATOMIC_HAS_LLSC
5145 do {
5146 m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5147 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5148 /*
5149 * Either we are waiting for an enqueuer (unlikely)
5150 * or we are competing with another core and
5151 * are looking at a popped element.
5152 */
5153 os_atomic_clear_exclusive();
5154 goto again;
5155 }
5156 } while (!os_atomic_store_exclusive(&pgz_slot_head,
5157 m->zm_pgz_slot_next, relaxed));
5158 #else
5159 struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5160 struct pgz_slot_head ov, nv;
5161 os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5162 m = &base[ov.psh_slot * 2];
5163 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5164 /*
5165 * Either we are waiting for an enqueuer (unlikely)
5166 * or we are competing with another core and
5167 * are looking at a popped element.
5168 */
5169 os_atomic_rmw_loop_give_up(goto again);
5170 }
5171 nv.psh_count = ov.psh_count + 1;
5172 nv.psh_slot = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5173 });
5174 #endif
5175
5176 enable_preemption();
5177
5178 m->zm_pgz_slot_next = NULL;
5179 *slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5180 return true;
5181 }
5182
5183 static inline bool
5184 pgz_slot_free(uint32_t slot)
5185 {
5186 struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
5187 struct zone_page_metadata *t;
5188
5189 disable_preemption();
5190 t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5191 os_atomic_store(&t->zm_pgz_slot_next, m, release);
5192 os_atomic_inc(&pgz_slot_avail, relaxed);
5193 enable_preemption();
5194
5195 return true;
5196 }
5197
5198 /*!
5199 * @function pgz_protect()
5200 *
5201 * @brief
5202 * Try to protect an allocation with PGZ.
5203 *
5204 * @param zone The zone the allocation was made against.
5205 * @param addr An allocated element address to protect.
5206 * @param fp The caller frame pointer (for the backtrace).
5207 * @returns The new address for the element, or @c addr.
5208 */
5209 __attribute__((noinline))
5210 static vm_offset_t
5211 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5212 {
5213 kern_return_t kr;
5214 uint32_t slot;
5215
5216 if (!pgz_slot_alloc(&slot)) {
5217 return addr;
5218 }
5219
5220 /*
5221 * Try to double-map the page (may fail if Z_NOWAIT).
5222 * we will always find a PA because pgz_init() pre-expanded the pmap.
5223 */
5224 vm_offset_t new_addr = pgz_addr(slot);
5225 pmap_paddr_t pa = kvtophys(trunc_page(addr));
5226
5227 kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5228 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5229 get_preemption_level() ? PMAP_OPTIONS_NOWAIT : 0, NULL);
5230
5231 if (__improbable(kr != KERN_SUCCESS)) {
5232 pgz_slot_free(slot);
5233 return addr;
5234 }
5235
5236 struct zone_page_metadata tmp = {
5237 .zm_chunk_len = ZM_PGZ_ALLOCATED,
5238 .zm_index = zone_index(zone),
5239 };
5240 struct zone_page_metadata *meta = pgz_meta(slot);
5241
5242 os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5243 os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5244 pgz_backtrace(pgz_bt(slot, false), fp);
5245
5246 return new_addr + (addr & PAGE_MASK);
5247 }
5248
5249 /*!
5250 * @function pgz_unprotect()
5251 *
5252 * @brief
5253 * Release a PGZ slot and returns the original address of a freed element.
5254 *
5255 * @param addr A PGZ protected element address.
5256 * @param fp The caller frame pointer (for the backtrace).
5257 * @returns The non protected address for the element
5258 * that was passed to @c pgz_protect().
5259 */
5260 __attribute__((noinline))
5261 static vm_offset_t
5262 pgz_unprotect(vm_offset_t addr, void *fp)
5263 {
5264 struct zone_page_metadata *meta;
5265 struct zone_page_metadata tmp;
5266 uint32_t slot;
5267
5268 slot = pgz_slot(addr);
5269 meta = zone_meta_from_addr(addr);
5270 tmp = *meta;
5271 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5272 goto double_free;
5273 }
5274
5275 pmap_remove(kernel_pmap, trunc_page(addr), trunc_page(addr) + PAGE_SIZE);
5276
5277 pgz_backtrace(pgz_bt(slot, true), fp);
5278
5279 tmp.zm_chunk_len = ZM_PGZ_FREE;
5280 tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5281 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5282 goto double_free;
5283 }
5284
5285 pgz_slot_free(slot);
5286 return tmp.zm_pgz_orig_addr;
5287
5288 double_free:
5289 panic_fault_address = addr;
5290 meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5291 panic("probabilistic gzalloc double free: %p", (void *)addr);
5292 }
5293
5294 bool
5295 pgz_owned(mach_vm_address_t addr)
5296 {
5297 #if CONFIG_KERNEL_TBI
5298 addr = VM_KERNEL_TBI_FILL(addr);
5299 #endif /* CONFIG_KERNEL_TBI */
5300
5301 return mach_vm_range_contains(&zone_info.zi_pgz_range, addr);
5302 }
5303
5304
5305 __attribute__((always_inline))
5306 vm_offset_t
5307 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5308 {
5309 struct zone_page_metadata *meta;
5310
5311 if (__probable(!pgz_owned(addr))) {
5312 return (vm_offset_t)addr;
5313 }
5314
5315 if (zone_addr_size_crosses_page(addr, size)) {
5316 panic("invalid size for PGZ protected address %p:%p",
5317 (void *)addr, (void *)(addr + size));
5318 }
5319
5320 meta = zone_meta_from_addr((vm_offset_t)addr);
5321 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5322 panic_fault_address = (vm_offset_t)addr;
5323 panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5324 }
5325
5326 return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5327 }
5328
5329 __attribute__((always_inline))
5330 vm_offset_t
5331 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5332 {
5333 struct zone_page_metadata *meta;
5334 struct zone_page_metadata tmp;
5335
5336 if (__probable(!pgz_owned(addr))) {
5337 return addr;
5338 }
5339
5340 meta = zone_meta_from_addr(addr);
5341 tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5342
5343 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5344
5345 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5346 return 0;
5347 }
5348
5349 if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5350 return 0;
5351 }
5352
5353 return addr;
5354 }
5355
5356 static void
5357 pgz_zone_init(zone_t z)
5358 {
5359 char zn[MAX_ZONE_NAME];
5360 char zv[MAX_ZONE_NAME];
5361 char key[30];
5362
5363 if (zone_elem_inner_size(z) > PAGE_SIZE) {
5364 return;
5365 }
5366
5367 if (pgz_all) {
5368 os_atomic_inc(&pgz_uses, relaxed);
5369 z->z_pgz_tracked = true;
5370 return;
5371 }
5372
5373 snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5374
5375 for (int i = 1;; i++) {
5376 snprintf(key, sizeof(key), "pgz%d", i);
5377 if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5378 break;
5379 }
5380 if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5381 os_atomic_inc(&pgz_uses, relaxed);
5382 z->z_pgz_tracked = true;
5383 break;
5384 }
5385 }
5386 }
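/*
 * Illustrative example (hypothetical zone names): when pgz_all is not set,
 * individual zones are opted in with numbered "pgzN" boot-args matched
 * against the zone (or kalloc size class) name, e.g.
 *
 *     pgz1=data.kalloc.48 pgz2=vm.map.entries
 *
 * which would set z_pgz_tracked only on the matching zones.
 */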
5387
5388 __startup_func
5389 static vm_size_t
5390 pgz_get_size(void)
5391 {
5392 if (pgz_slots == UINT32_MAX) {
5393 /*
5394 * Scale with RAM size: ~200 slots per GiB
5395 */
5396 pgz_slots = (uint32_t)(sane_size >> 22);
5397 }
5398
5399 /*
5400 * Make sure that the slot allocation scheme works.
5401 * See pgz_slot_alloc() / pgz_slot_free().
5402 */
5403 if (pgz_slots < zpercpu_count() * 4) {
5404 pgz_slots = zpercpu_count() * 4;
5405 }
5406 if (pgz_slots >= UINT16_MAX) {
5407 pgz_slots = UINT16_MAX - 1;
5408 }
5409
5410 /*
5411 * Quarantine is 33% of slots by default, no more than 90%.
5412 */
5413 if (pgz_quarantine == 0) {
5414 pgz_quarantine = pgz_slots / 3;
5415 }
5416 if (pgz_quarantine > pgz_slots * 9 / 10) {
5417 pgz_quarantine = pgz_slots * 9 / 10;
5418 }
5419 pgz_slot_avail = pgz_slots - pgz_quarantine;
5420
5421 return ptoa(2 * pgz_slots + 1);
5422 }
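/*
 * Worked example (illustrative numbers): with 8GiB of usable RAM,
 * sane_size >> 22 yields 2048 slots; the quarantine then defaults to
 * 2048 / 3 = 682 slots, leaving 2048 - 682 = 1366 slots immediately
 * available, and the reserved VA covers ptoa(2 * 2048 + 1) pages so
 * that every slot page is interleaved with guard pages.
 */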
5423
5424 __startup_func
5425 static void
5426 pgz_init(void)
5427 {
5428 if (!pgz_uses) {
5429 return;
5430 }
5431
5432 if (pgz_sample_rate == 0) {
5433 /*
5434 * If no rate was provided, pick a random one that scales
5435 * with the number of protected zones.
5436 *
5437 * Use a binomial distribution to avoid having too many
5438 * really fast sample rates.
5439 */
5440 uint32_t factor = MIN(pgz_uses, 10);
5441 uint32_t max_rate = 1000 * factor;
5442 uint32_t min_rate = 100 * factor;
5443
5444 pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5445 zalloc_random_uniform32(min_rate, max_rate)) / 2;
5446 }
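/*
 * Illustrative example: with 10 or more protected zones, factor is 10,
 * so both draws come from [1000, 10000) and the sample rate is their
 * average.  Averaging two uniform draws yields a triangular distribution
 * centered around 5500, which makes very small (fast) sampling periods
 * less likely than a single uniform draw would.
 */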
5447
5448 struct mach_vm_range *r = &zone_info.zi_pgz_range;
5449 zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5450 zone_meta_populate(r->min_address, mach_vm_range_size(r));
5451
5452 for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5453 zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5454 }
5455
5456 for (size_t i = 1; i < pgz_slots; i++) {
5457 zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5458 &zone_info.zi_pgz_meta[2 * i + 1];
5459 }
5460 #if OS_ATOMIC_HAS_LLSC
5461 pgz_slot_head = &zone_info.zi_pgz_meta[1];
5462 #endif
5463 pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5464
5465 pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5466 2 * pgz_slots, ZALIGN_PTR);
5467
5468 /*
5469 * Expand the pmap so that pmap_enter_options_addr()
5470 * in pgz_protect() never needs to call pmap_expand().
5471 */
5472 for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5473 (void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5474 VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5475 PMAP_OPTIONS_NOENTER, NULL);
5476 }
5477
5478 /* do this last as this will enable pgz */
5479 percpu_foreach(counter, pgz_sample_counter) {
5480 *counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5481 }
5482 }
5483 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
5484
5485 static void
5486 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5487 {
5488 struct pgz_backtrace *bt = pgz_bt(slot, free);
5489 const char *what = free ? "Free" : "Allocation";
5490 uintptr_t buf[MAX_ZTRACE_DEPTH];
5491
5492 if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5493 paniclog_append_noflush(" Can't decode %s Backtrace\n", what);
5494 return;
5495 }
5496
5497 backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5498 (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5499
5500 paniclog_append_noflush(" %s Backtrace:\n", what);
5501 for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5502 if (has_syms) {
5503 paniclog_append_noflush(" %p ", (void *)buf[i]);
5504 panic_print_symbol_name(buf[i]);
5505 paniclog_append_noflush("\n");
5506 } else {
5507 paniclog_append_noflush(" %p\n", (void *)buf[i]);
5508 }
5509 }
5510 kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5511 }
5512
5513 static void
5514 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5515 {
5516 struct zone_page_metadata *meta;
5517 vm_offset_t elem, esize;
5518 const char *type;
5519 const char *prob;
5520 uint32_t slot;
5521 zone_t z;
5522
5523 slot = pgz_slot(addr);
5524 meta = pgz_meta(slot);
5525 elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5526
5527 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5528
5529 if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5530 meta->zm_index &&
5531 meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5532 z = &zone_array[meta->zm_index];
5533 } else {
5534 paniclog_append_noflush(" Zone : <unknown>\n");
5535 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5536 paniclog_append_noflush("\n");
5537 return;
5538 }
5539
5540 esize = zone_elem_inner_size(z);
5541 paniclog_append_noflush(" Zone : %s%s\n",
5542 zone_heap_name(z), zone_name(z));
5543 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5544 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
5545 (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5546
5547 if (addr < elem) {
5548 type = "out-of-bounds(underflow) + use-after-free";
5549 prob = "low";
5550 } else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5551 type = "double-free";
5552 prob = "high";
5553 } else if (addr < elem + esize) {
5554 type = "use-after-free";
5555 prob = "high";
5556 } else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5557 type = "out-of-bounds + use-after-free";
5558 prob = "low";
5559 } else {
5560 type = "out-of-bounds";
5561 prob = "high";
5562 }
5563 paniclog_append_noflush(" Kind : %s (%s confidence)\n",
5564 type, prob);
5565 if (addr < elem) {
5566 paniclog_append_noflush(" Access : %d byte(s) before\n",
5567 (uint32_t)(elem - addr) + 1);
5568 } else if (addr < elem + esize) {
5569 paniclog_append_noflush(" Access : %d byte(s) inside\n",
5570 (uint32_t)(addr - elem) + 1);
5571 } else {
5572 paniclog_append_noflush(" Access : %d byte(s) past\n",
5573 (uint32_t)(addr - (elem + esize)) + 1);
5574 }
5575
5576 panic_display_pgz_bt(has_syms, slot, false);
5577 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5578 panic_display_pgz_bt(has_syms, slot, true);
5579 }
5580
5581 paniclog_append_noflush("\n");
5582 }
5583
5584 #endif /* CONFIG_PROB_GZALLOC */
5585 #endif /* !ZALLOC_TEST */
5586 #pragma mark zfree
5587 #if !ZALLOC_TEST
5588
5589 /*!
5590 * @defgroup zfree
5591 * @{
5592 *
5593 * @brief
5594 * The codepath for zone frees.
5595 *
5596 * @discussion
5597 * There are 4 major ways to free memory that end up in the zone allocator:
5598 * - @c zfree()
5599 * - @c zfree_percpu()
5600 * - @c kfree*()
5601 * - @c zfree_permanent()
5602 *
5603 * While permanent zones have their own allocation scheme, all other codepaths
5604 * will eventually go through the @c zfree_ext() choking point.
5605 */
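/*
 * A minimal usage sketch (hypothetical zone and type) of the main pair of
 * entry points that funnel into zfree_ext():
 *
 *     struct widget *w = zalloc_flags(widget_zone, Z_WAITOK);
 *     ...
 *     zfree(widget_zone, w);
 */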
5606
5607 __header_always_inline void
5608 zfree_drop(zone_t zone, vm_offset_t addr)
5609 {
5610 vm_offset_t esize = zone_elem_outer_size(zone);
5611 struct zone_page_metadata *meta;
5612 vm_offset_t eidx;
5613
5614 meta = zone_element_resolve(zone, addr, &eidx);
5615
5616 if (!zone_meta_mark_free(meta, eidx)) {
5617 zone_meta_double_free_panic(zone, addr, __func__);
5618 }
5619
5620 vm_offset_t old_size = meta->zm_alloc_size;
5621 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5622 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5623
5624 if (new_size == 0) {
5625 /* whether the page was on the intermediate or all_used queue, move it to free */
5626 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5627 zone->z_wired_empty += meta->zm_chunk_len;
5628 } else if (old_size + esize > max_size) {
5629 /* first free element on page, move from all_used */
5630 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5631 }
5632 }
5633
5634 __attribute__((noinline))
5635 static void
5636 zfree_item(zone_t zone, vm_offset_t addr)
5637 {
5638 /* transfer preemption count to lock */
5639 zone_lock_nopreempt_check_contention(zone);
5640
5641 zfree_drop(zone, addr);
5642 zone->z_elems_free += 1;
5643
5644 zone_unlock(zone);
5645 }
5646
5647 static void
5648 zfree_cached_depot_recirculate(
5649 zone_t zone,
5650 uint32_t depot_max,
5651 zone_cache_t cache)
5652 {
5653 smr_t smr = zone_cache_smr(cache);
5654 smr_seq_t seq;
5655 uint32_t n;
5656
5657 zone_recirc_lock_nopreempt_check_contention(zone);
5658
5659 n = cache->zc_depot.zd_full;
5660 if (n >= depot_max) {
5661 /*
5662 * If SMR is in use, rotate the entire chunk of magazines.
5663 *
5664 * If the head of the recirculation layer is ready to be
5665 * reused, pull them back to refill a little.
5666 */
5667 seq = zone_depot_move_full(&zone->z_recirc,
5668 &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5669
5670 if (smr) {
5671 smr_deferred_advance_commit(smr, seq);
5672 if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5673 zone_depot_move_full(&cache->zc_depot,
5674 &zone->z_recirc, depot_max / 2, NULL);
5675 }
5676 }
5677 }
5678
5679 n = depot_max - cache->zc_depot.zd_full;
5680 if (n > zone->z_recirc.zd_empty) {
5681 n = zone->z_recirc.zd_empty;
5682 }
5683 if (n) {
5684 zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5685 n, zone);
5686 }
5687
5688 zone_recirc_unlock_nopreempt(zone);
5689 }
5690
5691 static zone_cache_t
5692 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5693 {
5694 zone_magazine_t mag = NULL, tmp = NULL;
5695 smr_t smr = zone_cache_smr(cache);
5696
5697 if (zone->z_recirc.zd_empty == 0) {
5698 mag = zone_magazine_alloc(Z_NOWAIT);
5699 }
5700
5701 zone_recirc_lock_nopreempt_check_contention(zone);
5702
5703 if (mag == NULL && zone->z_recirc.zd_empty) {
5704 mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5705 __builtin_assume(mag);
5706 }
5707 if (mag) {
5708 tmp = zone_magazine_replace(cache, mag, true);
5709 if (smr) {
5710 smr_deferred_advance_commit(smr, tmp->zm_seq);
5711 }
5712 if (zone_security_array[zone_index(zone)].z_lifo) {
5713 zone_depot_insert_head_full(&zone->z_recirc, tmp);
5714 } else {
5715 zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5716 }
5717 }
5718
5719 zone_recirc_unlock_nopreempt(zone);
5720
5721 return mag ? cache : NULL;
5722 }
5723
5724 __attribute__((noinline))
5725 static zone_cache_t
5726 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5727 {
5728 zone_magazine_t mag = NULL, tmp = NULL;
5729 uint32_t depot_max;
5730
5731 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5732 if (depot_max) {
5733 zone_depot_lock_nopreempt(cache);
5734
5735 if (cache->zc_depot.zd_empty == 0) {
5736 zfree_cached_depot_recirculate(zone, depot_max, cache);
5737 }
5738
5739 if (__probable(cache->zc_depot.zd_empty)) {
5740 mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5741 __builtin_assume(mag);
5742 } else {
5743 mag = zone_magazine_alloc(Z_NOWAIT);
5744 }
5745 if (mag) {
5746 tmp = zone_magazine_replace(cache, mag, true);
5747 zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5748 }
5749 zone_depot_unlock_nopreempt(cache);
5750
5751 return mag ? cache : NULL;
5752 }
5753
5754 return zfree_cached_recirculate(zone, cache);
5755 }
5756
5757 __attribute__((always_inline))
5758 static inline zone_cache_t
5759 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5760 {
5761 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5762
5763 if (__probable(cache->zc_free_cur < zc_mag_size())) {
5764 return cache;
5765 }
5766
5767 if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5768 zone_cache_swap_magazines(cache);
5769 return cache;
5770 }
5771
5772 return zfree_cached_trim(zone, cache);
5773 }
5774
5775 __attribute__((always_inline))
5776 static inline zone_cache_t
5777 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5778 {
5779 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5780 size_t idx = cache->zc_free_cur;
5781
5782 if (__probable(idx + 1 < zc_mag_size())) {
5783 return cache;
5784 }
5785
5786 /*
5787 * When SMR is in use, the bucket is tagged early with
5788 * @c smr_deferred_advance(), which costs a full barrier,
5789 * but performs no store.
5790 *
5791 * When zones hit the recirculation layer, the advance is committed,
5792 * under the recirculation lock (see zfree_cached_recirculate()).
5793 *
5794 * When done this way, the zone contention detection mechanism
5795 * will adjust the size of the per-cpu depots gracefully, which
5796 * mechanically reduces the pace of these commits as usage increases.
5797 */
5798
5799 if (__probable(idx + 1 == zc_mag_size())) {
5800 zone_magazine_t mag;
5801
5802 mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5803 offsetof(struct zone_magazine, zm_elems));
5804 mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5805 return cache;
5806 }
5807
5808 return zfree_cached_trim(zone, cache);
5809 }
5810
5811 __attribute__((always_inline))
5812 static inline vm_offset_t
5813 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5814 {
5815 struct zone_page_metadata *meta;
5816 vm_offset_t offs;
5817
5818 #pragma unused(combined_size)
5819 #if CONFIG_PROB_GZALLOC
5820 if (__improbable(pgz_owned(elem))) {
5821 elem = pgz_unprotect(elem, __builtin_frame_address(0));
5822 }
5823 #endif /* CONFIG_PROB_GZALLOC */
5824
5825 meta = zone_meta_from_addr(elem);
5826 if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5827 zone_invalid_element_panic(zone, elem);
5828 }
5829
5830 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5831 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5832 offs += ptoa(meta->zm_page_index);
5833 }
5834
5835 if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5836 zone_invalid_element_panic(zone, elem);
5837 }
5838
5839 #if VM_TAG_SIZECLASSES
5840 if (__improbable(zone->z_uses_tags)) {
5841 vm_tag_t *slot;
5842
5843 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5844 Z_FAST_QUO(offs, zone->z_quo_magic));
5845 vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5846 -(long)ZFREE_ELEM_SIZE(combined_size));
5847 *slot = VM_KERN_MEMORY_NONE;
5848 }
5849 #endif /* VM_TAG_SIZECLASSES */
5850
5851 #if KASAN_CLASSIC
5852 kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5853 ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5854 zone->z_percpu, __builtin_frame_address(0));
5855 #endif
5856 #if KASAN_TBI
5857 elem = kasan_tbi_tag_zfree(elem, ZFREE_ELEM_SIZE(combined_size),
5858 zone->z_percpu);
5859 #endif
5860
5861 return elem;
5862 }
5863
5864 __attribute__((always_inline))
5865 vm_offset_t
5866 (zcache_mark_invalid)(zone_t zone, vm_offset_t elem)
5867 {
5868 vm_size_t esize = zone_elem_inner_size(zone);
5869
5870 ZFREE_LOG(zone, elem, 1);
5871 return __zcache_mark_invalid(zone, elem, ZFREE_PACK_SIZE(esize, esize));
5872 }
5873
5874 /*
5875 * The function is noinline when zlog can be used so that the backtracing can
5876 * reliably skip the zfree_ext() and zfree_log()
5877 * boring frames.
5878 */
5879 #if ZALLOC_ENABLE_LOGGING
5880 __attribute__((noinline))
5881 #endif /* ZALLOC_ENABLE_LOGGING */
5882 void
5883 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
5884 {
5885 vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
5886 vm_offset_t elem = (vm_offset_t)addr;
5887 int cpu;
5888
5889 DTRACE_VM2(zfree, zone_t, zone, void*, elem);
5890
5891 ZFREE_LOG(zone, elem, 1);
5892 elem = __zcache_mark_invalid(zone, elem, combined_size);
5893
5894 disable_preemption();
5895 cpu = cpu_number();
5896 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
5897
5898 #if KASAN_CLASSIC
5899 if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
5900 struct kasan_quarantine_result kqr;
5901
5902 kqr = kasan_quarantine(elem, esize);
5903 elem = kqr.addr;
5904 zone = kqr.zone;
5905 if (elem == 0) {
5906 return enable_preemption();
5907 }
5908 }
5909 #endif
5910
5911 if (zone->z_pcpu_cache) {
5912 zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
5913
5914 if (__probable(cache)) {
5915 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5916 return enable_preemption();
5917 }
5918 }
5919
5920 return zfree_item(zone, elem);
5921 }
5922
5923 __attribute__((always_inline))
5924 static inline zstack_t
5925 zcache_free_stack_to_cpu(
5926 zone_id_t zid,
5927 zone_cache_t cache,
5928 zstack_t stack,
5929 vm_size_t esize,
5930 zone_cache_ops_t ops,
5931 bool zero)
5932 {
5933 size_t n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
5934 vm_offset_t *p;
5935
5936 stack.z_count -= n;
5937 cache->zc_free_cur += n;
5938 p = cache->zc_free_elems + cache->zc_free_cur;
5939
5940 do {
5941 void *o = zstack_pop_no_delta(&stack);
5942
5943 if (ops) {
5944 o = ops->zc_op_mark_invalid(zid, o);
5945 } else {
5946 if (zero) {
5947 bzero(o, esize);
5948 }
5949 o = (void *)__zcache_mark_invalid(zone_by_id(zid),
5950 (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
5951 }
5952 *--p = (vm_offset_t)o;
5953 } while (--n > 0);
5954
5955 return stack;
5956 }
5957
5958 __attribute__((always_inline))
5959 static inline void
5960 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
5961 {
5962 vm_offset_t elem = (vm_offset_t)addr;
5963 zone_cache_t cache;
5964 vm_size_t esize;
5965 zone_t zone = zone_by_id(zid);
5966 int cpu;
5967
5968 ZFREE_LOG(zone, elem, 1);
5969
5970 disable_preemption();
5971 cpu = cpu_number();
5972 esize = zone_elem_inner_size(zone);
5973 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
5974 if (!ops) {
5975 addr = (void *)__zcache_mark_invalid(zone, elem,
5976 ZFREE_PACK_SIZE(esize, esize));
5977 }
5978 cache = zfree_cached_get_pcpu_cache(zone, cpu);
5979 if (__probable(cache)) {
5980 if (ops) {
5981 addr = ops->zc_op_mark_invalid(zid, addr);
5982 }
5983 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5984 enable_preemption();
5985 } else if (ops) {
5986 enable_preemption();
5987 os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
5988 ops->zc_op_free(zid, addr);
5989 } else {
5990 zfree_item(zone, elem);
5991 }
5992 }
5993
5994 __attribute__((always_inline))
5995 static inline void
5996 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
5997 {
5998 zone_t zone = zone_by_id(zid);
5999 zone_cache_t cache;
6000 vm_size_t esize;
6001 int cpu;
6002
6003 ZFREE_LOG(zone, stack.z_head, stack.z_count);
6004
6005 disable_preemption();
6006 cpu = cpu_number();
6007 esize = zone_elem_inner_size(zone);
6008 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
6009 stack.z_count * esize;
6010
6011 for (;;) {
6012 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6013 if (__probable(cache)) {
6014 stack = zcache_free_stack_to_cpu(zid, cache,
6015 stack, esize, ops, zero);
6016 enable_preemption();
6017 } else if (ops) {
6018 enable_preemption();
6019 os_atomic_dec(&zone->z_elems_avail, relaxed);
6020 ops->zc_op_free(zid, zstack_pop(&stack));
6021 } else {
6022 vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
6023
6024 if (zero) {
6025 bzero((void *)addr, esize);
6026 }
6027 addr = __zcache_mark_invalid(zone, addr,
6028 ZFREE_PACK_SIZE(esize, esize));
6029 zfree_item(zone, addr);
6030 }
6031
6032 if (stack.z_count == 0) {
6033 break;
6034 }
6035
6036 disable_preemption();
6037 cpu = cpu_number();
6038 }
6039 }
6040
6041 void
6042 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6043 {
6044 __builtin_assume(ops != NULL);
6045 zcache_free_1_ext(zid, addr, ops);
6046 }
6047
6048 void
6049 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6050 {
6051 __builtin_assume(ops != NULL);
6052 zcache_free_n_ext(zid, stack, ops, false);
6053 }
6054
6055 void
6056 (zfree_n)(zone_id_t zid, zstack_t stack)
6057 {
6058 zcache_free_n_ext(zid, stack, NULL, true);
6059 }
6060
6061 void
6062 (zfree_nozero)(zone_id_t zid, void *addr)
6063 {
6064 zcache_free_1_ext(zid, addr, NULL);
6065 }
6066
6067 void
6068 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6069 {
6070 zcache_free_n_ext(zid, stack, NULL, false);
6071 }
6072
6073 void
6074 (zfree)(zone_t zov, void *addr)
6075 {
6076 zone_t zone = zov->z_self;
6077 zone_stats_t zstats = zov->z_stats;
6078 vm_offset_t esize = zone_elem_inner_size(zone);
6079
6080 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6081 assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6082 bzero(addr, esize);
6083 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6084 }
6085
6086 __attribute__((noinline))
6087 void
6088 zfree_percpu(union zone_or_view zov, void *addr)
6089 {
6090 zone_t zone = zov.zov_view->zv_zone;
6091 zone_stats_t zstats = zov.zov_view->zv_stats;
6092 vm_offset_t esize = zone_elem_inner_size(zone);
6093
6094 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6095 assert(zone->z_percpu);
6096 addr = (void *)__zpcpu_demangle(addr);
6097 zpercpu_foreach_cpu(i) {
6098 bzero((char *)addr + ptoa(i), esize);
6099 }
6100 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6101 }
6102
6103 void
6104 (zfree_id)(zone_id_t zid, void *addr)
6105 {
6106 (zfree)(&zone_array[zid], addr);
6107 }
6108
6109 void
6110 (zfree_ro)(zone_id_t zid, void *addr)
6111 {
6112 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6113 zone_t zone = zone_by_id(zid);
6114 zone_stats_t zstats = zone->z_stats;
6115 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6116
6117 #if ZSECURITY_CONFIG(READ_ONLY)
6118 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6119 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6120 #else
6121 (void)zid;
6122 bzero(addr, esize);
6123 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6124 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6125 }
6126
6127 __attribute__((noinline))
6128 static void
6129 zfree_item_smr(zone_t zone, vm_offset_t addr)
6130 {
6131 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6132 vm_size_t esize = zone_elem_inner_size(zone);
6133
6134 /*
6135 * This should be taken extremely rarely:
6136 * this happens if we failed to allocate an empty bucket.
6137 */
6138 smr_synchronize(zone_cache_smr(cache));
6139
6140 cache->zc_free((void *)addr, esize);
6141 addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6142
6143 zfree_item(zone, addr);
6144 }
6145
6146 void
6147 (zfree_smr)(zone_t zone, void *addr)
6148 {
6149 vm_offset_t elem = (vm_offset_t)addr;
6150 vm_offset_t esize;
6151 zone_cache_t cache;
6152 int cpu;
6153
6154 ZFREE_LOG(zone, elem, 1);
6155
6156 disable_preemption();
6157 cpu = cpu_number();
6158 #if MACH_ASSERT
6159 cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6160 assert(!smr_entered_cpu(cache->zc_smr, cpu));
6161 #endif
6162 esize = zone_elem_inner_size(zone);
6163 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6164 cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6165 if (__probable(cache)) {
6166 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6167 enable_preemption();
6168 } else {
6169 zfree_item_smr(zone, elem);
6170 }
6171 }
6172
6173 void
6174 (zfree_id_smr)(zone_id_t zid, void *addr)
6175 {
6176 (zfree_smr)(&zone_array[zid], addr);
6177 }
6178
6179 void
6180 kfree_type_impl_internal(
6181 kalloc_type_view_t kt_view,
6182 void *ptr __unsafe_indexable)
6183 {
6184 zone_t zsig = kt_view->kt_zsig;
6185 zone_t z = kt_view->kt_zv.zv_zone;
6186 struct zone_page_metadata *meta = zone_meta_from_addr((vm_offset_t) ptr);
6187 zone_id_t zidx_meta = meta->zm_index;
6188 zone_security_flags_t zsflags_meta = zone_security_array[zidx_meta];
6189 zone_security_flags_t zsflags_z = zone_security_config(z);
6190 zone_security_flags_t zsflags_zsig;
6191
6192 if (NULL == ptr) {
6193 return;
6194 }
6195
6196 if ((zsflags_z.z_kheap_id == KHEAP_ID_DATA_BUFFERS) ||
6197 zone_has_index(z, zidx_meta)) {
6198 return (zfree)(&kt_view->kt_zv, ptr);
6199 }
6200 zsflags_zsig = zone_security_config(zsig);
6201 if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
6202 z = zone_array + zidx_meta;
6203 return (zfree)(z, ptr);
6204 }
6205
6206 return (zfree)(kt_view->kt_zshared, ptr);
6207 }
6208
6209 /*! @} */
6210 #endif /* !ZALLOC_TEST */
6211 #pragma mark zalloc
6212 #if !ZALLOC_TEST
6213
6214 /*!
6215 * @defgroup zalloc
6216 * @{
6217 *
6218 * @brief
6219 * The codepath for zone allocations.
6220 *
6221 * @discussion
6222 * There are 4 major ways to allocate memory that end up in the zone allocator:
6223 * - @c zalloc(), @c zalloc_flags(), ...
6224 * - @c zalloc_percpu()
6225 * - @c kalloc*()
6226 * - @c zalloc_permanent()
6227 *
6228 * While permanent zones have their own allocation scheme, all other codepaths
6229 * will eventually go through the @c zalloc_ext() choking point.
6230 *
6231 * @c zalloc_return() is the final function everyone tail calls into,
6232 * which prepares the element for consumption by the caller and deals with
6233 * common treatment (zone logging, tags, kasan, validation, ...).
6234 */
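/*
 * Rough shape of the allocation path below (simplified; see the individual
 * functions for the authoritative logic):
 *
 *     zalloc_ext()
 *         -> zalloc_cached_get_pcpu_cache()    per-CPU magazines
 *             -> zalloc_cached_prime()         per-CPU depot / recirculation
 *                 -> zalloc_cached_import()    refill from zone pages
 *         -> zalloc_item()                     uncached slow path
 *     -> zalloc_return()                       logging, tags, kasan, validation
 */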
6235
6236 /*!
6237 * @function zalloc_import
6238 *
6239 * @brief
6240 * Import @c n elements into the specified array, the opposite of @c zfree_drop().
6241 *
6242 * @param zone The zone to import elements from
6243 * @param elems The array to import into
6244 * @param n The number of elements to import. Must be non-zero,
6245 * and smaller than @c zone->z_elems_free.
6246 */
6247 __header_always_inline vm_size_t
6248 zalloc_import(
6249 zone_t zone,
6250 vm_offset_t *elems,
6251 zalloc_flags_t flags,
6252 uint32_t n)
6253 {
6254 vm_offset_t esize = zone_elem_outer_size(zone);
6255 vm_offset_t offs = zone_elem_inner_offs(zone);
6256 zone_stats_t zs;
6257 int cpu = cpu_number();
6258 uint32_t i = 0;
6259
6260 zs = zpercpu_get_cpu(zone->z_stats, cpu);
6261
6262 if (__improbable(zone_caching_disabled < 0)) {
6263 /*
6264 * In the first 10s after boot, mess with
6265 * the scan position in order to make early
6266 * allocation patterns less predictable.
6267 */
6268 zone_early_scramble_rr(zone, cpu, zs);
6269 }
6270
6271 do {
6272 vm_offset_t page, eidx, size = 0;
6273 struct zone_page_metadata *meta;
6274
6275 if (!zone_pva_is_null(zone->z_pageq_partial)) {
6276 meta = zone_pva_to_meta(zone->z_pageq_partial);
6277 page = zone_pva_to_addr(zone->z_pageq_partial);
6278 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6279 meta = zone_pva_to_meta(zone->z_pageq_empty);
6280 page = zone_pva_to_addr(zone->z_pageq_empty);
6281 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6282 } else {
6283 zone_accounting_panic(zone, "z_elems_free corruption");
6284 }
6285
6286 zone_meta_validate(zone, meta, page);
6287
6288 vm_offset_t old_size = meta->zm_alloc_size;
6289 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6290
6291 do {
6292 eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6293 elems[i++] = page + offs + eidx * esize;
6294 size += esize;
6295 } while (i < n && old_size + size + esize <= max_size);
6296
6297 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6298
6299 if (new_size + esize > max_size) {
6300 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6301 } else if (old_size == 0) {
6302 /* remove from free, move to intermediate */
6303 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6304 }
6305 } while (i < n);
6306
6307 n = zone_counter_sub(zone, z_elems_free, n);
6308 if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6309 zone->z_elems_free_min = n;
6310 }
6311
6312 return zone_elem_inner_size(zone);
6313 }
6314
6315 __attribute__((always_inline))
6316 static inline vm_offset_t
6317 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6318 {
6319 #pragma unused(zone, flags)
6320 #if KASAN || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6321 vm_offset_t esize = zone_elem_inner_size(zone);
6322 #endif
6323
6324 #if VM_TAG_SIZECLASSES
6325 if (__improbable(zone->z_uses_tags)) {
6326 struct zone_page_metadata *meta;
6327 vm_offset_t offs;
6328 vm_tag_t *slot;
6329 vm_tag_t tag;
6330
6331 tag = zalloc_flags_get_tag(flags);
6332 meta = zone_meta_from_addr(addr);
6333 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6334 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6335 offs += ptoa(meta->zm_page_index);
6336 }
6337
6338 slot = zba_extra_ref_ptr(meta->zm_bitmap,
6339 Z_FAST_QUO(offs, zone->z_quo_magic));
6340 *slot = tag;
6341
6342 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6343 (long)esize);
6344 }
6345 #endif /* VM_TAG_SIZECLASSES */
6346
6347 #if CONFIG_PROB_GZALLOC
6348 if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6349 addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6350 }
6351 #endif
6352
6353 /*
6354 * KASan integration of kalloc heaps is handled by kalloc_ext()
6355 */
6356 if ((flags & Z_SKIP_KASAN) == 0) {
6357 #if KASAN_CLASSIC
6358 kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6359 (flags & Z_PCPU), __builtin_frame_address(0));
6360 #endif /* KASAN_CLASSIC */
6361 #if KASAN_TBI
6362 if (__probable(zone->z_tbi_tag)) {
6363 addr = kasan_tbi_tag_zalloc(addr, esize, esize,
6364 (flags & Z_PCPU));
6365 } else {
6366 addr = kasan_tbi_tag_zalloc_default(addr, esize,
6367 (flags & Z_PCPU));
6368 }
6369 #endif /* KASAN_TBI */
6370 }
6371
6372 return addr;
6373 }
6374
6375 __attribute__((always_inline))
6376 vm_offset_t
6377 (zcache_mark_valid)(zone_t zone, vm_offset_t addr)
6378 {
6379 addr = __zcache_mark_valid(zone, addr, 0);
6380 ZALLOC_LOG(zone, addr, 1);
6381 return addr;
6382 }
6383
6384 /*!
6385 * @function zalloc_return
6386 *
6387 * @brief
6388 * Performs the tail-end of the work required on allocations before the caller
6389 * uses them.
6390 *
6391 * @discussion
6392 * This function is called without any zone lock held,
6393 * and with preemption restored to the state it had when @c zalloc_ext() was called.
6394 *
6395 * @param zone The zone we're allocating from.
6396 * @param addr The element we just allocated.
6397 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
6398 * @param elem_size The element size for this zone.
6399 */
6400 __attribute__((always_inline))
6401 static struct kalloc_result
6402 zalloc_return(
6403 zone_t zone,
6404 vm_offset_t addr,
6405 zalloc_flags_t flags,
6406 vm_offset_t elem_size)
6407 {
6408 addr = __zcache_mark_valid(zone, addr, flags);
6409 #if ZALLOC_ENABLE_ZERO_CHECK
6410 zalloc_validate_element(zone, addr, elem_size, flags);
6411 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6412 ZALLOC_LOG(zone, addr, 1);
6413
6414 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6415 return (struct kalloc_result){ (void *)addr, elem_size };
6416 }
6417
6418 static vm_size_t
6419 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
6420 {
6421 if (esize <= 512) {
6422 return zone_early_thres_mul * page_size / 4;
6423 } else if (esize < 2048) {
6424 return zone_early_thres_mul * esize * 8;
6425 }
6426 return zone_early_thres_mul * zone->z_chunk_elems * esize;
6427 }
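/*
 * Illustrative example (assuming 16K pages and zone_early_thres_mul == 1):
 * a 256 byte element gets a threshold of page_size / 4 = 4096 bytes,
 * a 1024 byte element gets 1024 * 8 = 8192 bytes, and elements of
 * 2048 bytes or more scale with the full number of elements per chunk.
 */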
6428
6429 __attribute__((noinline))
6430 static struct kalloc_result
6431 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6432 {
6433 vm_offset_t esize, addr;
6434 zone_stats_t zs;
6435
6436 zone_lock_nopreempt_check_contention(zone);
6437
6438 zs = zpercpu_get(zstats);
6439 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6440 if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6441 zone_expand_async_schedule_if_allowed(zone);
6442 } else {
6443 zone_expand_locked(zone, flags, zalloc_needs_refill);
6444 }
6445 if (__improbable(zone->z_elems_free == 0)) {
6446 zs->zs_alloc_fail++;
6447 zone_unlock(zone);
6448 if (__improbable(flags & Z_NOFAIL)) {
6449 zone_nofail_panic(zone);
6450 }
6451 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6452 return (struct kalloc_result){ };
6453 }
6454 }
6455
6456 esize = zalloc_import(zone, &addr, flags, 1);
6457 zs->zs_mem_allocated += esize;
6458
6459 if (__improbable(!zone_share_always &&
6460 !os_atomic_load(&zs->zs_alloc_not_shared, relaxed))) {
6461 if (flags & Z_SET_NOTSHARED) {
6462 vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
6463
6464 if (zs->zs_mem_allocated >= shared_threshold) {
6465 zpercpu_foreach(zs_cpu, zstats) {
6466 os_atomic_store(&zs_cpu->zs_alloc_not_shared, 1, relaxed);
6467 }
6468 }
6469 }
6470 }
6471 zone_unlock(zone);
6472
6473 return zalloc_return(zone, addr, flags, esize);
6474 }
6475
6476 static void
6477 zalloc_cached_import(
6478 zone_t zone,
6479 zalloc_flags_t flags,
6480 zone_cache_t cache)
6481 {
6482 uint16_t n_elems = zc_mag_size();
6483
6484 zone_lock_nopreempt(zone);
6485
6486 if (__probable(!zone_caching_disabled &&
6487 zone->z_elems_free > zone->z_elems_rsv / 2)) {
6488 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6489 zone_expand_async_schedule_if_allowed(zone);
6490 }
6491 if (zone->z_elems_free < n_elems) {
6492 n_elems = (uint16_t)zone->z_elems_free;
6493 }
6494 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6495 cache->zc_alloc_cur = n_elems;
6496 }
6497
6498 zone_unlock_nopreempt(zone);
6499 }
6500
6501 static void
6502 zalloc_cached_depot_recirculate(
6503 zone_t zone,
6504 uint32_t depot_max,
6505 zone_cache_t cache,
6506 smr_t smr)
6507 {
6508 smr_seq_t seq;
6509 uint32_t n;
6510
6511 zone_recirc_lock_nopreempt_check_contention(zone);
6512
6513 n = cache->zc_depot.zd_empty;
6514 if (n >= depot_max) {
6515 zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6516 n - depot_max / 2, NULL);
6517 }
6518
6519 n = cache->zc_depot.zd_full;
6520 if (smr && n) {
6521 /*
6522 * If SMR is in use, it means smr_poll() failed,
6523 * so rotate the entire chunk of magazines in order
6524 * to let the sequence numbers age.
6525 */
6526 seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6527 n, NULL);
6528 smr_deferred_advance_commit(smr, seq);
6529 }
6530
6531 n = depot_max - cache->zc_depot.zd_empty;
6532 if (n > zone->z_recirc.zd_full) {
6533 n = zone->z_recirc.zd_full;
6534 }
6535
6536 if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6537 zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6538 n, zone);
6539 }
6540
6541 zone_recirc_unlock_nopreempt(zone);
6542 }
6543
6544 static void
6545 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6546 {
6547 zone_smr_free_cb_t zc_free = cache->zc_free;
6548 vm_size_t esize = zone_elem_inner_size(z);
6549
6550 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6551 vm_offset_t elem = mag->zm_elems[i];
6552
6553 zc_free((void *)elem, zone_elem_inner_size(z));
6554 elem = __zcache_mark_invalid(z, elem,
6555 ZFREE_PACK_SIZE(esize, esize));
6556 mag->zm_elems[i] = elem;
6557 }
6558 }
6559
6560 static void
6561 zalloc_cached_recirculate(
6562 zone_t zone,
6563 zone_cache_t cache)
6564 {
6565 zone_magazine_t mag = NULL;
6566
6567 zone_recirc_lock_nopreempt_check_contention(zone);
6568
6569 if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6570 mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6571 if (zone_cache_smr(cache)) {
6572 zalloc_cached_reuse_smr(zone, cache, mag);
6573 }
6574 mag = zone_magazine_replace(cache, mag, false);
6575 zone_depot_insert_head_empty(&zone->z_recirc, mag);
6576 }
6577
6578 zone_recirc_unlock_nopreempt(zone);
6579 }
6580
6581 __attribute__((noinline))
6582 static zone_cache_t
6583 zalloc_cached_prime(
6584 zone_t zone,
6585 zone_cache_ops_t ops,
6586 zalloc_flags_t flags,
6587 zone_cache_t cache)
6588 {
6589 zone_magazine_t mag = NULL;
6590 uint32_t depot_max;
6591 smr_t smr;
6592
6593 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6594 if (depot_max) {
6595 smr = zone_cache_smr(cache);
6596
6597 zone_depot_lock_nopreempt(cache);
6598
6599 if (!zone_depot_poll(&cache->zc_depot, smr)) {
6600 zalloc_cached_depot_recirculate(zone, depot_max, cache,
6601 smr);
6602 }
6603
6604 if (__probable(cache->zc_depot.zd_full)) {
6605 mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6606 if (zone_cache_smr(cache)) {
6607 zalloc_cached_reuse_smr(zone, cache, mag);
6608 }
6609 mag = zone_magazine_replace(cache, mag, false);
6610 zone_depot_insert_head_empty(&cache->zc_depot, mag);
6611 }
6612
6613 zone_depot_unlock_nopreempt(cache);
6614 } else if (zone->z_recirc.zd_full) {
6615 zalloc_cached_recirculate(zone, cache);
6616 }
6617
6618 if (__probable(cache->zc_alloc_cur)) {
6619 return cache;
6620 }
6621
6622 if (ops == NULL) {
6623 zalloc_cached_import(zone, flags, cache);
6624 if (__probable(cache->zc_alloc_cur)) {
6625 return cache;
6626 }
6627 }
6628
6629 return NULL;
6630 }
6631
6632 __attribute__((always_inline))
6633 static inline zone_cache_t
6634 zalloc_cached_get_pcpu_cache(
6635 zone_t zone,
6636 zone_cache_ops_t ops,
6637 int cpu,
6638 zalloc_flags_t flags)
6639 {
6640 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6641
6642 if (__probable(cache->zc_alloc_cur != 0)) {
6643 return cache;
6644 }
6645
6646 if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6647 zone_cache_swap_magazines(cache);
6648 return cache;
6649 }
6650
6651 return zalloc_cached_prime(zone, ops, flags, cache);
6652 }
6653
6654
6655 /*!
6656 * @function zalloc_ext
6657 *
6658 * @brief
6659 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6660 */
6661 struct kalloc_result
6662 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6663 {
6664 /*
6665 * KASan uses zalloc() for fakestack, which can be called anywhere.
6666 * However, we make sure these calls can never block.
6667 */
6668 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6669 #if KASAN_FAKESTACK
6670 zone->z_kasan_fakestacks ||
6671 #endif /* KASAN_FAKESTACK */
6672 ml_get_interrupts_enabled() ||
6673 ml_is_quiescing() ||
6674 debug_mode_active(),
6675 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6676
6677 /*
6678 * Make sure Z_NOFAIL was not obviously misused
6679 */
6680 if (flags & Z_NOFAIL) {
6681 assert(!zone->exhaustible &&
6682 (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6683 }
6684
6685 #if VM_TAG_SIZECLASSES
6686 if (__improbable(zone->z_uses_tags)) {
6687 vm_tag_t tag = zalloc_flags_get_tag(flags);
6688
6689 if (flags & Z_VM_TAG_BT_BIT) {
6690 tag = vm_tag_bt() ?: tag;
6691 }
6692 if (tag != VM_KERN_MEMORY_NONE) {
6693 tag = vm_tag_will_update_zone(tag, zone->z_tags_sizeclass,
6694 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6695 }
6696 if (tag == VM_KERN_MEMORY_NONE) {
6697 zone_security_flags_t zsflags = zone_security_config(zone);
6698
6699 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6700 tag = VM_KERN_MEMORY_KALLOC_DATA;
6701 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6702 zsflags.z_kalloc_type) {
6703 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6704 } else {
6705 tag = VM_KERN_MEMORY_KALLOC;
6706 }
6707 }
6708 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6709 }
6710 #endif /* VM_TAG_SIZECLASSES */
6711
6712 disable_preemption();
6713
6714 #if ZALLOC_ENABLE_ZERO_CHECK
6715 if (zalloc_skip_zero_check()) {
6716 flags |= Z_NOZZC;
6717 }
6718 #endif
6719
6720 if (zone->z_pcpu_cache) {
6721 zone_cache_t cache;
6722 vm_offset_t index, addr, esize;
6723 int cpu = cpu_number();
6724
6725 cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6726 if (__probable(cache)) {
6727 esize = zone_elem_inner_size(zone);
6728 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6729 index = --cache->zc_alloc_cur;
6730 addr = cache->zc_alloc_elems[index];
6731 cache->zc_alloc_elems[index] = 0;
6732 enable_preemption();
6733 return zalloc_return(zone, addr, flags, esize);
6734 }
6735 }
6736
6737 __attribute__((musttail))
6738 return zalloc_item(zone, zstats, flags);
6739 }
6740
6741 __attribute__((always_inline))
6742 static inline zstack_t
6743 zcache_alloc_stack_from_cpu(
6744 zone_id_t zid,
6745 zone_cache_t cache,
6746 zstack_t stack,
6747 uint32_t n,
6748 zone_cache_ops_t ops)
6749 {
6750 vm_offset_t *p;
6751
6752 n = MIN(n, cache->zc_alloc_cur);
6753 p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6754 cache->zc_alloc_cur -= n;
6755 stack.z_count += n;
6756
6757 do {
6758 vm_offset_t e = *--p;
6759
6760 *p = 0;
6761 if (ops) {
6762 e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6763 } else {
6764 e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6765 }
6766 zstack_push_no_delta(&stack, (void *)e);
6767 } while (--n > 0);
6768
6769 return stack;
6770 }
6771
6772 __attribute__((noinline))
6773 static zstack_t
6774 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6775 {
6776 zone_t zone = zone_by_id(zid);
6777 zone_stats_t zstats = zone->z_stats;
6778 int cpu;
6779
6780 count -= stack.z_count;
6781
6782 disable_preemption();
6783 cpu = cpu_number();
6784 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6785 count * zone_elem_inner_size(zone);
6786 zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6787 enable_preemption();
6788
6789 return stack;
6790 }
6791
6792 __attribute__((always_inline))
6793 static zstack_t
6794 zcache_alloc_n_ext(
6795 zone_id_t zid,
6796 uint32_t count,
6797 zalloc_flags_t flags,
6798 zone_cache_ops_t ops)
6799 {
6800 zstack_t stack = { };
6801 zone_cache_t cache;
6802 zone_t zone;
6803 int cpu;
6804
6805 disable_preemption();
6806 cpu = cpu_number();
6807 zone = zone_by_id(zid);
6808 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6809 count * zone_elem_inner_size(zone);
6810
6811 for (;;) {
6812 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6813 if (__probable(cache)) {
6814 stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6815 count - stack.z_count, ops);
6816 enable_preemption();
6817 } else {
6818 void *o;
6819
6820 if (ops) {
6821 enable_preemption();
6822 o = ops->zc_op_alloc(zid, flags);
6823 } else {
6824 o = zalloc_item(zone, zone->z_stats, flags).addr;
6825 }
6826 if (__improbable(o == NULL)) {
6827 return zcache_alloc_fail(zid, stack, count);
6828 }
6829 if (ops) {
6830 os_atomic_inc(&zone->z_elems_avail, relaxed);
6831 }
6832 zstack_push(&stack, o);
6833 }
6834
6835 if (stack.z_count == count) {
6836 break;
6837 }
6838
6839 disable_preemption();
6840 cpu = cpu_number();
6841 }
6842
6843 ZALLOC_LOG(zone, stack.z_head, stack.z_count);
6844
6845 return stack;
6846 }
6847
6848 zstack_t
6849 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
6850 {
6851 return zcache_alloc_n_ext(zid, count, flags, NULL);
6852 }
6853
6854 zstack_t
6855 (zcache_alloc_n)(
6856 zone_id_t zid,
6857 uint32_t count,
6858 zalloc_flags_t flags,
6859 zone_cache_ops_t ops)
6860 {
6861 __builtin_assume(ops != NULL);
6862 return zcache_alloc_n_ext(zid, count, flags, ops);
6863 }
6864
6865 __attribute__((always_inline))
6866 void *
6867 zalloc(zone_t zov)
6868 {
6869 return zalloc_flags(zov, Z_WAITOK);
6870 }
6871
6872 __attribute__((always_inline))
6873 void *
6874 zalloc_noblock(zone_t zov)
6875 {
6876 return zalloc_flags(zov, Z_NOWAIT);
6877 }
6878
6879 void *
6880 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
6881 {
6882 zone_t zone = zov->z_self;
6883 zone_stats_t zstats = zov->z_stats;
6884
6885 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6886 assert(!zone->z_percpu && !zone->z_permanent);
6887 return zalloc_ext(zone, zstats, flags).addr;
6888 }
6889
6890 __attribute__((always_inline))
6891 void *
6892 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6893 {
6894 return (zalloc_flags)(zone_by_id(zid), flags);
6895 }
6896
6897 void *
6898 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6899 {
6900 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6901 zone_t zone = zone_by_id(zid);
6902 zone_stats_t zstats = zone->z_stats;
6903 struct kalloc_result kr;
6904
6905 kr = zalloc_ext(zone, zstats, flags);
6906 #if ZSECURITY_CONFIG(READ_ONLY)
6907 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6908 if (kr.addr) {
6909 zone_require_ro(zid, kr.size, kr.addr);
6910 }
6911 #endif
6912 return kr.addr;
6913 }
6914
6915 #if ZSECURITY_CONFIG(READ_ONLY)
6916
6917 __attribute__((always_inline))
6918 static bool
6919 from_current_stack(vm_offset_t addr, vm_size_t size)
6920 {
6921 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6922 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6923
6924 #if CONFIG_KERNEL_TBI
6925 addr = VM_KERNEL_TBI_FILL(addr);
6926 #endif /* CONFIG_KERNEL_TBI */
6927
6928 return (addr >= start) && (addr + size < end);
6929 }
6930
6931 /*
6932 * Check if an address is from const memory, i.e. TEXT or DATA CONST segments,
6933 * or the SECURITY_READ_ONLY_LATE section.
6934 */
6935 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6936 __attribute__((always_inline))
6937 static bool
6938 from_const_memory(const vm_offset_t addr, vm_size_t size)
6939 {
6940 return rorgn_contains(addr, size, true);
6941 }
6942 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6943 __attribute__((always_inline))
6944 static bool
6945 from_const_memory(const vm_offset_t addr, vm_size_t size)
6946 {
6947 #pragma unused(addr, size)
6948 return true;
6949 }
6950 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6951
6952 __abortlike
6953 static void
6954 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
6955 const vm_offset_t src, vm_size_t src_size)
6956 {
6957 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
6958 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
6959 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6960 extern vm_offset_t rorgn_begin;
6961 extern vm_offset_t rorgn_end;
6962 #else
6963 vm_offset_t const rorgn_begin = 0;
6964 vm_offset_t const rorgn_end = 0;
6965 #endif
6966
6967 if (from_ro_map(src, src_size)) {
6968 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
6969 zone_t dst_zone = &zone_array[zid];
6970 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
6971 " (expected: %s, actual: %s", (void *)src, elem, src_zone->z_name,
6972 dst_zone->z_name);
6973 }
6974
6975 panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
6976 "current stack (%p - %p) or const memory (phys %p - %p)",
6977 (void *)src, (void*)kvtophys(src),
6978 (void *)zone_info.zi_ro_range.min_address,
6979 (void *)zone_info.zi_ro_range.max_address,
6980 (void *)stack_start, (void *)stack_end,
6981 (void *)rorgn_begin, (void *)rorgn_end);
6982 }
6983
6984 __attribute__((always_inline))
6985 static void
6986 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
6987 const vm_offset_t src, vm_size_t src_size)
6988 {
6989 if (from_current_stack(src, src_size) ||
6990 (from_ro_map(src, src_size) &&
6991 zid == zone_index_from_ptr((void *)src)) ||
6992 from_const_memory(src, src_size)) {
6993 return;
6994 }
6995 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
6996 }
6997
6998 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6999
7000 __attribute__((noinline))
7001 void
7002 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
7003 const void *new_data, vm_size_t new_data_size)
7004 {
7005 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7006
7007 #if ZSECURITY_CONFIG(READ_ONLY)
7008 bool skip_src_check = false;
7009
7010 /*
7011 * The OSEntitlements RO-zone is a little differently treated. For more
7012 * information: rdar://100518485.
7013 */
7014 if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
7015 code_signing_config_t cs_config = 0;
7016
7017 code_signing_configuration(NULL, &cs_config);
7018 if (cs_config & CS_CONFIG_CSM_ENABLED) {
7019 skip_src_check = true;
7020 }
7021 }
7022
7023 if (skip_src_check == false) {
7024 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
7025 new_data_size);
7026 }
7027 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
7028 (vm_offset_t) new_data, new_data_size);
7029 #else
7030 (void)zid;
7031 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
7032 #endif
7033 }
7034
7035 __attribute__((noinline))
7036 uint64_t
7037 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
7038 zro_atomic_op_t op, uint64_t value)
7039 {
7040 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7041
7042 #if ZSECURITY_CONFIG(READ_ONLY)
7043 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
7044 #else
7045 (void)zid;
7046 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
7047 #endif
7048 return value;
7049 }
7050
7051 void
7052 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
7053 {
7054 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7055 #if ZSECURITY_CONFIG(READ_ONLY)
7056 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
7057 #else
7058 (void)zid;
7059 bzero((void *)((uintptr_t)elem + offset), size);
7060 #endif
7061 }
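/*
 * A minimal usage sketch for the read-only zone API above (hypothetical
 * zone id and structure); the element is only written through the
 * mutator routines, never directly:
 *
 *     struct ro_thing *t = zalloc_ro(ZONE_ID_RO_THING, Z_WAITOK);
 *     struct ro_thing tmp = { .field = value };
 *     zalloc_ro_mut(ZONE_ID_RO_THING, t, 0, &tmp, sizeof(tmp));
 *     ...
 *     zfree_ro(ZONE_ID_RO_THING, t);
 */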
7062
7063 /*
7064 * This function will run in the PPL and needs to be robust
7065 * against an attacker with arbitrary kernel write.
7066 */
7067
7068 #if ZSECURITY_CONFIG(READ_ONLY)
7069
7070 __abortlike
7071 static void
7072 zone_id_require_ro_panic(zone_id_t zid, void *addr)
7073 {
7074 struct zone_size_params p = zone_ro_size_params[zid];
7075 vm_offset_t elem = (vm_offset_t)addr;
7076 uint32_t zindex;
7077 zone_t other;
7078 zone_t zone = &zone_array[zid];
7079
7080 if (!from_ro_map(addr, 1)) {
7081 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7082 }
7083
7084 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7085 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7086 }
7087
7088 zindex = zone_index_from_ptr(addr);
7089 other = &zone_array[zindex];
7090 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7091 panic("zone_require_ro failed: invalid zone index %d "
7092 "(addr: %p, expected: %s%s)", zindex,
7093 addr, zone_heap_name(zone), zone->z_name);
7094 } else {
7095 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7096 "(addr: %p, expected: %s%s)",
7097 zindex, zone_heap_name(other), other->z_name,
7098 addr, zone_heap_name(zone), zone->z_name);
7099 }
7100 }
7101
7102 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7103
7104 __attribute__((always_inline))
7105 void
7106 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7107 {
7108 #if ZSECURITY_CONFIG(READ_ONLY)
7109 struct zone_size_params p = zone_ro_size_params[zid];
7110 vm_offset_t elem = (vm_offset_t)addr;
7111
7112 if (!from_ro_map(addr, 1) ||
7113 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7114 zid != zone_meta_from_addr(elem)->zm_index) {
7115 zone_id_require_ro_panic(zid, addr);
7116 }
7117 #else
7118 #pragma unused(zid, addr)
7119 #endif
7120 }
7121
7122 void *
7123 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7124 {
7125 zone_t zone = zov.zov_view->zv_zone;
7126 zone_stats_t zstats = zov.zov_view->zv_stats;
7127
7128 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7129 assert(zone->z_percpu);
7130 flags |= Z_PCPU;
7131 return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags).addr);
7132 }
7133
7134 static void *
7135 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7136 {
7137 struct zone_page_metadata *page_meta;
7138 vm_offset_t offs, addr;
7139 zone_pva_t pva;
7140
7141 assert(ml_get_interrupts_enabled() ||
7142 ml_is_quiescing() ||
7143 debug_mode_active() ||
7144 startup_phase < STARTUP_SUB_EARLY_BOOT);
7145
7146 size = (size + mask) & ~mask;
7147 assert(size <= PAGE_SIZE);
7148
7149 zone_lock(zone);
7150 assert(zone->z_self == zone);
7151
7152 for (;;) {
7153 pva = zone->z_pageq_partial;
7154 while (!zone_pva_is_null(pva)) {
7155 page_meta = zone_pva_to_meta(pva);
7156 if (page_meta->zm_bump + size <= PAGE_SIZE) {
7157 goto found;
7158 }
7159 pva = page_meta->zm_page_next;
7160 }
7161
7162 zone_expand_locked(zone, Z_WAITOK, NULL);
7163 }
7164
7165 found:
7166 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7167 page_meta->zm_bump = (uint16_t)(offs + size);
7168 page_meta->zm_alloc_size += size;
7169 zone->z_elems_free -= size;
7170 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7171
7172 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7173 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7174 }
7175
7176 zone_unlock(zone);
7177
7178 addr = offs + zone_pva_to_addr(pva);
7179
7180 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7181 return (void *)addr;
7182 }
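/*
 * The function above is a simple bump allocator: the requested size is
 * rounded up to the alignment mask, then carved out of the first partial
 * chunk whose zm_bump cursor still fits, growing the zone if none does.
 *
 * Worked example (illustrative numbers): size = 12, mask = 7 (8-byte
 * alignment) rounds to (12 + 7) & ~7 = 16; with zm_bump = 40 the element
 * is placed at offset (40 + 7) & ~7 = 40 and zm_bump advances to 56.
 */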
7183
7184 static void *
7185 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7186 {
7187 vm_offset_t addr;
7188
7189 kernel_memory_allocate(kernel_map, &addr, size, mask,
7190 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7191
7192 return (void *)addr;
7193 }
7194
7195 void *
7196 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7197 {
7198 if (size <= PAGE_SIZE) {
7199 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7200 return _zalloc_permanent(zone, size, mask);
7201 }
7202 return _zalloc_permanent_large(size, mask, tag);
7203 }
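/*
 * Sketch of a typical call (hypothetical type, assuming the
 * zalloc_permanent()/ZALIGN() wrappers from zalloc.h): permanent memory is
 * never freed, so this is only for state that lives for the whole boot.
 *
 *	struct foo_globals *fg;
 *	fg = zalloc_permanent(sizeof(*fg), ZALIGN(struct foo_globals));
 *
 * Requests up to PAGE_SIZE are served by the ZONE_ID_PERMANENT bump
 * allocator above; larger requests fall through to a dedicated
 * KMA_PERMANENT kernel_memory_allocate() call.
 */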
7204
7205 void *
7206 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7207 {
7208 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7209 return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
7210 }
7211
7212 /*! @} */
7213 #endif /* !ZALLOC_TEST */
7214 #pragma mark zone GC / trimming
7215 #if !ZALLOC_TEST
7216
7217 static thread_call_data_t zone_trim_callout;
7218 EVENT_DEFINE(ZONE_EXHAUSTED);
7219
7220 static void
7221 zone_reclaim_chunk(
7222 zone_t z,
7223 struct zone_page_metadata *meta,
7224 uint32_t free_count)
7225 {
7226 vm_address_t page_addr;
7227 vm_size_t size_to_free;
7228 uint32_t bitmap_ref;
7229 uint32_t page_count;
7230 zone_security_flags_t zsflags = zone_security_config(z);
7231 bool sequester = !z->z_destroyed;
7232 bool oob_guard = false;
7233
7234 if (zone_submap_is_sequestered(zsflags)) {
7235 /*
7236 * If the entire map is sequestered, we can't return the VA.
7237 * It stays pinned to the zone forever.
7238 */
7239 sequester = true;
7240 }
7241
7242 zone_meta_queue_pop(z, &z->z_pageq_empty);
7243
7244 page_addr = zone_meta_to_addr(meta);
7245 page_count = meta->zm_chunk_len;
7246 oob_guard = meta->zm_guarded;
7247
7248 if (meta->zm_alloc_size) {
7249 zone_metadata_corruption(z, meta, "alloc_size");
7250 }
7251 if (z->z_percpu) {
7252 if (page_count != 1) {
7253 zone_metadata_corruption(z, meta, "page_count");
7254 }
7255 size_to_free = ptoa(z->z_chunk_pages);
7256 zone_remove_wired_pages(z->z_chunk_pages);
7257 } else {
7258 if (page_count > z->z_chunk_pages) {
7259 zone_metadata_corruption(z, meta, "page_count");
7260 }
7261 if (page_count < z->z_chunk_pages) {
7262 /* Dequeue non-populated VA from z_pageq_va */
7263 zone_meta_remqueue(z, meta + page_count);
7264 }
7265 size_to_free = ptoa(page_count);
7266 zone_remove_wired_pages(page_count);
7267 }
7268
7269 zone_counter_sub(z, z_elems_free, free_count);
7270 zone_counter_sub(z, z_elems_avail, free_count);
7271 zone_counter_sub(z, z_wired_empty, page_count);
7272 zone_counter_sub(z, z_wired_cur, page_count);
7273
7274 if (z->z_pcpu_cache == NULL) {
7275 if (z->z_elems_free_min < free_count) {
7276 z->z_elems_free_min = 0;
7277 } else {
7278 z->z_elems_free_min -= free_count;
7279 }
7280 }
7281 if (z->z_elems_free_wma < free_count) {
7282 z->z_elems_free_wma = 0;
7283 } else {
7284 z->z_elems_free_wma -= free_count;
7285 }
7286
7287 bitmap_ref = 0;
7288 if (sequester) {
7289 if (meta->zm_inline_bitmap) {
7290 for (int i = 0; i < meta->zm_chunk_len; i++) {
7291 meta[i].zm_bitmap = 0;
7292 }
7293 } else {
7294 bitmap_ref = meta->zm_bitmap;
7295 meta->zm_bitmap = 0;
7296 }
7297 meta->zm_chunk_len = 0;
7298 } else {
7299 if (!meta->zm_inline_bitmap) {
7300 bitmap_ref = meta->zm_bitmap;
7301 }
7302 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7303 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7304 }
7305
7306 #if CONFIG_ZLEAKS
7307 if (__improbable(zleak_should_disable_for_zone(z) &&
7308 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7309 thread_call_enter(&zone_leaks_callout);
7310 }
7311 #endif /* CONFIG_ZLEAKS */
7312
7313 zone_unlock(z);
7314
7315 if (bitmap_ref) {
7316 zone_bits_free(bitmap_ref);
7317 }
7318
7319 /* Free the pages for metadata and account for them */
7320 #if KASAN_CLASSIC
7321 if (z->z_percpu) {
7322 for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7323 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7324 zone_elem_outer_size(z),
7325 zone_elem_outer_offs(z),
7326 zone_elem_redzone(z));
7327 }
7328 } else {
7329 kasan_zmem_remove(page_addr, size_to_free,
7330 zone_elem_outer_size(z),
7331 zone_elem_outer_offs(z),
7332 zone_elem_redzone(z));
7333 }
7334 #endif /* KASAN_CLASSIC */
7335
7336 if (sequester) {
7337 kernel_memory_depopulate(page_addr, size_to_free,
7338 KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7339 } else {
7340 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7341 kmem_free(zone_submap(zsflags), page_addr,
7342 ptoa(z->z_chunk_pages + oob_guard));
7343 if (oob_guard) {
7344 os_atomic_dec(&zone_guard_pages, relaxed);
7345 }
7346 }
7347
7348 thread_yield_to_preemption();
7349
7350 zone_lock(z);
7351
7352 if (sequester) {
7353 zone_meta_queue_push(z, &z->z_pageq_va, meta);
7354 }
7355 }
7356
7357 static void
7358 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7359 {
7360 z_debug_assert(n <= zc_mag_size());
7361
7362 for (uint16_t i = 0; i < n; i++) {
7363 vm_offset_t addr = elems[i];
7364 elems[i] = 0;
7365 zfree_drop(z, addr);
7366 }
7367
7368 z->z_elems_free += n;
7369 }
7370
7371 static void
7372 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7373 {
7374 z_debug_assert(n <= zc_mag_size());
7375 zone_cache_ops_t ops = zcache_ops[zid];
7376
7377 for (uint16_t i = 0; i < n; i++) {
7378 vm_offset_t addr = elems[i];
7379 elems[i] = 0;
7380 addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7381 ops->zc_op_free(zid, (void *)addr);
7382 }
7383
7384 os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7385 }
7386
7387 static void
7388 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7389 {
7390 zpercpu_foreach(zc, z->z_pcpu_cache) {
7391 zone_depot_lock(zc);
7392
7393 if (zc->zc_depot.zd_full > (target + 1) / 2) {
7394 uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7395 zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7396 }
7397
7398 if (zc->zc_depot.zd_empty > target / 2) {
7399 uint32_t n = zc->zc_depot.zd_empty - target / 2;
7400 zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7401 }
7402
7403 zone_depot_unlock(zc);
7404 }
7405 }
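/*
 * The split above keeps roughly half of `target` magazines per CPU,
 * giving the extra slot to full magazines. Illustrative numbers: with
 * target = 5, each per-CPU depot keeps at most (5 + 1) / 2 = 3 full and
 * 5 / 2 = 2 empty magazines; anything beyond that is moved into `zd`
 * for the caller to reclaim.
 */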
7406
7407 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7408 ZONE_RECLAIM_TRIM,
7409 ZONE_RECLAIM_DRAIN,
7410 ZONE_RECLAIM_DESTROY,
7411 });
7412
7413 static void
7414 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7415 {
7416 uint32_t depot_max = 0;
7417 bool cleanup = mode != ZONE_RECLAIM_TRIM;
7418
7419 if (z->z_depot_cleanup) {
7420 z->z_depot_cleanup = false;
7421 depot_max = z->z_depot_size;
7422 cleanup = true;
7423 }
7424
7425 if (cleanup) {
7426 zone_depot_trim(z, depot_max, zd);
7427 }
7428
7429 if (mode == ZONE_RECLAIM_DESTROY) {
7430 zpercpu_foreach(zc, z->z_pcpu_cache) {
7431 zone_reclaim_elements(z, zc->zc_alloc_cur,
7432 zc->zc_alloc_elems);
7433 zone_reclaim_elements(z, zc->zc_free_cur,
7434 zc->zc_free_elems);
7435 zc->zc_alloc_cur = zc->zc_free_cur = 0;
7436 }
7437
7438 z->z_recirc_empty_min = 0;
7439 z->z_recirc_empty_wma = 0;
7440 z->z_recirc_full_min = 0;
7441 z->z_recirc_full_wma = 0;
7442 z->z_recirc_cont_cur = 0;
7443 z->z_recirc_cont_wma = 0;
7444 }
7445 }
7446
7447 static void
7448 zone_reclaim_recirc(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7449 {
7450 assert(zd->zd_empty == 0);
7451 assert(zd->zd_full == 0);
7452
7453 zone_recirc_lock_nopreempt(z);
7454
7455 if (mode == ZONE_RECLAIM_TRIM) {
7456 uint32_t count;
7457
7458 count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7459 z->z_recirc_empty_min);
7460 assert(count <= z->z_recirc.zd_empty);
7461
7462 if (count) {
7463 zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7464 z->z_recirc_empty_min -= count;
7465 z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7466 }
7467
7468 count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT, z->z_recirc_full_min);
7469 assert(count <= z->z_recirc.zd_full);
7470 if (count) {
7471 zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7472 z->z_recirc_full_min -= count;
7473 z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7474 }
7475 } else {
7476 *zd = z->z_recirc;
7477 if (zd->zd_full == 0) {
7478 zd->zd_tail = &zd->zd_head;
7479 }
7480 zone_depot_init(&z->z_recirc);
7481 z->z_recirc_empty_min = 0;
7482 z->z_recirc_empty_wma = 0;
7483 z->z_recirc_full_min = 0;
7484 z->z_recirc_full_wma = 0;
7485 }
7486
7487 zone_recirc_unlock_nopreempt(z);
7488 }
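/*
 * In ZONE_RECLAIM_TRIM mode the count is bounded by both the decaying
 * average and the observed minimum, so a short burst of frees does not
 * dismantle the whole recirculation depot. Illustrative numbers: with
 * z_recirc_empty_wma = 24 * Z_WMA_UNIT and z_recirc_empty_min = 10,
 * only MIN(24, 10) = 10 empty magazines are moved out; the rest are
 * treated as part of the working set.
 */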
7489
7490 /*!
7491 * @function zone_reclaim
7492 *
7493 * @brief
7494 * Drains or trims the zone.
7495 *
7496 * @discussion
7497 * Draining the zone frees all of its elements.
7498 *
7499 * Trimming the zone tries to respect the working set size, and avoids draining
7500 * the depot when it's not necessary.
7501 *
7502 * @param z The zone to reclaim from
7503 * @param mode The purpose of this reclaim.
7504 */
7505 static void
7506 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7507 {
7508 struct zone_depot zd;
7509
7510 zone_depot_init(&zd);
7511
7512 zone_lock(z);
7513
7514 if (mode == ZONE_RECLAIM_DESTROY) {
7515 if (!z->z_destructible || z->z_elems_rsv) {
7516 panic("zdestroy: Zone %s%s isn't destructible",
7517 zone_heap_name(z), z->z_name);
7518 }
7519
7520 if (!z->z_self || z->z_expander ||
7521 z->z_async_refilling || z->z_expanding_wait) {
7522 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7523 zone_heap_name(z), z->z_name);
7524 }
7525
7526 #if !KASAN_CLASSIC
7527 /*
7528 * Unset the valid bit. We'll hit an assert failure on further
7529 * operations on this zone, until zinit() is called again.
7530 *
7531 * Leave the zone valid for KASan as we will see zfree's on
7532 * quarantined free elements even after the zone is destroyed.
7533 */
7534 z->z_self = NULL;
7535 #endif
7536 z->z_destroyed = true;
7537 } else if (z->z_destroyed) {
7538 return zone_unlock(z);
7539 } else if (zone_count_free(z) <= z->z_elems_rsv) {
7540 /* If the zone is under its reserve level, leave it alone. */
7541 return zone_unlock(z);
7542 }
7543
7544 if (z->z_pcpu_cache) {
7545 zone_magazine_t mag;
7546 uint32_t freed = 0;
7547
7548 /*
7549 * This is all done with the zone lock held on purpose.
7550 * The work here is O(ncpu), which should still be short.
7551 *
7552 * We need to keep the lock held until we have reclaimed
7553 * at least a few magazines, otherwise if the zone has no
7554 * free elements outside of the depot, a thread performing
7555 * a concurrent allocation could try to grow the zone
7556 * while we're trying to drain it.
7557 */
7558 zone_reclaim_recirc(z, mode, &zd);
7559 zone_reclaim_pcpu(z, mode, &zd);
7560
7561 if (z->z_chunk_elems) {
7562 zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7563 smr_t smr = zone_cache_smr(cache);
7564
7565 while (zd.zd_full) {
7566 mag = zone_depot_pop_head_full(&zd, NULL);
7567 if (smr) {
7568 smr_wait(smr, mag->zm_seq);
7569 zalloc_cached_reuse_smr(z, cache, mag);
7570 freed += zc_mag_size();
7571 }
7572 zone_reclaim_elements(z, zc_mag_size(),
7573 mag->zm_elems);
7574 zone_depot_insert_head_empty(&zd, mag);
7575
7576 freed += zc_mag_size();
7577 if (freed >= zc_free_batch_size()) {
7578 zone_unlock(z);
7579 zone_magazine_free_list(&zd);
7580 thread_yield_to_preemption();
7581 zone_lock(z);
7582 freed = 0;
7583 }
7584 }
7585 } else {
7586 zone_id_t zid = zone_index(z);
7587
7588 zone_unlock(z);
7589
7590 assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7591
7592 while (zd.zd_full) {
7593 mag = zone_depot_pop_head_full(&zd, NULL);
7594 zcache_reclaim_elements(zid, zc_mag_size(),
7595 mag->zm_elems);
7596 zone_magazine_free(mag);
7597 }
7598
7599 goto cleanup;
7600 }
7601 }
7602
7603 while (!zone_pva_is_null(z->z_pageq_empty)) {
7604 struct zone_page_metadata *meta;
7605 uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7606
7607 if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7608 limit = MAX(limit, z->z_elems_free -
7609 MIN(z->z_elems_free_min, z->z_elems_free_wma));
7610 }
7611
7612 meta = zone_pva_to_meta(z->z_pageq_empty);
7613 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7614
7615 if (zone_count_free(z) - count < limit) {
7616 break;
7617 }
7618
7619 zone_reclaim_chunk(z, meta, count);
7620 }
7621
7622 zone_unlock(z);
7623
7624 cleanup:
7625 zone_magazine_free_list(&zd);
7626 }
7627
7628 void
7629 zone_drain(zone_t zone)
7630 {
7631 current_thread()->options |= TH_OPT_ZONE_PRIV;
7632 lck_mtx_lock(&zone_gc_lock);
7633 zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7634 lck_mtx_unlock(&zone_gc_lock);
7635 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7636 }
7637
7638 void
7639 zcache_drain(zone_id_t zid)
7640 {
7641 zone_drain(zone_by_id(zid));
7642 }
7643
7644 static void
7645 zone_reclaim_all(zone_reclaim_mode_t mode)
7646 {
7647 /*
7648 * Start with zcaches, so that they flow into the regular zones.
7649 *
7650 * Then reclaim the zones whose VA is sequestered: depopulating
7651 * their pages does not require allocating vm map entries for holes,
7652 * which gives memory back to the system faster.
7653 */
7654 for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7655 zone_t z = zone_by_id(zid);
7656
7657 if (z->z_self && z->z_chunk_elems == 0) {
7658 zone_reclaim(z, mode);
7659 }
7660 }
7661 zone_index_foreach(zid) {
7662 zone_t z = zone_by_id(zid);
7663
7664 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7665 continue;
7666 }
7667 if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7668 z->collectable) {
7669 zone_reclaim(z, mode);
7670 }
7671 }
7672
7673 zone_index_foreach(zid) {
7674 zone_t z = zone_by_id(zid);
7675
7676 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7677 continue;
7678 }
7679 if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7680 z->collectable) {
7681 zone_reclaim(z, mode);
7682 }
7683 }
7684
7685 zone_reclaim(zc_magazine_zone, mode);
7686 }
7687
7688 void
7689 zone_userspace_reboot_checks(void)
7690 {
7691 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7692 if (label_zone_size != 0) {
7693 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7694 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7695 }
7696 }
7697
7698 void
7699 zone_gc(zone_gc_level_t level)
7700 {
7701 zone_reclaim_mode_t mode;
7702 zone_t largest_zone = NULL;
7703
7704 switch (level) {
7705 case ZONE_GC_TRIM:
7706 mode = ZONE_RECLAIM_TRIM;
7707 break;
7708 case ZONE_GC_DRAIN:
7709 mode = ZONE_RECLAIM_DRAIN;
7710 break;
7711 case ZONE_GC_JETSAM:
7712 largest_zone = kill_process_in_largest_zone();
7713 mode = ZONE_RECLAIM_TRIM;
7714 break;
7715 }
7716
7717 current_thread()->options |= TH_OPT_ZONE_PRIV;
7718 lck_mtx_lock(&zone_gc_lock);
7719
7720 zone_reclaim_all(mode);
7721
7722 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7723 /*
7724 * If we possibly killed a process, but we're still critical,
7725 * we need to drain harder.
7726 */
7727 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7728 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7729 }
7730
7731 lck_mtx_unlock(&zone_gc_lock);
7732 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7733 }
7734
7735 void
7736 zone_gc_trim(void)
7737 {
7738 zone_gc(ZONE_GC_TRIM);
7739 }
7740
7741 void
7742 zone_gc_drain(void)
7743 {
7744 zone_gc(ZONE_GC_DRAIN);
7745 }
7746
7747 static bool
7748 zone_trim_needed(zone_t z)
7749 {
7750 if (z->z_depot_cleanup) {
7751 return true;
7752 }
7753
7754 if (z->z_async_refilling) {
7755 /* Don't fight with refill */
7756 return false;
7757 }
7758
7759 if (z->z_pcpu_cache) {
7760 uint32_t e_n, f_n;
7761
7762 e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7763 f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
7764
7765 if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7766 return true;
7767 }
7768
7769 if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7770 f_n * zc_mag_size() * zone_elem_inner_size(z) >
7771 zc_autotrim_size() * Z_WMA_UNIT) {
7772 return true;
7773 }
7774
7775 return false;
7776 }
7777
7778 if (!zone_pva_is_null(z->z_pageq_empty)) {
7779 uint32_t n;
7780
7781 n = MIN(z->z_elems_free_wma, z->z_elems_free_min);
7782
7783 return n >= z->z_elems_rsv + z->z_chunk_elems;
7784 }
7785
7786 return false;
7787 }
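/*
 * The thresholds above are in Z_WMA_UNIT fixed point, so the comparisons
 * are between averaged magazine counts. A rough reading (illustrative):
 * a cached zone is trimmed once it holds more than zc_autotrim_buckets()
 * empty magazines on average, or once its average surplus of full
 * magazines both exceeds the reserve and amounts to more than
 * zc_autotrim_size() bytes of elements; an uncached zone is trimmed once
 * a chunk's worth of elements beyond the reserve has stayed free across
 * the sampling window.
 */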
7788
7789 static void
7790 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7791 {
7792 current_thread()->options |= TH_OPT_ZONE_PRIV;
7793
7794 zone_foreach(z) {
7795 if (!z->collectable || z == zc_magazine_zone) {
7796 continue;
7797 }
7798
7799 if (zone_trim_needed(z)) {
7800 lck_mtx_lock(&zone_gc_lock);
7801 zone_reclaim(z, ZONE_RECLAIM_TRIM);
7802 lck_mtx_unlock(&zone_gc_lock);
7803 }
7804 }
7805
7806 if (zone_trim_needed(zc_magazine_zone)) {
7807 lck_mtx_lock(&zone_gc_lock);
7808 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7809 lck_mtx_unlock(&zone_gc_lock);
7810 }
7811
7812 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7813 }
7814
7815 void
7816 compute_zone_working_set_size(__unused void *param)
7817 {
7818 uint32_t zc_auto = zc_enable_level();
7819 bool needs_trim = false;
7820
7821 /*
7822 * Keep zone caching disabled until the first proc is made.
7823 */
7824 if (__improbable(zone_caching_disabled < 0)) {
7825 return;
7826 }
7827
7828 zone_caching_disabled = vm_pool_low();
7829
7830 if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
7831 zc_auto = 0;
7832 }
7833
7834 zone_foreach(z) {
7835 uint32_t old, wma, cur;
7836 bool needs_caching = false;
7837
7838 if (z->z_self != z) {
7839 continue;
7840 }
7841
7842 zone_lock(z);
7843
7844 zone_recirc_lock_nopreempt(z);
7845
7846 if (z->z_pcpu_cache) {
7847 wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
7848 z->z_recirc_empty_min = z->z_recirc.zd_empty;
7849 z->z_recirc_empty_wma = wma;
7850 } else {
7851 wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
7852 z->z_elems_free_min = z->z_elems_free;
7853 z->z_elems_free_wma = wma;
7854 }
7855
7856 wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
7857 z->z_recirc_full_min = z->z_recirc.zd_full;
7858 z->z_recirc_full_wma = wma;
7859
7860 /* fixed point decimal of contentions per second */
7861 old = z->z_recirc_cont_wma;
7862 cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
7863 (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
7864 cur = (3 * old + cur) / 4;
7865 zone_recirc_unlock_nopreempt(z);
7866
7867 if (z->z_pcpu_cache) {
7868 uint16_t size = z->z_depot_size;
7869
7870 if (size < z->z_depot_limit && cur > zc_grow_level()) {
7871 /*
7872 * lose history on purpose now
7873 * that we just grew, to give
7874 * the system time to adjust.
7875 */
7876 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7877 size = size ? (3 * size + 2) / 2 : 2;
7878 z->z_depot_size = MIN(z->z_depot_limit, size);
7879 } else if (size > 0 && cur <= zc_shrink_level()) {
7880 /*
7881 * lose history on purpose now
7882 * that we just shrunk, to give
7883 * the system time to adjust.
7884 */
7885 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7886 z->z_depot_size = size - 1;
7887 z->z_depot_cleanup = true;
7888 }
7889 } else if (!z->z_nocaching && !z->exhaustible && zc_auto &&
7890 old >= zc_auto && cur >= zc_auto) {
7891 needs_caching = true;
7892 }
7893
7894 z->z_recirc_cont_wma = cur;
7895 z->z_recirc_cont_cur = 0;
7896
7897 if (!needs_trim && zone_trim_needed(z)) {
7898 needs_trim = true;
7899 }
7900
7901 zone_unlock(z);
7902
7903 if (needs_caching) {
7904 zone_enable_caching(z);
7905 }
7906 }
7907
7908 if (needs_trim) {
7909 thread_call_enter(&zone_trim_callout);
7910 }
7911 }
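/*
 * The contention figure maintained above is an exponentially weighted
 * moving average with a 3/4 weight on history: cur = (3 * old + cur) / 4.
 * The depot growth rule size = (3 * size + 2) / 2 (starting at 2) yields
 * the sequence 2, 4, 7, 11, 17, ... capped at z_depot_limit, so a
 * persistently contended zone grows its per-CPU depots quickly but not
 * without bound (illustrative reading of the code above).
 */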
7912
7913 #endif /* !ZALLOC_TEST */
7914 #pragma mark vm integration, MIG routines
7915 #if !ZALLOC_TEST
7916
7917 extern unsigned int stack_total;
7918 #if defined (__x86_64__)
7919 extern unsigned int inuse_ptepages_count;
7920 #endif
7921
7922 static const char *
7923 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
7924 bool is_kt_var)
7925 {
7926 if (is_kt_var) {
7927 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
7928 return cur.ktv_var->kt_name;
7929 } else {
7930 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
7931 return cur.ktv_fixed->kt_zv.zv_name;
7932 }
7933 }
7934
7935 static void
7936 panic_print_types_in_zone(zone_t z, const char* debug_str)
7937 {
7938 kalloc_type_views_t kt_cur = {};
7939 const char *prev_type = "";
7940 size_t skip_over_site = sizeof("site.") - 1;
7941 zone_security_flags_t zsflags = zone_security_config(z);
7942 bool is_kt_var = false;
7943
7944 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
7945 uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
7946 kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
7947 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
7948 is_kt_var = true;
7949 } else {
7950 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
7951 }
7952
7953 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
7954 is_kt_var? "type arrays" : "types", debug_str, z->z_name);
7955
7956 while (kt_cur.ktv_fixed) {
7957 kalloc_type_views_t kt_next = {};
7958 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
7959 is_kt_var) + skip_over_site;
7960 if (strcmp(typename, prev_type) != 0) {
7961 paniclog_append_noflush("\t%-50s\n", typename);
7962 prev_type = typename;
7963 }
7964 kt_cur = kt_next;
7965 }
7966 paniclog_append_noflush("\n");
7967 }
7968
7969 static void
7970 panic_display_kalloc_types(void)
7971 {
7972 if (kalloc_type_src_zone) {
7973 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
7974 }
7975 if (kalloc_type_dst_zone) {
7976 panic_print_types_in_zone(kalloc_type_dst_zone,
7977 "addr is being freed to");
7978 }
7979 }
7980
7981 static void
7982 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
7983 uint64_t *zone_size)
7984 {
7985 zone_index_foreach(zid) {
7986 zone_t z = &zone_array[zid];
7987 vm_offset_t size = zone_size_wired(z);
7988
7989 if (zid == ZONE_ID_VM_PAGES) {
7990 continue;
7991 }
7992 for (uint32_t i = 0; i < n; i++) {
7993 if (size > zone_size[i]) {
7994 largest_zones[i] = z;
7995 zone_size[i] = size;
7996 break;
7997 }
7998 }
7999 }
8000 }
8001
8002 #define NUM_LARGEST_ZONES 5
8003 static void
8004 panic_display_largest_zones(void)
8005 {
8006 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
8007 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
8008
8009 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
8010 (uint64_t *) &largest_size);
8011
8012 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
8013 "Zone Name", "Cur Size", "Free Size");
8014 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
8015 zone_t z = largest_zones[i];
8016 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
8017 zone_heap_name(z), z->z_name,
8018 mach_vm_size_pretty(largest_size[i]),
8019 mach_vm_size_unit(largest_size[i]),
8020 mach_vm_size_pretty(zone_size_free(z)),
8021 mach_vm_size_unit(zone_size_free(z)));
8022 }
8023 }
8024
8025 static void
8026 panic_display_zprint(void)
8027 {
8028 panic_display_largest_zones();
8029 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
8030 (uintptr_t)(kernel_stack_size * stack_total));
8031 #if defined (__x86_64__)
8032 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
8033 (uintptr_t)ptoa(inuse_ptepages_count));
8034 #endif
8035 paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large",
8036 (uintptr_t)kalloc_large_total);
8037
8038 if (panic_kext_memory_info) {
8039 mach_memory_info_t *mem_info = panic_kext_memory_info;
8040
8041 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
8042 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
8043 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
8044 continue;
8045 }
8046 if (mem_info[i].size > (1024 * 1024)) {
8047 paniclog_append_noflush("%-5lld %10lld\n",
8048 mem_info[i].site, mem_info[i].size);
8049 }
8050 }
8051 }
8052 }
8053
8054 static void
8055 panic_display_zone_info(void)
8056 {
8057 paniclog_append_noflush("Zone info:\n");
8058 paniclog_append_noflush(" Zone map: %p - %p\n",
8059 (void *)zone_info.zi_map_range.min_address,
8060 (void *)zone_info.zi_map_range.max_address);
8061 #if CONFIG_PROB_GZALLOC
8062 if (pgz_submap) {
8063 paniclog_append_noflush(" . PGZ : %p - %p\n",
8064 (void *)pgz_submap->min_offset,
8065 (void *)pgz_submap->max_offset);
8066 }
8067 #endif /* CONFIG_PROB_GZALLOC */
8068 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8069 vm_map_t map = zone_submaps[i];
8070
8071 if (map == VM_MAP_NULL) {
8072 continue;
8073 }
8074 paniclog_append_noflush(" . %-6s: %p - %p\n",
8075 zone_submaps_names[i],
8076 (void *)map->min_offset,
8077 (void *)map->max_offset);
8078 }
8079 paniclog_append_noflush(" Metadata: %p - %p\n"
8080 " Bitmaps : %p - %p\n"
8081 " Extra : %p - %p\n"
8082 "\n",
8083 (void *)zone_info.zi_meta_range.min_address,
8084 (void *)zone_info.zi_meta_range.max_address,
8085 (void *)zone_info.zi_bits_range.min_address,
8086 (void *)zone_info.zi_bits_range.max_address,
8087 (void *)zone_info.zi_xtra_range.min_address,
8088 (void *)zone_info.zi_xtra_range.max_address);
8089 }
8090
8091 static void
8092 panic_display_zone_fault(vm_offset_t addr)
8093 {
8094 struct zone_page_metadata meta = { };
8095 vm_map_t map = VM_MAP_NULL;
8096 vm_offset_t oob_offs = 0, size = 0;
8097 int map_idx = -1;
8098 zone_t z = NULL;
8099 const char *kind = "wild deref";
8100 bool oob = false;
8101
8102 /*
8103 * First: look if we bumped into guard pages between submaps
8104 */
8105 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8106 map = zone_submaps[i];
8107 if (map == VM_MAP_NULL) {
8108 continue;
8109 }
8110
8111 if (addr >= map->min_offset && addr < map->max_offset) {
8112 map_idx = i;
8113 break;
8114 }
8115 }
8116
8117 if (map_idx == -1) {
8118 /* this really shouldn't happen, submaps are back to back */
8119 return;
8120 }
8121
8122 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8123
8124 /*
8125 * Second: look if there's just no metadata at all
8126 */
8127 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8128 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8129 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8130 zone_array[meta.zm_index].z_self == NULL) {
8131 paniclog_append_noflush(" Zone : <unknown>\n");
8132 kind = "wild deref, missing or invalid metadata";
8133 } else {
8134 z = &zone_array[meta.zm_index];
8135 paniclog_append_noflush(" Zone : %s%s\n",
8136 zone_heap_name(z), zone_name(z));
8137 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8138 kind = "out-of-bounds (high confidence)";
8139 oob = true;
8140 size = zone_element_size((void *)addr,
8141 &z, false, &oob_offs);
8142 } else {
8143 kind = "use-after-free (medium confidence)";
8144 }
8145 }
8146
8147 paniclog_append_noflush(" Address : %p\n", (void *)addr);
8148 if (oob) {
8149 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
8150 (void *)(trunc_page(addr) - (size - oob_offs)),
8151 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8152 }
8153 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
8154 zone_submaps_names[map_idx],
8155 (void *)map->min_offset, (void *)map->max_offset);
8156 paniclog_append_noflush(" Kind : %s\n", kind);
8157 if (oob) {
8158 paniclog_append_noflush(" Access : %d byte(s) past\n",
8159 (uint32_t)(addr & PAGE_MASK) + 1);
8160 }
8161 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
8162 "0x%04x 0x%08x 0x%08x 0x%08x\n",
8163 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8164 meta.zm_alloc_size, meta.zm_bitmap,
8165 meta.zm_page_next.packed_address,
8166 meta.zm_page_prev.packed_address);
8167 paniclog_append_noflush("\n");
8168 }
8169
8170 void
8171 panic_display_zalloc(void)
8172 {
8173 bool keepsyms = false;
8174
8175 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8176
8177 panic_display_zone_info();
8178
8179 if (panic_fault_address) {
8180 #if CONFIG_PROB_GZALLOC
8181 if (pgz_owned(panic_fault_address)) {
8182 panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8183 } else
8184 #endif /* CONFIG_PROB_GZALLOC */
8185 if (zone_maps_owned(panic_fault_address, 1)) {
8186 panic_display_zone_fault(panic_fault_address);
8187 }
8188 }
8189
8190 if (panic_include_zprint) {
8191 panic_display_zprint();
8192 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8193 panic_display_largest_zones();
8194 }
8195 #if CONFIG_ZLEAKS
8196 if (zleak_active) {
8197 panic_display_zleaks(keepsyms);
8198 }
8199 #endif
8200 if (panic_include_kalloc_types) {
8201 panic_display_kalloc_types();
8202 }
8203 }
8204
8205 /*
8206 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8207 * requesting zone information.
8208 * Frees unused pages towards the end of the region, and zeroes out unused
8209 * space on the last page.
8210 */
8211 static vm_map_copy_t
8212 create_vm_map_copy(
8213 vm_offset_t start_addr,
8214 vm_size_t total_size,
8215 vm_size_t used_size)
8216 {
8217 kern_return_t kr;
8218 vm_offset_t end_addr;
8219 vm_size_t free_size;
8220 vm_map_copy_t copy;
8221
8222 if (used_size != total_size) {
8223 end_addr = start_addr + used_size;
8224 free_size = total_size - (round_page(end_addr) - start_addr);
8225
8226 if (free_size >= PAGE_SIZE) {
8227 kmem_free(ipc_kernel_map,
8228 round_page(end_addr), free_size);
8229 }
8230 bzero((char *) end_addr, round_page(end_addr) - end_addr);
8231 }
8232
8233 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8234 (vm_map_size_t)used_size, TRUE, &copy);
8235 assert(kr == KERN_SUCCESS);
8236
8237 return copy;
8238 }
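/*
 * Worked example (illustrative): for a 3-page buffer of which only 1.5
 * pages are used, end_addr lands in the middle of the second page,
 * free_size = 3 pages - round_page(1.5 pages) = 1 page, so the third page
 * is returned to ipc_kernel_map and the tail of the second page is zeroed
 * before the copy object is created.
 */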
8239
8240 static boolean_t
8241 get_zone_info(
8242 zone_t z,
8243 mach_zone_name_t *zn,
8244 mach_zone_info_t *zi)
8245 {
8246 struct zone zcopy;
8247 vm_size_t cached = 0;
8248
8249 assert(z != ZONE_NULL);
8250 zone_lock(z);
8251 if (!z->z_self) {
8252 zone_unlock(z);
8253 return FALSE;
8254 }
8255 zcopy = *z;
8256 if (z->z_pcpu_cache) {
8257 zpercpu_foreach(zc, z->z_pcpu_cache) {
8258 cached += zc->zc_alloc_cur + zc->zc_free_cur;
8259 cached += zc->zc_depot.zd_full * zc_mag_size();
8260 }
8261 }
8262 zone_unlock(z);
8263
8264 if (zn != NULL) {
8265 /*
8266 * Append kalloc heap name to zone name (if zone is used by kalloc)
8267 */
8268 char temp_zone_name[MAX_ZONE_NAME] = "";
8269 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8270 zone_heap_name(z), z->z_name);
8271
8272 /* assuming here the name data is static */
8273 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8274 strlen(temp_zone_name) + 1);
8275 }
8276
8277 if (zi != NULL) {
8278 *zi = (mach_zone_info_t) {
8279 .mzi_count = zone_count_allocated(&zcopy) - cached,
8280 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8281 // max_size for zprint is now high-watermark of pages used
8282 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8283 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8284 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8285 .mzi_exhaustible = (uint64_t)zcopy.exhaustible,
8286 };
8287 if (zcopy.z_chunk_pages == 0) {
8288 /* this is a zcache */
8289 zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8290 }
8291 zpercpu_foreach(zs, zcopy.z_stats) {
8292 zi->mzi_sum_size += zs->zs_mem_allocated;
8293 }
8294 if (zcopy.collectable) {
8295 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8296 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8297 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8298 }
8299 }
8300
8301 return TRUE;
8302 }
8303
8304 /* mach_memory_info entitlement */
8305 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8306
8307 /* macro needed to rate-limit mach_memory_info */
8308 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8309
8310 /* declarations necessary to call kauth_cred_issuser() */
8311 struct ucred;
8312 extern int kauth_cred_issuser(struct ucred *);
8313 extern struct ucred *kauth_cred_get(void);
8314
8315 static kern_return_t
8316 mach_memory_info_internal(
8317 host_t host,
8318 mach_zone_name_array_t *namesp,
8319 mach_msg_type_number_t *namesCntp,
8320 mach_zone_info_array_t *infop,
8321 mach_msg_type_number_t *infoCntp,
8322 mach_memory_info_array_t *memoryInfop,
8323 mach_msg_type_number_t *memoryInfoCntp,
8324 bool redact_info);
8325
8326 static kern_return_t
8327 mach_memory_info_security_check(bool redact_info)
8328 {
8329 /* If not root, only allow redacted calls. */
8330 if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8331 return KERN_NO_ACCESS;
8332 }
8333
8334 if (PE_srd_fused) {
8335 return KERN_SUCCESS;
8336 }
8337
8338 /* If does not have the memory entitlement, fail. */
8339 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8340 if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8341 return KERN_DENIED;
8342 }
8343
8344 /*
8345 * On release non-mac arm devices, allow mach_memory_info
8346 * to be called twice per day per boot. memorymaintenanced
8347 * calls it once per day, which leaves room for a sysdiagnose.
8348 * Allow redacted version to be called without rate limit.
8349 */
8350
8351 if (!redact_info) {
8352 static uint64_t first_call = 0, second_call = 0;
8353 uint64_t now = 0;
8354 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8355
8356 if (!first_call) {
8357 first_call = now;
8358 } else if (!second_call) {
8359 second_call = now;
8360 } else if (first_call + NSEC_DAY > now) {
8361 return KERN_DENIED;
8362 } else if (first_call + NSEC_DAY < now) {
8363 first_call = now;
8364 second_call = 0;
8365 }
8366 }
8367 #endif
8368
8369 return KERN_SUCCESS;
8370 }
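/*
 * Net effect of the check above on configurations with
 * CONFIG_DEBUGGER_FOR_ZONE_INFO: at most two unredacted calls are
 * admitted per NSEC_DAY window; once the first recorded call is more
 * than a day old the window slides and the budget resets. Redacted
 * calls are not rate limited here.
 */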
8371
8372 kern_return_t
8373 mach_zone_info(
8374 mach_port_t host_port,
8375 mach_zone_name_array_t *namesp,
8376 mach_msg_type_number_t *namesCntp,
8377 mach_zone_info_array_t *infop,
8378 mach_msg_type_number_t *infoCntp)
8379 {
8380 return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8381 }
8382
8383 kern_return_t
8384 mach_memory_info(
8385 mach_port_t host_port,
8386 mach_zone_name_array_t *namesp,
8387 mach_msg_type_number_t *namesCntp,
8388 mach_zone_info_array_t *infop,
8389 mach_msg_type_number_t *infoCntp,
8390 mach_memory_info_array_t *memoryInfop,
8391 mach_msg_type_number_t *memoryInfoCntp)
8392 {
8393 bool redact_info = false;
8394 host_t host = HOST_NULL;
8395
8396 host = convert_port_to_host_priv(host_port);
8397 if (host == HOST_NULL) {
8398 redact_info = true;
8399 host = convert_port_to_host(host_port);
8400 }
8401
8402 return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8403 }
8404
8405 static void
8406 zone_info_redact(mach_zone_info_t *zi)
8407 {
8408 zi->mzi_cur_size = 0;
8409 zi->mzi_max_size = 0;
8410 zi->mzi_alloc_size = 0;
8411 zi->mzi_sum_size = 0;
8412 zi->mzi_collectable = 0;
8413 }
8414
8415 static bool
8416 zone_info_needs_to_be_coalesced(int zone_index)
8417 {
8418 zone_security_flags_t zsflags = zone_security_array[zone_index];
8419 if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8420 return true;
8421 }
8422 return false;
8423 }
8424
8425 static bool
8426 zone_info_find_coalesce_zone(
8427 mach_zone_info_t *zi,
8428 mach_zone_info_t *info,
8429 int *coalesce,
8430 int coalesce_count,
8431 int *coalesce_index)
8432 {
8433 for (int i = 0; i < coalesce_count; i++) {
8434 if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8435 *coalesce_index = coalesce[i];
8436 return true;
8437 }
8438 }
8439
8440 return false;
8441 }
8442
8443 static void
8444 zone_info_coalesce(
8445 mach_zone_info_t *info,
8446 int coalesce_index,
8447 mach_zone_info_t *zi)
8448 {
8449 info[coalesce_index].mzi_count += zi->mzi_count;
8450 }
8451
8452 static kern_return_t
8453 mach_memory_info_internal(
8454 host_t host,
8455 mach_zone_name_array_t *namesp,
8456 mach_msg_type_number_t *namesCntp,
8457 mach_zone_info_array_t *infop,
8458 mach_msg_type_number_t *infoCntp,
8459 mach_memory_info_array_t *memoryInfop,
8460 mach_msg_type_number_t *memoryInfoCntp,
8461 bool redact_info)
8462 {
8463 mach_zone_name_t *names;
8464 vm_offset_t names_addr;
8465 vm_size_t names_size;
8466
8467 mach_zone_info_t *info;
8468 vm_offset_t info_addr;
8469 vm_size_t info_size;
8470
8471 int *coalesce;
8472 vm_offset_t coalesce_addr;
8473 vm_size_t coalesce_size;
8474 int coalesce_count = 0;
8475
8476 mach_memory_info_t *memory_info;
8477 vm_offset_t memory_info_addr;
8478 vm_size_t memory_info_size;
8479 vm_size_t memory_info_vmsize;
8480 unsigned int num_info;
8481
8482 unsigned int max_zones, used_zones, i;
8483 mach_zone_name_t *zn;
8484 mach_zone_info_t *zi;
8485 kern_return_t kr;
8486
8487 uint64_t zones_collectable_bytes = 0;
8488
8489 if (host == HOST_NULL) {
8490 return KERN_INVALID_HOST;
8491 }
8492
8493 kr = mach_memory_info_security_check(redact_info);
8494 if (kr != KERN_SUCCESS) {
8495 return kr;
8496 }
8497
8498 /*
8499 * We assume that zones aren't freed once allocated.
8500 * We won't pick up any zones that are allocated later.
8501 */
8502
8503 max_zones = os_atomic_load(&num_zones, relaxed);
8504
8505 names_size = round_page(max_zones * sizeof *names);
8506 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8507 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8508 if (kr != KERN_SUCCESS) {
8509 return kr;
8510 }
8511 names = (mach_zone_name_t *) names_addr;
8512
8513 info_size = round_page(max_zones * sizeof *info);
8514 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8515 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8516 if (kr != KERN_SUCCESS) {
8517 kmem_free(ipc_kernel_map,
8518 names_addr, names_size);
8519 return kr;
8520 }
8521 info = (mach_zone_info_t *) info_addr;
8522
8523 if (redact_info) {
8524 coalesce_size = round_page(max_zones * sizeof *coalesce);
8525 kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8526 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8527 if (kr != KERN_SUCCESS) {
8528 kmem_free(ipc_kernel_map,
8529 names_addr, names_size);
8530 kmem_free(ipc_kernel_map,
8531 info_addr, info_size);
8532 return kr;
8533 }
8534 coalesce = (int *)coalesce_addr;
8535 }
8536
8537 zn = &names[0];
8538 zi = &info[0];
8539
8540 used_zones = 0;
8541 for (i = 0; i < max_zones; i++) {
8542 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8543 continue;
8544 }
8545
8546 if (!redact_info) {
8547 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8548 zn++;
8549 zi++;
8550 used_zones++;
8551 continue;
8552 }
8553
8554 zone_info_redact(zi);
8555 if (!zone_info_needs_to_be_coalesced(i)) {
8556 zn++;
8557 zi++;
8558 used_zones++;
8559 continue;
8560 }
8561
8562 int coalesce_index;
8563 bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8564 coalesce, coalesce_count, &coalesce_index);
8565
8566 /* Didn't find a zone to coalesce */
8567 if (!found_coalesce_zone) {
8568 /* Updates the zone name */
8569 __nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8570 snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8571 (int)zi->mzi_elem_size);
8572
8573 coalesce[coalesce_count] = used_zones;
8574 coalesce_count++;
8575 zn++;
8576 zi++;
8577 used_zones++;
8578 continue;
8579 }
8580
8581 zone_info_coalesce(info, coalesce_index, zi);
8582 }
8583
8584 if (redact_info) {
8585 kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8586 }
8587
8588 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8589 *namesCntp = used_zones;
8590
8591 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8592 *infoCntp = used_zones;
8593
8594 num_info = 0;
8595 memory_info_addr = 0;
8596
8597 if (memoryInfop && memoryInfoCntp) {
8598 vm_map_copy_t copy;
8599 num_info = vm_page_diagnose_estimate();
8600 memory_info_size = num_info * sizeof(*memory_info);
8601 memory_info_vmsize = round_page(memory_info_size);
8602 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8603 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8604 if (kr != KERN_SUCCESS) {
8605 return kr;
8606 }
8607
8608 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8609 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8610 assert(kr == KERN_SUCCESS);
8611
8612 memory_info = (mach_memory_info_t *) memory_info_addr;
8613 vm_page_diagnose(memory_info, num_info, zones_collectable_bytes, redact_info);
8614
8615 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8616 assert(kr == KERN_SUCCESS);
8617
8618 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8619 (vm_map_size_t)memory_info_size, TRUE, &copy);
8620 assert(kr == KERN_SUCCESS);
8621
8622 *memoryInfop = (mach_memory_info_t *) copy;
8623 *memoryInfoCntp = num_info;
8624 }
8625
8626 return KERN_SUCCESS;
8627 }
8628
8629 kern_return_t
8630 mach_zone_info_for_zone(
8631 host_priv_t host,
8632 mach_zone_name_t name,
8633 mach_zone_info_t *infop)
8634 {
8635 zone_t zone_ptr;
8636
8637 if (host == HOST_NULL) {
8638 return KERN_INVALID_HOST;
8639 }
8640
8641 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8642 if (!PE_i_can_has_debugger(NULL)) {
8643 return KERN_INVALID_HOST;
8644 }
8645 #endif
8646
8647 if (infop == NULL) {
8648 return KERN_INVALID_ARGUMENT;
8649 }
8650
8651 zone_ptr = ZONE_NULL;
8652 zone_foreach(z) {
8653 /*
8654 * Append kalloc heap name to zone name (if zone is used by kalloc)
8655 */
8656 char temp_zone_name[MAX_ZONE_NAME] = "";
8657 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8658 zone_heap_name(z), z->z_name);
8659
8660 /* Find the requested zone by name */
8661 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8662 zone_ptr = z;
8663 break;
8664 }
8665 }
8666
8667 /* No zones found with the requested zone name */
8668 if (zone_ptr == ZONE_NULL) {
8669 return KERN_INVALID_ARGUMENT;
8670 }
8671
8672 if (get_zone_info(zone_ptr, NULL, infop)) {
8673 return KERN_SUCCESS;
8674 }
8675 return KERN_FAILURE;
8676 }
8677
8678 kern_return_t
8679 mach_zone_info_for_largest_zone(
8680 host_priv_t host,
8681 mach_zone_name_t *namep,
8682 mach_zone_info_t *infop)
8683 {
8684 if (host == HOST_NULL) {
8685 return KERN_INVALID_HOST;
8686 }
8687
8688 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8689 if (!PE_i_can_has_debugger(NULL)) {
8690 return KERN_INVALID_HOST;
8691 }
8692 #endif
8693
8694 if (namep == NULL || infop == NULL) {
8695 return KERN_INVALID_ARGUMENT;
8696 }
8697
8698 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8699 return KERN_SUCCESS;
8700 }
8701 return KERN_FAILURE;
8702 }
8703
8704 uint64_t
8705 get_zones_collectable_bytes(void)
8706 {
8707 uint64_t zones_collectable_bytes = 0;
8708 mach_zone_info_t zi;
8709
8710 zone_foreach(z) {
8711 if (get_zone_info(z, NULL, &zi)) {
8712 zones_collectable_bytes +=
8713 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8714 }
8715 }
8716
8717 return zones_collectable_bytes;
8718 }
8719
8720 kern_return_t
8721 mach_zone_get_zlog_zones(
8722 host_priv_t host,
8723 mach_zone_name_array_t *namesp,
8724 mach_msg_type_number_t *namesCntp)
8725 {
8726 #if ZALLOC_ENABLE_LOGGING
8727 unsigned int max_zones, logged_zones, i;
8728 kern_return_t kr;
8729 zone_t zone_ptr;
8730 mach_zone_name_t *names;
8731 vm_offset_t names_addr;
8732 vm_size_t names_size;
8733
8734 if (host == HOST_NULL) {
8735 return KERN_INVALID_HOST;
8736 }
8737
8738 if (namesp == NULL || namesCntp == NULL) {
8739 return KERN_INVALID_ARGUMENT;
8740 }
8741
8742 max_zones = os_atomic_load(&num_zones, relaxed);
8743
8744 names_size = round_page(max_zones * sizeof *names);
8745 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8746 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8747 if (kr != KERN_SUCCESS) {
8748 return kr;
8749 }
8750 names = (mach_zone_name_t *) names_addr;
8751
8752 zone_ptr = ZONE_NULL;
8753 logged_zones = 0;
8754 for (i = 0; i < max_zones; i++) {
8755 zone_t z = &(zone_array[i]);
8756 assert(z != ZONE_NULL);
8757
8758 /* Copy out the zone name if zone logging is enabled */
8759 if (z->z_btlog) {
8760 get_zone_info(z, &names[logged_zones], NULL);
8761 logged_zones++;
8762 }
8763 }
8764
8765 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8766 *namesCntp = logged_zones;
8767
8768 return KERN_SUCCESS;
8769
8770 #else /* ZALLOC_ENABLE_LOGGING */
8771 #pragma unused(host, namesp, namesCntp)
8772 return KERN_FAILURE;
8773 #endif /* ZALLOC_ENABLE_LOGGING */
8774 }
8775
8776 kern_return_t
8777 mach_zone_get_btlog_records(
8778 host_priv_t host,
8779 mach_zone_name_t name,
8780 zone_btrecord_array_t *recsp,
8781 mach_msg_type_number_t *numrecs)
8782 {
8783 #if ZALLOC_ENABLE_LOGGING
8784 zone_btrecord_t *recs;
8785 kern_return_t kr;
8786 vm_address_t addr;
8787 vm_size_t size;
8788 zone_t zone_ptr;
8789 vm_map_copy_t copy;
8790
8791 if (host == HOST_NULL) {
8792 return KERN_INVALID_HOST;
8793 }
8794
8795 if (recsp == NULL || numrecs == NULL) {
8796 return KERN_INVALID_ARGUMENT;
8797 }
8798
8799 zone_ptr = ZONE_NULL;
8800 zone_foreach(z) {
8801 /*
8802 * Append kalloc heap name to zone name (if zone is used by kalloc)
8803 */
8804 char temp_zone_name[MAX_ZONE_NAME] = "";
8805 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8806 zone_heap_name(z), z->z_name);
8807
8808 /* Find the requested zone by name */
8809 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8810 zone_ptr = z;
8811 break;
8812 }
8813 }
8814
8815 /* No zones found with the requested zone name */
8816 if (zone_ptr == ZONE_NULL) {
8817 return KERN_INVALID_ARGUMENT;
8818 }
8819
8820 /* Logging not turned on for the requested zone */
8821 if (!zone_ptr->z_btlog) {
8822 return KERN_FAILURE;
8823 }
8824
8825 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8826 if (kr != KERN_SUCCESS) {
8827 return kr;
8828 }
8829
8830 addr = (vm_address_t)recs;
8831 size = sizeof(zone_btrecord_t) * *numrecs;
8832
8833 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8834 assert(kr == KERN_SUCCESS);
8835
8836 *recsp = (zone_btrecord_t *)copy;
8837 return KERN_SUCCESS;
8838
8839 #else /* !ZALLOC_ENABLE_LOGGING */
8840 #pragma unused(host, name, recsp, numrecs)
8841 return KERN_FAILURE;
8842 #endif /* !ZALLOC_ENABLE_LOGGING */
8843 }
8844
8845
8846 kern_return_t
8847 mach_zone_force_gc(
8848 host_t host)
8849 {
8850 if (host == HOST_NULL) {
8851 return KERN_INVALID_HOST;
8852 }
8853
8854 #if DEBUG || DEVELOPMENT
8855 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8856 /* Callout to buffer cache GC to drop elements in the apfs zones */
8857 if (consider_buffer_cache_collect != NULL) {
8858 (void)(*consider_buffer_cache_collect)(0);
8859 }
8860 zone_gc(ZONE_GC_DRAIN);
8861 #endif /* DEBUG || DEVELOPMENT */
8862 return KERN_SUCCESS;
8863 }
8864
8865 zone_t
8866 zone_find_largest(uint64_t *zone_size)
8867 {
8868 zone_t largest_zone = 0;
8869 uint64_t largest_zone_size = 0;
8870 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8871 if (zone_size) {
8872 *zone_size = largest_zone_size;
8873 }
8874 return largest_zone;
8875 }
8876
8877 void
8878 zone_get_stats(
8879 zone_t zone,
8880 struct zone_basic_stats *stats)
8881 {
8882 stats->zbs_avail = zone->z_elems_avail;
8883
8884 stats->zbs_alloc_fail = 0;
8885 zpercpu_foreach(zs, zone->z_stats) {
8886 stats->zbs_alloc_fail += zs->zs_alloc_fail;
8887 }
8888
8889 stats->zbs_cached = 0;
8890 if (zone->z_pcpu_cache) {
8891 zpercpu_foreach(zc, zone->z_pcpu_cache) {
8892 stats->zbs_cached += zc->zc_alloc_cur +
8893 zc->zc_free_cur +
8894 zc->zc_depot.zd_full * zc_mag_size();
8895 }
8896 }
8897
8898 stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
8899
8900 /*
8901 * Since we don't take any locks, deal with possible inconsistencies
8902 * as the counters may have changed.
8903 */
8904 if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
8905 &stats->zbs_alloc)) {
8906 stats->zbs_avail = stats->zbs_free;
8907 stats->zbs_alloc = 0;
8908 }
8909 }
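/*
 * Illustrative reading of the overflow check above: the snapshot is
 * lockless, so zbs_free can transiently exceed zbs_avail. When that
 * happens os_sub_overflow() trips and the stats are clamped to
 * "everything free, nothing allocated" instead of reporting a huge
 * bogus allocation count.
 */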
8910
8911 #endif /* !ZALLOC_TEST */
8912 #pragma mark zone creation, configuration, destruction
8913 #if !ZALLOC_TEST
8914
8915 static zone_t
8916 zone_init_defaults(zone_id_t zid)
8917 {
8918 zone_t z = &zone_array[zid];
8919
8920 z->z_wired_max = ~0u;
8921 z->collectable = true;
8922
8923 hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
8924 hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
8925 zone_depot_init(&z->z_recirc);
8926 return z;
8927 }
8928
8929 void
8930 zone_set_exhaustible(zone_t zone, vm_size_t nelems)
8931 {
8932 zone_lock(zone);
8933 zone->exhaustible = true;
8934 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8935 zone_unlock(zone);
8936 }
8937
8938 void
8939 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
8940 {
8941 zone_t zone = zov.zov_zone;
8942
8943 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
8944 zone = zov.zov_view->zv_zone;
8945 } else {
8946 zone = zov.zov_zone;
8947 }
8948
8949 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
8950 }
8951
8952 /**
8953 * @function zone_create_find
8954 *
8955 * @abstract
8956 * Finds an unused zone for the given name and element size.
8957 *
8958 * @param name the zone name
8959 * @param size the element size (including redzones, ...)
8960 * @param flags the flags passed to @c zone_create*
8961 * @param zid_inout the desired zone ID or ZONE_ID_ANY
8962 *
8963 * @returns a zone to initialize further.
8964 */
8965 static zone_t
8966 zone_create_find(
8967 const char *name,
8968 vm_size_t size,
8969 zone_create_flags_t flags,
8970 zone_id_t *zid_inout)
8971 {
8972 zone_id_t nzones, zid = *zid_inout;
8973 zone_t z;
8974
8975 simple_lock(&all_zones_lock, &zone_locks_grp);
8976
8977 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
8978 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
8979
8980 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
8981 /*
8982 * The first time around, make sure the reserved zone IDs
8983 * have an initialized lock as zone_index_foreach() will
8984 * enumerate them.
8985 */
8986 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
8987 zone_init_defaults(nzones++);
8988 }
8989
8990 os_atomic_store(&num_zones, nzones, release);
8991 }
8992
8993 if (zid != ZONE_ID_ANY) {
8994 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
8995 panic("zone_create: invalid desired zone ID %d for %s",
8996 zid, name);
8997 }
8998 if (flags & ZC_DESTRUCTIBLE) {
8999 panic("zone_create: ID %d (%s) must be permanent", zid, name);
9000 }
9001 if (zone_array[zid].z_self) {
9002 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
9003 }
9004 z = &zone_array[zid];
9005 } else {
9006 if (flags & ZC_DESTRUCTIBLE) {
9007 /*
9008 * If possible, find a previously zdestroy'ed zone in the
9009 * zone_array that we can reuse.
9010 */
9011 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
9012 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
9013 z = &zone_array[i];
9014
9015 /*
9016 * If the zone name and the element size are the
9017 * same, we can just reuse the old zone struct.
9018 */
9019 if (strcmp(z->z_name, name) ||
9020 zone_elem_outer_size(z) != size) {
9021 continue;
9022 }
9023 bitmap_clear(zone_destroyed_bitmap, i);
9024 z->z_destroyed = false;
9025 z->z_self = z;
9026 zid = (zone_id_t)i;
9027 goto out;
9028 }
9029 }
9030
9031 zid = nzones++;
9032 z = zone_init_defaults(zid);
9033
9034 /*
9035 * The release barrier pairs with the acquire in
9036 * zone_index_foreach() and makes sure that enumeration loops
9037 * always see an initialized zone lock.
9038 */
9039 os_atomic_store(&num_zones, nzones, release);
9040 }
9041
9042 out:
9043 num_zones_in_use++;
9044 simple_unlock(&all_zones_lock);
9045
9046 *zid_inout = zid;
9047 return z;
9048 }
9049
9050 __abortlike
9051 static void
9052 zone_create_panic(const char *name, const char *f1, const char *f2)
9053 {
9054 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
9055 name, f1, f2);
9056 }
9057 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
9058 if ((flags) & forbidden_flag) { \
9059 zone_create_panic(name, #current_flag, #forbidden_flag); \
9060 }
9061
9062 /*
9063 * Adjusts the size of the element based on minimum size, alignment
9064 * and kasan redzones
9065 */
9066 static vm_size_t
9067 zone_elem_adjust_size(
9068 const char *name __unused,
9069 vm_size_t elem_size,
9070 zone_create_flags_t flags __unused,
9071 uint16_t *redzone __unused)
9072 {
9073 vm_size_t size;
9074
9075 /*
9076 * Adjust element size for minimum size and pointer alignment
9077 */
9078 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
9079 if (size < ZONE_MIN_ELEM_SIZE) {
9080 size = ZONE_MIN_ELEM_SIZE;
9081 }
9082
9083 #if KASAN_CLASSIC
9084 /*
9085 * Expand the zone allocation size to include the redzones.
9086 *
9087 * For page-multiple zones add a full guard page because they
9088 * likely require alignment.
9089 */
9090 uint16_t redzone_tmp;
9091 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
9092 redzone_tmp = 0;
9093 } else if ((size & PAGE_MASK) == 0) {
9094 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
9095 panic("zone_create: zone %s can't provide more than PAGE_SIZE"
9096 "alignment", name);
9097 }
9098 redzone_tmp = PAGE_SIZE;
9099 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
9100 redzone_tmp = 0;
9101 } else {
9102 redzone_tmp = KASAN_GUARD_SIZE;
9103 }
9104 size += redzone_tmp;
9105 if (redzone) {
9106 *redzone = redzone_tmp;
9107 }
9108 #endif
9109 return size;
9110 }
9111
9112 /*
9113 * Returns the allocation chunk size that has the least fragmentation
9114 */
9115 static vm_size_t
9116 zone_get_min_alloc_granule(
9117 vm_size_t elem_size,
9118 zone_create_flags_t flags)
9119 {
9120 vm_size_t alloc_granule = PAGE_SIZE;
9121 if (flags & ZC_PERCPU) {
9122 alloc_granule = PAGE_SIZE * zpercpu_count();
9123 if (PAGE_SIZE % elem_size > 256) {
9124 panic("zone_create: per-cpu zone has too much fragmentation");
9125 }
9126 } else if (flags & ZC_READONLY) {
9127 alloc_granule = PAGE_SIZE;
9128 } else if ((elem_size & PAGE_MASK) == 0) {
9129 /* zero fragmentation by definition */
9130 alloc_granule = elem_size;
9131 } else if (alloc_granule % elem_size == 0) {
9132 /* zero fragmentation by definition */
9133 } else {
9134 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9135 vm_size_t alloc_tmp = PAGE_SIZE;
9136 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9137
9138 #if __arm64__
9139 /*
9140 * Increase chunk size to 48K for sizes larger than 4K on 16k
9141 * machines, so as to reduce internal fragmentation for kalloc
9142 * zones with sizes 12K and 24K.
9143 */
9144 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9145 max_chunk_size = 48 * 1024;
9146 }
9147 #endif
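/*
 * Worked example (illustrative, 4K pages): for a 3K element, one page
 * wastes 1K (25%); two pages still waste 2K (25%); three pages waste
 * nothing, so the loop below settles on a 12K granule.
 */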
9148 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9149 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9150 if (frag_tmp < frag) {
9151 frag = frag_tmp;
9152 alloc_granule = alloc_tmp;
9153 }
9154 }
9155 }
9156 return alloc_granule;
9157 }
9158
9159 vm_size_t
9160 zone_get_early_alloc_size(
9161 const char *name __unused,
9162 vm_size_t elem_size,
9163 zone_create_flags_t flags,
9164 vm_size_t min_elems)
9165 {
9166 vm_size_t adjusted_size, alloc_granule, chunk_elems;
9167
9168 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9169 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9170 chunk_elems = alloc_granule / adjusted_size;
9171
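/*
 * Illustrative example: with an adjusted element size of 512 bytes and a
 * 4K granule, chunk_elems is 8, so a request for 20 elements rounds up
 * to 3 chunks, i.e. 12K of early memory.
 */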
9172 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9173 }
9174
9175 zone_t
9176 zone_create_ext(
9177 const char *name,
9178 vm_size_t size,
9179 zone_create_flags_t flags,
9180 zone_id_t zid,
9181 void (^extra_setup)(zone_t))
9182 {
9183 zone_security_flags_t *zsflags;
9184 uint16_t redzone;
9185 zone_t z;
9186
9187 if (size > ZONE_MAX_ALLOC_SIZE) {
9188 panic("zone_create: element size too large: %zd", (size_t)size);
9189 }
9190
9191 if (size < 2 * sizeof(vm_size_t)) {
9192 /* Elements are too small for kasan. */
9193 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9194 }
9195
9196 size = zone_elem_adjust_size(name, size, flags, &redzone);
9197
9198 /*
9199 * Allocate the zone slot, return early if we found an older match.
9200 */
9201 z = zone_create_find(name, size, flags, &zid);
9202 if (__improbable(z->z_self)) {
9203 /* We found a zone to reuse */
9204 return z;
9205 }
9206 zsflags = &zone_security_array[zid];
9207
9208 /*
9209 * Initialize the zone properly.
9210 */
9211
9212 /*
9213 * If the kernel is post lockdown, copy the zone name passed in.
9214 * Else simply maintain a pointer to the name string as it can only
9215 * be a core XNU zone (no unloadable kext exists before lockdown).
9216 */
9217 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9218 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9219 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9220 strlcpy(buf, name, nsz);
9221 z->z_name = buf;
9222 } else {
9223 z->z_name = name;
9224 }
9225 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9226 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9227 } else {
9228 /*
9229 * zone_init() hasn't run yet, use the storage provided by
9230 * zone_stats_startup(), and zone_init() will replace it
9231 * with the final value once the PERCPU zone exists.
9232 */
9233 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9234 }
9235
9236 if (flags & ZC_OBJ_CACHE) {
9237 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9238 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9239 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9240 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9241
9242 z->z_elem_size = (uint16_t)size;
9243 z->z_chunk_pages = 0;
9244 z->z_quo_magic = 0;
9245 z->z_align_magic = 0;
9246 z->z_chunk_elems = 0;
9247 z->z_elem_offs = 0;
9248 z->no_callout = true;
9249 zsflags->z_lifo = true;
9250 } else {
9251 vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9252
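/*
 * Illustrative example (assuming the Z_MAGIC_* helpers yield constants
 * used for fast division/alignment checks by the element size): with a
 * 4K granule and 160-byte elements, a chunk holds 25 elements and the
 * leftover 96 bytes (plus any redzone) become the offset of the first
 * element.
 */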
9253 z->z_elem_size = (uint16_t)(size - redzone);
9254 z->z_chunk_pages = (uint16_t)atop(alloc);
9255 z->z_quo_magic = Z_MAGIC_QUO(size);
9256 z->z_align_magic = Z_MAGIC_ALIGNED(size);
9257 if (flags & ZC_PERCPU) {
9258 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9259 z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9260 } else {
9261 z->z_chunk_elems = (uint16_t)(alloc / size);
9262 z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9263 }
9264 }
9265
9266 /*
9267 * Handle KPI flags
9268 */
9269
9270 /* ZC_CACHING applied after all configuration is done */
9271 if (flags & ZC_NOCACHING) {
9272 z->z_nocaching = true;
9273 }
9274
9275 if (flags & ZC_READONLY) {
9276 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9277 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9278 assert(zid <= ZONE_ID__LAST_RO);
9279 #if ZSECURITY_CONFIG(READ_ONLY)
9280 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9281 #endif
9282 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9283 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9284 assert(size <= PAGE_SIZE);
9285 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9286 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9287 (uint32_t)size, zone_heap_name(z), z->z_name);
9288 }
9289 }
9290
9291 if (flags & ZC_PERCPU) {
9292 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9293 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9294 z->z_percpu = true;
9295 }
9296 if (flags & ZC_NOGC) {
9297 z->collectable = false;
9298 }
9299 /*
9300 * Handle ZC_NOENCRYPT from xnu only
9301 */
9302 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9303 zsflags->z_noencrypt = true;
9304 }
9305 if (flags & ZC_NOCALLOUT) {
9306 z->no_callout = true;
9307 }
9308 if (flags & ZC_DESTRUCTIBLE) {
9309 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9310 z->z_destructible = true;
9311 }
9312 /*
9313 * Handle Internal flags
9314 */
9315 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9316 if (flags & ZC_PGZ_USE_GUARDS) {
9317 /*
9318 * Try to turn on guard pages only for zones
9319 * with a chance of OOB.
9320 */
9321 if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9322 zsflags->z_pgz_use_guards = true;
9323 }
9324 z->z_pgz_use_guards = true;
9325 }
9326 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9327 if (!(flags & ZC_NOTBITAG)) {
9328 z->z_tbi_tag = true;
9329 }
9330 if (flags & ZC_KALLOC_TYPE) {
9331 zsflags->z_kalloc_type = true;
9332 }
9333 if (flags & ZC_VM) {
9334 zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9335 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9336 }
9337 if (flags & ZC_DATA) {
9338 zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9339 }
9340 #if KASAN_CLASSIC
9341 if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9342 z->z_kasan_quarantine = true;
9343 }
9344 z->z_kasan_redzone = redzone;
9345 #endif /* KASAN_CLASSIC */
9346 #if KASAN_FAKESTACK
9347 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9348 z->z_kasan_fakestacks = true;
9349 }
9350 #endif /* KASAN_FAKESTACK */
9351
9352 /*
9353 * Then if there's extra tuning, do it
9354 */
9355 if (extra_setup) {
9356 extra_setup(z);
9357 }
9358
9359 /*
9360 * Configure debugging features
9361 */
9362 #if CONFIG_PROB_GZALLOC
9363 if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9364 pgz_zone_init(z);
9365 }
9366 #endif
9367 if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9368 #if ZALLOC_ENABLE_LOGGING
9369 /*
9370 * Check for and set up zone leak detection
9371 * if requested via boot-args.
9372 */
9373 zone_setup_logging(z);
9374 #endif /* ZALLOC_ENABLE_LOGGING */
9375 #if KASAN_TBI
9376 zone_setup_kasan_logging(z);
9377 #endif /* KASAN_TBI */
9378 }
9379
9380 #if VM_TAG_SIZECLASSES
9381 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9382 static uint16_t sizeclass_idx;
9383
9384 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9385 z->z_uses_tags = true;
9386 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9387 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9388 z->z_tags_sizeclass = sizeclass_idx++;
9389 } else {
9390 uint16_t i = 0;
9391 for (; i < sizeclass_idx; i++) {
9392 if (size == zone_tags_sizeclasses[i]) {
9393 z->z_tags_sizeclass = i;
9394 break;
9395 }
9396 }
9397
9398 /*
9399 * Size class wasn't found, add it to zone_tags_sizeclasses
9400 */
9401 if (i == sizeclass_idx) {
9402 assert(i < VM_TAG_SIZECLASSES);
9403 zone_tags_sizeclasses[i] = (uint16_t)size;
9404 z->z_tags_sizeclass = sizeclass_idx++;
9405 }
9406 }
9407 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9408 }
9409 #endif
9410
9411 /*
9412 * Finally, fixup properties based on security policies, boot-args, ...
9413 */
9414 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9415 /*
9416 * We use LIFO in the data map because workloads like networking tend
9417 * to rotate through allocations very quickly, with sometimes exploding
9418 * working sets, and a FIFO policy could cause massive TLB thrashing
9419 * with rather dramatic performance impacts.
9421 */
9422 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9423 zsflags->z_lifo = true;
9424 }
9425
9426 if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9427 /*
9428 * No zone made before zone_init() can have ZC_CACHING set.
9429 */
9430 assert(zc_magazine_zone);
9431 zone_enable_caching(z);
9432 }
9433
9434 zone_lock(z);
9435 z->z_self = z;
9436 zone_unlock(z);
9437
9438 return z;
9439 }
9440
9441 void
9442 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9443 {
9444 zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9445 }
9446
9447 zone_id_t
9448 zone_get_sig_eq(zone_t zone)
9449 {
9450 return zone_security_array[zone_index(zone)].z_sig_eq;
9451 }
9452
9453 void
9454 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9455 {
9456 /* moving to SMR must be done before the zone has ever been used */
9457 assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9458 assert(!zone_security_array[zone_index(zone)].z_lifo);
9459
9460 if (!zone->z_pcpu_cache) {
9461 zone_enable_caching(zone);
9462 }
9463
9464 zone_lock(zone);
9465
9466 zpercpu_foreach(it, zone->z_pcpu_cache) {
9467 it->zc_smr = smr;
9468 it->zc_free = free_cb;
9469 }
9470 zone->z_smr = true;
9471
9472 zone_unlock(zone);
9473 }
9474
9475 __startup_func
9476 void
9477 zone_create_startup(struct zone_create_startup_spec *spec)
9478 {
9479 zone_t z;
9480
9481 z = zone_create_ext(spec->z_name, spec->z_size,
9482 spec->z_flags, spec->z_zid, spec->z_setup);
9483 if (spec->z_var) {
9484 *spec->z_var = z;
9485 }
9486 }
9487
9488 /*
9489 * The first 4 fields of a zone_view and a zone alias, so that the zone_or_view_t
9490 * union works. Trust, but verify.
9491 */
9492 #define zalloc_check_zov_alias(f1, f2) \
9493 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9494 zalloc_check_zov_alias(z_self, zv_zone);
9495 zalloc_check_zov_alias(z_stats, zv_stats);
9496 zalloc_check_zov_alias(z_name, zv_name);
9497 zalloc_check_zov_alias(z_views, zv_next);
9498 #undef zalloc_check_zov_alias
9499
9500 __startup_func
9501 void
9502 zone_view_startup_init(struct zone_view_startup_spec *spec)
9503 {
9504 struct kalloc_heap *heap = NULL;
9505 zone_view_t zv = spec->zv_view;
9506 zone_t z;
9507 zone_security_flags_t zsflags;
9508
9509 switch (spec->zv_heapid) {
9510 case KHEAP_ID_DATA_BUFFERS:
9511 heap = KHEAP_DATA_BUFFERS;
9512 break;
9513 default:
9514 heap = NULL;
9515 }
9516
9517 if (heap) {
9518 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9519 } else {
9520 z = *spec->zv_zone;
9521 assert(spec->zv_size <= zone_elem_inner_size(z));
9522 }
9523
9524 assert(z);
9525
9526 zv->zv_zone = z;
9527 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9528 zv->zv_next = z->z_views;
9529 zsflags = zone_security_config(z);
9530 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9531 /*
9532 * count the raw view for zones not in a heap,
9533 * kalloc_heap_init() already counts it for its members.
9534 */
9535 zone_view_count += 2;
9536 } else {
9537 zone_view_count += 1;
9538 }
9539 z->z_views = zv;
9540 }
9541
9542 zone_t
9543 zone_create(
9544 const char *name,
9545 vm_size_t size,
9546 zone_create_flags_t flags)
9547 {
9548 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9549 }
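/*
 * Typical usage sketch (illustrative; the zone name and struct widget are
 * hypothetical, not defined in this file):
 *
 *	static zone_t widget_zone;
 *
 *	widget_zone = zone_create("example.widget", sizeof(struct widget),
 *	    ZC_ZFREE_CLEARMEM);
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(widget_zone, w);
 */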
9550
9551 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9552
9553 zone_id_t
9554 zone_create_ro(
9555 const char *name,
9556 vm_size_t size,
9557 zone_create_flags_t flags,
9558 zone_create_ro_id_t zc_ro_id)
9559 {
9560 assert(zc_ro_id <= ZC_RO_ID__LAST);
9561 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9562 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9563 return reserved_zid;
9564 }
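/*
 * Illustrative usage sketch (ZC_RO_ID_FOO, struct foo_ro and the flags
 * shown are hypothetical):
 *
 *	zone_id_t zid = zone_create_ro("example.foo_ro", sizeof(struct foo_ro),
 *	    ZC_ZFREE_CLEARMEM, ZC_RO_ID_FOO);
 *	struct foo_ro *p = zalloc_ro(zid, Z_WAITOK);
 *	zalloc_ro_update_elem(zid, p, &initial_value);
 *	...
 *	zfree_ro(zid, p);
 */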
9565
9566 zone_t
9567 zinit(
9568 vm_size_t size, /* the size of an element */
9569 vm_size_t max, /* maximum memory to use */
9570 vm_size_t alloc __unused, /* allocation size */
9571 const char *name) /* a name for the zone */
9572 {
9573 zone_t z = zone_create(name, size, ZC_DESTRUCTIBLE);
9574 z->z_wired_max = zone_alloc_pages_for_nelems(z, max / size);
9575 return z;
9576 }
9577
9578 void
9579 zdestroy(zone_t z)
9580 {
9581 unsigned int zindex = zone_index(z);
9582 zone_security_flags_t zsflags = zone_security_array[zindex];
9583
9584 current_thread()->options |= TH_OPT_ZONE_PRIV;
9585 lck_mtx_lock(&zone_gc_lock);
9586
9587 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9588
9589 lck_mtx_unlock(&zone_gc_lock);
9590 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9591
9592 zone_lock(z);
9593
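/*
 * If the submap isn't sequestered, hand any cached VA chunks back to the
 * submap.  The zone lock is dropped around kmem_free() because the VM may
 * need to take its own locks (presumed lock-ordering reason).
 */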
9594 if (!zone_submap_is_sequestered(zsflags)) {
9595 while (!zone_pva_is_null(z->z_pageq_va)) {
9596 struct zone_page_metadata *meta;
9597
9598 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9599 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9600 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9601 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9602 zone_unlock(z);
9603 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9604 ptoa(z->z_chunk_pages));
9605 zone_lock(z);
9606 }
9607 }
9608
9609 #if !KASAN_CLASSIC
9610 /* Assert that all counts are zero */
9611 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9612 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9613 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9614 zone_heap_name(z), z->z_name);
9615 }
9616
9617 /* consistency check: make sure everything is indeed empty */
9618 assert(zone_pva_is_null(z->z_pageq_empty));
9619 assert(zone_pva_is_null(z->z_pageq_partial));
9620 assert(zone_pva_is_null(z->z_pageq_full));
9621 if (!zone_submap_is_sequestered(zsflags)) {
9622 assert(zone_pva_is_null(z->z_pageq_va));
9623 }
9624 #endif
9625
9626 zone_unlock(z);
9627
9628 simple_lock(&all_zones_lock, &zone_locks_grp);
9629
9630 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9631 /* Mark the zone as empty in the bitmap */
9632 bitmap_set(zone_destroyed_bitmap, zindex);
9633 num_zones_in_use--;
9634 assert(num_zones_in_use > 0);
9635
9636 simple_unlock(&all_zones_lock);
9637 }
9638
9639 #endif /* !ZALLOC_TEST */
9640 #pragma mark zalloc module init
9641 #if !ZALLOC_TEST
9642
9643 /*
9644 * Initialize the "zone of zones" which uses fixed memory allocated
9645 * earlier in memory initialization. zone_bootstrap is called
9646 * before zone_init.
9647 */
9648 __startup_func
9649 void
9650 zone_bootstrap(void)
9651 {
9652 #if DEBUG || DEVELOPMENT
9653 #if __x86_64__
9654 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9655 /*
9656 * rdar://79781535 Disable early gaps while running kernPOST on Intel
9657 * the fp faulting code gets triggered and deadlocks.
9658 */
9659 zone_caching_disabled = 1;
9660 }
9661 #endif /* __x86_64__ */
9662 #endif /* DEBUG || DEVELOPMENT */
9663
9664 /* Validate struct zone_packed_virtual_address expectations */
9665 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9666 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9667 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9668 }
9669
9670 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9671 if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9672 /*
9673 * Scale zc_mag_size() per machine.
9674 *
9675 * - wide machines get 128B magazines to avoid all false sharing
9676 * - smaller machines but with enough RAM get a bit bigger
9677 * buckets (empirically affects networking performance)
9678 */
9679 if (zpercpu_early_count >= 10) {
9680 _zc_mag_size = 14;
9681 } else if ((sane_size >> 30) >= 4) {
9682 _zc_mag_size = 10;
9683 }
9684 }
9685
9686 /*
9687 * Initialize random used to scramble early allocations
9688 */
9689 zpercpu_foreach_cpu(cpu) {
9690 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9691 }
9692
9693 #if CONFIG_PROB_GZALLOC
9694 /*
9695 * Set pgz_sample_counter on the boot CPU so that we do not sample
9696 * any allocation until PGZ has been properly setup (in pgz_init()).
9697 */
9698 *PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9699 #endif /* CONFIG_PROB_GZALLOC */
9700
9701 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9702 /*
9703 * Randomly assign zones to one of the 4 general submaps,
9704 * and pick whether they allocate from the beginning
9705 * or the end of it.
9706 *
9707 * A lot of OOB exploitation relies on precise interleaving
9708 * of specific types in the heap.
9709 *
9710 * Woops, you can't guarantee that anymore.
9711 */
9712 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9713 uint32_t r = zalloc_random_uniform32(0,
9714 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9715
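/*
 * Reading of the encoding (descriptive): bit 0 of r decides whether the
 * zone allocates from the end of its submap, and the remaining bits
 * offset the zone's default submap index across the general submaps.
 */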
9716 zone_security_array[i].z_submap_from_end = (r & 1);
9717 zone_security_array[i].z_submap_idx += (r >> 1);
9718 }
9719 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9720
9721 thread_call_setup_with_options(&zone_expand_callout,
9722 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9723 THREAD_CALL_OPTIONS_ONCE);
9724
9725 thread_call_setup_with_options(&zone_trim_callout,
9726 zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9727 THREAD_CALL_OPTIONS_ONCE);
9728 }
9729
9730 #define ZONE_GUARD_SIZE (64UL << 10)
9731
9732 __startup_func
9733 static void
9734 zone_tunables_fixup(void)
9735 {
9736 int wdt = 0;
9737
9738 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9739 if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9740 PE_parse_boot_argn("pgz1", NULL, 0)) {
9741 /*
9742 * if pgz1= was used, but pgz= was not,
9743 * then the more specific pgz1 takes precedence.
9744 */
9745 pgz_all = false;
9746 }
9747 #endif
9748
9749 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9750 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9751 }
9752 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9753 !PE_parse_boot_argn("zet", NULL, 0)) {
9754 zone_exhausted_timeout = -1;
9755 }
9756 }
9757 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9758
9759 __startup_func
9760 static void
9761 zone_submap_init(
9762 mach_vm_offset_t *submap_min,
9763 zone_submap_idx_t idx,
9764 uint64_t zone_sub_map_numer,
9765 uint64_t *remaining_denom,
9766 vm_offset_t *remaining_size)
9767 {
9768 vm_map_create_options_t vmco;
9769 vm_map_address_t addr;
9770 vm_offset_t submap_start, submap_end;
9771 vm_size_t submap_size;
9772 vm_map_t submap;
9773 vm_prot_t prot = VM_PROT_DEFAULT;
9774 vm_prot_t prot_max = VM_PROT_ALL;
9775 kern_return_t kr;
9776
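/*
 * Each submap receives its share of whatever VA remains; recomputing
 * against the shrinking remainder (updated at the end of this function)
 * keeps page-rounding error from accumulating across submaps.
 */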
9777 submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9778 *remaining_denom);
9779 submap_start = *submap_min;
9780
9781 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9782 vm_offset_t submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9783 submap_start += submap_padding;
9784 submap_size = pmap_ro_zone_align(submap_size);
9785 assert(*remaining_size >= (submap_padding + submap_size));
9786 *remaining_size -= submap_padding;
9787 *submap_min = submap_start;
9788 }
9789
9790 submap_end = submap_start + submap_size;
9791 if (idx == Z_SUBMAP_IDX_VM) {
9792 vm_packing_verify_range("vm_compressor",
9793 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9794 vm_packing_verify_range("vm_page",
9795 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9796 }
9797
9798 vmco = VM_MAP_CREATE_NEVER_FAULTS;
9799 if (!zone_submap_is_sequestered(idx)) {
9800 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9801 }
9802
9803 vm_map_will_allocate_early_map(&zone_submaps[idx]);
9804 submap = kmem_suballoc(kernel_map, submap_min, submap_size, vmco,
9805 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
9806 VM_KERN_MEMORY_ZONE).kmr_submap;
9807
9808 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9809 zone_info.zi_ro_range.min_address = submap_start;
9810 zone_info.zi_ro_range.max_address = submap_end;
9811 prot_max = prot = VM_PROT_NONE;
9812 }
9813
9814 addr = submap_start;
9815 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9816 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9817 kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9818 if (kr != KERN_SUCCESS) {
9819 panic("ksubmap[%s]: failed to make first entry (%d)",
9820 zone_submaps_names[idx], kr);
9821 }
9822
9823 addr = submap_end - ZONE_GUARD_SIZE / 2;
9824 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9825 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9826 kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9827 if (kr != KERN_SUCCESS) {
9828 panic("ksubmap[%s]: failed to make last entry (%d)",
9829 zone_submaps_names[idx], kr);
9830 }
9831
9832 #if DEBUG || DEVELOPMENT
9833 printf("zone_init: map %-5s %p:%p (%u%c)\n",
9834 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
9835 mach_vm_size_pretty(submap_size), mach_vm_size_unit(submap_size));
9836 #endif /* DEBUG || DEVELOPMENT */
9837
9838 zone_submaps[idx] = submap;
9839 *submap_min = submap_end;
9840 *remaining_size -= submap_size;
9841 *remaining_denom -= zone_sub_map_numer;
9842 }
9843
9844 static inline void
9845 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
9846 {
9847 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
9848 pva->packed_address += delta;
9849 }
9850 }
9851
9852 /*
9853 * Allocate metadata array and migrate bootstrap initial metadata and memory.
9854 */
9855 __startup_func
9856 static void
9857 zone_metadata_init(void)
9858 {
9859 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
9860 vm_map_entry_t first;
9861
9862 struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
9863 vm_size_t early_sz;
9864 vm_offset_t reloc_base;
9865
9866 /*
9867 * Step 1: Allocate the metadata + bitmaps range
9868 *
9869 * Allocations can't be smaller than 8 bytes, so the bitmap overhead is
9870 * at most 128 bits (16 bytes) per 1 KB of physical memory (16 MB per 1 GB).
9871 *
9872 * Preallocate for that worst case to avoid weird panics later.
9873 */
9874 vm_map_will_allocate_early_map(&zone_meta_map);
9875 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
9876 zone_meta_size + zone_bits_size + zone_xtra_size,
9877 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9878 VM_KERN_MEMORY_ZONE, &zone_meta_map);
9879 meta_r.min_address += ZONE_GUARD_SIZE;
9880 meta_r.max_address -= ZONE_GUARD_SIZE;
9881 if (zone_xtra_size) {
9882 xtra_r.max_address = meta_r.max_address;
9883 meta_r.max_address -= zone_xtra_size;
9884 xtra_r.min_address = meta_r.max_address;
9885 } else {
9886 xtra_r.min_address = xtra_r.max_address = 0;
9887 }
9888 bits_r.max_address = meta_r.max_address;
9889 meta_r.max_address -= zone_bits_size;
9890 bits_r.min_address = meta_r.max_address;
9891
9892 #if DEBUG || DEVELOPMENT
9893 printf("zone_init: metadata %p:%p (%u%c)\n",
9894 (void *)meta_r.min_address, (void *)meta_r.max_address,
9895 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
9896 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
9897 printf("zone_init: metabits %p:%p (%u%c)\n",
9898 (void *)bits_r.min_address, (void *)bits_r.max_address,
9899 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
9900 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
9901 printf("zone_init: extra %p:%p (%u%c)\n",
9902 (void *)xtra_r.min_address, (void *)xtra_r.max_address,
9903 mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
9904 mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
9905 #endif /* DEBUG || DEVELOPMENT */
9906
9907 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
9908 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
9909
9910 /*
9911 * Step 2: Install new ranges.
9912 * Relocate metadata and bits.
9913 */
9914 early_r = zone_info.zi_map_range;
9915 early_sz = mach_vm_range_size(&early_r);
9916
9917 zone_info.zi_map_range = zone_map_range;
9918 zone_info.zi_meta_range = meta_r;
9919 zone_info.zi_bits_range = bits_r;
9920 zone_info.zi_xtra_range = xtra_r;
9921 zone_info.zi_meta_base = (struct zone_page_metadata *)meta_r.min_address -
9922 zone_pva_from_addr(zone_map_range.min_address).packed_address;
9923
9924 vm_map_lock(vm_map);
9925 first = vm_map_first_entry(vm_map);
9926 reloc_base = first->vme_end;
9927 first->vme_end += early_sz;
9928 vm_map->size += early_sz;
9929 vm_map_unlock(vm_map);
9930
9931 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
9932 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
9933 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
9934 /* the page delta may be negative, so this shift must sign-extend */
9935 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
9936
9937 zone_meta_populate(reloc_base, early_sz);
9938 memcpy(new_meta, early_meta,
9939 atop(early_sz) * sizeof(struct zone_page_metadata));
9940 for (uint32_t i = 0; i < atop(early_sz); i++) {
9941 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
9942 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
9943 }
9944
9945 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
9946 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
9947
9948 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
9949 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
9950 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
9951 }
9952
9953 zba_populate(0, false);
9954 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
9955 zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
9956
9957 /*
9958 * Step 3: Relocate the bootstrap VM structs
9959 * (including rewriting their content).
9960 */
9961
9962 #if __x86_64__
9963 kernel_memory_populate(reloc_base, early_sz,
9964 KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL,
9965 VM_KERN_MEMORY_OSFMK);
9966 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
9967 #else
9968 for (vm_address_t addr = early_r.min_address;
9969 addr < early_r.max_address; addr += PAGE_SIZE) {
9970 pmap_paddr_t pa = kvtophys(trunc_page(addr));
9971 __assert_only kern_return_t kr;
9972
9973 kr = pmap_enter_options_addr(kernel_pmap, addr + reloc_delta,
9974 pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
9975 0, NULL);
9976 assert(kr == KERN_SUCCESS);
9977 }
9978 #endif
9979
9980 #if KASAN
9981 kasan_notify_address(reloc_base, early_sz);
9982 #if KASAN_TBI
9983 kasan_tbi_copy_tags(reloc_base, early_r.min_address, early_sz);
9984 #endif /* KASAN_TBI */
9985 #endif /* KASAN */
9986
9987 vm_map_relocate_early_maps(reloc_delta);
9988
9989 for (uint32_t i = 0; i < atop(early_sz); i++) {
9990 zone_id_t zid = new_meta[i].zm_index;
9991 zone_t z = &zone_array[zid];
9992 vm_size_t esize = zone_elem_outer_size(z);
9993 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
9994 vm_address_t addr;
9995
9996 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
9997 continue;
9998 }
9999
10000 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
10001 if (zone_meta_is_free(&new_meta[i], eidx)) {
10002 continue;
10003 }
10004
10005 addr = base + eidx * esize;
10006 #if KASAN_CLASSIC
10007 kasan_alloc(addr,
10008 zone_elem_inner_size(z), zone_elem_inner_size(z),
10009 zone_elem_redzone(z), false,
10010 __builtin_frame_address(0));
10011 #endif
10012 vm_map_relocate_early_elem(zid, addr, reloc_delta);
10013 }
10014 }
10015
10016 #if !__x86_64__
10017 pmap_remove(kernel_pmap, early_r.min_address, early_r.max_address);
10018 #endif
10019 }
10020
10021 __startup_data
10022 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
10023 #if ZSECURITY_CONFIG(READ_ONLY)
10024 [Z_SUBMAP_IDX_VM] = 15,
10025 [Z_SUBMAP_IDX_READ_ONLY] = 5,
10026 #else
10027 [Z_SUBMAP_IDX_VM] = 20,
10028 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10029 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10030 [Z_SUBMAP_IDX_GENERAL_0] = 15,
10031 [Z_SUBMAP_IDX_GENERAL_1] = 15,
10032 [Z_SUBMAP_IDX_GENERAL_2] = 15,
10033 [Z_SUBMAP_IDX_GENERAL_3] = 15,
10034 [Z_SUBMAP_IDX_DATA] = 20,
10035 #else
10036 [Z_SUBMAP_IDX_GENERAL_0] = 60,
10037 [Z_SUBMAP_IDX_DATA] = 20,
10038 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
10039 };
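/*
 * The ratios are percentages of the zone map VA; zone_submap_ratios_denom()
 * below asserts that the selected configuration sums to 100.
 */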
10040
10041 __startup_func
10042 static inline uint16_t
10043 zone_submap_ratios_denom(void)
10044 {
10045 uint16_t denom = 0;
10046
10047 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10048 denom += submap_ratios[idx];
10049 }
10050
10051 assert(denom == 100);
10052
10053 return denom;
10054 }
10055
10056 __startup_func
10057 static inline vm_offset_t
10058 zone_restricted_va_max(void)
10059 {
10060 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
10061 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
10062
10063 return trunc_page(MIN(compressor_max, vm_page_max));
10064 }
10065
10066 __startup_func
10067 static void
10068 zone_set_map_sizes(void)
10069 {
10070 vm_size_t zsize;
10071 vm_size_t zsizearg;
10072
10073 /*
10074 * Compute the physical limits for the zone map
10075 */
10076
10077 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
10078 zsize = zsizearg * (1024ULL * 1024);
10079 } else {
10080 /* Set target zone size as 1/4 of physical memory, then grow it by half (3/8 total) */
10081 zsize = (vm_size_t)(sane_size >> 2);
10082 zsize += zsize >> 1;
10083 }
10084
10085 if (zsize < CONFIG_ZONE_MAP_MIN) {
10086 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
10087 }
10088 if (zsize > sane_size >> 1) {
10089 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
10090 }
10091 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
10092 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
10093 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
10094 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
10095 zsize = ZONE_MAP_MAX;
10096 }
10097
10098 zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
10099
10100
10101 /*
10102 * Declare restrictions on zone max
10103 */
10104 vm_offset_t vm_submap_size = round_page(
10105 (submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10106 zone_submap_ratios_denom());
10107
10108 #if CONFIG_PROB_GZALLOC
10109 vm_submap_size += pgz_get_size();
10110 #endif /* CONFIG_PROB_GZALLOC */
10111 if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10112 &zone_map_range.min_address)) {
10113 zone_map_range.min_address = 0;
10114 }
10115
10116 zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10117 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10118
10119 static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10120 ZBA_PTR_MASK + 1);
10121 zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10122 (CHAR_BIT * KALLOC_MINSIZE));
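/*
 * Worst case is one bitmap bit per KALLOC_MINSIZE bytes of wired zone
 * memory, i.e. roughly 1/128th of the wired limit assuming a 16-byte
 * minimum element size (illustrative figure).
 */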
10123
10124 #if VM_TAG_SIZECLASSES
10125 if (zone_tagging_on) {
10126 zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10127 }
10128 if (zba_xtra_shift) {
10129 /*
10130 * if we need the extra space range, then limit the size of the
10131 * bitmaps to something reasonable instead of a theoretical
10132 * worst case scenario of all zones being for the smallest
10133 * allocation granule, in order to avoid fake VA pressure on
10134 * other parts of the system.
10135 */
10136 zone_bits_size = round_page(zone_bits_size / 8);
10137 zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10138 }
10139 #endif /* VM_TAG_SIZECLASSES */
10140 }
10141 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
10142
10143 /*
10144 * Can't use zone_info.zi_map_range at this point as it is being used to
10145 * store the range of early pmap memory that was stolen to bootstrap the
10146 * necessary VM zones.
10147 */
10148 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10149 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10150 return zone_meta_size + zone_bits_size + zone_xtra_size;
10151 });
10152
10153 /*
10154 * Global initialization of Zone Allocator.
10155 * Runs after zone_bootstrap.
10156 */
10157 __startup_func
10158 static void
10159 zone_init(void)
10160 {
10161 vm_size_t remaining_size = ZONE_MAP_VA_SIZE;
10162 mach_vm_offset_t submap_min = 0;
10163 uint64_t denom = zone_submap_ratios_denom();
10164 /*
10165 * And now allocate the various pieces of VA and submaps.
10166 */
10167
10168 submap_min = zone_map_range.min_address;
10169
10170 #if CONFIG_PROB_GZALLOC
10171 vm_size_t pgz_size = pgz_get_size();
10172
10173 vm_map_will_allocate_early_map(&pgz_submap);
10174 zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10175 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10176 VM_KERN_MEMORY_ZONE, &pgz_submap);
10177
10178 submap_min += pgz_size;
10179 remaining_size -= pgz_size;
10180 #if DEBUG || DEVELOPMENT
10181 printf("zone_init: pgzalloc %p:%p (%u%c) [%d slots]\n",
10182 (void *)zone_info.zi_pgz_range.min_address,
10183 (void *)zone_info.zi_pgz_range.max_address,
10184 mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10185 pgz_slots);
10186 #endif /* DEBUG || DEVELOPMENT */
10187 #endif /* CONFIG_PROB_GZALLOC */
10188
10189 /*
10190 * Allocate the submaps
10191 */
10192 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10193 if (submap_ratios[idx] == 0) {
10194 zone_submaps[idx] = VM_MAP_NULL;
10195 } else {
10196 zone_submap_init(&submap_min, idx, submap_ratios[idx],
10197 &denom, &remaining_size);
10198 }
10199 }
10200
10201 zone_metadata_init();
10202
10203 #if VM_TAG_SIZECLASSES
10204 if (zone_tagging_on) {
10205 vm_allocation_zones_init();
10206 }
10207 #endif /* VM_TAG_SIZECLASSES */
10208
10209 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10210 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10211
10212 (void)zone_create_ext("vm.permanent", 1, kma_flags,
10213 ZONE_ID_PERMANENT, ^(zone_t z) {
10214 z->z_permanent = true;
10215 z->z_elem_size = 1;
10216 });
10217 (void)zone_create_ext("vm.permanent.percpu", 1,
10218 kma_flags | ZC_PERCPU, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10219 z->z_permanent = true;
10220 z->z_elem_size = 1;
10221 });
10222
10223 zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10224 zc_mag_size() * sizeof(vm_offset_t),
10225 ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10226 zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10227
10228 /*
10229 * Now migrate the startup statistics into their final storage,
10230 * and enable logging for early zones (that zone_create_ext() skipped).
10231 */
10232 int cpu = cpu_number();
10233 zone_index_foreach(idx) {
10234 zone_t tz = &zone_array[idx];
10235
10236 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10237 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10238
10239 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10240 tz->z_stats = zs;
10241 }
10242 if (tz->z_self == tz) {
10243 #if ZALLOC_ENABLE_LOGGING
10244 zone_setup_logging(tz);
10245 #endif /* ZALLOC_ENABLE_LOGGING */
10246 #if KASAN_TBI
10247 zone_setup_kasan_logging(tz);
10248 #endif /* KASAN_TBI */
10249 }
10250 }
10251 }
10252 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10253
10254 void
10255 zalloc_iokit_lockdown(void)
10256 {
10257 zone_share_always = false;
10258 }
10259
10260 void
10261 zalloc_first_proc_made(void)
10262 {
10263 zone_caching_disabled = 0;
10264 zone_early_thres_mul = 1;
10265 }
10266
10267 __startup_func
10268 vm_offset_t
10269 zone_early_mem_init(vm_size_t size)
10270 {
10271 vm_offset_t mem;
10272
10273 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10274
10275 /*
10276 * The memory backing the zones used early to bring up the VM is stolen here.
10277 *
10278 * When the zone subsystem is actually initialized,
10279 * zone_metadata_init() will be called, and those pages
10280 * and the elements they contain, will be relocated into
10281 * the VM submap (even for architectures when those zones
10282 * do not live there).
10283 */
10284 #if __x86_64__
10285 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10286 mem = (vm_offset_t)zone_early_pages_to_cram;
10287 #else
10288 mem = (vm_offset_t)pmap_steal_memory(size, PAGE_SIZE);
10289 #endif
10290
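/*
 * Bias the metadata base so that indexing it with the packed page number
 * of an early address lands on the matching slot of the startup metadata
 * array (the same trick zone_metadata_init() later applies to the final
 * array).
 */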
10291 zone_info.zi_meta_base = zone_early_meta_array_startup -
10292 zone_pva_from_addr(mem).packed_address;
10293 zone_info.zi_map_range.min_address = mem;
10294 zone_info.zi_map_range.max_address = mem + size;
10295
10296 zone_info.zi_bits_range = (struct mach_vm_range){
10297 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10298 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10299 sizeof(zba_chunk_startup),
10300 };
10301
10302 zba_meta()->zbam_left = 1;
10303 zba_meta()->zbam_right = 1;
10304 zba_init_chunk(0, false);
10305
10306 return mem;
10307 }
10308
10309 #endif /* !ZALLOC_TEST */
10310 #pragma mark - tests
10311 #if DEBUG || DEVELOPMENT
10312
10313 /*
10314 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10315 * thread goes through at a time.
10316 *
10317 * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10318 * before zdestroy()), which could lead us to run out of zones.
10319 */
10320 static bool any_zone_test_running = FALSE;
10321
10322 static uintptr_t *
10323 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10324 {
10325 vm_offset_t elem_size = zone_elem_outer_size(z);
10326 vm_offset_t base;
10327 struct zone_page_metadata *meta;
10328
10329 while (!zone_pva_is_null(page_index)) {
10330 base = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10331 meta = zone_pva_to_meta(page_index);
10332
10333 if (meta->zm_inline_bitmap) {
10334 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10335 uint32_t map = meta[i].zm_bitmap;
10336
10337 for (; map; map &= map - 1) {
10338 *elems++ = INSTANCE_PUT(base +
10339 elem_size * __builtin_clz(map));
10340 }
10341 base += elem_size * 32;
10342 }
10343 } else {
10344 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10345 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10346 for (size_t i = 0; i < (1u << order); i++) {
10347 uint64_t map = bits[i];
10348
10349 for (; map; map &= map - 1) {
10350 *elems++ = INSTANCE_PUT(base +
10351 elem_size * __builtin_clzll(map));
10352 }
10353 base += elem_size * 64;
10354 }
10355 }
10356
10357 page_index = meta->zm_page_next;
10358 }
10359 return elems;
10360 }
10361
10362 kern_return_t
10363 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10364 {
10365 zone_t zone = NULL;
10366 uintptr_t * array;
10367 uintptr_t * next;
10368 uintptr_t element;
10369 uint32_t idx, count, found;
10370 uint32_t nobtcount;
10371 uint32_t elemSize;
10372 size_t maxElems;
10373
10374 zone_foreach(z) {
10375 if (!z->z_name) {
10376 continue;
10377 }
10378 if (!strncmp(zoneName, z->z_name, nameLen)) {
10379 zone = z;
10380 break;
10381 }
10382 }
10383 if (zone == NULL) {
10384 return KERN_INVALID_NAME;
10385 }
10386
10387 elemSize = (uint32_t)zone_elem_inner_size(zone);
10388 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10389
10390 array = kalloc_type_tag(vm_offset_t, maxElems, VM_KERN_MEMORY_DIAG);
10391 if (array == NULL) {
10392 return KERN_RESOURCE_SHORTAGE;
10393 }
10394
10395 zone_lock(zone);
10396
10397 next = array;
10398 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10399 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10400 count = (uint32_t)(next - array);
10401
10402 zone_unlock(zone);
10403
10404 zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10405 assert(found <= count);
10406
10407 for (idx = 0; idx < count; idx++) {
10408 element = array[idx];
10409 if (kInstanceFlagReferenced & element) {
10410 continue;
10411 }
10412 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10413 }
10414
10415 #if ZALLOC_ENABLE_LOGGING
10416 if (zone->z_btlog && !corruption_debug_flag) {
10417 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10418 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10419 btlog_copy_backtraces_for_elements(zone->z_btlog,
10420 (vm_address_t *)array, &count, elemSize, proc);
10421 }
10422 #endif /* ZALLOC_ENABLE_LOGGING */
10423
10424 for (nobtcount = idx = 0; idx < count; idx++) {
10425 element = array[idx];
10426 if (!element) {
10427 continue;
10428 }
10429 if (kInstanceFlagReferenced & element) {
10430 continue;
10431 }
10432 nobtcount++;
10433 }
10434 if (nobtcount) {
10435 proc(nobtcount, elemSize, BTREF_NULL);
10436 }
10437
10438 kfree_type(vm_offset_t, maxElems, array);
10439 return KERN_SUCCESS;
10440 }
10441
10442 static int
10443 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10444 {
10445 zone_security_flags_t zsflags;
10446 uint32_t x = 4;
10447 uint32_t *test_ptr;
10448
10449 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10450 printf("zone_ro_basic_test: Test already running.\n");
10451 return EALREADY;
10452 }
10453
10454 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10455
10456 for (int i = 0; i < 3; i++) {
10457 #if ZSECURITY_CONFIG(READ_ONLY)
10458 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10459 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10460 printf("zone_ro_basic_test: create a sub-page size zone\n");
10461
10462 printf("zone_ro_basic_test: verify flags were set\n");
10463 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10464
10465 printf("zone_ro_basic_test: zalloc an element\n");
10466 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10467 assert(test_ptr);
10468
10469 printf("zone_ro_basic_test: verify we can't write to it\n");
10470 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10471
10472 x = 4;
10473 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10474 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10475 assert(test_ptr);
10476 assert(*(uint32_t*)test_ptr == x);
10477
10478 x = 5;
10479 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10480 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10481 assert(test_ptr);
10482 assert(*(uint32_t*)test_ptr == x);
10483
10484 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10485 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10486
10487 printf("zone_ro_basic_test: free elem\n");
10488 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10489 assert(!test_ptr);
10490 #else
10491 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10492
10493 printf("zone_ro_basic_test: verify flags were set\n");
10494 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10495
10496 printf("zone_ro_basic_test: zalloc an element\n");
10497 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10498 assert(test_ptr);
10499
10500 x = 4;
10501 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10502 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10503 assert(test_ptr);
10504 assert(*(uint32_t*)test_ptr == x);
10505
10506 x = 5;
10507 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10508 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10509 assert(test_ptr);
10510 assert(*(uint32_t*)test_ptr == x);
10511
10512 printf("zone_ro_basic_test: free elem\n");
10513 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10514 assert(!test_ptr);
10515 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10516 }
10517
10518 printf("zone_ro_basic_test: garbage collection\n");
10519 zone_gc(ZONE_GC_DRAIN);
10520
10521 printf("zone_ro_basic_test: Test passed\n");
10522
10523 *out = 1;
10524 os_atomic_store(&any_zone_test_running, false, relaxed);
10525 return 0;
10526 }
10527 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10528
10529 static int
10530 zone_basic_test_run(__unused int64_t in, int64_t *out)
10531 {
10532 static zone_t test_zone_ptr = NULL;
10533
10534 unsigned int i = 0, max_iter = 5;
10535 void * test_ptr;
10536 zone_t test_zone;
10537 int rc = 0;
10538
10539 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10540 printf("zone_basic_test: Test already running.\n");
10541 return EALREADY;
10542 }
10543
10544 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10545
10546 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10547 do {
10548 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10549 assert(test_zone);
10550
10551 #if KASAN_CLASSIC
10552 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10553 #else
10554 if (test_zone->z_elems_free != 0)
10555 #endif
10556 {
10557 printf("zone_basic_test: free count is not zero\n");
10558 rc = EIO;
10559 goto out;
10560 }
10561
10562 if (test_zone_ptr == NULL) {
10563 /* Stash the zone pointer returned on the first zinit() */
10564 printf("zone_basic_test: zone created for the first time\n");
10565 test_zone_ptr = test_zone;
10566 } else if (test_zone != test_zone_ptr) {
10567 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10568 rc = EIO;
10569 goto out;
10570 }
10571
10572 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10573 zfree(test_zone, test_ptr);
10574
10575 zdestroy(test_zone);
10576 i++;
10577
10578 printf("zone_basic_test: Iteration %d successful\n", i);
10579 } while (i < max_iter);
10580
10581 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
10582 /* test Z_VA_SEQUESTER */
10583 {
10584 zone_t test_pcpu_zone;
10585 kern_return_t kr;
10586 int idx, num_allocs = 8;
10587 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10588 void *allocs[num_allocs];
10589 void **allocs_pcpu;
10590 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10591
10592 test_zone = zone_create("test_zone_sysctl", elem_size,
10593 ZC_DESTRUCTIBLE);
10594 assert(test_zone);
10595
10596 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10597 ZC_DESTRUCTIBLE | ZC_PERCPU);
10598 assert(test_pcpu_zone);
10599
10600 for (idx = 0; idx < num_allocs; idx++) {
10601 allocs[idx] = zalloc(test_zone);
10602 assert(NULL != allocs[idx]);
10603 printf("alloc[%d] %p\n", idx, allocs[idx]);
10604 }
10605 for (idx = 0; idx < num_allocs; idx++) {
10606 zfree(test_zone, allocs[idx]);
10607 }
10608 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10609
10610 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10611 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10612 assert(kr == KERN_SUCCESS);
10613
10614 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10615 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10616 Z_WAITOK | Z_ZERO);
10617 assert(NULL != allocs_pcpu[idx]);
10618 }
10619 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10620 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10621 }
10622 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10623
10624 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10625 vm_page_wire_count, vm_page_free_count,
10626 100L * phys_pages / zone_pages_wired_max);
10627 zone_gc(ZONE_GC_DRAIN);
10628 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10629 vm_page_wire_count, vm_page_free_count,
10630 100L * phys_pages / zone_pages_wired_max);
10631
10632 unsigned int allva = 0;
10633
10634 zone_foreach(z) {
10635 zone_lock(z);
10636 allva += z->z_wired_cur;
10637 if (zone_pva_is_null(z->z_pageq_va)) {
10638 zone_unlock(z);
10639 continue;
10640 }
10641 unsigned count = 0;
10642 uint64_t size;
10643 zone_pva_t pg = z->z_pageq_va;
10644 struct zone_page_metadata *page_meta;
10645 while (pg.packed_address) {
10646 page_meta = zone_pva_to_meta(pg);
10647 count += z->z_percpu ? 1 : z->z_chunk_pages;
10648 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10649 count -= page_meta->zm_page_index;
10650 }
10651 pg = page_meta->zm_page_next;
10652 }
10653 size = zone_size_wired(z);
10654 if (!size) {
10655 size = 1;
10656 }
10657 printf("%s%s: seq %d, res %d, %qd %%\n",
10658 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10659 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10660 zone_unlock(z);
10661 }
10662
10663 printf("total va: %d\n", allva);
10664
10665 assert(zone_pva_is_null(test_zone->z_pageq_empty));
10666 assert(zone_pva_is_null(test_zone->z_pageq_partial));
10667 assert(!zone_pva_is_null(test_zone->z_pageq_va));
10668 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10669 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10670 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10671
10672 for (idx = 0; idx < num_allocs; idx++) {
10673 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10674 }
10675
10676 /* make sure the zone is still usable after a GC */
10677
10678 for (idx = 0; idx < num_allocs; idx++) {
10679 allocs[idx] = zalloc(test_zone);
10680 assert(allocs[idx]);
10681 printf("alloc[%d] %p\n", idx, allocs[idx]);
10682 }
10683 for (idx = 0; idx < num_allocs; idx++) {
10684 zfree(test_zone, allocs[idx]);
10685 }
10686
10687 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10688 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10689 Z_WAITOK | Z_ZERO);
10690 assert(NULL != allocs_pcpu[idx]);
10691 }
10692 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10693 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10694 }
10695
10696 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10697
10698 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10699
10700 zdestroy(test_zone);
10701 zdestroy(test_pcpu_zone);
10702 }
10703 #endif /* KASAN_CLASSIC */
10704
10705 printf("zone_basic_test: Test passed\n");
10706
10707
10708 *out = 1;
10709 out:
10710 os_atomic_store(&any_zone_test_running, false, relaxed);
10711 return rc;
10712 }
10713 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
10714
10715 struct zone_stress_obj {
10716 TAILQ_ENTRY(zone_stress_obj) zso_link;
10717 };
10718
10719 struct zone_stress_ctx {
10720 thread_t zsc_leader;
10721 lck_mtx_t zsc_lock;
10722 zone_t zsc_zone;
10723 uint64_t zsc_end;
10724 uint32_t zsc_workers;
10725 };
10726
10727 static void
10728 zone_stress_worker(void *arg, wait_result_t __unused wr)
10729 {
10730 struct zone_stress_ctx *ctx = arg;
10731 bool leader = ctx->zsc_leader == current_thread();
10732 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10733 struct zone_bool_gen bg = { };
10734 struct zone_stress_obj *obj;
10735 uint32_t allocs = 0;
10736
10737 random_bool_init(&bg.zbg_bg);
10738
10739 do {
10740 for (int i = 0; i < 2000; i++) {
10741 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10742 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10743 switch (what) {
10744 case 0:
10745 case 1:
10746 if (allocs < 10000) {
10747 obj = zalloc(ctx->zsc_zone);
10748 TAILQ_INSERT_HEAD(&head, obj, zso_link);
10749 allocs++;
10750 }
10751 break;
10752 case 2:
10753 case 3:
10754 if (allocs < 10000) {
10755 obj = zalloc(ctx->zsc_zone);
10756 TAILQ_INSERT_TAIL(&head, obj, zso_link);
10757 allocs++;
10758 }
10759 break;
10760 case 4:
10761 if (leader) {
10762 zone_gc(ZONE_GC_DRAIN);
10763 }
10764 break;
10765 case 5:
10766 case 6:
10767 if (!TAILQ_EMPTY(&head)) {
10768 obj = TAILQ_FIRST(&head);
10769 TAILQ_REMOVE(&head, obj, zso_link);
10770 zfree(ctx->zsc_zone, obj);
10771 allocs--;
10772 }
10773 break;
10774 case 7:
10775 if (!TAILQ_EMPTY(&head)) {
10776 obj = TAILQ_LAST(&head, zone_stress_head);
10777 TAILQ_REMOVE(&head, obj, zso_link);
10778 zfree(ctx->zsc_zone, obj);
10779 allocs--;
10780 }
10781 break;
10782 }
10783 }
10784 } while (mach_absolute_time() < ctx->zsc_end);
10785
10786 while (!TAILQ_EMPTY(&head)) {
10787 obj = TAILQ_FIRST(&head);
10788 TAILQ_REMOVE(&head, obj, zso_link);
10789 zfree(ctx->zsc_zone, obj);
10790 }
10791
10792 lck_mtx_lock(&ctx->zsc_lock);
10793 if (--ctx->zsc_workers == 0) {
10794 thread_wakeup(ctx);
10795 } else if (leader) {
10796 while (ctx->zsc_workers) {
10797 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10798 THREAD_UNINT);
10799 }
10800 }
10801 lck_mtx_unlock(&ctx->zsc_lock);
10802
10803 if (!leader) {
10804 thread_terminate_self();
10805 __builtin_unreachable();
10806 }
10807 }
10808
10809 static int
10810 zone_stress_test_run(__unused int64_t in, int64_t *out)
10811 {
10812 struct zone_stress_ctx ctx = {
10813 .zsc_leader = current_thread(),
10814 .zsc_workers = 3,
10815 };
10816 kern_return_t kr;
10817 thread_t th;
10818
10819 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10820 printf("zone_stress_test: Test already running.\n");
10821 return EALREADY;
10822 }
10823
10824 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10825 ctx.zsc_zone = zone_create("test_zone_344", 344,
10826 ZC_DESTRUCTIBLE | ZC_NOCACHING);
10827 assert(ctx.zsc_zone->z_chunk_pages > 1);
10828
10829 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
10830
10831 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
10832
10833 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10834
10835 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
10836 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
10837 BASEPRI_DEFAULT, &th);
10838 if (kr == KERN_SUCCESS) {
10839 printf("zone_stress_test: thread %d: %p\n", i, th);
10840 thread_deallocate(th);
10841 } else {
10842 ctx.zsc_workers--;
10843 }
10844 }
10845
10846 zone_stress_worker(&ctx, 0);
10847
10848 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
10849
10850 zdestroy(ctx.zsc_zone);
10851
10852 printf("zone_stress_test: Done\n");
10853
10854 *out = 1;
10855 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10856 os_atomic_store(&any_zone_test_running, false, relaxed);
10857 return 0;
10858 }
10859 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
10860
10861 /*
10862 * Routines to test that zone garbage collection and zone replenish threads
10863 * running at the same time don't cause problems.
10864 */
10865
10866 static int
10867 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
10868 {
10869 zone_gc(ZONE_GC_DRAIN);
10870 *out = 1;
10871 return 0;
10872 }
10873 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
10874
10875 static int
10876 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
10877 {
10878 zone_t z = vm_map_entry_zone;
10879 struct data { struct data *next; } *node, *list = NULL;
10880
10881 if (z == NULL) {
10882 printf("Couldn't find a replenish zone\n");
10883 return EIO;
10884 }
10885
10886 /* big enough to go past replenishment */
10887 for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
10888 node = zalloc(z);
10889 node->next = list;
10890 list = node;
10891 }
10892
10893 /*
10894 * release the memory we allocated
10895 */
10896 while (list != NULL) {
10897 node = list;
10898 list = list->next;
10899 zfree(z, node);
10900 }
10901
10902 *out = 1;
10903 return 0;
10904 }
10905 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
10906
10907 #endif /* DEBUG || DEVELOPMENT */
10908