1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97
98 #include <prng/random.h>
99
100 #include <vm/pmap.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_kern.h>
103 #include <vm/vm_page.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */
106
107 #include <pexpert/pexpert.h>
108
109 #include <machine/machparam.h>
110 #include <machine/machine_routines.h> /* ml_cpu_get_info */
111
112 #include <os/atomic.h>
113
114 #include <libkern/OSDebug.h>
115 #include <libkern/OSAtomic.h>
116 #include <libkern/section_keywords.h>
117 #include <sys/kdebug.h>
118 #include <sys/code_signing.h>
119
120 #include <san/kasan.h>
121 #include <libsa/stdlib.h>
122 #include <sys/errno.h>
123
124 #include <IOKit/IOBSD.h>
125 #include <arm64/amcc_rorgn.h>
126
127 #if DEBUG
128 #define z_debug_assert(expr) assert(expr)
129 #else
130 #define z_debug_assert(expr) (void)(expr)
131 #endif
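/*
 * Note (added for clarity): unlike a plain no-op, the non-DEBUG variant still
 * evaluates its argument, so side effects and "set but only asserted on"
 * variables behave identically on both build flavors, e.g.:
 *
 *	uint16_t n = zone_meta_alloc_size_sub(z, m, esize);
 *	z_debug_assert(n % 2 == 0);	// `n` is still consumed on RELEASE builds
 */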
132
133 /* Returns pid of the task with the largest number of VM map entries. */
134 extern pid_t find_largest_process_vm_map_entries(void);
135
136 /*
137 * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
138 * For any other pid we try to kill that process synchronously.
139 */
140 extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);
141
142 extern zone_t vm_object_zone;
143 extern zone_t ipc_service_port_label_zone;
144
145 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
146 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
147
148 #if KASAN_TBI
149 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
150 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
151 #else /* KASAN_TBI */
152 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
153 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
154 #endif /* KASAN_TBI */
155
156 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
157 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
158 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
159 #define ZONE_GUARD_DENSE (32 * 1024)
160 #define ZONE_GUARD_SPARSE (64 * 1024)
161 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
162
163 #if XNU_PLATFORM_MacOSX
164 #define ZONE_MAP_MAX (32ULL << 30)
165 #define ZONE_MAP_VA_SIZE (128ULL << 30)
166 #else /* XNU_PLATFORM_MacOSX */
167 #define ZONE_MAP_MAX (8ULL << 30)
168 #define ZONE_MAP_VA_SIZE (24ULL << 30)
169 #endif /* !XNU_PLATFORM_MacOSX */
170
171 __enum_closed_decl(zm_len_t, uint16_t, {
172 ZM_CHUNK_FREE = 0x0,
173 /* 1 through 8 are valid lengths */
174 ZM_CHUNK_LEN_MAX = 0x8,
175
176 /* PGZ magical values */
177 ZM_PGZ_FREE = 0x0,
178 ZM_PGZ_ALLOCATED = 0xa, /* [a]llocated */
179 ZM_PGZ_GUARD = 0xb, /* oo[b] */
180 ZM_PGZ_DOUBLE_FREE = 0xd, /* [d]ouble_free */
181
182 /* secondary page markers */
183 ZM_SECONDARY_PAGE = 0xe,
184 ZM_SECONDARY_PCPU_PAGE = 0xf,
185 });
186
187 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
188
189 struct zone_page_metadata {
190 union {
191 struct {
192 /* The index of the zone this metadata page belongs to */
193 zone_id_t zm_index : 10;
194
195 /*
196 * This chunk ends with a guard page.
197 */
198 uint16_t zm_guarded : 1;
199
200 /*
201 * Whether `zm_bitmap` is an inline bitmap
202 * or a packed bitmap reference
203 */
204 uint16_t zm_inline_bitmap : 1;
205
206 /*
207 * Zones allocate in "chunks" of zone_t::z_chunk_pages
208 * consecutive pages, or zpercpu_count() pages if the
209 * zone is percpu.
210 *
211 * The first page of it has its metadata set with:
212 * - 0 if none of the pages are currently wired
213 * - the number of wired pages in the chunk
214 * (not scaled for percpu).
215 *
216 * Other pages in the chunk have their zm_chunk_len set
217 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
218 * depending on whether the zone is percpu or not.
219 * For those, zm_page_index holds the index of that page
220 * in the run, and zm_subchunk_len the remaining length
221 * within the chunk.
222 *
223 * Metadata used for PGZ pages can have 3 values:
224 * - ZM_PGZ_FREE: slot is free
225 * - ZM_PGZ_ALLOCATED: slot holds an allocated element
226 * at offset (zm_pgz_orig_addr & PAGE_MASK)
227 * - ZM_PGZ_DOUBLE_FREE: slot detected a double free
228 * (will panic).
229 */
230 zm_len_t zm_chunk_len : 4;
231 };
232 uint16_t zm_bits;
233 };
234
235 union {
236 #define ZM_ALLOC_SIZE_LOCK 1u
237 uint16_t zm_alloc_size; /* first page only */
238 struct {
239 uint8_t zm_page_index; /* secondary pages only */
240 uint8_t zm_subchunk_len; /* secondary pages only */
241 };
242 uint16_t zm_oob_offs; /* in guard pages */
243 };
244 union {
245 uint32_t zm_bitmap; /* most zones */
246 uint32_t zm_bump; /* permanent zones */
247 };
248
249 union {
250 struct {
251 zone_pva_t zm_page_next;
252 zone_pva_t zm_page_prev;
253 };
254 vm_offset_t zm_pgz_orig_addr;
255 struct zone_page_metadata *zm_pgz_slot_next;
256 };
257 };
258 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
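/*
 * Illustrative sketch (derived from the description above, not a new
 * interface): secondary pages point back to the head of their chunk through
 * zm_page_index, so code that lands on one typically rewinds first:
 *
 *	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
 *		page_addr -= ptoa(meta->zm_page_index);
 *		meta      -= meta->zm_page_index;
 *	}
 *	// `meta` is now the first page of the chunk, which carries the
 *	// chunk-wide zm_alloc_size / zm_bitmap accounting.
 */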
259
260 /*!
261 * @typedef zone_magazine_t
262 *
263 * @brief
264 * Magazine of cached allocations.
265 *
266 * @field zm_next linkage used by magazine depots.
267 * @field zm_elems an array of @c zc_mag_size() elements.
268 */
269 struct zone_magazine {
270 zone_magazine_t zm_next;
271 smr_seq_t zm_seq;
272 vm_offset_t zm_elems[0];
273 };
274
275 /*!
276 * @typedef zone_cache_t
277 *
278 * @brief
279  * Per-CPU cache of allocations.
280 *
281 * @discussion
282 * Below is a diagram of the caching system. This design is inspired by the
283 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
284 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
285 * zone allocator (itself derived from this seminal work).
286 *
287 * It is divided into 3 layers:
288 * - the per-cpu layer,
289 * - the recirculation depot layer,
290 * - the Zone Allocator.
291 *
292 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
293 * which are stacks of up to @c zc_mag_size() elements.
294 *
295 * <h2>CPU layer</h2>
296 *
297 * The CPU layer (@c zone_cache_t) looks like this:
298 *
299 * ╭─ a ─ f ─┬───────── zm_depot ──────────╮
300 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
301 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
302 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
303 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
304 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
305 * ╰─────────┴─────────────────────────────╯
306 *
307 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
308 * or free to. Serialization is achieved through disabling preemption, and only
309  * the current CPU can access those allocations. This is represented on the left
310 * hand side of the diagram above.
311 *
312 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
313 * full magazines, and is protected by the @c zm_depot_lock for access.
314 * The lock is expected to absolutely never be contended, as only the local CPU
315 * tends to access the local per-cpu depot in regular operation mode.
316 *
317  * However, unlike UMA, our implementation allows the zone GC to reclaim
318  * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
319 *
320 *
321 * <h2>Recirculation Depot</h2>
322 *
323 * The recirculation depot layer is a list similar to the per-cpu depot,
324 * however it is different in two fundamental ways:
325 *
326 * - it is protected by the regular zone lock,
327 * - elements referenced by the magazines in that layer appear free
328 * to the zone layer.
329 *
330 *
331 * <h2>Magazine circulation and sizing</h2>
332 *
333 * The caching system sizes itself dynamically. Operations that allocate/free
334 * a single element call @c zone_lock_nopreempt_check_contention() which records
335 * contention on the lock by doing a trylock and recording its success.
336 *
337 * This information is stored in the @c z_recirc_cont_cur field of the zone,
338 * and a windowed moving average is maintained in @c z_contention_wma.
339 * The periodically run function @c compute_zone_working_set_size() will then
340 * take this into account to decide to grow the number of buckets allowed
341 * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
342 * thresholds.
343 *
344 * The per-cpu layer will attempt to work with its depot, finding both full and
345 * empty magazines cached there. If it can't get what it needs, then it will
346 * mediate with the zone recirculation layer. Such recirculation is done in
347 * batches in order to amortize lock holds.
348 * (See @c {zalloc,zfree}_cached_depot_recirculate()).
349 *
350  * The recirculation layer keeps track of the minimum number of magazines
351  * it held over time for each of the full and empty queues. This allows
352 * @c compute_zone_working_set_size() to return memory to the system when a zone
353 * stops being used as much.
354 *
355 * <h2>Security considerations</h2>
356 *
357 * The zone caching layer has been designed to avoid returning elements in
358 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
359 * and @c zfree() free to the (f) magazine, and only swap them when the
360 * requested operation cannot be fulfilled.
361 *
362 * The per-cpu overflow depot or the recirculation depots are similarly used
363 * in FIFO order.
364 *
365 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
366 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
367 * @field zc_free_cur denormalized number of elements in the (f) magazine
368 * @field zc_alloc_elems a pointer to the array of elements in (a)
369 * @field zc_free_elems a pointer to the array of elements in (f)
370 *
371 * @field zc_depot a list of @c zc_depot_cur full magazines
372 */
373 typedef struct zone_cache {
374 hw_lck_ticket_t zc_depot_lock;
375 uint16_t zc_alloc_cur;
376 uint16_t zc_free_cur;
377 vm_offset_t *zc_alloc_elems;
378 vm_offset_t *zc_free_elems;
379 struct zone_depot zc_depot;
380 smr_t zc_smr;
381 zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
382 } __attribute__((aligned(64))) * zone_cache_t;
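/*
 * Minimal sketch of the (a)/(f) fast path described above (illustrative only;
 * `pcpu_cache` and `zalloc_slow` are stand-ins, and the real paths also handle
 * SMR, tags, sanitizers and depot refills):
 *
 *	disable_preemption();
 *	zone_cache_t cache = pcpu_cache(zone);
 *	vm_offset_t addr;
 *	if (cache->zc_alloc_cur > 0) {
 *		addr = cache->zc_alloc_elems[--cache->zc_alloc_cur];
 *	} else {
 *		// swap with (f), then the per-cpu depot, then recirculation
 *		addr = zalloc_slow(zone, cache);
 *	}
 *	enable_preemption();
 */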
383
384 #if !__x86_64__
385 static
386 #endif
387 __security_const_late struct {
388 struct mach_vm_range zi_map_range; /* all zone submaps */
389 struct mach_vm_range zi_ro_range; /* read-only range */
390 struct mach_vm_range zi_meta_range; /* debugging only */
391 struct mach_vm_range zi_bits_range; /* bits buddy allocator */
392 struct mach_vm_range zi_xtra_range; /* vm tracking metadata */
393 struct mach_vm_range zi_pgz_range;
394 struct zone_page_metadata *zi_pgz_meta;
395
396 /*
397 * The metadata lives within the zi_meta_range address range.
398 *
399 * The correct formula to find a metadata index is:
400 * absolute_page_index - page_index(zi_map_range.min_address)
401 *
402 * And then this index is used to dereference zi_meta_range.min_address
403 * as a `struct zone_page_metadata` array.
404 *
405  * To avoid doing that subtraction in the various fast-paths,
406  * zi_meta_base is pre-offset by that minimum page index so that
407  * the math never needs to be redone.
408 */
409 struct zone_page_metadata *zi_meta_base;
410 } zone_info;
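/*
 * Illustrative consequence of the pre-offsetting described above: looking up
 * the metadata for an address inside the zone map is a single indexing
 * operation,
 *
 *	meta = &zone_info.zi_meta_base[atop(addr)];
 *
 * which is equivalent to indexing the real metadata array at
 * atop(addr) - atop(zone_info.zi_map_range.min_address).
 */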
411
412 __startup_data static struct mach_vm_range zone_map_range;
413 __startup_data static vm_map_size_t zone_meta_size;
414 __startup_data static vm_map_size_t zone_bits_size;
415 __startup_data static vm_map_size_t zone_xtra_size;
416
417 /*
418 * Initial array of metadata for stolen memory.
419 *
420 * The numbers here have to be kept in sync with vm_map_steal_memory()
421 * so that we have reserved enough metadata.
422 *
423 * After zone_init() has run (which happens while the kernel is still single
424 * threaded), the metadata is moved to its final dynamic location, and
425 * this array is unmapped with the rest of __startup_data at lockdown.
426 */
427 #define ZONE_EARLY_META_INLINE_COUNT 64
428 __startup_data
429 static struct zone_page_metadata
430 zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
431
432 #if __x86_64__
433 /*
434 * On Intel we can't "free" pmap stolen pages,
435 * so instead we use a static array in __KLDDATA
436 * which gets reclaimed at lockdown time.
437 */
438 __startup_data __attribute__((aligned(PAGE_SIZE)))
439 static uint8_t zone_early_pages_to_cram[PAGE_SIZE * 16];
440 #endif
441
442 /*
443 * The zone_locks_grp allows for collecting lock statistics.
444  * All locks are associated with this group in zinit.
445 * Look at tools/lockstat for debugging lock contention.
446 */
447 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
448 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
449
450 /*
451 * The zone metadata lock protects:
452 * - metadata faulting,
453 * - VM submap VA allocations,
454 * - early gap page queue list
455 */
456 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
457 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
458
459 /*
460 * Exclude more than one concurrent garbage collection
461 */
462 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
463 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
464 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
465
466 /*
467 * Panic logging metadata
468 */
469 bool panic_include_zprint = false;
470 bool panic_include_kalloc_types = false;
471 zone_t kalloc_type_src_zone = ZONE_NULL;
472 zone_t kalloc_type_dst_zone = ZONE_NULL;
473 mach_memory_info_t *panic_kext_memory_info = NULL;
474 vm_size_t panic_kext_memory_size = 0;
475 vm_offset_t panic_fault_address = 0;
476
477 /*
478 * Protects zone_array, num_zones, num_zones_in_use, and
479 * zone_destroyed_bitmap
480 */
481 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
482 static zone_id_t num_zones_in_use;
483 zone_id_t _Atomic num_zones;
484 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
485
486 /*
487 * Initial globals for zone stats until we can allocate the real ones.
488 * Those get migrated inside the per-CPU ones during zone_init() and
489 * this array is unmapped with the rest of __startup_data at lockdown.
490 */
491
492 /* zone to allocate zone_magazine structs from */
493 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
494 /*
495  * Zone caching stays off until pid 1 is made, that is,
496  * until compute_zone_working_set_size() runs for the first time.
497 *
498 * -1 represents the "never enabled yet" value.
499 */
500 static int8_t zone_caching_disabled = -1;
501
502 __startup_data
503 static struct zone_stats zone_stats_startup[MAX_ZONES];
504 struct zone zone_array[MAX_ZONES];
505 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
506 [0 ... MAX_ZONES - 1] = {
507 .z_kheap_id = KHEAP_ID_NONE,
508 .z_noencrypt = false,
509 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
510 .z_kalloc_type = false,
511 },
512 };
513 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
514 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
515
516 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
517 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
518
519 /* Used to keep track of destroyed slots in the zone_array */
520 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
521
522 /* number of zone mapped pages used by all zones */
523 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
524 size_t zone_pages_wired;
525 size_t zone_guard_pages;
526
527 /* Time in (ms) after which we panic for zone exhaustions */
528 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
529
530 #if VM_TAG_SIZECLASSES
531 /*
532 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
533 * zones only.
534 *
535 * There are 3 kinds of tags that can be used:
536 * - pre-registered VM_KERN_MEMORY_*
537 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
538 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
539 *
540 * The VM tracks the statistics in lazily allocated structures.
541 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
542 *
543 * If for some reason the requested tag cannot be accounted for,
544 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
545 *
546 * Each allocated element also remembers the tag it was assigned,
547 * which lets zalloc/zfree update statistics correctly.
548 */
549
550 /* enable tags for zones that ask for it */
551 static TUNABLE(bool, zone_tagging_on, "-zt", false);
552
553 /*
554 * Array of all sizeclasses used by kalloc variants so that we can
555 * have accounting per size class for each kalloc callsite
556 */
557 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
558 #endif /* VM_TAG_SIZECLASSES */
559
560 #if DEBUG || DEVELOPMENT
561 static int zalloc_simulate_vm_pressure;
562 #endif /* DEBUG || DEVELOPMENT */
563
564 #define Z_TUNABLE(t, n, d) \
565 TUNABLE(t, _##n, #n, d); \
566 __pure2 static inline t n(void) { return _##n; }
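/*
 * For example, the declaration below:
 *
 *	Z_TUNABLE(uint16_t, zc_mag_size, 8);
 *
 * expands to a "zc_mag_size" boot-arg backed variable plus a constant-foldable
 * accessor (expansion shown for illustration):
 *
 *	TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *	__pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 */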
567
568 /*
569 * Zone caching tunables
570 *
571 * zc_mag_size():
572 * size of magazines, larger to reduce contention at the expense of memory
573 *
574 * zc_enable_level
575 * number of contentions per second after which zone caching engages
576 * automatically.
577 *
578 * 0 to disable.
579 *
580 * zc_grow_level
581 * number of contentions per second x cpu after which the number of magazines
582 * allowed in the depot can grow. (in "Z_WMA_UNIT" units).
583 *
584 * zc_shrink_level
585 * number of contentions per second x cpu below which the number of magazines
586 * allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
587 *
588 * zc_pcpu_max
589 * maximum memory size in bytes that can hang from a CPU,
590 * which will affect how many magazines are allowed in the depot.
591 *
592 * The alloc/free magazines are assumed to be on average half-empty
593 * and to count for "1" unit of magazines.
594 *
595 * zc_autotrim_size
596 * Size allowed to hang extra from the recirculation depot before
597 * auto-trim kicks in.
598 *
599 * zc_autotrim_buckets
600 *
601 * How many buckets in excess of the working-set are allowed
602 * before auto-trim kicks in for empty buckets.
603 *
604 * zc_free_batch_size
605 * The size of batches of frees/reclaim that can be done keeping
606 * the zone lock held (and preemption disabled).
607 */
608 Z_TUNABLE(uint16_t, zc_mag_size, 8);
609 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
610 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
611 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
612 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
613 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
614 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
615 static Z_TUNABLE(uint32_t, zc_free_batch_size, 256);
616
617 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
618 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
619 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
620 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
621 [Z_SUBMAP_IDX_VM] = "VM",
622 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
623 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
624 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
625 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
626 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
627 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
628 #else
629 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
630 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
631 [Z_SUBMAP_IDX_DATA] = "DATA",
632 };
633
634 #if __x86_64__
635 #define ZONE_ENTROPY_CNT 8
636 #else
637 #define ZONE_ENTROPY_CNT 2
638 #endif
639 static struct zone_bool_gen {
640 struct bool_gen zbg_bg;
641 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
642 } zone_bool_gen[MAX_CPUS];
643
644 #if CONFIG_PROB_GZALLOC
645 /*
646 * Probabilistic gzalloc
647 * =====================
648 *
649 *
650 * Probabilistic guard zalloc samples allocations and will protect them by
651 * double-mapping the page holding them and returning the secondary virtual
652 * address to its callers.
653 *
654 * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
655 * are set.
656 *
657 *
658 * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
659 * most zalloc/kalloc features:
660 * - zone_require is functional
661 * - zone caching or zone tagging is compatible
662  * - non-blocking allocations work (with gzalloc they always return NULL).
663 *
664 * PGZ limitations:
665 * - VA sequestering isn't respected, as the slots (which are in limited
666 * quantity) will be reused for any type, however the PGZ quarantine
667 * somewhat mitigates the impact.
668 * - zones with elements larger than a page cannot be protected.
669 *
670 *
671 * Tunables:
672 * --------
673 *
674 * pgz=1:
675 * Turn on probabilistic guard malloc for all zones
676 *
677 * (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
678 *
679 * pgz_sample_rate=0 to 2^31
680 * average sample rate between two guarded allocations.
681 * 0 means every allocation.
682 *
683 * The default is a random number between 1000 and 10,000
684 *
685 * pgz_slots
686 * how many allocations to protect.
687 *
688 * Each costs:
689 * - a PTE in the pmap (when allocated)
690 * - 2 zone page meta's (every other page is a "guard" one, 32B total)
691 * - 64 bytes per backtraces.
692 * On LP64 this is <16K per 100 slots.
693 *
694 * The default is ~200 slots per G of physical ram (32k / G)
695 *
696 * TODO:
697 * - try harder to allocate elements at the "end" to catch OOB more reliably.
698 *
699 * pgz_quarantine
700 * how many slots should be free at any given time.
701 *
702 * PGZ will round robin through free slots to be reused, but free slots are
703 * important to detect use-after-free by acting as a quarantine.
704 *
705  * By default, PGZ will keep 33% of the slots around at all times.
706 *
707 * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
708 * Specific zones for which to enable probabilistic guard malloc.
709 * There must be no numbering gap (names after the gap will be ignored).
710 */
711 #if DEBUG || DEVELOPMENT
712 static TUNABLE(bool, pgz_all, "pgz", true);
713 #else
714 static TUNABLE(bool, pgz_all, "pgz", false);
715 #endif
716 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
717 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
718 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
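/*
 * Example configurations (illustrative; the zone names are placeholders):
 *
 *	pgz=1 pgz_sample_rate=1000 pgz_slots=400
 *		guards all eligible zones, sampling roughly 1 in 1000 allocations
 *
 *	pgz1=some.zone pgz2=another.zone
 *		guards only the named zones (numbering must be gap-free, see above)
 */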
719 #endif /* CONFIG_PROB_GZALLOC */
720
721 static zone_t zone_find_largest(uint64_t *zone_size);
722
723 #endif /* !ZALLOC_TEST */
724 #pragma mark Zone metadata
725 #if !ZALLOC_TEST
726
727 static inline bool
728 zone_has_index(zone_t z, zone_id_t zid)
729 {
730 return zone_array + zid == z;
731 }
732
733 __abortlike
734 void
735 zone_invalid_panic(zone_t zone)
736 {
737 panic("zone %p isn't in the zone_array", zone);
738 }
739
740 __abortlike
741 static void
742 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
743 const char *kind)
744 {
745 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
746 kind, meta, zone_heap_name(zone), zone->z_name);
747 }
748
749 __abortlike
750 static void
751 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
752 {
753 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
754 (void *)addr, zone_heap_name(zone), zone->z_name);
755 }
756
757 __abortlike
758 static void
759 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
760 struct zone_page_metadata *meta)
761 {
762 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
763 zone_id_t zidx;
764 zone_t src_zone;
765
766 if (zsflags.z_kalloc_type) {
767 panic_include_kalloc_types = true;
768 kalloc_type_dst_zone = zone;
769 }
770
771 zidx = meta->zm_index;
772 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
773 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
774 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
775 zidx);
776 }
777
778 src_zone = &zone_array[zidx];
779 src_zsflags = zone_security_array[zidx];
780 if (src_zsflags.z_kalloc_type) {
781 panic_include_kalloc_types = true;
782 kalloc_type_src_zone = src_zone;
783 }
784
785 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
786 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
787 zone_heap_name(src_zone), src_zone->z_name, zidx);
788 }
789
790 __abortlike
791 static void
792 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
793 {
794 panic("metadata list corruption through element %p detected in zone %s%s",
795 meta, zone_heap_name(zone), zone->z_name);
796 }
797
798 __abortlike
799 static void
800 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
801 const char *kind)
802 {
803 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
804 zone_heap_name(zone), zone->z_name, meta);
805 }
806
807 __abortlike
808 static void
809 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
810 {
811 panic("%s: double free of %p to zone %s%s", caller,
812 (void *)addr, zone_heap_name(zone), zone->z_name);
813 }
814
815 __abortlike
816 static void
817 zone_accounting_panic(zone_t zone, const char *kind)
818 {
819 panic("accounting mismatch (%s) for zone %s%s", kind,
820 zone_heap_name(zone), zone->z_name);
821 }
822
823 #define zone_counter_sub(z, stat, value) ({ \
824 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
825 zone_accounting_panic(z, #stat " wrap-around"); \
826 } \
827 (z)->stat; \
828 })
829
830 static inline uint16_t
831 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
832 vm_offset_t esize)
833 {
834 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
835 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
836 }
837 return m->zm_alloc_size;
838 }
839
840 static inline uint16_t
841 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
842 vm_offset_t esize)
843 {
844 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
845 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
846 }
847 return m->zm_alloc_size;
848 }
849
850 __abortlike
851 static void
852 zone_nofail_panic(zone_t zone)
853 {
854 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
855 zone_heap_name(zone), zone->z_name);
856 }
857
858 __header_always_inline bool
859 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
860 {
861 const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
862 struct mach_vm_range r = { addr_start, addr_end };
863
864 return mach_vm_range_intersects(ro_r, &r);
865 }
866
867 #define from_range(r, addr, size) \
868 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
869 mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
870 mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))
871
872 #define from_ro_map(addr, size) \
873 from_range(&zone_info.zi_ro_range, addr, size)
874
875 #define from_zone_map(addr, size) \
876 from_range(&zone_info.zi_map_range, addr, size)
877
878 __header_always_inline bool
879 zone_pva_is_null(zone_pva_t page)
880 {
881 return page.packed_address == 0;
882 }
883
884 __header_always_inline bool
885 zone_pva_is_queue(zone_pva_t page)
886 {
887 // actual kernel pages have the top bit set
888 return (int32_t)page.packed_address > 0;
889 }
890
891 __header_always_inline bool
892 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
893 {
894 return pva1.packed_address == pva2.packed_address;
895 }
896
897 __header_always_inline zone_pva_t *
898 zone_pageq_base(void)
899 {
900 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
901
902 /*
903 * `-1` so that if the first __DATA variable is a page queue,
904  * it gets a non-zero index
905 */
906 return data_seg_start - 1;
907 }
908
909 __header_always_inline void
910 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
911 struct zone_page_metadata *meta)
912 {
913 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
914
915 if (!zone_pva_is_equal(*queue_head, oldv)) {
916 zone_page_metadata_list_corruption(z, meta);
917 }
918 *queue_head = meta->zm_page_next;
919 }
920
921 __header_always_inline zone_pva_t
922 zone_queue_encode(zone_pva_t *headp)
923 {
924 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
925 }
926
927 __header_always_inline zone_pva_t
928 zone_pva_from_addr(vm_address_t addr)
929 {
930 // cannot use atop() because we want to maintain the sign bit
931 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
932 }
933
934 __header_always_inline vm_address_t
935 zone_pva_to_addr(zone_pva_t page)
936 {
937 // cause sign extension so that we end up with the right address
938 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
939 }
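/*
 * Note on the packing above (added for clarity): kernel pointers live in the
 * upper half of the address space, so their packed form keeps the top bit set
 * and reads as negative, whereas queue heads built by zone_queue_encode() are
 * small positive indexes into __DATA. zone_pva_is_queue() keys off exactly
 * that sign bit, and zone_pva_to_addr() recovers the original pointer by
 * sign-extending before shifting back.
 */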
940
941 __header_always_inline struct zone_page_metadata *
942 zone_pva_to_meta(zone_pva_t page)
943 {
944 return &zone_info.zi_meta_base[page.packed_address];
945 }
946
947 __header_always_inline zone_pva_t
948 zone_pva_from_meta(struct zone_page_metadata *meta)
949 {
950 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
951 }
952
953 __header_always_inline struct zone_page_metadata *
954 zone_meta_from_addr(vm_offset_t addr)
955 {
956 return zone_pva_to_meta(zone_pva_from_addr(addr));
957 }
958
959 __header_always_inline zone_id_t
960 zone_index_from_ptr(const void *ptr)
961 {
962 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
963 }
964
965 __header_always_inline vm_offset_t
966 zone_meta_to_addr(struct zone_page_metadata *meta)
967 {
968 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
969 }
970
971 __attribute__((overloadable))
972 __header_always_inline void
973 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
974 {
975 if (!zone_has_index(z, meta->zm_index)) {
976 zone_page_metadata_index_confusion_panic(z, addr, meta);
977 }
978 }
979
980 __attribute__((overloadable))
981 __header_always_inline void
982 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
983 {
984 zone_meta_validate(z, meta, zone_meta_to_addr(meta));
985 }
986
987 __header_always_inline void
988 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
989 struct zone_page_metadata *meta)
990 {
991 zone_pva_t head = *headp;
992 zone_pva_t queue_pva = zone_queue_encode(headp);
993 struct zone_page_metadata *tmp;
994
995 meta->zm_page_next = head;
996 if (!zone_pva_is_null(head)) {
997 tmp = zone_pva_to_meta(head);
998 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
999 zone_page_metadata_list_corruption(z, meta);
1000 }
1001 tmp->zm_page_prev = zone_pva_from_meta(meta);
1002 }
1003 meta->zm_page_prev = queue_pva;
1004 *headp = zone_pva_from_meta(meta);
1005 }
1006
1007 __header_always_inline struct zone_page_metadata *
1008 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1009 {
1010 zone_pva_t head = *headp;
1011 struct zone_page_metadata *meta = zone_pva_to_meta(head);
1012 struct zone_page_metadata *tmp;
1013
1014 zone_meta_validate(z, meta);
1015
1016 if (!zone_pva_is_null(meta->zm_page_next)) {
1017 tmp = zone_pva_to_meta(meta->zm_page_next);
1018 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1019 zone_page_metadata_list_corruption(z, meta);
1020 }
1021 tmp->zm_page_prev = meta->zm_page_prev;
1022 }
1023 *headp = meta->zm_page_next;
1024
1025 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1026
1027 return meta;
1028 }
1029
1030 __header_always_inline void
1031 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1032 {
1033 zone_pva_t meta_pva = zone_pva_from_meta(meta);
1034 struct zone_page_metadata *tmp;
1035
1036 if (!zone_pva_is_null(meta->zm_page_next)) {
1037 tmp = zone_pva_to_meta(meta->zm_page_next);
1038 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1039 zone_page_metadata_list_corruption(z, meta);
1040 }
1041 tmp->zm_page_prev = meta->zm_page_prev;
1042 }
1043 if (zone_pva_is_queue(meta->zm_page_prev)) {
1044 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1045 } else {
1046 tmp = zone_pva_to_meta(meta->zm_page_prev);
1047 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1048 zone_page_metadata_list_corruption(z, meta);
1049 }
1050 tmp->zm_page_next = meta->zm_page_next;
1051 }
1052
1053 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1054 }
1055
1056 __header_always_inline void
1057 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1058 struct zone_page_metadata *meta)
1059 {
1060 zone_meta_remqueue(z, meta);
1061 zone_meta_queue_push(z, headp, meta);
1062 }
1063
1064 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1065 static inline void
1066 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1067 {
1068 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1069
1070 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1071 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1072 zone_meta_requeue(z, &z->z_pageq_partial, m);
1073 zone_counter_sub(z, z_wired_empty, len);
1074 }
1075 }
1076
1077 /* allows a given metadata to reach the z_pageq_empty queue again */
1078 static inline void
1079 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1080 {
1081 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1082
1083 assert(new_size % sizeof(vm_offset_t) == 0);
1084 if (new_size == 0) {
1085 zone_meta_requeue(z, &z->z_pageq_empty, m);
1086 z->z_wired_empty += len;
1087 }
1088 }
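/*
 * Descriptive note (inferred from the asserts above): genuine allocations are
 * accounted in zm_alloc_size in multiples of sizeof(vm_offset_t), so the odd
 * ZM_ALLOC_SIZE_LOCK bit acts as an out-of-band pin: while it is set,
 * zm_alloc_size cannot drop to 0, which keeps the chunk on z_pageq_partial
 * and out of reach of the GC's empty-page reclamation.
 */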
1089
1090 /*
1091 * Routine to populate a page backing metadata in the zone_metadata_region.
1092 * Must be called without the zone lock held as it might potentially block.
1093 */
1094 static void
1095 zone_meta_populate(vm_offset_t base, vm_size_t size)
1096 {
1097 struct zone_page_metadata *from = zone_meta_from_addr(base);
1098 struct zone_page_metadata *to = from + atop(size);
1099 vm_offset_t page_addr = trunc_page(from);
1100
1101 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1102 #if !KASAN
1103 /*
1104 * This can race with another thread doing a populate on the same metadata
1105 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1106 * fault in the shadow when we first access the metadata page. Avoid this
1107 * by always synchronizing on the zone_metadata_region lock with KASan.
1108 */
1109 if (pmap_find_phys(kernel_pmap, page_addr)) {
1110 continue;
1111 }
1112 #endif
1113
1114 for (;;) {
1115 kern_return_t ret = KERN_SUCCESS;
1116
1117 /*
1118 * All updates to the zone_metadata_region are done
1119 * under the zone_metadata_region_lck
1120 */
1121 zone_meta_lock();
1122 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1123 ret = kernel_memory_populate(page_addr,
1124 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1125 VM_KERN_MEMORY_OSFMK);
1126 }
1127 zone_meta_unlock();
1128
1129 if (ret == KERN_SUCCESS) {
1130 break;
1131 }
1132
1133 /*
1134  * We can't wait for pages while holding a global lock (hence
1135  * KMA_NOPAGEWAIT above) as that leads to bad system deadlocks,
1136  * so if the allocation failed, we need to do the VM_PAGE_WAIT() outside of the lock.
1137 */
1138 VM_PAGE_WAIT();
1139 }
1140 }
1141 }
1142
1143 __abortlike
1144 static void
1145 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1146 {
1147 struct zone_page_metadata *meta;
1148 const char *from_cache = "";
1149 vm_offset_t page;
1150
1151 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1152 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1153 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1154 }
1155 page = trunc_page(addr);
1156 meta = zone_meta_from_addr(addr);
1157
1158 if (!zone_has_index(zone, meta->zm_index)) {
1159 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1160 }
1161
1162 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1163 panic("metadata %p corresponding to addr %p being freed to "
1164 "zone %s%s%s, is marked as secondary per cpu page",
1165 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1166 from_cache);
1167 }
1168 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1169 page -= ptoa(meta->zm_page_index);
1170 meta -= meta->zm_page_index;
1171 }
1172
1173 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1174 panic("metadata %p corresponding to addr %p being freed to "
1175 "zone %s%s%s, has chunk len greater than max",
1176 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1177 from_cache);
1178 }
1179
1180 if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1181 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1182 "zone element size", (void *)addr, zone_heap_name(zone),
1183 zone->z_name, from_cache);
1184 }
1185
1186 zone_invalid_element_addr_panic(zone, addr);
1187 }
1188
1189 __attribute__((always_inline))
1190 static struct zone_page_metadata *
1191 zone_element_resolve(
1192 zone_t zone,
1193 vm_offset_t addr,
1194 vm_offset_t *idx)
1195 {
1196 struct zone_page_metadata *meta;
1197 vm_offset_t offs, eidx;
1198
1199 meta = zone_meta_from_addr(addr);
1200 if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1201 zone_invalid_element_panic(zone, addr);
1202 }
1203
1204 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1205 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1206 offs += ptoa(meta->zm_page_index);
1207 meta -= meta->zm_page_index;
1208 }
1209
1210 eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1211 if (eidx * zone_elem_outer_size(zone) != offs) {
1212 zone_invalid_element_panic(zone, addr);
1213 }
1214
1215 *idx = eidx;
1216 return meta;
1217 }
1218
1219 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1220 void *
1221 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1222 {
1223 vm_offset_t addr = (vm_offset_t)ptr;
1224 vm_offset_t end = addr + elem_size;
1225 vm_offset_t offs;
1226
1227 /*
1228 * 0-sized allocations in a KALLOC_MINSIZE bucket
1229 * would be offset to the next allocation which is incorrect.
1230 */
1231 req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1232
1233 /*
1234 * Given how chunks work, for a zone with PGZ guards on,
1235 * there's a single element which ends precisely
1236 * at the page boundary: the last one.
1237 */
1238 if (req_size == elem_size ||
1239 (end & PAGE_MASK) ||
1240 !zone_meta_from_addr(addr)->zm_guarded) {
1241 return ptr;
1242 }
1243
1244 offs = elem_size - req_size;
1245 zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1246
1247 return (char *)addr + offs;
1248 }
1249 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
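/*
 * Worked example for the adjustment above (sizes are illustrative): consider
 * the last 128-byte element of a guarded chunk, the one ending exactly at the
 * page boundary before the PGZ guard page. For a 48-byte request, the pointer
 * handed back is pushed right by offs = 128 - 48 = 80 bytes so the buffer ends
 * flush against the guard page (turning linear overflows into faults), and
 * `offs` is stashed in the guard page's zm_oob_offs so that zone_element_size()
 * and the free path can recover the element's real start.
 */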
1250
1251 __abortlike
1252 static void
1253 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1254 {
1255 struct zone_page_metadata *meta;
1256 vm_offset_t offs, size, page;
1257 zone_t zone;
1258
1259 page = trunc_page(addr);
1260 meta = zone_meta_from_addr(addr);
1261 zone = &zone_array[meta->zm_index];
1262
1263 if (zone->z_percpu) {
1264 panic("zone bound checks: address %p is a per-cpu allocation",
1265 (void *)addr);
1266 }
1267
1268 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1269 page -= ptoa(meta->zm_page_index);
1270 meta -= meta->zm_page_index;
1271 }
1272
1273 size = zone_elem_outer_size(zone);
1274 offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1275 zone->z_quo_magic, size);
1276 panic("zone bound checks: buffer %p of length %zd overflows "
1277 "object %p of size %zd in zone %p[%s%s]",
1278 (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1279 zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1280 }
1281
1282 void
1283 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1284 {
1285 struct zone_page_metadata *meta;
1286 vm_offset_t offs, size;
1287 zone_t zone;
1288
1289 if (!from_zone_map(addr, 1)) {
1290 return;
1291 }
1292
1293 #if CONFIG_PROB_GZALLOC
1294 if (__improbable(pgz_owned(addr))) {
1295 meta = zone_meta_from_addr(addr);
1296 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1297 }
1298 #endif /* CONFIG_PROB_GZALLOC */
1299 meta = zone_meta_from_addr(addr);
1300 zone = zone_by_id(meta->zm_index);
1301
1302 if (zone->z_percpu) {
1303 zone_element_bounds_check_panic(addr, len);
1304 }
1305
1306 if (zone->z_permanent) {
1307 /* We don't know bounds for those */
1308 return;
1309 }
1310
1311 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1312 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1313 offs += ptoa(meta->zm_page_index);
1314 }
1315 size = zone_elem_outer_size(zone);
1316 offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1317 if (len + zone_elem_redzone(zone) > size - offs) {
1318 zone_element_bounds_check_panic(addr, len);
1319 }
1320 }
1321
1322 /*
1323  * Routine to get the size of a zone-allocated address.
1324  * If the address doesn't belong to the zone maps, returns 0.
1325 */
1326 vm_size_t
1327 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1328 {
1329 vm_address_t addr = (vm_address_t)elem;
1330 struct zone_page_metadata *meta;
1331 vm_size_t esize, offs, end;
1332 zone_t zone;
1333
1334 if (from_zone_map(addr, sizeof(void *))) {
1335 meta = zone_meta_from_addr(addr);
1336 zone = zone_by_id(meta->zm_index);
1337 esize = zone_elem_inner_size(zone);
1338 end = addr + esize;
1339 offs = 0;
1340
1341 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1342 /*
1343  * If the chunk uses guards, and (addr + esize)
1344 * either crosses a page boundary or is at the boundary,
1345 * we need to look harder.
1346 */
1347 if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1348 /*
1349 * Because in the vast majority of cases the element
1350  * size is sub-page, and meta[1] must already be faulted in,
1351 * we can quickly peek at whether it's a guard.
1352 *
1353 * For elements larger than a page, finding the guard
1354 * page requires a little more effort.
1355 */
1356 if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1357 offs = meta[1].zm_oob_offs;
1358 if (clear_oob) {
1359 meta[1].zm_oob_offs = 0;
1360 }
1361 } else if (esize > PAGE_SIZE) {
1362 struct zone_page_metadata *gmeta;
1363
1364 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1365 gmeta = meta + meta->zm_subchunk_len;
1366 } else {
1367 gmeta = meta + zone->z_chunk_pages;
1368 }
1369 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1370
1371 if (end >= zone_meta_to_addr(gmeta)) {
1372 offs = gmeta->zm_oob_offs;
1373 if (clear_oob) {
1374 gmeta->zm_oob_offs = 0;
1375 }
1376 }
1377 }
1378 }
1379 #else
1380 #pragma unused(end, clear_oob)
1381 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1382
1383 if (oob_offs) {
1384 *oob_offs = offs;
1385 }
1386 if (z) {
1387 *z = zone;
1388 }
1389 return esize;
1390 }
1391
1392 if (oob_offs) {
1393 *oob_offs = 0;
1394 }
1395
1396 return 0;
1397 }
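/*
 * Illustrative use (the free helper is a stand-in): callers can use the
 * returned offset to rebase a possibly right-shifted pointer before freeing:
 *
 *	vm_offset_t offs;
 *	zone_t z;
 *
 *	if (zone_element_size(ptr, &z, true, &offs)) {
 *		release_to_zone(z, (char *)ptr - offs);	// hypothetical free path
 *	}
 */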
1398
1399 zone_id_t
1400 zone_id_for_element(void *addr, vm_size_t esize)
1401 {
1402 zone_id_t zid = ZONE_ID_INVALID;
1403 if (from_zone_map(addr, esize)) {
1404 zid = zone_index_from_ptr(addr);
1405 __builtin_assume(zid != ZONE_ID_INVALID);
1406 }
1407 return zid;
1408 }
1409
1410 /* This function just formats the reason for the panics by redoing the checks */
1411 __abortlike
1412 static void
1413 zone_require_panic(zone_t zone, void *addr)
1414 {
1415 uint32_t zindex;
1416 zone_t other;
1417
1418 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1419 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1420 }
1421
1422 zindex = zone_index_from_ptr(addr);
1423 other = &zone_array[zindex];
1424 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1425 panic("zone_require failed: invalid zone index %d "
1426 "(addr: %p, expected: %s%s)", zindex,
1427 addr, zone_heap_name(zone), zone->z_name);
1428 } else {
1429 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1430 "(addr: %p, expected: %s%s)",
1431 zindex, zone_heap_name(other), other->z_name,
1432 addr, zone_heap_name(zone), zone->z_name);
1433 }
1434 }
1435
1436 __abortlike
1437 static void
1438 zone_id_require_panic(zone_id_t zid, void *addr)
1439 {
1440 zone_require_panic(&zone_array[zid], addr);
1441 }
1442
1443 /*
1444 * Routines to panic if a pointer is not mapped to an expected zone.
1445 * This can be used as a means of pinning an object to the zone it is expected
1446  * to be a part of. Causes a panic if the address does not belong to the
1447 * specified zone, does not belong to any zone, has been freed and therefore
1448 * unmapped from the zone, or the pointer contains an uninitialized value that
1449 * does not belong to any zone.
1450 */
1451 void
1452 zone_require(zone_t zone, void *addr)
1453 {
1454 vm_size_t esize = zone_elem_inner_size(zone);
1455
1456 if (from_zone_map(addr, esize) &&
1457 zone_has_index(zone, zone_index_from_ptr(addr))) {
1458 return;
1459 }
1460 zone_require_panic(zone, addr);
1461 }
1462
1463 void
1464 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1465 {
1466 if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1467 return;
1468 }
1469 zone_id_require_panic(zid, addr);
1470 }
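/*
 * Illustrative use (the widget zone is hypothetical): zone_require() pins a
 * pointer to the zone it must have come from before it is trusted, e.g. when
 * the pointer was read from memory an attacker may influence:
 *
 *	struct widget *w = lookup_widget(handle);
 *	zone_require(widget_zone, w);	// panics unless w is a widget_zone element
 *	use_widget(w);
 */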
1471
1472 bool
1473 zone_owns(zone_t zone, void *addr)
1474 {
1475 vm_size_t esize = zone_elem_inner_size(zone);
1476
1477 if (from_zone_map(addr, esize)) {
1478 return zone_has_index(zone, zone_index_from_ptr(addr));
1479 }
1480 return false;
1481 }
1482
1483 static inline struct mach_vm_range
1484 zone_kmem_suballoc(
1485 mach_vm_offset_t addr,
1486 vm_size_t size,
1487 int flags,
1488 vm_tag_t tag,
1489 vm_map_t *new_map)
1490 {
1491 struct mach_vm_range r;
1492
1493 *new_map = kmem_suballoc(kernel_map, &addr, size,
1494 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1495 flags, KMS_PERMANENT | KMS_NOFAIL, tag).kmr_submap;
1496
1497 r.min_address = addr;
1498 r.max_address = addr + size;
1499 return r;
1500 }
1501
1502 #endif /* !ZALLOC_TEST */
1503 #pragma mark Zone bits allocator
1504
1505 /*!
1506 * @defgroup Zone Bitmap allocator
1507 * @{
1508 *
1509 * @brief
1510 * Functions implementing the zone bitmap allocator
1511 *
1512 * @discussion
1513 * The zone allocator maintains which elements are allocated or free in bitmaps.
1514 *
1515 * When the number of elements per page is smaller than 32, it is stored inline
1516 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1517 * and @c zm_bitmap used for storage).
1518 *
1519  * When the number of elements is larger, a bitmap is allocated from
1520  * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1521 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1522 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1523 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1524 * cannot be larger than 1024 bytes (8192 bits).
1525 *
1526  * This buddy allocator can actually accommodate allocations as large
1527 * as 8k on 16k systems and 2k on 4k systems.
1528 *
1529 * Note: @c zba_* functions are implementation details not meant to be used
1530  * outside of the implementation of the allocator itself. Interfaces to the rest of
1531 * the zone allocator are documented and not @c zba_* prefixed.
1532 */
1533
1534 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1535 #define ZBA_GRANULE sizeof(uint64_t)
1536 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1537 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1538 #define ZBA_MAX_ALLOC_ORDER 7
1539 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1540 #define ZBA_HEADS_COUNT (ZBA_MAX_ORDER + 1)
1541 #define ZBA_PTR_MASK 0x0fffffff
1542 #define ZBA_ORDER_SHIFT 29
1543 #define ZBA_HAS_EXTRA_BIT 0x10000000
1544
1545 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1546 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
1547
1548 struct zone_bits_chain {
1549 uint32_t zbc_next;
1550 uint32_t zbc_prev;
1551 } __attribute__((aligned(ZBA_GRANULE)));
1552
1553 struct zone_bits_head {
1554 uint32_t zbh_next;
1555 uint32_t zbh_unused;
1556 } __attribute__((aligned(ZBA_GRANULE)));
1557
1558 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1559 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1560
1561 struct zone_bits_allocator_meta {
1562 uint32_t zbam_left;
1563 uint32_t zbam_right;
1564 struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1565 struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1566 };
1567
1568 struct zone_bits_allocator_header {
1569 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1570 };
1571
1572 #if ZALLOC_TEST
1573 static struct zalloc_bits_allocator_test_setup {
1574 vm_offset_t zbats_base;
1575 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1576 } zba_test_info;
1577
1578 static struct zone_bits_allocator_header *
1579 zba_base_header(void)
1580 {
1581 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1582 }
1583
1584 static kern_return_t
1585 zba_populate(uint32_t n, bool with_extra __unused)
1586 {
1587 vm_address_t base = zba_test_info.zbats_base;
1588 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1589
1590 return KERN_SUCCESS;
1591 }
1592 #else
1593 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1594 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1595
1596 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1597 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1598
1599 static struct zone_bits_allocator_header *
1600 zba_base_header(void)
1601 {
1602 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1603 }
1604
1605 static void
1606 zba_lock(void)
1607 {
1608 lck_mtx_lock(&zba_mtx);
1609 }
1610
1611 static void
1612 zba_unlock(void)
1613 {
1614 lck_mtx_unlock(&zba_mtx);
1615 }
1616
1617 __abortlike
1618 static void
1619 zba_memory_exhausted(void)
1620 {
1621 uint64_t zsize = 0;
1622 zone_t z = zone_find_largest(&zsize);
1623 panic("zba_populate: out of bitmap space, "
1624 "likely due to memory leak in zone [%s%s] "
1625 "(%u%c, %d elements allocated)",
1626 zone_heap_name(z), zone_name(z),
1627 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1628 zone_count_allocated(z));
1629 }
1630
1631
1632 static kern_return_t
1633 zba_populate(uint32_t n, bool with_extra)
1634 {
1635 vm_size_t bits_size = ZBA_CHUNK_SIZE;
1636 vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1637 vm_address_t bits_addr;
1638 vm_address_t xtra_addr;
1639 kern_return_t kr;
1640
1641 bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1642 xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1643
1644 kr = kernel_memory_populate(bits_addr, bits_size,
1645 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1646 VM_KERN_MEMORY_OSFMK);
1647 if (kr != KERN_SUCCESS) {
1648 return kr;
1649 }
1650
1651
1652 if (with_extra) {
1653 kr = kernel_memory_populate(xtra_addr, xtra_size,
1654 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1655 VM_KERN_MEMORY_OSFMK);
1656 if (kr != KERN_SUCCESS) {
1657 kernel_memory_depopulate(bits_addr, bits_size,
1658 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1659 VM_KERN_MEMORY_OSFMK);
1660 }
1661 }
1662
1663 return kr;
1664 }
1665 #endif
1666
1667 __pure2
1668 static struct zone_bits_allocator_meta *
1669 zba_meta(void)
1670 {
1671 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1672 }
1673
1674 __pure2
1675 static uint64_t *
1676 zba_slot_base(void)
1677 {
1678 return (uint64_t *)zba_base_header();
1679 }
1680
1681 __pure2
1682 static struct zone_bits_head *
1683 zba_head(uint32_t order, bool with_extra)
1684 {
1685 if (with_extra) {
1686 return &zba_meta()->zbam_lists_with_extra[order];
1687 } else {
1688 return &zba_meta()->zbam_lists[order];
1689 }
1690 }
1691
1692 __pure2
1693 static uint32_t
1694 zba_head_index(struct zone_bits_head *hd)
1695 {
1696 return (uint32_t)((uint64_t *)hd - zba_slot_base());
1697 }
1698
1699 __pure2
1700 static struct zone_bits_chain *
1701 zba_chain_for_index(uint32_t index)
1702 {
1703 return (struct zone_bits_chain *)(zba_slot_base() + index);
1704 }
1705
1706 __pure2
1707 static uint32_t
1708 zba_chain_to_index(const struct zone_bits_chain *zbc)
1709 {
1710 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1711 }
1712
1713 __abortlike
1714 static void
1715 zba_head_corruption_panic(uint32_t order, bool with_extra)
1716 {
1717 panic("zone bits allocator head[%d:%d:%p] is corrupt",
1718 order, with_extra, zba_head(order, with_extra));
1719 }
1720
1721 __abortlike
1722 static void
1723 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1724 {
1725 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1726 }
1727
1728 static void
1729 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1730 {
1731 struct zone_bits_head *hd = zba_head(order, with_extra);
1732 uint32_t hd_index = zba_head_index(hd);
1733 uint32_t index = zba_chain_to_index(zbc);
1734 struct zone_bits_chain *next;
1735
1736 if (hd->zbh_next) {
1737 next = zba_chain_for_index(hd->zbh_next);
1738 if (next->zbc_prev != hd_index) {
1739 zba_head_corruption_panic(order, with_extra);
1740 }
1741 next->zbc_prev = index;
1742 }
1743 zbc->zbc_next = hd->zbh_next;
1744 zbc->zbc_prev = hd_index;
1745 hd->zbh_next = index;
1746 }
1747
1748 static void
1749 zba_remove_block(struct zone_bits_chain *zbc)
1750 {
1751 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1752 uint32_t index = zba_chain_to_index(zbc);
1753
1754 if (prev->zbc_next != index) {
1755 zba_chain_corruption_panic(prev, zbc);
1756 }
1757 if ((prev->zbc_next = zbc->zbc_next)) {
1758 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1759 if (next->zbc_prev != index) {
1760 zba_chain_corruption_panic(zbc, next);
1761 }
1762 next->zbc_prev = zbc->zbc_prev;
1763 }
1764 }
1765
1766 static vm_address_t
1767 zba_try_pop_block(uint32_t order, bool with_extra)
1768 {
1769 struct zone_bits_head *hd = zba_head(order, with_extra);
1770 struct zone_bits_chain *zbc;
1771
1772 if (hd->zbh_next == 0) {
1773 return 0;
1774 }
1775
1776 zbc = zba_chain_for_index(hd->zbh_next);
1777 zba_remove_block(zbc);
1778 return (vm_address_t)zbc;
1779 }
1780
1781 static struct zone_bits_allocator_header *
1782 zba_header(vm_offset_t addr)
1783 {
1784 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1785 return (struct zone_bits_allocator_header *)addr;
1786 }
1787
1788 static size_t
1789 zba_node_parent(size_t node)
1790 {
1791 return (node - 1) / 2;
1792 }
1793
1794 static size_t
1795 zba_node_left_child(size_t node)
1796 {
1797 return node * 2 + 1;
1798 }
1799
1800 static size_t
1801 zba_node_buddy(size_t node)
1802 {
1803 return ((node - 1) ^ 1) + 1;
1804 }
1805
1806 static size_t
1807 zba_node(vm_offset_t addr, uint32_t order)
1808 {
1809 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1810 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1811 }
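/*
 * Illustrative note (assuming ZBA_MAX_ORDER == 10, i.e. 16 KiB chunks):
 * the buddy tree is implicit; an order-o block starting at granule
 * offset `offs` maps to node (offs >> o) + (1 << (ZBA_MAX_ORDER - o + 1)) - 1.
 * The two order-10 halves of a chunk are nodes 1 and 2 (children of
 * node 0), and the order-9 block at granule 512 is (512 >> 9) + 3 = 4,
 * whose parent (4 - 1) / 2 = 1 is the first order-10 half. Split bits
 * are only ever flipped on parent nodes, so the ZBA_SLOTS bits in
 * zbah_bits[] are sufficient.
 */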
1812
1813 static struct zone_bits_chain *
1814 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1815 {
1816 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1817 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1818 }
1819
1820 static void
1821 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1822 {
1823 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1824 }
1825
1826 static bool
1827 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1828 {
1829 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1830 }
1831
1832 static void
1833 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1834 {
1835 struct zone_bits_allocator_header *zbah = zba_header(addr);
1836 struct zone_bits_chain *zbc;
1837 size_t node = zba_node(addr, order);
1838
1839 while (node) {
1840 size_t parent = zba_node_parent(node);
1841
1842 zba_node_flip_split(zbah, parent);
1843 if (zba_node_is_split(zbah, parent)) {
1844 break;
1845 }
1846
1847 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1848 zba_remove_block(zbc);
1849 order++;
1850 node = parent;
1851 }
1852
1853 zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1854 }
1855
1856 static vm_size_t
1857 zba_chunk_header_size(uint32_t n)
1858 {
1859 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1860 if (n == 0) {
1861 hdr_size += sizeof(struct zone_bits_allocator_meta);
1862 }
1863 return hdr_size;
1864 }
1865
1866 static void
1867 zba_init_chunk(uint32_t n, bool with_extra)
1868 {
1869 vm_size_t hdr_size = zba_chunk_header_size(n);
1870 vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1871 struct zone_bits_allocator_header *zbah = zba_header(page);
1872 vm_size_t size = ZBA_CHUNK_SIZE;
1873 size_t node;
1874
1875 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1876 if (size < hdr_size + (ZBA_GRANULE << o)) {
1877 continue;
1878 }
1879 size -= ZBA_GRANULE << o;
1880 node = zba_node(page + size, o);
1881 zba_node_flip_split(zbah, zba_node_parent(node));
1882 zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1883 }
1884 }
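/*
 * Illustrative sketch of the carving above: orders are walked from
 * largest to smallest and, whenever a block of that order still fits
 * after the chunk header, it is carved off the end of the chunk and
 * pushed on the matching freelist. The leftover space at the start of
 * the chunk (the header, plus the allocator meta for chunk 0) is never
 * handed out.
 */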
1885
1886 __attribute__((noinline))
1887 static void
1888 zba_grow(bool with_extra)
1889 {
1890 struct zone_bits_allocator_meta *meta = zba_meta();
1891 kern_return_t kr = KERN_SUCCESS;
1892 uint32_t chunk;
1893
1894 #if !ZALLOC_TEST
1895 if (meta->zbam_left >= meta->zbam_right) {
1896 zba_memory_exhausted();
1897 }
1898 #endif
1899
1900 if (with_extra) {
1901 chunk = meta->zbam_right - 1;
1902 } else {
1903 chunk = meta->zbam_left;
1904 }
1905
1906 kr = zba_populate(chunk, with_extra);
1907 if (kr == KERN_SUCCESS) {
1908 if (with_extra) {
1909 meta->zbam_right -= 1;
1910 } else {
1911 meta->zbam_left += 1;
1912 }
1913
1914 zba_init_chunk(chunk, with_extra);
1915 #if !ZALLOC_TEST
1916 } else {
1917 /*
1918 * zba_populate() has to be allowed to fail to populate:
1919 * because we are under a global lock, the VM_PAGE_WAIT()
1920 * must be done outside of it.
1921 */
1922 assert(kr == KERN_RESOURCE_SHORTAGE);
1923 zba_unlock();
1924 VM_PAGE_WAIT();
1925 zba_lock();
1926 #endif
1927 }
1928 }
1929
1930 static vm_offset_t
1931 zba_alloc(uint32_t order, bool with_extra)
1932 {
1933 struct zone_bits_allocator_header *zbah;
1934 uint32_t cur = order;
1935 vm_address_t addr;
1936 size_t node;
1937
1938 while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1939 if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1940 zba_grow(with_extra);
1941 cur = order;
1942 }
1943 }
1944
1945 zbah = zba_header(addr);
1946 node = zba_node(addr, cur);
1947 zba_node_flip_split(zbah, zba_node_parent(node));
1948 while (cur > order) {
1949 cur--;
1950 zba_node_flip_split(zbah, node);
1951 node = zba_node_left_child(node);
1952 zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1953 cur, with_extra);
1954 }
1955
1956 return addr;
1957 }
1958
1959 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
1960 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
1961 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
1962 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
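/*
 * Worked example for the mask macros (illustrative): with type uint32_t
 * and n = 5, zba_map_bit() is 0x20, zba_map_mask_lt() is 0x0000001f
 * (bits 0..4, i.e. indices below 5 in that word) and zba_map_mask_ge()
 * is 0xffffffe0 (bits 5..31, indices 5 and up).
 */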
1963
1964 #if !ZALLOC_TEST
1965 #if VM_TAG_SIZECLASSES
1966
1967 static void *
1968 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1969 {
1970 vm_offset_t base = zone_info.zi_xtra_range.min_address;
1971 vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1972
1973 return (void *)(base + ((offs + idx) << zba_xtra_shift));
1974 }
1975
1976 #endif /* VM_TAG_SIZECLASSES */
1977
1978 static uint32_t
1979 zba_bits_ref_order(uint32_t bref)
1980 {
1981 return bref >> ZBA_ORDER_SHIFT;
1982 }
1983
1984 static bitmap_t *
1985 zba_bits_ref_ptr(uint32_t bref)
1986 {
1987 return zba_slot_base() + (bref & ZBA_PTR_MASK);
1988 }
1989
1990 static vm_offset_t
1991 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
1992 zalloc_flags_t flags, vm_offset_t eidx)
1993 {
1994 size_t i = eidx / 32;
1995 uint32_t map;
1996
1997 if (eidx % 32) {
1998 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
1999 if (map) {
2000 eidx = __builtin_ctz(map);
2001 meta[i].zm_bitmap ^= 1u << eidx;
2002 return i * 32 + eidx;
2003 }
2004 i++;
2005 }
2006
2007 uint32_t chunk_len = meta->zm_chunk_len;
2008 if (flags & Z_PCPU) {
2009 chunk_len = zpercpu_count();
2010 }
2011 for (int j = 0; j < chunk_len; j++, i++) {
2012 if (i >= chunk_len) {
2013 i = 0;
2014 }
2015 if (__probable(map = meta[i].zm_bitmap)) {
2016 meta[i].zm_bitmap &= map - 1;
2017 return i * 32 + __builtin_ctz(map);
2018 }
2019 }
2020
2021 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2022 }
2023
2024 static vm_offset_t
2025 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2026 vm_offset_t eidx)
2027 {
2028 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2029 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2030 size_t i = eidx / 64;
2031 uint64_t map;
2032
2033 if (eidx % 64) {
2034 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2035 if (map) {
2036 eidx = __builtin_ctzll(map);
2037 bits[i] ^= 1ull << eidx;
2038 return i * 64 + eidx;
2039 }
2040 i++;
2041 }
2042
2043 for (int j = 0; j < bits_size; i++, j++) {
2044 if (i >= bits_size) {
2045 i = 0;
2046 }
2047 if (__probable(map = bits[i])) {
2048 bits[i] &= map - 1;
2049 return i * 64 + __builtin_ctzll(map);
2050 }
2051 }
2052
2053 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2054 }
2055
2056 /*!
2057 * @function zone_meta_find_and_clear_bit
2058 *
2059 * @brief
2060 * The core of the bitmap allocator: find a bit set in the bitmaps.
2061 *
2062 * @discussion
2063 * This method will round-robin through the available free elements,
2064 * with a per-CPU memory of the last allocated element index.
2065 *
2066 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2067 * double-free bugs way too practical.
2068 *
2069 * @param zone The zone we're allocating from.
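 * @param zs The zone's per-CPU statistics, which hold the round-robin cursor (@c zs_alloc_rr).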
2070 * @param meta The main metadata for the chunk being allocated from.
2071 * @param flags the alloc flags (for @c Z_PCPU).
2072 */
2073 static vm_offset_t
2074 zone_meta_find_and_clear_bit(
2075 zone_t zone,
2076 zone_stats_t zs,
2077 struct zone_page_metadata *meta,
2078 zalloc_flags_t flags)
2079 {
2080 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2081
2082 if (meta->zm_inline_bitmap) {
2083 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2084 } else {
2085 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2086 }
2087 zs->zs_alloc_rr = (uint16_t)eidx;
2088 return eidx;
2089 }
2090
2091 /*!
2092 * @function zone_meta_bits_init_inline
2093 *
2094 * @brief
2095 * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2096 *
2097 * @param meta The main metadata for the initialized chunk.
2098 * @param count The number of elements the chunk can hold
2099 * (which might be partial for partially populated chunks).
2100 */
2101 static void
2102 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2103 {
2104 /*
2105 * We're called with the metadata zm_bitmap fields already zeroed out.
2106 */
2107 for (size_t i = 0; i < count / 32; i++) {
2108 meta[i].zm_bitmap = ~0u;
2109 }
2110 if (count % 32) {
2111 meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2112 }
2113 }
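/*
 * For example (illustrative), a chunk holding 40 elements ends up with
 * meta[0].zm_bitmap == 0xffffffff (elements 0..31 free) and
 * meta[1].zm_bitmap == 0x000000ff (elements 32..39 free).
 */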
2114
2115 /*!
2116 * @function zone_meta_bits_alloc_init
2117 *
2118 * @brief
2119 * Allocates a zm_bitmap field for a newly assigned chunk.
2120 *
2121 * @param count The number of elements the chunk can hold
2122 * (which might be partial for partially populated chunks).
2123 * @param nbits The maximum number of bits that will be used.
2124 * @param with_extra Whether "VM Tracking" metadata needs to be allocated.
2125 */
2126 static uint32_t
2127 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2128 {
2129 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2130 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2131
2132 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2133 uint64_t *bits;
2134 size_t i = 0;
2135
2136 assert(order <= ZBA_MAX_ALLOC_ORDER);
2137 assert(count <= ZBA_GRANULE_BITS << order);
2138
2139 zba_lock();
2140 bits = (uint64_t *)zba_alloc(order, with_extra);
2141 zba_unlock();
2142
2143 while (i < count / 64) {
2144 bits[i++] = ~0ull;
2145 }
2146 if (count % 64) {
2147 bits[i++] = zba_map_mask_lt(uint64_t, count);
2148 }
2149 while (i < 1u << order) {
2150 bits[i++] = 0;
2151 }
2152
2153 return (uint32_t)(bits - zba_slot_base()) +
2154 (order << ZBA_ORDER_SHIFT) +
2155 (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2156 }
2157
2158 /*!
2159 * @function zone_meta_bits_merge
2160 *
2161 * @brief
2162 * Adds elements <code>[start, end)</code> to a chunk being extended.
2163 *
2164 * @param meta The main metadata for the extended chunk.
2165 * @param start The index of the first element to add to the chunk.
2166 * @param end The index one past the last element to add (exclusive).
2167 */
2168 static void
2169 zone_meta_bits_merge(struct zone_page_metadata *meta,
2170 uint32_t start, uint32_t end)
2171 {
2172 if (meta->zm_inline_bitmap) {
2173 while (start < end) {
2174 size_t s_i = start / 32;
2175 size_t s_e = end / 32;
2176
2177 if (s_i == s_e) {
2178 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2179 zba_map_mask_ge(uint32_t, start);
2180 break;
2181 }
2182
2183 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2184 start += 32 - (start % 32);
2185 }
2186 } else {
2187 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2188
2189 while (start < end) {
2190 size_t s_i = start / 64;
2191 size_t s_e = end / 64;
2192
2193 if (s_i == s_e) {
2194 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2195 zba_map_mask_ge(uint64_t, start);
2196 break;
2197 }
2198 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2199 start += 64 - (start % 64);
2200 }
2201 }
2202 }
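/*
 * For example (illustrative), merging [30, 40) into an inline bitmap
 * first sets bits 30..31 of meta[0].zm_bitmap, advances start to 32,
 * then sets bits 0..7 of meta[1].zm_bitmap (elements 32..39) and stops.
 */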
2203
2204 /*!
2205 * @function zone_bits_free
2206 *
2207 * @brief
2208 * Frees a bitmap to the zone bitmap allocator.
2209 *
2210 * @param bref
2211 * A bitmap reference set by @c zone_meta_bits_alloc_init() in a @c zm_bitmap field.
2212 */
2213 static void
2214 zone_bits_free(uint32_t bref)
2215 {
2216 zba_lock();
2217 zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2218 zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2219 zba_unlock();
2220 }
2221
2222 /*!
2223 * @function zone_meta_is_free
2224 *
2225 * @brief
2226 * Returns whether a given element appears free.
2227 */
2228 static bool
2229 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2230 {
2231 if (meta->zm_inline_bitmap) {
2232 uint32_t bit = zba_map_bit(uint32_t, eidx);
2233 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2234 } else {
2235 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2236 uint64_t bit = zba_map_bit(uint64_t, eidx);
2237 return bits[zba_map_index(uint64_t, eidx)] & bit;
2238 }
2239 }
2240
2241 /*!
2242 * @function zone_meta_mark_free
2243 *
2244 * @brief
2245 * Marks an element as free and returns whether it was marked as used.
2246 */
2247 static bool
2248 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2249 {
2250 if (meta->zm_inline_bitmap) {
2251 uint32_t bit = zba_map_bit(uint32_t, eidx);
2252 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2253 return false;
2254 }
2255 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2256 } else {
2257 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2258 uint64_t bit = zba_map_bit(uint64_t, eidx);
2259 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2260 return false;
2261 }
2262 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2263 }
2264 return true;
2265 }
2266
2267 #if VM_TAG_SIZECLASSES
2268
2269 __startup_func
2270 void
2271 __zone_site_register(vm_allocation_site_t *site)
2272 {
2273 if (zone_tagging_on) {
2274 vm_tag_alloc(site);
2275 }
2276 }
2277
2278 uint16_t
2279 zone_index_from_tag_index(uint32_t sizeclass_idx)
2280 {
2281 return zone_tags_sizeclasses[sizeclass_idx];
2282 }
2283
2284 #endif /* VM_TAG_SIZECLASSES */
2285 #endif /* !ZALLOC_TEST */
2286 /*! @} */
2287 #pragma mark zalloc helpers
2288 #if !ZALLOC_TEST
2289
2290 static inline void *
2291 zstack_tbi_fix(vm_offset_t elem)
2292 {
2293 #if KASAN_TBI
2294 elem = kasan_tbi_fix_address_tag(elem);
2295 #endif
2296 return (void *)elem;
2297 }
2298
2299 static inline vm_offset_t
2300 zstack_tbi_fill(void *addr)
2301 {
2302 vm_offset_t elem = (vm_offset_t)addr;
2303
2304 #if KASAN_TBI
2305 elem = VM_KERNEL_TBI_FILL(elem);
2306 #endif
2307 return elem;
2308 }
2309
2310 __attribute__((always_inline))
2311 static inline void
2312 zstack_push_no_delta(zstack_t *stack, void *addr)
2313 {
2314 vm_offset_t elem = zstack_tbi_fill(addr);
2315
2316 *(vm_offset_t *)addr = stack->z_head - elem;
2317 stack->z_head = elem;
2318 }
2319
2320 __attribute__((always_inline))
2321 void
2322 zstack_push(zstack_t *stack, void *addr)
2323 {
2324 zstack_push_no_delta(stack, addr);
2325 stack->z_count++;
2326 }
2327
2328 __attribute__((always_inline))
2329 static inline void *
2330 zstack_pop_no_delta(zstack_t *stack)
2331 {
2332 void *addr = zstack_tbi_fix(stack->z_head);
2333
2334 stack->z_head += *(vm_offset_t *)addr;
2335 *(vm_offset_t *)addr = 0;
2336
2337 return addr;
2338 }
2339
2340 __attribute__((always_inline))
2341 void *
2342 zstack_pop(zstack_t *stack)
2343 {
2344 stack->z_count--;
2345 return zstack_pop_no_delta(stack);
2346 }
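/*
 * Illustrative note: the stack links elements with deltas rather than
 * raw pointers. Pushing element E onto a stack whose head is H stores
 * (H - E) inside E and makes E the new head; popping reads the stored
 * delta D from the head E and restores the previous head as E + D.
 * An empty stack has z_head == 0, so the first element pushed stores
 * (0 - E) and popping it yields a zero head again.
 */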
2347
2348 static inline void
2349 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2350 {
2351 uint32_t ticket;
2352
2353 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2354 &ticket, &zone_locks_grp))) {
2355 return;
2356 }
2357
2358 hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2359
2360 /*
2361 * If zone caching has been disabled due to memory pressure,
2362 * then recording contention is not useful; give the system
2363 * time to recover.
2364 */
2365 if (__probable(!zone_caching_disabled)) {
2366 zone->z_recirc_cont_cur++;
2367 }
2368 }
2369
2370 static inline void
2371 zone_recirc_lock_nopreempt(zone_t zone)
2372 {
2373 hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2374 }
2375
2376 static inline void
2377 zone_recirc_unlock_nopreempt(zone_t zone)
2378 {
2379 hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2380 }
2381
2382 static inline void
2383 zone_lock_nopreempt_check_contention(zone_t zone)
2384 {
2385 uint32_t ticket;
2386 #if KASAN_FAKESTACK
2387 spl_t s = 0;
2388 if (zone->z_kasan_fakestacks) {
2389 s = splsched();
2390 }
2391 #endif /* KASAN_FAKESTACK */
2392
2393 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2394 &zone_locks_grp))) {
2395 #if KASAN_FAKESTACK
2396 zone->z_kasan_spl = s;
2397 #endif /* KASAN_FAKESTACK */
2398 return;
2399 }
2400
2401 hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2402 #if KASAN_FAKESTACK
2403 zone->z_kasan_spl = s;
2404 #endif /* KASAN_FAKESTACK */
2405
2406 /*
2407 * If zone caching has been disabled due to memory pressure,
2408 * then recording contention is not useful; give the system
2409 * time to recover.
2410 */
2411 if (__probable(!zone_caching_disabled && !zone->z_pcpu_cache)) {
2412 zone->z_recirc_cont_cur++;
2413 }
2414 }
2415
2416 static inline void
2417 zone_lock_nopreempt(zone_t zone)
2418 {
2419 #if KASAN_FAKESTACK
2420 spl_t s = 0;
2421 if (zone->z_kasan_fakestacks) {
2422 s = splsched();
2423 }
2424 #endif /* KASAN_FAKESTACK */
2425 hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2426 #if KASAN_FAKESTACK
2427 zone->z_kasan_spl = s;
2428 #endif /* KASAN_FAKESTACK */
2429 }
2430
2431 static inline void
2432 zone_unlock_nopreempt(zone_t zone)
2433 {
2434 #if KASAN_FAKESTACK
2435 spl_t s = zone->z_kasan_spl;
2436 zone->z_kasan_spl = 0;
2437 #endif /* KASAN_FAKESTACK */
2438 hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2439 #if KASAN_FAKESTACK
2440 if (zone->z_kasan_fakestacks) {
2441 splx(s);
2442 }
2443 #endif /* KASAN_FAKESTACK */
2444 }
2445
2446 static inline void
2447 zone_depot_lock_nopreempt(zone_cache_t zc)
2448 {
2449 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2450 }
2451
2452 static inline void
2453 zone_depot_unlock_nopreempt(zone_cache_t zc)
2454 {
2455 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2456 }
2457
2458 static inline void
2459 zone_depot_lock(zone_cache_t zc)
2460 {
2461 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2462 }
2463
2464 static inline void
2465 zone_depot_unlock(zone_cache_t zc)
2466 {
2467 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2468 }
2469
2470 zone_t
2471 zone_by_id(size_t zid)
2472 {
2473 return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2474 }
2475
2476 static inline bool
2477 zone_supports_vm(zone_t z)
2478 {
2479 /*
2480 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2481 * to overcommit because they're used to reclaim memory
2482 * (VM support).
2483 */
2484 return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2485 z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2486 }
2487
2488 const char *
2489 zone_name(zone_t z)
2490 {
2491 return z->z_name;
2492 }
2493
2494 const char *
2495 zone_heap_name(zone_t z)
2496 {
2497 zone_security_flags_t zsflags = zone_security_config(z);
2498 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2499 return kalloc_heap_names[zsflags.z_kheap_id];
2500 }
2501 return "invalid";
2502 }
2503
2504 static uint32_t
2505 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2506 {
2507 vm_size_t elem_count, chunks;
2508
2509 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2510 zone_elem_outer_size(z);
2511 chunks = (max_elems + elem_count - 1) / elem_count;
2512
2513 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2514 }
2515
2516 static inline vm_size_t
2517 zone_submaps_approx_size(void)
2518 {
2519 vm_size_t size = 0;
2520
2521 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2522 if (zone_submaps[idx] != VM_MAP_NULL) {
2523 size += zone_submaps[idx]->size;
2524 }
2525 }
2526
2527 return size;
2528 }
2529
2530 static inline void
2531 zone_depot_init(struct zone_depot *zd)
2532 {
2533 *zd = (struct zone_depot){
2534 .zd_tail = &zd->zd_head,
2535 };
2536 }
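/*
 * Layout note (derived from the helpers below): a depot is a single
 * singly-linked list of magazines with all full magazines first,
 * followed by all empty ones. zd_head points to the first full
 * magazine (or the first empty one when zd_full == 0), and zd_tail
 * points to the zm_next link of the last full magazine (or back to
 * zd_head when there is none), which is where the empty sub-list
 * starts.
 */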
2537
2538 static inline void
2539 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2540 {
2541 if (zd->zd_full++ == 0) {
2542 zd->zd_tail = &mag->zm_next;
2543 }
2544 mag->zm_next = zd->zd_head;
2545 zd->zd_head = mag;
2546 }
2547
2548 static inline void
2549 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2550 {
2551 zd->zd_full++;
2552 mag->zm_next = *zd->zd_tail;
2553 *zd->zd_tail = mag;
2554 zd->zd_tail = &mag->zm_next;
2555 }
2556
2557 static inline void
2558 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2559 {
2560 zd->zd_empty++;
2561 mag->zm_next = *zd->zd_tail;
2562 *zd->zd_tail = mag;
2563 }
2564
2565 static inline zone_magazine_t
2566 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2567 {
2568 zone_magazine_t mag = zd->zd_head;
2569
2570 assert(zd->zd_full);
2571
2572 zd->zd_full--;
2573 if (z && z->z_recirc_full_min > zd->zd_full) {
2574 z->z_recirc_full_min = zd->zd_full;
2575 }
2576 zd->zd_head = mag->zm_next;
2577 if (zd->zd_full == 0) {
2578 zd->zd_tail = &zd->zd_head;
2579 }
2580
2581 mag->zm_next = NULL;
2582 return mag;
2583 }
2584
2585 static inline zone_magazine_t
2586 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2587 {
2588 zone_magazine_t mag = *zd->zd_tail;
2589
2590 assert(zd->zd_empty);
2591
2592 zd->zd_empty--;
2593 if (z && z->z_recirc_empty_min > zd->zd_empty) {
2594 z->z_recirc_empty_min = zd->zd_empty;
2595 }
2596 *zd->zd_tail = mag->zm_next;
2597
2598 mag->zm_next = NULL;
2599 return mag;
2600 }
2601
2602 static inline smr_seq_t
2603 zone_depot_move_full(
2604 struct zone_depot *dst,
2605 struct zone_depot *src,
2606 uint32_t n,
2607 zone_t z)
2608 {
2609 zone_magazine_t head, last;
2610
2611 assert(n);
2612 assert(src->zd_full >= n);
2613
2614 src->zd_full -= n;
2615 if (z && z->z_recirc_full_min > src->zd_full) {
2616 z->z_recirc_full_min = src->zd_full;
2617 }
2618 head = last = src->zd_head;
2619 for (uint32_t i = n; i-- > 1;) {
2620 last = last->zm_next;
2621 }
2622
2623 src->zd_head = last->zm_next;
2624 if (src->zd_full == 0) {
2625 src->zd_tail = &src->zd_head;
2626 }
2627
2628 if (z && zone_security_array[zone_index(z)].z_lifo) {
2629 if (dst->zd_full == 0) {
2630 dst->zd_tail = &last->zm_next;
2631 }
2632 last->zm_next = dst->zd_head;
2633 dst->zd_head = head;
2634 } else {
2635 last->zm_next = *dst->zd_tail;
2636 *dst->zd_tail = head;
2637 dst->zd_tail = &last->zm_next;
2638 }
2639 dst->zd_full += n;
2640
2641 return last->zm_seq;
2642 }
2643
2644 static inline void
2645 zone_depot_move_empty(
2646 struct zone_depot *dst,
2647 struct zone_depot *src,
2648 uint32_t n,
2649 zone_t z)
2650 {
2651 zone_magazine_t head, last;
2652
2653 assert(n);
2654 assert(src->zd_empty >= n);
2655
2656 src->zd_empty -= n;
2657 if (z && z->z_recirc_empty_min > src->zd_empty) {
2658 z->z_recirc_empty_min = src->zd_empty;
2659 }
2660 head = last = *src->zd_tail;
2661 for (uint32_t i = n; i-- > 1;) {
2662 last = last->zm_next;
2663 }
2664
2665 *src->zd_tail = last->zm_next;
2666
2667 dst->zd_empty += n;
2668 last->zm_next = *dst->zd_tail;
2669 *dst->zd_tail = head;
2670 }
2671
2672 static inline bool
2673 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2674 {
2675 if (depot->zd_full == 0) {
2676 return false;
2677 }
2678
2679 return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2680 }
2681
2682 static void
2683 zone_cache_swap_magazines(zone_cache_t cache)
2684 {
2685 uint16_t count_a = cache->zc_alloc_cur;
2686 uint16_t count_f = cache->zc_free_cur;
2687 vm_offset_t *elems_a = cache->zc_alloc_elems;
2688 vm_offset_t *elems_f = cache->zc_free_elems;
2689
2690 z_debug_assert(count_a <= zc_mag_size());
2691 z_debug_assert(count_f <= zc_mag_size());
2692
2693 cache->zc_alloc_cur = count_f;
2694 cache->zc_free_cur = count_a;
2695 cache->zc_alloc_elems = elems_f;
2696 cache->zc_free_elems = elems_a;
2697 }
2698
2699 __pure2
2700 static smr_t
2701 zone_cache_smr(zone_cache_t cache)
2702 {
2703 return cache->zc_smr;
2704 }
2705
2706 /*!
2707 * @function zone_magazine_replace
2708 *
2709 * @brief
2710 * Unload a magazine and load a new one instead.
2711 */
2712 static zone_magazine_t
2713 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2714 {
2715 zone_magazine_t old;
2716 vm_offset_t **elems;
2717
2718 mag->zm_seq = SMR_SEQ_INVALID;
2719
2720 if (empty) {
2721 elems = &zc->zc_free_elems;
2722 zc->zc_free_cur = 0;
2723 } else {
2724 elems = &zc->zc_alloc_elems;
2725 zc->zc_alloc_cur = zc_mag_size();
2726 }
2727 old = (zone_magazine_t)((uintptr_t)*elems -
2728 offsetof(struct zone_magazine, zm_elems));
2729 *elems = mag->zm_elems;
2730
2731 return old;
2732 }
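/*
 * Note: zc_alloc_elems/zc_free_elems always point at the zm_elems array
 * embedded in some magazine, so subtracting offsetof(struct
 * zone_magazine, zm_elems) recovers the magazine that was loaded in the
 * slot being replaced.
 */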
2733
2734 static zone_magazine_t
2735 zone_magazine_alloc(zalloc_flags_t flags)
2736 {
2737 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2738 }
2739
2740 static void
2741 zone_magazine_free(zone_magazine_t mag)
2742 {
2743 (zfree)(zc_magazine_zone, mag);
2744 }
2745
2746 static void
2747 zone_magazine_free_list(struct zone_depot *zd)
2748 {
2749 zone_magazine_t tmp, mag = *zd->zd_tail;
2750
2751 while (mag) {
2752 tmp = mag->zm_next;
2753 zone_magazine_free(mag);
2754 mag = tmp;
2755 }
2756
2757 *zd->zd_tail = NULL;
2758 zd->zd_empty = 0;
2759 }
2760
2761 void
2762 zone_enable_caching(zone_t zone)
2763 {
2764 size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2765 zone_cache_t caches;
2766 size_t depot_limit;
2767
2768 depot_limit = zc_pcpu_max() / size_per_mag;
2769 zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2770
2771 caches = zalloc_percpu_permanent_type(struct zone_cache);
2772 zpercpu_foreach(zc, caches) {
2773 zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2774 zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2775 zone_depot_init(&zc->zc_depot);
2776 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2777 }
2778
2779 zone_lock(zone);
2780 assert(zone->z_pcpu_cache == NULL);
2781 zone->z_pcpu_cache = caches;
2782 zone->z_recirc_cont_cur = 0;
2783 zone->z_recirc_cont_wma = 0;
2784 zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2785 zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2786 zone_unlock(zone);
2787 }
2788
2789 bool
2790 zone_maps_owned(vm_address_t addr, vm_size_t size)
2791 {
2792 return from_zone_map(addr, size);
2793 }
2794
2795 #if KASAN_LIGHT
2796 bool
2797 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2798 {
2799 return from_zone_map(addr, size) ||
2800 mach_vm_range_size(&zone_info.zi_map_range) == 0;
2801 }
2802 #endif /* KASAN_LIGHT */
2803
2804 void
2805 zone_map_sizes(
2806 vm_map_size_t *psize,
2807 vm_map_size_t *pfree,
2808 vm_map_size_t *plargest_free)
2809 {
2810 vm_map_size_t size, free, largest;
2811
2812 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2813
2814 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2815 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2816 *psize += size;
2817 *pfree += free;
2818 *plargest_free = MAX(*plargest_free, largest);
2819 }
2820 }
2821
2822 __attribute__((always_inline))
2823 vm_map_t
2824 zone_submap(zone_security_flags_t zsflags)
2825 {
2826 return zone_submaps[zsflags.z_submap_idx];
2827 }
2828
2829 unsigned
2830 zpercpu_count(void)
2831 {
2832 return zpercpu_early_count;
2833 }
2834
2835 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2836 /*
2837 * Returns a random number of a given bit-width.
2838 *
2839 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2840 *
2841 * This uses Intel's rdrand because random() uses FP registers
2842 * which can cause FP faults and allocations, neither of which
2843 * is safe from within zalloc itself due to reentrancy problems.
2844 *
2845 * For pre-rdrand machines (which we no longer support),
2846 * we use a bad biased random generator that doesn't use FP.
2847 * Such HW is no longer supported, but VM of newer OSes on older
2848 * bare metal is made to limp along (with reduced security) this way.
2849 */
2850 static uint64_t
2851 zalloc_random_mask64(uint32_t bits)
2852 {
2853 uint64_t mask = ~0ull >> (64 - bits);
2854 uint64_t v;
2855
2856 #if __x86_64__
2857 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2858 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2859 v &= mask;
2860 } else {
2861 disable_preemption();
2862 int cpu = cpu_number();
2863 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2864 zone_bool_gen[cpu].zbg_entropy,
2865 ZONE_ENTROPY_CNT, bits);
2866 enable_preemption();
2867 }
2868 #else
2869 v = early_random() & mask;
2870 #endif
2871
2872 return v;
2873 }
2874
2875 /*
2876 * Returns a random number within [bound_min, bound_max)
2877 *
2878 * This isn't _exactly_ uniform, but the skew is small enough
2879 * not to matter for the consumers of this interface.
2880 *
2881 * Values within [bound_min, 2^64 % (bound_max - bound_min))
2882 * will be returned (bound_max - bound_min) / 2^64 more often
2883 * than values within [2^64 % (bound_max - bound_min), bound_max).
2884 */
2885 static uint32_t
2886 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2887 {
2888 uint64_t delta = bound_max - bound_min;
2889
2890 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2891 }
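/*
 * For instance (illustrative), with bound_min = 0 and bound_max = 3,
 * 2^64 % 3 == 1, so the value 0 is returned exactly one more time than
 * 1 or 2 over the 2^64 possible raw draws: a bias of about 2^-64,
 * negligible for the randomized placement decisions made in this file.
 */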
2892
2893 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2894 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2895 /*
2896 * Track all kalloc zones of specified size for zlog name
2897 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2898 */
2899 static bool
2900 track_kalloc_zones(zone_t z, const char *logname)
2901 {
2902 const char *prefix;
2903 size_t len;
2904 zone_security_flags_t zsflags = zone_security_config(z);
2905
2906 prefix = "kalloc.type.var.";
2907 len = strlen(prefix);
2908 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2909 strncmp(logname, prefix, len) == 0) {
2910 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2911
2912 return zone_elem_inner_size(z) == sizeclass;
2913 }
2914
2915 prefix = "kalloc.type.";
2916 len = strlen(prefix);
2917 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2918 strncmp(logname, prefix, len) == 0) {
2919 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2920
2921 return zone_elem_inner_size(z) == sizeclass;
2922 }
2923
2924 prefix = "kalloc.";
2925 len = strlen(prefix);
2926 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2927 strncmp(logname, prefix, len) == 0) {
2928 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2929
2930 return zone_elem_inner_size(z) == sizeclass;
2931 }
2932
2933 return false;
2934 }
2935 #endif
2936
2937 int
2938 track_this_zone(const char *zonename, const char *logname)
2939 {
2940 unsigned int len;
2941 const char *zc = zonename;
2942 const char *lc = logname;
2943
2944 /*
2945 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
2946 */
2947
2948 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2949 /*
2950 * If the current characters don't match, check for a space
2951 * in the zone name and a corresponding period in the log name.
2952 * If that's not there, then the strings don't match.
2953 */
2954
2955 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2956 break;
2957 }
2958
2959 /*
2960 * The strings are equal so far. If we're at the end, then it's a match.
2961 */
2962
2963 if (*zc == '\0') {
2964 return TRUE;
2965 }
2966 }
2967
2968 return FALSE;
2969 }
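/*
 * For example (illustrative, hypothetical zone name): a zone named
 * "vm map entries" would be selected by a boot-arg such as
 * zlog1=vm.map.entries, since spaces in the zone name are allowed to
 * match periods in the boot-arg value.
 */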
2970
2971 #if DEBUG || DEVELOPMENT
2972
2973 vm_size_t
2974 zone_element_info(void *addr, vm_tag_t * ptag)
2975 {
2976 vm_size_t size = 0;
2977 vm_tag_t tag = VM_KERN_MEMORY_NONE;
2978 struct zone *src_zone;
2979
2980 if (from_zone_map(addr, sizeof(void *))) {
2981 src_zone = zone_by_id(zone_index_from_ptr(addr));
2982 size = zone_elem_inner_size(src_zone);
2983 #if VM_TAG_SIZECLASSES
2984 if (__improbable(src_zone->z_uses_tags)) {
2985 struct zone_page_metadata *meta;
2986 vm_offset_t eidx;
2987 vm_tag_t *slot;
2988
2989 meta = zone_element_resolve(src_zone,
2990 (vm_offset_t)addr, &eidx);
2991 slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
2992 tag = *slot;
2993 }
2994 #endif /* VM_TAG_SIZECLASSES */
2995 }
2996
2997 *ptag = tag;
2998 return size;
2999 }
3000
3001 #endif /* DEBUG || DEVELOPMENT */
3002 #if KASAN_CLASSIC
3003
3004 vm_size_t
3005 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
3006 {
3007 zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
3008
3009 *zonep = zone;
3010 return zone_elem_inner_size(zone);
3011 }
3012
3013 #endif /* KASAN_CLASSIC */
3014 #endif /* !ZALLOC_TEST */
3015 #pragma mark Zone zeroing and early random
3016 #if !ZALLOC_TEST
3017
3018 /*
3019 * Zone zeroing
3020 *
3021 * All allocations from zones are zeroed on free and are additionally
3022 * checked to still be zero on alloc. The check is
3023 * always on for embedded devices. A perf regression was detected
3024 * on Intel as we can't use the vectorized implementation of
3025 * memcmp_zero_ptr_aligned due to cyclic dependencies between
3026 * initialization and allocation; therefore we only perform the check
3027 * on 20% of the allocations there.
3028 */
3029 #if ZALLOC_ENABLE_ZERO_CHECK
3030 #if defined(__x86_64__)
3031 /*
3032 * Perform zero validation on every 5th allocation
3033 */
3034 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3035 static uint32_t PERCPU_DATA(zzc_decrementer);
3036 #endif /* defined(__x86_64__) */
3037
3038 /*
3039 * Determine if zero validation for allocation should be skipped
3040 */
3041 static bool
3042 zalloc_skip_zero_check(void)
3043 {
3044 #if defined(__x86_64__)
3045 uint32_t *counterp, cnt;
3046
3047 counterp = PERCPU_GET(zzc_decrementer);
3048 cnt = *counterp;
3049 if (__probable(cnt > 0)) {
3050 *counterp = cnt - 1;
3051 return true;
3052 }
3053 *counterp = zzc_rate - 1;
3054 #endif /* defined(__x86_64__) */
3055 return false;
3056 }
3057
3058 __abortlike
3059 static void
3060 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3061 {
3062 uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3063 uint32_t first_offs = ~0u;
3064 uintptr_t first_bits = 0, v;
3065 char buf[1024];
3066 int pos = 0;
3067
3068 buf[0] = '\0';
3069
3070 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3071 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3072 continue;
3073 }
3074 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3075 "%5d: 0x%016lx", o, v);
3076 if (first_offs > o) {
3077 first_offs = o;
3078 first_bits = v;
3079 }
3080 }
3081
3082 (panic)("[%s%s]: element modified after free "
3083 "(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3084 zone_heap_name(z), zone_name(z),
3085 first_offs, first_bits, esize, (void *)elem, buf);
3086 }
3087
3088 static void
3089 zalloc_validate_element(
3090 zone_t zone,
3091 vm_offset_t elem,
3092 vm_size_t size,
3093 zalloc_flags_t flags)
3094 {
3095 if (flags & Z_NOZZC) {
3096 return;
3097 }
3098 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3099 zalloc_uaf_panic(zone, elem, size);
3100 }
3101 if (flags & Z_PCPU) {
3102 for (size_t i = zpercpu_count(); --i > 0;) {
3103 elem += PAGE_SIZE;
3104 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3105 zalloc_uaf_panic(zone, elem, size);
3106 }
3107 }
3108 }
3109 }
3110
3111 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3112
3113 __attribute__((noinline))
3114 static void
3115 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3116 {
3117 #if KASAN_FAKESTACK
3118 /*
3119 * This can cause re-entrancy with kasan fakestacks
3120 */
3121 #pragma unused(zone, cpu, zs)
3122 #else
3123 uint32_t bits;
3124
3125 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3126 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3127
3128 zs->zs_alloc_rr += bits;
3129 zs->zs_alloc_rr %= zone->z_chunk_elems;
3130 #endif
3131 }
3132
3133 #endif /* !ZALLOC_TEST */
3134 #pragma mark Zone Leak Detection
3135 #if !ZALLOC_TEST
3136 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3137
3138 /*
3139 * Zone leak debugging code
3140 *
3141 * When enabled, this code keeps a log to track allocations to a particular
3142 * zone that have not yet been freed.
3143 *
3144 * Examining this log will reveal the source of a zone leak.
3145 *
3146 * The log is allocated only when logging is enabled (it is off by default),
3147 * so there is no effect on the system when it's turned off.
3148 *
3149 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3150 * zone name to log, with n starting at 1.
3151 *
3152 * Leak debugging utilizes 2 tunables:
3153 * - zlsize (in kB) which describes how much "size" the record covers
3154 * (zones with smaller elements get more records, default is 4M).
3155 *
3156 * - zlfreq (in kB) which describes a sample rate in cumulative allocation
3157 * size at which automatic leak detection will sample allocations.
3158 * (default is 16k)
3159 *
3160 *
3161 * Zone corruption logging
3162 *
3163 * Logging can also be used to help identify the source of a zone corruption.
3164 *
3165 * First, identify the zone that is being corrupted,
3166 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3167 *
3168 * When -zc is used in conjunction with zlog,
3169 * it changes the logging style to track both allocations and frees to the zone.
3170 *
3171 * When the corruption is detected, examining the log will show you the stack
3172 * traces of the callers who last allocated and freed any particular element in
3173 * the zone.
3174 *
3175 * Corruption debugging logs will have zrecs records
3176 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3177 */
3178
3179 #define ZRECORDS_MAX (256u << 10)
3180 #define ZRECORDS_DEFAULT (16u << 10)
3181 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3182 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3183 static TUNABLE(uint32_t, zlfreq, "zlfreq", 16);
3184
3185 __startup_func
3186 static void
3187 zone_leaks_init_zrecs(void)
3188 {
3189 /*
3190 * Don't allow more than ZRECORDS_MAX records,
3191 * even if the user asked for more.
3192 *
3193 * This prevents accidentally hogging too much kernel memory
3194 * and making the system unusable.
3195 */
3196 if (zrecs == 0) {
3197 zrecs = ZRECORDS_DEFAULT *
3198 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3199 }
3200 if (zrecs > ZRECORDS_MAX) {
3201 zrecs = ZRECORDS_MAX;
3202 }
3203 }
3204 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3205
3206 static uint32_t
3207 zone_leaks_record_count(zone_t z)
3208 {
3209 uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3210
3211 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3212 }
3213
3214 static uint32_t
3215 zone_leaks_sample_rate(zone_t z)
3216 {
3217 return (zlfreq << 10) / zone_elem_inner_size(z);
3218 }
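/*
 * Worked example (illustrative): with the defaults (zlsize = 4096 kB,
 * zlfreq = 16 kB), a zone with 128-byte elements gets
 * (4096 << 10) / 128 = 32768 records (within the 16k..256k clamp) and
 * a sample rate of (16 << 10) / 128 = 128, i.e. roughly one sampled
 * allocation per 16 KiB allocated from that zone.
 */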
3219
3220 #if ZALLOC_ENABLE_LOGGING
3221 /* Log allocations and frees to help debug a zone element corruption */
3222 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3223
3224 /*
3225 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3226 */
3227 #define MAX_ZONES_LOG_REQUESTS 10
3228
3229 /**
3230 * @function zone_setup_logging
3231 *
3232 * @abstract
3233 * Optionally sets up a zone for logging.
3234 *
3235 * @discussion
3236 * We recognize the following boot-args:
3237 *
3238 * zlog=<zone_to_log>
3239 * zrecs=<num_records_in_log>
3240 * zlsize=<memory to cover for leaks>
3241 *
3242 * The zlog arg is used to specify the zone name that should be logged,
3243 * and zrecs/zlsize are used to control the size of the log.
3244 */
3245 static void
3246 zone_setup_logging(zone_t z)
3247 {
3248 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3249 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3250 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3251 bool logging_on = false;
3252
3253 /*
3254 * Append kalloc heap name to zone name (if zone is used by kalloc)
3255 */
3256 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3257
3258 /* zlog0 isn't allowed. */
3259 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3260 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3261
3262 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3263 if (track_this_zone(zone_name, zlog_val) ||
3264 track_kalloc_zones(z, zlog_val)) {
3265 logging_on = true;
3266 break;
3267 }
3268 }
3269 }
3270
3271 /*
3272 * Backwards compat. with the old boot-arg used to specify single zone
3273 * logging, i.e. zlog. This needs to happen after the newer zlog<n>
3274 * checks because the "zlog" prefix will match all the zlog<n>
3275 * boot-args.
3276 */
3277 if (!logging_on &&
3278 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3279 if (track_this_zone(zone_name, zlog_val) ||
3280 track_kalloc_zones(z, zlog_val)) {
3281 logging_on = true;
3282 }
3283 }
3284
3285 /*
3286 * If we want to log a zone, see if we need to allocate buffer space for
3287 * the log.
3288 *
3289 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3290 * we have to defer allocation in that case.
3291 *
3292 * zone_init() will finish the job.
3293 *
3294 * If we want to log one of the VM related zones that's set up early on,
3295 * we will skip allocation of the log until zinit is called again later
3296 * on some other zone.
3297 */
3298 if (logging_on) {
3299 if (corruption_debug_flag) {
3300 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3301 } else {
3302 z->z_btlog = btlog_create(BTLOG_HASH,
3303 zone_leaks_record_count(z), 0);
3304 }
3305 if (z->z_btlog) {
3306 z->z_log_on = true;
3307 printf("zone[%s%s]: logging enabled\n",
3308 zone_heap_name(z), z->z_name);
3309 } else {
3310 printf("zone[%s%s]: failed to enable logging\n",
3311 zone_heap_name(z), z->z_name);
3312 }
3313 }
3314 }
3315
3316 #endif /* ZALLOC_ENABLE_LOGGING */
3317 #if KASAN_TBI
3318 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3319
3320 __startup_func
3321 static void
3322 kasan_tbi_init_zrecs(void)
3323 {
3324 /*
3325 * Don't allow more than ZRECORDS_MAX records,
3326 * even if the user asked for more.
3327 *
3328 * This prevents accidentally hogging too much kernel memory
3329 * and making the system unusable.
3330 */
3331 if (kasan_zrecs == 0) {
3332 kasan_zrecs = ZRECORDS_DEFAULT *
3333 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3334 }
3335 if (kasan_zrecs > ZRECORDS_MAX) {
3336 kasan_zrecs = ZRECORDS_MAX;
3337 }
3338 }
3339 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3340
3341 static void
3342 zone_setup_kasan_logging(zone_t z)
3343 {
3344 if (!z->z_tbi_tag) {
3345 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3346 zone_heap_name(z), z->z_name);
3347 return;
3348 }
3349
3350 z->z_log_on = true;
3351 z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3352 if (!z->z_btlog) {
3353 printf("zone[%s%s]: failed to enable kasan logging\n",
3354 zone_heap_name(z), z->z_name);
3355 }
3356 }
3357
3358 #endif /* KASAN_TBI */
3359 #if CONFIG_ZLEAKS
3360
3361 static thread_call_data_t zone_leaks_callout;
3362
3363 /*
3364 * The zone leak detector, abbreviated 'zleak', keeps track
3365 * of a subset of the currently outstanding allocations
3366 * made by the zone allocator.
3367 *
3368 * It will engage itself automatically if the zone map usage
3369 * goes above zleak_pages_global_wired_threshold pages.
3370 *
3371 * When that threshold is reached, zones that use more than
3372 * zleak_pages_per_zone_wired_threshold pages will get
3373 * a BTLOG_HASH btlog with sampling to minimize perf impact,
3374 * yet receive statistical data about the backtrace that is
3375 * the most likely to cause the leak.
3376 *
3377 * If the zone drops far enough below the threshold, then the log
3378 * is disabled and backtraces freed. Data can be collected
3379 * from userspace with the zlog(1) command.
3380 */
3381
3382 /* whether the zleaks subsystem thinks the map is under pressure */
3383 uint32_t zleak_active;
3384 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3385
3386 /* Size of zone map at which to start collecting data */
3387 static size_t zleak_pages_global_wired_threshold = ~0;
3388 vm_size_t zleak_global_tracking_threshold = ~0;
3389
3390 /* Size a zone will have before we will collect data on it */
3391 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3392 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3393
3394 static inline bool
3395 zleak_should_enable_for_zone(zone_t z)
3396 {
3397 if (z->z_log_on) {
3398 return false;
3399 }
3400 if (z->z_btlog) {
3401 return false;
3402 }
3403 if (!zleak_active) {
3404 return false;
3405 }
3406 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3407 }
3408
3409 static inline bool
3410 zleak_should_disable_for_zone(zone_t z)
3411 {
3412 if (z->z_log_on) {
3413 return false;
3414 }
3415 if (!z->z_btlog) {
3416 return false;
3417 }
3418 if (!zleak_active) {
3419 return true;
3420 }
3421 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3422 }
3423
3424 static inline bool
3425 zleak_should_activate(size_t pages)
3426 {
3427 return !zleak_active && pages >= zleak_pages_global_wired_threshold;
3428 }
3429
3430 static inline bool
3431 zleak_should_deactivate(size_t pages)
3432 {
3433 return zleak_active && pages < zleak_pages_global_wired_threshold / 2;
3434 }
3435
3436 static void
3437 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3438 {
3439 size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
3440 btlog_t log;
3441
3442 if (zleak_should_activate(pages)) {
3443 zleak_active = 1;
3444 } else if (zleak_should_deactivate(pages)) {
3445 zleak_active = 0;
3446 }
3447
3448 zone_foreach(z) {
3449 if (zleak_should_disable_for_zone(z)) {
3450 log = z->z_btlog;
3451 z->z_btlog = NULL;
3452 assert(z->z_btlog_disabled == NULL);
3453 btlog_disable(log);
3454 z->z_btlog_disabled = log;
3455 }
3456
3457 if (zleak_should_enable_for_zone(z)) {
3458 log = z->z_btlog_disabled;
3459 if (log == NULL) {
3460 log = btlog_create(BTLOG_HASH,
3461 zone_leaks_record_count(z),
3462 zone_leaks_sample_rate(z));
3463 } else if (btlog_enable(log) == KERN_SUCCESS) {
3464 z->z_btlog_disabled = NULL;
3465 } else {
3466 log = NULL;
3467 }
3468 os_atomic_store(&z->z_btlog, log, release);
3469 }
3470 }
3471 }
3472
3473 __startup_func
3474 static void
3475 zleak_init(void)
3476 {
3477 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3478
3479 zleak_update_threshold(&zleak_global_tracking_threshold,
3480 zleak_max_zonemap_size / 2);
3481 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3482 zleak_global_tracking_threshold / 8);
3483
3484 thread_call_setup_with_options(&zone_leaks_callout,
3485 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3486 THREAD_CALL_OPTIONS_ONCE);
3487 }
3488 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3489
3490 kern_return_t
3491 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3492 {
3493 if (value >= zleak_max_zonemap_size) {
3494 return KERN_INVALID_VALUE;
3495 }
3496
3497 if (arg == &zleak_global_tracking_threshold) {
3498 zleak_global_tracking_threshold = (vm_size_t)value;
3499 zleak_pages_global_wired_threshold = atop(value);
3500 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3501 thread_call_enter(&zone_leaks_callout);
3502 }
3503 return KERN_SUCCESS;
3504 }
3505
3506 if (arg == &zleak_per_zone_tracking_threshold) {
3507 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3508 zleak_pages_per_zone_wired_threshold = atop(value);
3509 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3510 thread_call_enter(&zone_leaks_callout);
3511 }
3512 return KERN_SUCCESS;
3513 }
3514
3515 return KERN_INVALID_ARGUMENT;
3516 }
3517
3518 static void
3519 panic_display_zleaks(bool has_syms)
3520 {
3521 bool did_header = false;
3522 vm_address_t bt[BTLOG_MAX_DEPTH];
3523 uint32_t len, count;
3524
3525 zone_foreach(z) {
3526 btlog_t log = z->z_btlog;
3527
3528 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3529 continue;
3530 }
3531
3532 count = btlog_guess_top(log, bt, &len);
3533 if (count == 0) {
3534 continue;
3535 }
3536
3537 if (!did_header) {
3538 paniclog_append_noflush("Zone (suspected) leak report:\n");
3539 did_header = true;
3540 }
3541
3542 paniclog_append_noflush(" Zone: %s%s\n",
3543 zone_heap_name(z), zone_name(z));
3544 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3545 (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3546 paniclog_append_noflush(" Size: %ld\n",
3547 (long)zone_size_wired(z));
3548 paniclog_append_noflush(" Top backtrace:\n");
3549 for (uint32_t i = 0; i < len; i++) {
3550 if (has_syms) {
3551 paniclog_append_noflush(" %p ", (void *)bt[i]);
3552 panic_print_symbol_name(bt[i]);
3553 paniclog_append_noflush("\n");
3554 } else {
3555 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3556 }
3557 }
3558
3559 kmod_panic_dump(bt, len);
3560 paniclog_append_noflush("\n");
3561 }
3562 }
3563 #endif /* CONFIG_ZLEAKS */
3564
3565 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3566 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3567
3568 #if !KASAN_TBI
3569 __cold
3570 #endif
3571 static void
3572 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3573 {
3574 btlog_t log = zone->z_btlog;
3575 btref_get_flags_t flags = 0;
3576 btref_t ref;
3577
3578 #if !KASAN_TBI
3579 if (!log || !btlog_sample(log)) {
3580 return;
3581 }
3582 #endif
3583 if (get_preemption_level() || zone_supports_vm(zone)) {
3584 /*
3585 * VM zones can be used by btlog, avoid reentrancy issues.
3586 */
3587 flags = BTREF_GET_NOWAIT;
3588 }
3589
3590 ref = btref_get(fp, flags);
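	/*
	 * Note (descriptive comment added for clarity): when several elements
	 * are logged at once, they appear to be chained through their first
	 * word, which holds the delta to the next element of the batch; the
	 * `addr += *(vm_offset_t *)addr` walk below relies on that chaining.
	 */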
3591 while (count-- > 0) {
3592 if (count) {
3593 btref_retain(ref);
3594 }
3595 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3596 addr += *(vm_offset_t *)addr;
3597 }
3598 }
3599
3600 #define ZALLOC_LOG(zone, addr, count) ({ \
3601 if ((zone)->z_btlog) { \
3602 zalloc_log(zone, addr, count, __builtin_frame_address(0)); \
3603 } \
3604 })
3605
3606 #if !KASAN_TBI
3607 __cold
3608 #endif
3609 static void
3610 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3611 {
3612 btlog_t log = zone->z_btlog;
3613 btref_get_flags_t flags = 0;
3614 btref_t ref;
3615
3616 #if !KASAN_TBI
3617 if (!log) {
3618 return;
3619 }
3620 #endif
3621
3622 /*
3623 * See if we're doing logging on this zone.
3624 *
3625 * There are two styles of logging used depending on
3626 * whether we're trying to catch a leak or corruption.
3627 */
3628 #if !KASAN_TBI
3629 if (btlog_get_type(log) == BTLOG_HASH) {
3630 /*
3631 * We're logging to catch a leak.
3632 *
3633 * Remove any record we might have for this element
3634 * since it's being freed. Note that we may not find it
3635 * if the buffer overflowed and that's OK.
3636 *
3637 * Since the log is of a limited size, old records get
3638 * overwritten if there are more zallocs than zfrees.
3639 */
3640 while (count-- > 0) {
3641 btlog_erase(log, (void *)addr);
3642 addr += *(vm_offset_t *)addr;
3643 }
3644 return;
3645 }
3646 #endif /* !KASAN_TBI */
3647
3648 if (get_preemption_level() || zone_supports_vm(zone)) {
3649 /*
3650 * VM zones can be used by btlog, avoid reentrancy issues.
3651 */
3652 flags = BTREF_GET_NOWAIT;
3653 }
3654
3655 ref = btref_get(fp, flags);
3656 while (count-- > 0) {
3657 if (count) {
3658 btref_retain(ref);
3659 }
3660 btlog_record(log, (void *)addr, ZOP_FREE, ref);
3661 addr += *(vm_offset_t *)addr;
3662 }
3663 }
3664
3665 #define ZFREE_LOG(zone, addr, count) ({ \
3666 if ((zone)->z_btlog) { \
3667 zfree_log(zone, addr, count, __builtin_frame_address(0)); \
3668 } \
3669 })
3670
3671 #else
3672 #define ZALLOC_LOG(...) ((void)0)
3673 #define ZFREE_LOG(...) ((void)0)
3674 #endif /* ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3675 #endif /* !ZALLOC_TEST */
3676 #pragma mark zone (re)fill
3677 #if !ZALLOC_TEST
3678
3679 /*!
3680 * @defgroup Zone Refill
3681 * @{
3682 *
3683 * @brief
3684 * Functions handling the zone refill machinery.
3685 *
3686 * @discussion
3687 * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3688 *
3689 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3690 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3691 *
3692 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3693 *
3694 * - wake up the asynchronous expansion thread call if the caller is not allowed
3695 * to block, or if the reserve becomes depleted.
3696 *
3697 *
3698 * <h2>Synchronous expansion</h2>
3699 *
3700 * This mechanism is actually the only one that may refill a zone, and all the
3701 * other ones funnel through this one eventually.
3702 *
3703 * @c zone_expand_locked() implements the core of the expansion mechanism,
3704 * and will do so while a caller specified predicate is true.
3705 *
3706 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3707 * - one VM privileged thread,
3708 * - one regular thread.
3709 *
3710 * Regular threads that refill will put down their identity in @c z_expander,
3711 * so that priority inversion avoidance can be implemented.
3712 *
3713 * However, VM privileged threads are allowed to use VM page reserves,
3714 * which allows for the system to recover from extreme memory pressure
3715 * situations, allowing for the few allocations that @c zone_gc() or
3716 * killing processes require.
3717 *
3718 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3719 * is set. @c z_expander is not necessarily the identity of this VM privileged
3720 * thread (it is if the VM privileged thread came in first, but wouldn't be, and
3721 * could even be @c THREAD_NULL otherwise).
3722 *
3723 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3724 * spending a whole pointer on priority inheritance for VM privileged threads
3725 * (and other issues related to having two owners), we use the rwlock boost as
3726 * a stop gap to avoid priority inversions.
3727 *
3728 *
3729 * <h2>Chunk wiring policies</h2>
3730 *
3731 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3732 * to try to minimize fragmentation relative to element sizes not aligning with
3733 * a chunk size well. However, this can grow large and be hard to fulfill on
3734 * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3735 * 4k page systems).
3736 *
3737 * This is why, when under memory pressure the system allows chunks to be
3738 * partially populated. The metadata of the first page in the chunk maintains
3739 * the count of actually populated pages.
3740 *
3741 * The metadata for addresses assigned to a zone are found on 4 queues:
3742 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3743 * elements (those can be targeted by @c zone_gc()),
3744 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3745 * used,
3746 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3747 * left,
3748 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3749 * the zone forever, or the first secondary metadata for a chunk whose
3750 * corresponding page is not populated in the chunk.
3751 *
3752 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3753 * queues are preferred.
3754 *
3755 *
3756 * <h2>Asynchronous expansion</h2>
3757 *
3758 * This mechanism allows for refilling zones used mostly with non blocking
3759 * callers. It relies on a thread call (@c zone_expand_callout) which will
3760 * iterate all zones and refill the ones marked with @c z_async_refilling.
3761 *
3762 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3763 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3764 *
3765 *
3766 * <h2>Dealing with zone allocations from the mach VM code</h2>
3767 *
3768 * The implementation of the mach VM itself uses the zone allocator
3769 * for things like the vm_map_entry data structure. In order to prevent
3770 * a recursion problem when adding more pages to a zone, the VM zones
3771 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3772 * or any VM map functions to allocate.
3773 *
3774 * Instead, a really simple coalescing first-fit allocator is used
3775 * for this submap, and nothing but zalloc can allocate from it.
3776 *
3777 * Memory is directly populated which doesn't require allocation of
3778 * VM map entries, and avoids recursion. The cost of this scheme however,
3779 * is that `vm_map_lookup_entry` will not function on those addresses
3780 * (nor any API relying on it).
3781 */
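
/*
 * Illustrative sketch (added for clarity, hedged): the refill decision
 * described above boils down to something like the following pseudo-code.
 * The real logic lives in zalloc_ext() and differs in detail; the use of
 * Z_NOWAIT as the "caller can't block" flag is an assumption here:
 *
 *	if (z->z_elems_free <= z->z_elems_rsv / 2) {
 *		if ((flags & Z_NOWAIT) || z->z_elems_free == 0) {
 *			zone_expand_async_schedule_if_allowed(z);
 *		} else {
 *			zone_expand_locked(z, flags, zalloc_needs_refill);
 *		}
 *	}
 */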
3782
3783 static thread_call_data_t zone_expand_callout;
3784
3785 __attribute__((overloadable))
3786 static inline bool
3787 zone_submap_is_sequestered(zone_submap_idx_t idx)
3788 {
3789 return idx != Z_SUBMAP_IDX_DATA;
3790 }
3791
3792 __attribute__((overloadable))
3793 static inline bool
3794 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3795 {
3796 return zone_submap_is_sequestered(zsflags.z_submap_idx);
3797 }
3798
3799 static inline kma_flags_t
3800 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3801 {
3802 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3803
3804 if (zsflags.z_noencrypt) {
3805 kmaflags |= KMA_NOENCRYPT;
3806 }
3807 if (flags & Z_NOPAGEWAIT) {
3808 kmaflags |= KMA_NOPAGEWAIT;
3809 }
3810 if (z->z_permanent || (!z->z_destructible &&
3811 zone_submap_is_sequestered(zsflags))) {
3812 kmaflags |= KMA_PERMANENT;
3813 }
3814 if (zsflags.z_submap_from_end) {
3815 kmaflags |= KMA_LAST_FREE;
3816 }
3817
3818 return kmaflags;
3819 }
3820
3821 static inline void
3822 zone_add_wired_pages(uint32_t pages)
3823 {
3824 size_t count = os_atomic_add(&zone_pages_wired, pages, relaxed);
3825
3826 #if CONFIG_ZLEAKS
3827 if (__improbable(zleak_should_activate(count) &&
3828 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3829 thread_call_enter(&zone_leaks_callout);
3830 }
3831 #else
3832 (void)count;
3833 #endif
3834 }
3835
3836 static inline void
3837 zone_remove_wired_pages(uint32_t pages)
3838 {
3839 size_t count = os_atomic_sub(&zone_pages_wired, pages, relaxed);
3840
3841 #if CONFIG_ZLEAKS
3842 if (__improbable(zleak_should_deactivate(count) &&
3843 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3844 thread_call_enter(&zone_leaks_callout);
3845 }
3846 #else
3847 (void)count;
3848 #endif
3849 }
3850
3851 /*!
3852 * @function zcram_and_lock()
3853 *
3854 * @brief
3855 * Prepare some memory for being usable for allocation purposes.
3856 *
3857 * @discussion
3858 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3859 * to be usable in the zone.
3860 *
3861 * This function assumes the metadata is already populated for the range.
3862 *
3863 * Calling this function with @c pg_start being 0 means that the memory
3864 * is either a partial chunk, or a full chunk, that isn't published anywhere
3865 * and the initialization can happen without locks held.
3866 *
3867 * Calling this function with a non-zero @c pg_start means that we are extending
3868 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3869 * is already usable and published in the zone, so extending it requires holding
3870 * the zone lock.
3871 *
3872 * @param zone The zone to cram new populated pages into
3873 * @param addr The base address for the chunk(s)
3874 * @param pg_va_new The number of virtual pages newly assigned to the zone
3875 * @param pg_start The first newly populated page relative to @a addr.
3876 * @param pg_end The after-last newly populated page relative to @a addr.
3877 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3878 */
3879 static void
3880 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3881 uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3882 {
3883 zone_id_t zindex = zone_index(zone);
3884 vm_offset_t elem_size = zone_elem_outer_size(zone);
3885 uint32_t free_start = 0, free_end = 0;
3886 uint32_t oob_offs = zone_elem_outer_offs(zone);
3887
3888 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3889 uint32_t chunk_pages = zone->z_chunk_pages;
3890 bool guarded = meta->zm_guarded;
3891
3892 assert(pg_start < pg_end && pg_end <= chunk_pages);
3893
3894 if (pg_start == 0) {
3895 uint16_t chunk_len = (uint16_t)pg_end;
3896 uint16_t secondary_len = ZM_SECONDARY_PAGE;
3897 bool inline_bitmap = false;
3898
3899 if (zone->z_percpu) {
3900 chunk_len = 1;
3901 secondary_len = ZM_SECONDARY_PCPU_PAGE;
3902 assert(pg_end == zpercpu_count());
3903 }
3904 if (!zone->z_permanent && !zone->z_uses_tags) {
3905 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3906 }
3907
3908 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3909
3910 meta[0] = (struct zone_page_metadata){
3911 .zm_index = zindex,
3912 .zm_guarded = guarded,
3913 .zm_inline_bitmap = inline_bitmap,
3914 .zm_chunk_len = chunk_len,
3915 .zm_alloc_size = lock,
3916 };
3917
3918 if (!zone->z_permanent && !inline_bitmap) {
3919 meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3920 zone->z_chunk_elems, zone->z_uses_tags);
3921 }
3922
3923 for (uint16_t i = 1; i < chunk_pages; i++) {
3924 meta[i] = (struct zone_page_metadata){
3925 .zm_index = zindex,
3926 .zm_guarded = guarded,
3927 .zm_inline_bitmap = inline_bitmap,
3928 .zm_chunk_len = secondary_len,
3929 .zm_page_index = (uint8_t)i,
3930 .zm_bitmap = meta[0].zm_bitmap,
3931 .zm_subchunk_len = (uint8_t)(chunk_pages - i),
3932 };
3933 }
3934
3935 if (inline_bitmap) {
3936 zone_meta_bits_init_inline(meta, free_end);
3937 }
3938 } else {
3939 assert(!zone->z_percpu && !zone->z_permanent);
3940
3941 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
3942 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
3943 }
3944
3945 #if KASAN_CLASSIC
3946 assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
3947 if (zone->z_permanent) {
3948 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
3949 } else if (zone->z_percpu) {
3950 for (uint32_t i = 0; i < pg_end; i++) {
3951 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
3952 zone_elem_outer_size(zone),
3953 zone_elem_outer_offs(zone),
3954 zone_elem_redzone(zone));
3955 }
3956 } else {
3957 kasan_zmem_add(addr, ptoa(pg_end),
3958 zone_elem_outer_size(zone),
3959 zone_elem_outer_offs(zone),
3960 zone_elem_redzone(zone));
3961 }
3962 #endif /* KASAN_CLASSIC */
3963
3964 /*
3965 * Insert the initialized pages / metadatas into the right lists.
3966 */
3967
3968 zone_lock(zone);
3969 assert(zone->z_self == zone);
3970
3971 if (pg_start != 0) {
3972 assert(meta->zm_chunk_len == pg_start);
3973
3974 zone_meta_bits_merge(meta, free_start, free_end);
3975 meta->zm_chunk_len = (uint16_t)pg_end;
3976
3977 /*
3978 * consume the zone_meta_lock_in_partial()
3979 * done in zone_expand_locked()
3980 */
3981 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
3982 zone_meta_remqueue(zone, meta);
3983 }
3984
3985 if (zone->z_permanent || meta->zm_alloc_size) {
3986 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
3987 } else {
3988 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
3989 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
3990 }
3991 if (pg_end < chunk_pages) {
3992 /* push any non populated residual VA on z_pageq_va */
3993 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
3994 }
3995
3996 zone->z_elems_free += free_end - free_start;
3997 zone->z_elems_avail += free_end - free_start;
3998 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
3999 if (pg_va_new) {
4000 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4001 }
4002 if (zone->z_wired_hwm < zone->z_wired_cur) {
4003 zone->z_wired_hwm = zone->z_wired_cur;
4004 }
4005
4006 #if CONFIG_ZLEAKS
4007 if (__improbable(zleak_should_enable_for_zone(zone) &&
4008 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4009 thread_call_enter(&zone_leaks_callout);
4010 }
4011 #endif /* CONFIG_ZLEAKS */
4012
4013 zone_add_wired_pages(pg_end - pg_start);
4014 }
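
/*
 * Worked example (illustrative, with assumed numbers): for a zone with a
 * 64-byte outer element size, a zero outer offset, and a whole single 4k
 * page chunk being crammed (pg_start == 0, pg_end == 1), the code above
 * computes free_end = (4096 - 0) / 64 = 64 elements, all of which are
 * added to z_elems_free and z_elems_avail.
 */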
4015
4016 static void
4017 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4018 {
4019 uint32_t chunk_pages = zone->z_chunk_pages;
4020
4021 assert(pages % chunk_pages == 0);
4022 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4023 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4024 zone_unlock(zone);
4025 }
4026 }
4027
4028 __startup_func
4029 void
4030 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4031 {
4032 uint32_t pages = (uint32_t)atop(size);
4033
4034 assert(from_zone_map(newmem, size));
4035 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4036 assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4037
4038 /*
4039 * The early pages we move at the pmap layer can't be "depopulated"
4040 * because there's no vm_page_t for them.
4041 *
4042 * "Lock" them so that they never hit z_pageq_empty.
4043 */
4044 bzero((void *)newmem, size);
4045 zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4046 }
4047
4048 /*!
4049 * @function zone_submap_alloc_sequestered_va
4050 *
4051 * @brief
4052 * Allocates VA without using vm_find_space().
4053 *
4054 * @discussion
4055 * Allocate VA quickly without using the slower vm_find_space() for cases
4056 * when the submaps are fully sequestered.
4057 *
4058 * The VM submap is used to implement the VM itself so it is always sequestered,
4059 * as it can't use kmem_alloc(), which always needs to allocate VM map entries.
4060 * However, it can use vm_map_enter() which tries to coalesce entries, which
4061 * always works, so the VM map only ever needs 2 entries (one for each end).
4062 *
4063 * The RO submap is similarly always sequestered if it exists (as a non
4064 * sequestered RO submap makes very little sense).
4065 *
4066 * The allocator is a very simple bump-allocator
4067 * that allocates from either end.
4068 */
4069 static kern_return_t
4070 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4071 vm_offset_t *addrp)
4072 {
4073 vm_size_t size = ptoa(pages);
4074 vm_map_t map = zone_submap(zsflags);
4075 vm_map_entry_t first, last;
4076 vm_map_offset_t addr;
4077
4078 vm_map_lock(map);
4079
4080 first = vm_map_first_entry(map);
4081 last = vm_map_last_entry(map);
4082
4083 if (first->vme_end + size > last->vme_start) {
4084 vm_map_unlock(map);
4085 return KERN_NO_SPACE;
4086 }
4087
4088 if (zsflags.z_submap_from_end) {
4089 last->vme_start -= size;
4090 addr = last->vme_start;
4091 VME_OFFSET_SET(last, addr);
4092 } else {
4093 addr = first->vme_end;
4094 first->vme_end += size;
4095 }
4096 map->size += size;
4097
4098 vm_map_unlock(map);
4099
4100 *addrp = addr;
4101 return KERN_SUCCESS;
4102 }
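
/*
 * Illustrative layout sketch (added for clarity, not from the original
 * sources): the sequestered submap only ever keeps one entry anchored at
 * each end, and the hole in the middle shrinks as VA is handed out:
 *
 *	[ first entry ->        free VA        <- last entry ]
 *
 * z_submap_from_end decides which side the new run is carved from.
 */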
4103
4104 void
4105 zone_fill_initially(zone_t zone, vm_size_t nelems)
4106 {
4107 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4108 kern_return_t kr;
4109 vm_offset_t addr;
4110 uint32_t pages;
4111 zone_security_flags_t zsflags = zone_security_config(zone);
4112
4113 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4114 assert(zone->z_elems_avail == 0);
4115
4116 kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4117 pages = zone_alloc_pages_for_nelems(zone, nelems);
4118 if (zone_submap_is_sequestered(zsflags)) {
4119 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4120 if (kr != KERN_SUCCESS) {
4121 panic("zone_submap_alloc_sequestered_va() "
4122 "of %u pages failed", pages);
4123 }
4124 kernel_memory_populate(addr, ptoa(pages),
4125 kmaflags, VM_KERN_MEMORY_ZONE);
4126 } else {
4127 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4128 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4129 kmaflags, VM_KERN_MEMORY_ZONE);
4130 }
4131
4132 zone_meta_populate(addr, ptoa(pages));
4133 zcram(zone, addr, pages, 0);
4134 }
4135
4136 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4137 __attribute__((noinline))
4138 static void
4139 zone_scramble_va_and_unlock(
4140 zone_t z,
4141 struct zone_page_metadata *meta,
4142 uint32_t runs,
4143 uint32_t pages,
4144 uint32_t chunk_pages,
4145 uint64_t guard_mask)
4146 {
4147 struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];
4148
4149 for (uint32_t run = 0, n = 0; run < runs; run++) {
4150 arr[run] = meta + n;
4151 n += chunk_pages + ((guard_mask >> run) & 1);
4152 }
4153
4154 /*
4155 * Fisher–Yates shuffle, for an array with indices [0, n)
4156 *
4157 * for i from n−1 downto 1 do
4158 * j ← random integer such that 0 ≤ j ≤ i
4159 * exchange a[j] and a[i]
4160 *
4161 * The point here is that early allocations aren't at a fixed
4162 * distance from each other.
4163 */
4164 for (uint32_t i = runs - 1; i > 0; i--) {
4165 uint32_t j = zalloc_random_uniform32(0, i + 1);
4166
4167 meta = arr[j];
4168 arr[j] = arr[i];
4169 arr[i] = meta;
4170 }
4171
4172 zone_lock(z);
4173
4174 for (uint32_t i = 0; i < runs; i++) {
4175 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4176 }
4177 z->z_va_cur += z->z_percpu ? runs : pages;
4178 }
4179
4180 static inline uint32_t
4181 dist_u32(uint32_t a, uint32_t b)
4182 {
4183 return a < b ? b - a : a - b;
4184 }
4185
4186 static uint64_t
4187 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4188 {
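	/*
	 * Note (descriptive comment added for clarity): each iteration of the
	 * loop below clears one uniformly chosen set bit among the `pop`
	 * remaining ones: `m &= m - 1` strips the `bit` lowest set bits, and
	 * the ctz of what remains is the position of the bit to clear.
	 */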
4189 for (; n-- > 0; pop--) {
4190 uint32_t bit = zalloc_random_uniform32(0, pop);
4191 uint64_t m = mask;
4192
4193 for (; bit; bit--) {
4194 m &= m - 1;
4195 }
4196
4197 mask ^= 1ull << __builtin_ctzll(m);
4198 }
4199
4200 return mask;
4201 }
4202
4203 /**
4204 * @function zalloc_random_bits
4205 *
4206 * @brief
4207 * Compute a random number with a specified number of bits set in a given width.
4208 *
4209 * @discussion
4210 * This function generates a "uniform" distribution of sets of bits set in
4211 * a given width, typically with fewer than width/4 calls to random.
4212 *
4213 * @param pop the target number of bits set.
4214 * @param width the number of bits in the random integer to generate.
4215 */
4216 static uint64_t
4217 zalloc_random_bits(uint32_t pop, uint32_t width)
4218 {
4219 uint64_t w_mask = (1ull << width) - 1;
4220 uint64_t mask;
4221 uint32_t cur;
4222
4223 if (3 * width / 4 <= pop) {
4224 mask = w_mask;
4225 cur = width;
4226 } else if (pop <= width / 4) {
4227 mask = 0;
4228 cur = 0;
4229 } else {
4230 /*
4231 * Choosing a random number this way overwhelmingly
4232 * yields a popcount close to width/2, +/- a few bits.
4233 */
4234 mask = zalloc_random_mask64(width);
4235 cur = __builtin_popcountll(mask);
4236
4237 if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4238 /*
4239 * If the opposite mask has a closer popcount,
4240 * then start with that one as the seed.
4241 */
4242 cur = width - cur;
4243 mask ^= w_mask;
4244 }
4245 }
4246
4247 if (cur < pop) {
4248 /*
4249 * Setting `pop - cur` bits is really clearing that many from
4250 * the opposite mask.
4251 */
4252 mask ^= w_mask;
4253 mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4254 mask ^= w_mask;
4255 } else if (pop < cur) {
4256 mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4257 }
4258
4259 return mask;
4260 }
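
/*
 * Worked example (illustrative, not from the original sources):
 * zalloc_random_bits(3, 10) returns a 10-bit mask with exactly 3 bits
 * set, e.g. 0b0010010001, chosen approximately uniformly among the
 * C(10, 3) = 120 possible masks, typically using only one or two calls
 * to the random number generator.
 */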
4261 #endif
4262
4263 static void
4264 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4265 {
4266 zone_security_flags_t zsflags = zone_security_config(z);
4267 struct zone_page_metadata *meta;
4268 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4269 uint32_t chunk_pages = z->z_chunk_pages;
4270 uint32_t runs, pages, guards, rnum;
4271 uint64_t guard_mask = 0;
4272 bool lead_guard = false;
4273 kern_return_t kr;
4274 vm_offset_t addr;
4275
4276 zone_unlock(z);
4277
4278 /*
4279 * A lot of OOB exploitation techniques rely on precise placement
4280 * and interleaving of zone pages. The layout that is sought
4281 * by attackers will be C/P/T types, where:
4282 * - (C)ompromised is the type for which attackers have a bug,
4283 * - (P)adding is used to pad memory,
4284 * - (T)arget is the type that the attacker will attempt to corrupt
4285 * by exploiting (C).
4286 *
4287 * Note that in some cases C==T and P isn't needed.
4288 *
4289 * In order to make those placement games much harder,
4290 * we grow zones by random runs of memory, up to 256k.
4291 * This makes predicting the precise layout of the heap
4292 * considerably more complicated.
4293 *
4294 * Note: this function makes very heavy use of random;
4295 * however, it is mostly limited to sequestered zones,
4296 * and eventually the layout will be fixed,
4297 * and the usage of random vastly reduced.
4298 *
4299 * For non sequestered zones, there's a single call
4300 * to random in order to decide whether we want
4301 * a guard page or not.
4302 */
4303 pages = chunk_pages;
4304 guards = 0;
4305 runs = 1;
4306 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4307 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4308 pages = atop(ZONE_CHUNK_ALLOC_SIZE);
4309 runs = (pages + chunk_pages - 1) / chunk_pages;
4310 runs = zalloc_random_uniform32(1, runs + 1);
4311 pages = runs * chunk_pages;
4312 }
4313 static_assert(ZONE_CHUNK_ALLOC_SIZE / 4096 <= 64,
4314 "make sure that `runs` will never be larger than 64");
4315 #endif /* !ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4316
4317 /*
4318 * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4319 * guards might be added after each chunk.
4320 *
4321 * Those guard pages are marked with the ZM_PGZ_GUARD
4322 * magical chunk len, and their zm_oob_offs field
4323 * is used to remember optional shift applied
4324 * to returned elements, in order to right-align-them
4325 * as much as possible.
4326 *
4327 * In an adversarial context, while guard pages
4328 * are extremely effective against linear overflow,
4329 * using a predictable density of guard pages feels like
4330 * a missed opportunity, which is why we chose to insert
4331 * one guard page for about 32k of memory, and place it
4332 * randomly.
4333 */
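
/*
 * Worked example (illustrative; assumes ZONE_GUARD_SPARSE is 64k and
 * ZONE_GUARD_DENSE is 32k, per the density comments below): a 256k run
 * of a zone that is not flagged ZC_PGZ_USE_GUARDS aims for
 * 256k / 64k = 4 guard pages, while the same run for a guard-using zone
 * with small chunks aims for 256k / 32k = 8; the exact count and
 * placement are randomized by the code that follows.
 */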
4334 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4335 if (z->z_percpu) {
4336 /*
4337 * For per-cpu runs, have a 75% chance to have a guard.
4338 */
4339 rnum = zalloc_random_uniform32(0, 4 * 128);
4340 guards = rnum >= 128;
4341 } else if (!zsflags.z_pgz_use_guards && !z->z_pgz_use_guards) {
4342 vm_offset_t rest;
4343
4344 /*
4345 * For types that are less susceptible to OOBs,
4346 * have a density of 1 guard every 64k, with a uniform
4347 * distribution.
4348 */
4349 rnum = zalloc_random_uniform32(0, ZONE_GUARD_SPARSE);
4350 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE;
4351 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE;
4352 guards += rnum < rest;
4353 } else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) {
4354 /*
4355 * For chunks >= 32k, have a 75% chance of guard pages
4356 * between chunks.
4357 */
4358 rnum = zalloc_random_uniform32(65, 129);
4359 guards = runs * rnum / 128;
4360 } else {
4361 vm_offset_t rest;
4362
4363 /*
4364 * Otherwise, aim at 1 guard every 32k,
4365 * with a uniform distribution.
4366 */
4367 rnum = zalloc_random_uniform32(0, ZONE_GUARD_DENSE);
4368 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;
4369 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;
4370 guards += rnum < rest;
4371 }
4372 assert3u(guards, <=, runs);
4373
4374 guard_mask = 0;
4375
4376 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4377 uint32_t g = 0;
4378
4379 /*
4380 * Several exploitation strategies rely on a C/T (compromised
4381 * then target types) ordering of pages with a sub-page reach
4382 * from C into T.
4383 *
4384 * We want to reliably thwart such exploitations
4385 * and hence force a guard page between alternating
4386 * memory types.
4387 */
4388 guard_mask |= 1ull << (runs - 1);
4389 g++;
4390
4391 /*
4392 * While we randomize the chunks lengths, an attacker with
4393 * precise timing control can guess when overflows happen,
4394 * and "measure" the runs, which gives them an indication
4395 * of where the next run start offset is.
4396 *
4397 * In order to make this knowledge unusable, add a guard page
4398 * _before_ the new run with a 25% probability, regardless
4399 * of whether we had enough guard pages.
4400 */
4401 if ((rnum & 3) == 0) {
4402 lead_guard = true;
4403 g++;
4404 }
4405 if (guards > g) {
4406 guard_mask |= zalloc_random_bits(guards - g, runs - 1);
4407 } else {
4408 guards = g;
4409 }
4410 } else {
4411 assert3u(runs, ==, 1);
4412 assert3u(guards, <=, 1);
4413 guard_mask = guards << (runs - 1);
4414 }
4415 #else
4416 (void)rnum;
4417 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4418
4419 if (zone_submap_is_sequestered(zsflags)) {
4420 kr = zone_submap_alloc_sequestered_va(zsflags,
4421 pages + guards, &addr);
4422 } else {
4423 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4424 kr = kmem_alloc(zone_submap(zsflags), &addr,
4425 ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE);
4426 }
4427
4428 if (kr != KERN_SUCCESS) {
4429 uint64_t zone_size = 0;
4430 zone_t zone_largest = zone_find_largest(&zone_size);
4431 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4432 "likely due to memory leak in zone [%s%s] "
4433 "(%u%c, %d elements allocated)",
4434 kr, zone_heap_name(z), zone_name(z),
4435 zone_heap_name(zone_largest), zone_name(zone_largest),
4436 mach_vm_size_pretty(zone_size),
4437 mach_vm_size_unit(zone_size),
4438 zone_count_allocated(zone_largest));
4439 }
4440
4441 meta = zone_meta_from_addr(addr);
4442 zone_meta_populate(addr, ptoa(pages + guards));
4443
4444 /*
4445 * Handle the leading guard page if any
4446 */
4447 if (lead_guard) {
4448 meta[0].zm_index = zone_index(z);
4449 meta[0].zm_chunk_len = ZM_PGZ_GUARD;
4450 meta[0].zm_guarded = true;
4451 meta++;
4452 }
4453
4454 for (uint32_t run = 0, n = 0; run < runs; run++) {
4455 bool guarded = (guard_mask >> run) & 1;
4456
4457 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4458 meta[n].zm_index = zone_index(z);
4459 meta[n].zm_guarded = guarded;
4460 }
4461 if (guarded) {
4462 meta[n].zm_index = zone_index(z);
4463 meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4464 n++;
4465 }
4466 }
4467 if (guards) {
4468 os_atomic_add(&zone_guard_pages, guards, relaxed);
4469 }
4470
4471 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4472 if (__improbable(zone_caching_disabled < 0)) {
4473 return zone_scramble_va_and_unlock(z, meta, runs, pages,
4474 chunk_pages, guard_mask);
4475 }
4476 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4477
4478 zone_lock(z);
4479
4480 for (uint32_t run = 0, n = 0; run < runs; run++) {
4481 zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4482 n += chunk_pages + ((guard_mask >> run) & 1);
4483 }
4484 z->z_va_cur += z->z_percpu ? runs : pages;
4485 }
4486
4487 static bool
4488 zone_expand_pred_nope(__unused zone_t z)
4489 {
4490 return false;
4491 }
4492
4493 static inline void
4494 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4495 {
4496 #if DEBUG || DEVELOPMENT
4497 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
4498 size, 0, 0, 0);
4499 #else
4500 (void)size;
4501 #endif
4502 }
4503
4504 static inline void
4505 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4506 {
4507 #if DEBUG || DEVELOPMENT
4508 task_t task = current_task_early();
4509 if (pages && task) {
4510 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4511 }
4512 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
4513 pages, 0, 0, 0);
4514 #else
4515 (void)pages;
4516 #endif
4517 }
4518
4519 __attribute__((noinline))
4520 static void
4521 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4522 {
4523 uint64_t wait_start = 0;
4524 long mapped;
4525
4526 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4527
4528 if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4529 return;
4530 }
4531
4532 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4533
4534 /*
4535 * If the zone map is really exhausted, wait on the GC thread,
4536 * donating our priority (which is important because the GC
4537 * thread is at a rather low priority).
4538 */
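	/*
	 * Note (descriptive comment added for clarity): wait_ms below follows
	 * the triangular numbers 1, 3, 6, 10, 15, ... ms, so the total time
	 * spent waiting grows roughly cubically with the number of retries,
	 * until zone_exhausted_timeout (if set) turns the wait into a panic.
	 */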
4539 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4540 uint32_t wait_ms = n * (n + 1) / 2;
4541 uint64_t interval;
4542
4543 if (n == 1) {
4544 wait_start = mach_absolute_time();
4545 } else {
4546 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4547 }
4548 if (zone_exhausted_timeout > 0 &&
4549 wait_ms > zone_exhausted_timeout) {
4550 panic("zone map exhaustion: waited for %dms "
4551 "(pages: %ld, max: %ld, wanted: %d)",
4552 wait_ms, mapped, zone_pages_wired_max, pgs);
4553 }
4554
4555 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4556 &interval);
4557
4558 lck_spin_lock(&zone_exhausted_lock);
4559 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4560 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4561 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4562
4563 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4564 }
4565 }
4566
4567 static bool
4568 zone_expand_wait_for_pages(bool waited)
4569 {
4570 if (waited) {
4571 return false;
4572 }
4573 #if DEBUG || DEVELOPMENT
4574 if (zalloc_simulate_vm_pressure) {
4575 return false;
4576 }
4577 #endif /* DEBUG || DEVELOPMENT */
4578 return !vm_pool_low();
4579 }
4580
4581 static inline void
4582 zone_expand_async_schedule_if_allowed(zone_t zone)
4583 {
4584 if (zone->z_async_refilling || zone->no_callout) {
4585 return;
4586 }
4587
4588 if (zone->exhaustible && zone->z_wired_cur >= zone->z_wired_max) {
4589 return;
4590 }
4591
4592 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4593 return;
4594 }
4595
4596 if (!vm_pool_low() || zone_supports_vm(zone)) {
4597 zone->z_async_refilling = true;
4598 thread_call_enter(&zone_expand_callout);
4599 }
4600 }
4601
4602 static void
4603 zone_expand_locked(zone_t z, zalloc_flags_t flags, bool (*pred)(zone_t))
4604 {
4605 zone_security_flags_t zsflags = zone_security_config(z);
4606 struct zone_expand ze = {
4607 .ze_thread = current_thread(),
4608 };
4609
4610 if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4611 ze.ze_thread->options |= TH_OPT_VMPRIV;
4612 ze.ze_clear_priv = true;
4613 }
4614
4615 if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4616 /*
4617 * When the thread is VM privileged,
4618 * vm_page_grab() will call VM_PAGE_WAIT()
4619 * without our knowledge, so we unfortunately
4620 * must assume it's being called.
4621 *
4622 * In practice it's not a big deal because
4623 * Z_NOPAGEWAIT is not really used on zones
4624 * that VM privileged threads are going to expand.
4625 */
4626 ze.ze_pg_wait = true;
4627 ze.ze_vm_priv = true;
4628 }
4629
4630 for (;;) {
4631 if (!pred) {
4632 /* NULL pred means "try just once" */
4633 pred = zone_expand_pred_nope;
4634 } else if (!pred(z)) {
4635 goto out;
4636 }
4637
4638 if (z->z_expander == NULL) {
4639 z->z_expander = &ze;
4640 break;
4641 }
4642
4643 if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4644 change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4645 ze.ze_next = z->z_expander;
4646 z->z_expander = &ze;
4647 break;
4648 }
4649
4650 if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4651 goto out;
4652 }
4653
4654 z->z_expanding_wait = true;
4655 hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4656 LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4657 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4658 }
4659
4660 do {
4661 struct zone_page_metadata *meta = NULL;
4662 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4663 vm_page_t page_list = NULL;
4664 vm_offset_t addr = 0;
4665 int waited = 0;
4666
4667 /*
4668 * While we hold the zone lock, look if there's VA we can:
4669 * - complete from partial pages,
4670 * - reuse from the sequester list.
4671 *
4672 * When the page is being populated we pretend we allocated
4673 * an extra element so that zone_gc() can't attempt to free
4674 * the chunk (as it could become empty while we wait for pages).
4675 */
4676 if (zone_pva_is_null(z->z_pageq_va)) {
4677 zone_allocate_va_locked(z, flags);
4678 }
4679
4680 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4681 addr = zone_meta_to_addr(meta);
4682 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4683 cur_pages = meta->zm_page_index;
4684 meta -= cur_pages;
4685 addr -= ptoa(cur_pages);
4686 zone_meta_lock_in_partial(z, meta, cur_pages);
4687 }
4688 zone_unlock(z);
4689
4690 /*
4691 * And now allocate pages to populate our VA.
4692 */
4693 min_pages = z->z_chunk_pages;
4694 #if !KASAN_CLASSIC
4695 if (!z->z_percpu) {
4696 min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4697 zone_elem_outer_size(z)));
4698 }
4699 #endif /* !KASAN_CLASSIC */
4700
4701 /*
4702 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
4703 * if we're running out of zone memory
4704 */
4705 if (__improbable(zone_map_nearing_exhaustion())) {
4706 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4707 }
4708
4709 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4710
4711 while (pages < z->z_chunk_pages - cur_pages) {
4712 vm_page_t m = vm_page_grab();
4713
4714 if (m) {
4715 pages++;
4716 m->vmp_snext = page_list;
4717 page_list = m;
4718 vm_page_zero_fill(m);
4719 continue;
4720 }
4721
4722 if (pages >= min_pages &&
4723 !zone_expand_wait_for_pages(waited)) {
4724 break;
4725 }
4726
4727 if ((flags & Z_NOPAGEWAIT) == 0) {
4728 /*
4729 * The first time we're about to wait for pages,
4730 * mention that to waiters and wake them all.
4731 *
4732 * Set `ze_pg_wait` in our zone_expand context
4733 * so that waiters who care do not wait again.
4734 */
4735 if (!ze.ze_pg_wait) {
4736 zone_lock(z);
4737 if (z->z_expanding_wait) {
4738 z->z_expanding_wait = false;
4739 wakeup_all_with_inheritor(&z->z_expander,
4740 THREAD_AWAKENED);
4741 }
4742 ze.ze_pg_wait = true;
4743 zone_unlock(z);
4744 }
4745
4746 waited++;
4747 VM_PAGE_WAIT();
4748 continue;
4749 }
4750
4751 /*
4752 * Undo everything and bail out:
4753 *
4754 * - free pages
4755 * - undo the fake allocation if any
4756 * - put the VA back on the VA page queue.
4757 */
4758 vm_page_free_list(page_list, FALSE);
4759 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4760
4761 zone_lock(z);
4762
4763 zone_expand_async_schedule_if_allowed(z);
4764
4765 if (cur_pages) {
4766 zone_meta_unlock_from_partial(z, meta, cur_pages);
4767 }
4768 if (meta) {
4769 zone_meta_queue_push(z, &z->z_pageq_va,
4770 meta + cur_pages);
4771 }
4772 goto page_shortage;
4773 }
4774
4775 vm_object_lock(kernel_object);
4776 kernel_memory_populate_object_and_unlock(kernel_object,
4777 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4778 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4779 (zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY)
4780 ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE);
4781
4782 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4783
4784 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4785
4786 if (z->z_wired_cur == z->z_wired_max) {
4787 zone_unlock(z);
4788 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z);
4789 zone_lock(z);
4790 }
4791 } while (pred(z));
4792
4793 page_shortage:
4794 if (z->z_expander == &ze) {
4795 z->z_expander = ze.ze_next;
4796 } else {
4797 assert(z->z_expander->ze_next == &ze);
4798 z->z_expander->ze_next = NULL;
4799 }
4800 if (z->z_expanding_wait) {
4801 z->z_expanding_wait = false;
4802 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4803 }
4804 out:
4805 if (ze.ze_clear_priv) {
4806 ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4807 }
4808 }
4809
4810 static bool
4811 zalloc_needs_refill(zone_t zone)
4812 {
4813 if (zone->z_elems_free > zone->z_elems_rsv) {
4814 return false;
4815 }
4816 if (zone->z_wired_cur < zone->z_wired_max) {
4817 return true;
4818 }
4819 return !zone->exhaustible;
4820 }
4821
4822 static void
4823 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4824 {
4825 zone_foreach(z) {
4826 if (z->no_callout) {
4827 /* z_async_refilling will never be set */
4828 continue;
4829 }
4830
4831 if (!z->z_async_refilling) {
4832 /*
4833 * avoid locking all zones, because the one(s)
4834 * we're looking for have been set _before_
4835 * thread_call_enter() was called; if we fail
4836 * to observe the bit, it means the thread-call
4837 * has been "dinged" again and we'll notice it then.
4838 */
4839 continue;
4840 }
4841
4842 zone_lock(z);
4843 if (z->z_self && z->z_async_refilling) {
4844 zone_expand_locked(z, Z_WAITOK, zalloc_needs_refill);
4845 /*
4846 * clearing _after_ we grow is important,
4847 * so that we avoid waking up the thread call
4848 * while we grow and cause it to run a second time.
4849 */
4850 z->z_async_refilling = false;
4851 }
4852 zone_unlock(z);
4853 }
4854 }
4855
4856 #endif /* !ZALLOC_TEST */
4857 #pragma mark zone jetsam integration
4858 #if !ZALLOC_TEST
4859
4860 /*
4861 * We're being very conservative here and picking a value of 95%. We might need to lower this if
4862 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4863 */
4864 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4865
4866 /*
4867 * Threshold above which largest zones should be included in the panic log
4868 */
4869 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4870
4871 /*
4872 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4873 * where X=zone_map_jetsam_limit.
4874 *
4875 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4876 */
4877 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
4878 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
4879
4880 kern_return_t
4881 zone_map_jetsam_set_limit(uint32_t value)
4882 {
4883 if (value <= 0 || value > 100) {
4884 return KERN_INVALID_VALUE;
4885 }
4886
4887 zone_map_jetsam_limit = value;
4888 os_atomic_store(&zone_pages_jetsam_threshold,
4889 zone_pages_wired_max * value / 100, relaxed);
4890 return KERN_SUCCESS;
4891 }
4892
4893 void
4894 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
4895 {
4896 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4897 *current_size = ptoa_64(phys_pages);
4898 *capacity = ptoa_64(zone_pages_wired_max);
4899 }
4900
4901 void
4902 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
4903 {
4904 zone_t largest_zone = zone_find_largest(zone_size);
4905
4906 /*
4907 * Append kalloc heap name to zone name (if zone is used by kalloc)
4908 */
4909 snprintf(zone_name, zone_name_len, "%s%s",
4910 zone_heap_name(largest_zone), largest_zone->z_name);
4911 }
4912
4913 static bool
4914 zone_map_nearing_threshold(unsigned int threshold)
4915 {
4916 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4917 return phys_pages * 100 > zone_pages_wired_max * threshold;
4918 }
4919
4920 bool
4921 zone_map_nearing_exhaustion(void)
4922 {
4923 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
4924
4925 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
4926 }
4927
4928
4929 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
4930
4931 /*
4932 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
4933 * to walk through the jetsam priority bands and kill processes.
4934 */
4935 static zone_t
4936 kill_process_in_largest_zone(void)
4937 {
4938 pid_t pid = -1;
4939 uint64_t zone_size = 0;
4940 zone_t largest_zone = zone_find_largest(&zone_size);
4941
4942 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
4943 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
4944 ptoa_64(zone_pages_wired_max),
4945 (uint64_t)zone_submaps_approx_size(),
4946 (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
4947 zone_map_jetsam_limit);
4948 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
4949 largest_zone->z_name, (uintptr_t)zone_size);
4950
4951 /*
4952 * We want to make sure we don't call this function from userspace.
4953 * Or we could end up trying to synchronously kill the process
4954 * whose context we're in, causing the system to hang.
4955 */
4956 assert(current_task() == kernel_task);
4957
4958 /*
4959 * If vm_object_zone is the largest, check to see if the number of
4960 * elements in vm_map_entry_zone is comparable.
4961 *
4962 * If so, consider vm_map_entry_zone as the largest. This lets us target
4963 * a specific process to jetsam to quickly recover from the zone map
4964 * bloat.
4965 */
4966 if (largest_zone == vm_object_zone) {
4967 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
4968 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
4969 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
4970 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
4971 largest_zone = vm_map_entry_zone;
4972 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
4973 (uintptr_t)zone_size_wired(largest_zone));
4974 }
4975 }
4976
4977 /* TODO: Extend this to check for the largest process in other zones as well. */
4978 if (largest_zone == vm_map_entry_zone) {
4979 pid = find_largest_process_vm_map_entries();
4980 } else {
4981 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
4982 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
4983 largest_zone->z_name);
4984 }
4985 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
4986 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
4987 }
4988
4989 return largest_zone;
4990 }
4991
4992 #endif /* !ZALLOC_TEST */
4993 #pragma mark probabilistic gzalloc
4994 #if !ZALLOC_TEST
4995 #if CONFIG_PROB_GZALLOC
4996
4997 extern uint32_t random(void);
4998 struct pgz_backtrace {
4999 uint32_t pgz_depth;
5000 int32_t pgz_bt[MAX_ZTRACE_DEPTH];
5001 };
5002
5003 static int32_t PERCPU_DATA(pgz_sample_counter);
5004 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
5005 static uint32_t pgz_uses; /* number of zones using PGZ */
5006 static int32_t pgz_slot_avail;
5007 #if OS_ATOMIC_HAS_LLSC
5008 struct zone_page_metadata *pgz_slot_head;
5009 #else
5010 static struct pgz_slot_head {
5011 uint32_t psh_count;
5012 uint32_t psh_slot;
5013 } pgz_slot_head;
5014 #endif
5015 struct zone_page_metadata *pgz_slot_tail;
5016 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5017
5018 static struct zone_page_metadata *
5019 pgz_meta(uint32_t index)
5020 {
5021 return &zone_info.zi_pgz_meta[2 * index + 1];
5022 }
5023
5024 static struct pgz_backtrace *
5025 pgz_bt(uint32_t slot, bool free)
5026 {
5027 return &pgz_backtraces[2 * slot + free];
5028 }
5029
5030 static void
5031 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5032 {
5033 struct backtrace_control ctl = {
5034 .btc_frame_addr = (uintptr_t)fp,
5035 };
5036
5037 bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5038 (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5039 }
5040
5041 static uint32_t
5042 pgz_slot(vm_offset_t addr)
5043 {
5044 return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5045 }
5046
5047 static vm_offset_t
5048 pgz_addr(uint32_t slot)
5049 {
5050 return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5051 }
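
/*
 * Illustrative layout note (added for clarity): in the PGZ range, pages
 * alternate between guards (even page indices) and usable slots (odd
 * page indices), which is why pgz_meta() and pgz_addr() above use the
 * 2 * slot + 1 indexing:
 *
 *	[guard][slot 0][guard][slot 1][guard] ... [slot N-1][guard]
 */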
5052
5053 static bool
5054 pgz_sample(vm_offset_t addr, vm_size_t esize)
5055 {
5056 int32_t *counterp, cnt;
5057
5058 if (zone_addr_size_crosses_page(addr, esize)) {
5059 return false;
5060 }
5061
5062 /*
5063 * Note: accessing pgz_sample_counter is racy but this is
5064 * kind of acceptable given that this is not
5065 * a security load-bearing feature.
5066 */
5067
5068 counterp = PERCPU_GET(pgz_sample_counter);
5069 cnt = *counterp;
5070 if (__probable(cnt > 0)) {
5071 *counterp = cnt - 1;
5072 return false;
5073 }
5074
5075 if (pgz_slot_avail <= 0) {
5076 return false;
5077 }
5078
5079 /*
5080 * zalloc_random_uniform() might block, so when preemption is disabled,
5081 * set the counter to `-1` which will cause the next allocation
5082 * that can block to generate a new random value.
5083 *
5084 * No allocation on this CPU will sample until then.
5085 */
5086 if (get_preemption_level()) {
5087 *counterp = -1;
5088 } else {
5089 *counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5090 }
5091
5092 return cnt == 0;
5093 }
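
/*
 * Note (descriptive comment added for clarity): the per-CPU counter above
 * is reseeded uniformly in [0, 2 * pgz_sample_rate), so it averages about
 * pgz_sample_rate, and roughly one allocation in pgz_sample_rate per CPU
 * ends up asking for a PGZ slot (slot availability permitting).
 */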
5094
5095 static inline bool
5096 pgz_slot_alloc(uint32_t *slot)
5097 {
5098 struct zone_page_metadata *m;
5099 uint32_t tries = 100;
5100
5101 disable_preemption();
5102
5103 #if OS_ATOMIC_USE_LLSC
5104 int32_t ov, nv;
5105 os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5106 if (__improbable(ov <= 0)) {
5107 os_atomic_rmw_loop_give_up({
5108 enable_preemption();
5109 return false;
5110 });
5111 }
5112 nv = ov - 1;
5113 });
5114 #else
5115 if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5116 os_atomic_inc(&pgz_slot_avail, relaxed);
5117 enable_preemption();
5118 return false;
5119 }
5120 #endif
5121
5122 again:
5123 if (__improbable(tries-- == 0)) {
5124 /*
5125 * Too much contention,
5126 * extremely unlikely but do not stay stuck.
5127 */
5128 os_atomic_inc(&pgz_slot_avail, relaxed);
5129 enable_preemption();
5130 return false;
5131 }
5132
5133 #if OS_ATOMIC_HAS_LLSC
5134 do {
5135 m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5136 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5137 /*
5138 * Either we are waiting for an enqueuer (unlikely)
5139 * or we are competing with another core and
5140 * are looking at a popped element.
5141 */
5142 os_atomic_clear_exclusive();
5143 goto again;
5144 }
5145 } while (!os_atomic_store_exclusive(&pgz_slot_head,
5146 m->zm_pgz_slot_next, relaxed));
5147 #else
5148 struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5149 struct pgz_slot_head ov, nv;
5150 os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5151 m = &base[ov.psh_slot * 2];
5152 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5153 /*
5154 * Either we are waiting for an enqueuer (unlikely)
5155 * or we are competing with another core and
5156 * are looking at a popped element.
5157 */
5158 os_atomic_rmw_loop_give_up(goto again);
5159 }
5160 nv.psh_count = ov.psh_count + 1;
5161 nv.psh_slot = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5162 });
5163 #endif
5164
5165 enable_preemption();
5166
5167 m->zm_pgz_slot_next = NULL;
5168 *slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5169 return true;
5170 }
5171
5172 static inline bool
5173 pgz_slot_free(uint32_t slot)
5174 {
5175 struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
5176 struct zone_page_metadata *t;
5177
5178 disable_preemption();
5179 t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5180 os_atomic_store(&t->zm_pgz_slot_next, m, release);
5181 os_atomic_inc(&pgz_slot_avail, relaxed);
5182 enable_preemption();
5183
5184 return true;
5185 }
5186
5187 /*!
5188 * @function pgz_protect()
5189 *
5190 * @brief
5191 * Try to protect an allocation with PGZ.
5192 *
5193 * @param zone The zone the allocation was made against.
5194 * @param addr An allocated element address to protect.
5195 * @param fp The caller frame pointer (for the backtrace).
5196 * @returns The new address for the element, or @c addr.
5197 */
5198 __attribute__((noinline))
5199 static vm_offset_t
5200 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5201 {
5202 kern_return_t kr;
5203 uint32_t slot;
5204
5205 if (!pgz_slot_alloc(&slot)) {
5206 return addr;
5207 }
5208
5209 /*
5210 * Try to double-map the page (may fail if Z_NOWAIT).
5211 * We will always find a PA because pgz_init() pre-expanded the pmap.
5212 */
5213 vm_offset_t new_addr = pgz_addr(slot);
5214 pmap_paddr_t pa = kvtophys(trunc_page(addr));
5215
5216 kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5217 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5218 get_preemption_level() ? PMAP_OPTIONS_NOWAIT : 0, NULL);
5219
5220 if (__improbable(kr != KERN_SUCCESS)) {
5221 pgz_slot_free(slot);
5222 return addr;
5223 }
5224
5225 struct zone_page_metadata tmp = {
5226 .zm_chunk_len = ZM_PGZ_ALLOCATED,
5227 .zm_index = zone_index(zone),
5228 };
5229 struct zone_page_metadata *meta = pgz_meta(slot);
5230
5231 os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5232 os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5233 pgz_backtrace(pgz_bt(slot, false), fp);
5234
5235 return new_addr + (addr & PAGE_MASK);
5236 }
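
/*
 * Hypothetical call-site sketch (added for clarity, not the actual zalloc
 * code): a sampled allocation would be redirected roughly like this,
 * keeping the original address when no slot can be grabbed:
 *
 *	if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
 *		addr = pgz_protect(zone, addr, __builtin_frame_address(0));
 *	}
 */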
5237
5238 /*!
5239 * @function pgz_unprotect()
5240 *
5241 * @brief
5242 * Release a PGZ slot and returns the original address of a freed element.
5243 *
5244 * @param addr A PGZ protected element address.
5245 * @param fp The caller frame pointer (for the backtrace).
5246 * @returns The non protected address for the element
5247 * that was passed to @c pgz_protect().
5248 */
5249 __attribute__((noinline))
5250 static vm_offset_t
5251 pgz_unprotect(vm_offset_t addr, void *fp)
5252 {
5253 struct zone_page_metadata *meta;
5254 struct zone_page_metadata tmp;
5255 uint32_t slot;
5256
5257 slot = pgz_slot(addr);
5258 meta = zone_meta_from_addr(addr);
5259 tmp = *meta;
5260 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5261 goto double_free;
5262 }
5263
5264 pmap_remove(kernel_pmap, trunc_page(addr), trunc_page(addr) + PAGE_SIZE);
5265
5266 pgz_backtrace(pgz_bt(slot, true), fp);
5267
5268 tmp.zm_chunk_len = ZM_PGZ_FREE;
5269 tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5270 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5271 goto double_free;
5272 }
5273
5274 pgz_slot_free(slot);
5275 return tmp.zm_pgz_orig_addr;
5276
5277 double_free:
5278 panic_fault_address = addr;
5279 meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5280 panic("probabilistic gzalloc double free: %p", (void *)addr);
5281 }
5282
5283 bool
5284 pgz_owned(mach_vm_address_t addr)
5285 {
5286 #if CONFIG_KERNEL_TBI
5287 addr = VM_KERNEL_TBI_FILL(addr);
5288 #endif /* CONFIG_KERNEL_TBI */
5289
5290 return mach_vm_range_contains(&zone_info.zi_pgz_range, addr);
5291 }
5292
5293
5294 __attribute__((always_inline))
5295 vm_offset_t
5296 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5297 {
5298 struct zone_page_metadata *meta;
5299
5300 if (__probable(!pgz_owned(addr))) {
5301 return (vm_offset_t)addr;
5302 }
5303
5304 if (zone_addr_size_crosses_page(addr, size)) {
5305 panic("invalid size for PGZ protected address %p:%p",
5306 (void *)addr, (void *)(addr + size));
5307 }
5308
5309 meta = zone_meta_from_addr((vm_offset_t)addr);
5310 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5311 panic_fault_address = (vm_offset_t)addr;
5312 panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5313 }
5314
5315 return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5316 }
5317
5318 __attribute__((always_inline))
5319 vm_offset_t
__pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5321 {
5322 struct zone_page_metadata *meta;
5323 struct zone_page_metadata tmp;
5324
5325 if (__probable(!pgz_owned(addr))) {
5326 return addr;
5327 }
5328
5329 meta = zone_meta_from_addr(addr);
5330 tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5331
5332 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5333
5334 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5335 return 0;
5336 }
5337
5338 if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5339 return 0;
5340 }
5341
5342 return addr;
5343 }
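/*
 * Unlike __pgz_decode(), which panics on a stale protected pointer,
 * __pgz_decode_allow_invalid() returns 0 when the slot is no longer
 * allocated or belongs to a different zone, which lets callers probe
 * possibly-freed elements safely.
 */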
5344
5345 static void
pgz_zone_init(zone_t z)
5347 {
5348 char zn[MAX_ZONE_NAME];
5349 char zv[MAX_ZONE_NAME];
5350 char key[30];
5351
5352 if (zone_elem_inner_size(z) > PAGE_SIZE) {
5353 return;
5354 }
5355
5356 if (pgz_all) {
5357 os_atomic_inc(&pgz_uses, relaxed);
5358 z->z_pgz_tracked = true;
5359 return;
5360 }
5361
5362 snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5363
5364 for (int i = 1;; i++) {
5365 snprintf(key, sizeof(key), "pgz%d", i);
5366 if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5367 break;
5368 }
5369 if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5370 os_atomic_inc(&pgz_uses, relaxed);
5371 z->z_pgz_tracked = true;
5372 break;
5373 }
5374 }
5375 }
5376
5377 __startup_func
5378 static vm_size_t
pgz_get_size(void)
5380 {
5381 if (pgz_slots == UINT32_MAX) {
/*
 * Scale with RAM size: one slot per 4 MiB, about 256 slots per GiB.
 */
5385 pgz_slots = (uint32_t)(sane_size >> 22);
5386 }
5387
/*
 * Make sure that the slot allocation scheme works;
 * see pgz_slot_alloc() / pgz_slot_free().
 */
5392 if (pgz_slots < zpercpu_count() * 4) {
5393 pgz_slots = zpercpu_count() * 4;
5394 }
5395 if (pgz_slots >= UINT16_MAX) {
5396 pgz_slots = UINT16_MAX - 1;
5397 }
5398
5399 /*
5400 * Quarantine is 33% of slots by default, no more than 90%.
5401 */
5402 if (pgz_quarantine == 0) {
5403 pgz_quarantine = pgz_slots / 3;
5404 }
5405 if (pgz_quarantine > pgz_slots * 9 / 10) {
5406 pgz_quarantine = pgz_slots * 9 / 10;
5407 }
5408 pgz_slot_avail = pgz_slots - pgz_quarantine;
5409
5410 return ptoa(2 * pgz_slots + 1);
5411 }
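/*
 * Worked example: with 8 GiB of RAM and no overriding boot-args,
 * pgz_slots = (8 GiB >> 22) = 2048, pgz_quarantine = 2048 / 3 = 682,
 * pgz_slot_avail = 1366, and the PGZ window reserves
 * ptoa(2 * 2048 + 1) = 4097 pages of VA (slots interleaved with guard
 * pages, see pgz_init()).
 */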
5412
5413 __startup_func
5414 static void
pgz_init(void)
5416 {
5417 if (!pgz_uses) {
5418 return;
5419 }
5420
5421 if (pgz_sample_rate == 0) {
/*
 * If no rate was provided, pick a random one that scales
 * with the number of protected zones.
 *
 * Average two uniform draws (a triangular distribution) to avoid
 * having too many really fast sample rates.
 */
5429 uint32_t factor = MIN(pgz_uses, 10);
5430 uint32_t max_rate = 1000 * factor;
5431 uint32_t min_rate = 100 * factor;
5432
5433 pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5434 zalloc_random_uniform32(min_rate, max_rate)) / 2;
5435 }
5436
5437 struct mach_vm_range *r = &zone_info.zi_pgz_range;
5438 zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5439 zone_meta_populate(r->min_address, mach_vm_range_size(r));
5440
5441 for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5442 zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5443 }
5444
5445 for (size_t i = 1; i < pgz_slots; i++) {
5446 zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5447 &zone_info.zi_pgz_meta[2 * i + 1];
5448 }
5449 #if OS_ATOMIC_HAS_LLSC
5450 pgz_slot_head = &zone_info.zi_pgz_meta[1];
5451 #endif
5452 pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5453
5454 pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5455 2 * pgz_slots, ZALIGN_PTR);
5456
/*
 * Expand the pmap so that pmap_enter_options_addr()
 * in pgz_protect() never needs to call pmap_expand().
 */
5461 for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5462 (void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5463 VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5464 PMAP_OPTIONS_NOENTER, NULL);
5465 }
5466
5467 /* do this last as this will enable pgz */
5468 percpu_foreach(counter, pgz_sample_counter) {
5469 *counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5470 }
5471 }
5472 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
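/*
 * Resulting layout of the PGZ window: guard and slot pages alternate, so
 * every slot page is bracketed by unmapped guards:
 *
 *	page:      0       1       2       3       4    ...    2n
 *	use:     guard   slot 0  guard   slot 1  guard   ...   guard
 *
 * which is why pgz_get_size() reserves 2 * pgz_slots + 1 pages.
 */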
5473
5474 static void
panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5476 {
5477 struct pgz_backtrace *bt = pgz_bt(slot, free);
5478 const char *what = free ? "Free" : "Allocation";
5479 uintptr_t buf[MAX_ZTRACE_DEPTH];
5480
5481 if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5482 paniclog_append_noflush(" Can't decode %s Backtrace\n", what);
5483 return;
5484 }
5485
5486 backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5487 (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5488
5489 paniclog_append_noflush(" %s Backtrace:\n", what);
5490 for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5491 if (has_syms) {
5492 paniclog_append_noflush(" %p ", (void *)buf[i]);
5493 panic_print_symbol_name(buf[i]);
5494 paniclog_append_noflush("\n");
5495 } else {
5496 paniclog_append_noflush(" %p\n", (void *)buf[i]);
5497 }
5498 }
5499 kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5500 }
5501
5502 static void
panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5504 {
5505 struct zone_page_metadata *meta;
5506 vm_offset_t elem, esize;
5507 const char *type;
5508 const char *prob;
5509 uint32_t slot;
5510 zone_t z;
5511
5512 slot = pgz_slot(addr);
5513 meta = pgz_meta(slot);
5514 elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5515
5516 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5517
5518 if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5519 meta->zm_index &&
5520 meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5521 z = &zone_array[meta->zm_index];
5522 } else {
5523 paniclog_append_noflush(" Zone : <unknown>\n");
5524 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5525 paniclog_append_noflush("\n");
5526 return;
5527 }
5528
5529 esize = zone_elem_inner_size(z);
5530 paniclog_append_noflush(" Zone : %s%s\n",
5531 zone_heap_name(z), zone_name(z));
5532 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5533 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
5534 (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5535
5536 if (addr < elem) {
5537 type = "out-of-bounds(underflow) + use-after-free";
5538 prob = "low";
5539 } else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5540 type = "double-free";
5541 prob = "high";
5542 } else if (addr < elem + esize) {
5543 type = "use-after-free";
5544 prob = "high";
5545 } else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5546 type = "out-of-bounds + use-after-free";
5547 prob = "low";
5548 } else {
5549 type = "out-of-bounds";
5550 prob = "high";
5551 }
5552 paniclog_append_noflush(" Kind : %s (%s confidence)\n",
5553 type, prob);
5554 if (addr < elem) {
5555 paniclog_append_noflush(" Access : %d byte(s) before\n",
5556 (uint32_t)(elem - addr) + 1);
5557 } else if (addr < elem + esize) {
5558 paniclog_append_noflush(" Access : %d byte(s) inside\n",
5559 (uint32_t)(addr - elem) + 1);
5560 } else {
5561 paniclog_append_noflush(" Access : %d byte(s) past\n",
5562 (uint32_t)(addr - (elem + esize)) + 1);
5563 }
5564
5565 panic_display_pgz_bt(has_syms, slot, false);
5566 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5567 panic_display_pgz_bt(has_syms, slot, true);
5568 }
5569
5570 paniclog_append_noflush("\n");
5571 }
5572
5573 #endif /* CONFIG_PROB_GZALLOC */
5574 #endif /* !ZALLOC_TEST */
5575 #pragma mark zfree
5576 #if !ZALLOC_TEST
5577
5578 /*!
5579 * @defgroup zfree
5580 * @{
5581 *
5582 * @brief
5583 * The codepath for zone frees.
5584 *
5585 * @discussion
* There are 4 major ways to free memory that end up in the zone allocator:
5587 * - @c zfree()
5588 * - @c zfree_percpu()
5589 * - @c kfree*()
5590 * - @c zfree_permanent()
5591 *
5592 * While permanent zones have their own allocation scheme, all other codepaths
5593 * will eventually go through the @c zfree_ext() choking point.
5594 */
5595
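/*
 * For illustration, the typical client-facing pattern that funnels into
 * zfree_ext() (my_zone and struct my_elem are placeholders):
 *
 *	zone_t my_zone = zone_create("example.my_elem",
 *	    sizeof(struct my_elem), ZC_NONE);
 *
 *	struct my_elem *e = zalloc_flags(my_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(my_zone, e);	// zeroes the element, then calls zfree_ext()
 */
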
5596 __header_always_inline void
zfree_drop(zone_t zone, vm_offset_t addr)
5598 {
5599 vm_offset_t esize = zone_elem_outer_size(zone);
5600 struct zone_page_metadata *meta;
5601 vm_offset_t eidx;
5602
5603 meta = zone_element_resolve(zone, addr, &eidx);
5604
5605 if (!zone_meta_mark_free(meta, eidx)) {
5606 zone_meta_double_free_panic(zone, addr, __func__);
5607 }
5608
5609 vm_offset_t old_size = meta->zm_alloc_size;
5610 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5611 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5612
5613 if (new_size == 0) {
/* whether the page was on the intermediate or all_used queue, move it to free */
5615 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5616 zone->z_wired_empty += meta->zm_chunk_len;
5617 } else if (old_size + esize > max_size) {
5618 /* first free element on page, move from all_used */
5619 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5620 }
5621 }
5622
5623 __attribute__((noinline))
5624 static void
zfree_item(zone_t zone, vm_offset_t addr)
5626 {
5627 /* transfer preemption count to lock */
5628 zone_lock_nopreempt_check_contention(zone);
5629
5630 zfree_drop(zone, addr);
5631 zone->z_elems_free += 1;
5632
5633 zone_unlock(zone);
5634 }
5635
5636 static void
zfree_cached_depot_recirculate(
5638 zone_t zone,
5639 uint32_t depot_max,
5640 zone_cache_t cache)
5641 {
5642 smr_t smr = zone_cache_smr(cache);
5643 smr_seq_t seq;
5644 uint32_t n;
5645
5646 zone_recirc_lock_nopreempt_check_contention(zone);
5647
5648 n = cache->zc_depot.zd_full;
5649 if (n >= depot_max) {
/*
 * If SMR is in use, rotate the entire chunk of magazines.
 *
 * If the head of the recirculation layer is then ready to be
 * reused, pull some magazines back to refill a little.
 */
5656 seq = zone_depot_move_full(&zone->z_recirc,
5657 &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5658
5659 if (smr) {
5660 smr_deferred_advance_commit(smr, seq);
5661 if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5662 zone_depot_move_full(&cache->zc_depot,
5663 &zone->z_recirc, depot_max / 2, NULL);
5664 }
5665 }
5666 }
5667
5668 n = depot_max - cache->zc_depot.zd_full;
5669 if (n > zone->z_recirc.zd_empty) {
5670 n = zone->z_recirc.zd_empty;
5671 }
5672 if (n) {
5673 zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5674 n, zone);
5675 }
5676
5677 zone_recirc_unlock_nopreempt(zone);
5678 }
5679
5680 static zone_cache_t
zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5682 {
5683 zone_magazine_t mag = NULL, tmp = NULL;
5684 smr_t smr = zone_cache_smr(cache);
5685
5686 if (zone->z_recirc.zd_empty == 0) {
5687 mag = zone_magazine_alloc(Z_NOWAIT);
5688 }
5689
5690 zone_recirc_lock_nopreempt_check_contention(zone);
5691
5692 if (mag == NULL && zone->z_recirc.zd_empty) {
5693 mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5694 __builtin_assume(mag);
5695 }
5696 if (mag) {
5697 tmp = zone_magazine_replace(cache, mag, true);
5698 if (smr) {
5699 smr_deferred_advance_commit(smr, tmp->zm_seq);
5700 }
5701 if (zone_security_array[zone_index(zone)].z_lifo) {
5702 zone_depot_insert_head_full(&zone->z_recirc, tmp);
5703 } else {
5704 zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5705 }
5706 }
5707
5708 zone_recirc_unlock_nopreempt(zone);
5709
5710 return mag ? cache : NULL;
5711 }
5712
5713 __attribute__((noinline))
5714 static zone_cache_t
zfree_cached_trim(zone_t zone, zone_cache_t cache)
5716 {
5717 zone_magazine_t mag = NULL, tmp = NULL;
5718 uint32_t depot_max;
5719
5720 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5721 if (depot_max) {
5722 zone_depot_lock_nopreempt(cache);
5723
5724 if (cache->zc_depot.zd_empty == 0) {
5725 zfree_cached_depot_recirculate(zone, depot_max, cache);
5726 }
5727
5728 if (__probable(cache->zc_depot.zd_empty)) {
5729 mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5730 __builtin_assume(mag);
5731 } else {
5732 mag = zone_magazine_alloc(Z_NOWAIT);
5733 }
5734 if (mag) {
5735 tmp = zone_magazine_replace(cache, mag, true);
5736 zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5737 }
5738 zone_depot_unlock_nopreempt(cache);
5739
5740 return mag ? cache : NULL;
5741 }
5742
5743 return zfree_cached_recirculate(zone, cache);
5744 }
5745
5746 __attribute__((always_inline))
5747 static inline zone_cache_t
zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5749 {
5750 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5751
5752 if (__probable(cache->zc_free_cur < zc_mag_size())) {
5753 return cache;
5754 }
5755
5756 if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5757 zone_cache_swap_magazines(cache);
5758 return cache;
5759 }
5760
5761 return zfree_cached_trim(zone, cache);
5762 }
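/*
 * Summary of the free fast path above: a freed element goes, in order of
 * preference, into the per-CPU free magazine, the swapped-in alloc magazine,
 * a fresh empty magazine obtained via zfree_cached_trim(), and only as a
 * last resort straight back to the zone's page metadata via zfree_item()
 * (see zfree_ext()).
 */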
5763
5764 __attribute__((always_inline))
5765 static inline zone_cache_t
zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5767 {
5768 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5769 size_t idx = cache->zc_free_cur;
5770
5771 if (__probable(idx + 1 < zc_mag_size())) {
5772 return cache;
5773 }
5774
/*
 * When SMR is in use, the bucket is tagged early with
 * @c smr_deferred_advance(), which costs a full barrier,
 * but performs no store.
 *
 * When zones hit the recirculation layer, the advance is committed
 * under the recirculation lock (see zfree_cached_recirculate()).
 *
 * When done this way, the zone contention detection mechanism
 * will adjust the size of the per-cpu depots gracefully, which
 * mechanically reduces the pace of these commits as usage increases.
 */
5787
5788 if (__probable(idx + 1 == zc_mag_size())) {
5789 zone_magazine_t mag;
5790
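/*
 * zc_free_elems points at the zm_elems array of the current free
 * magazine, so the magazine header can be recovered by subtracting
 * offsetof(struct zone_magazine, zm_elems).
 */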
5791 mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5792 offsetof(struct zone_magazine, zm_elems));
5793 mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5794 return cache;
5795 }
5796
5797 return zfree_cached_trim(zone, cache);
5798 }
5799
5800 __attribute__((always_inline))
5801 static inline vm_offset_t
__zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5803 {
5804 struct zone_page_metadata *meta;
5805 vm_offset_t offs;
5806
5807 #pragma unused(combined_size)
5808 #if CONFIG_PROB_GZALLOC
5809 if (__improbable(pgz_owned(elem))) {
5810 elem = pgz_unprotect(elem, __builtin_frame_address(0));
5811 }
5812 #endif /* CONFIG_PROB_GZALLOC */
5813
5814 meta = zone_meta_from_addr(elem);
5815 if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5816 zone_invalid_element_panic(zone, elem);
5817 }
5818
5819 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5820 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5821 offs += ptoa(meta->zm_page_index);
5822 }
5823
5824 if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5825 zone_invalid_element_panic(zone, elem);
5826 }
5827
5828 #if VM_TAG_SIZECLASSES
5829 if (__improbable(zone->z_uses_tags)) {
5830 vm_tag_t *slot;
5831
5832 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5833 Z_FAST_QUO(offs, zone->z_quo_magic));
5834 vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5835 -(long)ZFREE_ELEM_SIZE(combined_size));
5836 *slot = VM_KERN_MEMORY_NONE;
5837 }
5838 #endif /* VM_TAG_SIZECLASSES */
5839
5840 #if KASAN_CLASSIC
5841 kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5842 ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5843 zone->z_percpu, __builtin_frame_address(0));
5844 #endif
5845 #if KASAN_TBI
5846 elem = kasan_tbi_tag_zfree(elem, ZFREE_ELEM_SIZE(combined_size),
5847 zone->z_percpu);
5848 #endif
5849
5850 return elem;
5851 }
5852
5853 __attribute__((always_inline))
vm_offset_t
(zcache_mark_invalid)(zone_t zone, vm_offset_t elem)
5856 {
vm_size_t esize = zone_elem_inner_size(zone);
5858
5859 ZFREE_LOG(zone, elem, 1);
5860 return __zcache_mark_invalid(zone, elem, ZFREE_PACK_SIZE(esize, esize));
5861 }
5862
5863 /*
5864 * The function is noinline when zlog can be used so that the backtracing can
5865 * reliably skip the zfree_ext() and zfree_log()
5866 * boring frames.
5867 */
5868 #if ZALLOC_ENABLE_LOGGING
5869 __attribute__((noinline))
5870 #endif /* ZALLOC_ENABLE_LOGGING */
5871 void
zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
5873 {
5874 vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
5875 vm_offset_t elem = (vm_offset_t)addr;
5876 int cpu;
5877
5878 DTRACE_VM2(zfree, zone_t, zone, void*, elem);
5879
5880 ZFREE_LOG(zone, elem, 1);
5881 elem = __zcache_mark_invalid(zone, elem, combined_size);
5882
5883 disable_preemption();
5884 cpu = cpu_number();
5885 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
5886
5887 #if KASAN_CLASSIC
5888 if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
5889 struct kasan_quarantine_result kqr;
5890
5891 kqr = kasan_quarantine(elem, esize);
5892 elem = kqr.addr;
5893 zone = kqr.zone;
5894 if (elem == 0) {
5895 return enable_preemption();
5896 }
5897 }
5898 #endif
5899
5900 if (zone->z_pcpu_cache) {
5901 zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
5902
5903 if (__probable(cache)) {
5904 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5905 return enable_preemption();
5906 }
5907 }
5908
5909 return zfree_item(zone, elem);
5910 }
5911
5912 __attribute__((always_inline))
5913 static inline zstack_t
zcache_free_stack_to_cpu(
5915 zone_id_t zid,
5916 zone_cache_t cache,
5917 zstack_t stack,
5918 vm_size_t esize,
5919 zone_cache_ops_t ops,
5920 bool zero)
5921 {
5922 size_t n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
5923 vm_offset_t *p;
5924
5925 stack.z_count -= n;
5926 cache->zc_free_cur += n;
5927 p = cache->zc_free_elems + cache->zc_free_cur;
5928
5929 do {
5930 void *o = zstack_pop_no_delta(&stack);
5931
5932 if (ops) {
5933 o = ops->zc_op_mark_invalid(zid, o);
5934 } else {
5935 if (zero) {
5936 bzero(o, esize);
5937 }
5938 o = (void *)__zcache_mark_invalid(zone_by_id(zid),
5939 (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
5940 }
5941 *--p = (vm_offset_t)o;
5942 } while (--n > 0);
5943
5944 return stack;
5945 }
5946
5947 __attribute__((always_inline))
5948 static inline void
zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
5950 {
5951 vm_offset_t elem = (vm_offset_t)addr;
5952 zone_cache_t cache;
5953 vm_size_t esize;
5954 zone_t zone = zone_by_id(zid);
5955 int cpu;
5956
5957 ZFREE_LOG(zone, elem, 1);
5958
5959 disable_preemption();
5960 cpu = cpu_number();
5961 esize = zone_elem_inner_size(zone);
5962 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
5963 if (!ops) {
5964 addr = (void *)__zcache_mark_invalid(zone, elem,
5965 ZFREE_PACK_SIZE(esize, esize));
5966 }
5967 cache = zfree_cached_get_pcpu_cache(zone, cpu);
5968 if (__probable(cache)) {
5969 if (ops) {
5970 addr = ops->zc_op_mark_invalid(zid, addr);
5971 }
5972 cache->zc_free_elems[cache->zc_free_cur++] = elem;
5973 enable_preemption();
5974 } else if (ops) {
5975 enable_preemption();
5976 os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
5977 ops->zc_op_free(zid, addr);
5978 } else {
5979 zfree_item(zone, elem);
5980 }
5981 }
5982
5983 __attribute__((always_inline))
5984 static inline void
zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
5986 {
5987 zone_t zone = zone_by_id(zid);
5988 zone_cache_t cache;
5989 vm_size_t esize;
5990 int cpu;
5991
5992 ZFREE_LOG(zone, stack.z_head, stack.z_count);
5993
5994 disable_preemption();
5995 cpu = cpu_number();
5996 esize = zone_elem_inner_size(zone);
5997 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
5998 stack.z_count * esize;
5999
6000 for (;;) {
6001 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6002 if (__probable(cache)) {
6003 stack = zcache_free_stack_to_cpu(zid, cache,
6004 stack, esize, ops, zero);
6005 enable_preemption();
6006 } else if (ops) {
6007 enable_preemption();
6008 os_atomic_dec(&zone->z_elems_avail, relaxed);
6009 ops->zc_op_free(zid, zstack_pop(&stack));
6010 } else {
6011 vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
6012
6013 if (zero) {
6014 bzero((void *)addr, esize);
6015 }
6016 addr = __zcache_mark_invalid(zone, addr,
6017 ZFREE_PACK_SIZE(esize, esize));
6018 zfree_item(zone, addr);
6019 }
6020
6021 if (stack.z_count == 0) {
6022 break;
6023 }
6024
6025 disable_preemption();
6026 cpu = cpu_number();
6027 }
6028 }
6029
6030 void
6031 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6032 {
6033 __builtin_assume(ops != NULL);
6034 zcache_free_1_ext(zid, addr, ops);
6035 }
6036
6037 void
6038 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6039 {
6040 __builtin_assume(ops != NULL);
6041 zcache_free_n_ext(zid, stack, ops, false);
6042 }
6043
6044 void
6045 (zfree_n)(zone_id_t zid, zstack_t stack)
6046 {
6047 zcache_free_n_ext(zid, stack, NULL, true);
6048 }
6049
6050 void
6051 (zfree_nozero)(zone_id_t zid, void *addr)
6052 {
6053 zcache_free_1_ext(zid, addr, NULL);
6054 }
6055
6056 void
6057 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6058 {
6059 zcache_free_n_ext(zid, stack, NULL, false);
6060 }
6061
6062 void
6063 (zfree)(union zone_or_view zov, void *addr)
6064 {
6065 zone_t zone = zov.zov_view->zv_zone;
6066 zone_stats_t zstats = zov.zov_view->zv_stats;
6067 vm_offset_t esize = zone_elem_inner_size(zone);
6068
6069 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6070 assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6071 bzero(addr, esize);
6072 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6073 }
6074
6075 __attribute__((noinline))
6076 void
zfree_percpu(union zone_or_view zov, void *addr)
6078 {
6079 zone_t zone = zov.zov_view->zv_zone;
6080 zone_stats_t zstats = zov.zov_view->zv_stats;
6081 vm_offset_t esize = zone_elem_inner_size(zone);
6082
6083 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6084 assert(zone->z_percpu);
6085 addr = (void *)__zpcpu_demangle(addr);
6086 zpercpu_foreach_cpu(i) {
6087 bzero((char *)addr + ptoa(i), esize);
6088 }
6089 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6090 }
6091
6092 void
6093 (zfree_id)(zone_id_t zid, void *addr)
6094 {
6095 (zfree)(&zone_array[zid], addr);
6096 }
6097
6098 void
6099 (zfree_ro)(zone_id_t zid, void *addr)
6100 {
6101 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6102 zone_t zone = zone_by_id(zid);
6103 zone_stats_t zstats = zone->z_stats;
6104 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6105
6106 #if ZSECURITY_CONFIG(READ_ONLY)
6107 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6108 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6109 #else
6110 (void)zid;
6111 bzero(addr, esize);
#endif /* ZSECURITY_CONFIG(READ_ONLY) */
6113 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6114 }
6115
6116 __attribute__((noinline))
6117 static void
zfree_item_smr(zone_t zone, vm_offset_t addr)
6119 {
6120 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6121 vm_size_t esize = zone_elem_inner_size(zone);
6122
/*
 * This path is taken extremely rarely:
 * it only happens if we failed to allocate an empty bucket.
 */
6127 smr_synchronize(zone_cache_smr(cache));
6128
6129 cache->zc_free((void *)addr, esize);
6130 addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6131
6132 zfree_item(zone, addr);
6133 }
6134
6135 void
6136 (zfree_smr)(zone_t zone, void *addr)
6137 {
6138 vm_offset_t elem = (vm_offset_t)addr;
6139 vm_offset_t esize;
6140 zone_cache_t cache;
6141 int cpu;
6142
6143 ZFREE_LOG(zone, elem, 1);
6144
6145 disable_preemption();
6146 cpu = cpu_number();
6147 #if MACH_ASSERT
6148 cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6149 assert(!smr_entered_cpu(cache->zc_smr, cpu));
6150 #endif
6151 esize = zone_elem_inner_size(zone);
6152 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6153 cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6154 if (__probable(cache)) {
6155 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6156 enable_preemption();
6157 } else {
6158 zfree_item_smr(zone, elem);
6159 }
6160 }
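/*
 * Note: an element passed to zfree_smr() is only handed back for reuse once
 * the zone's SMR domain has advanced past the sequence recorded on its
 * magazine (see zfree_cached_get_pcpu_cache_smr() and
 * zalloc_cached_reuse_smr()), so readers still inside an SMR critical
 * section may keep accessing it until they leave.
 */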
6161
6162 void
6163 (zfree_id_smr)(zone_id_t zid, void *addr)
6164 {
6165 (zfree_smr)(&zone_array[zid], addr);
6166 }
6167
6168 /*! @} */
6169 #endif /* !ZALLOC_TEST */
6170 #pragma mark zalloc
6171 #if !ZALLOC_TEST
6172
6173 /*!
6174 * @defgroup zalloc
6175 * @{
6176 *
6177 * @brief
6178 * The codepath for zone allocations.
6179 *
6180 * @discussion
6181 * There are 4 major ways to allocate memory that end up in the zone allocator:
6182 * - @c zalloc(), @c zalloc_flags(), ...
6183 * - @c zalloc_percpu()
6184 * - @c kalloc*()
6185 * - @c zalloc_permanent()
6186 *
6187 * While permanent zones have their own allocation scheme, all other codepaths
6188 * will eventually go through the @c zalloc_ext() choking point.
6189 *
6190 * @c zalloc_return() is the final function everyone tail calls into,
6191 * which prepares the element for consumption by the caller and deals with
6192 * common treatment (zone logging, tags, kasan, validation, ...).
6193 */
6194
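/*
 * For illustration, typical flag combinations accepted by the entry points
 * below (my_zone is a placeholder):
 *
 *	e = zalloc_flags(my_zone, Z_WAITOK | Z_ZERO);	// may block, zeroed
 *	e = zalloc_flags(my_zone, Z_NOWAIT);		// never blocks, may fail
 *	e = zalloc_flags(my_zone, Z_WAITOK | Z_NOFAIL);	// may block, never fails
 *
 * Combining Z_NOFAIL with Z_NOWAIT or Z_NOPAGEWAIT trips the assertion in
 * zalloc_ext().
 */
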
/*!
 * @function zalloc_import
 *
 * @brief
 * Import @c n elements into the specified array, the opposite of @c zfree_drop().
 *
 * @param zone The zone to import elements from
 * @param elems The array to import into
 * @param n The number of elements to import. Must be non-zero,
 * and smaller than @c zone->z_elems_free.
 */
6206 __header_always_inline vm_size_t
zalloc_import(
6208 zone_t zone,
6209 vm_offset_t *elems,
6210 zalloc_flags_t flags,
6211 uint32_t n)
6212 {
6213 vm_offset_t esize = zone_elem_outer_size(zone);
6214 vm_offset_t offs = zone_elem_inner_offs(zone);
6215 zone_stats_t zs;
6216 int cpu = cpu_number();
6217 uint32_t i = 0;
6218
6219 zs = zpercpu_get_cpu(zone->z_stats, cpu);
6220
6221 if (__improbable(zone_caching_disabled < 0)) {
/*
 * In the first 10s after boot, mess with
 * the scan position in order to make early
 * allocation patterns less predictable.
 */
6227 zone_early_scramble_rr(zone, cpu, zs);
6228 }
6229
6230 do {
6231 vm_offset_t page, eidx, size = 0;
6232 struct zone_page_metadata *meta;
6233
6234 if (!zone_pva_is_null(zone->z_pageq_partial)) {
6235 meta = zone_pva_to_meta(zone->z_pageq_partial);
6236 page = zone_pva_to_addr(zone->z_pageq_partial);
6237 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6238 meta = zone_pva_to_meta(zone->z_pageq_empty);
6239 page = zone_pva_to_addr(zone->z_pageq_empty);
6240 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6241 } else {
6242 zone_accounting_panic(zone, "z_elems_free corruption");
6243 }
6244
6245 zone_meta_validate(zone, meta, page);
6246
6247 vm_offset_t old_size = meta->zm_alloc_size;
6248 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6249
6250 do {
6251 eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6252 elems[i++] = page + offs + eidx * esize;
6253 size += esize;
6254 } while (i < n && old_size + size + esize <= max_size);
6255
6256 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6257
6258 if (new_size + esize > max_size) {
6259 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6260 } else if (old_size == 0) {
6261 /* remove from free, move to intermediate */
6262 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6263 }
6264 } while (i < n);
6265
6266 n = zone_counter_sub(zone, z_elems_free, n);
6267 if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6268 zone->z_elems_free_min = n;
6269 }
6270
6271 return zone_elem_inner_size(zone);
6272 }
6273
6274 __attribute__((always_inline))
6275 static inline vm_offset_t
__zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6277 {
6278 #pragma unused(zone, flags)
6279 #if KASAN || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6280 vm_offset_t esize = zone_elem_inner_size(zone);
6281 #endif
6282
6283 #if VM_TAG_SIZECLASSES
6284 if (__improbable(zone->z_uses_tags)) {
6285 struct zone_page_metadata *meta;
6286 vm_offset_t offs;
6287 vm_tag_t *slot;
6288 vm_tag_t tag;
6289
6290 tag = zalloc_flags_get_tag(flags);
6291 meta = zone_meta_from_addr(addr);
6292 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6293 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6294 offs += ptoa(meta->zm_page_index);
6295 }
6296
6297 slot = zba_extra_ref_ptr(meta->zm_bitmap,
6298 Z_FAST_QUO(offs, zone->z_quo_magic));
6299 *slot = tag;
6300
6301 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6302 (long)esize);
6303 }
6304 #endif /* VM_TAG_SIZECLASSES */
6305
6306 #if CONFIG_PROB_GZALLOC
6307 if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6308 addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6309 }
6310 #endif
6311
/*
 * KASAN integration of kalloc heaps is handled by kalloc_ext()
 */
6315 if ((flags & Z_SKIP_KASAN) == 0) {
6316 #if KASAN_CLASSIC
6317 kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6318 (flags & Z_PCPU), __builtin_frame_address(0));
6319 #endif /* KASAN_CLASSIC */
6320 #if KASAN_TBI
6321 if (__probable(zone->z_tbi_tag)) {
6322 addr = kasan_tbi_tag_zalloc(addr, esize, esize,
6323 (flags & Z_PCPU));
6324 } else {
6325 addr = kasan_tbi_tag_zalloc_default(addr, esize,
6326 (flags & Z_PCPU));
6327 }
6328 #endif /* KASAN_TBI */
6329 }
6330
6331 return addr;
6332 }
6333
6334 __attribute__((always_inline))
vm_offset_t
(zcache_mark_valid)(zone_t zone, vm_offset_t addr)
6337 {
6338 addr = __zcache_mark_valid(zone, addr, 0);
6339 ZALLOC_LOG(zone, addr, 1);
6340 return addr;
6341 }
6342
6343 /*!
6344 * @function zalloc_return
6345 *
6346 * @brief
6347 * Performs the tail-end of the work required on allocations before the caller
6348 * uses them.
6349 *
6350 * @discussion
* This function is called without any zone lock held,
* and with preemption restored to the state it had when @c zalloc_ext()
* was called.
6353 *
6354 * @param zone The zone we're allocating from.
6355 * @param addr The element we just allocated.
6356 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
6357 * @param elem_size The element size for this zone.
6358 */
6359 __attribute__((always_inline))
6360 static struct kalloc_result
zalloc_return(
6362 zone_t zone,
6363 vm_offset_t addr,
6364 zalloc_flags_t flags,
6365 vm_offset_t elem_size)
6366 {
6367 addr = __zcache_mark_valid(zone, addr, flags);
6368 #if ZALLOC_ENABLE_ZERO_CHECK
6369 zalloc_validate_element(zone, addr, elem_size, flags);
6370 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6371 ZALLOC_LOG(zone, addr, 1);
6372
6373 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6374 return (struct kalloc_result){ (void *)addr, elem_size };
6375 }
6376
6377 __attribute__((noinline))
6378 static struct kalloc_result
zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6380 {
6381 vm_offset_t esize, addr;
6382
6383 zone_lock_nopreempt_check_contention(zone);
6384
6385 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6386 if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6387 zone_expand_async_schedule_if_allowed(zone);
6388 } else {
6389 zone_expand_locked(zone, flags, zalloc_needs_refill);
6390 }
6391 if (__improbable(zone->z_elems_free == 0)) {
6392 zpercpu_get(zstats)->zs_alloc_fail++;
6393 zone_unlock(zone);
6394 if (__improbable(flags & Z_NOFAIL)) {
6395 zone_nofail_panic(zone);
6396 }
6397 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6398 return (struct kalloc_result){ };
6399 }
6400 }
6401
6402 esize = zalloc_import(zone, &addr, flags, 1);
6403 zpercpu_get(zstats)->zs_mem_allocated += esize;
6404 zone_unlock(zone);
6405
6406 return zalloc_return(zone, addr, flags, esize);
6407 }
6408
6409 static void
zalloc_cached_import(
6411 zone_t zone,
6412 zalloc_flags_t flags,
6413 zone_cache_t cache)
6414 {
6415 uint16_t n_elems = zc_mag_size();
6416
6417 zone_lock_nopreempt(zone);
6418
6419 if (__probable(!zone_caching_disabled &&
6420 zone->z_elems_free > zone->z_elems_rsv / 2)) {
6421 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6422 zone_expand_async_schedule_if_allowed(zone);
6423 }
6424 if (zone->z_elems_free < n_elems) {
6425 n_elems = (uint16_t)zone->z_elems_free;
6426 }
6427 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6428 cache->zc_alloc_cur = n_elems;
6429 }
6430
6431 zone_unlock_nopreempt(zone);
6432 }
6433
6434 static void
zalloc_cached_depot_recirculate(
6436 zone_t zone,
6437 uint32_t depot_max,
6438 zone_cache_t cache,
6439 smr_t smr)
6440 {
6441 smr_seq_t seq;
6442 uint32_t n;
6443
6444 zone_recirc_lock_nopreempt_check_contention(zone);
6445
6446 n = cache->zc_depot.zd_empty;
6447 if (n >= depot_max) {
6448 zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6449 n - depot_max / 2, NULL);
6450 }
6451
6452 n = cache->zc_depot.zd_full;
6453 if (smr && n) {
/*
 * If SMR is in use, it means smr_poll() failed,
 * so rotate the entire chunk of magazines in order
 * to let the sequence numbers age.
 */
6459 seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6460 n, NULL);
6461 smr_deferred_advance_commit(smr, seq);
6462 }
6463
6464 n = depot_max - cache->zc_depot.zd_empty;
6465 if (n > zone->z_recirc.zd_full) {
6466 n = zone->z_recirc.zd_full;
6467 }
6468
6469 if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6470 zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6471 n, zone);
6472 }
6473
6474 zone_recirc_unlock_nopreempt(zone);
6475 }
6476
6477 static void
zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6479 {
6480 zone_smr_free_cb_t zc_free = cache->zc_free;
6481 vm_size_t esize = zone_elem_inner_size(z);
6482
6483 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6484 vm_offset_t elem = mag->zm_elems[i];
6485
6486 zc_free((void *)elem, zone_elem_inner_size(z));
6487 elem = __zcache_mark_invalid(z, elem,
6488 ZFREE_PACK_SIZE(esize, esize));
6489 mag->zm_elems[i] = elem;
6490 }
6491 }
6492
6493 static void
zalloc_cached_recirculate(
6495 zone_t zone,
6496 zone_cache_t cache)
6497 {
6498 zone_magazine_t mag = NULL;
6499
6500 zone_recirc_lock_nopreempt_check_contention(zone);
6501
6502 if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6503 mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6504 if (zone_cache_smr(cache)) {
6505 zalloc_cached_reuse_smr(zone, cache, mag);
6506 }
6507 mag = zone_magazine_replace(cache, mag, false);
6508 zone_depot_insert_head_empty(&zone->z_recirc, mag);
6509 }
6510
6511 zone_recirc_unlock_nopreempt(zone);
6512 }
6513
6514 __attribute__((noinline))
6515 static zone_cache_t
zalloc_cached_prime(
6517 zone_t zone,
6518 zone_cache_ops_t ops,
6519 zalloc_flags_t flags,
6520 zone_cache_t cache)
6521 {
6522 zone_magazine_t mag = NULL;
6523 uint32_t depot_max;
6524 smr_t smr;
6525
6526 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6527 if (depot_max) {
6528 smr = zone_cache_smr(cache);
6529
6530 zone_depot_lock_nopreempt(cache);
6531
6532 if (!zone_depot_poll(&cache->zc_depot, smr)) {
6533 zalloc_cached_depot_recirculate(zone, depot_max, cache,
6534 smr);
6535 }
6536
6537 if (__probable(cache->zc_depot.zd_full)) {
6538 mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6539 if (zone_cache_smr(cache)) {
6540 zalloc_cached_reuse_smr(zone, cache, mag);
6541 }
6542 mag = zone_magazine_replace(cache, mag, false);
6543 zone_depot_insert_head_empty(&cache->zc_depot, mag);
6544 }
6545
6546 zone_depot_unlock_nopreempt(cache);
6547 } else if (zone->z_recirc.zd_full) {
6548 zalloc_cached_recirculate(zone, cache);
6549 }
6550
6551 if (__probable(cache->zc_alloc_cur)) {
6552 return cache;
6553 }
6554
6555 if (ops == NULL) {
6556 zalloc_cached_import(zone, flags, cache);
6557 if (__probable(cache->zc_alloc_cur)) {
6558 return cache;
6559 }
6560 }
6561
6562 return NULL;
6563 }
6564
6565 __attribute__((always_inline))
6566 static inline zone_cache_t
zalloc_cached_get_pcpu_cache(
6568 zone_t zone,
6569 zone_cache_ops_t ops,
6570 int cpu,
6571 zalloc_flags_t flags)
6572 {
6573 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6574
6575 if (__probable(cache->zc_alloc_cur != 0)) {
6576 return cache;
6577 }
6578
6579 if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6580 zone_cache_swap_magazines(cache);
6581 return cache;
6582 }
6583
6584 return zalloc_cached_prime(zone, ops, flags, cache);
6585 }
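/*
 * Summary of the allocation fast path above: elements come, in order of
 * preference, from the per-CPU alloc magazine, the swapped-in per-CPU free
 * magazine, a full magazine from the per-CPU depot or the zone-wide
 * recirculation depot (zalloc_cached_prime()), and finally from
 * zalloc_import() pulling straight off the zone's page queues.
 */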
6586
6587
6588 /*!
6589 * @function zalloc_ext
6590 *
6591 * @brief
6592 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6593 */
6594 struct kalloc_result
zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6596 {
6597 /*
6598 * KASan uses zalloc() for fakestack, which can be called anywhere.
6599 * However, we make sure these calls can never block.
6600 */
6601 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6602 #if KASAN_FAKESTACK
6603 zone->z_kasan_fakestacks ||
6604 #endif /* KASAN_FAKESTACK */
6605 ml_get_interrupts_enabled() ||
6606 ml_is_quiescing() ||
6607 debug_mode_active(),
6608 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6609
6610 /*
6611 * Make sure Z_NOFAIL was not obviously misused
6612 */
6613 if (flags & Z_NOFAIL) {
6614 assert(!zone->exhaustible &&
6615 (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6616 }
6617
6618 #if VM_TAG_SIZECLASSES
6619 if (__improbable(zone->z_uses_tags)) {
6620 vm_tag_t tag = zalloc_flags_get_tag(flags);
6621
6622 if (flags & Z_VM_TAG_BT_BIT) {
6623 tag = vm_tag_bt() ?: tag;
6624 }
6625 if (tag != VM_KERN_MEMORY_NONE) {
6626 tag = vm_tag_will_update_zone(tag, zone->z_tags_sizeclass,
6627 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6628 }
6629 if (tag == VM_KERN_MEMORY_NONE) {
6630 zone_security_flags_t zsflags = zone_security_config(zone);
6631
6632 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6633 tag = VM_KERN_MEMORY_KALLOC_DATA;
6634 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6635 zsflags.z_kalloc_type) {
6636 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6637 } else {
6638 tag = VM_KERN_MEMORY_KALLOC;
6639 }
6640 }
6641 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6642 }
6643 #endif /* VM_TAG_SIZECLASSES */
6644
6645 disable_preemption();
6646
6647 #if ZALLOC_ENABLE_ZERO_CHECK
6648 if (zalloc_skip_zero_check()) {
6649 flags |= Z_NOZZC;
6650 }
6651 #endif
6652
6653 if (zone->z_pcpu_cache) {
6654 zone_cache_t cache;
6655 vm_offset_t index, addr, esize;
6656 int cpu = cpu_number();
6657
6658 cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6659 if (__probable(cache)) {
6660 esize = zone_elem_inner_size(zone);
6661 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6662 index = --cache->zc_alloc_cur;
6663 addr = cache->zc_alloc_elems[index];
6664 cache->zc_alloc_elems[index] = 0;
6665 enable_preemption();
6666 return zalloc_return(zone, addr, flags, esize);
6667 }
6668 }
6669
6670 __attribute__((musttail))
6671 return zalloc_item(zone, zstats, flags);
6672 }
6673
6674 __attribute__((always_inline))
6675 static inline zstack_t
zcache_alloc_stack_from_cpu(
6677 zone_id_t zid,
6678 zone_cache_t cache,
6679 zstack_t stack,
6680 uint32_t n,
6681 zone_cache_ops_t ops)
6682 {
6683 vm_offset_t *p;
6684
6685 n = MIN(n, cache->zc_alloc_cur);
6686 p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6687 cache->zc_alloc_cur -= n;
6688 stack.z_count += n;
6689
6690 do {
6691 vm_offset_t e = *--p;
6692
6693 *p = 0;
6694 if (ops) {
6695 e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6696 } else {
6697 e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6698 }
6699 zstack_push_no_delta(&stack, (void *)e);
6700 } while (--n > 0);
6701
6702 return stack;
6703 }
6704
6705 __attribute__((noinline))
6706 static zstack_t
zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6708 {
6709 zone_t zone = zone_by_id(zid);
6710 zone_stats_t zstats = zone->z_stats;
6711 int cpu;
6712
6713 count -= stack.z_count;
6714
6715 disable_preemption();
6716 cpu = cpu_number();
6717 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6718 count * zone_elem_inner_size(zone);
6719 zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6720 enable_preemption();
6721
6722 return stack;
6723 }
6724
6725 __attribute__((always_inline))
6726 static zstack_t
zcache_alloc_n_ext(
6728 zone_id_t zid,
6729 uint32_t count,
6730 zalloc_flags_t flags,
6731 zone_cache_ops_t ops)
6732 {
6733 zstack_t stack = { };
6734 zone_cache_t cache;
6735 zone_t zone;
6736 int cpu;
6737
6738 disable_preemption();
6739 cpu = cpu_number();
6740 zone = zone_by_id(zid);
6741 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6742 count * zone_elem_inner_size(zone);
6743
6744 for (;;) {
6745 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6746 if (__probable(cache)) {
6747 stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6748 count - stack.z_count, ops);
6749 enable_preemption();
6750 } else {
6751 void *o;
6752
6753 if (ops) {
6754 enable_preemption();
6755 o = ops->zc_op_alloc(zid, flags);
6756 } else {
6757 o = zalloc_item(zone, zone->z_stats, flags).addr;
6758 }
6759 if (__improbable(o == NULL)) {
6760 return zcache_alloc_fail(zid, stack, count);
6761 }
6762 if (ops) {
6763 os_atomic_inc(&zone->z_elems_avail, relaxed);
6764 }
6765 zstack_push(&stack, o);
6766 }
6767
6768 if (stack.z_count == count) {
6769 break;
6770 }
6771
6772 disable_preemption();
6773 cpu = cpu_number();
6774 }
6775
6776 ZALLOC_LOG(zone, stack.z_head, stack.z_count);
6777
6778 return stack;
6779 }
6780
6781 zstack_t
zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
6783 {
6784 return zcache_alloc_n_ext(zid, count, flags, NULL);
6785 }
6786
zstack_t
(zcache_alloc_n)(
6789 zone_id_t zid,
6790 uint32_t count,
6791 zalloc_flags_t flags,
6792 zone_cache_ops_t ops)
6793 {
6794 __builtin_assume(ops != NULL);
6795 return zcache_alloc_n_ext(zid, count, flags, ops);
6796 }
6797
6798 __attribute__((always_inline))
6799 void *
zalloc(union zone_or_view zov)
6801 {
6802 return zalloc_flags(zov, Z_WAITOK);
6803 }
6804
6805 __attribute__((always_inline))
6806 void *
zalloc_noblock(union zone_or_view zov)
6808 {
6809 return zalloc_flags(zov, Z_NOWAIT);
6810 }
6811
6812 void *
6813 (zalloc_flags)(union zone_or_view zov, zalloc_flags_t flags)
6814 {
6815 zone_t zone = zov.zov_view->zv_zone;
6816 zone_stats_t zstats = zov.zov_view->zv_stats;
6817
6818 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6819 assert(!zone->z_percpu && !zone->z_permanent);
6820 return zalloc_ext(zone, zstats, flags).addr;
6821 }
6822
6823 __attribute__((always_inline))
6824 void *
6825 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6826 {
6827 return (zalloc_flags)(zone_by_id(zid), flags);
6828 }
6829
6830 void *
6831 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6832 {
6833 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6834 zone_t zone = zone_by_id(zid);
6835 zone_stats_t zstats = zone->z_stats;
6836 struct kalloc_result kr;
6837
6838 kr = zalloc_ext(zone, zstats, flags);
6839 #if ZSECURITY_CONFIG(READ_ONLY)
6840 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6841 if (kr.addr) {
6842 zone_require_ro(zid, kr.size, kr.addr);
6843 }
6844 #endif
6845 return kr.addr;
6846 }
6847
6848 #if ZSECURITY_CONFIG(READ_ONLY)
6849
6850 __attribute__((always_inline))
6851 static bool
from_current_stack(vm_offset_t addr, vm_size_t size)
6853 {
6854 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6855 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6856
6857 #if CONFIG_KERNEL_TBI
6858 addr = VM_KERNEL_TBI_FILL(addr);
6859 #endif /* CONFIG_KERNEL_TBI */
6860
6861 return (addr >= start) && (addr + size < end);
6862 }
6863
/*
 * Check if an address is from const memory, i.e. the TEXT or DATA CONST
 * segments or the SECURITY_READ_ONLY_LATE section.
 */
6868 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6869 __attribute__((always_inline))
6870 static bool
from_const_memory(const vm_offset_t addr, vm_size_t size)
6872 {
6873 return rorgn_contains(addr, size, true);
6874 }
6875 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6876 __attribute__((always_inline))
6877 static bool
from_const_memory(const vm_offset_t addr, vm_size_t size)
6879 {
6880 #pragma unused(addr, size)
6881 return true;
6882 }
6883 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6884
6885 __abortlike
6886 static void
zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
6888 const vm_offset_t src, vm_size_t src_size)
6889 {
6890 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
6891 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
6892 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6893 extern vm_offset_t rorgn_begin;
6894 extern vm_offset_t rorgn_end;
6895 #else
6896 vm_offset_t const rorgn_begin = 0;
6897 vm_offset_t const rorgn_end = 0;
6898 #endif
6899
6900 if (from_ro_map(src, src_size)) {
6901 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
6902 zone_t dst_zone = &zone_array[zid];
6903 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
" (expected: %s, actual: %s)", (void *)src, elem, src_zone->z_name,
6905 dst_zone->z_name);
6906 }
6907
6908 panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
6909 "current stack (%p - %p) or const memory (phys %p - %p)",
6910 (void *)src, (void*)kvtophys(src),
6911 (void *)zone_info.zi_ro_range.min_address,
6912 (void *)zone_info.zi_ro_range.max_address,
6913 (void *)stack_start, (void *)stack_end,
6914 (void *)rorgn_begin, (void *)rorgn_end);
6915 }
6916
6917 __attribute__((always_inline))
6918 static void
zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
6920 const vm_offset_t src, vm_size_t src_size)
6921 {
6922 if (from_current_stack(src, src_size) ||
6923 (from_ro_map(src, src_size) &&
6924 zid == zone_index_from_ptr((void *)src)) ||
6925 from_const_memory(src, src_size)) {
6926 return;
6927 }
6928 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
6929 }
6930
6931 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6932
6933 __attribute__((noinline))
6934 void
zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
6936 const void *new_data, vm_size_t new_data_size)
6937 {
6938 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6939
6940 #if ZSECURITY_CONFIG(READ_ONLY)
6941 bool skip_src_check = false;
6942
/*
 * The OSEntitlements RO-zone is treated a little differently. For more
 * information: rdar://100518485.
 */
6947 if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
6948 code_signing_config_t cs_config = 0;
6949
6950 code_signing_configuration(NULL, &cs_config);
6951 if (cs_config & CS_CONFIG_CSM_ENABLED) {
6952 skip_src_check = true;
6953 }
6954 }
6955
6956 if (skip_src_check == false) {
6957 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
6958 new_data_size);
6959 }
6960 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
6961 (vm_offset_t) new_data, new_data_size);
6962 #else
6963 (void)zid;
6964 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
6965 #endif
6966 }
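/*
 * For illustration, the read-only zone update pattern built from the
 * functions in this file (ZONE_ID_EXAMPLE_RO and struct ro_elem are
 * placeholders; real callers use one of the ZONE_ID__FIRST_RO..LAST_RO
 * zones, typically through wrapper macros):
 *
 *	struct ro_elem *e = zalloc_ro(ZONE_ID_EXAMPLE_RO, Z_WAITOK | Z_ZERO);
 *
 *	// on ZSECURITY_CONFIG(READ_ONLY) configs the element is mapped
 *	// read-only, so updates go through zalloc_ro_mut() and the PPL:
 *	zalloc_ro_mut(ZONE_ID_EXAMPLE_RO, e,
 *	    offsetof(struct ro_elem, field), &value, sizeof(value));
 *
 *	zfree_ro(ZONE_ID_EXAMPLE_RO, e);
 */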
6967
6968 __attribute__((noinline))
6969 uint64_t
zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
6971 zro_atomic_op_t op, uint64_t value)
6972 {
6973 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6974
6975 #if ZSECURITY_CONFIG(READ_ONLY)
6976 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
6977 #else
6978 (void)zid;
6979 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
6980 #endif
6981 return value;
6982 }
6983
6984 void
zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
6986 {
6987 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6988 #if ZSECURITY_CONFIG(READ_ONLY)
6989 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
6990 #else
6991 (void)zid;
6992 bzero((void *)((uintptr_t)elem + offset), size);
6993 #endif
6994 }
6995
6996 /*
6997 * This function will run in the PPL and needs to be robust
6998 * against an attacker with arbitrary kernel write.
6999 */
7000
7001 #if ZSECURITY_CONFIG(READ_ONLY)
7002
7003 __abortlike
7004 static void
zone_id_require_ro_panic(zone_id_t zid, void *addr)
7006 {
7007 struct zone_size_params p = zone_ro_size_params[zid];
7008 vm_offset_t elem = (vm_offset_t)addr;
7009 uint32_t zindex;
7010 zone_t other;
7011 zone_t zone = &zone_array[zid];
7012
7013 if (!from_ro_map(addr, 1)) {
7014 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7015 }
7016
7017 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7018 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7019 }
7020
7021 zindex = zone_index_from_ptr(addr);
7022 other = &zone_array[zindex];
7023 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7024 panic("zone_require_ro failed: invalid zone index %d "
7025 "(addr: %p, expected: %s%s)", zindex,
7026 addr, zone_heap_name(zone), zone->z_name);
7027 } else {
7028 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7029 "(addr: %p, expected: %s%s)",
7030 zindex, zone_heap_name(other), other->z_name,
7031 addr, zone_heap_name(zone), zone->z_name);
7032 }
7033 }
7034
7035 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7036
7037 __attribute__((always_inline))
7038 void
zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7040 {
7041 #if ZSECURITY_CONFIG(READ_ONLY)
7042 struct zone_size_params p = zone_ro_size_params[zid];
7043 vm_offset_t elem = (vm_offset_t)addr;
7044
7045 if (!from_ro_map(addr, 1) ||
7046 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7047 zid != zone_meta_from_addr(elem)->zm_index) {
7048 zone_id_require_ro_panic(zid, addr);
7049 }
7050 #else
7051 #pragma unused(zid, addr)
7052 #endif
7053 }
7054
7055 void *
7056 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7057 {
7058 zone_t zone = zov.zov_view->zv_zone;
7059 zone_stats_t zstats = zov.zov_view->zv_stats;
7060
7061 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7062 assert(zone->z_percpu);
7063 flags |= Z_PCPU;
7064 return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags).addr);
7065 }
7066
7067 static void *
_zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7069 {
7070 struct zone_page_metadata *page_meta;
7071 vm_offset_t offs, addr;
7072 zone_pva_t pva;
7073
7074 assert(ml_get_interrupts_enabled() ||
7075 ml_is_quiescing() ||
7076 debug_mode_active() ||
7077 startup_phase < STARTUP_SUB_EARLY_BOOT);
7078
7079 size = (size + mask) & ~mask;
7080 assert(size <= PAGE_SIZE);
7081
7082 zone_lock(zone);
7083 assert(zone->z_self == zone);
7084
7085 for (;;) {
7086 pva = zone->z_pageq_partial;
7087 while (!zone_pva_is_null(pva)) {
7088 page_meta = zone_pva_to_meta(pva);
7089 if (page_meta->zm_bump + size <= PAGE_SIZE) {
7090 goto found;
7091 }
7092 pva = page_meta->zm_page_next;
7093 }
7094
7095 zone_expand_locked(zone, Z_WAITOK, NULL);
7096 }
7097
7098 found:
7099 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7100 page_meta->zm_bump = (uint16_t)(offs + size);
7101 page_meta->zm_alloc_size += size;
7102 zone->z_elems_free -= size;
7103 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7104
7105 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7106 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7107 }
7108
7109 zone_unlock(zone);
7110
7111 addr = offs + zone_pva_to_addr(pva);
7112
7113 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7114 return (void *)addr;
7115 }
7116
7117 static void *
_zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7119 {
7120 vm_offset_t addr;
7121
7122 kernel_memory_allocate(kernel_map, &addr, size, mask,
7123 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7124
7125 return (void *)addr;
7126 }
7127
7128 void *
zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7130 {
7131 if (size <= PAGE_SIZE) {
7132 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7133 return _zalloc_permanent(zone, size, mask);
7134 }
7135 return _zalloc_permanent_large(size, mask, tag);
7136 }
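/*
 * Permanent allocations have no matching free: requests up to PAGE_SIZE are
 * bump-allocated out of ZONE_ID_PERMANENT by _zalloc_permanent(), larger
 * requests get wired, zeroed memory from kernel_memory_allocate() with
 * KMA_PERMANENT and are never returned.
 */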
7137
7138 void *
zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7140 {
7141 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7142 return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
7143 }
7144
7145 /*! @} */
7146 #endif /* !ZALLOC_TEST */
7147 #pragma mark zone GC / trimming
7148 #if !ZALLOC_TEST
7149
7150 static thread_call_data_t zone_trim_callout;
7151 EVENT_DEFINE(ZONE_EXHAUSTED);
7152
7153 static void
zone_reclaim_chunk(
7155 zone_t z,
7156 struct zone_page_metadata *meta,
7157 uint32_t free_count)
7158 {
7159 vm_address_t page_addr;
7160 vm_size_t size_to_free;
7161 uint32_t bitmap_ref;
7162 uint32_t page_count;
7163 zone_security_flags_t zsflags = zone_security_config(z);
7164 bool sequester = !z->z_destroyed;
7165 bool oob_guard = false;
7166
7167 if (zone_submap_is_sequestered(zsflags)) {
7168 /*
7169 * If the entire map is sequestered, we can't return the VA.
7170 * It stays pinned to the zone forever.
7171 */
7172 sequester = true;
7173 }
7174
7175 zone_meta_queue_pop(z, &z->z_pageq_empty);
7176
7177 page_addr = zone_meta_to_addr(meta);
7178 page_count = meta->zm_chunk_len;
7179 oob_guard = meta->zm_guarded;
7180
7181 if (meta->zm_alloc_size) {
7182 zone_metadata_corruption(z, meta, "alloc_size");
7183 }
7184 if (z->z_percpu) {
7185 if (page_count != 1) {
7186 zone_metadata_corruption(z, meta, "page_count");
7187 }
7188 size_to_free = ptoa(z->z_chunk_pages);
7189 zone_remove_wired_pages(z->z_chunk_pages);
7190 } else {
7191 if (page_count > z->z_chunk_pages) {
7192 zone_metadata_corruption(z, meta, "page_count");
7193 }
7194 if (page_count < z->z_chunk_pages) {
7195 /* Dequeue non populated VA from z_pageq_va */
7196 zone_meta_remqueue(z, meta + page_count);
7197 }
7198 size_to_free = ptoa(page_count);
7199 zone_remove_wired_pages(page_count);
7200 }
7201
7202 zone_counter_sub(z, z_elems_free, free_count);
7203 zone_counter_sub(z, z_elems_avail, free_count);
7204 zone_counter_sub(z, z_wired_empty, page_count);
7205 zone_counter_sub(z, z_wired_cur, page_count);
7206
7207 if (z->z_pcpu_cache == NULL) {
7208 if (z->z_elems_free_min < free_count) {
7209 z->z_elems_free_min = 0;
7210 } else {
7211 z->z_elems_free_min -= free_count;
7212 }
7213 }
7214 if (z->z_elems_free_wma < free_count) {
7215 z->z_elems_free_wma = 0;
7216 } else {
7217 z->z_elems_free_wma -= free_count;
7218 }
7219
7220 bitmap_ref = 0;
7221 if (sequester) {
7222 if (meta->zm_inline_bitmap) {
7223 for (int i = 0; i < meta->zm_chunk_len; i++) {
7224 meta[i].zm_bitmap = 0;
7225 }
7226 } else {
7227 bitmap_ref = meta->zm_bitmap;
7228 meta->zm_bitmap = 0;
7229 }
7230 meta->zm_chunk_len = 0;
7231 } else {
7232 if (!meta->zm_inline_bitmap) {
7233 bitmap_ref = meta->zm_bitmap;
7234 }
7235 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7236 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7237 }
7238
7239 #if CONFIG_ZLEAKS
7240 if (__improbable(zleak_should_disable_for_zone(z) &&
7241 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7242 thread_call_enter(&zone_leaks_callout);
7243 }
7244 #endif /* CONFIG_ZLEAKS */
7245
7246 zone_unlock(z);
7247
7248 if (bitmap_ref) {
7249 zone_bits_free(bitmap_ref);
7250 }
7251
7252 /* Free the pages for metadata and account for them */
7253 #if KASAN_CLASSIC
7254 if (z->z_percpu) {
7255 for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7256 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7257 zone_elem_outer_size(z),
7258 zone_elem_outer_offs(z),
7259 zone_elem_redzone(z));
7260 }
7261 } else {
7262 kasan_zmem_remove(page_addr, size_to_free,
7263 zone_elem_outer_size(z),
7264 zone_elem_outer_offs(z),
7265 zone_elem_redzone(z));
7266 }
7267 #endif /* KASAN_CLASSIC */
7268
7269 if (sequester) {
7270 kernel_memory_depopulate(page_addr, size_to_free,
7271 KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7272 } else {
7273 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7274 kmem_free(zone_submap(zsflags), page_addr,
7275 ptoa(z->z_chunk_pages + oob_guard));
7276 if (oob_guard) {
7277 os_atomic_dec(&zone_guard_pages, relaxed);
7278 }
7279 }
7280
7281 thread_yield_to_preemption();
7282
7283 zone_lock(z);
7284
7285 if (sequester) {
7286 zone_meta_queue_push(z, &z->z_pageq_va, meta);
7287 }
7288 }
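/*
 * zone_reclaim_chunk() drops the zone lock around the expensive part:
 * when the VA is sequestered it only depopulates the physical pages
 * (kernel_memory_depopulate) and then pushes the metadata back onto
 * z_pageq_va so the range can be repopulated later; otherwise the whole
 * mapping (including any out-of-band guard page) is returned to the
 * submap with kmem_free() and the metadata is cleared.  The brief
 * thread_yield_to_preemption() keeps long reclaims from hogging a CPU.
 */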
7289
7290 static void
7291 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7292 {
7293 z_debug_assert(n <= zc_mag_size());
7294
7295 for (uint16_t i = 0; i < n; i++) {
7296 vm_offset_t addr = elems[i];
7297 elems[i] = 0;
7298 zfree_drop(z, addr);
7299 }
7300
7301 z->z_elems_free += n;
7302 }
7303
7304 static void
7305 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7306 {
7307 z_debug_assert(n <= zc_mag_size());
7308 zone_cache_ops_t ops = zcache_ops[zid];
7309
7310 for (uint16_t i = 0; i < n; i++) {
7311 vm_offset_t addr = elems[i];
7312 elems[i] = 0;
7313 addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7314 ops->zc_op_free(zid, (void *)addr);
7315 }
7316
7317 os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7318 }
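/*
 * zcache zones (no backing chunks of their own) do not own their elements'
 * memory: reclaimed pointers are handed back through the registered
 * zone_cache_ops, zc_op_mark_valid() first and then zc_op_free(), and only
 * z_elems_avail is adjusted here since the backing allocation belongs to
 * the cache owner.
 */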
7319
7320 static void
7321 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7322 {
7323 zpercpu_foreach(zc, z->z_pcpu_cache) {
7324 zone_depot_lock(zc);
7325
7326 if (zc->zc_depot.zd_full > (target + 1) / 2) {
7327 uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7328 zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7329 }
7330
7331 if (zc->zc_depot.zd_empty > target / 2) {
7332 uint32_t n = zc->zc_depot.zd_empty - target / 2;
7333 zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7334 }
7335
7336 zone_depot_unlock(zc);
7337 }
7338 }
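/*
 * Each per-CPU depot is trimmed to roughly half the target in each
 * direction: at most ceil(target / 2) full magazines and floor(target / 2)
 * empty ones stay on the CPU, and the excess is collected into the caller's
 * depot `zd` so it can be freed outside the per-CPU depot locks.
 */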
7339
7340 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7341 ZONE_RECLAIM_TRIM,
7342 ZONE_RECLAIM_DRAIN,
7343 ZONE_RECLAIM_DESTROY,
7344 });
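/*
 * ZONE_RECLAIM_TRIM    only returns what the working-set tracking
 *                      (min / weighted moving averages) says is excess.
 * ZONE_RECLAIM_DRAIN   returns every free element and empty chunk.
 * ZONE_RECLAIM_DESTROY additionally tears the zone down (zdestroy path).
 */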
7345
7346 static void
7347 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7348 {
7349 uint32_t depot_max = 0;
7350 bool cleanup = mode != ZONE_RECLAIM_TRIM;
7351
7352 if (z->z_depot_cleanup) {
7353 z->z_depot_cleanup = false;
7354 depot_max = z->z_depot_size;
7355 cleanup = true;
7356 }
7357
7358 if (cleanup) {
7359 zone_depot_trim(z, depot_max, zd);
7360 }
7361
7362 if (mode == ZONE_RECLAIM_DESTROY) {
7363 zpercpu_foreach(zc, z->z_pcpu_cache) {
7364 zone_reclaim_elements(z, zc->zc_alloc_cur,
7365 zc->zc_alloc_elems);
7366 zone_reclaim_elements(z, zc->zc_free_cur,
7367 zc->zc_free_elems);
7368 zc->zc_alloc_cur = zc->zc_free_cur = 0;
7369 }
7370
7371 z->z_recirc_empty_min = 0;
7372 z->z_recirc_empty_wma = 0;
7373 z->z_recirc_full_min = 0;
7374 z->z_recirc_full_wma = 0;
7375 z->z_recirc_cont_cur = 0;
7376 z->z_recirc_cont_wma = 0;
7377 }
7378 }
7379
7380 static void
7381 zone_reclaim_recirc(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7382 {
7383 assert(zd->zd_empty == 0);
7384 assert(zd->zd_full == 0);
7385
7386 zone_recirc_lock_nopreempt(z);
7387
7388 if (mode == ZONE_RECLAIM_TRIM) {
7389 uint32_t count;
7390
7391 count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7392 z->z_recirc_empty_min);
7393 assert(count <= z->z_recirc.zd_empty);
7394
7395 if (count) {
7396 zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7397 z->z_recirc_empty_min -= count;
7398 z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7399 }
7400
7401 count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT, z->z_recirc_full_min);
7402 assert(count <= z->z_recirc.zd_full);
7403 if (count) {
7404 zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7405 z->z_recirc_full_min -= count;
7406 z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7407 }
7408 } else {
7409 *zd = z->z_recirc;
7410 if (zd->zd_full == 0) {
7411 zd->zd_tail = &zd->zd_head;
7412 }
7413 zone_depot_init(&z->z_recirc);
7414 z->z_recirc_empty_min = 0;
7415 z->z_recirc_empty_wma = 0;
7416 z->z_recirc_full_min = 0;
7417 z->z_recirc_full_wma = 0;
7418 }
7419
7420 zone_recirc_unlock_nopreempt(z);
7421 }
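/*
 * In TRIM mode the number of magazines pulled from the recirculation depot
 * is MIN(wma / Z_WMA_UNIT, min), i.e. a conservative estimate of how many
 * stayed idle across the whole observation window; DRAIN and DESTROY simply
 * take over the entire recirculation depot.
 */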
7422
7423 /*!
7424 * @function zone_reclaim
7425 *
7426 * @brief
7427 * Drains or trims the zone.
7428 *
7429 * @discussion
7430 * Draining the zone frees all of its elements.
7431 *
7432 * Trimming the zone tries to respect the working set size, and avoids draining
7433 * the depot when it's not necessary.
7434 *
7435 * @param z The zone to reclaim from
7436 * @param mode The purpose of this reclaim.
7437 */
7438 static void
7439 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7440 {
7441 struct zone_depot zd;
7442
7443 zone_depot_init(&zd);
7444
7445 zone_lock(z);
7446
7447 if (mode == ZONE_RECLAIM_DESTROY) {
7448 if (!z->z_destructible || z->z_elems_rsv) {
7449 panic("zdestroy: Zone %s%s isn't destructible",
7450 zone_heap_name(z), z->z_name);
7451 }
7452
7453 if (!z->z_self || z->z_expander ||
7454 z->z_async_refilling || z->z_expanding_wait) {
7455 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7456 zone_heap_name(z), z->z_name);
7457 }
7458
7459 #if !KASAN_CLASSIC
7460 /*
7461 * Unset the valid bit. We'll hit an assert failure on further
7462 * operations on this zone, until zinit() is called again.
7463 *
7464 * Leave the zone valid for KASan as we will see zfree's on
7465 * quarantined free elements even after the zone is destroyed.
7466 */
7467 z->z_self = NULL;
7468 #endif
7469 z->z_destroyed = true;
7470 } else if (z->z_destroyed) {
7471 return zone_unlock(z);
7472 } else if (zone_count_free(z) <= z->z_elems_rsv) {
7473 /* If the zone is under its reserve level, leave it alone. */
7474 return zone_unlock(z);
7475 }
7476
7477 if (z->z_pcpu_cache) {
7478 zone_magazine_t mag;
7479 uint32_t freed = 0;
7480
7481 /*
7482 * This is all done with the zone lock held on purpose.
7483 * The work here is O(ncpu), which should still be short.
7484 *
7485 * We need to keep the lock held until we have reclaimed
7486 * at least a few magazines, otherwise if the zone has no
7487 * free elements outside of the depot, a thread performing
7488 * a concurrent allocation could try to grow the zone
7489 * while we're trying to drain it.
7490 */
7491 zone_reclaim_recirc(z, mode, &zd);
7492 zone_reclaim_pcpu(z, mode, &zd);
7493
7494 if (z->z_chunk_elems) {
7495 zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7496 smr_t smr = zone_cache_smr(cache);
7497
7498 while (zd.zd_full) {
7499 mag = zone_depot_pop_head_full(&zd, NULL);
7500 if (smr) {
7501 smr_wait(smr, mag->zm_seq);
7502 zalloc_cached_reuse_smr(z, cache, mag);
7503 freed += zc_mag_size();
7504 }
7505 zone_reclaim_elements(z, zc_mag_size(),
7506 mag->zm_elems);
7507 zone_depot_insert_head_empty(&zd, mag);
7508
7509 freed += zc_mag_size();
7510 if (freed >= zc_free_batch_size()) {
7511 zone_unlock(z);
7512 zone_magazine_free_list(&zd);
7513 thread_yield_to_preemption();
7514 zone_lock(z);
7515 freed = 0;
7516 }
7517 }
7518 } else {
7519 zone_id_t zid = zone_index(z);
7520
7521 zone_unlock(z);
7522
7523 assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7524
7525 while (zd.zd_full) {
7526 mag = zone_depot_pop_head_full(&zd, NULL);
7527 zcache_reclaim_elements(zid, zc_mag_size(),
7528 mag->zm_elems);
7529 zone_magazine_free(mag);
7530 }
7531
7532 goto cleanup;
7533 }
7534 }
7535
7536 while (!zone_pva_is_null(z->z_pageq_empty)) {
7537 struct zone_page_metadata *meta;
7538 uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7539
7540 if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7541 limit = MAX(limit, z->z_elems_free -
7542 MIN(z->z_elems_free_min, z->z_elems_free_wma));
7543 }
7544
7545 meta = zone_pva_to_meta(z->z_pageq_empty);
7546 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7547
7548 if (zone_count_free(z) - count < limit) {
7549 break;
7550 }
7551
7552 zone_reclaim_chunk(z, meta, count);
7553 }
7554
7555 zone_unlock(z);
7556
7557 cleanup:
7558 zone_magazine_free_list(&zd);
7559 }
7560
7561 void
7562 zone_drain(zone_t zone)
7563 {
7564 current_thread()->options |= TH_OPT_ZONE_PRIV;
7565 lck_mtx_lock(&zone_gc_lock);
7566 zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7567 lck_mtx_unlock(&zone_gc_lock);
7568 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7569 }
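/*
 * zone_drain() serializes with the GC/trim paths through zone_gc_lock and
 * temporarily marks the calling thread TH_OPT_ZONE_PRIV, the same protocol
 * zone_gc() uses below; zcache_drain() is a thin wrapper that resolves the
 * zone id first.
 */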
7570
7571 void
7572 zcache_drain(zone_id_t zid)
7573 {
7574 zone_drain(zone_by_id(zid));
7575 }
7576
7577 static void
7578 zone_reclaim_all(zone_reclaim_mode_t mode)
7579 {
7580 /*
7581 * Start with zcaches, so that they flow into the regular zones.
7582 *
7583 * Then the zones with VA sequestration, since depopulating
7584 * their pages does not need to allocate vm map entries for holes,
7585 * which gives memory back to the system faster.
7586 */
7587 for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7588 zone_t z = zone_by_id(zid);
7589
7590 if (z->z_self && z->z_chunk_elems == 0) {
7591 zone_reclaim(z, mode);
7592 }
7593 }
7594 zone_index_foreach(zid) {
7595 zone_t z = zone_by_id(zid);
7596
7597 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7598 continue;
7599 }
7600 if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7601 z->collectable) {
7602 zone_reclaim(z, mode);
7603 }
7604 }
7605
7606 zone_index_foreach(zid) {
7607 zone_t z = zone_by_id(zid);
7608
7609 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7610 continue;
7611 }
7612 if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7613 z->collectable) {
7614 zone_reclaim(z, mode);
7615 }
7616 }
7617
7618 zone_reclaim(zc_magazine_zone, mode);
7619 }
7620
7621 void
7622 zone_userspace_reboot_checks(void)
7623 {
7624 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7625 if (label_zone_size != 0) {
7626 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7627 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7628 }
7629 }
7630
7631 void
7632 zone_gc(zone_gc_level_t level)
7633 {
7634 zone_reclaim_mode_t mode;
7635 zone_t largest_zone = NULL;
7636
7637 switch (level) {
7638 case ZONE_GC_TRIM:
7639 mode = ZONE_RECLAIM_TRIM;
7640 break;
7641 case ZONE_GC_DRAIN:
7642 mode = ZONE_RECLAIM_DRAIN;
7643 break;
7644 case ZONE_GC_JETSAM:
7645 largest_zone = kill_process_in_largest_zone();
7646 mode = ZONE_RECLAIM_TRIM;
7647 break;
7648 }
7649
7650 current_thread()->options |= TH_OPT_ZONE_PRIV;
7651 lck_mtx_lock(&zone_gc_lock);
7652
7653 zone_reclaim_all(mode);
7654
7655 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7656 /*
7657 * If we possibly killed a process, but we're still critical,
7658 * we need to drain harder.
7659 */
7660 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7661 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7662 }
7663
7664 lck_mtx_unlock(&zone_gc_lock);
7665 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7666 }
7667
7668 void
7669 zone_gc_trim(void)
7670 {
7671 zone_gc(ZONE_GC_TRIM);
7672 }
7673
7674 void
7675 zone_gc_drain(void)
7676 {
7677 zone_gc(ZONE_GC_DRAIN);
7678 }
7679
7680 static bool
7681 zone_trim_needed(zone_t z)
7682 {
7683 if (z->z_depot_cleanup) {
7684 return true;
7685 }
7686
7687 if (z->z_async_refilling) {
7688 /* Don't fight with refill */
7689 return false;
7690 }
7691
7692 if (z->z_pcpu_cache) {
7693 uint32_t e_n, f_n;
7694
7695 e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7696 f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
7697
7698 if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7699 return true;
7700 }
7701
7702 if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7703 f_n * zc_mag_size() * zone_elem_inner_size(z) >
7704 zc_autotrim_size() * Z_WMA_UNIT) {
7705 return true;
7706 }
7707
7708 return false;
7709 }
7710
7711 if (!zone_pva_is_null(z->z_pageq_empty)) {
7712 uint32_t n;
7713
7714 n = MIN(z->z_elems_free_wma, z->z_elems_free_min);
7715
7716 return n >= z->z_elems_rsv + z->z_chunk_elems;
7717 }
7718
7719 return false;
7720 }
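/*
 * Heuristic summary: cached zones ask for a trim when the steady excess of
 * empty magazines exceeds zc_autotrim_buckets(), or when the excess full
 * magazines hold more elements than the reserve and more bytes than
 * zc_autotrim_size().  Uncached zones ask for one when the smaller of the
 * free-element moving average and floor exceeds the reserve by at least one
 * chunk's worth of elements.
 */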
7721
7722 static void
7723 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7724 {
7725 current_thread()->options |= TH_OPT_ZONE_PRIV;
7726
7727 zone_foreach(z) {
7728 if (!z->collectable || z == zc_magazine_zone) {
7729 continue;
7730 }
7731
7732 if (zone_trim_needed(z)) {
7733 lck_mtx_lock(&zone_gc_lock);
7734 zone_reclaim(z, ZONE_RECLAIM_TRIM);
7735 lck_mtx_unlock(&zone_gc_lock);
7736 }
7737 }
7738
7739 if (zone_trim_needed(zc_magazine_zone)) {
7740 lck_mtx_lock(&zone_gc_lock);
7741 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7742 lck_mtx_unlock(&zone_gc_lock);
7743 }
7744
7745 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7746 }
7747
7748 void
7749 compute_zone_working_set_size(__unused void *param)
7750 {
7751 uint32_t zc_auto = zc_enable_level();
7752 bool needs_trim = false;
7753
7754 /*
7755 * Keep zone caching disabled until the first proc is made.
7756 */
7757 if (__improbable(zone_caching_disabled < 0)) {
7758 return;
7759 }
7760
7761 zone_caching_disabled = vm_pool_low();
7762
7763 if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
7764 zc_auto = 0;
7765 }
7766
7767 zone_foreach(z) {
7768 uint32_t old, wma, cur;
7769 bool needs_caching = false;
7770
7771 if (z->z_self != z) {
7772 continue;
7773 }
7774
7775 zone_lock(z);
7776
7777 zone_recirc_lock_nopreempt(z);
7778
7779 if (z->z_pcpu_cache) {
7780 wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
7781 z->z_recirc_empty_min = z->z_recirc.zd_empty;
7782 z->z_recirc_empty_wma = wma;
7783 } else {
7784 wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
7785 z->z_elems_free_min = z->z_elems_free;
7786 z->z_elems_free_wma = wma;
7787 }
7788
7789 wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
7790 z->z_recirc_full_min = z->z_recirc.zd_full;
7791 z->z_recirc_full_wma = wma;
7792
7793 /* fixed point decimal of contentions per second */
7794 old = z->z_recirc_cont_wma;
7795 cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
7796 (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
7797 cur = (3 * old + cur) / 4;
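		/*
		 * 3/4 decay: e.g. with old == 400 and a fresh sample of 800
		 * (both in Z_WMA_UNIT fixed point), the new estimate is
		 * (3 * 400 + 800) / 4 == 500, so a one-period burst of
		 * contention only moves the average by a quarter of the jump.
		 */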
7798 zone_recirc_unlock_nopreempt(z);
7799
7800 if (z->z_pcpu_cache) {
7801 uint16_t size = z->z_depot_size;
7802
7803 if (size < z->z_depot_limit && cur > zc_grow_level()) {
7804 /*
7805 * lose history on purpose now
7806 * that we just grew, to give
7807 * the system time to adjust.
7808 */
7809 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7810 size = size ? (3 * size + 2) / 2 : 2;
7811 z->z_depot_size = MIN(z->z_depot_limit, size);
7812 } else if (size > 0 && cur <= zc_shrink_level()) {
7813 /*
7814 * lose history on purpose now
7815 * that we just shrunk, to give
7816 * the system time to adjust.
7817 */
7818 cur = (zc_grow_level() + zc_shrink_level()) / 2;
7819 z->z_depot_size = size - 1;
7820 z->z_depot_cleanup = true;
7821 }
7822 } else if (!z->z_nocaching && !z->exhaustible && zc_auto &&
7823 old >= zc_auto && cur >= zc_auto) {
7824 needs_caching = true;
7825 }
7826
7827 z->z_recirc_cont_wma = cur;
7828 z->z_recirc_cont_cur = 0;
7829
7830 if (!needs_trim && zone_trim_needed(z)) {
7831 needs_trim = true;
7832 }
7833
7834 zone_unlock(z);
7835
7836 if (needs_caching) {
7837 zone_enable_caching(z);
7838 }
7839 }
7840
7841 if (needs_trim) {
7842 thread_call_enter(&zone_trim_callout);
7843 }
7844 }
7845
7846 #endif /* !ZALLOC_TEST */
7847 #pragma mark vm integration, MIG routines
7848 #if !ZALLOC_TEST
7849
7850 extern unsigned int stack_total;
7851 #if defined (__x86_64__)
7852 extern unsigned int inuse_ptepages_count;
7853 #endif
7854
7855 static const char *
7856 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
7857 bool is_kt_var)
7858 {
7859 if (is_kt_var) {
7860 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
7861 return cur.ktv_var->kt_name;
7862 } else {
7863 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
7864 return cur.ktv_fixed->kt_zv.zv_name;
7865 }
7866 }
7867
7868 static void
7869 panic_print_types_in_zone(zone_t z, const char* debug_str)
7870 {
7871 kalloc_type_views_t kt_cur = {};
7872 const char *prev_type = "";
7873 size_t skip_over_site = sizeof("site.") - 1;
7874 zone_security_flags_t zsflags = zone_security_config(z);
7875 bool is_kt_var = false;
7876
7877 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
7878 uint32_t heap_id = KT_VAR_PTR_HEAP + ((zone_index(z) -
7879 kalloc_type_heap_array[KT_VAR_PTR_HEAP].kh_zstart) / KHEAP_NUM_ZONES);
7880 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
7881 is_kt_var = true;
7882 } else {
7883 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
7884 }
7885
7886 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
7887 is_kt_var ? "type arrays" : "types", debug_str, z->z_name);
7888
7889 while (kt_cur.ktv_fixed) {
7890 kalloc_type_views_t kt_next = {};
7891 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
7892 is_kt_var) + skip_over_site;
7893 if (strcmp(typename, prev_type) != 0) {
7894 paniclog_append_noflush("\t%-50s\n", typename);
7895 prev_type = typename;
7896 }
7897 kt_cur = kt_next;
7898 }
7899 paniclog_append_noflush("\n");
7900 }
7901
7902 static void
7903 panic_display_kalloc_types(void)
7904 {
7905 if (kalloc_type_src_zone) {
7906 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
7907 }
7908 if (kalloc_type_dst_zone) {
7909 panic_print_types_in_zone(kalloc_type_dst_zone,
7910 "addr is being freed to");
7911 }
7912 }
7913
7914 static void
7915 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
7916 uint64_t *zone_size)
7917 {
7918 zone_index_foreach(zid) {
7919 zone_t z = &zone_array[zid];
7920 vm_offset_t size = zone_size_wired(z);
7921
7922 if (zid == ZONE_ID_VM_PAGES) {
7923 continue;
7924 }
7925 for (uint32_t i = 0; i < n; i++) {
7926 if (size > zone_size[i]) {
7927 largest_zones[i] = z;
7928 zone_size[i] = size;
7929 break;
7930 }
7931 }
7932 }
7933 }
7934
7935 #define NUM_LARGEST_ZONES 5
7936 static void
7937 panic_display_largest_zones(void)
7938 {
7939 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
7940 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
7941
7942 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
7943 (uint64_t *) &largest_size);
7944
7945 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
7946 "Zone Name", "Cur Size", "Free Size");
7947 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
7948 zone_t z = largest_zones[i];
7949 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
7950 zone_heap_name(z), z->z_name,
7951 mach_vm_size_pretty(largest_size[i]),
7952 mach_vm_size_unit(largest_size[i]),
7953 mach_vm_size_pretty(zone_size_free(z)),
7954 mach_vm_size_unit(zone_size_free(z)));
7955 }
7956 }
7957
7958 static void
7959 panic_display_zprint(void)
7960 {
7961 panic_display_largest_zones();
7962 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
7963 (uintptr_t)(kernel_stack_size * stack_total));
7964 #if defined (__x86_64__)
7965 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
7966 (uintptr_t)ptoa(inuse_ptepages_count));
7967 #endif
7968 paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large",
7969 (uintptr_t)kalloc_large_total);
7970
7971 if (panic_kext_memory_info) {
7972 mach_memory_info_t *mem_info = panic_kext_memory_info;
7973
7974 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
7975 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
7976 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
7977 continue;
7978 }
7979 if (mem_info[i].size > (1024 * 1024)) {
7980 paniclog_append_noflush("%-5lld %10lld\n",
7981 mem_info[i].site, mem_info[i].size);
7982 }
7983 }
7984 }
7985 }
7986
7987 static void
7988 panic_display_zone_info(void)
7989 {
7990 paniclog_append_noflush("Zone info:\n");
7991 paniclog_append_noflush(" Zone map: %p - %p\n",
7992 (void *)zone_info.zi_map_range.min_address,
7993 (void *)zone_info.zi_map_range.max_address);
7994 #if CONFIG_PROB_GZALLOC
7995 if (pgz_submap) {
7996 paniclog_append_noflush(" . PGZ : %p - %p\n",
7997 (void *)pgz_submap->min_offset,
7998 (void *)pgz_submap->max_offset);
7999 }
8000 #endif /* CONFIG_PROB_GZALLOC */
8001 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8002 vm_map_t map = zone_submaps[i];
8003
8004 if (map == VM_MAP_NULL) {
8005 continue;
8006 }
8007 paniclog_append_noflush(" . %-6s: %p - %p\n",
8008 zone_submaps_names[i],
8009 (void *)map->min_offset,
8010 (void *)map->max_offset);
8011 }
8012 paniclog_append_noflush(" Metadata: %p - %p\n"
8013 " Bitmaps : %p - %p\n"
8014 " Extra : %p - %p\n"
8015 "\n",
8016 (void *)zone_info.zi_meta_range.min_address,
8017 (void *)zone_info.zi_meta_range.max_address,
8018 (void *)zone_info.zi_bits_range.min_address,
8019 (void *)zone_info.zi_bits_range.max_address,
8020 (void *)zone_info.zi_xtra_range.min_address,
8021 (void *)zone_info.zi_xtra_range.max_address);
8022 }
8023
8024 static void
8025 panic_display_zone_fault(vm_offset_t addr)
8026 {
8027 struct zone_page_metadata meta = { };
8028 vm_map_t map = VM_MAP_NULL;
8029 vm_offset_t oob_offs = 0, size = 0;
8030 int map_idx = -1;
8031 zone_t z = NULL;
8032 const char *kind = "whild deref";
8033 bool oob = false;
8034
8035 /*
8036 * First: look if we bumped into guard pages between submaps
8037 */
8038 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8039 map = zone_submaps[i];
8040 if (map == VM_MAP_NULL) {
8041 continue;
8042 }
8043
8044 if (addr >= map->min_offset && addr < map->max_offset) {
8045 map_idx = i;
8046 break;
8047 }
8048 }
8049
8050 if (map_idx == -1) {
8051 /* this really shouldn't happen, submaps are back to back */
8052 return;
8053 }
8054
8055 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8056
8057 /*
8058 * Second: look if there's just no metadata at all
8059 */
8060 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8061 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8062 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8063 zone_array[meta.zm_index].z_self == NULL) {
8064 paniclog_append_noflush(" Zone : <unknown>\n");
8065 kind = "wild deref, missing or invalid metadata";
8066 } else {
8067 z = &zone_array[meta.zm_index];
8068 paniclog_append_noflush(" Zone : %s%s\n",
8069 zone_heap_name(z), zone_name(z));
8070 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8071 kind = "out-of-bounds (high confidence)";
8072 oob = true;
8073 size = zone_element_size((void *)addr,
8074 &z, false, &oob_offs);
8075 } else {
8076 kind = "use-after-free (medium confidence)";
8077 }
8078 }
8079
8080 paniclog_append_noflush(" Address : %p\n", (void *)addr);
8081 if (oob) {
8082 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
8083 (void *)(trunc_page(addr) - (size - oob_offs)),
8084 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8085 }
8086 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
8087 zone_submaps_names[map_idx],
8088 (void *)map->min_offset, (void *)map->max_offset);
8089 paniclog_append_noflush(" Kind : %s\n", kind);
8090 if (oob) {
8091 paniclog_append_noflush(" Access : %d byte(s) past\n",
8092 (uint32_t)(addr & PAGE_MASK) + 1);
8093 }
8094 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
8095 "0x%04x 0x%08x 0x%08x 0x%08x\n",
8096 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8097 meta.zm_alloc_size, meta.zm_bitmap,
8098 meta.zm_page_next.packed_address,
8099 meta.zm_page_prev.packed_address);
8100 paniclog_append_noflush("\n");
8101 }
8102
8103 void
8104 panic_display_zalloc(void)
8105 {
8106 bool keepsyms = false;
8107
8108 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8109
8110 panic_display_zone_info();
8111
8112 if (panic_fault_address) {
8113 #if CONFIG_PROB_GZALLOC
8114 if (pgz_owned(panic_fault_address)) {
8115 panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8116 } else
8117 #endif /* CONFIG_PROB_GZALLOC */
8118 if (zone_maps_owned(panic_fault_address, 1)) {
8119 panic_display_zone_fault(panic_fault_address);
8120 }
8121 }
8122
8123 if (panic_include_zprint) {
8124 panic_display_zprint();
8125 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8126 panic_display_largest_zones();
8127 }
8128 #if CONFIG_ZLEAKS
8129 if (zleak_active) {
8130 panic_display_zleaks(keepsyms);
8131 }
8132 #endif
8133 if (panic_include_kalloc_types) {
8134 panic_display_kalloc_types();
8135 }
8136 }
8137
8138 /*
8139 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8140 * requesting zone information.
8141 * Frees unused pages towards the end of the region, and zeroes out unused
8142 * space on the last page.
8143 */
8144 static vm_map_copy_t
8145 create_vm_map_copy(
8146 vm_offset_t start_addr,
8147 vm_size_t total_size,
8148 vm_size_t used_size)
8149 {
8150 kern_return_t kr;
8151 vm_offset_t end_addr;
8152 vm_size_t free_size;
8153 vm_map_copy_t copy;
8154
8155 if (used_size != total_size) {
8156 end_addr = start_addr + used_size;
8157 free_size = total_size - (round_page(end_addr) - start_addr);
8158
8159 if (free_size >= PAGE_SIZE) {
8160 kmem_free(ipc_kernel_map,
8161 round_page(end_addr), free_size);
8162 }
8163 bzero((char *) end_addr, round_page(end_addr) - end_addr);
8164 }
8165
8166 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8167 (vm_map_size_t)used_size, TRUE, &copy);
8168 assert(kr == KERN_SUCCESS);
8169
8170 return copy;
8171 }
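/*
 * Only whole unused pages past `used_size` are returned with kmem_free();
 * the tail of the last used page is bzero'ed so the copy object handed to
 * userspace cannot leak stale kernel data.
 */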
8172
8173 static boolean_t
8174 get_zone_info(
8175 zone_t z,
8176 mach_zone_name_t *zn,
8177 mach_zone_info_t *zi)
8178 {
8179 struct zone zcopy;
8180 vm_size_t cached = 0;
8181
8182 assert(z != ZONE_NULL);
8183 zone_lock(z);
8184 if (!z->z_self) {
8185 zone_unlock(z);
8186 return FALSE;
8187 }
8188 zcopy = *z;
8189 if (z->z_pcpu_cache) {
8190 zpercpu_foreach(zc, z->z_pcpu_cache) {
8191 cached += zc->zc_alloc_cur + zc->zc_free_cur;
8192 cached += zc->zc_depot.zd_full * zc_mag_size();
8193 }
8194 }
8195 zone_unlock(z);
8196
8197 if (zn != NULL) {
8198 /*
8199 * Append kalloc heap name to zone name (if zone is used by kalloc)
8200 */
8201 char temp_zone_name[MAX_ZONE_NAME] = "";
8202 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8203 zone_heap_name(z), z->z_name);
8204
8205 /* assuming here the name data is static */
8206 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8207 strlen(temp_zone_name) + 1);
8208 }
8209
8210 if (zi != NULL) {
8211 *zi = (mach_zone_info_t) {
8212 .mzi_count = zone_count_allocated(&zcopy) - cached,
8213 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8214 // max_size for zprint is now high-watermark of pages used
8215 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8216 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8217 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8218 .mzi_exhaustible = (uint64_t)zcopy.exhaustible,
8219 };
8220 if (zcopy.z_chunk_pages == 0) {
8221 /* this is a zcache */
8222 zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8223 }
8224 zpercpu_foreach(zs, zcopy.z_stats) {
8225 zi->mzi_sum_size += zs->zs_mem_allocated;
8226 }
8227 if (zcopy.collectable) {
8228 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8229 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8230 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8231 }
8232 }
8233
8234 return TRUE;
8235 }
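/*
 * The snapshot is taken under the zone lock into a local struct zone so
 * the formatting work runs unlocked, and mzi_count subtracts elements
 * currently parked in per-CPU magazines and depots so that userspace sees
 * the number of elements actually handed out.
 */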
8236
8237 /* mach_memory_info entitlement */
8238 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8239
8240 /* macro needed to rate-limit mach_memory_info */
8241 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8242
8243 /* declarations necessary to call kauth_cred_issuser() */
8244 struct ucred;
8245 extern int kauth_cred_issuser(struct ucred *);
8246 extern struct ucred *kauth_cred_get(void);
8247
8248 static kern_return_t
8249 mach_memory_info_internal(
8250 host_t host,
8251 mach_zone_name_array_t *namesp,
8252 mach_msg_type_number_t *namesCntp,
8253 mach_zone_info_array_t *infop,
8254 mach_msg_type_number_t *infoCntp,
8255 mach_memory_info_array_t *memoryInfop,
8256 mach_msg_type_number_t *memoryInfoCntp,
8257 bool redact_info);
8258
8259 static kern_return_t
8260 mach_memory_info_security_check(bool redact_info)
8261 {
8262 /* If not root, only allow redacted calls. */
8263 if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8264 return KERN_NO_ACCESS;
8265 }
8266
8267 if (PE_srd_fused) {
8268 return KERN_SUCCESS;
8269 }
8270
8271 /* If the caller does not have the memory entitlement, fail. */
8272 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8273 if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8274 return KERN_DENIED;
8275 }
8276
8277 /*
8278 * On release non-mac arm devices, allow mach_memory_info
8279 * to be called twice per day per boot. memorymaintenanced
8280 * calls it once per day, which leaves room for a sysdiagnose.
8281 * Allow redacted version to be called without rate limit.
8282 */
8283
8284 if (!redact_info) {
8285 static uint64_t first_call = 0, second_call = 0;
8286 uint64_t now = 0;
8287 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8288
8289 if (!first_call) {
8290 first_call = now;
8291 } else if (!second_call) {
8292 second_call = now;
8293 } else if (first_call + NSEC_DAY > now) {
8294 return KERN_DENIED;
8295 } else if (first_call + NSEC_DAY < now) {
8296 first_call = now;
8297 second_call = 0;
8298 }
8299 }
8300 #endif
8301
8302 return KERN_SUCCESS;
8303 }
8304
8305 kern_return_t
8306 mach_zone_info(
8307 mach_port_t host_port,
8308 mach_zone_name_array_t *namesp,
8309 mach_msg_type_number_t *namesCntp,
8310 mach_zone_info_array_t *infop,
8311 mach_msg_type_number_t *infoCntp)
8312 {
8313 return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8314 }
8315
8316 kern_return_t
8317 mach_memory_info(
8318 mach_port_t host_port,
8319 mach_zone_name_array_t *namesp,
8320 mach_msg_type_number_t *namesCntp,
8321 mach_zone_info_array_t *infop,
8322 mach_msg_type_number_t *infoCntp,
8323 mach_memory_info_array_t *memoryInfop,
8324 mach_msg_type_number_t *memoryInfoCntp)
8325 {
8326 bool redact_info = false;
8327 host_t host = HOST_NULL;
8328
8329 host = convert_port_to_host_priv(host_port);
8330 if (host == HOST_NULL) {
8331 redact_info = true;
8332 host = convert_port_to_host(host_port);
8333 }
8334
8335 return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8336 }
8337
8338 static void
8339 zone_info_redact(mach_zone_info_t *zi)
8340 {
8341 zi->mzi_cur_size = 0;
8342 zi->mzi_max_size = 0;
8343 zi->mzi_alloc_size = 0;
8344 zi->mzi_sum_size = 0;
8345 zi->mzi_collectable = 0;
8346 }
8347
8348 static bool
8349 zone_info_needs_to_be_coalesced(int zone_index)
8350 {
8351 zone_security_flags_t zsflags = zone_security_array[zone_index];
8352 if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_DEFAULT ||
8353 zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8354 return true;
8355 }
8356 return false;
8357 }
8358
8359 static bool
8360 zone_info_find_coalesce_zone(
8361 mach_zone_info_t *zi,
8362 mach_zone_info_t *info,
8363 int *coalesce,
8364 int coalesce_count,
8365 int *coalesce_index)
8366 {
8367 for (int i = 0; i < coalesce_count; i++) {
8368 if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8369 *coalesce_index = coalesce[i];
8370 return true;
8371 }
8372 }
8373
8374 return false;
8375 }
8376
8377 static void
8378 zone_info_coalesce(
8379 mach_zone_info_t *info,
8380 int coalesce_index,
8381 mach_zone_info_t *zi)
8382 {
8383 info[coalesce_index].mzi_count += zi->mzi_count;
8384 }
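/*
 * When the caller is not entitled to full data, kalloc, kalloc-type and
 * kalloc-var zones of the same element size are folded into a single
 * "kalloc.<size>" row: the sizes were already zeroed by zone_info_redact(),
 * so only the element counts are accumulated.
 */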
8385
8386 static kern_return_t
8387 mach_memory_info_internal(
8388 host_t host,
8389 mach_zone_name_array_t *namesp,
8390 mach_msg_type_number_t *namesCntp,
8391 mach_zone_info_array_t *infop,
8392 mach_msg_type_number_t *infoCntp,
8393 mach_memory_info_array_t *memoryInfop,
8394 mach_msg_type_number_t *memoryInfoCntp,
8395 bool redact_info)
8396 {
8397 mach_zone_name_t *names;
8398 vm_offset_t names_addr;
8399 vm_size_t names_size;
8400
8401 mach_zone_info_t *info;
8402 vm_offset_t info_addr;
8403 vm_size_t info_size;
8404
8405 int *coalesce;
8406 vm_offset_t coalesce_addr;
8407 vm_size_t coalesce_size;
8408 int coalesce_count = 0;
8409
8410 mach_memory_info_t *memory_info;
8411 vm_offset_t memory_info_addr;
8412 vm_size_t memory_info_size;
8413 vm_size_t memory_info_vmsize;
8414 unsigned int num_info;
8415
8416 unsigned int max_zones, used_zones, i;
8417 mach_zone_name_t *zn;
8418 mach_zone_info_t *zi;
8419 kern_return_t kr;
8420
8421 uint64_t zones_collectable_bytes = 0;
8422
8423 if (host == HOST_NULL) {
8424 return KERN_INVALID_HOST;
8425 }
8426
8427 kr = mach_memory_info_security_check(redact_info);
8428 if (kr != KERN_SUCCESS) {
8429 return kr;
8430 }
8431
8432 /*
8433 * We assume that zones aren't freed once allocated.
8434 * We won't pick up any zones that are allocated later.
8435 */
8436
8437 max_zones = os_atomic_load(&num_zones, relaxed);
8438
8439 names_size = round_page(max_zones * sizeof *names);
8440 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8441 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8442 if (kr != KERN_SUCCESS) {
8443 return kr;
8444 }
8445 names = (mach_zone_name_t *) names_addr;
8446
8447 info_size = round_page(max_zones * sizeof *info);
8448 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8449 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8450 if (kr != KERN_SUCCESS) {
8451 kmem_free(ipc_kernel_map,
8452 names_addr, names_size);
8453 return kr;
8454 }
8455 info = (mach_zone_info_t *) info_addr;
8456
8457 if (redact_info) {
8458 coalesce_size = round_page(max_zones * sizeof *coalesce);
8459 kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8460 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8461 if (kr != KERN_SUCCESS) {
8462 kmem_free(ipc_kernel_map,
8463 names_addr, names_size);
8464 kmem_free(ipc_kernel_map,
8465 info_addr, info_size);
8466 return kr;
8467 }
8468 coalesce = (int *)coalesce_addr;
8469 }
8470
8471 zn = &names[0];
8472 zi = &info[0];
8473
8474 used_zones = 0;
8475 for (i = 0; i < max_zones; i++) {
8476 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8477 continue;
8478 }
8479
8480 if (!redact_info) {
8481 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8482 zn++;
8483 zi++;
8484 used_zones++;
8485 continue;
8486 }
8487
8488 zone_info_redact(zi);
8489 if (!zone_info_needs_to_be_coalesced(i)) {
8490 zn++;
8491 zi++;
8492 used_zones++;
8493 continue;
8494 }
8495
8496 int coalesce_index;
8497 bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8498 coalesce, coalesce_count, &coalesce_index);
8499
8500 /* Didn't find a zone to coalesce */
8501 if (!found_coalesce_zone) {
8502 /* Updates the zone name */
8503 __nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8504 snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8505 (int)zi->mzi_elem_size);
8506
8507 coalesce[coalesce_count] = used_zones;
8508 coalesce_count++;
8509 zn++;
8510 zi++;
8511 used_zones++;
8512 continue;
8513 }
8514
8515 zone_info_coalesce(info, coalesce_index, zi);
8516 }
8517
8518 if (redact_info) {
8519 kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8520 }
8521
8522 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8523 *namesCntp = used_zones;
8524
8525 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8526 *infoCntp = used_zones;
8527
8528 num_info = 0;
8529 memory_info_addr = 0;
8530
8531 if (memoryInfop && memoryInfoCntp) {
8532 vm_map_copy_t copy;
8533 num_info = vm_page_diagnose_estimate();
8534 memory_info_size = num_info * sizeof(*memory_info);
8535 memory_info_vmsize = round_page(memory_info_size);
8536 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8537 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8538 if (kr != KERN_SUCCESS) {
8539 return kr;
8540 }
8541
8542 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8543 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8544 assert(kr == KERN_SUCCESS);
8545
8546 memory_info = (mach_memory_info_t *) memory_info_addr;
8547 vm_page_diagnose(memory_info, num_info, zones_collectable_bytes, redact_info);
8548
8549 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8550 assert(kr == KERN_SUCCESS);
8551
8552 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8553 (vm_map_size_t)memory_info_size, TRUE, &copy);
8554 assert(kr == KERN_SUCCESS);
8555
8556 *memoryInfop = (mach_memory_info_t *) copy;
8557 *memoryInfoCntp = num_info;
8558 }
8559
8560 return KERN_SUCCESS;
8561 }
8562
8563 kern_return_t
8564 mach_zone_info_for_zone(
8565 host_priv_t host,
8566 mach_zone_name_t name,
8567 mach_zone_info_t *infop)
8568 {
8569 zone_t zone_ptr;
8570
8571 if (host == HOST_NULL) {
8572 return KERN_INVALID_HOST;
8573 }
8574
8575 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8576 if (!PE_i_can_has_debugger(NULL)) {
8577 return KERN_INVALID_HOST;
8578 }
8579 #endif
8580
8581 if (infop == NULL) {
8582 return KERN_INVALID_ARGUMENT;
8583 }
8584
8585 zone_ptr = ZONE_NULL;
8586 zone_foreach(z) {
8587 /*
8588 * Append kalloc heap name to zone name (if zone is used by kalloc)
8589 */
8590 char temp_zone_name[MAX_ZONE_NAME] = "";
8591 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8592 zone_heap_name(z), z->z_name);
8593
8594 /* Find the requested zone by name */
8595 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8596 zone_ptr = z;
8597 break;
8598 }
8599 }
8600
8601 /* No zones found with the requested zone name */
8602 if (zone_ptr == ZONE_NULL) {
8603 return KERN_INVALID_ARGUMENT;
8604 }
8605
8606 if (get_zone_info(zone_ptr, NULL, infop)) {
8607 return KERN_SUCCESS;
8608 }
8609 return KERN_FAILURE;
8610 }
8611
8612 kern_return_t
8613 mach_zone_info_for_largest_zone(
8614 host_priv_t host,
8615 mach_zone_name_t *namep,
8616 mach_zone_info_t *infop)
8617 {
8618 if (host == HOST_NULL) {
8619 return KERN_INVALID_HOST;
8620 }
8621
8622 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8623 if (!PE_i_can_has_debugger(NULL)) {
8624 return KERN_INVALID_HOST;
8625 }
8626 #endif
8627
8628 if (namep == NULL || infop == NULL) {
8629 return KERN_INVALID_ARGUMENT;
8630 }
8631
8632 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8633 return KERN_SUCCESS;
8634 }
8635 return KERN_FAILURE;
8636 }
8637
8638 uint64_t
8639 get_zones_collectable_bytes(void)
8640 {
8641 uint64_t zones_collectable_bytes = 0;
8642 mach_zone_info_t zi;
8643
8644 zone_foreach(z) {
8645 if (get_zone_info(z, NULL, &zi)) {
8646 zones_collectable_bytes +=
8647 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8648 }
8649 }
8650
8651 return zones_collectable_bytes;
8652 }
8653
8654 kern_return_t
8655 mach_zone_get_zlog_zones(
8656 host_priv_t host,
8657 mach_zone_name_array_t *namesp,
8658 mach_msg_type_number_t *namesCntp)
8659 {
8660 #if ZALLOC_ENABLE_LOGGING
8661 unsigned int max_zones, logged_zones, i;
8662 kern_return_t kr;
8663 zone_t zone_ptr;
8664 mach_zone_name_t *names;
8665 vm_offset_t names_addr;
8666 vm_size_t names_size;
8667
8668 if (host == HOST_NULL) {
8669 return KERN_INVALID_HOST;
8670 }
8671
8672 if (namesp == NULL || namesCntp == NULL) {
8673 return KERN_INVALID_ARGUMENT;
8674 }
8675
8676 max_zones = os_atomic_load(&num_zones, relaxed);
8677
8678 names_size = round_page(max_zones * sizeof *names);
8679 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8680 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8681 if (kr != KERN_SUCCESS) {
8682 return kr;
8683 }
8684 names = (mach_zone_name_t *) names_addr;
8685
8686 zone_ptr = ZONE_NULL;
8687 logged_zones = 0;
8688 for (i = 0; i < max_zones; i++) {
8689 zone_t z = &(zone_array[i]);
8690 assert(z != ZONE_NULL);
8691
8692 /* Copy out the zone name if zone logging is enabled */
8693 if (z->z_btlog) {
8694 get_zone_info(z, &names[logged_zones], NULL);
8695 logged_zones++;
8696 }
8697 }
8698
8699 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8700 *namesCntp = logged_zones;
8701
8702 return KERN_SUCCESS;
8703
8704 #else /* ZALLOC_ENABLE_LOGGING */
8705 #pragma unused(host, namesp, namesCntp)
8706 return KERN_FAILURE;
8707 #endif /* ZALLOC_ENABLE_LOGGING */
8708 }
8709
8710 kern_return_t
8711 mach_zone_get_btlog_records(
8712 host_priv_t host,
8713 mach_zone_name_t name,
8714 zone_btrecord_array_t *recsp,
8715 mach_msg_type_number_t *numrecs)
8716 {
8717 #if ZALLOC_ENABLE_LOGGING
8718 zone_btrecord_t *recs;
8719 kern_return_t kr;
8720 vm_address_t addr;
8721 vm_size_t size;
8722 zone_t zone_ptr;
8723 vm_map_copy_t copy;
8724
8725 if (host == HOST_NULL) {
8726 return KERN_INVALID_HOST;
8727 }
8728
8729 if (recsp == NULL || numrecs == NULL) {
8730 return KERN_INVALID_ARGUMENT;
8731 }
8732
8733 zone_ptr = ZONE_NULL;
8734 zone_foreach(z) {
8735 /*
8736 * Append kalloc heap name to zone name (if zone is used by kalloc)
8737 */
8738 char temp_zone_name[MAX_ZONE_NAME] = "";
8739 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8740 zone_heap_name(z), z->z_name);
8741
8742 /* Find the requested zone by name */
8743 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8744 zone_ptr = z;
8745 break;
8746 }
8747 }
8748
8749 /* No zones found with the requested zone name */
8750 if (zone_ptr == ZONE_NULL) {
8751 return KERN_INVALID_ARGUMENT;
8752 }
8753
8754 /* Logging not turned on for the requested zone */
8755 if (!zone_ptr->z_btlog) {
8756 return KERN_FAILURE;
8757 }
8758
8759 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8760 if (kr != KERN_SUCCESS) {
8761 return kr;
8762 }
8763
8764 addr = (vm_address_t)recs;
8765 size = sizeof(zone_btrecord_t) * *numrecs;
8766
8767 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8768 assert(kr == KERN_SUCCESS);
8769
8770 *recsp = (zone_btrecord_t *)copy;
8771 return KERN_SUCCESS;
8772
8773 #else /* !ZALLOC_ENABLE_LOGGING */
8774 #pragma unused(host, name, recsp, numrecs)
8775 return KERN_FAILURE;
8776 #endif /* !ZALLOC_ENABLE_LOGGING */
8777 }
8778
8779
8780 kern_return_t
8781 mach_zone_force_gc(
8782 host_t host)
8783 {
8784 if (host == HOST_NULL) {
8785 return KERN_INVALID_HOST;
8786 }
8787
8788 #if DEBUG || DEVELOPMENT
8789 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8790 /* Callout to buffer cache GC to drop elements in the apfs zones */
8791 if (consider_buffer_cache_collect != NULL) {
8792 (void)(*consider_buffer_cache_collect)(0);
8793 }
8794 zone_gc(ZONE_GC_DRAIN);
8795 #endif /* DEBUG || DEVELOPMENT */
8796 return KERN_SUCCESS;
8797 }
8798
8799 zone_t
8800 zone_find_largest(uint64_t *zone_size)
8801 {
8802 zone_t largest_zone = 0;
8803 uint64_t largest_zone_size = 0;
8804 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8805 if (zone_size) {
8806 *zone_size = largest_zone_size;
8807 }
8808 return largest_zone;
8809 }
8810
8811 void
8812 zone_get_stats(
8813 zone_t zone,
8814 struct zone_basic_stats *stats)
8815 {
8816 stats->zbs_avail = zone->z_elems_avail;
8817
8818 stats->zbs_alloc_fail = 0;
8819 zpercpu_foreach(zs, zone->z_stats) {
8820 stats->zbs_alloc_fail += zs->zs_alloc_fail;
8821 }
8822
8823 stats->zbs_cached = 0;
8824 if (zone->z_pcpu_cache) {
8825 zpercpu_foreach(zc, zone->z_pcpu_cache) {
8826 stats->zbs_cached += zc->zc_alloc_cur +
8827 zc->zc_free_cur +
8828 zc->zc_depot.zd_full * zc_mag_size();
8829 }
8830 }
8831
8832 stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
8833
8834 /*
8835 * Since we don't take any locks, deal with possible inconsistencies
8836 * as the counters may have changed.
8837 */
8838 if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
8839 &stats->zbs_alloc)) {
8840 stats->zbs_avail = stats->zbs_free;
8841 stats->zbs_alloc = 0;
8842 }
8843 }
8844
8845 #endif /* !ZALLOC_TEST */
8846 #pragma mark zone creation, configuration, destruction
8847 #if !ZALLOC_TEST
8848
8849 static zone_t
8850 zone_init_defaults(zone_id_t zid)
8851 {
8852 zone_t z = &zone_array[zid];
8853
8854 z->z_wired_max = ~0u;
8855 z->collectable = true;
8856
8857 hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
8858 hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
8859 zone_depot_init(&z->z_recirc);
8860 return z;
8861 }
8862
8863 void
8864 zone_set_exhaustible(zone_t zone, vm_size_t nelems)
8865 {
8866 zone_lock(zone);
8867 zone->exhaustible = true;
8868 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8869 zone_unlock(zone);
8870 }
8871
8872 void
8873 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
8874 {
8875 zone_t zone = zov.zov_zone;
8876
8877 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
8878 zone = zov.zov_view->zv_zone;
8879 } else {
8880 zone = zov.zov_zone;
8881 }
8882
8883 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
8884 }
8885
8886 /**
8887 * @function zone_create_find
8888 *
8889 * @abstract
8890 * Finds an unused zone for the given name and element size.
8891 *
8892 * @param name the zone name
8893 * @param size the element size (including redzones, ...)
8894 * @param flags the flags passed to @c zone_create*
8895 * @param zid_inout the desired zone ID or ZONE_ID_ANY
8896 *
8897 * @returns a zone to initialize further.
8898 */
8899 static zone_t
8900 zone_create_find(
8901 const char *name,
8902 vm_size_t size,
8903 zone_create_flags_t flags,
8904 zone_id_t *zid_inout)
8905 {
8906 zone_id_t nzones, zid = *zid_inout;
8907 zone_t z;
8908
8909 simple_lock(&all_zones_lock, &zone_locks_grp);
8910
8911 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
8912 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
8913
8914 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
8915 /*
8916 * The first time around, make sure the reserved zone IDs
8917 * have an initialized lock as zone_index_foreach() will
8918 * enumerate them.
8919 */
8920 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
8921 zone_init_defaults(nzones++);
8922 }
8923
8924 os_atomic_store(&num_zones, nzones, release);
8925 }
8926
8927 if (zid != ZONE_ID_ANY) {
8928 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
8929 panic("zone_create: invalid desired zone ID %d for %s",
8930 zid, name);
8931 }
8932 if (flags & ZC_DESTRUCTIBLE) {
8933 panic("zone_create: ID %d (%s) must be permanent", zid, name);
8934 }
8935 if (zone_array[zid].z_self) {
8936 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
8937 }
8938 z = &zone_array[zid];
8939 } else {
8940 if (flags & ZC_DESTRUCTIBLE) {
8941 /*
8942 * If possible, find a previously zdestroy'ed zone in the
8943 * zone_array that we can reuse.
8944 */
8945 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
8946 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
8947 z = &zone_array[i];
8948
8949 /*
8950 * If the zone name and the element size are the
8951 * same, we can just reuse the old zone struct.
8952 */
8953 if (strcmp(z->z_name, name) ||
8954 zone_elem_outer_size(z) != size) {
8955 continue;
8956 }
8957 bitmap_clear(zone_destroyed_bitmap, i);
8958 z->z_destroyed = false;
8959 z->z_self = z;
8960 zid = (zone_id_t)i;
8961 goto out;
8962 }
8963 }
8964
8965 zid = nzones++;
8966 z = zone_init_defaults(zid);
8967
8968 /*
8969 * The release barrier pairs with the acquire in
8970 * zone_index_foreach() and makes sure that enumeration loops
8971 * always see an initialized zone lock.
8972 */
8973 os_atomic_store(&num_zones, nzones, release);
8974 }
8975
8976 out:
8977 num_zones_in_use++;
8978 simple_unlock(&all_zones_lock);
8979
8980 *zid_inout = zid;
8981 return z;
8982 }
8983
8984 __abortlike
8985 static void
8986 zone_create_panic(const char *name, const char *f1, const char *f2)
8987 {
8988 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
8989 name, f1, f2);
8990 }
8991 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
8992 if ((flags) & forbidden_flag) { \
8993 zone_create_panic(name, #current_flag, #forbidden_flag); \
8994 }
8995
8996 /*
8997 * Adjusts the size of the element based on minimum size, alignment
8998 * and kasan redzones
8999 */
9000 static vm_size_t
9001 zone_elem_adjust_size(
9002 const char *name __unused,
9003 vm_size_t elem_size,
9004 zone_create_flags_t flags __unused,
9005 uint16_t *redzone __unused)
9006 {
9007 vm_size_t size;
9008
9009 /*
9010 * Adjust element size for minimum size and pointer alignment
9011 */
9012 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
9013 if (size < ZONE_MIN_ELEM_SIZE) {
9014 size = ZONE_MIN_ELEM_SIZE;
9015 }
9016
9017 #if KASAN_CLASSIC
9018 /*
9019 * Expand the zone allocation size to include the redzones.
9020 *
9021 * For page-multiple zones add a full guard page because they
9022 * likely require alignment.
9023 */
9024 uint16_t redzone_tmp;
9025 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
9026 redzone_tmp = 0;
9027 } else if ((size & PAGE_MASK) == 0) {
9028 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
9029 panic("zone_create: zone %s can't provide more than PAGE_SIZE"
9030 "alignment", name);
9031 }
9032 redzone_tmp = PAGE_SIZE;
9033 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
9034 redzone_tmp = 0;
9035 } else {
9036 redzone_tmp = KASAN_GUARD_SIZE;
9037 }
9038 size += redzone_tmp;
9039 if (redzone) {
9040 *redzone = redzone_tmp;
9041 }
9042 #endif
9043 return size;
9044 }
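/*
 * Worked example (assuming an 8- or 16-byte ZONE_ALIGN_SIZE): a 13-byte
 * element rounds up to 16 bytes, and anything smaller than
 * ZONE_MIN_ELEM_SIZE is raised to that floor.  Under KASAN_CLASSIC the
 * redzone bytes computed above are then added on top of the adjusted size.
 */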
9045
9046 /*
9047 * Returns the allocation chunk size that has the least fragmentation
9048 */
9049 static vm_size_t
9050 zone_get_min_alloc_granule(
9051 vm_size_t elem_size,
9052 zone_create_flags_t flags)
9053 {
9054 vm_size_t alloc_granule = PAGE_SIZE;
9055 if (flags & ZC_PERCPU) {
9056 alloc_granule = PAGE_SIZE * zpercpu_count();
9057 if (PAGE_SIZE % elem_size > 256) {
9058 panic("zone_create: per-cpu zone has too much fragmentation");
9059 }
9060 } else if (flags & ZC_READONLY) {
9061 alloc_granule = PAGE_SIZE;
9062 } else if ((elem_size & PAGE_MASK) == 0) {
9063 /* zero fragmentation by definition */
9064 alloc_granule = elem_size;
9065 } else if (alloc_granule % elem_size == 0) {
9066 /* zero fragmentation by definition */
9067 } else {
9068 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9069 vm_size_t alloc_tmp = PAGE_SIZE;
9070 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9071
9072 #if __arm64__
9073 /*
9074 * Increase chunk size to 48K for sizes larger than 4K on 16k
9075 * machines, so as to reduce internal fragmentation for kalloc
9076 * zones with sizes 12K and 24K.
9077 */
9078 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9079 max_chunk_size = 48 * 1024;
9080 }
9081 #endif
9082 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9083 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9084 if (frag_tmp < frag) {
9085 frag = frag_tmp;
9086 alloc_granule = alloc_tmp;
9087 }
9088 }
9089 }
9090 return alloc_granule;
9091 }
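/*
 * Worked example (assuming 4K pages): for a 3072-byte element a single
 * page wastes 1024 bytes (25%), but a 3-page chunk holds exactly 4 elements
 * with no waste, so the search above settles on a 12K granule.
 */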
9092
9093 vm_size_t
9094 zone_get_early_alloc_size(
9095 const char *name __unused,
9096 vm_size_t elem_size,
9097 zone_create_flags_t flags,
9098 vm_size_t min_elems)
9099 {
9100 vm_size_t adjusted_size, alloc_granule, chunk_elems;
9101
9102 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9103 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9104 chunk_elems = alloc_granule / adjusted_size;
9105
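/*
 * Illustrative arithmetic (numbers are hypothetical): if the adjusted
 * element size yields 64 elements per chunk and at least 300 elements
 * are requested, the expression below rounds up to ceil(300 / 64) = 5
 * chunks worth of early memory, i.e. 5 * alloc_granule bytes.
 */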
9106 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9107 }
9108
9109 zone_t
9110 zone_create_ext(
9111 const char *name,
9112 vm_size_t size,
9113 zone_create_flags_t flags,
9114 zone_id_t zid,
9115 void (^extra_setup)(zone_t))
9116 {
9117 zone_security_flags_t *zsflags;
9118 uint16_t redzone;
9119 zone_t z;
9120
9121 if (size > ZONE_MAX_ALLOC_SIZE) {
9122 panic("zone_create: element size too large: %zd", (size_t)size);
9123 }
9124
9125 if (size < 2 * sizeof(vm_size_t)) {
9126 /* Elements are too small for kasan. */
9127 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9128 }
9129
9130 size = zone_elem_adjust_size(name, size, flags, &redzone);
9131
9132 /*
9133 * Allocate the zone slot, return early if we found an older match.
9134 */
9135 z = zone_create_find(name, size, flags, &zid);
9136 if (__improbable(z->z_self)) {
9137 /* We found a zone to reuse */
9138 return z;
9139 }
9140 zsflags = &zone_security_array[zid];
9141
9142 /*
9143 * Initialize the zone properly.
9144 */
9145
9146 /*
9147 * If the kernel is post lockdown, copy the zone name passed in.
9148 * Else simply maintain a pointer to the name string as it can only
9149 * be a core XNU zone (no unloadable kext exists before lockdown).
9150 */
9151 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9152 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9153 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9154 strlcpy(buf, name, nsz);
9155 z->z_name = buf;
9156 } else {
9157 z->z_name = name;
9158 }
9159 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9160 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9161 } else {
9162 /*
9163 * zone_init() hasn't run yet, use the storage provided by
9164 * zone_stats_startup(), and zone_init() will replace it
9165 * with the final value once the PERCPU zone exists.
9166 */
9167 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9168 }
9169
9170 if (flags & ZC_OBJ_CACHE) {
9171 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9172 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9173 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9174 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9175
9176 z->z_elem_size = (uint16_t)size;
9177 z->z_chunk_pages = 0;
9178 z->z_quo_magic = 0;
9179 z->z_align_magic = 0;
9180 z->z_chunk_elems = 0;
9181 z->z_elem_offs = 0;
9182 z->no_callout = true;
9183 zsflags->z_lifo = true;
9184 } else {
9185 vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9186
9187 z->z_elem_size = (uint16_t)(size - redzone);
9188 z->z_chunk_pages = (uint16_t)atop(alloc);
9189 z->z_quo_magic = Z_MAGIC_QUO(size);
9190 z->z_align_magic = Z_MAGIC_ALIGNED(size);
9191 if (flags & ZC_PERCPU) {
9192 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9193 z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9194 } else {
9195 z->z_chunk_elems = (uint16_t)(alloc / size);
9196 z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9197 }
9198 }
9199
9200 /*
9201 * Handle KPI flags
9202 */
9203
9204 /* ZC_CACHING applied after all configuration is done */
9205 if (flags & ZC_NOCACHING) {
9206 z->z_nocaching = true;
9207 }
9208
9209 if (flags & ZC_READONLY) {
9210 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9211 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9212 assert(zid <= ZONE_ID__LAST_RO);
9213 #if ZSECURITY_CONFIG(READ_ONLY)
9214 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9215 #endif
9216 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9217 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9218 assert(size <= PAGE_SIZE);
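/*
 * Illustration of the check below: on 4K pages a 3K read-only element
 * would waste 1K per page (25%) and panic here, while a 360-byte
 * element wastes only 136 bytes (~3%) and passes.
 */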
9219 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9220 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9221 (uint32_t)size, zone_heap_name(z), z->z_name);
9222 }
9223 }
9224
9225 if (flags & ZC_PERCPU) {
9226 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9227 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9228 z->z_percpu = true;
9229 }
9230 if (flags & ZC_NOGC) {
9231 z->collectable = false;
9232 }
9233 /*
9234 * Handle ZC_NOENCRYPT from xnu only
9235 */
9236 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9237 zsflags->z_noencrypt = true;
9238 }
9239 if (flags & ZC_NOCALLOUT) {
9240 z->no_callout = true;
9241 }
9242 if (flags & ZC_DESTRUCTIBLE) {
9243 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9244 z->z_destructible = true;
9245 }
9246 /*
9247 * Handle Internal flags
9248 */
9249 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9250 if (flags & ZC_PGZ_USE_GUARDS) {
9251 /*
9252 * Try to turn on guard pages only for zones
9253 * with a chance of OOB.
9254 */
9255 if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9256 zsflags->z_pgz_use_guards = true;
9257 }
9258 z->z_pgz_use_guards = true;
9259 }
9260 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9261 if (!(flags & ZC_NOTBITAG)) {
9262 z->z_tbi_tag = true;
9263 }
9264 if (flags & ZC_KALLOC_TYPE) {
9265 zsflags->z_kalloc_type = true;
9266 }
9267 if (flags & ZC_VM) {
9268 zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9269 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9270 }
9271 if (flags & ZC_DATA) {
9272 zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9273 }
9274 #if KASAN_CLASSIC
9275 if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9276 z->z_kasan_quarantine = true;
9277 }
9278 z->z_kasan_redzone = redzone;
9279 #endif /* KASAN_CLASSIC */
9280 #if KASAN_FAKESTACK
9281 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9282 z->z_kasan_fakestacks = true;
9283 }
9284 #endif /* KASAN_FAKESTACK */
9285
9286 /*
9287 * Then if there's extra tuning, do it
9288 */
9289 if (extra_setup) {
9290 extra_setup(z);
9291 }
9292
9293 /*
9294 * Configure debugging features
9295 */
9296 #if CONFIG_PROB_GZALLOC
9297 if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9298 pgz_zone_init(z);
9299 }
9300 #endif
9301 if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9302 #if ZALLOC_ENABLE_LOGGING
9303 /*
9304 * Check for and set up zone leak detection
9305 * if requested via boot-args.
9306 */
9307 zone_setup_logging(z);
9308 #endif /* ZALLOC_ENABLE_LOGGING */
9309 #if KASAN_TBI
9310 zone_setup_kasan_logging(z);
9311 #endif /* KASAN_TBI */
9312 }
9313
9314 #if VM_TAG_SIZECLASSES
9315 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9316 static uint16_t sizeclass_idx;
9317
9318 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9319 z->z_uses_tags = true;
9320 if (zsflags->z_kheap_id == KHEAP_ID_DEFAULT) {
9321 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9322 z->z_tags_sizeclass = sizeclass_idx++;
9323 } else {
9324 uint16_t i = 0;
9325 for (; i < sizeclass_idx; i++) {
9326 if (size == zone_tags_sizeclasses[i]) {
9327 z->z_tags_sizeclass = i;
9328 break;
9329 }
9330 }
9331
9332 /*
9333 * Size class wasn't found, add it to zone_tags_sizeclasses
9334 */
9335 if (i == sizeclass_idx) {
9336 assert(i < VM_TAG_SIZECLASSES);
9337 zone_tags_sizeclasses[i] = (uint16_t)size;
9338 z->z_tags_sizeclass = sizeclass_idx++;
9339 }
9340 }
9341 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9342 }
9343 #endif
9344
9345 /*
9346 * Finally, fixup properties based on security policies, boot-args, ...
9347 */
9348 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9349 /*
9350 * We use LIFO in the data map, because workloads like network
9351 * usage or similar tend to rotate through allocations very
9352 * quickly, sometimes with exploding working sets, and using
9353 * a FIFO policy might cause massive TLB thrashing with rather
9354 * dramatic performance impacts.
9355 */
9356 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9357 zsflags->z_lifo = true;
9358 }
9359
9360 if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9361 /*
9362 * No zone made before zone_init() can have ZC_CACHING set.
9363 */
9364 assert(zc_magazine_zone);
9365 zone_enable_caching(z);
9366 }
9367
9368 zone_lock(z);
9369 z->z_self = z;
9370 zone_unlock(z);
9371
9372 return z;
9373 }
9374
9375 void
9376 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9377 {
9378 /* moving to SMR must be done before the zone has ever been used */
9379 assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9380 assert(!zone_security_array[zone_index(zone)].z_lifo);
9381
9382 if (!zone->z_pcpu_cache) {
9383 zone_enable_caching(zone);
9384 }
9385
9386 zone_lock(zone);
9387
9388 zpercpu_foreach(it, zone->z_pcpu_cache) {
9389 it->zc_smr = smr;
9390 it->zc_free = free_cb;
9391 }
9392 zone->z_smr = true;
9393
9394 zone_unlock(zone);
9395 }
9396
9397 __startup_func
9398 void
9399 zone_create_startup(struct zone_create_startup_spec *spec)
9400 {
9401 zone_t z;
9402
9403 z = zone_create_ext(spec->z_name, spec->z_size,
9404 spec->z_flags, spec->z_zid, spec->z_setup);
9405 if (spec->z_var) {
9406 *spec->z_var = z;
9407 }
9408 }
9409
9410 /*
9411 * The first 4 fields of a zone_view and a zone alias each other, so that the
9412 * zone_or_view_t union works. Trust, but verify.
9413 */
9414 #define zalloc_check_zov_alias(f1, f2) \
9415 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9416 zalloc_check_zov_alias(z_self, zv_zone);
9417 zalloc_check_zov_alias(z_stats, zv_stats);
9418 zalloc_check_zov_alias(z_name, zv_name);
9419 zalloc_check_zov_alias(z_views, zv_next);
9420 #undef zalloc_check_zov_alias
9421
9422 __startup_func
9423 void
9424 zone_view_startup_init(struct zone_view_startup_spec *spec)
9425 {
9426 struct kalloc_heap *heap = NULL;
9427 zone_view_t zv = spec->zv_view;
9428 zone_t z;
9429 zone_security_flags_t zsflags;
9430
9431 switch (spec->zv_heapid) {
9432 case KHEAP_ID_DEFAULT:
9433 panic("%s: Use KALLOC_TYPE_DEFINE for zone view %s instead"
9434 "of ZONE_VIEW_DEFINE as it is from default kalloc heap",
9435 __func__, zv->zv_name);
9436 __builtin_unreachable();
9437 case KHEAP_ID_DATA_BUFFERS:
9438 heap = KHEAP_DATA_BUFFERS;
9439 break;
9440 default:
9441 heap = NULL;
9442 }
9443
9444 if (heap) {
9445 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9446 } else {
9447 z = *spec->zv_zone;
9448 assert(spec->zv_size <= zone_elem_inner_size(z));
9449 }
9450
9451 assert(z);
9452
9453 zv->zv_zone = z;
9454 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9455 zv->zv_next = z->z_views;
9456 zsflags = zone_security_config(z);
9457 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9458 /*
9459 * count the raw view for zones not in a heap,
9460 * kalloc_heap_init() already counts it for its members.
9461 */
9462 zone_view_count += 2;
9463 } else {
9464 zone_view_count += 1;
9465 }
9466 z->z_views = zv;
9467 }
9468
9469 zone_t
9470 zone_create(
9471 const char *name,
9472 vm_size_t size,
9473 zone_create_flags_t flags)
9474 {
9475 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9476 }
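/*
 * Typical usage, for illustration only (the struct and zone names below
 * are hypothetical; the flags and calls appear elsewhere in this file):
 *
 *	static zone_t widget_zone;
 *	widget_zone = zone_create("widgets", sizeof(struct widget),
 *	    ZC_ZFREE_CLEARMEM);
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(widget_zone, w);
 */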
9477
9478 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9479
9480 zone_id_t
9481 zone_create_ro(
9482 const char *name,
9483 vm_size_t size,
9484 zone_create_flags_t flags,
9485 zone_create_ro_id_t zc_ro_id)
9486 {
9487 assert(zc_ro_id <= ZC_RO_ID__LAST);
9488 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9489 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9490 return reserved_zid;
9491 }
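/*
 * Usage sketch (illustrative only; identifiers below are hypothetical,
 * see zone_ro_basic_test_run() further down for a real caller):
 *
 *	zone_id_t zid = zone_create_ro("ro.example", sizeof(uint32_t),
 *	    ZC_NONE, ZC_RO_ID_EXAMPLE);
 *	uint32_t *p = zalloc_ro(zid, Z_WAITOK);
 *	uint32_t v = 4;
 *	zalloc_ro_update_elem(zid, p, &v);	(elements are never written directly)
 *	zfree_ro(zid, p);
 */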
9492
9493 zone_t
9494 zinit(
9495 vm_size_t size, /* the size of an element */
9496 vm_size_t max, /* maximum memory to use */
9497 vm_size_t alloc __unused, /* allocation size */
9498 const char *name) /* a name for the zone */
9499 {
9500 zone_t z = zone_create(name, size, ZC_DESTRUCTIBLE);
9501 z->z_wired_max = zone_alloc_pages_for_nelems(z, max / size);
9502 return z;
9503 }
9504
9505 void
9506 zdestroy(zone_t z)
9507 {
9508 unsigned int zindex = zone_index(z);
9509 zone_security_flags_t zsflags = zone_security_array[zindex];
9510
9511 current_thread()->options |= TH_OPT_ZONE_PRIV;
9512 lck_mtx_lock(&zone_gc_lock);
9513
9514 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9515
9516 lck_mtx_unlock(&zone_gc_lock);
9517 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9518
9519 zone_lock(z);
9520
9521 if (!zone_submap_is_sequestered(zsflags)) {
9522 while (!zone_pva_is_null(z->z_pageq_va)) {
9523 struct zone_page_metadata *meta;
9524
9525 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9526 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9527 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9528 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9529 zone_unlock(z);
9530 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9531 ptoa(z->z_chunk_pages));
9532 zone_lock(z);
9533 }
9534 }
9535
9536 #if !KASAN_CLASSIC
9537 /* Assert that all counts are zero */
9538 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9539 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9540 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9541 zone_heap_name(z), z->z_name);
9542 }
9543
9544 /* consistency check: make sure everything is indeed empty */
9545 assert(zone_pva_is_null(z->z_pageq_empty));
9546 assert(zone_pva_is_null(z->z_pageq_partial));
9547 assert(zone_pva_is_null(z->z_pageq_full));
9548 if (!zone_submap_is_sequestered(zsflags)) {
9549 assert(zone_pva_is_null(z->z_pageq_va));
9550 }
9551 #endif
9552
9553 zone_unlock(z);
9554
9555 simple_lock(&all_zones_lock, &zone_locks_grp);
9556
9557 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9558 /* Mark the zone as destroyed in the bitmap */
9559 bitmap_set(zone_destroyed_bitmap, zindex);
9560 num_zones_in_use--;
9561 assert(num_zones_in_use > 0);
9562
9563 simple_unlock(&all_zones_lock);
9564 }
9565
9566 #endif /* !ZALLOC_TEST */
9567 #pragma mark zalloc module init
9568 #if !ZALLOC_TEST
9569
9570 /*
9571 * Initialize the "zone of zones" which uses fixed memory allocated
9572 * earlier in memory initialization. zone_bootstrap is called
9573 * before zone_init.
9574 */
9575 __startup_func
9576 void
9577 zone_bootstrap(void)
9578 {
9579 #if DEBUG || DEVELOPMENT
9580 #if __x86_64__
9581 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9582 /*
9583 * rdar://79781535 Disable early gaps while running kernPOST on Intel;
9584 * the fp faulting code gets triggered and deadlocks.
9585 */
9586 zone_caching_disabled = 1;
9587 }
9588 #endif /* __x86_64__ */
9589 #endif /* DEBUG || DEVELOPMENT */
9590
9591 /* Validate struct zone_packed_virtual_address expectations */
9592 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9593 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9594 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9595 }
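/*
 * For example, with 16K pages (PAGE_SHIFT == 14) this admits kernel
 * addresses with up to 14 + 31 = 45 significant bits, since only the
 * page number has to fit in the 31-bit zone_pva_t field.
 */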
9596
9597 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9598 if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9599 /*
9600 * Scale zc_mag_size() per machine.
9601 *
9602 * - wide machines get 128B magazines to avoid all false sharing
9603 * - smaller machines but with enough RAM get a bit bigger
9604 * buckets (empirically affects networking performance)
9605 */
9606 if (zpercpu_early_count >= 10) {
9607 _zc_mag_size = 14;
9608 } else if ((sane_size >> 30) >= 4) {
9609 _zc_mag_size = 10;
9610 }
9611 }
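/*
 * Rough arithmetic for the tuning above (assuming 8-byte pointers):
 * 14 slots add 14 * 8 = 112 bytes of element pointers on top of the
 * struct zone_magazine header, which is roughly where the 128B figure
 * in the comment comes from; 10 slots add only 80 bytes.
 */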
9612
9613 /*
9614 * Initialize random used to scramble early allocations
9615 */
9616 zpercpu_foreach_cpu(cpu) {
9617 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9618 }
9619
9620 #if CONFIG_PROB_GZALLOC
9621 /*
9622 * Set pgz_sample_counter on the boot CPU so that we do not sample
9623 * any allocation until PGZ has been properly setup (in pgz_init()).
9624 */
9625 *PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9626 #endif /* CONFIG_PROB_GZALLOC */
9627
9628 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9629 /*
9630 * Randomly assign zones to one of the 4 general submaps,
9631 * and pick whether they allocate from the beginning
9632 * or the end of it.
9633 *
9634 * A lot of OOB exploitation relies on precise interleaving
9635 * of specific types in the heap.
9636 *
9637 * Whoops, you can't guarantee that anymore.
9638 */
9639 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9640 uint32_t r = zalloc_random_uniform32(0,
9641 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9642
9643 zone_security_array[i].z_submap_from_end = (r & 1);
9644 zone_security_array[i].z_submap_idx += (r >> 1);
9645 }
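/*
 * With ZSECURITY_CONFIG_GENERAL_SUBMAPS == 4, the draw above is
 * uniform over [0, 8): bit 0 picks front-vs-back allocation and the
 * remaining bits add 0..3 to the zone's submap index, giving each
 * zone one of 8 equally likely placements.
 */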
9646 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9647
9648 thread_call_setup_with_options(&zone_expand_callout,
9649 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9650 THREAD_CALL_OPTIONS_ONCE);
9651
9652 thread_call_setup_with_options(&zone_trim_callout,
9653 zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9654 THREAD_CALL_OPTIONS_ONCE);
9655 }
9656
9657 #define ZONE_GUARD_SIZE (64UL << 10)
9658
9659 __startup_func
9660 static void
9661 zone_tunables_fixup(void)
9662 {
9663 int wdt = 0;
9664
9665 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9666 if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9667 PE_parse_boot_argn("pgz1", NULL, 0)) {
9668 /*
9669 * if pgz1= was used, but pgz= was not,
9670 * then the more specific pgz1 takes precedence.
9671 */
9672 pgz_all = false;
9673 }
9674 #endif
9675
9676 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9677 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9678 }
9679 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9680 !PE_parse_boot_argn("zet", NULL, 0)) {
9681 zone_exhausted_timeout = -1;
9682 }
9683 }
9684 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9685
9686 __startup_func
9687 static void
9688 zone_submap_init(
9689 mach_vm_offset_t *submap_min,
9690 zone_submap_idx_t idx,
9691 uint64_t zone_sub_map_numer,
9692 uint64_t *remaining_denom,
9693 vm_offset_t *remaining_size)
9694 {
9695 vm_map_create_options_t vmco;
9696 vm_map_address_t addr;
9697 vm_offset_t submap_start, submap_end;
9698 vm_size_t submap_size;
9699 vm_map_t submap;
9700 vm_prot_t prot = VM_PROT_DEFAULT;
9701 vm_prot_t prot_max = VM_PROT_ALL;
9702 kern_return_t kr;
9703
9704 submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9705 *remaining_denom);
9706 submap_start = *submap_min;
9707
9708 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9709 vm_offset_t submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9710 submap_start += submap_padding;
9711 submap_size = pmap_ro_zone_align(submap_size);
9712 assert(*remaining_size >= (submap_padding + submap_size));
9713 *remaining_size -= submap_padding;
9714 *submap_min = submap_start;
9715 }
9716
9717 submap_end = submap_start + submap_size;
9718 if (idx == Z_SUBMAP_IDX_VM) {
9719 vm_packing_verify_range("vm_compressor",
9720 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9721 vm_packing_verify_range("vm_page",
9722 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9723 }
9724
9725 vmco = VM_MAP_CREATE_NEVER_FAULTS;
9726 if (!zone_submap_is_sequestered(idx)) {
9727 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9728 }
9729
9730 vm_map_will_allocate_early_map(&zone_submaps[idx]);
9731 submap = kmem_suballoc(kernel_map, submap_min, submap_size, vmco,
9732 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
9733 VM_KERN_MEMORY_ZONE).kmr_submap;
9734
9735 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9736 zone_info.zi_ro_range.min_address = submap_start;
9737 zone_info.zi_ro_range.max_address = submap_end;
9738 prot_max = prot = VM_PROT_NONE;
9739 }
9740
9741 addr = submap_start;
9742 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9743 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9744 kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9745 if (kr != KERN_SUCCESS) {
9746 panic("ksubmap[%s]: failed to make first entry (%d)",
9747 zone_submaps_names[idx], kr);
9748 }
9749
9750 addr = submap_end - ZONE_GUARD_SIZE / 2;
9751 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9752 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9753 kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9754 if (kr != KERN_SUCCESS) {
9755 panic("ksubmap[%s]: failed to make last entry (%d)",
9756 zone_submaps_names[idx], kr);
9757 }
9758
9759 #if DEBUG || DEVELOPMENT
9760 printf("zone_init: map %-5s %p:%p (%u%c)\n",
9761 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
9762 mach_vm_size_pretty(submap_size), mach_vm_size_unit(submap_size));
9763 #endif /* DEBUG || DEVELOPMENT */
9764
9765 zone_submaps[idx] = submap;
9766 *submap_min = submap_end;
9767 *remaining_size -= submap_size;
9768 *remaining_denom -= zone_sub_map_numer;
9769 }
9770
9771 static inline void
9772 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
9773 {
9774 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
9775 pva->packed_address += delta;
9776 }
9777 }
9778
9779 /*
9780 * Allocate metadata array and migrate bootstrap initial metadata and memory.
9781 */
9782 __startup_func
9783 static void
9784 zone_metadata_init(void)
9785 {
9786 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
9787 vm_map_entry_t first;
9788
9789 struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
9790 vm_size_t early_sz;
9791 vm_offset_t reloc_base;
9792
9793 /*
9794 * Step 1: Allocate the metadata + bitmaps range
9795 *
9796 * Allocations can't be smaller than 8 bytes (one bitmap bit per 8 bytes),
9797 * i.e. at most 128 bits / 16 bytes per 1K of physical memory (16M per 1G).
9798 *
9799 * Let's preallocate for the worst to avoid weird panics.
9800 */
9801 vm_map_will_allocate_early_map(&zone_meta_map);
9802 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
9803 zone_meta_size + zone_bits_size + zone_xtra_size,
9804 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9805 VM_KERN_MEMORY_ZONE, &zone_meta_map);
9806 meta_r.min_address += ZONE_GUARD_SIZE;
9807 meta_r.max_address -= ZONE_GUARD_SIZE;
9808 if (zone_xtra_size) {
9809 xtra_r.max_address = meta_r.max_address;
9810 meta_r.max_address -= zone_xtra_size;
9811 xtra_r.min_address = meta_r.max_address;
9812 } else {
9813 xtra_r.min_address = xtra_r.max_address = 0;
9814 }
9815 bits_r.max_address = meta_r.max_address;
9816 meta_r.max_address -= zone_bits_size;
9817 bits_r.min_address = meta_r.max_address;
9818
9819 #if DEBUG || DEVELOPMENT
9820 printf("zone_init: metadata %p:%p (%u%c)\n",
9821 (void *)meta_r.min_address, (void *)meta_r.max_address,
9822 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
9823 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
9824 printf("zone_init: metabits %p:%p (%u%c)\n",
9825 (void *)bits_r.min_address, (void *)bits_r.max_address,
9826 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
9827 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
9828 printf("zone_init: extra %p:%p (%u%c)\n",
9829 (void *)xtra_r.min_address, (void *)xtra_r.max_address,
9830 mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
9831 mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
9832 #endif /* DEBUG || DEVELOPMENT */
9833
9834 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
9835 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
9836
9837 /*
9838 * Step 2: Install new ranges.
9839 * Relocate metadata and bits.
9840 */
9841 early_r = zone_info.zi_map_range;
9842 early_sz = mach_vm_range_size(&early_r);
9843
9844 zone_info.zi_map_range = zone_map_range;
9845 zone_info.zi_meta_range = meta_r;
9846 zone_info.zi_bits_range = bits_r;
9847 zone_info.zi_xtra_range = xtra_r;
9848 zone_info.zi_meta_base = (struct zone_page_metadata *)meta_r.min_address -
9849 zone_pva_from_addr(zone_map_range.min_address).packed_address;
9850
9851 vm_map_lock(vm_map);
9852 first = vm_map_first_entry(vm_map);
9853 reloc_base = first->vme_end;
9854 first->vme_end += early_sz;
9855 vm_map->size += early_sz;
9856 vm_map_unlock(vm_map);
9857
9858 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
9859 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
9860 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
9861 /* this needs to sign extend */
9862 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
9863
9864 zone_meta_populate(reloc_base, early_sz);
9865 memcpy(new_meta, early_meta,
9866 atop(early_sz) * sizeof(struct zone_page_metadata));
9867 for (uint32_t i = 0; i < atop(early_sz); i++) {
9868 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
9869 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
9870 }
9871
9872 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
9873 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
9874
9875 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
9876 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
9877 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
9878 }
9879
9880 zba_populate(0, false);
9881 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
9882 zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
9883
9884 /*
9885 * Step 3: Relocate the bootstrap VM structs
9886 * (including rewriting their content).
9887 */
9888
9889 #if __x86_64__
9890 kernel_memory_populate(reloc_base, early_sz,
9891 KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL,
9892 VM_KERN_MEMORY_OSFMK);
9893 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
9894 #else
9895 for (vm_address_t addr = early_r.min_address;
9896 addr < early_r.max_address; addr += PAGE_SIZE) {
9897 pmap_paddr_t pa = kvtophys(trunc_page(addr));
9898 __assert_only kern_return_t kr;
9899
9900 kr = pmap_enter_options_addr(kernel_pmap, addr + reloc_delta,
9901 pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
9902 0, NULL);
9903 assert(kr == KERN_SUCCESS);
9904 }
9905 #endif
9906
9907 #if KASAN
9908 kasan_notify_address(reloc_base, early_sz);
9909 #if KASAN_TBI
9910 kasan_tbi_copy_tags(reloc_base, early_r.min_address, early_sz);
9911 #endif /* KASAN_TBI */
9912 #endif /* KASAN */
9913
9914 vm_map_relocate_early_maps(reloc_delta);
9915
9916 for (uint32_t i = 0; i < atop(early_sz); i++) {
9917 zone_id_t zid = new_meta[i].zm_index;
9918 zone_t z = &zone_array[zid];
9919 vm_size_t esize = zone_elem_outer_size(z);
9920 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
9921 vm_address_t addr;
9922
9923 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
9924 continue;
9925 }
9926
9927 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
9928 if (zone_meta_is_free(&new_meta[i], eidx)) {
9929 continue;
9930 }
9931
9932 addr = base + eidx * esize;
9933 #if KASAN_CLASSIC
9934 kasan_alloc(addr,
9935 zone_elem_inner_size(z), zone_elem_inner_size(z),
9936 zone_elem_redzone(z), false,
9937 __builtin_frame_address(0));
9938 #endif
9939 vm_map_relocate_early_elem(zid, addr, reloc_delta);
9940 }
9941 }
9942
9943 #if !__x86_64__
9944 pmap_remove(kernel_pmap, early_r.min_address, early_r.max_address);
9945 #endif
9946 }
9947
9948 __startup_data
9949 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
9950 #if ZSECURITY_CONFIG(READ_ONLY)
9951 [Z_SUBMAP_IDX_VM] = 15,
9952 [Z_SUBMAP_IDX_READ_ONLY] = 5,
9953 #else
9954 [Z_SUBMAP_IDX_VM] = 20,
9955 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
9956 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9957 [Z_SUBMAP_IDX_GENERAL_0] = 15,
9958 [Z_SUBMAP_IDX_GENERAL_1] = 15,
9959 [Z_SUBMAP_IDX_GENERAL_2] = 15,
9960 [Z_SUBMAP_IDX_GENERAL_3] = 15,
9961 [Z_SUBMAP_IDX_DATA] = 20,
9962 #else
9963 [Z_SUBMAP_IDX_GENERAL_0] = 60,
9964 [Z_SUBMAP_IDX_DATA] = 20,
9965 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9966 };
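/*
 * These ratios are consumed by zone_submap_init() as numerator /
 * remaining-denominator shares of ZONE_MAP_VA_SIZE; with the table
 * above the denominator is 100, so e.g. the VM submap gets 15% of the
 * zone VA (20% without READ_ONLY) and the data submap gets 20%.
 */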
9967
9968 __startup_func
9969 static inline uint16_t
9970 zone_submap_ratios_denom(void)
9971 {
9972 uint16_t denom = 0;
9973
9974 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9975 denom += submap_ratios[idx];
9976 }
9977
9978 assert(denom == 100);
9979
9980 return denom;
9981 }
9982
9983 __startup_func
9984 static inline vm_offset_t
9985 zone_restricted_va_max(void)
9986 {
9987 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
9988 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
9989
9990 return trunc_page(MIN(compressor_max, vm_page_max));
9991 }
9992
9993 __startup_func
9994 static void
9995 zone_set_map_sizes(void)
9996 {
9997 vm_size_t zsize;
9998 vm_size_t zsizearg;
9999
10000 /*
10001 * Compute the physical limits for the zone map
10002 */
10003
10004 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
10005 zsize = zsizearg * (1024ULL * 1024);
10006 } else {
10007 /* Set target zone size as 1/4 of physical memory */
10008 zsize = (vm_size_t)(sane_size >> 2);
10009 zsize += zsize >> 1;
10010 }
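/*
 * Note: the "zsize += zsize >> 1" above raises the 1/4-of-RAM target
 * to roughly 3/8 of physical memory before the clamps below apply.
 */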
10011
10012 if (zsize < CONFIG_ZONE_MAP_MIN) {
10013 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
10014 }
10015 if (zsize > sane_size >> 1) {
10016 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
10017 }
10018 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
10019 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
10020 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
10021 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
10022 zsize = ZONE_MAP_MAX;
10023 }
10024
10025 zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
10026
10027
10028 /*
10029 * Declare restrictions on zone max
10030 */
10031 vm_offset_t vm_submap_size = round_page(
10032 (submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10033 zone_submap_ratios_denom());
10034
10035 #if CONFIG_PROB_GZALLOC
10036 vm_submap_size += pgz_get_size();
10037 #endif /* CONFIG_PROB_GZALLOC */
10038 if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10039 &zone_map_range.min_address)) {
10040 zone_map_range.min_address = 0;
10041 }
10042
10043 zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10044 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10045
10046 static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10047 ZBA_PTR_MASK + 1);
10048 zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10049 (CHAR_BIT * KALLOC_MINSIZE));
10050
10051 #if VM_TAG_SIZECLASSES
10052 if (zone_tagging_on) {
10053 zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10054 }
10055 if (zba_xtra_shift) {
10056 /*
10057 * if we need the extra space range, then limit the size of the
10058 * bitmaps to something reasonable instead of a theoretical
10059 * worst case scenario of all zones being for the smallest
10060 * allocation granule, in order to avoid fake VA pressure on
10061 * other parts of the system.
10062 */
10063 zone_bits_size = round_page(zone_bits_size / 8);
10064 zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10065 }
10066 #endif /* VM_TAG_SIZECLASSES */
10067 }
10068 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
10069
10070 /*
10071 * Can't use zone_info.zi_map_range at this point as it is being used to
10072 * store the range of early pmap memory that was stolen to bootstrap the
10073 * necessary VM zones.
10074 */
10075 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10076 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10077 return zone_meta_size + zone_bits_size + zone_xtra_size;
10078 });
10079
10080 /*
10081 * Global initialization of Zone Allocator.
10082 * Runs after zone_bootstrap.
10083 */
10084 __startup_func
10085 static void
10086 zone_init(void)
10087 {
10088 vm_size_t remaining_size = ZONE_MAP_VA_SIZE;
10089 mach_vm_offset_t submap_min = 0;
10090 uint64_t denom = zone_submap_ratios_denom();
10091 /*
10092 * And now allocate the various pieces of VA and submaps.
10093 */
10094
10095 submap_min = zone_map_range.min_address;
10096
10097 #if CONFIG_PROB_GZALLOC
10098 vm_size_t pgz_size = pgz_get_size();
10099
10100 vm_map_will_allocate_early_map(&pgz_submap);
10101 zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10102 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10103 VM_KERN_MEMORY_ZONE, &pgz_submap);
10104
10105 submap_min += pgz_size;
10106 remaining_size -= pgz_size;
10107 #if DEBUG || DEVELOPMENT
10108 printf("zone_init: pgzalloc %p:%p (%u%c) [%d slots]\n",
10109 (void *)zone_info.zi_pgz_range.min_address,
10110 (void *)zone_info.zi_pgz_range.max_address,
10111 mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10112 pgz_slots);
10113 #endif /* DEBUG || DEVELOPMENT */
10114 #endif /* CONFIG_PROB_GZALLOC */
10115
10116 /*
10117 * Allocate the submaps
10118 */
10119 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10120 if (submap_ratios[idx] == 0) {
10121 zone_submaps[idx] = VM_MAP_NULL;
10122 } else {
10123 zone_submap_init(&submap_min, idx, submap_ratios[idx],
10124 &denom, &remaining_size);
10125 }
10126 }
10127
10128 zone_metadata_init();
10129
10130 #if VM_TAG_SIZECLASSES
10131 if (zone_tagging_on) {
10132 vm_allocation_zones_init();
10133 }
10134 #endif /* VM_TAG_SIZECLASSES */
10135
10136 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10137 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10138
10139 (void)zone_create_ext("vm.permanent", 1, kma_flags,
10140 ZONE_ID_PERMANENT, ^(zone_t z) {
10141 z->z_permanent = true;
10142 z->z_elem_size = 1;
10143 });
10144 (void)zone_create_ext("vm.permanent.percpu", 1,
10145 kma_flags | ZC_PERCPU, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10146 z->z_permanent = true;
10147 z->z_elem_size = 1;
10148 });
10149
10150 zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10151 zc_mag_size() * sizeof(vm_offset_t),
10152 ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10153 zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10154
10155 /*
10156 * Now migrate the startup statistics into their final storage,
10157 * and enable logging for early zones (that zone_create_ext() skipped).
10158 */
10159 int cpu = cpu_number();
10160 zone_index_foreach(idx) {
10161 zone_t tz = &zone_array[idx];
10162
10163 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10164 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10165
10166 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10167 tz->z_stats = zs;
10168 }
10169 if (tz->z_self == tz) {
10170 #if ZALLOC_ENABLE_LOGGING
10171 zone_setup_logging(tz);
10172 #endif /* ZALLOC_ENABLE_LOGGING */
10173 #if KASAN_TBI
10174 zone_setup_kasan_logging(tz);
10175 #endif /* KASAN_TBI */
10176 }
10177 }
10178 }
10179 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10180
10181 void
10182 zalloc_first_proc_made(void)
10183 {
10184 zone_caching_disabled = 0;
10185 }
10186
10187 __startup_func
10188 vm_offset_t
10189 zone_early_mem_init(vm_size_t size)
10190 {
10191 vm_offset_t mem;
10192
10193 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10194
10195 /*
10196 * The memory that is used early to bring up the VM is stolen here.
10197 *
10198 * When the zone subsystem is actually initialized,
10199 * zone_metadata_init() will be called, and those pages
10200 * and the elements they contain, will be relocated into
10201 * the VM submap (even on architectures where those zones
10202 * do not live there).
10203 */
10204 #if __x86_64__
10205 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10206 mem = (vm_offset_t)zone_early_pages_to_cram;
10207 #else
10208 mem = (vm_offset_t)pmap_steal_memory(size, PAGE_SIZE);
10209 #endif
10210
10211 zone_info.zi_meta_base = zone_early_meta_array_startup -
10212 zone_pva_from_addr(mem).packed_address;
10213 zone_info.zi_map_range.min_address = mem;
10214 zone_info.zi_map_range.max_address = mem + size;
10215
10216 zone_info.zi_bits_range = (struct mach_vm_range){
10217 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10218 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10219 sizeof(zba_chunk_startup),
10220 };
10221
10222 zba_meta()->zbam_left = 1;
10223 zba_meta()->zbam_right = 1;
10224 zba_init_chunk(0, false);
10225
10226 return mem;
10227 }
10228
10229 #endif /* !ZALLOC_TEST */
10230 #pragma mark - tests
10231 #if DEBUG || DEVELOPMENT
10232
10233 /*
10234 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10235 * thread goes through at a time.
10236 *
10237 * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10238 * before zdestroy()), which could lead us to run out of zones.
10239 */
10240 static bool any_zone_test_running = false;
10241
10242 static uintptr_t *
10243 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10244 {
10245 vm_offset_t elem_size = zone_elem_outer_size(z);
10246 vm_offset_t base;
10247 struct zone_page_metadata *meta;
10248
10249 while (!zone_pva_is_null(page_index)) {
10250 base = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10251 meta = zone_pva_to_meta(page_index);
10252
10253 if (meta->zm_inline_bitmap) {
10254 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10255 uint32_t map = meta[i].zm_bitmap;
10256
10257 for (; map; map &= map - 1) {
10258 *elems++ = INSTANCE_PUT(base +
10259 elem_size * __builtin_clz(map));
10260 }
10261 base += elem_size * 32;
10262 }
10263 } else {
10264 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10265 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10266 for (size_t i = 0; i < (1u << order); i++) {
10267 uint64_t map = bits[i];
10268
10269 for (; map; map &= map - 1) {
10270 *elems++ = INSTANCE_PUT(base +
10271 elem_size * __builtin_clzll(map));
10272 }
10273 base += elem_size * 64;
10274 }
10275 }
10276
10277 page_index = meta->zm_page_next;
10278 }
10279 return elems;
10280 }
10281
10282 kern_return_t
10283 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10284 {
10285 zone_t zone = NULL;
10286 uintptr_t * array;
10287 uintptr_t * next;
10288 uintptr_t element;
10289 uint32_t idx, count, found;
10290 uint32_t nobtcount;
10291 uint32_t elemSize;
10292 size_t maxElems;
10293
10294 zone_foreach(z) {
10295 if (!strncmp(zoneName, z->z_name, nameLen)) {
10296 zone = z;
10297 break;
10298 }
10299 }
10300 if (zone == NULL) {
10301 return KERN_INVALID_NAME;
10302 }
10303
10304 elemSize = (uint32_t)zone_elem_inner_size(zone);
10305 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10306
10307 array = kalloc_type_tag(vm_offset_t, maxElems, VM_KERN_MEMORY_DIAG);
10308 if (array == NULL) {
10309 return KERN_RESOURCE_SHORTAGE;
10310 }
10311
10312 zone_lock(zone);
10313
10314 next = array;
10315 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10316 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10317 count = (uint32_t)(next - array);
10318
10319 zone_unlock(zone);
10320
10321 zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10322 assert(found <= count);
10323
10324 for (idx = 0; idx < count; idx++) {
10325 element = array[idx];
10326 if (kInstanceFlagReferenced & element) {
10327 continue;
10328 }
10329 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10330 }
10331
10332 #if ZALLOC_ENABLE_LOGGING
10333 if (zone->z_btlog && !corruption_debug_flag) {
10334 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10335 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10336 btlog_copy_backtraces_for_elements(zone->z_btlog,
10337 (vm_address_t *)array, &count, elemSize, proc);
10338 }
10339 #endif /* ZALLOC_ENABLE_LOGGING */
10340
10341 for (nobtcount = idx = 0; idx < count; idx++) {
10342 element = array[idx];
10343 if (!element) {
10344 continue;
10345 }
10346 if (kInstanceFlagReferenced & element) {
10347 continue;
10348 }
10349 nobtcount++;
10350 }
10351 if (nobtcount) {
10352 proc(nobtcount, elemSize, BTREF_NULL);
10353 }
10354
10355 kfree_type(vm_offset_t, maxElems, array);
10356 return KERN_SUCCESS;
10357 }
10358
10359 static int
10360 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10361 {
10362 zone_security_flags_t zsflags;
10363 uint32_t x = 4;
10364 uint32_t *test_ptr;
10365
10366 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10367 printf("zone_ro_basic_test: Test already running.\n");
10368 return EALREADY;
10369 }
10370
10371 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10372
10373 for (int i = 0; i < 3; i++) {
10374 #if ZSECURITY_CONFIG(READ_ONLY)
10375 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10376 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10377 printf("zone_ro_basic_test: create a sub-page size zone\n");
10378
10379 printf("zone_ro_basic_test: verify flags were set\n");
10380 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10381
10382 printf("zone_ro_basic_test: zalloc an element\n");
10383 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10384 assert(test_ptr);
10385
10386 printf("zone_ro_basic_test: verify we can't write to it\n");
10387 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10388
10389 x = 4;
10390 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10391 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10392 assert(test_ptr);
10393 assert(*(uint32_t*)test_ptr == x);
10394
10395 x = 5;
10396 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10397 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10398 assert(test_ptr);
10399 assert(*(uint32_t*)test_ptr == x);
10400
10401 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10402 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10403
10404 printf("zone_ro_basic_test: free elem\n");
10405 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10406 assert(!test_ptr);
10407 #else
10408 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10409
10410 printf("zone_ro_basic_test: verify flags were set\n");
10411 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10412
10413 printf("zone_ro_basic_test: zalloc an element\n");
10414 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10415 assert(test_ptr);
10416
10417 x = 4;
10418 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10419 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10420 assert(test_ptr);
10421 assert(*(uint32_t*)test_ptr == x);
10422
10423 x = 5;
10424 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10425 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10426 assert(test_ptr);
10427 assert(*(uint32_t*)test_ptr == x);
10428
10429 printf("zone_ro_basic_test: free elem\n");
10430 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10431 assert(!test_ptr);
10432 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10433 }
10434
10435 printf("zone_ro_basic_test: garbage collection\n");
10436 zone_gc(ZONE_GC_DRAIN);
10437
10438 printf("zone_ro_basic_test: Test passed\n");
10439
10440 *out = 1;
10441 os_atomic_store(&any_zone_test_running, false, relaxed);
10442 return 0;
10443 }
10444 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10445
10446 static int
10447 zone_basic_test_run(__unused int64_t in, int64_t *out)
10448 {
10449 static zone_t test_zone_ptr = NULL;
10450
10451 unsigned int i = 0, max_iter = 5;
10452 void * test_ptr;
10453 zone_t test_zone;
10454 int rc = 0;
10455
10456 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10457 printf("zone_basic_test: Test already running.\n");
10458 return EALREADY;
10459 }
10460
10461 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10462
10463 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10464 do {
10465 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10466 assert(test_zone);
10467
10468 #if KASAN_CLASSIC
10469 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10470 #else
10471 if (test_zone->z_elems_free != 0)
10472 #endif
10473 {
10474 printf("zone_basic_test: free count is not zero\n");
10475 rc = EIO;
10476 goto out;
10477 }
10478
10479 if (test_zone_ptr == NULL) {
10480 /* Stash the zone pointer returned on the first zinit */
10481 printf("zone_basic_test: zone created for the first time\n");
10482 test_zone_ptr = test_zone;
10483 } else if (test_zone != test_zone_ptr) {
10484 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10485 rc = EIO;
10486 goto out;
10487 }
10488
10489 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10490 zfree(test_zone, test_ptr);
10491
10492 zdestroy(test_zone);
10493 i++;
10494
10495 printf("zone_basic_test: Iteration %d successful\n", i);
10496 } while (i < max_iter);
10497
10498 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
10499 /* test Z_VA_SEQUESTER */
10500 {
10501 zone_t test_pcpu_zone;
10502 kern_return_t kr;
10503 int idx, num_allocs = 8;
10504 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10505 void *allocs[num_allocs];
10506 void **allocs_pcpu;
10507 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10508
10509 test_zone = zone_create("test_zone_sysctl", elem_size,
10510 ZC_DESTRUCTIBLE);
10511 assert(test_zone);
10512
10513 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10514 ZC_DESTRUCTIBLE | ZC_PERCPU);
10515 assert(test_pcpu_zone);
10516
10517 for (idx = 0; idx < num_allocs; idx++) {
10518 allocs[idx] = zalloc(test_zone);
10519 assert(NULL != allocs[idx]);
10520 printf("alloc[%d] %p\n", idx, allocs[idx]);
10521 }
10522 for (idx = 0; idx < num_allocs; idx++) {
10523 zfree(test_zone, allocs[idx]);
10524 }
10525 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10526
10527 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10528 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10529 assert(kr == KERN_SUCCESS);
10530
10531 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10532 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10533 Z_WAITOK | Z_ZERO);
10534 assert(NULL != allocs_pcpu[idx]);
10535 }
10536 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10537 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10538 }
10539 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10540
10541 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10542 vm_page_wire_count, vm_page_free_count,
10543 100L * phys_pages / zone_pages_wired_max);
10544 zone_gc(ZONE_GC_DRAIN);
10545 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10546 vm_page_wire_count, vm_page_free_count,
10547 100L * phys_pages / zone_pages_wired_max);
10548
10549 unsigned int allva = 0;
10550
10551 zone_foreach(z) {
10552 zone_lock(z);
10553 allva += z->z_wired_cur;
10554 if (zone_pva_is_null(z->z_pageq_va)) {
10555 zone_unlock(z);
10556 continue;
10557 }
10558 unsigned count = 0;
10559 uint64_t size;
10560 zone_pva_t pg = z->z_pageq_va;
10561 struct zone_page_metadata *page_meta;
10562 while (pg.packed_address) {
10563 page_meta = zone_pva_to_meta(pg);
10564 count += z->z_percpu ? 1 : z->z_chunk_pages;
10565 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10566 count -= page_meta->zm_page_index;
10567 }
10568 pg = page_meta->zm_page_next;
10569 }
10570 size = zone_size_wired(z);
10571 if (!size) {
10572 size = 1;
10573 }
10574 printf("%s%s: seq %d, res %d, %qd %%\n",
10575 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10576 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10577 zone_unlock(z);
10578 }
10579
10580 printf("total va: %d\n", allva);
10581
10582 assert(zone_pva_is_null(test_zone->z_pageq_empty));
10583 assert(zone_pva_is_null(test_zone->z_pageq_partial));
10584 assert(!zone_pva_is_null(test_zone->z_pageq_va));
10585 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10586 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10587 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10588
10589 for (idx = 0; idx < num_allocs; idx++) {
10590 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10591 }
10592
10593 /* make sure the zone is still usable after a GC */
10594
10595 for (idx = 0; idx < num_allocs; idx++) {
10596 allocs[idx] = zalloc(test_zone);
10597 assert(allocs[idx]);
10598 printf("alloc[%d] %p\n", idx, allocs[idx]);
10599 }
10600 for (idx = 0; idx < num_allocs; idx++) {
10601 zfree(test_zone, allocs[idx]);
10602 }
10603
10604 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10605 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10606 Z_WAITOK | Z_ZERO);
10607 assert(NULL != allocs_pcpu[idx]);
10608 }
10609 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10610 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10611 }
10612
10613 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10614
10615 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10616
10617 zdestroy(test_zone);
10618 zdestroy(test_pcpu_zone);
10619 }
10620 #endif /* KASAN_CLASSIC */
10621
10622 printf("zone_basic_test: Test passed\n");
10623
10624
10625 *out = 1;
10626 out:
10627 os_atomic_store(&any_zone_test_running, false, relaxed);
10628 return rc;
10629 }
10630 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
10631
10632 struct zone_stress_obj {
10633 TAILQ_ENTRY(zone_stress_obj) zso_link;
10634 };
10635
10636 struct zone_stress_ctx {
10637 thread_t zsc_leader;
10638 lck_mtx_t zsc_lock;
10639 zone_t zsc_zone;
10640 uint64_t zsc_end;
10641 uint32_t zsc_workers;
10642 };
10643
10644 static void
10645 zone_stress_worker(void *arg, wait_result_t __unused wr)
10646 {
10647 struct zone_stress_ctx *ctx = arg;
10648 bool leader = ctx->zsc_leader == current_thread();
10649 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10650 struct zone_bool_gen bg = { };
10651 struct zone_stress_obj *obj;
10652 uint32_t allocs = 0;
10653
10654 random_bool_init(&bg.zbg_bg);
10655
10656 do {
10657 for (int i = 0; i < 2000; i++) {
10658 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10659 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10660 switch (what) {
10661 case 0:
10662 case 1:
10663 if (allocs < 10000) {
10664 obj = zalloc(ctx->zsc_zone);
10665 TAILQ_INSERT_HEAD(&head, obj, zso_link);
10666 allocs++;
10667 }
10668 break;
10669 case 2:
10670 case 3:
10671 if (allocs < 10000) {
10672 obj = zalloc(ctx->zsc_zone);
10673 TAILQ_INSERT_TAIL(&head, obj, zso_link);
10674 allocs++;
10675 }
10676 break;
10677 case 4:
10678 if (leader) {
10679 zone_gc(ZONE_GC_DRAIN);
10680 }
10681 break;
10682 case 5:
10683 case 6:
10684 if (!TAILQ_EMPTY(&head)) {
10685 obj = TAILQ_FIRST(&head);
10686 TAILQ_REMOVE(&head, obj, zso_link);
10687 zfree(ctx->zsc_zone, obj);
10688 allocs--;
10689 }
10690 break;
10691 case 7:
10692 if (!TAILQ_EMPTY(&head)) {
10693 obj = TAILQ_LAST(&head, zone_stress_head);
10694 TAILQ_REMOVE(&head, obj, zso_link);
10695 zfree(ctx->zsc_zone, obj);
10696 allocs--;
10697 }
10698 break;
10699 }
10700 }
10701 } while (mach_absolute_time() < ctx->zsc_end);
10702
10703 while (!TAILQ_EMPTY(&head)) {
10704 obj = TAILQ_FIRST(&head);
10705 TAILQ_REMOVE(&head, obj, zso_link);
10706 zfree(ctx->zsc_zone, obj);
10707 }
10708
10709 lck_mtx_lock(&ctx->zsc_lock);
10710 if (--ctx->zsc_workers == 0) {
10711 thread_wakeup(ctx);
10712 } else if (leader) {
10713 while (ctx->zsc_workers) {
10714 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10715 THREAD_UNINT);
10716 }
10717 }
10718 lck_mtx_unlock(&ctx->zsc_lock);
10719
10720 if (!leader) {
10721 thread_terminate_self();
10722 __builtin_unreachable();
10723 }
10724 }
10725
10726 static int
10727 zone_stress_test_run(__unused int64_t in, int64_t *out)
10728 {
10729 struct zone_stress_ctx ctx = {
10730 .zsc_leader = current_thread(),
10731 .zsc_workers = 3,
10732 };
10733 kern_return_t kr;
10734 thread_t th;
10735
10736 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10737 printf("zone_stress_test: Test already running.\n");
10738 return EALREADY;
10739 }
10740
10741 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10742 ctx.zsc_zone = zone_create("test_zone_344", 344,
10743 ZC_DESTRUCTIBLE | ZC_NOCACHING);
10744 assert(ctx.zsc_zone->z_chunk_pages > 1);
10745
10746 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
10747
10748 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
10749
10750 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10751
10752 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
10753 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
10754 BASEPRI_DEFAULT, &th);
10755 if (kr == KERN_SUCCESS) {
10756 printf("zone_stress_test: thread %d: %p\n", i, th);
10757 thread_deallocate(th);
10758 } else {
10759 ctx.zsc_workers--;
10760 }
10761 }
10762
10763 zone_stress_worker(&ctx, 0);
10764
10765 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
10766
10767 zdestroy(ctx.zsc_zone);
10768
10769 printf("zone_stress_test: Done\n");
10770
10771 *out = 1;
10772 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10773 os_atomic_store(&any_zone_test_running, false, relaxed);
10774 return 0;
10775 }
10776 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
10777
10778 /*
10779 * Routines to test that zone garbage collection and zone replenish threads
10780 * running at the same time don't cause problems.
10781 */
10782
10783 static int
10784 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
10785 {
10786 zone_gc(ZONE_GC_DRAIN);
10787 *out = 1;
10788 return 0;
10789 }
10790 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
10791
10792 static int
10793 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
10794 {
10795 zone_t z = vm_map_entry_zone;
10796 struct data { struct data *next; } *node, *list = NULL;
10797
10798 if (z == NULL) {
10799 printf("Couldn't find a replenish zone\n");
10800 return EIO;
10801 }
10802
10803 /* big enough to go past replenishment */
10804 for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
10805 node = zalloc(z);
10806 node->next = list;
10807 list = node;
10808 }
10809
10810 /*
10811 * release the memory we allocated
10812 */
10813 while (list != NULL) {
10814 node = list;
10815 list = list->next;
10816 zfree(z, node);
10817 }
10818
10819 *out = 1;
10820 return 0;
10821 }
10822 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
10823
10824 #endif /* DEBUG || DEVELOPMENT */
10825