1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97
98 #include <prng/random.h>
99
100 #include <vm/pmap.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_memtag.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_page.h>
105 #include <vm/vm_pageout.h>
106 #include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */
107
108 #include <pexpert/pexpert.h>
109
110 #include <machine/machparam.h>
111 #include <machine/machine_routines.h> /* ml_cpu_get_info */
112
113 #include <os/atomic.h>
114
115 #include <libkern/OSDebug.h>
116 #include <libkern/OSAtomic.h>
117 #include <libkern/section_keywords.h>
118 #include <sys/kdebug.h>
119 #include <sys/code_signing.h>
120
121 #include <san/kasan.h>
122 #include <libsa/stdlib.h>
123 #include <sys/errno.h>
124
125 #include <IOKit/IOBSD.h>
126 #include <arm64/amcc_rorgn.h>
127
128 #if DEBUG
129 #define z_debug_assert(expr) assert(expr)
130 #else
131 #define z_debug_assert(expr) (void)(expr)
132 #endif
133
134 /* Returns pid of the task with the largest number of VM map entries. */
135 extern pid_t find_largest_process_vm_map_entries(void);
136
137 /*
138 * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
139 * For any other pid we try to kill that process synchronously.
140 */
141 extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);
142
143 extern zone_t vm_object_zone;
144 extern zone_t ipc_service_port_label_zone;
145
146 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
147 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
148
149 #if CONFIG_KERNEL_TAGGING
150 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
151 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
152 #else /* CONFIG_KERNEL_TAGGING */
153 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
154 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
155 #endif /* CONFIG_KERNEL_TAGGING */
156
157 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
158 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
159 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
160 #define ZONE_GUARD_DENSE (32 * 1024)
161 #define ZONE_GUARD_SPARSE (64 * 1024)
162 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
163
164 #if XNU_PLATFORM_MacOSX
165 #define ZONE_MAP_MAX (32ULL << 30)
166 #define ZONE_MAP_VA_SIZE (128ULL << 30)
167 #else /* XNU_PLATFORM_MacOSX */
168 #define ZONE_MAP_MAX (8ULL << 30)
169 #define ZONE_MAP_VA_SIZE (24ULL << 30)
170 #endif /* !XNU_PLATFORM_MacOSX */
171
172 __enum_closed_decl(zm_len_t, uint16_t, {
173 ZM_CHUNK_FREE = 0x0,
174 /* 1 through 8 are valid lengths */
175 ZM_CHUNK_LEN_MAX = 0x8,
176
177 /* PGZ magical values */
178 ZM_PGZ_FREE = 0x0,
179 ZM_PGZ_ALLOCATED = 0xa, /* [a]llocated */
180 ZM_PGZ_GUARD = 0xb, /* oo[b] */
181 ZM_PGZ_DOUBLE_FREE = 0xd, /* [d]ouble_free */
182
183 /* secondary page markers */
184 ZM_SECONDARY_PAGE = 0xe,
185 ZM_SECONDARY_PCPU_PAGE = 0xf,
186 });
187
188 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
189
190 struct zone_page_metadata {
191 union {
192 struct {
193 /* The index of the zone this metadata page belongs to */
194 zone_id_t zm_index : 10;
195
196 /*
197 * This chunk ends with a guard page.
198 */
199 uint16_t zm_guarded : 1;
200
201 /*
202 * Whether `zm_bitmap` is an inline bitmap
203 * or a packed bitmap reference
204 */
205 uint16_t zm_inline_bitmap : 1;
206
207 /*
208 * Zones allocate in "chunks" of zone_t::z_chunk_pages
209 * consecutive pages, or zpercpu_count() pages if the
210 * zone is percpu.
211 *
212 * The first page of it has its metadata set with:
213 * - 0 if none of the pages are currently wired
214 * - the number of wired pages in the chunk
215 * (not scaled for percpu).
216 *
217 * Other pages in the chunk have their zm_chunk_len set
218 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
219 * depending on whether the zone is percpu or not.
220 * For those, zm_page_index holds the index of that page
221 * in the run, and zm_subchunk_len the remaining length
222 * within the chunk.
223 *
224 * Metadata used for PGZ pages can have 3 values:
225 * - ZM_PGZ_FREE: slot is free
226 * - ZM_PGZ_ALLOCATED: slot holds an allocated element
227 * at offset (zm_pgz_orig_addr & PAGE_MASK)
228 * - ZM_PGZ_DOUBLE_FREE: slot detected a double free
229 * (will panic).
230 */
231 zm_len_t zm_chunk_len : 4;
232 };
233 uint16_t zm_bits;
234 };
235
236 union {
237 #define ZM_ALLOC_SIZE_LOCK 1u
238 uint16_t zm_alloc_size; /* first page only */
239 struct {
240 uint8_t zm_page_index; /* secondary pages only */
241 uint8_t zm_subchunk_len; /* secondary pages only */
242 };
243 uint16_t zm_oob_offs; /* in guard pages */
244 };
245 union {
246 uint32_t zm_bitmap; /* most zones */
247 uint32_t zm_bump; /* permanent zones */
248 };
249
250 union {
251 struct {
252 zone_pva_t zm_page_next;
253 zone_pva_t zm_page_prev;
254 };
255 vm_offset_t zm_pgz_orig_addr;
256 struct zone_page_metadata *zm_pgz_slot_next;
257 };
258 };
259 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
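/*
 * Illustrative sketch (not part of the implementation): how the fields above
 * would be set for a hypothetical 3-page chunk of a non-percpu zone.
 *
 *	meta[0].zm_chunk_len    = 3;                 // wired pages in the chunk
 *	meta[1].zm_chunk_len    = ZM_SECONDARY_PAGE;
 *	meta[1].zm_page_index   = 1;                 // position within the run
 *	meta[1].zm_subchunk_len = 2;                 // remaining length in the chunk
 *	meta[2].zm_chunk_len    = ZM_SECONDARY_PAGE;
 *	meta[2].zm_page_index   = 2;
 *	meta[2].zm_subchunk_len = 1;
 */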
260
261 /*!
262 * @typedef zone_magazine_t
263 *
264 * @brief
265 * Magazine of cached allocations.
266 *
267 * @field zm_next linkage used by magazine depots.
268 * @field zm_elems an array of @c zc_mag_size() elements.
269 */
270 struct zone_magazine {
271 zone_magazine_t zm_next;
272 smr_seq_t zm_seq;
273 vm_offset_t zm_elems[0];
274 };
275
276 /*!
277 * @typedef zone_cache_t
278 *
279 * @brief
280  * Per-CPU layer of the zone caching system.
281 *
282 * @discussion
283 * Below is a diagram of the caching system. This design is inspired by the
284 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
285 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
286 * zone allocator (itself derived from this seminal work).
287 *
288 * It is divided into 3 layers:
289 * - the per-cpu layer,
290 * - the recirculation depot layer,
291 * - the Zone Allocator.
292 *
293 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
294 * which are stacks of up to @c zc_mag_size() elements.
295 *
296 * <h2>CPU layer</h2>
297 *
298 * The CPU layer (@c zone_cache_t) looks like this:
299 *
300 * ╭─ a ─ f ─┬───────── zm_depot ──────────╮
301 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
302 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
303 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
304 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
305 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
306 * ╰─────────┴─────────────────────────────╯
307 *
308 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
309 * or free to. Serialization is achieved through disabling preemption, and only
310  * the current CPU can access those allocations. This is represented on the left
311 * hand side of the diagram above.
312 *
313 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
314 * full magazines, and is protected by the @c zm_depot_lock for access.
315 * The lock is expected to absolutely never be contended, as only the local CPU
316 * tends to access the local per-cpu depot in regular operation mode.
317 *
318 * However unlike UMA, our implementation allows for the zone GC to reclaim
319  * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
320 *
321 *
322 * <h2>Recirculation Depot</h2>
323 *
324 * The recirculation depot layer is a list similar to the per-cpu depot,
325 * however it is different in two fundamental ways:
326 *
327 * - it is protected by the regular zone lock,
328 * - elements referenced by the magazines in that layer appear free
329 * to the zone layer.
330 *
331 *
332 * <h2>Magazine circulation and sizing</h2>
333 *
334 * The caching system sizes itself dynamically. Operations that allocate/free
335 * a single element call @c zone_lock_nopreempt_check_contention() which records
336 * contention on the lock by doing a trylock and recording its success.
337 *
338 * This information is stored in the @c z_recirc_cont_cur field of the zone,
339 * and a windowed moving average is maintained in @c z_contention_wma.
340 * The periodically run function @c compute_zone_working_set_size() will then
341 * take this into account to decide to grow the number of buckets allowed
342 * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
343 * thresholds.
344 *
345 * The per-cpu layer will attempt to work with its depot, finding both full and
346 * empty magazines cached there. If it can't get what it needs, then it will
347 * mediate with the zone recirculation layer. Such recirculation is done in
348 * batches in order to amortize lock holds.
349 * (See @c {zalloc,zfree}_cached_depot_recirculate()).
350 *
351  * The recirculation layer keeps track of the minimum number of magazines
352  * it held over time for each of the full and empty queues. This allows for
353 * @c compute_zone_working_set_size() to return memory to the system when a zone
354 * stops being used as much.
355 *
356 * <h2>Security considerations</h2>
357 *
358 * The zone caching layer has been designed to avoid returning elements in
359 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
360 * and @c zfree() free to the (f) magazine, and only swap them when the
361 * requested operation cannot be fulfilled.
362 *
363 * The per-cpu overflow depot or the recirculation depots are similarly used
364 * in FIFO order.
365 *
366 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
367 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
368 * @field zc_free_cur denormalized number of elements in the (f) magazine
369 * @field zc_alloc_elems a pointer to the array of elements in (a)
370 * @field zc_free_elems a pointer to the array of elements in (f)
371 *
372 * @field zc_depot a list of @c zc_depot_cur full magazines
373 */
374 typedef struct zone_cache {
375 hw_lck_ticket_t zc_depot_lock;
376 uint16_t zc_alloc_cur;
377 uint16_t zc_free_cur;
378 vm_offset_t *zc_alloc_elems;
379 vm_offset_t *zc_free_elems;
380 struct zone_depot zc_depot;
381 smr_t zc_smr;
382 zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
383 } __attribute__((aligned(64))) * zone_cache_t;
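/*
 * Conceptual sketch of the zalloc() fast path described above (simplified
 * pseudo-code, not the actual implementation):
 *
 *	disable_preemption();
 *	zone_cache_t cache = <this CPU's cache for the zone>;
 *	if (cache->zc_alloc_cur == 0) {
 *		if (cache->zc_free_cur != 0)
 *			<swap the (a) and (f) magazines>;
 *		else
 *			<refill from zc_depot, then the recirculation depot>;
 *	}
 *	elem = cache->zc_alloc_elems[--cache->zc_alloc_cur];
 *	enable_preemption();
 */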
384
385 #if !__x86_64__
386 static
387 #endif
388 __security_const_late struct {
389 struct mach_vm_range zi_map_range; /* all zone submaps */
390 struct mach_vm_range zi_ro_range; /* read-only range */
391 struct mach_vm_range zi_meta_range; /* debugging only */
392 struct mach_vm_range zi_bits_range; /* bits buddy allocator */
393 struct mach_vm_range zi_xtra_range; /* vm tracking metadata */
394 struct mach_vm_range zi_pgz_range;
395 struct zone_page_metadata *zi_pgz_meta;
396
397 /*
398 * The metadata lives within the zi_meta_range address range.
399 *
400 * The correct formula to find a metadata index is:
401 * absolute_page_index - page_index(zi_map_range.min_address)
402 *
403 * And then this index is used to dereference zi_meta_range.min_address
404 * as a `struct zone_page_metadata` array.
405 *
406  * To avoid doing that subtraction in the various fast-paths,
407  * zi_meta_base is pre-offset with that minimum page index so that
408  * the fast-paths never have to redo that math.
409 */
410 struct zone_page_metadata *zi_meta_base;
411 } zone_info;
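/*
 * Worked example of the pre-offset described above (a sketch, equivalent to
 * what zone_meta_from_addr() below does):
 *
 *	idx  = atop(addr) - atop(zone_info.zi_map_range.min_address);
 *	meta = (struct zone_page_metadata *)zone_info.zi_meta_range.min_address + idx;
 *
 * collapses, because zi_meta_base is pre-offset by that minimum page index, to:
 *
 *	meta = &zone_info.zi_meta_base[atop(addr)];
 *
 * (zone_pva_from_addr() additionally preserves the sign bit of the address.)
 */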
412
413 __startup_data static struct mach_vm_range zone_map_range;
414 __startup_data static vm_map_size_t zone_meta_size;
415 __startup_data static vm_map_size_t zone_bits_size;
416 __startup_data static vm_map_size_t zone_xtra_size;
417
418 /*
419 * Initial array of metadata for stolen memory.
420 *
421 * The numbers here have to be kept in sync with vm_map_steal_memory()
422 * so that we have reserved enough metadata.
423 *
424 * After zone_init() has run (which happens while the kernel is still single
425 * threaded), the metadata is moved to its final dynamic location, and
426 * this array is unmapped with the rest of __startup_data at lockdown.
427 */
428 #define ZONE_EARLY_META_INLINE_COUNT 64
429 __startup_data
430 static struct zone_page_metadata
431 zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
432
433 #if __x86_64__
434 /*
435 * On Intel we can't "free" pmap stolen pages,
436 * so instead we use a static array in __KLDDATA
437 * which gets reclaimed at lockdown time.
438 */
439 __startup_data __attribute__((aligned(PAGE_SIZE)))
440 static uint8_t zone_early_pages_to_cram[PAGE_SIZE * 16];
441 #endif
442
443 /*
444 * The zone_locks_grp allows for collecting lock statistics.
445  * All locks are associated with this group in zinit.
446 * Look at tools/lockstat for debugging lock contention.
447 */
448 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
449 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
450
451 /*
452 * The zone metadata lock protects:
453 * - metadata faulting,
454 * - VM submap VA allocations,
455 * - early gap page queue list
456 */
457 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
458 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
459
460 /*
461 * Exclude more than one concurrent garbage collection
462 */
463 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
464 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
465 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
466
467 /*
468 * Panic logging metadata
469 */
470 bool panic_include_zprint = false;
471 bool panic_include_kalloc_types = false;
472 zone_t kalloc_type_src_zone = ZONE_NULL;
473 zone_t kalloc_type_dst_zone = ZONE_NULL;
474 mach_memory_info_t *panic_kext_memory_info = NULL;
475 vm_size_t panic_kext_memory_size = 0;
476 vm_offset_t panic_fault_address = 0;
477
478 /*
479 * Protects zone_array, num_zones, num_zones_in_use, and
480 * zone_destroyed_bitmap
481 */
482 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
483 static zone_id_t num_zones_in_use;
484 zone_id_t _Atomic num_zones;
485 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
486
487 /*
488 * Initial globals for zone stats until we can allocate the real ones.
489 * Those get migrated inside the per-CPU ones during zone_init() and
490 * this array is unmapped with the rest of __startup_data at lockdown.
491 */
492
493 /* zone to allocate zone_magazine structs from */
494 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
495 /*
496  * Zone caching is off until pid 1 is made, i.e. until
497  * compute_zone_working_set_size() runs for the first time.
498 *
499 * -1 represents the "never enabled yet" value.
500 */
501 static int8_t zone_caching_disabled = -1;
502
503 __startup_data
504 static struct zone_stats zone_stats_startup[MAX_ZONES];
505 struct zone zone_array[MAX_ZONES];
506 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
507 [0 ... MAX_ZONES - 1] = {
508 .z_kheap_id = KHEAP_ID_NONE,
509 .z_noencrypt = false,
510 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
511 .z_kalloc_type = false,
512 .z_sig_eq = 0
513 },
514 };
515 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
516 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
517
518 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
519 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
520
521 /* Used to keep track of destroyed slots in the zone_array */
522 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
523
524 /* number of zone mapped pages used by all zones */
525 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
526 size_t zone_pages_wired;
527 size_t zone_guard_pages;
528
529 /* Time (in ms) after which we panic for zone exhaustions */
530 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
531 static bool zone_share_always = true;
532 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
533
534 #if VM_TAG_SIZECLASSES
535 /*
536 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
537 * zones only.
538 *
539 * There are 3 kinds of tags that can be used:
540 * - pre-registered VM_KERN_MEMORY_*
541 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
542 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
543 *
544 * The VM tracks the statistics in lazily allocated structures.
545 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
546 *
547 * If for some reason the requested tag cannot be accounted for,
548 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
549 *
550 * Each allocated element also remembers the tag it was assigned,
551 * which lets zalloc/zfree update statistics correctly.
552 */
553
554 /* enable tags for zones that ask for it */
555 static TUNABLE(bool, zone_tagging_on, "-zt", false);
556
557 /*
558 * Array of all sizeclasses used by kalloc variants so that we can
559 * have accounting per size class for each kalloc callsite
560 */
561 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
562 #endif /* VM_TAG_SIZECLASSES */
563
564 #if DEBUG || DEVELOPMENT
565 static int zalloc_simulate_vm_pressure;
566 #endif /* DEBUG || DEVELOPMENT */
567
568 #define Z_TUNABLE(t, n, d) \
569 TUNABLE(t, _##n, #n, d); \
570 __pure2 static inline t n(void) { return _##n; }
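/*
 * For example, Z_TUNABLE(uint16_t, zc_mag_size, 8) below expands to (roughly):
 *
 *	TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *	__pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 */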
571
572 /*
573 * Zone caching tunables
574 *
575 * zc_mag_size():
576 * size of magazines, larger to reduce contention at the expense of memory
577 *
578 * zc_enable_level
579 * number of contentions per second after which zone caching engages
580 * automatically.
581 *
582 * 0 to disable.
583 *
584 * zc_grow_level
585 * number of contentions per second x cpu after which the number of magazines
586 * allowed in the depot can grow. (in "Z_WMA_UNIT" units).
587 *
588 * zc_shrink_level
589 * number of contentions per second x cpu below which the number of magazines
590 * allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
591 *
592 * zc_pcpu_max
593 * maximum memory size in bytes that can hang from a CPU,
594 * which will affect how many magazines are allowed in the depot.
595 *
596 * The alloc/free magazines are assumed to be on average half-empty
597 * and to count for "1" unit of magazines.
598 *
599 * zc_autotrim_size
600 * Size allowed to hang extra from the recirculation depot before
601 * auto-trim kicks in.
602 *
603 * zc_autotrim_buckets
604 *
605 * How many buckets in excess of the working-set are allowed
606 * before auto-trim kicks in for empty buckets.
607 *
608 * zc_free_batch_size
609 * The size of batches of frees/reclaim that can be done keeping
610 * the zone lock held (and preemption disabled).
611 */
612 Z_TUNABLE(uint16_t, zc_mag_size, 8);
613 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
614 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
615 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
616 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
617 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
618 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
619 static Z_TUNABLE(uint32_t, zc_free_batch_size, 256);
620
621 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
622 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
623 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
624 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
625 [Z_SUBMAP_IDX_VM] = "VM",
626 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
627 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
628 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
629 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
630 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
631 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
632 #else
633 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
634 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
635 [Z_SUBMAP_IDX_DATA] = "DATA",
636 };
637
638 #if __x86_64__
639 #define ZONE_ENTROPY_CNT 8
640 #else
641 #define ZONE_ENTROPY_CNT 2
642 #endif
643 static struct zone_bool_gen {
644 struct bool_gen zbg_bg;
645 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
646 } zone_bool_gen[MAX_CPUS];
647
648 #if CONFIG_PROB_GZALLOC
649 /*
650 * Probabilistic gzalloc
651 * =====================
652 *
653 *
654 * Probabilistic guard zalloc samples allocations and will protect them by
655 * double-mapping the page holding them and returning the secondary virtual
656 * address to its callers.
657 *
658 * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
659 * are set.
660 *
661 *
662 * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
663 * most zalloc/kalloc features:
664 * - zone_require is functional
665 * - zone caching or zone tagging is compatible
666  * - non-blocking allocations work (they will always return NULL with gzalloc).
667 *
668 * PGZ limitations:
669 * - VA sequestering isn't respected, as the slots (which are in limited
670 * quantity) will be reused for any type, however the PGZ quarantine
671 * somewhat mitigates the impact.
672 * - zones with elements larger than a page cannot be protected.
673 *
674 *
675 * Tunables:
676 * --------
677 *
678 * pgz=1:
679 * Turn on probabilistic guard malloc for all zones
680 *
681 * (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
682 *
683 * pgz_sample_rate=0 to 2^31
684 * average sample rate between two guarded allocations.
685 * 0 means every allocation.
686 *
687 * The default is a random number between 1000 and 10,000
688 *
689 * pgz_slots
690 * how many allocations to protect.
691 *
692 * Each costs:
693 * - a PTE in the pmap (when allocated)
694  * - 2 zone page metadata structs (every other page is a "guard" one, 32B total)
695  * - 64 bytes per backtrace.
696 * On LP64 this is <16K per 100 slots.
697 *
698 * The default is ~200 slots per G of physical ram (32k / G)
699 *
700 * TODO:
701 * - try harder to allocate elements at the "end" to catch OOB more reliably.
702 *
703 * pgz_quarantine
704 * how many slots should be free at any given time.
705 *
706 * PGZ will round robin through free slots to be reused, but free slots are
707 * important to detect use-after-free by acting as a quarantine.
708 *
709 * By default, PGZ will keep 33% of the slots around at all time.
710 *
711 * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
712 * Specific zones for which to enable probabilistic guard malloc.
713 * There must be no numbering gap (names after the gap will be ignored).
714 */
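/*
 * Example boot-args combination (the values are hypothetical, see the
 * tunables documented above):
 *
 *	pgz=1 pgz_sample_rate=1000 pgz_slots=1500 pgz_quarantine=500
 */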
715 #if DEBUG || DEVELOPMENT
716 static TUNABLE(bool, pgz_all, "pgz", true);
717 #else
718 static TUNABLE(bool, pgz_all, "pgz", false);
719 #endif
720 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
721 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
722 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
723 #endif /* CONFIG_PROB_GZALLOC */
724
725 static zone_t zone_find_largest(uint64_t *zone_size);
726
727 #endif /* !ZALLOC_TEST */
728 #pragma mark Zone metadata
729 #if !ZALLOC_TEST
730
731 static inline bool
732 zone_has_index(zone_t z, zone_id_t zid)
733 {
734 return zone_array + zid == z;
735 }
736
737 __abortlike
738 void
739 zone_invalid_panic(zone_t zone)
740 {
741 panic("zone %p isn't in the zone_array", zone);
742 }
743
744 __abortlike
745 static void
746 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
747 const char *kind)
748 {
749 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
750 kind, meta, zone_heap_name(zone), zone->z_name);
751 }
752
753 __abortlike
754 static void
755 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
756 {
757 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
758 (void *)addr, zone_heap_name(zone), zone->z_name);
759 }
760
761 __abortlike
762 static void
763 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
764 struct zone_page_metadata *meta)
765 {
766 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
767 zone_id_t zidx;
768 zone_t src_zone;
769
770 if (zsflags.z_kalloc_type) {
771 panic_include_kalloc_types = true;
772 kalloc_type_dst_zone = zone;
773 }
774
775 zidx = meta->zm_index;
776 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
777 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
778 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
779 zidx);
780 }
781
782 src_zone = &zone_array[zidx];
783 src_zsflags = zone_security_array[zidx];
784 if (src_zsflags.z_kalloc_type) {
785 panic_include_kalloc_types = true;
786 kalloc_type_src_zone = src_zone;
787 }
788
789 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
790 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
791 zone_heap_name(src_zone), src_zone->z_name, zidx);
792 }
793
794 __abortlike
795 static void
796 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
797 {
798 panic("metadata list corruption through element %p detected in zone %s%s",
799 meta, zone_heap_name(zone), zone->z_name);
800 }
801
802 __abortlike
803 static void
804 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
805 const char *kind)
806 {
807 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
808 zone_heap_name(zone), zone->z_name, meta);
809 }
810
811 __abortlike
812 static void
813 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
814 {
815 panic("%s: double free of %p to zone %s%s", caller,
816 (void *)addr, zone_heap_name(zone), zone->z_name);
817 }
818
819 __abortlike
820 static void
821 zone_accounting_panic(zone_t zone, const char *kind)
822 {
823 panic("accounting mismatch (%s) for zone %s%s", kind,
824 zone_heap_name(zone), zone->z_name);
825 }
826
827 #define zone_counter_sub(z, stat, value) ({ \
828 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
829 zone_accounting_panic(z, #stat " wrap-around"); \
830 } \
831 (z)->stat; \
832 })
833
834 static inline uint16_t
835 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
836 vm_offset_t esize)
837 {
838 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
839 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
840 }
841 return m->zm_alloc_size;
842 }
843
844 static inline uint16_t
845 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
846 vm_offset_t esize)
847 {
848 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
849 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
850 }
851 return m->zm_alloc_size;
852 }
853
854 __abortlike
855 static void
856 zone_nofail_panic(zone_t zone)
857 {
858 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
859 zone_heap_name(zone), zone->z_name);
860 }
861
862 __header_always_inline bool
863 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
864 {
865 const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
866 struct mach_vm_range r = { addr_start, addr_end };
867
868 return mach_vm_range_intersects(ro_r, &r);
869 }
870
871 #define from_range(r, addr, size) \
872 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
873 mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
874 mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))
875
876 #define from_ro_map(addr, size) \
877 from_range(&zone_info.zi_ro_range, addr, size)
878
879 #define from_zone_map(addr, size) \
880 from_range(&zone_info.zi_map_range, addr, size)
881
882 __header_always_inline bool
883 zone_pva_is_null(zone_pva_t page)
884 {
885 return page.packed_address == 0;
886 }
887
888 __header_always_inline bool
889 zone_pva_is_queue(zone_pva_t page)
890 {
891 // actual kernel pages have the top bit set
892 return (int32_t)page.packed_address > 0;
893 }
894
895 __header_always_inline bool
896 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
897 {
898 return pva1.packed_address == pva2.packed_address;
899 }
900
901 __header_always_inline zone_pva_t *
902 zone_pageq_base(void)
903 {
904 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
905
906 /*
907 * `-1` so that if the first __DATA variable is a page queue,
908  * it gets a non-zero index
909 */
910 return data_seg_start - 1;
911 }
912
913 __header_always_inline void
914 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
915 struct zone_page_metadata *meta)
916 {
917 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
918
919 if (!zone_pva_is_equal(*queue_head, oldv)) {
920 zone_page_metadata_list_corruption(z, meta);
921 }
922 *queue_head = meta->zm_page_next;
923 }
924
925 __header_always_inline zone_pva_t
926 zone_queue_encode(zone_pva_t *headp)
927 {
928 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
929 }
930
931 __header_always_inline zone_pva_t
932 zone_pva_from_addr(vm_address_t addr)
933 {
934 // cannot use atop() because we want to maintain the sign bit
935 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
936 }
937
938 __header_always_inline vm_address_t
939 zone_pva_to_addr(zone_pva_t page)
940 {
941 // cause sign extension so that we end up with the right address
942 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
943 }
944
945 __header_always_inline struct zone_page_metadata *
946 zone_pva_to_meta(zone_pva_t page)
947 {
948 return &zone_info.zi_meta_base[page.packed_address];
949 }
950
951 __header_always_inline zone_pva_t
952 zone_pva_from_meta(struct zone_page_metadata *meta)
953 {
954 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
955 }
956
957 __header_always_inline struct zone_page_metadata *
958 zone_meta_from_addr(vm_offset_t addr)
959 {
960 return zone_pva_to_meta(zone_pva_from_addr(addr));
961 }
962
963 __header_always_inline zone_id_t
964 zone_index_from_ptr(const void *ptr)
965 {
966 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
967 }
968
969 __header_always_inline vm_offset_t
970 zone_meta_to_addr(struct zone_page_metadata *meta)
971 {
972 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
973 }
974
975 __attribute__((overloadable))
976 __header_always_inline void
977 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
978 {
979 if (!zone_has_index(z, meta->zm_index)) {
980 zone_page_metadata_index_confusion_panic(z, addr, meta);
981 }
982 }
983
984 __attribute__((overloadable))
985 __header_always_inline void
986 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
987 {
988 zone_meta_validate(z, meta, zone_meta_to_addr(meta));
989 }
990
991 __header_always_inline void
992 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
993 struct zone_page_metadata *meta)
994 {
995 zone_pva_t head = *headp;
996 zone_pva_t queue_pva = zone_queue_encode(headp);
997 struct zone_page_metadata *tmp;
998
999 meta->zm_page_next = head;
1000 if (!zone_pva_is_null(head)) {
1001 tmp = zone_pva_to_meta(head);
1002 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
1003 zone_page_metadata_list_corruption(z, meta);
1004 }
1005 tmp->zm_page_prev = zone_pva_from_meta(meta);
1006 }
1007 meta->zm_page_prev = queue_pva;
1008 *headp = zone_pva_from_meta(meta);
1009 }
1010
1011 __header_always_inline struct zone_page_metadata *
1012 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1013 {
1014 zone_pva_t head = *headp;
1015 struct zone_page_metadata *meta = zone_pva_to_meta(head);
1016 struct zone_page_metadata *tmp;
1017
1018 zone_meta_validate(z, meta);
1019
1020 if (!zone_pva_is_null(meta->zm_page_next)) {
1021 tmp = zone_pva_to_meta(meta->zm_page_next);
1022 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1023 zone_page_metadata_list_corruption(z, meta);
1024 }
1025 tmp->zm_page_prev = meta->zm_page_prev;
1026 }
1027 *headp = meta->zm_page_next;
1028
1029 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1030
1031 return meta;
1032 }
1033
1034 __header_always_inline void
1035 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1036 {
1037 zone_pva_t meta_pva = zone_pva_from_meta(meta);
1038 struct zone_page_metadata *tmp;
1039
1040 if (!zone_pva_is_null(meta->zm_page_next)) {
1041 tmp = zone_pva_to_meta(meta->zm_page_next);
1042 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1043 zone_page_metadata_list_corruption(z, meta);
1044 }
1045 tmp->zm_page_prev = meta->zm_page_prev;
1046 }
1047 if (zone_pva_is_queue(meta->zm_page_prev)) {
1048 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1049 } else {
1050 tmp = zone_pva_to_meta(meta->zm_page_prev);
1051 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1052 zone_page_metadata_list_corruption(z, meta);
1053 }
1054 tmp->zm_page_next = meta->zm_page_next;
1055 }
1056
1057 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1058 }
1059
1060 __header_always_inline void
1061 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1062 struct zone_page_metadata *meta)
1063 {
1064 zone_meta_remqueue(z, meta);
1065 zone_meta_queue_push(z, headp, meta);
1066 }
1067
1068 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1069 static inline void
1070 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1071 {
1072 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1073
1074 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1075 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1076 zone_meta_requeue(z, &z->z_pageq_partial, m);
1077 zone_counter_sub(z, z_wired_empty, len);
1078 }
1079 }
1080
1081 /* allows a given metadata to reach the z_pageq_empty queue again */
1082 static inline void
1083 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1084 {
1085 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1086
1087 assert(new_size % sizeof(vm_offset_t) == 0);
1088 if (new_size == 0) {
1089 zone_meta_requeue(z, &z->z_pageq_empty, m);
1090 z->z_wired_empty += len;
1091 }
1092 }
1093
1094 /*
1095 * Routine to populate a page backing metadata in the zone_metadata_region.
1096 * Must be called without the zone lock held as it might potentially block.
1097 */
1098 static void
1099 zone_meta_populate(vm_offset_t base, vm_size_t size)
1100 {
1101 struct zone_page_metadata *from = zone_meta_from_addr(base);
1102 struct zone_page_metadata *to = from + atop(size);
1103 vm_offset_t page_addr = trunc_page(from);
1104
1105 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1106 #if !KASAN
1107 /*
1108 * This can race with another thread doing a populate on the same metadata
1109 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1110 * fault in the shadow when we first access the metadata page. Avoid this
1111 * by always synchronizing on the zone_metadata_region lock with KASan.
1112 */
1113 if (pmap_find_phys(kernel_pmap, page_addr)) {
1114 continue;
1115 }
1116 #endif
1117
1118 for (;;) {
1119 kern_return_t ret = KERN_SUCCESS;
1120
1121 /*
1122 * All updates to the zone_metadata_region are done
1123 * under the zone_metadata_region_lck
1124 */
1125 zone_meta_lock();
1126 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1127 ret = kernel_memory_populate(page_addr,
1128 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1129 VM_KERN_MEMORY_OSFMK);
1130 }
1131 zone_meta_unlock();
1132
1133 if (ret == KERN_SUCCESS) {
1134 break;
1135 }
1136
1137 /*
1138 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1139 * to bad system deadlocks, so if the allocation failed,
1140 * we need to do the VM_PAGE_WAIT() outside of the lock.
1141 */
1142 VM_PAGE_WAIT();
1143 }
1144 }
1145 }
1146
1147 __abortlike
1148 static void
1149 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1150 {
1151 struct zone_page_metadata *meta;
1152 const char *from_cache = "";
1153 vm_offset_t page;
1154
1155 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1156 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1157 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1158 }
1159 page = trunc_page(addr);
1160 meta = zone_meta_from_addr(addr);
1161
1162 if (!zone_has_index(zone, meta->zm_index)) {
1163 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1164 }
1165
1166 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1167 panic("metadata %p corresponding to addr %p being freed to "
1168 "zone %s%s%s, is marked as secondary per cpu page",
1169 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1170 from_cache);
1171 }
1172 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1173 page -= ptoa(meta->zm_page_index);
1174 meta -= meta->zm_page_index;
1175 }
1176
1177 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1178 panic("metadata %p corresponding to addr %p being freed to "
1179 "zone %s%s%s, has chunk len greater than max",
1180 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1181 from_cache);
1182 }
1183
1184 if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1185 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1186 "zone element size", (void *)addr, zone_heap_name(zone),
1187 zone->z_name, from_cache);
1188 }
1189
1190 zone_invalid_element_addr_panic(zone, addr);
1191 }
1192
1193 __attribute__((always_inline))
1194 static struct zone_page_metadata *
1195 zone_element_resolve(
1196 zone_t zone,
1197 vm_offset_t addr,
1198 vm_offset_t *idx)
1199 {
1200 struct zone_page_metadata *meta;
1201 vm_offset_t offs, eidx;
1202
1203 meta = zone_meta_from_addr(addr);
1204 if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1205 zone_invalid_element_panic(zone, addr);
1206 }
1207
1208 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1209 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1210 offs += ptoa(meta->zm_page_index);
1211 meta -= meta->zm_page_index;
1212 }
1213
1214 eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1215 if (eidx * zone_elem_outer_size(zone) != offs) {
1216 zone_invalid_element_panic(zone, addr);
1217 }
1218
1219 *idx = eidx;
1220 return meta;
1221 }
1222
1223 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1224 void *
1225 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1226 {
1227 vm_offset_t addr = (vm_offset_t)ptr;
1228 vm_offset_t end = addr + elem_size;
1229 vm_offset_t offs;
1230
1231 /*
1232 * 0-sized allocations in a KALLOC_MINSIZE bucket
1233	 * would be offset to the next allocation, which is incorrect.
1234 */
1235 req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1236
1237 /*
1238 * Given how chunks work, for a zone with PGZ guards on,
1239 * there's a single element which ends precisely
1240 * at the page boundary: the last one.
1241 */
1242 if (req_size == elem_size ||
1243 (end & PAGE_MASK) ||
1244 !zone_meta_from_addr(addr)->zm_guarded) {
1245 return ptr;
1246 }
1247
1248 offs = elem_size - req_size;
1249 zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1250
1251 return (char *)addr + offs;
1252 }
1253 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1254
1255 __abortlike
1256 static void
1257 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1258 {
1259 struct zone_page_metadata *meta;
1260 vm_offset_t offs, size, page;
1261 zone_t zone;
1262
1263 page = trunc_page(addr);
1264 meta = zone_meta_from_addr(addr);
1265 zone = &zone_array[meta->zm_index];
1266
1267 if (zone->z_percpu) {
1268 panic("zone bound checks: address %p is a per-cpu allocation",
1269 (void *)addr);
1270 }
1271
1272 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1273 page -= ptoa(meta->zm_page_index);
1274 meta -= meta->zm_page_index;
1275 }
1276
1277 size = zone_elem_outer_size(zone);
1278 offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1279 zone->z_quo_magic, size);
1280 panic("zone bound checks: buffer %p of length %zd overflows "
1281 "object %p of size %zd in zone %p[%s%s]",
1282 (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1283 zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1284 }
1285
1286 void
1287 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1288 {
1289 struct zone_page_metadata *meta;
1290 vm_offset_t offs, size;
1291 zone_t zone;
1292
1293 if (!from_zone_map(addr, 1)) {
1294 return;
1295 }
1296
1297 #if CONFIG_PROB_GZALLOC
1298 if (__improbable(pgz_owned(addr))) {
1299 meta = zone_meta_from_addr(addr);
1300 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1301 }
1302 #endif /* CONFIG_PROB_GZALLOC */
1303 meta = zone_meta_from_addr(addr);
1304 zone = zone_by_id(meta->zm_index);
1305
1306 if (zone->z_percpu) {
1307 zone_element_bounds_check_panic(addr, len);
1308 }
1309
1310 if (zone->z_permanent) {
1311 /* We don't know bounds for those */
1312 return;
1313 }
1314
1315 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1316 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1317 offs += ptoa(meta->zm_page_index);
1318 }
1319 size = zone_elem_outer_size(zone);
1320 offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1321 if (len + zone_elem_redzone(zone) > size - offs) {
1322 zone_element_bounds_check_panic(addr, len);
1323 }
1324 }
1325
1326 /*
1327 * Routine to get the size of a zone allocated address.
1328  * If the address doesn't belong to the zone maps, returns 0.
1329 */
1330 vm_size_t
1331 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1332 {
1333 vm_address_t addr = (vm_address_t)elem;
1334 struct zone_page_metadata *meta;
1335 vm_size_t esize, offs, end;
1336 zone_t zone;
1337
1338 if (from_zone_map(addr, sizeof(void *))) {
1339 meta = zone_meta_from_addr(addr);
1340 zone = zone_by_id(meta->zm_index);
1341 esize = zone_elem_inner_size(zone);
1342 end = vm_memtag_canonicalize_address(addr + esize);
1343 offs = 0;
1344
1345 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1346 /*
1347 * If the chunk uses guards, and that (addr + esize)
1348 * either crosses a page boundary or is at the boundary,
1349 * we need to look harder.
1350 */
1351 if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1352 /*
1353 * Because in the vast majority of cases the element
1354 * size is sub-page, and that meta[1] must be faulted,
1355 * we can quickly peek at whether it's a guard.
1356 *
1357 * For elements larger than a page, finding the guard
1358 * page requires a little more effort.
1359 */
1360 if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1361 offs = meta[1].zm_oob_offs;
1362 if (clear_oob) {
1363 meta[1].zm_oob_offs = 0;
1364 }
1365 } else if (esize > PAGE_SIZE) {
1366 struct zone_page_metadata *gmeta;
1367
1368 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1369 gmeta = meta + meta->zm_subchunk_len;
1370 } else {
1371 gmeta = meta + zone->z_chunk_pages;
1372 }
1373 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1374
1375 if (end >= zone_meta_to_addr(gmeta)) {
1376 offs = gmeta->zm_oob_offs;
1377 if (clear_oob) {
1378 gmeta->zm_oob_offs = 0;
1379 }
1380 }
1381 }
1382 }
1383 #else
1384 #pragma unused(end, clear_oob)
1385 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1386
1387 if (oob_offs) {
1388 *oob_offs = offs;
1389 }
1390 if (z) {
1391 *z = zone;
1392 }
1393 return esize;
1394 }
1395
1396 if (oob_offs) {
1397 *oob_offs = 0;
1398 }
1399
1400 return 0;
1401 }
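/*
 * Usage sketch (hypothetical caller):
 *
 *	zone_t z;
 *	vm_offset_t oob_offs;
 *	vm_size_t size = zone_element_size(ptr, &z, false, &oob_offs);
 *	if (size == 0) {
 *		// `ptr` isn't backed by the zone maps
 *	}
 */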
1402
1403 zone_id_t
1404 zone_id_for_element(void *addr, vm_size_t esize)
1405 {
1406 zone_id_t zid = ZONE_ID_INVALID;
1407 if (from_zone_map(addr, esize)) {
1408 zid = zone_index_from_ptr(addr);
1409 __builtin_assume(zid != ZONE_ID_INVALID);
1410 }
1411 return zid;
1412 }
1413
1414 /* This function just formats the reason for the panics by redoing the checks */
1415 __abortlike
1416 static void
1417 zone_require_panic(zone_t zone, void *addr)
1418 {
1419 uint32_t zindex;
1420 zone_t other;
1421
1422 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1423 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1424 }
1425
1426 zindex = zone_index_from_ptr(addr);
1427 other = &zone_array[zindex];
1428 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1429 panic("zone_require failed: invalid zone index %d "
1430 "(addr: %p, expected: %s%s)", zindex,
1431 addr, zone_heap_name(zone), zone->z_name);
1432 } else {
1433 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1434 "(addr: %p, expected: %s%s)",
1435 zindex, zone_heap_name(other), other->z_name,
1436 addr, zone_heap_name(zone), zone->z_name);
1437 }
1438 }
1439
1440 __abortlike
1441 static void
1442 zone_id_require_panic(zone_id_t zid, void *addr)
1443 {
1444 zone_require_panic(&zone_array[zid], addr);
1445 }
1446
1447 /*
1448 * Routines to panic if a pointer is not mapped to an expected zone.
1449 * This can be used as a means of pinning an object to the zone it is expected
1450 * to be a part of. Causes a panic if the address does not belong to any
1451 * specified zone, does not belong to any zone, has been freed and therefore
1452 * unmapped from the zone, or the pointer contains an uninitialized value that
1453 * does not belong to any zone.
1454 */
1455 void
1456 zone_require(zone_t zone, void *addr)
1457 {
1458 vm_size_t esize = zone_elem_inner_size(zone);
1459
1460 if (from_zone_map(addr, esize) &&
1461 zone_has_index(zone, zone_index_from_ptr(addr))) {
1462 return;
1463 }
1464 zone_require_panic(zone, addr);
1465 }
1466
1467 void
1468 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1469 {
1470 if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1471 return;
1472 }
1473 zone_id_require_panic(zid, addr);
1474 }
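/*
 * Usage sketch for the pinning checks above (the type and zone names are
 * hypothetical):
 *
 *	struct foo *f = <pointer received from an untrusted place>;
 *	zone_require(foo_zone, f);	// panics unless `f` was allocated from foo_zone
 */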
1475
1476 bool
1477 zone_owns(zone_t zone, void *addr)
1478 {
1479 vm_size_t esize = zone_elem_inner_size(zone);
1480
1481 if (from_zone_map(addr, esize)) {
1482 return zone_has_index(zone, zone_index_from_ptr(addr));
1483 }
1484 return false;
1485 }
1486
1487 static inline struct mach_vm_range
1488 zone_kmem_suballoc(
1489 mach_vm_offset_t addr,
1490 vm_size_t size,
1491 int flags,
1492 vm_tag_t tag,
1493 vm_map_t *new_map)
1494 {
1495 struct mach_vm_range r;
1496
1497 *new_map = kmem_suballoc(kernel_map, &addr, size,
1498 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1499 flags, KMS_PERMANENT | KMS_NOFAIL, tag).kmr_submap;
1500
1501 r.min_address = addr;
1502 r.max_address = addr + size;
1503 return r;
1504 }
1505
1506 #endif /* !ZALLOC_TEST */
1507 #pragma mark Zone bits allocator
1508
1509 /*!
1510 * @defgroup Zone Bitmap allocator
1511 * @{
1512 *
1513 * @brief
1514 * Functions implementing the zone bitmap allocator
1515 *
1516 * @discussion
1517 * The zone allocator maintains which elements are allocated or free in bitmaps.
1518 *
1519 * When the number of elements per page is smaller than 32, it is stored inline
1520 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1521 * and @c zm_bitmap used for storage).
1522 *
1523 * When the number of elements is larger, then a bitmap is allocated from
1524  * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1525 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1526  * @c zm_bitmap. The top 3 bits encode the scale (order) of the allocation in
1527 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1528 * cannot be larger than 1024 bytes (8192 bits).
1529 *
1530  * This buddy allocator can actually accommodate allocations as large
1531 * as 8k on 16k systems and 2k on 4k systems.
1532 *
1533 * Note: @c zba_* functions are implementation details not meant to be used
1534 * outside of the allocation of the allocator itself. Interfaces to the rest of
1535 * the zone allocator are documented and not @c zba_* prefixed.
1536 */
1537
1538 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1539 #define ZBA_GRANULE sizeof(uint64_t)
1540 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1541 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1542 #define ZBA_MAX_ALLOC_ORDER 7
1543 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1544 #define ZBA_HEADS_COUNT (ZBA_MAX_ORDER + 1)
1545 #define ZBA_PTR_MASK 0x0fffffff
1546 #define ZBA_ORDER_SHIFT 29
1547 #define ZBA_HAS_EXTRA_BIT 0x10000000
1548
1549 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1550 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
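/*
 * Sketch of decoding a packed bitmap reference, assuming the layout implied
 * by the defines above (the real helpers appear later in this file):
 *
 *	order  = bref >> ZBA_ORDER_SHIFT;		// scale, in ZBA_GRANULE units
 *	extra  = (bref & ZBA_HAS_EXTRA_BIT) != 0;	// also backed by zi_xtra_range
 *	bits   = zba_slot_base() + (bref & ZBA_PTR_MASK);
 *	nbytes = ZBA_GRANULE << order;			// at most 1024 bytes
 */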
1551
1552 struct zone_bits_chain {
1553 uint32_t zbc_next;
1554 uint32_t zbc_prev;
1555 } __attribute__((aligned(ZBA_GRANULE)));
1556
1557 struct zone_bits_head {
1558 uint32_t zbh_next;
1559 uint32_t zbh_unused;
1560 } __attribute__((aligned(ZBA_GRANULE)));
1561
1562 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1563 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1564
1565 struct zone_bits_allocator_meta {
1566 uint32_t zbam_left;
1567 uint32_t zbam_right;
1568 struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1569 struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1570 };
1571
1572 struct zone_bits_allocator_header {
1573 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1574 };
1575
1576 #if ZALLOC_TEST
1577 static struct zalloc_bits_allocator_test_setup {
1578 vm_offset_t zbats_base;
1579 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1580 } zba_test_info;
1581
1582 static struct zone_bits_allocator_header *
1583 zba_base_header(void)
1584 {
1585 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1586 }
1587
1588 static kern_return_t
1589 zba_populate(uint32_t n, bool with_extra __unused)
1590 {
1591 vm_address_t base = zba_test_info.zbats_base;
1592 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1593
1594 return KERN_SUCCESS;
1595 }
1596 #else
1597 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1598 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1599
1600 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1601 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1602
1603 static struct zone_bits_allocator_header *
1604 zba_base_header(void)
1605 {
1606 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1607 }
1608
1609 static void
1610 zba_lock(void)
1611 {
1612 lck_mtx_lock(&zba_mtx);
1613 }
1614
1615 static void
1616 zba_unlock(void)
1617 {
1618 lck_mtx_unlock(&zba_mtx);
1619 }
1620
1621 __abortlike
1622 static void
1623 zba_memory_exhausted(void)
1624 {
1625 uint64_t zsize = 0;
1626 zone_t z = zone_find_largest(&zsize);
1627 panic("zba_populate: out of bitmap space, "
1628 "likely due to memory leak in zone [%s%s] "
1629 "(%u%c, %d elements allocated)",
1630 zone_heap_name(z), zone_name(z),
1631 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1632 zone_count_allocated(z));
1633 }
1634
1635
1636 static kern_return_t
1637 zba_populate(uint32_t n, bool with_extra)
1638 {
1639 vm_size_t bits_size = ZBA_CHUNK_SIZE;
1640 vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1641 vm_address_t bits_addr;
1642 vm_address_t xtra_addr;
1643 kern_return_t kr;
1644
1645 bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1646 xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1647
1648 kr = kernel_memory_populate(bits_addr, bits_size,
1649 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1650 VM_KERN_MEMORY_OSFMK);
1651 if (kr != KERN_SUCCESS) {
1652 return kr;
1653 }
1654
1655
1656 if (with_extra) {
1657 kr = kernel_memory_populate(xtra_addr, xtra_size,
1658 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1659 VM_KERN_MEMORY_OSFMK);
1660 if (kr != KERN_SUCCESS) {
1661 kernel_memory_depopulate(bits_addr, bits_size,
1662 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1663 VM_KERN_MEMORY_OSFMK);
1664 }
1665 }
1666
1667 return kr;
1668 }
1669 #endif
1670
1671 __pure2
1672 static struct zone_bits_allocator_meta *
1673 zba_meta(void)
1674 {
1675 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1676 }
1677
1678 __pure2
1679 static uint64_t *
1680 zba_slot_base(void)
1681 {
1682 return (uint64_t *)zba_base_header();
1683 }
1684
1685 __pure2
1686 static struct zone_bits_head *
1687 zba_head(uint32_t order, bool with_extra)
1688 {
1689 if (with_extra) {
1690 return &zba_meta()->zbam_lists_with_extra[order];
1691 } else {
1692 return &zba_meta()->zbam_lists[order];
1693 }
1694 }
1695
1696 __pure2
1697 static uint32_t
1698 zba_head_index(struct zone_bits_head *hd)
1699 {
1700 return (uint32_t)((uint64_t *)hd - zba_slot_base());
1701 }
1702
1703 __pure2
1704 static struct zone_bits_chain *
1705 zba_chain_for_index(uint32_t index)
1706 {
1707 return (struct zone_bits_chain *)(zba_slot_base() + index);
1708 }
1709
1710 __pure2
1711 static uint32_t
1712 zba_chain_to_index(const struct zone_bits_chain *zbc)
1713 {
1714 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1715 }
1716
1717 __abortlike
1718 static void
1719 zba_head_corruption_panic(uint32_t order, bool with_extra)
1720 {
1721 panic("zone bits allocator head[%d:%d:%p] is corrupt",
1722 order, with_extra, zba_head(order, with_extra));
1723 }
1724
1725 __abortlike
1726 static void
1727 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1728 {
1729 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1730 }
1731
1732 static void
1733 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1734 {
1735 struct zone_bits_head *hd = zba_head(order, with_extra);
1736 uint32_t hd_index = zba_head_index(hd);
1737 uint32_t index = zba_chain_to_index(zbc);
1738 struct zone_bits_chain *next;
1739
1740 if (hd->zbh_next) {
1741 next = zba_chain_for_index(hd->zbh_next);
1742 if (next->zbc_prev != hd_index) {
1743 zba_head_corruption_panic(order, with_extra);
1744 }
1745 next->zbc_prev = index;
1746 }
1747 zbc->zbc_next = hd->zbh_next;
1748 zbc->zbc_prev = hd_index;
1749 hd->zbh_next = index;
1750 }
1751
1752 static void
1753 zba_remove_block(struct zone_bits_chain *zbc)
1754 {
1755 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1756 uint32_t index = zba_chain_to_index(zbc);
1757
1758 if (prev->zbc_next != index) {
1759 zba_chain_corruption_panic(prev, zbc);
1760 }
1761 if ((prev->zbc_next = zbc->zbc_next)) {
1762 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1763 if (next->zbc_prev != index) {
1764 zba_chain_corruption_panic(zbc, next);
1765 }
1766 next->zbc_prev = zbc->zbc_prev;
1767 }
1768 }
1769
1770 static vm_address_t
1771 zba_try_pop_block(uint32_t order, bool with_extra)
1772 {
1773 struct zone_bits_head *hd = zba_head(order, with_extra);
1774 struct zone_bits_chain *zbc;
1775
1776 if (hd->zbh_next == 0) {
1777 return 0;
1778 }
1779
1780 zbc = zba_chain_for_index(hd->zbh_next);
1781 zba_remove_block(zbc);
1782 return (vm_address_t)zbc;
1783 }
1784
1785 static struct zone_bits_allocator_header *
1786 zba_header(vm_offset_t addr)
1787 {
1788 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1789 return (struct zone_bits_allocator_header *)addr;
1790 }
1791
1792 static size_t
1793 zba_node_parent(size_t node)
1794 {
1795 return (node - 1) / 2;
1796 }
1797
1798 static size_t
1799 zba_node_left_child(size_t node)
1800 {
1801 return node * 2 + 1;
1802 }
1803
1804 static size_t
1805 zba_node_buddy(size_t node)
1806 {
1807 return ((node - 1) ^ 1) + 1;
1808 }
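
/*
 * The three helpers above navigate an implicit binary tree rooted at node 0,
 * where the children of node n are 2n+1 and 2n+2. A worked example
 * (illustrative values only):
 *
 *   zba_node_parent(5)     == (5 - 1) / 2       == 2
 *   zba_node_left_child(2) == 2 * 2 + 1         == 5
 *   zba_node_buddy(5)      == ((5 - 1) ^ 1) + 1 == 6
 *   zba_node_buddy(6)      == ((6 - 1) ^ 1) + 1 == 5
 *
 * nodes 5 and 6 are siblings (buddies) sharing parent node 2.
 */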
1809
1810 static size_t
1811 zba_node(vm_offset_t addr, uint32_t order)
1812 {
1813 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1814 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1815 }
1816
1817 static struct zone_bits_chain *
1818 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1819 {
1820 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1821 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1822 }
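
/*
 * zba_node() and zba_chain_for_node() convert between a chunk-relative
 * address and the node index of its block at a given order; they are exact
 * inverses. A worked example, assuming ZBA_MAX_ORDER == 10:
 *
 *   an order-3 block at byte offset 128 in its chunk sits at granule
 *   offset 128 / ZBA_GRANULE == 16, so
 *
 *   zba_node(addr, 3) == (16 >> 3) + (1 << (10 - 3 + 1)) - 1 == 2 + 255 == 257
 *
 *   and zba_chain_for_node(zbah, 257, 3) maps back to granule offset
 *   (257 - 256 + 1) << 3 == 16, i.e. byte offset 128.
 */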
1823
1824 static void
1825 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1826 {
1827 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1828 }
1829
1830 static bool
1831 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1832 {
1833 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1834 }
1835
1836 static void
1837 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1838 {
1839 struct zone_bits_allocator_header *zbah = zba_header(addr);
1840 struct zone_bits_chain *zbc;
1841 size_t node = zba_node(addr, order);
1842
1843 while (node) {
1844 size_t parent = zba_node_parent(node);
1845
1846 zba_node_flip_split(zbah, parent);
1847 if (zba_node_is_split(zbah, parent)) {
1848 break;
1849 }
1850
1851 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1852 zba_remove_block(zbc);
1853 order++;
1854 node = parent;
1855 }
1856
1857 zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1858 }
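
/*
 * A sketch of the merge loop above, for a block freed at a given order:
 *
 *   flip the parent's split bit;
 *   if the parent is now split, the buddy is still allocated:
 *       push this block on its freelist and stop;
 *   otherwise the buddy is free as well:
 *       unlink the buddy, and retry one order higher with the merged block.
 *
 * e.g. freeing an order-2 block whose buddy is already free removes that
 * buddy from the order-2 freelist and pushes a single order-3 block instead.
 */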
1859
1860 static vm_size_t
1861 zba_chunk_header_size(uint32_t n)
1862 {
1863 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1864 if (n == 0) {
1865 hdr_size += sizeof(struct zone_bits_allocator_meta);
1866 }
1867 return hdr_size;
1868 }
1869
1870 static void
1871 zba_init_chunk(uint32_t n, bool with_extra)
1872 {
1873 vm_size_t hdr_size = zba_chunk_header_size(n);
1874 vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1875 struct zone_bits_allocator_header *zbah = zba_header(page);
1876 vm_size_t size = ZBA_CHUNK_SIZE;
1877 size_t node;
1878
1879 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1880 if (size < hdr_size + (ZBA_GRANULE << o)) {
1881 continue;
1882 }
1883 size -= ZBA_GRANULE << o;
1884 node = zba_node(page + size, o);
1885 zba_node_flip_split(zbah, zba_node_parent(node));
1886 zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1887 }
1888 }
1889
1890 __attribute__((noinline))
1891 static void
1892 zba_grow(bool with_extra)
1893 {
1894 struct zone_bits_allocator_meta *meta = zba_meta();
1895 kern_return_t kr = KERN_SUCCESS;
1896 uint32_t chunk;
1897
1898 #if !ZALLOC_TEST
1899 if (meta->zbam_left >= meta->zbam_right) {
1900 zba_memory_exhausted();
1901 }
1902 #endif
1903
1904 if (with_extra) {
1905 chunk = meta->zbam_right - 1;
1906 } else {
1907 chunk = meta->zbam_left;
1908 }
1909
1910 kr = zba_populate(chunk, with_extra);
1911 if (kr == KERN_SUCCESS) {
1912 if (with_extra) {
1913 meta->zbam_right -= 1;
1914 } else {
1915 meta->zbam_left += 1;
1916 }
1917
1918 zba_init_chunk(chunk, with_extra);
1919 #if !ZALLOC_TEST
1920 } else {
1921 /*
1922 * zba_populate() has to be allowed to fail to populate:
1923 * since we are under a global lock, we need to do the
1924 * VM_PAGE_WAIT() outside of it.
1925 */
1926 assert(kr == KERN_RESOURCE_SHORTAGE);
1927 zba_unlock();
1928 VM_PAGE_WAIT();
1929 zba_lock();
1930 #endif
1931 }
1932 }
1933
1934 static vm_offset_t
1935 zba_alloc(uint32_t order, bool with_extra)
1936 {
1937 struct zone_bits_allocator_header *zbah;
1938 uint32_t cur = order;
1939 vm_address_t addr;
1940 size_t node;
1941
1942 while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1943 if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1944 zba_grow(with_extra);
1945 cur = order;
1946 }
1947 }
1948
1949 zbah = zba_header(addr);
1950 node = zba_node(addr, cur);
1951 zba_node_flip_split(zbah, zba_node_parent(node));
1952 while (cur > order) {
1953 cur--;
1954 zba_node_flip_split(zbah, node);
1955 node = zba_node_left_child(node);
1956 zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1957 cur, with_extra);
1958 }
1959
1960 return addr;
1961 }
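
/*
 * A sketch of the allocation path above: if the freelist for the requested
 * order is empty, zba_alloc() tries increasingly larger orders (calling
 * zba_grow() once even ZBA_MAX_ORDER has nothing), then splits the block it
 * popped back down. For example, asking for order 1 when only an order-3
 * block is free pops that block, pushes its right half back as an order-2
 * block, pushes the right half of the remainder as an order-1 block, and
 * returns the leftmost order-1 piece (the returned address never changes
 * while splitting, since a left child starts at the same offset as its
 * parent).
 */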
1962
1963 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
1964 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
1965 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
1966 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
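
/*
 * Worked examples of the bitmap helpers above (illustrative values only):
 *
 *   zba_map_index(uint32_t, 37)  == 37 / 32 == 1
 *   zba_map_bit(uint32_t, 5)     == 0x00000020
 *   zba_map_mask_lt(uint32_t, 5) == 0x0000001f   (bits 0..4)
 *   zba_map_mask_ge(uint32_t, 5) == 0xffffffe0   (bits 5..31)
 */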
1967
1968 #if !ZALLOC_TEST
1969 #if VM_TAG_SIZECLASSES
1970
1971 static void *
1972 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1973 {
1974 vm_offset_t base = zone_info.zi_xtra_range.min_address;
1975 vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1976
1977 return (void *)(base + ((offs + idx) << zba_xtra_shift));
1978 }
1979
1980 #endif /* VM_TAG_SIZECLASSES */
1981
1982 static uint32_t
1983 zba_bits_ref_order(uint32_t bref)
1984 {
1985 return bref >> ZBA_ORDER_SHIFT;
1986 }
1987
1988 static bitmap_t *
1989 zba_bits_ref_ptr(uint32_t bref)
1990 {
1991 return zba_slot_base() + (bref & ZBA_PTR_MASK);
1992 }
1993
1994 static vm_offset_t
1995 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
1996 zalloc_flags_t flags, vm_offset_t eidx)
1997 {
1998 size_t i = eidx / 32;
1999 uint32_t map;
2000
2001 if (eidx % 32) {
2002 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
2003 if (map) {
2004 eidx = __builtin_ctz(map);
2005 meta[i].zm_bitmap ^= 1u << eidx;
2006 return i * 32 + eidx;
2007 }
2008 i++;
2009 }
2010
2011 uint32_t chunk_len = meta->zm_chunk_len;
2012 if (flags & Z_PCPU) {
2013 chunk_len = zpercpu_count();
2014 }
2015 for (int j = 0; j < chunk_len; j++, i++) {
2016 if (i >= chunk_len) {
2017 i = 0;
2018 }
2019 if (__probable(map = meta[i].zm_bitmap)) {
2020 meta[i].zm_bitmap &= map - 1;
2021 return i * 32 + __builtin_ctz(map);
2022 }
2023 }
2024
2025 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2026 }
2027
2028 static vm_offset_t
2029 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2030 vm_offset_t eidx)
2031 {
2032 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2033 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2034 size_t i = eidx / 64;
2035 uint64_t map;
2036
2037 if (eidx % 64) {
2038 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2039 if (map) {
2040 eidx = __builtin_ctzll(map);
2041 bits[i] ^= 1ull << eidx;
2042 return i * 64 + eidx;
2043 }
2044 i++;
2045 }
2046
2047 for (int j = 0; j < bits_size; i++, j++) {
2048 if (i >= bits_size) {
2049 i = 0;
2050 }
2051 if (__probable(map = bits[i])) {
2052 bits[i] &= map - 1;
2053 return i * 64 + __builtin_ctzll(map);
2054 }
2055 }
2056
2057 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2058 }
2059
2060 /*!
2061 * @function zone_meta_find_and_clear_bit
2062 *
2063 * @brief
2064 * The core of the bitmap allocator: find a bit set in the bitmaps.
2065 *
2066 * @discussion
2067 * This method round-robins through the available elements,
2068 * keeping a per-CPU memory of the last allocated element index.
2069 *
2070 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2071 * double-free bugs way too practical.
2072 *
2073 * @param zone The zone we're allocating from.
 * @param zs The per-CPU zone stats tracking the round-robin cursor.
2074 * @param meta The main metadata for the chunk being allocated from.
2075 * @param flags The alloc flags (for @c Z_PCPU).
2076 */
2077 static vm_offset_t
2078 zone_meta_find_and_clear_bit(
2079 zone_t zone,
2080 zone_stats_t zs,
2081 struct zone_page_metadata *meta,
2082 zalloc_flags_t flags)
2083 {
2084 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2085
2086 if (meta->zm_inline_bitmap) {
2087 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2088 } else {
2089 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2090 }
2091 zs->zs_alloc_rr = (uint16_t)eidx;
2092 return eidx;
2093 }
2094
2095 /*!
2096 * @function zone_meta_bits_init_inline
2097 *
2098 * @brief
2099 * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2100 *
2101 * @param meta The main metadata for the initialized chunk.
2102 * @param count The number of elements the chunk can hold
2103 * (which might be partial for partially populated chunks).
2104 */
2105 static void
2106 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2107 {
2108 /*
2109 * We're called with the metadata zm_bitmap fields already zeroed out.
2110 */
2111 for (size_t i = 0; i < count / 32; i++) {
2112 meta[i].zm_bitmap = ~0u;
2113 }
2114 if (count % 32) {
2115 meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2116 }
2117 }
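
/*
 * For illustration, a chunk holding 70 elements ends up with:
 *
 *   meta[0].zm_bitmap == ~0u                           (elements  0..31)
 *   meta[1].zm_bitmap == ~0u                           (elements 32..63)
 *   meta[2].zm_bitmap == zba_map_mask_lt(uint32_t, 70) (elements 64..69)
 *                     == (1u << (70 % 32)) - 1 == 0x0000003f
 */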
2118
2119 /*!
2120 * @function zone_meta_bits_alloc_init
2121 *
2122 * @brief
2123 * Allocates a zm_bitmap field for a newly assigned chunk.
2124 *
2125 * @param count The number of elements the chunk can hold
2126 * (which might be partial for partially populated chunks).
2127 * @param nbits The maximum number of bits that will be used.
2128 * @param with_extra Whether "VM Tracking" metadata needs to be allocated.
2129 */
2130 static uint32_t
2131 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2132 {
2133 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2134 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2135
2136 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2137 uint64_t *bits;
2138 size_t i = 0;
2139
2140 assert(order <= ZBA_MAX_ALLOC_ORDER);
2141 assert(count <= ZBA_GRANULE_BITS << order);
2142
2143 zba_lock();
2144 bits = (uint64_t *)zba_alloc(order, with_extra);
2145 zba_unlock();
2146
2147 while (i < count / 64) {
2148 bits[i++] = ~0ull;
2149 }
2150 if (count % 64) {
2151 bits[i++] = zba_map_mask_lt(uint64_t, count);
2152 }
2153 while (i < 1u << order) {
2154 bits[i++] = 0;
2155 }
2156
2157 return (uint32_t)(bits - zba_slot_base()) +
2158 (order << ZBA_ORDER_SHIFT) +
2159 (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2160 }
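
/*
 * The returned reference packs everything needed to find the bitmap again,
 * mirroring ZBA_PTR_MASK, ZBA_HAS_EXTRA_BIT and ZBA_ORDER_SHIFT (a sketch
 * of the encoding):
 *
 *   bits  0..27  index of the first granule relative to zba_slot_base()
 *   bit      28  ZBA_HAS_EXTRA_BIT: "VM tracking" space was allocated too
 *   bits 29..31  the buddy order of the allocation
 *
 * e.g. nbits == 200 gives order == flsll((200 - 1) / 64) == flsll(3) == 2,
 * i.e. a 4-granule (256-bit) bitmap, later decoded by zba_bits_ref_ptr()
 * and zba_bits_ref_order().
 */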
2161
2162 /*!
2163 * @function zone_meta_bits_merge
2164 *
2165 * @brief
2166 * Adds elements <code>[start, end)</code> to a chunk being extended.
2167 *
2168 * @param meta The main metadata for the extended chunk.
2169 * @param start The index of the first element to add to the chunk.
2170 * @param end The index one past the last element to add (exclusive).
2171 */
2172 static void
2173 zone_meta_bits_merge(struct zone_page_metadata *meta,
2174 uint32_t start, uint32_t end)
2175 {
2176 if (meta->zm_inline_bitmap) {
2177 while (start < end) {
2178 size_t s_i = start / 32;
2179 size_t s_e = end / 32;
2180
2181 if (s_i == s_e) {
2182 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2183 zba_map_mask_ge(uint32_t, start);
2184 break;
2185 }
2186
2187 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2188 start += 32 - (start % 32);
2189 }
2190 } else {
2191 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2192
2193 while (start < end) {
2194 size_t s_i = start / 64;
2195 size_t s_e = end / 64;
2196
2197 if (s_i == s_e) {
2198 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2199 zba_map_mask_ge(uint64_t, start);
2200 break;
2201 }
2202 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2203 start += 64 - (start % 64);
2204 }
2205 }
2206 }
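
/*
 * For illustration, zone_meta_bits_merge(meta, 30, 70) on an inline bitmap
 * marks elements 30..69 as free in three steps:
 *
 *   meta[0].zm_bitmap |= zba_map_mask_ge(uint32_t, 30);  // bits 30..31
 *   meta[1].zm_bitmap |= zba_map_mask_ge(uint32_t, 32);  // all 32 bits
 *   meta[2].zm_bitmap |= zba_map_mask_lt(uint32_t, 70)
 *                      & zba_map_mask_ge(uint32_t, 64);  // bits 0..5
 */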
2207
2208 /*!
2209 * @function zone_bits_free
2210 *
2211 * @brief
2212 * Frees a bitmap to the zone bitmap allocator.
2213 *
2214 * @param bref
2215 * A bitmap reference set by @c zone_meta_bits_alloc_init() in a @c zm_bitmap field.
2216 */
2217 static void
2218 zone_bits_free(uint32_t bref)
2219 {
2220 zba_lock();
2221 zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2222 zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2223 zba_unlock();
2224 }
2225
2226 /*!
2227 * @function zone_meta_is_free
2228 *
2229 * @brief
2230 * Returns whether a given element appears free.
2231 */
2232 static bool
2233 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2234 {
2235 if (meta->zm_inline_bitmap) {
2236 uint32_t bit = zba_map_bit(uint32_t, eidx);
2237 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2238 } else {
2239 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2240 uint64_t bit = zba_map_bit(uint64_t, eidx);
2241 return bits[zba_map_index(uint64_t, eidx)] & bit;
2242 }
2243 }
2244
2245 /*!
2246 * @function zone_meta_mark_free
2247 *
2248 * @brief
2249 * Marks an element as free and returns whether it was marked as used.
2250 */
2251 static bool
2252 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2253 {
2254 if (meta->zm_inline_bitmap) {
2255 uint32_t bit = zba_map_bit(uint32_t, eidx);
2256 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2257 return false;
2258 }
2259 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2260 } else {
2261 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2262 uint64_t bit = zba_map_bit(uint64_t, eidx);
2263 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2264 return false;
2265 }
2266 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2267 }
2268 return true;
2269 }
2270
2271 #if VM_TAG_SIZECLASSES
2272
2273 __startup_func
2274 void
2275 __zone_site_register(vm_allocation_site_t *site)
2276 {
2277 if (zone_tagging_on) {
2278 vm_tag_alloc(site);
2279 }
2280 }
2281
2282 uint16_t
2283 zone_index_from_tag_index(uint32_t sizeclass_idx)
2284 {
2285 return zone_tags_sizeclasses[sizeclass_idx];
2286 }
2287
2288 #endif /* VM_TAG_SIZECLASSES */
2289 #endif /* !ZALLOC_TEST */
2290 /*! @} */
2291 #pragma mark zalloc helpers
2292 #if !ZALLOC_TEST
2293
2294 static inline void *
2295 zstack_tbi_fix(vm_offset_t elem)
2296 {
2297 #if CONFIG_KERNEL_TAGGING
2298 elem = vm_memtag_fixup_ptr(elem);
2299 #endif /* CONFIG_KERNEL_TAGGING */
2300 return (void *)elem;
2301 }
2302
2303 static inline vm_offset_t
2304 zstack_tbi_fill(void *addr)
2305 {
2306 vm_offset_t elem = (vm_offset_t)addr;
2307
2308 return vm_memtag_canonicalize_address(elem);
2309 }
2310
2311 __attribute__((always_inline))
2312 static inline void
2313 zstack_push_no_delta(zstack_t *stack, void *addr)
2314 {
2315 vm_offset_t elem = zstack_tbi_fill(addr);
2316
2317 *(vm_offset_t *)addr = stack->z_head - elem;
2318 stack->z_head = elem;
2319 }
2320
2321 __attribute__((always_inline))
2322 void
2323 zstack_push(zstack_t *stack, void *addr)
2324 {
2325 zstack_push_no_delta(stack, addr);
2326 stack->z_count++;
2327 }
2328
2329 __attribute__((always_inline))
2330 static inline void *
2331 zstack_pop_no_delta(zstack_t *stack)
2332 {
2333 void *addr = zstack_tbi_fix(stack->z_head);
2334
2335 stack->z_head += *(vm_offset_t *)addr;
2336 *(vm_offset_t *)addr = 0;
2337
2338 return addr;
2339 }
2340
2341 __attribute__((always_inline))
2342 void *
2343 zstack_pop(zstack_t *stack)
2344 {
2345 stack->z_count--;
2346 return zstack_pop_no_delta(stack);
2347 }
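
/*
 * The stack above is singly linked through deltas rather than raw pointers:
 * each element stores the distance to the element that was on top before
 * it. A sketch of two pushes onto an empty stack (z_head == 0):
 *
 *   zstack_push(&s, A);   // *A = 0 - A,  s.z_head = A, s.z_count = 1
 *   zstack_push(&s, B);   // *B = A - B,  s.z_head = B, s.z_count = 2
 *   zstack_pop(&s);       // returns B,   s.z_head = B + (A - B) == A
 */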
2348
2349 static inline void
2350 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2351 {
2352 uint32_t ticket;
2353
2354 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2355 &ticket, &zone_locks_grp))) {
2356 return;
2357 }
2358
2359 hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2360
2361 /*
2362 * If zone caching has been disabled due to memory pressure,
2363 * then recording contention is not useful, give the system
2364 * time to recover.
2365 */
2366 if (__probable(!zone_caching_disabled && !zone_exhausted(zone))) {
2367 zone->z_recirc_cont_cur++;
2368 }
2369 }
2370
2371 static inline void
2372 zone_recirc_lock_nopreempt(zone_t zone)
2373 {
2374 hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2375 }
2376
2377 static inline void
2378 zone_recirc_unlock_nopreempt(zone_t zone)
2379 {
2380 hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2381 }
2382
2383 static inline void
2384 zone_lock_nopreempt_check_contention(zone_t zone)
2385 {
2386 uint32_t ticket;
2387 #if KASAN_FAKESTACK
2388 spl_t s = 0;
2389 if (zone->z_kasan_fakestacks) {
2390 s = splsched();
2391 }
2392 #endif /* KASAN_FAKESTACK */
2393
2394 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2395 &zone_locks_grp))) {
2396 #if KASAN_FAKESTACK
2397 zone->z_kasan_spl = s;
2398 #endif /* KASAN_FAKESTACK */
2399 return;
2400 }
2401
2402 hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2403 #if KASAN_FAKESTACK
2404 zone->z_kasan_spl = s;
2405 #endif /* KASAN_FAKESTACK */
2406
2407 /*
2408 * If zone caching has been disabled due to memory pressure,
2409 * then recording contention is not useful, give the system
2410 * time to recover.
2411 */
2412 if (__probable(!zone_caching_disabled &&
2413 !zone->z_pcpu_cache && !zone_exhausted(zone))) {
2414 zone->z_recirc_cont_cur++;
2415 }
2416 }
2417
2418 static inline void
2419 zone_lock_nopreempt(zone_t zone)
2420 {
2421 #if KASAN_FAKESTACK
2422 spl_t s = 0;
2423 if (zone->z_kasan_fakestacks) {
2424 s = splsched();
2425 }
2426 #endif /* KASAN_FAKESTACK */
2427 hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2428 #if KASAN_FAKESTACK
2429 zone->z_kasan_spl = s;
2430 #endif /* KASAN_FAKESTACK */
2431 }
2432
2433 static inline void
2434 zone_unlock_nopreempt(zone_t zone)
2435 {
2436 #if KASAN_FAKESTACK
2437 spl_t s = zone->z_kasan_spl;
2438 zone->z_kasan_spl = 0;
2439 #endif /* KASAN_FAKESTACK */
2440 hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2441 #if KASAN_FAKESTACK
2442 if (zone->z_kasan_fakestacks) {
2443 splx(s);
2444 }
2445 #endif /* KASAN_FAKESTACK */
2446 }
2447
2448 static inline void
2449 zone_depot_lock_nopreempt(zone_cache_t zc)
2450 {
2451 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2452 }
2453
2454 static inline void
2455 zone_depot_unlock_nopreempt(zone_cache_t zc)
2456 {
2457 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2458 }
2459
2460 static inline void
2461 zone_depot_lock(zone_cache_t zc)
2462 {
2463 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2464 }
2465
2466 static inline void
2467 zone_depot_unlock(zone_cache_t zc)
2468 {
2469 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2470 }
2471
2472 zone_t
2473 zone_by_id(size_t zid)
2474 {
2475 return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2476 }
2477
2478 static inline bool
2479 zone_supports_vm(zone_t z)
2480 {
2481 /*
2482 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2483 * to overcommit because they're used to reclaim memory
2484 * (VM support).
2485 */
2486 return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2487 z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2488 }
2489
2490 const char *
2491 zone_name(zone_t z)
2492 {
2493 return z->z_name;
2494 }
2495
2496 const char *
2497 zone_heap_name(zone_t z)
2498 {
2499 zone_security_flags_t zsflags = zone_security_config(z);
2500 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2501 return kalloc_heap_names[zsflags.z_kheap_id];
2502 }
2503 return "invalid";
2504 }
2505
2506 static uint32_t
2507 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2508 {
2509 vm_size_t elem_count, chunks;
2510
2511 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2512 zone_elem_outer_size(z);
2513 chunks = (max_elems + elem_count - 1) / elem_count;
2514
2515 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2516 }
2517
2518 static inline vm_size_t
2519 zone_submaps_approx_size(void)
2520 {
2521 vm_size_t size = 0;
2522
2523 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2524 if (zone_submaps[idx] != VM_MAP_NULL) {
2525 size += zone_submaps[idx]->size;
2526 }
2527 }
2528
2529 return size;
2530 }
2531
2532 static inline void
2533 zone_depot_init(struct zone_depot *zd)
2534 {
2535 *zd = (struct zone_depot){
2536 .zd_tail = &zd->zd_head,
2537 };
2538 }
2539
2540 static inline void
2541 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2542 {
2543 if (zd->zd_full++ == 0) {
2544 zd->zd_tail = &mag->zm_next;
2545 }
2546 mag->zm_next = zd->zd_head;
2547 zd->zd_head = mag;
2548 }
2549
2550 static inline void
2551 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2552 {
2553 zd->zd_full++;
2554 mag->zm_next = *zd->zd_tail;
2555 *zd->zd_tail = mag;
2556 zd->zd_tail = &mag->zm_next;
2557 }
2558
2559 static inline void
2560 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2561 {
2562 zd->zd_empty++;
2563 mag->zm_next = *zd->zd_tail;
2564 *zd->zd_tail = mag;
2565 }
2566
2567 static inline zone_magazine_t
2568 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2569 {
2570 zone_magazine_t mag = zd->zd_head;
2571
2572 assert(zd->zd_full);
2573
2574 zd->zd_full--;
2575 if (z && z->z_recirc_full_min > zd->zd_full) {
2576 z->z_recirc_full_min = zd->zd_full;
2577 }
2578 zd->zd_head = mag->zm_next;
2579 if (zd->zd_full == 0) {
2580 zd->zd_tail = &zd->zd_head;
2581 }
2582
2583 mag->zm_next = NULL;
2584 return mag;
2585 }
2586
2587 static inline zone_magazine_t
2588 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2589 {
2590 zone_magazine_t mag = *zd->zd_tail;
2591
2592 assert(zd->zd_empty);
2593
2594 zd->zd_empty--;
2595 if (z && z->z_recirc_empty_min > zd->zd_empty) {
2596 z->z_recirc_empty_min = zd->zd_empty;
2597 }
2598 *zd->zd_tail = mag->zm_next;
2599
2600 mag->zm_next = NULL;
2601 return mag;
2602 }
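
/*
 * The depot is a single forward-linked list in which the zd_full full
 * magazines come first, followed by the zd_empty empty ones. zd_tail always
 * points at the zm_next field of the last full magazine (or at zd_head when
 * there is none), which is where empty magazines are inserted and popped.
 * For illustration:
 *
 *   zd_head -> F1 -> F2 -> E1 -> E2 -> NULL
 *                     ^
 *                     zd_tail == &F2->zm_next, zd_full == 2, zd_empty == 2
 */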
2603
2604 static inline smr_seq_t
2605 zone_depot_move_full(
2606 struct zone_depot *dst,
2607 struct zone_depot *src,
2608 uint32_t n,
2609 zone_t z)
2610 {
2611 zone_magazine_t head, last;
2612
2613 assert(n);
2614 assert(src->zd_full >= n);
2615
2616 src->zd_full -= n;
2617 if (z && z->z_recirc_full_min > src->zd_full) {
2618 z->z_recirc_full_min = src->zd_full;
2619 }
2620 head = last = src->zd_head;
2621 for (uint32_t i = n; i-- > 1;) {
2622 last = last->zm_next;
2623 }
2624
2625 src->zd_head = last->zm_next;
2626 if (src->zd_full == 0) {
2627 src->zd_tail = &src->zd_head;
2628 }
2629
2630 if (z && zone_security_array[zone_index(z)].z_lifo) {
2631 if (dst->zd_full == 0) {
2632 dst->zd_tail = &last->zm_next;
2633 }
2634 last->zm_next = dst->zd_head;
2635 dst->zd_head = head;
2636 } else {
2637 last->zm_next = *dst->zd_tail;
2638 *dst->zd_tail = head;
2639 dst->zd_tail = &last->zm_next;
2640 }
2641 dst->zd_full += n;
2642
2643 return last->zm_seq;
2644 }
2645
2646 static inline void
2647 zone_depot_move_empty(
2648 struct zone_depot *dst,
2649 struct zone_depot *src,
2650 uint32_t n,
2651 zone_t z)
2652 {
2653 zone_magazine_t head, last;
2654
2655 assert(n);
2656 assert(src->zd_empty >= n);
2657
2658 src->zd_empty -= n;
2659 if (z && z->z_recirc_empty_min > src->zd_empty) {
2660 z->z_recirc_empty_min = src->zd_empty;
2661 }
2662 head = last = *src->zd_tail;
2663 for (uint32_t i = n; i-- > 1;) {
2664 last = last->zm_next;
2665 }
2666
2667 *src->zd_tail = last->zm_next;
2668
2669 dst->zd_empty += n;
2670 last->zm_next = *dst->zd_tail;
2671 *dst->zd_tail = head;
2672 }
2673
2674 static inline bool
2675 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2676 {
2677 if (depot->zd_full == 0) {
2678 return false;
2679 }
2680
2681 return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2682 }
2683
2684 static void
2685 zone_cache_swap_magazines(zone_cache_t cache)
2686 {
2687 uint16_t count_a = cache->zc_alloc_cur;
2688 uint16_t count_f = cache->zc_free_cur;
2689 vm_offset_t *elems_a = cache->zc_alloc_elems;
2690 vm_offset_t *elems_f = cache->zc_free_elems;
2691
2692 z_debug_assert(count_a <= zc_mag_size());
2693 z_debug_assert(count_f <= zc_mag_size());
2694
2695 cache->zc_alloc_cur = count_f;
2696 cache->zc_free_cur = count_a;
2697 cache->zc_alloc_elems = elems_f;
2698 cache->zc_free_elems = elems_a;
2699 }
2700
2701 __pure2
2702 static smr_t
2703 zone_cache_smr(zone_cache_t cache)
2704 {
2705 return cache->zc_smr;
2706 }
2707
2708 /*!
2709 * @function zone_magazine_replace
2710 *
2711 * @brief
2712 * Unload a magazine and load a new one in its place.
2713 */
2714 static zone_magazine_t
2715 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2716 {
2717 zone_magazine_t old;
2718 vm_offset_t **elems;
2719
2720 mag->zm_seq = SMR_SEQ_INVALID;
2721
2722 if (empty) {
2723 elems = &zc->zc_free_elems;
2724 zc->zc_free_cur = 0;
2725 } else {
2726 elems = &zc->zc_alloc_elems;
2727 zc->zc_alloc_cur = zc_mag_size();
2728 }
2729 old = (zone_magazine_t)((uintptr_t)*elems -
2730 offsetof(struct zone_magazine, zm_elems));
2731 *elems = mag->zm_elems;
2732
2733 return old;
2734 }
2735
2736 static zone_magazine_t
2737 zone_magazine_alloc(zalloc_flags_t flags)
2738 {
2739 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2740 }
2741
2742 static void
2743 zone_magazine_free(zone_magazine_t mag)
2744 {
2745 (zfree)(zc_magazine_zone, mag);
2746 }
2747
2748 static void
2749 zone_magazine_free_list(struct zone_depot *zd)
2750 {
2751 zone_magazine_t tmp, mag = *zd->zd_tail;
2752
2753 while (mag) {
2754 tmp = mag->zm_next;
2755 zone_magazine_free(mag);
2756 mag = tmp;
2757 }
2758
2759 *zd->zd_tail = NULL;
2760 zd->zd_empty = 0;
2761 }
2762
2763 void
2764 zone_enable_caching(zone_t zone)
2765 {
2766 size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2767 zone_cache_t caches;
2768 size_t depot_limit;
2769
2770 depot_limit = zc_pcpu_max() / size_per_mag;
2771 zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2772
2773 caches = zalloc_percpu_permanent_type(struct zone_cache);
2774 zpercpu_foreach(zc, caches) {
2775 zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2776 zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2777 zone_depot_init(&zc->zc_depot);
2778 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2779 }
2780
2781 zone_lock(zone);
2782 assert(zone->z_pcpu_cache == NULL);
2783 zone->z_pcpu_cache = caches;
2784 zone->z_recirc_cont_cur = 0;
2785 zone->z_recirc_cont_wma = 0;
2786 zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2787 zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2788 zone_unlock(zone);
2789 }
2790
2791 bool
2792 zone_maps_owned(vm_address_t addr, vm_size_t size)
2793 {
2794 return from_zone_map(addr, size);
2795 }
2796
2797 #if KASAN_LIGHT
2798 bool
2799 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2800 {
2801 return from_zone_map(addr, size) ||
2802 mach_vm_range_size(&zone_info.zi_map_range) == 0;
2803 }
2804 #endif /* KASAN_LIGHT */
2805
2806 void
2807 zone_map_sizes(
2808 vm_map_size_t *psize,
2809 vm_map_size_t *pfree,
2810 vm_map_size_t *plargest_free)
2811 {
2812 vm_map_size_t size, free, largest;
2813
2814 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2815
2816 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2817 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2818 *psize += size;
2819 *pfree += free;
2820 *plargest_free = MAX(*plargest_free, largest);
2821 }
2822 }
2823
2824 __attribute__((always_inline))
2825 vm_map_t
2826 zone_submap(zone_security_flags_t zsflags)
2827 {
2828 return zone_submaps[zsflags.z_submap_idx];
2829 }
2830
2831 unsigned
2832 zpercpu_count(void)
2833 {
2834 return zpercpu_early_count;
2835 }
2836
2837 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2838 /*
2839 * Returns a random number of a given bit-width.
2840 *
2841 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2842 *
2843 * This uses Intel's rdrand because random() uses FP registers,
2844 * which causes FP faults and allocations, neither of which is
2845 * something we can do from zalloc itself due to reentrancy problems.
2846 *
2847 * For pre-rdrand machines (which we no longer support),
2848 * we fall back to a biased random generator that doesn't use FP.
2849 * Such HW is no longer supported, but VMs of newer OSes on older
2850 * bare metal are made to limp along (with reduced security) this way.
2851 */
2852 static uint64_t
2853 zalloc_random_mask64(uint32_t bits)
2854 {
2855 uint64_t mask = ~0ull >> (64 - bits);
2856 uint64_t v;
2857
2858 #if __x86_64__
2859 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2860 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2861 v &= mask;
2862 } else {
2863 disable_preemption();
2864 int cpu = cpu_number();
2865 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2866 zone_bool_gen[cpu].zbg_entropy,
2867 ZONE_ENTROPY_CNT, bits);
2868 enable_preemption();
2869 }
2870 #else
2871 v = early_random() & mask;
2872 #endif
2873
2874 return v;
2875 }
2876
2877 /*
2878 * Returns a random number within [bound_min, bound_max)
2879 *
2880 * This isn't _exactly_ uniform, but the skew is small enough
2881 * not to matter for the consumers of this interface.
2882 *
2883 * Values within [bound_min, bound_min + 2^64 % (bound_max - bound_min))
2884 * will be returned about (bound_max - bound_min) / 2^64 more often
2885 * (in relative terms) than values in the rest of the range.
2886 */
2887 static uint32_t
2888 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2889 {
2890 uint64_t delta = bound_max - bound_min;
2891
2892 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2893 }
2894
2895 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2896 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2897 /*
2898 * Track all kalloc zones of specified size for zlog name
2899 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2900 *
2901 * Additionally track all shared kalloc zones with shared.kalloc
2902 */
2903 static bool
2904 track_kalloc_zones(zone_t z, const char *logname)
2905 {
2906 const char *prefix;
2907 size_t len;
2908 zone_security_flags_t zsflags = zone_security_config(z);
2909
2910 prefix = "kalloc.type.var.";
2911 len = strlen(prefix);
2912 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2913 strncmp(logname, prefix, len) == 0) {
2914 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2915
2916 return zone_elem_inner_size(z) == sizeclass;
2917 }
2918
2919 prefix = "kalloc.type.";
2920 len = strlen(prefix);
2921 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2922 strncmp(logname, prefix, len) == 0) {
2923 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2924
2925 return zone_elem_inner_size(z) == sizeclass;
2926 }
2927
2928 prefix = "kalloc.";
2929 len = strlen(prefix);
2930 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2931 strncmp(logname, prefix, len) == 0) {
2932 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2933
2934 return zone_elem_inner_size(z) == sizeclass;
2935 }
2936
2937 prefix = "shared.kalloc";
2938 if ((zsflags.z_kheap_id == KHEAP_ID_SHARED) &&
2939 (strcmp(logname, prefix) == 0)) {
2940 return true;
2941 }
2942
2943 return false;
2944 }
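
/*
 * For illustration, with the rules above a boot-arg such as
 * zlog1=kalloc.type.48 selects the kalloc type zones whose element size is
 * 48 bytes, zlog1=kalloc.96 selects any kalloc-backed zone with 96-byte
 * elements, and zlog1=shared.kalloc selects every zone of the shared kalloc
 * heap.
 */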
2945 #endif
2946
2947 int
2948 track_this_zone(const char *zonename, const char *logname)
2949 {
2950 unsigned int len;
2951 const char *zc = zonename;
2952 const char *lc = logname;
2953
2954 /*
2955 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
2956 */
2957
2958 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2959 /*
2960 * If the current characters don't match, check for a space
2961 * in the zone name and a corresponding period in the log name.
2962 * If that's not there, then the strings don't match.
2963 */
2964
2965 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2966 break;
2967 }
2968
2969 /*
2970 * The strings are equal so far. If we're at the end, then it's a match.
2971 */
2972
2973 if (*zc == '\0') {
2974 return TRUE;
2975 }
2976 }
2977
2978 return FALSE;
2979 }
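
/*
 * The space-to-period equivalence above lets boot-args name zones whose
 * names contain spaces: for example, a (hypothetical) zone named
 * "vm object hash" would be selected by zlog1=vm.object.hash.
 */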
2980
2981 #if DEBUG || DEVELOPMENT
2982
2983 vm_size_t
2984 zone_element_info(void *addr, vm_tag_t * ptag)
2985 {
2986 vm_size_t size = 0;
2987 vm_tag_t tag = VM_KERN_MEMORY_NONE;
2988 struct zone *src_zone;
2989
2990 if (from_zone_map(addr, sizeof(void *))) {
2991 src_zone = zone_by_id(zone_index_from_ptr(addr));
2992 size = zone_elem_inner_size(src_zone);
2993 #if VM_TAG_SIZECLASSES
2994 if (__improbable(src_zone->z_uses_tags)) {
2995 struct zone_page_metadata *meta;
2996 vm_offset_t eidx;
2997 vm_tag_t *slot;
2998
2999 meta = zone_element_resolve(src_zone,
3000 (vm_offset_t)addr, &eidx);
3001 slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
3002 tag = *slot;
3003 }
3004 #endif /* VM_TAG_SIZECLASSES */
3005 }
3006
3007 *ptag = tag;
3008 return size;
3009 }
3010
3011 #endif /* DEBUG || DEVELOPMENT */
3012 #if KASAN_CLASSIC
3013
3014 vm_size_t
3015 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
3016 {
3017 zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
3018
3019 *zonep = zone;
3020 return zone_elem_inner_size(zone);
3021 }
3022
3023 #endif /* KASAN_CLASSIC */
3024 #endif /* !ZALLOC_TEST */
3025 #pragma mark Zone zeroing and early random
3026 #if !ZALLOC_TEST
3027
3028 /*
3029 * Zone zeroing
3030 *
3031 * All allocations from zones are zeroed on free and are additionally
3032 * checked to still be zero on alloc. The check is always on
3033 * on embedded devices. A perf regression was detected on Intel
3034 * because we can't use the vectorized implementation of
3035 * memcmp_zero_ptr_aligned due to cyclic dependencies between
3036 * initialization and allocation. Therefore we only perform the check
3037 * on 20% of the allocations there.
3038 */
3039 #if ZALLOC_ENABLE_ZERO_CHECK
3040 #if defined(__x86_64__)
3041 /*
3042 * Perform zero validation on every 5th allocation
3043 */
3044 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3045 static uint32_t PERCPU_DATA(zzc_decrementer);
3046 #endif /* defined(__x86_64__) */
3047
3048 /*
3049 * Determine if zero validation for allocation should be skipped
3050 */
3051 static bool
3052 zalloc_skip_zero_check(void)
3053 {
3054 #if defined(__x86_64__)
3055 uint32_t *counterp, cnt;
3056
3057 counterp = PERCPU_GET(zzc_decrementer);
3058 cnt = *counterp;
3059 if (__probable(cnt > 0)) {
3060 *counterp = cnt - 1;
3061 return true;
3062 }
3063 *counterp = zzc_rate - 1;
3064 #endif /* defined(__x86_64__) */
3065 return false;
3066 }
3067
3068 __abortlike
3069 static void
3070 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3071 {
3072 uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3073 uint32_t first_offs = ~0u;
3074 uintptr_t first_bits = 0, v;
3075 char buf[1024];
3076 int pos = 0;
3077
3078 buf[0] = '\0';
3079
3080 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3081 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3082 continue;
3083 }
3084 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3085 "%5d: 0x%016lx", o, v);
3086 if (first_offs > o) {
3087 first_offs = o;
3088 first_bits = v;
3089 }
3090 }
3091
3092 (panic)("[%s%s]: element modified after free "
3093 "(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3094 zone_heap_name(z), zone_name(z),
3095 first_offs, first_bits, esize, (void *)elem, buf);
3096 }
3097
3098 static void
3099 zalloc_validate_element(
3100 zone_t zone,
3101 vm_offset_t elem,
3102 vm_size_t size,
3103 zalloc_flags_t flags)
3104 {
3105 if (flags & Z_NOZZC) {
3106 return;
3107 }
3108 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3109 zalloc_uaf_panic(zone, elem, size);
3110 }
3111 if (flags & Z_PCPU) {
3112 for (size_t i = zpercpu_count(); --i > 0;) {
3113 elem += PAGE_SIZE;
3114 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3115 zalloc_uaf_panic(zone, elem, size);
3116 }
3117 }
3118 }
3119 }
3120
3121 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3122
3123 __attribute__((noinline))
3124 static void
3125 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3126 {
3127 #if KASAN_FAKESTACK
3128 /*
3129 * This can cause re-entrancy with kasan fakestacks
3130 */
3131 #pragma unused(zone, cpu, zs)
3132 #else
3133 uint32_t bits;
3134
3135 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3136 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3137
3138 zs->zs_alloc_rr += bits;
3139 zs->zs_alloc_rr %= zone->z_chunk_elems;
3140 #endif
3141 }
3142
3143 #endif /* !ZALLOC_TEST */
3144 #pragma mark Zone Leak Detection
3145 #if !ZALLOC_TEST
3146 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3147
3148 /*
3149 * Zone leak debugging code
3150 *
3151 * When enabled, this code keeps a log to track allocations to a particular
3152 * zone that have not yet been freed.
3153 *
3154 * Examining this log will reveal the source of a zone leak.
3155 *
3156 * The log is allocated only when logging is enabled (it is off by default),
3157 * so there is no effect on the system when it's turned off.
3158 *
3159 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3160 * zone name to log, with n starting at 1.
3161 *
3162 * Leaks debugging utilizes 2 tunables:
3163 * - zlsize (in kB) which describes how much allocated memory the records
3164 * should cover (zones with smaller elements get more records, default is 4M).
3165 *
3166 * - zlfreq (in bytes) which describes a sample rate in cumulative allocation
3167 * size at which automatic leak detection will sample allocations.
3168 * (default is 8k)
3169 *
3170 *
3171 * Zone corruption logging
3172 *
3173 * Logging can also be used to help identify the source of a zone corruption.
3174 *
3175 * First, identify the zone that is being corrupted,
3176 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3177 *
3178 * When -zc is used in conjunction with zlog,
3179 * it changes the logging style to track both allocations and frees to the zone.
3180 *
3181 * When the corruption is detected, examining the log will show you the stack
3182 * traces of the callers who last allocated and freed any particular element in
3183 * the zone.
3184 *
3185 * Corruption debugging logs will have zrecs records
3186 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3187 */
3188
3189 #define ZRECORDS_MAX (256u << 10)
3190 #define ZRECORDS_DEFAULT (16u << 10)
3191 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3192 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3193 static TUNABLE(uint32_t, zlfreq, "zlfreq", 8 * 1024);
3194
3195 __startup_func
3196 static void
3197 zone_leaks_init_zrecs(void)
3198 {
3199 /*
3200 * Don't allow more than ZRECORDS_MAX records,
3201 * even if the user asked for more.
3202 *
3203 * This prevents accidentally hogging too much kernel memory
3204 * and making the system unusable.
3205 */
3206 if (zrecs == 0) {
3207 zrecs = ZRECORDS_DEFAULT *
3208 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3209 }
3210 if (zrecs > ZRECORDS_MAX) {
3211 zrecs = ZRECORDS_MAX;
3212 }
3213 }
3214 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3215
3216 static uint32_t
3217 zone_leaks_record_count(zone_t z)
3218 {
3219 uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3220
3221 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3222 }
3223
3224 static uint32_t
3225 zone_leaks_sample_rate(zone_t z)
3226 {
3227 return zlfreq / zone_elem_inner_size(z);
3228 }
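
/*
 * For illustration, with the default tunables (zlsize == 4 * 1024 kB,
 * zlfreq == 8 * 1024 bytes), a zone with 64-byte elements gets:
 *
 *   zone_leaks_record_count(z) == (4096 << 10) / 64 == 65536 records
 *       (clamped between ZRECORDS_DEFAULT and ZRECORDS_MAX)
 *   zone_leaks_sample_rate(z)  == 8192 / 64 == 128
 *       i.e. roughly one sampled allocation per 8KB allocated (128 elements)
 */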
3229
3230 #if ZALLOC_ENABLE_LOGGING
3231 /* Log allocations and frees to help debug a zone element corruption */
3232 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3233
3234 /*
3235 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3236 */
3237 #define MAX_ZONES_LOG_REQUESTS 10
3238
3239 /**
3240 * @function zone_setup_logging
3241 *
3242 * @abstract
3243 * Optionally sets up a zone for logging.
3244 *
3245 * @discussion
3246 * We recognize the following boot-args:
3247 *
3248 * zlog=<zone_to_log>
3249 * zrecs=<num_records_in_log>
3250 * zlsize=<memory to cover for leaks>
3251 *
3252 * The zlog arg is used to specify the zone name that should be logged,
3253 * and zrecs/zlsize are used to control the size of the log.
3254 */
3255 static void
3256 zone_setup_logging(zone_t z)
3257 {
3258 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3259 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3260 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3261 bool logging_on = false;
3262
3263 /*
3264 * Append kalloc heap name to zone name (if zone is used by kalloc)
3265 */
3266 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3267
3268 /* zlog0 isn't allowed. */
3269 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3270 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3271
3272 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3273 if (track_this_zone(zone_name, zlog_val) ||
3274 track_kalloc_zones(z, zlog_val)) {
3275 logging_on = true;
3276 break;
3277 }
3278 }
3279 }
3280
3281 /*
3282 * Backwards compat. with the old boot-arg used to specify single zone
3283 * logging, i.e. zlog. This needs to happen after the newer zlog<n> checks
3284 * because the "zlog" prefix will match all of the zlog<n>
3285 * boot-args.
3286 */
3287 if (!logging_on &&
3288 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3289 if (track_this_zone(zone_name, zlog_val) ||
3290 track_kalloc_zones(z, zlog_val)) {
3291 logging_on = true;
3292 }
3293 }
3294
3295 /*
3296 * If we want to log a zone, see if we need to allocate buffer space for
3297 * the log.
3298 *
3299 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3300 * we have to defer allocation in that case.
3301 *
3302 * zone_init() will finish the job.
3303 *
3304 * If we want to log one of the VM related zones that's set up early on,
3305 * we will skip allocation of the log until zinit is called again later
3306 * on some other zone.
3307 */
3308 if (logging_on) {
3309 if (corruption_debug_flag) {
3310 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3311 } else {
3312 z->z_btlog = btlog_create(BTLOG_HASH,
3313 zone_leaks_record_count(z), 0);
3314 }
3315 if (z->z_btlog) {
3316 z->z_log_on = true;
3317 printf("zone[%s%s]: logging enabled\n",
3318 zone_heap_name(z), z->z_name);
3319 } else {
3320 printf("zone[%s%s]: failed to enable logging\n",
3321 zone_heap_name(z), z->z_name);
3322 }
3323 }
3324 }
3325
3326 #endif /* ZALLOC_ENABLE_LOGGING */
3327 #if KASAN_TBI
3328 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3329
3330 __startup_func
3331 static void
3332 kasan_tbi_init_zrecs(void)
3333 {
3334 /*
3335 * Don't allow more than ZRECORDS_MAX records,
3336 * even if the user asked for more.
3337 *
3338 * This prevents accidentally hogging too much kernel memory
3339 * and making the system unusable.
3340 */
3341 if (kasan_zrecs == 0) {
3342 kasan_zrecs = ZRECORDS_DEFAULT *
3343 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3344 }
3345 if (kasan_zrecs > ZRECORDS_MAX) {
3346 kasan_zrecs = ZRECORDS_MAX;
3347 }
3348 }
3349 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3350
3351 static void
3352 zone_setup_kasan_logging(zone_t z)
3353 {
3354 if (!z->z_tbi_tag) {
3355 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3356 zone_heap_name(z), z->z_name);
3357 return;
3358 }
3359
3360 z->z_log_on = true;
3361 z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3362 if (!z->z_btlog) {
3363 printf("zone[%s%s]: failed to enable kasan logging\n",
3364 zone_heap_name(z), z->z_name);
3365 }
3366 }
3367
3368 #endif /* KASAN_TBI */
3369 #if CONFIG_ZLEAKS
3370
3371 static thread_call_data_t zone_leaks_callout;
3372
3373 /*
3374 * The zone leak detector, abbreviated 'zleak', keeps track
3375 * of a subset of the currently outstanding allocations
3376 * made by the zone allocator.
3377 *
3378 * Zones that use more than zleak_pages_per_zone_wired_threshold
3379 * pages will get a BTLOG_HASH btlog with sampling to minimize
3380 * perf impact, yet receive statistical data about the backtrace
3381 * that is the most likely to cause the leak.
3382 *
3383 * If the zone falls far enough below the threshold, the log
3384 * is disabled and its backtraces freed. Data can be collected
3385 * from userspace with the zlog(1) command.
3386 */
3387
3388 uint32_t zleak_active;
3389 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3390
3391 /* Size a zone will have before we will collect data on it */
3392 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3393 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3394
3395 static inline bool
3396 zleak_should_enable_for_zone(zone_t z)
3397 {
3398 if (z->z_log_on) {
3399 return false;
3400 }
3401 if (z->z_btlog) {
3402 return false;
3403 }
3404 if (z->z_exhausts) {
3405 return false;
3406 }
3407 if (zone_exhaustible(z)) {
3408 return z->z_wired_cur * 8 >= z->z_wired_max * 7;
3409 }
3410 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3411 }
3412
3413 static inline bool
3414 zleak_should_disable_for_zone(zone_t z)
3415 {
3416 if (z->z_log_on) {
3417 return false;
3418 }
3419 if (!z->z_btlog) {
3420 return false;
3421 }
3422 if (zone_exhaustible(z)) {
3423 return z->z_wired_cur * 8 < z->z_wired_max * 7;
3424 }
3425 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3426 }
3427
3428 static void
3429 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3430 {
3431 btlog_t log;
3432
3433 zone_foreach(z) {
3434 if (zleak_should_disable_for_zone(z)) {
3435 log = z->z_btlog;
3436 z->z_btlog = NULL;
3437 assert(z->z_btlog_disabled == NULL);
3438 btlog_disable(log);
3439 z->z_btlog_disabled = log;
3440 os_atomic_dec(&zleak_active, relaxed);
3441 }
3442
3443 if (zleak_should_enable_for_zone(z)) {
3444 log = z->z_btlog_disabled;
3445 if (log == NULL) {
3446 log = btlog_create(BTLOG_HASH,
3447 zone_leaks_record_count(z),
3448 zone_leaks_sample_rate(z));
3449 } else if (btlog_enable(log) == KERN_SUCCESS) {
3450 z->z_btlog_disabled = NULL;
3451 } else {
3452 log = NULL;
3453 }
3454 os_atomic_store(&z->z_btlog, log, release);
3455 os_atomic_inc(&zleak_active, relaxed);
3456 }
3457 }
3458 }
3459
3460 __startup_func
3461 static void
3462 zleak_init(void)
3463 {
3464 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3465
3466 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3467 zleak_max_zonemap_size / 8);
3468
3469 thread_call_setup_with_options(&zone_leaks_callout,
3470 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3471 THREAD_CALL_OPTIONS_ONCE);
3472 }
3473 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3474
3475 kern_return_t
3476 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3477 {
3478 if (value >= zleak_max_zonemap_size) {
3479 return KERN_INVALID_VALUE;
3480 }
3481
3482 if (arg == &zleak_per_zone_tracking_threshold) {
3483 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3484 zleak_pages_per_zone_wired_threshold = atop(value);
3485 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3486 thread_call_enter(&zone_leaks_callout);
3487 }
3488 return KERN_SUCCESS;
3489 }
3490
3491 return KERN_INVALID_ARGUMENT;
3492 }
3493
3494 static void
3495 panic_display_zleaks(bool has_syms)
3496 {
3497 bool did_header = false;
3498 vm_address_t bt[BTLOG_MAX_DEPTH];
3499 uint32_t len, count;
3500
3501 zone_foreach(z) {
3502 btlog_t log = z->z_btlog;
3503
3504 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3505 continue;
3506 }
3507
3508 count = btlog_guess_top(log, bt, &len);
3509 if (count == 0) {
3510 continue;
3511 }
3512
3513 if (!did_header) {
3514 paniclog_append_noflush("Zone (suspected) leak report:\n");
3515 did_header = true;
3516 }
3517
3518 paniclog_append_noflush(" Zone: %s%s\n",
3519 zone_heap_name(z), zone_name(z));
3520 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3521 (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3522 paniclog_append_noflush(" Size: %ld\n",
3523 (long)zone_size_wired(z));
3524 paniclog_append_noflush(" Top backtrace:\n");
3525 for (uint32_t i = 0; i < len; i++) {
3526 if (has_syms) {
3527 paniclog_append_noflush(" %p ", (void *)bt[i]);
3528 panic_print_symbol_name(bt[i]);
3529 paniclog_append_noflush("\n");
3530 } else {
3531 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3532 }
3533 }
3534
3535 kmod_panic_dump(bt, len);
3536 paniclog_append_noflush("\n");
3537 }
3538 }
3539 #endif /* CONFIG_ZLEAKS */
3540
3541 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3542 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3543
3544 #if !KASAN_TBI
3545 __cold
3546 #endif
3547 static void
3548 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3549 {
3550 btlog_t log = zone->z_btlog;
3551 btref_get_flags_t flags = 0;
3552 btref_t ref;
3553
3554 #if !KASAN_TBI
3555 if (!log || !btlog_sample(log)) {
3556 return;
3557 }
3558 #endif
3559 if (get_preemption_level() || zone_supports_vm(zone)) {
3560 /*
3561 * VM zones can be used by btlog, avoid reentrancy issues.
3562 */
3563 flags = BTREF_GET_NOWAIT;
3564 }
3565
3566 ref = btref_get(fp, flags);
3567 while (count-- > 0) {
3568 if (count) {
3569 btref_retain(ref);
3570 }
3571 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3572 addr += *(vm_offset_t *)addr;
3573 }
3574 }
3575
3576 #define ZALLOC_LOG(zone, addr, count) ({ \
3577 if ((zone)->z_btlog) { \
3578 zalloc_log(zone, addr, count, __builtin_frame_address(0)); \
3579 } \
3580 })
3581
3582 #if !KASAN_TBI
3583 __cold
3584 #endif
3585 static void
3586 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3587 {
3588 btlog_t log = zone->z_btlog;
3589 btref_get_flags_t flags = 0;
3590 btref_t ref;
3591
3592 #if !KASAN_TBI
3593 if (!log) {
3594 return;
3595 }
3596 #endif
3597
3598 /*
3599 * See if we're doing logging on this zone.
3600 *
3601 * There are two styles of logging used depending on
3602 * whether we're trying to catch a leak or corruption.
3603 */
3604 #if !KASAN_TBI
3605 if (btlog_get_type(log) == BTLOG_HASH) {
3606 /*
3607 * We're logging to catch a leak.
3608 *
3609 * Remove any record we might have for this element
3610 * since it's being freed. Note that we may not find it
3611 * if the buffer overflowed and that's OK.
3612 *
3613 * Since the log is of a limited size, old records get
3614 * overwritten if there are more zallocs than zfrees.
3615 */
3616 while (count-- > 0) {
3617 btlog_erase(log, (void *)addr);
3618 addr += *(vm_offset_t *)addr;
3619 }
3620 return;
3621 }
3622 #endif /* !KASAN_TBI */
3623
3624 if (get_preemption_level() || zone_supports_vm(zone)) {
3625 /*
3626 * VM zones can be used by btlog, avoid reentrancy issues.
3627 */
3628 flags = BTREF_GET_NOWAIT;
3629 }
3630
3631 ref = btref_get(fp, flags);
3632 while (count-- > 0) {
3633 if (count) {
3634 btref_retain(ref);
3635 }
3636 btlog_record(log, (void *)addr, ZOP_FREE, ref);
3637 addr += *(vm_offset_t *)addr;
3638 }
3639 }
3640
3641 #define ZFREE_LOG(zone, addr, count) ({ \
3642 if ((zone)->z_btlog) { \
3643 zfree_log(zone, addr, count, __builtin_frame_address(0)); \
3644 } \
3645 })
3646
3647 #else
3648 #define ZALLOC_LOG(...) ((void)0)
3649 #define ZFREE_LOG(...) ((void)0)
3650 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3651 #endif /* !ZALLOC_TEST */
3652 #pragma mark zone (re)fill
3653 #if !ZALLOC_TEST
3654
3655 /*!
3656 * @defgroup Zone Refill
3657 * @{
3658 *
3659 * @brief
3660 * Functions handling the zone refill machinery.
3661 *
3662 * @discussion
3663 * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3664 *
3665 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3666 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3667 *
3668 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3669 *
3670 * - wake up the asynchronous expansion thread call if the caller is not allowed
3671 * to block, or if the reserve becomes depleted.
3672 *
3673 *
3674 * <h2>Synchronous expansion</h2>
3675 *
3676 * This mechanism is actually the only one that may refill a zone, and all the
3677 * other ones funnel through this one eventually.
3678 *
3679 * @c zone_expand_locked() implements the core of the expansion mechanism,
3680 * and will do so while a caller specified predicate is true.
3681 *
3682 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3683 * - one VM privileged thread,
3684 * - one regular thread.
3685 *
3686 * Regular threads that refill will put down their identity in @c z_expander,
3687 * so that priority inversion avoidance can be implemented.
3688 *
3689 * However, VM privileged threads are allowed to use VM page reserves,
3690 * which allows for the system to recover from extreme memory pressure
3691 * situations, allowing for the few allocations that @c zone_gc() or
3692 * killing processes require.
3693 *
3694 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3695 * is set. @c z_expander is not necessarily the identity of this VM privileged
3696 * thread (it is if the VM privileged thread came in first, but wouldn't be, and
3697 * could even be @c THREAD_NULL otherwise).
3698 *
3699 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3700 * spending a whole pointer on priority inheritance for VM privileged threads
3701 * (and other issues related to having two owners), we use the rwlock boost as
3702 * a stop gap to avoid priority inversions.
3703 *
3704 *
3705 * <h2>Chunk wiring policies</h2>
3706 *
3707 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3708 * to try to minimize fragmentation relative to element sizes not aligning with
3709 * a chunk size well. However, this can grow large and be hard to fulfill on
3710 * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3711 * 4k page systems).
3712 *
3713 * This is why, when under memory pressure the system allows chunks to be
3714 * partially populated. The metadata of the first page in the chunk maintains
3715 * the count of actually populated pages.
3716 *
3717 * The metadata for addresses assigned to a zone are found on 4 queues:
3718 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3719 * elements (those can be targeted by @c zone_gc()),
3720 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3721 * used,
3722 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3723 * left,
3724 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3725 * the zone forever, or the first secondary metadata for a chunk whose
3726 * corresponding page is not populated in the chunk.
3727 *
3728 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3729 * queues are preferred.
3730 *
3731 *
3732 * <h2>Asynchronous expansion</h2>
3733 *
3734 * This mechanism allows for refilling zones used mostly with non blocking
3735 * callers. It relies on a thread call (@c zone_expand_callout) which will
3736 * iterate all zones and refill the ones marked with @c z_async_refilling.
3737 *
3738 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3739 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3740 *
3741 *
3742 * <h2>Dealing with zone allocations from the mach VM code</h2>
3743 *
3744 * The implementation of the mach VM itself uses the zone allocator
3745 * for things like the vm_map_entry data structure. In order to prevent
3746 * a recursion problem when adding more pages to a zone, the VM zones
3747 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3748 * or any VM map functions to allocate.
3749 *
3750 * Instead, a really simple coalescing first-fit allocator is used
3751 * for this submap, and no one else than zalloc can allocate from it.
3752 *
3753 * Memory is directly populated which doesn't require allocation of
3754 * VM map entries, and avoids recursion. The cost of this scheme however,
3755 * is that `vm_map_lookup_entry` will not function on those addresses
3756 * (nor any API relying on it).
3757 */
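/*
 * The reserve-level decision described above boils down to the sketch
 * below. It is illustrative only: zone_refill_sketch() is not a real
 * function, and the actual logic is spread across zalloc_ext() and
 * zone_expand_locked().
 */
#if 0
static void
zone_refill_sketch(zone_t z, zalloc_flags_t flags)
{
	/* called with the zone locked, as zalloc_ext() would be */
	if (z->z_elems_free > z->z_elems_rsv / 2) {
		return;
	}
	if (flags & Z_NOWAIT) {
		/* caller cannot block: hand the work to the thread call */
		zone_expand_async_schedule_if_allowed(z);
	} else {
		/* caller may block: refill the zone synchronously */
		zone_expand_locked(z, flags);
	}
}
#endif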
3758
3759 static void zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems);
3760 static void zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd);
3761 static thread_call_data_t zone_expand_callout;
3762
3763 __attribute__((overloadable))
3764 static inline bool
3765 zone_submap_is_sequestered(zone_submap_idx_t idx)
3766 {
3767 return idx != Z_SUBMAP_IDX_DATA;
3768 }
3769
3770 __attribute__((overloadable))
3771 static inline bool
3772 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3773 {
3774 return zone_submap_is_sequestered(zsflags.z_submap_idx);
3775 }
3776
3777 static inline kma_flags_t
3778 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3779 {
3780 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3781
3782 if (zsflags.z_noencrypt) {
3783 kmaflags |= KMA_NOENCRYPT;
3784 }
3785 if (flags & Z_NOPAGEWAIT) {
3786 kmaflags |= KMA_NOPAGEWAIT;
3787 }
3788 if (z->z_permanent || (!z->z_destructible &&
3789 zone_submap_is_sequestered(zsflags))) {
3790 kmaflags |= KMA_PERMANENT;
3791 }
3792 if (zsflags.z_submap_from_end) {
3793 kmaflags |= KMA_LAST_FREE;
3794 }
3795
3796 if (z->z_tbi_tag) {
3797 kmaflags |= KMA_TAG;
3798 }
3799
3800 return kmaflags;
3801 }
3802
3803 static inline void
3804 zone_add_wired_pages(zone_t z, uint32_t pages)
3805 {
3806 os_atomic_add(&zone_pages_wired, pages, relaxed);
3807
3808 #if CONFIG_ZLEAKS
3809 if (__improbable(zleak_should_enable_for_zone(z) &&
3810 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3811 thread_call_enter(&zone_leaks_callout);
3812 }
3813 #else
3814 (void)z;
3815 #endif
3816 }
3817
3818 static inline void
3819 zone_remove_wired_pages(zone_t z, uint32_t pages)
3820 {
3821 os_atomic_sub(&zone_pages_wired, pages, relaxed);
3822
3823 #if CONFIG_ZLEAKS
3824 if (__improbable(zleak_should_disable_for_zone(z) &&
3825 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3826 thread_call_enter(&zone_leaks_callout);
3827 }
3828 #else
3829 (void)z;
3830 #endif
3831 }
3832
3833 #if CONFIG_KERNEL_TAGGING
3834 static inline vm_address_t
3835 zone_tag_element(zone_t zone, vm_offset_t addr, vm_size_t elem_size)
3836 {
3837 vm_offset_t tagged_address;
3838
3839 tagged_address = vm_memtag_assign_tag(addr, elem_size);
3840
3841 vm_memtag_set_tag(tagged_address, elem_size);
3842
3843 if (zone->z_percpu) {
3844 zpercpu_foreach_cpu(index) {
3845 vm_memtag_set_tag(tagged_address + ptoa(index), elem_size);
3846 }
3847 }
3848
3849 return tagged_address;
3850 }
3851
3852 static inline void
3853 zcram_memtag_init(zone_t zone, vm_offset_t base, uint32_t start, uint32_t end)
3854 {
3855 vm_offset_t elem_size = zone_elem_outer_size(zone);
3856 vm_offset_t oob_offs = zone_elem_outer_offs(zone);
3857
3858 for (uint32_t i = start; i < end; i++) {
3859 vm_offset_t elem_addr = base + oob_offs + i * elem_size;
3860
3861 (void)zone_tag_element(zone, elem_addr, elem_size);
3862 }
3863 }
3864 #endif /* CONFIG_KERNEL_TAGGING */
3865
3866 /*!
3867 * @function zcram_and_lock()
3868 *
3869 * @brief
3870 * Prepare some memory for being usable for allocation purposes.
3871 *
3872 * @discussion
3873 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3874 * to be usable in the zone.
3875 *
3876 * This function assumes the metadata is already populated for the range.
3877 *
3878 * Calling this function with @c pg_start being 0 means that the memory
3879 * is either a partial chunk, or a full chunk, that isn't published anywhere
3880 * and the initialization can happen without locks held.
3881 *
3882 * Calling this function with a non zero @c pg_start means that we are extending
3883 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3884 * is already usable and published in the zone, so extending it requires holding
3885 * the zone lock.
3886 *
3887 * @param zone The zone to cram new populated pages into
3888 * @param addr The base address for the chunk(s)
3889 * @param pg_va_new The number of virtual pages newly assigned to the zone
3890 * @param pg_start The first newly populated page relative to @a addr.
3891 * @param pg_end The after-last newly populated page relative to @a addr.
3892 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3893 */
3894 static void
3895 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3896 uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3897 {
3898 zone_id_t zindex = zone_index(zone);
3899 vm_offset_t elem_size = zone_elem_outer_size(zone);
3900 uint32_t free_start = 0, free_end = 0;
3901 uint32_t oob_offs = zone_elem_outer_offs(zone);
3902
3903 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3904 uint32_t chunk_pages = zone->z_chunk_pages;
3905 bool guarded = meta->zm_guarded;
3906
3907 assert(pg_start < pg_end && pg_end <= chunk_pages);
3908
3909 if (pg_start == 0) {
3910 uint16_t chunk_len = (uint16_t)pg_end;
3911 uint16_t secondary_len = ZM_SECONDARY_PAGE;
3912 bool inline_bitmap = false;
3913
3914 if (zone->z_percpu) {
3915 chunk_len = 1;
3916 secondary_len = ZM_SECONDARY_PCPU_PAGE;
3917 assert(pg_end == zpercpu_count());
3918 }
3919 if (!zone->z_permanent && !zone->z_uses_tags) {
3920 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3921 }
3922
3923 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3924
3925 meta[0] = (struct zone_page_metadata){
3926 .zm_index = zindex,
3927 .zm_guarded = guarded,
3928 .zm_inline_bitmap = inline_bitmap,
3929 .zm_chunk_len = chunk_len,
3930 .zm_alloc_size = lock,
3931 };
3932
3933 if (!zone->z_permanent && !inline_bitmap) {
3934 meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3935 zone->z_chunk_elems, zone->z_uses_tags);
3936 }
3937
3938 for (uint16_t i = 1; i < chunk_pages; i++) {
3939 meta[i] = (struct zone_page_metadata){
3940 .zm_index = zindex,
3941 .zm_guarded = guarded,
3942 .zm_inline_bitmap = inline_bitmap,
3943 .zm_chunk_len = secondary_len,
3944 .zm_page_index = (uint8_t)i,
3945 .zm_bitmap = meta[0].zm_bitmap,
3946 .zm_subchunk_len = (uint8_t)(chunk_pages - i),
3947 };
3948 }
3949
3950 if (inline_bitmap) {
3951 zone_meta_bits_init_inline(meta, free_end);
3952 }
3953 } else {
3954 assert(!zone->z_percpu && !zone->z_permanent);
3955
3956 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
3957 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
3958 }
3959
3960 #if CONFIG_KERNEL_TAGGING
3961 if (__probable(zone->z_tbi_tag)) {
3962 zcram_memtag_init(zone, addr, free_start, free_end);
3963 }
3964 #endif /* CONFIG_KERNEL_TAGGING */
3965
3966 #if KASAN_CLASSIC
3967 assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
3968 if (zone->z_permanent) {
3969 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
3970 } else if (zone->z_percpu) {
3971 for (uint32_t i = 0; i < pg_end; i++) {
3972 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
3973 zone_elem_outer_size(zone),
3974 zone_elem_outer_offs(zone),
3975 zone_elem_redzone(zone));
3976 }
3977 } else {
3978 kasan_zmem_add(addr, ptoa(pg_end),
3979 zone_elem_outer_size(zone),
3980 zone_elem_outer_offs(zone),
3981 zone_elem_redzone(zone));
3982 }
3983 #endif /* KASAN_CLASSIC */
3984
3985 /*
3986 * Insert the initialized pages / metadatas into the right lists.
3987 */
3988
3989 zone_lock(zone);
3990 assert(zone->z_self == zone);
3991
3992 if (pg_start != 0) {
3993 assert(meta->zm_chunk_len == pg_start);
3994
3995 zone_meta_bits_merge(meta, free_start, free_end);
3996 meta->zm_chunk_len = (uint16_t)pg_end;
3997
3998 /*
3999 * consume the zone_meta_lock_in_partial()
4000 * done in zone_expand_locked()
4001 */
4002 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
4003 zone_meta_remqueue(zone, meta);
4004 }
4005
4006 if (zone->z_permanent || meta->zm_alloc_size) {
4007 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
4008 } else {
4009 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4010 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4011 }
4012 if (pg_end < chunk_pages) {
4013 /* push any non populated residual VA on z_pageq_va */
4014 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4015 }
4016
4017 zone->z_elems_free += free_end - free_start;
4018 zone->z_elems_avail += free_end - free_start;
4019 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
4020 if (pg_va_new) {
4021 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4022 }
4023 if (zone->z_wired_hwm < zone->z_wired_cur) {
4024 zone->z_wired_hwm = zone->z_wired_cur;
4025 }
4026
4027 #if CONFIG_ZLEAKS
4028 if (__improbable(zleak_should_enable_for_zone(zone) &&
4029 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4030 thread_call_enter(&zone_leaks_callout);
4031 }
4032 #endif /* CONFIG_ZLEAKS */
4033
4034 zone_add_wired_pages(zone, pg_end - pg_start);
4035 }
4036
4037 static void
4038 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4039 {
4040 uint32_t chunk_pages = zone->z_chunk_pages;
4041
4042 assert(pages % chunk_pages == 0);
4043 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4044 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4045 zone_unlock(zone);
4046 }
4047 }
4048
4049 __startup_func
4050 void
4051 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4052 {
4053 uint32_t pages = (uint32_t)atop(size);
4054
4055
4056 assert(from_zone_map(newmem, size));
4057 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4058 assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4059
4060 /*
4061 * The early pages we move at the pmap layer can't be "depopulated"
4062 * because there's no vm_page_t for them.
4063 *
4064 * "Lock" them so that they never hit z_pageq_empty.
4065 */
4066 vm_memtag_bzero((void *)newmem, size);
4067 zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4068 }
4069
4070 /*!
4071 * @function zone_submap_alloc_sequestered_va
4072 *
4073 * @brief
4074 * Allocates VA without using vm_find_space().
4075 *
4076 * @discussion
4077 * Allocate VA quickly without using the slower vm_find_space() for cases
4078 * when the submaps are fully sequestered.
4079 *
4080 * The VM submap is used to implement the VM itself so it is always sequestered,
4081 * as it can't kmem_alloc which needs to always allocate vm entries.
4082 * However, it can use vm_map_enter() which tries to coalesce entries, which
4083 * always works, so the VM map only ever needs 2 entries (one for each end).
4084 *
4085 * The RO submap is similarly always sequestered if it exists (as a non
4086 * sequestered RO submap makes very little sense).
4087 *
4088 * The allocator is a very simple bump-allocator
4089 * that allocates from either end.
4090 */
4091 static kern_return_t
4092 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4093 vm_offset_t *addrp)
4094 {
4095 vm_size_t size = ptoa(pages);
4096 vm_map_t map = zone_submap(zsflags);
4097 vm_map_entry_t first, last;
4098 vm_map_offset_t addr;
4099
4100 vm_map_lock(map);
4101
4102 first = vm_map_first_entry(map);
4103 last = vm_map_last_entry(map);
4104
4105 if (first->vme_end + size > last->vme_start) {
4106 vm_map_unlock(map);
4107 return KERN_NO_SPACE;
4108 }
4109
4110 if (zsflags.z_submap_from_end) {
4111 last->vme_start -= size;
4112 addr = last->vme_start;
4113 VME_OFFSET_SET(last, addr);
4114 } else {
4115 addr = first->vme_end;
4116 first->vme_end += size;
4117 }
4118 map->size += size;
4119
4120 vm_map_unlock(map);
4121
4122 *addrp = addr;
4123 return KERN_SUCCESS;
4124 }
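/*
 * The resulting map only ever holds two entries: the first entry grows
 * upward from the bottom of the submap and the last entry grows
 * downward from the top:
 *
 *	[ first (grows up) --> ... free VA ... <-- (grows down) last ]
 *
 * KERN_NO_SPACE is returned once the two ends would meet. VA handed
 * out this way is never returned to the map, which is acceptable
 * because these submaps are fully sequestered.
 */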
4125
4126 void
4127 zone_fill_initially(zone_t zone, vm_size_t nelems)
4128 {
4129 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4130 kern_return_t kr;
4131 vm_offset_t addr;
4132 uint32_t pages;
4133 zone_security_flags_t zsflags = zone_security_config(zone);
4134
4135 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4136 assert(zone->z_elems_avail == 0);
4137
4138 kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4139 pages = zone_alloc_pages_for_nelems(zone, nelems);
4140 if (zone_submap_is_sequestered(zsflags)) {
4141 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4142 if (kr != KERN_SUCCESS) {
4143 panic("zone_submap_alloc_sequestered_va() "
4144 "of %u pages failed", pages);
4145 }
4146 kernel_memory_populate(addr, ptoa(pages),
4147 kmaflags, VM_KERN_MEMORY_ZONE);
4148 } else {
4149 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4150 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4151 kmaflags, VM_KERN_MEMORY_ZONE);
4152 }
4153
4154 zone_meta_populate(addr, ptoa(pages));
4155 zcram(zone, addr, pages, 0);
4156 }
4157
4158 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4159 __attribute__((noinline))
4160 static void
4161 zone_scramble_va_and_unlock(
4162 zone_t z,
4163 struct zone_page_metadata *meta,
4164 uint32_t runs,
4165 uint32_t pages,
4166 uint32_t chunk_pages,
4167 uint64_t guard_mask)
4168 {
4169 struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];
4170
4171 for (uint32_t run = 0, n = 0; run < runs; run++) {
4172 arr[run] = meta + n;
4173 n += chunk_pages + ((guard_mask >> run) & 1);
4174 }
4175
4176 /*
4177 * Fisher–Yates shuffle, for an array with indices [0, n)
4178 *
4179 * for i from n−1 downto 1 do
4180 * j ← random integer such that 0 ≤ j ≤ i
4181 * exchange a[j] and a[i]
4182 *
4183 * The point here is that early allocations aren't at a fixed
4184 * distance from each other.
4185 */
4186 for (uint32_t i = runs - 1; i > 0; i--) {
4187 uint32_t j = zalloc_random_uniform32(0, i + 1);
4188
4189 meta = arr[j];
4190 arr[j] = arr[i];
4191 arr[i] = meta;
4192 }
4193
4194 zone_lock(z);
4195
4196 for (uint32_t i = 0; i < runs; i++) {
4197 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4198 }
4199 z->z_va_cur += z->z_percpu ? runs : pages;
4200 }
4201
4202 static inline uint32_t
4203 dist_u32(uint32_t a, uint32_t b)
4204 {
4205 return a < b ? b - a : a - b;
4206 }
4207
4208 static uint64_t
4209 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4210 {
4211 for (; n-- > 0; pop--) {
4212 uint32_t bit = zalloc_random_uniform32(0, pop);
4213 uint64_t m = mask;
4214
4215 for (; bit; bit--) {
4216 m &= m - 1;
4217 }
4218
4219 mask ^= 1ull << __builtin_ctzll(m);
4220 }
4221
4222 return mask;
4223 }
4224
4225 /**
4226 * @function zalloc_random_bits
4227 *
4228 * @brief
4229 * Compute a random number with a specified number of bit set in a given width.
4230 *
4231 * @discussion
4232 * This function generates a "uniform" distribution of sets of bits set in
4233 * a given width, with typically less than width/4 calls to random.
4234 *
4235 * @param pop the target number of bits set.
4236 * @param width the number of bits in the random integer to generate.
4237 */
4238 static uint64_t
4239 zalloc_random_bits(uint32_t pop, uint32_t width)
4240 {
4241 uint64_t w_mask = (1ull << width) - 1;
4242 uint64_t mask;
4243 uint32_t cur;
4244
4245 if (3 * width / 4 <= pop) {
4246 mask = w_mask;
4247 cur = width;
4248 } else if (pop <= width / 4) {
4249 mask = 0;
4250 cur = 0;
4251 } else {
4252 /*
4253 * Choosing a random number this way will overwhelmingly
4254 * contain `width / 2` set bits, +/- a few.
4255 */
4256 mask = zalloc_random_mask64(width);
4257 cur = __builtin_popcountll(mask);
4258
4259 if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4260 /*
4261 * If the opposite mask has a closer popcount,
4262 * then start with that one as the seed.
4263 */
4264 cur = width - cur;
4265 mask ^= w_mask;
4266 }
4267 }
4268
4269 if (cur < pop) {
4270 /*
4271 * Setting `pop - cur` bits is really clearing that many from
4272 * the opposite mask.
4273 */
4274 mask ^= w_mask;
4275 mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4276 mask ^= w_mask;
4277 } else if (pop < cur) {
4278 mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4279 }
4280
4281 return mask;
4282 }
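/*
 * For example, zalloc_random_bits(3, 10) returns a 10-bit value with
 * exactly 3 bits set: a random 10-bit mask is drawn first (popcount
 * around 5), whichever of that mask or its complement has a popcount
 * closer to 3 is kept as the seed, and random bits are then cleared
 * (or set, by clearing bits of the complement) until the popcount is
 * exactly 3.
 */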
4283 #endif
4284
4285 static void
4286 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4287 {
4288 zone_security_flags_t zsflags = zone_security_config(z);
4289 struct zone_page_metadata *meta;
4290 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4291 uint32_t chunk_pages = z->z_chunk_pages;
4292 uint32_t runs, pages, guards, rnum;
4293 uint64_t guard_mask = 0;
4294 bool lead_guard = false;
4295 kern_return_t kr;
4296 vm_offset_t addr;
4297
4298 zone_unlock(z);
4299
4300 /*
4301 * A lot of OOB exploitation techniques rely on precise placement
4302 * and interleaving of zone pages. The layout that is sought
4303 * by attackers will be C/P/T types, where:
4304 * - (C)ompromised is the type for which attackers have a bug,
4305 * - (P)adding is used to pad memory,
4306 * - (T)arget is the type that the attacker will attempt to corrupt
4307 * by exploiting (C).
4308 *
4309 * Note that in some cases C==T and P isn't needed.
4310 *
4311 * In order to make those placement games much harder,
4312 * we grow zones by random runs of memory, up to 256k.
4313 * This makes predicting the precise layout of the heap
4314 * quite more complicated.
4315 *
4316 * Note: this function makes a very heavy use of random,
4317 * however, it is mostly limited to sequestered zones,
4318 * and eventually the layout will be fixed,
4319 * and the usage of random vastly reduced.
4320 *
4321 * For non sequestered zones, there's a single call
4322 * to random in order to decide whether we want
4323 * a guard page or not.
4324 */
4325 pages = chunk_pages;
4326 guards = 0;
4327 runs = 1;
4328 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4329 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4330 pages = atop(ZONE_CHUNK_ALLOC_SIZE);
4331 runs = (pages + chunk_pages - 1) / chunk_pages;
4332 runs = zalloc_random_uniform32(1, runs + 1);
4333 pages = runs * chunk_pages;
4334 }
4335 static_assert(ZONE_CHUNK_ALLOC_SIZE / 4096 <= 64,
4336 "make sure that `runs` will never be larger than 64");
4337 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4338
4339 /*
4340 * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4341 * guards might be added after each chunk.
4342 *
4343 * Those guard pages are marked with the ZM_PGZ_GUARD
4344 * magical chunk len, and their zm_oob_offs field
4345 * is used to remember optional shift applied
4346 * to returned elements, in order to right-align-them
4347 * as much as possible.
4348 *
4349 * In an adversarial context, while guard pages
4350 * are extremely effective against linear overflow,
4351 * using a predictable density of guard pages feels like
4352 * a missed opportunity. Which is why we chose to insert
4353 * one guard page for about 32k of memory, and place it
4354 * randomly.
4355 */
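/*
 * For example, with the 32k dense target implied above, a 240k run
 * gets 240k / 32k == 7 guaranteed guard pages, plus an 8th one with
 * probability 16k / 32k, so the expected density remains one guard
 * page per 32k regardless of the run length.
 */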
4356 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4357 if (z->z_percpu) {
4358 /*
4359 * For per-cpu runs, have a 75% chance to have a guard.
4360 */
4361 rnum = zalloc_random_uniform32(0, 4 * 128);
4362 guards = rnum >= 128;
4363 } else if (!zsflags.z_pgz_use_guards && !z->z_pgz_use_guards) {
4364 vm_offset_t rest;
4365
4366 /*
4367 * For types that are less susceptible to have OOBs,
4368 * have a density of 1 guard every 64k, with a uniform
4369 * distribution.
4370 */
4371 rnum = zalloc_random_uniform32(0, ZONE_GUARD_SPARSE);
4372 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE;
4373 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE;
4374 guards += rnum < rest;
4375 } else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) {
4376 /*
4377 * For chunks >= 32k, have a 75% chance of guard pages
4378 * between chunks.
4379 */
4380 rnum = zalloc_random_uniform32(65, 129);
4381 guards = runs * rnum / 128;
4382 } else {
4383 vm_offset_t rest;
4384
4385 /*
4386 * Otherwise, aim at 1 guard every 32k,
4387 * with a uniform distribution.
4388 */
4389 rnum = zalloc_random_uniform32(0, ZONE_GUARD_DENSE);
4390 guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;
4391 rest = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;
4392 guards += rnum < rest;
4393 }
4394 assert3u(guards, <=, runs);
4395
4396 guard_mask = 0;
4397
4398 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4399 uint32_t g = 0;
4400
4401 /*
4402 * Several exploitation strategies rely on a C/T (compromised
4403 * then target types) ordering of pages with a sub-page reach
4404 * from C into T.
4405 *
4406 * We want to reliably thwart such exploitations
4407 * and hence force a guard page between alternating
4408 * memory types.
4409 */
4410 guard_mask |= 1ull << (runs - 1);
4411 g++;
4412
4413 /*
4414 * While we randomize the chunks lengths, an attacker with
4415 * precise timing control can guess when overflows happen,
4416 * and "measure" the runs, which gives them an indication
4417 * of where the next run start offset is.
4418 *
4419 * In order to make this knowledge unusable, add a guard page
4420 * _before_ the new run with a 25% probability, regardless
4421 * of whether we had enough guard pages.
4422 */
4423 if ((rnum & 3) == 0) {
4424 lead_guard = true;
4425 g++;
4426 }
4427 if (guards > g) {
4428 guard_mask |= zalloc_random_bits(guards - g, runs - 1);
4429 } else {
4430 guards = g;
4431 }
4432 } else {
4433 assert3u(runs, ==, 1);
4434 assert3u(guards, <=, 1);
4435 guard_mask = guards << (runs - 1);
4436 }
4437 #else
4438 (void)rnum;
4439 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4440
4441 if (zone_submap_is_sequestered(zsflags)) {
4442 kr = zone_submap_alloc_sequestered_va(zsflags,
4443 pages + guards, &addr);
4444 } else {
4445 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4446 kr = kmem_alloc(zone_submap(zsflags), &addr,
4447 ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE);
4448 }
4449
4450 if (kr != KERN_SUCCESS) {
4451 uint64_t zone_size = 0;
4452 zone_t zone_largest = zone_find_largest(&zone_size);
4453 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4454 "likely due to memory leak in zone [%s%s] "
4455 "(%u%c, %d elements allocated)",
4456 kr, zone_heap_name(z), zone_name(z),
4457 zone_heap_name(zone_largest), zone_name(zone_largest),
4458 mach_vm_size_pretty(zone_size),
4459 mach_vm_size_unit(zone_size),
4460 zone_count_allocated(zone_largest));
4461 }
4462
4463 meta = zone_meta_from_addr(addr);
4464 zone_meta_populate(addr, ptoa(pages + guards));
4465
4466 /*
4467 * Handle the leading guard page if any
4468 */
4469 if (lead_guard) {
4470 meta[0].zm_index = zone_index(z);
4471 meta[0].zm_chunk_len = ZM_PGZ_GUARD;
4472 meta[0].zm_guarded = true;
4473 meta++;
4474 }
4475
4476 for (uint32_t run = 0, n = 0; run < runs; run++) {
4477 bool guarded = (guard_mask >> run) & 1;
4478
4479 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4480 meta[n].zm_index = zone_index(z);
4481 meta[n].zm_guarded = guarded;
4482 }
4483 if (guarded) {
4484 meta[n].zm_index = zone_index(z);
4485 meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4486 n++;
4487 }
4488 }
4489 if (guards) {
4490 os_atomic_add(&zone_guard_pages, guards, relaxed);
4491 }
4492
4493 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4494 if (__improbable(zone_caching_disabled < 0)) {
4495 return zone_scramble_va_and_unlock(z, meta, runs, pages,
4496 chunk_pages, guard_mask);
4497 }
4498 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4499
4500 zone_lock(z);
4501
4502 for (uint32_t run = 0, n = 0; run < runs; run++) {
4503 zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4504 n += chunk_pages + ((guard_mask >> run) & 1);
4505 }
4506 z->z_va_cur += z->z_percpu ? runs : pages;
4507 }
4508
4509 static inline void
4510 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4511 {
4512 #if DEBUG || DEVELOPMENT
4513 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
4514 size, 0, 0, 0);
4515 #else
4516 (void)size;
4517 #endif
4518 }
4519
4520 static inline void
4521 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4522 {
4523 #if DEBUG || DEVELOPMENT
4524 task_t task = current_task_early();
4525 if (pages && task) {
4526 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4527 }
4528 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
4529 pages, 0, 0, 0);
4530 #else
4531 (void)pages;
4532 #endif
4533 }
4534
4535 __attribute__((noinline))
4536 static void
4537 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4538 {
4539 uint64_t wait_start = 0;
4540 long mapped;
4541
4542 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4543
4544 if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4545 return;
4546 }
4547
4548 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4549
4550 /*
4551 * If the zone map is really exhausted, wait on the GC thread,
4552 * donating our priority (which is important because the GC
4553 * thread is at a rather low priority).
4554 */
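/*
 * The timeout of each successive sleep grows triangularly
 * (1ms, 3ms, 6ms, 10ms, ...), and each deadline is computed from
 * wait_start, so wait_ms approximates the total time spent waiting;
 * the panic below fires once that total exceeds zone_exhausted_timeout.
 */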
4555 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4556 uint32_t wait_ms = n * (n + 1) / 2;
4557 uint64_t interval;
4558
4559 if (n == 1) {
4560 wait_start = mach_absolute_time();
4561 } else {
4562 thread_wakeup(VM_PAGEOUT_GC_EVENT);
4563 }
4564 if (zone_exhausted_timeout > 0 &&
4565 wait_ms > zone_exhausted_timeout) {
4566 panic("zone map exhaustion: waited for %dms "
4567 "(pages: %ld, max: %ld, wanted: %d)",
4568 wait_ms, mapped, zone_pages_wired_max, pgs);
4569 }
4570
4571 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4572 &interval);
4573
4574 lck_spin_lock(&zone_exhausted_lock);
4575 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4576 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4577 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4578
4579 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4580 }
4581 }
4582
4583 static bool
4584 zone_expand_wait_for_pages(bool waited)
4585 {
4586 if (waited) {
4587 return false;
4588 }
4589 #if DEBUG || DEVELOPMENT
4590 if (zalloc_simulate_vm_pressure) {
4591 return false;
4592 }
4593 #endif /* DEBUG || DEVELOPMENT */
4594 return !vm_pool_low();
4595 }
4596
4597 static inline void
4598 zone_expand_async_schedule_if_allowed(zone_t zone)
4599 {
4600 if (zone->z_async_refilling || zone->no_callout) {
4601 return;
4602 }
4603
4604 if (zone_exhausted(zone)) {
4605 return;
4606 }
4607
4608 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4609 return;
4610 }
4611
4612 if (!vm_pool_low() || zone_supports_vm(zone)) {
4613 zone->z_async_refilling = true;
4614 thread_call_enter(&zone_expand_callout);
4615 }
4616 }
4617
4618 __attribute__((noinline))
4619 static bool
4620 zalloc_expand_drain_exhausted_caches_locked(zone_t z)
4621 {
4622 struct zone_depot zd;
4623 zone_magazine_t mag = NULL;
4624
4625 if (z->z_depot_size) {
4626 z->z_depot_size = 0;
4627 z->z_depot_cleanup = true;
4628
4629 zone_depot_init(&zd);
4630 zone_depot_trim(z, 0, &zd);
4631
4632 zone_recirc_lock_nopreempt(z);
4633 if (zd.zd_full) {
4634 zone_depot_move_full(&z->z_recirc,
4635 &zd, zd.zd_full, NULL);
4636 }
4637 if (zd.zd_empty) {
4638 zone_depot_move_empty(&z->z_recirc,
4639 &zd, zd.zd_empty, NULL);
4640 }
4641 zone_recirc_unlock_nopreempt(z);
4642 }
4643
4644 zone_recirc_lock_nopreempt(z);
4645 if (z->z_recirc.zd_full) {
4646 mag = zone_depot_pop_head_full(&z->z_recirc, z);
4647 }
4648 zone_recirc_unlock_nopreempt(z);
4649
4650 if (mag) {
4651 zone_reclaim_elements(z, zc_mag_size(), mag->zm_elems);
4652 zone_magazine_free(mag);
4653 }
4654
4655 return mag != NULL;
4656 }
4657
4658 static bool
4659 zalloc_needs_refill(zone_t zone, zalloc_flags_t flags)
4660 {
4661 if (zone->z_elems_free > zone->z_elems_rsv) {
4662 return false;
4663 }
4664 if (!zone_exhausted(zone)) {
4665 return true;
4666 }
4667 if (zone->z_pcpu_cache && zone->z_depot_size) {
4668 if (zalloc_expand_drain_exhausted_caches_locked(zone)) {
4669 return false;
4670 }
4671 }
4672 return (flags & Z_NOFAIL) != 0;
4673 }
4674
4675 static void
4676 zone_wakeup_exhausted_waiters(zone_t z)
4677 {
4678 z->z_exhausted_wait = false;
4679 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, false);
4680 thread_wakeup(&z->z_expander);
4681 }
4682
4683 __attribute__((noinline))
4684 static void
4685 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)
4686 {
4687 if (z->z_pcpu_cache && z->z_depot_size &&
4688 zalloc_expand_drain_exhausted_caches_locked(z)) {
4689 return;
4690 }
4691
4692 if (!z->z_exhausted_wait) {
4693 zone_recirc_lock_nopreempt(z);
4694 z->z_exhausted_wait = true;
4695 zone_recirc_unlock_nopreempt(z);
4696 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, true);
4697 }
4698
4699 assert_wait(&z->z_expander, TH_UNINT);
4700 zone_unlock(z);
4701 thread_block(THREAD_CONTINUE_NULL);
4702 zone_lock(z);
4703 }
4704
4705 static void
4706 zone_expand_locked(zone_t z, zalloc_flags_t flags)
4707 {
4708 zone_security_flags_t zsflags = zone_security_config(z);
4709 struct zone_expand ze = {
4710 .ze_thread = current_thread(),
4711 };
4712
4713 if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4714 ze.ze_thread->options |= TH_OPT_VMPRIV;
4715 ze.ze_clear_priv = true;
4716 }
4717
4718 if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4719 /*
4720 * When the thread is VM privileged,
4721 * vm_page_grab() will call VM_PAGE_WAIT()
4722 * without our knowledge, so we must assume
4723 * it's being called, unfortunately.
4724 *
4725 * In practice it's not a big deal because
4726 * Z_NOPAGEWAIT is not really used on zones
4727 * that VM privileged threads are going to expand.
4728 */
4729 ze.ze_pg_wait = true;
4730 ze.ze_vm_priv = true;
4731 }
4732
4733 for (;;) {
4734 if (!z->z_permanent && !zalloc_needs_refill(z, flags)) {
4735 goto out;
4736 }
4737
4738 if (z->z_expander == NULL) {
4739 z->z_expander = &ze;
4740 break;
4741 }
4742
4743 if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4744 change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4745 ze.ze_next = z->z_expander;
4746 z->z_expander = &ze;
4747 break;
4748 }
4749
4750 if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4751 goto out;
4752 }
4753
4754 z->z_expanding_wait = true;
4755 hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4756 LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4757 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4758 }
4759
4760 do {
4761 struct zone_page_metadata *meta = NULL;
4762 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4763 vm_page_t page_list = NULL;
4764 vm_offset_t addr = 0;
4765 int waited = 0;
4766
4767 if ((flags & Z_NOFAIL) && zone_exhausted(z)) {
4768 __ZONE_EXHAUSTED_AND_WAITING_HARD__(z);
4769 continue; /* reevaluate if we really need it */
4770 }
4771
4772 /*
4773 * While we hold the zone lock, look if there's VA we can:
4774 * - complete from partial pages,
4775 * - reuse from the sequester list.
4776 *
4777 * When the page is being populated we pretend we allocated
4778 * an extra element so that zone_gc() can't attempt to free
4779 * the chunk (as it could become empty while we wait for pages).
4780 */
4781 if (zone_pva_is_null(z->z_pageq_va)) {
4782 zone_allocate_va_locked(z, flags);
4783 }
4784
4785 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4786 addr = zone_meta_to_addr(meta);
4787 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4788 cur_pages = meta->zm_page_index;
4789 meta -= cur_pages;
4790 addr -= ptoa(cur_pages);
4791 zone_meta_lock_in_partial(z, meta, cur_pages);
4792 }
4793 zone_unlock(z);
4794
4795 /*
4796 * And now allocate pages to populate our VA.
4797 */
4798 min_pages = z->z_chunk_pages;
4799 #if !KASAN_CLASSIC
4800 if (!z->z_percpu) {
4801 min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4802 zone_elem_outer_size(z)));
4803 }
4804 #endif /* !KASAN_CLASSIC */
4805
4806 /*
4807 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
4808 * if we're running out of zone memory
4809 */
4810 if (__improbable(zone_map_nearing_exhaustion())) {
4811 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4812 }
4813
4814 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4815
4816 while (pages < z->z_chunk_pages - cur_pages) {
4817 vm_page_t m = vm_page_grab();
4818
4819 if (m) {
4820 pages++;
4821 m->vmp_snext = page_list;
4822 page_list = m;
4823 vm_page_zero_fill(m);
4824 continue;
4825 }
4826
4827 if (pages >= min_pages &&
4828 !zone_expand_wait_for_pages(waited)) {
4829 break;
4830 }
4831
4832 if ((flags & Z_NOPAGEWAIT) == 0) {
4833 /*
4834 * The first time we're about to wait for pages,
4835 * mention that to waiters and wake them all.
4836 *
4837 * Set `ze_pg_wait` in our zone_expand context
4838 * so that waiters who care do not wait again.
4839 */
4840 if (!ze.ze_pg_wait) {
4841 zone_lock(z);
4842 if (z->z_expanding_wait) {
4843 z->z_expanding_wait = false;
4844 wakeup_all_with_inheritor(&z->z_expander,
4845 THREAD_AWAKENED);
4846 }
4847 ze.ze_pg_wait = true;
4848 zone_unlock(z);
4849 }
4850
4851 waited++;
4852 VM_PAGE_WAIT();
4853 continue;
4854 }
4855
4856 /*
4857 * Undo everything and bail out:
4858 *
4859 * - free pages
4860 * - undo the fake allocation if any
4861 * - put the VA back on the VA page queue.
4862 */
4863 vm_page_free_list(page_list, FALSE);
4864 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4865
4866 zone_lock(z);
4867
4868 zone_expand_async_schedule_if_allowed(z);
4869
4870 if (cur_pages) {
4871 zone_meta_unlock_from_partial(z, meta, cur_pages);
4872 }
4873 if (meta) {
4874 zone_meta_queue_push(z, &z->z_pageq_va,
4875 meta + cur_pages);
4876 }
4877 goto page_shortage;
4878 }
4879
4880 vm_object_t object;
4881 object = kernel_object_default;
4882 vm_object_lock(object);
4883 kernel_memory_populate_object_and_unlock(object,
4884 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4885 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4886 (zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY)
4887 ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE);
4888
4889 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4890
4891 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4892
4893 /*
4894 * permanent zones only try once,
4895 * the retry loop is in the caller
4896 */
4897 } while (!z->z_permanent && zalloc_needs_refill(z, flags));
4898
4899 page_shortage:
4900 if (z->z_expander == &ze) {
4901 z->z_expander = ze.ze_next;
4902 } else {
4903 assert(z->z_expander->ze_next == &ze);
4904 z->z_expander->ze_next = NULL;
4905 }
4906 if (z->z_expanding_wait) {
4907 z->z_expanding_wait = false;
4908 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4909 }
4910 out:
4911 if (ze.ze_clear_priv) {
4912 ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4913 }
4914 }
4915
4916 static void
4917 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4918 {
4919 zone_foreach(z) {
4920 if (z->no_callout) {
4921 /* z_async_refilling will never be set */
4922 continue;
4923 }
4924
4925 if (!z->z_async_refilling) {
4926 /*
4927 * avoid locking all zones, because the one(s)
4928 * we're looking for have been set _before_
4929 * thread_call_enter() was called, if we fail
4930 * to observe the bit, it means the thread-call
4931 * has been "dinged" again and we'll notice it then.
4932 */
4933 continue;
4934 }
4935
4936 zone_lock(z);
4937 if (z->z_self && z->z_async_refilling) {
4938 zone_expand_locked(z, Z_WAITOK);
4939 /*
4940 * clearing _after_ we grow is important,
4941 * so that we avoid waking up the thread call
4942 * while we grow and cause it to run a second time.
4943 */
4944 z->z_async_refilling = false;
4945 }
4946 zone_unlock(z);
4947 }
4948 }
4949
4950 #endif /* !ZALLOC_TEST */
4951 #pragma mark zone jetsam integration
4952 #if !ZALLOC_TEST
4953
4954 /*
4955 * We're being very conservative here and picking a value of 95%. We might need to lower this if
4956 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4957 */
4958 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4959
4960 /*
4961 * Threshold above which largest zones should be included in the panic log
4962 */
4963 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4964
4965 /*
4966 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4967 * where X=zone_map_jetsam_limit.
4968 *
4969 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4970 */
4971 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
4972 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
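/*
 * For example, booting with zone_map_jetsam_limit=90 makes
 * zone-map-exhaustion jetsams kick in once 90% of the zone map is
 * wired; zone_map_jetsam_set_limit() below enforces the same 1-100
 * bounds when the limit is changed after boot.
 */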
4973
4974 kern_return_t
4975 zone_map_jetsam_set_limit(uint32_t value)
4976 {
4977 if (value <= 0 || value > 100) {
4978 return KERN_INVALID_VALUE;
4979 }
4980
4981 zone_map_jetsam_limit = value;
4982 os_atomic_store(&zone_pages_jetsam_threshold,
4983 zone_pages_wired_max * value / 100, relaxed);
4984 return KERN_SUCCESS;
4985 }
4986
4987 void
4988 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
4989 {
4990 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4991 *current_size = ptoa_64(phys_pages);
4992 *capacity = ptoa_64(zone_pages_wired_max);
4993 }
4994
4995 void
4996 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
4997 {
4998 zone_t largest_zone = zone_find_largest(zone_size);
4999
5000 /*
5001 * Append kalloc heap name to zone name (if zone is used by kalloc)
5002 */
5003 snprintf(zone_name, zone_name_len, "%s%s",
5004 zone_heap_name(largest_zone), largest_zone->z_name);
5005 }
5006
5007 static bool
5008 zone_map_nearing_threshold(unsigned int threshold)
5009 {
5010 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5011 return phys_pages * 100 > zone_pages_wired_max * threshold;
5012 }
5013
5014 bool
5015 zone_map_nearing_exhaustion(void)
5016 {
5017 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
5018
5019 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
5020 }
5021
5022
5023 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
5024
5025 /*
5026 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
5027 * to walk through the jetsam priority bands and kill processes.
5028 */
5029 static zone_t
5030 kill_process_in_largest_zone(void)
5031 {
5032 pid_t pid = -1;
5033 uint64_t zone_size = 0;
5034 zone_t largest_zone = zone_find_largest(&zone_size);
5035
5036 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
5037 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
5038 ptoa_64(zone_pages_wired_max),
5039 (uint64_t)zone_submaps_approx_size(),
5040 (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
5041 zone_map_jetsam_limit);
5042 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
5043 largest_zone->z_name, (uintptr_t)zone_size);
5044
5045 /*
5046 * We want to make sure we don't call this function from userspace.
5047 * Or we could end up trying to synchronously kill the process
5048 * whose context we're in, causing the system to hang.
5049 */
5050 assert(current_task() == kernel_task);
5051
5052 /*
5053 * If vm_object_zone is the largest, check to see if the number of
5054 * elements in vm_map_entry_zone is comparable.
5055 *
5056 * If so, consider vm_map_entry_zone as the largest. This lets us target
5057 * a specific process to jetsam to quickly recover from the zone map
5058 * bloat.
5059 */
5060 if (largest_zone == vm_object_zone) {
5061 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
5062 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
5063 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
5064 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
5065 largest_zone = vm_map_entry_zone;
5066 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
5067 (uintptr_t)zone_size_wired(largest_zone));
5068 }
5069 }
5070
5071 /* TODO: Extend this to check for the largest process in other zones as well. */
5072 if (largest_zone == vm_map_entry_zone) {
5073 pid = find_largest_process_vm_map_entries();
5074 } else {
5075 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
5076 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
5077 largest_zone->z_name);
5078 }
5079 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
5080 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
5081 }
5082
5083 return largest_zone;
5084 }
5085
5086 #endif /* !ZALLOC_TEST */
5087 #pragma mark probabilistic gzalloc
5088 #if !ZALLOC_TEST
5089 #if CONFIG_PROB_GZALLOC
5090
5091 extern uint32_t random(void);
5092 struct pgz_backtrace {
5093 uint32_t pgz_depth;
5094 int32_t pgz_bt[MAX_ZTRACE_DEPTH];
5095 };
5096
5097 static int32_t PERCPU_DATA(pgz_sample_counter);
5098 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
5099 static uint32_t pgz_uses; /* number of zones using PGZ */
5100 static int32_t pgz_slot_avail;
5101 #if OS_ATOMIC_HAS_LLSC
5102 struct zone_page_metadata *pgz_slot_head;
5103 #else
5104 static struct pgz_slot_head {
5105 uint32_t psh_count;
5106 uint32_t psh_slot;
5107 } pgz_slot_head;
5108 #endif
5109 struct zone_page_metadata *pgz_slot_tail;
5110 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5111
5112 static struct zone_page_metadata *
5113 pgz_meta(uint32_t index)
5114 {
5115 return &zone_info.zi_pgz_meta[2 * index + 1];
5116 }
5117
5118 static struct pgz_backtrace *
5119 pgz_bt(uint32_t slot, bool free)
5120 {
5121 return &pgz_backtraces[2 * slot + free];
5122 }
5123
5124 static void
5125 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5126 {
5127 struct backtrace_control ctl = {
5128 .btc_frame_addr = (uintptr_t)fp,
5129 };
5130
5131 bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5132 (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5133 }
5134
5135 static uint32_t
5136 pgz_slot(vm_offset_t addr)
5137 {
5138 return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5139 }
5140
5141 static vm_offset_t
5142 pgz_addr(uint32_t slot)
5143 {
5144 return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5145 }
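/*
 * PGZ slots occupy every other page of zi_pgz_range: slot N is backed
 * by page 2*N + 1 and the even-numbered pages in between are never
 * mapped, so every slot is bracketed by guard pages. For example,
 * slot 3 lives at min_address + ptoa(7), and pgz_slot() recovers 3
 * from any address within that page.
 */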
5146
5147 static bool
5148 pgz_sample(vm_offset_t addr, vm_size_t esize)
5149 {
5150 int32_t *counterp, cnt;
5151
5152 if (zone_addr_size_crosses_page(addr, esize)) {
5153 return false;
5154 }
5155
5156 /*
5157 * Note: accessing pgz_sample_counter is racy but this is
5158 * kind of acceptable given that this is not
5159 * a security load bearing feature.
5160 */
5161
5162 counterp = PERCPU_GET(pgz_sample_counter);
5163 cnt = *counterp;
5164 if (__probable(cnt > 0)) {
5165 *counterp = cnt - 1;
5166 return false;
5167 }
5168
5169 if (pgz_slot_avail <= 0) {
5170 return false;
5171 }
5172
5173 /*
5174 * zalloc_random_uniform() might block, so when preemption is disabled,
5175 * set the counter to `-1` which will cause the next allocation
5176 * that can block to generate a new random value.
5177 *
5178 * No allocation on this CPU will sample until then.
5179 */
5180 if (get_preemption_level()) {
5181 *counterp = -1;
5182 } else {
5183 *counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5184 }
5185
5186 return cnt == 0;
5187 }
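/*
 * In other words, each eligible allocation decrements a per-CPU
 * counter and only the one driving it to zero is sampled; the counter
 * is then re-armed to a uniform value in [0, 2 * pgz_sample_rate), so
 * on average roughly one allocation in pgz_sample_rate gets protected,
 * as long as free slots remain.
 */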
5188
5189 static inline bool
5190 pgz_slot_alloc(uint32_t *slot)
5191 {
5192 struct zone_page_metadata *m;
5193 uint32_t tries = 100;
5194
5195 disable_preemption();
5196
5197 #if OS_ATOMIC_USE_LLSC
5198 int32_t ov, nv;
5199 os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5200 if (__improbable(ov <= 0)) {
5201 os_atomic_rmw_loop_give_up({
5202 enable_preemption();
5203 return false;
5204 });
5205 }
5206 nv = ov - 1;
5207 });
5208 #else
5209 if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5210 os_atomic_inc(&pgz_slot_avail, relaxed);
5211 enable_preemption();
5212 return false;
5213 }
5214 #endif
5215
5216 again:
5217 if (__improbable(tries-- == 0)) {
5218 /*
5219 * Too much contention,
5220 * extremely unlikely but do not stay stuck.
5221 */
5222 os_atomic_inc(&pgz_slot_avail, relaxed);
5223 enable_preemption();
5224 return false;
5225 }
5226
5227 #if OS_ATOMIC_HAS_LLSC
5228 do {
5229 m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5230 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5231 /*
5232 * Either we are waiting for an enqueuer (unlikely)
5233 * or we are competing with another core and
5234 * are looking at a popped element.
5235 */
5236 os_atomic_clear_exclusive();
5237 goto again;
5238 }
5239 } while (!os_atomic_store_exclusive(&pgz_slot_head,
5240 m->zm_pgz_slot_next, relaxed));
5241 #else
5242 struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5243 struct pgz_slot_head ov, nv;
5244 os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5245 m = &base[ov.psh_slot * 2];
5246 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5247 /*
5248 * Either we are waiting for an enqueuer (unlikely)
5249 * or we are competing with another core and
5250 * are looking at a popped element.
5251 */
5252 os_atomic_rmw_loop_give_up(goto again);
5253 }
5254 nv.psh_count = ov.psh_count + 1;
5255 nv.psh_slot = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5256 });
5257 #endif
5258
5259 enable_preemption();
5260
5261 m->zm_pgz_slot_next = NULL;
5262 *slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5263 return true;
5264 }
5265
5266 static inline bool
5267 pgz_slot_free(uint32_t slot)
5268 {
5269 struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
5270 struct zone_page_metadata *t;
5271
5272 disable_preemption();
5273 t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5274 os_atomic_store(&t->zm_pgz_slot_next, m, release);
5275 os_atomic_inc(&pgz_slot_avail, relaxed);
5276 enable_preemption();
5277
5278 return true;
5279 }
5280
5281 /*!
5282 * @function pgz_protect()
5283 *
5284 * @brief
5285 * Try to protect an allocation with PGZ.
5286 *
5287 * @param zone The zone the allocation was made against.
5288 * @param addr An allocated element address to protect.
5289 * @param fp The caller frame pointer (for the backtrace).
5290 * @returns The new address for the element, or @c addr.
5291 */
5292 __attribute__((noinline))
5293 static vm_offset_t
5294 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5295 {
5296 kern_return_t kr;
5297 uint32_t slot;
5298
5299 if (!pgz_slot_alloc(&slot)) {
5300 return addr;
5301 }
5302
5303 /*
5304 * Try to double-map the page (may fail if Z_NOWAIT).
5305 * we will always find a PA because pgz_init() pre-expanded the pmap.
5306 */
5307 vm_offset_t new_addr = pgz_addr(slot);
5308 pmap_paddr_t pa = kvtophys(trunc_page(addr));
5309
5310 kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5311 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5312 get_preemption_level() ? PMAP_OPTIONS_NOWAIT : 0, NULL);
5313
5314 if (__improbable(kr != KERN_SUCCESS)) {
5315 pgz_slot_free(slot);
5316 return addr;
5317 }
5318
5319 struct zone_page_metadata tmp = {
5320 .zm_chunk_len = ZM_PGZ_ALLOCATED,
5321 .zm_index = zone_index(zone),
5322 };
5323 struct zone_page_metadata *meta = pgz_meta(slot);
5324
5325 os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5326 os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5327 pgz_backtrace(pgz_bt(slot, false), fp);
5328
5329 return new_addr + (addr & PAGE_MASK);
5330 }
5331
5332 /*!
5333 * @function pgz_unprotect()
5334 *
5335 * @brief
5336 * Release a PGZ slot and return the original address of a freed element.
5337 *
5338 * @param addr A PGZ protected element address.
5339 * @param fp The caller frame pointer (for the backtrace).
5340 * @returns The non-protected address for the element
5341 * that was passed to @c pgz_protect().
5342 */
5343 __attribute__((noinline))
5344 static vm_offset_t
5345 pgz_unprotect(vm_offset_t addr, void *fp)
5346 {
5347 struct zone_page_metadata *meta;
5348 struct zone_page_metadata tmp;
5349 uint32_t slot;
5350
5351 slot = pgz_slot(addr);
5352 meta = zone_meta_from_addr(addr);
5353 tmp = *meta;
5354 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5355 goto double_free;
5356 }
5357
5358 pmap_remove(kernel_pmap, vm_memtag_canonicalize_address(trunc_page(addr)),
5359 vm_memtag_canonicalize_address(trunc_page(addr) + PAGE_SIZE));
5360
5361 pgz_backtrace(pgz_bt(slot, true), fp);
5362
5363 tmp.zm_chunk_len = ZM_PGZ_FREE;
5364 tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5365 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5366 goto double_free;
5367 }
5368
5369 pgz_slot_free(slot);
5370 return tmp.zm_pgz_orig_addr;
5371
5372 double_free:
5373 panic_fault_address = addr;
5374 meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5375 panic("probabilistic gzalloc double free: %p", (void *)addr);
5376 }
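
/*
 * Illustrative sketch (not part of the build): the round-trip contract
 * between pgz_protect() and pgz_unprotect() as implemented above.
 * The helper name is hypothetical.
 */
#if 0
static void
pgz_roundtrip_example(zone_t z, vm_offset_t addr)
{
	/* If a slot is available, the element is double-mapped into the PGZ range... */
	vm_offset_t prot = pgz_protect(z, addr, __builtin_frame_address(0));

	/* ...and the page offset is preserved, so field accesses keep working. */
	assert((prot & PAGE_MASK) == (addr & PAGE_MASK));

	/* On free, the original (non-protected) address is handed back. */
	if (prot != addr) {
		assert(pgz_unprotect(prot, __builtin_frame_address(0)) == addr);
	}
}
#endif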
5377
5378 bool
5379 pgz_owned(mach_vm_address_t addr)
5380 {
5381 return mach_vm_range_contains(&zone_info.zi_pgz_range, vm_memtag_canonicalize_address(addr));
5382 }
5383
5384
5385 __attribute__((always_inline))
5386 vm_offset_t
5387 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5388 {
5389 struct zone_page_metadata *meta;
5390
5391 if (__probable(!pgz_owned(addr))) {
5392 return (vm_offset_t)addr;
5393 }
5394
5395 if (zone_addr_size_crosses_page(addr, size)) {
5396 panic("invalid size for PGZ protected address %p:%p",
5397 (void *)addr, (void *)(addr + size));
5398 }
5399
5400 meta = zone_meta_from_addr((vm_offset_t)addr);
5401 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5402 panic_fault_address = (vm_offset_t)addr;
5403 panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5404 }
5405
5406 return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5407 }
5408
5409 __attribute__((always_inline))
5410 vm_offset_t
5411 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5412 {
5413 struct zone_page_metadata *meta;
5414 struct zone_page_metadata tmp;
5415
5416 if (__probable(!pgz_owned(addr))) {
5417 return addr;
5418 }
5419
5420 meta = zone_meta_from_addr(addr);
5421 tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5422
5423 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5424
5425 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5426 return 0;
5427 }
5428
5429 if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5430 return 0;
5431 }
5432
5433 return addr;
5434 }
5435
5436 static void
5437 pgz_zone_init(zone_t z)
5438 {
5439 char zn[MAX_ZONE_NAME];
5440 char zv[MAX_ZONE_NAME];
5441 char key[30];
5442
5443 if (zone_elem_inner_size(z) > PAGE_SIZE) {
5444 return;
5445 }
5446
5447 if (pgz_all) {
5448 os_atomic_inc(&pgz_uses, relaxed);
5449 z->z_pgz_tracked = true;
5450 return;
5451 }
5452
5453 snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5454
5455 for (int i = 1;; i++) {
5456 snprintf(key, sizeof(key), "pgz%d", i);
5457 if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5458 break;
5459 }
5460 if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5461 os_atomic_inc(&pgz_uses, relaxed);
5462 z->z_pgz_tracked = true;
5463 break;
5464 }
5465 }
5466 }
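
/*
 * Illustrative note (an assumption based on the parsing loop above, not a
 * verified list of supported boot-args): zones are opted into PGZ either
 * wholesale (pgz_all) or individually via numbered boot-args matching the
 * "pgz%d" keys, e.g.:
 *
 *     pgz1=data.kalloc.128 pgz2=ipc.ports
 *
 * The zone names shown are hypothetical; the string being matched is the
 * concatenation of zone_heap_name() and zone_name() built in zn[].
 */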
5467
5468 __startup_func
5469 static vm_size_t
5470 pgz_get_size(void)
5471 {
5472 if (pgz_slots == UINT32_MAX) {
5473 /*
5474 * Scale with RAM size: sane_size >> 22, i.e. 256 slots per GiB.
5475 */
5476 pgz_slots = (uint32_t)(sane_size >> 22);
5477 }
5478
5479 /*
5480 * Make sure that the slot allocation scheme works.
5481 * see pgz_slot_alloc() / pgz_slot_free();
5482 */
5483 if (pgz_slots < zpercpu_count() * 4) {
5484 pgz_slots = zpercpu_count() * 4;
5485 }
5486 if (pgz_slots >= UINT16_MAX) {
5487 pgz_slots = UINT16_MAX - 1;
5488 }
5489
5490 /*
5491 * Quarantine is 33% of slots by default, no more than 90%.
5492 */
5493 if (pgz_quarantine == 0) {
5494 pgz_quarantine = pgz_slots / 3;
5495 }
5496 if (pgz_quarantine > pgz_slots * 9 / 10) {
5497 pgz_quarantine = pgz_slots * 9 / 10;
5498 }
5499 pgz_slot_avail = pgz_slots - pgz_quarantine;
5500
5501 return ptoa(2 * pgz_slots + 1);
5502 }
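
/*
 * Worked example of the sizing above (illustrative only, 4K pages assumed):
 * with sane_size = 8 GiB, pgz_slots defaults to 8 GiB >> 22 = 2048 slots,
 * pgz_quarantine = 2048 / 3 = 682, so pgz_slot_avail starts at 1366.
 * The returned reservation is ptoa(2 * 2048 + 1) = 4097 pages (~16 MiB of VA):
 * a guard page before each slot page, plus one trailing guard.
 */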
5503
5504 __startup_func
5505 static void
5506 pgz_init(void)
5507 {
5508 if (!pgz_uses) {
5509 return;
5510 }
5511
5512 if (pgz_sample_rate == 0) {
5513 /*
5514 * If no rate was provided, pick a random one that scales
5515 * with the number of protected zones.
5516 *
5517 * Average two uniform draws (a triangular distribution) to avoid
5518 * having too many really fast sample rates.
5519 */
5520 uint32_t factor = MIN(pgz_uses, 10);
5521 uint32_t max_rate = 1000 * factor;
5522 uint32_t min_rate = 100 * factor;
5523
5524 pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5525 zalloc_random_uniform32(min_rate, max_rate)) / 2;
5526 }
5527
5528 struct mach_vm_range *r = &zone_info.zi_pgz_range;
5529 zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5530 zone_meta_populate(r->min_address, mach_vm_range_size(r));
5531
5532 for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5533 zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5534 }
5535
5536 for (size_t i = 1; i < pgz_slots; i++) {
5537 zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5538 &zone_info.zi_pgz_meta[2 * i + 1];
5539 }
5540 #if OS_ATOMIC_HAS_LLSC
5541 pgz_slot_head = &zone_info.zi_pgz_meta[1];
5542 #endif
5543 pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5544
5545 pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5546 2 * pgz_slots, ZALIGN_PTR);
5547
5548 /*
5549 * Expand the pmap so that pmap_enter_options_addr()
5550 * in pgz_protect() never needs to call pmap_expand().
5551 */
5552 for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5553 (void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5554 VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5555 PMAP_OPTIONS_NOENTER, NULL);
5556 }
5557
5558 /* do this last as this will enable pgz */
5559 percpu_foreach(counter, pgz_sample_counter) {
5560 *counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5561 }
5562 }
5563 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
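
/*
 * Worked example (illustrative only): with pgz_uses = 4 tracked zones,
 * factor = 4, so rates are drawn from [400, 4000). Averaging two uniform
 * draws yields a triangular distribution centered around ~2200, which
 * biases away from the fastest (most expensive) sample rates. Each
 * per-CPU counter is then seeded uniformly in [0, 2 * pgz_sample_rate)
 * so that CPUs do not sample in lockstep.
 */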
5564
5565 static void
5566 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5567 {
5568 struct pgz_backtrace *bt = pgz_bt(slot, free);
5569 const char *what = free ? "Free" : "Allocation";
5570 uintptr_t buf[MAX_ZTRACE_DEPTH];
5571
5572 if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5573 paniclog_append_noflush(" Can't decode %s Backtrace\n", what);
5574 return;
5575 }
5576
5577 backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5578 (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5579
5580 paniclog_append_noflush(" %s Backtrace:\n", what);
5581 for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5582 if (has_syms) {
5583 paniclog_append_noflush(" %p ", (void *)buf[i]);
5584 panic_print_symbol_name(buf[i]);
5585 paniclog_append_noflush("\n");
5586 } else {
5587 paniclog_append_noflush(" %p\n", (void *)buf[i]);
5588 }
5589 }
5590 kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5591 }
5592
5593 static void
5594 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5595 {
5596 struct zone_page_metadata *meta;
5597 vm_offset_t elem, esize;
5598 const char *type;
5599 const char *prob;
5600 uint32_t slot;
5601 zone_t z;
5602
5603 slot = pgz_slot(addr);
5604 meta = pgz_meta(slot);
5605 elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5606
5607 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5608
5609 if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5610 meta->zm_index &&
5611 meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5612 z = &zone_array[meta->zm_index];
5613 } else {
5614 paniclog_append_noflush(" Zone : <unknown>\n");
5615 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5616 paniclog_append_noflush("\n");
5617 return;
5618 }
5619
5620 esize = zone_elem_inner_size(z);
5621 paniclog_append_noflush(" Zone : %s%s\n",
5622 zone_heap_name(z), zone_name(z));
5623 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5624 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
5625 (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5626
5627 if (addr < elem) {
5628 type = "out-of-bounds(underflow) + use-after-free";
5629 prob = "low";
5630 } else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5631 type = "double-free";
5632 prob = "high";
5633 } else if (addr < elem + esize) {
5634 type = "use-after-free";
5635 prob = "high";
5636 } else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5637 type = "out-of-bounds + use-after-free";
5638 prob = "low";
5639 } else {
5640 type = "out-of-bounds";
5641 prob = "high";
5642 }
5643 paniclog_append_noflush(" Kind : %s (%s confidence)\n",
5644 type, prob);
5645 if (addr < elem) {
5646 paniclog_append_noflush(" Access : %d byte(s) before\n",
5647 (uint32_t)(elem - addr) + 1);
5648 } else if (addr < elem + esize) {
5649 paniclog_append_noflush(" Access : %d byte(s) inside\n",
5650 (uint32_t)(addr - elem) + 1);
5651 } else {
5652 paniclog_append_noflush(" Access : %d byte(s) past\n",
5653 (uint32_t)(addr - (elem + esize)) + 1);
5654 }
5655
5656 panic_display_pgz_bt(has_syms, slot, false);
5657 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5658 panic_display_pgz_bt(has_syms, slot, true);
5659 }
5660
5661 paniclog_append_noflush("\n");
5662 }
5663
5664 #endif /* CONFIG_PROB_GZALLOC */
5665 #endif /* !ZALLOC_TEST */
5666 #pragma mark zfree
5667 #if !ZALLOC_TEST
5668
5669 /*!
5670 * @defgroup zfree
5671 * @{
5672 *
5673 * @brief
5674 * The codepath for zone frees.
5675 *
5676 * @discussion
5677 * There are 4 major ways to free memory back to the zone allocator:
5678 * - @c zfree()
5679 * - @c zfree_percpu()
5680 * - @c kfree*()
5681 * - @c zfree_permanent()
5682 *
5683 * While permanent zones have their own allocation scheme, all other codepaths
5684 * will eventually go through the @c zfree_ext() choking point.
5685 */
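
/*
 * Minimal usage sketch for the free paths described above (illustrative,
 * not part of the build; the zone, struct and helper names are hypothetical).
 */
#if 0
struct widget { uint32_t w_id; };          /* hypothetical element type */
ZONE_DEFINE_TYPE(example_zone, "example.widgets", struct widget, ZC_NONE);

static void
widget_release(struct widget *w)
{
	/* zfree() zeroes the element and funnels into zfree_ext(). */
	zfree(example_zone, w);
}
#endif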
5686
5687 __header_always_inline void
5688 zfree_drop(zone_t zone, vm_offset_t addr)
5689 {
5690 vm_offset_t esize = zone_elem_outer_size(zone);
5691 struct zone_page_metadata *meta;
5692 vm_offset_t eidx;
5693
5694 meta = zone_element_resolve(zone, addr, &eidx);
5695
5696 if (!zone_meta_mark_free(meta, eidx)) {
5697 zone_meta_double_free_panic(zone, addr, __func__);
5698 }
5699
5700 vm_offset_t old_size = meta->zm_alloc_size;
5701 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5702 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5703
5704 if (new_size == 0) {
5705 /* whether the page was on the intermediate or all_used queue, move it to free */
5706 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5707 zone->z_wired_empty += meta->zm_chunk_len;
5708 } else if (old_size + esize > max_size) {
5709 /* first free element on page, move from all_used */
5710 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5711 }
5712
5713 if (__improbable(zone->z_exhausted_wait)) {
5714 zone_wakeup_exhausted_waiters(zone);
5715 }
5716 }
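
/*
 * Summary of the requeueing above (descriptive only):
 * - new_size == 0: the chunk no longer holds any allocated element,
 *   so it moves to z_pageq_empty and z_wired_empty grows.
 * - old_size + esize > max_size: this free released the first element of a
 *   previously full chunk, so it moves from z_pageq_full to z_pageq_partial.
 * - otherwise the chunk stays where it is (on z_pageq_partial).
 */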
5717
5718 __attribute__((noinline))
5719 static void
5720 zfree_item(zone_t zone, vm_offset_t addr)
5721 {
5722 /* transfer preemption count to lock */
5723 zone_lock_nopreempt_check_contention(zone);
5724
5725 zfree_drop(zone, addr);
5726 zone->z_elems_free += 1;
5727
5728 zone_unlock(zone);
5729 }
5730
5731 static void
5732 zfree_cached_depot_recirculate(
5733 zone_t zone,
5734 uint32_t depot_max,
5735 zone_cache_t cache)
5736 {
5737 smr_t smr = zone_cache_smr(cache);
5738 smr_seq_t seq;
5739 uint32_t n;
5740
5741 zone_recirc_lock_nopreempt_check_contention(zone);
5742
5743 n = cache->zc_depot.zd_full;
5744 if (n >= depot_max) {
5745 /*
5746 * If SMR is in use, rotate the entire chunk of magazines.
5747 *
5748 * If the head of the recirculation layer is ready to be
5749 * reused, pull them back to refill a little.
5750 */
5751 seq = zone_depot_move_full(&zone->z_recirc,
5752 &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5753
5754 if (smr) {
5755 smr_deferred_advance_commit(smr, seq);
5756 if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5757 zone_depot_move_full(&cache->zc_depot,
5758 &zone->z_recirc, depot_max / 2, NULL);
5759 }
5760 }
5761 }
5762
5763 n = depot_max - cache->zc_depot.zd_full;
5764 if (n > zone->z_recirc.zd_empty) {
5765 n = zone->z_recirc.zd_empty;
5766 }
5767 if (n) {
5768 zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5769 n, zone);
5770 }
5771
5772 zone_recirc_unlock_nopreempt(zone);
5773 }
5774
5775 static zone_cache_t
5776 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5777 {
5778 zone_magazine_t mag = NULL, tmp = NULL;
5779 smr_t smr = zone_cache_smr(cache);
5780 bool wakeup_exhausted = false;
5781
5782 if (zone->z_recirc.zd_empty == 0) {
5783 mag = zone_magazine_alloc(Z_NOWAIT);
5784 }
5785
5786 zone_recirc_lock_nopreempt_check_contention(zone);
5787
5788 if (mag == NULL && zone->z_recirc.zd_empty) {
5789 mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5790 __builtin_assume(mag);
5791 }
5792 if (mag) {
5793 tmp = zone_magazine_replace(cache, mag, true);
5794 if (smr) {
5795 smr_deferred_advance_commit(smr, tmp->zm_seq);
5796 }
5797 if (zone_security_array[zone_index(zone)].z_lifo) {
5798 zone_depot_insert_head_full(&zone->z_recirc, tmp);
5799 } else {
5800 zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5801 }
5802
5803 wakeup_exhausted = zone->z_exhausted_wait;
5804 }
5805
5806 zone_recirc_unlock_nopreempt(zone);
5807
5808 if (__improbable(wakeup_exhausted)) {
5809 zone_lock_nopreempt(zone);
5810 if (zone->z_exhausted_wait) {
5811 zone_wakeup_exhausted_waiters(zone);
5812 }
5813 zone_unlock_nopreempt(zone);
5814 }
5815
5816 return mag ? cache : NULL;
5817 }
5818
5819 __attribute__((noinline))
5820 static zone_cache_t
5821 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5822 {
5823 zone_magazine_t mag = NULL, tmp = NULL;
5824 uint32_t depot_max;
5825
5826 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5827 if (depot_max) {
5828 zone_depot_lock_nopreempt(cache);
5829
5830 if (cache->zc_depot.zd_empty == 0) {
5831 zfree_cached_depot_recirculate(zone, depot_max, cache);
5832 }
5833
5834 if (__probable(cache->zc_depot.zd_empty)) {
5835 mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5836 __builtin_assume(mag);
5837 } else {
5838 mag = zone_magazine_alloc(Z_NOWAIT);
5839 }
5840 if (mag) {
5841 tmp = zone_magazine_replace(cache, mag, true);
5842 zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5843 }
5844
5845 zone_depot_unlock_nopreempt(cache);
5846
5847 return mag ? cache : NULL;
5848 }
5849
5850 return zfree_cached_recirculate(zone, cache);
5851 }
5852
5853 __attribute__((always_inline))
5854 static inline zone_cache_t
5855 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5856 {
5857 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5858
5859 if (__probable(cache->zc_free_cur < zc_mag_size())) {
5860 return cache;
5861 }
5862
5863 if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5864 zone_cache_swap_magazines(cache);
5865 return cache;
5866 }
5867
5868 return zfree_cached_trim(zone, cache);
5869 }
5870
5871 __attribute__((always_inline))
5872 static inline zone_cache_t
5873 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5874 {
5875 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5876 size_t idx = cache->zc_free_cur;
5877
5878 if (__probable(idx + 1 < zc_mag_size())) {
5879 return cache;
5880 }
5881
5882 /*
5883 * When SMR is in use, the bucket is tagged early with
5884 * @c smr_deferred_advance(), which costs a full barrier,
5885 * but performs no store.
5886 *
5887 * When magazines hit the recirculation layer, the advance is committed
5888 * under the recirculation lock (see zfree_cached_recirculate()).
5889 *
5890 * When done this way, the zone contention detection mechanism
5891 * will adjust the size of the per-cpu depots gracefully, which
5892 * mechanically reduces the pace of these commits as usage increases.
5893 */
5894
5895 if (__probable(idx + 1 == zc_mag_size())) {
5896 zone_magazine_t mag;
5897
5898 mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5899 offsetof(struct zone_magazine, zm_elems));
5900 mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5901 return cache;
5902 }
5903
5904 return zfree_cached_trim(zone, cache);
5905 }
5906
5907 __attribute__((always_inline))
5908 static inline vm_offset_t
5909 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5910 {
5911 struct zone_page_metadata *meta;
5912 vm_offset_t offs;
5913
5914 #pragma unused(combined_size)
5915 #if CONFIG_PROB_GZALLOC
5916 if (__improbable(pgz_owned(elem))) {
5917 elem = pgz_unprotect(elem, __builtin_frame_address(0));
5918 }
5919 #endif /* CONFIG_PROB_GZALLOC */
5920
5921 meta = zone_meta_from_addr(elem);
5922 if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5923 zone_invalid_element_panic(zone, elem);
5924 }
5925
5926 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5927 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5928 offs += ptoa(meta->zm_page_index);
5929 }
5930
5931 if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5932 zone_invalid_element_panic(zone, elem);
5933 }
5934
5935 #if VM_TAG_SIZECLASSES
5936 if (__improbable(zone->z_uses_tags)) {
5937 vm_tag_t *slot;
5938
5939 slot = zba_extra_ref_ptr(meta->zm_bitmap,
5940 Z_FAST_QUO(offs, zone->z_quo_magic));
5941 vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5942 -(long)ZFREE_ELEM_SIZE(combined_size));
5943 *slot = VM_KERN_MEMORY_NONE;
5944 }
5945 #endif /* VM_TAG_SIZECLASSES */
5946
5947 #if KASAN_CLASSIC
5948 kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5949 ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5950 zone->z_percpu, __builtin_frame_address(0));
5951 #endif
5952 #if CONFIG_KERNEL_TAGGING
5953 if (__probable(zone->z_tbi_tag)) {
5954 elem = zone_tag_element(zone, elem, ZFREE_ELEM_SIZE(combined_size));
5955 }
5956 #endif /* CONFIG_KERNEL_TAGGING */
5957
5958 return elem;
5959 }
5960
5961 __attribute__((always_inline))
5962 void *
5963 zcache_mark_invalid(zone_t zone, void *elem)
5964 {
5965 vm_size_t esize = zone_elem_inner_size(zone);
5966
5967 ZFREE_LOG(zone, (vm_offset_t)elem, 1);
5968 return (void *)__zcache_mark_invalid(zone, (vm_offset_t)elem, ZFREE_PACK_SIZE(esize, esize));
5969 }
5970
5971 /*
5972 * The function is noinline when zlog can be used so that the backtracing can
5973 * reliably skip the zfree_ext() and zfree_log()
5974 * boring frames.
5975 */
5976 #if ZALLOC_ENABLE_LOGGING
5977 __attribute__((noinline))
5978 #endif /* ZALLOC_ENABLE_LOGGING */
5979 void
5980 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
5981 {
5982 vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
5983 vm_offset_t elem = (vm_offset_t)addr;
5984 int cpu;
5985
5986 DTRACE_VM2(zfree, zone_t, zone, void*, elem);
5987
5988 ZFREE_LOG(zone, elem, 1);
5989 elem = __zcache_mark_invalid(zone, elem, combined_size);
5990
5991 disable_preemption();
5992 cpu = cpu_number();
5993 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
5994
5995 #if KASAN_CLASSIC
5996 if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
5997 struct kasan_quarantine_result kqr;
5998
5999 kqr = kasan_quarantine(elem, esize);
6000 elem = kqr.addr;
6001 zone = kqr.zone;
6002 if (elem == 0) {
6003 return enable_preemption();
6004 }
6005 }
6006 #endif
6007
6008 if (zone->z_pcpu_cache) {
6009 zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
6010
6011 if (__probable(cache)) {
6012 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6013 return enable_preemption();
6014 }
6015 }
6016
6017 return zfree_item(zone, elem);
6018 }
6019
6020 __attribute__((always_inline))
6021 static inline zstack_t
6022 zcache_free_stack_to_cpu(
6023 zone_id_t zid,
6024 zone_cache_t cache,
6025 zstack_t stack,
6026 vm_size_t esize,
6027 zone_cache_ops_t ops,
6028 bool zero)
6029 {
6030 size_t n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
6031 vm_offset_t *p;
6032
6033 stack.z_count -= n;
6034 cache->zc_free_cur += n;
6035 p = cache->zc_free_elems + cache->zc_free_cur;
6036
6037 do {
6038 void *o = zstack_pop_no_delta(&stack);
6039
6040 if (ops) {
6041 o = ops->zc_op_mark_invalid(zid, o);
6042 } else {
6043 if (zero) {
6044 bzero(o, esize);
6045 }
6046 o = (void *)__zcache_mark_invalid(zone_by_id(zid),
6047 (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
6048 }
6049 *--p = (vm_offset_t)o;
6050 } while (--n > 0);
6051
6052 return stack;
6053 }
6054
6055 __attribute__((always_inline))
6056 static inline void
6057 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6058 {
6059 vm_offset_t elem = (vm_offset_t)addr;
6060 zone_cache_t cache;
6061 vm_size_t esize;
6062 zone_t zone = zone_by_id(zid);
6063 int cpu;
6064
6065 ZFREE_LOG(zone, elem, 1);
6066
6067 disable_preemption();
6068 cpu = cpu_number();
6069 esize = zone_elem_inner_size(zone);
6070 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6071 if (!ops) {
6072 addr = (void *)__zcache_mark_invalid(zone, elem,
6073 ZFREE_PACK_SIZE(esize, esize));
6074 }
6075 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6076 if (__probable(cache)) {
6077 if (ops) {
6078 addr = ops->zc_op_mark_invalid(zid, addr);
6079 }
6080 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6081 enable_preemption();
6082 } else if (ops) {
6083 enable_preemption();
6084 os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
6085 ops->zc_op_free(zid, addr);
6086 } else {
6087 zfree_item(zone, elem);
6088 }
6089 }
6090
6091 __attribute__((always_inline))
6092 static inline void
6093 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
6094 {
6095 zone_t zone = zone_by_id(zid);
6096 zone_cache_t cache;
6097 vm_size_t esize;
6098 int cpu;
6099
6100 ZFREE_LOG(zone, stack.z_head, stack.z_count);
6101
6102 disable_preemption();
6103 cpu = cpu_number();
6104 esize = zone_elem_inner_size(zone);
6105 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
6106 stack.z_count * esize;
6107
6108 for (;;) {
6109 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6110 if (__probable(cache)) {
6111 stack = zcache_free_stack_to_cpu(zid, cache,
6112 stack, esize, ops, zero);
6113 enable_preemption();
6114 } else if (ops) {
6115 enable_preemption();
6116 os_atomic_dec(&zone->z_elems_avail, relaxed);
6117 ops->zc_op_free(zid, zstack_pop(&stack));
6118 } else {
6119 vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
6120
6121 if (zero) {
6122 bzero((void *)addr, esize);
6123 }
6124 addr = __zcache_mark_invalid(zone, addr,
6125 ZFREE_PACK_SIZE(esize, esize));
6126 zfree_item(zone, addr);
6127 }
6128
6129 if (stack.z_count == 0) {
6130 break;
6131 }
6132
6133 disable_preemption();
6134 cpu = cpu_number();
6135 }
6136 }
6137
6138 void
6139 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6140 {
6141 __builtin_assume(ops != NULL);
6142 zcache_free_1_ext(zid, addr, ops);
6143 }
6144
6145 void
6146 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6147 {
6148 __builtin_assume(ops != NULL);
6149 zcache_free_n_ext(zid, stack, ops, false);
6150 }
6151
6152 void
6153 (zfree_n)(zone_id_t zid, zstack_t stack)
6154 {
6155 zcache_free_n_ext(zid, stack, NULL, true);
6156 }
6157
6158 void
6159 (zfree_nozero)(zone_id_t zid, void *addr)
6160 {
6161 zcache_free_1_ext(zid, addr, NULL);
6162 }
6163
6164 void
6165 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6166 {
6167 zcache_free_n_ext(zid, stack, NULL, false);
6168 }
6169
6170 void
6171 (zfree)(zone_t zov, void *addr)
6172 {
6173 zone_t zone = zov->z_self;
6174 zone_stats_t zstats = zov->z_stats;
6175 vm_offset_t esize = zone_elem_inner_size(zone);
6176
6177 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6178 assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6179
6180 vm_memtag_bzero(addr, esize);
6181
6182 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6183 }
6184
6185 __attribute__((noinline))
6186 void
6187 zfree_percpu(union zone_or_view zov, void *addr)
6188 {
6189 zone_t zone = zov.zov_view->zv_zone;
6190 zone_stats_t zstats = zov.zov_view->zv_stats;
6191 vm_offset_t esize = zone_elem_inner_size(zone);
6192
6193 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6194 assert(zone->z_percpu);
6195 addr = (void *)__zpcpu_demangle(addr);
6196 zpercpu_foreach_cpu(i) {
6197 vm_memtag_bzero((char *)addr + ptoa(i), esize);
6198 }
6199 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6200 }
6201
6202 void
6203 (zfree_id)(zone_id_t zid, void *addr)
6204 {
6205 (zfree)(&zone_array[zid], addr);
6206 }
6207
6208 void
6209 (zfree_ro)(zone_id_t zid, void *addr)
6210 {
6211 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6212 zone_t zone = zone_by_id(zid);
6213 zone_stats_t zstats = zone->z_stats;
6214 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6215
6216 #if ZSECURITY_CONFIG(READ_ONLY)
6217 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6218 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6219 #else
6220 (void)zid;
6221 bzero(addr, esize);
6222 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6223 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6224 }
6225
6226 __attribute__((noinline))
6227 static void
6228 zfree_item_smr(zone_t zone, vm_offset_t addr)
6229 {
6230 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6231 vm_size_t esize = zone_elem_inner_size(zone);
6232
6233 /*
6234 * This path should be taken extremely rarely:
6235 * it happens only if we failed to allocate an empty bucket.
6236 */
6237 smr_synchronize(zone_cache_smr(cache));
6238
6239 cache->zc_free((void *)addr, esize);
6240 addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6241
6242 zfree_item(zone, addr);
6243 }
6244
6245 void
6246 (zfree_smr)(zone_t zone, void *addr)
6247 {
6248 vm_offset_t elem = (vm_offset_t)addr;
6249 vm_offset_t esize;
6250 zone_cache_t cache;
6251 int cpu;
6252
6253 ZFREE_LOG(zone, elem, 1);
6254
6255 disable_preemption();
6256 cpu = cpu_number();
6257 #if MACH_ASSERT
6258 cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6259 assert(!smr_entered_cpu_noblock(cache->zc_smr, cpu));
6260 #endif
6261 esize = zone_elem_inner_size(zone);
6262 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6263 cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6264 if (__probable(cache)) {
6265 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6266 enable_preemption();
6267 } else {
6268 zfree_item_smr(zone, elem);
6269 }
6270 }
6271
6272 void
6273 (zfree_id_smr)(zone_id_t zid, void *addr)
6274 {
6275 (zfree_smr)(&zone_array[zid], addr);
6276 }
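
/*
 * Minimal usage sketch for the SMR free path (illustrative, not part of the
 * build). The zone, struct and publishing scheme are hypothetical; the point
 * shown is that the element is unpublished before zfree_smr(), which defers
 * reuse until concurrent SMR readers are done.
 */
#if 0
struct widget { uint32_t w_id; };          /* hypothetical */
extern zone_t widget_zone;                 /* hypothetical SMR-enabled zone */
static struct widget *global_widget;       /* read under SMR, hypothetical */

static void
widget_retire(struct widget *w)
{
	/* Unpublish first so new readers cannot find the element... */
	os_atomic_store(&global_widget, NULL, release);
	/* ...then free it; reuse waits for the SMR grace period. */
	zfree_smr(widget_zone, w);
}
#endif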
6277
6278 void
6279 kfree_type_impl_internal(
6280 kalloc_type_view_t kt_view,
6281 void *ptr __unsafe_indexable)
6282 {
6283 zone_t zsig = kt_view->kt_zsig;
6284 zone_t z = kt_view->kt_zv.zv_zone;
6285 struct zone_page_metadata *meta;
6286 zone_id_t zidx_meta;
6287 zone_security_flags_t zsflags_meta;
6288 zone_security_flags_t zsflags_z = zone_security_config(z);
6289 zone_security_flags_t zsflags_zsig;
6290 
6291 if (NULL == ptr) {
6292 return;
6293 }

 /* Only look up the element's metadata once we know ptr is not NULL. */
 meta = zone_meta_from_addr((vm_offset_t) ptr);
 zidx_meta = meta->zm_index;
 zsflags_meta = zone_security_array[zidx_meta];
6294
6295 if ((zsflags_z.z_kheap_id == KHEAP_ID_DATA_BUFFERS) ||
6296 zone_has_index(z, zidx_meta)) {
6297 return (zfree)(&kt_view->kt_zv, ptr);
6298 }
6299 zsflags_zsig = zone_security_config(zsig);
6300 if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
6301 z = zone_array + zidx_meta;
6302 return (zfree)(z, ptr);
6303 }
6304
6305 return (zfree)(kt_view->kt_zshared, ptr);
6306 }
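
/*
 * Summary of the dispatch above (descriptive only):
 * 1. Data-heap views, or pointers whose metadata already names the view's
 *    zone, free straight back to kt_view->kt_zv.
 * 2. Otherwise, if the element's zone shares a signature equivalence class
 *    (z_sig_eq) with the view's signature zone, the element is freed to the
 *    zone recorded in the page metadata.
 * 3. Anything else goes to the view's shared fallback zone, kt_zshared.
 */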
6307
6308 /*! @} */
6309 #endif /* !ZALLOC_TEST */
6310 #pragma mark zalloc
6311 #if !ZALLOC_TEST
6312
6313 /*!
6314 * @defgroup zalloc
6315 * @{
6316 *
6317 * @brief
6318 * The codepath for zone allocations.
6319 *
6320 * @discussion
6321 * There are 4 major ways to allocate memory that end up in the zone allocator:
6322 * - @c zalloc(), @c zalloc_flags(), ...
6323 * - @c zalloc_percpu()
6324 * - @c kalloc*()
6325 * - @c zalloc_permanent()
6326 *
6327 * While permanent zones have their own allocation scheme, all other codepaths
6328 * will eventually go through the @c zalloc_ext() choking point.
6329 *
6330 * @c zalloc_return() is the final function everyone tail calls into,
6331 * which prepares the element for consumption by the caller and deals with
6332 * common treatment (zone logging, tags, kasan, validation, ...).
6333 */
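
/*
 * Minimal usage sketch for the allocation entry points described above
 * (illustrative, not part of the build; the zone and struct are hypothetical).
 */
#if 0
struct widget { uint32_t w_id; };          /* hypothetical */
extern zone_t example_zone;                /* hypothetical */

static struct widget *
widget_alloc(bool can_block)
{
	if (!can_block) {
		/* May return NULL: the caller must handle failure. */
		return zalloc_flags(example_zone, Z_NOWAIT | Z_ZERO);
	}
	/* Z_WAITOK | Z_NOFAIL: blocks until the allocation succeeds. */
	return zalloc_flags(example_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
}
#endif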
6334
6335 /*!
6336 * @function zalloc_import
6337 *
6338 * @brief
6339 * Import @c n elements in the specified array, opposite of @c zfree_drop().
6340 *
6341 * @param zone The zone to import elements from
6342 * @param elems The array to import into
6343 * @param n The number of elements to import. Must be non-zero,
6344 * and no larger than @c zone->z_elems_free.
6345 */
6346 __header_always_inline vm_size_t
6347 zalloc_import(
6348 zone_t zone,
6349 vm_offset_t *elems,
6350 zalloc_flags_t flags,
6351 uint32_t n)
6352 {
6353 vm_offset_t esize = zone_elem_outer_size(zone);
6354 vm_offset_t offs = zone_elem_inner_offs(zone);
6355 zone_stats_t zs;
6356 int cpu = cpu_number();
6357 uint32_t i = 0;
6358
6359 zs = zpercpu_get_cpu(zone->z_stats, cpu);
6360
6361 if (__improbable(zone_caching_disabled < 0)) {
6362 /*
6363 * In the first 10s after boot, mess with
6364 * the scan position in order to make early
6365 * allocation patterns less predictable.
6366 */
6367 zone_early_scramble_rr(zone, cpu, zs);
6368 }
6369
6370 do {
6371 vm_offset_t page, eidx, size = 0;
6372 struct zone_page_metadata *meta;
6373
6374 if (!zone_pva_is_null(zone->z_pageq_partial)) {
6375 meta = zone_pva_to_meta(zone->z_pageq_partial);
6376 page = zone_pva_to_addr(zone->z_pageq_partial);
6377 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6378 meta = zone_pva_to_meta(zone->z_pageq_empty);
6379 page = zone_pva_to_addr(zone->z_pageq_empty);
6380 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6381 } else {
6382 zone_accounting_panic(zone, "z_elems_free corruption");
6383 }
6384
6385 zone_meta_validate(zone, meta, page);
6386
6387 vm_offset_t old_size = meta->zm_alloc_size;
6388 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6389
6390 do {
6391 eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6392 elems[i++] = page + offs + eidx * esize;
6393 size += esize;
6394 } while (i < n && old_size + size + esize <= max_size);
6395
6396 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6397
6398 if (new_size + esize > max_size) {
6399 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6400 } else if (old_size == 0) {
6401 /* remove from free, move to intermediate */
6402 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6403 }
6404 } while (i < n);
6405
6406 n = zone_counter_sub(zone, z_elems_free, n);
6407 if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6408 zone->z_elems_free_min = n;
6409 }
6410
6411 return zone_elem_inner_size(zone);
6412 }
6413
6414 __attribute__((always_inline))
6415 static inline vm_offset_t
6416 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6417 {
6418 #pragma unused(zone, flags)
6419 #if KASAN_CLASSIC || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6420 vm_offset_t esize = zone_elem_inner_size(zone);
6421 #endif
6422
6423 #if CONFIG_KERNEL_TAGGING
6424 if (__probable(zone->z_tbi_tag)) {
6425 /*
6426 * Retrieve the memory tag assigned on free and update the pointer
6427 * metadata.
6428 */
6429 addr = vm_memtag_fixup_ptr(addr);
6430 }
6431 #endif /* CONFIG_KERNEL_TAGGING */
6432
6433 #if VM_TAG_SIZECLASSES
6434 if (__improbable(zone->z_uses_tags)) {
6435 struct zone_page_metadata *meta;
6436 vm_offset_t offs;
6437 vm_tag_t *slot;
6438 vm_tag_t tag;
6439
6440 tag = zalloc_flags_get_tag(flags);
6441 meta = zone_meta_from_addr(addr);
6442 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6443 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6444 offs += ptoa(meta->zm_page_index);
6445 }
6446
6447 slot = zba_extra_ref_ptr(meta->zm_bitmap,
6448 Z_FAST_QUO(offs, zone->z_quo_magic));
6449 *slot = tag;
6450
6451 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6452 (long)esize);
6453 }
6454 #endif /* VM_TAG_SIZECLASSES */
6455
6456 #if CONFIG_PROB_GZALLOC
6457 if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6458 addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6459 }
6460 #endif
6461
6462 #if KASAN_CLASSIC
6463 /*
6464 * KASAN_CLASSIC integration of kalloc heaps is handled by kalloc_ext()
6465 */
6466 if ((flags & Z_SKIP_KASAN) == 0) {
6467 kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6468 (flags & Z_PCPU), __builtin_frame_address(0));
6469 }
6470 #endif /* KASAN_CLASSIC */
6471
6472 return addr;
6473 }
6474
6475 __attribute__((always_inline))
6476 void *
6477 zcache_mark_valid(zone_t zone, void *addr)
6478 {
6479 addr = (void *)__zcache_mark_valid(zone, (vm_offset_t)addr, 0);
6480 ZALLOC_LOG(zone, (vm_offset_t)addr, 1);
6481 return addr;
6482 }
6483
6484 /*!
6485 * @function zalloc_return
6486 *
6487 * @brief
6488 * Performs the tail-end of the work required on allocations before the caller
6489 * uses them.
6490 *
6491 * @discussion
6492 * This function is called without any zone lock held,
6493 * and with preemption restored to the state it had when @c zalloc_ext() was called.
6494 *
6495 * @param zone The zone we're allocating from.
6496 * @param addr The element we just allocated.
6497 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
6498 * @param elem_size The element size for this zone.
6499 */
6500 __attribute__((always_inline))
6501 static struct kalloc_result
6502 zalloc_return(
6503 zone_t zone,
6504 vm_offset_t addr,
6505 zalloc_flags_t flags,
6506 vm_offset_t elem_size)
6507 {
6508 addr = __zcache_mark_valid(zone, addr, flags);
6509 #if ZALLOC_ENABLE_ZERO_CHECK
6510 zalloc_validate_element(zone, addr, elem_size, flags);
6511 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6512 ZALLOC_LOG(zone, addr, 1);
6513
6514 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6515 return (struct kalloc_result){ (void *)addr, elem_size };
6516 }
6517
6518 static vm_size_t
6519 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
6520 {
6521 if (esize <= 512) {
6522 return zone_early_thres_mul * page_size / 4;
6523 } else if (esize < 2048) {
6524 return zone_early_thres_mul * esize * 8;
6525 }
6526 return zone_early_thres_mul * zone->z_chunk_elems * esize;
6527 }
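
/*
 * Worked example of the thresholds above (illustrative only, 16K pages and
 * zone_early_thres_mul = 1 assumed):
 * - esize = 128  (<= 512):  threshold = page_size / 4        = 4 KiB
 * - esize = 1024 (< 2048):  threshold = 1024 * 8             = 8 KiB
 * - esize = 4096:           threshold = z_chunk_elems * 4096 (chunk-sized)
 * Once a stats view has allocated past its threshold, Z_SET_NOTSHARED flips
 * zs_alloc_not_shared for that view (see zalloc_item()).
 */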
6528
6529 __attribute__((noinline))
6530 static struct kalloc_result
6531 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6532 {
6533 vm_offset_t esize, addr;
6534 zone_stats_t zs;
6535
6536 zone_lock_nopreempt_check_contention(zone);
6537
6538 zs = zpercpu_get(zstats);
6539 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6540 if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6541 zone_expand_async_schedule_if_allowed(zone);
6542 } else {
6543 zone_expand_locked(zone, flags);
6544 }
6545 if (__improbable(zone->z_elems_free == 0)) {
6546 zs->zs_alloc_fail++;
6547 zone_unlock(zone);
6548 if (__improbable(flags & Z_NOFAIL)) {
6549 zone_nofail_panic(zone);
6550 }
6551 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6552 return (struct kalloc_result){ };
6553 }
6554 }
6555
6556 esize = zalloc_import(zone, &addr, flags, 1);
6557 zs->zs_mem_allocated += esize;
6558
6559 if (__improbable(!zone_share_always &&
6560 !os_atomic_load(&zs->zs_alloc_not_shared, relaxed))) {
6561 if (flags & Z_SET_NOTSHARED) {
6562 vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
6563
6564 if (zs->zs_mem_allocated >= shared_threshold) {
6565 zpercpu_foreach(zs_cpu, zstats) {
6566 os_atomic_store(&zs_cpu->zs_alloc_not_shared, 1, relaxed);
6567 }
6568 }
6569 }
6570 }
6571 zone_unlock(zone);
6572
6573 return zalloc_return(zone, addr, flags, esize);
6574 }
6575
6576 static void
6577 zalloc_cached_import(
6578 zone_t zone,
6579 zalloc_flags_t flags,
6580 zone_cache_t cache)
6581 {
6582 uint16_t n_elems = zc_mag_size();
6583
6584 zone_lock_nopreempt(zone);
6585
6586 if (__probable(!zone_caching_disabled &&
6587 zone->z_elems_free > zone->z_elems_rsv / 2)) {
6588 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6589 zone_expand_async_schedule_if_allowed(zone);
6590 }
6591 if (zone->z_elems_free < n_elems) {
6592 n_elems = (uint16_t)zone->z_elems_free;
6593 }
6594 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6595 cache->zc_alloc_cur = n_elems;
6596 }
6597
6598 zone_unlock_nopreempt(zone);
6599 }
6600
6601 static void
6602 zalloc_cached_depot_recirculate(
6603 zone_t zone,
6604 uint32_t depot_max,
6605 zone_cache_t cache,
6606 smr_t smr)
6607 {
6608 smr_seq_t seq;
6609 uint32_t n;
6610
6611 zone_recirc_lock_nopreempt_check_contention(zone);
6612
6613 n = cache->zc_depot.zd_empty;
6614 if (n >= depot_max) {
6615 zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6616 n - depot_max / 2, NULL);
6617 }
6618
6619 n = cache->zc_depot.zd_full;
6620 if (smr && n) {
6621 /*
6622 * if SMR is in use, it means smr_poll() failed,
6623 * so rotate the entire chunk of magazines in order
6624 * to let the sequence numbers age.
6625 */
6626 seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6627 n, NULL);
6628 smr_deferred_advance_commit(smr, seq);
6629 }
6630
6631 n = depot_max - cache->zc_depot.zd_empty;
6632 if (n > zone->z_recirc.zd_full) {
6633 n = zone->z_recirc.zd_full;
6634 }
6635
6636 if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6637 zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6638 n, zone);
6639 }
6640
6641 zone_recirc_unlock_nopreempt(zone);
6642 }
6643
6644 static void
6645 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6646 {
6647 zone_smr_free_cb_t zc_free = cache->zc_free;
6648 vm_size_t esize = zone_elem_inner_size(z);
6649
6650 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6651 vm_offset_t elem = mag->zm_elems[i];
6652
6653 zc_free((void *)elem, zone_elem_inner_size(z));
6654 elem = __zcache_mark_invalid(z, elem,
6655 ZFREE_PACK_SIZE(esize, esize));
6656 mag->zm_elems[i] = elem;
6657 }
6658 }
6659
6660 static void
6661 zalloc_cached_recirculate(
6662 zone_t zone,
6663 zone_cache_t cache)
6664 {
6665 zone_magazine_t mag = NULL;
6666
6667 zone_recirc_lock_nopreempt_check_contention(zone);
6668
6669 if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6670 mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6671 if (zone_cache_smr(cache)) {
6672 zalloc_cached_reuse_smr(zone, cache, mag);
6673 }
6674 mag = zone_magazine_replace(cache, mag, false);
6675 zone_depot_insert_head_empty(&zone->z_recirc, mag);
6676 }
6677
6678 zone_recirc_unlock_nopreempt(zone);
6679 }
6680
6681 __attribute__((noinline))
6682 static zone_cache_t
6683 zalloc_cached_prime(
6684 zone_t zone,
6685 zone_cache_ops_t ops,
6686 zalloc_flags_t flags,
6687 zone_cache_t cache)
6688 {
6689 zone_magazine_t mag = NULL;
6690 uint32_t depot_max;
6691 smr_t smr;
6692
6693 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6694 if (depot_max) {
6695 smr = zone_cache_smr(cache);
6696
6697 zone_depot_lock_nopreempt(cache);
6698
6699 if (!zone_depot_poll(&cache->zc_depot, smr)) {
6700 zalloc_cached_depot_recirculate(zone, depot_max, cache,
6701 smr);
6702 }
6703
6704 if (__probable(cache->zc_depot.zd_full)) {
6705 mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6706 if (zone_cache_smr(cache)) {
6707 zalloc_cached_reuse_smr(zone, cache, mag);
6708 }
6709 mag = zone_magazine_replace(cache, mag, false);
6710 zone_depot_insert_head_empty(&cache->zc_depot, mag);
6711 }
6712
6713 zone_depot_unlock_nopreempt(cache);
6714 } else if (zone->z_recirc.zd_full) {
6715 zalloc_cached_recirculate(zone, cache);
6716 }
6717
6718 if (__probable(cache->zc_alloc_cur)) {
6719 return cache;
6720 }
6721
6722 if (ops == NULL) {
6723 zalloc_cached_import(zone, flags, cache);
6724 if (__probable(cache->zc_alloc_cur)) {
6725 return cache;
6726 }
6727 }
6728
6729 return NULL;
6730 }
6731
6732 __attribute__((always_inline))
6733 static inline zone_cache_t
6734 zalloc_cached_get_pcpu_cache(
6735 zone_t zone,
6736 zone_cache_ops_t ops,
6737 int cpu,
6738 zalloc_flags_t flags)
6739 {
6740 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6741
6742 if (__probable(cache->zc_alloc_cur != 0)) {
6743 return cache;
6744 }
6745
6746 if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6747 zone_cache_swap_magazines(cache);
6748 return cache;
6749 }
6750
6751 return zalloc_cached_prime(zone, ops, flags, cache);
6752 }
6753
6754
6755 /*!
6756 * @function zalloc_ext
6757 *
6758 * @brief
6759 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6760 */
6761 struct kalloc_result
6762 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6763 {
6764 /*
6765 * KASan uses zalloc() for fakestack, which can be called anywhere.
6766 * However, we make sure these calls can never block.
6767 */
6768 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6769 #if KASAN_FAKESTACK
6770 zone->z_kasan_fakestacks ||
6771 #endif /* KASAN_FAKESTACK */
6772 ml_get_interrupts_enabled() ||
6773 ml_is_quiescing() ||
6774 debug_mode_active(),
6775 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6776
6777 /*
6778 * Make sure Z_NOFAIL was not obviously misused
6779 */
6780 if (flags & Z_NOFAIL) {
6781 assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6782 }
6783
6784 #if VM_TAG_SIZECLASSES
6785 if (__improbable(zone->z_uses_tags)) {
6786 vm_tag_t tag = zalloc_flags_get_tag(flags);
6787
6788 if (flags & Z_VM_TAG_BT_BIT) {
6789 tag = vm_tag_bt() ?: tag;
6790 }
6791 if (tag != VM_KERN_MEMORY_NONE) {
6792 tag = vm_tag_will_update_zone(tag, zone->z_tags_sizeclass,
6793 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6794 }
6795 if (tag == VM_KERN_MEMORY_NONE) {
6796 zone_security_flags_t zsflags = zone_security_config(zone);
6797
6798 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6799 tag = VM_KERN_MEMORY_KALLOC_DATA;
6800 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6801 zsflags.z_kalloc_type) {
6802 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6803 } else {
6804 tag = VM_KERN_MEMORY_KALLOC;
6805 }
6806 }
6807 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6808 }
6809 #endif /* VM_TAG_SIZECLASSES */
6810
6811 disable_preemption();
6812
6813 #if ZALLOC_ENABLE_ZERO_CHECK
6814 if (zalloc_skip_zero_check()) {
6815 flags |= Z_NOZZC;
6816 }
6817 #endif
6818
6819 if (zone->z_pcpu_cache) {
6820 zone_cache_t cache;
6821 vm_offset_t index, addr, esize;
6822 int cpu = cpu_number();
6823
6824 cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6825 if (__probable(cache)) {
6826 esize = zone_elem_inner_size(zone);
6827 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6828 index = --cache->zc_alloc_cur;
6829 addr = cache->zc_alloc_elems[index];
6830 cache->zc_alloc_elems[index] = 0;
6831 enable_preemption();
6832 return zalloc_return(zone, addr, flags, esize);
6833 }
6834 }
6835
6836 __attribute__((musttail))
6837 return zalloc_item(zone, zstats, flags);
6838 }
6839
6840 __attribute__((always_inline))
6841 static inline zstack_t
6842 zcache_alloc_stack_from_cpu(
6843 zone_id_t zid,
6844 zone_cache_t cache,
6845 zstack_t stack,
6846 uint32_t n,
6847 zone_cache_ops_t ops)
6848 {
6849 vm_offset_t *p;
6850
6851 n = MIN(n, cache->zc_alloc_cur);
6852 p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6853 cache->zc_alloc_cur -= n;
6854 stack.z_count += n;
6855
6856 do {
6857 vm_offset_t e = *--p;
6858
6859 *p = 0;
6860 if (ops) {
6861 e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6862 } else {
6863 e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6864 }
6865 zstack_push_no_delta(&stack, (void *)e);
6866 } while (--n > 0);
6867
6868 return stack;
6869 }
6870
6871 __attribute__((noinline))
6872 static zstack_t
6873 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6874 {
6875 zone_t zone = zone_by_id(zid);
6876 zone_stats_t zstats = zone->z_stats;
6877 int cpu;
6878
6879 count -= stack.z_count;
6880
6881 disable_preemption();
6882 cpu = cpu_number();
6883 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6884 count * zone_elem_inner_size(zone);
6885 zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6886 enable_preemption();
6887
6888 return stack;
6889 }
6890
6891 #define ZCACHE_ALLOC_RETRY ((void *)-1)
6892
6893 __attribute__((noinline))
6894 static void *
6895 zcache_alloc_one(
6896 zone_id_t zid,
6897 zalloc_flags_t flags,
6898 zone_cache_ops_t ops)
6899 {
6900 zone_t zone = zone_by_id(zid);
6901 void *o;
6902
6903 /*
6904 * First try to allocate in rudimentary zones without ever going into
6905 * __ZONE_EXHAUSTED_AND_WAITING_HARD__() by clearing Z_NOFAIL.
6906 */
6907 enable_preemption();
6908 o = ops->zc_op_alloc(zid, flags & ~Z_NOFAIL);
6909 if (__probable(o)) {
6910 os_atomic_inc(&zone->z_elems_avail, relaxed);
6911 } else if (__probable(flags & Z_NOFAIL)) {
6912 zone_cache_t cache;
6913 vm_offset_t index;
6914 int cpu;
6915
6916 zone_lock(zone);
6917
6918 cpu = cpu_number();
6919 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6920 o = ZCACHE_ALLOC_RETRY;
6921 if (__probable(cache)) {
6922 index = --cache->zc_alloc_cur;
6923 o = (void *)cache->zc_alloc_elems[index];
6924 cache->zc_alloc_elems[index] = 0;
6925 o = ops->zc_op_mark_valid(zid, o);
6926 } else if (zone->z_elems_free == 0) {
6927 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone);
6928 }
6929
6930 zone_unlock(zone);
6931 }
6932
6933 return o;
6934 }
6935
6936 __attribute__((always_inline))
6937 static zstack_t
6938 zcache_alloc_n_ext(
6939 zone_id_t zid,
6940 uint32_t count,
6941 zalloc_flags_t flags,
6942 zone_cache_ops_t ops)
6943 {
6944 zstack_t stack = { };
6945 zone_cache_t cache;
6946 zone_t zone;
6947 int cpu;
6948
6949 disable_preemption();
6950 cpu = cpu_number();
6951 zone = zone_by_id(zid);
6952 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6953 count * zone_elem_inner_size(zone);
6954
6955 for (;;) {
6956 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6957 if (__probable(cache)) {
6958 stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6959 count - stack.z_count, ops);
6960 enable_preemption();
6961 } else {
6962 void *o;
6963
6964 if (ops) {
6965 o = zcache_alloc_one(zid, flags, ops);
6966 } else {
6967 o = zalloc_item(zone, zone->z_stats, flags).addr;
6968 }
6969 if (__improbable(o == NULL)) {
6970 return zcache_alloc_fail(zid, stack, count);
6971 }
6972 if (ops == NULL || o != ZCACHE_ALLOC_RETRY) {
6973 zstack_push(&stack, o);
6974 }
6975 }
6976
6977 if (stack.z_count == count) {
6978 break;
6979 }
6980
6981 disable_preemption();
6982 cpu = cpu_number();
6983 }
6984
6985 ZALLOC_LOG(zone, stack.z_head, stack.z_count);
6986
6987 return stack;
6988 }
6989
6990 zstack_t
6991 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
6992 {
6993 return zcache_alloc_n_ext(zid, count, flags, NULL);
6994 }
6995
6996 zstack_t
6997 (zcache_alloc_n)(
6998 zone_id_t zid,
6999 uint32_t count,
7000 zalloc_flags_t flags,
7001 zone_cache_ops_t ops)
7002 {
7003 __builtin_assume(ops != NULL);
7004 return zcache_alloc_n_ext(zid, count, flags, ops);
7005 }
7006
7007 __attribute__((always_inline))
7008 void *
7009 zalloc(zone_t zov)
7010 {
7011 return zalloc_flags(zov, Z_WAITOK);
7012 }
7013
7014 __attribute__((always_inline))
7015 void *
7016 zalloc_noblock(zone_t zov)
7017 {
7018 return zalloc_flags(zov, Z_NOWAIT);
7019 }
7020
7021 void *
7022 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
7023 {
7024 zone_t zone = zov->z_self;
7025 zone_stats_t zstats = zov->z_stats;
7026
7027 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7028 assert(!zone->z_percpu && !zone->z_permanent);
7029 return zalloc_ext(zone, zstats, flags).addr;
7030 }
7031
7032 __attribute__((always_inline))
7033 void *
7034 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
7035 {
7036 return (zalloc_flags)(zone_by_id(zid), flags);
7037 }
7038
7039 void *
7040 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
7041 {
7042 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7043 zone_t zone = zone_by_id(zid);
7044 zone_stats_t zstats = zone->z_stats;
7045 struct kalloc_result kr;
7046
7047 kr = zalloc_ext(zone, zstats, flags);
7048 #if ZSECURITY_CONFIG(READ_ONLY)
7049 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
7050 if (kr.addr) {
7051 zone_require_ro(zid, kr.size, kr.addr);
7052 }
7053 #endif
7054 return kr.addr;
7055 }
7056
7057 #if ZSECURITY_CONFIG(READ_ONLY)
7058
7059 __attribute__((always_inline))
7060 static bool
7061 from_current_stack(vm_offset_t addr, vm_size_t size)
7062 {
7063 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
7064 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
7065
7066 addr = vm_memtag_canonicalize_address(addr);
7067
7068 return (addr >= start) && (addr + size < end);
7069 }
7070
7071 /*
7072 * Check if an address is from const memory, i.e. the TEXT or DATA CONST segments,
7073 * or the SECURITY_READ_ONLY_LATE section.
7074 */
7075 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7076 __attribute__((always_inline))
7077 static bool
7078 from_const_memory(const vm_offset_t addr, vm_size_t size)
7079 {
7080 return rorgn_contains(addr, size, true);
7081 }
7082 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7083 __attribute__((always_inline))
7084 static bool
7085 from_const_memory(const vm_offset_t addr, vm_size_t size)
7086 {
7087 #pragma unused(addr, size)
7088 return true;
7089 }
7090 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7091
7092 __abortlike
7093 static void
7094 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
7095 const vm_offset_t src, vm_size_t src_size)
7096 {
7097 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
7098 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
7099 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7100 extern vm_offset_t rorgn_begin;
7101 extern vm_offset_t rorgn_end;
7102 #else
7103 vm_offset_t const rorgn_begin = 0;
7104 vm_offset_t const rorgn_end = 0;
7105 #endif
7106
7107 if (from_ro_map(src, src_size)) {
7108 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
7109 zone_t dst_zone = &zone_array[zid];
7110 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
7111 " (expected: %s, actual: %s)", (void *)src, elem, src_zone->z_name,
7112 dst_zone->z_name);
7113 }
7114
7115 panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
7116 "current stack (%p - %p) or const memory (phys %p - %p)",
7117 (void *)src, (void*)kvtophys(src),
7118 (void *)zone_info.zi_ro_range.min_address,
7119 (void *)zone_info.zi_ro_range.max_address,
7120 (void *)stack_start, (void *)stack_end,
7121 (void *)rorgn_begin, (void *)rorgn_end);
7122 }
7123
7124 __attribute__((always_inline))
7125 static void
7126 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
7127 const vm_offset_t src, vm_size_t src_size)
7128 {
7129 if (from_current_stack(src, src_size) ||
7130 (from_ro_map(src, src_size) &&
7131 zid == zone_index_from_ptr((void *)src)) ||
7132 from_const_memory(src, src_size)) {
7133 return;
7134 }
7135 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
7136 }
7137
7138 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7139
7140 __attribute__((noinline))
7141 void
7142 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
7143 const void *new_data, vm_size_t new_data_size)
7144 {
7145 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7146
7147 #if ZSECURITY_CONFIG(READ_ONLY)
7148 bool skip_src_check = false;
7149
7150 /*
7151 * The OSEntitlements RO-zone is a little differently treated. For more
7152 * information: rdar://100518485.
7153 */
7154 if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
7155 code_signing_config_t cs_config = 0;
7156
7157 code_signing_configuration(NULL, &cs_config);
7158 if (cs_config & CS_CONFIG_CSM_ENABLED) {
7159 skip_src_check = true;
7160 }
7161 }
7162
7163 if (skip_src_check == false) {
7164 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
7165 new_data_size);
7166 }
7167 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
7168 (vm_offset_t) new_data, new_data_size);
7169 #else
7170 (void)zid;
7171 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
7172 #endif
7173 }
7174
7175 __attribute__((noinline))
7176 uint64_t
7177 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
7178 zro_atomic_op_t op, uint64_t value)
7179 {
7180 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7181
7182 #if ZSECURITY_CONFIG(READ_ONLY)
7183 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
7184 #else
7185 (void)zid;
7186 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
7187 #endif
7188 return value;
7189 }
7190
7191 void
7192 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
7193 {
7194 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7195 #if ZSECURITY_CONFIG(READ_ONLY)
7196 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
7197 #else
7198 (void)zid;
7199 bzero((void *)((uintptr_t)elem + offset), size);
7200 #endif
7201 }
7202
7203 /*
7204 * This function will run in the PPL and needs to be robust
7205 * against an attacker with arbitrary kernel write.
7206 */
7207
7208 #if ZSECURITY_CONFIG(READ_ONLY)
7209
7210 __abortlike
7211 static void
7212 zone_id_require_ro_panic(zone_id_t zid, void *addr)
7213 {
7214 struct zone_size_params p = zone_ro_size_params[zid];
7215 vm_offset_t elem = (vm_offset_t)addr;
7216 uint32_t zindex;
7217 zone_t other;
7218 zone_t zone = &zone_array[zid];
7219
7220 if (!from_ro_map(addr, 1)) {
7221 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7222 }
7223
7224 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7225 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7226 }
7227
7228 zindex = zone_index_from_ptr(addr);
7229 other = &zone_array[zindex];
7230 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7231 panic("zone_require_ro failed: invalid zone index %d "
7232 "(addr: %p, expected: %s%s)", zindex,
7233 addr, zone_heap_name(zone), zone->z_name);
7234 } else {
7235 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7236 "(addr: %p, expected: %s%s)",
7237 zindex, zone_heap_name(other), other->z_name,
7238 addr, zone_heap_name(zone), zone->z_name);
7239 }
7240 }
7241
7242 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7243
7244 __attribute__((always_inline))
7245 void
7246 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7247 {
7248 #if ZSECURITY_CONFIG(READ_ONLY)
7249 struct zone_size_params p = zone_ro_size_params[zid];
7250 vm_offset_t elem = (vm_offset_t)addr;
7251
7252 if (!from_ro_map(addr, 1) ||
7253 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7254 zid != zone_meta_from_addr(elem)->zm_index) {
7255 zone_id_require_ro_panic(zid, addr);
7256 }
7257 #else
7258 #pragma unused(zid, addr)
7259 #endif
7260 }
7261
7262 void *
7263 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7264 {
7265 zone_t zone = zov.zov_view->zv_zone;
7266 zone_stats_t zstats = zov.zov_view->zv_stats;
7267
7268 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7269 assert(zone->z_percpu);
7270 flags |= Z_PCPU;
7271 return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags).addr);
7272 }
7273
7274 static void *
7275 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7276 {
7277 struct zone_page_metadata *page_meta;
7278 vm_offset_t offs, addr;
7279 zone_pva_t pva;
7280
7281 assert(ml_get_interrupts_enabled() ||
7282 ml_is_quiescing() ||
7283 debug_mode_active() ||
7284 startup_phase < STARTUP_SUB_EARLY_BOOT);
7285
7286 size = (size + mask) & ~mask;
7287 assert(size <= PAGE_SIZE);
7288
7289 zone_lock(zone);
7290 assert(zone->z_self == zone);
7291
7292 for (;;) {
7293 pva = zone->z_pageq_partial;
7294 while (!zone_pva_is_null(pva)) {
7295 page_meta = zone_pva_to_meta(pva);
7296 if (page_meta->zm_bump + size <= PAGE_SIZE) {
7297 goto found;
7298 }
7299 pva = page_meta->zm_page_next;
7300 }
7301
7302 zone_expand_locked(zone, Z_WAITOK);
7303 }
7304
7305 found:
7306 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7307 page_meta->zm_bump = (uint16_t)(offs + size);
7308 page_meta->zm_alloc_size += size;
7309 zone->z_elems_free -= size;
7310 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7311
7312 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7313 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7314 }
7315
7316 zone_unlock(zone);
7317
7318 if (zone->z_tbi_tag) {
7319 addr = vm_memtag_fixup_ptr(offs + zone_pva_to_addr(pva));
7320 } else {
7321 addr = offs + zone_pva_to_addr(pva);
7322 }
7323
7324 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7325 return (void *)addr;
7326 }
7327
7328 static void *
7329 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7330 {
7331 vm_offset_t addr;
7332
7333 kernel_memory_allocate(kernel_map, &addr, size, mask,
7334 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7335
7336 return (void *)addr;
7337 }
7338
7339 void *
7340 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7341 {
7342 if (size <= PAGE_SIZE) {
7343 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7344 return _zalloc_permanent(zone, size, mask);
7345 }
7346 return _zalloc_permanent_large(size, mask, tag);
7347 }
7348
7349 void *
7350 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7351 {
7352 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7353 return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
7354 }
7355
7356 /*! @} */
7357 #endif /* !ZALLOC_TEST */
7358 #pragma mark zone GC / trimming
7359 #if !ZALLOC_TEST
7360
7361 static thread_call_data_t zone_trim_callout;
7362 EVENT_DEFINE(ZONE_EXHAUSTED);
7363
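/*
 * zone_reclaim_chunk() detaches a fully free chunk from z_pageq_empty and
 * returns its pages to the system. Unless the zone is being destroyed (and
 * its submap allows VA to be returned), the chunk keeps its VA: the pages
 * are depopulated and the metadata is requeued on z_pageq_va; otherwise
 * both the VA and the pages are freed back to the zone submap.
 *
 * Called with the zone lock held; the lock is dropped and retaken around
 * the expensive depopulate/free operations.
 */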
7364 static void
7365 zone_reclaim_chunk(
7366 zone_t z,
7367 struct zone_page_metadata *meta,
7368 uint32_t free_count)
7369 {
7370 vm_address_t page_addr;
7371 vm_size_t size_to_free;
7372 uint32_t bitmap_ref;
7373 uint32_t page_count;
7374 zone_security_flags_t zsflags = zone_security_config(z);
7375 bool sequester = !z->z_destroyed;
7376 bool oob_guard = false;
7377
7378 if (zone_submap_is_sequestered(zsflags)) {
7379 /*
7380 * If the entire map is sequestered, we can't return the VA.
7381 * It stays pinned to the zone forever.
7382 */
7383 sequester = true;
7384 }
7385
7386 zone_meta_queue_pop(z, &z->z_pageq_empty);
7387
7388 page_addr = zone_meta_to_addr(meta);
7389 page_count = meta->zm_chunk_len;
7390 oob_guard = meta->zm_guarded;
7391
7392 if (meta->zm_alloc_size) {
7393 zone_metadata_corruption(z, meta, "alloc_size");
7394 }
7395 if (z->z_percpu) {
7396 if (page_count != 1) {
7397 zone_metadata_corruption(z, meta, "page_count");
7398 }
7399 size_to_free = ptoa(z->z_chunk_pages);
7400 zone_remove_wired_pages(z, z->z_chunk_pages);
7401 } else {
7402 if (page_count > z->z_chunk_pages) {
7403 zone_metadata_corruption(z, meta, "page_count");
7404 }
7405 if (page_count < z->z_chunk_pages) {
7406 /* Dequeue non-populated VA from z_pageq_va */
7407 zone_meta_remqueue(z, meta + page_count);
7408 }
7409 size_to_free = ptoa(page_count);
7410 zone_remove_wired_pages(z, page_count);
7411 }
7412
7413 zone_counter_sub(z, z_elems_free, free_count);
7414 zone_counter_sub(z, z_elems_avail, free_count);
7415 zone_counter_sub(z, z_wired_empty, page_count);
7416 zone_counter_sub(z, z_wired_cur, page_count);
7417
7418 if (z->z_pcpu_cache == NULL) {
7419 if (z->z_elems_free_min < free_count) {
7420 z->z_elems_free_min = 0;
7421 } else {
7422 z->z_elems_free_min -= free_count;
7423 }
7424 }
7425 if (z->z_elems_free_wma < free_count) {
7426 z->z_elems_free_wma = 0;
7427 } else {
7428 z->z_elems_free_wma -= free_count;
7429 }
7430
7431 bitmap_ref = 0;
7432 if (sequester) {
7433 if (meta->zm_inline_bitmap) {
7434 for (int i = 0; i < meta->zm_chunk_len; i++) {
7435 meta[i].zm_bitmap = 0;
7436 }
7437 } else {
7438 bitmap_ref = meta->zm_bitmap;
7439 meta->zm_bitmap = 0;
7440 }
7441 meta->zm_chunk_len = 0;
7442 } else {
7443 if (!meta->zm_inline_bitmap) {
7444 bitmap_ref = meta->zm_bitmap;
7445 }
7446 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7447 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7448 }
7449
7450 #if CONFIG_ZLEAKS
7451 if (__improbable(zleak_should_disable_for_zone(z) &&
7452 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7453 thread_call_enter(&zone_leaks_callout);
7454 }
7455 #endif /* CONFIG_ZLEAKS */
7456
7457 zone_unlock(z);
7458
7459 if (bitmap_ref) {
7460 zone_bits_free(bitmap_ref);
7461 }
7462
7463 /* Free the pages for metadata and account for them */
7464 #if KASAN_CLASSIC
7465 if (z->z_percpu) {
7466 for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7467 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7468 zone_elem_outer_size(z),
7469 zone_elem_outer_offs(z),
7470 zone_elem_redzone(z));
7471 }
7472 } else {
7473 kasan_zmem_remove(page_addr, size_to_free,
7474 zone_elem_outer_size(z),
7475 zone_elem_outer_offs(z),
7476 zone_elem_redzone(z));
7477 }
7478 #endif /* KASAN_CLASSIC */
7479
7480 if (sequester) {
7481 kernel_memory_depopulate(page_addr, size_to_free,
7482 KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7483 } else {
7484 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7485 kmem_free(zone_submap(zsflags), page_addr,
7486 ptoa(z->z_chunk_pages + oob_guard));
7487 if (oob_guard) {
7488 os_atomic_dec(&zone_guard_pages, relaxed);
7489 }
7490 }
7491
7492 thread_yield_to_preemption();
7493
7494 zone_lock(z);
7495
7496 if (sequester) {
7497 zone_meta_queue_push(z, &z->z_pageq_va, meta);
7498 }
7499 }
7500
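/*
 * Returns the n elements of a magazine to the zone's pages via zfree_drop()
 * and updates the zone's free element count. Expects the zone lock to be
 * held by the caller.
 */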
7501 static void
7502 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7503 {
7504 z_debug_assert(n <= zc_mag_size());
7505
7506 for (uint16_t i = 0; i < n; i++) {
7507 vm_offset_t addr = elems[i];
7508 elems[i] = 0;
7509 zfree_drop(z, addr);
7510 }
7511
7512 z->z_elems_free += n;
7513 }
7514
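/*
 * Same as above, but for zcache-backed zones: each element is marked valid
 * again and handed to the cache's zc_op_free callback instead of being
 * returned to zone pages, and z_elems_avail is decremented accordingly.
 */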
7515 static void
7516 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7517 {
7518 z_debug_assert(n <= zc_mag_size());
7519 zone_cache_ops_t ops = zcache_ops[zid];
7520
7521 for (uint16_t i = 0; i < n; i++) {
7522 vm_offset_t addr = elems[i];
7523 elems[i] = 0;
7524 addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7525 ops->zc_op_free(zid, (void *)addr);
7526 }
7527
7528 os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7529 }
7530
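/*
 * Trims every per-CPU depot of the zone down to roughly `target` magazines
 * (split between its full and empty halves), moving the excess into `zd`
 * so the caller can reclaim them outside of the depot locks.
 */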
7531 static void
7532 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7533 {
7534 zpercpu_foreach(zc, z->z_pcpu_cache) {
7535 zone_depot_lock(zc);
7536
7537 if (zc->zc_depot.zd_full > (target + 1) / 2) {
7538 uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7539 zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7540 }
7541
7542 if (zc->zc_depot.zd_empty > target / 2) {
7543 uint32_t n = zc->zc_depot.zd_empty - target / 2;
7544 zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7545 }
7546
7547 zone_depot_unlock(zc);
7548 }
7549 }
7550
7551 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7552 ZONE_RECLAIM_TRIM,
7553 ZONE_RECLAIM_DRAIN,
7554 ZONE_RECLAIM_DESTROY,
7555 });
7556
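/*
 * Shrinks the per-CPU caches: trims the per-CPU depots when a cleanup was
 * requested (or when draining/destroying), and, when destroying, also
 * flushes the per-CPU alloc/free magazines back into the zone and resets
 * the recirculation statistics.
 */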
7557 static void
7558 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7559 {
7560 uint32_t depot_max = 0;
7561 bool cleanup = mode != ZONE_RECLAIM_TRIM;
7562
7563 if (z->z_depot_cleanup) {
7564 z->z_depot_cleanup = false;
7565 depot_max = z->z_depot_size;
7566 cleanup = true;
7567 }
7568
7569 if (cleanup) {
7570 zone_depot_trim(z, depot_max, zd);
7571 }
7572
7573 if (mode == ZONE_RECLAIM_DESTROY) {
7574 zpercpu_foreach(zc, z->z_pcpu_cache) {
7575 zone_reclaim_elements(z, zc->zc_alloc_cur,
7576 zc->zc_alloc_elems);
7577 zone_reclaim_elements(z, zc->zc_free_cur,
7578 zc->zc_free_elems);
7579 zc->zc_alloc_cur = zc->zc_free_cur = 0;
7580 }
7581
7582 z->z_recirc_empty_min = 0;
7583 z->z_recirc_empty_wma = 0;
7584 z->z_recirc_full_min = 0;
7585 z->z_recirc_full_wma = 0;
7586 z->z_recirc_cont_cur = 0;
7587 z->z_recirc_cont_wma = 0;
7588 }
7589 }
7590
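/*
 * Takes the whole recirculation depot away from the zone into `zd` and
 * resets its working-set statistics.
 */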
7591 static void
7592 zone_reclaim_recirc_drain(zone_t z, struct zone_depot *zd)
7593 {
7594 assert(zd->zd_empty == 0);
7595 assert(zd->zd_full == 0);
7596
7597 zone_recirc_lock_nopreempt(z);
7598
7599 *zd = z->z_recirc;
7600 if (zd->zd_full == 0) {
7601 zd->zd_tail = &zd->zd_head;
7602 }
7603 zone_depot_init(&z->z_recirc);
7604 z->z_recirc_empty_min = 0;
7605 z->z_recirc_empty_wma = 0;
7606 z->z_recirc_full_min = 0;
7607 z->z_recirc_full_wma = 0;
7608
7609 zone_recirc_unlock_nopreempt(z);
7610 }
7611
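/*
 * Moves the portion of the recirculation depot that the working-set
 * estimate (min / weighted moving average) considers excess into `zd`.
 * Works in batches of zc_free_batch_size() magazines, dropping the zone
 * lock between batches so preemption isn't held off for too long.
 */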
7612 static void
7613 zone_reclaim_recirc_trim(zone_t z, struct zone_depot *zd)
7614 {
7615 for (;;) {
7616 uint32_t budget = zc_free_batch_size();
7617 uint32_t count;
7618 bool done = true;
7619
7620 zone_recirc_lock_nopreempt(z);
7621 count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7622 z->z_recirc_empty_min);
7623 assert(count <= z->z_recirc.zd_empty);
7624
7625 if (count > budget) {
7626 count = budget;
7627 done = false;
7628 }
7629 if (count) {
7630 budget -= count;
7631 zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7632 z->z_recirc_empty_min -= count;
7633 z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7634 }
7635
7636 count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT,
7637 z->z_recirc_full_min);
7638 assert(count <= z->z_recirc.zd_full);
7639
7640 if (count > budget) {
7641 count = budget;
7642 done = false;
7643 }
7644 if (count) {
7645 zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7646 z->z_recirc_full_min -= count;
7647 z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7648 }
7649
7650 zone_recirc_unlock_nopreempt(z);
7651
7652 if (done) {
7653 return;
7654 }
7655
7656 /*
7657 * If the number of magazines to reclaim is too large,
7658 * we might be keeping preemption disabled for too long.
7659 *
7660 * Drop and retake the lock to allow for preemption to occur.
7661 */
7662 zone_unlock(z);
7663 zone_lock(z);
7664 }
7665 }
7666
7667 /*!
7668 * @function zone_reclaim
7669 *
7670 * @brief
7671 * Drains or trims the zone.
7672 *
7673 * @discussion
7674 * Draining the zone frees all of its elements.
7675 *
7676 * Trimming the zone tries to respect the working set size, and avoids draining
7677 * the depot when it's not necessary.
7678 *
7679 * @param z The zone to reclaim from
7680 * @param mode The purpose of this reclaim.
7681 */
7682 static void
7683 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7684 {
7685 struct zone_depot zd;
7686
7687 zone_depot_init(&zd);
7688
7689 zone_lock(z);
7690
7691 if (mode == ZONE_RECLAIM_DESTROY) {
7692 if (!z->z_destructible || z->z_elems_rsv) {
7693 panic("zdestroy: Zone %s%s isn't destructible",
7694 zone_heap_name(z), z->z_name);
7695 }
7696
7697 if (!z->z_self || z->z_expander ||
7698 z->z_async_refilling || z->z_expanding_wait) {
7699 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7700 zone_heap_name(z), z->z_name);
7701 }
7702
7703 #if !KASAN_CLASSIC
7704 /*
7705 * Unset the valid bit. We'll hit an assert failure on further
7706 * operations on this zone, until zinit() is called again.
7707 *
7708 * Leave the zone valid for KASan as we will see zfree's on
7709 * quarantined free elements even after the zone is destroyed.
7710 */
7711 z->z_self = NULL;
7712 #endif
7713 z->z_destroyed = true;
7714 } else if (z->z_destroyed) {
7715 return zone_unlock(z);
7716 } else if (zone_count_free(z) <= z->z_elems_rsv) {
7717 /* If the zone is under its reserve level, leave it alone. */
7718 return zone_unlock(z);
7719 }
7720
7721 if (z->z_pcpu_cache) {
7722 zone_magazine_t mag;
7723 uint32_t freed = 0;
7724
7725 /*
7726 * This is all done with the zone lock held on purpose.
7727 * The work here is O(ncpu), which should still be short.
7728 *
7729 * We need to keep the lock held until we have reclaimed
7730 * at least a few magazines, otherwise if the zone has no
7731 * free elements outside of the depot, a thread performing
7732 * a concurrent allocation could try to grow the zone
7733 * while we're trying to drain it.
7734 */
7735 if (mode == ZONE_RECLAIM_TRIM) {
7736 zone_reclaim_recirc_trim(z, &zd);
7737 } else {
7738 zone_reclaim_recirc_drain(z, &zd);
7739 }
7740 zone_reclaim_pcpu(z, mode, &zd);
7741
7742 if (z->z_chunk_elems) {
7743 zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7744 smr_t smr = zone_cache_smr(cache);
7745
7746 while (zd.zd_full) {
7747 mag = zone_depot_pop_head_full(&zd, NULL);
7748 if (smr) {
7749 smr_wait(smr, mag->zm_seq);
7750 zalloc_cached_reuse_smr(z, cache, mag);
7751 freed += zc_mag_size();
7752 }
7753 zone_reclaim_elements(z, zc_mag_size(),
7754 mag->zm_elems);
7755 zone_depot_insert_head_empty(&zd, mag);
7756
7757 freed += zc_mag_size();
7758 if (freed >= zc_free_batch_size()) {
7759 zone_unlock(z);
7760 zone_magazine_free_list(&zd);
7761 thread_yield_to_preemption();
7762 zone_lock(z);
7763 freed = 0;
7764 }
7765 }
7766 } else {
7767 zone_id_t zid = zone_index(z);
7768
7769 zone_unlock(z);
7770
7771 assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7772
7773 while (zd.zd_full) {
7774 mag = zone_depot_pop_head_full(&zd, NULL);
7775 zcache_reclaim_elements(zid, zc_mag_size(),
7776 mag->zm_elems);
7777 zone_magazine_free(mag);
7778 }
7779
7780 goto cleanup;
7781 }
7782 }
7783
7784 while (!zone_pva_is_null(z->z_pageq_empty)) {
7785 struct zone_page_metadata *meta;
7786 uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7787
7788 if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7789 limit = MAX(limit, z->z_elems_free -
7790 MIN(z->z_elems_free_min, z->z_elems_free_wma));
7791 }
7792
7793 meta = zone_pva_to_meta(z->z_pageq_empty);
7794 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7795
7796 if (zone_count_free(z) - count < limit) {
7797 break;
7798 }
7799
7800 zone_reclaim_chunk(z, meta, count);
7801 }
7802
7803 zone_unlock(z);
7804
7805 cleanup:
7806 zone_magazine_free_list(&zd);
7807 }
7808
7809 void
7810 zone_drain(zone_t zone)
7811 {
7812 current_thread()->options |= TH_OPT_ZONE_PRIV;
7813 lck_mtx_lock(&zone_gc_lock);
7814 zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7815 lck_mtx_unlock(&zone_gc_lock);
7816 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7817 }
7818
7819 void
7820 zcache_drain(zone_id_t zid)
7821 {
7822 zone_drain(zone_by_id(zid));
7823 }
7824
7825 static void
7826 zone_reclaim_all(zone_reclaim_mode_t mode)
7827 {
7828 /*
7829 * Start with zcaches, so that they flow into the regular zones.
7830 *
7831 * Then the zones whose submap VA is sequestered, since depopulating
7832 * pages does not need to allocate vm map entries for holes,
7833 * which gives memory back to the system faster.
7834 */
7835 for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7836 zone_t z = zone_by_id(zid);
7837
7838 if (z->z_self && z->z_chunk_elems == 0) {
7839 zone_reclaim(z, mode);
7840 }
7841 }
7842 zone_index_foreach(zid) {
7843 zone_t z = zone_by_id(zid);
7844
7845 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7846 continue;
7847 }
7848 if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7849 z->collectable) {
7850 zone_reclaim(z, mode);
7851 }
7852 }
7853
7854 zone_index_foreach(zid) {
7855 zone_t z = zone_by_id(zid);
7856
7857 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7858 continue;
7859 }
7860 if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7861 z->collectable) {
7862 zone_reclaim(z, mode);
7863 }
7864 }
7865
7866 zone_reclaim(zc_magazine_zone, mode);
7867 }
7868
7869 void
7870 zone_userspace_reboot_checks(void)
7871 {
7872 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7873 if (label_zone_size != 0) {
7874 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7875 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7876 }
7877 }
7878
7879 void
7880 zone_gc(zone_gc_level_t level)
7881 {
7882 zone_reclaim_mode_t mode;
7883 zone_t largest_zone = NULL;
7884
7885 switch (level) {
7886 case ZONE_GC_TRIM:
7887 mode = ZONE_RECLAIM_TRIM;
7888 break;
7889 case ZONE_GC_DRAIN:
7890 mode = ZONE_RECLAIM_DRAIN;
7891 break;
7892 case ZONE_GC_JETSAM:
7893 largest_zone = kill_process_in_largest_zone();
7894 mode = ZONE_RECLAIM_TRIM;
7895 break;
7896 }
7897
7898 current_thread()->options |= TH_OPT_ZONE_PRIV;
7899 lck_mtx_lock(&zone_gc_lock);
7900
7901 zone_reclaim_all(mode);
7902
7903 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7904 /*
7905 * If we possibly killed a process, but we're still critical,
7906 * we need to drain harder.
7907 */
7908 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7909 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7910 }
7911
7912 lck_mtx_unlock(&zone_gc_lock);
7913 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7914 }
7915
7916 void
7917 zone_gc_trim(void)
7918 {
7919 zone_gc(ZONE_GC_TRIM);
7920 }
7921
7922 void
7923 zone_gc_drain(void)
7924 {
7925 zone_gc(ZONE_GC_DRAIN);
7926 }
7927
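/*
 * Heuristic used by the working-set timer and the async trim callout:
 * a zone needs trimming when a depot shrink was requested, or when the
 * recent minimum / weighted moving average of idle magazines (for cached
 * zones) or free elements (for uncached zones) stays above the reserve
 * and the auto-trim thresholds.
 */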
7928 static bool
7929 zone_trim_needed(zone_t z)
7930 {
7931 if (z->z_depot_cleanup) {
7932 return true;
7933 }
7934
7935 if (z->z_async_refilling) {
7936 /* Don't fight with refill */
7937 return false;
7938 }
7939
7940 if (z->z_pcpu_cache) {
7941 uint32_t e_n, f_n;
7942
7943 e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7944 f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
7945
7946 if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7947 return true;
7948 }
7949
7950 if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7951 f_n * zc_mag_size() * zone_elem_inner_size(z) >
7952 zc_autotrim_size() * Z_WMA_UNIT) {
7953 return true;
7954 }
7955
7956 return false;
7957 }
7958
7959 if (!zone_pva_is_null(z->z_pageq_empty)) {
7960 uint32_t n;
7961
7962 n = MIN(z->z_elems_free_wma, z->z_elems_free_min);
7963
7964 return n >= z->z_elems_rsv + z->z_chunk_elems;
7965 }
7966
7967 return false;
7968 }
7969
7970 static void
7971 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7972 {
7973 current_thread()->options |= TH_OPT_ZONE_PRIV;
7974
7975 zone_foreach(z) {
7976 if (!z->collectable || z == zc_magazine_zone) {
7977 continue;
7978 }
7979
7980 if (zone_trim_needed(z)) {
7981 lck_mtx_lock(&zone_gc_lock);
7982 zone_reclaim(z, ZONE_RECLAIM_TRIM);
7983 lck_mtx_unlock(&zone_gc_lock);
7984 }
7985 }
7986
7987 if (zone_trim_needed(zc_magazine_zone)) {
7988 lck_mtx_lock(&zone_gc_lock);
7989 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7990 lck_mtx_unlock(&zone_gc_lock);
7991 }
7992
7993 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7994 }
7995
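/*
 * Periodic housekeeping (invoked once per ZONE_WSS_UPDATE_PERIOD): ages the
 * per-zone working-set estimates (min / weighted moving averages), grows or
 * shrinks the per-CPU depots based on measured contention, enables caching
 * on zones that stay contended, and kicks the trim callout when needed.
 */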
7996 void
7997 compute_zone_working_set_size(__unused void *param)
7998 {
7999 uint32_t zc_auto = zc_enable_level();
8000 bool needs_trim = false;
8001
8002 /*
8003 * Keep zone caching disabled until the first proc is made.
8004 */
8005 if (__improbable(zone_caching_disabled < 0)) {
8006 return;
8007 }
8008
8009 zone_caching_disabled = vm_pool_low();
8010
8011 if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
8012 zc_auto = 0;
8013 }
8014
8015 zone_foreach(z) {
8016 uint32_t old, wma, cur;
8017 bool needs_caching = false;
8018
8019 if (z->z_self != z) {
8020 continue;
8021 }
8022
8023 zone_lock(z);
8024
8025 zone_recirc_lock_nopreempt(z);
8026
8027 if (z->z_pcpu_cache) {
8028 wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
8029 z->z_recirc_empty_min = z->z_recirc.zd_empty;
8030 z->z_recirc_empty_wma = wma;
8031 } else {
8032 wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
8033 z->z_elems_free_min = z->z_elems_free;
8034 z->z_elems_free_wma = wma;
8035 }
8036
8037 wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
8038 z->z_recirc_full_min = z->z_recirc.zd_full;
8039 z->z_recirc_full_wma = wma;
8040
8041 /* fixed point decimal of contentions per second */
8042 old = z->z_recirc_cont_wma;
8043 cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
8044 (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
8045 cur = (3 * old + cur) / 4;
8046 zone_recirc_unlock_nopreempt(z);
8047
8048 if (z->z_pcpu_cache) {
8049 uint16_t size = z->z_depot_size;
8050
8051 if (zone_exhausted(z)) {
8052 if (z->z_depot_size) {
8053 z->z_depot_size = 0;
8054 z->z_depot_cleanup = true;
8055 }
8056 } else if (size < z->z_depot_limit && cur > zc_grow_level()) {
8057 /*
8058 * lose history on purpose now
8059 * that we just grew, to give
8060 * the system time to adjust.
8061 */
8062 cur = (zc_grow_level() + zc_shrink_level()) / 2;
8063 size = size ? (3 * size + 2) / 2 : 2;
8064 z->z_depot_size = MIN(z->z_depot_limit, size);
8065 } else if (size > 0 && cur <= zc_shrink_level()) {
8066 /*
8067 * lose history on purpose now
8068 * that we just shrunk, to give
8069 * the system time to adjust.
8070 */
8071 cur = (zc_grow_level() + zc_shrink_level()) / 2;
8072 z->z_depot_size = size - 1;
8073 z->z_depot_cleanup = true;
8074 }
8075 } else if (!z->z_nocaching && !zone_exhaustible(z) && zc_auto &&
8076 old >= zc_auto && cur >= zc_auto) {
8077 needs_caching = true;
8078 }
8079
8080 z->z_recirc_cont_wma = cur;
8081 z->z_recirc_cont_cur = 0;
8082
8083 if (!needs_trim && zone_trim_needed(z)) {
8084 needs_trim = true;
8085 }
8086
8087 zone_unlock(z);
8088
8089 if (needs_caching) {
8090 zone_enable_caching(z);
8091 }
8092 }
8093
8094 if (needs_trim) {
8095 thread_call_enter(&zone_trim_callout);
8096 }
8097 }
8098
8099 #endif /* !ZALLOC_TEST */
8100 #pragma mark vm integration, MIG routines
8101 #if !ZALLOC_TEST
8102
8103 extern unsigned int stack_total;
8104 #if defined (__x86_64__)
8105 extern unsigned int inuse_ptepages_count;
8106 #endif
8107
8108 static const char *
8109 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
8110 bool is_kt_var)
8111 {
8112 if (is_kt_var) {
8113 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
8114 return cur.ktv_var->kt_name;
8115 } else {
8116 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
8117 return cur.ktv_fixed->kt_zv.zv_name;
8118 }
8119 }
8120
8121 static void
8122 panic_print_types_in_zone(zone_t z, const char* debug_str)
8123 {
8124 kalloc_type_views_t kt_cur = {};
8125 const char *prev_type = "";
8126 size_t skip_over_site = sizeof("site.") - 1;
8127 zone_security_flags_t zsflags = zone_security_config(z);
8128 bool is_kt_var = false;
8129
8130 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8131 uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
8132 kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
8133 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
8134 is_kt_var = true;
8135 } else {
8136 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
8137 }
8138
8139 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
8140 is_kt_var? "type arrays" : "types", debug_str, z->z_name);
8141
8142 while (kt_cur.ktv_fixed) {
8143 kalloc_type_views_t kt_next = {};
8144 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
8145 is_kt_var) + skip_over_site;
8146 if (strcmp(typename, prev_type) != 0) {
8147 paniclog_append_noflush("\t%-50s\n", typename);
8148 prev_type = typename;
8149 }
8150 kt_cur = kt_next;
8151 }
8152 paniclog_append_noflush("\n");
8153 }
8154
8155 static void
8156 panic_display_kalloc_types(void)
8157 {
8158 if (kalloc_type_src_zone) {
8159 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
8160 }
8161 if (kalloc_type_dst_zone) {
8162 panic_print_types_in_zone(kalloc_type_dst_zone,
8163 "addr is being freed to");
8164 }
8165 }
8166
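/*
 * Best-effort selection of the n largest zones by wired size, used by the
 * panic log and by zone_find_largest(); the VM pages pseudo-zone is
 * skipped, and a zone takes over the first slot whose recorded size it
 * beats without shifting smaller entries down.
 */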
8167 static void
8168 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
8169 uint64_t *zone_size)
8170 {
8171 zone_index_foreach(zid) {
8172 zone_t z = &zone_array[zid];
8173 vm_offset_t size = zone_size_wired(z);
8174
8175 if (zid == ZONE_ID_VM_PAGES) {
8176 continue;
8177 }
8178 for (uint32_t i = 0; i < n; i++) {
8179 if (size > zone_size[i]) {
8180 largest_zones[i] = z;
8181 zone_size[i] = size;
8182 break;
8183 }
8184 }
8185 }
8186 }
8187
8188 #define NUM_LARGEST_ZONES 5
8189 static void
8190 panic_display_largest_zones(void)
8191 {
8192 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
8193 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
8194
8195 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
8196 (uint64_t *) &largest_size);
8197
8198 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
8199 "Zone Name", "Cur Size", "Free Size");
8200 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
8201 zone_t z = largest_zones[i];
8202 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
8203 zone_heap_name(z), z->z_name,
8204 mach_vm_size_pretty(largest_size[i]),
8205 mach_vm_size_unit(largest_size[i]),
8206 mach_vm_size_pretty(zone_size_free(z)),
8207 mach_vm_size_unit(zone_size_free(z)));
8208 }
8209 }
8210
8211 static void
8212 panic_display_zprint(void)
8213 {
8214 panic_display_largest_zones();
8215 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
8216 (uintptr_t)(kernel_stack_size * stack_total));
8217 #if defined (__x86_64__)
8218 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
8219 (uintptr_t)ptoa(inuse_ptepages_count));
8220 #endif
8221 paniclog_append_noflush("%-20s %10llu\n", "Kalloc.Large",
8222 counter_load(&kalloc_large_total));
8223
8224 if (panic_kext_memory_info) {
8225 mach_memory_info_t *mem_info = panic_kext_memory_info;
8226
8227 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
8228 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
8229 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
8230 continue;
8231 }
8232 if (mem_info[i].size > (1024 * 1024)) {
8233 paniclog_append_noflush("%-5lld %10lld\n",
8234 mem_info[i].site, mem_info[i].size);
8235 }
8236 }
8237 }
8238 }
8239
8240 static void
8241 panic_display_zone_info(void)
8242 {
8243 paniclog_append_noflush("Zone info:\n");
8244 paniclog_append_noflush(" Zone map: %p - %p\n",
8245 (void *)zone_info.zi_map_range.min_address,
8246 (void *)zone_info.zi_map_range.max_address);
8247 #if CONFIG_PROB_GZALLOC
8248 if (pgz_submap) {
8249 paniclog_append_noflush(" . PGZ : %p - %p\n",
8250 (void *)pgz_submap->min_offset,
8251 (void *)pgz_submap->max_offset);
8252 }
8253 #endif /* CONFIG_PROB_GZALLOC */
8254 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8255 vm_map_t map = zone_submaps[i];
8256
8257 if (map == VM_MAP_NULL) {
8258 continue;
8259 }
8260 paniclog_append_noflush(" . %-6s: %p - %p\n",
8261 zone_submaps_names[i],
8262 (void *)map->min_offset,
8263 (void *)map->max_offset);
8264 }
8265 paniclog_append_noflush(" Metadata: %p - %p\n"
8266 " Bitmaps : %p - %p\n"
8267 " Extra : %p - %p\n"
8268 "\n",
8269 (void *)zone_info.zi_meta_range.min_address,
8270 (void *)zone_info.zi_meta_range.max_address,
8271 (void *)zone_info.zi_bits_range.min_address,
8272 (void *)zone_info.zi_bits_range.max_address,
8273 (void *)zone_info.zi_xtra_range.min_address,
8274 (void *)zone_info.zi_xtra_range.max_address);
8275 }
8276
8277 static void
8278 panic_display_zone_fault(vm_offset_t addr)
8279 {
8280 struct zone_page_metadata meta = { };
8281 vm_map_t map = VM_MAP_NULL;
8282 vm_offset_t oob_offs = 0, size = 0;
8283 int map_idx = -1;
8284 zone_t z = NULL;
8285 const char *kind = "whild deref";
8286 bool oob = false;
8287
8288 /*
8289 * First: look if we bumped into guard pages between submaps
8290 */
8291 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8292 map = zone_submaps[i];
8293 if (map == VM_MAP_NULL) {
8294 continue;
8295 }
8296
8297 if (addr >= map->min_offset && addr < map->max_offset) {
8298 map_idx = i;
8299 break;
8300 }
8301 }
8302
8303 if (map_idx == -1) {
8304 /* this really shouldn't happen, submaps are back to back */
8305 return;
8306 }
8307
8308 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8309
8310 /*
8311 * Second: look if there's just no metadata at all
8312 */
8313 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8314 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8315 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8316 zone_array[meta.zm_index].z_self == NULL) {
8317 paniclog_append_noflush(" Zone : <unknown>\n");
8318 kind = "wild deref, missing or invalid metadata";
8319 } else {
8320 z = &zone_array[meta.zm_index];
8321 paniclog_append_noflush(" Zone : %s%s\n",
8322 zone_heap_name(z), zone_name(z));
8323 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8324 kind = "out-of-bounds (high confidence)";
8325 oob = true;
8326 size = zone_element_size((void *)addr,
8327 &z, false, &oob_offs);
8328 } else {
8329 kind = "use-after-free (medium confidence)";
8330 }
8331 }
8332
8333 paniclog_append_noflush(" Address : %p\n", (void *)addr);
8334 if (oob) {
8335 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
8336 (void *)(trunc_page(addr) - (size - oob_offs)),
8337 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8338 }
8339 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
8340 zone_submaps_names[map_idx],
8341 (void *)map->min_offset, (void *)map->max_offset);
8342 paniclog_append_noflush(" Kind : %s\n", kind);
8343 if (oob) {
8344 paniclog_append_noflush(" Access : %d byte(s) past\n",
8345 (uint32_t)(addr & PAGE_MASK) + 1);
8346 }
8347 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
8348 "0x%04x 0x%08x 0x%08x 0x%08x\n",
8349 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8350 meta.zm_alloc_size, meta.zm_bitmap,
8351 meta.zm_page_next.packed_address,
8352 meta.zm_page_prev.packed_address);
8353 paniclog_append_noflush("\n");
8354 }
8355
8356 void
8357 panic_display_zalloc(void)
8358 {
8359 bool keepsyms = false;
8360
8361 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8362
8363 panic_display_zone_info();
8364
8365 if (panic_fault_address) {
8366 #if CONFIG_PROB_GZALLOC
8367 if (pgz_owned(panic_fault_address)) {
8368 panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8369 } else
8370 #endif /* CONFIG_PROB_GZALLOC */
8371 if (zone_maps_owned(panic_fault_address, 1)) {
8372 panic_display_zone_fault(panic_fault_address);
8373 }
8374 }
8375
8376 if (panic_include_zprint) {
8377 panic_display_zprint();
8378 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8379 panic_display_largest_zones();
8380 }
8381 #if CONFIG_ZLEAKS
8382 if (zleak_active) {
8383 panic_display_zleaks(keepsyms);
8384 }
8385 #endif
8386 if (panic_include_kalloc_types) {
8387 panic_display_kalloc_types();
8388 }
8389 }
8390
8391 /*
8392 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8393 * requesting zone information.
8394 * Frees unused pages towards the end of the region, and zero'es out unused
8395 * space on the last page.
8396 */
8397 static vm_map_copy_t
8398 create_vm_map_copy(
8399 vm_offset_t start_addr,
8400 vm_size_t total_size,
8401 vm_size_t used_size)
8402 {
8403 kern_return_t kr;
8404 vm_offset_t end_addr;
8405 vm_size_t free_size;
8406 vm_map_copy_t copy;
8407
8408 if (used_size != total_size) {
8409 end_addr = start_addr + used_size;
8410 free_size = total_size - (round_page(end_addr) - start_addr);
8411
8412 if (free_size >= PAGE_SIZE) {
8413 kmem_free(ipc_kernel_map,
8414 round_page(end_addr), free_size);
8415 }
8416 bzero((char *) end_addr, round_page(end_addr) - end_addr);
8417 }
8418
8419 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8420 (vm_map_size_t)used_size, TRUE, &copy);
8421 assert(kr == KERN_SUCCESS);
8422
8423 return copy;
8424 }
8425
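/*
 * Snapshots a zone under its lock and fills in the MIG name/info
 * structures, subtracting elements sitting in per-CPU caches from the
 * allocated count so they are not reported as in use.
 */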
8426 static boolean_t
8427 get_zone_info(
8428 zone_t z,
8429 mach_zone_name_t *zn,
8430 mach_zone_info_t *zi)
8431 {
8432 struct zone zcopy;
8433 vm_size_t cached = 0;
8434
8435 assert(z != ZONE_NULL);
8436 zone_lock(z);
8437 if (!z->z_self) {
8438 zone_unlock(z);
8439 return FALSE;
8440 }
8441 zcopy = *z;
8442 if (z->z_pcpu_cache) {
8443 zpercpu_foreach(zc, z->z_pcpu_cache) {
8444 cached += zc->zc_alloc_cur + zc->zc_free_cur;
8445 cached += zc->zc_depot.zd_full * zc_mag_size();
8446 }
8447 }
8448 zone_unlock(z);
8449
8450 if (zn != NULL) {
8451 /*
8452 * Append kalloc heap name to zone name (if zone is used by kalloc)
8453 */
8454 char temp_zone_name[MAX_ZONE_NAME] = "";
8455 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8456 zone_heap_name(z), z->z_name);
8457
8458 /* assuming here the name data is static */
8459 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8460 strlen(temp_zone_name) + 1);
8461 }
8462
8463 if (zi != NULL) {
8464 *zi = (mach_zone_info_t) {
8465 .mzi_count = zone_count_allocated(&zcopy) - cached,
8466 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8467 // max_size for zprint is now high-watermark of pages used
8468 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8469 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8470 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8471 .mzi_exhaustible = (uint64_t)zone_exhaustible(&zcopy),
8472 };
8473 if (zcopy.z_chunk_pages == 0) {
8474 /* this is a zcache */
8475 zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8476 }
8477 zpercpu_foreach(zs, zcopy.z_stats) {
8478 zi->mzi_sum_size += zs->zs_mem_allocated;
8479 }
8480 if (zcopy.collectable) {
8481 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8482 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8483 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8484 }
8485 }
8486
8487 return TRUE;
8488 }
8489
8490 /* mach_memory_info entitlement */
8491 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8492
8493 /* macro needed to rate-limit mach_memory_info */
8494 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8495
8496 /* declarations necessary to call kauth_cred_issuser() */
8497 struct ucred;
8498 extern int kauth_cred_issuser(struct ucred *);
8499 extern struct ucred *kauth_cred_get(void);
8500
8501 static kern_return_t
8502 mach_memory_info_internal(
8503 host_t host,
8504 mach_zone_name_array_t *namesp,
8505 mach_msg_type_number_t *namesCntp,
8506 mach_zone_info_array_t *infop,
8507 mach_msg_type_number_t *infoCntp,
8508 mach_memory_info_array_t *memoryInfop,
8509 mach_msg_type_number_t *memoryInfoCntp,
8510 bool redact_info);
8511
8512 static kern_return_t
8513 mach_memory_info_security_check(bool redact_info)
8514 {
8515 /* If not root, only allow redacted calls. */
8516 if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8517 return KERN_NO_ACCESS;
8518 }
8519
8520 if (PE_srd_fused) {
8521 return KERN_SUCCESS;
8522 }
8523
8524 /* If does not have the memory entitlement, fail. */
8525 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8526 if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8527 return KERN_DENIED;
8528 }
8529
8530 /*
8531 * On release non-mac arm devices, allow mach_memory_info
8532 * to be called twice per day per boot. memorymaintenanced
8533 * calls it once per day, which leaves room for a sysdiagnose.
8534 * Allow redacted version to be called without rate limit.
8535 */
8536
8537 if (!redact_info) {
8538 static uint64_t first_call = 0, second_call = 0;
8539 uint64_t now = 0;
8540 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8541
8542 if (!first_call) {
8543 first_call = now;
8544 } else if (!second_call) {
8545 second_call = now;
8546 } else if (first_call + NSEC_DAY > now) {
8547 return KERN_DENIED;
8548 } else if (first_call + NSEC_DAY < now) {
8549 first_call = now;
8550 second_call = 0;
8551 }
8552 }
8553 #endif
8554
8555 return KERN_SUCCESS;
8556 }
8557
8558 kern_return_t
8559 mach_zone_info(
8560 mach_port_t host_port,
8561 mach_zone_name_array_t *namesp,
8562 mach_msg_type_number_t *namesCntp,
8563 mach_zone_info_array_t *infop,
8564 mach_msg_type_number_t *infoCntp)
8565 {
8566 return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8567 }
8568
8569 kern_return_t
8570 mach_memory_info(
8571 mach_port_t host_port,
8572 mach_zone_name_array_t *namesp,
8573 mach_msg_type_number_t *namesCntp,
8574 mach_zone_info_array_t *infop,
8575 mach_msg_type_number_t *infoCntp,
8576 mach_memory_info_array_t *memoryInfop,
8577 mach_msg_type_number_t *memoryInfoCntp)
8578 {
8579 bool redact_info = false;
8580 host_t host = HOST_NULL;
8581
8582 host = convert_port_to_host_priv(host_port);
8583 if (host == HOST_NULL) {
8584 redact_info = true;
8585 host = convert_port_to_host(host_port);
8586 }
8587
8588 return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8589 }
8590
8591 static void
8592 zone_info_redact(mach_zone_info_t *zi)
8593 {
8594 zi->mzi_cur_size = 0;
8595 zi->mzi_max_size = 0;
8596 zi->mzi_alloc_size = 0;
8597 zi->mzi_sum_size = 0;
8598 zi->mzi_collectable = 0;
8599 }
8600
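/*
 * For redacted callers, kalloc type zones are not reported individually:
 * they are coalesced by element size into synthetic "kalloc.<size>"
 * entries (see the redact_info path in mach_memory_info_internal below).
 */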
8601 static bool
8602 zone_info_needs_to_be_coalesced(int zone_index)
8603 {
8604 zone_security_flags_t zsflags = zone_security_array[zone_index];
8605 if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8606 return true;
8607 }
8608 return false;
8609 }
8610
8611 static bool
8612 zone_info_find_coalesce_zone(
8613 mach_zone_info_t *zi,
8614 mach_zone_info_t *info,
8615 int *coalesce,
8616 int coalesce_count,
8617 int *coalesce_index)
8618 {
8619 for (int i = 0; i < coalesce_count; i++) {
8620 if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8621 *coalesce_index = coalesce[i];
8622 return true;
8623 }
8624 }
8625
8626 return false;
8627 }
8628
8629 static void
8630 zone_info_coalesce(
8631 mach_zone_info_t *info,
8632 int coalesce_index,
8633 mach_zone_info_t *zi)
8634 {
8635 info[coalesce_index].mzi_count += zi->mzi_count;
8636 }
8637
8638 static kern_return_t
8639 mach_memory_info_internal(
8640 host_t host,
8641 mach_zone_name_array_t *namesp,
8642 mach_msg_type_number_t *namesCntp,
8643 mach_zone_info_array_t *infop,
8644 mach_msg_type_number_t *infoCntp,
8645 mach_memory_info_array_t *memoryInfop,
8646 mach_msg_type_number_t *memoryInfoCntp,
8647 bool redact_info)
8648 {
8649 mach_zone_name_t *names;
8650 vm_offset_t names_addr;
8651 vm_size_t names_size;
8652
8653 mach_zone_info_t *info;
8654 vm_offset_t info_addr;
8655 vm_size_t info_size;
8656
8657 int *coalesce;
8658 vm_offset_t coalesce_addr;
8659 vm_size_t coalesce_size;
8660 int coalesce_count = 0;
8661
8662 mach_memory_info_t *memory_info;
8663 vm_offset_t memory_info_addr;
8664 vm_size_t memory_info_size;
8665 vm_size_t memory_info_vmsize;
8666 unsigned int num_info;
8667
8668 unsigned int max_zones, used_zones, i;
8669 mach_zone_name_t *zn;
8670 mach_zone_info_t *zi;
8671 kern_return_t kr;
8672
8673 uint64_t zones_collectable_bytes = 0;
8674
8675 if (host == HOST_NULL) {
8676 return KERN_INVALID_HOST;
8677 }
8678
8679 kr = mach_memory_info_security_check(redact_info);
8680 if (kr != KERN_SUCCESS) {
8681 return kr;
8682 }
8683
8684 /*
8685 * We assume that zones aren't freed once allocated.
8686 * We won't pick up any zones that are allocated later.
8687 */
8688
8689 max_zones = os_atomic_load(&num_zones, relaxed);
8690
8691 names_size = round_page(max_zones * sizeof *names);
8692 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8693 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8694 if (kr != KERN_SUCCESS) {
8695 return kr;
8696 }
8697 names = (mach_zone_name_t *) names_addr;
8698
8699 info_size = round_page(max_zones * sizeof *info);
8700 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8701 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8702 if (kr != KERN_SUCCESS) {
8703 kmem_free(ipc_kernel_map,
8704 names_addr, names_size);
8705 return kr;
8706 }
8707 info = (mach_zone_info_t *) info_addr;
8708
8709 if (redact_info) {
8710 coalesce_size = round_page(max_zones * sizeof *coalesce);
8711 kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8712 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8713 if (kr != KERN_SUCCESS) {
8714 kmem_free(ipc_kernel_map,
8715 names_addr, names_size);
8716 kmem_free(ipc_kernel_map,
8717 info_addr, info_size);
8718 return kr;
8719 }
8720 coalesce = (int *)coalesce_addr;
8721 }
8722
8723 zn = &names[0];
8724 zi = &info[0];
8725
8726 used_zones = 0;
8727 for (i = 0; i < max_zones; i++) {
8728 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8729 continue;
8730 }
8731
8732 if (!redact_info) {
8733 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8734 zn++;
8735 zi++;
8736 used_zones++;
8737 continue;
8738 }
8739
8740 zone_info_redact(zi);
8741 if (!zone_info_needs_to_be_coalesced(i)) {
8742 zn++;
8743 zi++;
8744 used_zones++;
8745 continue;
8746 }
8747
8748 int coalesce_index;
8749 bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8750 coalesce, coalesce_count, &coalesce_index);
8751
8752 /* Didn't find a zone to coalesce */
8753 if (!found_coalesce_zone) {
8754 /* Updates the zone name */
8755 __nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8756 snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8757 (int)zi->mzi_elem_size);
8758
8759 coalesce[coalesce_count] = used_zones;
8760 coalesce_count++;
8761 zn++;
8762 zi++;
8763 used_zones++;
8764 continue;
8765 }
8766
8767 zone_info_coalesce(info, coalesce_index, zi);
8768 }
8769
8770 if (redact_info) {
8771 kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8772 }
8773
8774 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8775 *namesCntp = used_zones;
8776
8777 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8778 *infoCntp = used_zones;
8779
8780 num_info = 0;
8781 memory_info_addr = 0;
8782
8783 if (memoryInfop && memoryInfoCntp) {
8784 vm_map_copy_t copy;
8785 num_info = vm_page_diagnose_estimate();
8786 memory_info_size = num_info * sizeof(*memory_info);
8787 memory_info_vmsize = round_page(memory_info_size);
8788 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8789 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8790 if (kr != KERN_SUCCESS) {
8791 return kr;
8792 }
8793
8794 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8795 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8796 assert(kr == KERN_SUCCESS);
8797
8798 memory_info = (mach_memory_info_t *) memory_info_addr;
8799 vm_page_diagnose(memory_info, num_info, zones_collectable_bytes, redact_info);
8800
8801 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8802 assert(kr == KERN_SUCCESS);
8803
8804 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8805 (vm_map_size_t)memory_info_size, TRUE, &copy);
8806 assert(kr == KERN_SUCCESS);
8807
8808 *memoryInfop = (mach_memory_info_t *) copy;
8809 *memoryInfoCntp = num_info;
8810 }
8811
8812 return KERN_SUCCESS;
8813 }
8814
8815 kern_return_t
8816 mach_zone_info_for_zone(
8817 host_priv_t host,
8818 mach_zone_name_t name,
8819 mach_zone_info_t *infop)
8820 {
8821 zone_t zone_ptr;
8822
8823 if (host == HOST_NULL) {
8824 return KERN_INVALID_HOST;
8825 }
8826
8827 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8828 if (!PE_i_can_has_debugger(NULL)) {
8829 return KERN_INVALID_HOST;
8830 }
8831 #endif
8832
8833 if (infop == NULL) {
8834 return KERN_INVALID_ARGUMENT;
8835 }
8836
8837 zone_ptr = ZONE_NULL;
8838 zone_foreach(z) {
8839 /*
8840 * Append kalloc heap name to zone name (if zone is used by kalloc)
8841 */
8842 char temp_zone_name[MAX_ZONE_NAME] = "";
8843 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8844 zone_heap_name(z), z->z_name);
8845
8846 /* Find the requested zone by name */
8847 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8848 zone_ptr = z;
8849 break;
8850 }
8851 }
8852
8853 /* No zones found with the requested zone name */
8854 if (zone_ptr == ZONE_NULL) {
8855 return KERN_INVALID_ARGUMENT;
8856 }
8857
8858 if (get_zone_info(zone_ptr, NULL, infop)) {
8859 return KERN_SUCCESS;
8860 }
8861 return KERN_FAILURE;
8862 }
8863
8864 kern_return_t
8865 mach_zone_info_for_largest_zone(
8866 host_priv_t host,
8867 mach_zone_name_t *namep,
8868 mach_zone_info_t *infop)
8869 {
8870 if (host == HOST_NULL) {
8871 return KERN_INVALID_HOST;
8872 }
8873
8874 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8875 if (!PE_i_can_has_debugger(NULL)) {
8876 return KERN_INVALID_HOST;
8877 }
8878 #endif
8879
8880 if (namep == NULL || infop == NULL) {
8881 return KERN_INVALID_ARGUMENT;
8882 }
8883
8884 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8885 return KERN_SUCCESS;
8886 }
8887 return KERN_FAILURE;
8888 }
8889
8890 uint64_t
8891 get_zones_collectable_bytes(void)
8892 {
8893 uint64_t zones_collectable_bytes = 0;
8894 mach_zone_info_t zi;
8895
8896 zone_foreach(z) {
8897 if (get_zone_info(z, NULL, &zi)) {
8898 zones_collectable_bytes +=
8899 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8900 }
8901 }
8902
8903 return zones_collectable_bytes;
8904 }
8905
8906 kern_return_t
8907 mach_zone_get_zlog_zones(
8908 host_priv_t host,
8909 mach_zone_name_array_t *namesp,
8910 mach_msg_type_number_t *namesCntp)
8911 {
8912 #if ZALLOC_ENABLE_LOGGING
8913 unsigned int max_zones, logged_zones, i;
8914 kern_return_t kr;
8915 zone_t zone_ptr;
8916 mach_zone_name_t *names;
8917 vm_offset_t names_addr;
8918 vm_size_t names_size;
8919
8920 if (host == HOST_NULL) {
8921 return KERN_INVALID_HOST;
8922 }
8923
8924 if (namesp == NULL || namesCntp == NULL) {
8925 return KERN_INVALID_ARGUMENT;
8926 }
8927
8928 max_zones = os_atomic_load(&num_zones, relaxed);
8929
8930 names_size = round_page(max_zones * sizeof *names);
8931 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8932 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8933 if (kr != KERN_SUCCESS) {
8934 return kr;
8935 }
8936 names = (mach_zone_name_t *) names_addr;
8937
8938 zone_ptr = ZONE_NULL;
8939 logged_zones = 0;
8940 for (i = 0; i < max_zones; i++) {
8941 zone_t z = &(zone_array[i]);
8942 assert(z != ZONE_NULL);
8943
8944 /* Copy out the zone name if zone logging is enabled */
8945 if (z->z_btlog) {
8946 get_zone_info(z, &names[logged_zones], NULL);
8947 logged_zones++;
8948 }
8949 }
8950
8951 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8952 *namesCntp = logged_zones;
8953
8954 return KERN_SUCCESS;
8955
8956 #else /* ZALLOC_ENABLE_LOGGING */
8957 #pragma unused(host, namesp, namesCntp)
8958 return KERN_FAILURE;
8959 #endif /* ZALLOC_ENABLE_LOGGING */
8960 }
8961
8962 kern_return_t
8963 mach_zone_get_btlog_records(
8964 host_priv_t host,
8965 mach_zone_name_t name,
8966 zone_btrecord_array_t *recsp,
8967 mach_msg_type_number_t *numrecs)
8968 {
8969 #if ZALLOC_ENABLE_LOGGING
8970 zone_btrecord_t *recs;
8971 kern_return_t kr;
8972 vm_address_t addr;
8973 vm_size_t size;
8974 zone_t zone_ptr;
8975 vm_map_copy_t copy;
8976
8977 if (host == HOST_NULL) {
8978 return KERN_INVALID_HOST;
8979 }
8980
8981 if (recsp == NULL || numrecs == NULL) {
8982 return KERN_INVALID_ARGUMENT;
8983 }
8984
8985 zone_ptr = ZONE_NULL;
8986 zone_foreach(z) {
8987 /*
8988 * Append kalloc heap name to zone name (if zone is used by kalloc)
8989 */
8990 char temp_zone_name[MAX_ZONE_NAME] = "";
8991 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8992 zone_heap_name(z), z->z_name);
8993
8994 /* Find the requested zone by name */
8995 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8996 zone_ptr = z;
8997 break;
8998 }
8999 }
9000
9001 /* No zones found with the requested zone name */
9002 if (zone_ptr == ZONE_NULL) {
9003 return KERN_INVALID_ARGUMENT;
9004 }
9005
9006 /* Logging not turned on for the requested zone */
9007 if (!zone_ptr->z_btlog) {
9008 return KERN_FAILURE;
9009 }
9010
9011 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
9012 if (kr != KERN_SUCCESS) {
9013 return kr;
9014 }
9015
9016 addr = (vm_address_t)recs;
9017 size = sizeof(zone_btrecord_t) * *numrecs;
9018
9019 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
9020 assert(kr == KERN_SUCCESS);
9021
9022 *recsp = (zone_btrecord_t *)copy;
9023 return KERN_SUCCESS;
9024
9025 #else /* !ZALLOC_ENABLE_LOGGING */
9026 #pragma unused(host, name, recsp, numrecs)
9027 return KERN_FAILURE;
9028 #endif /* !ZALLOC_ENABLE_LOGGING */
9029 }
9030
9031
9032 kern_return_t
9033 mach_zone_force_gc(
9034 host_t host)
9035 {
9036 if (host == HOST_NULL) {
9037 return KERN_INVALID_HOST;
9038 }
9039
9040 #if DEBUG || DEVELOPMENT
9041 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
9042 /* Callout to buffer cache GC to drop elements in the apfs zones */
9043 if (consider_buffer_cache_collect != NULL) {
9044 (void)(*consider_buffer_cache_collect)(0);
9045 }
9046 zone_gc(ZONE_GC_DRAIN);
9047 #endif /* DEBUG || DEVELOPMENT */
9048 return KERN_SUCCESS;
9049 }
9050
9051 zone_t
9052 zone_find_largest(uint64_t *zone_size)
9053 {
9054 zone_t largest_zone = 0;
9055 uint64_t largest_zone_size = 0;
9056 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
9057 if (zone_size) {
9058 *zone_size = largest_zone_size;
9059 }
9060 return largest_zone;
9061 }
9062
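/*
 * Fills in a lock-free snapshot of the zone's basic counters: cached
 * per-CPU elements are counted as free, and the avail/free/alloc numbers
 * are reconciled in case they raced with concurrent updates.
 */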
9063 void
9064 zone_get_stats(
9065 zone_t zone,
9066 struct zone_basic_stats *stats)
9067 {
9068 stats->zbs_avail = zone->z_elems_avail;
9069
9070 stats->zbs_alloc_fail = 0;
9071 zpercpu_foreach(zs, zone->z_stats) {
9072 stats->zbs_alloc_fail += zs->zs_alloc_fail;
9073 }
9074
9075 stats->zbs_cached = 0;
9076 if (zone->z_pcpu_cache) {
9077 zpercpu_foreach(zc, zone->z_pcpu_cache) {
9078 stats->zbs_cached += zc->zc_alloc_cur +
9079 zc->zc_free_cur +
9080 zc->zc_depot.zd_full * zc_mag_size();
9081 }
9082 }
9083
9084 stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
9085
9086 /*
9087 * Since we don't take any locks, deal with possible inconsistencies
9088 * as the counters may have changed.
9089 */
9090 if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
9091 &stats->zbs_alloc)) {
9092 stats->zbs_avail = stats->zbs_free;
9093 stats->zbs_alloc = 0;
9094 }
9095 }
9096
9097 #endif /* !ZALLOC_TEST */
9098 #pragma mark zone creation, configuration, destruction
9099 #if !ZALLOC_TEST
9100
9101 static zone_t
9102 zone_init_defaults(zone_id_t zid)
9103 {
9104 zone_t z = &zone_array[zid];
9105
9106 z->z_wired_max = ~0u;
9107 z->collectable = true;
9108
9109 hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
9110 hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
9111 zone_depot_init(&z->z_recirc);
9112 return z;
9113 }
9114
9115 void
9116 zone_set_exhaustible(zone_t zone, vm_size_t nelems, bool exhausts_by_design)
9117 {
9118 zone_lock(zone);
9119 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
9120 zone->z_exhausts = exhausts_by_design;
9121 zone_unlock(zone);
9122 }
9123
9124 void
9125 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
9126 {
9127 zone_t zone = zov.zov_zone;
9128
9129 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
9130 zone = zov.zov_view->zv_zone;
9131 } else {
9132 zone = zov.zov_zone;
9133 }
9134
9135 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
9136 }
9137
9138 /**
9139 * @function zone_create_find
9140 *
9141 * @abstract
9142 * Finds an unused zone for the given name and element size.
9143 *
9144 * @param name the zone name
9145 * @param size the element size (including redzones, ...)
9146 * @param flags the flags passed to @c zone_create*
9147 * @param zid_inout the desired zone ID or ZONE_ID_ANY
9148 *
9149 * @returns a zone to initialize further.
9150 */
9151 static zone_t
9152 zone_create_find(
9153 const char *name,
9154 vm_size_t size,
9155 zone_create_flags_t flags,
9156 zone_id_t *zid_inout)
9157 {
9158 zone_id_t nzones, zid = *zid_inout;
9159 zone_t z;
9160
9161 simple_lock(&all_zones_lock, &zone_locks_grp);
9162
9163 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
9164 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
9165
9166 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
9167 /*
9168 * The first time around, make sure the reserved zone IDs
9169 * have an initialized lock as zone_index_foreach() will
9170 * enumerate them.
9171 */
9172 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
9173 zone_init_defaults(nzones++);
9174 }
9175
9176 os_atomic_store(&num_zones, nzones, release);
9177 }
9178
9179 if (zid != ZONE_ID_ANY) {
9180 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
9181 panic("zone_create: invalid desired zone ID %d for %s",
9182 zid, name);
9183 }
9184 if (flags & ZC_DESTRUCTIBLE) {
9185 panic("zone_create: ID %d (%s) must be permanent", zid, name);
9186 }
9187 if (zone_array[zid].z_self) {
9188 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
9189 }
9190 z = &zone_array[zid];
9191 } else {
9192 if (flags & ZC_DESTRUCTIBLE) {
9193 /*
9194 * If possible, find a previously zdestroy'ed zone in the
9195 * zone_array that we can reuse.
9196 */
9197 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
9198 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
9199 z = &zone_array[i];
9200
9201 /*
9202 * If the zone name and the element size are the
9203 * same, we can just reuse the old zone struct.
9204 */
9205 if (strcmp(z->z_name, name) ||
9206 zone_elem_outer_size(z) != size) {
9207 continue;
9208 }
9209 bitmap_clear(zone_destroyed_bitmap, i);
9210 z->z_destroyed = false;
9211 z->z_self = z;
9212 zid = (zone_id_t)i;
9213 goto out;
9214 }
9215 }
9216
9217 zid = nzones++;
9218 z = zone_init_defaults(zid);
9219
9220 /*
9221 * The release barrier pairs with the acquire in
9222 * zone_index_foreach() and makes sure that enumeration loops
9223 * always see an initialized zone lock.
9224 */
9225 os_atomic_store(&num_zones, nzones, release);
9226 }
9227
9228 out:
9229 num_zones_in_use++;
9230 simple_unlock(&all_zones_lock);
9231
9232 *zid_inout = zid;
9233 return z;
9234 }
9235
9236 __abortlike
9237 static void
9238 zone_create_panic(const char *name, const char *f1, const char *f2)
9239 {
9240 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
9241 name, f1, f2);
9242 }
9243 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
9244 if ((flags) & forbidden_flag) { \
9245 zone_create_panic(name, #current_flag, #forbidden_flag); \
9246 }
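/*
 * For illustration, zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM)
 * expands (roughly) to:
 *
 *	if ((flags) & ZC_VM) {
 *		zone_create_panic(name, "ZC_READONLY", "ZC_VM");
 *	}
 *
 * i.e. the "current" flag only feeds the panic message; the check itself is
 * on the forbidden flag.
 */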
9247
9248 /*
9249 * Adjusts the size of the element based on minimum size, alignment
9250 * and kasan redzones
9251 */
9252 static vm_size_t
9253 zone_elem_adjust_size(
9254 const char *name __unused,
9255 vm_size_t elem_size,
9256 zone_create_flags_t flags __unused,
9257 uint16_t *redzone __unused)
9258 {
9259 vm_size_t size;
9260
9261 /*
9262 * Adjust element size for minimum size and pointer alignment
9263 */
9264 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
9265 if (size < ZONE_MIN_ELEM_SIZE) {
9266 size = ZONE_MIN_ELEM_SIZE;
9267 }
9268
9269 #if KASAN_CLASSIC
9270 /*
9271 * Expand the zone allocation size to include the redzones.
9272 *
9273 * For page-multiple zones add a full guard page because they
9274 * likely require alignment.
9275 */
9276 uint16_t redzone_tmp;
9277 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
9278 redzone_tmp = 0;
9279 } else if ((size & PAGE_MASK) == 0) {
9280 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
9281 panic("zone_create: zone %s can't provide more than PAGE_SIZE"
9282 " alignment", name);
9283 }
9284 redzone_tmp = PAGE_SIZE;
9285 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
9286 redzone_tmp = 0;
9287 } else {
9288 redzone_tmp = KASAN_GUARD_SIZE;
9289 }
9290 size += redzone_tmp;
9291 if (redzone) {
9292 *redzone = redzone_tmp;
9293 }
9294 #endif
9295 return size;
9296 }
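/*
 * Worked example (hypothetical values, no KASAN): assuming ZONE_ALIGN_SIZE
 * is 8 for illustration, a 20-byte element rounds up as
 *
 *	size = (20 + 8 - 1) & -8 = 24;
 *
 * and would then be bumped to ZONE_MIN_ELEM_SIZE if 24 were still below
 * that floor.  The actual constants are defined elsewhere; the numbers
 * here are only illustrative.
 */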
9297
9298 /*
9299 * Returns the allocation chunk size that has the least fragmentation
9300 */
9301 static vm_size_t
9302 zone_get_min_alloc_granule(
9303 vm_size_t elem_size,
9304 zone_create_flags_t flags)
9305 {
9306 vm_size_t alloc_granule = PAGE_SIZE;
9307 if (flags & ZC_PERCPU) {
9308 alloc_granule = PAGE_SIZE * zpercpu_count();
9309 if (PAGE_SIZE % elem_size > 256) {
9310 panic("zone_create: per-cpu zone has too much fragmentation");
9311 }
9312 } else if (flags & ZC_READONLY) {
9313 alloc_granule = PAGE_SIZE;
9314 } else if ((elem_size & PAGE_MASK) == 0) {
9315 /* zero fragmentation by definition */
9316 alloc_granule = elem_size;
9317 } else if (alloc_granule % elem_size == 0) {
9318 /* zero fragmentation by definition */
9319 } else {
9320 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9321 vm_size_t alloc_tmp = PAGE_SIZE;
9322 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9323
9324 #if __arm64__
9325 /*
9326 * Increase chunk size to 48K for sizes larger than 4K on 16k
9327 * machines, so as to reduce internal fragmentation for kalloc
9328 * zones with sizes 12K and 24K.
9329 */
9330 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9331 max_chunk_size = 48 * 1024;
9332 }
9333 #endif
9334 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9335 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9336 if (frag_tmp < frag) {
9337 frag = frag_tmp;
9338 alloc_granule = alloc_tmp;
9339 }
9340 }
9341 }
9342 return alloc_granule;
9343 }
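/*
 * Worked example matching the arm64 comment above (illustrative only):
 * for a 12K element on a 16K-page machine, a single page wastes
 * (16K % 12K) = 4K, i.e. 25% fragmentation.  Growing the chunk:
 *
 *	32K chunk: 32K % 12K = 8K  -> still 25%
 *	48K chunk: 48K % 12K = 0   -> 0%, so the loop settles on 48K
 *
 * which is exactly why max_chunk_size is raised to 48K there.
 */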
9344
9345 vm_size_t
9346 zone_get_early_alloc_size(
9347 const char *name __unused,
9348 vm_size_t elem_size,
9349 zone_create_flags_t flags,
9350 vm_size_t min_elems)
9351 {
9352 vm_size_t adjusted_size, alloc_granule, chunk_elems;
9353
9354 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9355 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9356 chunk_elems = alloc_granule / adjusted_size;
9357
9358 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9359 }
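/*
 * Illustrative arithmetic (hypothetical 4K pages, no per-cpu or KASAN
 * adjustments): for a 48-byte adjusted element and min_elems = 100,
 *
 *	chunk_elems = 4096 / 48 = 85;
 *	return ((100 + 85 - 1) / 85) * 4096 = 2 * 4096 = 8192;
 *
 * i.e. the early allocation size is rounded up to whole chunks.
 */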
9360
9361 zone_t
9362 zone_create_ext(
9363 const char *name,
9364 vm_size_t size,
9365 zone_create_flags_t flags,
9366 zone_id_t zid,
9367 void (^extra_setup)(zone_t))
9368 {
9369 zone_security_flags_t *zsflags;
9370 uint16_t redzone;
9371 zone_t z;
9372
9373 if (size > ZONE_MAX_ALLOC_SIZE) {
9374 panic("zone_create: element size too large: %zd", (size_t)size);
9375 }
9376
9377 if (size < 2 * sizeof(vm_size_t)) {
9378 /* Elements are too small for kasan. */
9379 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9380 }
9381
9382 size = zone_elem_adjust_size(name, size, flags, &redzone);
9383
9384 /*
9385 * Allocate the zone slot, return early if we found an older match.
9386 */
9387 z = zone_create_find(name, size, flags, &zid);
9388 if (__improbable(z->z_self)) {
9389 /* We found a zone to reuse */
9390 return z;
9391 }
9392 zsflags = &zone_security_array[zid];
9393
9394 /*
9395 * Initialize the zone properly.
9396 */
9397
9398 /*
9399 * If the kernel is post lockdown, copy the zone name passed in.
9400 * Else simply maintain a pointer to the name string as it can only
9401 * be a core XNU zone (no unloadable kext exists before lockdown).
9402 */
9403 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9404 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9405 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9406 strlcpy(buf, name, nsz);
9407 z->z_name = buf;
9408 } else {
9409 z->z_name = name;
9410 }
9411 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9412 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9413 } else {
9414 /*
9415 * zone_init() hasn't run yet, use the storage provided by
9416 * zone_stats_startup(), and zone_init() will replace it
9417 * with the final value once the PERCPU zone exists.
9418 */
9419 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9420 }
9421
9422 if (flags & ZC_OBJ_CACHE) {
9423 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9424 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9425 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9426 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9427
9428 z->z_elem_size = (uint16_t)size;
9429 z->z_chunk_pages = 0;
9430 z->z_quo_magic = 0;
9431 z->z_align_magic = 0;
9432 z->z_chunk_elems = 0;
9433 z->z_elem_offs = 0;
9434 z->no_callout = true;
9435 zsflags->z_lifo = true;
9436 } else {
9437 vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9438
9439 z->z_elem_size = (uint16_t)(size - redzone);
9440 z->z_chunk_pages = (uint16_t)atop(alloc);
9441 z->z_quo_magic = Z_MAGIC_QUO(size);
9442 z->z_align_magic = Z_MAGIC_ALIGNED(size);
9443 if (flags & ZC_PERCPU) {
9444 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9445 z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9446 } else {
9447 z->z_chunk_elems = (uint16_t)(alloc / size);
9448 z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9449 }
9450 }
9451
9452 /*
9453 * Handle KPI flags
9454 */
9455
9456 /* ZC_CACHING applied after all configuration is done */
9457 if (flags & ZC_NOCACHING) {
9458 z->z_nocaching = true;
9459 }
9460
9461 if (flags & ZC_READONLY) {
9462 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9463 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9464 assert(zid <= ZONE_ID__LAST_RO);
9465 #if ZSECURITY_CONFIG(READ_ONLY)
9466 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9467 #endif
9468 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9469 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9470 assert(size <= PAGE_SIZE);
9471 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9472 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9473 (uint32_t)size, zone_heap_name(z), z->z_name);
9474 }
9475 }
9476
9477 if (flags & ZC_PERCPU) {
9478 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9479 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9480 z->z_percpu = true;
9481 }
9482 if (flags & ZC_NOGC) {
9483 z->collectable = false;
9484 }
9485 /*
9486 * Handle ZC_NOENCRYPT from xnu only
9487 */
9488 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9489 zsflags->z_noencrypt = true;
9490 }
9491 if (flags & ZC_NOCALLOUT) {
9492 z->no_callout = true;
9493 }
9494 if (flags & ZC_DESTRUCTIBLE) {
9495 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9496 z->z_destructible = true;
9497 }
9498 /*
9499 * Handle Internal flags
9500 */
9501 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9502 if (flags & ZC_PGZ_USE_GUARDS) {
9503 /*
9504 * Try to turn on guard pages only for zones
9505 * with a chance of OOB.
9506 */
9507 if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9508 zsflags->z_pgz_use_guards = true;
9509 }
9510 z->z_pgz_use_guards = true;
9511 }
9512 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9513 if (!(flags & ZC_NOTBITAG)) {
9514 z->z_tbi_tag = true;
9515 }
9516 if (flags & ZC_KALLOC_TYPE) {
9517 zsflags->z_kalloc_type = true;
9518 }
9519 if (flags & ZC_VM) {
9520 zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9521 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9522 }
9523 if (flags & ZC_DATA) {
9524 zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9525 }
9526 #if KASAN_CLASSIC
9527 if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9528 z->z_kasan_quarantine = true;
9529 }
9530 z->z_kasan_redzone = redzone;
9531 #endif /* KASAN_CLASSIC */
9532 #if KASAN_FAKESTACK
9533 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9534 z->z_kasan_fakestacks = true;
9535 }
9536 #endif /* KASAN_FAKESTACK */
9537
9538 /*
9539 * Then if there's extra tuning, do it
9540 */
9541 if (extra_setup) {
9542 extra_setup(z);
9543 }
9544
9545 /*
9546 * Configure debugging features
9547 */
9548 #if CONFIG_PROB_GZALLOC
9549 if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9550 pgz_zone_init(z);
9551 }
9552 #endif
9553 if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9554 #if ZALLOC_ENABLE_LOGGING
9555 /*
9556 * Check for and set up zone leak detection
9557 * if requested via boot-args.
9558 */
9559 zone_setup_logging(z);
9560 #endif /* ZALLOC_ENABLE_LOGGING */
9561 #if KASAN_TBI
9562 zone_setup_kasan_logging(z);
9563 #endif /* KASAN_TBI */
9564 }
9565
9566 #if VM_TAG_SIZECLASSES
9567 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9568 static uint16_t sizeclass_idx;
9569
9570 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9571 z->z_uses_tags = true;
9572 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9573 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9574 z->z_tags_sizeclass = sizeclass_idx++;
9575 } else {
9576 uint16_t i = 0;
9577 for (; i < sizeclass_idx; i++) {
9578 if (size == zone_tags_sizeclasses[i]) {
9579 z->z_tags_sizeclass = i;
9580 break;
9581 }
9582 }
9583
9584 /*
9585 * Size class wasn't found, add it to zone_tags_sizeclasses
9586 */
9587 if (i == sizeclass_idx) {
9588 assert(i < VM_TAG_SIZECLASSES);
9589 zone_tags_sizeclasses[i] = (uint16_t)size;
9590 z->z_tags_sizeclass = sizeclass_idx++;
9591 }
9592 }
9593 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9594 }
9595 #endif
9596
9597 /*
9598 * Finally, fixup properties based on security policies, boot-args, ...
9599 */
9600 if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9601 /*
9602 * We use LIFO in the data map, because workloads like network
9603 * usage or similar tend to rotate through allocations very
9604 * quickly, sometimes with exploding working sets; using
9605 * a FIFO policy might cause massive TLB thrashing with rather
9606 * dramatic performance impacts.
9607 */
9608 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9609 zsflags->z_lifo = true;
9610 }
9611
9612 if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9613 /*
9614 * No zone made before zone_init() can have ZC_CACHING set.
9615 */
9616 assert(zc_magazine_zone);
9617 zone_enable_caching(z);
9618 }
9619
9620 zone_lock(z);
9621 z->z_self = z;
9622 zone_unlock(z);
9623
9624 return z;
9625 }
9626
9627 void
9628 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9629 {
9630 zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9631 }
9632
9633 zone_id_t
9634 zone_get_sig_eq(zone_t zone)
9635 {
9636 return zone_security_array[zone_index(zone)].z_sig_eq;
9637 }
9638
9639 void
9640 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9641 {
9642 /* moving to SMR must be done before the zone has ever been used */
9643 assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9644 assert(!zone_security_array[zone_index(zone)].z_lifo);
9645 assert((smr->smr_flags & SMR_SLEEPABLE) == 0);
9646
9647 if (!zone->z_pcpu_cache) {
9648 zone_enable_caching(zone);
9649 }
9650
9651 zone_lock(zone);
9652
9653 zpercpu_foreach(it, zone->z_pcpu_cache) {
9654 it->zc_smr = smr;
9655 it->zc_free = free_cb;
9656 }
9657 zone->z_smr = true;
9658
9659 zone_unlock(zone);
9660 }
9661
9662 __startup_func
9663 void
9664 zone_create_startup(struct zone_create_startup_spec *spec)
9665 {
9666 zone_t z;
9667
9668 z = zone_create_ext(spec->z_name, spec->z_size,
9669 spec->z_flags, spec->z_zid, spec->z_setup);
9670 if (spec->z_var) {
9671 *spec->z_var = z;
9672 }
9673 }
9674
9675 /*
9676 * The first 4 fields of a zone_view and a zone alias, so that the zone_or_view_t
9677 * union works. Trust but verify.
9678 */
9679 #define zalloc_check_zov_alias(f1, f2) \
9680 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9681 zalloc_check_zov_alias(z_self, zv_zone);
9682 zalloc_check_zov_alias(z_stats, zv_stats);
9683 zalloc_check_zov_alias(z_name, zv_name);
9684 zalloc_check_zov_alias(z_views, zv_next);
9685 #undef zalloc_check_zov_alias
9686
9687 __startup_func
9688 void
9689 zone_view_startup_init(struct zone_view_startup_spec *spec)
9690 {
9691 struct kalloc_heap *heap = NULL;
9692 zone_view_t zv = spec->zv_view;
9693 zone_t z;
9694 zone_security_flags_t zsflags;
9695
9696 switch (spec->zv_heapid) {
9697 case KHEAP_ID_DATA_BUFFERS:
9698 heap = KHEAP_DATA_BUFFERS;
9699 break;
9700 default:
9701 heap = NULL;
9702 }
9703
9704 if (heap) {
9705 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9706 } else {
9707 z = *spec->zv_zone;
9708 assert(spec->zv_size <= zone_elem_inner_size(z));
9709 }
9710
9711 assert(z);
9712
9713 zv->zv_zone = z;
9714 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9715 zv->zv_next = z->z_views;
9716 zsflags = zone_security_config(z);
9717 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9718 /*
9719 * count the raw view for zones not in a heap,
9720 * kalloc_heap_init() already counts it for its members.
9721 */
9722 zone_view_count += 2;
9723 } else {
9724 zone_view_count += 1;
9725 }
9726 z->z_views = zv;
9727 }
9728
9729 zone_t
9730 zone_create(
9731 const char *name,
9732 vm_size_t size,
9733 zone_create_flags_t flags)
9734 {
9735 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9736 }
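/*
 * Usage sketch (hypothetical client code, names invented for the example):
 * a typical subsystem creates its zone once and then allocates and frees
 * fixed-size elements from it, much like the sysctl tests at the bottom of
 * this file do.
 *
 *	struct widget { uint64_t w_id; };
 *	static zone_t widget_zone;
 *
 *	widget_zone = zone_create("example.widget", sizeof(struct widget),
 *	    ZC_DESTRUCTIBLE);
 *
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(widget_zone, w);
 *	zdestroy(widget_zone);	// only legal because of ZC_DESTRUCTIBLE
 */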
9737
9738 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9739
9740 zone_id_t
9741 zone_create_ro(
9742 const char *name,
9743 vm_size_t size,
9744 zone_create_flags_t flags,
9745 zone_create_ro_id_t zc_ro_id)
9746 {
9747 assert(zc_ro_id <= ZC_RO_ID__LAST);
9748 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9749 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9750 return reserved_zid;
9751 }
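/*
 * Usage sketch for read-only zones (hypothetical, loosely mirroring
 * zone_ro_basic_test_run() below): the element is never written through the
 * returned pointer directly, only via the zalloc_ro_mut() /
 * zalloc_ro_update_elem() accessors.  ZC_RO_ID_EXAMPLE is a placeholder for
 * one of the reserved zone_create_ro_id_t values.
 *
 *	zone_id_t zid = zone_create_ro("example.ro", sizeof(uint32_t),
 *	    ZC_NONE, ZC_RO_ID_EXAMPLE);
 *	uint32_t val = 4;
 *	uint32_t *p = zalloc_ro(zid, Z_WAITOK);
 *
 *	zalloc_ro_update_elem(zid, p, &val);	// *p becomes 4, p stays unwritable
 *	zfree_ro(zid, p);
 */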
9752
9753 zone_t
9754 zinit(
9755 vm_size_t size, /* the size of an element */
9756 vm_size_t max __unused, /* maximum memory to use */
9757 vm_size_t alloc __unused, /* allocation size */
9758 const char *name) /* a name for the zone */
9759 {
9760 return zone_create(name, size, ZC_DESTRUCTIBLE);
9761 }
9762
9763 void
9764 zdestroy(zone_t z)
9765 {
9766 unsigned int zindex = zone_index(z);
9767 zone_security_flags_t zsflags = zone_security_array[zindex];
9768
9769 current_thread()->options |= TH_OPT_ZONE_PRIV;
9770 lck_mtx_lock(&zone_gc_lock);
9771
9772 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9773
9774 lck_mtx_unlock(&zone_gc_lock);
9775 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9776
9777 zone_lock(z);
9778
9779 if (!zone_submap_is_sequestered(zsflags)) {
9780 while (!zone_pva_is_null(z->z_pageq_va)) {
9781 struct zone_page_metadata *meta;
9782
9783 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9784 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9785 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9786 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9787 zone_unlock(z);
9788 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9789 ptoa(z->z_chunk_pages));
9790 zone_lock(z);
9791 }
9792 }
9793
9794 #if !KASAN_CLASSIC
9795 /* Assert that all counts are zero */
9796 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9797 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9798 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9799 zone_heap_name(z), z->z_name);
9800 }
9801
9802 /* consistency check: make sure everything is indeed empty */
9803 assert(zone_pva_is_null(z->z_pageq_empty));
9804 assert(zone_pva_is_null(z->z_pageq_partial));
9805 assert(zone_pva_is_null(z->z_pageq_full));
9806 if (!zone_submap_is_sequestered(zsflags)) {
9807 assert(zone_pva_is_null(z->z_pageq_va));
9808 }
9809 #endif
9810
9811 zone_unlock(z);
9812
9813 simple_lock(&all_zones_lock, &zone_locks_grp);
9814
9815 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9816 /* Mark the zone as empty in the bitmap */
9817 bitmap_set(zone_destroyed_bitmap, zindex);
9818 num_zones_in_use--;
9819 assert(num_zones_in_use > 0);
9820
9821 simple_unlock(&all_zones_lock);
9822 }
9823
9824 #endif /* !ZALLOC_TEST */
9825 #pragma mark zalloc module init
9826 #if !ZALLOC_TEST
9827
9828 /*
9829 * Initialize the "zone of zones" which uses fixed memory allocated
9830 * earlier in memory initialization. zone_bootstrap is called
9831 * before zone_init.
9832 */
9833 __startup_func
9834 void
9835 zone_bootstrap(void)
9836 {
9837 #if DEBUG || DEVELOPMENT
9838 #if __x86_64__
9839 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9840 /*
9841 * rdar://79781535 Disable early gaps while running kernPOST on Intel;
9842 * the fp faulting code gets triggered and deadlocks.
9843 */
9844 zone_caching_disabled = 1;
9845 }
9846 #endif /* __x86_64__ */
9847 #endif /* DEBUG || DEVELOPMENT */
9848
9849 /* Validate struct zone_packed_virtual_address expectations */
9850 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9851 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9852 panic("zone_pva_t can't pack a kernel page address in 31 bits");
9853 }
9854
9855 zpercpu_early_count = ml_early_cpu_max_number() + 1;
9856 if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9857 /*
9858 * Scale zc_mag_size() per machine.
9859 *
9860 * - wide machines get 128B magazines to avoid all false sharing
9861 * - smaller machines but with enough RAM get a bit bigger
9862 * buckets (empirically affects networking performance)
9863 */
9864 if (zpercpu_early_count >= 10) {
9865 _zc_mag_size = 14;
9866 } else if ((sane_size >> 30) >= 4) {
9867 _zc_mag_size = 10;
9868 }
9869 }
9870
9871 /*
9872 * Initialize random used to scramble early allocations
9873 */
9874 zpercpu_foreach_cpu(cpu) {
9875 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9876 }
9877
9878 #if CONFIG_PROB_GZALLOC
9879 /*
9880 * Set pgz_sample_counter on the boot CPU so that we do not sample
9881 * any allocation until PGZ has been properly setup (in pgz_init()).
9882 */
9883 *PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9884 #endif /* CONFIG_PROB_GZALLOC */
9885
9886 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9887 /*
9888 * Randomly assign zones to one of the 4 general submaps,
9889 * and pick whether they allocate from the beginning
9890 * or the end of it.
9891 *
9892 * A lot of OOB exploitation relies on precise interleaving
9893 * of specific types in the heap.
9894 *
9895 * Woops, you can't guarantee that anymore.
9896 */
9897 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9898 uint32_t r = zalloc_random_uniform32(0,
9899 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9900
9901 zone_security_array[i].z_submap_from_end = (r & 1);
9902 zone_security_array[i].z_submap_idx += (r >> 1);
9903 }
9904 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9905
9906 thread_call_setup_with_options(&zone_expand_callout,
9907 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9908 THREAD_CALL_OPTIONS_ONCE);
9909
9910 thread_call_setup_with_options(&zone_trim_callout,
9911 zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9912 THREAD_CALL_OPTIONS_ONCE);
9913 }
9914
9915 #define ZONE_GUARD_SIZE (64UL << 10)
9916
9917 __startup_func
9918 static void
9919 zone_tunables_fixup(void)
9920 {
9921 int wdt = 0;
9922
9923 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9924 if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9925 PE_parse_boot_argn("pgz1", NULL, 0)) {
9926 /*
9927 * if pgz1= was used, but pgz= was not,
9928 * then the more specific pgz1 takes precedence.
9929 */
9930 pgz_all = false;
9931 }
9932 #endif
9933
9934 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9935 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9936 }
9937 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9938 !PE_parse_boot_argn("zet", NULL, 0)) {
9939 zone_exhausted_timeout = -1;
9940 }
9941 }
9942 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9943
9944 __startup_func
9945 static void
9946 zone_submap_init(
9947 mach_vm_offset_t *submap_min,
9948 zone_submap_idx_t idx,
9949 uint64_t zone_sub_map_numer,
9950 uint64_t *remaining_denom,
9951 vm_offset_t *remaining_size)
9952 {
9953 vm_map_create_options_t vmco;
9954 vm_map_address_t addr;
9955 vm_offset_t submap_start, submap_end;
9956 vm_size_t submap_size;
9957 vm_map_t submap;
9958 vm_prot_t prot = VM_PROT_DEFAULT;
9959 vm_prot_t prot_max = VM_PROT_ALL;
9960 kern_return_t kr;
9961
9962 submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9963 *remaining_denom);
9964 submap_start = *submap_min;
9965
9966 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9967 vm_offset_t submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9968 submap_start += submap_padding;
9969 submap_size = pmap_ro_zone_align(submap_size);
9970 assert(*remaining_size >= (submap_padding + submap_size));
9971 *remaining_size -= submap_padding;
9972 *submap_min = submap_start;
9973 }
9974
9975 submap_end = submap_start + submap_size;
9976 if (idx == Z_SUBMAP_IDX_VM) {
9977 vm_packing_verify_range("vm_compressor",
9978 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9979 vm_packing_verify_range("vm_page",
9980 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9981 }
9982
9983 vmco = VM_MAP_CREATE_NEVER_FAULTS;
9984 if (!zone_submap_is_sequestered(idx)) {
9985 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9986 }
9987
9988 vm_map_will_allocate_early_map(&zone_submaps[idx]);
9989 submap = kmem_suballoc(kernel_map, submap_min, submap_size, vmco,
9990 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
9991 VM_KERN_MEMORY_ZONE).kmr_submap;
9992
9993 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9994 zone_info.zi_ro_range.min_address = submap_start;
9995 zone_info.zi_ro_range.max_address = submap_end;
9996 prot_max = prot = VM_PROT_NONE;
9997 }
9998
9999 addr = submap_start;
10000 vm_object_t kobject = kernel_object_default;
10001 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
10002 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
10003 kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10004 if (kr != KERN_SUCCESS) {
10005 panic("ksubmap[%s]: failed to make first entry (%d)",
10006 zone_submaps_names[idx], kr);
10007 }
10008
10009 addr = submap_end - ZONE_GUARD_SIZE / 2;
10010 kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
10011 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
10012 kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10013 if (kr != KERN_SUCCESS) {
10014 panic("ksubmap[%s]: failed to make last entry (%d)",
10015 zone_submaps_names[idx], kr);
10016 }
10017
10018 #if DEBUG || DEVELOPMENT
10019 printf("zone_init: map %-5s %p:%p (%u%c)\n",
10020 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
10021 mach_vm_size_pretty(submap_size), mach_vm_size_unit(submap_size));
10022 #endif /* DEBUG || DEVELOPMENT */
10023
10024 zone_submaps[idx] = submap;
10025 *submap_min = submap_end;
10026 *remaining_size -= submap_size;
10027 *remaining_denom -= zone_sub_map_numer;
10028 }
10029
10030 static inline void
10031 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
10032 {
10033 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
10034 pva->packed_address += delta;
10035 }
10036 }
10037
10038 /*
10039 * Allocate metadata array and migrate bootstrap initial metadata and memory.
10040 */
10041 __startup_func
10042 static void
10043 zone_metadata_init(void)
10044 {
10045 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
10046 vm_map_entry_t first;
10047
10048 struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
10049 vm_size_t early_sz;
10050 vm_offset_t reloc_base;
10051
10052 /*
10053 * Step 1: Allocate the metadata + bitmaps range
10054 *
10055 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
10056 * of physical memory (16M per 1G).
10057 *
10058 * Let's preallocate for the worst case to avoid weird panics.
10059 */
10060 vm_map_will_allocate_early_map(&zone_meta_map);
10061 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
10062 zone_meta_size + zone_bits_size + zone_xtra_size,
10063 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10064 VM_KERN_MEMORY_ZONE, &zone_meta_map);
10065 meta_r.min_address += ZONE_GUARD_SIZE;
10066 meta_r.max_address -= ZONE_GUARD_SIZE;
10067 if (zone_xtra_size) {
10068 xtra_r.max_address = meta_r.max_address;
10069 meta_r.max_address -= zone_xtra_size;
10070 xtra_r.min_address = meta_r.max_address;
10071 } else {
10072 xtra_r.min_address = xtra_r.max_address = 0;
10073 }
10074 bits_r.max_address = meta_r.max_address;
10075 meta_r.max_address -= zone_bits_size;
10076 bits_r.min_address = meta_r.max_address;
10077
10078 #if DEBUG || DEVELOPMENT
10079 printf("zone_init: metadata %p:%p (%u%c)\n",
10080 (void *)meta_r.min_address, (void *)meta_r.max_address,
10081 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
10082 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
10083 printf("zone_init: metabits %p:%p (%u%c)\n",
10084 (void *)bits_r.min_address, (void *)bits_r.max_address,
10085 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
10086 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
10087 printf("zone_init: extra %p:%p (%u%c)\n",
10088 (void *)xtra_r.min_address, (void *)xtra_r.max_address,
10089 mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
10090 mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
10091 #endif /* DEBUG || DEVELOPMENT */
10092
10093 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
10094 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
10095
10096 /*
10097 * Step 2: Install new ranges.
10098 * Relocate metadata and bits.
10099 */
10100 early_r = zone_info.zi_map_range;
10101 early_sz = mach_vm_range_size(&early_r);
10102
10103 zone_info.zi_map_range = zone_map_range;
10104 zone_info.zi_meta_range = meta_r;
10105 zone_info.zi_bits_range = bits_r;
10106 zone_info.zi_xtra_range = xtra_r;
10107 zone_info.zi_meta_base = (struct zone_page_metadata *)meta_r.min_address -
10108 zone_pva_from_addr(zone_map_range.min_address).packed_address;
10109
10110 vm_map_lock(vm_map);
10111 first = vm_map_first_entry(vm_map);
10112 reloc_base = first->vme_end;
10113 first->vme_end += early_sz;
10114 vm_map->size += early_sz;
10115 vm_map_unlock(vm_map);
10116
10117 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
10118 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
10119 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
10120 /* this needs to sign extend */
10121 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
10122
10123 zone_meta_populate(reloc_base, early_sz);
10124 memcpy(new_meta, early_meta,
10125 atop(early_sz) * sizeof(struct zone_page_metadata));
10126 for (uint32_t i = 0; i < atop(early_sz); i++) {
10127 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
10128 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
10129 }
10130
10131 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
10132 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
10133
10134 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
10135 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
10136 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
10137 }
10138
10139 zba_populate(0, false);
10140 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
10141 zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
10142
10143 /*
10144 * Step 3: Relocate the bootstrap VM structs
10145 * (including rewriting their content).
10146 */
10147
10148 #if __x86_64__
10149 kernel_memory_populate(reloc_base, early_sz,
10150 KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL,
10151 VM_KERN_MEMORY_OSFMK);
10152 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
10153 #else
10154 for (vm_address_t addr = early_r.min_address;
10155 addr < early_r.max_address; addr += PAGE_SIZE) {
10156 pmap_paddr_t pa = kvtophys(trunc_page(addr));
10157 __assert_only kern_return_t kr;
10158
10159 unsigned int pmap_flags = 0;
10160
10161
10162 kr = pmap_enter_options_addr(kernel_pmap, addr + reloc_delta,
10163 pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, pmap_flags, TRUE,
10164 0, NULL);
10165 assert(kr == KERN_SUCCESS);
10166 }
10167 #endif
10168
10169 #if KASAN
10170 kasan_notify_address(reloc_base, early_sz);
10171 #if KASAN_TBI
10172 kasan_tbi_copy_tags(reloc_base, early_r.min_address, early_sz);
10173 #endif /* KASAN_TBI */
10174 #endif /* KASAN */
10175
10176 vm_map_relocate_early_maps(reloc_delta);
10177
10178 for (uint32_t i = 0; i < atop(early_sz); i++) {
10179 zone_id_t zid = new_meta[i].zm_index;
10180 zone_t z = &zone_array[zid];
10181 vm_size_t esize = zone_elem_outer_size(z);
10182 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
10183 vm_address_t addr;
10184
10185 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
10186 continue;
10187 }
10188
10189 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
10190 if (zone_meta_is_free(&new_meta[i], eidx)) {
10191 continue;
10192 }
10193
10194 addr = vm_memtag_fixup_ptr(base + eidx * esize);
10195 #if KASAN_CLASSIC
10196 kasan_alloc(addr,
10197 zone_elem_inner_size(z), zone_elem_inner_size(z),
10198 zone_elem_redzone(z), false,
10199 __builtin_frame_address(0));
10200 #endif
10201 vm_map_relocate_early_elem(zid, addr, reloc_delta);
10202 }
10203 }
10204
10205 #if !__x86_64__
10206 pmap_remove(kernel_pmap, early_r.min_address, early_r.max_address);
10207 #endif
10208 }
10209
10210 __startup_data
10211 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
10212 #if ZSECURITY_CONFIG(READ_ONLY)
10213 [Z_SUBMAP_IDX_VM] = 15,
10214 [Z_SUBMAP_IDX_READ_ONLY] = 5,
10215 #else
10216 [Z_SUBMAP_IDX_VM] = 20,
10217 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10218 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10219 [Z_SUBMAP_IDX_GENERAL_0] = 15,
10220 [Z_SUBMAP_IDX_GENERAL_1] = 15,
10221 [Z_SUBMAP_IDX_GENERAL_2] = 15,
10222 [Z_SUBMAP_IDX_GENERAL_3] = 15,
10223 [Z_SUBMAP_IDX_DATA] = 20,
10224 #else
10225 [Z_SUBMAP_IDX_GENERAL_0] = 60,
10226 [Z_SUBMAP_IDX_DATA] = 20,
10227 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
10228 };
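/*
 * Illustrative note: zone_submap_init() consumes these ratios as a running
 * numerator/denominator pair, which keeps the split proportional even though
 * each submap is carved out of whatever VA remains.  For example, with a
 * hypothetical 100 units of VA and the READ_ONLY + SAD_FENG_SHUI ratios:
 *
 *	VM:        15 * 100 / 100 = 15  -> 85 left, denom 85
 *	READ_ONLY:  5 *  85 /  85 =  5  -> 80 left, denom 80
 *	GENERAL_0: 15 *  80 /  80 = 15  -> and so on,
 *
 * so each submap still ends up with ratio/100 of the total, modulo page
 * rounding and the read-only alignment padding.
 */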
10229
10230 __startup_func
10231 static inline uint16_t
10232 zone_submap_ratios_denom(void)
10233 {
10234 uint16_t denom = 0;
10235
10236 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10237 denom += submap_ratios[idx];
10238 }
10239
10240 assert(denom == 100);
10241
10242 return denom;
10243 }
10244
10245 __startup_func
10246 static inline vm_offset_t
10247 zone_restricted_va_max(void)
10248 {
10249 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
10250 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
10251
10252 return trunc_page(MIN(compressor_max, vm_page_max));
10253 }
10254
10255 __startup_func
10256 static void
10257 zone_set_map_sizes(void)
10258 {
10259 vm_size_t zsize;
10260 vm_size_t zsizearg;
10261
10262 /*
10263 * Compute the physical limits for the zone map
10264 */
10265
10266 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
10267 zsize = zsizearg * (1024ULL * 1024);
10268 } else {
10269 /* Set target zone size as 1/4 of physical memory */
10270 zsize = (vm_size_t)(sane_size >> 2);
10271 zsize += zsize >> 1;
10272 }
10273
10274 if (zsize < CONFIG_ZONE_MAP_MIN) {
10275 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
10276 }
10277 if (zsize > sane_size >> 1) {
10278 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
10279 }
10280 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
10281 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
10282 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
10283 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
10284 zsize = ZONE_MAP_MAX;
10285 }
10286
10287 zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
10288
10289
10290 /*
10291 * Declare restrictions on zone max
10292 */
10293 vm_offset_t vm_submap_size = round_page(
10294 (submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10295 zone_submap_ratios_denom());
10296
10297 #if CONFIG_PROB_GZALLOC
10298 vm_submap_size += pgz_get_size();
10299 #endif /* CONFIG_PROB_GZALLOC */
10300 if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10301 &zone_map_range.min_address)) {
10302 zone_map_range.min_address = 0;
10303 }
10304
10305 zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10306 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10307
10308 static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10309 ZBA_PTR_MASK + 1);
10310 zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10311 (CHAR_BIT * KALLOC_MINSIZE));
10312
10313 #if VM_TAG_SIZECLASSES
10314 if (zone_tagging_on) {
10315 zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10316 }
10317 if (zba_xtra_shift) {
10318 /*
10319 * if we need the extra space range, then limit the size of the
10320 * bitmaps to something reasonable instead of a theoretical
10321 * worst case scenario of all zones being for the smallest
10322 * allocation granule, in order to avoid fake VA pressure on
10323 * other parts of the system.
10324 */
10325 zone_bits_size = round_page(zone_bits_size / 8);
10326 zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10327 }
10328 #endif /* VM_TAG_SIZECLASSES */
10329 }
10330 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
10331
10332 /*
10333 * Can't use zone_info.zi_map_range at this point as it is being used to
10334 * store the range of early pmap memory that was stolen to bootstrap the
10335 * necessary VM zones.
10336 */
10337 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10338 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10339 return zone_meta_size + zone_bits_size + zone_xtra_size;
10340 });
10341
10342 /*
10343 * Global initialization of Zone Allocator.
10344 * Runs after zone_bootstrap.
10345 */
10346 __startup_func
10347 static void
10348 zone_init(void)
10349 {
10350 vm_size_t remaining_size = ZONE_MAP_VA_SIZE;
10351 mach_vm_offset_t submap_min = 0;
10352 uint64_t denom = zone_submap_ratios_denom();
10353 /*
10354 * And now allocate the various pieces of VA and submaps.
10355 */
10356
10357 submap_min = zone_map_range.min_address;
10358
10359 #if CONFIG_PROB_GZALLOC
10360 vm_size_t pgz_size = pgz_get_size();
10361
10362 vm_map_will_allocate_early_map(&pgz_submap);
10363 zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10364 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10365 VM_KERN_MEMORY_ZONE, &pgz_submap);
10366
10367 submap_min += pgz_size;
10368 remaining_size -= pgz_size;
10369 #if DEBUG || DEVELOPMENT
10370 printf("zone_init: pgzalloc %p:%p (%u%c) [%d slots]\n",
10371 (void *)zone_info.zi_pgz_range.min_address,
10372 (void *)zone_info.zi_pgz_range.max_address,
10373 mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10374 pgz_slots);
10375 #endif /* DEBUG || DEVELOPMENT */
10376 #endif /* CONFIG_PROB_GZALLOC */
10377
10378 /*
10379 * Allocate the submaps
10380 */
10381 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10382 if (submap_ratios[idx] == 0) {
10383 zone_submaps[idx] = VM_MAP_NULL;
10384 } else {
10385 zone_submap_init(&submap_min, idx, submap_ratios[idx],
10386 &denom, &remaining_size);
10387 }
10388 }
10389
10390 zone_metadata_init();
10391
10392 #if VM_TAG_SIZECLASSES
10393 if (zone_tagging_on) {
10394 vm_allocation_zones_init();
10395 }
10396 #endif /* VM_TAG_SIZECLASSES */
10397
10398 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10399 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10400
10401 (void)zone_create_ext("vm.permanent", 1, kma_flags | ZC_NOTBITAG,
10402 ZONE_ID_PERMANENT, ^(zone_t z) {
10403 z->z_permanent = true;
10404 z->z_elem_size = 1;
10405 });
10406 (void)zone_create_ext("vm.permanent.percpu", 1,
10407 kma_flags | ZC_PERCPU | ZC_NOTBITAG, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10408 z->z_permanent = true;
10409 z->z_elem_size = 1;
10410 });
10411
10412 zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10413 zc_mag_size() * sizeof(vm_offset_t),
10414 ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10415 zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10416
10417 /*
10418 * Now migrate the startup statistics into their final storage,
10419 * and enable logging for early zones (that zone_create_ext() skipped).
10420 */
10421 int cpu = cpu_number();
10422 zone_index_foreach(idx) {
10423 zone_t tz = &zone_array[idx];
10424
10425 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10426 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10427
10428 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10429 tz->z_stats = zs;
10430 }
10431 if (tz->z_self == tz) {
10432 #if ZALLOC_ENABLE_LOGGING
10433 zone_setup_logging(tz);
10434 #endif /* ZALLOC_ENABLE_LOGGING */
10435 #if KASAN_TBI
10436 zone_setup_kasan_logging(tz);
10437 #endif /* KASAN_TBI */
10438 }
10439 }
10440 }
10441 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10442
10443 void
10444 zalloc_iokit_lockdown(void)
10445 {
10446 zone_share_always = false;
10447 }
10448
10449 void
10450 zalloc_first_proc_made(void)
10451 {
10452 zone_caching_disabled = 0;
10453 zone_early_thres_mul = 1;
10454 }
10455
10456 __startup_func
10457 vm_offset_t
10458 zone_early_mem_init(vm_size_t size)
10459 {
10460 vm_offset_t mem;
10461
10462 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10463
10464 /*
10465 * The zone that is used early to bring up the VM is stolen here.
10466 *
10467 * When the zone subsystem is actually initialized,
10468 * zone_metadata_init() will be called, and those pages
10469 * and the elements they contain, will be relocated into
10470 * the VM submap (even on architectures where those zones
10471 * do not live there).
10472 */
10473 #if __x86_64__
10474 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10475 mem = (vm_offset_t)zone_early_pages_to_cram;
10476 #else
10477 mem = (vm_offset_t)pmap_steal_zone_memory(size, PAGE_SIZE);
10478 #endif
10479
10480 zone_info.zi_meta_base = zone_early_meta_array_startup -
10481 zone_pva_from_addr(mem).packed_address;
10482 zone_info.zi_map_range.min_address = mem;
10483 zone_info.zi_map_range.max_address = mem + size;
10484
10485 zone_info.zi_bits_range = (struct mach_vm_range){
10486 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10487 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10488 sizeof(zba_chunk_startup),
10489 };
10490
10491 zba_meta()->zbam_left = 1;
10492 zba_meta()->zbam_right = 1;
10493 zba_init_chunk(0, false);
10494
10495 return mem;
10496 }
10497
10498 #endif /* !ZALLOC_TEST */
10499 #pragma mark - tests
10500 #if DEBUG || DEVELOPMENT
10501
10502 /*
10503 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10504 * thread goes through at a time.
10505 *
10506 * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10507 * before zdestroy()), which could lead us to run out of zones.
10508 */
10509 static bool any_zone_test_running = FALSE;
10510
10511 static uintptr_t *
10512 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10513 {
10514 vm_offset_t elem_size = zone_elem_outer_size(z);
10515 vm_offset_t base;
10516 struct zone_page_metadata *meta;
10517
10518 while (!zone_pva_is_null(page_index)) {
10519 base = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10520 meta = zone_pva_to_meta(page_index);
10521
10522 if (meta->zm_inline_bitmap) {
10523 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10524 uint32_t map = meta[i].zm_bitmap;
10525
10526 for (; map; map &= map - 1) {
10527 *elems++ = INSTANCE_PUT(base +
10528 elem_size * __builtin_clz(map));
10529 }
10530 base += elem_size * 32;
10531 }
10532 } else {
10533 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10534 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10535 for (size_t i = 0; i < (1u << order); i++) {
10536 uint64_t map = bits[i];
10537
10538 for (; map; map &= map - 1) {
10539 *elems++ = INSTANCE_PUT(base +
10540 elem_size * __builtin_clzll(map));
10541 }
10542 base += elem_size * 64;
10543 }
10544 }
10545
10546 page_index = meta->zm_page_next;
10547 }
10548 return elems;
10549 }
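/*
 * Side note on the bit-walk idiom above (illustrative only): "map &= map - 1"
 * clears the lowest set bit on each iteration, so the loop body runs exactly
 * once per set bit, e.g.:
 *
 *	uint32_t map = 0x0000000a;	// bits 1 and 3 set
 *	for (; map; map &= map - 1) {
 *		// 1st pass: map == 0x0a, 2nd pass: map == 0x08
 *	}
 */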
10550
10551 kern_return_t
10552 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10553 {
10554 zone_t zone = NULL;
10555 uintptr_t * array;
10556 uintptr_t * next;
10557 uintptr_t element;
10558 uint32_t idx, count, found;
10559 uint32_t nobtcount;
10560 uint32_t elemSize;
10561 size_t maxElems;
10562
10563 zone_foreach(z) {
10564 if (!z->z_name) {
10565 continue;
10566 }
10567 if (!strncmp(zoneName, z->z_name, nameLen)) {
10568 zone = z;
10569 break;
10570 }
10571 }
10572 if (zone == NULL) {
10573 return KERN_INVALID_NAME;
10574 }
10575
10576 elemSize = (uint32_t)zone_elem_inner_size(zone);
10577 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10578
10579 array = kalloc_type_tag(vm_offset_t, maxElems, VM_KERN_MEMORY_DIAG);
10580 if (array == NULL) {
10581 return KERN_RESOURCE_SHORTAGE;
10582 }
10583
10584 zone_lock(zone);
10585
10586 next = array;
10587 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10588 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10589 count = (uint32_t)(next - array);
10590
10591 zone_unlock(zone);
10592
10593 zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10594 assert(found <= count);
10595
10596 for (idx = 0; idx < count; idx++) {
10597 element = array[idx];
10598 if (kInstanceFlagReferenced & element) {
10599 continue;
10600 }
10601 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10602 }
10603
10604 #if ZALLOC_ENABLE_LOGGING
10605 if (zone->z_btlog && !corruption_debug_flag) {
10606 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10607 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10608 btlog_copy_backtraces_for_elements(zone->z_btlog,
10609 (vm_address_t *)array, &count, elemSize, proc);
10610 }
10611 #endif /* ZALLOC_ENABLE_LOGGING */
10612
10613 for (nobtcount = idx = 0; idx < count; idx++) {
10614 element = array[idx];
10615 if (!element) {
10616 continue;
10617 }
10618 if (kInstanceFlagReferenced & element) {
10619 continue;
10620 }
10621 nobtcount++;
10622 }
10623 if (nobtcount) {
10624 proc(nobtcount, elemSize, BTREF_NULL);
10625 }
10626
10627 kfree_type(vm_offset_t, maxElems, array);
10628 return KERN_SUCCESS;
10629 }
10630
10631 static int
10632 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10633 {
10634 zone_security_flags_t zsflags;
10635 uint32_t x = 4;
10636 uint32_t *test_ptr;
10637
10638 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10639 printf("zone_ro_basic_test: Test already running.\n");
10640 return EALREADY;
10641 }
10642
10643 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10644
10645 for (int i = 0; i < 3; i++) {
10646 #if ZSECURITY_CONFIG(READ_ONLY)
10647 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10648 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10649 printf("zone_ro_basic_test: create a sub-page size zone\n");
10650
10651 printf("zone_ro_basic_test: verify flags were set\n");
10652 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10653
10654 printf("zone_ro_basic_test: zalloc an element\n");
10655 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10656 assert(test_ptr);
10657
10658 printf("zone_ro_basic_test: verify we can't write to it\n");
10659 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10660
10661 x = 4;
10662 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10663 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10664 assert(test_ptr);
10665 assert(*(uint32_t*)test_ptr == x);
10666
10667 x = 5;
10668 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10669 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10670 assert(test_ptr);
10671 assert(*(uint32_t*)test_ptr == x);
10672
10673 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10674 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10675
10676 printf("zone_ro_basic_test: free elem\n");
10677 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10678 assert(!test_ptr);
10679 #else
10680 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10681
10682 printf("zone_ro_basic_test: verify flags were set\n");
10683 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10684
10685 printf("zone_ro_basic_test: zalloc an element\n");
10686 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10687 assert(test_ptr);
10688
10689 x = 4;
10690 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10691 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10692 assert(test_ptr);
10693 assert(*(uint32_t*)test_ptr == x);
10694
10695 x = 5;
10696 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10697 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10698 assert(test_ptr);
10699 assert(*(uint32_t*)test_ptr == x);
10700
10701 printf("zone_ro_basic_test: free elem\n");
10702 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10703 assert(!test_ptr);
10704 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10705 }
10706
10707 printf("zone_ro_basic_test: garbage collection\n");
10708 zone_gc(ZONE_GC_DRAIN);
10709
10710 printf("zone_ro_basic_test: Test passed\n");
10711
10712 *out = 1;
10713 os_atomic_store(&any_zone_test_running, false, relaxed);
10714 return 0;
10715 }
10716 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10717
10718 static int
10719 zone_basic_test_run(__unused int64_t in, int64_t *out)
10720 {
10721 static zone_t test_zone_ptr = NULL;
10722
10723 unsigned int i = 0, max_iter = 5;
10724 void * test_ptr;
10725 zone_t test_zone;
10726 int rc = 0;
10727
10728 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10729 printf("zone_basic_test: Test already running.\n");
10730 return EALREADY;
10731 }
10732
10733 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10734
10735 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10736 do {
10737 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10738 assert(test_zone);
10739
10740 #if KASAN_CLASSIC
10741 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10742 #else
10743 if (test_zone->z_elems_free != 0)
10744 #endif
10745 {
10746 printf("zone_basic_test: free count is not zero\n");
10747 rc = EIO;
10748 goto out;
10749 }
10750
10751 if (test_zone_ptr == NULL) {
10752 /* Stash the zone pointer returned on the first zinit */
10753 printf("zone_basic_test: zone created for the first time\n");
10754 test_zone_ptr = test_zone;
10755 } else if (test_zone != test_zone_ptr) {
10756 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10757 rc = EIO;
10758 goto out;
10759 }
10760
10761 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10762 zfree(test_zone, test_ptr);
10763
10764 zdestroy(test_zone);
10765 i++;
10766
10767 printf("zone_basic_test: Iteration %d successful\n", i);
10768 } while (i < max_iter);
10769
10770 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
10771 /* test Z_VA_SEQUESTER */
10772 {
10773 zone_t test_pcpu_zone;
10774 kern_return_t kr;
10775 int idx, num_allocs = 8;
10776 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10777 void *allocs[num_allocs];
10778 void **allocs_pcpu;
10779 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10780
10781 test_zone = zone_create("test_zone_sysctl", elem_size,
10782 ZC_DESTRUCTIBLE);
10783 assert(test_zone);
10784
10785 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10786 ZC_DESTRUCTIBLE | ZC_PERCPU);
10787 assert(test_pcpu_zone);
10788
10789 for (idx = 0; idx < num_allocs; idx++) {
10790 allocs[idx] = zalloc(test_zone);
10791 assert(NULL != allocs[idx]);
10792 printf("alloc[%d] %p\n", idx, allocs[idx]);
10793 }
10794 for (idx = 0; idx < num_allocs; idx++) {
10795 zfree(test_zone, allocs[idx]);
10796 }
10797 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10798
10799 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10800 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10801 assert(kr == KERN_SUCCESS);
10802
10803 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10804 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10805 Z_WAITOK | Z_ZERO);
10806 assert(NULL != allocs_pcpu[idx]);
10807 }
10808 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10809 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10810 }
10811 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10812
10813 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10814 vm_page_wire_count, vm_page_free_count,
10815 100L * phys_pages / zone_pages_wired_max);
10816 zone_gc(ZONE_GC_DRAIN);
10817 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10818 vm_page_wire_count, vm_page_free_count,
10819 100L * phys_pages / zone_pages_wired_max);
10820
10821 unsigned int allva = 0;
10822
		zone_foreach(z) {
			zone_lock(z);
			allva += z->z_wired_cur;
			if (zone_pva_is_null(z->z_pageq_va)) {
				zone_unlock(z);
				continue;
			}
			unsigned count = 0;
			uint64_t size;
			zone_pva_t pg = z->z_pageq_va;
			struct zone_page_metadata *page_meta;
			while (pg.packed_address) {
				page_meta = zone_pva_to_meta(pg);
				count += z->z_percpu ? 1 : z->z_chunk_pages;
				if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
					count -= page_meta->zm_page_index;
				}
				pg = page_meta->zm_page_next;
			}
			size = zone_size_wired(z);
			if (!size) {
				size = 1;
			}
			printf("%s%s: seq %d, res %d, %qd %%\n",
			    zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
			    z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
			zone_unlock(z);
		}

		printf("total va: %d\n", allva);

		assert(zone_pva_is_null(test_zone->z_pageq_empty));
		assert(zone_pva_is_null(test_zone->z_pageq_partial));
		assert(!zone_pva_is_null(test_zone->z_pageq_va));
		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));

		for (idx = 0; idx < num_allocs; idx++) {
			assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
		}

		/* make sure the zone is still usable after a GC */

		for (idx = 0; idx < num_allocs; idx++) {
			allocs[idx] = zalloc(test_zone);
			assert(allocs[idx]);
			printf("alloc[%d] %p\n", idx, allocs[idx]);
		}
		for (idx = 0; idx < num_allocs; idx++) {
			zfree(test_zone, allocs[idx]);
		}

		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
			allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
			    Z_WAITOK | Z_ZERO);
			assert(NULL != allocs_pcpu[idx]);
		}
		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
			zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
		}

		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));

		kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);

		zdestroy(test_zone);
		zdestroy(test_pcpu_zone);
	}
#endif /* !KASAN_CLASSIC */

	printf("zone_basic_test: Test passed\n");

	*out = 1;
out:
	os_atomic_store(&any_zone_test_running, false, relaxed);
	return rc;
}
SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
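
/*
 * A minimal usage sketch (the sysctl node path is an assumption, not taken
 * from this file): on DEVELOPMENT/DEBUG kernels, SYSCTL_TEST_REGISTER() is
 * expected to expose each of these tests as a writable sysctl, so the basic
 * test would be kicked off from user space with something like:
 *
 *     sysctl debug.test.zone_basic_test=1
 *
 * The exact node name depends on how SYSCTL_TEST_REGISTER is defined in this
 * build; the value written is passed through as the "in" argument of the
 * registered handler.
 */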

struct zone_stress_obj {
	TAILQ_ENTRY(zone_stress_obj) zso_link;
};

struct zone_stress_ctx {
	thread_t zsc_leader;
	lck_mtx_t zsc_lock;
	zone_t zsc_zone;
	uint64_t zsc_end;
	uint32_t zsc_workers;
};

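/*
 * Stress worker: each thread (the calling "leader" plus the kernel threads
 * spawned by zone_stress_test_run) keeps a private list of elements from the
 * shared test zone and uses a random bit generator to choose between
 * inserting at the head or tail, freeing from the head or tail, or (leader
 * only) forcing a zone_gc(ZONE_GC_DRAIN).  Each thread caps its live
 * allocations at 10000 and runs until the shared deadline zsc_end expires.
 */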
static void
zone_stress_worker(void *arg, wait_result_t __unused wr)
{
	struct zone_stress_ctx *ctx = arg;
	bool leader = ctx->zsc_leader == current_thread();
	TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
	struct zone_bool_gen bg = { };
	struct zone_stress_obj *obj;
	uint32_t allocs = 0;

	random_bool_init(&bg.zbg_bg);

	do {
		for (int i = 0; i < 2000; i++) {
			uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
			    bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
			switch (what) {
			case 0:
			case 1:
				if (allocs < 10000) {
					obj = zalloc(ctx->zsc_zone);
					TAILQ_INSERT_HEAD(&head, obj, zso_link);
					allocs++;
				}
				break;
			case 2:
			case 3:
				if (allocs < 10000) {
					obj = zalloc(ctx->zsc_zone);
					TAILQ_INSERT_TAIL(&head, obj, zso_link);
					allocs++;
				}
				break;
			case 4:
				if (leader) {
					zone_gc(ZONE_GC_DRAIN);
				}
				break;
			case 5:
			case 6:
				if (!TAILQ_EMPTY(&head)) {
					obj = TAILQ_FIRST(&head);
					TAILQ_REMOVE(&head, obj, zso_link);
					zfree(ctx->zsc_zone, obj);
					allocs--;
				}
				break;
			case 7:
				if (!TAILQ_EMPTY(&head)) {
					obj = TAILQ_LAST(&head, zone_stress_head);
					TAILQ_REMOVE(&head, obj, zso_link);
					zfree(ctx->zsc_zone, obj);
					allocs--;
				}
				break;
			}
		}
	} while (mach_absolute_time() < ctx->zsc_end);

	while (!TAILQ_EMPTY(&head)) {
		obj = TAILQ_FIRST(&head);
		TAILQ_REMOVE(&head, obj, zso_link);
		zfree(ctx->zsc_zone, obj);
	}

	lck_mtx_lock(&ctx->zsc_lock);
	if (--ctx->zsc_workers == 0) {
		thread_wakeup(ctx);
	} else if (leader) {
		while (ctx->zsc_workers) {
			lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
			    THREAD_UNINT);
		}
	}
	lck_mtx_unlock(&ctx->zsc_lock);

	if (!leader) {
		thread_terminate_self();
		__builtin_unreachable();
	}
}

static int
zone_stress_test_run(__unused int64_t in, int64_t *out)
{
	struct zone_stress_ctx ctx = {
		.zsc_leader = current_thread(),
		.zsc_workers = 3,
	};
	kern_return_t kr;
	thread_t th;

	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
		printf("zone_stress_test: Test already running.\n");
		return EALREADY;
	}

	lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
	ctx.zsc_zone = zone_create("test_zone_344", 344,
	    ZC_DESTRUCTIBLE | ZC_NOCACHING);
	assert(ctx.zsc_zone->z_chunk_pages > 1);

	clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);

	printf("zone_stress_test: Starting (leader %p)\n", current_thread());

	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);

	for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
		kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
		    BASEPRI_DEFAULT, &th);
		if (kr == KERN_SUCCESS) {
			printf("zone_stress_test: thread %d: %p\n", i, th);
			thread_deallocate(th);
		} else {
			ctx.zsc_workers--;
		}
	}

	zone_stress_worker(&ctx, 0);

	lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);

	zdestroy(ctx.zsc_zone);

	printf("zone_stress_test: Done\n");

	*out = 1;
	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
	os_atomic_store(&any_zone_test_running, false, relaxed);
	return 0;
}
SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);

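/*
 * GC stress test: worker threads continuously allocate and free a few pages
 * worth of padded elements from zone_gc_stress_zone while the driver thread
 * allocates larger batches and trims the zone under zone_gc_lock, so that
 * zone_reclaim() races against concurrent allocation and free traffic.
 */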
struct zone_gc_stress_obj {
	STAILQ_ENTRY(zone_gc_stress_obj) zgso_link;
	uintptr_t zgso_pad[63];
};
STAILQ_HEAD(zone_gc_stress_head, zone_gc_stress_obj);

#define ZONE_GC_OBJ_PER_PAGE  (PAGE_SIZE / sizeof(struct zone_gc_stress_obj))
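/*
 * Size arithmetic (assuming a 64-bit kernel): the STAILQ linkage is one
 * pointer and zgso_pad adds 63 more, so each element is 64 * 8 = 512 bytes.
 * With 16K pages ZONE_GC_OBJ_PER_PAGE works out to 32 elements; with 4K
 * pages it is 8.
 */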

KALLOC_TYPE_DEFINE(zone_gc_stress_zone, struct zone_gc_stress_obj, KT_DEFAULT);

struct zone_gc_stress_ctx {
	bool zgsc_done;
	lck_mtx_t zgsc_lock;
	zone_t zgsc_zone;
	uint64_t zgsc_end;
	uint32_t zgsc_workers;
};

static void
zone_gc_stress_test_alloc_n(struct zone_gc_stress_head *head, size_t n)
{
	struct zone_gc_stress_obj *obj;

	for (size_t i = 0; i < n; i++) {
		obj = zalloc_flags(zone_gc_stress_zone, Z_WAITOK);
		STAILQ_INSERT_TAIL(head, obj, zgso_link);
	}
}

static void
zone_gc_stress_test_free_n(struct zone_gc_stress_head *head)
{
	struct zone_gc_stress_obj *obj;

	while ((obj = STAILQ_FIRST(head))) {
		STAILQ_REMOVE_HEAD(head, zgso_link);
		zfree(zone_gc_stress_zone, obj);
	}
}

__dead2
static void
zone_gc_stress_worker(void *arg, wait_result_t __unused wr)
{
	struct zone_gc_stress_ctx *ctx = arg;
	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);

	while (!ctx->zgsc_done) {
		zone_gc_stress_test_alloc_n(&head, ZONE_GC_OBJ_PER_PAGE * 4);
		zone_gc_stress_test_free_n(&head);
	}

	lck_mtx_lock(&ctx->zgsc_lock);
	if (--ctx->zgsc_workers == 0) {
		thread_wakeup(ctx);
	}
	lck_mtx_unlock(&ctx->zgsc_lock);

	thread_terminate_self();
	__builtin_unreachable();
}

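/*
 * Driver for the GC stress test: "in" selects the number of rounds.  Each
 * round allocates at least 20 pages worth of elements (more when the
 * magazine and free-batch sizes are large), frees them, and trims the zone
 * under zone_gc_lock while the worker threads keep churning; once the
 * workers have been told to stop, a final ZONE_RECLAIM_DRAIN empties the
 * zone.
 */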
static int
zone_gc_stress_test_run(__unused int64_t in, int64_t *out)
{
	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
	struct zone_gc_stress_ctx ctx = {
		.zgsc_workers = 3,
	};
	kern_return_t kr;
	thread_t th;

	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
		printf("zone_gc_stress_test: Test already running.\n");
		return EALREADY;
	}

	lck_mtx_init(&ctx.zgsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
	lck_mtx_lock(&ctx.zgsc_lock);

	printf("zone_gc_stress_test: Starting (leader %p)\n", current_thread());

	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);

	for (uint32_t i = 0; i < ctx.zgsc_workers; i++) {
		kr = kernel_thread_start_priority(zone_gc_stress_worker, &ctx,
		    BASEPRI_DEFAULT, &th);
		if (kr == KERN_SUCCESS) {
			printf("zone_gc_stress_test: thread %d: %p\n", i, th);
			thread_deallocate(th);
		} else {
			ctx.zgsc_workers--;
		}
	}

	for (uint64_t i = 0; i < in; i++) {
		size_t count = zc_mag_size() * zc_free_batch_size() * 10;

		if (count < ZONE_GC_OBJ_PER_PAGE * 20) {
			count = ZONE_GC_OBJ_PER_PAGE * 20;
		}

		zone_gc_stress_test_alloc_n(&head, count);
		zone_gc_stress_test_free_n(&head);

		lck_mtx_lock(&zone_gc_lock);
		zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
		    ZONE_RECLAIM_TRIM);
		lck_mtx_unlock(&zone_gc_lock);

		printf("zone_gc_stress_test: round %lld/%lld\n", i + 1, in);
	}

	os_atomic_thread_fence(seq_cst);
	ctx.zgsc_done = true;
	lck_mtx_sleep(&ctx.zgsc_lock, LCK_SLEEP_DEFAULT, &ctx, THREAD_UNINT);
	lck_mtx_unlock(&ctx.zgsc_lock);

	lck_mtx_destroy(&ctx.zgsc_lock, &zone_locks_grp);

	lck_mtx_lock(&zone_gc_lock);
	zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
	    ZONE_RECLAIM_DRAIN);
	lck_mtx_unlock(&zone_gc_lock);

	printf("zone_gc_stress_test: Done\n");

	*out = 1;
	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
	os_atomic_store(&any_zone_test_running, false, relaxed);
	return 0;
}
SYSCTL_TEST_REGISTER(zone_gc_stress_test, zone_gc_stress_test_run);

/*
 * Routines to test that zone garbage collection and zone replenish threads
 * running at the same time don't cause problems.
 */

static int
zone_gc_replenish_test(__unused int64_t in, int64_t *out)
{
	zone_gc(ZONE_GC_DRAIN);
	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);

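/*
 * Allocation-side counterpart of the replenish test: grab ten times the
 * reserve (z_elems_rsv) from vm_map_entry_zone so that the replenish thread
 * has to run while the allocations are in flight, then free everything.
 */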
static int
zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
{
	zone_t z = vm_map_entry_zone;
	struct data { struct data *next; } *node, *list = NULL;

	if (z == NULL) {
		printf("Couldn't find a replenish zone\n");
		return EIO;
	}

	/* big enough to go past replenishment */
	for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
		node = zalloc(z);
		node->next = list;
		list = node;
	}

	/*
	 * release the memory we allocated
	 */
	while (list != NULL) {
		node = list;
		list = list->next;
		zfree(z, node);
	}

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);

#endif /* DEBUG || DEVELOPMENT */