1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: kern/zalloc.c
60 * Author: Avadis Tevanian, Jr.
61 *
62 * Zone-based memory allocator. A zone is a collection of fixed size
63 * data blocks for which quick allocation/deallocation is possible.
64 */
65
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97
98 #include <prng/random.h>
99
100 #include <vm/pmap.h>
101 #include <vm/vm_map_internal.h>
102 #include <vm/vm_memtag.h>
103 #include <vm/vm_kern_internal.h>
104 #include <vm/vm_kern_xnu.h>
105 #include <vm/vm_page_internal.h>
106 #include <vm/vm_pageout_internal.h>
107 #include <vm/vm_compressor_xnu.h> /* C_SLOT_PACKED_PTR* */
108 #include <vm/vm_far.h>
109
110 #include <pexpert/pexpert.h>
111
112 #include <machine/machparam.h>
113 #include <machine/machine_routines.h> /* ml_cpu_get_info */
114
115 #include <os/atomic.h>
116
117 #include <libkern/OSDebug.h>
118 #include <libkern/OSAtomic.h>
119 #include <libkern/section_keywords.h>
120 #include <sys/kdebug.h>
121 #include <sys/kern_memorystatus_xnu.h>
122 #include <sys/code_signing.h>
123
124 #include <san/kasan.h>
125 #include <libsa/stdlib.h>
126 #include <sys/errno.h>
127
128 #include <IOKit/IOBSD.h>
129 #include <arm64/amcc_rorgn.h>
130
131 #if DEBUG
132 #define z_debug_assert(expr) assert(expr)
133 #else
134 #define z_debug_assert(expr) (void)(expr)
135 #endif
136
137 #if CONFIG_PROB_GZALLOC && CONFIG_SPTM
138 #error This is not a supported configuration
139 #endif
140
141 /* Returns pid of the task with the largest number of VM map entries. */
142 extern pid_t find_largest_process_vm_map_entries(void);
143
144 extern zone_t vm_object_zone;
145 extern zone_t ipc_service_port_label_zone;
146
147 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
148 ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
149
150 #if ZSECURITY_CONFIG(ZONE_TAGGING)
151 #define ZONE_MIN_ELEM_SIZE (sizeof(uint64_t) * 2)
152 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
153 #else /* ZSECURITY_CONFIG_ZONE_TAGGING */
154 #define ZONE_MIN_ELEM_SIZE sizeof(uint64_t)
155 #define ZONE_ALIGN_SIZE ZONE_MIN_ELEM_SIZE
156 #endif /* ZSECURITY_CONFIG_ZONE_TAGGING */
157
158 #define ZONE_MAX_ALLOC_SIZE (32 * 1024)
159 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
160 #define ZONE_CHUNK_ALLOC_SIZE (256 * 1024)
161 #define ZONE_MAX_CHUNK_ALLOC_NUM (10)
162 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
163
164 #if XNU_PLATFORM_MacOSX
165 #define ZONE_MAP_MAX (32ULL << 30)
166 #define ZONE_MAP_VA_SIZE (128ULL << 30)
167 #else
168 #define ZONE_MAP_MAX (8ULL << 30)
169 #define ZONE_MAP_VA_SIZE (24ULL << 30)
170 #endif
171
172 __enum_closed_decl(zm_len_t, uint16_t, {
173 ZM_CHUNK_FREE = 0x0,
174 /* 1 through 8 are valid lengths */
175 ZM_CHUNK_LEN_MAX = 0x8,
176
177 /* PGZ magical values */
178 ZM_PGZ_FREE = 0x0,
179 ZM_PGZ_ALLOCATED = 0xa, /* [a]llocated */
180 ZM_PGZ_GUARD = 0xb, /* oo[b] */
181 ZM_PGZ_DOUBLE_FREE = 0xd, /* [d]ouble_free */
182
183 /* secondary page markers */
184 ZM_SECONDARY_PAGE = 0xe,
185 ZM_SECONDARY_PCPU_PAGE = 0xf,
186 });
187
188 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
189
190 struct zone_page_metadata {
191 union {
192 struct {
193 /* The index of the zone this metadata page belongs to */
194 zone_id_t zm_index : 10;
195
196 /*
197 * This chunk ends with a guard page.
198 */
199 uint16_t zm_guarded : 1;
200
201 /*
202 * Whether `zm_bitmap` is an inline bitmap
203 * or a packed bitmap reference
204 */
205 uint16_t zm_inline_bitmap : 1;
206
207 /*
208 * Zones allocate in "chunks" of zone_t::z_chunk_pages
209 * consecutive pages, or zpercpu_count() pages if the
210 * zone is percpu.
211 *
212 * The first page of it has its metadata set with:
213 * - 0 if none of the pages are currently wired
214 * - the number of wired pages in the chunk
215 * (not scaled for percpu).
216 *
217 * Other pages in the chunk have their zm_chunk_len set
218 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
219 * depending on whether the zone is percpu or not.
220 * For those, zm_page_index holds the index of that page
221 * in the run, and zm_subchunk_len the remaining length
222 * within the chunk.
223 *
224 * Metadata used for PGZ pages can have 3 values:
225 * - ZM_PGZ_FREE: slot is free
226 * - ZM_PGZ_ALLOCATED: slot holds an allocated element
227 * at offset (zm_pgz_orig_addr & PAGE_MASK)
228 * - ZM_PGZ_DOUBLE_FREE: slot detected a double free
229 * (will panic).
230 */
231 zm_len_t zm_chunk_len : 4;
232 };
233 uint16_t zm_bits;
234 };
235
236 union {
237 #define ZM_ALLOC_SIZE_LOCK 1u
238 uint16_t zm_alloc_size; /* first page only */
239 struct {
240 uint8_t zm_page_index; /* secondary pages only */
241 uint8_t zm_subchunk_len; /* secondary pages only */
242 };
243 uint16_t zm_oob_offs; /* in guard pages */
244 };
245 union {
246 uint32_t zm_bitmap; /* most zones */
247 uint32_t zm_bump; /* permanent zones */
248 };
249
250 union {
251 struct {
252 zone_pva_t zm_page_next;
253 zone_pva_t zm_page_prev;
254 };
255 vm_offset_t zm_pgz_orig_addr;
256 struct zone_page_metadata *zm_pgz_slot_next;
257 };
258 };
259 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
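/*
 * A quick layout recap of the 16 bytes verified above: 2 bytes of packed
 * flag bits (zm_bits), 2 bytes of allocation size or secondary-page
 * indexes, 4 bytes of bitmap reference or bump counter, and an 8-byte
 * union holding either the page queue linkage or the PGZ backpointers.
 * One such entry exists per page of the zone map, in the zi_meta_range.
 */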
260
261 /*!
262 * @typedef zone_magazine_t
263 *
264 * @brief
265 * Magazine of cached allocations.
266 *
267 * @field zm_next linkage used by magazine depots.
268 * @field zm_elems an array of @c zc_mag_size() elements.
269 */
270 struct zone_magazine {
271 zone_magazine_t zm_next;
272 smr_seq_t zm_seq;
273 vm_offset_t zm_elems[0];
274 };
275
276 /*!
277 * @typedef zone_cache_t
278 *
279 * @brief
280 * Magazine of cached allocations.
281 *
282 * @discussion
283 * Below is a diagram of the caching system. This design is inspired by the
284 * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
285 * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
286 * zone allocator (itself derived from this seminal work).
287 *
288 * It is divided into 3 layers:
289 * - the per-cpu layer,
290 * - the recirculation depot layer,
291 * - the Zone Allocator.
292 *
293 * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
294 * which are stacks of up to @c zc_mag_size() elements.
295 *
296 * <h2>CPU layer</h2>
297 *
298 * The CPU layer (@c zone_cache_t) looks like this:
299 *
300 * ╭─ a ─ f ─┬───────── zm_depot ──────────╮
301 * │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮ │
302 * │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│ │
303 * │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│ │
304 * │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│ │
305 * │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯ │
306 * ╰─────────┴─────────────────────────────╯
307 *
308 * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
309 * or free to. Serialization is achieved through disabling preemption, and only
310 * the current CPU can access those allocations. This is represented on the left
311 * hand side of the diagram above.
312 *
313 * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
314 * full magazines, and is protected by the @c zm_depot_lock for access.
315 * The lock is expected to absolutely never be contended, as only the local CPU
316 * tends to access the local per-cpu depot in regular operation mode.
317 *
318 * However, unlike UMA, our implementation allows the zone GC to reclaim
319 * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
320 *
321 *
322 * <h2>Recirculation Depot</h2>
323 *
324 * The recirculation depot layer is a list similar to the per-cpu depot,
325 * however it is different in two fundamental ways:
326 *
327 * - it is protected by the regular zone lock,
328 * - elements referenced by the magazines in that layer appear free
329 * to the zone layer.
330 *
331 *
332 * <h2>Magazine circulation and sizing</h2>
333 *
334 * The caching system sizes itself dynamically. Operations that allocate/free
335 * a single element call @c zone_lock_nopreempt_check_contention(), which detects
336 * contention on the lock by doing a trylock and recording its success.
337 *
338 * This information is stored in the @c z_recirc_cont_cur field of the zone,
339 * and a windowed moving average is maintained in @c z_contention_wma.
340 * The periodically run function @c compute_zone_working_set_size() will then
341 * take this into account to decide whether to grow or shrink the number of
342 * buckets allowed in the depot, based on the @c zc_grow_level and
343 * @c zc_shrink_level thresholds.
344 *
345 * The per-cpu layer will attempt to work with its depot, finding both full and
346 * empty magazines cached there. If it can't get what it needs, then it will
347 * mediate with the zone recirculation layer. Such recirculation is done in
348 * batches in order to amortize lock holds.
349 * (See @c {zalloc,zfree}_cached_depot_recirculate()).
350 *
351 * The recirculation layer keeps track of the minimum number of magazines
352 * it has held over time for each of the full and empty queues. This allows
353 * @c compute_zone_working_set_size() to return memory to the system when a zone
354 * stops being used as much.
355 *
356 * <h2>Security considerations</h2>
357 *
358 * The zone caching layer has been designed to avoid returning elements in
359 * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
360 * and @c zfree() free to the (f) magazine, and only swap them when the
361 * requested operation cannot be fulfilled.
362 *
363 * The per-cpu overflow depot or the recirculation depots are similarly used
364 * in FIFO order.
365 *
366 * @field zc_depot_lock a lock to access @c zc_depot, @c zc_depot_cur.
367 * @field zc_alloc_cur denormalized number of elements in the (a) magazine
368 * @field zc_free_cur denormalized number of elements in the (f) magazine
369 * @field zc_alloc_elems a pointer to the array of elements in (a)
370 * @field zc_free_elems a pointer to the array of elements in (f)
371 *
372 * @field zc_depot a list of @c zc_depot_cur full magazines
373 */
374 typedef struct zone_cache {
375 hw_lck_ticket_t zc_depot_lock;
376 uint16_t zc_alloc_cur;
377 uint16_t zc_free_cur;
378 vm_offset_t *zc_alloc_elems;
379 vm_offset_t *zc_free_elems;
380 struct zone_depot zc_depot;
381 smr_t zc_smr;
382 zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
383 } __attribute__((aligned(64))) * zone_cache_t;
384
385 #if !__x86_64__
386 static
387 #endif
388 __security_const_late struct {
389 struct mach_vm_range zi_map_range; /* all zone submaps */
390 struct mach_vm_range zi_ro_range; /* read-only range */
391 struct mach_vm_range zi_meta_range; /* debugging only */
392 struct mach_vm_range zi_bits_range; /* bits buddy allocator */
393 struct mach_vm_range zi_xtra_range; /* vm tracking metadata */
394 struct mach_vm_range zi_pgz_range;
395 struct zone_page_metadata *zi_pgz_meta;
396
397 /*
398 * The metadata lives within the zi_meta_range address range.
399 *
400 * The correct formula to find a metadata index is:
401 * absolute_page_index - page_index(zi_map_range.min_address)
402 *
403 * And then this index is used to dereference zi_meta_range.min_address
404 * as a `struct zone_page_metadata` array.
405 *
406 * To avoid redoing that subtraction in the various fast paths,
407 * zi_meta_base is pre-offset by that minimum page index.
409 */
410 struct zone_page_metadata *zi_meta_base;
411 } zone_info;
412
413 __startup_data static struct mach_vm_range zone_map_range;
414 __startup_data static vm_map_size_t zone_meta_size;
415 __startup_data static vm_map_size_t zone_bits_size;
416 __startup_data static vm_map_size_t zone_xtra_size;
417 #if MACH_ASSERT
418 __startup_data static vm_map_size_t vm_submap_restriction_size_debug;
419 #endif /* MACH_ASSERT */
420
421 /*
422 * Initial array of metadata for stolen memory.
423 *
424 * The numbers here have to be kept in sync with vm_map_steal_memory()
425 * so that we have reserved enough metadata.
426 *
427 * After zone_init() has run (which happens while the kernel is still single
428 * threaded), the metadata is moved to its final dynamic location, and
429 * this array is unmapped with the rest of __startup_data at lockdown.
430 */
431 #define ZONE_EARLY_META_INLINE_COUNT 64
432 __startup_data
433 static struct zone_page_metadata
434 zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
435
436
437 __startup_data __attribute__((aligned(PAGE_MAX_SIZE)))
438 static uint8_t zone_early_pages_to_cram[PAGE_MAX_SIZE * 16];
439
440 /*
441 * The zone_locks_grp allows for collecting lock statistics.
442 * All locks are associated with this group in zinit.
443 * Look at tools/lockstat for debugging lock contention.
444 */
445 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
446 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
447
448 /*
449 * The zone metadata lock protects:
450 * - metadata faulting,
451 * - VM submap VA allocations,
452 * - early gap page queue list
453 */
454 #define zone_meta_lock() lck_mtx_lock(&zone_metadata_region_lck);
455 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
456
457 /*
458 * Exclude more than one concurrent garbage collection
459 */
460 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
461 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
462 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
463
464 /*
465 * Panic logging metadata
466 */
467 bool panic_include_zprint = false;
468 bool panic_include_kalloc_types = false;
469 zone_t kalloc_type_src_zone = ZONE_NULL;
470 zone_t kalloc_type_dst_zone = ZONE_NULL;
471 mach_memory_info_t *panic_kext_memory_info = NULL;
472 vm_size_t panic_kext_memory_size = 0;
473 vm_offset_t panic_fault_address = 0;
474
475 /*
476 * Protects zone_array, num_zones, num_zones_in_use, and
477 * zone_destroyed_bitmap
478 */
479 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
480 static zone_id_t num_zones_in_use;
481 zone_id_t _Atomic num_zones;
482 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
483
484 /*
485 * Initial globals for zone stats until we can allocate the real ones.
486 * Those get migrated inside the per-CPU ones during zone_init() and
487 * this array is unmapped with the rest of __startup_data at lockdown.
488 */
489
490 /* zone to allocate zone_magazine structs from */
491 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
492 /*
493 * Zone caching stays off until pid 1 is made and
494 * compute_zone_working_set_size() runs for the first time.
495 *
496 * -1 represents the "never enabled yet" value.
497 */
498 static int8_t zone_caching_disabled = -1;
499
500 __startup_data
501 static struct zone_stats zone_stats_startup[MAX_ZONES];
502 struct zone zone_array[MAX_ZONES];
503 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
504 [0 ... MAX_ZONES - 1] = {
505 .z_kheap_id = KHEAP_ID_NONE,
506 .z_noencrypt = false,
507 .z_submap_idx = Z_SUBMAP_IDX_GENERAL_0,
508 .z_kalloc_type = false,
509 .z_sig_eq = 0,
510 #if ZSECURITY_CONFIG(ZONE_TAGGING)
511 .z_tag = 1,
512 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
513 .z_tag = 0,
514 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
515 },
516 };
517 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
518 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
519
520 #if DEBUG || DEVELOPMENT
521 unsigned int
522 zone_max_zones(void)
523 {
524 return MAX_ZONES;
525 }
526 #endif
527
528 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
529 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
530
531 /* Used to keep track of destroyed slots in the zone_array */
532 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
533
534 /* number of zone mapped pages used by all zones */
535 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
536 size_t zone_pages_wired;
537 size_t zone_guard_pages;
538
539 /* Time (in ms) after which we panic for zone exhaustions */
540 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
541 static bool zone_share_always = true;
542 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
543
544 #if VM_TAG_SIZECLASSES
545 /*
546 * Zone tagging allows for per "tag" accounting of allocations for the kalloc
547 * zones only.
548 *
549 * There are 3 kinds of tags that can be used:
550 * - pre-registered VM_KERN_MEMORY_*
551 * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
552 * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
553 *
554 * The VM tracks the statistics in lazily allocated structures.
555 * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
556 *
557 * If for some reason the requested tag cannot be accounted for,
558 * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
559 *
560 * Each allocated element also remembers the tag it was assigned,
561 * which lets zalloc/zfree update statistics correctly.
562 */
563
564 /* enable tags for zones that ask for it */
565 static TUNABLE(bool, zone_tagging_on, "-zt", false);
566
567 /*
568 * Array of all sizeclasses used by kalloc variants so that we can
569 * have accounting per size class for each kalloc callsite
570 */
571 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
572 #endif /* VM_TAG_SIZECLASSES */
573
574 #if DEBUG || DEVELOPMENT
575 static int zalloc_simulate_vm_pressure;
576 #endif /* DEBUG || DEVELOPMENT */
577
578 #define Z_TUNABLE(t, n, d) \
579 TUNABLE(t, _##n, #n, d); \
580 __pure2 static inline t n(void) { return _##n; }
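/*
 * Illustrative expansion (a sketch, not compiled here): with the macro
 * above, Z_TUNABLE(uint16_t, zc_mag_size, 8) produces roughly
 *
 *     TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *     __pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 *
 * so the backing storage is _zc_mag_size, the value can be overridden with
 * a boot-arg named after the accessor (e.g. zc_mag_size=16), and all
 * callers go through the inline accessor zc_mag_size().
 */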
581
582 /*
583 * Zone caching tunables
584 *
585 * zc_mag_size():
586 * size of magazines, larger to reduce contention at the expense of memory
587 *
588 * zc_enable_level
589 * number of contentions per second after which zone caching engages
590 * automatically.
591 *
592 * 0 to disable.
593 *
594 * zc_grow_level
595 * number of contentions per second x cpu after which the number of magazines
596 * allowed in the depot can grow. (in "Z_WMA_UNIT" units).
597 *
598 * zc_shrink_level
599 * number of contentions per second x cpu below which the number of magazines
600 * allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
601 *
602 * zc_pcpu_max
603 * maximum memory size in bytes that can hang from a CPU,
604 * which will affect how many magazines are allowed in the depot.
605 *
606 * The alloc/free magazines are assumed to be on average half-empty
607 * and to count for "1" unit of magazines.
608 *
609 * zc_autotrim_size
610 * Size allowed to hang extra from the recirculation depot before
611 * auto-trim kicks in.
612 *
613 * zc_autotrim_buckets
614 *
615 * How many buckets in excess of the working-set are allowed
616 * before auto-trim kicks in for empty buckets.
617 *
618 * zc_free_batch_size
619 * The size of batches of frees/reclaims that can be done while keeping
620 * the zone lock held (and preemption disabled).
621 */
622 Z_TUNABLE(uint16_t, zc_mag_size, 8);
623 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
624 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
625 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
626 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
627 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
628 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
629 static Z_TUNABLE(uint32_t, zc_free_batch_size, 128);
630
631 static SECURITY_READ_ONLY_LATE(size_t) zone_pages_wired_max;
632 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_submaps[Z_SUBMAP_IDX_COUNT];
633 static SECURITY_READ_ONLY_LATE(vm_map_t) zone_meta_map;
634 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
635 [Z_SUBMAP_IDX_VM] = "VM",
636 [Z_SUBMAP_IDX_READ_ONLY] = "RO",
637 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
638 [Z_SUBMAP_IDX_GENERAL_0] = "GEN0",
639 [Z_SUBMAP_IDX_GENERAL_1] = "GEN1",
640 [Z_SUBMAP_IDX_GENERAL_2] = "GEN2",
641 [Z_SUBMAP_IDX_GENERAL_3] = "GEN3",
642 #else
643 [Z_SUBMAP_IDX_GENERAL_0] = "GEN",
644 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
645 [Z_SUBMAP_IDX_DATA] = "DATA",
646 };
647
648 #if __x86_64__
649 #define ZONE_ENTROPY_CNT 8
650 #else
651 #define ZONE_ENTROPY_CNT 2
652 #endif
653 static struct zone_bool_gen {
654 struct bool_gen zbg_bg;
655 uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
656 } zone_bool_gen[MAX_CPUS];
657
658 #if CONFIG_PROB_GZALLOC
659 /*
660 * Probabilistic gzalloc
661 * =====================
662 *
663 *
664 * Probabilistic guard zalloc samples allocations and will protect them by
665 * double-mapping the page holding them and returning the secondary virtual
666 * address to its callers.
667 *
668 * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
669 * are set.
670 *
671 *
672 * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
673 * most zalloc/kalloc features:
674 * - zone_require is functional
675 * - zone caching or zone tagging is compatible
676 * - non-blocking allocations work (with gzalloc they would always return NULL).
677 *
678 * PGZ limitations:
679 * - VA sequestering isn't respected, as the slots (which are in limited
680 * quantity) will be reused for any type, however the PGZ quarantine
681 * somewhat mitigates the impact.
682 * - zones with elements larger than a page cannot be protected.
683 *
684 *
685 * Tunables:
686 * --------
687 *
688 * pgz=1:
689 * Turn on probabilistic guard malloc for all zones
690 *
691 * (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
692 *
693 * pgz_sample_rate=0 to 2^31
694 * average sample rate between two guarded allocations.
695 * 0 means every allocation.
696 *
697 * The default is a random number between 1000 and 10,000
698 *
699 * pgz_slots
700 * how many allocations to protect.
701 *
702 * Each costs:
703 * - a PTE in the pmap (when allocated)
704 * - 2 zone page meta's (every other page is a "guard" one, 32B total)
705 * - 64 bytes per backtraces.
706 * On LP64 this is <16K per 100 slots.
707 *
708 * The default is ~200 slots per G of physical ram (32k / G)
709 *
710 * TODO:
711 * - try harder to allocate elements at the "end" to catch OOB more reliably.
712 *
713 * pgz_quarantine
714 * how many slots should be free at any given time.
715 *
716 * PGZ will round robin through free slots to be reused, but free slots are
717 * important to detect use-after-free by acting as a quarantine.
718 *
719 * By default, PGZ will keep 33% of the slots around at all time.
720 *
721 * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
722 * Specific zones for which to enable probabilistic guard malloc.
723 * There must be no numbering gap (names after the gap will be ignored).
724 */
725 #if DEBUG || DEVELOPMENT
726 static TUNABLE(bool, pgz_all, "pgz", true);
727 #else
728 static TUNABLE(bool, pgz_all, "pgz", false);
729 #endif
730 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
731 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
732 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
733 #endif /* CONFIG_PROB_GZALLOC */
734
735 static zone_t zone_find_largest(uint64_t *zone_size);
736
737 #endif /* !ZALLOC_TEST */
738 #pragma mark Zone metadata
739 #if !ZALLOC_TEST
740
741 static inline bool
742 zone_has_index(zone_t z, zone_id_t zid)
743 {
744 return zone_array + zid == z;
745 }
746
747 __abortlike
748 void
749 zone_invalid_panic(zone_t zone)
750 {
751 panic("zone %p isn't in the zone_array", zone);
752 }
753
754 __abortlike
755 static void
756 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
757 const char *kind)
758 {
759 panic("zone metadata corruption: %s (meta %p, zone %s%s)",
760 kind, meta, zone_heap_name(zone), zone->z_name);
761 }
762
763 __abortlike
764 static void
765 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
766 {
767 panic("zone element pointer validation failed (addr: %p, zone %s%s)",
768 (void *)addr, zone_heap_name(zone), zone->z_name);
769 }
770
771 __abortlike
772 static void
773 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
774 struct zone_page_metadata *meta)
775 {
776 zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
777 zone_id_t zidx;
778 zone_t src_zone;
779
780 if (zsflags.z_kalloc_type) {
781 panic_include_kalloc_types = true;
782 kalloc_type_dst_zone = zone;
783 }
784
785 zidx = meta->zm_index;
786 if (zidx >= os_atomic_load(&num_zones, relaxed)) {
787 panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
788 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
789 zidx);
790 }
791
792 src_zone = &zone_array[zidx];
793 src_zsflags = zone_security_array[zidx];
794 if (src_zsflags.z_kalloc_type) {
795 panic_include_kalloc_types = true;
796 kalloc_type_src_zone = src_zone;
797 }
798
799 panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
800 (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
801 zone_heap_name(src_zone), src_zone->z_name, zidx);
802 }
803
804 __abortlike
805 static void
806 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
807 {
808 panic("metadata list corruption through element %p detected in zone %s%s",
809 meta, zone_heap_name(zone), zone->z_name);
810 }
811
812 __abortlike
813 static void
814 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
815 const char *kind)
816 {
817 panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
818 zone_heap_name(zone), zone->z_name, meta);
819 }
820
821 __abortlike
822 static void
823 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
824 {
825 panic("%s: double free of %p to zone %s%s", caller,
826 (void *)addr, zone_heap_name(zone), zone->z_name);
827 }
828
829 __abortlike
830 static void
831 zone_accounting_panic(zone_t zone, const char *kind)
832 {
833 panic("accounting mismatch (%s) for zone %s%s", kind,
834 zone_heap_name(zone), zone->z_name);
835 }
836
837 #define zone_counter_sub(z, stat, value) ({ \
838 if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
839 zone_accounting_panic(z, #stat " wrap-around"); \
840 } \
841 (z)->stat; \
842 })
843
844 static inline uint16_t
845 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
846 vm_offset_t esize)
847 {
848 if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
849 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
850 }
851 return m->zm_alloc_size;
852 }
853
854 static inline uint16_t
855 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
856 vm_offset_t esize)
857 {
858 if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
859 zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
860 }
861 return m->zm_alloc_size;
862 }
863
864 __abortlike
865 static void
866 zone_nofail_panic(zone_t zone)
867 {
868 panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
869 zone_heap_name(zone), zone->z_name);
870 }
871
872 __header_always_inline bool
873 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
874 {
875 const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
876 struct mach_vm_range r = { addr_start, addr_end };
877
878 return mach_vm_range_intersects(ro_r, &r);
879 }
880
881 #define from_range(r, addr, size) \
882 __builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
883 mach_vm_range_contains(r, vm_memtag_canonicalize_kernel((mach_vm_offset_t)(addr))), \
884 mach_vm_range_contains(r, vm_memtag_canonicalize_kernel((mach_vm_offset_t)(addr)), size))
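/*
 * Note on the macro above: __builtin_choose_expr() statically picks the
 * single-address overload of mach_vm_range_contains() when `size` is the
 * compile-time constant 1, and the (address, size) overload otherwise.
 * In both cases the address is canonicalized first so that memory-tagged
 * pointers compare correctly against the untagged range bounds.
 */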
885
886 #define from_ro_map(addr, size) \
887 from_range(&zone_info.zi_ro_range, addr, size)
888
889 #define from_zone_map(addr, size) \
890 from_range(&zone_info.zi_map_range, addr, size)
891
892 __header_always_inline bool
893 zone_pva_is_null(zone_pva_t page)
894 {
895 return page.packed_address == 0;
896 }
897
898 __header_always_inline bool
899 zone_pva_is_queue(zone_pva_t page)
900 {
901 // actual kernel pages have the top bit set
902 return (int32_t)page.packed_address > 0;
903 }
904
905 __header_always_inline bool
906 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
907 {
908 return pva1.packed_address == pva2.packed_address;
909 }
910
911 __header_always_inline zone_pva_t *
912 zone_pageq_base(void)
913 {
914 extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
915
916 /*
917 * `-1` so that if the first __DATA variable is a page queue,
918 * it gets a non-zero index
919 */
920 return data_seg_start - 1;
921 }
922
923 __header_always_inline void
924 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
925 struct zone_page_metadata *meta)
926 {
927 zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
928
929 if (!zone_pva_is_equal(*queue_head, oldv)) {
930 zone_page_metadata_list_corruption(z, meta);
931 }
932 *queue_head = meta->zm_page_next;
933 }
934
935 __header_always_inline zone_pva_t
936 zone_queue_encode(zone_pva_t *headp)
937 {
938 return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
939 }
940
941 __header_always_inline zone_pva_t
942 zone_pva_from_addr(vm_address_t addr)
943 {
944 // cannot use atop() because we want to maintain the sign bit
945 return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
946 }
947
948 __header_always_inline vm_address_t
949 zone_pva_to_addr(zone_pva_t page)
950 {
951 // cause sign extension so that we end up with the right address
952 return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
953 }
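/*
 * Worked round trip (illustrative, assuming 4K pages, PAGE_SHIFT == 12):
 *
 *     addr = 0xffffff8012345000
 *     zone_pva_from_addr: (intptr_t)addr >> 12  = 0xfffffffff8012345,
 *                         truncated to 32 bits -> packed_address 0xf8012345
 *     zone_pva_to_addr:   (int32_t)0xf8012345 sign-extends to
 *                         0xfffffffff8012345, << 12 -> 0xffffff8012345000
 *
 * Kernel addresses keep their top bit set through the packing, so the sign
 * extension above recovers the original page address, while queue heads
 * (small positive indexes from zone_queue_encode()) remain positive, which
 * is exactly what zone_pva_is_queue() tests.
 */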
954
955 __header_always_inline struct zone_page_metadata *
956 zone_pva_to_meta(zone_pva_t page)
957 {
958 return VM_FAR_ADD_PTR_UNBOUNDED(
959 zone_info.zi_meta_base, page.packed_address);
960 }
961
962 __header_always_inline zone_pva_t
963 zone_pva_from_meta(struct zone_page_metadata *meta)
964 {
965 return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
966 }
967
968 __header_always_inline struct zone_page_metadata *
969 zone_meta_from_addr(vm_offset_t addr)
970 {
971 return zone_pva_to_meta(zone_pva_from_addr(addr));
972 }
973
974 __header_always_inline zone_id_t
975 zone_index_from_ptr(const void *ptr)
976 {
977 return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
978 }
979
980 __header_always_inline vm_offset_t
981 zone_meta_to_addr(struct zone_page_metadata *meta)
982 {
983 return ptoa((int32_t)(meta - zone_info.zi_meta_base));
984 }
985
986 __attribute__((overloadable))
987 __header_always_inline void
988 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
989 {
990 if (!zone_has_index(z, meta->zm_index)) {
991 zone_page_metadata_index_confusion_panic(z, addr, meta);
992 }
993 }
994
995 __attribute__((overloadable))
996 __header_always_inline void
997 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
998 {
999 zone_meta_validate(z, meta, zone_meta_to_addr(meta));
1000 }
1001
1002 __header_always_inline void
1003 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
1004 struct zone_page_metadata *meta)
1005 {
1006 zone_pva_t head = *headp;
1007 zone_pva_t queue_pva = zone_queue_encode(headp);
1008 struct zone_page_metadata *tmp;
1009
1010 meta->zm_page_next = head;
1011 if (!zone_pva_is_null(head)) {
1012 tmp = zone_pva_to_meta(head);
1013 if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
1014 zone_page_metadata_list_corruption(z, meta);
1015 }
1016 tmp->zm_page_prev = zone_pva_from_meta(meta);
1017 }
1018 meta->zm_page_prev = queue_pva;
1019 *headp = zone_pva_from_meta(meta);
1020 }
1021
1022 __header_always_inline struct zone_page_metadata *
1023 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1024 {
1025 zone_pva_t head = *headp;
1026 struct zone_page_metadata *meta = zone_pva_to_meta(head);
1027 struct zone_page_metadata *tmp;
1028
1029 zone_meta_validate(z, meta);
1030
1031 if (!zone_pva_is_null(meta->zm_page_next)) {
1032 tmp = zone_pva_to_meta(meta->zm_page_next);
1033 if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1034 zone_page_metadata_list_corruption(z, meta);
1035 }
1036 tmp->zm_page_prev = meta->zm_page_prev;
1037 }
1038 *headp = meta->zm_page_next;
1039
1040 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1041
1042 return meta;
1043 }
1044
1045 __header_always_inline void
1046 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1047 {
1048 zone_pva_t meta_pva = zone_pva_from_meta(meta);
1049 struct zone_page_metadata *tmp;
1050
1051 if (!zone_pva_is_null(meta->zm_page_next)) {
1052 tmp = zone_pva_to_meta(meta->zm_page_next);
1053 if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1054 zone_page_metadata_list_corruption(z, meta);
1055 }
1056 tmp->zm_page_prev = meta->zm_page_prev;
1057 }
1058 if (zone_pva_is_queue(meta->zm_page_prev)) {
1059 zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1060 } else {
1061 tmp = zone_pva_to_meta(meta->zm_page_prev);
1062 if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1063 zone_page_metadata_list_corruption(z, meta);
1064 }
1065 tmp->zm_page_next = meta->zm_page_next;
1066 }
1067
1068 meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1069 }
1070
1071 __header_always_inline void
1072 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1073 struct zone_page_metadata *meta)
1074 {
1075 zone_meta_remqueue(z, meta);
1076 zone_meta_queue_push(z, headp, meta);
1077 }
1078
1079 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1080 static inline void
1081 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1082 {
1083 uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1084
1085 assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1086 if (new_size == ZM_ALLOC_SIZE_LOCK) {
1087 zone_meta_requeue(z, &z->z_pageq_partial, m);
1088 zone_counter_sub(z, z_wired_empty, len);
1089 }
1090 }
1091
1092 /* allows a given metadata to reach the z_pageq_empty queue again */
1093 static inline void
1094 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1095 {
1096 uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1097
1098 assert(new_size % sizeof(vm_offset_t) == 0);
1099 if (new_size == 0) {
1100 zone_meta_requeue(z, &z->z_pageq_empty, m);
1101 z->z_wired_empty += len;
1102 }
1103 }
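/*
 * Note on the two helpers above: element sizes accounted in zm_alloc_size
 * are multiples of sizeof(vm_offset_t) (cf. the asserts), so the low bit is
 * never used by real allocation bookkeeping. Adding the ZM_ALLOC_SIZE_LOCK
 * bias of 1 therefore pins a chunk in z_pageq_partial without disturbing
 * the actual allocated size, and removing it lets the chunk reach
 * z_pageq_empty again once the size drops back to 0.
 */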
1104
1105 /*
1106 * Routine to populate a page backing metadata in the zone_metadata_region.
1107 * Must be called without the zone lock held as it might potentially block.
1108 */
1109 static void
1110 zone_meta_populate(vm_offset_t base, vm_size_t size)
1111 {
1112 struct zone_page_metadata *from = zone_meta_from_addr(base);
1113 struct zone_page_metadata *to = from + atop(size);
1114 vm_offset_t page_addr = trunc_page(from);
1115
1116 for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1117 #if !KASAN
1118 /*
1119 * This can race with another thread doing a populate on the same metadata
1120 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1121 * fault in the shadow when we first access the metadata page. Avoid this
1122 * by always synchronizing on the zone_metadata_region lock with KASan.
1123 */
1124 if (pmap_find_phys(kernel_pmap, page_addr)) {
1125 continue;
1126 }
1127 #endif
1128
1129 for (;;) {
1130 kern_return_t ret = KERN_SUCCESS;
1131
1132 /*
1133 * All updates to the zone_metadata_region are done
1134 * under the zone_metadata_region_lck
1135 */
1136 zone_meta_lock();
1137 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1138 ret = kernel_memory_populate(page_addr,
1139 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1140 VM_KERN_MEMORY_OSFMK);
1141 }
1142 zone_meta_unlock();
1143
1144 if (ret == KERN_SUCCESS) {
1145 break;
1146 }
1147
1148 /*
1149 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1150 * to bad system deadlocks, so if the allocation failed,
1151 * we need to do the VM_PAGE_WAIT() outside of the lock.
1152 */
1153 VM_PAGE_WAIT();
1154 }
1155 }
1156 }
1157
1158 __abortlike
1159 static void
1160 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1161 {
1162 struct zone_page_metadata *meta;
1163 const char *from_cache = "";
1164 vm_offset_t page;
1165
1166 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1167 panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1168 (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1169 }
1170 page = trunc_page(addr);
1171 meta = zone_meta_from_addr(addr);
1172
1173 if (!zone_has_index(zone, meta->zm_index)) {
1174 zone_page_metadata_index_confusion_panic(zone, addr, meta);
1175 }
1176
1177 if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1178 panic("metadata %p corresponding to addr %p being freed to "
1179 "zone %s%s%s, is marked as secondary per cpu page",
1180 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1181 from_cache);
1182 }
1183 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1184 page -= ptoa(meta->zm_page_index);
1185 meta -= meta->zm_page_index;
1186 }
1187
1188 if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1189 panic("metadata %p corresponding to addr %p being freed to "
1190 "zone %s%s%s, has chunk len greater than max",
1191 meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1192 from_cache);
1193 }
1194
1195 if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1196 panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1197 "zone element size", (void *)addr, zone_heap_name(zone),
1198 zone->z_name, from_cache);
1199 }
1200
1201 zone_invalid_element_addr_panic(zone, addr);
1202 }
1203
1204 __attribute__((always_inline))
1205 static struct zone_page_metadata *
1206 zone_element_resolve(
1207 zone_t zone,
1208 vm_offset_t addr,
1209 vm_offset_t *idx)
1210 {
1211 struct zone_page_metadata *meta;
1212 vm_offset_t offs, eidx;
1213
1214 meta = zone_meta_from_addr(addr);
1215 if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1216 zone_invalid_element_panic(zone, addr);
1217 }
1218
1219 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1220 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1221 offs += ptoa(meta->zm_page_index);
1222 meta -= meta->zm_page_index;
1223 }
1224
1225 eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1226 if (eidx * zone_elem_outer_size(zone) != offs) {
1227 zone_invalid_element_panic(zone, addr);
1228 }
1229
1230 *idx = eidx;
1231 return meta;
1232 }
1233
1234 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1235 void *
1236 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1237 {
1238 vm_offset_t addr = (vm_offset_t)ptr;
1239 vm_offset_t end = addr + elem_size;
1240 vm_offset_t offs;
1241
1242 /*
1243 * 0-sized allocations in a KALLOC_MINSIZE bucket
1244 * would be offset to the next allocation which is incorrect.
1245 */
1246 req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1247
1248 /*
1249 * Given how chunks work, for a zone with PGZ guards on,
1250 * there's a single element which ends precisely
1251 * at the page boundary: the last one.
1252 */
1253 if (req_size == elem_size ||
1254 (end & PAGE_MASK) ||
1255 !zone_meta_from_addr(addr)->zm_guarded) {
1256 return ptr;
1257 }
1258
1259 offs = elem_size - req_size;
1260 zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1261
1262 return (char *)addr + offs;
1263 }
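/*
 * Worked example (illustrative sizes): take a 64-byte element that ends
 * exactly on a page boundary of a guarded chunk, and a request that rounds
 * up to 32 bytes. offs = 64 - 32 = 32, so the caller gets ptr + 32, which
 * puts the usable bytes flush against the trailing guard page so that a
 * linear overflow faults immediately. The guard page metadata records
 * zm_oob_offs = 32, which zone_element_size() later reports through its
 * oob_offs parameter (and can clear when clear_oob is set).
 */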
1264 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1265
1266 __abortlike
1267 static void
1268 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1269 {
1270 struct zone_page_metadata *meta;
1271 vm_offset_t offs, size, page;
1272 zone_t zone;
1273
1274 page = trunc_page(addr);
1275 meta = zone_meta_from_addr(addr);
1276 zone = &zone_array[meta->zm_index];
1277
1278 if (zone->z_percpu) {
1279 panic("zone bound checks: address %p is a per-cpu allocation",
1280 (void *)addr);
1281 }
1282
1283 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1284 page -= ptoa(meta->zm_page_index);
1285 meta -= meta->zm_page_index;
1286 }
1287
1288 size = zone_elem_outer_size(zone);
1289 offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1290 zone->z_quo_magic, size);
1291 panic("zone bound checks: buffer %p of length %zd overflows "
1292 "object %p of size %zd in zone %p[%s%s]",
1293 (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1294 zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1295 }
1296
1297 void
1298 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1299 {
1300 struct zone_page_metadata *meta;
1301 vm_offset_t offs, size;
1302 zone_t zone;
1303
1304 if (!from_zone_map(addr, 1)) {
1305 return;
1306 }
1307
1308 #if CONFIG_PROB_GZALLOC
1309 if (__improbable(pgz_owned(addr))) {
1310 meta = zone_meta_from_addr(addr);
1311 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1312 }
1313 #endif /* CONFIG_PROB_GZALLOC */
1314 meta = zone_meta_from_addr(addr);
1315 zone = zone_by_id(meta->zm_index);
1316
1317 if (zone->z_percpu) {
1318 zone_element_bounds_check_panic(addr, len);
1319 }
1320
1321 if (zone->z_permanent) {
1322 /* We don't know bounds for those */
1323 return;
1324 }
1325
1326 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1327 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1328 offs += ptoa(meta->zm_page_index);
1329 }
1330 size = zone_elem_outer_size(zone);
1331 offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1332 if (len + zone_elem_redzone(zone) > size - offs) {
1333 zone_element_bounds_check_panic(addr, len);
1334 }
1335 }
1336
1337 /*
1338 * Routine to get the size of a zone allocated address.
1339 * If the address doesn't belong to the zone maps, returns 0.
1340 */
1341 vm_size_t
1342 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1343 {
1344 vm_address_t addr = (vm_address_t)elem;
1345 struct zone_page_metadata *meta;
1346 vm_size_t esize, offs, end;
1347 zone_t zone;
1348
1349 if (from_zone_map(addr, sizeof(void *))) {
1350 meta = zone_meta_from_addr(addr);
1351 zone = zone_by_id(meta->zm_index);
1352 esize = zone_elem_inner_size(zone);
1353 end = vm_memtag_canonicalize_kernel(addr + esize);
1354 offs = 0;
1355
1356 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1357 /*
1358 * If the chunk uses guards, and (addr + esize)
1359 * either crosses a page boundary or is at the boundary,
1360 * we need to look harder.
1361 */
1362 if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1363 uint32_t chunk_pages = zone->z_chunk_pages;
1364
1365 /*
1366 * Because in the vast majority of cases the element
1367 * size is sub-page, and meta[1] must be faulted,
1368 * we can quickly peek at whether it's a guard.
1369 *
1370 * For elements larger than a page, finding the guard
1371 * page requires a little more effort.
1372 */
1373 if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1374 offs = meta[1].zm_oob_offs;
1375 if (clear_oob) {
1376 meta[1].zm_oob_offs = 0;
1377 }
1378 } else if (esize > PAGE_SIZE) {
1379 struct zone_page_metadata *gmeta;
1380
1381 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1382 gmeta = meta + meta->zm_subchunk_len;
1383 } else {
1384 gmeta = meta + chunk_pages;
1385 }
1386 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1387
1388 if (end >= zone_meta_to_addr(gmeta)) {
1389 offs = gmeta->zm_oob_offs;
1390 if (clear_oob) {
1391 gmeta->zm_oob_offs = 0;
1392 }
1393 }
1394 }
1395 }
1396 #else
1397 #pragma unused(end, clear_oob)
1398 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1399
1400 if (oob_offs) {
1401 *oob_offs = offs;
1402 }
1403 if (z) {
1404 *z = zone;
1405 }
1406 return esize;
1407 }
1408
1409 if (oob_offs) {
1410 *oob_offs = 0;
1411 }
1412
1413 return 0;
1414 }
1415
1416 zone_id_t
1417 zone_id_for_element(void *addr, vm_size_t esize)
1418 {
1419 zone_id_t zid = ZONE_ID_INVALID;
1420 if (from_zone_map(addr, esize)) {
1421 zid = zone_index_from_ptr(addr);
1422 __builtin_assume(zid != ZONE_ID_INVALID);
1423 }
1424 return zid;
1425 }
1426
1427 /* This function just formats the reason for the panics by redoing the checks */
1428 __abortlike
1429 static void
1430 zone_require_panic(zone_t zone, void *addr)
1431 {
1432 uint32_t zindex;
1433 zone_t other;
1434
1435 if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1436 panic("zone_require failed: address not in a zone (addr: %p)", addr);
1437 }
1438
1439 zindex = zone_index_from_ptr(addr);
1440 other = &zone_array[zindex];
1441 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1442 panic("zone_require failed: invalid zone index %d "
1443 "(addr: %p, expected: %s%s)", zindex,
1444 addr, zone_heap_name(zone), zone->z_name);
1445 } else {
1446 panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1447 "(addr: %p, expected: %s%s)",
1448 zindex, zone_heap_name(other), other->z_name,
1449 addr, zone_heap_name(zone), zone->z_name);
1450 }
1451 }
1452
1453 __abortlike
1454 static void
1455 zone_id_require_panic(zone_id_t zid, void *addr)
1456 {
1457 zone_require_panic(&zone_array[zid], addr);
1458 }
1459
1460 /*
1461 * Routines to panic if a pointer is not mapped to an expected zone.
1462 * This can be used as a means of pinning an object to the zone it is expected
1463 * to be a part of. Causes a panic if the address does not belong to any
1464 * specified zone, does not belong to any zone, has been freed and therefore
1465 * unmapped from the zone, or the pointer contains an uninitialized value that
1466 * does not belong to any zone.
1467 */
1468 void
1469 zone_require(zone_t zone, void *addr)
1470 {
1471 vm_size_t esize = zone_elem_inner_size(zone);
1472
1473 if (from_zone_map(addr, esize) &&
1474 zone_has_index(zone, zone_index_from_ptr(addr))) {
1475 return;
1476 }
1477 zone_require_panic(zone, addr);
1478 }
1479
1480 void
1481 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1482 {
1483 if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1484 return;
1485 }
1486 zone_id_require_panic(zid, addr);
1487 }
1488
1489 void
1490 zone_id_require_aligned(zone_id_t zid, void *addr)
1491 {
1492 zone_t zone = zone_by_id(zid);
1493 vm_offset_t elem, offs;
1494
1495 elem = (vm_offset_t)addr;
1496 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
1497
1498 if (from_zone_map(addr, 1)) {
1499 struct zone_page_metadata *meta;
1500
1501 meta = zone_meta_from_addr(elem);
1502 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1503 offs += ptoa(meta->zm_page_index);
1504 }
1505
1506 if (zid == meta->zm_index &&
1507 Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
1508 return;
1509 }
1510 }
1511
1512 zone_invalid_element_panic(zone, elem);
1513 }
1514
1515 bool
1516 zone_owns(zone_t zone, void *addr)
1517 {
1518 vm_size_t esize = zone_elem_inner_size(zone);
1519
1520 if (from_zone_map(addr, esize)) {
1521 return zone_has_index(zone, zone_index_from_ptr(addr));
1522 }
1523 return false;
1524 }
1525
1526 static inline struct mach_vm_range
1527 zone_kmem_suballoc(
1528 mach_vm_offset_t addr,
1529 vm_size_t size,
1530 int flags,
1531 vm_tag_t tag,
1532 vm_map_t *new_map)
1533 {
1534 struct mach_vm_range r;
1535
1536 *new_map = kmem_suballoc(kernel_map, &addr, size,
1537 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1538 flags, KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT, tag).kmr_submap;
1539
1540 r.min_address = addr;
1541 r.max_address = addr + size;
1542 return r;
1543 }
1544
1545 #endif /* !ZALLOC_TEST */
1546 #pragma mark Zone bits allocator
1547
1548 /*!
1549 * @defgroup Zone Bitmap allocator
1550 * @{
1551 *
1552 * @brief
1553 * Functions implementing the zone bitmap allocator
1554 *
1555 * @discussion
1556 * The zone allocator maintains which elements are allocated or free in bitmaps.
1557 *
1558 * When the number of elements per page is smaller than 32, it is stored inline
1559 * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1560 * and @c zm_bitmap used for storage).
1561 *
1562 * When the number of elements is larger, then a bitmap is allocated from
1563 * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1564 * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1565 * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1566 * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1567 * cannot be larger than 1024 bytes (8192 bits).
1568 *
1569 * This buddy allocator can actually accommodate allocations as large
1570 * as 8k on 16k systems and 2k on 4k systems.
1571 *
1572 * Note: @c zba_* functions are implementation details not meant to be used
1573 * outside of the implementation of the allocator itself. Interfaces to the rest of
1574 * the zone allocator are documented and not @c zba_* prefixed.
1575 */
1576
1577 #define ZBA_CHUNK_SIZE PAGE_MAX_SIZE
1578 #define ZBA_GRANULE sizeof(uint64_t)
1579 #define ZBA_GRANULE_BITS (8 * sizeof(uint64_t))
1580 #define ZBA_MAX_ORDER (PAGE_MAX_SHIFT - 4)
1581 #define ZBA_MAX_ALLOC_ORDER 7
1582 #define ZBA_SLOTS (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1583 #define ZBA_HEADS_COUNT (ZBA_MAX_ORDER + 1)
1584 #define ZBA_PTR_MASK 0x0fffffff
1585 #define ZBA_ORDER_SHIFT 29
1586 #define ZBA_HAS_EXTRA_BIT 0x10000000
1587
1588 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1589 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
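/*
 * Illustrative sizing (assuming PAGE_MAX_SIZE is 16K, PAGE_MAX_SHIFT 14):
 * ZBA_CHUNK_SIZE = 16384, ZBA_GRANULE = 8, ZBA_MAX_ORDER = 10 so an
 * order-10 block spans 8 << 10 = 8192 bytes ("as large as 8k on 16k
 * systems" above), ZBA_MAX_ALLOC_ORDER = 7 caps packed references at
 * 8 << 7 = 1024 bytes (8192 bits), ZBA_SLOTS = 2048 granules per chunk,
 * and ZBA_HEADS_COUNT = 11 freelists, consistent with the static_asserts.
 */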
1590
1591 struct zone_bits_chain {
1592 uint32_t zbc_next;
1593 uint32_t zbc_prev;
1594 } __attribute__((aligned(ZBA_GRANULE)));
1595
1596 struct zone_bits_head {
1597 uint32_t zbh_next;
1598 uint32_t zbh_unused;
1599 } __attribute__((aligned(ZBA_GRANULE)));
1600
1601 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1602 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1603
1604 struct zone_bits_allocator_meta {
1605 uint32_t zbam_left;
1606 uint32_t zbam_right;
1607 struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1608 struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1609 };
1610
1611 struct zone_bits_allocator_header {
1612 uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1613 };
1614
1615 #if ZALLOC_TEST
1616 static struct zalloc_bits_allocator_test_setup {
1617 vm_offset_t zbats_base;
1618 void (*zbats_populate)(vm_address_t addr, vm_size_t size);
1619 } zba_test_info;
1620
1621 static struct zone_bits_allocator_header *
1622 zba_base_header(void)
1623 {
1624 return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1625 }
1626
1627 static kern_return_t
1628 zba_populate(uint32_t n, bool with_extra __unused)
1629 {
1630 vm_address_t base = zba_test_info.zbats_base;
1631 zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1632
1633 return KERN_SUCCESS;
1634 }
1635 #else
1636 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1637 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1638
1639 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1640 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1641
1642 static struct zone_bits_allocator_header *
1643 zba_base_header(void)
1644 {
1645 return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1646 }
1647
1648 static void
1649 zba_lock(void)
1650 {
1651 lck_mtx_lock(&zba_mtx);
1652 }
1653
1654 static void
1655 zba_unlock(void)
1656 {
1657 lck_mtx_unlock(&zba_mtx);
1658 }
1659
1660 __abortlike
1661 static void
1662 zba_memory_exhausted(void)
1663 {
1664 uint64_t zsize = 0;
1665 zone_t z = zone_find_largest(&zsize);
1666 panic("zba_populate: out of bitmap space, "
1667 "likely due to memory leak in zone [%s%s] "
1668 "(%u%c, %d elements allocated)",
1669 zone_heap_name(z), zone_name(z),
1670 mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1671 zone_count_allocated(z));
1672 }
1673
1674
1675 static kern_return_t
1676 zba_populate(uint32_t n, bool with_extra)
1677 {
1678 vm_size_t bits_size = ZBA_CHUNK_SIZE;
1679 vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1680 vm_address_t bits_addr;
1681 vm_address_t xtra_addr;
1682 kern_return_t kr;
1683
1684 bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1685 xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1686
1687 kr = kernel_memory_populate(bits_addr, bits_size,
1688 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1689 VM_KERN_MEMORY_OSFMK);
1690 if (kr != KERN_SUCCESS) {
1691 return kr;
1692 }
1693
1694
1695 if (with_extra) {
1696 kr = kernel_memory_populate(xtra_addr, xtra_size,
1697 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1698 VM_KERN_MEMORY_OSFMK);
1699 if (kr != KERN_SUCCESS) {
1700 kernel_memory_depopulate(bits_addr, bits_size,
1701 KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1702 VM_KERN_MEMORY_OSFMK);
1703 }
1704 }
1705
1706 return kr;
1707 }
1708 #endif
1709
1710 __pure2
1711 static struct zone_bits_allocator_meta *
1712 zba_meta(void)
1713 {
1714 return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1715 }
1716
1717 __pure2
1718 static uint64_t *
1719 zba_slot_base(void)
1720 {
1721 return (uint64_t *)zba_base_header();
1722 }
1723
1724 __pure2
1725 static struct zone_bits_head *
1726 zba_head(uint32_t order, bool with_extra)
1727 {
1728 if (with_extra) {
1729 return &zba_meta()->zbam_lists_with_extra[order];
1730 } else {
1731 return &zba_meta()->zbam_lists[order];
1732 }
1733 }
1734
1735 __pure2
1736 static uint32_t
1737 zba_head_index(struct zone_bits_head *hd)
1738 {
1739 return (uint32_t)((uint64_t *)hd - zba_slot_base());
1740 }
1741
1742 __pure2
1743 static struct zone_bits_chain *
1744 zba_chain_for_index(uint32_t index)
1745 {
1746 return (struct zone_bits_chain *)(zba_slot_base() + index);
1747 }
1748
1749 __pure2
1750 static uint32_t
1751 zba_chain_to_index(const struct zone_bits_chain *zbc)
1752 {
1753 return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1754 }
1755
1756 __abortlike
1757 static void
1758 zba_head_corruption_panic(uint32_t order, bool with_extra)
1759 {
1760 panic("zone bits allocator head[%d:%d:%p] is corrupt",
1761 order, with_extra, zba_head(order, with_extra));
1762 }
1763
1764 __abortlike
1765 static void
1766 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1767 {
1768 panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1769 }
1770
1771 static void
1772 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1773 {
1774 struct zone_bits_head *hd = zba_head(order, with_extra);
1775 uint32_t hd_index = zba_head_index(hd);
1776 uint32_t index = zba_chain_to_index(zbc);
1777 struct zone_bits_chain *next;
1778
1779 if (hd->zbh_next) {
1780 next = zba_chain_for_index(hd->zbh_next);
1781 if (next->zbc_prev != hd_index) {
1782 zba_head_corruption_panic(order, with_extra);
1783 }
1784 next->zbc_prev = index;
1785 }
1786 zbc->zbc_next = hd->zbh_next;
1787 zbc->zbc_prev = hd_index;
1788 hd->zbh_next = index;
1789 }
1790
1791 static void
1792 zba_remove_block(struct zone_bits_chain *zbc)
1793 {
1794 struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1795 uint32_t index = zba_chain_to_index(zbc);
1796
1797 if (prev->zbc_next != index) {
1798 zba_chain_corruption_panic(prev, zbc);
1799 }
1800 if ((prev->zbc_next = zbc->zbc_next)) {
1801 struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1802 if (next->zbc_prev != index) {
1803 zba_chain_corruption_panic(zbc, next);
1804 }
1805 next->zbc_prev = zbc->zbc_prev;
1806 }
1807 }
1808
1809 static vm_address_t
1810 zba_try_pop_block(uint32_t order, bool with_extra)
1811 {
1812 struct zone_bits_head *hd = zba_head(order, with_extra);
1813 struct zone_bits_chain *zbc;
1814
1815 if (hd->zbh_next == 0) {
1816 return 0;
1817 }
1818
1819 zbc = zba_chain_for_index(hd->zbh_next);
1820 zba_remove_block(zbc);
1821 return (vm_address_t)zbc;
1822 }
1823
1824 static struct zone_bits_allocator_header *
1825 zba_header(vm_offset_t addr)
1826 {
1827 addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1828 return (struct zone_bits_allocator_header *)addr;
1829 }
1830
1831 static size_t
1832 zba_node_parent(size_t node)
1833 {
1834 return (node - 1) / 2;
1835 }
1836
1837 static size_t
1838 zba_node_left_child(size_t node)
1839 {
1840 return node * 2 + 1;
1841 }
1842
1843 static size_t
1844 zba_node_buddy(size_t node)
1845 {
1846 return ((node - 1) ^ 1) + 1;
1847 }
1848
1849 static size_t
1850 zba_node(vm_offset_t addr, uint32_t order)
1851 {
1852 vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1853 return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1854 }
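
/*
 * Worked example (assuming a 16KiB ZBA_CHUNK_SIZE, i.e. ZBA_MAX_ORDER == 10):
 * the chunk is viewed as a complete binary tree whose two order-10 halves are
 * nodes 1 and 2, and whose order-0 leaves are nodes 2047..4094.
 *
 *   zba_node(base + 5 * ZBA_GRANULE, 0)  == 5 + (1 << 11) - 1 == 2052
 *   zba_node_buddy(2052)                 == 2051   (granule offset 4)
 *   zba_node_parent(2052)                == 1025   (order-1 block at offset 4)
 *
 * Freeing the block at node 2052 can thus coalesce with its buddy 2051 into
 * the order-1 block tracked by node 1025, and so on up the tree.
 */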
1855
1856 static struct zone_bits_chain *
1857 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1858 {
1859 vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1860 return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1861 }
1862
1863 static void
1864 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1865 {
1866 zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1867 }
1868
1869 static bool
1870 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1871 {
1872 return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1873 }
1874
1875 static void
1876 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1877 {
1878 struct zone_bits_allocator_header *zbah = zba_header(addr);
1879 struct zone_bits_chain *zbc;
1880 size_t node = zba_node(addr, order);
1881
1882 while (node) {
1883 size_t parent = zba_node_parent(node);
1884
1885 zba_node_flip_split(zbah, parent);
1886 if (zba_node_is_split(zbah, parent)) {
1887 break;
1888 }
1889
1890 zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1891 zba_remove_block(zbc);
1892 order++;
1893 node = parent;
1894 }
1895
1896 zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1897 }
1898
1899 static vm_size_t
1900 zba_chunk_header_size(uint32_t n)
1901 {
1902 vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1903 if (n == 0) {
1904 hdr_size += sizeof(struct zone_bits_allocator_meta);
1905 }
1906 return hdr_size;
1907 }
1908
1909 static void
1910 zba_init_chunk(uint32_t n, bool with_extra)
1911 {
1912 vm_size_t hdr_size = zba_chunk_header_size(n);
1913 vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1914 struct zone_bits_allocator_header *zbah = zba_header(page);
1915 vm_size_t size = ZBA_CHUNK_SIZE;
1916 size_t node;
1917
1918 for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1919 if (size < hdr_size + (ZBA_GRANULE << o)) {
1920 continue;
1921 }
1922 size -= ZBA_GRANULE << o;
1923 node = zba_node(page + size, o);
1924 zba_node_flip_split(zbah, zba_node_parent(node));
1925 zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1926 }
1927 }
1928
1929 __attribute__((noinline))
1930 static void
1931 zba_grow(bool with_extra)
1932 {
1933 struct zone_bits_allocator_meta *meta = zba_meta();
1934 kern_return_t kr = KERN_SUCCESS;
1935 uint32_t chunk;
1936
1937 #if !ZALLOC_TEST
1938 if (meta->zbam_left >= meta->zbam_right) {
1939 zba_memory_exhausted();
1940 }
1941 #endif
1942
1943 if (with_extra) {
1944 chunk = meta->zbam_right - 1;
1945 } else {
1946 chunk = meta->zbam_left;
1947 }
1948
1949 kr = zba_populate(chunk, with_extra);
1950 if (kr == KERN_SUCCESS) {
1951 if (with_extra) {
1952 meta->zbam_right -= 1;
1953 } else {
1954 meta->zbam_left += 1;
1955 }
1956
1957 zba_init_chunk(chunk, with_extra);
1958 #if !ZALLOC_TEST
1959 } else {
1960 /*
1961 * zba_populate() has to be allowed to fail populating;
1962 * as we are under a global lock, we need to do the
1963 * VM_PAGE_WAIT() outside of the lock.
1964 */
1965 assert(kr == KERN_RESOURCE_SHORTAGE);
1966 zba_unlock();
1967 VM_PAGE_WAIT();
1968 zba_lock();
1969 #endif
1970 }
1971 }
1972
1973 static vm_offset_t
1974 zba_alloc(uint32_t order, bool with_extra)
1975 {
1976 struct zone_bits_allocator_header *zbah;
1977 uint32_t cur = order;
1978 vm_address_t addr;
1979 size_t node;
1980
1981 while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1982 if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1983 zba_grow(with_extra);
1984 cur = order;
1985 }
1986 }
1987
1988 zbah = zba_header(addr);
1989 node = zba_node(addr, cur);
1990 zba_node_flip_split(zbah, zba_node_parent(node));
1991 while (cur > order) {
1992 cur--;
1993 zba_node_flip_split(zbah, node);
1994 node = zba_node_left_child(node);
1995 zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1996 cur, with_extra);
1997 }
1998
1999 return addr;
2000 }
2001
2002 #define zba_map_index(type, n) (n / (8 * sizeof(type)))
2003 #define zba_map_bit(type, n) ((type)1 << (n % (8 * sizeof(type))))
2004 #define zba_map_mask_lt(type, n) (zba_map_bit(type, n) - 1)
2005 #define zba_map_mask_ge(type, n) ((type)-zba_map_bit(type, n))
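
/*
 * Illustrative values for the helpers above (uint32_t, n == 37):
 *
 *   zba_map_index(uint32_t, 37)    == 1            (37 / 32)
 *   zba_map_bit(uint32_t, 37)      == 0x00000020   (1u << 5)
 *   zba_map_mask_lt(uint32_t, 37)  == 0x0000001f   (bits [0, 5) of word 1)
 *   zba_map_mask_ge(uint32_t, 37)  == 0xffffffe0   (bits [5, 32) of word 1)
 */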
2006
2007 #if !ZALLOC_TEST
2008 #if VM_TAG_SIZECLASSES
2009
2010 static void *
2011 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
2012 {
2013 vm_offset_t base = zone_info.zi_xtra_range.min_address;
2014 vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
2015
2016 return (void *)(base + ((offs + idx) << zba_xtra_shift));
2017 }
2018
2019 #endif /* VM_TAG_SIZECLASSES */
2020
2021 static uint32_t
2022 zba_bits_ref_order(uint32_t bref)
2023 {
2024 return bref >> ZBA_ORDER_SHIFT;
2025 }
2026
2027 static bitmap_t *
2028 zba_bits_ref_ptr(uint32_t bref)
2029 {
2030 return zba_slot_base() + (bref & ZBA_PTR_MASK);
2031 }
2032
2033 static vm_offset_t
2034 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
2035 zalloc_flags_t flags, vm_offset_t eidx)
2036 {
2037 size_t i = eidx / 32;
2038 uint32_t map;
2039
2040 if (eidx % 32) {
2041 map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
2042 if (map) {
2043 eidx = __builtin_ctz(map);
2044 meta[i].zm_bitmap ^= 1u << eidx;
2045 return i * 32 + eidx;
2046 }
2047 i++;
2048 }
2049
2050 uint32_t chunk_len = meta->zm_chunk_len;
2051 if (flags & Z_PCPU) {
2052 chunk_len = zpercpu_count();
2053 }
2054 for (int j = 0; j < chunk_len; j++, i++) {
2055 if (i >= chunk_len) {
2056 i = 0;
2057 }
2058 if (__probable(map = meta[i].zm_bitmap)) {
2059 meta[i].zm_bitmap &= map - 1;
2060 return i * 32 + __builtin_ctz(map);
2061 }
2062 }
2063
2064 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2065 }
2066
2067 static vm_offset_t
2068 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2069 vm_offset_t eidx)
2070 {
2071 uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2072 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2073 size_t i = eidx / 64;
2074 uint64_t map;
2075
2076 if (eidx % 64) {
2077 map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2078 if (map) {
2079 eidx = __builtin_ctzll(map);
2080 bits[i] ^= 1ull << eidx;
2081 return i * 64 + eidx;
2082 }
2083 i++;
2084 }
2085
2086 for (int j = 0; j < bits_size; i++, j++) {
2087 if (i >= bits_size) {
2088 i = 0;
2089 }
2090 if (__probable(map = bits[i])) {
2091 bits[i] &= map - 1;
2092 return i * 64 + __builtin_ctzll(map);
2093 }
2094 }
2095
2096 zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2097 }
2098
2099 /*!
2100 * @function zone_meta_find_and_clear_bit
2101 *
2102 * @brief
2103 * The core of the bitmap allocator: find a bit set in the bitmaps.
2104 *
2105 * @discussion
2106 * This method will round robin through available allocations,
2107 * with a per-CPU memory of the last allocated element index.
2108 *
2109 * This is done in order to avoid a fully LIFO behavior which makes exploiting
2110 * double-free bugs way too practical.
2111 *
2112 * @param zone The zone we're allocating from.
2113 * @param meta The main metadata for the chunk being allocated from.
2114 * @param flags the alloc flags (for @c Z_PCPU).
2115 */
2116 static vm_offset_t
2117 zone_meta_find_and_clear_bit(
2118 zone_t zone,
2119 zone_stats_t zs,
2120 struct zone_page_metadata *meta,
2121 zalloc_flags_t flags)
2122 {
2123 vm_offset_t eidx = zs->zs_alloc_rr + 1;
2124
2125 if (meta->zm_inline_bitmap) {
2126 eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2127 } else {
2128 eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2129 }
2130 zs->zs_alloc_rr = (uint16_t)eidx;
2131 return eidx;
2132 }
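
/*
 * Example of the round-robin behavior (hypothetical 32-element inline chunk):
 * if the previous allocation recorded zs_alloc_rr == 6, the next scan starts
 * at bit 7 of the inline bitmap; assuming bits 7 and 9 are still set (free),
 * element 7 is returned and zs_alloc_rr becomes 7. Consecutive allocations
 * therefore walk forward through the chunk instead of reusing the most
 * recently freed element, which is the LIFO behavior the comment above calls
 * out as making double-free bugs easy to exploit.
 */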
2133
2134 /*!
2135 * @function zone_meta_bits_init_inline
2136 *
2137 * @brief
2138 * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2139 *
2140 * @param meta The main metadata for the initialized chunk.
2141 * @param count The number of elements the chunk can hold
2142 * (which might be partial for partially populated chunks).
2143 */
2144 static void
2145 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2146 {
2147 /*
2148 * We're called with the metadata zm_bitmap fields already zeroed out.
2149 */
2150 for (size_t i = 0; i < count / 32; i++) {
2151 meta[i].zm_bitmap = ~0u;
2152 }
2153 if (count % 32) {
2154 meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2155 }
2156 }
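
/*
 * Example: for a hypothetical chunk holding count == 40 elements,
 * zone_meta_bits_init_inline() sets
 *
 *   meta[0].zm_bitmap = 0xffffffff;                      // elements 0..31 free
 *   meta[1].zm_bitmap = zba_map_mask_lt(uint32_t, 40);   // 0x000000ff, 32..39
 *
 * i.e. exactly the first 40 bits are set, marking every element as free.
 */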
2157
2158 /*!
2159 * @function zone_meta_bits_alloc_init
2160 *
2161 * @brief
2162 * Allocates a zm_bitmap field for a newly assigned chunk.
2163 *
2164 * @param count The number of elements the chunk can hold
2165 * (which might be partial for partially populated chunks).
2166 * @param nbits The maximum number of bits that will be used.
2167 * @param with_extra Whether "VM Tracking" metadata needs to be allocated.
2168 */
2169 static uint32_t
2170 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2171 {
2172 static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2173 ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2174
2175 uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2176 uint64_t *bits;
2177 size_t i = 0;
2178
2179 assert(order <= ZBA_MAX_ALLOC_ORDER);
2180 assert(count <= ZBA_GRANULE_BITS << order);
2181
2182 zba_lock();
2183 bits = (uint64_t *)zba_alloc(order, with_extra);
2184 zba_unlock();
2185
2186 while (i < count / 64) {
2187 bits[i++] = ~0ull;
2188 }
2189 if (count % 64) {
2190 bits[i++] = zba_map_mask_lt(uint64_t, count);
2191 }
2192 while (i < 1u << order) {
2193 bits[i++] = 0;
2194 }
2195
2196 return (uint32_t)(bits - zba_slot_base()) +
2197 (order << ZBA_ORDER_SHIFT) +
2198 (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2199 }
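
/*
 * Example: for a hypothetical chunk with nbits == 200 and count == 200,
 * order = flsll((200 - 1) / ZBA_GRANULE_BITS) = flsll(3) = 2, so a 4-granule
 * (256-bit) bitmap is taken from the buddy allocator; bits 0..199 are set,
 * bits 200..255 are cleared, and the returned reference packs the slot index
 * of the bitmap with (2 << ZBA_ORDER_SHIFT) and, if requested,
 * ZBA_HAS_EXTRA_BIT.
 */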
2200
2201 /*!
2202 * @function zone_meta_bits_merge
2203 *
2204 * @brief
2205 * Adds elements <code>[start, end)</code> to a chunk being extended.
2206 *
2207 * @param meta The main metadata for the extended chunk.
2208 * @param start The index of the first element to add to the chunk.
2209 * @param end The index one past the last element to add (exclusive).
2210 */
2211 static void
2212 zone_meta_bits_merge(struct zone_page_metadata *meta,
2213 uint32_t start, uint32_t end)
2214 {
2215 if (meta->zm_inline_bitmap) {
2216 while (start < end) {
2217 size_t s_i = start / 32;
2218 size_t s_e = end / 32;
2219
2220 if (s_i == s_e) {
2221 meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2222 zba_map_mask_ge(uint32_t, start);
2223 break;
2224 }
2225
2226 meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2227 start += 32 - (start % 32);
2228 }
2229 } else {
2230 uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2231
2232 while (start < end) {
2233 size_t s_i = start / 64;
2234 size_t s_e = end / 64;
2235
2236 if (s_i == s_e) {
2237 bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2238 zba_map_mask_ge(uint64_t, start);
2239 break;
2240 }
2241 bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2242 start += 64 - (start % 64);
2243 }
2244 }
2245 }
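
/*
 * Example (inline bitmaps, hypothetical start == 10, end == 40):
 * the first pass sets bits [10, 32) of meta[0].zm_bitmap and advances
 * start to 32; the second pass lands in the s_i == s_e case, sets
 * bits [32, 40) in meta[1].zm_bitmap, and breaks out of the loop.
 */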
2246
2247 /*!
2248 * @function zone_bits_free
2249 *
2250 * @brief
2251 * Frees a bitmap to the zone bitmap allocator.
2252 *
2253 * @param bref
2254 * A bitmap reference set by @c zone_meta_bits_init() in a @c zm_bitmap field.
2255 */
2256 static void
2257 zone_bits_free(uint32_t bref)
2258 {
2259 zba_lock();
2260 zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2261 zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2262 zba_unlock();
2263 }
2264
2265 /*!
2266 * @function zone_meta_is_free
2267 *
2268 * @brief
2269 * Returns whether a given element appears free.
2270 */
2271 static bool
2272 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2273 {
2274 if (meta->zm_inline_bitmap) {
2275 uint32_t bit = zba_map_bit(uint32_t, eidx);
2276 return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2277 } else {
2278 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2279 uint64_t bit = zba_map_bit(uint64_t, eidx);
2280 return bits[zba_map_index(uint64_t, eidx)] & bit;
2281 }
2282 }
2283
2284 /*!
2285 * @function zone_meta_mark_free
2286 *
2287 * @brief
2288 * Marks an element as free and returns whether it was marked as used.
2289 */
2290 static bool
2291 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2292 {
2293 if (meta->zm_inline_bitmap) {
2294 uint32_t bit = zba_map_bit(uint32_t, eidx);
2295 if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2296 return false;
2297 }
2298 meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2299 } else {
2300 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2301 uint64_t bit = zba_map_bit(uint64_t, eidx);
2302 if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2303 return false;
2304 }
2305 bits[zba_map_index(uint64_t, eidx)] ^= bit;
2306 }
2307 return true;
2308 }
2309
2310 #if VM_TAG_SIZECLASSES
2311
2312 __startup_func
2313 void
2314 __zone_site_register(vm_allocation_site_t *site)
2315 {
2316 if (zone_tagging_on) {
2317 vm_tag_alloc(site);
2318 }
2319 }
2320
2321 uint16_t
2322 zone_index_from_tag_index(uint32_t sizeclass_idx)
2323 {
2324 return zone_tags_sizeclasses[sizeclass_idx];
2325 }
2326
2327 #endif /* VM_TAG_SIZECLASSES */
2328 #endif /* !ZALLOC_TEST */
2329 /*! @} */
2330 #pragma mark zalloc helpers
2331 #if !ZALLOC_TEST
2332
2333 static inline void *
2334 zstack_tbi_fix(vm_offset_t elem)
2335 {
2336 elem = vm_memtag_load_tag(elem);
2337 return (void *)elem;
2338 }
2339
2340 static inline vm_offset_t
2341 zstack_tbi_fill(void *addr)
2342 {
2343 vm_offset_t elem = (vm_offset_t)addr;
2344
2345 return vm_memtag_canonicalize_kernel(elem);
2346 }
2347
2348 __attribute__((always_inline))
2349 static inline void
2350 zstack_push_no_delta(zstack_t *stack, void *addr)
2351 {
2352 vm_offset_t elem = zstack_tbi_fill(addr);
2353
2354 *(vm_offset_t *)addr = stack->z_head - elem;
2355 stack->z_head = elem;
2356 }
2357
2358 __attribute__((always_inline))
2359 void
2360 zstack_push(zstack_t *stack, void *addr)
2361 {
2362 zstack_push_no_delta(stack, addr);
2363 stack->z_count++;
2364 }
2365
2366 __attribute__((always_inline))
2367 static inline void *
2368 zstack_pop_no_delta(zstack_t *stack)
2369 {
2370 void *addr = zstack_tbi_fix(stack->z_head);
2371
2372 stack->z_head += *(vm_offset_t *)addr;
2373 *(vm_offset_t *)addr = 0;
2374
2375 return addr;
2376 }
2377
2378 __attribute__((always_inline))
2379 void *
2380 zstack_pop(zstack_t *stack)
2381 {
2382 stack->z_count--;
2383 return zstack_pop_no_delta(stack);
2384 }
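
/*
 * Sketch of the zstack encoding used above: z_head holds the (canonicalized)
 * address of the top element, and each free element stores the delta to the
 * element below it rather than a raw pointer. Pushing element B on top of A
 * stores (A - B) inside B and sets z_head = B; popping adds that delta back
 * to recover A. A plausible motivation (not stated here) is that a stale or
 * forged stored value is only meaningful relative to the current head, which
 * makes freelist corruption harder than with plain linked pointers.
 */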
2385
2386 static inline void
2387 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2388 {
2389 uint32_t ticket;
2390
2391 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2392 &ticket, &zone_locks_grp))) {
2393 return;
2394 }
2395
2396 hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2397
2398 /*
2399 * If zone caching has been disabled due to memory pressure,
2400 * then recording contention is not useful; give the system
2401 * time to recover.
2402 */
2403 if (__probable(!zone_caching_disabled && !zone_exhausted(zone))) {
2404 zone->z_recirc_cont_cur++;
2405 }
2406 }
2407
2408 static inline void
2409 zone_recirc_lock_nopreempt(zone_t zone)
2410 {
2411 hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2412 }
2413
2414 static inline void
2415 zone_recirc_unlock_nopreempt(zone_t zone)
2416 {
2417 hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2418 }
2419
2420 static inline void
2421 zone_lock_nopreempt_check_contention(zone_t zone)
2422 {
2423 uint32_t ticket;
2424 #if KASAN_FAKESTACK
2425 spl_t s = 0;
2426 if (zone->z_kasan_fakestacks) {
2427 s = splsched();
2428 }
2429 #endif /* KASAN_FAKESTACK */
2430
2431 if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2432 &zone_locks_grp))) {
2433 #if KASAN_FAKESTACK
2434 zone->z_kasan_spl = s;
2435 #endif /* KASAN_FAKESTACK */
2436 return;
2437 }
2438
2439 hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2440 #if KASAN_FAKESTACK
2441 zone->z_kasan_spl = s;
2442 #endif /* KASAN_FAKESTACK */
2443
2444 /*
2445 * If zone caching has been disabled due to memory pressure,
2446 * then recording contention is not useful; give the system
2447 * time to recover.
2448 */
2449 if (__probable(!zone_caching_disabled &&
2450 !zone->z_pcpu_cache && !zone_exhausted(zone))) {
2451 zone->z_recirc_cont_cur++;
2452 }
2453 }
2454
2455 static inline void
2456 zone_lock_nopreempt(zone_t zone)
2457 {
2458 #if KASAN_FAKESTACK
2459 spl_t s = 0;
2460 if (zone->z_kasan_fakestacks) {
2461 s = splsched();
2462 }
2463 #endif /* KASAN_FAKESTACK */
2464 hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2465 #if KASAN_FAKESTACK
2466 zone->z_kasan_spl = s;
2467 #endif /* KASAN_FAKESTACK */
2468 }
2469
2470 static inline void
2471 zone_unlock_nopreempt(zone_t zone)
2472 {
2473 #if KASAN_FAKESTACK
2474 spl_t s = zone->z_kasan_spl;
2475 zone->z_kasan_spl = 0;
2476 #endif /* KASAN_FAKESTACK */
2477 hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2478 #if KASAN_FAKESTACK
2479 if (zone->z_kasan_fakestacks) {
2480 splx(s);
2481 }
2482 #endif /* KASAN_FAKESTACK */
2483 }
2484
2485 static inline void
2486 zone_depot_lock_nopreempt(zone_cache_t zc)
2487 {
2488 hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2489 }
2490
2491 static inline void
2492 zone_depot_unlock_nopreempt(zone_cache_t zc)
2493 {
2494 hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2495 }
2496
2497 static inline void
2498 zone_depot_lock(zone_cache_t zc)
2499 {
2500 hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2501 }
2502
2503 static inline void
2504 zone_depot_unlock(zone_cache_t zc)
2505 {
2506 hw_lck_ticket_unlock(&zc->zc_depot_lock);
2507 }
2508
2509 zone_t
2510 zone_by_id(size_t zid)
2511 {
2512 return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2513 }
2514
2515 static inline bool
2516 zone_supports_vm(zone_t z)
2517 {
2518 /*
2519 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2520 * to overcommit because they're used to reclaim memory
2521 * (VM support).
2522 */
2523 return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2524 z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2525 }
2526
2527 const char *
2528 zone_name(zone_t z)
2529 {
2530 return z->z_name;
2531 }
2532
2533 const char *
2534 zone_heap_name(zone_t z)
2535 {
2536 zone_security_flags_t zsflags = zone_security_config(z);
2537 if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2538 return kalloc_heap_names[zsflags.z_kheap_id];
2539 }
2540 return "invalid";
2541 }
2542
2543 static uint32_t
2544 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2545 {
2546 vm_size_t elem_count, chunks;
2547
2548 elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2549 zone_elem_outer_size(z);
2550 chunks = (max_elems + elem_count - 1) / elem_count;
2551
2552 return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2553 }
2554
2555 static inline vm_size_t
2556 zone_submaps_approx_size(void)
2557 {
2558 vm_size_t size = 0;
2559
2560 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2561 if (zone_submaps[idx] != VM_MAP_NULL) {
2562 size += zone_submaps[idx]->size;
2563 }
2564 }
2565
2566 return size;
2567 }
2568
2569 static inline void
2570 zone_depot_init(struct zone_depot *zd)
2571 {
2572 *zd = (struct zone_depot){
2573 .zd_tail = &zd->zd_head,
2574 };
2575 }
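
/*
 * Layout sketch for struct zone_depot as manipulated below: a single
 * singly-linked list of magazines where the zd_full full magazines come
 * first, followed by the zd_empty empty ones. zd_head points at the first
 * magazine and zd_tail points at the zm_next field of the last *full*
 * magazine (or at zd_head itself when there are none), so full magazines
 * are pushed/popped at the head while empty magazines are inserted and
 * removed at *zd_tail, without ever walking the list.
 */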
2576
2577 static inline void
2578 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2579 {
2580 if (zd->zd_full++ == 0) {
2581 zd->zd_tail = &mag->zm_next;
2582 }
2583 mag->zm_next = zd->zd_head;
2584 zd->zd_head = mag;
2585 }
2586
2587 static inline void
2588 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2589 {
2590 zd->zd_full++;
2591 mag->zm_next = *zd->zd_tail;
2592 *zd->zd_tail = mag;
2593 zd->zd_tail = &mag->zm_next;
2594 }
2595
2596 static inline void
2597 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2598 {
2599 zd->zd_empty++;
2600 mag->zm_next = *zd->zd_tail;
2601 *zd->zd_tail = mag;
2602 }
2603
2604 static inline zone_magazine_t
2605 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2606 {
2607 zone_magazine_t mag = zd->zd_head;
2608
2609 assert(zd->zd_full);
2610
2611 zd->zd_full--;
2612 if (z && z->z_recirc_full_min > zd->zd_full) {
2613 z->z_recirc_full_min = zd->zd_full;
2614 }
2615 zd->zd_head = mag->zm_next;
2616 if (zd->zd_full == 0) {
2617 zd->zd_tail = &zd->zd_head;
2618 }
2619
2620 mag->zm_next = NULL;
2621 return mag;
2622 }
2623
2624 static inline zone_magazine_t
2625 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2626 {
2627 zone_magazine_t mag = *zd->zd_tail;
2628
2629 assert(zd->zd_empty);
2630
2631 zd->zd_empty--;
2632 if (z && z->z_recirc_empty_min > zd->zd_empty) {
2633 z->z_recirc_empty_min = zd->zd_empty;
2634 }
2635 *zd->zd_tail = mag->zm_next;
2636
2637 mag->zm_next = NULL;
2638 return mag;
2639 }
2640
2641 static inline smr_seq_t
2642 zone_depot_move_full(
2643 struct zone_depot *dst,
2644 struct zone_depot *src,
2645 uint32_t n,
2646 zone_t z)
2647 {
2648 zone_magazine_t head, last;
2649
2650 assert(n);
2651 assert(src->zd_full >= n);
2652
2653 src->zd_full -= n;
2654 if (z && z->z_recirc_full_min > src->zd_full) {
2655 z->z_recirc_full_min = src->zd_full;
2656 }
2657 head = last = src->zd_head;
2658 for (uint32_t i = n; i-- > 1;) {
2659 last = last->zm_next;
2660 }
2661
2662 src->zd_head = last->zm_next;
2663 if (src->zd_full == 0) {
2664 src->zd_tail = &src->zd_head;
2665 }
2666
2667 if (z && zone_security_array[zone_index(z)].z_lifo) {
2668 if (dst->zd_full == 0) {
2669 dst->zd_tail = &last->zm_next;
2670 }
2671 last->zm_next = dst->zd_head;
2672 dst->zd_head = head;
2673 } else {
2674 last->zm_next = *dst->zd_tail;
2675 *dst->zd_tail = head;
2676 dst->zd_tail = &last->zm_next;
2677 }
2678 dst->zd_full += n;
2679
2680 return last->zm_seq;
2681 }
2682
2683 static inline void
2684 zone_depot_move_empty(
2685 struct zone_depot *dst,
2686 struct zone_depot *src,
2687 uint32_t n,
2688 zone_t z)
2689 {
2690 zone_magazine_t head, last;
2691
2692 assert(n);
2693 assert(src->zd_empty >= n);
2694
2695 src->zd_empty -= n;
2696 if (z && z->z_recirc_empty_min > src->zd_empty) {
2697 z->z_recirc_empty_min = src->zd_empty;
2698 }
2699 head = last = *src->zd_tail;
2700 for (uint32_t i = n; i-- > 1;) {
2701 last = last->zm_next;
2702 }
2703
2704 *src->zd_tail = last->zm_next;
2705
2706 dst->zd_empty += n;
2707 last->zm_next = *dst->zd_tail;
2708 *dst->zd_tail = head;
2709 }
2710
2711 static inline bool
2712 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2713 {
2714 if (depot->zd_full == 0) {
2715 return false;
2716 }
2717
2718 return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2719 }
2720
2721 static void
2722 zone_cache_swap_magazines(zone_cache_t cache)
2723 {
2724 uint16_t count_a = cache->zc_alloc_cur;
2725 uint16_t count_f = cache->zc_free_cur;
2726 vm_offset_t *elems_a = cache->zc_alloc_elems;
2727 vm_offset_t *elems_f = cache->zc_free_elems;
2728
2729 z_debug_assert(count_a <= zc_mag_size());
2730 z_debug_assert(count_f <= zc_mag_size());
2731
2732 cache->zc_alloc_cur = count_f;
2733 cache->zc_free_cur = count_a;
2734 cache->zc_alloc_elems = elems_f;
2735 cache->zc_free_elems = elems_a;
2736 }
2737
2738 __pure2
2739 static smr_t
2740 zone_cache_smr(zone_cache_t cache)
2741 {
2742 return cache->zc_smr;
2743 }
2744
2745 /*!
2746 * @function zone_magazine_replace
2747 *
2748 * @brief
2749 * Unload a magazine and load a new one in its place.
2750 */
2751 static zone_magazine_t
2752 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2753 {
2754 zone_magazine_t old;
2755 vm_offset_t **elems;
2756
2757 mag->zm_seq = SMR_SEQ_INVALID;
2758
2759 if (empty) {
2760 elems = &zc->zc_free_elems;
2761 zc->zc_free_cur = 0;
2762 } else {
2763 elems = &zc->zc_alloc_elems;
2764 zc->zc_alloc_cur = zc_mag_size();
2765 }
2766 old = (zone_magazine_t)((uintptr_t)*elems -
2767 offsetof(struct zone_magazine, zm_elems));
2768 *elems = mag->zm_elems;
2769
2770 return old;
2771 }
2772
2773 static zone_magazine_t
2774 zone_magazine_alloc(zalloc_flags_t flags)
2775 {
2776 return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2777 }
2778
2779 static void
2780 zone_magazine_free(zone_magazine_t mag)
2781 {
2782 (zfree)(zc_magazine_zone, mag);
2783 }
2784
2785 static void
2786 zone_magazine_free_list(struct zone_depot *zd)
2787 {
2788 zone_magazine_t tmp, mag = *zd->zd_tail;
2789
2790 while (mag) {
2791 tmp = mag->zm_next;
2792 zone_magazine_free(mag);
2793 mag = tmp;
2794 }
2795
2796 *zd->zd_tail = NULL;
2797 zd->zd_empty = 0;
2798 }
2799
2800 void
2801 zone_enable_caching(zone_t zone)
2802 {
2803 size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2804 zone_cache_t caches;
2805 size_t depot_limit;
2806
2807 depot_limit = zc_pcpu_max() / size_per_mag;
2808 zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2809
2810 caches = zalloc_percpu_permanent_type(struct zone_cache);
2811 zpercpu_foreach(zc, caches) {
2812 zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2813 zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2814 zone_depot_init(&zc->zc_depot);
2815 hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2816 }
2817
2818 zone_lock(zone);
2819 assert(zone->z_pcpu_cache == NULL);
2820 zone->z_pcpu_cache = caches;
2821 zone->z_recirc_cont_cur = 0;
2822 zone->z_recirc_cont_wma = 0;
2823 zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2824 zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2825 zone_unlock(zone);
2826 }
2827
2828 bool
2829 zone_maps_owned(vm_address_t addr, vm_size_t size)
2830 {
2831 return from_zone_map(addr, size);
2832 }
2833
2834 #if KASAN_LIGHT
2835 bool
2836 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2837 {
2838 return from_zone_map(addr, size) ||
2839 mach_vm_range_size(&zone_info.zi_map_range) == 0;
2840 }
2841 #endif /* KASAN_LIGHT */
2842
2843 void
2844 zone_map_sizes(
2845 vm_map_size_t *psize,
2846 vm_map_size_t *pfree,
2847 vm_map_size_t *plargest_free)
2848 {
2849 vm_map_size_t size, free, largest;
2850
2851 vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2852
2853 for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2854 vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2855 *psize += size;
2856 *pfree += free;
2857 *plargest_free = MAX(*plargest_free, largest);
2858 }
2859 }
2860
2861 __attribute__((always_inline))
2862 vm_map_t
2863 zone_submap(zone_security_flags_t zsflags)
2864 {
2865 return zone_submaps[zsflags.z_submap_idx];
2866 }
2867
2868 unsigned
2869 zpercpu_count(void)
2870 {
2871 return zpercpu_early_count;
2872 }
2873
2874 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2875 /*
2876 * Returns a random number of a given bit-width.
2877 *
2878 * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2879 *
2880 * This uses Intel's rdrand because random() uses FP registers,
2881 * which can cause FP faults and allocations, neither of which
2882 * zalloc itself can afford due to reentrancy problems.
2883 *
2884 * For pre-rdrand machines (which are no longer supported),
2885 * we fall back to a biased random generator that doesn't use FP,
2886 * so that VMs of newer OSes running on such older bare metal
2887 * can limp along (with reduced security).
2888 */
2889 static uint64_t
2890 zalloc_random_mask64(uint32_t bits)
2891 {
2892 uint64_t mask = ~0ull >> (64 - bits);
2893 uint64_t v;
2894
2895 #if __x86_64__
2896 if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2897 asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2898 v &= mask;
2899 } else {
2900 disable_preemption();
2901 int cpu = cpu_number();
2902 v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2903 zone_bool_gen[cpu].zbg_entropy,
2904 ZONE_ENTROPY_CNT, bits);
2905 enable_preemption();
2906 }
2907 #else
2908 v = early_random() & mask;
2909 #endif
2910
2911 return v;
2912 }
2913
2914 /*
2915 * Returns a random number within [bound_min, bound_max)
2916 *
2917 * This isn't _exactly_ uniform, but the skew is small enough
2918 * not to matter for the consumers of this interface.
2919 *
2920 * Values within [bound_min, bound_min + 2^64 % (bound_max - bound_min))
2921 * will be returned (bound_max - bound_min) / 2^64 more often
2922 * than values within [bound_min + 2^64 % (bound_max - bound_min), bound_max).
2923 */
2924 static uint32_t
2925 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2926 {
2927 uint64_t delta = bound_max - bound_min;
2928
2929 return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2930 }
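
/*
 * Example of the (negligible) skew: for a hypothetical delta of 3,
 * 2^64 % 3 == 1, so only bound_min is favored, appearing once more than
 * bound_min + 1 or bound_min + 2 over the full 2^64 input space, i.e. a
 * relative excess on the order of 10^-19.
 */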
2931
2932 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2933 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2934 /*
2935 * Track all kalloc zones of specified size for zlog name
2936 * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2937 *
2938 * Additionally track all early kalloc zones with early.kalloc
2939 */
2940 static bool
2941 track_kalloc_zones(zone_t z, const char *logname)
2942 {
2943 const char *prefix;
2944 size_t len;
2945 zone_security_flags_t zsflags = zone_security_config(z);
2946
2947 prefix = "kalloc.type.var.";
2948 len = strlen(prefix);
2949 if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2950 strncmp(logname, prefix, len) == 0) {
2951 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2952
2953 return zone_elem_inner_size(z) == sizeclass;
2954 }
2955
2956 prefix = "kalloc.type.";
2957 len = strlen(prefix);
2958 if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2959 strncmp(logname, prefix, len) == 0) {
2960 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2961
2962 return zone_elem_inner_size(z) == sizeclass;
2963 }
2964
2965 prefix = "kalloc.";
2966 len = strlen(prefix);
2967 if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2968 strncmp(logname, prefix, len) == 0) {
2969 vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2970
2971 return zone_elem_inner_size(z) == sizeclass;
2972 }
2973
2974 prefix = "early.kalloc";
2975 if ((zsflags.z_kheap_id == KHEAP_ID_EARLY) &&
2976 (strcmp(logname, prefix) == 0)) {
2977 return true;
2978 }
2979
2980 return false;
2981 }
2982 #endif
2983
2984 int
2985 track_this_zone(const char *zonename, const char *logname)
2986 {
2987 unsigned int len;
2988 const char *zc = zonename;
2989 const char *lc = logname;
2990
2991 /*
2992 * Compare the strings. We bound the compare by MAX_ZONE_NAME.
2993 */
2994
2995 for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2996 /*
2997 * If the current characters don't match, check for a space
2998 * in the zone name and a corresponding period in the log name.
2999 * If that's not there, then the strings don't match.
3000 */
3001
3002 if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
3003 break;
3004 }
3005
3006 /*
3007 * The strings are equal so far. If we're at the end, then it's a match.
3008 */
3009
3010 if (*zc == '\0') {
3011 return TRUE;
3012 }
3013 }
3014
3015 return FALSE;
3016 }
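
/*
 * Example (zone names are hypothetical): track_this_zone("vm map entries",
 * "vm.map.entries") returns TRUE, since each space in the zone name is
 * allowed to match a period in the boot-arg, while
 * track_this_zone("vm map entries", "vm.map") returns FALSE because the
 * log name ends before the zone name does.
 */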
3017
3018 #if DEBUG || DEVELOPMENT
3019
3020 vm_size_t
3021 zone_element_info(void *addr, vm_tag_t * ptag)
3022 {
3023 vm_size_t size = 0;
3024 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3025 struct zone *src_zone;
3026
3027 if (from_zone_map(addr, sizeof(void *))) {
3028 src_zone = zone_by_id(zone_index_from_ptr(addr));
3029 size = zone_elem_inner_size(src_zone);
3030 #if VM_TAG_SIZECLASSES
3031 if (__improbable(src_zone->z_uses_tags)) {
3032 struct zone_page_metadata *meta;
3033 vm_offset_t eidx;
3034 vm_tag_t *slot;
3035
3036 meta = zone_element_resolve(src_zone,
3037 (vm_offset_t)addr, &eidx);
3038 slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
3039 tag = *slot;
3040 }
3041 #endif /* VM_TAG_SIZECLASSES */
3042 }
3043
3044 *ptag = tag;
3045 return size;
3046 }
3047
3048 #endif /* DEBUG || DEVELOPMENT */
3049 #if KASAN_CLASSIC
3050
3051 vm_size_t
3052 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
3053 {
3054 zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
3055
3056 *zonep = zone;
3057 return zone_elem_inner_size(zone);
3058 }
3059
3060 #endif /* KASAN_CLASSIC */
3061 #endif /* !ZALLOC_TEST */
3062 #pragma mark Zone zeroing and early random
3063 #if !ZALLOC_TEST
3064
3065 /*
3066 * Zone zeroing
3067 *
3068 * All allocations from zones are zeroed on free and are additionally
3069 * checked to still be zero on alloc. The check is
3070 * always on, on embedded devices. A perf regression was detected
3071 * on Intel as we can't use the vectorized implementation of
3072 * memcmp_zero_ptr_aligned due to cyclic dependencies between
3073 * initialization and allocation. Therefore we perform the check
3074 * on only 20% of the allocations there.
3075 */
3076 #if ZALLOC_ENABLE_ZERO_CHECK
3077 #if defined(__x86_64__)
3078 /*
3079 * Perform zero validation on every 5th allocation
3080 */
3081 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3082 static uint32_t PERCPU_DATA(zzc_decrementer);
3083 #endif /* defined(__x86_64__) */
3084
3085 /*
3086 * Determine if zero validation for allocation should be skipped
3087 */
3088 static bool
3089 zalloc_skip_zero_check(void)
3090 {
3091 #if defined(__x86_64__)
3092 uint32_t *counterp, cnt;
3093
3094 counterp = PERCPU_GET(zzc_decrementer);
3095 cnt = *counterp;
3096 if (__probable(cnt > 0)) {
3097 *counterp = cnt - 1;
3098 return true;
3099 }
3100 *counterp = zzc_rate - 1;
3101 #endif /* defined(__x86_64__) */
3102 return false;
3103 }
3104
3105 __abortlike
3106 static void
3107 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3108 {
3109 uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3110 uint32_t first_offs = ~0u;
3111 uintptr_t first_bits = 0, v;
3112 char buf[1024];
3113 int pos = 0;
3114
3115 buf[0] = '\0';
3116
3117 for (uint32_t o = 0; o < size; o += sizeof(v)) {
3118 if ((v = *(uintptr_t *)(elem + o)) == 0) {
3119 continue;
3120 }
3121 pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3122 "%5d: 0x%016lx", o, v);
3123 if (first_offs > o) {
3124 first_offs = o;
3125 first_bits = v;
3126 }
3127 }
3128
3129 (panic)("[%s%s]: element modified after free "
3130 "(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3131 zone_heap_name(z), zone_name(z),
3132 first_offs, first_bits, esize, (void *)elem, buf);
3133 }
3134
3135 static void
3136 zalloc_validate_element(
3137 zone_t zone,
3138 vm_offset_t elem,
3139 vm_size_t size,
3140 zalloc_flags_t flags)
3141 {
3142 if (flags & Z_NOZZC) {
3143 return;
3144 }
3145 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3146 zalloc_uaf_panic(zone, elem, size);
3147 }
3148 if (flags & Z_PCPU) {
3149 for (size_t i = zpercpu_count(); --i > 0;) {
3150 elem += PAGE_SIZE;
3151 if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3152 zalloc_uaf_panic(zone, elem, size);
3153 }
3154 }
3155 }
3156 }
3157
3158 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3159
3160 __attribute__((noinline))
3161 static void
3162 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3163 {
3164 #if KASAN_FAKESTACK
3165 /*
3166 * This can cause re-entrancy with kasan fakestacks
3167 */
3168 #pragma unused(zone, cpu, zs)
3169 #else
3170 uint32_t bits;
3171
3172 bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3173 zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3174
3175 zs->zs_alloc_rr += bits;
3176 zs->zs_alloc_rr %= zone->z_chunk_elems;
3177 #endif
3178 }
3179
3180 #endif /* !ZALLOC_TEST */
3181 #pragma mark Zone Leak Detection
3182 #if !ZALLOC_TEST
3183 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3184
3185 /*
3186 * Zone leak debugging code
3187 *
3188 * When enabled, this code keeps a log to track allocations to a particular
3189 * zone that have not yet been freed.
3190 *
3191 * Examining this log will reveal the source of a zone leak.
3192 *
3193 * The log is allocated only when logging is enabled (it is off by default),
3194 * so there is no effect on the system when it's turned off.
3195 *
3196 * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3197 * zone name to log, with n starting at 1.
3198 *
3199 * Leaks debugging utilizes 2 tunables:
3200 * - zlsize (in kB) which describes how much "size" the record covers
3201 * (zones with smaller elements get more records, default is 4M).
3202 *
3203 * - zlfreq (in bytes) which describes a sample rate in cumulative allocation
3204 * size at which automatic leak detection will sample allocations.
3205 * (default is 8k)
3206 *
3207 *
3208 * Zone corruption logging
3209 *
3210 * Logging can also be used to help identify the source of a zone corruption.
3211 *
3212 * First, identify the zone that is being corrupted,
3213 * then add "-zc zlog<n>=<zone name>" to the boot-args.
3214 *
3215 * When -zc is used in conjunction with zlog,
3216 * it changes the logging style to track both allocations and frees to the zone.
3217 *
3218 * When the corruption is detected, examining the log will show you the stack
3219 * traces of the callers who last allocated and freed any particular element in
3220 * the zone.
3221 *
3222 * Corruption debugging logs will have zrecs records
3223 * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3224 */
3225
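/*
 * Illustrative boot-args combinations (zone names are hypothetical):
 *
 *   zlog1=data.kalloc.48 zlsize=8192   leak logging for one zone, with the
 *                                      leak log sized to cover ~8MB
 *   -zc zlog1=vm.map.entries           corruption-style logging (alloc and
 *                                      free records) for one zone
 */
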
3226 #define ZRECORDS_MAX (256u << 10)
3227 #define ZRECORDS_DEFAULT (16u << 10)
3228 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3229 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3230 static TUNABLE(uint32_t, zlfreq, "zlfreq", 8 * 1024);
3231
3232 __startup_func
3233 static void
3234 zone_leaks_init_zrecs(void)
3235 {
3236 /*
3237 * Don't allow more than ZRECORDS_MAX records,
3238 * even if the user asked for more.
3239 *
3240 * This prevents accidentally hogging too much kernel memory
3241 * and making the system unusable.
3242 */
3243 if (zrecs == 0) {
3244 zrecs = ZRECORDS_DEFAULT *
3245 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3246 }
3247 if (zrecs > ZRECORDS_MAX) {
3248 zrecs = ZRECORDS_MAX;
3249 }
3250 }
3251 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3252
3253 static uint32_t
3254 zone_leaks_record_count(zone_t z)
3255 {
3256 uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3257
3258 return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3259 }
3260
3261 static uint32_t
3262 zone_leaks_sample_rate(zone_t z)
3263 {
3264 return zlfreq / zone_elem_inner_size(z);
3265 }
3266
3267 #if ZALLOC_ENABLE_LOGGING
3268 /* Log allocations and frees to help debug a zone element corruption */
3269 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3270
3271 /*
3272 * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3273 */
3274 #define MAX_ZONES_LOG_REQUESTS 10
3275
3276 /**
3277 * @function zone_setup_logging
3278 *
3279 * @abstract
3280 * Optionally sets up a zone for logging.
3281 *
3282 * @discussion
3283 * We recognize the following boot-args:
3284 *
3285 * zlog=<zone_to_log>
3286 * zrecs=<num_records_in_log>
3287 * zlsize=<memory to cover for leaks>
3288 *
3289 * The zlog arg is used to specify the zone name that should be logged,
3290 * and zrecs/zlsize are used to control the size of the log.
3291 */
3292 static void
3293 zone_setup_logging(zone_t z)
3294 {
3295 char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3296 char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3297 char zlog_val[MAX_ZONE_NAME]; /* the zone name we're logging, if any */
3298 bool logging_on = false;
3299
3300 /*
3301 * Append kalloc heap name to zone name (if zone is used by kalloc)
3302 */
3303 snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3304
3305 /* zlog0 isn't allowed. */
3306 for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3307 snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3308
3309 if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3310 if (track_this_zone(zone_name, zlog_val) ||
3311 track_kalloc_zones(z, zlog_val)) {
3312 logging_on = true;
3313 break;
3314 }
3315 }
3316 }
3317
3318 /*
3319 * Backwards compat. with the old boot-arg used to specify single zone
3320 * logging, i.e. zlog. This needs to happen after the newer zlog<n> checks
3321 * because the "zlog" prefix would match all of the zlog<n>
3322 * boot-args.
3323 */
3324 if (!logging_on &&
3325 PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3326 if (track_this_zone(zone_name, zlog_val) ||
3327 track_kalloc_zones(z, zlog_val)) {
3328 logging_on = true;
3329 }
3330 }
3331
3332 /*
3333 * If we want to log a zone, see if we need to allocate buffer space for
3334 * the log.
3335 *
3336 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3337 * we have to defer allocation in that case.
3338 *
3339 * zone_init() will finish the job.
3340 *
3341 * If we want to log one of the VM related zones that's set up early on,
3342 * we will skip allocation of the log until zinit is called again later
3343 * on some other zone.
3344 */
3345 if (logging_on) {
3346 if (corruption_debug_flag) {
3347 z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3348 } else {
3349 z->z_btlog = btlog_create(BTLOG_HASH,
3350 zone_leaks_record_count(z), 0);
3351 }
3352 if (z->z_btlog) {
3353 z->z_log_on = true;
3354 printf("zone[%s%s]: logging enabled\n",
3355 zone_heap_name(z), z->z_name);
3356 } else {
3357 printf("zone[%s%s]: failed to enable logging\n",
3358 zone_heap_name(z), z->z_name);
3359 }
3360 }
3361 }
3362
3363 #endif /* ZALLOC_ENABLE_LOGGING */
3364 #if KASAN_TBI
3365 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3366
3367 __startup_func
3368 static void
3369 kasan_tbi_init_zrecs(void)
3370 {
3371 /*
3372 * Don't allow more than ZRECORDS_MAX records,
3373 * even if the user asked for more.
3374 *
3375 * This prevents accidentally hogging too much kernel memory
3376 * and making the system unusable.
3377 */
3378 if (kasan_zrecs == 0) {
3379 kasan_zrecs = ZRECORDS_DEFAULT *
3380 (uint32_t)((max_mem + (1ul << 30)) >> 30);
3381 }
3382 if (kasan_zrecs > ZRECORDS_MAX) {
3383 kasan_zrecs = ZRECORDS_MAX;
3384 }
3385 }
3386 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3387
3388 static void
3389 zone_setup_kasan_logging(zone_t z)
3390 {
3391 if (!z->z_tbi_tag) {
3392 printf("zone[%s%s]: kasan logging disabled for this zone\n",
3393 zone_heap_name(z), z->z_name);
3394 return;
3395 }
3396
3397 z->z_log_on = true;
3398 z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3399 if (!z->z_btlog) {
3400 printf("zone[%s%s]: failed to enable kasan logging\n",
3401 zone_heap_name(z), z->z_name);
3402 }
3403 }
3404
3405 #endif /* KASAN_TBI */
3406 #if CONFIG_ZLEAKS
3407
3408 static thread_call_data_t zone_leaks_callout;
3409
3410 /*
3411 * The zone leak detector, abbreviated 'zleak', keeps track
3412 * of a subset of the currently outstanding allocations
3413 * made by the zone allocator.
3414 *
3415 * Zones that use more than zleak_pages_per_zone_wired_threshold
3416 * pages will get a BTLOG_HASH btlog with sampling to minimize
3417 * perf impact, yet receive statistical data about the backtrace
3418 * that is the most likely to cause the leak.
3419 *
3420 * If the zone drops far enough below the threshold, then the log
3421 * is disabled and backtraces freed. Data can be collected
3422 * from userspace with the zlog(1) command.
3423 */
3424
3425 uint32_t zleak_active;
3426 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3427
3428 /* Size a zone must reach before we start collecting data on it */
3429 static size_t zleak_pages_per_zone_wired_threshold = ~0;
3430 vm_size_t zleak_per_zone_tracking_threshold = ~0;
3431
3432 static inline bool
3433 zleak_should_enable_for_zone(zone_t z)
3434 {
3435 if (z->z_log_on) {
3436 return false;
3437 }
3438 if (z->z_btlog) {
3439 return false;
3440 }
3441 if (z->z_exhausts) {
3442 return false;
3443 }
3444 if (zone_exhaustible(z)) {
3445 return z->z_wired_cur * 8 >= z->z_wired_max * 7;
3446 }
3447 return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3448 }
3449
3450 static inline bool
3451 zleak_should_disable_for_zone(zone_t z)
3452 {
3453 if (z->z_log_on) {
3454 return false;
3455 }
3456 if (!z->z_btlog) {
3457 return false;
3458 }
3459 if (zone_exhaustible(z)) {
3460 return z->z_wired_cur * 8 < z->z_wired_max * 7;
3461 }
3462 return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3463 }
3464
3465 static void
3466 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3467 {
3468 btlog_t log;
3469
3470 zone_foreach(z) {
3471 if (zleak_should_disable_for_zone(z)) {
3472 log = z->z_btlog;
3473 z->z_btlog = NULL;
3474 assert(z->z_btlog_disabled == NULL);
3475 btlog_disable(log);
3476 z->z_btlog_disabled = log;
3477 os_atomic_dec(&zleak_active, relaxed);
3478 }
3479
3480 if (zleak_should_enable_for_zone(z)) {
3481 log = z->z_btlog_disabled;
3482 if (log == NULL) {
3483 log = btlog_create(BTLOG_HASH,
3484 zone_leaks_record_count(z),
3485 zone_leaks_sample_rate(z));
3486 } else if (btlog_enable(log) == KERN_SUCCESS) {
3487 z->z_btlog_disabled = NULL;
3488 } else {
3489 log = NULL;
3490 }
3491 os_atomic_store(&z->z_btlog, log, release);
3492 os_atomic_inc(&zleak_active, relaxed);
3493 }
3494 }
3495 }
3496
3497 __startup_func
3498 static void
3499 zleak_init(void)
3500 {
3501 zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3502
3503 zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3504 zleak_max_zonemap_size / 8);
3505
3506 thread_call_setup_with_options(&zone_leaks_callout,
3507 zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3508 THREAD_CALL_OPTIONS_ONCE);
3509 }
3510 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3511
3512 kern_return_t
3513 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3514 {
3515 if (value >= zleak_max_zonemap_size) {
3516 return KERN_INVALID_VALUE;
3517 }
3518
3519 if (arg == &zleak_per_zone_tracking_threshold) {
3520 zleak_per_zone_tracking_threshold = (vm_size_t)value;
3521 zleak_pages_per_zone_wired_threshold = atop(value);
3522 if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3523 thread_call_enter(&zone_leaks_callout);
3524 }
3525 return KERN_SUCCESS;
3526 }
3527
3528 return KERN_INVALID_ARGUMENT;
3529 }
3530
3531 static void
3532 panic_display_zleaks(bool has_syms)
3533 {
3534 bool did_header = false;
3535 vm_address_t bt[BTLOG_MAX_DEPTH];
3536 uint32_t len, count;
3537
3538 zone_foreach(z) {
3539 btlog_t log = z->z_btlog;
3540
3541 if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3542 continue;
3543 }
3544
3545 count = btlog_guess_top(log, bt, &len);
3546 if (count == 0) {
3547 continue;
3548 }
3549
3550 if (!did_header) {
3551 paniclog_append_noflush("Zone (suspected) leak report:\n");
3552 did_header = true;
3553 }
3554
3555 paniclog_append_noflush(" Zone: %s%s\n",
3556 zone_heap_name(z), zone_name(z));
3557 paniclog_append_noflush(" Count: %d (%ld bytes)\n", count,
3558 (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3559 paniclog_append_noflush(" Size: %ld\n",
3560 (long)zone_size_wired(z));
3561 paniclog_append_noflush(" Top backtrace:\n");
3562 for (uint32_t i = 0; i < len; i++) {
3563 if (has_syms) {
3564 paniclog_append_noflush(" %p ", (void *)bt[i]);
3565 panic_print_symbol_name(bt[i]);
3566 paniclog_append_noflush("\n");
3567 } else {
3568 paniclog_append_noflush(" %p\n", (void *)bt[i]);
3569 }
3570 }
3571
3572 kmod_panic_dump(bt, len);
3573 paniclog_append_noflush("\n");
3574 }
3575 }
3576 #endif /* CONFIG_ZLEAKS */
3577
3578 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3579 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3580
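/*
 * Editorial note: zalloc_log()/zfree_log() below record batches of
 * @c count elements.  Within a batch, each element's first word holds
 * the offset to the next element (hence the `addr += *(vm_offset_t *)addr`
 * walk), which is why the backtrace ref is retained once per element.
 */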
3581 #if !KASAN_TBI
3582 __cold
3583 #endif
3584 static void
3585 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3586 {
3587 btlog_t log = zone->z_btlog;
3588 btref_get_flags_t flags = 0;
3589 btref_t ref;
3590
3591 #if !KASAN_TBI
3592 if (!log || !btlog_sample(log)) {
3593 return;
3594 }
3595 #endif
3596 if (get_preemption_level() || zone_supports_vm(zone)) {
3597 /*
3598 * VM zones can be used by btlog, avoid reentrancy issues.
3599 */
3600 flags = BTREF_GET_NOWAIT;
3601 }
3602
3603 ref = btref_get(fp, flags);
3604 while (count-- > 0) {
3605 if (count) {
3606 btref_retain(ref);
3607 }
3608 addr = (vm_offset_t)zstack_tbi_fix(addr);
3609 btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3610 addr += *(vm_offset_t *)addr;
3611 }
3612 }
3613
3614 #define ZALLOC_LOG(zone, addr, count) ({ \
3615 if ((zone)->z_btlog) { \
3616 zalloc_log(zone, addr, count, __builtin_frame_address(0)); \
3617 } \
3618 })
3619
3620 #if !KASAN_TBI
3621 __cold
3622 #endif
3623 static void
3624 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3625 {
3626 btlog_t log = zone->z_btlog;
3627 btref_get_flags_t flags = 0;
3628 btref_t ref;
3629
3630 #if !KASAN_TBI
3631 if (!log) {
3632 return;
3633 }
3634 #endif
3635
3636 /*
3637 * See if we're doing logging on this zone.
3638 *
3639 * There are two styles of logging used depending on
3640 * whether we're trying to catch a leak or corruption.
3641 */
3642 #if !KASAN_TBI
3643 if (btlog_get_type(log) == BTLOG_HASH) {
3644 /*
3645 * We're logging to catch a leak.
3646 *
3647 * Remove any record we might have for this element
3648 * since it's being freed. Note that we may not find it
3649 * if the buffer overflowed and that's OK.
3650 *
3651 * Since the log is of a limited size, old records get
3652 * overwritten if there are more zallocs than zfrees.
3653 */
3654 while (count-- > 0) {
3655 addr = (vm_offset_t)zstack_tbi_fix(addr);
3656 btlog_erase(log, (void *)addr);
3657 addr += *(vm_offset_t *)addr;
3658 }
3659 return;
3660 }
3661 #endif /* !KASAN_TBI */
3662
3663 if (get_preemption_level() || zone_supports_vm(zone)) {
3664 /*
3665 * VM zones can be used by btlog, avoid reentrancy issues.
3666 */
3667 flags = BTREF_GET_NOWAIT;
3668 }
3669
3670 ref = btref_get(fp, flags);
3671 while (count-- > 0) {
3672 if (count) {
3673 btref_retain(ref);
3674 }
3675 addr = (vm_offset_t)zstack_tbi_fix(addr);
3676 btlog_record(log, (void *)addr, ZOP_FREE, ref);
3677 addr += *(vm_offset_t *)addr;
3678 }
3679 }
3680
3681 #define ZFREE_LOG(zone, addr, count) ({ \
3682 if ((zone)->z_btlog) { \
3683 zfree_log(zone, addr, count, __builtin_frame_address(0)); \
3684 } \
3685 })
3686
3687 #else
3688 #define ZALLOC_LOG(...) ((void)0)
3689 #define ZFREE_LOG(...) ((void)0)
3690 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3691 #endif /* !ZALLOC_TEST */
3692 #pragma mark zone (re)fill
3693 #if !ZALLOC_TEST
3694
3695 /*!
3696 * @defgroup Zone Refill
3697 * @{
3698 *
3699 * @brief
3700  * Functions handling the zone refill machinery.
3701 *
3702 * @discussion
3703  * Zones are refilled through two mechanisms: direct expansion and async expansion.
3704 *
3705 * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3706 * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3707 *
3708 * - call @c zone_expand_locked() directly if the caller is allowed to block,
3709 *
3710  * - wake up the asynchronous expansion thread call if the caller is not allowed
3711  *   to block, or if the reserve becomes depleted (see the sketch after this comment).
3712 *
3713 *
3714 * <h2>Synchronous expansion</h2>
3715 *
3716 * This mechanism is actually the only one that may refill a zone, and all the
3717 * other ones funnel through this one eventually.
3718 *
3719 * @c zone_expand_locked() implements the core of the expansion mechanism,
3720 * and will do so while a caller specified predicate is true.
3721 *
3722 * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3723 * - one VM privileged thread,
3724 * - one regular thread.
3725 *
3726 * Regular threads that refill will put down their identity in @c z_expander,
3727 * so that priority inversion avoidance can be implemented.
3728 *
3729 * However, VM privileged threads are allowed to use VM page reserves,
3730 * which allows for the system to recover from extreme memory pressure
3731 * situations, allowing for the few allocations that @c zone_gc() or
3732 * killing processes require.
3733 *
3734 * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3735 * is set. @c z_expander is not necessarily the identity of this VM privileged
3736  * thread (it is when the VM privileged thread came in first; otherwise it
3737  * may be another thread, or even @c THREAD_NULL).
3738 *
3739 * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3740 * spending a whole pointer on priority inheritance for VM privileged threads
3741 * (and other issues related to having two owners), we use the rwlock boost as
3742 * a stop gap to avoid priority inversions.
3743 *
3744 *
3745 * <h2>Chunk wiring policies</h2>
3746 *
3747 * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3748  * to minimize the fragmentation caused by element sizes that do not divide
3749  * a chunk evenly. However, such chunks can be large and hard to fulfill on
3750 * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3751 * 4k page systems).
3752 *
3753 * This is why, when under memory pressure the system allows chunks to be
3754 * partially populated. The metadata of the first page in the chunk maintains
3755 * the count of actually populated pages.
3756 *
3757  * The metadata for addresses assigned to a zone is found on 4 queues:
3758 * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3759 * elements (those can be targeted by @c zone_gc()),
3760 * - @c z_pageq_partial has chunk heads with populated pages that are partially
3761 * used,
3762 * - @c z_pageq_full has chunk heads with populated pages with no free elements
3763 * left,
3764 * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3765 * the zone forever, or the first secondary metadata for a chunk whose
3766 * corresponding page is not populated in the chunk.
3767 *
3768 * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3769  * queue are preferred.
3770 *
3771 *
3772 * <h2>Asynchronous expansion</h2>
3773 *
3774  * This mechanism allows refilling zones that are mostly used by non-blocking
3775 * callers. It relies on a thread call (@c zone_expand_callout) which will
3776 * iterate all zones and refill the ones marked with @c z_async_refilling.
3777 *
3778 * NOTE: If the calling thread for zalloc_noblock is lower priority than
3779 * the thread_call, then zalloc_noblock to an empty zone may succeed.
3780 *
3781 *
3782 * <h2>Dealing with zone allocations from the mach VM code</h2>
3783 *
3784 * The implementation of the mach VM itself uses the zone allocator
3785 * for things like the vm_map_entry data structure. In order to prevent
3786 * a recursion problem when adding more pages to a zone, the VM zones
3787 * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3788 * or any VM map functions to allocate.
3789 *
3790 * Instead, a really simple coalescing first-fit allocator is used
3791  * for this submap, and nothing other than zalloc can allocate from it.
3792 *
3793 * Memory is directly populated which doesn't require allocation of
3794 * VM map entries, and avoids recursion. The cost of this scheme however,
3795 * is that `vm_map_lookup_entry` will not function on those addresses
3796 * (nor any API relying on it).
3797 */
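/*
 * Illustrative sketch (not part of the allocator): roughly how the
 * @c zalloc_ext() refill decision described above plays out.  The
 * @c caller_may_block predicate is a stand-in for the actual flag
 * checks; the fields and functions it references do exist in this file.
 *
 *	if (z->z_elems_free < z->z_elems_rsv / 2) {
 *		if (caller_may_block) {
 *			zone_expand_locked(z, flags);
 *		} else {
 *			zone_expand_async_schedule_if_allowed(z);
 *		}
 *	}
 */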
3798
3799 static void zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems);
3800 static void zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd);
3801 static thread_call_data_t zone_expand_callout;
3802
3803 __attribute__((overloadable))
3804 static inline bool
3805 zone_submap_is_sequestered(zone_submap_idx_t idx)
3806 {
3807 return idx != Z_SUBMAP_IDX_DATA;
3808 }
3809
3810 __attribute__((overloadable))
3811 static inline bool
3812 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3813 {
3814 return zone_submap_is_sequestered(zsflags.z_submap_idx);
3815 }
3816
3817 static inline kma_flags_t
3818 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3819 {
3820 kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3821
3822 if (zsflags.z_noencrypt) {
3823 kmaflags |= KMA_NOENCRYPT;
3824 }
3825
3826 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
3827 kmaflags |= KMA_DATA;
3828 } else if (zsflags.z_submap_idx == Z_SUBMAP_IDX_DATA) {
3829 /*
3830 * assume zones which are manually in the data heap,
3831 * like mbufs, are going to be shared somehow.
3832 */
3833 kmaflags |= KMA_DATA_SHARED;
3834 }
3835
3836 if (flags & Z_NOPAGEWAIT) {
3837 kmaflags |= KMA_NOPAGEWAIT;
3838 }
3839 if (z->z_permanent || (!z->z_destructible &&
3840 zone_submap_is_sequestered(zsflags))) {
3841 kmaflags |= KMA_PERMANENT;
3842 }
3843 if (zsflags.z_submap_from_end) {
3844 kmaflags |= KMA_LAST_FREE;
3845 }
3846
3847
3848 return kmaflags;
3849 }
3850
3851 static inline void
3852 zone_add_wired_pages(zone_t z, uint32_t pages)
3853 {
3854 os_atomic_add(&zone_pages_wired, pages, relaxed);
3855
3856 #if CONFIG_ZLEAKS
3857 if (__improbable(zleak_should_enable_for_zone(z) &&
3858 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3859 thread_call_enter(&zone_leaks_callout);
3860 }
3861 #else
3862 (void)z;
3863 #endif
3864 }
3865
3866 static inline void
3867 zone_remove_wired_pages(zone_t z, uint32_t pages)
3868 {
3869 os_atomic_sub(&zone_pages_wired, pages, relaxed);
3870
3871 #if CONFIG_ZLEAKS
3872 if (__improbable(zleak_should_disable_for_zone(z) &&
3873 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3874 thread_call_enter(&zone_leaks_callout);
3875 }
3876 #else
3877 (void)z;
3878 #endif
3879 }
3880
3881 #if ZSECURITY_CONFIG(ZONE_TAGGING)
3882
3883 static inline void
3884 zone_tag_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3885 {
3886 if (zone->z_percpu) {
3887 zpercpu_foreach_cpu(index) {
3888 vm_memtag_store_tag(addr + ptoa(index), elem_size);
3889 }
3890 }
3891 }
3892
3893 static inline caddr_t
3894 zone_tag_free_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3895 {
3896 if (__improbable((uintptr_t)addr > 0xFF00000000000000ULL)) {
3897 return addr;
3898 }
3899
3900 addr = vm_memtag_generate_and_store_tag(addr, elem_size);
3901 zone_tag_element(zone, addr, elem_size);
3902
3903 return addr;
3904 }
3905
3906 static inline void
3907 zcram_memtag_init(zone_t zone, vm_offset_t base, uint32_t start, uint32_t end)
3908 {
3909 zone_security_flags_t *zsflags = &zone_security_array[zone_index(zone)];
3910
3911 if (!zsflags->z_tag) {
3912 return;
3913 }
3914
3915 vm_size_t elem_size = zone_elem_outer_size(zone);
3916 vm_size_t oob_offs = zone_elem_outer_offs(zone);
3917
3918
3919 for (uint32_t i = start; i < end; i++) {
3920 caddr_t elem_addr = (caddr_t)(base + oob_offs + i * elem_size);
3921
3922 elem_addr = vm_memtag_generate_and_store_tag(elem_addr, elem_size);
3923 zone_tag_element(zone, elem_addr, elem_size);
3924 }
3925 }
3926 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3927 #define zone_tag_free_element(z, a, s) (a)
3928 #define zcram_memtag_init(z, b, s, e) do {} while (0)
3929 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3930
3931 /*!
3932 * @function zcram_and_lock()
3933 *
3934 * @brief
3935 * Prepare some memory for being usable for allocation purposes.
3936 *
3937 * @discussion
3938 * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3939 * to be usable in the zone.
3940 *
3941 * This function assumes the metadata is already populated for the range.
3942 *
3943 * Calling this function with @c pg_start being 0 means that the memory
3944 * is either a partial chunk, or a full chunk, that isn't published anywhere
3945 * and the initialization can happen without locks held.
3946 *
3947 * Calling this function with a non zero @c pg_start means that we are extending
3948 * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3949 * is already usable and published in the zone, so extending it requires holding
3950 * the zone lock.
3951 *
3952 * @param zone The zone to cram new populated pages into
3953 * @param addr The base address for the chunk(s)
3954 * @param pg_va_new The number of virtual pages newly assigned to the zone
3955 * @param pg_start The first newly populated page relative to @a addr.
3956 * @param pg_end The after-last newly populated page relative to @a addr.
3957 * @param lock 0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3958 */
3959 static void
3960 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3961 uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3962 {
3963 zone_id_t zindex = zone_index(zone);
3964 vm_offset_t elem_size = zone_elem_outer_size(zone);
3965 uint32_t free_start = 0, free_end = 0;
3966 uint32_t oob_offs = zone_elem_outer_offs(zone);
3967
3968 struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3969 uint32_t chunk_pages = zone->z_chunk_pages;
3970 bool guarded = meta->zm_guarded;
3971
3972 assert(pg_start < pg_end && pg_end <= chunk_pages);
3973
3974 if (pg_start == 0) {
3975 uint16_t chunk_len = (uint16_t)pg_end;
3976 uint16_t secondary_len = ZM_SECONDARY_PAGE;
3977 bool inline_bitmap = false;
3978
3979 if (zone->z_percpu) {
3980 chunk_len = 1;
3981 secondary_len = ZM_SECONDARY_PCPU_PAGE;
3982 assert(pg_end == zpercpu_count());
3983 }
3984 if (!zone->z_permanent && !zone->z_uses_tags) {
3985 inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3986 }
3987
3988 free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3989
3990 meta[0] = (struct zone_page_metadata){
3991 .zm_index = zindex,
3992 .zm_guarded = guarded,
3993 .zm_inline_bitmap = inline_bitmap,
3994 .zm_chunk_len = chunk_len,
3995 .zm_alloc_size = lock,
3996 };
3997
3998 if (!zone->z_permanent && !inline_bitmap) {
3999 meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
4000 zone->z_chunk_elems, zone->z_uses_tags);
4001 }
4002
4003 for (uint16_t i = 1; i < chunk_pages; i++) {
4004 meta[i] = (struct zone_page_metadata){
4005 .zm_index = zindex,
4006 .zm_guarded = guarded,
4007 .zm_inline_bitmap = inline_bitmap,
4008 .zm_chunk_len = secondary_len,
4009 .zm_page_index = (uint8_t)i,
4010 .zm_bitmap = meta[0].zm_bitmap,
4011 .zm_subchunk_len = (uint8_t)(chunk_pages - i),
4012 };
4013 }
4014
4015 if (inline_bitmap) {
4016 zone_meta_bits_init_inline(meta, free_end);
4017 }
4018 } else {
4019 assert(!zone->z_percpu && !zone->z_permanent);
4020
4021 free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
4022 free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
4023 }
4024
4025 zcram_memtag_init(zone, addr, free_start, free_end);
4026
4027 #if KASAN_CLASSIC
4028 assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
4029 if (zone->z_permanent) {
4030 kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
4031 } else if (zone->z_percpu) {
4032 for (uint32_t i = 0; i < pg_end; i++) {
4033 kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
4034 zone_elem_outer_size(zone),
4035 zone_elem_outer_offs(zone),
4036 zone_elem_redzone(zone));
4037 }
4038 } else {
4039 kasan_zmem_add(addr, ptoa(pg_end),
4040 zone_elem_outer_size(zone),
4041 zone_elem_outer_offs(zone),
4042 zone_elem_redzone(zone));
4043 }
4044 #endif /* KASAN_CLASSIC */
4045
4046 /*
4047 * Insert the initialized pages / metadatas into the right lists.
4048 */
4049
4050 zone_lock(zone);
4051 assert(zone->z_self == zone);
4052
4053 if (pg_start != 0) {
4054 assert(meta->zm_chunk_len == pg_start);
4055
4056 zone_meta_bits_merge(meta, free_start, free_end);
4057 meta->zm_chunk_len = (uint16_t)pg_end;
4058
4059 /*
4060 * consume the zone_meta_lock_in_partial()
4061 * done in zone_expand_locked()
4062 */
4063 zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
4064 zone_meta_remqueue(zone, meta);
4065 }
4066
4067 if (zone->z_permanent || meta->zm_alloc_size) {
4068 zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
4069 } else {
4070 zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4071 zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4072 }
4073 if (pg_end < chunk_pages) {
4074 /* push any non populated residual VA on z_pageq_va */
4075 zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4076 }
4077
4078 zone->z_elems_free += free_end - free_start;
4079 zone->z_elems_avail += free_end - free_start;
4080 zone->z_wired_cur += zone->z_percpu ? 1 : pg_end - pg_start;
4081 if (pg_va_new) {
4082 zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4083 }
4084 if (zone->z_wired_hwm < zone->z_wired_cur) {
4085 zone->z_wired_hwm = zone->z_wired_cur;
4086 }
4087
4088 #if CONFIG_ZLEAKS
4089 if (__improbable(zleak_should_enable_for_zone(zone) &&
4090 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4091 thread_call_enter(&zone_leaks_callout);
4092 }
4093 #endif /* CONFIG_ZLEAKS */
4094
4095 zone_add_wired_pages(zone, pg_end - pg_start);
4096 }
4097
4098 static void
4099 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4100 {
4101 uint32_t chunk_pages = zone->z_chunk_pages;
4102
4103 assert(pages % chunk_pages == 0);
4104 for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4105 zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4106 zone_unlock(zone);
4107 }
4108 }
4109
4110 __startup_func
4111 void
4112 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4113 {
4114 uint32_t pages = (uint32_t)atop(size);
4115
4116 assert(from_zone_map(newmem, size));
4117 assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4118 assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4119
4120 /*
4121 * The early pages we move at the pmap layer can't be "depopulated"
4122 * because there's no vm_page_t for them.
4123 *
4124 * "Lock" them so that they never hit z_pageq_empty.
4125 */
4126 vm_memtag_bzero_unchecked((void *)newmem, size);
4127 zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4128 }
4129
4130 /*!
4131 * @function zone_submap_alloc_sequestered_va
4132 *
4133 * @brief
4134 * Allocates VA without using vm_find_space().
4135 *
4136 * @discussion
4137 * Allocate VA quickly without using the slower vm_find_space() for cases
4138 * when the submaps are fully sequestered.
4139 *
4140 * The VM submap is used to implement the VM itself so it is always sequestered,
4141  * as it can't use kmem_alloc(), which always needs to allocate VM map entries.
4142 * However, it can use vm_map_enter() which tries to coalesce entries, which
4143 * always works, so the VM map only ever needs 2 entries (one for each end).
4144 *
4145 * The RO submap is similarly always sequestered if it exists (as a non
4146 * sequestered RO submap makes very little sense).
4147 *
4148 * The allocator is a very simple bump-allocator
4149 * that allocates from either end.
4150 */
4151 static kern_return_t
4152 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4153 vm_offset_t *addrp)
4154 {
4155 vm_size_t size = ptoa(pages);
4156 vm_map_t map = zone_submap(zsflags);
4157 vm_map_entry_t first, last;
4158 vm_map_offset_t addr;
4159
4160 vm_map_lock(map);
4161
4162 first = vm_map_first_entry(map);
4163 last = vm_map_last_entry(map);
4164
4165 if (first->vme_end + size > last->vme_start) {
4166 vm_map_unlock(map);
4167 return KERN_NO_SPACE;
4168 }
4169
4170 if (zsflags.z_submap_from_end) {
4171 last->vme_start -= size;
4172 addr = last->vme_start;
4173 VME_OFFSET_SET(last, addr);
4174 } else {
4175 addr = first->vme_end;
4176 first->vme_end += size;
4177 }
4178 map->size += size;
4179
4180 vm_map_unlock(map);
4181
4182 *addrp = addr;
4183 return KERN_SUCCESS;
4184 }
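/*
 * Illustration (hedged): with a sequestered submap spanning [A, B), the
 * two boundary entries act as bump pointers: regular allocations extend
 * first->vme_end upward from A, z_submap_from_end allocations pull
 * last->vme_start downward from B, and KERN_NO_SPACE is returned when
 * the two would cross.
 */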
4185
4186 void
4187 zone_fill_initially(zone_t zone, vm_size_t nelems)
4188 {
4189 kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4190 kern_return_t kr;
4191 vm_offset_t addr;
4192 uint32_t pages;
4193 zone_security_flags_t zsflags = zone_security_config(zone);
4194
4195 assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4196 assert(zone->z_elems_avail == 0);
4197
4198 kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4199 pages = zone_alloc_pages_for_nelems(zone, nelems);
4200 if (zone_submap_is_sequestered(zsflags)) {
4201 kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4202 if (kr != KERN_SUCCESS) {
4203 panic("zone_submap_alloc_sequestered_va() "
4204 "of %u pages failed", pages);
4205 }
4206 kernel_memory_populate(addr, ptoa(pages),
4207 kmaflags, VM_KERN_MEMORY_ZONE);
4208 } else {
4209 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4210 kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4211 kmaflags, VM_KERN_MEMORY_ZONE);
4212 }
4213
4214 zone_meta_populate(addr, ptoa(pages));
4215 zcram(zone, addr, pages, 0);
4216 }
4217
4218 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4219 __attribute__((noinline))
4220 static void
4221 zone_scramble_va_and_unlock(
4222 zone_t z,
4223 struct zone_page_metadata *meta,
4224 uint32_t runs,
4225 uint32_t pages,
4226 uint32_t chunk_pages,
4227 uint64_t guard_mask)
4228 {
4229 struct zone_page_metadata *arr[ZONE_MAX_CHUNK_ALLOC_NUM];
4230
4231 for (uint32_t run = 0, n = 0; run < runs; run++) {
4232 arr[run] = meta + n;
4233 n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4234 }
4235
4236 /*
4237 * Fisher–Yates shuffle, for an array with indices [0, n)
4238 *
4239 * for i from n−1 downto 1 do
4240 * j ← random integer such that 0 ≤ j ≤ i
4241 * exchange a[j] and a[i]
4242 *
4243 * The point here is that early allocations aren't at a fixed
4244 * distance from each other.
4245 */
4246 for (uint32_t i = runs - 1; i > 0; i--) {
4247 uint32_t j = zalloc_random_uniform32(0, i + 1);
4248
4249 meta = arr[j];
4250 arr[j] = arr[i];
4251 arr[i] = meta;
4252 }
4253
4254 zone_lock(z);
4255
4256 for (uint32_t i = 0; i < runs; i++) {
4257 zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4258 }
4259 z->z_va_cur += z->z_percpu ? runs : pages;
4260 }
4261
4262 static inline uint32_t
4263 dist_u32(uint32_t a, uint32_t b)
4264 {
4265 return a < b ? b - a : a - b;
4266 }
4267
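/*
 * Clears @c n bits, chosen uniformly at random, among the @c pop bits
 * currently set in @c mask (the inner loop skips `bit` set bits, then
 * __builtin_ctzll() locates the chosen one).
 */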
4268 static uint64_t
4269 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4270 {
4271 for (; n-- > 0; pop--) {
4272 uint32_t bit = zalloc_random_uniform32(0, pop);
4273 uint64_t m = mask;
4274
4275 for (; bit; bit--) {
4276 m &= m - 1;
4277 }
4278
4279 mask ^= 1ull << __builtin_ctzll(m);
4280 }
4281
4282 return mask;
4283 }
4284
4285 /**
4286 * @function zalloc_random_bits
4287 *
4288 * @brief
4289 * Compute a random number with a specified number of bit set in a given width.
4290 *
4291 * @discussion
4292 * This function generates a "uniform" distribution of sets of bits set in
4293 * a given width, with typically less than width/4 calls to random.
4294 *
4295 * @param pop the target number of bits set.
4296 * @param width the number of bits in the random integer to generate.
4297 */
4298 static uint64_t
4299 zalloc_random_bits(uint32_t pop, uint32_t width)
4300 {
4301 uint64_t w_mask = (1ull << width) - 1;
4302 uint64_t mask;
4303 uint32_t cur;
4304
4305 if (3 * width / 4 <= pop) {
4306 mask = w_mask;
4307 cur = width;
4308 } else if (pop <= width / 4) {
4309 mask = 0;
4310 cur = 0;
4311 } else {
4312 /*
4313  * Choosing a random number this way will overwhelmingly
4314  * yield a popcount of about `width / 2` bits, +/- a few.
4315 */
4316 mask = zalloc_random_mask64(width);
4317 cur = __builtin_popcountll(mask);
4318
4319 if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4320 /*
4321 * If the opposite mask has a closer popcount,
4322 * then start with that one as the seed.
4323 */
4324 cur = width - cur;
4325 mask ^= w_mask;
4326 }
4327 }
4328
4329 if (cur < pop) {
4330 /*
4331 * Setting `pop - cur` bits is really clearing that many from
4332 * the opposite mask.
4333 */
4334 mask ^= w_mask;
4335 mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4336 mask ^= w_mask;
4337 } else if (pop < cur) {
4338 mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4339 }
4340
4341 return mask;
4342 }
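/*
 * Worked example (illustrative): for width = 52 and pop = 24,
 * zalloc_random_mask64(52) yields a mask with ~26 bits set on average,
 * so only the ~2-bit difference is fixed up via
 * zalloc_random_clear_n_bits(), i.e. a couple of extra calls to random
 * rather than 24.
 */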
4343 #endif
4344
4345 static void
4346 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4347 {
4348 zone_security_flags_t zsflags = zone_security_config(z);
4349 struct zone_page_metadata *meta;
4350 kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4351 uint32_t chunk_pages = z->z_chunk_pages;
4352 uint32_t runs, pages, guards, guard_pages, rnum;
4353 uint64_t guard_mask = 0;
4354 bool lead_guard = false;
4355 zone_id_t zidx = zone_index(z);
4356 kern_return_t kr;
4357 vm_offset_t addr;
4358
4359 zone_unlock(z);
4360
4361 /*
4362 * A lot of OOB exploitation techniques rely on precise placement
4363 * and interleaving of zone pages. The layout that is sought
4364 * by attackers will be C/P/T types, where:
4365 * - (C)ompromised is the type for which attackers have a bug,
4366 * - (P)adding is used to pad memory,
4367 * - (T)arget is the type that the attacker will attempt to corrupt
4368 * by exploiting (C).
4369 *
4370 * Note that in some cases C==T and P isn't needed.
4371 *
4372 * In order to make those placement games much harder,
4373 * we grow zones by random runs of memory, up to 10 chunks.
4374 * This makes predicting the precise layout of the heap
4375  * considerably more complicated.
4376 *
4377 * Note: this function makes a very heavy use of random,
4378 * however, it is mostly limited to sequestered zones,
4379 * and eventually the layout will be fixed,
4380 * and the usage of random vastly reduced.
4381 *
4382 * For non sequestered zones, there's a single call
4383 * to random in order to decide whether we want
4384 * a guard page or not.
4385 */
4386 pages = chunk_pages;
4387 guards = 0;
4388 runs = 1;
4389 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4390 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4391 runs = ZONE_MAX_CHUNK_ALLOC_NUM;
4392 runs = zalloc_random_uniform32(1, runs + 1);
4393 pages = runs * chunk_pages;
4394 }
4395 static_assert(ZONE_MAX_CHUNK_ALLOC_NUM <= 10,
4396 "make sure that `runs` will never exceed 10");
4397 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4398
4399 /*
4400  * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4401 * guards might be added after each chunk.
4402 *
4403 * Those guard pages are marked with the ZM_PGZ_GUARD
4404 * magical chunk len, and their zm_oob_offs field
4405  * is used to remember an optional shift applied
4406  * to returned elements, in order to right-align them
4407 * as much as possible.
4408 *
4409 * In an adversarial context, while guard pages
4410 * are extremely effective against linear overflow,
4411 * using a predictable frequency of guard pages feels like
4412  * a missed opportunity, which is why we choose to insert
4413  * one guard region (chunk_pages guard pages) with 25% probability,
4414  * with the goal of having ~20% of the allocated VA consist of guard pages (see below).
4415 */
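/*
 * Back-of-the-envelope check of the ~20% figure (illustrative):
 * each run of chunk_pages pages draws a trailing guard region of
 * the same size with probability 128/512 = 1/4, so the expected
 * guard fraction of the allocated VA is (1/4) / (1 + 1/4) = 20%.
 */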
4416 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4417 if (!z->z_percpu) {
4418 /*
4419 * Don't bother with adding guard regions for per-CPU zones, as
4420 * they're not interesting to attackers.
4421 */
4422 for (uint32_t run = 0; run < runs; run++) {
4423 rnum = zalloc_random_uniform32(0, 4 * 128);
4424 guards += (rnum < 128);
4425 }
4426 }
4427 assert3u(guards, <=, runs);
4428
4429 guard_mask = 0;
4430
4431 if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4432 /*
4433 * Several exploitation strategies rely on a C/T (compromised
4434 * then target types) ordering of pages with a sub-page reach
4435 * from C into T.
4436 *
4437 * We want to reliably thwart such exploitations
4438 * and hence force a guard page between alternating
4439 * memory types.
4440 *
4441 * Note: this counts towards the number of guard pages we want.
4442 */
4443 guard_mask |= 1ull << (runs - 1);
4444
4445 if (guards > 1) {
4446 guard_mask |= zalloc_random_bits(guards - 1, runs - 1);
4447 } else {
4448 guards = 1;
4449 }
4450
4451 /*
4452 * While we randomize the chunks lengths, an attacker with
4453 * precise timing control can guess when overflows happen,
4454 * and "measure" the runs, which gives them an indication
4455 * of where the next run start offset is.
4456 *
4457 * In order to make this knowledge unusable, add a guard page
4458 * _before_ the new run with a 25% probability, regardless
4459 * of whether we had enough guard pages.
4460 */
4461 if ((rnum & 3) == 0) {
4462 lead_guard = true;
4463 guards++;
4464 }
4465 } else {
4466 assert3u(runs, ==, 1);
4467 assert3u(guards, <=, 1);
4468 guard_mask = guards << (runs - 1);
4469 }
4470 #else
4471 (void)rnum;
4472 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4473
4474 /* We want guards to be at least the size of the chunk. */
4475 guard_pages = guards * chunk_pages;
4476 if (zone_submap_is_sequestered(zsflags)) {
4477 kr = zone_submap_alloc_sequestered_va(zsflags,
4478 pages + guard_pages, &addr);
4479 } else {
4480 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4481 kr = kmem_alloc(zone_submap(zsflags), &addr,
4482 ptoa(pages + guard_pages), kmaflags, VM_KERN_MEMORY_ZONE);
4483 }
4484
4485 if (kr != KERN_SUCCESS) {
4486 uint64_t zone_size = 0;
4487 zone_t zone_largest = zone_find_largest(&zone_size);
4488 panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4489 "likely due to memory leak in zone [%s%s] "
4490 "(%u%c, %d elements allocated)",
4491 kr, zone_heap_name(z), zone_name(z),
4492 zone_heap_name(zone_largest), zone_name(zone_largest),
4493 mach_vm_size_pretty(zone_size),
4494 mach_vm_size_unit(zone_size),
4495 zone_count_allocated(zone_largest));
4496 }
4497
4498 meta = zone_meta_from_addr(addr);
4499 zone_meta_populate(addr, ptoa(pages + guard_pages));
4500
4501 /*
4502 * Handle the leading guard page, if any
4503 */
4504 if (lead_guard) {
4505 for (uint32_t i = 0; i < chunk_pages; i++) {
4506 meta[i].zm_index = zidx;
4507 meta[i].zm_chunk_len = ZM_PGZ_GUARD;
4508 meta[i].zm_guarded = true;
4509 meta++;
4510 }
4511 }
4512
4513 for (uint32_t run = 0, n = 0; run < runs; run++) {
4514 bool guarded = (guard_mask >> run) & 1;
4515
4516 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4517 meta[n].zm_index = zidx;
4518 meta[n].zm_guarded = guarded;
4519 }
4520 if (guarded) {
4521 for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4522 meta[n].zm_index = zidx;
4523 meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4524 }
4525 }
4526 }
4527 if (guards) {
4528 os_atomic_add(&zone_guard_pages, guard_pages, relaxed);
4529 }
4530
4531 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4532 if (__improbable(zone_caching_disabled < 0)) {
4533 return zone_scramble_va_and_unlock(z, meta, runs, pages,
4534 chunk_pages, guard_mask);
4535 }
4536 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4537
4538 zone_lock(z);
4539
4540 for (uint32_t run = 0, n = 0; run < runs; run++) {
4541 zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4542 n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4543 }
4544 z->z_va_cur += z->z_percpu ? runs : pages;
4545 }
4546
4547 static inline void
4548 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4549 {
4550 #if DEBUG || DEVELOPMENT
4551 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_START,
4552 size, 0, 0, 0);
4553 #else
4554 (void)size;
4555 #endif
4556 }
4557
4558 static inline void
4559 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4560 {
4561 task_t task = current_task_early();
4562 if (pages && task) {
4563 counter_add(&task->pages_grabbed_kern, pages);
4564 }
4565 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
4566 pages, 0, 0, 0);
4567 }
4568
4569 __attribute__((noinline))
4570 static void
4571 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4572 {
4573 uint64_t wait_start = 0;
4574 long mapped;
4575
4576 sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4577
4578 if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4579 return;
4580 }
4581
4582 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4583
4584 /*
4585 * If the zone map is really exhausted, wait on the GC thread,
4586 * donating our priority (which is important because the GC
4587 * thread is at a rather low priority).
4588 */
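/*
 * The wait below backs off triangularly: wait_ms = n * (n + 1) / 2,
 * i.e. 1, 3, 6, 10, ... ms per iteration, panicking once
 * zone_exhausted_timeout (if set) is exceeded.
 */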
4589 for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4590 uint32_t wait_ms = n * (n + 1) / 2;
4591 uint64_t interval;
4592
4593 if (n == 1) {
4594 wait_start = mach_absolute_time();
4595 } else {
4596 sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4597 }
4598 if (zone_exhausted_timeout > 0 &&
4599 wait_ms > zone_exhausted_timeout) {
4600 panic("zone map exhaustion: waited for %dms "
4601 "(pages: %ld, max: %ld, wanted: %d)",
4602 wait_ms, mapped, zone_pages_wired_max, pgs);
4603 }
4604
4605 clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4606 &interval);
4607
4608 lck_spin_lock(&zone_exhausted_lock);
4609 lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4610 LCK_SLEEP_UNLOCK, &zone_pages_wired,
4611 vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4612
4613 mapped = os_atomic_load(&zone_pages_wired, relaxed);
4614 }
4615 }
4616
4617 static bool
4618 zone_expand_wait_for_pages(bool waited)
4619 {
4620 if (waited) {
4621 return false;
4622 }
4623 #if DEBUG || DEVELOPMENT
4624 if (zalloc_simulate_vm_pressure) {
4625 return false;
4626 }
4627 #endif /* DEBUG || DEVELOPMENT */
4628 return !vm_pool_low();
4629 }
4630
4631 static inline void
4632 zone_expand_async_schedule_if_allowed(zone_t zone)
4633 {
4634 if (zone->z_async_refilling || zone->no_callout) {
4635 return;
4636 }
4637
4638 if (zone_exhausted(zone)) {
4639 return;
4640 }
4641
4642 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4643 return;
4644 }
4645
4646 if (!vm_pool_low() || zone_supports_vm(zone)) {
4647 zone->z_async_refilling = true;
4648 thread_call_enter(&zone_expand_callout);
4649 }
4650 }
4651
4652 __attribute__((noinline))
4653 static bool
4654 zalloc_expand_drain_exhausted_caches_locked(zone_t z)
4655 {
4656 struct zone_depot zd;
4657 zone_magazine_t mag = NULL;
4658
4659 if (z->z_depot_size) {
4660 z->z_depot_size = 0;
4661 z->z_depot_cleanup = true;
4662
4663 zone_depot_init(&zd);
4664 zone_depot_trim(z, 0, &zd);
4665
4666 zone_recirc_lock_nopreempt(z);
4667 if (zd.zd_full) {
4668 zone_depot_move_full(&z->z_recirc,
4669 &zd, zd.zd_full, NULL);
4670 }
4671 if (zd.zd_empty) {
4672 zone_depot_move_empty(&z->z_recirc,
4673 &zd, zd.zd_empty, NULL);
4674 }
4675 zone_recirc_unlock_nopreempt(z);
4676 }
4677
4678 zone_recirc_lock_nopreempt(z);
4679 if (z->z_recirc.zd_full) {
4680 mag = zone_depot_pop_head_full(&z->z_recirc, z);
4681 }
4682 zone_recirc_unlock_nopreempt(z);
4683
4684 if (mag) {
4685 zone_reclaim_elements(z, zc_mag_size(), mag->zm_elems);
4686 zone_magazine_free(mag);
4687 }
4688
4689 return mag != NULL;
4690 }
4691
4692 static bool
4693 zalloc_needs_refill(zone_t zone, zalloc_flags_t flags)
4694 {
4695 if (zone->z_elems_free > zone->z_elems_rsv) {
4696 return false;
4697 }
4698 if (!zone_exhausted(zone)) {
4699 return true;
4700 }
4701 if (zone->z_pcpu_cache && zone->z_depot_size) {
4702 if (zalloc_expand_drain_exhausted_caches_locked(zone)) {
4703 return false;
4704 }
4705 }
4706 return (flags & Z_NOFAIL) != 0;
4707 }
4708
4709 static void
4710 zone_wakeup_exhausted_waiters(zone_t z)
4711 {
4712 z->z_exhausted_wait = false;
4713 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, false);
4714 thread_wakeup(&z->z_expander);
4715 }
4716
4717 __attribute__((noinline))
4718 static void
4719 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)
4720 {
4721 if (z->z_pcpu_cache && z->z_depot_size &&
4722 zalloc_expand_drain_exhausted_caches_locked(z)) {
4723 return;
4724 }
4725
4726 if (!z->z_exhausted_wait) {
4727 zone_recirc_lock_nopreempt(z);
4728 z->z_exhausted_wait = true;
4729 zone_recirc_unlock_nopreempt(z);
4730 EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, true);
4731 }
4732
4733 assert_wait(&z->z_expander, TH_UNINT);
4734 zone_unlock(z);
4735 thread_block(THREAD_CONTINUE_NULL);
4736 zone_lock(z);
4737 }
4738
4739 static pmap_mapping_type_t
4740 zone_mapping_type(zone_t z)
4741 {
4742 zone_security_flags_t zsflags = zone_security_config(z);
4743
4744 /*
4745  * If the zone's z_submap_idx is neither Z_SUBMAP_IDX_DATA nor
4746 * Z_SUBMAP_IDX_READ_ONLY, mark the corresponding mapping
4747 * type as PMAP_MAPPING_TYPE_RESTRICTED.
4748 */
4749 switch (zsflags.z_submap_idx) {
4750 case Z_SUBMAP_IDX_DATA:
4751 return PMAP_MAPPING_TYPE_DEFAULT;
4752 case Z_SUBMAP_IDX_READ_ONLY:
4753 return PMAP_MAPPING_TYPE_ROZONE;
4754 default:
4755 return PMAP_MAPPING_TYPE_RESTRICTED;
4756 }
4757 }
4758
4759 static vm_prot_t
4760 zone_page_prot(zone_security_flags_t zsflags)
4761 {
4762 switch (zsflags.z_submap_idx) {
4763 case Z_SUBMAP_IDX_READ_ONLY:
4764 return VM_PROT_READ;
4765 default:
4766 return VM_PROT_READ | VM_PROT_WRITE;
4767 }
4768 }
4769
4770 static void
4771 zone_expand_locked(zone_t z, zalloc_flags_t flags)
4772 {
4773 zone_security_flags_t zsflags = zone_security_config(z);
4774 struct zone_expand ze = {
4775 .ze_thread = current_thread(),
4776 };
4777
4778 if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4779 ze.ze_thread->options |= TH_OPT_VMPRIV;
4780 ze.ze_clear_priv = true;
4781 }
4782
4783 if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4784 /*
4785 * When the thread is VM privileged,
4786 * vm_page_grab() will call VM_PAGE_WAIT()
4787  * without our knowledge, so we must unfortunately
4788  * assume that it will be called.
4789 *
4790 * In practice it's not a big deal because
4791 * Z_NOPAGEWAIT is not really used on zones
4792 * that VM privileged threads are going to expand.
4793 */
4794 ze.ze_pg_wait = true;
4795 ze.ze_vm_priv = true;
4796 }
4797
4798 for (;;) {
4799 if (!z->z_permanent && !zalloc_needs_refill(z, flags)) {
4800 goto out;
4801 }
4802
4803 if (z->z_expander == NULL) {
4804 z->z_expander = &ze;
4805 break;
4806 }
4807
4808 if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4809 change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4810 ze.ze_next = z->z_expander;
4811 z->z_expander = &ze;
4812 break;
4813 }
4814
4815 if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4816 goto out;
4817 }
4818
4819 z->z_expanding_wait = true;
4820 hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4821 LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4822 TH_UNINT, TIMEOUT_WAIT_FOREVER);
4823 }
4824
4825 do {
4826 struct zone_page_metadata *meta = NULL;
4827 uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4828 vm_page_t page_list = NULL;
4829 vm_offset_t addr = 0;
4830 int waited = 0;
4831
4832 if ((flags & Z_NOFAIL) && zone_exhausted(z)) {
4833 __ZONE_EXHAUSTED_AND_WAITING_HARD__(z);
4834 continue; /* reevaluate if we really need it */
4835 }
4836
4837 /*
4838 * While we hold the zone lock, look if there's VA we can:
4839 * - complete from partial pages,
4840 * - reuse from the sequester list.
4841 *
4842 * When the page is being populated we pretend we allocated
4843 * an extra element so that zone_gc() can't attempt to free
4844 * the chunk (as it could become empty while we wait for pages).
4845 */
4846 if (zone_pva_is_null(z->z_pageq_va)) {
4847 zone_allocate_va_locked(z, flags);
4848 }
4849
4850 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4851 addr = zone_meta_to_addr(meta);
4852 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4853 cur_pages = meta->zm_page_index;
4854 meta -= cur_pages;
4855 addr -= ptoa(cur_pages);
4856 zone_meta_lock_in_partial(z, meta, cur_pages);
4857 }
4858 zone_unlock(z);
4859
4860 /*
4861 * And now allocate pages to populate our VA.
4862 */
4863 min_pages = z->z_chunk_pages;
4864 #if !KASAN_CLASSIC
4865 if (!z->z_percpu) {
4866 min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4867 zone_elem_outer_size(z)));
4868 }
4869 #endif /* !KASAN_CLASSIC */
4870
4871 /*
4872 * Trigger jetsams via VM_pageout GC
4873 * if we're running out of zone memory
4874 */
4875 if (__improbable(zone_map_nearing_exhaustion())) {
4876 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4877 }
4878
4879 ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4880
4881 while (pages < z->z_chunk_pages - cur_pages) {
4882 vm_grab_options_t grab_options = VM_PAGE_GRAB_NOPAGEWAIT;
4883 vm_page_t m;
4884
4885 m = vm_page_grab_options(grab_options);
4886
4887 if (m) {
4888 pages++;
4889 m->vmp_snext = page_list;
4890 page_list = m;
4891 vm_page_zero_fill(m);
4894 continue;
4895 }
4896
4897 if (pages >= min_pages &&
4898 !zone_expand_wait_for_pages(waited)) {
4899 break;
4900 }
4901
4902 if ((flags & Z_NOPAGEWAIT) == 0) {
4903 /*
4904 * The first time we're about to wait for pages,
4905 * mention that to waiters and wake them all.
4906 *
4907 * Set `ze_pg_wait` in our zone_expand context
4908 * so that waiters who care do not wait again.
4909 */
4910 if (!ze.ze_pg_wait) {
4911 zone_lock(z);
4912 if (z->z_expanding_wait) {
4913 z->z_expanding_wait = false;
4914 wakeup_all_with_inheritor(&z->z_expander,
4915 THREAD_AWAKENED);
4916 }
4917 ze.ze_pg_wait = true;
4918 zone_unlock(z);
4919 }
4920
4921 waited++;
4922 VM_PAGE_WAIT();
4923 continue;
4924 }
4925
4926 /*
4927 * Undo everything and bail out:
4928 *
4929 * - free pages
4930 * - undo the fake allocation if any
4931 * - put the VA back on the VA page queue.
4932 */
4933 vm_page_free_list(page_list, FALSE);
4934 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4935
4936 zone_lock(z);
4937
4938 zone_expand_async_schedule_if_allowed(z);
4939
4940 if (cur_pages) {
4941 zone_meta_unlock_from_partial(z, meta, cur_pages);
4942 }
4943 if (meta) {
4944 zone_meta_queue_push(z, &z->z_pageq_va,
4945 meta + cur_pages);
4946 }
4947 goto page_shortage;
4948 }
4949 vm_object_t object;
4950 object = kernel_object_default;
4951 vm_object_lock(object);
4952
4953 kernel_memory_populate_object_and_unlock(object,
4954 addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4955 zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4956 zone_page_prot(zsflags), zone_mapping_type(z));
4957
4958 ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4959
4960 zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4961
4962 /*
4963 * permanent zones only try once,
4964 * the retry loop is in the caller
4965 */
4966 } while (!z->z_permanent && zalloc_needs_refill(z, flags));
4967
4968 page_shortage:
4969 if (z->z_expander == &ze) {
4970 z->z_expander = ze.ze_next;
4971 } else {
4972 assert(z->z_expander->ze_next == &ze);
4973 z->z_expander->ze_next = NULL;
4974 }
4975 if (z->z_expanding_wait) {
4976 z->z_expanding_wait = false;
4977 wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4978 }
4979 out:
4980 if (ze.ze_clear_priv) {
4981 ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4982 }
4983 }
4984
4985 static void
4986 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4987 {
4988 zone_foreach(z) {
4989 if (z->no_callout) {
4990 /* z_async_refilling will never be set */
4991 continue;
4992 }
4993
4994 if (!z->z_async_refilling) {
4995 /*
4996  * Avoid locking all zones: the one(s) we're looking
4997  * for had their bit set _before_ thread_call_enter()
4998  * was called. If we fail to observe the bit, it means
4999  * the thread call has been "dinged" again and we'll
5000  * notice it then.
5001 */
5002 continue;
5003 }
5004
5005 zone_lock(z);
5006 if (z->z_self && z->z_async_refilling) {
5007 zone_expand_locked(z, Z_WAITOK);
5008 /*
5009  * Clearing _after_ we grow is important, so that we
5010  * avoid waking up the thread call while we grow and
5011  * causing it to run a second time.
5012 */
5013 z->z_async_refilling = false;
5014 }
5015 zone_unlock(z);
5016 }
5017 }
5018
5019 #endif /* !ZALLOC_TEST */
5020 #pragma mark zone jetsam integration
5021 #if !ZALLOC_TEST
5022
5023 /*
5024 * We're being very conservative here and picking a value of 95%. We might need to lower this if
5025 * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
5026 */
5027 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
5028
5029 /*
5030 * Threshold above which largest zones should be included in the panic log
5031 */
5032 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
5033
5034 /*
5035 * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
5036 * where X=zone_map_jetsam_limit.
5037 *
5038 * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
5039 */
5040 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
5041 ZONE_MAP_JETSAM_LIMIT_DEFAULT);
5042
5043 kern_return_t
5044 zone_map_jetsam_set_limit(uint32_t value)
5045 {
5046 if (value <= 0 || value > 100) {
5047 return KERN_INVALID_VALUE;
5048 }
5049
5050 zone_map_jetsam_limit = value;
5051 os_atomic_store(&zone_pages_jetsam_threshold,
5052 zone_pages_wired_max * value / 100, relaxed);
5053 return KERN_SUCCESS;
5054 }
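/*
 * Worked example (illustrative): with zone_pages_wired_max = 1,000,000
 * pages and the default 95% limit, zone_pages_jetsam_threshold becomes
 * 950,000 pages; zone_map_nearing_exhaustion() reports true once that
 * many zone pages are wired.
 */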
5055
5056 void
5057 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
5058 {
5059 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5060 *current_size = ptoa_64(phys_pages);
5061 *capacity = ptoa_64(zone_pages_wired_max);
5062 }
5063
5064 void
5065 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
5066 {
5067 zone_t largest_zone = zone_find_largest(zone_size);
5068
5069 /*
5070 * Append kalloc heap name to zone name (if zone is used by kalloc)
5071 */
5072 snprintf(zone_name, zone_name_len, "%s%s",
5073 zone_heap_name(largest_zone), largest_zone->z_name);
5074 }
5075
5076 static bool
5077 zone_map_nearing_threshold(unsigned int threshold)
5078 {
5079 uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5080 return phys_pages * 100 > zone_pages_wired_max * threshold;
5081 }
5082
5083 bool
5084 zone_map_nearing_exhaustion(void)
5085 {
5086 vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
5087
5088 return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
5089 }
5090
5091
5092 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
5093
5094 /*
5095 * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
5096 * to walk through the jetsam priority bands and kill processes.
5097 */
5098 static zone_t
5099 kill_process_in_largest_zone(void)
5100 {
5101 pid_t pid = -1;
5102 uint64_t zone_size = 0;
5103 zone_t largest_zone = zone_find_largest(&zone_size);
5104
5105 printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
5106 ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
5107 ptoa_64(zone_pages_wired_max),
5108 (uint64_t)zone_submaps_approx_size(),
5109 (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
5110 zone_map_jetsam_limit);
5111 printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
5112 largest_zone->z_name, (uintptr_t)zone_size);
5113
5114 /*
5115  * We want to make sure we don't call this function from userspace,
5116  * or we could end up trying to synchronously kill the process
5117 * whose context we're in, causing the system to hang.
5118 */
5119 assert(current_task() == kernel_task);
5120
5121 /*
5122 * If vm_object_zone is the largest, check to see if the number of
5123 * elements in vm_map_entry_zone is comparable.
5124 *
5125 * If so, consider vm_map_entry_zone as the largest. This lets us target
5126 * a specific process to jetsam to quickly recover from the zone map
5127 * bloat.
5128 */
5129 if (largest_zone == vm_object_zone) {
5130 unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
5131 unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
5132 /* Is the VM map entries zone count >= 98% of the VM objects zone count? */
5133 if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
5134 largest_zone = vm_map_entry_zone;
5135 printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
5136 (uintptr_t)zone_size_wired(largest_zone));
5137 }
5138 }
5139
5140 /* TODO: Extend this to check for the largest process in other zones as well. */
5141 if (largest_zone == vm_map_entry_zone) {
5142 pid = find_largest_process_vm_map_entries();
5143 } else {
5144 printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
5145 "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
5146 largest_zone->z_name);
5147 }
5148 if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
5149 printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
5150 }
5151
5152 return largest_zone;
5153 }
5154
5155 #endif /* !ZALLOC_TEST */
5156 #pragma mark probabilistic gzalloc
5157 #if !ZALLOC_TEST
5158 #if CONFIG_PROB_GZALLOC
5159
5160 extern uint32_t random(void);
5161 struct pgz_backtrace {
5162 uint32_t pgz_depth;
5163 int32_t pgz_bt[MAX_ZTRACE_DEPTH];
5164 };
5165
5166 static int32_t PERCPU_DATA(pgz_sample_counter);
5167 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
5168 static uint32_t pgz_uses; /* number of zones using PGZ */
5169 static int32_t pgz_slot_avail;
5170 #if OS_ATOMIC_HAS_LLSC
5171 struct zone_page_metadata *pgz_slot_head;
5172 #else
5173 static struct pgz_slot_head {
5174 uint32_t psh_count;
5175 uint32_t psh_slot;
5176 } pgz_slot_head;
5177 #endif
5178 struct zone_page_metadata *pgz_slot_tail;
5179 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5180
5181 static struct zone_page_metadata *
5182 pgz_meta_raw(uint32_t index)
5183 {
5184 return VM_FAR_ADD_PTR_UNBOUNDED(zone_info.zi_pgz_meta, index);
5185 }
5186
5187 static struct zone_page_metadata *
5188 pgz_meta(uint32_t index)
5189 {
5190 return pgz_meta_raw(2 * index + 1);
5191 }
5192
5193 static struct pgz_backtrace *
5194 pgz_bt(uint32_t slot, bool free)
5195 {
5196 /*
5197 * While we could use a bounds checked variant, slot is generally
5198 * trustworthy and so it isn't necessary.
5199 */
5200 return VM_FAR_ADD_PTR_UNBOUNDED(pgz_backtraces, 2 * slot + free);
5201 }
5202
5203 static void
5204 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5205 {
5206 struct backtrace_control ctl = {
5207 .btc_frame_addr = (uintptr_t)fp,
5208 };
5209
5210 bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5211 (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5212 }
5213
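/*
 * PGZ slots are interleaved with unmapped guard pages: slot @c s maps to
 * virtual page 2*s + 1 of @c zi_pgz_range (see pgz_addr()), so every even
 * page is a guard; pgz_meta() mirrors this by using metadata index 2*s + 1.
 */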
5214 static uint32_t
5215 pgz_slot(vm_offset_t addr)
5216 {
5217 return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5218 }
5219
5220 static vm_offset_t
5221 pgz_addr(uint32_t slot)
5222 {
5223 return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5224 }
5225
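/*
 * Decide whether to divert this allocation to PGZ: the per-CPU counter
 * is drawn uniformly from [0, 2 * pgz_sample_rate), so on average one in
 * pgz_sample_rate eligible allocations returns true (provided free slots
 * remain).
 */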
5226 static bool
5227 pgz_sample(vm_offset_t addr, vm_size_t esize)
5228 {
5229 int32_t *counterp, cnt;
5230
5231 if (zone_addr_size_crosses_page(addr, esize)) {
5232 return false;
5233 }
5234
5235 /*
5236 * Note: accessing pgz_sample_counter is racy but this is
5237 * kind of acceptable given that this is not
5238 * a security load bearing feature.
5239 */
5240
5241 counterp = PERCPU_GET(pgz_sample_counter);
5242 cnt = *counterp;
5243 if (__probable(cnt > 0)) {
5244 *counterp = cnt - 1;
5245 return false;
5246 }
5247
5248 if (pgz_slot_avail <= 0) {
5249 return false;
5250 }
5251
5252 /*
5253 * zalloc_random_uniform() might block, so when preemption is disabled,
5254 * set the counter to `-1` which will cause the next allocation
5255 * that can block to generate a new random value.
5256 *
5257 * No allocation on this CPU will sample until then.
5258 */
5259 if (get_preemption_level()) {
5260 *counterp = -1;
5261 } else {
5262 *counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5263 }
5264
5265 return cnt == 0;
5266 }
5267
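/*
 * Free PGZ slots are kept on a singly-linked list threaded through
 * zm_pgz_slot_next: pgz_slot_free() enqueues at pgz_slot_tail,
 * pgz_slot_alloc() pops from pgz_slot_head, and pgz_slot_avail tracks
 * how many slots can still be claimed.
 */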
5268 static inline bool
5269 pgz_slot_alloc(uint32_t *slot)
5270 {
5271 struct zone_page_metadata *m;
5272 uint32_t tries = 100;
5273
5274 disable_preemption();
5275
5276 #if OS_ATOMIC_USE_LLSC
5277 int32_t ov, nv;
5278 os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5279 if (__improbable(ov <= 0)) {
5280 os_atomic_rmw_loop_give_up({
5281 enable_preemption();
5282 return false;
5283 });
5284 }
5285 nv = ov - 1;
5286 });
5287 #else
5288 if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5289 os_atomic_inc(&pgz_slot_avail, relaxed);
5290 enable_preemption();
5291 return false;
5292 }
5293 #endif
5294
5295 again:
5296 if (__improbable(tries-- == 0)) {
5297 /*
5298 * Too much contention,
5299 * extremely unlikely but do not stay stuck.
5300 */
5301 os_atomic_inc(&pgz_slot_avail, relaxed);
5302 enable_preemption();
5303 return false;
5304 }
5305
5306 #if OS_ATOMIC_HAS_LLSC
5307 uint32_t castries = 20;
5308 do {
5309 if (__improbable(castries-- == 0)) {
5310 /*
5311 * rdar://115922110 On many many cores devices,
5312 * this can fail for a very long time.
5313 */
5314 goto again;
5315 }
5316
5317 m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5318 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5319 /*
5320 * Either we are waiting for an enqueuer (unlikely)
5321 * or we are competing with another core and
5322 * are looking at a popped element.
5323 */
5324 os_atomic_clear_exclusive();
5325 goto again;
5326 }
5327 } while (!os_atomic_store_exclusive(&pgz_slot_head,
5328 m->zm_pgz_slot_next, relaxed));
5329 #else
5330 struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5331 struct pgz_slot_head ov, nv;
5332 os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5333 m = pgz_meta_raw(ov.psh_slot * 2);
5334 if (__improbable(m->zm_pgz_slot_next == NULL)) {
5335 /*
5336 * Either we are waiting for an enqueuer (unlikely)
5337 * or we are competing with another core and
5338 * are looking at a popped element.
5339 */
5340 os_atomic_rmw_loop_give_up(goto again);
5341 }
5342 nv.psh_count = ov.psh_count + 1;
5343 nv.psh_slot = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5344 });
5345 #endif
5346
5347 enable_preemption();
5348
5349 m->zm_pgz_slot_next = NULL;
5350 *slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5351 return true;
5352 }
5353
5354 static inline bool
5355 pgz_slot_free(uint32_t slot)
5356 {
5357 struct zone_page_metadata *m = pgz_meta_raw(2 * slot);
5358 struct zone_page_metadata *t;
5359
5360 disable_preemption();
5361 t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5362 os_atomic_store(&t->zm_pgz_slot_next, m, release);
5363 os_atomic_inc(&pgz_slot_avail, relaxed);
5364 enable_preemption();
5365
5366 return true;
5367 }
5368
5369 /*!
5370 * @function pgz_protect()
5371 *
5372 * @brief
5373 * Try to protect an allocation with PGZ.
5374 *
5375 * @param zone The zone the allocation was made against.
5376 * @param addr An allocated element address to protect.
5377 * @param fp The caller frame pointer (for the backtrace).
5378 * @returns The new address for the element, or @c addr.
5379 */
5380 __attribute__((noinline))
5381 static vm_offset_t
5382 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5383 {
5384 kern_return_t kr;
5385 uint32_t slot;
5386 uint_t flags = 0;
5387
5388 if (!pgz_slot_alloc(&slot)) {
5389 return addr;
5390 }
5391
5392 /*
5393 * Try to double-map the page (may fail if Z_NOWAIT).
5394 * We will always find a PA because pgz_init() pre-expanded the pmap.
5395 */
5396 pmap_paddr_t pa = kvtophys(trunc_page(addr));
5397 vm_offset_t new_addr = pgz_addr(slot);
5398 kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5399 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, flags, TRUE,
5400 get_preemption_level() ? (PMAP_OPTIONS_NOWAIT | PMAP_OPTIONS_NOPREEMPT) : 0,
5401 NULL, PMAP_MAPPING_TYPE_INFER);
5402
5403 if (__improbable(kr != KERN_SUCCESS)) {
5404 pgz_slot_free(slot);
5405 return addr;
5406 }
5407
5408 struct zone_page_metadata tmp = {
5409 .zm_chunk_len = ZM_PGZ_ALLOCATED,
5410 .zm_index = zone_index(zone),
5411 };
5412 struct zone_page_metadata *meta = pgz_meta(slot);
5413
5414 os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5415 os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5416 pgz_backtrace(pgz_bt(slot, false), fp);
5417
5418 return new_addr + (addr & PAGE_MASK);
5419 }
5420
5421 /*!
5422 * @function pgz_unprotect()
5423 *
5424 * @brief
5425 * Releases a PGZ slot and returns the original address of a freed element.
5426 *
5427 * @param addr A PGZ protected element address.
5428 * @param fp The caller frame pointer (for the backtrace).
5429 * @returns The non-protected address for the element
5430 * that was passed to @c pgz_protect().
5431 */
5432 __attribute__((noinline))
5433 static vm_offset_t
5434 pgz_unprotect(vm_offset_t addr, void *fp)
5435 {
5436 struct zone_page_metadata *meta;
5437 struct zone_page_metadata tmp;
5438 uint32_t slot;
5439
5440 slot = pgz_slot(addr);
5441 meta = zone_meta_from_addr(addr);
5442 tmp = *meta;
5443 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5444 goto double_free;
5445 }
5446
5447 pmap_remove_options(kernel_pmap, vm_memtag_canonicalize_kernel(trunc_page(addr)),
5448 vm_memtag_canonicalize_kernel(trunc_page(addr) + PAGE_SIZE),
5449 PMAP_OPTIONS_REMOVE | PMAP_OPTIONS_NOPREEMPT);
5450
5451 pgz_backtrace(pgz_bt(slot, true), fp);
5452
5453 tmp.zm_chunk_len = ZM_PGZ_FREE;
5454 tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5455 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5456 goto double_free;
5457 }
5458
5459 pgz_slot_free(slot);
5460 return tmp.zm_pgz_orig_addr;
5461
5462 double_free:
5463 panic_fault_address = addr;
5464 meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5465 panic("probabilistic gzalloc double free: %p", (void *)addr);
5466 }
5467
5468 bool
5469 pgz_owned(mach_vm_address_t addr)
5470 {
5471 return mach_vm_range_contains(&zone_info.zi_pgz_range, vm_memtag_canonicalize_kernel(addr));
5472 }
5473
5474
5475 __attribute__((always_inline))
5476 vm_offset_t
5477 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5478 {
5479 struct zone_page_metadata *meta;
5480
5481 if (__probable(!pgz_owned(addr))) {
5482 return (vm_offset_t)addr;
5483 }
5484
5485 if (zone_addr_size_crosses_page(addr, size)) {
5486 panic("invalid size for PGZ protected address %p:%p",
5487 (void *)addr, (void *)(addr + size));
5488 }
5489
5490 meta = zone_meta_from_addr((vm_offset_t)addr);
5491 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5492 panic_fault_address = (vm_offset_t)addr;
5493 panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5494 }
5495
5496 return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5497 }
5498
5499 __attribute__((always_inline))
5500 vm_offset_t
5501 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5502 {
5503 struct zone_page_metadata *meta;
5504 struct zone_page_metadata tmp;
5505
5506 if (__probable(!pgz_owned(addr))) {
5507 return addr;
5508 }
5509
5510 meta = zone_meta_from_addr(addr);
5511 tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5512
5513 addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5514
5515 if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5516 return 0;
5517 }
5518
5519 if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5520 return 0;
5521 }
5522
5523 return addr;
5524 }
5525
5526 static void
5527 pgz_zone_init(zone_t z)
5528 {
5529 char zn[MAX_ZONE_NAME];
5530 char zv[MAX_ZONE_NAME];
5531 char key[30];
5532
5533 if (zone_elem_inner_size(z) > PAGE_SIZE) {
5534 return;
5535 }
5536
5537 if (pgz_all) {
5538 os_atomic_inc(&pgz_uses, relaxed);
5539 z->z_pgz_tracked = true;
5540 return;
5541 }
5542
5543 snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5544
5545 for (int i = 1;; i++) {
5546 snprintf(key, sizeof(key), "pgz%d", i);
5547 if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5548 break;
5549 }
5550 if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5551 os_atomic_inc(&pgz_uses, relaxed);
5552 z->z_pgz_tracked = true;
5553 break;
5554 }
5555 }
5556 }
5557
5558 __startup_func
5559 static vm_size_t
5560 pgz_get_size(void)
5561 {
5562 if (pgz_slots == UINT32_MAX) {
5563 /*
5564 * Scale with RAM size: roughly 256 slots per GB (one slot per 4MB).
5565 */
5566 pgz_slots = (uint32_t)(sane_size >> 22);
5567 }
5568
5569 /*
5570 * Make sure that the slot allocation scheme works.
5571 * see pgz_slot_alloc() / pgz_slot_free();
5572 */
5573 if (pgz_slots < zpercpu_count() * 4) {
5574 pgz_slots = zpercpu_count() * 4;
5575 }
5576 if (pgz_slots >= UINT16_MAX) {
5577 pgz_slots = UINT16_MAX - 1;
5578 }
5579
5580 /*
5581 * Quarantine is 33% of slots by default, no more than 90%.
5582 */
5583 if (pgz_quarantine == 0) {
5584 pgz_quarantine = pgz_slots / 3;
5585 }
5586 if (pgz_quarantine > pgz_slots * 9 / 10) {
5587 pgz_quarantine = pgz_slots * 9 / 10;
5588 }
5589 pgz_slot_avail = pgz_slots - pgz_quarantine;
5590
5591 return ptoa(2 * pgz_slots + 1);
5592 }
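/*
 * Worked example of the sizing above (hypothetical 4GB device, defaults,
 * illustration only):
 *
 *     pgz_slots      = 4GB >> 22          = 1024 slots
 *     pgz_quarantine = 1024 / 3           = 341 slots
 *     pgz_slot_avail = 1024 - 341         = 683 slots
 *     VA reserved    = ptoa(2 * 1024 + 1) = 2049 pages (guards + data)
 */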
5593
5594 __startup_func
5595 static void
5596 pgz_init(void)
5597 {
5598 if (!pgz_uses) {
5599 return;
5600 }
5601
5602 if (pgz_sample_rate == 0) {
5603 /*
5604 * If no rate was provided, pick a random one that scales
5605 * with the number of protected zones.
5606 *
5607 * Average two uniform draws (a triangular distribution) to avoid
5608 * having too many really fast sample rates.
5609 */
5610 uint32_t factor = MIN(pgz_uses, 10);
5611 uint32_t max_rate = 1000 * factor;
5612 uint32_t min_rate = 100 * factor;
5613
5614 pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5615 zalloc_random_uniform32(min_rate, max_rate)) / 2;
5616 }
5617
5618 struct mach_vm_range *r = &zone_info.zi_pgz_range;
5619 zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5620 zone_meta_populate(r->min_address, mach_vm_range_size(r));
5621
5622 for (uint32_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5623 pgz_meta_raw(i)->zm_chunk_len = ZM_PGZ_GUARD;
5624 }
5625
5626 for (uint32_t i = 1; i < pgz_slots; i++) {
5627 pgz_meta_raw(2 * i - 1)->zm_pgz_slot_next = pgz_meta_raw(2 * i + 1);
5628 }
5629 #if OS_ATOMIC_HAS_LLSC
5630 pgz_slot_head = pgz_meta_raw(1);
5631 #endif
5632 pgz_slot_tail = pgz_meta_raw(2 * pgz_slots - 1);
5633
5634 kernel_memory_allocate(kernel_map, (vm_offset_t *)&pgz_backtraces,
5635 /* size */ sizeof(struct pgz_backtrace) * 2 * pgz_slots,
5636 /* mask */ ZALIGN_PTR,
5637 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO | KMA_NOSOFTLIMIT,
5638 VM_KERN_MEMORY_KALLOC);
5639
5640 /*
5641 * Expand the pmap so that pmap_enter_options_addr()
5642 * in pgz_protect() never needs to call pmap_expand().
5643 */
5644 for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5645 (void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5646 VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5647 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5648 }
5649
5650 /* do this last as this will enable pgz */
5651 percpu_foreach(counter, pgz_sample_counter) {
5652 *counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5653 }
5654 }
5655 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
5656
5657 static void
5658 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5659 {
5660 struct pgz_backtrace *bt = pgz_bt(slot, free);
5661 const char *what = free ? "Free" : "Allocation";
5662 uintptr_t buf[MAX_ZTRACE_DEPTH];
5663
5664 if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5665 paniclog_append_noflush(" Can't decode %s Backtrace\n", what);
5666 return;
5667 }
5668
5669 backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5670 (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5671
5672 paniclog_append_noflush(" %s Backtrace:\n", what);
5673 for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5674 if (has_syms) {
5675 paniclog_append_noflush(" %p ", (void *)buf[i]);
5676 panic_print_symbol_name(buf[i]);
5677 paniclog_append_noflush("\n");
5678 } else {
5679 paniclog_append_noflush(" %p\n", (void *)buf[i]);
5680 }
5681 }
5682 kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5683 }
5684
5685 static void
5686 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5687 {
5688 struct zone_page_metadata *meta;
5689 vm_offset_t elem, esize;
5690 const char *type;
5691 const char *prob;
5692 uint32_t slot;
5693 zone_t z;
5694
5695 slot = pgz_slot(addr);
5696 meta = pgz_meta(slot);
5697 elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5698
5699 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5700
5701 if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5702 meta->zm_index &&
5703 meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5704 z = &zone_array[meta->zm_index];
5705 } else {
5706 paniclog_append_noflush(" Zone : <unknown>\n");
5707 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5708 paniclog_append_noflush("\n");
5709 return;
5710 }
5711
5712 esize = zone_elem_inner_size(z);
5713 paniclog_append_noflush(" Zone : %s%s\n",
5714 zone_heap_name(z), zone_name(z));
5715 paniclog_append_noflush(" Address : %p\n", (void *)addr);
5716 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
5717 (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5718
5719 if (addr < elem) {
5720 type = "out-of-bounds(underflow) + use-after-free";
5721 prob = "low";
5722 } else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5723 type = "double-free";
5724 prob = "high";
5725 } else if (addr < elem + esize) {
5726 type = "use-after-free";
5727 prob = "high";
5728 } else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5729 type = "out-of-bounds + use-after-free";
5730 prob = "low";
5731 } else {
5732 type = "out-of-bounds";
5733 prob = "high";
5734 }
5735 paniclog_append_noflush(" Kind : %s (%s confidence)\n",
5736 type, prob);
5737 if (addr < elem) {
5738 paniclog_append_noflush(" Access : %d byte(s) before\n",
5739 (uint32_t)(elem - addr) + 1);
5740 } else if (addr < elem + esize) {
5741 paniclog_append_noflush(" Access : %d byte(s) inside\n",
5742 (uint32_t)(addr - elem) + 1);
5743 } else {
5744 paniclog_append_noflush(" Access : %d byte(s) past\n",
5745 (uint32_t)(addr - (elem + esize)) + 1);
5746 }
5747
5748 panic_display_pgz_bt(has_syms, slot, false);
5749 if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5750 panic_display_pgz_bt(has_syms, slot, true);
5751 }
5752
5753 paniclog_append_noflush("\n");
5754 }
5755
5756 vm_offset_t pgz_protect_for_testing_only(zone_t zone, vm_offset_t addr, void *fp);
5757 vm_offset_t
5758 pgz_protect_for_testing_only(zone_t zone, vm_offset_t addr, void *fp)
5759 {
5760 return pgz_protect(zone, addr, fp);
5761 }
5762
5763
5764 #endif /* CONFIG_PROB_GZALLOC */
5765 #endif /* !ZALLOC_TEST */
5766 #pragma mark zfree
5767 #if !ZALLOC_TEST
5768
5769 /*!
5770 * @defgroup zfree
5771 * @{
5772 *
5773 * @brief
5774 * The codepath for zone frees.
5775 *
5776 * @discussion
5777 * There are 4 major ways to free memory back to the zone allocator:
5778 * - @c zfree()
5779 * - @c zfree_percpu()
5780 * - @c kfree*()
5781 * - @c zfree_permanent()
5782 *
5783 * While permanent zones have their own allocation scheme, all other codepaths
5784 * will eventually go through the @c zfree_ext() choking point.
5785 */
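/*
 * Minimal usage sketch (illustration only; assumes a hypothetical
 * `foo_zone` created elsewhere, e.g. with zone_create()):
 *
 *     struct foo *f = zalloc_flags(foo_zone, Z_WAITOK | Z_ZERO);
 *     ...
 *     zfree(foo_zone, f);     // funnels into zfree_ext()
 */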
5786
5787 __header_always_inline void
5788 zfree_drop(zone_t zone, vm_offset_t addr)
5789 {
5790 vm_offset_t esize = zone_elem_outer_size(zone);
5791 struct zone_page_metadata *meta;
5792 vm_offset_t eidx;
5793
5794 meta = zone_element_resolve(zone, addr, &eidx);
5795
5796 if (!zone_meta_mark_free(meta, eidx)) {
5797 zone_meta_double_free_panic(zone, addr, __func__);
5798 }
5799
5800 vm_offset_t old_size = meta->zm_alloc_size;
5801 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5802 vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5803
5804 if (new_size == 0) {
5805 /* whether the page was on the intermediate or all_used queue, move it to free */
5806 zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5807 zone->z_wired_empty += meta->zm_chunk_len;
5808 } else if (old_size + esize > max_size) {
5809 /* first free element on page, move from all_used */
5810 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5811 }
5812
5813 if (__improbable(zone->z_exhausted_wait)) {
5814 zone_wakeup_exhausted_waiters(zone);
5815 }
5816 }
5817
5818 __attribute__((noinline))
5819 static void
5820 zfree_item(zone_t zone, vm_offset_t addr)
5821 {
5822 /* transfer preemption count to lock */
5823 zone_lock_nopreempt_check_contention(zone);
5824
5825 zfree_drop(zone, addr);
5826 zone->z_elems_free += 1;
5827
5828 zone_unlock(zone);
5829 }
5830
5831 static void
5832 zfree_cached_depot_recirculate(
5833 zone_t zone,
5834 uint32_t depot_max,
5835 zone_cache_t cache)
5836 {
5837 smr_t smr = zone_cache_smr(cache);
5838 smr_seq_t seq;
5839 uint32_t n;
5840
5841 zone_recirc_lock_nopreempt_check_contention(zone);
5842
5843 n = cache->zc_depot.zd_full;
5844 if (n >= depot_max) {
5845 /*
5846 * If SMR is in use, rotate the entire chunk of magazines.
5847 *
5848 * If the head of the recirculation layer is ready to be
5849 * reused, pull them back to refill a little.
5850 */
5851 seq = zone_depot_move_full(&zone->z_recirc,
5852 &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5853
5854 if (smr) {
5855 smr_deferred_advance_commit(smr, seq);
5856 if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5857 zone_depot_move_full(&cache->zc_depot,
5858 &zone->z_recirc, depot_max / 2, NULL);
5859 }
5860 }
5861 }
5862
5863 n = depot_max - cache->zc_depot.zd_full;
5864 if (n > zone->z_recirc.zd_empty) {
5865 n = zone->z_recirc.zd_empty;
5866 }
5867 if (n) {
5868 zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5869 n, zone);
5870 }
5871
5872 zone_recirc_unlock_nopreempt(zone);
5873 }
5874
5875 static zone_cache_t
5876 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5877 {
5878 zone_magazine_t mag = NULL, tmp = NULL;
5879 smr_t smr = zone_cache_smr(cache);
5880 bool wakeup_exhausted = false;
5881
5882 if (zone->z_recirc.zd_empty == 0) {
5883 mag = zone_magazine_alloc(Z_NOWAIT);
5884 }
5885
5886 zone_recirc_lock_nopreempt_check_contention(zone);
5887
5888 if (mag == NULL && zone->z_recirc.zd_empty) {
5889 mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5890 __builtin_assume(mag);
5891 }
5892 if (mag) {
5893 tmp = zone_magazine_replace(cache, mag, true);
5894 if (smr) {
5895 smr_deferred_advance_commit(smr, tmp->zm_seq);
5896 }
5897 if (zone_security_array[zone_index(zone)].z_lifo) {
5898 zone_depot_insert_head_full(&zone->z_recirc, tmp);
5899 } else {
5900 zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5901 }
5902
5903 wakeup_exhausted = zone->z_exhausted_wait;
5904 }
5905
5906 zone_recirc_unlock_nopreempt(zone);
5907
5908 if (__improbable(wakeup_exhausted)) {
5909 zone_lock_nopreempt(zone);
5910 if (zone->z_exhausted_wait) {
5911 zone_wakeup_exhausted_waiters(zone);
5912 }
5913 zone_unlock_nopreempt(zone);
5914 }
5915
5916 return mag ? cache : NULL;
5917 }
5918
5919 __attribute__((noinline))
5920 static zone_cache_t
5921 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5922 {
5923 zone_magazine_t mag = NULL, tmp = NULL;
5924 uint32_t depot_max;
5925
5926 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5927 if (depot_max) {
5928 zone_depot_lock_nopreempt(cache);
5929
5930 if (cache->zc_depot.zd_empty == 0) {
5931 zfree_cached_depot_recirculate(zone, depot_max, cache);
5932 }
5933
5934 if (__probable(cache->zc_depot.zd_empty)) {
5935 mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5936 __builtin_assume(mag);
5937 } else {
5938 mag = zone_magazine_alloc(Z_NOWAIT);
5939 }
5940 if (mag) {
5941 tmp = zone_magazine_replace(cache, mag, true);
5942 zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5943 }
5944
5945 zone_depot_unlock_nopreempt(cache);
5946
5947 return mag ? cache : NULL;
5948 }
5949
5950 return zfree_cached_recirculate(zone, cache);
5951 }
5952
5953 __attribute__((always_inline))
5954 static inline zone_cache_t
5955 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5956 {
5957 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5958
5959 if (__probable(cache->zc_free_cur < zc_mag_size())) {
5960 return cache;
5961 }
5962
5963 if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5964 zone_cache_swap_magazines(cache);
5965 return cache;
5966 }
5967
5968 return zfree_cached_trim(zone, cache);
5969 }
5970
5971 __attribute__((always_inline))
5972 static inline zone_cache_t
5973 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5974 {
5975 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5976 size_t idx = cache->zc_free_cur;
5977
5978 if (__probable(idx + 1 < zc_mag_size())) {
5979 return cache;
5980 }
5981
5982 /*
5983 * When SMR is in use, the bucket is tagged early with
5984 * @c smr_deferred_advance(), which costs a full barrier,
5985 * but performs no store.
5986 *
5987 * When magazines hit the recirculation layer, the advance is committed
5988 * under the recirculation lock (see zfree_cached_recirculate()).
5989 *
5990 * When done this way, the zone contention detection mechanism
5991 * will adjust the size of the per-cpu depots gracefully, which
5992 * mechanically reduces the pace of these commits as usage increases.
5993 */
5994
5995 if (__probable(idx + 1 == zc_mag_size())) {
5996 zone_magazine_t mag;
5997
5998 mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5999 offsetof(struct zone_magazine, zm_elems));
6000 mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
6001 return cache;
6002 }
6003
6004 return zfree_cached_trim(zone, cache);
6005 }
6006
6007 __attribute__((always_inline))
6008 static inline vm_offset_t
6009 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
6010 {
6011 struct zone_page_metadata *meta;
6012 vm_offset_t offs;
6013
6014 #pragma unused(combined_size)
6015 #if CONFIG_PROB_GZALLOC
6016 if (__improbable(pgz_owned(elem))) {
6017 elem = pgz_unprotect(elem, __builtin_frame_address(0));
6018 }
6019 #endif /* CONFIG_PROB_GZALLOC */
6020
6021 meta = zone_meta_from_addr(elem);
6022 if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
6023 zone_invalid_element_panic(zone, elem);
6024 }
6025
6026 offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
6027 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6028 offs += ptoa(meta->zm_page_index);
6029 }
6030
6031 if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
6032 zone_invalid_element_panic(zone, elem);
6033 }
6034
6035 #if VM_TAG_SIZECLASSES
6036 if (__improbable(zone->z_uses_tags)) {
6037 vm_tag_t *slot;
6038
6039 slot = zba_extra_ref_ptr(meta->zm_bitmap,
6040 Z_FAST_QUO(offs, zone->z_quo_magic));
6041 vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
6042 -(long)ZFREE_ELEM_SIZE(combined_size));
6043 *slot = VM_KERN_MEMORY_NONE;
6044 }
6045 #endif /* VM_TAG_SIZECLASSES */
6046
6047 #if KASAN_CLASSIC
6048 kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
6049 ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
6050 zone->z_percpu, __builtin_frame_address(0));
6051 #endif
6052
6053 elem = (vm_offset_t)zone_tag_free_element(zone, (caddr_t)elem, ZFREE_ELEM_SIZE(combined_size));
6054 return elem;
6055 }
6056
6057 __attribute__((always_inline))
6058 void *
6059 zcache_mark_invalid(zone_t zone, void *elem)
6060 {
6061 vm_size_t esize = zone_elem_inner_size(zone);
6062
6063 ZFREE_LOG(zone, (vm_offset_t)elem, 1);
6064 return (void *)__zcache_mark_invalid(zone, (vm_offset_t)elem, ZFREE_PACK_SIZE(esize, esize));
6065 }
6066
6067 /*
6068 * The function is noinline when zlog can be used so that backtracing
6069 * can reliably skip the boring zfree_ext() and zfree_log()
6070 * frames.
6071 */
6072 #if ZALLOC_ENABLE_LOGGING
6073 __attribute__((noinline))
6074 #endif /* ZALLOC_ENABLE_LOGGING */
6075 void
6076 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
6077 {
6078 vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
6079 vm_offset_t elem = (vm_offset_t)addr;
6080 int cpu;
6081
6082 DTRACE_VM2(zfree, zone_t, zone, void*, elem);
6083
6084 ZFREE_LOG(zone, elem, 1);
6085 elem = __zcache_mark_invalid(zone, elem, combined_size);
6086
6087 disable_preemption();
6088 cpu = cpu_number();
6089 zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
6090
6091 #if KASAN_CLASSIC
6092 if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
6093 struct kasan_quarantine_result kqr;
6094
6095 kqr = kasan_quarantine(elem, esize);
6096 elem = kqr.addr;
6097 zone = kqr.zone;
6098 if (elem == 0) {
6099 return enable_preemption();
6100 }
6101 }
6102 #endif
6103
6104 if (zone->z_pcpu_cache) {
6105 zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
6106
6107 if (__probable(cache)) {
6108 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6109 return enable_preemption();
6110 }
6111 }
6112
6113 return zfree_item(zone, elem);
6114 }
6115
6116 __attribute__((always_inline))
6117 static inline zstack_t
6118 zcache_free_stack_to_cpu(
6119 zone_id_t zid,
6120 zone_cache_t cache,
6121 zstack_t stack,
6122 vm_size_t esize,
6123 zone_cache_ops_t ops,
6124 bool zero)
6125 {
6126 size_t n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
6127 vm_offset_t *p;
6128
6129 stack.z_count -= n;
6130 cache->zc_free_cur += n;
6131 p = cache->zc_free_elems + cache->zc_free_cur;
6132
6133 do {
6134 void *o = zstack_pop_no_delta(&stack);
6135
6136 if (ops) {
6137 o = ops->zc_op_mark_invalid(zid, o);
6138 } else {
6139 if (zero) {
6140 bzero(o, esize);
6141 }
6142 o = (void *)__zcache_mark_invalid(zone_by_id(zid),
6143 (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
6144 }
6145 *--p = (vm_offset_t)o;
6146 } while (--n > 0);
6147
6148 return stack;
6149 }
6150
6151 __attribute__((always_inline))
6152 static inline void
6153 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6154 {
6155 vm_offset_t elem = (vm_offset_t)addr;
6156 zone_cache_t cache;
6157 vm_size_t esize;
6158 zone_t zone = zone_by_id(zid);
6159 int cpu;
6160
6161 ZFREE_LOG(zone, elem, 1);
6162
6163 disable_preemption();
6164 cpu = cpu_number();
6165 esize = zone_elem_inner_size(zone);
6166 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6167 if (!ops) {
6168 addr = (void *)__zcache_mark_invalid(zone, elem,
6169 ZFREE_PACK_SIZE(esize, esize));
6170 }
6171 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6172 if (__probable(cache)) {
6173 if (ops) {
6174 addr = ops->zc_op_mark_invalid(zid, addr);
6175 }
6176 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6177 enable_preemption();
6178 } else if (ops) {
6179 enable_preemption();
6180 os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
6181 ops->zc_op_free(zid, addr);
6182 } else {
6183 zfree_item(zone, elem);
6184 }
6185 }
6186
6187 __attribute__((always_inline))
6188 static inline void
6189 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
6190 {
6191 zone_t zone = zone_by_id(zid);
6192 zone_cache_t cache;
6193 vm_size_t esize;
6194 int cpu;
6195
6196 ZFREE_LOG(zone, stack.z_head, stack.z_count);
6197
6198 disable_preemption();
6199 cpu = cpu_number();
6200 esize = zone_elem_inner_size(zone);
6201 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
6202 stack.z_count * esize;
6203
6204 for (;;) {
6205 cache = zfree_cached_get_pcpu_cache(zone, cpu);
6206 if (__probable(cache)) {
6207 stack = zcache_free_stack_to_cpu(zid, cache,
6208 stack, esize, ops, zero);
6209 enable_preemption();
6210 } else if (ops) {
6211 enable_preemption();
6212 os_atomic_dec(&zone->z_elems_avail, relaxed);
6213 ops->zc_op_free(zid, zstack_pop(&stack));
6214 } else {
6215 vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
6216
6217 if (zero) {
6218 bzero((void *)addr, esize);
6219 }
6220 addr = __zcache_mark_invalid(zone, addr,
6221 ZFREE_PACK_SIZE(esize, esize));
6222 zfree_item(zone, addr);
6223 }
6224
6225 if (stack.z_count == 0) {
6226 break;
6227 }
6228
6229 disable_preemption();
6230 cpu = cpu_number();
6231 }
6232 }
6233
6234 void
6235 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6236 {
6237 __builtin_assume(ops != NULL);
6238 zcache_free_1_ext(zid, addr, ops);
6239 }
6240
6241 void
6242 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6243 {
6244 __builtin_assume(ops != NULL);
6245 zcache_free_n_ext(zid, stack, ops, false);
6246 }
6247
6248 void
6249 (zfree_n)(zone_id_t zid, zstack_t stack)
6250 {
6251 zcache_free_n_ext(zid, stack, NULL, true);
6252 }
6253
6254 void
6255 (zfree_nozero)(zone_id_t zid, void *addr)
6256 {
6257 zcache_free_1_ext(zid, addr, NULL);
6258 }
6259
6260 void
6261 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6262 {
6263 zcache_free_n_ext(zid, stack, NULL, false);
6264 }
6265
6266 void
6267 (zfree)(zone_t zov, void *addr)
6268 {
6269 zone_t zone = zov->z_self;
6270 zone_stats_t zstats = zov->z_stats;
6271 vm_offset_t esize = zone_elem_inner_size(zone);
6272
6273 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6274 assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6275 vm_memtag_bzero_fast_checked(addr, esize);
6276
6277 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6278 }
6279
6280 __attribute__((noinline))
6281 void
6282 zfree_percpu(union zone_or_view zov, void *addr)
6283 {
6284 zone_t zone = zov.zov_view->zv_zone;
6285 zone_stats_t zstats = zov.zov_view->zv_stats;
6286 vm_offset_t esize = zone_elem_inner_size(zone);
6287
6288 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6289 assert(zone->z_percpu);
6290 zpercpu_foreach_cpu(i) {
6291 vm_memtag_bzero_fast_checked((char *)addr + ptoa(i), esize);
6292 }
6293 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6294 }
6295
6296 void
6297 (zfree_id)(zone_id_t zid, void *addr)
6298 {
6299 (zfree)(&zone_array[zid], addr);
6300 }
6301
6302 void
6303 (zfree_ro)(zone_id_t zid, void *addr)
6304 {
6305 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6306 zone_t zone = zone_by_id(zid);
6307 zone_stats_t zstats = zone->z_stats;
6308 vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6309
6310 #if ZSECURITY_CONFIG(READ_ONLY)
6311 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6312 pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6313 #else
6314 (void)zid;
6315 bzero(addr, esize);
6316 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6317 zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6318 }
6319
6320 __attribute__((noinline))
6321 static void
6322 zfree_item_smr(zone_t zone, vm_offset_t addr)
6323 {
6324 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6325 vm_size_t esize = zone_elem_inner_size(zone);
6326
6327 /*
6328 * This path should be taken extremely rarely:
6329 * it only happens if we failed to allocate an empty bucket.
6330 */
6331 smr_synchronize(zone_cache_smr(cache));
6332
6333 cache->zc_free((void *)addr, esize);
6334 addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6335
6336 zfree_item(zone, addr);
6337 }
6338
6339 void
6340 (zfree_smr)(zone_t zone, void *addr)
6341 {
6342 vm_offset_t elem = (vm_offset_t)addr;
6343 vm_offset_t esize;
6344 zone_cache_t cache;
6345 int cpu;
6346
6347 ZFREE_LOG(zone, elem, 1);
6348
6349 disable_preemption();
6350 cpu = cpu_number();
6351 #if MACH_ASSERT
6352 cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6353 assert(!smr_entered_cpu_noblock(cache->zc_smr, cpu));
6354 #endif
6355 esize = zone_elem_inner_size(zone);
6356 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6357 cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6358 if (__probable(cache)) {
6359 cache->zc_free_elems[cache->zc_free_cur++] = elem;
6360 enable_preemption();
6361 } else {
6362 zfree_item_smr(zone, elem);
6363 }
6364 }
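/*
 * Sketch of the deferred-reuse guarantee (illustration only; assumes a
 * hypothetical SMR-enabled zone `foo_zone` whose readers run inside the
 * zone's SMR critical sections):
 *
 *     writer:                            reader (inside its SMR section):
 *         old = remove_from_list();          f = lookup();   // may race
 *         zfree_smr(foo_zone, old);          use(f);         // still safe
 *
 * The element is only handed back for reuse once the magazine's zm_seq
 * has been polled past, i.e. once every reader that could have observed
 * it has left its critical section (see zalloc_cached_reuse_smr()).
 */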
6365
6366 void
6367 (zfree_id_smr)(zone_id_t zid, void *addr)
6368 {
6369 (zfree_smr)(&zone_array[zid], addr);
6370 }
6371
6372 void
6373 kfree_type_impl_internal(
6374 kalloc_type_view_t kt_view,
6375 void *ptr __unsafe_indexable)
6376 {
6377 zone_t zsig = kt_view->kt_zsig;
6378 zone_t z = kt_view->kt_zv.zv_zone;
6379 struct zone_page_metadata *meta;
6380 zone_id_t zidx_meta;
6381 zone_security_flags_t zsflags_meta;
6382 zone_security_flags_t zsflags_z = zone_security_config(z);
6383 zone_security_flags_t zsflags_zsig;
6384
6385 if (NULL == ptr) {
6386 return;
6387 }
6388
6389 meta = zone_meta_from_addr((vm_offset_t) ptr);
6390 zidx_meta = meta->zm_index;
6391 zsflags_meta = zone_security_array[zidx_meta];
6392
6393 if (zone_is_data_kheap(zsflags_z.z_kheap_id) ||
6394 zone_has_index(z, zidx_meta)) {
6395 return (zfree)(&kt_view->kt_zv, ptr);
6396 }
6397 zsflags_zsig = zone_security_config(zsig);
6398 if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
6399 z = zone_array + zidx_meta;
6400 return (zfree)(z, ptr);
6401 }
6402
6403 return (zfree)(kt_view->kt_zearly, ptr);
6404 }
6405
6406 /*! @} */
6407 #endif /* !ZALLOC_TEST */
6408 #pragma mark zalloc
6409 #if !ZALLOC_TEST
6410
6411 /*!
6412 * @defgroup zalloc
6413 * @{
6414 *
6415 * @brief
6416 * The codepath for zone allocations.
6417 *
6418 * @discussion
6419 * There are 4 major ways to allocate memory that end up in the zone allocator:
6420 * - @c zalloc(), @c zalloc_flags(), ...
6421 * - @c zalloc_percpu()
6422 * - @c kalloc*()
6423 * - @c zalloc_permanent()
6424 *
6425 * While permanent zones have their own allocation scheme, all other codepaths
6426 * will eventually go through the @c zalloc_ext() choking point.
6427 *
6428 * @c zalloc_return() is the final function everyone tail calls into,
6429 * which prepares the element for consumption by the caller and deals with
6430 * common treatment (zone logging, tags, kasan, validation, ...).
6431 */
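/*
 * Flag behavior sketch (illustration only, hypothetical `foo_zone`):
 *
 *     f = zalloc_flags(foo_zone, Z_WAITOK | Z_ZERO);   // may block for memory
 *     f = zalloc_flags(foo_zone, Z_NOWAIT);            // never blocks, may return NULL
 *     f = zalloc_flags(foo_zone, Z_WAITOK | Z_NOFAIL); // never returns NULL
 *
 * All of these funnel into zalloc_ext() and return through zalloc_return().
 */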
6432
6433 /*!
6434 * @function zalloc_import
6435 *
6436 * @brief
6437 * Import @c n elements into the specified array; the opposite of @c zfree_drop().
6438 *
6439 * @param zone The zone to import elements from
6440 * @param elems The array to import into
6441 * @param n The number of elements to import. Must be non-zero,
6442 * and smaller than @c zone->z_elems_free.
6443 */
6444 __header_always_inline vm_size_t
6445 zalloc_import(
6446 zone_t zone,
6447 vm_offset_t *elems,
6448 zalloc_flags_t flags,
6449 uint32_t n)
6450 {
6451 vm_offset_t esize = zone_elem_outer_size(zone);
6452 vm_offset_t offs = zone_elem_inner_offs(zone);
6453 zone_stats_t zs;
6454 int cpu = cpu_number();
6455 uint32_t i = 0;
6456
6457 zs = zpercpu_get_cpu(zone->z_stats, cpu);
6458
6459 if (__improbable(zone_caching_disabled < 0)) {
6460 /*
6461 * In the first 10s after boot, mess with
6462 * the scan position in order to make early
6463 * allocation patterns less predictable.
6464 */
6465 zone_early_scramble_rr(zone, cpu, zs);
6466 }
6467
6468 do {
6469 vm_offset_t page, eidx, size = 0;
6470 struct zone_page_metadata *meta;
6471
6472 if (!zone_pva_is_null(zone->z_pageq_partial)) {
6473 meta = zone_pva_to_meta(zone->z_pageq_partial);
6474 page = zone_pva_to_addr(zone->z_pageq_partial);
6475 } else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6476 meta = zone_pva_to_meta(zone->z_pageq_empty);
6477 page = zone_pva_to_addr(zone->z_pageq_empty);
6478 zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6479 } else {
6480 zone_accounting_panic(zone, "z_elems_free corruption");
6481 }
6482
6483 zone_meta_validate(zone, meta, page);
6484
6485 vm_offset_t old_size = meta->zm_alloc_size;
6486 vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6487
6488 do {
6489 eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6490 elems[i++] = page + offs + eidx * esize;
6491 size += esize;
6492 } while (i < n && old_size + size + esize <= max_size);
6493
6494 vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6495
6496 if (new_size + esize > max_size) {
6497 zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6498 } else if (old_size == 0) {
6499 /* remove from free, move to intermediate */
6500 zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6501 }
6502 } while (i < n);
6503
6504 n = zone_counter_sub(zone, z_elems_free, n);
6505 if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6506 zone->z_elems_free_min = n;
6507 }
6508
6509 return zone_elem_inner_size(zone);
6510 }
6511
6512 __attribute__((always_inline))
6513 static inline vm_offset_t
6514 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6515 {
6516 #pragma unused(zone, flags)
6517 #if KASAN_CLASSIC || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6518 vm_offset_t esize = zone_elem_inner_size(zone);
6519 #endif
6520
6521 addr = vm_memtag_load_tag(addr);
6522
6523 #if VM_TAG_SIZECLASSES
6524 if (__improbable(zone->z_uses_tags)) {
6525 struct zone_page_metadata *meta;
6526 vm_offset_t offs;
6527 vm_tag_t *slot;
6528 vm_tag_t tag;
6529
6530 tag = zalloc_flags_get_tag(flags);
6531 meta = zone_meta_from_addr(addr);
6532 offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6533 if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6534 offs += ptoa(meta->zm_page_index);
6535 }
6536
6537 slot = zba_extra_ref_ptr(meta->zm_bitmap,
6538 Z_FAST_QUO(offs, zone->z_quo_magic));
6539 *slot = tag;
6540
6541 vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6542 (long)esize);
6543 }
6544 #endif /* VM_TAG_SIZECLASSES */
6545
6546 #if CONFIG_PROB_GZALLOC
6547 if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6548 addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6549 }
6550 #endif
6551
6552 #if KASAN_CLASSIC
6553 /*
6554 * KASAN_CLASSIC integration of kalloc heaps is handled by kalloc_ext().
6555 */
6556 if ((flags & Z_SKIP_KASAN) == 0) {
6557 kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6558 (flags & Z_PCPU), __builtin_frame_address(0));
6559 }
6560 #endif /* KASAN_CLASSIC */
6561
6562 return addr;
6563 }
6564
6565 __attribute__((always_inline))
6566 void *
6567 zcache_mark_valid(zone_t zone, void *addr)
6568 {
6569 addr = (void *)__zcache_mark_valid(zone, (vm_offset_t)addr, 0);
6570 ZALLOC_LOG(zone, (vm_offset_t)addr, 1);
6571 return addr;
6572 }
6573
6574 /*!
6575 * @function zalloc_return
6576 *
6577 * @brief
6578 * Performs the tail-end of the work required on allocations before the caller
6579 * uses them.
6580 *
6581 * @discussion
6582 * This function is called without any zone lock held,
6583 * and with preemption restored to the state it had when @c zalloc_ext() was called.
6584 *
6585 * @param zone The zone we're allocating from.
6586 * @param addr The element we just allocated.
6587 * @param flags The flags passed to @c zalloc_ext() (for Z_ZERO).
6588 * @param elem_size The element size for this zone.
6589 */
6590 __attribute__((always_inline))
6591 static struct kalloc_result
6592 zalloc_return(
6593 zone_t zone,
6594 vm_offset_t addr,
6595 zalloc_flags_t flags,
6596 vm_offset_t elem_size)
6597 {
6598 addr = __zcache_mark_valid(zone, addr, flags);
6599 #if ZALLOC_ENABLE_ZERO_CHECK
6600 zalloc_validate_element(zone, addr, elem_size, flags);
6601 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6602 ZALLOC_LOG(zone, addr, 1);
6603
6604 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6605 return (struct kalloc_result){ (void *)addr, elem_size };
6606 }
6607
6608 static vm_size_t
6609 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
6610 {
6611 if (esize <= 512) {
6612 return zone_early_thres_mul * page_size / 4;
6613 } else if (esize < 2048) {
6614 return zone_early_thres_mul * esize * 8;
6615 }
6616 return zone_early_thres_mul * zone->z_chunk_elems * esize;
6617 }
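/*
 * Worked example of the thresholds above (purely illustrative, assuming
 * zone_early_thres_mul == 1 and 16K pages):
 *
 *     esize <= 512:        threshold = page_size / 4         = 4096 bytes
 *     512 < esize < 2048:  threshold = esize * 8             (e.g. 1024 -> 8192)
 *     esize >= 2048:       threshold = z_chunk_elems * esize (one full chunk)
 */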
6618
6619 __attribute__((noinline))
6620 static struct kalloc_result
6621 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6622 {
6623 vm_offset_t esize, addr;
6624 zone_stats_t zs;
6625
6626 zone_lock_nopreempt_check_contention(zone);
6627
6628 zs = zpercpu_get(zstats);
6629 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6630 if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6631 zone_expand_async_schedule_if_allowed(zone);
6632 } else {
6633 zone_expand_locked(zone, flags);
6634 }
6635 if (__improbable(zone->z_elems_free == 0)) {
6636 zs->zs_alloc_fail++;
6637 zone_unlock(zone);
6638 if (__improbable(flags & Z_NOFAIL)) {
6639 zone_nofail_panic(zone);
6640 }
6641 DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6642 return (struct kalloc_result){ };
6643 }
6644 }
6645
6646 esize = zalloc_import(zone, &addr, flags, 1);
6647 zs->zs_mem_allocated += esize;
6648
6649 if (__improbable(!zone_share_always &&
6650 !os_atomic_load(&zs->zs_alloc_not_early, relaxed))) {
6651 if (flags & Z_SET_NOTEARLY) {
6652 vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
6653
6654 if (zs->zs_mem_allocated >= shared_threshold) {
6655 zpercpu_foreach(zs_cpu, zstats) {
6656 os_atomic_store(&zs_cpu->zs_alloc_not_early, 1, relaxed);
6657 }
6658 }
6659 }
6660 }
6661 zone_unlock(zone);
6662
6663 return zalloc_return(zone, addr, flags, esize);
6664 }
6665
6666 static void
6667 zalloc_cached_import(
6668 zone_t zone,
6669 zalloc_flags_t flags,
6670 zone_cache_t cache)
6671 {
6672 uint16_t n_elems = zc_mag_size();
6673
6674 zone_lock_nopreempt(zone);
6675
6676 if (__probable(!zone_caching_disabled &&
6677 zone->z_elems_free > zone->z_elems_rsv / 2)) {
6678 if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6679 zone_expand_async_schedule_if_allowed(zone);
6680 }
6681 if (zone->z_elems_free < n_elems) {
6682 n_elems = (uint16_t)zone->z_elems_free;
6683 }
6684 zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6685 cache->zc_alloc_cur = n_elems;
6686 }
6687
6688 zone_unlock_nopreempt(zone);
6689 }
6690
6691 static void
6692 zalloc_cached_depot_recirculate(
6693 zone_t zone,
6694 uint32_t depot_max,
6695 zone_cache_t cache,
6696 smr_t smr)
6697 {
6698 smr_seq_t seq;
6699 uint32_t n;
6700
6701 zone_recirc_lock_nopreempt_check_contention(zone);
6702
6703 n = cache->zc_depot.zd_empty;
6704 if (n >= depot_max) {
6705 zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6706 n - depot_max / 2, NULL);
6707 }
6708
6709 n = cache->zc_depot.zd_full;
6710 if (smr && n) {
6711 /*
6712 * if SMR is in use, it means smr_poll() failed,
6713 * so rotate the entire chunk of magazines in order
6714 * to let the sequence numbers age.
6715 */
6716 seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6717 n, NULL);
6718 smr_deferred_advance_commit(smr, seq);
6719 }
6720
6721 n = depot_max - cache->zc_depot.zd_empty;
6722 if (n > zone->z_recirc.zd_full) {
6723 n = zone->z_recirc.zd_full;
6724 }
6725
6726 if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6727 zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6728 n, zone);
6729 }
6730
6731 zone_recirc_unlock_nopreempt(zone);
6732 }
6733
6734 static void
6735 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6736 {
6737 zone_smr_free_cb_t zc_free = cache->zc_free;
6738 vm_size_t esize = zone_elem_inner_size(z);
6739
6740 for (uint16_t i = 0; i < zc_mag_size(); i++) {
6741 vm_offset_t elem = mag->zm_elems[i];
6742
6743 zc_free((void *)elem, zone_elem_inner_size(z));
6744 elem = __zcache_mark_invalid(z, elem,
6745 ZFREE_PACK_SIZE(esize, esize));
6746 mag->zm_elems[i] = elem;
6747 }
6748 }
6749
6750 static void
6751 zalloc_cached_recirculate(
6752 zone_t zone,
6753 zone_cache_t cache)
6754 {
6755 zone_magazine_t mag = NULL;
6756
6757 zone_recirc_lock_nopreempt_check_contention(zone);
6758
6759 if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6760 mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6761 if (zone_cache_smr(cache)) {
6762 zalloc_cached_reuse_smr(zone, cache, mag);
6763 }
6764 mag = zone_magazine_replace(cache, mag, false);
6765 zone_depot_insert_head_empty(&zone->z_recirc, mag);
6766 }
6767
6768 zone_recirc_unlock_nopreempt(zone);
6769 }
6770
6771 __attribute__((noinline))
6772 static zone_cache_t
6773 zalloc_cached_prime(
6774 zone_t zone,
6775 zone_cache_ops_t ops,
6776 zalloc_flags_t flags,
6777 zone_cache_t cache)
6778 {
6779 zone_magazine_t mag = NULL;
6780 uint32_t depot_max;
6781 smr_t smr;
6782
6783 depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6784 if (depot_max) {
6785 smr = zone_cache_smr(cache);
6786
6787 zone_depot_lock_nopreempt(cache);
6788
6789 if (!zone_depot_poll(&cache->zc_depot, smr)) {
6790 zalloc_cached_depot_recirculate(zone, depot_max, cache,
6791 smr);
6792 }
6793
6794 if (__probable(cache->zc_depot.zd_full)) {
6795 mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6796 if (zone_cache_smr(cache)) {
6797 zalloc_cached_reuse_smr(zone, cache, mag);
6798 }
6799 mag = zone_magazine_replace(cache, mag, false);
6800 zone_depot_insert_head_empty(&cache->zc_depot, mag);
6801 }
6802
6803 zone_depot_unlock_nopreempt(cache);
6804 } else if (zone->z_recirc.zd_full) {
6805 zalloc_cached_recirculate(zone, cache);
6806 }
6807
6808 if (__probable(cache->zc_alloc_cur)) {
6809 return cache;
6810 }
6811
6812 if (ops == NULL) {
6813 zalloc_cached_import(zone, flags, cache);
6814 if (__probable(cache->zc_alloc_cur)) {
6815 return cache;
6816 }
6817 }
6818
6819 return NULL;
6820 }
6821
6822 __attribute__((always_inline))
6823 static inline zone_cache_t
6824 zalloc_cached_get_pcpu_cache(
6825 zone_t zone,
6826 zone_cache_ops_t ops,
6827 int cpu,
6828 zalloc_flags_t flags)
6829 {
6830 zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6831
6832 if (__probable(cache->zc_alloc_cur != 0)) {
6833 return cache;
6834 }
6835
6836 if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6837 zone_cache_swap_magazines(cache);
6838 return cache;
6839 }
6840
6841 return zalloc_cached_prime(zone, ops, flags, cache);
6842 }
6843
6844
6845 /*!
6846 * @function zalloc_ext
6847 *
6848 * @brief
6849 * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6850 */
6851 struct kalloc_result
6852 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6853 {
6854 /*
6855 * KASan uses zalloc() for fakestack, which can be called anywhere.
6856 * However, we make sure these calls can never block.
6857 */
6858 assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6859 #if KASAN_FAKESTACK
6860 zone->z_kasan_fakestacks ||
6861 #endif /* KASAN_FAKESTACK */
6862 ml_get_interrupts_enabled() ||
6863 ml_is_quiescing() ||
6864 debug_mode_active(),
6865 "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6866
6867 /*
6868 * Make sure Z_NOFAIL was not obviously misused
6869 */
6870 if (flags & Z_NOFAIL) {
6871 assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6872 }
6873
6874 #if VM_TAG_SIZECLASSES
6875 if (__improbable(zone->z_uses_tags)) {
6876 vm_tag_t tag = zalloc_flags_get_tag(flags);
6877
6878 if (flags & Z_VM_TAG_BT_BIT) {
6879 tag = vm_tag_bt() ?: tag;
6880 }
6881 if (tag != VM_KERN_MEMORY_NONE) {
6882 tag = vm_tag_will_update_zone(tag,
6883 flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6884 }
6885 if (tag == VM_KERN_MEMORY_NONE) {
6886 zone_security_flags_t zsflags = zone_security_config(zone);
6887
6888 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6889 tag = VM_KERN_MEMORY_KALLOC_DATA;
6890 } else if (zsflags.z_kheap_id == KHEAP_ID_DATA_SHARED) {
6891 tag = VM_KERN_MEMORY_KALLOC_SHARED;
6892 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6893 zsflags.z_kalloc_type) {
6894 tag = VM_KERN_MEMORY_KALLOC_TYPE;
6895 } else {
6896 tag = VM_KERN_MEMORY_KALLOC;
6897 }
6898 }
6899 flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6900 }
6901 #endif /* VM_TAG_SIZECLASSES */
6902
6903 disable_preemption();
6904
6905 #if ZALLOC_ENABLE_ZERO_CHECK
6906 if (zalloc_skip_zero_check()) {
6907 flags |= Z_NOZZC;
6908 }
6909 #endif
6910
6911 if (zone->z_pcpu_cache) {
6912 zone_cache_t cache;
6913 vm_offset_t index, addr, esize;
6914 int cpu = cpu_number();
6915
6916 cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6917 if (__probable(cache)) {
6918 esize = zone_elem_inner_size(zone);
6919 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6920 index = --cache->zc_alloc_cur;
6921 addr = cache->zc_alloc_elems[index];
6922 cache->zc_alloc_elems[index] = 0;
6923 enable_preemption();
6924 return zalloc_return(zone, addr, flags, esize);
6925 }
6926 }
6927
6928 __attribute__((musttail))
6929 return zalloc_item(zone, zstats, flags);
6930 }
6931
6932 __attribute__((always_inline))
6933 static inline zstack_t
6934 zcache_alloc_stack_from_cpu(
6935 zone_id_t zid,
6936 zone_cache_t cache,
6937 zstack_t stack,
6938 uint32_t n,
6939 zone_cache_ops_t ops)
6940 {
6941 vm_offset_t *p;
6942
6943 n = MIN(n, cache->zc_alloc_cur);
6944 p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6945 cache->zc_alloc_cur -= n;
6946 stack.z_count += n;
6947
6948 do {
6949 vm_offset_t e = *--p;
6950
6951 *p = 0;
6952 if (ops) {
6953 e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6954 } else {
6955 e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6956 }
6957 zstack_push_no_delta(&stack, (void *)e);
6958 } while (--n > 0);
6959
6960 return stack;
6961 }
6962
6963 __attribute__((noinline))
6964 static zstack_t
6965 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6966 {
6967 zone_t zone = zone_by_id(zid);
6968 zone_stats_t zstats = zone->z_stats;
6969 int cpu;
6970
6971 count -= stack.z_count;
6972
6973 disable_preemption();
6974 cpu = cpu_number();
6975 zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6976 count * zone_elem_inner_size(zone);
6977 zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6978 enable_preemption();
6979
6980 return stack;
6981 }
6982
6983 #define ZCACHE_ALLOC_RETRY ((void *)-1)
6984
6985 __attribute__((noinline))
6986 static void *
6987 zcache_alloc_one(
6988 zone_id_t zid,
6989 zalloc_flags_t flags,
6990 zone_cache_ops_t ops)
6991 {
6992 zone_t zone = zone_by_id(zid);
6993 void *o;
6994
6995 /*
6996 * First try to allocate in rudimentary zones without ever going into
6997 * __ZONE_EXHAUSTED_AND_WAITING_HARD__() by clearing Z_NOFAIL.
6998 */
6999 enable_preemption();
7000 o = ops->zc_op_alloc(zid, flags & ~Z_NOFAIL);
7001 if (__probable(o)) {
7002 os_atomic_inc(&zone->z_elems_avail, relaxed);
7003 } else if (__probable(flags & Z_NOFAIL)) {
7004 zone_cache_t cache;
7005 vm_offset_t index;
7006 int cpu;
7007
7008 zone_lock(zone);
7009
7010 cpu = cpu_number();
7011 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
7012 o = ZCACHE_ALLOC_RETRY;
7013 if (__probable(cache)) {
7014 index = --cache->zc_alloc_cur;
7015 o = (void *)cache->zc_alloc_elems[index];
7016 cache->zc_alloc_elems[index] = 0;
7017 o = ops->zc_op_mark_valid(zid, o);
7018 } else if (zone->z_elems_free == 0) {
7019 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone);
7020 }
7021
7022 zone_unlock(zone);
7023 }
7024
7025 return o;
7026 }
7027
7028 __attribute__((always_inline))
7029 static zstack_t
7030 zcache_alloc_n_ext(
7031 zone_id_t zid,
7032 uint32_t count,
7033 zalloc_flags_t flags,
7034 zone_cache_ops_t ops)
7035 {
7036 zstack_t stack = { };
7037 zone_cache_t cache;
7038 zone_t zone;
7039 int cpu;
7040
7041 disable_preemption();
7042 cpu = cpu_number();
7043 zone = zone_by_id(zid);
7044 zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
7045 count * zone_elem_inner_size(zone);
7046
7047 for (;;) {
7048 cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
7049 if (__probable(cache)) {
7050 stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
7051 count - stack.z_count, ops);
7052 enable_preemption();
7053 } else {
7054 void *o;
7055
7056 if (ops) {
7057 o = zcache_alloc_one(zid, flags, ops);
7058 } else {
7059 o = zalloc_item(zone, zone->z_stats, flags).addr;
7060 }
7061 if (__improbable(o == NULL)) {
7062 return zcache_alloc_fail(zid, stack, count);
7063 }
7064 if (ops == NULL || o != ZCACHE_ALLOC_RETRY) {
7065 zstack_push(&stack, o);
7066 }
7067 }
7068
7069 if (stack.z_count == count) {
7070 break;
7071 }
7072
7073 disable_preemption();
7074 cpu = cpu_number();
7075 }
7076
7077 ZALLOC_LOG(zone, stack.z_head, stack.z_count);
7078
7079 return stack;
7080 }
7081
7082 zstack_t
7083 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
7084 {
7085 return zcache_alloc_n_ext(zid, count, flags, NULL);
7086 }
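/*
 * Batch usage sketch (illustration only; ZONE_ID_FOO is a hypothetical
 * zone id):
 *
 *     zstack_t stack = zalloc_n(ZONE_ID_FOO, 32, Z_WAITOK | Z_ZERO);
 *
 *     while (stack.z_count) {
 *         struct foo *f = zstack_pop(&stack);
 *         ...
 *     }
 *
 * Unused elements can be returned in bulk with zfree_n() rather than
 * one zfree_id() call per element.
 */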
7087
7088 zstack_t
7089 (zcache_alloc_n)(
7090 zone_id_t zid,
7091 uint32_t count,
7092 zalloc_flags_t flags,
7093 zone_cache_ops_t ops)
7094 {
7095 __builtin_assume(ops != NULL);
7096 return zcache_alloc_n_ext(zid, count, flags, ops);
7097 }
7098
7099 __attribute__((always_inline))
7100 void *
7101 zalloc(zone_t zov)
7102 {
7103 return zalloc_flags(zov, Z_WAITOK);
7104 }
7105
7106 __attribute__((always_inline))
7107 void *
7108 zalloc_noblock(zone_t zov)
7109 {
7110 return zalloc_flags(zov, Z_NOWAIT);
7111 }
7112
7113 void *
7114 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
7115 {
7116 zone_t zone = zov->z_self;
7117 zone_stats_t zstats = zov->z_stats;
7118
7119 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7120 assert(!zone->z_percpu && !zone->z_permanent);
7121 return zalloc_ext(zone, zstats, flags).addr;
7122 }
7123
7124 __attribute__((always_inline))
7125 void *
7126 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
7127 {
7128 return (zalloc_flags)(zone_by_id(zid), flags);
7129 }
7130
7131 void *
7132 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
7133 {
7134 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7135 zone_t zone = zone_by_id(zid);
7136 zone_stats_t zstats = zone->z_stats;
7137 struct kalloc_result kr;
7138
7139 kr = zalloc_ext(zone, zstats, flags);
7140 #if ZSECURITY_CONFIG(READ_ONLY)
7141 assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
7142 if (kr.addr) {
7143 zone_require_ro(zid, kr.size, kr.addr);
7144 }
7145 #endif
7146 return kr.addr;
7147 }
7148
7149 #if ZSECURITY_CONFIG(READ_ONLY)
7150
7151 __attribute__((always_inline))
7152 static bool
7153 from_current_stack(vm_offset_t addr, vm_size_t size)
7154 {
7155 vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
7156 vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
7157
7158 addr = vm_memtag_canonicalize_kernel(addr);
7159
7160 return (addr >= start) && (addr + size < end);
7161 }
7162
7163 /*
7164 * Check if an address is from const memory, i.e. the TEXT or DATA CONST segments,
7165 * or the SECURITY_READ_ONLY_LATE section.
7166 */
7167 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7168 __attribute__((always_inline))
7169 static bool
7170 from_const_memory(const vm_offset_t addr, vm_size_t size)
7171 {
7172 return rorgn_contains(addr, size, true);
7173 }
7174 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7175 __attribute__((always_inline))
7176 static bool
7177 from_const_memory(const vm_offset_t addr, vm_size_t size)
7178 {
7179 #pragma unused(addr, size)
7180 return true;
7181 }
7182 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7183
7184 __abortlike
7185 static void
7186 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
7187 const vm_offset_t src, vm_size_t src_size)
7188 {
7189 vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
7190 vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
7191 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7192 extern vm_offset_t rorgn_begin;
7193 extern vm_offset_t rorgn_end;
7194 #else
7195 vm_offset_t const rorgn_begin = 0;
7196 vm_offset_t const rorgn_end = 0;
7197 #endif
7198
7199 if (from_ro_map(src, src_size)) {
7200 zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
7201 zone_t dst_zone = &zone_array[zid];
7202 panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
7203 " (expected: %s, actual: %s", (void *)src, elem, src_zone->z_name,
7204 dst_zone->z_name);
7205 }
7206
7207 panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
7208 "current stack (%p - %p) or const memory (phys %p - %p)",
7209 (void *)src, (void*)kvtophys(src),
7210 (void *)zone_info.zi_ro_range.min_address,
7211 (void *)zone_info.zi_ro_range.max_address,
7212 (void *)stack_start, (void *)stack_end,
7213 (void *)rorgn_begin, (void *)rorgn_end);
7214 }
7215
7216 __attribute__((always_inline))
7217 static void
7218 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
7219 const vm_offset_t src, vm_size_t src_size)
7220 {
7221 if (from_current_stack(src, src_size) ||
7222 (from_ro_map(src, src_size) &&
7223 zid == zone_index_from_ptr((void *)src)) ||
7224 from_const_memory(src, src_size)) {
7225 return;
7226 }
7227 zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
7228 }
7229
7230 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7231
7232 __attribute__((noinline))
7233 void
7234 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
7235 const void *new_data, vm_size_t new_data_size)
7236 {
7237 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7238
7239 #if ZSECURITY_CONFIG(READ_ONLY)
7240 bool skip_src_check = false;
7241
7242 /*
7243 * The OSEntitlements RO-zone is a little differently treated. For more
7244 * information: rdar://100518485.
7245 */
7246 if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
7247 code_signing_config_t cs_config = 0;
7248
7249 code_signing_configuration(NULL, &cs_config);
7250 if (cs_config & CS_CONFIG_CSM_ENABLED) {
7251 skip_src_check = true;
7252 }
7253 }
7254
7255 if (skip_src_check == false) {
7256 zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
7257 new_data_size);
7258 }
7259 pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
7260 (vm_offset_t) new_data, new_data_size);
7261 #else
7262 (void)zid;
7263 memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
7264 #endif
7265 }
7266
7267 __attribute__((noinline))
7268 uint64_t
7269 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
7270 zro_atomic_op_t op, uint64_t value)
7271 {
7272 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7273
7274 #if ZSECURITY_CONFIG(READ_ONLY)
7275 value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
7276 #else
7277 (void)zid;
7278 value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
7279 #endif
7280 return value;
7281 }
7282
7283 void
7284 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
7285 {
7286 assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7287 #if ZSECURITY_CONFIG(READ_ONLY)
7288 pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
7289 #else
7290 (void)zid;
7291 bzero((void *)((uintptr_t)elem + offset), size);
7292 #endif
7293 }
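/*
 * Usage sketch (illustrative only; ZONE_ID_EXAMPLE_RO and struct ro_blob
 * are hypothetical names): elements of a read-only zone are allocated like
 * any other zone element, but all subsequent writes must go through the
 * zalloc_ro_mut() / zalloc_ro_mut_atomic() / zalloc_ro_clear() interfaces,
 * which perform the store from the pmap/PPL side:
 *
 *     struct ro_blob *blob;
 *     struct ro_blob  tmp = { .field = value };   // staged on the stack
 *
 *     blob = zalloc_ro(ZONE_ID_EXAMPLE_RO, Z_WAITOK | Z_ZERO);
 *     zalloc_ro_mut(ZONE_ID_EXAMPLE_RO, blob, 0, &tmp, sizeof(tmp));
 *     ...
 *     zalloc_ro_clear(ZONE_ID_EXAMPLE_RO, blob, 0, sizeof(tmp));
 *
 * Staging the source on the caller's stack (or in another element of the
 * same RO zone, or in const memory) is exactly what
 * zalloc_ro_mut_validate_src() enforces above.
 */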
7294
7295 /*
7296 * This function will run in the PPL and needs to be robust
7297 * against an attacker with arbitrary kernel write.
7298 */
7299
7300 #if ZSECURITY_CONFIG(READ_ONLY)
7301
7302 __abortlike
7303 static void
7304 zone_id_require_ro_panic(zone_id_t zid, void *addr)
7305 {
7306 struct zone_size_params p = zone_ro_size_params[zid];
7307 vm_offset_t elem = (vm_offset_t)addr;
7308 uint32_t zindex;
7309 zone_t other;
7310 zone_t zone = &zone_array[zid];
7311
7312 if (!from_ro_map(addr, 1)) {
7313 panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7314 }
7315
7316 if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7317 panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7318 }
7319
7320 zindex = zone_index_from_ptr(addr);
7321 other = &zone_array[zindex];
7322 if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7323 panic("zone_require_ro failed: invalid zone index %d "
7324 "(addr: %p, expected: %s%s)", zindex,
7325 addr, zone_heap_name(zone), zone->z_name);
7326 } else {
7327 panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7328 "(addr: %p, expected: %s%s)",
7329 zindex, zone_heap_name(other), other->z_name,
7330 addr, zone_heap_name(zone), zone->z_name);
7331 }
7332 }
7333
7334 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7335
7336 __attribute__((always_inline))
7337 void
7338 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7339 {
7340 #if ZSECURITY_CONFIG(READ_ONLY)
7341 struct zone_size_params p = zone_ro_size_params[zid];
7342 vm_offset_t elem = (vm_offset_t)addr;
7343
7344 if (!from_ro_map(addr, 1) ||
7345 !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7346 zid != zone_meta_from_addr(elem)->zm_index) {
7347 zone_id_require_ro_panic(zid, addr);
7348 }
7349 #else
7350 #pragma unused(zid, addr)
7351 #endif
7352 }
7353
7354 void *
7355 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7356 {
7357 zone_t zone = zov.zov_view->zv_zone;
7358 zone_stats_t zstats = zov.zov_view->zv_stats;
7359
7360 assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7361 assert(zone->z_percpu);
7362 flags |= Z_PCPU;
7363 return zalloc_ext(zone, zstats, flags).addr;
7364 }
7365
7366 static void *
7367 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7368 {
7369 struct zone_page_metadata *page_meta;
7370 vm_offset_t offs, addr;
7371 zone_pva_t pva;
7372
7373 assert(ml_get_interrupts_enabled() ||
7374 ml_is_quiescing() ||
7375 debug_mode_active() ||
7376 startup_phase < STARTUP_SUB_EARLY_BOOT);
7377
7378 size = (size + mask) & ~mask;
7379 assert(size <= PAGE_SIZE);
7380
7381 zone_lock(zone);
7382 assert(zone->z_self == zone);
7383
7384 for (;;) {
7385 pva = zone->z_pageq_partial;
7386 while (!zone_pva_is_null(pva)) {
7387 page_meta = zone_pva_to_meta(pva);
7388 if (page_meta->zm_bump + size <= PAGE_SIZE) {
7389 goto found;
7390 }
7391 pva = page_meta->zm_page_next;
7392 }
7393
7394 zone_expand_locked(zone, Z_WAITOK);
7395 }
7396
7397 found:
7398 offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7399 page_meta->zm_bump = (uint16_t)(offs + size);
7400 page_meta->zm_alloc_size += size;
7401 zone->z_elems_free -= size;
7402 zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7403
7404 if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7405 zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7406 }
7407
7408 zone_unlock(zone);
7409
7410 if (zone->z_tbi_tag) {
7411 addr = vm_memtag_load_tag(offs + zone_pva_to_addr(pva));
7412 } else {
7413 addr = offs + zone_pva_to_addr(pva);
7414 }
7415
7416 DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7417 return (void *)addr;
7418 }
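/*
 * Worked example (hypothetical values): _zalloc_permanent() is a simple
 * bump allocator over per-page metadata.  For a request of size 24 with
 * mask 7 (8-byte alignment) against a page whose zm_bump is currently 20:
 *
 *     size    = (24 + 7) & ~7  = 24
 *     offs    = (20 + 7) & ~7  = 24    // aligned offset within the page
 *     zm_bump = 24 + 24        = 48    // next allocation starts here
 *
 * Permanent elements are never freed, so zm_alloc_size only ever grows
 * until the chunk is requeued onto z_pageq_full.
 */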
7419
7420 static void *
7421 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7422 {
7423 vm_offset_t addr;
7424
7425 kernel_memory_allocate(kernel_map, &addr, size, mask,
7426 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7427
7428 return (void *)addr;
7429 }
7430
7431 void *
7432 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7433 {
7434 if (size <= PAGE_SIZE) {
7435 zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7436 return _zalloc_permanent(zone, size, mask);
7437 }
7438 return _zalloc_permanent_large(size, mask, tag);
7439 }
7440
7441 void *
7442 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7443 {
7444 zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7445 return _zalloc_permanent(zone, size, mask);
7446 }
7447
7448 /*! @} */
7449 #endif /* !ZALLOC_TEST */
7450 #pragma mark zone GC / trimming
7451 #if !ZALLOC_TEST
7452
7453 static thread_call_data_t zone_trim_callout;
7454 EVENT_DEFINE(ZONE_EXHAUSTED);
7455
7456 static void
7457 zone_reclaim_chunk(
7458 zone_t z,
7459 struct zone_page_metadata *meta,
7460 uint32_t free_count)
7461 {
7462 vm_address_t page_addr;
7463 vm_size_t size_to_free;
7464 uint32_t bitmap_ref;
7465 uint32_t page_count;
7466 zone_security_flags_t zsflags = zone_security_config(z);
7467 bool sequester = !z->z_destroyed;
7468 bool oob_guard = false;
7469
7470 if (zone_submap_is_sequestered(zsflags)) {
7471 /*
7472 * If the entire map is sequestered, we can't return the VA.
7473 * It stays pinned to the zone forever.
7474 */
7475 sequester = true;
7476 }
7477
7478 zone_meta_queue_pop(z, &z->z_pageq_empty);
7479
7480 page_addr = zone_meta_to_addr(meta);
7481 page_count = meta->zm_chunk_len;
7482 oob_guard = meta->zm_guarded;
7483
7484 if (meta->zm_alloc_size) {
7485 zone_metadata_corruption(z, meta, "alloc_size");
7486 }
7487 if (z->z_percpu) {
7488 if (page_count != 1) {
7489 zone_metadata_corruption(z, meta, "page_count");
7490 }
7491 size_to_free = ptoa(z->z_chunk_pages);
7492 zone_remove_wired_pages(z, z->z_chunk_pages);
7493 } else {
7494 if (page_count > z->z_chunk_pages) {
7495 zone_metadata_corruption(z, meta, "page_count");
7496 }
7497 if (page_count < z->z_chunk_pages) {
7498 /* Dequeue non populated VA from z_pageq_va */
7499 zone_meta_remqueue(z, meta + page_count);
7500 }
7501 size_to_free = ptoa(page_count);
7502 zone_remove_wired_pages(z, page_count);
7503 }
7504
7505 zone_counter_sub(z, z_elems_free, free_count);
7506 zone_counter_sub(z, z_elems_avail, free_count);
7507 zone_counter_sub(z, z_wired_empty, page_count);
7508 zone_counter_sub(z, z_wired_cur, page_count);
7509
7510 if (z->z_pcpu_cache == NULL) {
7511 if (z->z_elems_free_min < free_count) {
7512 z->z_elems_free_min = 0;
7513 } else {
7514 z->z_elems_free_min -= free_count;
7515 }
7516 }
7517 if (z->z_elems_free_wma < free_count) {
7518 z->z_elems_free_wma = 0;
7519 } else {
7520 z->z_elems_free_wma -= free_count;
7521 }
7522
7523 bitmap_ref = 0;
7524 if (sequester) {
7525 if (meta->zm_inline_bitmap) {
7526 for (int i = 0; i < meta->zm_chunk_len; i++) {
7527 meta[i].zm_bitmap = 0;
7528 }
7529 } else {
7530 bitmap_ref = meta->zm_bitmap;
7531 meta->zm_bitmap = 0;
7532 }
7533 meta->zm_chunk_len = 0;
7534 } else {
7535 if (!meta->zm_inline_bitmap) {
7536 bitmap_ref = meta->zm_bitmap;
7537 }
7538 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7539 bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7540 }
7541
7542 #if CONFIG_ZLEAKS
7543 if (__improbable(zleak_should_disable_for_zone(z) &&
7544 startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7545 thread_call_enter(&zone_leaks_callout);
7546 }
7547 #endif /* CONFIG_ZLEAKS */
7548
7549 zone_unlock(z);
7550
7551 if (bitmap_ref) {
7552 zone_bits_free(bitmap_ref);
7553 }
7554
7555 /* Free the pages for metadata and account for them */
7556 #if KASAN_CLASSIC
7557 if (z->z_percpu) {
7558 for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7559 kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7560 zone_elem_outer_size(z),
7561 zone_elem_outer_offs(z),
7562 zone_elem_redzone(z));
7563 }
7564 } else {
7565 kasan_zmem_remove(page_addr, size_to_free,
7566 zone_elem_outer_size(z),
7567 zone_elem_outer_offs(z),
7568 zone_elem_redzone(z));
7569 }
7570 #endif /* KASAN_CLASSIC */
7571
7572 if (sequester) {
7573 kma_flags_t flags = zone_kma_flags(z, zsflags, 0) | KMA_KOBJECT;
7574 kernel_memory_depopulate(page_addr, size_to_free,
7575 flags, VM_KERN_MEMORY_ZONE);
7576 } else {
7577 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7578 kmem_free(zone_submap(zsflags), page_addr,
7579 ptoa(z->z_chunk_pages + oob_guard));
7580 if (oob_guard) {
7581 os_atomic_dec(&zone_guard_pages, relaxed);
7582 }
7583 }
7584
7585 thread_yield_to_preemption();
7586
7587 zone_lock(z);
7588
7589 if (sequester) {
7590 zone_meta_queue_push(z, &z->z_pageq_va, meta);
7591 }
7592 }
7593
7594 static void
7595 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7596 {
7597 z_debug_assert(n <= zc_mag_size());
7598
7599 for (uint16_t i = 0; i < n; i++) {
7600 vm_offset_t addr = elems[i];
7601 elems[i] = 0;
7602 zfree_drop(z, addr);
7603 }
7604
7605 z->z_elems_free += n;
7606 }
7607
7608 static void
7609 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7610 {
7611 z_debug_assert(n <= zc_mag_size());
7612 zone_cache_ops_t ops = zcache_ops[zid];
7613
7614 for (uint16_t i = 0; i < n; i++) {
7615 vm_offset_t addr = elems[i];
7616 elems[i] = 0;
7617 addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7618 ops->zc_op_free(zid, (void *)addr);
7619 }
7620
7621 os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7622 }
7623
7624 static void
7625 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7626 {
7627 zpercpu_foreach(zc, z->z_pcpu_cache) {
7628 zone_depot_lock(zc);
7629
7630 if (zc->zc_depot.zd_full > (target + 1) / 2) {
7631 uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7632 zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7633 }
7634
7635 if (zc->zc_depot.zd_empty > target / 2) {
7636 uint32_t n = zc->zc_depot.zd_empty - target / 2;
7637 zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7638 }
7639
7640 zone_depot_unlock(zc);
7641 }
7642 }
7643
7644 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7645 ZONE_RECLAIM_TRIM,
7646 ZONE_RECLAIM_DRAIN,
7647 ZONE_RECLAIM_DESTROY,
7648 });
7649
7650 static void
7651 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7652 {
7653 uint32_t depot_max = 0;
7654 bool cleanup = mode != ZONE_RECLAIM_TRIM;
7655
7656 if (z->z_depot_cleanup) {
7657 z->z_depot_cleanup = false;
7658 depot_max = z->z_depot_size;
7659 cleanup = true;
7660 }
7661
7662 if (cleanup) {
7663 zone_depot_trim(z, depot_max, zd);
7664 }
7665
7666 if (mode == ZONE_RECLAIM_DESTROY) {
7667 zpercpu_foreach(zc, z->z_pcpu_cache) {
7668 zone_reclaim_elements(z, zc->zc_alloc_cur,
7669 zc->zc_alloc_elems);
7670 zone_reclaim_elements(z, zc->zc_free_cur,
7671 zc->zc_free_elems);
7672 zc->zc_alloc_cur = zc->zc_free_cur = 0;
7673 }
7674
7675 z->z_recirc_empty_min = 0;
7676 z->z_recirc_empty_wma = 0;
7677 z->z_recirc_full_min = 0;
7678 z->z_recirc_full_wma = 0;
7679 z->z_recirc_cont_cur = 0;
7680 z->z_recirc_cont_wma = 0;
7681 }
7682 }
7683
7684 static void
7685 zone_reclaim_recirc_drain(zone_t z, struct zone_depot *zd)
7686 {
7687 assert(zd->zd_empty == 0);
7688 assert(zd->zd_full == 0);
7689
7690 zone_recirc_lock_nopreempt(z);
7691
7692 *zd = z->z_recirc;
7693 if (zd->zd_full == 0) {
7694 zd->zd_tail = &zd->zd_head;
7695 }
7696 zone_depot_init(&z->z_recirc);
7697 z->z_recirc_empty_min = 0;
7698 z->z_recirc_empty_wma = 0;
7699 z->z_recirc_full_min = 0;
7700 z->z_recirc_full_wma = 0;
7701
7702 zone_recirc_unlock_nopreempt(z);
7703 }
7704
7705 static void
7706 zone_reclaim_recirc_trim(zone_t z, struct zone_depot *zd)
7707 {
7708 for (;;) {
7709 uint32_t budget = zc_free_batch_size();
7710 uint32_t count;
7711 bool done = true;
7712
7713 zone_recirc_lock_nopreempt(z);
7714 count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7715 z->z_recirc_empty_min);
7716 assert(count <= z->z_recirc.zd_empty);
7717
7718 if (count > budget) {
7719 count = budget;
7720 done = false;
7721 }
7722 if (count) {
7723 budget -= count;
7724 zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7725 z->z_recirc_empty_min -= count;
7726 z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7727 }
7728
7729 count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT,
7730 z->z_recirc_full_min);
7731 assert(count <= z->z_recirc.zd_full);
7732
7733 if (count > budget) {
7734 count = budget;
7735 done = false;
7736 }
7737 if (count) {
7738 zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7739 z->z_recirc_full_min -= count;
7740 z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7741 }
7742
7743 zone_recirc_unlock_nopreempt(z);
7744
7745 if (done) {
7746 return;
7747 }
7748
7749 /*
7750 * If the number of magazines to reclaim is too large,
7751 * we might be keeping preemption disabled for too long.
7752 *
7753 * Drop and retake the lock to allow for preemption to occur.
7754 */
7755 zone_unlock(z);
7756 zone_lock(z);
7757 }
7758 }
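/*
 * Illustrative note (hypothetical numbers): the trim path only reclaims
 * magazines the zone provably did not need recently, i.e. the smaller of
 * the decaying average and the observed minimum.  If the recirculation
 * depot never dropped below 12 empty magazines over the last period
 * (z_recirc_empty_min == 12) but the weighted moving average is only
 * 8 * Z_WMA_UNIT, then at most 8 empty magazines are detached per pass,
 * and each pass is bounded by zc_free_batch_size() so the zone lock is
 * dropped periodically to allow preemption.
 */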
7759
7760 /*!
7761 * @function zone_reclaim
7762 *
7763 * @brief
7764 * Drains or trims the zone.
7765 *
7766 * @discussion
7767 * Draining the zone frees all of its elements.
7768 *
7769 * Trimming the zone tries to respect the working set size, and avoids draining
7770 * the depot when it's not necessary.
7771 *
7772 * @param z The zone to reclaim from
7773 * @param mode The purpose of this reclaim.
7774 */
7775 static void
7776 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7777 {
7778 struct zone_depot zd;
7779
7780 zone_depot_init(&zd);
7781
7782 zone_lock(z);
7783
7784 if (mode == ZONE_RECLAIM_DESTROY) {
7785 if (!z->z_destructible || z->z_elems_rsv) {
7786 panic("zdestroy: Zone %s%s isn't destructible",
7787 zone_heap_name(z), z->z_name);
7788 }
7789
7790 if (!z->z_self || z->z_expander ||
7791 z->z_async_refilling || z->z_expanding_wait) {
7792 panic("zdestroy: Zone %s%s in an invalid state for destruction",
7793 zone_heap_name(z), z->z_name);
7794 }
7795
7796 #if !KASAN_CLASSIC
7797 /*
7798 * Unset the valid bit. We'll hit an assert failure on further
7799 * operations on this zone, until zinit() is called again.
7800 *
7801 * Leave the zone valid for KASan as we will see zfree's on
7802 * quarantined free elements even after the zone is destroyed.
7803 */
7804 z->z_self = NULL;
7805 #endif
7806 z->z_destroyed = true;
7807 } else if (z->z_destroyed) {
7808 return zone_unlock(z);
7809 } else if (zone_count_free(z) <= z->z_elems_rsv) {
7810 /* If the zone is under its reserve level, leave it alone. */
7811 return zone_unlock(z);
7812 }
7813
7814 if (z->z_pcpu_cache) {
7815 zone_magazine_t mag;
7816 uint32_t freed = 0;
7817
7818 /*
7819 * This is all done with the zone lock held on purpose.
7820 * The work here is O(ncpu), which should still be short.
7821 *
7822 * We need to keep the lock held until we have reclaimed
7823 * at least a few magazines, otherwise if the zone has no
7824 * free elements outside of the depot, a thread performing
7825 * a concurrent allocation could try to grow the zone
7826 * while we're trying to drain it.
7827 */
7828 if (mode == ZONE_RECLAIM_TRIM) {
7829 zone_reclaim_recirc_trim(z, &zd);
7830 } else {
7831 zone_reclaim_recirc_drain(z, &zd);
7832 }
7833 zone_reclaim_pcpu(z, mode, &zd);
7834
7835 if (z->z_chunk_elems) {
7836 zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7837 smr_t smr = zone_cache_smr(cache);
7838
7839 while (zd.zd_full) {
7840 mag = zone_depot_pop_head_full(&zd, NULL);
7841 if (smr) {
7842 smr_wait(smr, mag->zm_seq);
7843 zalloc_cached_reuse_smr(z, cache, mag);
7844 freed += zc_mag_size();
7845 }
7846 zone_reclaim_elements(z, zc_mag_size(),
7847 mag->zm_elems);
7848 zone_depot_insert_head_empty(&zd, mag);
7849
7850 freed += zc_mag_size();
7851 if (freed >= zc_free_batch_size()) {
7852 zone_unlock(z);
7853 zone_magazine_free_list(&zd);
7854 thread_yield_to_preemption();
7855 zone_lock(z);
7856 freed = 0;
7857 }
7858 }
7859 } else {
7860 zone_id_t zid = zone_index(z);
7861
7862 zone_unlock(z);
7863
7864 assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7865
7866 while (zd.zd_full) {
7867 mag = zone_depot_pop_head_full(&zd, NULL);
7868 zcache_reclaim_elements(zid, zc_mag_size(),
7869 mag->zm_elems);
7870 zone_magazine_free(mag);
7871 }
7872
7873 goto cleanup;
7874 }
7875 }
7876
7877 while (!zone_pva_is_null(z->z_pageq_empty)) {
7878 struct zone_page_metadata *meta;
7879 uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7880
7881 if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7882 limit = MAX(limit, z->z_elems_free -
7883 MIN(z->z_elems_free_min, z->z_elems_free_wma / Z_WMA_UNIT));
7884 }
7885
7886 meta = zone_pva_to_meta(z->z_pageq_empty);
7887 count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7888
7889 if (zone_count_free(z) - count < limit) {
7890 break;
7891 }
7892
7893 zone_reclaim_chunk(z, meta, count);
7894 }
7895
7896 zone_unlock(z);
7897
7898 cleanup:
7899 zone_magazine_free_list(&zd);
7900 }
7901
7902 void
7903 zone_drain(zone_t zone)
7904 {
7905 current_thread()->options |= TH_OPT_ZONE_PRIV;
7906 lck_mtx_lock(&zone_gc_lock);
7907 zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7908 lck_mtx_unlock(&zone_gc_lock);
7909 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7910 }
7911
7912 void
7913 zcache_drain(zone_id_t zid)
7914 {
7915 zone_drain(zone_by_id(zid));
7916 }
7917
7918 static void
7919 zone_reclaim_all(zone_reclaim_mode_t mode)
7920 {
7921 /*
7922 * Start with zcaches, so that they flow into the regular zones.
7923 *
7924 * Then the zones with VA sequester since depopulating
7925 * pages will not need to allocate vm map entries for holes,
7926 * which will give memory back to the system faster.
7927 */
7928 for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7929 zone_t z = zone_by_id(zid);
7930
7931 if (z->z_self && z->z_chunk_elems == 0) {
7932 zone_reclaim(z, mode);
7933 }
7934 }
7935 zone_index_foreach(zid) {
7936 zone_t z = zone_by_id(zid);
7937
7938 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7939 continue;
7940 }
7941 if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7942 z->collectable) {
7943 zone_reclaim(z, mode);
7944 }
7945 }
7946
7947 zone_index_foreach(zid) {
7948 zone_t z = zone_by_id(zid);
7949
7950 if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7951 continue;
7952 }
7953 if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7954 z->collectable) {
7955 zone_reclaim(z, mode);
7956 }
7957 }
7958
7959 zone_reclaim(zc_magazine_zone, mode);
7960 }
7961
7962 void
7963 zone_userspace_reboot_checks(void)
7964 {
7965 vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7966 if (label_zone_size != 0) {
7967 panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7968 ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7969 }
7970 }
7971
7972 void
7973 zone_gc(zone_gc_level_t level)
7974 {
7975 zone_reclaim_mode_t mode;
7976 zone_t largest_zone = NULL;
7977
7978 switch (level) {
7979 case ZONE_GC_TRIM:
7980 mode = ZONE_RECLAIM_TRIM;
7981 break;
7982 case ZONE_GC_DRAIN:
7983 mode = ZONE_RECLAIM_DRAIN;
7984 break;
7985 case ZONE_GC_JETSAM:
7986 largest_zone = kill_process_in_largest_zone();
7987 mode = ZONE_RECLAIM_TRIM;
7988 break;
7989 }
7990
7991 current_thread()->options |= TH_OPT_ZONE_PRIV;
7992 lck_mtx_lock(&zone_gc_lock);
7993
7994 zone_reclaim_all(mode);
7995
7996 if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7997 /*
7998 * If we possibly killed a process, but we're still critical,
7999 * we need to drain harder.
8000 */
8001 zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
8002 zone_reclaim_all(ZONE_RECLAIM_DRAIN);
8003 }
8004
8005 lck_mtx_unlock(&zone_gc_lock);
8006 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
8007 }
8008
8009 void
8010 zone_gc_trim(void)
8011 {
8012 zone_gc(ZONE_GC_TRIM);
8013 }
8014
8015 void
8016 zone_gc_drain(void)
8017 {
8018 zone_gc(ZONE_GC_DRAIN);
8019 }
8020
8021 static bool
8022 zone_trim_needed(zone_t z)
8023 {
8024 if (z->z_depot_cleanup) {
8025 return true;
8026 }
8027
8028 if (z->z_async_refilling) {
8029 /* Don't fight with refill */
8030 return false;
8031 }
8032
8033 if (z->z_pcpu_cache) {
8034 uint32_t e_n, f_n;
8035
8036 e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
8037 f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
8038
8039 if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
8040 return true;
8041 }
8042
8043 if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
8044 f_n * zc_mag_size() * zone_elem_inner_size(z) >
8045 zc_autotrim_size() * Z_WMA_UNIT) {
8046 return true;
8047 }
8048
8049 return false;
8050 }
8051
8052 if (!zone_pva_is_null(z->z_pageq_empty)) {
8053 uint32_t n;
8054
8055 n = MIN(z->z_elems_free_wma / Z_WMA_UNIT, z->z_elems_free_min);
8056
8057 return n >= z->z_elems_rsv + z->z_chunk_elems;
8058 }
8059
8060 return false;
8061 }
8062
8063 static void
8064 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
8065 {
8066 current_thread()->options |= TH_OPT_ZONE_PRIV;
8067
8068 zone_foreach(z) {
8069 if (!z->collectable || z == zc_magazine_zone) {
8070 continue;
8071 }
8072
8073 if (zone_trim_needed(z)) {
8074 lck_mtx_lock(&zone_gc_lock);
8075 zone_reclaim(z, ZONE_RECLAIM_TRIM);
8076 lck_mtx_unlock(&zone_gc_lock);
8077 }
8078 }
8079
8080 if (zone_trim_needed(zc_magazine_zone)) {
8081 lck_mtx_lock(&zone_gc_lock);
8082 zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
8083 lck_mtx_unlock(&zone_gc_lock);
8084 }
8085
8086 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
8087 }
8088
8089 void
8090 compute_zone_working_set_size(__unused void *param)
8091 {
8092 uint32_t zc_auto = zc_enable_level();
8093 bool needs_trim = false;
8094
8095 /*
8096 * Keep zone caching disabled until the first proc is made.
8097 */
8098 if (__improbable(zone_caching_disabled < 0)) {
8099 return;
8100 }
8101
8102 zone_caching_disabled = vm_pool_low();
8103
8104 if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
8105 zc_auto = 0;
8106 }
8107
8108 zone_foreach(z) {
8109 uint32_t old, wma, cur;
8110 bool needs_caching = false;
8111
8112 if (z->z_self != z) {
8113 continue;
8114 }
8115
8116 zone_lock(z);
8117
8118 zone_recirc_lock_nopreempt(z);
8119
8120 if (z->z_pcpu_cache) {
8121 wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
8122 z->z_recirc_empty_min = z->z_recirc.zd_empty;
8123 z->z_recirc_empty_wma = wma;
8124 } else {
8125 wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
8126 z->z_elems_free_min = z->z_elems_free;
8127 z->z_elems_free_wma = wma;
8128 }
8129
8130 wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
8131 z->z_recirc_full_min = z->z_recirc.zd_full;
8132 z->z_recirc_full_wma = wma;
8133
8134 /* fixed point decimal of contentions per second */
8135 old = z->z_recirc_cont_wma;
8136 cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
8137 (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
8138 cur = (3 * old + cur) / 4;
8139 zone_recirc_unlock_nopreempt(z);
8140
8141 if (z->z_pcpu_cache) {
8142 uint16_t size = z->z_depot_size;
8143
8144 if (zone_exhausted(z)) {
8145 if (z->z_depot_size) {
8146 z->z_depot_size = 0;
8147 z->z_depot_cleanup = true;
8148 }
8149 } else if (size < z->z_depot_limit && cur > zc_grow_level()) {
8150 /*
8151 * lose history on purpose now
8152 * that we just grew, to give
8153 * the system time to adjust.
8154 */
8155 cur = (zc_grow_level() + zc_shrink_level()) / 2;
8156 size = size ? (3 * size + 2) / 2 : 2;
8157 z->z_depot_size = MIN(z->z_depot_limit, size);
8158 } else if (size > 0 && cur <= zc_shrink_level()) {
8159 /*
8160 * lose history on purpose now
8161 * that we just shrunk, to give
8162 * the system time to adjust.
8163 */
8164 cur = (zc_grow_level() + zc_shrink_level()) / 2;
8165 z->z_depot_size = size - 1;
8166 z->z_depot_cleanup = true;
8167 }
8168 } else if (!z->z_nocaching && !zone_exhaustible(z) && zc_auto &&
8169 old >= zc_auto && cur >= zc_auto) {
8170 needs_caching = true;
8171 }
8172
8173 z->z_recirc_cont_wma = cur;
8174 z->z_recirc_cont_cur = 0;
8175
8176 if (!needs_trim && zone_trim_needed(z)) {
8177 needs_trim = true;
8178 }
8179
8180 zone_unlock(z);
8181
8182 if (needs_caching) {
8183 zone_enable_caching(z);
8184 }
8185 }
8186
8187 if (needs_trim) {
8188 thread_call_enter(&zone_trim_callout);
8189 }
8190 }
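/*
 * Illustrative note (hypothetical values): z_recirc_cont_wma is a
 * 3/4-weighted moving average of the contention rate, kept in Z_WMA_UNIT
 * fixed point.  If the previous average was 8 and the current period
 * measured 16, the new value is
 *
 *     (3 * 8 + 16) / 4 = 10
 *
 * so one busy period only moves the depot sizing decision part of the way,
 * with zc_grow_level() and zc_shrink_level() acting as hysteresis
 * thresholds around that average.
 */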
8191
8192 #endif /* !ZALLOC_TEST */
8193 #pragma mark vm integration, MIG routines
8194 #if !ZALLOC_TEST
8195
8196 extern unsigned int stack_total;
8197 #if defined (__x86_64__)
8198 extern unsigned int inuse_ptepages_count;
8199 #endif
8200
8201 static const char *
8202 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
8203 bool is_kt_var)
8204 {
8205 if (is_kt_var) {
8206 next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
8207 return cur.ktv_var->kt_name;
8208 } else {
8209 next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
8210 return cur.ktv_fixed->kt_zv.zv_name;
8211 }
8212 }
8213
8214 static void
8215 panic_print_types_in_zone(zone_t z, const char* debug_str)
8216 {
8217 kalloc_type_views_t kt_cur = {};
8218 const char *prev_type = "";
8219 size_t skip_over_site = sizeof("site.") - 1;
8220 zone_security_flags_t zsflags = zone_security_config(z);
8221 bool is_kt_var = false;
8222
8223 if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8224 uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
8225 kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
8226 kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
8227 is_kt_var = true;
8228 } else {
8229 kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
8230 }
8231
8232 paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
8233 is_kt_var? "type arrays" : "types", debug_str, z->z_name);
8234
8235 while (kt_cur.ktv_fixed) {
8236 kalloc_type_views_t kt_next = {};
8237 const char *typename = panic_print_get_typename(kt_cur, &kt_next,
8238 is_kt_var) + skip_over_site;
8239 if (strcmp(typename, prev_type) != 0) {
8240 paniclog_append_noflush("\t%-50s\n", typename);
8241 prev_type = typename;
8242 }
8243 kt_cur = kt_next;
8244 }
8245 paniclog_append_noflush("\n");
8246 }
8247
8248 static void
8249 panic_display_kalloc_types(void)
8250 {
8251 if (kalloc_type_src_zone) {
8252 panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
8253 }
8254 if (kalloc_type_dst_zone) {
8255 panic_print_types_in_zone(kalloc_type_dst_zone,
8256 "addr is being freed to");
8257 }
8258 }
8259
8260 static void
8261 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
8262 uint64_t *zone_size)
8263 {
8264 zone_index_foreach(zid) {
8265 zone_t z = &zone_array[zid];
8266 vm_offset_t size = zone_size_wired(z);
8267
8268 if (zid == ZONE_ID_VM_PAGES) {
8269 continue;
8270 }
8271 for (uint32_t i = 0; i < n; i++) {
8272 if (size > zone_size[i]) {
8273 largest_zones[i] = z;
8274 zone_size[i] = size;
8275 break;
8276 }
8277 }
8278 }
8279 }
8280
8281 #define NUM_LARGEST_ZONES 5
8282 static void
8283 panic_display_largest_zones(void)
8284 {
8285 zone_t largest_zones[NUM_LARGEST_ZONES] = { NULL };
8286 uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
8287
8288 zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
8289 (uint64_t *) &largest_size);
8290
8291 paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
8292 "Zone Name", "Cur Size", "Free Size");
8293 for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
8294 zone_t z = largest_zones[i];
8295 paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
8296 zone_heap_name(z), z->z_name,
8297 mach_vm_size_pretty(largest_size[i]),
8298 mach_vm_size_unit(largest_size[i]),
8299 mach_vm_size_pretty(zone_size_free(z)),
8300 mach_vm_size_unit(zone_size_free(z)));
8301 }
8302 }
8303
8304 static void
8305 panic_display_zprint(void)
8306 {
8307 panic_display_largest_zones();
8308 paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
8309 (uintptr_t)(kernel_stack_size * stack_total));
8310 #if defined (__x86_64__)
8311 paniclog_append_noflush("%-20s %10lu\n", "PageTables",
8312 (uintptr_t)ptoa(inuse_ptepages_count));
8313 #endif
8314 paniclog_append_noflush("%-20s %10llu\n", "Kalloc.Large",
8315 counter_load(&kalloc_large_total));
8316
8317 if (panic_kext_memory_info) {
8318 mach_memory_info_t *mem_info = panic_kext_memory_info;
8319
8320 paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
8321 for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
8322 if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
8323 continue;
8324 }
8325 if (mem_info[i].size > (1024 * 1024)) {
8326 paniclog_append_noflush("%-5lld %10lld\n",
8327 mem_info[i].site, mem_info[i].size);
8328 }
8329 }
8330 }
8331 }
8332
8333 static void
8334 panic_display_zone_info(void)
8335 {
8336 paniclog_append_noflush("Zone info:\n");
8337 paniclog_append_noflush(" Zone map: %p - %p\n",
8338 (void *)zone_info.zi_map_range.min_address,
8339 (void *)zone_info.zi_map_range.max_address);
8340 #if CONFIG_PROB_GZALLOC
8341 if (pgz_submap) {
8342 paniclog_append_noflush(" . PGZ : %p - %p\n",
8343 (void *)pgz_submap->min_offset,
8344 (void *)pgz_submap->max_offset);
8345 }
8346 #endif /* CONFIG_PROB_GZALLOC */
8347 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8348 vm_map_t map = zone_submaps[i];
8349
8350 if (map == VM_MAP_NULL) {
8351 continue;
8352 }
8353 paniclog_append_noflush(" . %-6s: %p - %p\n",
8354 zone_submaps_names[i],
8355 (void *)map->min_offset,
8356 (void *)map->max_offset);
8357 }
8358 paniclog_append_noflush(" Metadata: %p - %p\n"
8359 " Bitmaps : %p - %p\n"
8360 " Extra : %p - %p\n"
8361 "\n",
8362 (void *)zone_info.zi_meta_range.min_address,
8363 (void *)zone_info.zi_meta_range.max_address,
8364 (void *)zone_info.zi_bits_range.min_address,
8365 (void *)zone_info.zi_bits_range.max_address,
8366 (void *)zone_info.zi_xtra_range.min_address,
8367 (void *)zone_info.zi_xtra_range.max_address);
8368 }
8369
8370 static void
8371 panic_display_zone_fault(vm_offset_t addr)
8372 {
8373 struct zone_page_metadata meta = { };
8374 vm_map_t map = VM_MAP_NULL;
8375 vm_offset_t oob_offs = 0, size = 0;
8376 int map_idx = -1;
8377 zone_t z = NULL;
8378 const char *kind = "wild deref";
8379 bool oob = false;
8380
8381 /*
8382 * First: look if we bumped into guard pages between submaps
8383 */
8384 for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8385 map = zone_submaps[i];
8386 if (map == VM_MAP_NULL) {
8387 continue;
8388 }
8389
8390 if (addr >= map->min_offset && addr < map->max_offset) {
8391 map_idx = i;
8392 break;
8393 }
8394 }
8395
8396 if (map_idx == -1) {
8397 /* this really shouldn't happen, submaps are back to back */
8398 return;
8399 }
8400
8401 paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8402
8403 /*
8404 * Second: look if there's just no metadata at all
8405 */
8406 if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8407 (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8408 meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8409 zone_array[meta.zm_index].z_self == NULL) {
8410 paniclog_append_noflush(" Zone : <unknown>\n");
8411 kind = "wild deref, missing or invalid metadata";
8412 } else {
8413 z = &zone_array[meta.zm_index];
8414 paniclog_append_noflush(" Zone : %s%s\n",
8415 zone_heap_name(z), zone_name(z));
8416 if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8417 kind = "out-of-bounds (high confidence)";
8418 oob = true;
8419 size = zone_element_size((void *)addr,
8420 &z, false, &oob_offs);
8421 } else {
8422 kind = "use-after-free (medium confidence)";
8423 }
8424 }
8425
8426 paniclog_append_noflush(" Address : %p\n", (void *)addr);
8427 if (oob) {
8428 paniclog_append_noflush(" Element : [%p, %p) of size %d\n",
8429 (void *)(trunc_page(addr) - (size - oob_offs)),
8430 (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8431 }
8432 paniclog_append_noflush(" Submap : %s [%p; %p)\n",
8433 zone_submaps_names[map_idx],
8434 (void *)map->min_offset, (void *)map->max_offset);
8435 paniclog_append_noflush(" Kind : %s\n", kind);
8436 if (oob) {
8437 paniclog_append_noflush(" Access : %d byte(s) past\n",
8438 (uint32_t)(addr & PAGE_MASK) + 1);
8439 }
8440 paniclog_append_noflush(" Metadata: zid:%d inl:%d cl:0x%x "
8441 "0x%04x 0x%08x 0x%08x 0x%08x\n",
8442 meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8443 meta.zm_alloc_size, meta.zm_bitmap,
8444 meta.zm_page_next.packed_address,
8445 meta.zm_page_prev.packed_address);
8446 paniclog_append_noflush("\n");
8447 }
8448
8449 void
8450 panic_display_zalloc(void)
8451 {
8452 bool keepsyms = false;
8453
8454 PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8455
8456 panic_display_zone_info();
8457
8458 if (panic_fault_address) {
8459 #if CONFIG_PROB_GZALLOC
8460 if (pgz_owned(panic_fault_address)) {
8461 panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8462 } else
8463 #endif /* CONFIG_PROB_GZALLOC */
8464 if (zone_maps_owned(panic_fault_address, 1)) {
8465 panic_display_zone_fault(panic_fault_address);
8466 }
8467 }
8468
8469 if (panic_include_zprint) {
8470 panic_display_zprint();
8471 } else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8472 panic_display_largest_zones();
8473 }
8474 #if CONFIG_ZLEAKS
8475 if (zleak_active) {
8476 panic_display_zleaks(keepsyms);
8477 }
8478 #endif
8479 if (panic_include_kalloc_types) {
8480 panic_display_kalloc_types();
8481 }
8482 }
8483
8484 /*
8485 * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8486 * requesting zone information.
8487 * Frees unused pages towards the end of the region, and zero'es out unused
8488 * space on the last page.
8489 */
8490 static vm_map_copy_t
8491 create_vm_map_copy(
8492 vm_offset_t start_addr,
8493 vm_size_t total_size,
8494 vm_size_t used_size)
8495 {
8496 kern_return_t kr;
8497 vm_offset_t end_addr;
8498 vm_size_t free_size;
8499 vm_map_copy_t copy;
8500
8501 if (used_size != total_size) {
8502 end_addr = start_addr + used_size;
8503 free_size = total_size - (round_page(end_addr) - start_addr);
8504
8505 if (free_size >= PAGE_SIZE) {
8506 kmem_free(ipc_kernel_map,
8507 round_page(end_addr), free_size);
8508 }
8509 bzero((char *) end_addr, round_page(end_addr) - end_addr);
8510 }
8511
8512 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8513 (vm_map_size_t)used_size, TRUE, &copy);
8514 assert(kr == KERN_SUCCESS);
8515
8516 return copy;
8517 }
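/*
 * Illustrative example (hypothetical sizes, 16K pages): for a page-aligned
 * buffer at S with total_size == 3 pages and used_size == 0x6000,
 *
 *     end_addr             = S + 0x6000
 *     round_page(end_addr) = S + 0x8000
 *     free_size            = 0xC000 - 0x8000 = 0x4000
 *
 * so the third page is freed back to ipc_kernel_map, the unused second half
 * of the second page is zeroed, and only the used prefix is copied out via
 * vm_map_copyin().
 */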
8518
8519 static boolean_t
8520 get_zone_info(
8521 zone_t z,
8522 mach_zone_name_t *zn,
8523 mach_zone_info_t *zi)
8524 {
8525 struct zone zcopy;
8526 vm_size_t cached = 0;
8527
8528 assert(z != ZONE_NULL);
8529 zone_lock(z);
8530 if (!z->z_self) {
8531 zone_unlock(z);
8532 return FALSE;
8533 }
8534 zcopy = *z;
8535 if (z->z_pcpu_cache) {
8536 zpercpu_foreach(zc, z->z_pcpu_cache) {
8537 cached += zc->zc_alloc_cur + zc->zc_free_cur;
8538 cached += zc->zc_depot.zd_full * zc_mag_size();
8539 }
8540 }
8541 zone_unlock(z);
8542
8543 if (zn != NULL) {
8544 /*
8545 * Append kalloc heap name to zone name (if zone is used by kalloc)
8546 */
8547 char temp_zone_name[MAX_ZONE_NAME] = "";
8548 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8549 zone_heap_name(z), z->z_name);
8550
8551 /* assuming here the name data is static */
8552 (void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8553 strlen(temp_zone_name) + 1);
8554 }
8555
8556 if (zi != NULL) {
8557 *zi = (mach_zone_info_t) {
8558 .mzi_count = zone_count_allocated(&zcopy) - cached,
8559 .mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8560 // max_size for zprint is now high-watermark of pages used
8561 .mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8562 .mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8563 .mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8564 .mzi_exhaustible = (uint64_t)zone_exhaustible(&zcopy),
8565 };
8566 if (zcopy.z_chunk_pages == 0) {
8567 /* this is a zcache */
8568 zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8569 }
8570 zpercpu_foreach(zs, zcopy.z_stats) {
8571 zi->mzi_sum_size += zs->zs_mem_allocated;
8572 }
8573 if (zcopy.collectable) {
8574 SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8575 ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8576 SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8577 }
8578 }
8579
8580 return TRUE;
8581 }
8582
8583 /* mach_memory_info entitlement */
8584 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8585
8586 /* macro needed to rate-limit mach_memory_info */
8587 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8588
8589 /* declarations necessary to call kauth_cred_issuser() */
8590 struct ucred;
8591 extern int kauth_cred_issuser(struct ucred *);
8592 extern struct ucred *kauth_cred_get(void);
8593
8594 static kern_return_t
8595 mach_memory_info_internal(
8596 host_t host,
8597 mach_zone_name_array_t *namesp,
8598 mach_msg_type_number_t *namesCntp,
8599 mach_zone_info_array_t *infop,
8600 mach_msg_type_number_t *infoCntp,
8601 mach_memory_info_array_t *memoryInfop,
8602 mach_msg_type_number_t *memoryInfoCntp,
8603 bool redact_info);
8604
8605 static kern_return_t
8606 mach_memory_info_security_check(bool redact_info)
8607 {
8608 /* If not root, only allow redacted calls. */
8609 if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8610 return KERN_NO_ACCESS;
8611 }
8612
8613 if (PE_srd_fused) {
8614 return KERN_SUCCESS;
8615 }
8616
8617 /* If does not have the memory entitlement, fail. */
8618 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8619 task_t task = current_task();
8620 if (task != kernel_task && !IOTaskHasEntitlement(task, MEMORYINFO_ENTITLEMENT)) {
8621 return KERN_DENIED;
8622 }
8623
8624 /*
8625 * On release non-mac arm devices, allow mach_memory_info
8626 * to be called twice per day per boot. memorymaintenanced
8627 * calls it once per day, which leaves room for a sysdiagnose.
8628 * Allow redacted version to be called without rate limit.
8629 */
8630
8631 if (!redact_info) {
8632 static uint64_t first_call = 0, second_call = 0;
8633 uint64_t now = 0;
8634 absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8635
8636 if (!first_call) {
8637 first_call = now;
8638 } else if (!second_call) {
8639 second_call = now;
8640 } else if (first_call + NSEC_DAY > now) {
8641 return KERN_DENIED;
8642 } else if (first_call + NSEC_DAY < now) {
8643 first_call = now;
8644 second_call = 0;
8645 }
8646 }
8647 #endif
8648
8649 return KERN_SUCCESS;
8650 }
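/*
 * Illustrative timeline for the rate limit above (hypothetical times since
 * boot): an unredacted call at t = 1h sets first_call, a second at t = 5h
 * sets second_call, and a third at t = 10h is denied because
 * first_call + NSEC_DAY is still in the future.  A call made more than 24h
 * after first_call resets the window and is allowed again.
 */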
8651
8652 kern_return_t
8653 mach_zone_info(
8654 mach_port_t host_port,
8655 mach_zone_name_array_t *namesp,
8656 mach_msg_type_number_t *namesCntp,
8657 mach_zone_info_array_t *infop,
8658 mach_msg_type_number_t *infoCntp)
8659 {
8660 return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8661 }
8662
8663 kern_return_t
8664 mach_memory_info(
8665 mach_port_t host_port,
8666 mach_zone_name_array_t *namesp,
8667 mach_msg_type_number_t *namesCntp,
8668 mach_zone_info_array_t *infop,
8669 mach_msg_type_number_t *infoCntp,
8670 mach_memory_info_array_t *memoryInfop,
8671 mach_msg_type_number_t *memoryInfoCntp)
8672 {
8673 bool redact_info = false;
8674 host_t host = HOST_NULL;
8675
8676 host = convert_port_to_host_priv(host_port);
8677 if (host == HOST_NULL) {
8678 redact_info = true;
8679 host = convert_port_to_host(host_port);
8680 }
8681
8682 return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8683 }
8684
8685 static void
8686 zone_info_redact(mach_zone_info_t *zi)
8687 {
8688 zi->mzi_cur_size = 0;
8689 zi->mzi_max_size = 0;
8690 zi->mzi_alloc_size = 0;
8691 zi->mzi_sum_size = 0;
8692 zi->mzi_collectable = 0;
8693 }
8694
8695 static bool
8696 zone_info_needs_to_be_coalesced(int zone_index)
8697 {
8698 zone_security_flags_t zsflags = zone_security_array[zone_index];
8699 if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8700 return true;
8701 }
8702 return false;
8703 }
8704
8705 static bool
8706 zone_info_find_coalesce_zone(
8707 mach_zone_info_t *zi,
8708 mach_zone_info_t *info,
8709 int *coalesce,
8710 int coalesce_count,
8711 int *coalesce_index)
8712 {
8713 for (int i = 0; i < coalesce_count; i++) {
8714 if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8715 *coalesce_index = coalesce[i];
8716 return true;
8717 }
8718 }
8719
8720 return false;
8721 }
8722
8723 static void
8724 zone_info_coalesce(
8725 mach_zone_info_t *info,
8726 int coalesce_index,
8727 mach_zone_info_t *zi)
8728 {
8729 info[coalesce_index].mzi_count += zi->mzi_count;
8730 }
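/*
 * Illustrative example (hypothetical zones): in the redacted sample below,
 * kalloc_type zones that share an element size are folded together.  If two
 * such zones both have 64-byte elements, the output contains a single
 * "kalloc.64" row whose mzi_count is the sum of both zones' counts, while
 * the size fields remain zeroed by zone_info_redact().
 */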
8731
8732 kern_return_t
8733 mach_memory_info_sample(
8734 mach_zone_name_t *names,
8735 mach_zone_info_t *info,
8736 int *coalesce,
8737 unsigned int *zonesCnt,
8738 mach_memory_info_t *memoryInfo,
8739 unsigned int memoryInfoCnt,
8740 bool redact_info)
8741 {
8742 int coalesce_count = 0;
8743 unsigned int max_zones, used_zones = 0;
8744 mach_zone_name_t *zn;
8745 mach_zone_info_t *zi;
8746 kern_return_t kr;
8747
8748 uint64_t zones_collectable_bytes = 0;
8749
8750 kr = mach_memory_info_security_check(redact_info);
8751 if (kr != KERN_SUCCESS) {
8752 return kr;
8753 }
8754
8755 max_zones = *zonesCnt;
8756
8757 bzero(names, max_zones * sizeof(*names));
8758 bzero(info, max_zones * sizeof(*info));
8759 if (redact_info) {
8760 bzero(coalesce, max_zones * sizeof(*coalesce));
8761 }
8762
8763 zn = &names[0];
8764 zi = &info[0];
8765
8766 zone_index_foreach(i) {
8767 if (used_zones >= max_zones) {
8768 break;
8769 }
8770
8771 if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8772 continue;
8773 }
8774
8775 if (!redact_info) {
8776 zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8777 zn++;
8778 zi++;
8779 used_zones++;
8780 continue;
8781 }
8782
8783 zone_info_redact(zi);
8784 if (!zone_info_needs_to_be_coalesced(i)) {
8785 zn++;
8786 zi++;
8787 used_zones++;
8788 continue;
8789 }
8790
8791 int coalesce_index;
8792 bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8793 coalesce, coalesce_count, &coalesce_index);
8794
8795 /* Didn't find a zone to coalesce */
8796 if (!found_coalesce_zone) {
8797 /* Updates the zone name */
8798 __nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8799 snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8800 (int)zi->mzi_elem_size);
8801
8802 coalesce[coalesce_count] = used_zones;
8803 coalesce_count++;
8804 zn++;
8805 zi++;
8806 used_zones++;
8807 continue;
8808 }
8809
8810 zone_info_coalesce(info, coalesce_index, zi);
8811 }
8812
8813 *zonesCnt = used_zones;
8814
8815 if (memoryInfo) {
8816 bzero(memoryInfo, memoryInfoCnt * sizeof(*memoryInfo));
8817 kr = vm_page_diagnose(memoryInfo, memoryInfoCnt, zones_collectable_bytes, redact_info);
8818 if (kr != KERN_SUCCESS) {
8819 return kr;
8820 }
8821 }
8822
8823 return kr;
8824 }
8825
8826 static kern_return_t
8827 mach_memory_info_internal(
8828 host_t host,
8829 mach_zone_name_array_t *namesp,
8830 mach_msg_type_number_t *namesCntp,
8831 mach_zone_info_array_t *infop,
8832 mach_msg_type_number_t *infoCntp,
8833 mach_memory_info_array_t *memoryInfop,
8834 mach_msg_type_number_t *memoryInfoCntp,
8835 bool redact_info)
8836 {
8837 mach_zone_name_t *names;
8838 vm_offset_t names_addr;
8839 vm_size_t names_size;
8840
8841 mach_zone_info_t *info;
8842 vm_offset_t info_addr;
8843 vm_size_t info_size;
8844
8845 int *coalesce;
8846 vm_offset_t coalesce_addr;
8847 vm_size_t coalesce_size;
8848
8849 mach_memory_info_t *memory_info = NULL;
8850 vm_offset_t memory_info_addr = 0;
8851 vm_size_t memory_info_size;
8852 vm_size_t memory_info_vmsize;
8853 vm_map_copy_t memory_info_copy;
8854 unsigned int num_info = 0;
8855
8856 unsigned int max_zones, used_zones;
8857 kern_return_t kr;
8858
8859 if (host == HOST_NULL) {
8860 return KERN_INVALID_HOST;
8861 }
8862
8863 /*
8864 * We assume that zones aren't freed once allocated.
8865 * We won't pick up any zones that are allocated later.
8866 */
8867
8868 max_zones = os_atomic_load(&num_zones, relaxed);
8869
8870 names_size = round_page(max_zones * sizeof *names);
8871 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8872 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8873 if (kr != KERN_SUCCESS) {
8874 return kr;
8875 }
8876 names = (mach_zone_name_t *) names_addr;
8877
8878 info_size = round_page(max_zones * sizeof *info);
8879 kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8880 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8881 if (kr != KERN_SUCCESS) {
8882 kmem_free(ipc_kernel_map,
8883 names_addr, names_size);
8884 return kr;
8885 }
8886 info = (mach_zone_info_t *) info_addr;
8887
8888 if (redact_info) {
8889 coalesce_size = round_page(max_zones * sizeof *coalesce);
8890 kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8891 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8892 if (kr != KERN_SUCCESS) {
8893 kmem_free(ipc_kernel_map,
8894 names_addr, names_size);
8895 kmem_free(ipc_kernel_map,
8896 info_addr, info_size);
8897 return kr;
8898 }
8899 coalesce = (int *)coalesce_addr;
8900 }
8901
8902 if (memoryInfop && memoryInfoCntp) {
8903 num_info = vm_page_diagnose_estimate();
8904 memory_info_size = num_info * sizeof(*memory_info);
8905 memory_info_vmsize = round_page(memory_info_size);
8906 kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8907 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8908 if (kr != KERN_SUCCESS) {
8909 return kr;
8910 }
8911
8912 kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8913 VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8914 assert(kr == KERN_SUCCESS);
8915
8916 memory_info = (mach_memory_info_t *) memory_info_addr;
8917 }
8918
8919 used_zones = max_zones;
8920 mach_memory_info_sample(names, info, coalesce, &used_zones, memory_info, num_info, redact_info);
8921
8922 if (redact_info) {
8923 kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8924 }
8925
8926 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8927 *namesCntp = used_zones;
8928
8929 *infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8930 *infoCntp = used_zones;
8931
8932 if (memoryInfop && memoryInfoCntp) {
8933 kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8934 assert(kr == KERN_SUCCESS);
8935
8936 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8937 (vm_map_size_t)memory_info_size, TRUE, &memory_info_copy);
8938 assert(kr == KERN_SUCCESS);
8939
8940 *memoryInfop = (mach_memory_info_t *) memory_info_copy;
8941 *memoryInfoCntp = num_info;
8942 }
8943
8944 return KERN_SUCCESS;
8945 }
8946
8947 kern_return_t
8948 mach_zone_info_for_zone(
8949 host_priv_t host,
8950 mach_zone_name_t name,
8951 mach_zone_info_t *infop)
8952 {
8953 zone_t zone_ptr;
8954
8955 if (host == HOST_NULL) {
8956 return KERN_INVALID_HOST;
8957 }
8958
8959 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8960 if (!PE_i_can_has_debugger(NULL)) {
8961 return KERN_INVALID_HOST;
8962 }
8963 #endif
8964
8965 if (infop == NULL) {
8966 return KERN_INVALID_ARGUMENT;
8967 }
8968
8969 zone_ptr = ZONE_NULL;
8970 zone_foreach(z) {
8971 /*
8972 * Append kalloc heap name to zone name (if zone is used by kalloc)
8973 */
8974 char temp_zone_name[MAX_ZONE_NAME] = "";
8975 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8976 zone_heap_name(z), z->z_name);
8977
8978 /* Find the requested zone by name */
8979 if (track_this_zone(temp_zone_name, name.mzn_name)) {
8980 zone_ptr = z;
8981 break;
8982 }
8983 }
8984
8985 /* No zones found with the requested zone name */
8986 if (zone_ptr == ZONE_NULL) {
8987 return KERN_INVALID_ARGUMENT;
8988 }
8989
8990 if (get_zone_info(zone_ptr, NULL, infop)) {
8991 return KERN_SUCCESS;
8992 }
8993 return KERN_FAILURE;
8994 }
8995
8996 kern_return_t
8997 mach_zone_info_for_largest_zone(
8998 host_priv_t host,
8999 mach_zone_name_t *namep,
9000 mach_zone_info_t *infop)
9001 {
9002 if (host == HOST_NULL) {
9003 return KERN_INVALID_HOST;
9004 }
9005
9006 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
9007 if (!PE_i_can_has_debugger(NULL)) {
9008 return KERN_INVALID_HOST;
9009 }
9010 #endif
9011
9012 if (namep == NULL || infop == NULL) {
9013 return KERN_INVALID_ARGUMENT;
9014 }
9015
9016 if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
9017 return KERN_SUCCESS;
9018 }
9019 return KERN_FAILURE;
9020 }
9021
9022 uint64_t
9023 get_zones_collectable_bytes(void)
9024 {
9025 uint64_t zones_collectable_bytes = 0;
9026 mach_zone_info_t zi;
9027
9028 zone_foreach(z) {
9029 if (get_zone_info(z, NULL, &zi)) {
9030 zones_collectable_bytes +=
9031 GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
9032 }
9033 }
9034
9035 return zones_collectable_bytes;
9036 }
9037
9038 kern_return_t
9039 mach_zone_get_zlog_zones(
9040 host_priv_t host,
9041 mach_zone_name_array_t *namesp,
9042 mach_msg_type_number_t *namesCntp)
9043 {
9044 #if ZALLOC_ENABLE_LOGGING
9045 unsigned int max_zones, logged_zones, i;
9046 kern_return_t kr;
9047 zone_t zone_ptr;
9048 mach_zone_name_t *names;
9049 vm_offset_t names_addr;
9050 vm_size_t names_size;
9051
9052 if (host == HOST_NULL) {
9053 return KERN_INVALID_HOST;
9054 }
9055
9056 if (namesp == NULL || namesCntp == NULL) {
9057 return KERN_INVALID_ARGUMENT;
9058 }
9059
9060 max_zones = os_atomic_load(&num_zones, relaxed);
9061
9062 names_size = round_page(max_zones * sizeof *names);
9063 kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
9064 KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
9065 if (kr != KERN_SUCCESS) {
9066 return kr;
9067 }
9068 names = (mach_zone_name_t *) names_addr;
9069
9070 zone_ptr = ZONE_NULL;
9071 logged_zones = 0;
9072 for (i = 0; i < max_zones; i++) {
9073 zone_t z = &(zone_array[i]);
9074 assert(z != ZONE_NULL);
9075
9076 /* Copy out the zone name if zone logging is enabled */
9077 if (z->z_btlog) {
9078 get_zone_info(z, &names[logged_zones], NULL);
9079 logged_zones++;
9080 }
9081 }
9082
9083 *namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
9084 *namesCntp = logged_zones;
9085
9086 return KERN_SUCCESS;
9087
9088 #else /* ZALLOC_ENABLE_LOGGING */
9089 #pragma unused(host, namesp, namesCntp)
9090 return KERN_FAILURE;
9091 #endif /* ZALLOC_ENABLE_LOGGING */
9092 }
9093
9094 kern_return_t
9095 mach_zone_get_btlog_records(
9096 host_priv_t host,
9097 mach_zone_name_t name,
9098 zone_btrecord_array_t *recsp,
9099 mach_msg_type_number_t *numrecs)
9100 {
9101 #if ZALLOC_ENABLE_LOGGING
9102 zone_btrecord_t *recs;
9103 kern_return_t kr;
9104 vm_address_t addr;
9105 vm_size_t size;
9106 zone_t zone_ptr;
9107 vm_map_copy_t copy;
9108
9109 if (host == HOST_NULL) {
9110 return KERN_INVALID_HOST;
9111 }
9112
9113 if (recsp == NULL || numrecs == NULL) {
9114 return KERN_INVALID_ARGUMENT;
9115 }
9116
9117 zone_ptr = ZONE_NULL;
9118 zone_foreach(z) {
9119 /*
9120 * Append kalloc heap name to zone name (if zone is used by kalloc)
9121 */
9122 char temp_zone_name[MAX_ZONE_NAME] = "";
9123 snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
9124 zone_heap_name(z), z->z_name);
9125
9126 /* Find the requested zone by name */
9127 if (track_this_zone(temp_zone_name, name.mzn_name)) {
9128 zone_ptr = z;
9129 break;
9130 }
9131 }
9132
9133 /* No zones found with the requested zone name */
9134 if (zone_ptr == ZONE_NULL) {
9135 return KERN_INVALID_ARGUMENT;
9136 }
9137
9138 /* Logging not turned on for the requested zone */
9139 if (!zone_ptr->z_btlog) {
9140 return KERN_FAILURE;
9141 }
9142
9143 kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
9144 if (kr != KERN_SUCCESS) {
9145 return kr;
9146 }
9147
9148 addr = (vm_address_t)recs;
9149 size = sizeof(zone_btrecord_t) * *numrecs;
9150
9151 kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
9152 assert(kr == KERN_SUCCESS);
9153
9154 *recsp = (zone_btrecord_t *)copy;
9155 return KERN_SUCCESS;
9156
9157 #else /* !ZALLOC_ENABLE_LOGGING */
9158 #pragma unused(host, name, recsp, numrecs)
9159 return KERN_FAILURE;
9160 #endif /* !ZALLOC_ENABLE_LOGGING */
9161 }
9162
9163
9164 kern_return_t
9165 mach_zone_force_gc(
9166 host_t host)
9167 {
9168 if (host == HOST_NULL) {
9169 return KERN_INVALID_HOST;
9170 }
9171
9172 #if DEBUG || DEVELOPMENT
9173 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
9174 /* Callout to buffer cache GC to drop elements in the apfs zones */
9175 if (consider_buffer_cache_collect != NULL) {
9176 (void)(*consider_buffer_cache_collect)(0);
9177 }
9178 zone_gc(ZONE_GC_DRAIN);
9179 #endif /* DEBUG || DEVELOPMENT */
9180 return KERN_SUCCESS;
9181 }
9182
9183 zone_t
9184 zone_find_largest(uint64_t *zone_size)
9185 {
9186 zone_t largest_zone = 0;
9187 uint64_t largest_zone_size = 0;
9188 zone_find_n_largest(1, &largest_zone, &largest_zone_size);
9189 if (zone_size) {
9190 *zone_size = largest_zone_size;
9191 }
9192 return largest_zone;
9193 }
9194
9195 void
9196 zone_get_stats(
9197 zone_t zone,
9198 struct zone_basic_stats *stats)
9199 {
9200 stats->zbs_avail = zone->z_elems_avail;
9201
9202 stats->zbs_alloc_fail = 0;
9203 zpercpu_foreach(zs, zone->z_stats) {
9204 stats->zbs_alloc_fail += zs->zs_alloc_fail;
9205 }
9206
9207 stats->zbs_cached = 0;
9208 if (zone->z_pcpu_cache) {
9209 zpercpu_foreach(zc, zone->z_pcpu_cache) {
9210 stats->zbs_cached += zc->zc_alloc_cur +
9211 zc->zc_free_cur +
9212 zc->zc_depot.zd_full * zc_mag_size();
9213 }
9214 }
9215
9216 stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
9217
9218 /*
9219 * Since we don't take any locks, deal with possible inconsistencies
9220 * as the counters may have changed.
9221 */
9222 if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
9223 &stats->zbs_alloc)) {
9224 stats->zbs_avail = stats->zbs_free;
9225 stats->zbs_alloc = 0;
9226 }
9227 }
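
/*
 * Note: the counters above are read without the zone lock, so zbs_free can
 * transiently exceed zbs_avail.  For example, a snapshot of zbs_avail = 100
 * and zbs_free = 30 yields zbs_alloc = 70; if a racing free momentarily makes
 * zbs_free = 103, os_sub_overflow() detects the underflow and the stats are
 * clamped to zbs_avail = zbs_free, zbs_alloc = 0 rather than wrapping around.
 */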
9228
9229 #endif /* !ZALLOC_TEST */
9230 #pragma mark zone creation, configuration, destruction
9231 #if !ZALLOC_TEST
9232
9233 static zone_t
9234 zone_init_defaults(zone_id_t zid)
9235 {
9236 zone_t z = &zone_array[zid];
9237
9238 z->z_wired_max = ~0u;
9239 z->collectable = true;
9240
9241 hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
9242 hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
9243 zone_depot_init(&z->z_recirc);
9244 return z;
9245 }
9246
9247 void
9248 zone_set_exhaustible(zone_t zone, vm_size_t nelems, bool exhausts_by_design)
9249 {
9250 zone_lock(zone);
9251 zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
9252 zone->z_exhausts = exhausts_by_design;
9253 zone_unlock(zone);
9254 }
9255
9256 void
9257 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
9258 {
9259 zone_t zone = zov.zov_zone;
9260
9261 if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
9262 zone = zov.zov_view->zv_zone;
9263 } else {
9264 zone = zov.zov_zone;
9265 }
9266
9267 os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
9268 }
9269
9270 /**
9271 * @function zone_create_find
9272 *
9273 * @abstract
9274 * Finds an unused zone for the given name and element size.
9275 *
9276 * @param name the zone name
9277 * @param size the element size (including redzones, ...)
9278 * @param flags the flags passed to @c zone_create*
9279 * @param zid_inout the desired zone ID or ZONE_ID_ANY
9280 *
9281 * @returns a zone to initialize further.
9282 */
9283 static zone_t
9284 zone_create_find(
9285 const char *name,
9286 vm_size_t size,
9287 zone_create_flags_t flags,
9288 zone_id_t *zid_inout)
9289 {
9290 zone_id_t nzones, zid = *zid_inout;
9291 zone_t z;
9292
9293 simple_lock(&all_zones_lock, &zone_locks_grp);
9294
9295 nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
9296 assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
9297
9298 if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
9299 /*
9300 * The first time around, make sure the reserved zone IDs
9301 * have an initialized lock as zone_index_foreach() will
9302 * enumerate them.
9303 */
9304 while (nzones < ZONE_ID__FIRST_DYNAMIC) {
9305 zone_init_defaults(nzones++);
9306 }
9307
9308 os_atomic_store(&num_zones, nzones, release);
9309 }
9310
9311 if (zid != ZONE_ID_ANY) {
9312 if (zid >= ZONE_ID__FIRST_DYNAMIC) {
9313 panic("zone_create: invalid desired zone ID %d for %s",
9314 zid, name);
9315 }
9316 if (flags & ZC_DESTRUCTIBLE) {
9317 panic("zone_create: ID %d (%s) must be permanent", zid, name);
9318 }
9319 if (zone_array[zid].z_self) {
9320 panic("zone_create: creating zone ID %d (%s) twice", zid, name);
9321 }
9322 z = &zone_array[zid];
9323 } else {
9324 if (flags & ZC_DESTRUCTIBLE) {
9325 /*
9326 * If possible, find a previously zdestroy'ed zone in the
9327 * zone_array that we can reuse.
9328 */
9329 for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
9330 i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
9331 z = &zone_array[i];
9332
9333 /*
9334 * If the zone name and the element size are the
9335 * same, we can just reuse the old zone struct.
9336 */
9337 if (strcmp(z->z_name, name) ||
9338 zone_elem_outer_size(z) != size) {
9339 continue;
9340 }
9341 bitmap_clear(zone_destroyed_bitmap, i);
9342 z->z_destroyed = false;
9343 z->z_self = z;
9344 zid = (zone_id_t)i;
9345 goto out;
9346 }
9347 }
9348
9349 zid = nzones++;
9350 z = zone_init_defaults(zid);
9351
9352 /*
9353 * The release barrier pairs with the acquire in
9354 * zone_index_foreach() and makes sure that enumeration loops
9355 * always see an initialized zone lock.
9356 */
9357 os_atomic_store(&num_zones, nzones, release);
9358 }
9359
9360 out:
9361 num_zones_in_use++;
9362 simple_unlock(&all_zones_lock);
9363
9364 *zid_inout = zid;
9365 return z;
9366 }
9367
9368 __abortlike
9369 static void
9370 zone_create_panic(const char *name, const char *f1, const char *f2)
9371 {
9372 panic("zone_create: creating zone %s: flag %s and %s are incompatible",
9373 name, f1, f2);
9374 }
9375 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
9376 if ((flags) & forbidden_flag) { \
9377 zone_create_panic(name, #current_flag, #forbidden_flag); \
9378 }
9379
9380 /*
9381 * Adjusts the size of the element based on minimum size, alignment
9382 * and kasan redzones
9383 */
9384 static vm_size_t
9385 zone_elem_adjust_size(
9386 const char *name __unused,
9387 vm_size_t elem_size,
9388 zone_create_flags_t flags __unused,
9389 uint16_t *redzone __unused)
9390 {
9391 vm_size_t size;
9392
9393 /*
9394 * Adjust element size for minimum size and pointer alignment
9395 */
9396 size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
9397 if (size < ZONE_MIN_ELEM_SIZE) {
9398 size = ZONE_MIN_ELEM_SIZE;
9399 }
9400
9401 #if KASAN_CLASSIC
9402 /*
9403 * Expand the zone allocation size to include the redzones.
9404 *
9405 * For page-multiple zones add a full guard page because they
9406 * likely require alignment.
9407 */
9408 uint16_t redzone_tmp;
9409 if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
9410 redzone_tmp = 0;
9411 } else if ((size & PAGE_MASK) == 0) {
9412 if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
9413 panic("zone_create: zone %s can't provide more than PAGE_SIZE "
9414 "alignment", name);
9415 }
9416 redzone_tmp = PAGE_SIZE;
9417 } else if (flags & ZC_ALIGNMENT_REQUIRED) {
9418 redzone_tmp = 0;
9419 } else {
9420 redzone_tmp = KASAN_GUARD_SIZE;
9421 }
9422 size += redzone_tmp;
9423 if (redzone) {
9424 *redzone = redzone_tmp;
9425 }
9426 #endif
9427 return size;
9428 }
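
/*
 * Worked example (illustrative values only; assumes ZONE_ALIGN_SIZE == 8 and
 * ZONE_MIN_ELEM_SIZE == 16, which may differ per configuration): an element
 * size of 13 rounds up to 16, and 20 rounds up to 24.  On KASAN_CLASSIC
 * kernels the result additionally grows by the redzone selected above
 * (a full guard page for page-multiple sizes).
 */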
9429
9430 /*
9431 * Returns the allocation chunk size that has the least fragmentation
9432 */
9433 static vm_size_t
9434 zone_get_min_alloc_granule(
9435 vm_size_t elem_size,
9436 zone_create_flags_t flags)
9437 {
9438 vm_size_t alloc_granule = PAGE_SIZE;
9439 if (flags & ZC_PERCPU) {
9440 alloc_granule = PAGE_SIZE * zpercpu_count();
9441 if (PAGE_SIZE % elem_size > 256) {
9442 panic("zone_create: per-cpu zone has too much fragmentation");
9443 }
9444 } else if (flags & ZC_READONLY) {
9445 alloc_granule = PAGE_SIZE;
9446 } else if ((elem_size & PAGE_MASK) == 0) {
9447 /* zero fragmentation by definition */
9448 alloc_granule = elem_size;
9449 } else if (alloc_granule % elem_size == 0) {
9450 /* zero fragmentation by definition */
9451 } else {
9452 vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9453 vm_size_t alloc_tmp = PAGE_SIZE;
9454 vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9455
9456 #if __arm64__
9457 /*
9458 * Increase chunk size to 48K for sizes larger than 4K on 16k
9459 * machines, so as to reduce internal fragmentation for kalloc
9460 * zones with sizes 12K and 24K.
9461 */
9462 if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9463 max_chunk_size = 48 * 1024;
9464 }
9465 #endif
9466 while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9467 vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9468 if (frag_tmp < frag) {
9469 frag = frag_tmp;
9470 alloc_granule = alloc_tmp;
9471 }
9472 }
9473 }
9474 return alloc_granule;
9475 }
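
/*
 * Worked example (assuming an arm64 configuration with 16K pages): for a
 * 12K element, a single 16K page wastes 4K (25% fragmentation); the loop
 * above then tries 32K (still 25%) and 48K, where 48K % 12K == 0, so the
 * chunk size settles on 48K with no waste.
 */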
9476
9477 vm_size_t
9478 zone_get_early_alloc_size(
9479 const char *name __unused,
9480 vm_size_t elem_size,
9481 zone_create_flags_t flags,
9482 vm_size_t min_elems)
9483 {
9484 vm_size_t adjusted_size, alloc_granule, chunk_elems;
9485
9486 adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9487 alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9488 chunk_elems = alloc_granule / adjusted_size;
9489
9490 return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9491 }
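
/*
 * Illustrative example: with an adjusted element size of 512 bytes and a
 * 16K allocation granule, chunk_elems is 32, so requesting at least 100
 * elements reserves ceil(100 / 32) == 4 chunks, i.e. 64K of early memory.
 */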
9492
9493 zone_t
9494 zone_create_ext(
9495 const char *name,
9496 vm_size_t size,
9497 zone_create_flags_t flags,
9498 zone_id_t zid,
9499 void (^extra_setup)(zone_t))
9500 {
9501 zone_security_flags_t *zsflags;
9502 uint16_t redzone;
9503 zone_t z;
9504
9505 if (size > ZONE_MAX_ALLOC_SIZE) {
9506 panic("zone_create: element size too large: %zd", (size_t)size);
9507 }
9508
9509 if (size < 2 * sizeof(vm_size_t)) {
9510 /* Elements are too small for kasan. */
9511 flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9512 }
9513
9514 size = zone_elem_adjust_size(name, size, flags, &redzone);
9515
9516 /*
9517 * Allocate the zone slot, return early if we found an older match.
9518 */
9519 z = zone_create_find(name, size, flags, &zid);
9520 if (__improbable(z->z_self)) {
9521 /* We found a zone to reuse */
9522 return z;
9523 }
9524 zsflags = &zone_security_array[zid];
9525
9526 /*
9527 * Initialize the zone properly.
9528 */
9529
9530 /*
9531 * If the kernel is post lockdown, copy the zone name passed in.
9532 * Else simply maintain a pointer to the name string as it can only
9533 * be a core XNU zone (no unloadable kext exists before lockdown).
9534 */
9535 if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9536 size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9537 char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9538 strlcpy(buf, name, nsz);
9539 z->z_name = buf;
9540 } else {
9541 z->z_name = name;
9542 }
9543 if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9544 z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9545 } else {
9546 /*
9547 * zone_init() hasn't run yet, use the storage provided by
9548 * zone_stats_startup(), and zone_init() will replace it
9549 * with the final value once the PERCPU zone exists.
9550 */
9551 z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9552 }
9553
9554 if (flags & ZC_OBJ_CACHE) {
9555 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9556 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9557 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9558 zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9559
9560 z->z_elem_size = (uint16_t)size;
9561 z->z_chunk_pages = 0;
9562 z->z_quo_magic = 0;
9563 z->z_align_magic = 0;
9564 z->z_chunk_elems = 0;
9565 z->z_elem_offs = 0;
9566 z->no_callout = true;
9567 zsflags->z_lifo = true;
9568 } else {
9569 vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9570
9571 z->z_elem_size = (uint16_t)(size - redzone);
9572 z->z_chunk_pages = (uint16_t)atop(alloc);
9573 z->z_quo_magic = Z_MAGIC_QUO(size);
9574 z->z_align_magic = Z_MAGIC_ALIGNED(size);
9575 if (flags & ZC_PERCPU) {
9576 z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9577 z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9578 } else {
9579 z->z_chunk_elems = (uint16_t)(alloc / size);
9580 z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9581 }
9582 }
9583
9584 /*
9585 * Handle KPI flags
9586 */
9587
9588 /* ZC_CACHING applied after all configuration is done */
9589 if (flags & ZC_NOCACHING) {
9590 z->z_nocaching = true;
9591 }
9592
9593 if (flags & ZC_READONLY) {
9594 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9595 zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9596 assert(zid <= ZONE_ID__LAST_RO);
9597 #if ZSECURITY_CONFIG(READ_ONLY)
9598 zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9599 #endif
9600 zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9601 zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9602 assert(size <= PAGE_SIZE);
9603 if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9604 panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9605 (uint32_t)size, zone_heap_name(z), z->z_name);
9606 }
9607 }
9608
9609 if (flags & ZC_PERCPU) {
9610 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9611 zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9612 z->z_percpu = true;
9613 }
9614 if (flags & ZC_NOGC) {
9615 z->collectable = false;
9616 }
9617 /*
9618 * Handle ZC_NOENCRYPT from xnu only
9619 */
9620 if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9621 zsflags->z_noencrypt = true;
9622 }
9623 if (flags & ZC_NOCALLOUT) {
9624 z->no_callout = true;
9625 }
9626 if (flags & ZC_DESTRUCTIBLE) {
9627 zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9628 z->z_destructible = true;
9629 }
9630 /*
9631 * Handle Internal flags
9632 */
9633 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9634 if (flags & ZC_PGZ_USE_GUARDS) {
9635 /*
9636 * Try to turn on guard pages only for zones
9637 * with a chance of OOB.
9638 */
9639 if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9640 zsflags->z_pgz_use_guards = true;
9641 }
9642 z->z_pgz_use_guards = true;
9643 }
9644 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9645
9646 #if ZSECURITY_CONFIG(ZONE_TAGGING)
9647 if (flags & (ZC_NO_TBI_TAG)) {
9648 zsflags->z_tag = false;
9649 }
9650
9651 #if KASAN_TBI
9652 /*
9653 * Maintain for now the old behavior of not tagging DATA. Remove once
9654 * we move to the new DATA-tagging behavior.
9655 */
9656 if (flags & ZC_DATA || flags & ZC_SHARED_DATA) {
9657 zsflags->z_tag = false;
9658 }
9659 #endif /* KASAN_TBI */
9660
9661
9662 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9663
9664 if (flags & ZC_KALLOC_TYPE) {
9665 zsflags->z_kalloc_type = true;
9666 }
9667 if (flags & ZC_VM) {
9668 zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9669 zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9670 }
9671 if (flags & ZC_DATA) {
9672 zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9673 }
9674 if (flags & ZC_SHARED_DATA) {
9675 zsflags->z_kheap_id = KHEAP_ID_DATA_SHARED;
9676 }
9677
9678 #if KASAN_CLASSIC
9679 if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9680 z->z_kasan_quarantine = true;
9681 }
9682 z->z_kasan_redzone = redzone;
9683 #endif /* KASAN_CLASSIC */
9684 #if KASAN_FAKESTACK
9685 if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9686 z->z_kasan_fakestacks = true;
9687 }
9688 #endif /* KASAN_FAKESTACK */
9689
9690 /*
9691 * Then if there's extra tuning, do it
9692 */
9693 if (extra_setup) {
9694 extra_setup(z);
9695 }
9696
9697 /*
9698 * Configure debugging features
9699 */
9700 #if CONFIG_PROB_GZALLOC
9701 if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9702 pgz_zone_init(z);
9703 }
9704 #endif
9705 if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9706 #if ZALLOC_ENABLE_LOGGING
9707 /*
9708 * Check for and set up zone leak detection
9709 * if requested via boot-args.
9710 */
9711 zone_setup_logging(z);
9712 #endif /* ZALLOC_ENABLE_LOGGING */
9713 #if KASAN_TBI
9714 zone_setup_kasan_logging(z);
9715 #endif /* KASAN_TBI */
9716 }
9717
9718 #if VM_TAG_SIZECLASSES
9719 if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9720 static uint16_t sizeclass_idx;
9721
9722 assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9723 z->z_uses_tags = true;
9724 if (zone_is_data_kheap(zsflags->z_kheap_id)) {
9725 zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9726 z->z_tags_sizeclass = sizeclass_idx++;
9727 } else {
9728 uint16_t i = 0;
9729 for (; i < sizeclass_idx; i++) {
9730 if (size == zone_tags_sizeclasses[i]) {
9731 z->z_tags_sizeclass = i;
9732 break;
9733 }
9734 }
9735
9736 /*
9737 * Size class wasn't found, add it to zone_tags_sizeclasses
9738 */
9739 if (i == sizeclass_idx) {
9740 assert(i < VM_TAG_SIZECLASSES);
9741 zone_tags_sizeclasses[i] = (uint16_t)size;
9742 z->z_tags_sizeclass = sizeclass_idx++;
9743 }
9744 }
9745 assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9746 }
9747 #endif
9748
9749 /*
9750 * Finally, fixup properties based on security policies, boot-args, ...
9751 */
9752 if (zone_is_data_kheap(zsflags->z_kheap_id)) {
9753 /*
9754 * We use LIFO in the data map because workloads like network
9755 * usage tend to rotate through allocations very quickly,
9756 * sometimes with exploding working sets, and using a FIFO
9757 * policy might cause massive TLB thrashing with rather
9758 * dramatic performance impacts.
9759 */
9760 zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9761 zsflags->z_lifo = true;
9762 }
9763
9764 if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9765 /*
9766 * No zone made before zone_init() can have ZC_CACHING set.
9767 */
9768 assert(zc_magazine_zone);
9769 zone_enable_caching(z);
9770 }
9771
9772 zone_lock(z);
9773 z->z_self = z;
9774 zone_unlock(z);
9775
9776 return z;
9777 }
9778
9779 void
9780 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9781 {
9782 zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9783 }
9784
9785 zone_id_t
9786 zone_get_sig_eq(zone_t zone)
9787 {
9788 return zone_security_array[zone_index(zone)].z_sig_eq;
9789 }
9790
9791 void
9792 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9793 {
9794 /* moving to SMR must be done before the zone has ever been used */
9795 assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9796 assert(!zone_security_array[zone_index(zone)].z_lifo);
9797 assert((smr->smr_flags & SMR_SLEEPABLE) == 0);
9798
9799 if (!zone->z_pcpu_cache) {
9800 zone_enable_caching(zone);
9801 }
9802
9803 zone_lock(zone);
9804
9805 zpercpu_foreach(it, zone->z_pcpu_cache) {
9806 it->zc_smr = smr;
9807 it->zc_free = free_cb;
9808 }
9809 zone->z_smr = true;
9810
9811 zone_unlock(zone);
9812 }
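
/*
 * Usage sketch (hypothetical names, for illustration only): a subsystem that
 * reclaims elements through SMR would typically call
 *
 *	zone_enable_smr(my_zone, &my_smr_domain, my_smr_free_cb);
 *
 * right after creating `my_zone` and before its first allocation, since the
 * assertions above require a zone that has never been used.
 */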
9813
9814 __startup_func
9815 void
9816 zone_create_startup(struct zone_create_startup_spec *spec)
9817 {
9818 zone_t z;
9819
9820 z = zone_create_ext(spec->z_name, spec->z_size,
9821 spec->z_flags, spec->z_zid, spec->z_setup);
9822 if (spec->z_var) {
9823 *spec->z_var = z;
9824 }
9825 }
9826
9827 /*
9828 * The first 4 fields of a zone_view and a zone alias, so that the
9829 * zone_or_view_t union works. Trust, but verify.
9830 */
9831 #define zalloc_check_zov_alias(f1, f2) \
9832 static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9833 zalloc_check_zov_alias(z_self, zv_zone);
9834 zalloc_check_zov_alias(z_stats, zv_stats);
9835 zalloc_check_zov_alias(z_name, zv_name);
9836 zalloc_check_zov_alias(z_views, zv_next);
9837 #undef zalloc_check_zov_alias
9838
9839 __startup_func
9840 void
9841 zone_view_startup_init(struct zone_view_startup_spec *spec)
9842 {
9843 struct kalloc_heap *heap = NULL;
9844 zone_view_t zv = spec->zv_view;
9845 zone_t z;
9846 zone_security_flags_t zsflags;
9847
9848 switch (spec->zv_heapid) {
9849 case KHEAP_ID_DATA_BUFFERS:
9850 heap = KHEAP_DATA_BUFFERS;
9851 break;
9852 case KHEAP_ID_DATA_SHARED:
9853 heap = KHEAP_DATA_SHARED;
9854 break;
9855 default:
9856 heap = NULL;
9857 }
9858
9859 if (heap) {
9860 z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9861 } else {
9862 z = *spec->zv_zone;
9863 assert(spec->zv_size <= zone_elem_inner_size(z));
9864 }
9865
9866 assert(z);
9867
9868 zv->zv_zone = z;
9869 zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9870 zv->zv_next = z->z_views;
9871 zsflags = zone_security_config(z);
9872 if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9873 /*
9874 * count the raw view for zones not in a heap,
9875 * kalloc_heap_init() already counts it for its members.
9876 */
9877 zone_view_count += 2;
9878 } else {
9879 zone_view_count += 1;
9880 }
9881 z->z_views = zv;
9882 }
9883
9884 zone_t
9885 zone_create(
9886 const char *name,
9887 vm_size_t size,
9888 zone_create_flags_t flags)
9889 {
9890 return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9891 }
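
/*
 * Minimal usage sketch (hypothetical client code, assuming a caller-defined
 * struct widget):
 *
 *	static zone_t widget_zone;
 *
 *	widget_zone = zone_create("widgets", sizeof(struct widget), ZC_NONE);
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(widget_zone, w);
 */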
9892
9893 vm_size_t
9894 zone_get_elem_size(zone_t zone)
9895 {
9896 return zone->z_elem_size;
9897 }
9898
9899 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9900
9901 zone_id_t
9902 zone_create_ro(
9903 const char *name,
9904 vm_size_t size,
9905 zone_create_flags_t flags,
9906 zone_create_ro_id_t zc_ro_id)
9907 {
9908 assert(zc_ro_id <= ZC_RO_ID__LAST);
9909 zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9910 (void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9911 return reserved_zid;
9912 }
9913
9914 zone_t
9915 zinit(
9916 vm_size_t size, /* the size of an element */
9917 vm_size_t max __unused, /* maximum memory to use */
9918 vm_size_t alloc __unused, /* allocation size */
9919 const char *name) /* a name for the zone */
9920 {
9921 return zone_create(name, size, ZC_DESTRUCTIBLE);
9922 }
9923
9924 void
9925 zdestroy(zone_t z)
9926 {
9927 unsigned int zindex = zone_index(z);
9928 zone_security_flags_t zsflags = zone_security_array[zindex];
9929
9930 current_thread()->options |= TH_OPT_ZONE_PRIV;
9931 lck_mtx_lock(&zone_gc_lock);
9932
9933 zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9934
9935 lck_mtx_unlock(&zone_gc_lock);
9936 current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9937
9938 zone_lock(z);
9939
9940 if (!zone_submap_is_sequestered(zsflags)) {
9941 while (!zone_pva_is_null(z->z_pageq_va)) {
9942 struct zone_page_metadata *meta;
9943
9944 zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9945 meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9946 assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9947 bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9948 zone_unlock(z);
9949 kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9950 ptoa(z->z_chunk_pages));
9951 zone_lock(z);
9952 }
9953 }
9954
9955 #if !KASAN_CLASSIC
9956 /* Assert that all counts are zero */
9957 if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9958 (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9959 panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9960 zone_heap_name(z), z->z_name);
9961 }
9962
9963 /* consistency check: make sure everything is indeed empty */
9964 assert(zone_pva_is_null(z->z_pageq_empty));
9965 assert(zone_pva_is_null(z->z_pageq_partial));
9966 assert(zone_pva_is_null(z->z_pageq_full));
9967 if (!zone_submap_is_sequestered(zsflags)) {
9968 assert(zone_pva_is_null(z->z_pageq_va));
9969 }
9970 #endif
9971
9972 zone_unlock(z);
9973
9974 simple_lock(&all_zones_lock, &zone_locks_grp);
9975
9976 assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9977 /* Mark the zone as empty in the bitmap */
9978 bitmap_set(zone_destroyed_bitmap, zindex);
9979 num_zones_in_use--;
9980 assert(num_zones_in_use > 0);
9981
9982 simple_unlock(&all_zones_lock);
9983 }
9984
9985 #endif /* !ZALLOC_TEST */
9986 #pragma mark zalloc module init
9987 #if !ZALLOC_TEST
9988
9989 /*
9990 * Initialize the "zone of zones" which uses fixed memory allocated
9991 * earlier in memory initialization. zone_bootstrap is called
9992 * before zone_init.
9993 */
9994 __startup_func
9995 void
9996 zone_bootstrap(void)
9997 {
9998 #if DEBUG || DEVELOPMENT
9999 #if __x86_64__
10000 if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
10001 /*
10002 * rdar://79781535 Disable early gaps while running kernPOST on Intel;
10003 * the fp faulting code gets triggered and deadlocks.
10004 */
10005 zone_caching_disabled = 1;
10006 }
10007 #endif /* __x86_64__ */
10008 #endif /* DEBUG || DEVELOPMENT */
10009
10010 /* Validate struct zone_packed_virtual_address expectations */
10011 static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
10012 if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
10013 panic("zone_pva_t can't pack a kernel page address in 31 bits");
10014 }
10015
10016 zpercpu_early_count = ml_early_cpu_max_number() + 1;
10017 if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
10018 /*
10019 * Scale zc_mag_size() per machine.
10020 *
10021 * - wide machines get 128B magazines to avoid all false sharing
10022 * - smaller machines that still have enough RAM get somewhat
10023 * bigger buckets (empirically affects networking performance)
10024 */
10025 if (zpercpu_early_count >= 10) {
10026 _zc_mag_size = 14;
10027 } else if ((sane_size >> 30) >= 4) {
10028 _zc_mag_size = 10;
10029 }
10030 }
10031
10032 /*
10033 * Initialize random used to scramble early allocations
10034 */
10035 zpercpu_foreach_cpu(cpu) {
10036 random_bool_init(&zone_bool_gen[cpu].zbg_bg);
10037 }
10038
10039 #if CONFIG_PROB_GZALLOC
10040 /*
10041 * Set pgz_sample_counter on the boot CPU so that we do not sample
10042 * any allocation until PGZ has been properly setup (in pgz_init()).
10043 */
10044 *PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
10045 #endif /* CONFIG_PROB_GZALLOC */
10046
10047 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10048 /*
10049 * Randomly assign zones to one of the 4 general submaps,
10050 * and pick whether they allocate from the beginning
10051 * or the end of it.
10052 *
10053 * A lot of OOB exploitation relies on precise interleaving
10054 * of specific types in the heap.
10055 *
10056 * Woops, you can't guarantee that anymore.
10057 */
10058 for (zone_id_t i = 1; i < MAX_ZONES; i++) {
10059 uint32_t r = zalloc_random_uniform32(0,
10060 ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
10061
10062 zone_security_array[i].z_submap_from_end = (r & 1);
10063 zone_security_array[i].z_submap_idx += (r >> 1);
10064 }
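
/*
 * For illustration: with 4 general submaps, r is uniform in [0, 8).
 * Bit 0 decides whether the zone allocates from the end of its submap,
 * and bits [2:1] (values 0..3) select which general submap the zone is
 * assigned to.
 */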
10065 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
10066
10067
10068 thread_call_setup_with_options(&zone_expand_callout,
10069 zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
10070 THREAD_CALL_OPTIONS_ONCE);
10071
10072 thread_call_setup_with_options(&zone_trim_callout,
10073 zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
10074 THREAD_CALL_OPTIONS_ONCE);
10075 }
10076
10077 #define ZONE_GUARD_SIZE (64UL << 10)
10078
10079 __startup_func
10080 static void
10081 zone_tunables_fixup(void)
10082 {
10083 int wdt = 0;
10084
10085 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
10086 if (!PE_parse_boot_argn("pgz", NULL, 0) &&
10087 PE_parse_boot_argn("pgz1", NULL, 0)) {
10088 /*
10089 * if pgz1= was used, but pgz= was not,
10090 * then the more specific pgz1 takes precedence.
10091 */
10092 pgz_all = false;
10093 }
10094 #endif
10095
10096 if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
10097 zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
10098 }
10099 if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
10100 !PE_parse_boot_argn("zet", NULL, 0)) {
10101 zone_exhausted_timeout = -1;
10102 }
10103 }
10104 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
10105
10106 /** Get the left zone guard size for the submap at IDX */
10107 __pure2
10108 __startup_func
10109 static vm_map_size_t
10110 zone_submap_left_guard_size(zone_submap_idx_t __unused idx)
10111 {
10112 return ZONE_GUARD_SIZE / 2;
10113 }
10114
10115 /** Get the right zone guard size for the submap at IDX */
10116 __pure2
10117 __startup_func
10118 static vm_map_size_t
10119 zone_submap_right_guard_size(zone_submap_idx_t __unused idx)
10120 {
10121 return ZONE_GUARD_SIZE / 2;
10122 }
10123
10124 __startup_func
10125 static void
10126 zone_submap_init(
10127 mach_vm_offset_t *submap_min,
10128 zone_submap_idx_t idx,
10129 uint64_t zone_sub_map_numer,
10130 uint64_t *remaining_denom,
10131 vm_offset_t *remaining_size)
10132 {
10133 vm_map_create_options_t vmco;
10134 vm_map_address_t addr;
10135 vm_offset_t submap_start, submap_end;
10136 vm_size_t submap_actual_size, submap_usable_size;
10137 vm_map_t submap;
10138 vm_map_size_t left_guard_size = 0, right_guard_size = 0;
10139 vm_prot_t prot = VM_PROT_DEFAULT;
10140 vm_prot_t prot_max = VM_PROT_ALL;
10141 kern_return_t kr;
10142
10143 submap_usable_size =
10144 zone_sub_map_numer * *remaining_size / *remaining_denom;
10145 submap_usable_size = trunc_page(submap_usable_size);
10146
10147 submap_start = *submap_min;
10148
10149 left_guard_size = zone_submap_left_guard_size(idx);
10150 right_guard_size = zone_submap_right_guard_size(idx);
10151
10152 /*
10153 * Compute the final submap size.
10154 *
10155 * The usable size does not include the zone guards, so add them now. This
10156 * VA is paid for in zone_init ahead of time.
10157 */
10158
10159 submap_actual_size =
10160 submap_usable_size + left_guard_size + right_guard_size;
10161
10162 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
10163 /*
10164 * The RO zone has special alignment requirements, so snap to the
10165 * required boundary and reflow based on the available space.
10166 *
10167 * This operation only increases the amount of VA used by the submap,
10168 * and so the guards will always still fit.
10169 */
10170 vm_offset_t submap_padding = 0;
10171
10172 submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
10173 submap_start += submap_padding;
10174
10175 submap_actual_size = pmap_ro_zone_align(submap_actual_size);
10176 submap_usable_size =
10177 submap_actual_size - left_guard_size - right_guard_size;
10178
10179 assert(*remaining_size >= (submap_padding + submap_usable_size));
10180
10181 *remaining_size -= submap_padding;
10182 *submap_min = submap_start;
10183 }
10184
10185 submap_end = submap_start + submap_actual_size;
10186
10187 if (idx == Z_SUBMAP_IDX_VM) {
10188 vm_packing_verify_range("vm_compressor",
10189 submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
10190 vm_packing_verify_range("vm_page",
10191 submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
10192
10193 #if MACH_ASSERT
10194 /*
10195 * vm_submap_restriction_size_debug gives the size passed to the kmem
10196 * claim placer to ensure that the packing behaves correctly. If this
10197 * size is smaller than what we actually end up using for the VM submap,
10198 * the packing may be probabilistically invalid. Assert on this
10199 * condition to catch this type of failure deterministically rather than
10200 * relying on the above assertions catching it when we actually hit that
10201 * rare case and the packing is invalid.
10202 */
10203 assert(submap_actual_size <= vm_submap_restriction_size_debug);
10204 #endif /* MACH_ASSERT */
10205 }
10206
10207 vmco = VM_MAP_CREATE_NEVER_FAULTS;
10208 if (!zone_submap_is_sequestered(idx)) {
10209 vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
10210 }
10211
10212 vm_map_will_allocate_early_map(&zone_submaps[idx]);
10213 submap = kmem_suballoc(kernel_map, submap_min, submap_actual_size, vmco,
10214 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10215 KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT,
10216 VM_KERN_MEMORY_ZONE).kmr_submap;
10217
10218 if (idx == Z_SUBMAP_IDX_READ_ONLY) {
10219 zone_info.zi_ro_range.min_address = submap_start;
10220 zone_info.zi_ro_range.max_address = submap_end;
10221 prot_max = prot = VM_PROT_NONE;
10222 }
10223
10224 addr = submap_start;
10225 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(
10226 .vmkf_no_soft_limit = true,
10227 .vm_tag = VM_KERN_MEMORY_ZONE);
10228 vm_object_t kobject = kernel_object_default;
10229
10230 kr = vm_map_enter(submap, &addr, left_guard_size, 0,
10231 vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10232 if (kr != KERN_SUCCESS) {
10233 panic("ksubmap[%s]: failed to make first entry (%d)",
10234 zone_submaps_names[idx], kr);
10235 }
10236
10237 addr = submap_end - right_guard_size;
10238 kr = vm_map_enter(submap, &addr, right_guard_size, 0,
10239 vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10240 if (kr != KERN_SUCCESS) {
10241 panic("ksubmap[%s]: failed to make last entry (%d)",
10242 zone_submaps_names[idx], kr);
10243 }
10244
10245 #if DEBUG || DEVELOPMENT
10246 printf("zone_init: map %-5s %p:%p (%u%c, %u%c usable)\n",
10247 zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
10248 mach_vm_size_pretty(submap_actual_size),
10249 mach_vm_size_unit(submap_actual_size),
10250 mach_vm_size_pretty(submap_usable_size),
10251 mach_vm_size_unit(submap_usable_size));
10252 #endif /* DEBUG || DEVELOPMENT */
10253
10254 zone_submaps[idx] = submap;
10255 *submap_min = submap_end;
10256 *remaining_size -= submap_usable_size;
10257 *remaining_denom -= zone_sub_map_numer;
10258 }
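
/*
 * Illustrative example of the proportional carve-up driven by
 * submap_ratios[] below: with ratios summing to 100, a submap initialized
 * with a numerator of 15 receives 15/100 of the remaining VA; the caller
 * then subtracts both the consumed VA and the numerator, so a later submap
 * with a numerator of 5 receives 5/85 of what is left, and so on until the
 * denominator is exhausted.
 */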
10259
10260 static inline void
10261 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
10262 {
10263 if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
10264 pva->packed_address += delta;
10265 }
10266 }
10267
10268 /*
10269 * Allocate metadata array and migrate bootstrap initial metadata and memory.
10270 */
10271 __startup_func
10272 static void
10273 zone_metadata_init(void)
10274 {
10275 vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
10276 vm_map_entry_t first;
10277
10278 struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
10279 vm_size_t early_sz;
10280 vm_offset_t reloc_base;
10281
10282 /*
10283 * Step 1: Allocate the metadata + bitmaps range
10284 *
10285 * Allocations can't be smaller than 8 bytes, which bounds the bitmaps
10286 * at 128 bits (16 bytes) per 1K of physical memory (16M per 1G).
10287 *
10288 * Let's preallocate for the worst to avoid weird panics.
10289 */
10290 vm_map_will_allocate_early_map(&zone_meta_map);
10291 meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
10292 zone_meta_size + zone_bits_size + zone_xtra_size,
10293 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10294 VM_KERN_MEMORY_ZONE, &zone_meta_map);
10295 meta_r.min_address += ZONE_GUARD_SIZE;
10296 meta_r.max_address -= ZONE_GUARD_SIZE;
10297 if (zone_xtra_size) {
10298 xtra_r.max_address = meta_r.max_address;
10299 meta_r.max_address -= zone_xtra_size;
10300 xtra_r.min_address = meta_r.max_address;
10301 } else {
10302 xtra_r.min_address = xtra_r.max_address = 0;
10303 }
10304 bits_r.max_address = meta_r.max_address;
10305 meta_r.max_address -= zone_bits_size;
10306 bits_r.min_address = meta_r.max_address;
10307
10308 #if DEBUG || DEVELOPMENT
10309 printf("zone_init: metadata %p:%p (%u%c)\n",
10310 (void *)meta_r.min_address, (void *)meta_r.max_address,
10311 mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
10312 mach_vm_size_unit(mach_vm_range_size(&meta_r)));
10313 printf("zone_init: metabits %p:%p (%u%c)\n",
10314 (void *)bits_r.min_address, (void *)bits_r.max_address,
10315 mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
10316 mach_vm_size_unit(mach_vm_range_size(&bits_r)));
10317 printf("zone_init: extra %p:%p (%u%c)\n",
10318 (void *)xtra_r.min_address, (void *)xtra_r.max_address,
10319 mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
10320 mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
10321 #endif /* DEBUG || DEVELOPMENT */
10322
10323 bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
10324 bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
10325
10326 /*
10327 * Step 2: Install new ranges.
10328 * Relocate metadata and bits.
10329 */
10330 early_r = zone_info.zi_map_range;
10331 early_sz = mach_vm_range_size(&early_r);
10332
10333 zone_info.zi_map_range = zone_map_range;
10334 zone_info.zi_meta_range = meta_r;
10335 zone_info.zi_bits_range = bits_r;
10336 zone_info.zi_xtra_range = xtra_r;
10337 zone_info.zi_meta_base = VM_FAR_ADD_PTR_UNBOUNDED(
10338 (struct zone_page_metadata *)meta_r.min_address,
10339 -(ptrdiff_t)zone_pva_from_addr(zone_map_range.min_address).packed_address);
10340
10341 vm_map_lock(vm_map);
10342 first = vm_map_first_entry(vm_map);
10343 reloc_base = first->vme_end;
10344 first->vme_end += early_sz;
10345 vm_map->size += early_sz;
10346 vm_map_unlock(vm_map);
10347
10348 struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
10349 struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
10350 vm_offset_t reloc_delta = reloc_base - early_r.min_address;
10351 /* this needs to sign extend */
10352 uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
10353
10354 zone_meta_populate(reloc_base, early_sz);
10355 memcpy(new_meta, early_meta,
10356 atop(early_sz) * sizeof(struct zone_page_metadata));
10357 for (uint32_t i = 0; i < atop(early_sz); i++) {
10358 zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
10359 zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
10360 }
10361
10362 static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
10363 static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
10364
10365 for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
10366 zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
10367 zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
10368 }
10369
10370 zba_populate(0, false);
10371 memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
10372 zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
10373
10374 /*
10375 * Step 3: Relocate the bootstrap VM structs
10376 * (including rewriting their content).
10377 */
10378 kma_flags_t flags = KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL;
10379
10380 #if ZSECURITY_CONFIG(ZONE_TAGGING)
10381 flags |= KMA_TAG;
10382 #endif /* ZSECURITY_CONFIG_ZONE_TAGGING */
10383
10384
10385 kernel_memory_populate(reloc_base, early_sz, flags,
10386 VM_KERN_MEMORY_OSFMK);
10387
10388 vm_memtag_disable_checking();
10389 __nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
10390 vm_memtag_enable_checking();
10391
10392 #if ZSECURITY_CONFIG(ZONE_TAGGING)
10393 vm_memtag_relocate_tags(reloc_base, early_r.min_address, early_sz);
10394 #endif /* ZSECURITY_CONFIG_ZONE_TAGGING */
10395
10396 #if KASAN
10397 kasan_notify_address(reloc_base, early_sz);
10398 #endif /* KASAN */
10399
10400 vm_map_relocate_early_maps(reloc_delta);
10401
10402 for (uint32_t i = 0; i < atop(early_sz); i++) {
10403 zone_id_t zid = new_meta[i].zm_index;
10404 zone_t z = &zone_array[zid];
10405 vm_size_t esize = zone_elem_outer_size(z);
10406 vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
10407 vm_address_t addr;
10408
10409 if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
10410 continue;
10411 }
10412
10413 for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
10414 if (zone_meta_is_free(&new_meta[i], eidx)) {
10415 continue;
10416 }
10417
10418 addr = vm_memtag_load_tag(base + eidx * esize);
10419 #if KASAN_CLASSIC
10420 kasan_alloc(addr,
10421 zone_elem_inner_size(z), zone_elem_inner_size(z),
10422 zone_elem_redzone(z), false,
10423 __builtin_frame_address(0));
10424 #endif
10425 vm_map_relocate_early_elem(zid, addr, reloc_delta);
10426 }
10427 }
10428
10429 }
10430
10431
10432 __startup_data
10433 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
10434 #if ZSECURITY_CONFIG(READ_ONLY)
10435 [Z_SUBMAP_IDX_VM] = 15,
10436 [Z_SUBMAP_IDX_READ_ONLY] = 5,
10437 #else
10438 [Z_SUBMAP_IDX_VM] = 20,
10439 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10440 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10441 [Z_SUBMAP_IDX_GENERAL_0] = 15,
10442 [Z_SUBMAP_IDX_GENERAL_1] = 15,
10443 [Z_SUBMAP_IDX_GENERAL_2] = 15,
10444 [Z_SUBMAP_IDX_GENERAL_3] = 15,
10445 [Z_SUBMAP_IDX_DATA] = 20,
10446 #else
10447 [Z_SUBMAP_IDX_GENERAL_0] = 60,
10448 [Z_SUBMAP_IDX_DATA] = 20,
10449 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
10450 };
10451
10452 __startup_func
10453 static inline uint16_t
10454 zone_submap_ratios_denom(void)
10455 {
10456 uint16_t denom = 0;
10457
10458 for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10459 denom += submap_ratios[idx];
10460 }
10461
10462 assert(denom == 100);
10463
10464 return denom;
10465 }
10466
10467 __startup_func
10468 static inline vm_offset_t
10469 zone_restricted_va_max(void)
10470 {
10471 vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
10472 vm_offset_t vm_page_max = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
10473
10474 return trunc_page(MIN(compressor_max, vm_page_max));
10475 }
10476
10477 __startup_func
10478 static void
10479 zone_set_map_sizes(void)
10480 {
10481 vm_size_t zsize;
10482 vm_size_t zsizearg;
10483
10484 /*
10485 * Compute the physical limits for the zone map
10486 */
10487
10488 if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
10489 zsize = zsizearg * (1024ULL * 1024);
10490 } else {
10491 /* Set target zone size as 1/4 of physical memory */
10492 zsize = (vm_size_t)(sane_size >> 2);
10493 zsize += zsize >> 1;
10494 }
10495
10496 if (zsize < CONFIG_ZONE_MAP_MIN) {
10497 zsize = CONFIG_ZONE_MAP_MIN; /* Clamp to min */
10498 }
10499 if (zsize > sane_size >> 1) {
10500 zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
10501 }
10502 if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
10503 /* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
10504 printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
10505 (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
10506 zsize = ZONE_MAP_MAX;
10507 }
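
/*
 * Illustrative example (no zsize= boot-arg): with 8G of usable memory the
 * target starts at 2G (1/4 of RAM), grows to 3G after the +50% adjustment,
 * and is kept as-is provided it is above CONFIG_ZONE_MAP_MIN and below both
 * half of RAM (4G here) and ZONE_MAP_MAX.
 */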
10508
10509 zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
10510
10511
10512 /*
10513 * Declare restrictions on zone max
10514 */
10515 vm_offset_t vm_submap_size = round_page(
10516 (submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10517 zone_submap_ratios_denom()) +
10518 zone_submap_left_guard_size(Z_SUBMAP_IDX_VM) +
10519 zone_submap_right_guard_size(Z_SUBMAP_IDX_VM);
10520
10521 #if CONFIG_PROB_GZALLOC
10522 vm_submap_size += pgz_get_size();
10523 #endif /* CONFIG_PROB_GZALLOC */
10524 if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10525 &zone_map_range.min_address)) {
10526 zone_map_range.min_address = 0;
10527 }
10528
10529 #if MACH_ASSERT
10530 vm_submap_restriction_size_debug = vm_submap_size;
10531 #endif /* MACH_ASSERT */
10532
10533 zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10534 sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10535
10536 static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10537 ZBA_PTR_MASK + 1);
10538 zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10539 (CHAR_BIT * KALLOC_MINSIZE));
10540
10541 #if VM_TAG_SIZECLASSES
10542 if (zone_tagging_on) {
10543 zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10544 }
10545 if (zba_xtra_shift) {
10546 /*
10547 * if we need the extra space range, then limit the size of the
10548 * bitmaps to something reasonable instead of a theoretical
10549 * worst case scenario of all zones being for the smallest
10550 * allocation granule, in order to avoid fake VA pressure on
10551 * other parts of the system.
10552 */
10553 zone_bits_size = round_page(zone_bits_size / 8);
10554 zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10555 }
10556 #endif /* VM_TAG_SIZECLASSES */
10557 }
10558 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
10559
10560 /*
10561 * Can't use zone_info.zi_map_range at this point as it is being used to
10562 * store the range of early pmap memory that was stolen to bootstrap the
10563 * necessary VM zones.
10564 */
10565 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10566 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10567 return zone_meta_size + zone_bits_size + zone_xtra_size;
10568 });
10569
10570 /*
10571 * Global initialization of Zone Allocator.
10572 * Runs after zone_bootstrap.
10573 */
10574 __startup_func
10575 static void
10576 zone_init(void)
10577 {
10578 vm_size_t remaining_size = ZONE_MAP_VA_SIZE;
10579 mach_vm_offset_t submap_min = 0;
10580 uint64_t denom = zone_submap_ratios_denom();
10581 /*
10582 * And now allocate the various pieces of VA and submaps.
10583 */
10584
10585 submap_min = zone_map_range.min_address;
10586
10587 #if CONFIG_PROB_GZALLOC
10588 vm_size_t pgz_size = pgz_get_size();
10589
10590 vm_map_will_allocate_early_map(&pgz_submap);
10591 zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10592 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10593 VM_KERN_MEMORY_ZONE, &pgz_submap);
10594
10595 submap_min += pgz_size;
10596 remaining_size -= pgz_size;
10597 #if DEBUG || DEVELOPMENT
10598 printf("zone_init: pgzalloc %p:%p (%u%c) [%d slots]\n",
10599 (void *)zone_info.zi_pgz_range.min_address,
10600 (void *)zone_info.zi_pgz_range.max_address,
10601 mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10602 pgz_slots);
10603 #endif /* DEBUG || DEVELOPMENT */
10604 #endif /* CONFIG_PROB_GZALLOC */
10605
10606 /*
10607 * Allocate the submaps
10608 */
10609
10610 /*
10611 * In order to prevent us from throwing off the ratios, deduct VA for the
10612 * zone guards ahead of time.
10613 */
10614 for (uint32_t i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
10615 remaining_size -= zone_submap_left_guard_size(i);
10616 remaining_size -= zone_submap_right_guard_size(i);
10617 }
10618
10619 for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10620 if (submap_ratios[idx] == 0) {
10621 zone_submaps[idx] = VM_MAP_NULL;
10622 } else {
10623 zone_submap_init(&submap_min, idx, submap_ratios[idx],
10624 &denom, &remaining_size);
10625 }
10626 }
10627
10628 zone_metadata_init();
10629
10630 #if VM_TAG_SIZECLASSES
10631 if (zone_tagging_on) {
10632 vm_allocation_zones_init();
10633 }
10634 #endif /* VM_TAG_SIZECLASSES */
10635
10636 zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10637 ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10638
10639 (void)zone_create_ext("vm.permanent", 1, kma_flags | ZC_NO_TBI_TAG,
10640 ZONE_ID_PERMANENT, ^(zone_t z) {
10641 z->z_permanent = true;
10642 z->z_elem_size = 1;
10643 });
10644 (void)zone_create_ext("vm.permanent.percpu", 1,
10645 kma_flags | ZC_PERCPU | ZC_NO_TBI_TAG, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10646 z->z_permanent = true;
10647 z->z_elem_size = 1;
10648 });
10649
10650 zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10651 zc_mag_size() * sizeof(vm_offset_t),
10652 ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10653 zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10654
10655 /*
10656 * Now migrate the startup statistics into their final storage,
10657 * and enable logging for early zones (that zone_create_ext() skipped).
10658 */
10659 int cpu = cpu_number();
10660 zone_index_foreach(idx) {
10661 zone_t tz = &zone_array[idx];
10662
10663 if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10664 zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10665
10666 *zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10667 tz->z_stats = zs;
10668 }
10669 if (tz->z_self == tz) {
10670 #if ZALLOC_ENABLE_LOGGING
10671 zone_setup_logging(tz);
10672 #endif /* ZALLOC_ENABLE_LOGGING */
10673 #if KASAN_TBI
10674 zone_setup_kasan_logging(tz);
10675 #endif /* KASAN_TBI */
10676 }
10677 }
10678 }
10679 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10680
10681 void
10682 zalloc_iokit_lockdown(void)
10683 {
10684 zone_share_always = false;
10685 }
10686
10687 void
10688 zalloc_first_proc_made(void)
10689 {
10690 zone_caching_disabled = 0;
10691 zone_early_thres_mul = 1;
10692 }
10693
10694 __startup_func
10695 vm_offset_t
10696 zone_early_mem_init(vm_size_t size)
10697 {
10698 vm_offset_t mem;
10699
10700 assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10701
10702 /*
10703 * The zone that is used early to bring up the VM is stolen here.
10704 *
10705 * When the zone subsystem is actually initialized,
10706 * zone_metadata_init() will be called, and those pages
10707 * and the elements they contain, will be relocated into
10708 * the VM submap (even on architectures where those zones
10709 * do not live there).
10710 */
10711 assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10712 mem = (vm_offset_t)zone_early_pages_to_cram;
10713
10714
10715 zone_info.zi_meta_base = VM_FAR_ADD_PTR_UNBOUNDED(
10716 (struct zone_page_metadata *)zone_early_meta_array_startup,
10717 -(ptrdiff_t)zone_pva_from_addr(mem).packed_address);
10718 zone_info.zi_map_range.min_address = mem;
10719 zone_info.zi_map_range.max_address = mem + size;
10720
10721 zone_info.zi_bits_range = (struct mach_vm_range){
10722 .min_address = (mach_vm_offset_t)zba_chunk_startup,
10723 .max_address = (mach_vm_offset_t)zba_chunk_startup +
10724 sizeof(zba_chunk_startup),
10725 };
10726
10727 zba_meta()->zbam_left = 1;
10728 zba_meta()->zbam_right = 1;
10729 zba_init_chunk(0, false);
10730
10731 return mem;
10732 }
10733
10734 #endif /* !ZALLOC_TEST */
10735 #pragma mark - tests
10736 #if DEBUG || DEVELOPMENT
10737
10738 /*
10739 * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10740 * thread goes through at a time.
10741 *
10742 * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10743 * before zdestroy()), which could lead us to run out of zones.
10744 */
10745 static bool any_zone_test_running = false;
10746
10747 static uintptr_t *
10748 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10749 {
10750 vm_offset_t elem_size = zone_elem_outer_size(z);
10751 vm_offset_t base;
10752 struct zone_page_metadata *meta;
10753
10754 while (!zone_pva_is_null(page_index)) {
10755 base = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10756 meta = zone_pva_to_meta(page_index);
10757
10758 if (meta->zm_inline_bitmap) {
10759 for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10760 uint32_t map = meta[i].zm_bitmap;
10761
10762 for (; map; map &= map - 1) {
10763 *elems++ = INSTANCE_PUT(base +
10764 elem_size * __builtin_clz(map));
10765 }
10766 base += elem_size * 32;
10767 }
10768 } else {
10769 uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10770 bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10771 for (size_t i = 0; i < (1u << order); i++) {
10772 uint64_t map = bits[i];
10773
10774 for (; map; map &= map - 1) {
10775 *elems++ = INSTANCE_PUT(base +
10776 elem_size * __builtin_clzll(map));
10777 }
10778 base += elem_size * 64;
10779 }
10780 }
10781
10782 page_index = meta->zm_page_next;
10783 }
10784 return elems;
10785 }
10786
10787 kern_return_t
10788 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10789 {
10790 zone_t zone = NULL;
10791 uintptr_t * array;
10792 uintptr_t * next;
10793 uintptr_t element;
10794 uint32_t idx, count, found;
10795 uint32_t nobtcount;
10796 uint32_t elemSize;
10797 size_t maxElems;
10798
10799 zone_foreach(z) {
10800 if (!z->z_name) {
10801 continue;
10802 }
10803 if (!strncmp(zoneName, z->z_name, nameLen)) {
10804 zone = z;
10805 break;
10806 }
10807 }
10808 if (zone == NULL) {
10809 return KERN_INVALID_NAME;
10810 }
10811
10812 elemSize = (uint32_t)zone_elem_inner_size(zone);
10813 maxElems = (zone->z_elems_avail + 1) & ~1ul;
10814
10815 array = kalloc_type_tag(vm_offset_t, maxElems, Z_WAITOK, VM_KERN_MEMORY_DIAG);
10816 if (array == NULL) {
10817 return KERN_RESOURCE_SHORTAGE;
10818 }
10819
10820 zone_lock(zone);
10821
10822 next = array;
10823 next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10824 next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10825 count = (uint32_t)(next - array);
10826
10827 zone_unlock(zone);
10828
10829 zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10830 assert(found <= count);
10831
10832 for (idx = 0; idx < count; idx++) {
10833 element = array[idx];
10834 if (kInstanceFlagReferenced & element) {
10835 continue;
10836 }
10837 element = INSTANCE_PUT(element) & ~kInstanceFlags;
10838 }
10839
10840 #if ZALLOC_ENABLE_LOGGING
10841 if (zone->z_btlog && !corruption_debug_flag) {
10842 // btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10843 static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10844 btlog_copy_backtraces_for_elements(zone->z_btlog,
10845 (vm_address_t *)array, &count, elemSize, proc);
10846 }
10847 #endif /* ZALLOC_ENABLE_LOGGING */
10848
10849 for (nobtcount = idx = 0; idx < count; idx++) {
10850 element = array[idx];
10851 if (!element) {
10852 continue;
10853 }
10854 if (kInstanceFlagReferenced & element) {
10855 continue;
10856 }
10857 nobtcount++;
10858 }
10859 if (nobtcount) {
10860 proc(nobtcount, elemSize, BTREF_NULL);
10861 }
10862
10863 kfree_type(vm_offset_t, maxElems, array);
10864 return KERN_SUCCESS;
10865 }
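/*
 * Usage sketch (hypothetical, for illustration only): a caller supplies a
 * zone name prefix and a leak_site_proc callback; the callback is invoked
 * with (count, element size, backtrace ref), matching the proc() call above.
 * The callback and zone names below are made up and not part of this file:
 *
 *	static void
 *	my_leak_cb(uint32_t count, uint32_t elem_size, btref_t ref)
 *	{
 *		printf("leak site: %u elems of %u bytes\n", count, elem_size);
 *	}
 *
 *	kern_return_t kr = zone_leaks("vm.objects",
 *	    sizeof("vm.objects") - 1, my_leak_cb);
 */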
10866
10867 static int
10868 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10869 {
10870 zone_security_flags_t zsflags;
10871 uint32_t x = 4;
10872 uint32_t *test_ptr;
10873
10874 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10875 printf("zone_ro_basic_test: Test already running.\n");
10876 return EALREADY;
10877 }
10878
10879 zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10880
10881 for (int i = 0; i < 3; i++) {
10882 #if ZSECURITY_CONFIG(READ_ONLY)
10883 /* Basic Test: Create int zone, zalloc int, modify value, free int */
10884 printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10885 printf("zone_ro_basic_test: create a sub-page size zone\n");
10886
10887 printf("zone_ro_basic_test: verify flags were set\n");
10888 assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10889
10890 printf("zone_ro_basic_test: zalloc an element\n");
10891 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10892 assert(test_ptr);
10893
10894 printf("zone_ro_basic_test: verify we can't write to it\n");
10895 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10896
10897 x = 4;
10898 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10899 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10900 assert(test_ptr);
10901 assert(*(uint32_t*)test_ptr == x);
10902
10903 x = 5;
10904 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10905 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10906 assert(test_ptr);
10907 assert(*(uint32_t*)test_ptr == x);
10908
10909 printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10910 assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10911
10912 printf("zone_ro_basic_test: free elem\n");
10913 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10914 assert(!test_ptr);
10915 #else
10916 printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10917
10918 printf("zone_ro_basic_test: verify flags were set\n");
10919 assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10920
10921 printf("zone_ro_basic_test: zalloc an element\n");
10922 test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10923 assert(test_ptr);
10924
10925 x = 4;
10926 printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10927 zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10928 assert(test_ptr);
10929 assert(*(uint32_t*)test_ptr == x);
10930
10931 x = 5;
10932 printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10933 zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10934 assert(test_ptr);
10935 assert(*(uint32_t*)test_ptr == x);
10936
10937 printf("zone_ro_basic_test: free elem\n");
10938 zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10939 assert(!test_ptr);
10940 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10941 }
10942
10943 printf("zone_ro_basic_test: garbage collection\n");
10944 zone_gc(ZONE_GC_DRAIN);
10945
10946 printf("zone_ro_basic_test: Test passed\n");
10947
10948 *out = 1;
10949 os_atomic_store(&any_zone_test_running, false, relaxed);
10950 return 0;
10951 }
10952 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
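/*
 * Usage note (editorial): SYSCTL_TEST_REGISTER exposes the handler above as a
 * test sysctl on DEVELOPMENT/DEBUG kernels. It is typically driven from
 * userspace with something along the lines of
 * "sysctl debug.test.zone_ro_basic_test=1" (exact node name assumed here);
 * the handler reports success by setting *out to 1.
 */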
10953
10954 static int
10955 zone_basic_test_run(__unused int64_t in, int64_t *out)
10956 {
10957 static zone_t test_zone_ptr = NULL;
10958
10959 unsigned int i = 0, max_iter = 5;
10960 void * test_ptr;
10961 zone_t test_zone;
10962 int rc = 0;
10963
10964 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10965 printf("zone_basic_test: Test already running.\n");
10966 return EALREADY;
10967 }
10968
10969 printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10970
10971 /* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10972 do {
10973 test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10974 assert(test_zone);
10975
10976 #if KASAN_CLASSIC
10977 if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10978 #else
10979 if (test_zone->z_elems_free != 0)
10980 #endif
10981 {
10982 printf("zone_basic_test: free count is not zero\n");
10983 rc = EIO;
10984 goto out;
10985 }
10986
10987 if (test_zone_ptr == NULL) {
10988 /* Stash the zone pointer returned on the first zinit */
10989 printf("zone_basic_test: zone created for the first time\n");
10990 test_zone_ptr = test_zone;
10991 } else if (test_zone != test_zone_ptr) {
10992 printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10993 rc = EIO;
10994 goto out;
10995 }
10996
10997 test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10998 zfree(test_zone, test_ptr);
10999
11000 zdestroy(test_zone);
11001 i++;
11002
11003 printf("zone_basic_test: Iteration %d successful\n", i);
11004 } while (i < max_iter);
11005
11006 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
11007 /* test Z_VA_SEQUESTER */
11008 {
11009 zone_t test_pcpu_zone;
11010 kern_return_t kr;
11011 const int num_allocs = 8;
11012 int idx;
11013 vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
11014 void *allocs[num_allocs];
11015 void **allocs_pcpu;
11016 vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
11017
11018 test_zone = zone_create("test_zone_sysctl", elem_size,
11019 ZC_DESTRUCTIBLE);
11020 assert(test_zone);
11021
11022 test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
11023 ZC_DESTRUCTIBLE | ZC_PERCPU);
11024 assert(test_pcpu_zone);
11025
11026 for (idx = 0; idx < num_allocs; idx++) {
11027 allocs[idx] = zalloc(test_zone);
11028 assert(NULL != allocs[idx]);
11029 printf("alloc[%d] %p\n", idx, allocs[idx]);
11030 }
11031 for (idx = 0; idx < num_allocs; idx++) {
11032 zfree(test_zone, allocs[idx]);
11033 }
11034 assert(!zone_pva_is_null(test_zone->z_pageq_empty));
11035
11036 kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
11037 KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
11038 assert(kr == KERN_SUCCESS);
11039
11040 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11041 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
11042 Z_WAITOK | Z_ZERO);
11043 assert(NULL != allocs_pcpu[idx]);
11044 }
11045 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11046 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
11047 }
11048 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
11049
11050 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
11051 vm_page_wire_count, vm_page_free_count,
11052 100L * phys_pages / zone_pages_wired_max);
11053 zone_gc(ZONE_GC_DRAIN);
11054 printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
11055 vm_page_wire_count, vm_page_free_count,
11056 100L * phys_pages / zone_pages_wired_max);
11057
11058 unsigned int allva = 0;
11059
11060 zone_foreach(z) {
11061 zone_lock(z);
11062 allva += z->z_wired_cur;
11063 if (zone_pva_is_null(z->z_pageq_va)) {
11064 zone_unlock(z);
11065 continue;
11066 }
11067 unsigned count = 0;
11068 uint64_t size;
11069 zone_pva_t pg = z->z_pageq_va;
11070 struct zone_page_metadata *page_meta;
11071 while (pg.packed_address) {
11072 page_meta = zone_pva_to_meta(pg);
11073 count += z->z_percpu ? 1 : z->z_chunk_pages;
11074 if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
11075 count -= page_meta->zm_page_index;
11076 }
11077 pg = page_meta->zm_page_next;
11078 }
11079 size = zone_size_wired(z);
11080 if (!size) {
11081 size = 1;
11082 }
11083 printf("%s%s: seq %d, res %d, %qd %%\n",
11084 zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
11085 z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
11086 zone_unlock(z);
11087 }
11088
11089 printf("total va: %d\n", allva);
11090
11091 assert(zone_pva_is_null(test_zone->z_pageq_empty));
11092 assert(zone_pva_is_null(test_zone->z_pageq_partial));
11093 assert(!zone_pva_is_null(test_zone->z_pageq_va));
11094 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
11095 assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
11096 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
11097
11098 for (idx = 0; idx < num_allocs; idx++) {
11099 assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
11100 }
11101
11102 /* make sure the zone is still usable after a GC */
11103
11104 for (idx = 0; idx < num_allocs; idx++) {
11105 allocs[idx] = zalloc(test_zone);
11106 assert(allocs[idx]);
11107 printf("alloc[%d] %p\n", idx, allocs[idx]);
11108 }
11109 for (idx = 0; idx < num_allocs; idx++) {
11110 zfree(test_zone, allocs[idx]);
11111 }
11112
11113 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11114 allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
11115 Z_WAITOK | Z_ZERO);
11116 assert(NULL != allocs_pcpu[idx]);
11117 }
11118 for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11119 zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
11120 }
11121
11122 assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
11123
11124 kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
11125
11126 zdestroy(test_zone);
11127 zdestroy(test_pcpu_zone);
11128 }
11129 #endif /* !KASAN_CLASSIC */
11130
11131 printf("zone_basic_test: Test passed\n");
11132
11133
11134 *out = 1;
11135 out:
11136 os_atomic_store(&any_zone_test_running, false, relaxed);
11137 return rc;
11138 }
11139 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
11140
11141 #define N_ALLOCATIONS 100
11142
11143 static int
11144 run_kalloc_guard_insertion_test(int64_t in __unused, int64_t *out)
11145 {
11146 size_t alloc_size = 24576;
11147 uint64_t *ptrs[N_ALLOCATIONS];
11148 uint32_t n_guard_regions = 0;
11149 zalloc_flags_t flags = Z_WAITOK | Z_FULLSIZE;
11150 int retval = 1;
11151
11152 *out = 0;
11153
11154 for (uint i = 0; i < N_ALLOCATIONS; ++i) {
11155 uint64_t *data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size,
11156 flags, &data_ptr).addr;
11157 if (!data_ptr) {
11158 printf("%s: kalloc_ext %zu with owner and Z_FULLSIZE returned null\n",
11159 __func__, alloc_size);
11160 goto cleanup;
11161 }
11162 ptrs[i] = data_ptr;
11163 }
11164
11165 /* We don't know where there are guard regions, but let's try to find one. */
11166 for (uint i = 0; i < N_ALLOCATIONS; i++) {
11167 vm_address_t addr;
11168 zone_t z;
11169 struct zone_page_metadata *meta;
11170 struct zone_page_metadata *gmeta;
11171 uint32_t chunk_pages;
11172
11173 addr = (vm_address_t)ptrs[i];
11174 meta = zone_meta_from_addr(addr);
11175 z = &zone_array[meta->zm_index];
11176 chunk_pages = z->z_chunk_pages;
11177
11178 if (meta->zm_guarded) {
11179 n_guard_regions++;
11180 if (meta->zm_chunk_len == chunk_pages) {
11181 gmeta = meta + chunk_pages;
11182 } else if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
11183 gmeta = meta + meta->zm_subchunk_len;
11184 } else if (meta->zm_chunk_len == ZM_PGZ_GUARD) {
11185 printf("%s: kalloc_ext gave us address 0x%lx for a guard region.\n",
11186 __func__, addr);
11187 goto cleanup;
11188 } else if ((meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) && !z->z_percpu) {
11189 printf("%s: zone [%s%s] is not per-CPU.\n",
11190 __func__, zone_heap_name(z), zone_name(z));
11191 goto cleanup;
11192 } else {
11193 printf("%s: zm_chunk_len value not recognized for 0x%lx.\n",
11194 __func__, addr);
11195 goto cleanup;
11196 }
11197
11198 assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
11199 /* Now check that we have chunk_pages worth of guard pages. */
11200 for (uint j = 0; j < chunk_pages; j++) {
11201 if (gmeta->zm_chunk_len != ZM_PGZ_GUARD) {
11202 printf("%s: page %u / %u is not a guard page.\n",
11203 __func__, j + 1, chunk_pages);
11204 goto cleanup;
11205 }
11206 gmeta++;
11207 }
11208
11209 /* The metadata following the guard region should not be a guard page. */
11210 if (gmeta->zm_chunk_len == ZM_PGZ_GUARD) {
11211 printf("%s: zone page following guard region is a guard page.\n",
11212 __func__);
11213 goto cleanup;
11214 }
11215 }
11216 }
11217
11218 printf("%s: there were %u guard regions in %d allocations.\n",
11219 __func__, n_guard_regions, N_ALLOCATIONS);
11220
11221 *out = 1;
11222 retval = 0;
11223
11224 cleanup:
11225 for (uint i = 0; i < N_ALLOCATIONS; ++i) {
11226 kfree_ext(KHEAP_DATA_BUFFERS, ptrs[i], alloc_size);
11227 }
11228
11229 return retval;
11230 }
11231 SYSCTL_TEST_REGISTER(kalloc_guard_regions, run_kalloc_guard_insertion_test);
11232
11233
11234 struct zone_stress_obj {
11235 TAILQ_ENTRY(zone_stress_obj) zso_link;
11236 };
11237
11238 struct zone_stress_ctx {
11239 thread_t zsc_leader;
11240 lck_mtx_t zsc_lock;
11241 zone_t zsc_zone;
11242 uint64_t zsc_end;
11243 uint32_t zsc_workers;
11244 };
11245
11246 static void
11247 zone_stress_worker(void *arg, wait_result_t __unused wr)
11248 {
11249 struct zone_stress_ctx *ctx = arg;
11250 bool leader = ctx->zsc_leader == current_thread();
11251 TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
11252 struct zone_bool_gen bg = { };
11253 struct zone_stress_obj *obj;
11254 uint32_t allocs = 0;
11255
11256 random_bool_init(&bg.zbg_bg);
11257
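	/*
	 * The switch below maps a random draw onto one of eight actions:
	 * cases 0-1 allocate and queue at the head, cases 2-3 allocate and
	 * queue at the tail (both capped at 10000 live objects), case 4 runs
	 * a full zone_gc() on the leader thread only, cases 5-6 free from the
	 * head, and case 7 frees from the tail.
	 */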
11258 do {
11259 for (int i = 0; i < 2000; i++) {
11260 uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
11261 bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
11262 switch (what) {
11263 case 0:
11264 case 1:
11265 if (allocs < 10000) {
11266 obj = zalloc(ctx->zsc_zone);
11267 TAILQ_INSERT_HEAD(&head, obj, zso_link);
11268 allocs++;
11269 }
11270 break;
11271 case 2:
11272 case 3:
11273 if (allocs < 10000) {
11274 obj = zalloc(ctx->zsc_zone);
11275 TAILQ_INSERT_TAIL(&head, obj, zso_link);
11276 allocs++;
11277 }
11278 break;
11279 case 4:
11280 if (leader) {
11281 zone_gc(ZONE_GC_DRAIN);
11282 }
11283 break;
11284 case 5:
11285 case 6:
11286 if (!TAILQ_EMPTY(&head)) {
11287 obj = TAILQ_FIRST(&head);
11288 TAILQ_REMOVE(&head, obj, zso_link);
11289 zfree(ctx->zsc_zone, obj);
11290 allocs--;
11291 }
11292 break;
11293 case 7:
11294 if (!TAILQ_EMPTY(&head)) {
11295 obj = TAILQ_LAST(&head, zone_stress_head);
11296 TAILQ_REMOVE(&head, obj, zso_link);
11297 zfree(ctx->zsc_zone, obj);
11298 allocs--;
11299 }
11300 break;
11301 }
11302 }
11303 } while (mach_absolute_time() < ctx->zsc_end);
11304
11305 while (!TAILQ_EMPTY(&head)) {
11306 obj = TAILQ_FIRST(&head);
11307 TAILQ_REMOVE(&head, obj, zso_link);
11308 zfree(ctx->zsc_zone, obj);
11309 }
11310
11311 lck_mtx_lock(&ctx->zsc_lock);
11312 if (--ctx->zsc_workers == 0) {
11313 thread_wakeup(ctx);
11314 } else if (leader) {
11315 while (ctx->zsc_workers) {
11316 lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
11317 THREAD_UNINT);
11318 }
11319 }
11320 lck_mtx_unlock(&ctx->zsc_lock);
11321
11322 if (!leader) {
11323 thread_terminate_self();
11324 __builtin_unreachable();
11325 }
11326 }
11327
11328 static int
11329 zone_stress_test_run(__unused int64_t in, int64_t *out)
11330 {
11331 struct zone_stress_ctx ctx = {
11332 .zsc_leader = current_thread(),
11333 .zsc_workers = 3,
11334 };
11335 kern_return_t kr;
11336 thread_t th;
11337
11338 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
11339 printf("zone_stress_test: Test already running.\n");
11340 return EALREADY;
11341 }
11342
11343 lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
11344 ctx.zsc_zone = zone_create("test_zone_344", 344,
11345 ZC_DESTRUCTIBLE | ZC_NOCACHING);
11346 assert(ctx.zsc_zone->z_chunk_pages > 1);
11347
11348 clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
11349
11350 printf("zone_stress_test: Starting (leader %p)\n", current_thread());
11351
11352 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
11353
11354 for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
11355 kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
11356 BASEPRI_DEFAULT, &th);
11357 if (kr == KERN_SUCCESS) {
11358 printf("zone_stress_test: thread %d: %p\n", i, th);
11359 thread_deallocate(th);
11360 } else {
11361 ctx.zsc_workers--;
11362 }
11363 }
11364
11365 zone_stress_worker(&ctx, 0);
11366
11367 lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
11368
11369 zdestroy(ctx.zsc_zone);
11370
11371 printf("zone_stress_test: Done\n");
11372
11373 *out = 1;
11374 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
11375 os_atomic_store(&any_zone_test_running, false, relaxed);
11376 return 0;
11377 }
11378 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
11379
11380 struct zone_gc_stress_obj {
11381 STAILQ_ENTRY(zone_gc_stress_obj) zgso_link;
11382 uintptr_t zgso_pad[63];
11383 };
11384 STAILQ_HEAD(zone_gc_stress_head, zone_gc_stress_obj);
11385
11386 #define ZONE_GC_OBJ_PER_PAGE (PAGE_SIZE / sizeof(struct zone_gc_stress_obj))
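/*
 * Size check (editorial note): zone_gc_stress_obj is one queue link plus 63
 * pad words, i.e. 64 pointer-sized words. On an LP64 kernel that is 512
 * bytes, so ZONE_GC_OBJ_PER_PAGE works out to 32 objects for 16K pages and
 * 8 objects for 4K pages.
 */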
11387
11388 KALLOC_TYPE_DEFINE(zone_gc_stress_zone, struct zone_gc_stress_obj, KT_DEFAULT);
11389
11390 struct zone_gc_stress_ctx {
11391 bool zgsc_done;
11392 lck_mtx_t zgsc_lock;
11393 zone_t zgsc_zone;
11394 uint64_t zgsc_end;
11395 uint32_t zgsc_workers;
11396 };
11397
11398 static void
11399 zone_gc_stress_test_alloc_n(struct zone_gc_stress_head *head, size_t n)
11400 {
11401 struct zone_gc_stress_obj *obj;
11402
11403 for (size_t i = 0; i < n; i++) {
11404 obj = zalloc_flags(zone_gc_stress_zone, Z_WAITOK);
11405 STAILQ_INSERT_TAIL(head, obj, zgso_link);
11406 }
11407 }
11408
11409 static void
11410 zone_gc_stress_test_free_n(struct zone_gc_stress_head *head)
11411 {
11412 struct zone_gc_stress_obj *obj;
11413
11414 while ((obj = STAILQ_FIRST(head))) {
11415 STAILQ_REMOVE_HEAD(head, zgso_link);
11416 zfree(zone_gc_stress_zone, obj);
11417 }
11418 }
11419
11420 __dead2
11421 static void
11422 zone_gc_stress_worker(void *arg, wait_result_t __unused wr)
11423 {
11424 struct zone_gc_stress_ctx *ctx = arg;
11425 struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11426
11427 while (!ctx->zgsc_done) {
11428 zone_gc_stress_test_alloc_n(&head, ZONE_GC_OBJ_PER_PAGE * 4);
11429 zone_gc_stress_test_free_n(&head);
11430 }
11431
11432 lck_mtx_lock(&ctx->zgsc_lock);
11433 if (--ctx->zgsc_workers == 0) {
11434 thread_wakeup(ctx);
11435 }
11436 lck_mtx_unlock(&ctx->zgsc_lock);
11437
11438 thread_terminate_self();
11439 __builtin_unreachable();
11440 }
11441
11442 static int
11443 zone_gc_stress_test_run(__unused int64_t in, int64_t *out)
11444 {
11445 struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11446 struct zone_gc_stress_ctx ctx = {
11447 .zgsc_workers = 3,
11448 };
11449 kern_return_t kr;
11450 thread_t th;
11451
11452 if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
11453 printf("zone_gc_stress_test: Test already running.\n");
11454 return EALREADY;
11455 }
11456
11457 lck_mtx_init(&ctx.zgsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
11458 lck_mtx_lock(&ctx.zgsc_lock);
11459
11460 printf("zone_gc_stress_test: Starting (leader %p)\n", current_thread());
11461
11462 os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
11463
11464 for (uint32_t i = 0; i < ctx.zgsc_workers; i++) {
11465 kr = kernel_thread_start_priority(zone_gc_stress_worker, &ctx,
11466 BASEPRI_DEFAULT, &th);
11467 if (kr == KERN_SUCCESS) {
11468 printf("zone_gc_stress_test: thread %d: %p\n", i, th);
11469 thread_deallocate(th);
11470 } else {
11471 ctx.zgsc_workers--;
11472 }
11473 }
11474
11475 for (uint64_t i = 0; i < in; i++) {
11476 size_t count = zc_mag_size() * zc_free_batch_size() * 10;
11477
11478 if (count < ZONE_GC_OBJ_PER_PAGE * 20) {
11479 count = ZONE_GC_OBJ_PER_PAGE * 20;
11480 }
11481
11482 zone_gc_stress_test_alloc_n(&head, count);
11483 zone_gc_stress_test_free_n(&head);
11484
11485 lck_mtx_lock(&zone_gc_lock);
11486 zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11487 ZONE_RECLAIM_TRIM);
11488 lck_mtx_unlock(&zone_gc_lock);
11489
11490 printf("zone_gc_stress_test: round %lld/%lld\n", i + 1, in);
11491 }
11492
11493 os_atomic_thread_fence(seq_cst);
11494 ctx.zgsc_done = true;
11495 lck_mtx_sleep(&ctx.zgsc_lock, LCK_SLEEP_DEFAULT, &ctx, THREAD_UNINT);
11496 lck_mtx_unlock(&ctx.zgsc_lock);
11497
11498 lck_mtx_destroy(&ctx.zgsc_lock, &zone_locks_grp);
11499
11500 lck_mtx_lock(&zone_gc_lock);
11501 zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11502 ZONE_RECLAIM_DRAIN);
11503 lck_mtx_unlock(&zone_gc_lock);
11504
11505 printf("zone_gc_stress_test: Done\n");
11506
11507 *out = 1;
11508 os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
11509 os_atomic_store(&any_zone_test_running, false, relaxed);
11510 return 0;
11511 }
11512 SYSCTL_TEST_REGISTER(zone_gc_stress_test, zone_gc_stress_test_run);
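/*
 * Usage note (editorial): unlike the other tests, the value written to this
 * test's sysctl is meaningful; "in" selects how many trim/reclaim rounds the
 * leader thread drives (the for-loop above runs "in" times), so writing 5
 * runs five rounds. On DEVELOPMENT/DEBUG kernels this is typically invoked
 * as something like "sysctl debug.test.zone_gc_stress_test=5" (exact node
 * name assumed here).
 */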
11513
11514 /*
11515 * Routines to test that zone garbage collection and zone replenish threads
11516 * running at the same time don't cause problems.
11517 */
11518
11519 static int
11520 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
11521 {
11522 zone_gc(ZONE_GC_DRAIN);
11523 *out = 1;
11524 return 0;
11525 }
11526 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
11527
11528 static int
11529 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
11530 {
11531 zone_t z = vm_map_entry_zone;
11532 struct data { struct data *next; } *node, *list = NULL;
11533
11534 if (z == NULL) {
11535 printf("Couldn't find a replenish zone\n");
11536 return EIO;
11537 }
11538
11539 /* big enough to go past replenishment */
11540 for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
11541 node = zalloc(z);
11542 node->next = list;
11543 list = node;
11544 }
11545
11546 /*
11547 * release the memory we allocated
11548 */
11549 while (list != NULL) {
11550 node = list;
11551 list = list->next;
11552 zfree(z, node);
11553 }
11554
11555 *out = 1;
11556 return 0;
11557 }
11558 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
11559
11560
11561 #endif /* DEBUG || DEVELOPMENT */
11562