xref: /xnu-10002.1.13/osfmk/kern/zalloc.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	kern/zalloc.c
60  *	Author:	Avadis Tevanian, Jr.
61  *
62  *	Zone-based memory allocator.  A zone is a collection of fixed size
63  *	data blocks for which quick allocation/deallocation is possible.
64  */
65 
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80 
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97 
98 #include <prng/random.h>
99 
100 #include <vm/pmap.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_memtag.h>
103 #include <vm/vm_kern.h>
104 #include <vm/vm_page.h>
105 #include <vm/vm_pageout.h>
106 #include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */
107 
108 #include <pexpert/pexpert.h>
109 
110 #include <machine/machparam.h>
111 #include <machine/machine_routines.h>  /* ml_cpu_get_info */
112 
113 #include <os/atomic.h>
114 
115 #include <libkern/OSDebug.h>
116 #include <libkern/OSAtomic.h>
117 #include <libkern/section_keywords.h>
118 #include <sys/kdebug.h>
119 #include <sys/code_signing.h>
120 
121 #include <san/kasan.h>
122 #include <libsa/stdlib.h>
123 #include <sys/errno.h>
124 
125 #include <IOKit/IOBSD.h>
126 #include <arm64/amcc_rorgn.h>
127 
128 #if DEBUG
129 #define z_debug_assert(expr)  assert(expr)
130 #else
131 #define z_debug_assert(expr)  (void)(expr)
132 #endif
133 
134 /* Returns pid of the task with the largest number of VM map entries.  */
135 extern pid_t find_largest_process_vm_map_entries(void);
136 
137 /*
138  * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
139  * For any other pid we try to kill that process synchronously.
140  */
141 extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);
142 
143 extern zone_t vm_object_zone;
144 extern zone_t ipc_service_port_label_zone;
145 
146 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
147     ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
148 
149 #if CONFIG_KERNEL_TAGGING
150 #define ZONE_MIN_ELEM_SIZE      (sizeof(uint64_t) * 2)
151 #define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
152 #else /* CONFIG_KERNEL_TAGGING */
153 #define ZONE_MIN_ELEM_SIZE      sizeof(uint64_t)
154 #define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
155 #endif /* CONFIG_KERNEL_TAGGING */
156 
157 #define ZONE_MAX_ALLOC_SIZE     (32 * 1024)
158 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
159 #define ZONE_CHUNK_ALLOC_SIZE   (256 * 1024)
160 #define ZONE_GUARD_DENSE        (32  * 1024)
161 #define ZONE_GUARD_SPARSE       (64  * 1024)
162 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
163 
164 #if XNU_PLATFORM_MacOSX
165 #define ZONE_MAP_MAX            (32ULL << 30)
166 #define ZONE_MAP_VA_SIZE        (128ULL << 30)
167 #else /* XNU_PLATFORM_MacOSX */
168 #define ZONE_MAP_MAX            (8ULL << 30)
169 #define ZONE_MAP_VA_SIZE        (24ULL << 30)
170 #endif /* !XNU_PLATFORM_MacOSX */
171 
172 __enum_closed_decl(zm_len_t, uint16_t, {
173 	ZM_CHUNK_FREE           = 0x0,
174 	/* 1 through 8 are valid lengths */
175 	ZM_CHUNK_LEN_MAX        = 0x8,
176 
177 	/* PGZ magical values */
178 	ZM_PGZ_FREE             = 0x0,
179 	ZM_PGZ_ALLOCATED        = 0xa, /* [a]llocated   */
180 	ZM_PGZ_GUARD            = 0xb, /* oo[b]         */
181 	ZM_PGZ_DOUBLE_FREE      = 0xd, /* [d]ouble_free */
182 
183 	/* secondary page markers */
184 	ZM_SECONDARY_PAGE       = 0xe,
185 	ZM_SECONDARY_PCPU_PAGE  = 0xf,
186 });
187 
188 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
189 
190 struct zone_page_metadata {
191 	union {
192 		struct {
193 			/* The index of the zone this metadata page belongs to */
194 			zone_id_t       zm_index : 10;
195 
196 			/*
197 			 * This chunk ends with a guard page.
198 			 */
199 			uint16_t        zm_guarded : 1;
200 
201 			/*
202 			 * Whether `zm_bitmap` is an inline bitmap
203 			 * or a packed bitmap reference
204 			 */
205 			uint16_t        zm_inline_bitmap : 1;
206 
207 			/*
208 			 * Zones allocate in "chunks" of zone_t::z_chunk_pages
209 			 * consecutive pages, or zpercpu_count() pages if the
210 			 * zone is percpu.
211 			 *
212 			 * The first page of it has its metadata set with:
213 			 * - 0 if none of the pages are currently wired
214 			 * - the number of wired pages in the chunk
215 			 *   (not scaled for percpu).
216 			 *
217 			 * Other pages in the chunk have their zm_chunk_len set
218 			 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
219 			 * depending on whether the zone is percpu or not.
220 			 * For those, zm_page_index holds the index of that page
221 			 * in the run, and zm_subchunk_len the remaining length
222 			 * within the chunk.
223 			 *
224 			 * Metadata used for PGZ pages can have 3 values:
225 			 * - ZM_PGZ_FREE:         slot is free
226 			 * - ZM_PGZ_ALLOCATED:    slot holds an allocated element
227 			 *                        at offset (zm_pgz_orig_addr & PAGE_MASK)
228 			 * - ZM_PGZ_DOUBLE_FREE:  slot detected a double free
229 			 *                        (will panic).
230 			 */
231 			zm_len_t        zm_chunk_len : 4;
232 		};
233 		uint16_t zm_bits;
234 	};
235 
236 	union {
237 #define ZM_ALLOC_SIZE_LOCK      1u
238 		uint16_t zm_alloc_size; /* first page only */
239 		struct {
240 			uint8_t zm_page_index;   /* secondary pages only */
241 			uint8_t zm_subchunk_len; /* secondary pages only */
242 		};
243 		uint16_t zm_oob_offs;   /* in guard pages  */
244 	};
245 	union {
246 		uint32_t zm_bitmap;     /* most zones      */
247 		uint32_t zm_bump;       /* permanent zones */
248 	};
249 
250 	union {
251 		struct {
252 			zone_pva_t      zm_page_next;
253 			zone_pva_t      zm_page_prev;
254 		};
255 		vm_offset_t zm_pgz_orig_addr;
256 		struct zone_page_metadata *zm_pgz_slot_next;
257 	};
258 };
259 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
260 
261 /*!
262  * @typedef zone_magazine_t
263  *
264  * @brief
265  * Magazine of cached allocations.
266  *
267  * @field zm_next       linkage used by magazine depots.
268  * @field zm_elems      an array of @c zc_mag_size() elements.
269  */
270 struct zone_magazine {
271 	zone_magazine_t         zm_next;
272 	smr_seq_t               zm_seq;
273 	vm_offset_t             zm_elems[0];
274 };
275 
276 /*!
277  * @typedef zone_cache_t
278  *
279  * @brief
280  * Magazine of cached allocations.
281  *
282  * @discussion
283  * Below is a diagram of the caching system. This design is inspired by the
284  * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
285  * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
286  * zone allocator (itself derived from this seminal work).
287  *
288  * It is divided into 3 layers:
289  * - the per-cpu layer,
290  * - the recirculation depot layer,
291  * - the Zone Allocator.
292  *
293  * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
294  * which are stacks of up to @c zc_mag_size() elements.
295  *
296  * <h2>CPU layer</h2>
297  *
298  * The CPU layer (@c zone_cache_t) looks like this:
299  *
300  *      ╭─ a ─ f ─┬───────── zm_depot ──────────╮
301  *      │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮         │
302  *      │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│         │
303  *      │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│         │
304  *      │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│         │
305  *      │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯         │
306  *      ╰─────────┴─────────────────────────────╯
307  *
308  * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
309  * or free to. Serialization is achieved through disabling preemption, and only
310  * the current CPU can acces those allocations. This is represented on the left
311  * hand side of the diagram above.
312  *
313  * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
314  * full magazines, and is protected by the @c zm_depot_lock for access.
315  * The lock is expected to absolutely never be contended, as only the local CPU
316  * tends to access the local per-cpu depot in regular operation mode.
317  *
318  * However unlike UMA, our implementation allows for the zone GC to reclaim
319  * per-CPU magazines aggresively, which is serialized with the @c zm_depot_lock.
320  *
321  *
322  * <h2>Recirculation Depot</h2>
323  *
324  * The recirculation depot layer is a list similar to the per-cpu depot,
325  * however it is different in two fundamental ways:
326  *
327  * - it is protected by the regular zone lock,
328  * - elements referenced by the magazines in that layer appear free
329  *   to the zone layer.
330  *
331  *
332  * <h2>Magazine circulation and sizing</h2>
333  *
334  * The caching system sizes itself dynamically. Operations that allocate/free
335  * a single element call @c zone_lock_nopreempt_check_contention() which records
336  * contention on the lock by doing a trylock and recording its success.
337  *
338  * This information is stored in the @c z_recirc_cont_cur field of the zone,
339  * and a windowed moving average is maintained in @c z_contention_wma.
340  * The periodically run function @c compute_zone_working_set_size() will then
341  * take this into account to decide to grow the number of buckets allowed
342  * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
343  * thresholds.
344  *
345  * The per-cpu layer will attempt to work with its depot, finding both full and
346  * empty magazines cached there. If it can't get what it needs, then it will
347  * mediate with the zone recirculation layer. Such recirculation is done in
348  * batches in order to amortize lock holds.
349  * (See @c {zalloc,zfree}_cached_depot_recirculate()).
350  *
351  * The recirculation layer keeps a track of what the minimum amount of magazines
352  * it had over time was for each of the full and empty queues. This allows for
353  * @c compute_zone_working_set_size() to return memory to the system when a zone
354  * stops being used as much.
355  *
356  * <h2>Security considerations</h2>
357  *
358  * The zone caching layer has been designed to avoid returning elements in
359  * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
360  * and @c zfree() free to the (f) magazine, and only swap them when the
361  * requested operation cannot be fulfilled.
362  *
363  * The per-cpu overflow depot or the recirculation depots are similarly used
364  * in FIFO order.
365  *
366  * @field zc_depot_lock     a lock to access @c zc_depot, @c zc_depot_cur.
367  * @field zc_alloc_cur      denormalized number of elements in the (a) magazine
368  * @field zc_free_cur       denormalized number of elements in the (f) magazine
369  * @field zc_alloc_elems    a pointer to the array of elements in (a)
370  * @field zc_free_elems     a pointer to the array of elements in (f)
371  *
372  * @field zc_depot          a list of @c zc_depot_cur full magazines
373  */
374 typedef struct zone_cache {
375 	hw_lck_ticket_t            zc_depot_lock;
376 	uint16_t                   zc_alloc_cur;
377 	uint16_t                   zc_free_cur;
378 	vm_offset_t               *zc_alloc_elems;
379 	vm_offset_t               *zc_free_elems;
380 	struct zone_depot          zc_depot;
381 	smr_t                      zc_smr;
382 	zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
383 } __attribute__((aligned(64))) * zone_cache_t;
384 
385 #if !__x86_64__
386 static
387 #endif
388 __security_const_late struct {
389 	struct mach_vm_range       zi_map_range;  /* all zone submaps     */
390 	struct mach_vm_range       zi_ro_range;   /* read-only range      */
391 	struct mach_vm_range       zi_meta_range; /* debugging only       */
392 	struct mach_vm_range       zi_bits_range; /* bits buddy allocator */
393 	struct mach_vm_range       zi_xtra_range; /* vm tracking metadata */
394 	struct mach_vm_range       zi_pgz_range;
395 	struct zone_page_metadata *zi_pgz_meta;
396 
397 	/*
398 	 * The metadata lives within the zi_meta_range address range.
399 	 *
400 	 * The correct formula to find a metadata index is:
401 	 *     absolute_page_index - page_index(zi_map_range.min_address)
402 	 *
403 	 * And then this index is used to dereference zi_meta_range.min_address
404 	 * as a `struct zone_page_metadata` array.
405 	 *
406 	 * To avoid doing that substraction all the time in the various fast-paths,
407 	 * zi_meta_base are pre-offset with that minimum page index to avoid redoing
408 	 * that math all the time.
409 	 */
410 	struct zone_page_metadata *zi_meta_base;
411 } zone_info;
412 
413 __startup_data static struct mach_vm_range  zone_map_range;
414 __startup_data static vm_map_size_t         zone_meta_size;
415 __startup_data static vm_map_size_t         zone_bits_size;
416 __startup_data static vm_map_size_t         zone_xtra_size;
417 
418 /*
419  * Initial array of metadata for stolen memory.
420  *
421  * The numbers here have to be kept in sync with vm_map_steal_memory()
422  * so that we have reserved enough metadata.
423  *
424  * After zone_init() has run (which happens while the kernel is still single
425  * threaded), the metadata is moved to its final dynamic location, and
426  * this array is unmapped with the rest of __startup_data at lockdown.
427  */
428 #define ZONE_EARLY_META_INLINE_COUNT    64
429 __startup_data
430 static struct zone_page_metadata
431     zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
432 
433 #if __x86_64__
434 /*
435  * On Intel we can't "free" pmap stolen pages,
436  * so instead we use a static array in __KLDDATA
437  * which gets reclaimed at lockdown time.
438  */
439 __startup_data __attribute__((aligned(PAGE_SIZE)))
440 static uint8_t zone_early_pages_to_cram[PAGE_SIZE * 16];
441 #endif
442 
443 /*
444  *	The zone_locks_grp allows for collecting lock statistics.
445  *	All locks are associated to this group in zinit.
446  *	Look at tools/lockstat for debugging lock contention.
447  */
448 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
449 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
450 
451 /*
452  *	The zone metadata lock protects:
453  *	- metadata faulting,
454  *	- VM submap VA allocations,
455  *	- early gap page queue list
456  */
457 #define zone_meta_lock()   lck_mtx_lock(&zone_metadata_region_lck);
458 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
459 
460 /*
461  *	Exclude more than one concurrent garbage collection
462  */
463 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
464 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
465 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
466 
467 /*
468  * Panic logging metadata
469  */
470 bool panic_include_zprint = false;
471 bool panic_include_kalloc_types = false;
472 zone_t kalloc_type_src_zone = ZONE_NULL;
473 zone_t kalloc_type_dst_zone = ZONE_NULL;
474 mach_memory_info_t *panic_kext_memory_info = NULL;
475 vm_size_t panic_kext_memory_size = 0;
476 vm_offset_t panic_fault_address = 0;
477 
478 /*
479  *      Protects zone_array, num_zones, num_zones_in_use, and
480  *      zone_destroyed_bitmap
481  */
482 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
483 static zone_id_t        num_zones_in_use;
484 zone_id_t _Atomic       num_zones;
485 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
486 
487 /*
488  * Initial globals for zone stats until we can allocate the real ones.
489  * Those get migrated inside the per-CPU ones during zone_init() and
490  * this array is unmapped with the rest of __startup_data at lockdown.
491  */
492 
493 /* zone to allocate zone_magazine structs from */
494 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
495 /*
496  * Until pid1 is made, zone caching is off,
497  * until compute_zone_working_set_size() runs for the firt time.
498  *
499  * -1 represents the "never enabled yet" value.
500  */
501 static int8_t zone_caching_disabled = -1;
502 
503 __startup_data
504 static struct zone_stats zone_stats_startup[MAX_ZONES];
505 struct zone              zone_array[MAX_ZONES];
506 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
507 	[0 ... MAX_ZONES - 1] = {
508 		.z_kheap_id       = KHEAP_ID_NONE,
509 		.z_noencrypt      = false,
510 		.z_submap_idx     = Z_SUBMAP_IDX_GENERAL_0,
511 		.z_kalloc_type    = false,
512 		.z_sig_eq         = 0
513 	},
514 };
515 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
516 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
517 
518 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
519 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
520 
521 /* Used to keep track of destroyed slots in the zone_array */
522 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
523 
524 /* number of zone mapped pages used by all zones */
525 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
526 size_t zone_pages_wired;
527 size_t zone_guard_pages;
528 
529 /* Time in (ms) after which we panic for zone exhaustions */
530 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
531 static bool zone_share_always = true;
532 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
533 
534 #if VM_TAG_SIZECLASSES
535 /*
536  * Zone tagging allows for per "tag" accounting of allocations for the kalloc
537  * zones only.
538  *
539  * There are 3 kinds of tags that can be used:
540  * - pre-registered VM_KERN_MEMORY_*
541  * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
542  * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
543  *
544  * The VM tracks the statistics in lazily allocated structures.
545  * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
546  *
547  * If for some reason the requested tag cannot be accounted for,
548  * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
549  *
550  * Each allocated element also remembers the tag it was assigned,
551  * which lets zalloc/zfree update statistics correctly.
552  */
553 
554 /* enable tags for zones that ask for it */
555 static TUNABLE(bool, zone_tagging_on, "-zt", false);
556 
557 /*
558  * Array of all sizeclasses used by kalloc variants so that we can
559  * have accounting per size class for each kalloc callsite
560  */
561 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
562 #endif /* VM_TAG_SIZECLASSES */
563 
564 #if DEBUG || DEVELOPMENT
565 static int zalloc_simulate_vm_pressure;
566 #endif /* DEBUG || DEVELOPMENT */
567 
568 #define Z_TUNABLE(t, n, d) \
569 	TUNABLE(t, _##n, #n, d); \
570 	__pure2 static inline t n(void) { return _##n; }
571 
572 /*
573  * Zone caching tunables
574  *
575  * zc_mag_size():
576  *   size of magazines, larger to reduce contention at the expense of memory
577  *
578  * zc_enable_level
579  *   number of contentions per second after which zone caching engages
580  *   automatically.
581  *
582  *   0 to disable.
583  *
584  * zc_grow_level
585  *   number of contentions per second x cpu after which the number of magazines
586  *   allowed in the depot can grow. (in "Z_WMA_UNIT" units).
587  *
588  * zc_shrink_level
589  *   number of contentions per second x cpu below which the number of magazines
590  *   allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
591  *
592  * zc_pcpu_max
593  *   maximum memory size in bytes that can hang from a CPU,
594  *   which will affect how many magazines are allowed in the depot.
595  *
596  *   The alloc/free magazines are assumed to be on average half-empty
597  *   and to count for "1" unit of magazines.
598  *
599  * zc_autotrim_size
600  *   Size allowed to hang extra from the recirculation depot before
601  *   auto-trim kicks in.
602  *
603  * zc_autotrim_buckets
604  *
605  *   How many buckets in excess of the working-set are allowed
606  *   before auto-trim kicks in for empty buckets.
607  *
608  * zc_free_batch_size
609  *   The size of batches of frees/reclaim that can be done keeping
610  *   the zone lock held (and preemption disabled).
611  */
612 Z_TUNABLE(uint16_t, zc_mag_size, 8);
613 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
614 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
615 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
616 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
617 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
618 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
619 static Z_TUNABLE(uint32_t, zc_free_batch_size, 256);
620 
621 static SECURITY_READ_ONLY_LATE(size_t)    zone_pages_wired_max;
622 static SECURITY_READ_ONLY_LATE(vm_map_t)  zone_submaps[Z_SUBMAP_IDX_COUNT];
623 static SECURITY_READ_ONLY_LATE(vm_map_t)  zone_meta_map;
624 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
625 	[Z_SUBMAP_IDX_VM]               = "VM",
626 	[Z_SUBMAP_IDX_READ_ONLY]        = "RO",
627 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
628 	[Z_SUBMAP_IDX_GENERAL_0]        = "GEN0",
629 	[Z_SUBMAP_IDX_GENERAL_1]        = "GEN1",
630 	[Z_SUBMAP_IDX_GENERAL_2]        = "GEN2",
631 	[Z_SUBMAP_IDX_GENERAL_3]        = "GEN3",
632 #else
633 	[Z_SUBMAP_IDX_GENERAL_0]        = "GEN",
634 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
635 	[Z_SUBMAP_IDX_DATA]             = "DATA",
636 };
637 
638 #if __x86_64__
639 #define ZONE_ENTROPY_CNT 8
640 #else
641 #define ZONE_ENTROPY_CNT 2
642 #endif
643 static struct zone_bool_gen {
644 	struct bool_gen zbg_bg;
645 	uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
646 } zone_bool_gen[MAX_CPUS];
647 
648 #if CONFIG_PROB_GZALLOC
649 /*
650  * Probabilistic gzalloc
651  * =====================
652  *
653  *
654  * Probabilistic guard zalloc samples allocations and will protect them by
655  * double-mapping the page holding them and returning the secondary virtual
656  * address to its callers.
657  *
658  * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
659  * are set.
660  *
661  *
662  * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
663  * most zalloc/kalloc features:
664  * - zone_require is functional
665  * - zone caching or zone tagging is compatible
666  * - non-blocking allocation work (they will always return NULL with gzalloc).
667  *
668  * PGZ limitations:
669  * - VA sequestering isn't respected, as the slots (which are in limited
670  *   quantity) will be reused for any type, however the PGZ quarantine
671  *   somewhat mitigates the impact.
672  * - zones with elements larger than a page cannot be protected.
673  *
674  *
675  * Tunables:
676  * --------
677  *
678  * pgz=1:
679  *   Turn on probabilistic guard malloc for all zones
680  *
681  *   (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
682  *
683  * pgz_sample_rate=0 to 2^31
684  *   average sample rate between two guarded allocations.
685  *   0 means every allocation.
686  *
687  *   The default is a random number between 1000 and 10,000
688  *
689  * pgz_slots
690  *   how many allocations to protect.
691  *
692  *   Each costs:
693  *   - a PTE in the pmap (when allocated)
694  *   - 2 zone page meta's (every other page is a "guard" one, 32B total)
695  *   - 64 bytes per backtraces.
696  *   On LP64 this is <16K per 100 slots.
697  *
698  *   The default is ~200 slots per G of physical ram (32k / G)
699  *
700  *   TODO:
701  *   - try harder to allocate elements at the "end" to catch OOB more reliably.
702  *
703  * pgz_quarantine
704  *   how many slots should be free at any given time.
705  *
706  *   PGZ will round robin through free slots to be reused, but free slots are
707  *   important to detect use-after-free by acting as a quarantine.
708  *
709  *   By default, PGZ will keep 33% of the slots around at all time.
710  *
711  * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
712  *   Specific zones for which to enable probabilistic guard malloc.
713  *   There must be no numbering gap (names after the gap will be ignored).
714  */
715 #if DEBUG || DEVELOPMENT
716 static TUNABLE(bool, pgz_all, "pgz", true);
717 #else
718 static TUNABLE(bool, pgz_all, "pgz", false);
719 #endif
720 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
721 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
722 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
723 #endif /* CONFIG_PROB_GZALLOC */
724 
725 static zone_t zone_find_largest(uint64_t *zone_size);
726 
727 #endif /* !ZALLOC_TEST */
728 #pragma mark Zone metadata
729 #if !ZALLOC_TEST
730 
731 static inline bool
zone_has_index(zone_t z,zone_id_t zid)732 zone_has_index(zone_t z, zone_id_t zid)
733 {
734 	return zone_array + zid == z;
735 }
736 
737 __abortlike
738 void
zone_invalid_panic(zone_t zone)739 zone_invalid_panic(zone_t zone)
740 {
741 	panic("zone %p isn't in the zone_array", zone);
742 }
743 
744 __abortlike
745 static void
zone_metadata_corruption(zone_t zone,struct zone_page_metadata * meta,const char * kind)746 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
747     const char *kind)
748 {
749 	panic("zone metadata corruption: %s (meta %p, zone %s%s)",
750 	    kind, meta, zone_heap_name(zone), zone->z_name);
751 }
752 
753 __abortlike
754 static void
zone_invalid_element_addr_panic(zone_t zone,vm_offset_t addr)755 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
756 {
757 	panic("zone element pointer validation failed (addr: %p, zone %s%s)",
758 	    (void *)addr, zone_heap_name(zone), zone->z_name);
759 }
760 
761 __abortlike
762 static void
zone_page_metadata_index_confusion_panic(zone_t zone,vm_offset_t addr,struct zone_page_metadata * meta)763 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
764     struct zone_page_metadata *meta)
765 {
766 	zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
767 	zone_id_t zidx;
768 	zone_t src_zone;
769 
770 	if (zsflags.z_kalloc_type) {
771 		panic_include_kalloc_types = true;
772 		kalloc_type_dst_zone = zone;
773 	}
774 
775 	zidx = meta->zm_index;
776 	if (zidx >= os_atomic_load(&num_zones, relaxed)) {
777 		panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
778 		    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
779 		    zidx);
780 	}
781 
782 	src_zone = &zone_array[zidx];
783 	src_zsflags = zone_security_array[zidx];
784 	if (src_zsflags.z_kalloc_type) {
785 		panic_include_kalloc_types = true;
786 		kalloc_type_src_zone = src_zone;
787 	}
788 
789 	panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
790 	    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
791 	    zone_heap_name(src_zone), src_zone->z_name, zidx);
792 }
793 
794 __abortlike
795 static void
zone_page_metadata_list_corruption(zone_t zone,struct zone_page_metadata * meta)796 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
797 {
798 	panic("metadata list corruption through element %p detected in zone %s%s",
799 	    meta, zone_heap_name(zone), zone->z_name);
800 }
801 
802 __abortlike
803 static void
zone_page_meta_accounting_panic(zone_t zone,struct zone_page_metadata * meta,const char * kind)804 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
805     const char *kind)
806 {
807 	panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
808 	    zone_heap_name(zone), zone->z_name, meta);
809 }
810 
811 __abortlike
812 static void
zone_meta_double_free_panic(zone_t zone,vm_offset_t addr,const char * caller)813 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
814 {
815 	panic("%s: double free of %p to zone %s%s", caller,
816 	    (void *)addr, zone_heap_name(zone), zone->z_name);
817 }
818 
819 __abortlike
820 static void
zone_accounting_panic(zone_t zone,const char * kind)821 zone_accounting_panic(zone_t zone, const char *kind)
822 {
823 	panic("accounting mismatch (%s) for zone %s%s", kind,
824 	    zone_heap_name(zone), zone->z_name);
825 }
826 
827 #define zone_counter_sub(z, stat, value)  ({ \
828 	if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
829 	    zone_accounting_panic(z, #stat " wrap-around"); \
830 	} \
831 	(z)->stat; \
832 })
833 
834 static inline uint16_t
zone_meta_alloc_size_add(zone_t z,struct zone_page_metadata * m,vm_offset_t esize)835 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
836     vm_offset_t esize)
837 {
838 	if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
839 		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
840 	}
841 	return m->zm_alloc_size;
842 }
843 
844 static inline uint16_t
zone_meta_alloc_size_sub(zone_t z,struct zone_page_metadata * m,vm_offset_t esize)845 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
846     vm_offset_t esize)
847 {
848 	if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
849 		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
850 	}
851 	return m->zm_alloc_size;
852 }
853 
854 __abortlike
855 static void
zone_nofail_panic(zone_t zone)856 zone_nofail_panic(zone_t zone)
857 {
858 	panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
859 	    zone_heap_name(zone), zone->z_name);
860 }
861 
862 __header_always_inline bool
zone_spans_ro_va(vm_offset_t addr_start,vm_offset_t addr_end)863 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
864 {
865 	const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
866 	struct mach_vm_range r = { addr_start, addr_end };
867 
868 	return mach_vm_range_intersects(ro_r, &r);
869 }
870 
871 #define from_range(r, addr, size) \
872 	__builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
873 	mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
874 	mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))
875 
876 #define from_ro_map(addr, size) \
877 	from_range(&zone_info.zi_ro_range, addr, size)
878 
879 #define from_zone_map(addr, size) \
880 	from_range(&zone_info.zi_map_range, addr, size)
881 
882 __header_always_inline bool
zone_pva_is_null(zone_pva_t page)883 zone_pva_is_null(zone_pva_t page)
884 {
885 	return page.packed_address == 0;
886 }
887 
888 __header_always_inline bool
zone_pva_is_queue(zone_pva_t page)889 zone_pva_is_queue(zone_pva_t page)
890 {
891 	// actual kernel pages have the top bit set
892 	return (int32_t)page.packed_address > 0;
893 }
894 
895 __header_always_inline bool
zone_pva_is_equal(zone_pva_t pva1,zone_pva_t pva2)896 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
897 {
898 	return pva1.packed_address == pva2.packed_address;
899 }
900 
901 __header_always_inline zone_pva_t *
zone_pageq_base(void)902 zone_pageq_base(void)
903 {
904 	extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
905 
906 	/*
907 	 * `-1` so that if the first __DATA variable is a page queue,
908 	 * it gets a non 0 index
909 	 */
910 	return data_seg_start - 1;
911 }
912 
913 __header_always_inline void
zone_queue_set_head(zone_t z,zone_pva_t queue,zone_pva_t oldv,struct zone_page_metadata * meta)914 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
915     struct zone_page_metadata *meta)
916 {
917 	zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
918 
919 	if (!zone_pva_is_equal(*queue_head, oldv)) {
920 		zone_page_metadata_list_corruption(z, meta);
921 	}
922 	*queue_head = meta->zm_page_next;
923 }
924 
925 __header_always_inline zone_pva_t
zone_queue_encode(zone_pva_t * headp)926 zone_queue_encode(zone_pva_t *headp)
927 {
928 	return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
929 }
930 
931 __header_always_inline zone_pva_t
zone_pva_from_addr(vm_address_t addr)932 zone_pva_from_addr(vm_address_t addr)
933 {
934 	// cannot use atop() because we want to maintain the sign bit
935 	return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
936 }
937 
938 __header_always_inline vm_address_t
zone_pva_to_addr(zone_pva_t page)939 zone_pva_to_addr(zone_pva_t page)
940 {
941 	// cause sign extension so that we end up with the right address
942 	return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
943 }
944 
945 __header_always_inline struct zone_page_metadata *
zone_pva_to_meta(zone_pva_t page)946 zone_pva_to_meta(zone_pva_t page)
947 {
948 	return &zone_info.zi_meta_base[page.packed_address];
949 }
950 
951 __header_always_inline zone_pva_t
zone_pva_from_meta(struct zone_page_metadata * meta)952 zone_pva_from_meta(struct zone_page_metadata *meta)
953 {
954 	return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
955 }
956 
957 __header_always_inline struct zone_page_metadata *
zone_meta_from_addr(vm_offset_t addr)958 zone_meta_from_addr(vm_offset_t addr)
959 {
960 	return zone_pva_to_meta(zone_pva_from_addr(addr));
961 }
962 
963 __header_always_inline zone_id_t
zone_index_from_ptr(const void * ptr)964 zone_index_from_ptr(const void *ptr)
965 {
966 	return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
967 }
968 
969 __header_always_inline vm_offset_t
zone_meta_to_addr(struct zone_page_metadata * meta)970 zone_meta_to_addr(struct zone_page_metadata *meta)
971 {
972 	return ptoa((int32_t)(meta - zone_info.zi_meta_base));
973 }
974 
975 __attribute__((overloadable))
976 __header_always_inline void
zone_meta_validate(zone_t z,struct zone_page_metadata * meta,vm_address_t addr)977 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
978 {
979 	if (!zone_has_index(z, meta->zm_index)) {
980 		zone_page_metadata_index_confusion_panic(z, addr, meta);
981 	}
982 }
983 
984 __attribute__((overloadable))
985 __header_always_inline void
zone_meta_validate(zone_t z,struct zone_page_metadata * meta)986 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
987 {
988 	zone_meta_validate(z, meta, zone_meta_to_addr(meta));
989 }
990 
991 __header_always_inline void
zone_meta_queue_push(zone_t z,zone_pva_t * headp,struct zone_page_metadata * meta)992 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
993     struct zone_page_metadata *meta)
994 {
995 	zone_pva_t head = *headp;
996 	zone_pva_t queue_pva = zone_queue_encode(headp);
997 	struct zone_page_metadata *tmp;
998 
999 	meta->zm_page_next = head;
1000 	if (!zone_pva_is_null(head)) {
1001 		tmp = zone_pva_to_meta(head);
1002 		if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
1003 			zone_page_metadata_list_corruption(z, meta);
1004 		}
1005 		tmp->zm_page_prev = zone_pva_from_meta(meta);
1006 	}
1007 	meta->zm_page_prev = queue_pva;
1008 	*headp = zone_pva_from_meta(meta);
1009 }
1010 
1011 __header_always_inline struct zone_page_metadata *
zone_meta_queue_pop(zone_t z,zone_pva_t * headp)1012 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1013 {
1014 	zone_pva_t head = *headp;
1015 	struct zone_page_metadata *meta = zone_pva_to_meta(head);
1016 	struct zone_page_metadata *tmp;
1017 
1018 	zone_meta_validate(z, meta);
1019 
1020 	if (!zone_pva_is_null(meta->zm_page_next)) {
1021 		tmp = zone_pva_to_meta(meta->zm_page_next);
1022 		if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1023 			zone_page_metadata_list_corruption(z, meta);
1024 		}
1025 		tmp->zm_page_prev = meta->zm_page_prev;
1026 	}
1027 	*headp = meta->zm_page_next;
1028 
1029 	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1030 
1031 	return meta;
1032 }
1033 
1034 __header_always_inline void
zone_meta_remqueue(zone_t z,struct zone_page_metadata * meta)1035 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1036 {
1037 	zone_pva_t meta_pva = zone_pva_from_meta(meta);
1038 	struct zone_page_metadata *tmp;
1039 
1040 	if (!zone_pva_is_null(meta->zm_page_next)) {
1041 		tmp = zone_pva_to_meta(meta->zm_page_next);
1042 		if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1043 			zone_page_metadata_list_corruption(z, meta);
1044 		}
1045 		tmp->zm_page_prev = meta->zm_page_prev;
1046 	}
1047 	if (zone_pva_is_queue(meta->zm_page_prev)) {
1048 		zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1049 	} else {
1050 		tmp = zone_pva_to_meta(meta->zm_page_prev);
1051 		if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1052 			zone_page_metadata_list_corruption(z, meta);
1053 		}
1054 		tmp->zm_page_next = meta->zm_page_next;
1055 	}
1056 
1057 	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1058 }
1059 
1060 __header_always_inline void
zone_meta_requeue(zone_t z,zone_pva_t * headp,struct zone_page_metadata * meta)1061 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1062     struct zone_page_metadata *meta)
1063 {
1064 	zone_meta_remqueue(z, meta);
1065 	zone_meta_queue_push(z, headp, meta);
1066 }
1067 
1068 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1069 static inline void
zone_meta_lock_in_partial(zone_t z,struct zone_page_metadata * m,uint32_t len)1070 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1071 {
1072 	uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1073 
1074 	assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1075 	if (new_size == ZM_ALLOC_SIZE_LOCK) {
1076 		zone_meta_requeue(z, &z->z_pageq_partial, m);
1077 		zone_counter_sub(z, z_wired_empty, len);
1078 	}
1079 }
1080 
1081 /* allows a given metadata to reach the z_pageq_empty queue again */
1082 static inline void
zone_meta_unlock_from_partial(zone_t z,struct zone_page_metadata * m,uint32_t len)1083 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1084 {
1085 	uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1086 
1087 	assert(new_size % sizeof(vm_offset_t) == 0);
1088 	if (new_size == 0) {
1089 		zone_meta_requeue(z, &z->z_pageq_empty, m);
1090 		z->z_wired_empty += len;
1091 	}
1092 }
1093 
1094 /*
1095  * Routine to populate a page backing metadata in the zone_metadata_region.
1096  * Must be called without the zone lock held as it might potentially block.
1097  */
1098 static void
zone_meta_populate(vm_offset_t base,vm_size_t size)1099 zone_meta_populate(vm_offset_t base, vm_size_t size)
1100 {
1101 	struct zone_page_metadata *from = zone_meta_from_addr(base);
1102 	struct zone_page_metadata *to   = from + atop(size);
1103 	vm_offset_t page_addr = trunc_page(from);
1104 
1105 	for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1106 #if !KASAN
1107 		/*
1108 		 * This can race with another thread doing a populate on the same metadata
1109 		 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1110 		 * fault in the shadow when we first access the metadata page. Avoid this
1111 		 * by always synchronizing on the zone_metadata_region lock with KASan.
1112 		 */
1113 		if (pmap_find_phys(kernel_pmap, page_addr)) {
1114 			continue;
1115 		}
1116 #endif
1117 
1118 		for (;;) {
1119 			kern_return_t ret = KERN_SUCCESS;
1120 
1121 			/*
1122 			 * All updates to the zone_metadata_region are done
1123 			 * under the zone_metadata_region_lck
1124 			 */
1125 			zone_meta_lock();
1126 			if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1127 				ret = kernel_memory_populate(page_addr,
1128 				    PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1129 				    VM_KERN_MEMORY_OSFMK);
1130 			}
1131 			zone_meta_unlock();
1132 
1133 			if (ret == KERN_SUCCESS) {
1134 				break;
1135 			}
1136 
1137 			/*
1138 			 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1139 			 * to bad system deadlocks, so if the allocation failed,
1140 			 * we need to do the VM_PAGE_WAIT() outside of the lock.
1141 			 */
1142 			VM_PAGE_WAIT();
1143 		}
1144 	}
1145 }
1146 
1147 __abortlike
1148 static void
zone_invalid_element_panic(zone_t zone,vm_offset_t addr)1149 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1150 {
1151 	struct zone_page_metadata *meta;
1152 	const char *from_cache = "";
1153 	vm_offset_t page;
1154 
1155 	if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1156 		panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1157 		    (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1158 	}
1159 	page = trunc_page(addr);
1160 	meta = zone_meta_from_addr(addr);
1161 
1162 	if (!zone_has_index(zone, meta->zm_index)) {
1163 		zone_page_metadata_index_confusion_panic(zone, addr, meta);
1164 	}
1165 
1166 	if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1167 		panic("metadata %p corresponding to addr %p being freed to "
1168 		    "zone %s%s%s, is marked as secondary per cpu page",
1169 		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1170 		    from_cache);
1171 	}
1172 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1173 		page -= ptoa(meta->zm_page_index);
1174 		meta -= meta->zm_page_index;
1175 	}
1176 
1177 	if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1178 		panic("metadata %p corresponding to addr %p being freed to "
1179 		    "zone %s%s%s, has chunk len greater than max",
1180 		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1181 		    from_cache);
1182 	}
1183 
1184 	if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1185 		panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1186 		    "zone element size", (void *)addr, zone_heap_name(zone),
1187 		    zone->z_name, from_cache);
1188 	}
1189 
1190 	zone_invalid_element_addr_panic(zone, addr);
1191 }
1192 
1193 __attribute__((always_inline))
1194 static struct zone_page_metadata *
zone_element_resolve(zone_t zone,vm_offset_t addr,vm_offset_t * idx)1195 zone_element_resolve(
1196 	zone_t                  zone,
1197 	vm_offset_t             addr,
1198 	vm_offset_t            *idx)
1199 {
1200 	struct zone_page_metadata *meta;
1201 	vm_offset_t offs, eidx;
1202 
1203 	meta = zone_meta_from_addr(addr);
1204 	if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1205 		zone_invalid_element_panic(zone, addr);
1206 	}
1207 
1208 	offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1209 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1210 		offs += ptoa(meta->zm_page_index);
1211 		meta -= meta->zm_page_index;
1212 	}
1213 
1214 	eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1215 	if (eidx * zone_elem_outer_size(zone) != offs) {
1216 		zone_invalid_element_panic(zone, addr);
1217 	}
1218 
1219 	*idx = eidx;
1220 	return meta;
1221 }
1222 
1223 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1224 void *
zone_element_pgz_oob_adjust(void * ptr,vm_size_t req_size,vm_size_t elem_size)1225 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1226 {
1227 	vm_offset_t addr = (vm_offset_t)ptr;
1228 	vm_offset_t end = addr + elem_size;
1229 	vm_offset_t offs;
1230 
1231 	/*
1232 	 * 0-sized allocations in a KALLOC_MINSIZE bucket
1233 	 * would be offset to the next allocation which is incorrect.
1234 	 */
1235 	req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1236 
1237 	/*
1238 	 * Given how chunks work, for a zone with PGZ guards on,
1239 	 * there's a single element which ends precisely
1240 	 * at the page boundary: the last one.
1241 	 */
1242 	if (req_size == elem_size ||
1243 	    (end & PAGE_MASK) ||
1244 	    !zone_meta_from_addr(addr)->zm_guarded) {
1245 		return ptr;
1246 	}
1247 
1248 	offs = elem_size - req_size;
1249 	zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1250 
1251 	return (char *)addr + offs;
1252 }
1253 #endif /* !ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1254 
1255 __abortlike
1256 static void
zone_element_bounds_check_panic(vm_address_t addr,vm_size_t len)1257 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1258 {
1259 	struct zone_page_metadata *meta;
1260 	vm_offset_t offs, size, page;
1261 	zone_t      zone;
1262 
1263 	page = trunc_page(addr);
1264 	meta = zone_meta_from_addr(addr);
1265 	zone = &zone_array[meta->zm_index];
1266 
1267 	if (zone->z_percpu) {
1268 		panic("zone bound checks: address %p is a per-cpu allocation",
1269 		    (void *)addr);
1270 	}
1271 
1272 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1273 		page -= ptoa(meta->zm_page_index);
1274 		meta -= meta->zm_page_index;
1275 	}
1276 
1277 	size = zone_elem_outer_size(zone);
1278 	offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1279 	    zone->z_quo_magic, size);
1280 	panic("zone bound checks: buffer %p of length %zd overflows "
1281 	    "object %p of size %zd in zone %p[%s%s]",
1282 	    (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1283 	    zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1284 }
1285 
1286 void
zone_element_bounds_check(vm_address_t addr,vm_size_t len)1287 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1288 {
1289 	struct zone_page_metadata *meta;
1290 	vm_offset_t offs, size;
1291 	zone_t      zone;
1292 
1293 	if (!from_zone_map(addr, 1)) {
1294 		return;
1295 	}
1296 
1297 #if CONFIG_PROB_GZALLOC
1298 	if (__improbable(pgz_owned(addr))) {
1299 		meta = zone_meta_from_addr(addr);
1300 		addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1301 	}
1302 #endif /* CONFIG_PROB_GZALLOC */
1303 	meta = zone_meta_from_addr(addr);
1304 	zone = zone_by_id(meta->zm_index);
1305 
1306 	if (zone->z_percpu) {
1307 		zone_element_bounds_check_panic(addr, len);
1308 	}
1309 
1310 	if (zone->z_permanent) {
1311 		/* We don't know bounds for those */
1312 		return;
1313 	}
1314 
1315 	offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1316 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1317 		offs += ptoa(meta->zm_page_index);
1318 	}
1319 	size = zone_elem_outer_size(zone);
1320 	offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1321 	if (len + zone_elem_redzone(zone) > size - offs) {
1322 		zone_element_bounds_check_panic(addr, len);
1323 	}
1324 }
1325 
1326 /*
1327  * Routine to get the size of a zone allocated address.
1328  * If the address doesnt belong to the zone maps, returns 0.
1329  */
1330 vm_size_t
zone_element_size(void * elem,zone_t * z,bool clear_oob,vm_offset_t * oob_offs)1331 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1332 {
1333 	vm_address_t addr = (vm_address_t)elem;
1334 	struct zone_page_metadata *meta;
1335 	vm_size_t esize, offs, end;
1336 	zone_t zone;
1337 
1338 	if (from_zone_map(addr, sizeof(void *))) {
1339 		meta  = zone_meta_from_addr(addr);
1340 		zone  = zone_by_id(meta->zm_index);
1341 		esize = zone_elem_inner_size(zone);
1342 		end   = vm_memtag_canonicalize_address(addr + esize);
1343 		offs  = 0;
1344 
1345 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1346 		/*
1347 		 * If the chunk uses guards, and that (addr + esize)
1348 		 * either crosses a page boundary or is at the boundary,
1349 		 * we need to look harder.
1350 		 */
1351 		if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1352 			/*
1353 			 * Because in the vast majority of cases the element
1354 			 * size is sub-page, and that meta[1] must be faulted,
1355 			 * we can quickly peek at whether it's a guard.
1356 			 *
1357 			 * For elements larger than a page, finding the guard
1358 			 * page requires a little more effort.
1359 			 */
1360 			if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1361 				offs = meta[1].zm_oob_offs;
1362 				if (clear_oob) {
1363 					meta[1].zm_oob_offs = 0;
1364 				}
1365 			} else if (esize > PAGE_SIZE) {
1366 				struct zone_page_metadata *gmeta;
1367 
1368 				if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1369 					gmeta = meta + meta->zm_subchunk_len;
1370 				} else {
1371 					gmeta = meta + zone->z_chunk_pages;
1372 				}
1373 				assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1374 
1375 				if (end >= zone_meta_to_addr(gmeta)) {
1376 					offs = gmeta->zm_oob_offs;
1377 					if (clear_oob) {
1378 						gmeta->zm_oob_offs = 0;
1379 					}
1380 				}
1381 			}
1382 		}
1383 #else
1384 #pragma unused(end, clear_oob)
1385 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1386 
1387 		if (oob_offs) {
1388 			*oob_offs = offs;
1389 		}
1390 		if (z) {
1391 			*z = zone;
1392 		}
1393 		return esize;
1394 	}
1395 
1396 	if (oob_offs) {
1397 		*oob_offs = 0;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 zone_id_t
zone_id_for_element(void * addr,vm_size_t esize)1404 zone_id_for_element(void *addr, vm_size_t esize)
1405 {
1406 	zone_id_t zid = ZONE_ID_INVALID;
1407 	if (from_zone_map(addr, esize)) {
1408 		zid = zone_index_from_ptr(addr);
1409 		__builtin_assume(zid != ZONE_ID_INVALID);
1410 	}
1411 	return zid;
1412 }
1413 
1414 /* This function just formats the reason for the panics by redoing the checks */
1415 __abortlike
1416 static void
zone_require_panic(zone_t zone,void * addr)1417 zone_require_panic(zone_t zone, void *addr)
1418 {
1419 	uint32_t zindex;
1420 	zone_t other;
1421 
1422 	if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1423 		panic("zone_require failed: address not in a zone (addr: %p)", addr);
1424 	}
1425 
1426 	zindex = zone_index_from_ptr(addr);
1427 	other = &zone_array[zindex];
1428 	if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1429 		panic("zone_require failed: invalid zone index %d "
1430 		    "(addr: %p, expected: %s%s)", zindex,
1431 		    addr, zone_heap_name(zone), zone->z_name);
1432 	} else {
1433 		panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1434 		    "(addr: %p, expected: %s%s)",
1435 		    zindex, zone_heap_name(other), other->z_name,
1436 		    addr, zone_heap_name(zone), zone->z_name);
1437 	}
1438 }
1439 
1440 __abortlike
1441 static void
zone_id_require_panic(zone_id_t zid,void * addr)1442 zone_id_require_panic(zone_id_t zid, void *addr)
1443 {
1444 	zone_require_panic(&zone_array[zid], addr);
1445 }
1446 
1447 /*
1448  * Routines to panic if a pointer is not mapped to an expected zone.
1449  * This can be used as a means of pinning an object to the zone it is expected
1450  * to be a part of.  Causes a panic if the address does not belong to any
1451  * specified zone, does not belong to any zone, has been freed and therefore
1452  * unmapped from the zone, or the pointer contains an uninitialized value that
1453  * does not belong to any zone.
1454  */
1455 void
zone_require(zone_t zone,void * addr)1456 zone_require(zone_t zone, void *addr)
1457 {
1458 	vm_size_t esize = zone_elem_inner_size(zone);
1459 
1460 	if (from_zone_map(addr, esize) &&
1461 	    zone_has_index(zone, zone_index_from_ptr(addr))) {
1462 		return;
1463 	}
1464 	zone_require_panic(zone, addr);
1465 }
1466 
1467 void
zone_id_require(zone_id_t zid,vm_size_t esize,void * addr)1468 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1469 {
1470 	if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1471 		return;
1472 	}
1473 	zone_id_require_panic(zid, addr);
1474 }
1475 
1476 bool
zone_owns(zone_t zone,void * addr)1477 zone_owns(zone_t zone, void *addr)
1478 {
1479 	vm_size_t esize = zone_elem_inner_size(zone);
1480 
1481 	if (from_zone_map(addr, esize)) {
1482 		return zone_has_index(zone, zone_index_from_ptr(addr));
1483 	}
1484 	return false;
1485 }
1486 
1487 static inline struct mach_vm_range
zone_kmem_suballoc(mach_vm_offset_t addr,vm_size_t size,int flags,vm_tag_t tag,vm_map_t * new_map)1488 zone_kmem_suballoc(
1489 	mach_vm_offset_t        addr,
1490 	vm_size_t               size,
1491 	int                     flags,
1492 	vm_tag_t                tag,
1493 	vm_map_t                *new_map)
1494 {
1495 	struct mach_vm_range r;
1496 
1497 	*new_map = kmem_suballoc(kernel_map, &addr, size,
1498 	    VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1499 	    flags, KMS_PERMANENT | KMS_NOFAIL, tag).kmr_submap;
1500 
1501 	r.min_address = addr;
1502 	r.max_address = addr + size;
1503 	return r;
1504 }
1505 
1506 #endif /* !ZALLOC_TEST */
1507 #pragma mark Zone bits allocator
1508 
1509 /*!
1510  * @defgroup Zone Bitmap allocator
1511  * @{
1512  *
1513  * @brief
1514  * Functions implementing the zone bitmap allocator
1515  *
1516  * @discussion
1517  * The zone allocator maintains which elements are allocated or free in bitmaps.
1518  *
1519  * When the number of elements per page is smaller than 32, it is stored inline
1520  * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1521  * and @c zm_bitmap used for storage).
1522  *
1523  * When the number of elements is larger, then a bitmap is allocated from
1524  * a buddy allocator (impelemented under the @c zba_* namespace). Pointers
1525  * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1526  * @c zm_bitmap. The low 3 bits encode the scale (order) of the allocation in
1527  * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1528  * cannot be larger than 1024 bytes (8192 bits).
1529  *
1530  * This buddy allocator can actually accomodate allocations as large
1531  * as 8k on 16k systems and 2k on 4k systems.
1532  *
1533  * Note: @c zba_* functions are implementation details not meant to be used
1534  * outside of the allocation of the allocator itself. Interfaces to the rest of
1535  * the zone allocator are documented and not @c zba_* prefixed.
1536  */
1537 
1538 #define ZBA_CHUNK_SIZE          PAGE_MAX_SIZE
1539 #define ZBA_GRANULE             sizeof(uint64_t)
1540 #define ZBA_GRANULE_BITS        (8 * sizeof(uint64_t))
1541 #define ZBA_MAX_ORDER           (PAGE_MAX_SHIFT - 4)
1542 #define ZBA_MAX_ALLOC_ORDER     7
1543 #define ZBA_SLOTS               (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1544 #define ZBA_HEADS_COUNT         (ZBA_MAX_ORDER + 1)
1545 #define ZBA_PTR_MASK            0x0fffffff
1546 #define ZBA_ORDER_SHIFT         29
1547 #define ZBA_HAS_EXTRA_BIT       0x10000000
1548 
1549 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1550 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
1551 
1552 struct zone_bits_chain {
1553 	uint32_t zbc_next;
1554 	uint32_t zbc_prev;
1555 } __attribute__((aligned(ZBA_GRANULE)));
1556 
1557 struct zone_bits_head {
1558 	uint32_t zbh_next;
1559 	uint32_t zbh_unused;
1560 } __attribute__((aligned(ZBA_GRANULE)));
1561 
1562 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1563 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1564 
1565 struct zone_bits_allocator_meta {
1566 	uint32_t  zbam_left;
1567 	uint32_t  zbam_right;
1568 	struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1569 	struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1570 };
1571 
1572 struct zone_bits_allocator_header {
1573 	uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1574 };
1575 
1576 #if ZALLOC_TEST
1577 static struct zalloc_bits_allocator_test_setup {
1578 	vm_offset_t zbats_base;
1579 	void      (*zbats_populate)(vm_address_t addr, vm_size_t size);
1580 } zba_test_info;
1581 
1582 static struct zone_bits_allocator_header *
zba_base_header(void)1583 zba_base_header(void)
1584 {
1585 	return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1586 }
1587 
1588 static kern_return_t
zba_populate(uint32_t n,bool with_extra __unused)1589 zba_populate(uint32_t n, bool with_extra __unused)
1590 {
1591 	vm_address_t base = zba_test_info.zbats_base;
1592 	zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1593 
1594 	return KERN_SUCCESS;
1595 }
1596 #else
1597 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1598 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1599 
1600 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1601 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1602 
1603 static struct zone_bits_allocator_header *
zba_base_header(void)1604 zba_base_header(void)
1605 {
1606 	return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1607 }
1608 
1609 static void
zba_lock(void)1610 zba_lock(void)
1611 {
1612 	lck_mtx_lock(&zba_mtx);
1613 }
1614 
1615 static void
zba_unlock(void)1616 zba_unlock(void)
1617 {
1618 	lck_mtx_unlock(&zba_mtx);
1619 }
1620 
1621 __abortlike
1622 static void
zba_memory_exhausted(void)1623 zba_memory_exhausted(void)
1624 {
1625 	uint64_t zsize = 0;
1626 	zone_t z = zone_find_largest(&zsize);
1627 	panic("zba_populate: out of bitmap space, "
1628 	    "likely due to memory leak in zone [%s%s] "
1629 	    "(%u%c, %d elements allocated)",
1630 	    zone_heap_name(z), zone_name(z),
1631 	    mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1632 	    zone_count_allocated(z));
1633 }
1634 
1635 
1636 static kern_return_t
zba_populate(uint32_t n,bool with_extra)1637 zba_populate(uint32_t n, bool with_extra)
1638 {
1639 	vm_size_t bits_size = ZBA_CHUNK_SIZE;
1640 	vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1641 	vm_address_t bits_addr;
1642 	vm_address_t xtra_addr;
1643 	kern_return_t kr;
1644 
1645 	bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1646 	xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1647 
1648 	kr = kernel_memory_populate(bits_addr, bits_size,
1649 	    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1650 	    VM_KERN_MEMORY_OSFMK);
1651 	if (kr != KERN_SUCCESS) {
1652 		return kr;
1653 	}
1654 
1655 
1656 	if (with_extra) {
1657 		kr = kernel_memory_populate(xtra_addr, xtra_size,
1658 		    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1659 		    VM_KERN_MEMORY_OSFMK);
1660 		if (kr != KERN_SUCCESS) {
1661 			kernel_memory_depopulate(bits_addr, bits_size,
1662 			    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1663 			    VM_KERN_MEMORY_OSFMK);
1664 		}
1665 	}
1666 
1667 	return kr;
1668 }
1669 #endif
1670 
1671 __pure2
1672 static struct zone_bits_allocator_meta *
zba_meta(void)1673 zba_meta(void)
1674 {
1675 	return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1676 }
1677 
1678 __pure2
1679 static uint64_t *
zba_slot_base(void)1680 zba_slot_base(void)
1681 {
1682 	return (uint64_t *)zba_base_header();
1683 }
1684 
1685 __pure2
1686 static struct zone_bits_head *
zba_head(uint32_t order,bool with_extra)1687 zba_head(uint32_t order, bool with_extra)
1688 {
1689 	if (with_extra) {
1690 		return &zba_meta()->zbam_lists_with_extra[order];
1691 	} else {
1692 		return &zba_meta()->zbam_lists[order];
1693 	}
1694 }
1695 
1696 __pure2
1697 static uint32_t
zba_head_index(struct zone_bits_head * hd)1698 zba_head_index(struct zone_bits_head *hd)
1699 {
1700 	return (uint32_t)((uint64_t *)hd - zba_slot_base());
1701 }
1702 
1703 __pure2
1704 static struct zone_bits_chain *
zba_chain_for_index(uint32_t index)1705 zba_chain_for_index(uint32_t index)
1706 {
1707 	return (struct zone_bits_chain *)(zba_slot_base() + index);
1708 }
1709 
1710 __pure2
1711 static uint32_t
zba_chain_to_index(const struct zone_bits_chain * zbc)1712 zba_chain_to_index(const struct zone_bits_chain *zbc)
1713 {
1714 	return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1715 }
1716 
1717 __abortlike
1718 static void
zba_head_corruption_panic(uint32_t order,bool with_extra)1719 zba_head_corruption_panic(uint32_t order, bool with_extra)
1720 {
1721 	panic("zone bits allocator head[%d:%d:%p] is corrupt",
1722 	    order, with_extra, zba_head(order, with_extra));
1723 }
1724 
1725 __abortlike
1726 static void
zba_chain_corruption_panic(struct zone_bits_chain * a,struct zone_bits_chain * b)1727 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1728 {
1729 	panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1730 }
1731 
1732 static void
zba_push_block(struct zone_bits_chain * zbc,uint32_t order,bool with_extra)1733 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1734 {
1735 	struct zone_bits_head *hd = zba_head(order, with_extra);
1736 	uint32_t hd_index = zba_head_index(hd);
1737 	uint32_t index = zba_chain_to_index(zbc);
1738 	struct zone_bits_chain *next;
1739 
1740 	if (hd->zbh_next) {
1741 		next = zba_chain_for_index(hd->zbh_next);
1742 		if (next->zbc_prev != hd_index) {
1743 			zba_head_corruption_panic(order, with_extra);
1744 		}
1745 		next->zbc_prev = index;
1746 	}
1747 	zbc->zbc_next = hd->zbh_next;
1748 	zbc->zbc_prev = hd_index;
1749 	hd->zbh_next = index;
1750 }
1751 
1752 static void
zba_remove_block(struct zone_bits_chain * zbc)1753 zba_remove_block(struct zone_bits_chain *zbc)
1754 {
1755 	struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1756 	uint32_t index = zba_chain_to_index(zbc);
1757 
1758 	if (prev->zbc_next != index) {
1759 		zba_chain_corruption_panic(prev, zbc);
1760 	}
1761 	if ((prev->zbc_next = zbc->zbc_next)) {
1762 		struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1763 		if (next->zbc_prev != index) {
1764 			zba_chain_corruption_panic(zbc, next);
1765 		}
1766 		next->zbc_prev = zbc->zbc_prev;
1767 	}
1768 }
1769 
1770 static vm_address_t
zba_try_pop_block(uint32_t order,bool with_extra)1771 zba_try_pop_block(uint32_t order, bool with_extra)
1772 {
1773 	struct zone_bits_head *hd = zba_head(order, with_extra);
1774 	struct zone_bits_chain *zbc;
1775 
1776 	if (hd->zbh_next == 0) {
1777 		return 0;
1778 	}
1779 
1780 	zbc = zba_chain_for_index(hd->zbh_next);
1781 	zba_remove_block(zbc);
1782 	return (vm_address_t)zbc;
1783 }
1784 
1785 static struct zone_bits_allocator_header *
zba_header(vm_offset_t addr)1786 zba_header(vm_offset_t addr)
1787 {
1788 	addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1789 	return (struct zone_bits_allocator_header *)addr;
1790 }
1791 
1792 static size_t
zba_node_parent(size_t node)1793 zba_node_parent(size_t node)
1794 {
1795 	return (node - 1) / 2;
1796 }
1797 
1798 static size_t
zba_node_left_child(size_t node)1799 zba_node_left_child(size_t node)
1800 {
1801 	return node * 2 + 1;
1802 }
1803 
1804 static size_t
zba_node_buddy(size_t node)1805 zba_node_buddy(size_t node)
1806 {
1807 	return ((node - 1) ^ 1) + 1;
1808 }
1809 
1810 static size_t
zba_node(vm_offset_t addr,uint32_t order)1811 zba_node(vm_offset_t addr, uint32_t order)
1812 {
1813 	vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1814 	return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1815 }
1816 
1817 static struct zone_bits_chain *
zba_chain_for_node(struct zone_bits_allocator_header * zbah,size_t node,uint32_t order)1818 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1819 {
1820 	vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1821 	return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1822 }
1823 
1824 static void
zba_node_flip_split(struct zone_bits_allocator_header * zbah,size_t node)1825 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1826 {
1827 	zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1828 }
1829 
1830 static bool
zba_node_is_split(struct zone_bits_allocator_header * zbah,size_t node)1831 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1832 {
1833 	return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1834 }
1835 
1836 static void
zba_free(vm_offset_t addr,uint32_t order,bool with_extra)1837 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1838 {
1839 	struct zone_bits_allocator_header *zbah = zba_header(addr);
1840 	struct zone_bits_chain *zbc;
1841 	size_t node = zba_node(addr, order);
1842 
1843 	while (node) {
1844 		size_t parent = zba_node_parent(node);
1845 
1846 		zba_node_flip_split(zbah, parent);
1847 		if (zba_node_is_split(zbah, parent)) {
1848 			break;
1849 		}
1850 
1851 		zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1852 		zba_remove_block(zbc);
1853 		order++;
1854 		node = parent;
1855 	}
1856 
1857 	zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1858 }
1859 
1860 static vm_size_t
zba_chunk_header_size(uint32_t n)1861 zba_chunk_header_size(uint32_t n)
1862 {
1863 	vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1864 	if (n == 0) {
1865 		hdr_size += sizeof(struct zone_bits_allocator_meta);
1866 	}
1867 	return hdr_size;
1868 }
1869 
1870 static void
zba_init_chunk(uint32_t n,bool with_extra)1871 zba_init_chunk(uint32_t n, bool with_extra)
1872 {
1873 	vm_size_t hdr_size = zba_chunk_header_size(n);
1874 	vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1875 	struct zone_bits_allocator_header *zbah = zba_header(page);
1876 	vm_size_t size = ZBA_CHUNK_SIZE;
1877 	size_t node;
1878 
1879 	for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1880 		if (size < hdr_size + (ZBA_GRANULE << o)) {
1881 			continue;
1882 		}
1883 		size -= ZBA_GRANULE << o;
1884 		node = zba_node(page + size, o);
1885 		zba_node_flip_split(zbah, zba_node_parent(node));
1886 		zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1887 	}
1888 }
1889 
1890 __attribute__((noinline))
1891 static void
zba_grow(bool with_extra)1892 zba_grow(bool with_extra)
1893 {
1894 	struct zone_bits_allocator_meta *meta = zba_meta();
1895 	kern_return_t kr = KERN_SUCCESS;
1896 	uint32_t chunk;
1897 
1898 #if !ZALLOC_TEST
1899 	if (meta->zbam_left >= meta->zbam_right) {
1900 		zba_memory_exhausted();
1901 	}
1902 #endif
1903 
1904 	if (with_extra) {
1905 		chunk = meta->zbam_right - 1;
1906 	} else {
1907 		chunk = meta->zbam_left;
1908 	}
1909 
1910 	kr = zba_populate(chunk, with_extra);
1911 	if (kr == KERN_SUCCESS) {
1912 		if (with_extra) {
1913 			meta->zbam_right -= 1;
1914 		} else {
1915 			meta->zbam_left += 1;
1916 		}
1917 
1918 		zba_init_chunk(chunk, with_extra);
1919 #if !ZALLOC_TEST
1920 	} else {
1921 		/*
1922 		 * zba_populate() has to be allowed to fail populating,
1923 		 * as we are under a global lock, we need to do the
1924 		 * VM_PAGE_WAIT() outside of the lock.
1925 		 */
1926 		assert(kr == KERN_RESOURCE_SHORTAGE);
1927 		zba_unlock();
1928 		VM_PAGE_WAIT();
1929 		zba_lock();
1930 #endif
1931 	}
1932 }
1933 
1934 static vm_offset_t
zba_alloc(uint32_t order,bool with_extra)1935 zba_alloc(uint32_t order, bool with_extra)
1936 {
1937 	struct zone_bits_allocator_header *zbah;
1938 	uint32_t cur = order;
1939 	vm_address_t addr;
1940 	size_t node;
1941 
1942 	while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1943 		if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1944 			zba_grow(with_extra);
1945 			cur = order;
1946 		}
1947 	}
1948 
1949 	zbah = zba_header(addr);
1950 	node = zba_node(addr, cur);
1951 	zba_node_flip_split(zbah, zba_node_parent(node));
1952 	while (cur > order) {
1953 		cur--;
1954 		zba_node_flip_split(zbah, node);
1955 		node = zba_node_left_child(node);
1956 		zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1957 		    cur, with_extra);
1958 	}
1959 
1960 	return addr;
1961 }
1962 
1963 #define zba_map_index(type, n)    (n / (8 * sizeof(type)))
1964 #define zba_map_bit(type, n)      ((type)1 << (n % (8 * sizeof(type))))
1965 #define zba_map_mask_lt(type, n)  (zba_map_bit(type, n) - 1)
1966 #define zba_map_mask_ge(type, n)  ((type)-zba_map_bit(type, n))
1967 
1968 #if !ZALLOC_TEST
1969 #if VM_TAG_SIZECLASSES
1970 
1971 static void *
zba_extra_ref_ptr(uint32_t bref,vm_offset_t idx)1972 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1973 {
1974 	vm_offset_t base = zone_info.zi_xtra_range.min_address;
1975 	vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1976 
1977 	return (void *)(base + ((offs + idx) << zba_xtra_shift));
1978 }
1979 
1980 #endif /* VM_TAG_SIZECLASSES */
1981 
1982 static uint32_t
zba_bits_ref_order(uint32_t bref)1983 zba_bits_ref_order(uint32_t bref)
1984 {
1985 	return bref >> ZBA_ORDER_SHIFT;
1986 }
1987 
1988 static bitmap_t *
zba_bits_ref_ptr(uint32_t bref)1989 zba_bits_ref_ptr(uint32_t bref)
1990 {
1991 	return zba_slot_base() + (bref & ZBA_PTR_MASK);
1992 }
1993 
1994 static vm_offset_t
zba_scan_bitmap_inline(zone_t zone,struct zone_page_metadata * meta,zalloc_flags_t flags,vm_offset_t eidx)1995 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
1996     zalloc_flags_t flags, vm_offset_t eidx)
1997 {
1998 	size_t i = eidx / 32;
1999 	uint32_t map;
2000 
2001 	if (eidx % 32) {
2002 		map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
2003 		if (map) {
2004 			eidx = __builtin_ctz(map);
2005 			meta[i].zm_bitmap ^= 1u << eidx;
2006 			return i * 32 + eidx;
2007 		}
2008 		i++;
2009 	}
2010 
2011 	uint32_t chunk_len = meta->zm_chunk_len;
2012 	if (flags & Z_PCPU) {
2013 		chunk_len = zpercpu_count();
2014 	}
2015 	for (int j = 0; j < chunk_len; j++, i++) {
2016 		if (i >= chunk_len) {
2017 			i = 0;
2018 		}
2019 		if (__probable(map = meta[i].zm_bitmap)) {
2020 			meta[i].zm_bitmap &= map - 1;
2021 			return i * 32 + __builtin_ctz(map);
2022 		}
2023 	}
2024 
2025 	zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2026 }
2027 
2028 static vm_offset_t
zba_scan_bitmap_ref(zone_t zone,struct zone_page_metadata * meta,vm_offset_t eidx)2029 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2030     vm_offset_t eidx)
2031 {
2032 	uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2033 	bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2034 	size_t i = eidx / 64;
2035 	uint64_t map;
2036 
2037 	if (eidx % 64) {
2038 		map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2039 		if (map) {
2040 			eidx = __builtin_ctzll(map);
2041 			bits[i] ^= 1ull << eidx;
2042 			return i * 64 + eidx;
2043 		}
2044 		i++;
2045 	}
2046 
2047 	for (int j = 0; j < bits_size; i++, j++) {
2048 		if (i >= bits_size) {
2049 			i = 0;
2050 		}
2051 		if (__probable(map = bits[i])) {
2052 			bits[i] &= map - 1;
2053 			return i * 64 + __builtin_ctzll(map);
2054 		}
2055 	}
2056 
2057 	zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2058 }
2059 
2060 /*!
2061  * @function zone_meta_find_and_clear_bit
2062  *
2063  * @brief
2064  * The core of the bitmap allocator: find a bit set in the bitmaps.
2065  *
2066  * @discussion
2067  * This method will round robin through available allocations,
2068  * with a per-core memory of the last allocated element index allocated.
2069  *
2070  * This is done in order to avoid a fully LIFO behavior which makes exploiting
2071  * double-free bugs way too practical.
2072  *
2073  * @param zone          The zone we're allocating from.
2074  * @param meta          The main metadata for the chunk being allocated from.
2075  * @param flags         the alloc flags (for @c Z_PCPU).
2076  */
2077 static vm_offset_t
zone_meta_find_and_clear_bit(zone_t zone,zone_stats_t zs,struct zone_page_metadata * meta,zalloc_flags_t flags)2078 zone_meta_find_and_clear_bit(
2079 	zone_t                  zone,
2080 	zone_stats_t            zs,
2081 	struct zone_page_metadata *meta,
2082 	zalloc_flags_t          flags)
2083 {
2084 	vm_offset_t eidx = zs->zs_alloc_rr + 1;
2085 
2086 	if (meta->zm_inline_bitmap) {
2087 		eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2088 	} else {
2089 		eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2090 	}
2091 	zs->zs_alloc_rr = (uint16_t)eidx;
2092 	return eidx;
2093 }
2094 
2095 /*!
2096  * @function zone_meta_bits_init_inline
2097  *
2098  * @brief
2099  * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2100  *
2101  * @param meta          The main metadata for the initialized chunk.
2102  * @param count         The number of elements the chunk can hold
2103  *                      (which might be partial for partially populated chunks).
2104  */
2105 static void
zone_meta_bits_init_inline(struct zone_page_metadata * meta,uint32_t count)2106 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2107 {
2108 	/*
2109 	 * We're called with the metadata zm_bitmap fields already zeroed out.
2110 	 */
2111 	for (size_t i = 0; i < count / 32; i++) {
2112 		meta[i].zm_bitmap = ~0u;
2113 	}
2114 	if (count % 32) {
2115 		meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2116 	}
2117 }
2118 
2119 /*!
2120  * @function zone_meta_bits_alloc_init
2121  *
2122  * @brief
2123  * Allocates a  zm_bitmap field for a newly assigned chunk.
2124  *
2125  * @param count         The number of elements the chunk can hold
2126  *                      (which might be partial for partially populated chunks).
2127  * @param nbits         The maximum nuber of bits that will be used.
2128  * @param with_extra    Whether "VM Tracking" metadata needs to be allocated.
2129  */
2130 static uint32_t
zone_meta_bits_alloc_init(uint32_t count,uint32_t nbits,bool with_extra)2131 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2132 {
2133 	static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2134 	    ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2135 
2136 	uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2137 	uint64_t *bits;
2138 	size_t   i = 0;
2139 
2140 	assert(order <= ZBA_MAX_ALLOC_ORDER);
2141 	assert(count <= ZBA_GRANULE_BITS << order);
2142 
2143 	zba_lock();
2144 	bits = (uint64_t *)zba_alloc(order, with_extra);
2145 	zba_unlock();
2146 
2147 	while (i < count / 64) {
2148 		bits[i++] = ~0ull;
2149 	}
2150 	if (count % 64) {
2151 		bits[i++] = zba_map_mask_lt(uint64_t, count);
2152 	}
2153 	while (i < 1u << order) {
2154 		bits[i++] = 0;
2155 	}
2156 
2157 	return (uint32_t)(bits - zba_slot_base()) +
2158 	       (order << ZBA_ORDER_SHIFT) +
2159 	       (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2160 }
2161 
2162 /*!
2163  * @function zone_meta_bits_merge
2164  *
2165  * @brief
2166  * Adds elements <code>[start, end)</code> to a chunk being extended.
2167  *
2168  * @param meta          The main metadata for the extended chunk.
2169  * @param start         The index of the first element to add to the chunk.
2170  * @param end           The index of the last (exclusive) element to add.
2171  */
2172 static void
zone_meta_bits_merge(struct zone_page_metadata * meta,uint32_t start,uint32_t end)2173 zone_meta_bits_merge(struct zone_page_metadata *meta,
2174     uint32_t start, uint32_t end)
2175 {
2176 	if (meta->zm_inline_bitmap) {
2177 		while (start < end) {
2178 			size_t s_i = start / 32;
2179 			size_t s_e = end / 32;
2180 
2181 			if (s_i == s_e) {
2182 				meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2183 				    zba_map_mask_ge(uint32_t, start);
2184 				break;
2185 			}
2186 
2187 			meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2188 			start += 32 - (start % 32);
2189 		}
2190 	} else {
2191 		uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2192 
2193 		while (start < end) {
2194 			size_t s_i = start / 64;
2195 			size_t s_e = end / 64;
2196 
2197 			if (s_i == s_e) {
2198 				bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2199 				    zba_map_mask_ge(uint64_t, start);
2200 				break;
2201 			}
2202 			bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2203 			start += 64 - (start % 64);
2204 		}
2205 	}
2206 }
2207 
2208 /*!
2209  * @function zone_bits_free
2210  *
2211  * @brief
2212  * Frees a bitmap to the zone bitmap allocator.
2213  *
2214  * @param bref
2215  * A bitmap reference set by @c zone_meta_bits_init() in a @c zm_bitmap field.
2216  */
2217 static void
zone_bits_free(uint32_t bref)2218 zone_bits_free(uint32_t bref)
2219 {
2220 	zba_lock();
2221 	zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2222 	    zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2223 	zba_unlock();
2224 }
2225 
2226 /*!
2227  * @function zone_meta_is_free
2228  *
2229  * @brief
2230  * Returns whether a given element appears free.
2231  */
2232 static bool
zone_meta_is_free(struct zone_page_metadata * meta,vm_offset_t eidx)2233 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2234 {
2235 	if (meta->zm_inline_bitmap) {
2236 		uint32_t bit = zba_map_bit(uint32_t, eidx);
2237 		return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2238 	} else {
2239 		bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2240 		uint64_t bit = zba_map_bit(uint64_t, eidx);
2241 		return bits[zba_map_index(uint64_t, eidx)] & bit;
2242 	}
2243 }
2244 
2245 /*!
2246  * @function zone_meta_mark_free
2247  *
2248  * @brief
2249  * Marks an element as free and returns whether it was marked as used.
2250  */
2251 static bool
zone_meta_mark_free(struct zone_page_metadata * meta,vm_offset_t eidx)2252 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2253 {
2254 	if (meta->zm_inline_bitmap) {
2255 		uint32_t bit = zba_map_bit(uint32_t, eidx);
2256 		if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2257 			return false;
2258 		}
2259 		meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2260 	} else {
2261 		bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2262 		uint64_t bit = zba_map_bit(uint64_t, eidx);
2263 		if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2264 			return false;
2265 		}
2266 		bits[zba_map_index(uint64_t, eidx)] ^= bit;
2267 	}
2268 	return true;
2269 }
2270 
2271 #if VM_TAG_SIZECLASSES
2272 
2273 __startup_func
2274 void
__zone_site_register(vm_allocation_site_t * site)2275 __zone_site_register(vm_allocation_site_t *site)
2276 {
2277 	if (zone_tagging_on) {
2278 		vm_tag_alloc(site);
2279 	}
2280 }
2281 
2282 uint16_t
zone_index_from_tag_index(uint32_t sizeclass_idx)2283 zone_index_from_tag_index(uint32_t sizeclass_idx)
2284 {
2285 	return zone_tags_sizeclasses[sizeclass_idx];
2286 }
2287 
2288 #endif /* VM_TAG_SIZECLASSES */
2289 #endif /* !ZALLOC_TEST */
2290 /*! @} */
2291 #pragma mark zalloc helpers
2292 #if !ZALLOC_TEST
2293 
2294 static inline void *
zstack_tbi_fix(vm_offset_t elem)2295 zstack_tbi_fix(vm_offset_t elem)
2296 {
2297 #if CONFIG_KERNEL_TAGGING
2298 	elem = vm_memtag_fixup_ptr(elem);
2299 #endif /* CONFIG_KERNEL_TAGGING */
2300 	return (void *)elem;
2301 }
2302 
2303 static inline vm_offset_t
zstack_tbi_fill(void * addr)2304 zstack_tbi_fill(void *addr)
2305 {
2306 	vm_offset_t elem = (vm_offset_t)addr;
2307 
2308 	return vm_memtag_canonicalize_address(elem);
2309 }
2310 
2311 __attribute__((always_inline))
2312 static inline void
zstack_push_no_delta(zstack_t * stack,void * addr)2313 zstack_push_no_delta(zstack_t *stack, void *addr)
2314 {
2315 	vm_offset_t elem = zstack_tbi_fill(addr);
2316 
2317 	*(vm_offset_t *)addr = stack->z_head - elem;
2318 	stack->z_head = elem;
2319 }
2320 
2321 __attribute__((always_inline))
2322 void
zstack_push(zstack_t * stack,void * addr)2323 zstack_push(zstack_t *stack, void *addr)
2324 {
2325 	zstack_push_no_delta(stack, addr);
2326 	stack->z_count++;
2327 }
2328 
2329 __attribute__((always_inline))
2330 static inline void *
zstack_pop_no_delta(zstack_t * stack)2331 zstack_pop_no_delta(zstack_t *stack)
2332 {
2333 	void *addr = zstack_tbi_fix(stack->z_head);
2334 
2335 	stack->z_head += *(vm_offset_t *)addr;
2336 	*(vm_offset_t *)addr = 0;
2337 
2338 	return addr;
2339 }
2340 
2341 __attribute__((always_inline))
2342 void *
zstack_pop(zstack_t * stack)2343 zstack_pop(zstack_t *stack)
2344 {
2345 	stack->z_count--;
2346 	return zstack_pop_no_delta(stack);
2347 }
2348 
2349 static inline void
zone_recirc_lock_nopreempt_check_contention(zone_t zone)2350 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2351 {
2352 	uint32_t ticket;
2353 
2354 	if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2355 	    &ticket, &zone_locks_grp))) {
2356 		return;
2357 	}
2358 
2359 	hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2360 
2361 	/*
2362 	 * If zone caching has been disabled due to memory pressure,
2363 	 * then recording contention is not useful, give the system
2364 	 * time to recover.
2365 	 */
2366 	if (__probable(!zone_caching_disabled && !zone_exhausted(zone))) {
2367 		zone->z_recirc_cont_cur++;
2368 	}
2369 }
2370 
2371 static inline void
zone_recirc_lock_nopreempt(zone_t zone)2372 zone_recirc_lock_nopreempt(zone_t zone)
2373 {
2374 	hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2375 }
2376 
2377 static inline void
zone_recirc_unlock_nopreempt(zone_t zone)2378 zone_recirc_unlock_nopreempt(zone_t zone)
2379 {
2380 	hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2381 }
2382 
2383 static inline void
zone_lock_nopreempt_check_contention(zone_t zone)2384 zone_lock_nopreempt_check_contention(zone_t zone)
2385 {
2386 	uint32_t ticket;
2387 #if KASAN_FAKESTACK
2388 	spl_t s = 0;
2389 	if (zone->z_kasan_fakestacks) {
2390 		s = splsched();
2391 	}
2392 #endif /* KASAN_FAKESTACK */
2393 
2394 	if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2395 	    &zone_locks_grp))) {
2396 #if KASAN_FAKESTACK
2397 		zone->z_kasan_spl = s;
2398 #endif /* KASAN_FAKESTACK */
2399 		return;
2400 	}
2401 
2402 	hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2403 #if KASAN_FAKESTACK
2404 	zone->z_kasan_spl = s;
2405 #endif /* KASAN_FAKESTACK */
2406 
2407 	/*
2408 	 * If zone caching has been disabled due to memory pressure,
2409 	 * then recording contention is not useful, give the system
2410 	 * time to recover.
2411 	 */
2412 	if (__probable(!zone_caching_disabled &&
2413 	    !zone->z_pcpu_cache && !zone_exhausted(zone))) {
2414 		zone->z_recirc_cont_cur++;
2415 	}
2416 }
2417 
2418 static inline void
zone_lock_nopreempt(zone_t zone)2419 zone_lock_nopreempt(zone_t zone)
2420 {
2421 #if KASAN_FAKESTACK
2422 	spl_t s = 0;
2423 	if (zone->z_kasan_fakestacks) {
2424 		s = splsched();
2425 	}
2426 #endif /* KASAN_FAKESTACK */
2427 	hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2428 #if KASAN_FAKESTACK
2429 	zone->z_kasan_spl = s;
2430 #endif /* KASAN_FAKESTACK */
2431 }
2432 
2433 static inline void
zone_unlock_nopreempt(zone_t zone)2434 zone_unlock_nopreempt(zone_t zone)
2435 {
2436 #if KASAN_FAKESTACK
2437 	spl_t s = zone->z_kasan_spl;
2438 	zone->z_kasan_spl = 0;
2439 #endif /* KASAN_FAKESTACK */
2440 	hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2441 #if KASAN_FAKESTACK
2442 	if (zone->z_kasan_fakestacks) {
2443 		splx(s);
2444 	}
2445 #endif /* KASAN_FAKESTACK */
2446 }
2447 
2448 static inline void
zone_depot_lock_nopreempt(zone_cache_t zc)2449 zone_depot_lock_nopreempt(zone_cache_t zc)
2450 {
2451 	hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2452 }
2453 
2454 static inline void
zone_depot_unlock_nopreempt(zone_cache_t zc)2455 zone_depot_unlock_nopreempt(zone_cache_t zc)
2456 {
2457 	hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2458 }
2459 
2460 static inline void
zone_depot_lock(zone_cache_t zc)2461 zone_depot_lock(zone_cache_t zc)
2462 {
2463 	hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2464 }
2465 
2466 static inline void
zone_depot_unlock(zone_cache_t zc)2467 zone_depot_unlock(zone_cache_t zc)
2468 {
2469 	hw_lck_ticket_unlock(&zc->zc_depot_lock);
2470 }
2471 
2472 zone_t
zone_by_id(size_t zid)2473 zone_by_id(size_t zid)
2474 {
2475 	return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2476 }
2477 
2478 static inline bool
zone_supports_vm(zone_t z)2479 zone_supports_vm(zone_t z)
2480 {
2481 	/*
2482 	 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2483 	 * to overcommit because they're used to reclaim memory
2484 	 * (VM support).
2485 	 */
2486 	return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2487 	       z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2488 }
2489 
2490 const char *
zone_name(zone_t z)2491 zone_name(zone_t z)
2492 {
2493 	return z->z_name;
2494 }
2495 
2496 const char *
zone_heap_name(zone_t z)2497 zone_heap_name(zone_t z)
2498 {
2499 	zone_security_flags_t zsflags = zone_security_config(z);
2500 	if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2501 		return kalloc_heap_names[zsflags.z_kheap_id];
2502 	}
2503 	return "invalid";
2504 }
2505 
2506 static uint32_t
zone_alloc_pages_for_nelems(zone_t z,vm_size_t max_elems)2507 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2508 {
2509 	vm_size_t elem_count, chunks;
2510 
2511 	elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2512 	    zone_elem_outer_size(z);
2513 	chunks = (max_elems + elem_count - 1) / elem_count;
2514 
2515 	return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2516 }
2517 
2518 static inline vm_size_t
zone_submaps_approx_size(void)2519 zone_submaps_approx_size(void)
2520 {
2521 	vm_size_t size = 0;
2522 
2523 	for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2524 		if (zone_submaps[idx] != VM_MAP_NULL) {
2525 			size += zone_submaps[idx]->size;
2526 		}
2527 	}
2528 
2529 	return size;
2530 }
2531 
2532 static inline void
zone_depot_init(struct zone_depot * zd)2533 zone_depot_init(struct zone_depot *zd)
2534 {
2535 	*zd = (struct zone_depot){
2536 		.zd_tail = &zd->zd_head,
2537 	};
2538 }
2539 
2540 static inline void
zone_depot_insert_head_full(struct zone_depot * zd,zone_magazine_t mag)2541 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2542 {
2543 	if (zd->zd_full++ == 0) {
2544 		zd->zd_tail = &mag->zm_next;
2545 	}
2546 	mag->zm_next = zd->zd_head;
2547 	zd->zd_head = mag;
2548 }
2549 
2550 static inline void
zone_depot_insert_tail_full(struct zone_depot * zd,zone_magazine_t mag)2551 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2552 {
2553 	zd->zd_full++;
2554 	mag->zm_next = *zd->zd_tail;
2555 	*zd->zd_tail = mag;
2556 	zd->zd_tail = &mag->zm_next;
2557 }
2558 
2559 static inline void
zone_depot_insert_head_empty(struct zone_depot * zd,zone_magazine_t mag)2560 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2561 {
2562 	zd->zd_empty++;
2563 	mag->zm_next = *zd->zd_tail;
2564 	*zd->zd_tail = mag;
2565 }
2566 
2567 static inline zone_magazine_t
zone_depot_pop_head_full(struct zone_depot * zd,zone_t z)2568 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2569 {
2570 	zone_magazine_t mag = zd->zd_head;
2571 
2572 	assert(zd->zd_full);
2573 
2574 	zd->zd_full--;
2575 	if (z && z->z_recirc_full_min > zd->zd_full) {
2576 		z->z_recirc_full_min = zd->zd_full;
2577 	}
2578 	zd->zd_head = mag->zm_next;
2579 	if (zd->zd_full == 0) {
2580 		zd->zd_tail = &zd->zd_head;
2581 	}
2582 
2583 	mag->zm_next = NULL;
2584 	return mag;
2585 }
2586 
2587 static inline zone_magazine_t
zone_depot_pop_head_empty(struct zone_depot * zd,zone_t z)2588 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2589 {
2590 	zone_magazine_t mag = *zd->zd_tail;
2591 
2592 	assert(zd->zd_empty);
2593 
2594 	zd->zd_empty--;
2595 	if (z && z->z_recirc_empty_min > zd->zd_empty) {
2596 		z->z_recirc_empty_min = zd->zd_empty;
2597 	}
2598 	*zd->zd_tail = mag->zm_next;
2599 
2600 	mag->zm_next = NULL;
2601 	return mag;
2602 }
2603 
2604 static inline smr_seq_t
zone_depot_move_full(struct zone_depot * dst,struct zone_depot * src,uint32_t n,zone_t z)2605 zone_depot_move_full(
2606 	struct zone_depot      *dst,
2607 	struct zone_depot      *src,
2608 	uint32_t                n,
2609 	zone_t                  z)
2610 {
2611 	zone_magazine_t head, last;
2612 
2613 	assert(n);
2614 	assert(src->zd_full >= n);
2615 
2616 	src->zd_full -= n;
2617 	if (z && z->z_recirc_full_min > src->zd_full) {
2618 		z->z_recirc_full_min = src->zd_full;
2619 	}
2620 	head = last = src->zd_head;
2621 	for (uint32_t i = n; i-- > 1;) {
2622 		last = last->zm_next;
2623 	}
2624 
2625 	src->zd_head = last->zm_next;
2626 	if (src->zd_full == 0) {
2627 		src->zd_tail = &src->zd_head;
2628 	}
2629 
2630 	if (z && zone_security_array[zone_index(z)].z_lifo) {
2631 		if (dst->zd_full == 0) {
2632 			dst->zd_tail = &last->zm_next;
2633 		}
2634 		last->zm_next = dst->zd_head;
2635 		dst->zd_head = head;
2636 	} else {
2637 		last->zm_next = *dst->zd_tail;
2638 		*dst->zd_tail = head;
2639 		dst->zd_tail = &last->zm_next;
2640 	}
2641 	dst->zd_full += n;
2642 
2643 	return last->zm_seq;
2644 }
2645 
2646 static inline void
zone_depot_move_empty(struct zone_depot * dst,struct zone_depot * src,uint32_t n,zone_t z)2647 zone_depot_move_empty(
2648 	struct zone_depot      *dst,
2649 	struct zone_depot      *src,
2650 	uint32_t                n,
2651 	zone_t                  z)
2652 {
2653 	zone_magazine_t head, last;
2654 
2655 	assert(n);
2656 	assert(src->zd_empty >= n);
2657 
2658 	src->zd_empty -= n;
2659 	if (z && z->z_recirc_empty_min > src->zd_empty) {
2660 		z->z_recirc_empty_min = src->zd_empty;
2661 	}
2662 	head = last = *src->zd_tail;
2663 	for (uint32_t i = n; i-- > 1;) {
2664 		last = last->zm_next;
2665 	}
2666 
2667 	*src->zd_tail = last->zm_next;
2668 
2669 	dst->zd_empty += n;
2670 	last->zm_next = *dst->zd_tail;
2671 	*dst->zd_tail = head;
2672 }
2673 
2674 static inline bool
zone_depot_poll(struct zone_depot * depot,smr_t smr)2675 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2676 {
2677 	if (depot->zd_full == 0) {
2678 		return false;
2679 	}
2680 
2681 	return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2682 }
2683 
2684 static void
zone_cache_swap_magazines(zone_cache_t cache)2685 zone_cache_swap_magazines(zone_cache_t cache)
2686 {
2687 	uint16_t count_a = cache->zc_alloc_cur;
2688 	uint16_t count_f = cache->zc_free_cur;
2689 	vm_offset_t *elems_a = cache->zc_alloc_elems;
2690 	vm_offset_t *elems_f = cache->zc_free_elems;
2691 
2692 	z_debug_assert(count_a <= zc_mag_size());
2693 	z_debug_assert(count_f <= zc_mag_size());
2694 
2695 	cache->zc_alloc_cur = count_f;
2696 	cache->zc_free_cur = count_a;
2697 	cache->zc_alloc_elems = elems_f;
2698 	cache->zc_free_elems = elems_a;
2699 }
2700 
2701 __pure2
2702 static smr_t
zone_cache_smr(zone_cache_t cache)2703 zone_cache_smr(zone_cache_t cache)
2704 {
2705 	return cache->zc_smr;
2706 }
2707 
2708 /*!
2709  * @function zone_magazine_replace
2710  *
2711  * @brief
2712  * Unlod a magazine and load a new one instead.
2713  */
2714 static zone_magazine_t
zone_magazine_replace(zone_cache_t zc,zone_magazine_t mag,bool empty)2715 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2716 {
2717 	zone_magazine_t old;
2718 	vm_offset_t **elems;
2719 
2720 	mag->zm_seq = SMR_SEQ_INVALID;
2721 
2722 	if (empty) {
2723 		elems = &zc->zc_free_elems;
2724 		zc->zc_free_cur = 0;
2725 	} else {
2726 		elems = &zc->zc_alloc_elems;
2727 		zc->zc_alloc_cur = zc_mag_size();
2728 	}
2729 	old = (zone_magazine_t)((uintptr_t)*elems -
2730 	    offsetof(struct zone_magazine, zm_elems));
2731 	*elems = mag->zm_elems;
2732 
2733 	return old;
2734 }
2735 
2736 static zone_magazine_t
zone_magazine_alloc(zalloc_flags_t flags)2737 zone_magazine_alloc(zalloc_flags_t flags)
2738 {
2739 	return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2740 }
2741 
2742 static void
zone_magazine_free(zone_magazine_t mag)2743 zone_magazine_free(zone_magazine_t mag)
2744 {
2745 	(zfree)(zc_magazine_zone, mag);
2746 }
2747 
2748 static void
zone_magazine_free_list(struct zone_depot * zd)2749 zone_magazine_free_list(struct zone_depot *zd)
2750 {
2751 	zone_magazine_t tmp, mag = *zd->zd_tail;
2752 
2753 	while (mag) {
2754 		tmp = mag->zm_next;
2755 		zone_magazine_free(mag);
2756 		mag = tmp;
2757 	}
2758 
2759 	*zd->zd_tail = NULL;
2760 	zd->zd_empty = 0;
2761 }
2762 
2763 void
zone_enable_caching(zone_t zone)2764 zone_enable_caching(zone_t zone)
2765 {
2766 	size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2767 	zone_cache_t caches;
2768 	size_t depot_limit;
2769 
2770 	depot_limit = zc_pcpu_max() / size_per_mag;
2771 	zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2772 
2773 	caches = zalloc_percpu_permanent_type(struct zone_cache);
2774 	zpercpu_foreach(zc, caches) {
2775 		zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2776 		zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2777 		zone_depot_init(&zc->zc_depot);
2778 		hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2779 	}
2780 
2781 	zone_lock(zone);
2782 	assert(zone->z_pcpu_cache == NULL);
2783 	zone->z_pcpu_cache = caches;
2784 	zone->z_recirc_cont_cur = 0;
2785 	zone->z_recirc_cont_wma = 0;
2786 	zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2787 	zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2788 	zone_unlock(zone);
2789 }
2790 
2791 bool
zone_maps_owned(vm_address_t addr,vm_size_t size)2792 zone_maps_owned(vm_address_t addr, vm_size_t size)
2793 {
2794 	return from_zone_map(addr, size);
2795 }
2796 
2797 #if KASAN_LIGHT
2798 bool
kasan_zone_maps_owned(vm_address_t addr,vm_size_t size)2799 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2800 {
2801 	return from_zone_map(addr, size) ||
2802 	       mach_vm_range_size(&zone_info.zi_map_range) == 0;
2803 }
2804 #endif /* KASAN_LIGHT */
2805 
2806 void
zone_map_sizes(vm_map_size_t * psize,vm_map_size_t * pfree,vm_map_size_t * plargest_free)2807 zone_map_sizes(
2808 	vm_map_size_t    *psize,
2809 	vm_map_size_t    *pfree,
2810 	vm_map_size_t    *plargest_free)
2811 {
2812 	vm_map_size_t size, free, largest;
2813 
2814 	vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2815 
2816 	for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2817 		vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2818 		*psize += size;
2819 		*pfree += free;
2820 		*plargest_free = MAX(*plargest_free, largest);
2821 	}
2822 }
2823 
2824 __attribute__((always_inline))
2825 vm_map_t
zone_submap(zone_security_flags_t zsflags)2826 zone_submap(zone_security_flags_t zsflags)
2827 {
2828 	return zone_submaps[zsflags.z_submap_idx];
2829 }
2830 
2831 unsigned
zpercpu_count(void)2832 zpercpu_count(void)
2833 {
2834 	return zpercpu_early_count;
2835 }
2836 
2837 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2838 /*
2839  * Returns a random number of a given bit-width.
2840  *
2841  * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2842  *
2843  * This uses Intel's rdrand because random() uses FP registers
2844  * which causes FP faults and allocations which isn't something
2845  * we can do from zalloc itself due to reentrancy problems.
2846  *
2847  * For pre-rdrand machines (which we no longer support),
2848  * we use a bad biased random generator that doesn't use FP.
2849  * Such HW is no longer supported, but VM of newer OSes on older
2850  * bare metal is made to limp along (with reduced security) this way.
2851  */
2852 static uint64_t
zalloc_random_mask64(uint32_t bits)2853 zalloc_random_mask64(uint32_t bits)
2854 {
2855 	uint64_t mask = ~0ull >> (64 - bits);
2856 	uint64_t v;
2857 
2858 #if __x86_64__
2859 	if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2860 		asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2861 		v &= mask;
2862 	} else {
2863 		disable_preemption();
2864 		int cpu = cpu_number();
2865 		v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2866 		    zone_bool_gen[cpu].zbg_entropy,
2867 		    ZONE_ENTROPY_CNT, bits);
2868 		enable_preemption();
2869 	}
2870 #else
2871 	v = early_random() & mask;
2872 #endif
2873 
2874 	return v;
2875 }
2876 
2877 /*
2878  * Returns a random number within [bound_min, bound_max)
2879  *
2880  * This isn't _exactly_ uniform, but the skew is small enough
2881  * not to matter for the consumers of this interface.
2882  *
2883  * Values within [bound_min, 2^64 % (bound_max - bound_min))
2884  * will be returned (bound_max - bound_min) / 2^64 more often
2885  * than values within [2^64 % (bound_max - bound_min), bound_max).
2886  */
2887 static uint32_t
zalloc_random_uniform32(uint32_t bound_min,uint32_t bound_max)2888 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2889 {
2890 	uint64_t delta = bound_max - bound_min;
2891 
2892 	return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2893 }
2894 
2895 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2896 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2897 /*
2898  * Track all kalloc zones of specified size for zlog name
2899  * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2900  *
2901  * Additionally track all shared kalloc zones with shared.kalloc
2902  */
2903 static bool
track_kalloc_zones(zone_t z,const char * logname)2904 track_kalloc_zones(zone_t z, const char *logname)
2905 {
2906 	const char *prefix;
2907 	size_t len;
2908 	zone_security_flags_t zsflags = zone_security_config(z);
2909 
2910 	prefix = "kalloc.type.var.";
2911 	len    = strlen(prefix);
2912 	if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2913 	    strncmp(logname, prefix, len) == 0) {
2914 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2915 
2916 		return zone_elem_inner_size(z) == sizeclass;
2917 	}
2918 
2919 	prefix = "kalloc.type.";
2920 	len    = strlen(prefix);
2921 	if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2922 	    strncmp(logname, prefix, len) == 0) {
2923 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2924 
2925 		return zone_elem_inner_size(z) == sizeclass;
2926 	}
2927 
2928 	prefix = "kalloc.";
2929 	len    = strlen(prefix);
2930 	if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2931 	    strncmp(logname, prefix, len) == 0) {
2932 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2933 
2934 		return zone_elem_inner_size(z) == sizeclass;
2935 	}
2936 
2937 	prefix = "shared.kalloc";
2938 	if ((zsflags.z_kheap_id == KHEAP_ID_SHARED) &&
2939 	    (strcmp(logname, prefix) == 0)) {
2940 		return true;
2941 	}
2942 
2943 	return false;
2944 }
2945 #endif
2946 
2947 int
track_this_zone(const char * zonename,const char * logname)2948 track_this_zone(const char *zonename, const char *logname)
2949 {
2950 	unsigned int len;
2951 	const char *zc = zonename;
2952 	const char *lc = logname;
2953 
2954 	/*
2955 	 * Compare the strings.  We bound the compare by MAX_ZONE_NAME.
2956 	 */
2957 
2958 	for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2959 		/*
2960 		 * If the current characters don't match, check for a space in
2961 		 * in the zone name and a corresponding period in the log name.
2962 		 * If that's not there, then the strings don't match.
2963 		 */
2964 
2965 		if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2966 			break;
2967 		}
2968 
2969 		/*
2970 		 * The strings are equal so far.  If we're at the end, then it's a match.
2971 		 */
2972 
2973 		if (*zc == '\0') {
2974 			return TRUE;
2975 		}
2976 	}
2977 
2978 	return FALSE;
2979 }
2980 
2981 #if DEBUG || DEVELOPMENT
2982 
2983 vm_size_t
zone_element_info(void * addr,vm_tag_t * ptag)2984 zone_element_info(void *addr, vm_tag_t * ptag)
2985 {
2986 	vm_size_t     size = 0;
2987 	vm_tag_t      tag = VM_KERN_MEMORY_NONE;
2988 	struct zone *src_zone;
2989 
2990 	if (from_zone_map(addr, sizeof(void *))) {
2991 		src_zone = zone_by_id(zone_index_from_ptr(addr));
2992 		size     = zone_elem_inner_size(src_zone);
2993 #if VM_TAG_SIZECLASSES
2994 		if (__improbable(src_zone->z_uses_tags)) {
2995 			struct zone_page_metadata *meta;
2996 			vm_offset_t eidx;
2997 			vm_tag_t *slot;
2998 
2999 			meta = zone_element_resolve(src_zone,
3000 			    (vm_offset_t)addr, &eidx);
3001 			slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
3002 			tag  = *slot;
3003 		}
3004 #endif /* VM_TAG_SIZECLASSES */
3005 	}
3006 
3007 	*ptag = tag;
3008 	return size;
3009 }
3010 
3011 #endif /* DEBUG || DEVELOPMENT */
3012 #if KASAN_CLASSIC
3013 
3014 vm_size_t
kasan_quarantine_resolve(vm_address_t addr,zone_t * zonep)3015 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
3016 {
3017 	zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
3018 
3019 	*zonep = zone;
3020 	return zone_elem_inner_size(zone);
3021 }
3022 
3023 #endif /* KASAN_CLASSIC */
3024 #endif /* !ZALLOC_TEST */
3025 #pragma mark Zone zeroing and early random
3026 #if !ZALLOC_TEST
3027 
3028 /*
3029  * Zone zeroing
3030  *
3031  * All allocations from zones are zeroed on free and are additionally
3032  * check that they are still zero on alloc. The check is
3033  * always on, on embedded devices. Perf regression was detected
3034  * on intel as we cant use the vectorized implementation of
3035  * memcmp_zero_ptr_aligned due to cyclic dependenices between
3036  * initization and allocation. Therefore we perform the check
3037  * on 20% of the allocations.
3038  */
3039 #if ZALLOC_ENABLE_ZERO_CHECK
3040 #if defined(__x86_64__)
3041 /*
3042  * Peform zero validation on every 5th allocation
3043  */
3044 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3045 static uint32_t PERCPU_DATA(zzc_decrementer);
3046 #endif /* defined(__x86_64__) */
3047 
3048 /*
3049  * Determine if zero validation for allocation should be skipped
3050  */
3051 static bool
zalloc_skip_zero_check(void)3052 zalloc_skip_zero_check(void)
3053 {
3054 #if defined(__x86_64__)
3055 	uint32_t *counterp, cnt;
3056 
3057 	counterp = PERCPU_GET(zzc_decrementer);
3058 	cnt = *counterp;
3059 	if (__probable(cnt > 0)) {
3060 		*counterp  = cnt - 1;
3061 		return true;
3062 	}
3063 	*counterp = zzc_rate - 1;
3064 #endif /* !defined(__x86_64__) */
3065 	return false;
3066 }
3067 
3068 __abortlike
3069 static void
zalloc_uaf_panic(zone_t z,uintptr_t elem,size_t size)3070 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3071 {
3072 	uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3073 	uint32_t first_offs = ~0u;
3074 	uintptr_t first_bits = 0, v;
3075 	char buf[1024];
3076 	int pos = 0;
3077 
3078 	buf[0] = '\0';
3079 
3080 	for (uint32_t o = 0; o < size; o += sizeof(v)) {
3081 		if ((v = *(uintptr_t *)(elem + o)) == 0) {
3082 			continue;
3083 		}
3084 		pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3085 		    "%5d: 0x%016lx", o, v);
3086 		if (first_offs > o) {
3087 			first_offs = o;
3088 			first_bits = v;
3089 		}
3090 	}
3091 
3092 	(panic)("[%s%s]: element modified after free "
3093 	"(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3094 	zone_heap_name(z), zone_name(z),
3095 	first_offs, first_bits, esize, (void *)elem, buf);
3096 }
3097 
3098 static void
zalloc_validate_element(zone_t zone,vm_offset_t elem,vm_size_t size,zalloc_flags_t flags)3099 zalloc_validate_element(
3100 	zone_t                  zone,
3101 	vm_offset_t             elem,
3102 	vm_size_t               size,
3103 	zalloc_flags_t          flags)
3104 {
3105 	if (flags & Z_NOZZC) {
3106 		return;
3107 	}
3108 	if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3109 		zalloc_uaf_panic(zone, elem, size);
3110 	}
3111 	if (flags & Z_PCPU) {
3112 		for (size_t i = zpercpu_count(); --i > 0;) {
3113 			elem += PAGE_SIZE;
3114 			if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3115 				zalloc_uaf_panic(zone, elem, size);
3116 			}
3117 		}
3118 	}
3119 }
3120 
3121 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3122 
3123 __attribute__((noinline))
3124 static void
zone_early_scramble_rr(zone_t zone,int cpu,zone_stats_t zs)3125 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3126 {
3127 #if KASAN_FAKESTACK
3128 	/*
3129 	 * This can cause re-entrancy with kasan fakestacks
3130 	 */
3131 #pragma unused(zone, cpu, zs)
3132 #else
3133 	uint32_t bits;
3134 
3135 	bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3136 	    zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3137 
3138 	zs->zs_alloc_rr += bits;
3139 	zs->zs_alloc_rr %= zone->z_chunk_elems;
3140 #endif
3141 }
3142 
3143 #endif /* !ZALLOC_TEST */
3144 #pragma mark Zone Leak Detection
3145 #if !ZALLOC_TEST
3146 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3147 
3148 /*
3149  * Zone leak debugging code
3150  *
3151  * When enabled, this code keeps a log to track allocations to a particular
3152  * zone that have not yet been freed.
3153  *
3154  * Examining this log will reveal the source of a zone leak.
3155  *
3156  * The log is allocated only when logging is enabled (it is off by default),
3157  * so there is no effect on the system when it's turned off.
3158  *
3159  * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3160  * zone name to log, with n starting at 1.
3161  *
3162  * Leaks debugging utilizes 2 tunables:
3163  * - zlsize (in kB) which describes how much "size" the record covers
3164  *   (zones with smaller elements get more records, default is 4M).
3165  *
3166  * - zlfreq (in bytes) which describes a sample rate in cumulative allocation
3167  *   size at which automatic leak detection will sample allocations.
3168  *   (default is 8k)
3169  *
3170  *
3171  * Zone corruption logging
3172  *
3173  * Logging can also be used to help identify the source of a zone corruption.
3174  *
3175  * First, identify the zone that is being corrupted,
3176  * then add "-zc zlog<n>=<zone name>" to the boot-args.
3177  *
3178  * When -zc is used in conjunction with zlog,
3179  * it changes the logging style to track both allocations and frees to the zone.
3180  *
3181  * When the corruption is detected, examining the log will show you the stack
3182  * traces of the callers who last allocated and freed any particular element in
3183  * the zone.
3184  *
3185  * Corruption debugging logs will have zrecs records
3186  * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3187  */
3188 
3189 #define ZRECORDS_MAX            (256u << 10)
3190 #define ZRECORDS_DEFAULT        (16u  << 10)
3191 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3192 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3193 static TUNABLE(uint32_t, zlfreq, "zlfreq", 8 * 1024);
3194 
3195 __startup_func
3196 static void
zone_leaks_init_zrecs(void)3197 zone_leaks_init_zrecs(void)
3198 {
3199 	/*
3200 	 * Don't allow more than ZRECORDS_MAX records,
3201 	 * even if the user asked for more.
3202 	 *
3203 	 * This prevents accidentally hogging too much kernel memory
3204 	 * and making the system unusable.
3205 	 */
3206 	if (zrecs == 0) {
3207 		zrecs = ZRECORDS_DEFAULT *
3208 		    (uint32_t)((max_mem + (1ul << 30)) >> 30);
3209 	}
3210 	if (zrecs > ZRECORDS_MAX) {
3211 		zrecs = ZRECORDS_MAX;
3212 	}
3213 }
3214 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3215 
3216 static uint32_t
zone_leaks_record_count(zone_t z)3217 zone_leaks_record_count(zone_t z)
3218 {
3219 	uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3220 
3221 	return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3222 }
3223 
3224 static uint32_t
zone_leaks_sample_rate(zone_t z)3225 zone_leaks_sample_rate(zone_t z)
3226 {
3227 	return zlfreq / zone_elem_inner_size(z);
3228 }
3229 
3230 #if ZALLOC_ENABLE_LOGGING
3231 /* Log allocations and frees to help debug a zone element corruption */
3232 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3233 
3234 /*
3235  * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3236  */
3237 #define MAX_ZONES_LOG_REQUESTS  10
3238 
3239 /**
3240  * @function zone_setup_logging
3241  *
3242  * @abstract
3243  * Optionally sets up a zone for logging.
3244  *
3245  * @discussion
3246  * We recognized two boot-args:
3247  *
3248  *	zlog=<zone_to_log>
3249  *	zrecs=<num_records_in_log>
3250  *	zlsize=<memory to cover for leaks>
3251  *
3252  * The zlog arg is used to specify the zone name that should be logged,
3253  * and zrecs/zlsize is used to control the size of the log.
3254  */
3255 static void
zone_setup_logging(zone_t z)3256 zone_setup_logging(zone_t z)
3257 {
3258 	char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3259 	char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3260 	char zlog_val[MAX_ZONE_NAME];  /* the zone name we're logging, if any */
3261 	bool logging_on = false;
3262 
3263 	/*
3264 	 * Append kalloc heap name to zone name (if zone is used by kalloc)
3265 	 */
3266 	snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3267 
3268 	/* zlog0 isn't allowed. */
3269 	for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3270 		snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3271 
3272 		if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3273 			if (track_this_zone(zone_name, zlog_val) ||
3274 			    track_kalloc_zones(z, zlog_val)) {
3275 				logging_on = true;
3276 				break;
3277 			}
3278 		}
3279 	}
3280 
3281 	/*
3282 	 * Backwards compat. with the old boot-arg used to specify single zone
3283 	 * logging i.e. zlog Needs to happen after the newer zlogn checks
3284 	 * because the prefix will match all the zlogn
3285 	 * boot-args.
3286 	 */
3287 	if (!logging_on &&
3288 	    PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3289 		if (track_this_zone(zone_name, zlog_val) ||
3290 		    track_kalloc_zones(z, zlog_val)) {
3291 			logging_on = true;
3292 		}
3293 	}
3294 
3295 	/*
3296 	 * If we want to log a zone, see if we need to allocate buffer space for
3297 	 * the log.
3298 	 *
3299 	 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3300 	 * we have to defer allocation in that case.
3301 	 *
3302 	 * zone_init() will finish the job.
3303 	 *
3304 	 * If we want to log one of the VM related zones that's set up early on,
3305 	 * we will skip allocation of the log until zinit is called again later
3306 	 * on some other zone.
3307 	 */
3308 	if (logging_on) {
3309 		if (corruption_debug_flag) {
3310 			z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3311 		} else {
3312 			z->z_btlog = btlog_create(BTLOG_HASH,
3313 			    zone_leaks_record_count(z), 0);
3314 		}
3315 		if (z->z_btlog) {
3316 			z->z_log_on = true;
3317 			printf("zone[%s%s]: logging enabled\n",
3318 			    zone_heap_name(z), z->z_name);
3319 		} else {
3320 			printf("zone[%s%s]: failed to enable logging\n",
3321 			    zone_heap_name(z), z->z_name);
3322 		}
3323 	}
3324 }
3325 
3326 #endif /* ZALLOC_ENABLE_LOGGING */
3327 #if KASAN_TBI
3328 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3329 
3330 __startup_func
3331 static void
kasan_tbi_init_zrecs(void)3332 kasan_tbi_init_zrecs(void)
3333 {
3334 	/*
3335 	 * Don't allow more than ZRECORDS_MAX records,
3336 	 * even if the user asked for more.
3337 	 *
3338 	 * This prevents accidentally hogging too much kernel memory
3339 	 * and making the system unusable.
3340 	 */
3341 	if (kasan_zrecs == 0) {
3342 		kasan_zrecs = ZRECORDS_DEFAULT *
3343 		    (uint32_t)((max_mem + (1ul << 30)) >> 30);
3344 	}
3345 	if (kasan_zrecs > ZRECORDS_MAX) {
3346 		kasan_zrecs = ZRECORDS_MAX;
3347 	}
3348 }
3349 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3350 
3351 static void
zone_setup_kasan_logging(zone_t z)3352 zone_setup_kasan_logging(zone_t z)
3353 {
3354 	if (!z->z_tbi_tag) {
3355 		printf("zone[%s%s]: kasan logging disabled for this zone\n",
3356 		    zone_heap_name(z), z->z_name);
3357 		return;
3358 	}
3359 
3360 	z->z_log_on = true;
3361 	z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3362 	if (!z->z_btlog) {
3363 		printf("zone[%s%s]: failed to enable kasan logging\n",
3364 		    zone_heap_name(z), z->z_name);
3365 	}
3366 }
3367 
3368 #endif /* KASAN_TBI */
3369 #if CONFIG_ZLEAKS
3370 
3371 static thread_call_data_t zone_leaks_callout;
3372 
3373 /*
3374  * The zone leak detector, abbreviated 'zleak', keeps track
3375  * of a subset of the currently outstanding allocations
3376  * made by the zone allocator.
3377  *
3378  * Zones who use more than zleak_pages_per_zone_wired_threshold
3379  * pages will get a BTLOG_HASH btlog with sampling to minimize
3380  * perf impact, yet receive statistical data about the backtrace
3381  * that is the most likely to cause the leak.
3382  *
3383  * If the zone goes under the threshold enough, then the log
3384  * is disabled and backtraces freed. Data can be collected
3385  * from userspace with the zlog(1) command.
3386  */
3387 
3388 uint32_t                zleak_active;
3389 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3390 
3391 /* Size a zone will have before we will collect data on it */
3392 static size_t           zleak_pages_per_zone_wired_threshold = ~0;
3393 vm_size_t               zleak_per_zone_tracking_threshold = ~0;
3394 
3395 static inline bool
zleak_should_enable_for_zone(zone_t z)3396 zleak_should_enable_for_zone(zone_t z)
3397 {
3398 	if (z->z_log_on) {
3399 		return false;
3400 	}
3401 	if (z->z_btlog) {
3402 		return false;
3403 	}
3404 #if KASAN_FAKESTACK
3405 	if (z->z_kasan_fakestacks) {
3406 		return false;
3407 	}
3408 #endif
3409 	if (zone_exhaustible(z)) {
3410 		return z->z_wired_cur * 8 >= z->z_wired_max * 7;
3411 	}
3412 	return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3413 }
3414 
3415 static inline bool
zleak_should_disable_for_zone(zone_t z)3416 zleak_should_disable_for_zone(zone_t z)
3417 {
3418 	if (z->z_log_on) {
3419 		return false;
3420 	}
3421 	if (!z->z_btlog) {
3422 		return false;
3423 	}
3424 	if (zone_exhaustible(z)) {
3425 		return z->z_wired_cur * 8 < z->z_wired_max * 7;
3426 	}
3427 	return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3428 }
3429 
3430 static void
zleaks_enable_async(__unused thread_call_param_t p0,__unused thread_call_param_t p1)3431 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3432 {
3433 	btlog_t log;
3434 
3435 	zone_foreach(z) {
3436 		if (zleak_should_disable_for_zone(z)) {
3437 			log = z->z_btlog;
3438 			z->z_btlog = NULL;
3439 			assert(z->z_btlog_disabled == NULL);
3440 			btlog_disable(log);
3441 			z->z_btlog_disabled = log;
3442 			os_atomic_dec(&zleak_active, relaxed);
3443 		}
3444 
3445 		if (zleak_should_enable_for_zone(z)) {
3446 			log = z->z_btlog_disabled;
3447 			if (log == NULL) {
3448 				log = btlog_create(BTLOG_HASH,
3449 				    zone_leaks_record_count(z),
3450 				    zone_leaks_sample_rate(z));
3451 			} else if (btlog_enable(log) == KERN_SUCCESS) {
3452 				z->z_btlog_disabled = NULL;
3453 			} else {
3454 				log = NULL;
3455 			}
3456 			os_atomic_store(&z->z_btlog, log, release);
3457 			os_atomic_inc(&zleak_active, relaxed);
3458 		}
3459 	}
3460 }
3461 
3462 __startup_func
3463 static void
zleak_init(void)3464 zleak_init(void)
3465 {
3466 	zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3467 
3468 	zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3469 	    zleak_max_zonemap_size / 8);
3470 
3471 	thread_call_setup_with_options(&zone_leaks_callout,
3472 	    zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3473 	    THREAD_CALL_OPTIONS_ONCE);
3474 }
3475 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3476 
3477 kern_return_t
zleak_update_threshold(vm_size_t * arg,uint64_t value)3478 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3479 {
3480 	if (value >= zleak_max_zonemap_size) {
3481 		return KERN_INVALID_VALUE;
3482 	}
3483 
3484 	if (arg == &zleak_per_zone_tracking_threshold) {
3485 		zleak_per_zone_tracking_threshold = (vm_size_t)value;
3486 		zleak_pages_per_zone_wired_threshold = atop(value);
3487 		if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3488 			thread_call_enter(&zone_leaks_callout);
3489 		}
3490 		return KERN_SUCCESS;
3491 	}
3492 
3493 	return KERN_INVALID_ARGUMENT;
3494 }
3495 
3496 static void
panic_display_zleaks(bool has_syms)3497 panic_display_zleaks(bool has_syms)
3498 {
3499 	bool did_header = false;
3500 	vm_address_t bt[BTLOG_MAX_DEPTH];
3501 	uint32_t len, count;
3502 
3503 	zone_foreach(z) {
3504 		btlog_t log = z->z_btlog;
3505 
3506 		if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3507 			continue;
3508 		}
3509 
3510 		count = btlog_guess_top(log, bt, &len);
3511 		if (count == 0) {
3512 			continue;
3513 		}
3514 
3515 		if (!did_header) {
3516 			paniclog_append_noflush("Zone (suspected) leak report:\n");
3517 			did_header = true;
3518 		}
3519 
3520 		paniclog_append_noflush("  Zone:    %s%s\n",
3521 		    zone_heap_name(z), zone_name(z));
3522 		paniclog_append_noflush("  Count:   %d (%ld bytes)\n", count,
3523 		    (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3524 		paniclog_append_noflush("  Size:    %ld\n",
3525 		    (long)zone_size_wired(z));
3526 		paniclog_append_noflush("  Top backtrace:\n");
3527 		for (uint32_t i = 0; i < len; i++) {
3528 			if (has_syms) {
3529 				paniclog_append_noflush("    %p ", (void *)bt[i]);
3530 				panic_print_symbol_name(bt[i]);
3531 				paniclog_append_noflush("\n");
3532 			} else {
3533 				paniclog_append_noflush("    %p\n", (void *)bt[i]);
3534 			}
3535 		}
3536 
3537 		kmod_panic_dump(bt, len);
3538 		paniclog_append_noflush("\n");
3539 	}
3540 }
3541 #endif /* CONFIG_ZLEAKS */
3542 
3543 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3544 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3545 
3546 #if !KASAN_TBI
3547 __cold
3548 #endif
3549 static void
zalloc_log(zone_t zone,vm_offset_t addr,uint32_t count,void * fp)3550 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3551 {
3552 	btlog_t log = zone->z_btlog;
3553 	btref_get_flags_t flags = 0;
3554 	btref_t ref;
3555 
3556 #if !KASAN_TBI
3557 	if (!log || !btlog_sample(log)) {
3558 		return;
3559 	}
3560 #endif
3561 	if (get_preemption_level() || zone_supports_vm(zone)) {
3562 		/*
3563 		 * VM zones can be used by btlog, avoid reentrancy issues.
3564 		 */
3565 		flags = BTREF_GET_NOWAIT;
3566 	}
3567 
3568 	ref = btref_get(fp, flags);
3569 	while (count-- > 0) {
3570 		if (count) {
3571 			btref_retain(ref);
3572 		}
3573 		btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3574 		addr += *(vm_offset_t *)addr;
3575 	}
3576 }
3577 
3578 #define ZALLOC_LOG(zone, addr, count)  ({ \
3579 	if ((zone)->z_btlog) {                                                 \
3580 	        zalloc_log(zone, addr, count, __builtin_frame_address(0));     \
3581 	}                                                                      \
3582 })
3583 
3584 #if !KASAN_TBI
3585 __cold
3586 #endif
3587 static void
zfree_log(zone_t zone,vm_offset_t addr,uint32_t count,void * fp)3588 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3589 {
3590 	btlog_t log = zone->z_btlog;
3591 	btref_get_flags_t flags = 0;
3592 	btref_t ref;
3593 
3594 #if !KASAN_TBI
3595 	if (!log) {
3596 		return;
3597 	}
3598 #endif
3599 
3600 	/*
3601 	 * See if we're doing logging on this zone.
3602 	 *
3603 	 * There are two styles of logging used depending on
3604 	 * whether we're trying to catch a leak or corruption.
3605 	 */
3606 #if !KASAN_TBI
3607 	if (btlog_get_type(log) == BTLOG_HASH) {
3608 		/*
3609 		 * We're logging to catch a leak.
3610 		 *
3611 		 * Remove any record we might have for this element
3612 		 * since it's being freed.  Note that we may not find it
3613 		 * if the buffer overflowed and that's OK.
3614 		 *
3615 		 * Since the log is of a limited size, old records get
3616 		 * overwritten if there are more zallocs than zfrees.
3617 		 */
3618 		while (count-- > 0) {
3619 			btlog_erase(log, (void *)addr);
3620 			addr += *(vm_offset_t *)addr;
3621 		}
3622 		return;
3623 	}
3624 #endif /* !KASAN_TBI */
3625 
3626 	if (get_preemption_level() || zone_supports_vm(zone)) {
3627 		/*
3628 		 * VM zones can be used by btlog, avoid reentrancy issues.
3629 		 */
3630 		flags = BTREF_GET_NOWAIT;
3631 	}
3632 
3633 	ref = btref_get(fp, flags);
3634 	while (count-- > 0) {
3635 		if (count) {
3636 			btref_retain(ref);
3637 		}
3638 		btlog_record(log, (void *)addr, ZOP_FREE, ref);
3639 		addr += *(vm_offset_t *)addr;
3640 	}
3641 }
3642 
3643 #define ZFREE_LOG(zone, addr, count)  ({ \
3644 	if ((zone)->z_btlog) {                                                 \
3645 	        zfree_log(zone, addr, count, __builtin_frame_address(0));      \
3646 	}                                                                      \
3647 })
3648 
3649 #else
3650 #define ZALLOC_LOG(...)         ((void)0)
3651 #define ZFREE_LOG(...)          ((void)0)
3652 #endif /* ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3653 #endif /* !ZALLOC_TEST */
3654 #pragma mark zone (re)fill
3655 #if !ZALLOC_TEST
3656 
3657 /*!
3658  * @defgroup Zone Refill
3659  * @{
3660  *
3661  * @brief
3662  * Functions handling The zone refill machinery.
3663  *
3664  * @discussion
3665  * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3666  *
3667  * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3668  * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3669  *
3670  * - call @c zone_expand_locked() directly if the caller is allowed to block,
3671  *
3672  * - wakeup the asynchroous expansion thread call if the caller is not allowed
3673  *   to block, or if the reserve becomes depleted.
3674  *
3675  *
3676  * <h2>Synchronous expansion</h2>
3677  *
3678  * This mechanism is actually the only one that may refill a zone, and all the
3679  * other ones funnel through this one eventually.
3680  *
3681  * @c zone_expand_locked() implements the core of the expansion mechanism,
3682  * and will do so while a caller specified predicate is true.
3683  *
3684  * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3685  * - one VM privileged thread,
3686  * - one regular thread.
3687  *
3688  * Regular threads that refill will put down their identity in @c z_expander,
3689  * so that priority inversion avoidance can be implemented.
3690  *
3691  * However, VM privileged threads are allowed to use VM page reserves,
3692  * which allows for the system to recover from extreme memory pressure
3693  * situations, allowing for the few allocations that @c zone_gc() or
3694  * killing processes require.
3695  *
3696  * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3697  * is set. @c z_expander is not necessarily the identity of this VM privileged
3698  * thread (it is if the VM privileged thread came in first, but wouldn't be, and
3699  * could even be @c THREAD_NULL otherwise).
3700  *
3701  * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3702  * spending a whole pointer on priority inheritance for VM privileged threads
3703  * (and other issues related to having two owners), we use the rwlock boost as
3704  * a stop gap to avoid priority inversions.
3705  *
3706  *
3707  * <h2>Chunk wiring policies</h2>
3708  *
3709  * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3710  * to try to minimize fragmentation relative to element sizes not aligning with
3711  * a chunk size well.  However, this can grow large and be hard to fulfill on
3712  * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3713  * 4k page systems).
3714  *
3715  * This is why, when under memory pressure the system allows chunks to be
3716  * partially populated. The metadata of the first page in the chunk maintains
3717  * the count of actually populated pages.
3718  *
3719  * The metadata for addresses assigned to a zone are found of 4 queues:
3720  * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3721  *   elements (those can be targeted by @c zone_gc()),
3722  * - @c z_pageq_partial has chunk heads with populated pages that are partially
3723  *   used,
3724  * - @c z_pageq_full has chunk heads with populated pages with no free elements
3725  *   left,
3726  * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3727  *   the zone forever, or the first secondary metadata for a chunk whose
3728  *   corresponding page is not populated in the chunk.
3729  *
3730  * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3731  * queues are preferred.
3732  *
3733  *
3734  * <h2>Asynchronous expansion</h2>
3735  *
3736  * This mechanism allows for refilling zones used mostly with non blocking
3737  * callers. It relies on a thread call (@c zone_expand_callout) which will
3738  * iterate all zones and refill the ones marked with @c z_async_refilling.
3739  *
3740  * NOTE: If the calling thread for zalloc_noblock is lower priority than
3741  *       the thread_call, then zalloc_noblock to an empty zone may succeed.
3742  *
3743  *
3744  * <h2>Dealing with zone allocations from the mach VM code</h2>
3745  *
3746  * The implementation of the mach VM itself uses the zone allocator
3747  * for things like the vm_map_entry data structure. In order to prevent
3748  * a recursion problem when adding more pages to a zone, the VM zones
3749  * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3750  * or any VM map functions to allocate.
3751  *
3752  * Instead, a really simple coalescing first-fit allocator is used
3753  * for this submap, and no one else than zalloc can allocate from it.
3754  *
3755  * Memory is directly populated which doesn't require allocation of
3756  * VM map entries, and avoids recursion. The cost of this scheme however,
3757  * is that `vm_map_lookup_entry` will not function on those addresses
3758  * (nor any API relying on it).
3759  */
3760 
3761 static void zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems);
3762 static thread_call_data_t zone_expand_callout;
3763 
3764 __attribute__((overloadable))
3765 static inline bool
zone_submap_is_sequestered(zone_submap_idx_t idx)3766 zone_submap_is_sequestered(zone_submap_idx_t idx)
3767 {
3768 	return idx != Z_SUBMAP_IDX_DATA;
3769 }
3770 
3771 __attribute__((overloadable))
3772 static inline bool
zone_submap_is_sequestered(zone_security_flags_t zsflags)3773 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3774 {
3775 	return zone_submap_is_sequestered(zsflags.z_submap_idx);
3776 }
3777 
3778 static inline kma_flags_t
zone_kma_flags(zone_t z,zone_security_flags_t zsflags,zalloc_flags_t flags)3779 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3780 {
3781 	kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3782 
3783 	if (zsflags.z_noencrypt) {
3784 		kmaflags |= KMA_NOENCRYPT;
3785 	}
3786 	if (flags & Z_NOPAGEWAIT) {
3787 		kmaflags |= KMA_NOPAGEWAIT;
3788 	}
3789 	if (z->z_permanent || (!z->z_destructible &&
3790 	    zone_submap_is_sequestered(zsflags))) {
3791 		kmaflags |= KMA_PERMANENT;
3792 	}
3793 	if (zsflags.z_submap_from_end) {
3794 		kmaflags |= KMA_LAST_FREE;
3795 	}
3796 
3797 	if (z->z_tbi_tag) {
3798 		kmaflags |= KMA_TAG;
3799 	}
3800 
3801 	return kmaflags;
3802 }
3803 
3804 static inline void
zone_add_wired_pages(zone_t z,uint32_t pages)3805 zone_add_wired_pages(zone_t z, uint32_t pages)
3806 {
3807 	os_atomic_add(&zone_pages_wired, pages, relaxed);
3808 
3809 #if CONFIG_ZLEAKS
3810 	if (__improbable(zleak_should_enable_for_zone(z) &&
3811 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3812 		thread_call_enter(&zone_leaks_callout);
3813 	}
3814 #else
3815 	(void)z;
3816 #endif
3817 }
3818 
3819 static inline void
zone_remove_wired_pages(zone_t z,uint32_t pages)3820 zone_remove_wired_pages(zone_t z, uint32_t pages)
3821 {
3822 	os_atomic_sub(&zone_pages_wired, pages, relaxed);
3823 
3824 #if CONFIG_ZLEAKS
3825 	if (__improbable(zleak_should_disable_for_zone(z) &&
3826 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3827 		thread_call_enter(&zone_leaks_callout);
3828 	}
3829 #else
3830 	(void)z;
3831 #endif
3832 }
3833 
3834 #if CONFIG_KERNEL_TAGGING
3835 static inline vm_address_t
zone_tag_element(zone_t zone,vm_offset_t addr,vm_size_t elem_size)3836 zone_tag_element(zone_t zone, vm_offset_t addr, vm_size_t elem_size)
3837 {
3838 	vm_offset_t tagged_address;
3839 
3840 	tagged_address = vm_memtag_assign_tag(addr, elem_size);
3841 
3842 	vm_memtag_set_tag(tagged_address, elem_size);
3843 
3844 	if (zone->z_percpu) {
3845 		zpercpu_foreach_cpu(index) {
3846 			vm_memtag_set_tag(tagged_address + ptoa(index), elem_size);
3847 		}
3848 	}
3849 
3850 	return tagged_address;
3851 }
3852 
3853 static inline void
zcram_memtag_init(zone_t zone,vm_offset_t base,uint32_t start,uint32_t end)3854 zcram_memtag_init(zone_t zone, vm_offset_t base, uint32_t start, uint32_t end)
3855 {
3856 	vm_offset_t elem_size = zone_elem_outer_size(zone);
3857 	vm_offset_t oob_offs = zone_elem_outer_offs(zone);
3858 
3859 	for (uint32_t i = start; i < end; i++) {
3860 		vm_offset_t elem_addr = base + oob_offs + i * elem_size;
3861 
3862 		(void)zone_tag_element(zone, elem_addr, elem_size);
3863 	}
3864 }
3865 #endif /* CONFIG_KERNEL_TAGGING */
3866 
3867 /*!
3868  * @function zcram_and_lock()
3869  *
3870  * @brief
3871  * Prepare some memory for being usable for allocation purposes.
3872  *
3873  * @discussion
3874  * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3875  * to be usable in the zone.
3876  *
3877  * This function assumes the metadata is already populated for the range.
3878  *
3879  * Calling this function with @c pg_start being 0 means that the memory
3880  * is either a partial chunk, or a full chunk, that isn't published anywhere
3881  * and the initialization can happen without locks held.
3882  *
3883  * Calling this function with a non zero @c pg_start means that we are extending
3884  * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3885  * is already usable and published in the zone, so extending it requires holding
3886  * the zone lock.
3887  *
3888  * @param zone          The zone to cram new populated pages into
3889  * @param addr          The base address for the chunk(s)
3890  * @param pg_va_new     The number of virtual pages newly assigned to the zone
3891  * @param pg_start      The first newly populated page relative to @a addr.
3892  * @param pg_end        The after-last newly populated page relative to @a addr.
3893  * @param lock          0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3894  */
3895 static void
zcram_and_lock(zone_t zone,vm_offset_t addr,uint32_t pg_va_new,uint32_t pg_start,uint32_t pg_end,uint16_t lock)3896 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3897     uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3898 {
3899 	zone_id_t zindex = zone_index(zone);
3900 	vm_offset_t elem_size = zone_elem_outer_size(zone);
3901 	uint32_t free_start = 0, free_end = 0;
3902 	uint32_t oob_offs = zone_elem_outer_offs(zone);
3903 
3904 	struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3905 	uint32_t chunk_pages = zone->z_chunk_pages;
3906 	bool guarded = meta->zm_guarded;
3907 
3908 	assert(pg_start < pg_end && pg_end <= chunk_pages);
3909 
3910 	if (pg_start == 0) {
3911 		uint16_t chunk_len = (uint16_t)pg_end;
3912 		uint16_t secondary_len = ZM_SECONDARY_PAGE;
3913 		bool inline_bitmap = false;
3914 
3915 		if (zone->z_percpu) {
3916 			chunk_len = 1;
3917 			secondary_len = ZM_SECONDARY_PCPU_PAGE;
3918 			assert(pg_end == zpercpu_count());
3919 		}
3920 		if (!zone->z_permanent && !zone->z_uses_tags) {
3921 			inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3922 		}
3923 
3924 		free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3925 
3926 		meta[0] = (struct zone_page_metadata){
3927 			.zm_index         = zindex,
3928 			.zm_guarded       = guarded,
3929 			.zm_inline_bitmap = inline_bitmap,
3930 			.zm_chunk_len     = chunk_len,
3931 			.zm_alloc_size    = lock,
3932 		};
3933 
3934 		if (!zone->z_permanent && !inline_bitmap) {
3935 			meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3936 			    zone->z_chunk_elems, zone->z_uses_tags);
3937 		}
3938 
3939 		for (uint16_t i = 1; i < chunk_pages; i++) {
3940 			meta[i] = (struct zone_page_metadata){
3941 				.zm_index          = zindex,
3942 				.zm_guarded        = guarded,
3943 				.zm_inline_bitmap  = inline_bitmap,
3944 				.zm_chunk_len      = secondary_len,
3945 				.zm_page_index     = (uint8_t)i,
3946 				.zm_bitmap         = meta[0].zm_bitmap,
3947 				.zm_subchunk_len   = (uint8_t)(chunk_pages - i),
3948 			};
3949 		}
3950 
3951 		if (inline_bitmap) {
3952 			zone_meta_bits_init_inline(meta, free_end);
3953 		}
3954 	} else {
3955 		assert(!zone->z_percpu && !zone->z_permanent);
3956 
3957 		free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
3958 		free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
3959 	}
3960 
3961 #if CONFIG_KERNEL_TAGGING
3962 	if (__probable(zone->z_tbi_tag)) {
3963 		zcram_memtag_init(zone, addr, free_end, free_start);
3964 	}
3965 #endif /* CONFIG_KERNEL_TAGGING */
3966 
3967 #if KASAN_CLASSIC
3968 	assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
3969 	if (zone->z_permanent) {
3970 		kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
3971 	} else if (zone->z_percpu) {
3972 		for (uint32_t i = 0; i < pg_end; i++) {
3973 			kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
3974 			    zone_elem_outer_size(zone),
3975 			    zone_elem_outer_offs(zone),
3976 			    zone_elem_redzone(zone));
3977 		}
3978 	} else {
3979 		kasan_zmem_add(addr, ptoa(pg_end),
3980 		    zone_elem_outer_size(zone),
3981 		    zone_elem_outer_offs(zone),
3982 		    zone_elem_redzone(zone));
3983 	}
3984 #endif /* KASAN_CLASSIC */
3985 
3986 	/*
3987 	 * Insert the initialized pages / metadatas into the right lists.
3988 	 */
3989 
3990 	zone_lock(zone);
3991 	assert(zone->z_self == zone);
3992 
3993 	if (pg_start != 0) {
3994 		assert(meta->zm_chunk_len == pg_start);
3995 
3996 		zone_meta_bits_merge(meta, free_start, free_end);
3997 		meta->zm_chunk_len = (uint16_t)pg_end;
3998 
3999 		/*
4000 		 * consume the zone_meta_lock_in_partial()
4001 		 * done in zone_expand_locked()
4002 		 */
4003 		zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
4004 		zone_meta_remqueue(zone, meta);
4005 	}
4006 
4007 	if (zone->z_permanent || meta->zm_alloc_size) {
4008 		zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
4009 	} else {
4010 		zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4011 		zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4012 	}
4013 	if (pg_end < chunk_pages) {
4014 		/* push any non populated residual VA on z_pageq_va */
4015 		zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4016 	}
4017 
4018 	zone->z_elems_free  += free_end - free_start;
4019 	zone->z_elems_avail += free_end - free_start;
4020 	zone->z_wired_cur   += zone->z_percpu ? 1 : pg_end - pg_start;
4021 	if (pg_va_new) {
4022 		zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4023 	}
4024 	if (zone->z_wired_hwm < zone->z_wired_cur) {
4025 		zone->z_wired_hwm = zone->z_wired_cur;
4026 	}
4027 
4028 #if CONFIG_ZLEAKS
4029 	if (__improbable(zleak_should_enable_for_zone(zone) &&
4030 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4031 		thread_call_enter(&zone_leaks_callout);
4032 	}
4033 #endif /* CONFIG_ZLEAKS */
4034 
4035 	zone_add_wired_pages(zone, pg_end - pg_start);
4036 }
4037 
4038 static void
zcram(zone_t zone,vm_offset_t addr,uint32_t pages,uint16_t lock)4039 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4040 {
4041 	uint32_t chunk_pages = zone->z_chunk_pages;
4042 
4043 	assert(pages % chunk_pages == 0);
4044 	for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4045 		zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4046 		zone_unlock(zone);
4047 	}
4048 }
4049 
4050 __startup_func
4051 void
zone_cram_early(zone_t zone,vm_offset_t newmem,vm_size_t size)4052 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4053 {
4054 	uint32_t pages = (uint32_t)atop(size);
4055 
4056 
4057 	assert(from_zone_map(newmem, size));
4058 	assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4059 	assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4060 
4061 	/*
4062 	 * The early pages we move at the pmap layer can't be "depopulated"
4063 	 * because there's no vm_page_t for them.
4064 	 *
4065 	 * "Lock" them so that they never hit z_pageq_empty.
4066 	 */
4067 	vm_memtag_bzero((void *)newmem, size);
4068 	zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4069 }
4070 
4071 /*!
4072  * @function zone_submap_alloc_sequestered_va
4073  *
4074  * @brief
4075  * Allocates VA without using vm_find_space().
4076  *
4077  * @discussion
4078  * Allocate VA quickly without using the slower vm_find_space() for cases
4079  * when the submaps are fully sequestered.
4080  *
4081  * The VM submap is used to implement the VM itself so it is always sequestered,
4082  * as it can't kmem_alloc which needs to always allocate vm entries.
4083  * However, it can use vm_map_enter() which tries to coalesce entries, which
4084  * always works, so the VM map only ever needs 2 entries (one for each end).
4085  *
4086  * The RO submap is similarly always sequestered if it exists (as a non
4087  * sequestered RO submap makes very little sense).
4088  *
4089  * The allocator is a very simple bump-allocator
4090  * that allocates from either end.
4091  */
4092 static kern_return_t
zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags,uint32_t pages,vm_offset_t * addrp)4093 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4094     vm_offset_t *addrp)
4095 {
4096 	vm_size_t size = ptoa(pages);
4097 	vm_map_t map = zone_submap(zsflags);
4098 	vm_map_entry_t first, last;
4099 	vm_map_offset_t addr;
4100 
4101 	vm_map_lock(map);
4102 
4103 	first = vm_map_first_entry(map);
4104 	last = vm_map_last_entry(map);
4105 
4106 	if (first->vme_end + size > last->vme_start) {
4107 		vm_map_unlock(map);
4108 		return KERN_NO_SPACE;
4109 	}
4110 
4111 	if (zsflags.z_submap_from_end) {
4112 		last->vme_start -= size;
4113 		addr = last->vme_start;
4114 		VME_OFFSET_SET(last, addr);
4115 	} else {
4116 		addr = first->vme_end;
4117 		first->vme_end += size;
4118 	}
4119 	map->size += size;
4120 
4121 	vm_map_unlock(map);
4122 
4123 	*addrp = addr;
4124 	return KERN_SUCCESS;
4125 }
4126 
4127 void
zone_fill_initially(zone_t zone,vm_size_t nelems)4128 zone_fill_initially(zone_t zone, vm_size_t nelems)
4129 {
4130 	kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4131 	kern_return_t kr;
4132 	vm_offset_t addr;
4133 	uint32_t pages;
4134 	zone_security_flags_t zsflags = zone_security_config(zone);
4135 
4136 	assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4137 	assert(zone->z_elems_avail == 0);
4138 
4139 	kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4140 	pages = zone_alloc_pages_for_nelems(zone, nelems);
4141 	if (zone_submap_is_sequestered(zsflags)) {
4142 		kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4143 		if (kr != KERN_SUCCESS) {
4144 			panic("zone_submap_alloc_sequestered_va() "
4145 			    "of %u pages failed", pages);
4146 		}
4147 		kernel_memory_populate(addr, ptoa(pages),
4148 		    kmaflags, VM_KERN_MEMORY_ZONE);
4149 	} else {
4150 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4151 		kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4152 		    kmaflags, VM_KERN_MEMORY_ZONE);
4153 	}
4154 
4155 	zone_meta_populate(addr, ptoa(pages));
4156 	zcram(zone, addr, pages, 0);
4157 }
4158 
4159 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4160 __attribute__((noinline))
4161 static void
zone_scramble_va_and_unlock(zone_t z,struct zone_page_metadata * meta,uint32_t runs,uint32_t pages,uint32_t chunk_pages,uint64_t guard_mask)4162 zone_scramble_va_and_unlock(
4163 	zone_t                      z,
4164 	struct zone_page_metadata  *meta,
4165 	uint32_t                    runs,
4166 	uint32_t                    pages,
4167 	uint32_t                    chunk_pages,
4168 	uint64_t                    guard_mask)
4169 {
4170 	struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];
4171 
4172 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4173 		arr[run] = meta + n;
4174 		n += chunk_pages + ((guard_mask >> run) & 1);
4175 	}
4176 
4177 	/*
4178 	 * Fisher–Yates shuffle, for an array with indices [0, n)
4179 	 *
4180 	 * for i from n−1 downto 1 do
4181 	 *     j ← random integer such that 0 ≤ j ≤ i
4182 	 *     exchange a[j] and a[i]
4183 	 *
4184 	 * The point here is that early allocations aren't at a fixed
4185 	 * distance from each other.
4186 	 */
4187 	for (uint32_t i = runs - 1; i > 0; i--) {
4188 		uint32_t j = zalloc_random_uniform32(0, i + 1);
4189 
4190 		meta   = arr[j];
4191 		arr[j] = arr[i];
4192 		arr[i] = meta;
4193 	}
4194 
4195 	zone_lock(z);
4196 
4197 	for (uint32_t i = 0; i < runs; i++) {
4198 		zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4199 	}
4200 	z->z_va_cur += z->z_percpu ? runs : pages;
4201 }
4202 
4203 static inline uint32_t
dist_u32(uint32_t a,uint32_t b)4204 dist_u32(uint32_t a, uint32_t b)
4205 {
4206 	return a < b ? b - a : a - b;
4207 }
4208 
4209 static uint64_t
zalloc_random_clear_n_bits(uint64_t mask,uint32_t pop,uint32_t n)4210 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4211 {
4212 	for (; n-- > 0; pop--) {
4213 		uint32_t bit = zalloc_random_uniform32(0, pop);
4214 		uint64_t m = mask;
4215 
4216 		for (; bit; bit--) {
4217 			m &= m - 1;
4218 		}
4219 
4220 		mask ^= 1ull << __builtin_ctzll(m);
4221 	}
4222 
4223 	return mask;
4224 }
4225 
4226 /**
4227  * @function zalloc_random_bits
4228  *
4229  * @brief
4230  * Compute a random number with a specified number of bit set in a given width.
4231  *
4232  * @discussion
4233  * This function generates a "uniform" distribution of sets of bits set in
4234  * a given width, with typically less than width/4 calls to random.
4235  *
4236  * @param pop           the target number of bits set.
4237  * @param width         the number of bits in the random integer to generate.
4238  */
4239 static uint64_t
zalloc_random_bits(uint32_t pop,uint32_t width)4240 zalloc_random_bits(uint32_t pop, uint32_t width)
4241 {
4242 	uint64_t w_mask = (1ull << width) - 1;
4243 	uint64_t mask;
4244 	uint32_t cur;
4245 
4246 	if (3 * width / 4 <= pop) {
4247 		mask = w_mask;
4248 		cur  = width;
4249 	} else if (pop <= width / 4) {
4250 		mask = 0;
4251 		cur  = 0;
4252 	} else {
4253 		/*
4254 		 * Chosing a random number this way will overwhelmingly
4255 		 * contain `width` bits +/- a few.
4256 		 */
4257 		mask = zalloc_random_mask64(width);
4258 		cur  = __builtin_popcountll(mask);
4259 
4260 		if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4261 			/*
4262 			 * If the opposite mask has a closer popcount,
4263 			 * then start with that one as the seed.
4264 			 */
4265 			cur = width - cur;
4266 			mask ^= w_mask;
4267 		}
4268 	}
4269 
4270 	if (cur < pop) {
4271 		/*
4272 		 * Setting `pop - cur` bits is really clearing that many from
4273 		 * the opposite mask.
4274 		 */
4275 		mask ^= w_mask;
4276 		mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4277 		mask ^= w_mask;
4278 	} else if (pop < cur) {
4279 		mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4280 	}
4281 
4282 	return mask;
4283 }
4284 #endif
4285 
4286 static void
zone_allocate_va_locked(zone_t z,zalloc_flags_t flags)4287 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4288 {
4289 	zone_security_flags_t zsflags = zone_security_config(z);
4290 	struct zone_page_metadata *meta;
4291 	kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4292 	uint32_t chunk_pages = z->z_chunk_pages;
4293 	uint32_t runs, pages, guards, rnum;
4294 	uint64_t guard_mask = 0;
4295 	bool     lead_guard = false;
4296 	kern_return_t kr;
4297 	vm_offset_t addr;
4298 
4299 	zone_unlock(z);
4300 
4301 	/*
4302 	 * A lot of OOB exploitation techniques rely on precise placement
4303 	 * and interleaving of zone pages. The layout that is sought
4304 	 * by attackers will be C/P/T types, where:
4305 	 * - (C)ompromised is the type for which attackers have a bug,
4306 	 * - (P)adding is used to pad memory,
4307 	 * - (T)arget is the type that the attacker will attempt to corrupt
4308 	 *   by exploiting (C).
4309 	 *
4310 	 * Note that in some cases C==T and P isn't needed.
4311 	 *
4312 	 * In order to make those placement games much harder,
4313 	 * we grow zones by random runs of memory, up to 256k.
4314 	 * This makes predicting the precise layout of the heap
4315 	 * quite more complicated.
4316 	 *
4317 	 * Note: this function makes a very heavy use of random,
4318 	 *       however, it is mostly limited to sequestered zones,
4319 	 *       and eventually the layout will be fixed,
4320 	 *       and the usage of random vastly reduced.
4321 	 *
4322 	 *       For non sequestered zones, there's a single call
4323 	 *       to random in order to decide whether we want
4324 	 *       a guard page or not.
4325 	 */
4326 	pages  = chunk_pages;
4327 	guards = 0;
4328 	runs   = 1;
4329 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4330 	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4331 		pages = atop(ZONE_CHUNK_ALLOC_SIZE);
4332 		runs  = (pages + chunk_pages - 1) / chunk_pages;
4333 		runs  = zalloc_random_uniform32(1, runs + 1);
4334 		pages = runs * chunk_pages;
4335 	}
4336 	static_assert(ZONE_CHUNK_ALLOC_SIZE / 4096 <= 64,
4337 	    "make sure that `runs` will never be larger than 64");
4338 #endif /* !ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4339 
4340 	/*
4341 	 * Zones that are suceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4342 	 * guards might be added after each chunk.
4343 	 *
4344 	 * Those guard pages are marked with the ZM_PGZ_GUARD
4345 	 * magical chunk len, and their zm_oob_offs field
4346 	 * is used to remember optional shift applied
4347 	 * to returned elements, in order to right-align-them
4348 	 * as much as possible.
4349 	 *
4350 	 * In an adversarial context, while guard pages
4351 	 * are extremely effective against linear overflow,
4352 	 * using a predictable density of guard pages feels like
4353 	 * a missed opportunity. Which is why we chose to insert
4354 	 * one guard page for about 32k of memory, and place it
4355 	 * randomly.
4356 	 */
4357 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4358 	if (z->z_percpu) {
4359 		/*
4360 		 * For per-cpu runs, have a 75% chance to have a guard.
4361 		 */
4362 		rnum = zalloc_random_uniform32(0, 4 * 128);
4363 		guards = rnum >= 128;
4364 	} else if (!zsflags.z_pgz_use_guards && !z->z_pgz_use_guards) {
4365 		vm_offset_t rest;
4366 
4367 		/*
4368 		 * For types that are less susceptible to have OOBs,
4369 		 * have a density of 1 guard every 64k, with a uniform
4370 		 * distribution.
4371 		 */
4372 		rnum   = zalloc_random_uniform32(0, ZONE_GUARD_SPARSE);
4373 		guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE;
4374 		rest   = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE;
4375 		guards += rnum < rest;
4376 	} else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) {
4377 		/*
4378 		 * For chunks >= 32k, have a 75% chance of guard pages
4379 		 * between chunks.
4380 		 */
4381 		rnum = zalloc_random_uniform32(65, 129);
4382 		guards = runs * rnum / 128;
4383 	} else {
4384 		vm_offset_t rest;
4385 
4386 		/*
4387 		 * Otherwise, aim at 1 guard every 32k,
4388 		 * with a uniform distribution.
4389 		 */
4390 		rnum   = zalloc_random_uniform32(0, ZONE_GUARD_DENSE);
4391 		guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;
4392 		rest   = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;
4393 		guards += rnum < rest;
4394 	}
4395 	assert3u(guards, <=, runs);
4396 
4397 	guard_mask = 0;
4398 
4399 	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4400 		uint32_t g = 0;
4401 
4402 		/*
4403 		 * Several exploitation strategies rely on a C/T (compromised
4404 		 * then target types) ordering of pages with a sub-page reach
4405 		 * from C into T.
4406 		 *
4407 		 * We want to reliably thwart such exploitations
4408 		 * and hence force a guard page between alternating
4409 		 * memory types.
4410 		 */
4411 		guard_mask |= 1ull << (runs - 1);
4412 		g++;
4413 
4414 		/*
4415 		 * While we randomize the chunks lengths, an attacker with
4416 		 * precise timing control can guess when overflows happen,
4417 		 * and "measure" the runs, which gives them an indication
4418 		 * of where the next run start offset is.
4419 		 *
4420 		 * In order to make this knowledge unusable, add a guard page
4421 		 * _before_ the new run with a 25% probability, regardless
4422 		 * of whether we had enough guard pages.
4423 		 */
4424 		if ((rnum & 3) == 0) {
4425 			lead_guard = true;
4426 			g++;
4427 		}
4428 		if (guards > g) {
4429 			guard_mask |= zalloc_random_bits(guards - g, runs - 1);
4430 		} else {
4431 			guards = g;
4432 		}
4433 	} else {
4434 		assert3u(runs, ==, 1);
4435 		assert3u(guards, <=, 1);
4436 		guard_mask = guards << (runs - 1);
4437 	}
4438 #else
4439 	(void)rnum;
4440 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4441 
4442 	if (zone_submap_is_sequestered(zsflags)) {
4443 		kr = zone_submap_alloc_sequestered_va(zsflags,
4444 		    pages + guards, &addr);
4445 	} else {
4446 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4447 		kr = kmem_alloc(zone_submap(zsflags), &addr,
4448 		    ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE);
4449 	}
4450 
4451 	if (kr != KERN_SUCCESS) {
4452 		uint64_t zone_size = 0;
4453 		zone_t zone_largest = zone_find_largest(&zone_size);
4454 		panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4455 		    "likely due to memory leak in zone [%s%s] "
4456 		    "(%u%c, %d elements allocated)",
4457 		    kr, zone_heap_name(z), zone_name(z),
4458 		    zone_heap_name(zone_largest), zone_name(zone_largest),
4459 		    mach_vm_size_pretty(zone_size),
4460 		    mach_vm_size_unit(zone_size),
4461 		    zone_count_allocated(zone_largest));
4462 	}
4463 
4464 	meta = zone_meta_from_addr(addr);
4465 	zone_meta_populate(addr, ptoa(pages + guards));
4466 
4467 	/*
4468 	 * Handle the leading guard page if any
4469 	 */
4470 	if (lead_guard) {
4471 		meta[0].zm_index = zone_index(z);
4472 		meta[0].zm_chunk_len = ZM_PGZ_GUARD;
4473 		meta[0].zm_guarded = true;
4474 		meta++;
4475 	}
4476 
4477 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4478 		bool guarded = (guard_mask >> run) & 1;
4479 
4480 		for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4481 			meta[n].zm_index = zone_index(z);
4482 			meta[n].zm_guarded = guarded;
4483 		}
4484 		if (guarded) {
4485 			meta[n].zm_index = zone_index(z);
4486 			meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4487 			n++;
4488 		}
4489 	}
4490 	if (guards) {
4491 		os_atomic_add(&zone_guard_pages, guards, relaxed);
4492 	}
4493 
4494 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4495 	if (__improbable(zone_caching_disabled < 0)) {
4496 		return zone_scramble_va_and_unlock(z, meta, runs, pages,
4497 		           chunk_pages, guard_mask);
4498 	}
4499 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4500 
4501 	zone_lock(z);
4502 
4503 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4504 		zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4505 		n += chunk_pages + ((guard_mask >> run) & 1);
4506 	}
4507 	z->z_va_cur += z->z_percpu ? runs : pages;
4508 }
4509 
4510 static inline void
ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)4511 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4512 {
4513 #if DEBUG || DEVELOPMENT
4514 	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
4515 	    size, 0, 0, 0);
4516 #else
4517 	(void)size;
4518 #endif
4519 }
4520 
4521 static inline void
ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)4522 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4523 {
4524 #if DEBUG || DEVELOPMENT
4525 	task_t task = current_task_early();
4526 	if (pages && task) {
4527 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4528 	}
4529 	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
4530 	    pages, 0, 0, 0);
4531 #else
4532 	(void)pages;
4533 #endif
4534 }
4535 
4536 __attribute__((noinline))
4537 static void
__ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z,uint32_t pgs)4538 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4539 {
4540 	uint64_t wait_start = 0;
4541 	long mapped;
4542 
4543 	thread_wakeup(VM_PAGEOUT_GC_EVENT);
4544 
4545 	if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4546 		return;
4547 	}
4548 
4549 	mapped = os_atomic_load(&zone_pages_wired, relaxed);
4550 
4551 	/*
4552 	 * If the zone map is really exhausted, wait on the GC thread,
4553 	 * donating our priority (which is important because the GC
4554 	 * thread is at a rather low priority).
4555 	 */
4556 	for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4557 		uint32_t wait_ms = n * (n + 1) / 2;
4558 		uint64_t interval;
4559 
4560 		if (n == 1) {
4561 			wait_start = mach_absolute_time();
4562 		} else {
4563 			thread_wakeup(VM_PAGEOUT_GC_EVENT);
4564 		}
4565 		if (zone_exhausted_timeout > 0 &&
4566 		    wait_ms > zone_exhausted_timeout) {
4567 			panic("zone map exhaustion: waited for %dms "
4568 			    "(pages: %ld, max: %ld, wanted: %d)",
4569 			    wait_ms, mapped, zone_pages_wired_max, pgs);
4570 		}
4571 
4572 		clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4573 		    &interval);
4574 
4575 		lck_spin_lock(&zone_exhausted_lock);
4576 		lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4577 		    LCK_SLEEP_UNLOCK, &zone_pages_wired,
4578 		    vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4579 
4580 		mapped = os_atomic_load(&zone_pages_wired, relaxed);
4581 	}
4582 }
4583 
4584 static bool
zone_expand_wait_for_pages(bool waited)4585 zone_expand_wait_for_pages(bool waited)
4586 {
4587 	if (waited) {
4588 		return false;
4589 	}
4590 #if DEBUG || DEVELOPMENT
4591 	if (zalloc_simulate_vm_pressure) {
4592 		return false;
4593 	}
4594 #endif /* DEBUG || DEVELOPMENT */
4595 	return !vm_pool_low();
4596 }
4597 
4598 static inline void
zone_expand_async_schedule_if_allowed(zone_t zone)4599 zone_expand_async_schedule_if_allowed(zone_t zone)
4600 {
4601 	if (zone->z_async_refilling || zone->no_callout) {
4602 		return;
4603 	}
4604 
4605 	if (zone_exhausted(zone)) {
4606 		return;
4607 	}
4608 
4609 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4610 		return;
4611 	}
4612 
4613 	if (!vm_pool_low() || zone_supports_vm(zone)) {
4614 		zone->z_async_refilling = true;
4615 		thread_call_enter(&zone_expand_callout);
4616 	}
4617 }
4618 
4619 __attribute__((noinline))
4620 static bool
zalloc_expand_drain_exhausted_caches_locked(zone_t z)4621 zalloc_expand_drain_exhausted_caches_locked(zone_t z)
4622 {
4623 	zone_magazine_t mag = NULL;
4624 
4625 	z->z_depot_size = 0;
4626 	z->z_depot_cleanup = true;
4627 
4628 	zpercpu_foreach(zc, z->z_pcpu_cache) {
4629 		uint32_t n;
4630 
4631 		assert(zone_cache_smr(zc) == NULL);
4632 		zone_depot_lock_nopreempt(zc);
4633 		n = zc->zc_depot.zd_full;
4634 		if (n) {
4635 			zone_recirc_lock_nopreempt(z);
4636 			zone_depot_move_full(&z->z_recirc,
4637 			    &zc->zc_depot, n, NULL);
4638 			zone_recirc_unlock_nopreempt(z);
4639 		}
4640 		zone_depot_unlock_nopreempt(zc);
4641 	}
4642 
4643 	zone_recirc_lock_nopreempt(z);
4644 	if (z->z_recirc.zd_full) {
4645 		mag = zone_depot_pop_head_full(&z->z_recirc, z);
4646 	}
4647 	zone_recirc_unlock_nopreempt(z);
4648 
4649 	if (mag) {
4650 		zone_reclaim_elements(z, zc_mag_size(), mag->zm_elems);
4651 		zone_magazine_free(mag);
4652 	}
4653 
4654 	return mag != NULL;
4655 }
4656 
4657 static bool
zalloc_needs_refill(zone_t zone,zalloc_flags_t flags)4658 zalloc_needs_refill(zone_t zone, zalloc_flags_t flags)
4659 {
4660 	if (zone->z_elems_free > zone->z_elems_rsv) {
4661 		return false;
4662 	}
4663 	if (!zone_exhausted(zone)) {
4664 		return true;
4665 	}
4666 	if (zone->z_pcpu_cache && zone->z_depot_size) {
4667 		if (zalloc_expand_drain_exhausted_caches_locked(zone)) {
4668 			return false;
4669 		}
4670 	}
4671 	return (flags & Z_NOFAIL) != 0;
4672 }
4673 
4674 __attribute__((noinline))
4675 static void
__ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)4676 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)
4677 {
4678 	z->z_exhausted_wait = true;
4679 	EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z);
4680 	assert_wait(&z->z_expander, TH_UNINT);
4681 	zone_unlock(z);
4682 	thread_block(THREAD_CONTINUE_NULL);
4683 	zone_lock(z);
4684 	z->z_exhausted_wait = false;
4685 }
4686 
4687 static void
zone_expand_locked(zone_t z,zalloc_flags_t flags)4688 zone_expand_locked(zone_t z, zalloc_flags_t flags)
4689 {
4690 	zone_security_flags_t zsflags = zone_security_config(z);
4691 	struct zone_expand ze = {
4692 		.ze_thread  = current_thread(),
4693 	};
4694 
4695 	if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4696 		ze.ze_thread->options |= TH_OPT_VMPRIV;
4697 		ze.ze_clear_priv = true;
4698 	}
4699 
4700 	if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4701 		/*
4702 		 * When the thread is VM privileged,
4703 		 * vm_page_grab() will call VM_PAGE_WAIT()
4704 		 * without our knowledge, so we must assume
4705 		 * it's being called unfortunately.
4706 		 *
4707 		 * In practice it's not a big deal because
4708 		 * Z_NOPAGEWAIT is not really used on zones
4709 		 * that VM privileged threads are going to expand.
4710 		 */
4711 		ze.ze_pg_wait = true;
4712 		ze.ze_vm_priv = true;
4713 	}
4714 
4715 	for (;;) {
4716 		if (!z->z_permanent && !zalloc_needs_refill(z, flags)) {
4717 			goto out;
4718 		}
4719 
4720 		if (z->z_expander == NULL) {
4721 			z->z_expander = &ze;
4722 			break;
4723 		}
4724 
4725 		if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4726 			change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4727 			ze.ze_next = z->z_expander;
4728 			z->z_expander = &ze;
4729 			break;
4730 		}
4731 
4732 		if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4733 			goto out;
4734 		}
4735 
4736 		z->z_expanding_wait = true;
4737 		hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4738 		    LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4739 		    TH_UNINT, TIMEOUT_WAIT_FOREVER);
4740 	}
4741 
4742 	do {
4743 		struct zone_page_metadata *meta = NULL;
4744 		uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4745 		vm_page_t page_list = NULL;
4746 		vm_offset_t addr = 0;
4747 		int waited = 0;
4748 
4749 		if ((flags & Z_NOFAIL) && zone_exhausted(z)) {
4750 			__ZONE_EXHAUSTED_AND_WAITING_HARD__(z);
4751 			continue; /* reevaluate if we really need it */
4752 		}
4753 
4754 		/*
4755 		 * While we hold the zone lock, look if there's VA we can:
4756 		 * - complete from partial pages,
4757 		 * - reuse from the sequester list.
4758 		 *
4759 		 * When the page is being populated we pretend we allocated
4760 		 * an extra element so that zone_gc() can't attempt to free
4761 		 * the chunk (as it could become empty while we wait for pages).
4762 		 */
4763 		if (zone_pva_is_null(z->z_pageq_va)) {
4764 			zone_allocate_va_locked(z, flags);
4765 		}
4766 
4767 		meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4768 		addr = zone_meta_to_addr(meta);
4769 		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4770 			cur_pages = meta->zm_page_index;
4771 			meta -= cur_pages;
4772 			addr -= ptoa(cur_pages);
4773 			zone_meta_lock_in_partial(z, meta, cur_pages);
4774 		}
4775 		zone_unlock(z);
4776 
4777 		/*
4778 		 * And now allocate pages to populate our VA.
4779 		 */
4780 		min_pages = z->z_chunk_pages;
4781 #if !KASAN_CLASSIC
4782 		if (!z->z_percpu) {
4783 			min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4784 			    zone_elem_outer_size(z)));
4785 		}
4786 #endif /* !KASAN_CLASSIC */
4787 
4788 		/*
4789 		 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
4790 		 * if we're running out of zone memory
4791 		 */
4792 		if (__improbable(zone_map_nearing_exhaustion())) {
4793 			__ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4794 		}
4795 
4796 		ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4797 
4798 		while (pages < z->z_chunk_pages - cur_pages) {
4799 			vm_page_t m = vm_page_grab();
4800 
4801 			if (m) {
4802 				pages++;
4803 				m->vmp_snext = page_list;
4804 				page_list = m;
4805 				vm_page_zero_fill(m);
4806 				continue;
4807 			}
4808 
4809 			if (pages >= min_pages &&
4810 			    !zone_expand_wait_for_pages(waited)) {
4811 				break;
4812 			}
4813 
4814 			if ((flags & Z_NOPAGEWAIT) == 0) {
4815 				/*
4816 				 * The first time we're about to wait for pages,
4817 				 * mention that to waiters and wake them all.
4818 				 *
4819 				 * Set `ze_pg_wait` in our zone_expand context
4820 				 * so that waiters who care do not wait again.
4821 				 */
4822 				if (!ze.ze_pg_wait) {
4823 					zone_lock(z);
4824 					if (z->z_expanding_wait) {
4825 						z->z_expanding_wait = false;
4826 						wakeup_all_with_inheritor(&z->z_expander,
4827 						    THREAD_AWAKENED);
4828 					}
4829 					ze.ze_pg_wait = true;
4830 					zone_unlock(z);
4831 				}
4832 
4833 				waited++;
4834 				VM_PAGE_WAIT();
4835 				continue;
4836 			}
4837 
4838 			/*
4839 			 * Undo everything and bail out:
4840 			 *
4841 			 * - free pages
4842 			 * - undo the fake allocation if any
4843 			 * - put the VA back on the VA page queue.
4844 			 */
4845 			vm_page_free_list(page_list, FALSE);
4846 			ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4847 
4848 			zone_lock(z);
4849 
4850 			zone_expand_async_schedule_if_allowed(z);
4851 
4852 			if (cur_pages) {
4853 				zone_meta_unlock_from_partial(z, meta, cur_pages);
4854 			}
4855 			if (meta) {
4856 				zone_meta_queue_push(z, &z->z_pageq_va,
4857 				    meta + cur_pages);
4858 			}
4859 			goto page_shortage;
4860 		}
4861 
4862 		vm_object_t object;
4863 		object = kernel_object_default;
4864 		vm_object_lock(object);
4865 		kernel_memory_populate_object_and_unlock(object,
4866 		    addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4867 		    zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4868 		    (zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY)
4869 		    ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE);
4870 
4871 		ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4872 
4873 		zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4874 
4875 		/*
4876 		 * permanent zones only try once,
4877 		 * the retry loop is in the caller
4878 		 */
4879 	} while (!z->z_permanent && zalloc_needs_refill(z, flags));
4880 
4881 page_shortage:
4882 	if (z->z_expander == &ze) {
4883 		z->z_expander = ze.ze_next;
4884 	} else {
4885 		assert(z->z_expander->ze_next == &ze);
4886 		z->z_expander->ze_next = NULL;
4887 	}
4888 	if (z->z_expanding_wait) {
4889 		z->z_expanding_wait = false;
4890 		wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4891 	}
4892 out:
4893 	if (ze.ze_clear_priv) {
4894 		ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4895 	}
4896 }
4897 
4898 static void
zone_expand_async(__unused thread_call_param_t p0,__unused thread_call_param_t p1)4899 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4900 {
4901 	zone_foreach(z) {
4902 		if (z->no_callout) {
4903 			/* z_async_refilling will never be set */
4904 			continue;
4905 		}
4906 
4907 		if (!z->z_async_refilling) {
4908 			/*
4909 			 * avoid locking all zones, because the one(s)
4910 			 * we're looking for have been set _before_
4911 			 * thread_call_enter() was called, if we fail
4912 			 * to observe the bit, it means the thread-call
4913 			 * has been "dinged" again and we'll notice it then.
4914 			 */
4915 			continue;
4916 		}
4917 
4918 		zone_lock(z);
4919 		if (z->z_self && z->z_async_refilling) {
4920 			zone_expand_locked(z, Z_WAITOK);
4921 			/*
4922 			 * clearing _after_ we grow is important,
4923 			 * so that we avoid waking up the thread call
4924 			 * while we grow and cause to run a second time.
4925 			 */
4926 			z->z_async_refilling = false;
4927 		}
4928 		zone_unlock(z);
4929 	}
4930 }
4931 
4932 #endif /* !ZALLOC_TEST */
4933 #pragma mark zone jetsam integration
4934 #if !ZALLOC_TEST
4935 
4936 /*
4937  * We're being very conservative here and picking a value of 95%. We might need to lower this if
4938  * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4939  */
4940 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4941 
4942 /*
4943  * Threshold above which largest zones should be included in the panic log
4944  */
4945 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4946 
4947 /*
4948  * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4949  * where X=zone_map_jetsam_limit.
4950  *
4951  * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4952  */
4953 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
4954     ZONE_MAP_JETSAM_LIMIT_DEFAULT);
4955 
4956 kern_return_t
zone_map_jetsam_set_limit(uint32_t value)4957 zone_map_jetsam_set_limit(uint32_t value)
4958 {
4959 	if (value <= 0 || value > 100) {
4960 		return KERN_INVALID_VALUE;
4961 	}
4962 
4963 	zone_map_jetsam_limit = value;
4964 	os_atomic_store(&zone_pages_jetsam_threshold,
4965 	    zone_pages_wired_max * value / 100, relaxed);
4966 	return KERN_SUCCESS;
4967 }
4968 
4969 void
get_zone_map_size(uint64_t * current_size,uint64_t * capacity)4970 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
4971 {
4972 	vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4973 	*current_size = ptoa_64(phys_pages);
4974 	*capacity = ptoa_64(zone_pages_wired_max);
4975 }
4976 
4977 void
get_largest_zone_info(char * zone_name,size_t zone_name_len,uint64_t * zone_size)4978 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
4979 {
4980 	zone_t largest_zone = zone_find_largest(zone_size);
4981 
4982 	/*
4983 	 * Append kalloc heap name to zone name (if zone is used by kalloc)
4984 	 */
4985 	snprintf(zone_name, zone_name_len, "%s%s",
4986 	    zone_heap_name(largest_zone), largest_zone->z_name);
4987 }
4988 
4989 static bool
zone_map_nearing_threshold(unsigned int threshold)4990 zone_map_nearing_threshold(unsigned int threshold)
4991 {
4992 	uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4993 	return phys_pages * 100 > zone_pages_wired_max * threshold;
4994 }
4995 
4996 bool
zone_map_nearing_exhaustion(void)4997 zone_map_nearing_exhaustion(void)
4998 {
4999 	vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
5000 
5001 	return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
5002 }
5003 
5004 
5005 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
5006 
5007 /*
5008  * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
5009  * to walk through the jetsam priority bands and kill processes.
5010  */
5011 static zone_t
kill_process_in_largest_zone(void)5012 kill_process_in_largest_zone(void)
5013 {
5014 	pid_t pid = -1;
5015 	uint64_t zone_size = 0;
5016 	zone_t largest_zone = zone_find_largest(&zone_size);
5017 
5018 	printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
5019 	    ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
5020 	    ptoa_64(zone_pages_wired_max),
5021 	    (uint64_t)zone_submaps_approx_size(),
5022 	    (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
5023 	    zone_map_jetsam_limit);
5024 	printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
5025 	    largest_zone->z_name, (uintptr_t)zone_size);
5026 
5027 	/*
5028 	 * We want to make sure we don't call this function from userspace.
5029 	 * Or we could end up trying to synchronously kill the process
5030 	 * whose context we're in, causing the system to hang.
5031 	 */
5032 	assert(current_task() == kernel_task);
5033 
5034 	/*
5035 	 * If vm_object_zone is the largest, check to see if the number of
5036 	 * elements in vm_map_entry_zone is comparable.
5037 	 *
5038 	 * If so, consider vm_map_entry_zone as the largest. This lets us target
5039 	 * a specific process to jetsam to quickly recover from the zone map
5040 	 * bloat.
5041 	 */
5042 	if (largest_zone == vm_object_zone) {
5043 		unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
5044 		unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
5045 		/* Is the VM map entries zone count >= 98% of the VM objects zone count? */
5046 		if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
5047 			largest_zone = vm_map_entry_zone;
5048 			printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
5049 			    (uintptr_t)zone_size_wired(largest_zone));
5050 		}
5051 	}
5052 
5053 	/* TODO: Extend this to check for the largest process in other zones as well. */
5054 	if (largest_zone == vm_map_entry_zone) {
5055 		pid = find_largest_process_vm_map_entries();
5056 	} else {
5057 		printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
5058 		    "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
5059 		    largest_zone->z_name);
5060 	}
5061 	if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
5062 		printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
5063 	}
5064 
5065 	return largest_zone;
5066 }
5067 
5068 #endif /* !ZALLOC_TEST */
5069 #pragma mark probabilistic gzalloc
5070 #if !ZALLOC_TEST
5071 #if CONFIG_PROB_GZALLOC
5072 
5073 extern uint32_t random(void);
5074 struct pgz_backtrace {
5075 	uint32_t  pgz_depth;
5076 	int32_t   pgz_bt[MAX_ZTRACE_DEPTH];
5077 };
5078 
5079 static int32_t  PERCPU_DATA(pgz_sample_counter);
5080 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
5081 static uint32_t pgz_uses;       /* number of zones using PGZ */
5082 static int32_t  pgz_slot_avail;
5083 #if OS_ATOMIC_HAS_LLSC
5084 struct zone_page_metadata *pgz_slot_head;
5085 #else
5086 static struct pgz_slot_head {
5087 	uint32_t psh_count;
5088 	uint32_t psh_slot;
5089 } pgz_slot_head;
5090 #endif
5091 struct zone_page_metadata *pgz_slot_tail;
5092 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5093 
5094 static struct zone_page_metadata *
pgz_meta(uint32_t index)5095 pgz_meta(uint32_t index)
5096 {
5097 	return &zone_info.zi_pgz_meta[2 * index + 1];
5098 }
5099 
5100 static struct pgz_backtrace *
pgz_bt(uint32_t slot,bool free)5101 pgz_bt(uint32_t slot, bool free)
5102 {
5103 	return &pgz_backtraces[2 * slot + free];
5104 }
5105 
5106 static void
pgz_backtrace(struct pgz_backtrace * bt,void * fp)5107 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5108 {
5109 	struct backtrace_control ctl = {
5110 		.btc_frame_addr = (uintptr_t)fp,
5111 	};
5112 
5113 	bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5114 	    (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5115 }
5116 
5117 static uint32_t
pgz_slot(vm_offset_t addr)5118 pgz_slot(vm_offset_t addr)
5119 {
5120 	return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5121 }
5122 
5123 static vm_offset_t
pgz_addr(uint32_t slot)5124 pgz_addr(uint32_t slot)
5125 {
5126 	return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5127 }
5128 
5129 static bool
pgz_sample(vm_offset_t addr,vm_size_t esize)5130 pgz_sample(vm_offset_t addr, vm_size_t esize)
5131 {
5132 	int32_t *counterp, cnt;
5133 
5134 	if (zone_addr_size_crosses_page(addr, esize)) {
5135 		return false;
5136 	}
5137 
5138 	/*
5139 	 * Note: accessing pgz_sample_counter is racy but this is
5140 	 *       kind of acceptable given that this is not
5141 	 *       a security load bearing feature.
5142 	 */
5143 
5144 	counterp = PERCPU_GET(pgz_sample_counter);
5145 	cnt = *counterp;
5146 	if (__probable(cnt > 0)) {
5147 		*counterp = cnt - 1;
5148 		return false;
5149 	}
5150 
5151 	if (pgz_slot_avail <= 0) {
5152 		return false;
5153 	}
5154 
5155 	/*
5156 	 * zalloc_random_uniform() might block, so when preemption is disabled,
5157 	 * set the counter to `-1` which will cause the next allocation
5158 	 * that can block to generate a new random value.
5159 	 *
5160 	 * No allocation on this CPU will sample until then.
5161 	 */
5162 	if (get_preemption_level()) {
5163 		*counterp = -1;
5164 	} else {
5165 		*counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5166 	}
5167 
5168 	return cnt == 0;
5169 }
5170 
5171 static inline bool
pgz_slot_alloc(uint32_t * slot)5172 pgz_slot_alloc(uint32_t *slot)
5173 {
5174 	struct zone_page_metadata *m;
5175 	uint32_t tries = 100;
5176 
5177 	disable_preemption();
5178 
5179 #if OS_ATOMIC_USE_LLSC
5180 	int32_t ov, nv;
5181 	os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5182 		if (__improbable(ov <= 0)) {
5183 		        os_atomic_rmw_loop_give_up({
5184 				enable_preemption();
5185 				return false;
5186 			});
5187 		}
5188 		nv = ov - 1;
5189 	});
5190 #else
5191 	if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5192 		os_atomic_inc(&pgz_slot_avail, relaxed);
5193 		enable_preemption();
5194 		return false;
5195 	}
5196 #endif
5197 
5198 again:
5199 	if (__improbable(tries-- == 0)) {
5200 		/*
5201 		 * Too much contention,
5202 		 * extremely unlikely but do not stay stuck.
5203 		 */
5204 		os_atomic_inc(&pgz_slot_avail, relaxed);
5205 		enable_preemption();
5206 		return false;
5207 	}
5208 
5209 #if OS_ATOMIC_HAS_LLSC
5210 	do {
5211 		m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5212 		if (__improbable(m->zm_pgz_slot_next == NULL)) {
5213 			/*
5214 			 * Either we are waiting for an enqueuer (unlikely)
5215 			 * or we are competing with another core and
5216 			 * are looking at a popped element.
5217 			 */
5218 			os_atomic_clear_exclusive();
5219 			goto again;
5220 		}
5221 	} while (!os_atomic_store_exclusive(&pgz_slot_head,
5222 	    m->zm_pgz_slot_next, relaxed));
5223 #else
5224 	struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5225 	struct pgz_slot_head ov, nv;
5226 	os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5227 		m = &base[ov.psh_slot * 2];
5228 		if (__improbable(m->zm_pgz_slot_next == NULL)) {
5229 		        /*
5230 		         * Either we are waiting for an enqueuer (unlikely)
5231 		         * or we are competing with another core and
5232 		         * are looking at a popped element.
5233 		         */
5234 		        os_atomic_rmw_loop_give_up(goto again);
5235 		}
5236 		nv.psh_count = ov.psh_count + 1;
5237 		nv.psh_slot  = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5238 	});
5239 #endif
5240 
5241 	enable_preemption();
5242 
5243 	m->zm_pgz_slot_next = NULL;
5244 	*slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5245 	return true;
5246 }
5247 
5248 static inline bool
pgz_slot_free(uint32_t slot)5249 pgz_slot_free(uint32_t slot)
5250 {
5251 	struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
5252 	struct zone_page_metadata *t;
5253 
5254 	disable_preemption();
5255 	t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5256 	os_atomic_store(&t->zm_pgz_slot_next, m, release);
5257 	os_atomic_inc(&pgz_slot_avail, relaxed);
5258 	enable_preemption();
5259 
5260 	return true;
5261 }
5262 
5263 /*!
5264  * @function pgz_protect()
5265  *
5266  * @brief
5267  * Try to protect an allocation with PGZ.
5268  *
5269  * @param zone          The zone the allocation was made against.
5270  * @param addr          An allocated element address to protect.
5271  * @param fp            The caller frame pointer (for the backtrace).
5272  * @returns             The new address for the element, or @c addr.
5273  */
5274 __attribute__((noinline))
5275 static vm_offset_t
pgz_protect(zone_t zone,vm_offset_t addr,void * fp)5276 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5277 {
5278 	kern_return_t kr;
5279 	uint32_t slot;
5280 
5281 	if (!pgz_slot_alloc(&slot)) {
5282 		return addr;
5283 	}
5284 
5285 	/*
5286 	 * Try to double-map the page (may fail if Z_NOWAIT).
5287 	 * we will always find a PA because pgz_init() pre-expanded the pmap.
5288 	 */
5289 	vm_offset_t  new_addr = pgz_addr(slot);
5290 	pmap_paddr_t pa = kvtophys(trunc_page(addr));
5291 
5292 	kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5293 	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5294 	    get_preemption_level() ? PMAP_OPTIONS_NOWAIT : 0, NULL);
5295 
5296 	if (__improbable(kr != KERN_SUCCESS)) {
5297 		pgz_slot_free(slot);
5298 		return addr;
5299 	}
5300 
5301 	struct zone_page_metadata tmp = {
5302 		.zm_chunk_len = ZM_PGZ_ALLOCATED,
5303 		.zm_index     = zone_index(zone),
5304 	};
5305 	struct zone_page_metadata *meta = pgz_meta(slot);
5306 
5307 	os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5308 	os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5309 	pgz_backtrace(pgz_bt(slot, false), fp);
5310 
5311 	return new_addr + (addr & PAGE_MASK);
5312 }
5313 
5314 /*!
5315  * @function pgz_unprotect()
5316  *
5317  * @brief
5318  * Release a PGZ slot and returns the original address of a freed element.
5319  *
5320  * @param addr          A PGZ protected element address.
5321  * @param fp            The caller frame pointer (for the backtrace).
5322  * @returns             The non protected address for the element
5323  *                      that was passed to @c pgz_protect().
5324  */
5325 __attribute__((noinline))
5326 static vm_offset_t
pgz_unprotect(vm_offset_t addr,void * fp)5327 pgz_unprotect(vm_offset_t addr, void *fp)
5328 {
5329 	struct zone_page_metadata *meta;
5330 	struct zone_page_metadata tmp;
5331 	uint32_t slot;
5332 
5333 	slot = pgz_slot(addr);
5334 	meta = zone_meta_from_addr(addr);
5335 	tmp  = *meta;
5336 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5337 		goto double_free;
5338 	}
5339 
5340 	pmap_remove(kernel_pmap, vm_memtag_canonicalize_address(trunc_page(addr)),
5341 	    vm_memtag_canonicalize_address(trunc_page(addr) + PAGE_SIZE));
5342 
5343 	pgz_backtrace(pgz_bt(slot, true), fp);
5344 
5345 	tmp.zm_chunk_len = ZM_PGZ_FREE;
5346 	tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5347 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5348 		goto double_free;
5349 	}
5350 
5351 	pgz_slot_free(slot);
5352 	return tmp.zm_pgz_orig_addr;
5353 
5354 double_free:
5355 	panic_fault_address = addr;
5356 	meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5357 	panic("probabilistic gzalloc double free: %p", (void *)addr);
5358 }
5359 
5360 bool
pgz_owned(mach_vm_address_t addr)5361 pgz_owned(mach_vm_address_t addr)
5362 {
5363 	return mach_vm_range_contains(&zone_info.zi_pgz_range, vm_memtag_canonicalize_address(addr));
5364 }
5365 
5366 
5367 __attribute__((always_inline))
5368 vm_offset_t
__pgz_decode(mach_vm_address_t addr,mach_vm_size_t size)5369 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5370 {
5371 	struct zone_page_metadata *meta;
5372 
5373 	if (__probable(!pgz_owned(addr))) {
5374 		return (vm_offset_t)addr;
5375 	}
5376 
5377 	if (zone_addr_size_crosses_page(addr, size)) {
5378 		panic("invalid size for PGZ protected address %p:%p",
5379 		    (void *)addr, (void *)(addr + size));
5380 	}
5381 
5382 	meta = zone_meta_from_addr((vm_offset_t)addr);
5383 	if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5384 		panic_fault_address = (vm_offset_t)addr;
5385 		panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5386 	}
5387 
5388 	return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5389 }
5390 
5391 __attribute__((always_inline))
5392 vm_offset_t
__pgz_decode_allow_invalid(vm_offset_t addr,zone_id_t zid)5393 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5394 {
5395 	struct zone_page_metadata *meta;
5396 	struct zone_page_metadata tmp;
5397 
5398 	if (__probable(!pgz_owned(addr))) {
5399 		return addr;
5400 	}
5401 
5402 	meta = zone_meta_from_addr(addr);
5403 	tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5404 
5405 	addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5406 
5407 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5408 		return 0;
5409 	}
5410 
5411 	if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5412 		return 0;
5413 	}
5414 
5415 	return addr;
5416 }
5417 
5418 static void
pgz_zone_init(zone_t z)5419 pgz_zone_init(zone_t z)
5420 {
5421 	char zn[MAX_ZONE_NAME];
5422 	char zv[MAX_ZONE_NAME];
5423 	char key[30];
5424 
5425 	if (zone_elem_inner_size(z) > PAGE_SIZE) {
5426 		return;
5427 	}
5428 
5429 	if (pgz_all) {
5430 		os_atomic_inc(&pgz_uses, relaxed);
5431 		z->z_pgz_tracked = true;
5432 		return;
5433 	}
5434 
5435 	snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5436 
5437 	for (int i = 1;; i++) {
5438 		snprintf(key, sizeof(key), "pgz%d", i);
5439 		if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5440 			break;
5441 		}
5442 		if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5443 			os_atomic_inc(&pgz_uses, relaxed);
5444 			z->z_pgz_tracked = true;
5445 			break;
5446 		}
5447 	}
5448 }
5449 
5450 __startup_func
5451 static vm_size_t
pgz_get_size(void)5452 pgz_get_size(void)
5453 {
5454 	if (pgz_slots == UINT32_MAX) {
5455 		/*
5456 		 * Scale with RAM size: ~200 slots a G
5457 		 */
5458 		pgz_slots = (uint32_t)(sane_size >> 22);
5459 	}
5460 
5461 	/*
5462 	 * Make sure that the slot allocation scheme works.
5463 	 * see pgz_slot_alloc() / pgz_slot_free();
5464 	 */
5465 	if (pgz_slots < zpercpu_count() * 4) {
5466 		pgz_slots = zpercpu_count() * 4;
5467 	}
5468 	if (pgz_slots >= UINT16_MAX) {
5469 		pgz_slots = UINT16_MAX - 1;
5470 	}
5471 
5472 	/*
5473 	 * Quarantine is 33% of slots by default, no more than 90%.
5474 	 */
5475 	if (pgz_quarantine == 0) {
5476 		pgz_quarantine = pgz_slots / 3;
5477 	}
5478 	if (pgz_quarantine > pgz_slots * 9 / 10) {
5479 		pgz_quarantine = pgz_slots * 9 / 10;
5480 	}
5481 	pgz_slot_avail = pgz_slots - pgz_quarantine;
5482 
5483 	return ptoa(2 * pgz_slots + 1);
5484 }
5485 
5486 __startup_func
5487 static void
pgz_init(void)5488 pgz_init(void)
5489 {
5490 	if (!pgz_uses) {
5491 		return;
5492 	}
5493 
5494 	if (pgz_sample_rate == 0) {
5495 		/*
5496 		 * If no rate was provided, pick a random one that scales
5497 		 * with the number of protected zones.
5498 		 *
5499 		 * Use a binomal distribution to avoid having too many
5500 		 * really fast sample rates.
5501 		 */
5502 		uint32_t factor = MIN(pgz_uses, 10);
5503 		uint32_t max_rate = 1000 * factor;
5504 		uint32_t min_rate =  100 * factor;
5505 
5506 		pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5507 		    zalloc_random_uniform32(min_rate, max_rate)) / 2;
5508 	}
5509 
5510 	struct mach_vm_range *r = &zone_info.zi_pgz_range;
5511 	zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5512 	zone_meta_populate(r->min_address, mach_vm_range_size(r));
5513 
5514 	for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5515 		zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5516 	}
5517 
5518 	for (size_t i = 1; i < pgz_slots; i++) {
5519 		zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5520 		    &zone_info.zi_pgz_meta[2 * i + 1];
5521 	}
5522 #if OS_ATOMIC_HAS_LLSC
5523 	pgz_slot_head = &zone_info.zi_pgz_meta[1];
5524 #endif
5525 	pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5526 
5527 	pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5528 	    2 * pgz_slots, ZALIGN_PTR);
5529 
5530 	/*
5531 	 * expand the pmap so that pmap_enter_options_addr()
5532 	 * in pgz_protect() never need to call pmap_expand().
5533 	 */
5534 	for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5535 		(void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5536 		    VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5537 		    PMAP_OPTIONS_NOENTER, NULL);
5538 	}
5539 
5540 	/* do this last as this will enable pgz */
5541 	percpu_foreach(counter, pgz_sample_counter) {
5542 		*counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5543 	}
5544 }
5545 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
5546 
5547 static void
panic_display_pgz_bt(bool has_syms,uint32_t slot,bool free)5548 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5549 {
5550 	struct pgz_backtrace *bt = pgz_bt(slot, free);
5551 	const char *what = free ? "Free" : "Allocation";
5552 	uintptr_t buf[MAX_ZTRACE_DEPTH];
5553 
5554 	if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5555 		paniclog_append_noflush("  Can't decode %s Backtrace\n", what);
5556 		return;
5557 	}
5558 
5559 	backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5560 	    (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5561 
5562 	paniclog_append_noflush("  %s Backtrace:\n", what);
5563 	for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5564 		if (has_syms) {
5565 			paniclog_append_noflush("    %p ", (void *)buf[i]);
5566 			panic_print_symbol_name(buf[i]);
5567 			paniclog_append_noflush("\n");
5568 		} else {
5569 			paniclog_append_noflush("    %p\n", (void *)buf[i]);
5570 		}
5571 	}
5572 	kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5573 }
5574 
5575 static void
panic_display_pgz_uaf_info(bool has_syms,vm_offset_t addr)5576 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5577 {
5578 	struct zone_page_metadata *meta;
5579 	vm_offset_t elem, esize;
5580 	const char *type;
5581 	const char *prob;
5582 	uint32_t slot;
5583 	zone_t z;
5584 
5585 	slot = pgz_slot(addr);
5586 	meta = pgz_meta(slot);
5587 	elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5588 
5589 	paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5590 
5591 	if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5592 	    meta->zm_index &&
5593 	    meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5594 		z = &zone_array[meta->zm_index];
5595 	} else {
5596 		paniclog_append_noflush("  Zone    : <unknown>\n");
5597 		paniclog_append_noflush("  Address : %p\n", (void *)addr);
5598 		paniclog_append_noflush("\n");
5599 		return;
5600 	}
5601 
5602 	esize = zone_elem_inner_size(z);
5603 	paniclog_append_noflush("  Zone    : %s%s\n",
5604 	    zone_heap_name(z), zone_name(z));
5605 	paniclog_append_noflush("  Address : %p\n", (void *)addr);
5606 	paniclog_append_noflush("  Element : [%p, %p) of size %d\n",
5607 	    (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5608 
5609 	if (addr < elem) {
5610 		type = "out-of-bounds(underflow) + use-after-free";
5611 		prob = "low";
5612 	} else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5613 		type = "double-free";
5614 		prob = "high";
5615 	} else if (addr < elem + esize) {
5616 		type = "use-after-free";
5617 		prob = "high";
5618 	} else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5619 		type = "out-of-bounds + use-after-free";
5620 		prob = "low";
5621 	} else {
5622 		type = "out-of-bounds";
5623 		prob = "high";
5624 	}
5625 	paniclog_append_noflush("  Kind    : %s (%s confidence)\n",
5626 	    type, prob);
5627 	if (addr < elem) {
5628 		paniclog_append_noflush("  Access  : %d byte(s) before\n",
5629 		    (uint32_t)(elem - addr) + 1);
5630 	} else if (addr < elem + esize) {
5631 		paniclog_append_noflush("  Access  : %d byte(s) inside\n",
5632 		    (uint32_t)(addr - elem) + 1);
5633 	} else {
5634 		paniclog_append_noflush("  Access  : %d byte(s) past\n",
5635 		    (uint32_t)(addr - (elem + esize)) + 1);
5636 	}
5637 
5638 	panic_display_pgz_bt(has_syms, slot, false);
5639 	if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5640 		panic_display_pgz_bt(has_syms, slot, true);
5641 	}
5642 
5643 	paniclog_append_noflush("\n");
5644 }
5645 
5646 #endif /* CONFIG_PROB_GZALLOC */
5647 #endif /* !ZALLOC_TEST */
5648 #pragma mark zfree
5649 #if !ZALLOC_TEST
5650 
5651 /*!
5652  * @defgroup zfree
5653  * @{
5654  *
5655  * @brief
5656  * The codepath for zone frees.
5657  *
5658  * @discussion
5659  * There are 4 major ways to allocate memory that end up in the zone allocator:
5660  * - @c zfree()
5661  * - @c zfree_percpu()
5662  * - @c kfree*()
5663  * - @c zfree_permanent()
5664  *
5665  * While permanent zones have their own allocation scheme, all other codepaths
5666  * will eventually go through the @c zfree_ext() choking point.
5667  */
5668 
5669 __header_always_inline void
zfree_drop(zone_t zone,vm_offset_t addr)5670 zfree_drop(zone_t zone, vm_offset_t addr)
5671 {
5672 	vm_offset_t esize = zone_elem_outer_size(zone);
5673 	struct zone_page_metadata *meta;
5674 	vm_offset_t eidx;
5675 
5676 	meta = zone_element_resolve(zone, addr, &eidx);
5677 
5678 	if (!zone_meta_mark_free(meta, eidx)) {
5679 		zone_meta_double_free_panic(zone, addr, __func__);
5680 	}
5681 
5682 	vm_offset_t old_size = meta->zm_alloc_size;
5683 	vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5684 	vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5685 
5686 	if (new_size == 0) {
5687 		/* whether the page was on the intermediate or all_used, queue, move it to free */
5688 		zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5689 		zone->z_wired_empty += meta->zm_chunk_len;
5690 	} else if (old_size + esize > max_size) {
5691 		/* first free element on page, move from all_used */
5692 		zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5693 	}
5694 
5695 	if (__improbable(zone->z_exhausted_wait)) {
5696 		zone->z_exhausted_wait = false;
5697 		thread_wakeup(&zone->z_expander);
5698 	}
5699 }
5700 
5701 __attribute__((noinline))
5702 static void
zfree_item(zone_t zone,vm_offset_t addr)5703 zfree_item(zone_t zone, vm_offset_t addr)
5704 {
5705 	/* transfer preemption count to lock */
5706 	zone_lock_nopreempt_check_contention(zone);
5707 
5708 	zfree_drop(zone, addr);
5709 	zone->z_elems_free += 1;
5710 
5711 	zone_unlock(zone);
5712 }
5713 
5714 static void
zfree_cached_depot_recirculate(zone_t zone,uint32_t depot_max,zone_cache_t cache)5715 zfree_cached_depot_recirculate(
5716 	zone_t                  zone,
5717 	uint32_t                depot_max,
5718 	zone_cache_t            cache)
5719 {
5720 	smr_t smr = zone_cache_smr(cache);
5721 	smr_seq_t seq;
5722 	uint32_t n;
5723 
5724 	zone_recirc_lock_nopreempt_check_contention(zone);
5725 
5726 	n = cache->zc_depot.zd_full;
5727 	if (n >= depot_max) {
5728 		/*
5729 		 * If SMR is in use, rotate the entire chunk of magazines.
5730 		 *
5731 		 * If the head of the recirculation layer is ready to be
5732 		 * reused, pull them back to refill a little.
5733 		 */
5734 		seq = zone_depot_move_full(&zone->z_recirc,
5735 		    &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5736 
5737 		if (smr) {
5738 			smr_deferred_advance_commit(smr, seq);
5739 			if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5740 				zone_depot_move_full(&cache->zc_depot,
5741 				    &zone->z_recirc, depot_max / 2, NULL);
5742 			}
5743 		}
5744 	}
5745 
5746 	n = depot_max - cache->zc_depot.zd_full;
5747 	if (n > zone->z_recirc.zd_empty) {
5748 		n = zone->z_recirc.zd_empty;
5749 	}
5750 	if (n) {
5751 		zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5752 		    n, zone);
5753 	}
5754 
5755 	zone_recirc_unlock_nopreempt(zone);
5756 }
5757 
5758 static zone_cache_t
zfree_cached_recirculate(zone_t zone,zone_cache_t cache)5759 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5760 {
5761 	zone_magazine_t mag = NULL, tmp = NULL;
5762 	smr_t smr = zone_cache_smr(cache);
5763 
5764 	if (zone->z_recirc.zd_empty == 0) {
5765 		mag = zone_magazine_alloc(Z_NOWAIT);
5766 	}
5767 
5768 	zone_recirc_lock_nopreempt_check_contention(zone);
5769 
5770 	if (mag == NULL && zone->z_recirc.zd_empty) {
5771 		mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5772 		__builtin_assume(mag);
5773 	}
5774 	if (mag) {
5775 		tmp = zone_magazine_replace(cache, mag, true);
5776 		if (smr) {
5777 			smr_deferred_advance_commit(smr, tmp->zm_seq);
5778 		}
5779 		if (zone_security_array[zone_index(zone)].z_lifo) {
5780 			zone_depot_insert_head_full(&zone->z_recirc, tmp);
5781 		} else {
5782 			zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5783 		}
5784 	}
5785 
5786 	zone_recirc_unlock_nopreempt(zone);
5787 
5788 	return mag ? cache : NULL;
5789 }
5790 
5791 __attribute__((noinline))
5792 static zone_cache_t
zfree_cached_trim(zone_t zone,zone_cache_t cache)5793 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5794 {
5795 	zone_magazine_t mag = NULL, tmp = NULL;
5796 	uint32_t depot_max;
5797 
5798 	depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5799 	if (depot_max) {
5800 		zone_depot_lock_nopreempt(cache);
5801 
5802 		if (cache->zc_depot.zd_empty == 0) {
5803 			zfree_cached_depot_recirculate(zone, depot_max, cache);
5804 		}
5805 
5806 		if (__probable(cache->zc_depot.zd_empty)) {
5807 			mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5808 			__builtin_assume(mag);
5809 		} else {
5810 			mag = zone_magazine_alloc(Z_NOWAIT);
5811 		}
5812 		if (mag) {
5813 			tmp = zone_magazine_replace(cache, mag, true);
5814 			zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5815 		}
5816 		zone_depot_unlock_nopreempt(cache);
5817 
5818 		return mag ? cache : NULL;
5819 	}
5820 
5821 	return zfree_cached_recirculate(zone, cache);
5822 }
5823 
5824 __attribute__((always_inline))
5825 static inline zone_cache_t
zfree_cached_get_pcpu_cache(zone_t zone,int cpu)5826 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5827 {
5828 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5829 
5830 	if (__probable(cache->zc_free_cur < zc_mag_size())) {
5831 		return cache;
5832 	}
5833 
5834 	if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5835 		zone_cache_swap_magazines(cache);
5836 		return cache;
5837 	}
5838 
5839 	return zfree_cached_trim(zone, cache);
5840 }
5841 
5842 __attribute__((always_inline))
5843 static inline zone_cache_t
zfree_cached_get_pcpu_cache_smr(zone_t zone,int cpu)5844 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5845 {
5846 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5847 	size_t idx = cache->zc_free_cur;
5848 
5849 	if (__probable(idx + 1 < zc_mag_size())) {
5850 		return cache;
5851 	}
5852 
5853 	/*
5854 	 * when SMR is in use, the bucket is tagged early with
5855 	 * @c smr_deferred_advance(), which costs a full barrier,
5856 	 * but performs no store.
5857 	 *
5858 	 * When zones hit the recirculation layer, the advance is commited,
5859 	 * under the recirculation lock (see zfree_cached_recirculate()).
5860 	 *
5861 	 * When done this way, the zone contention detection mechanism
5862 	 * will adjust the size of the per-cpu depots gracefully, which
5863 	 * mechanically reduces the pace of these commits as usage increases.
5864 	 */
5865 
5866 	if (__probable(idx + 1 == zc_mag_size())) {
5867 		zone_magazine_t mag;
5868 
5869 		mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5870 		    offsetof(struct zone_magazine, zm_elems));
5871 		mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5872 		return cache;
5873 	}
5874 
5875 	return zfree_cached_trim(zone, cache);
5876 }
5877 
5878 __attribute__((always_inline))
5879 static inline vm_offset_t
__zcache_mark_invalid(zone_t zone,vm_offset_t elem,uint64_t combined_size)5880 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5881 {
5882 	struct zone_page_metadata *meta;
5883 	vm_offset_t offs;
5884 
5885 #pragma unused(combined_size)
5886 #if CONFIG_PROB_GZALLOC
5887 	if (__improbable(pgz_owned(elem))) {
5888 		elem = pgz_unprotect(elem, __builtin_frame_address(0));
5889 	}
5890 #endif /* CONFIG_PROB_GZALLOC */
5891 
5892 	meta = zone_meta_from_addr(elem);
5893 	if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5894 		zone_invalid_element_panic(zone, elem);
5895 	}
5896 
5897 	offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5898 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5899 		offs += ptoa(meta->zm_page_index);
5900 	}
5901 
5902 	if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5903 		zone_invalid_element_panic(zone, elem);
5904 	}
5905 
5906 #if VM_TAG_SIZECLASSES
5907 	if (__improbable(zone->z_uses_tags)) {
5908 		vm_tag_t *slot;
5909 
5910 		slot = zba_extra_ref_ptr(meta->zm_bitmap,
5911 		    Z_FAST_QUO(offs, zone->z_quo_magic));
5912 		vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5913 		    -(long)ZFREE_ELEM_SIZE(combined_size));
5914 		*slot = VM_KERN_MEMORY_NONE;
5915 	}
5916 #endif /* VM_TAG_SIZECLASSES */
5917 
5918 #if KASAN_CLASSIC
5919 	kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5920 	    ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5921 	    zone->z_percpu, __builtin_frame_address(0));
5922 #endif
5923 #if CONFIG_KERNEL_TAGGING
5924 	if (__probable(zone->z_tbi_tag)) {
5925 		elem = zone_tag_element(zone, elem, ZFREE_ELEM_SIZE(combined_size));
5926 	}
5927 #endif /* CONFIG_KERNEL_TAGGING */
5928 
5929 	return elem;
5930 }
5931 
5932 __attribute__((always_inline))
5933 void *
zcache_mark_invalid(zone_t zone,void * elem)5934 zcache_mark_invalid(zone_t zone, void *elem)
5935 {
5936 	vm_size_t esize = zone_elem_inner_offs(zone);
5937 
5938 	ZFREE_LOG(zone, (vm_offset_t)elem, 1);
5939 	return (void *)__zcache_mark_invalid(zone, (vm_offset_t)elem, ZFREE_PACK_SIZE(esize, esize));
5940 }
5941 
5942 /*
5943  *     The function is noinline when zlog can be used so that the backtracing can
5944  *     reliably skip the zfree_ext() and zfree_log()
5945  *     boring frames.
5946  */
5947 #if ZALLOC_ENABLE_LOGGING
5948 __attribute__((noinline))
5949 #endif /* ZALLOC_ENABLE_LOGGING */
5950 void
zfree_ext(zone_t zone,zone_stats_t zstats,void * addr,uint64_t combined_size)5951 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
5952 {
5953 	vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
5954 	vm_offset_t elem = (vm_offset_t)addr;
5955 	int cpu;
5956 
5957 	DTRACE_VM2(zfree, zone_t, zone, void*, elem);
5958 
5959 	ZFREE_LOG(zone, elem, 1);
5960 	elem = __zcache_mark_invalid(zone, elem, combined_size);
5961 
5962 	disable_preemption();
5963 	cpu = cpu_number();
5964 	zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
5965 
5966 #if KASAN_CLASSIC
5967 	if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
5968 		struct kasan_quarantine_result kqr;
5969 
5970 		kqr  = kasan_quarantine(elem, esize);
5971 		elem = kqr.addr;
5972 		zone = kqr.zone;
5973 		if (elem == 0) {
5974 			return enable_preemption();
5975 		}
5976 	}
5977 #endif
5978 
5979 	if (zone->z_pcpu_cache) {
5980 		zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
5981 
5982 		if (__probable(cache)) {
5983 			cache->zc_free_elems[cache->zc_free_cur++] = elem;
5984 			return enable_preemption();
5985 		}
5986 	}
5987 
5988 	return zfree_item(zone, elem);
5989 }
5990 
5991 __attribute__((always_inline))
5992 static inline zstack_t
zcache_free_stack_to_cpu(zone_id_t zid,zone_cache_t cache,zstack_t stack,vm_size_t esize,zone_cache_ops_t ops,bool zero)5993 zcache_free_stack_to_cpu(
5994 	zone_id_t               zid,
5995 	zone_cache_t            cache,
5996 	zstack_t                stack,
5997 	vm_size_t               esize,
5998 	zone_cache_ops_t        ops,
5999 	bool                    zero)
6000 {
6001 	size_t       n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
6002 	vm_offset_t *p;
6003 
6004 	stack.z_count -= n;
6005 	cache->zc_free_cur += n;
6006 	p = cache->zc_free_elems + cache->zc_free_cur;
6007 
6008 	do {
6009 		void *o = zstack_pop_no_delta(&stack);
6010 
6011 		if (ops) {
6012 			o = ops->zc_op_mark_invalid(zid, o);
6013 		} else {
6014 			if (zero) {
6015 				bzero(o, esize);
6016 			}
6017 			o = (void *)__zcache_mark_invalid(zone_by_id(zid),
6018 			    (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
6019 		}
6020 		*--p  = (vm_offset_t)o;
6021 	} while (--n > 0);
6022 
6023 	return stack;
6024 }
6025 
6026 __attribute__((always_inline))
6027 static inline void
zcache_free_1_ext(zone_id_t zid,void * addr,zone_cache_ops_t ops)6028 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6029 {
6030 	vm_offset_t elem = (vm_offset_t)addr;
6031 	zone_cache_t cache;
6032 	vm_size_t esize;
6033 	zone_t zone = zone_by_id(zid);
6034 	int cpu;
6035 
6036 	ZFREE_LOG(zone, elem, 1);
6037 
6038 	disable_preemption();
6039 	cpu = cpu_number();
6040 	esize = zone_elem_inner_size(zone);
6041 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6042 	if (!ops) {
6043 		addr = (void *)__zcache_mark_invalid(zone, elem,
6044 		    ZFREE_PACK_SIZE(esize, esize));
6045 	}
6046 	cache = zfree_cached_get_pcpu_cache(zone, cpu);
6047 	if (__probable(cache)) {
6048 		if (ops) {
6049 			addr = ops->zc_op_mark_invalid(zid, addr);
6050 		}
6051 		cache->zc_free_elems[cache->zc_free_cur++] = elem;
6052 		enable_preemption();
6053 	} else if (ops) {
6054 		enable_preemption();
6055 		os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
6056 		ops->zc_op_free(zid, addr);
6057 	} else {
6058 		zfree_item(zone, elem);
6059 	}
6060 }
6061 
6062 __attribute__((always_inline))
6063 static inline void
zcache_free_n_ext(zone_id_t zid,zstack_t stack,zone_cache_ops_t ops,bool zero)6064 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
6065 {
6066 	zone_t zone = zone_by_id(zid);
6067 	zone_cache_t cache;
6068 	vm_size_t esize;
6069 	int cpu;
6070 
6071 	ZFREE_LOG(zone, stack.z_head, stack.z_count);
6072 
6073 	disable_preemption();
6074 	cpu = cpu_number();
6075 	esize = zone_elem_inner_size(zone);
6076 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
6077 	    stack.z_count * esize;
6078 
6079 	for (;;) {
6080 		cache = zfree_cached_get_pcpu_cache(zone, cpu);
6081 		if (__probable(cache)) {
6082 			stack = zcache_free_stack_to_cpu(zid, cache,
6083 			    stack, esize, ops, zero);
6084 			enable_preemption();
6085 		} else if (ops) {
6086 			enable_preemption();
6087 			os_atomic_dec(&zone->z_elems_avail, relaxed);
6088 			ops->zc_op_free(zid, zstack_pop(&stack));
6089 		} else {
6090 			vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
6091 
6092 			if (zero) {
6093 				bzero((void *)addr, esize);
6094 			}
6095 			addr = __zcache_mark_invalid(zone, addr,
6096 			    ZFREE_PACK_SIZE(esize, esize));
6097 			zfree_item(zone, addr);
6098 		}
6099 
6100 		if (stack.z_count == 0) {
6101 			break;
6102 		}
6103 
6104 		disable_preemption();
6105 		cpu = cpu_number();
6106 	}
6107 }
6108 
6109 void
6110 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6111 {
6112 	__builtin_assume(ops != NULL);
6113 	zcache_free_1_ext(zid, addr, ops);
6114 }
6115 
6116 void
6117 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6118 {
6119 	__builtin_assume(ops != NULL);
6120 	zcache_free_n_ext(zid, stack, ops, false);
6121 }
6122 
6123 void
6124 (zfree_n)(zone_id_t zid, zstack_t stack)
6125 {
6126 	zcache_free_n_ext(zid, stack, NULL, true);
6127 }
6128 
6129 void
6130 (zfree_nozero)(zone_id_t zid, void *addr)
6131 {
6132 	zcache_free_1_ext(zid, addr, NULL);
6133 }
6134 
6135 void
6136 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6137 {
6138 	zcache_free_n_ext(zid, stack, NULL, false);
6139 }
6140 
6141 void
6142 (zfree)(zone_t zov, void *addr)
6143 {
6144 	zone_t zone = zov->z_self;
6145 	zone_stats_t zstats = zov->z_stats;
6146 	vm_offset_t esize = zone_elem_inner_size(zone);
6147 
6148 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6149 	assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6150 
6151 	vm_memtag_bzero(addr, esize);
6152 
6153 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6154 }
6155 
6156 __attribute__((noinline))
6157 void
zfree_percpu(union zone_or_view zov,void * addr)6158 zfree_percpu(union zone_or_view zov, void *addr)
6159 {
6160 	zone_t zone = zov.zov_view->zv_zone;
6161 	zone_stats_t zstats = zov.zov_view->zv_stats;
6162 	vm_offset_t esize = zone_elem_inner_size(zone);
6163 
6164 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6165 	assert(zone->z_percpu);
6166 	addr = (void *)__zpcpu_demangle(addr);
6167 	zpercpu_foreach_cpu(i) {
6168 		vm_memtag_bzero((char *)addr + ptoa(i), esize);
6169 	}
6170 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6171 }
6172 
6173 void
6174 (zfree_id)(zone_id_t zid, void *addr)
6175 {
6176 	(zfree)(&zone_array[zid], addr);
6177 }
6178 
6179 void
6180 (zfree_ro)(zone_id_t zid, void *addr)
6181 {
6182 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6183 	zone_t zone = zone_by_id(zid);
6184 	zone_stats_t zstats = zone->z_stats;
6185 	vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6186 
6187 #if ZSECURITY_CONFIG(READ_ONLY)
6188 	assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6189 	pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6190 #else
6191 	(void)zid;
6192 	bzero(addr, esize);
6193 #endif /* !KASAN_CLASSIC */
6194 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6195 }
6196 
6197 __attribute__((noinline))
6198 static void
zfree_item_smr(zone_t zone,vm_offset_t addr)6199 zfree_item_smr(zone_t zone, vm_offset_t addr)
6200 {
6201 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6202 	vm_size_t esize = zone_elem_inner_size(zone);
6203 
6204 	/*
6205 	 * This should be taken extremely rarely:
6206 	 * this happens if we failed allocating an empty bucket.
6207 	 */
6208 	smr_synchronize(zone_cache_smr(cache));
6209 
6210 	cache->zc_free((void *)addr, esize);
6211 	addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6212 
6213 	zfree_item(zone, addr);
6214 }
6215 
6216 void
6217 (zfree_smr)(zone_t zone, void *addr)
6218 {
6219 	vm_offset_t elem = (vm_offset_t)addr;
6220 	vm_offset_t esize;
6221 	zone_cache_t cache;
6222 	int cpu;
6223 
6224 	ZFREE_LOG(zone, elem, 1);
6225 
6226 	disable_preemption();
6227 	cpu   = cpu_number();
6228 #if MACH_ASSERT
6229 	cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6230 	assert(!smr_entered_cpu_noblock(cache->zc_smr, cpu));
6231 #endif
6232 	esize = zone_elem_inner_size(zone);
6233 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6234 	cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6235 	if (__probable(cache)) {
6236 		cache->zc_free_elems[cache->zc_free_cur++] = elem;
6237 		enable_preemption();
6238 	} else {
6239 		zfree_item_smr(zone, elem);
6240 	}
6241 }
6242 
6243 void
6244 (zfree_id_smr)(zone_id_t zid, void *addr)
6245 {
6246 	(zfree_smr)(&zone_array[zid], addr);
6247 }
6248 
6249 void
kfree_type_impl_internal(kalloc_type_view_t kt_view,void * ptr __unsafe_indexable)6250 kfree_type_impl_internal(
6251 	kalloc_type_view_t  kt_view,
6252 	void               *ptr __unsafe_indexable)
6253 {
6254 	zone_t zsig = kt_view->kt_zsig;
6255 	zone_t z = kt_view->kt_zv.zv_zone;
6256 	struct zone_page_metadata *meta = zone_meta_from_addr((vm_offset_t) ptr);
6257 	zone_id_t zidx_meta = meta->zm_index;
6258 	zone_security_flags_t zsflags_meta = zone_security_array[zidx_meta];
6259 	zone_security_flags_t zsflags_z = zone_security_config(z);
6260 	zone_security_flags_t zsflags_zsig;
6261 
6262 	if (NULL == ptr) {
6263 		return;
6264 	}
6265 
6266 	if ((zsflags_z.z_kheap_id == KHEAP_ID_DATA_BUFFERS) ||
6267 	    zone_has_index(z, zidx_meta)) {
6268 		return (zfree)(&kt_view->kt_zv, ptr);
6269 	}
6270 	zsflags_zsig = zone_security_config(zsig);
6271 	if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
6272 		z = zone_array + zidx_meta;
6273 		return (zfree)(z, ptr);
6274 	}
6275 
6276 	return (zfree)(kt_view->kt_zshared, ptr);
6277 }
6278 
6279 /*! @} */
6280 #endif /* !ZALLOC_TEST */
6281 #pragma mark zalloc
6282 #if !ZALLOC_TEST
6283 
6284 /*!
6285  * @defgroup zalloc
6286  * @{
6287  *
6288  * @brief
6289  * The codepath for zone allocations.
6290  *
6291  * @discussion
6292  * There are 4 major ways to allocate memory that end up in the zone allocator:
6293  * - @c zalloc(), @c zalloc_flags(), ...
6294  * - @c zalloc_percpu()
6295  * - @c kalloc*()
6296  * - @c zalloc_permanent()
6297  *
6298  * While permanent zones have their own allocation scheme, all other codepaths
6299  * will eventually go through the @c zalloc_ext() choking point.
6300  *
6301  * @c zalloc_return() is the final function everyone tail calls into,
6302  * which prepares the element for consumption by the caller and deals with
6303  * common treatment (zone logging, tags, kasan, validation, ...).
6304  */
6305 
6306 /*!
6307  * @function zalloc_import
6308  *
6309  * @brief
6310  * Import @c n elements in the specified array, opposite of @c zfree_drop().
6311  *
6312  * @param zone          The zone to import elements from
6313  * @param elems         The array to import into
6314  * @param n             The number of elements to import. Must be non zero,
6315  *                      and smaller than @c zone->z_elems_free.
6316  */
6317 __header_always_inline vm_size_t
zalloc_import(zone_t zone,vm_offset_t * elems,zalloc_flags_t flags,uint32_t n)6318 zalloc_import(
6319 	zone_t                  zone,
6320 	vm_offset_t            *elems,
6321 	zalloc_flags_t          flags,
6322 	uint32_t                n)
6323 {
6324 	vm_offset_t esize = zone_elem_outer_size(zone);
6325 	vm_offset_t offs  = zone_elem_inner_offs(zone);
6326 	zone_stats_t zs;
6327 	int cpu = cpu_number();
6328 	uint32_t i = 0;
6329 
6330 	zs = zpercpu_get_cpu(zone->z_stats, cpu);
6331 
6332 	if (__improbable(zone_caching_disabled < 0)) {
6333 		/*
6334 		 * In the first 10s after boot, mess with
6335 		 * the scan position in order to make early
6336 		 * allocations patterns less predictable.
6337 		 */
6338 		zone_early_scramble_rr(zone, cpu, zs);
6339 	}
6340 
6341 	do {
6342 		vm_offset_t page, eidx, size = 0;
6343 		struct zone_page_metadata *meta;
6344 
6345 		if (!zone_pva_is_null(zone->z_pageq_partial)) {
6346 			meta = zone_pva_to_meta(zone->z_pageq_partial);
6347 			page = zone_pva_to_addr(zone->z_pageq_partial);
6348 		} else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6349 			meta = zone_pva_to_meta(zone->z_pageq_empty);
6350 			page = zone_pva_to_addr(zone->z_pageq_empty);
6351 			zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6352 		} else {
6353 			zone_accounting_panic(zone, "z_elems_free corruption");
6354 		}
6355 
6356 		zone_meta_validate(zone, meta, page);
6357 
6358 		vm_offset_t old_size = meta->zm_alloc_size;
6359 		vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6360 
6361 		do {
6362 			eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6363 			elems[i++] = page + offs + eidx * esize;
6364 			size += esize;
6365 		} while (i < n && old_size + size + esize <= max_size);
6366 
6367 		vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6368 
6369 		if (new_size + esize > max_size) {
6370 			zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6371 		} else if (old_size == 0) {
6372 			/* remove from free, move to intermediate */
6373 			zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6374 		}
6375 	} while (i < n);
6376 
6377 	n = zone_counter_sub(zone, z_elems_free, n);
6378 	if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6379 		zone->z_elems_free_min = n;
6380 	}
6381 
6382 	return zone_elem_inner_size(zone);
6383 }
6384 
6385 __attribute__((always_inline))
6386 static inline vm_offset_t
__zcache_mark_valid(zone_t zone,vm_offset_t addr,zalloc_flags_t flags)6387 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6388 {
6389 #pragma unused(zone, flags)
6390 #if KASAN_CLASSIC || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6391 	vm_offset_t esize = zone_elem_inner_size(zone);
6392 #endif
6393 
6394 #if CONFIG_KERNEL_TAGGING
6395 	if (__probable(zone->z_tbi_tag)) {
6396 		/*
6397 		 * Retrieve the memory tag assigned on free and update the pointer
6398 		 * metadata.
6399 		 */
6400 		addr = vm_memtag_fixup_ptr(addr);
6401 	}
6402 #endif /* CONFIG_KERNEL_TAGGING */
6403 
6404 #if VM_TAG_SIZECLASSES
6405 	if (__improbable(zone->z_uses_tags)) {
6406 		struct zone_page_metadata *meta;
6407 		vm_offset_t offs;
6408 		vm_tag_t *slot;
6409 		vm_tag_t tag;
6410 
6411 		tag  = zalloc_flags_get_tag(flags);
6412 		meta = zone_meta_from_addr(addr);
6413 		offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6414 		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6415 			offs += ptoa(meta->zm_page_index);
6416 		}
6417 
6418 		slot = zba_extra_ref_ptr(meta->zm_bitmap,
6419 		    Z_FAST_QUO(offs, zone->z_quo_magic));
6420 		*slot = tag;
6421 
6422 		vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6423 		    (long)esize);
6424 	}
6425 #endif /* VM_TAG_SIZECLASSES */
6426 
6427 #if CONFIG_PROB_GZALLOC
6428 	if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6429 		addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6430 	}
6431 #endif
6432 
6433 #if KASAN_CLASSIC
6434 	/*
6435 	 * KASAN_CLASSIC integration of kalloc heaps are handled by kalloc_ext()
6436 	 */
6437 	if ((flags & Z_SKIP_KASAN) == 0) {
6438 		kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6439 		    (flags & Z_PCPU), __builtin_frame_address(0));
6440 	}
6441 #endif /* KASAN_CLASSIC */
6442 
6443 	return addr;
6444 }
6445 
6446 __attribute__((always_inline))
6447 void *
zcache_mark_valid(zone_t zone,void * addr)6448 zcache_mark_valid(zone_t zone, void *addr)
6449 {
6450 	addr = (void *)__zcache_mark_valid(zone, (vm_offset_t)addr, 0);
6451 	ZALLOC_LOG(zone, (vm_offset_t)addr, 1);
6452 	return addr;
6453 }
6454 
6455 /*!
6456  * @function zalloc_return
6457  *
6458  * @brief
6459  * Performs the tail-end of the work required on allocations before the caller
6460  * uses them.
6461  *
6462  * @discussion
6463  * This function is called without any zone lock held,
6464  * and preemption back to the state it had when @c zalloc_ext() was called.
6465  *
6466  * @param zone          The zone we're allocating from.
6467  * @param addr          The element we just allocated.
6468  * @param flags         The flags passed to @c zalloc_ext() (for Z_ZERO).
6469  * @param elem_size     The element size for this zone.
6470  */
6471 __attribute__((always_inline))
6472 static struct kalloc_result
zalloc_return(zone_t zone,vm_offset_t addr,zalloc_flags_t flags,vm_offset_t elem_size)6473 zalloc_return(
6474 	zone_t                  zone,
6475 	vm_offset_t             addr,
6476 	zalloc_flags_t          flags,
6477 	vm_offset_t             elem_size)
6478 {
6479 	addr = __zcache_mark_valid(zone, addr, flags);
6480 #if ZALLOC_ENABLE_ZERO_CHECK
6481 	zalloc_validate_element(zone, addr, elem_size, flags);
6482 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6483 	ZALLOC_LOG(zone, addr, 1);
6484 
6485 	DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6486 	return (struct kalloc_result){ (void *)addr, elem_size };
6487 }
6488 
6489 static vm_size_t
zalloc_get_shared_threshold(zone_t zone,vm_size_t esize)6490 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
6491 {
6492 	if (esize <= 512) {
6493 		return zone_early_thres_mul * page_size / 4;
6494 	} else if (esize < 2048) {
6495 		return zone_early_thres_mul * esize * 8;
6496 	}
6497 	return zone_early_thres_mul * zone->z_chunk_elems * esize;
6498 }
6499 
6500 __attribute__((noinline))
6501 static struct kalloc_result
zalloc_item(zone_t zone,zone_stats_t zstats,zalloc_flags_t flags)6502 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6503 {
6504 	vm_offset_t esize, addr;
6505 	zone_stats_t zs;
6506 
6507 	zone_lock_nopreempt_check_contention(zone);
6508 
6509 	zs = zpercpu_get(zstats);
6510 	if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6511 		if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6512 			zone_expand_async_schedule_if_allowed(zone);
6513 		} else {
6514 			zone_expand_locked(zone, flags);
6515 		}
6516 		if (__improbable(zone->z_elems_free == 0)) {
6517 			zs->zs_alloc_fail++;
6518 			zone_unlock(zone);
6519 			if (__improbable(flags & Z_NOFAIL)) {
6520 				zone_nofail_panic(zone);
6521 			}
6522 			DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6523 			return (struct kalloc_result){ };
6524 		}
6525 	}
6526 
6527 	esize = zalloc_import(zone, &addr, flags, 1);
6528 	zs->zs_mem_allocated += esize;
6529 
6530 	if (__improbable(!zone_share_always &&
6531 	    !os_atomic_load(&zs->zs_alloc_not_shared, relaxed))) {
6532 		if (flags & Z_SET_NOTSHARED) {
6533 			vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
6534 
6535 			if (zs->zs_mem_allocated >= shared_threshold) {
6536 				zpercpu_foreach(zs_cpu, zstats) {
6537 					os_atomic_store(&zs_cpu->zs_alloc_not_shared, 1, relaxed);
6538 				}
6539 			}
6540 		}
6541 	}
6542 	zone_unlock(zone);
6543 
6544 	return zalloc_return(zone, addr, flags, esize);
6545 }
6546 
6547 static void
zalloc_cached_import(zone_t zone,zalloc_flags_t flags,zone_cache_t cache)6548 zalloc_cached_import(
6549 	zone_t                  zone,
6550 	zalloc_flags_t          flags,
6551 	zone_cache_t            cache)
6552 {
6553 	uint16_t n_elems = zc_mag_size();
6554 
6555 	zone_lock_nopreempt(zone);
6556 
6557 	if (__probable(!zone_caching_disabled &&
6558 	    zone->z_elems_free > zone->z_elems_rsv / 2)) {
6559 		if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6560 			zone_expand_async_schedule_if_allowed(zone);
6561 		}
6562 		if (zone->z_elems_free < n_elems) {
6563 			n_elems = (uint16_t)zone->z_elems_free;
6564 		}
6565 		zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6566 		cache->zc_alloc_cur = n_elems;
6567 	}
6568 
6569 	zone_unlock_nopreempt(zone);
6570 }
6571 
6572 static void
zalloc_cached_depot_recirculate(zone_t zone,uint32_t depot_max,zone_cache_t cache,smr_t smr)6573 zalloc_cached_depot_recirculate(
6574 	zone_t                  zone,
6575 	uint32_t                depot_max,
6576 	zone_cache_t            cache,
6577 	smr_t                   smr)
6578 {
6579 	smr_seq_t seq;
6580 	uint32_t n;
6581 
6582 	zone_recirc_lock_nopreempt_check_contention(zone);
6583 
6584 	n = cache->zc_depot.zd_empty;
6585 	if (n >= depot_max) {
6586 		zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6587 		    n - depot_max / 2, NULL);
6588 	}
6589 
6590 	n = cache->zc_depot.zd_full;
6591 	if (smr && n) {
6592 		/*
6593 		 * if SMR is in use, it means smr_poll() failed,
6594 		 * so rotate the entire chunk of magazines in order
6595 		 * to let the sequence numbers age.
6596 		 */
6597 		seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6598 		    n, NULL);
6599 		smr_deferred_advance_commit(smr, seq);
6600 	}
6601 
6602 	n = depot_max - cache->zc_depot.zd_empty;
6603 	if (n > zone->z_recirc.zd_full) {
6604 		n = zone->z_recirc.zd_full;
6605 	}
6606 
6607 	if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6608 		zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6609 		    n, zone);
6610 	}
6611 
6612 	zone_recirc_unlock_nopreempt(zone);
6613 }
6614 
6615 static void
zalloc_cached_reuse_smr(zone_t z,zone_cache_t cache,zone_magazine_t mag)6616 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6617 {
6618 	zone_smr_free_cb_t zc_free = cache->zc_free;
6619 	vm_size_t esize = zone_elem_inner_size(z);
6620 
6621 	for (uint16_t i = 0; i < zc_mag_size(); i++) {
6622 		vm_offset_t elem = mag->zm_elems[i];
6623 
6624 		zc_free((void *)elem, zone_elem_inner_size(z));
6625 		elem = __zcache_mark_invalid(z, elem,
6626 		    ZFREE_PACK_SIZE(esize, esize));
6627 		mag->zm_elems[i] = elem;
6628 	}
6629 }
6630 
6631 static void
zalloc_cached_recirculate(zone_t zone,zone_cache_t cache)6632 zalloc_cached_recirculate(
6633 	zone_t                  zone,
6634 	zone_cache_t            cache)
6635 {
6636 	zone_magazine_t mag = NULL;
6637 
6638 	zone_recirc_lock_nopreempt_check_contention(zone);
6639 
6640 	if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6641 		mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6642 		if (zone_cache_smr(cache)) {
6643 			zalloc_cached_reuse_smr(zone, cache, mag);
6644 		}
6645 		mag = zone_magazine_replace(cache, mag, false);
6646 		zone_depot_insert_head_empty(&zone->z_recirc, mag);
6647 	}
6648 
6649 	zone_recirc_unlock_nopreempt(zone);
6650 }
6651 
6652 __attribute__((noinline))
6653 static zone_cache_t
zalloc_cached_prime(zone_t zone,zone_cache_ops_t ops,zalloc_flags_t flags,zone_cache_t cache)6654 zalloc_cached_prime(
6655 	zone_t                  zone,
6656 	zone_cache_ops_t        ops,
6657 	zalloc_flags_t          flags,
6658 	zone_cache_t            cache)
6659 {
6660 	zone_magazine_t mag = NULL;
6661 	uint32_t depot_max;
6662 	smr_t smr;
6663 
6664 	depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6665 	if (depot_max) {
6666 		smr = zone_cache_smr(cache);
6667 
6668 		zone_depot_lock_nopreempt(cache);
6669 
6670 		if (!zone_depot_poll(&cache->zc_depot, smr)) {
6671 			zalloc_cached_depot_recirculate(zone, depot_max, cache,
6672 			    smr);
6673 		}
6674 
6675 		if (__probable(cache->zc_depot.zd_full)) {
6676 			mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6677 			if (zone_cache_smr(cache)) {
6678 				zalloc_cached_reuse_smr(zone, cache, mag);
6679 			}
6680 			mag = zone_magazine_replace(cache, mag, false);
6681 			zone_depot_insert_head_empty(&cache->zc_depot, mag);
6682 		}
6683 
6684 		zone_depot_unlock_nopreempt(cache);
6685 	} else if (zone->z_recirc.zd_full) {
6686 		zalloc_cached_recirculate(zone, cache);
6687 	}
6688 
6689 	if (__probable(cache->zc_alloc_cur)) {
6690 		return cache;
6691 	}
6692 
6693 	if (ops == NULL) {
6694 		zalloc_cached_import(zone, flags, cache);
6695 		if (__probable(cache->zc_alloc_cur)) {
6696 			return cache;
6697 		}
6698 	}
6699 
6700 	return NULL;
6701 }
6702 
6703 __attribute__((always_inline))
6704 static inline zone_cache_t
zalloc_cached_get_pcpu_cache(zone_t zone,zone_cache_ops_t ops,int cpu,zalloc_flags_t flags)6705 zalloc_cached_get_pcpu_cache(
6706 	zone_t                  zone,
6707 	zone_cache_ops_t        ops,
6708 	int                     cpu,
6709 	zalloc_flags_t          flags)
6710 {
6711 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6712 
6713 	if (__probable(cache->zc_alloc_cur != 0)) {
6714 		return cache;
6715 	}
6716 
6717 	if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6718 		zone_cache_swap_magazines(cache);
6719 		return cache;
6720 	}
6721 
6722 	return zalloc_cached_prime(zone, ops, flags, cache);
6723 }
6724 
6725 
6726 /*!
6727  * @function zalloc_ext
6728  *
6729  * @brief
6730  * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6731  */
6732 struct kalloc_result
zalloc_ext(zone_t zone,zone_stats_t zstats,zalloc_flags_t flags)6733 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6734 {
6735 	/*
6736 	 * KASan uses zalloc() for fakestack, which can be called anywhere.
6737 	 * However, we make sure these calls can never block.
6738 	 */
6739 	assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6740 #if KASAN_FAKESTACK
6741 	    zone->z_kasan_fakestacks ||
6742 #endif /* KASAN_FAKESTACK */
6743 	    ml_get_interrupts_enabled() ||
6744 	    ml_is_quiescing() ||
6745 	    debug_mode_active(),
6746 	    "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6747 
6748 	/*
6749 	 * Make sure Z_NOFAIL was not obviously misused
6750 	 */
6751 	if (flags & Z_NOFAIL) {
6752 		assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6753 	}
6754 
6755 #if VM_TAG_SIZECLASSES
6756 	if (__improbable(zone->z_uses_tags)) {
6757 		vm_tag_t tag = zalloc_flags_get_tag(flags);
6758 
6759 		if (flags & Z_VM_TAG_BT_BIT) {
6760 			tag = vm_tag_bt() ?: tag;
6761 		}
6762 		if (tag != VM_KERN_MEMORY_NONE) {
6763 			tag = vm_tag_will_update_zone(tag, zone->z_tags_sizeclass,
6764 			    flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6765 		}
6766 		if (tag == VM_KERN_MEMORY_NONE) {
6767 			zone_security_flags_t zsflags = zone_security_config(zone);
6768 
6769 			if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6770 				tag = VM_KERN_MEMORY_KALLOC_DATA;
6771 			} else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6772 			    zsflags.z_kalloc_type) {
6773 				tag = VM_KERN_MEMORY_KALLOC_TYPE;
6774 			} else {
6775 				tag = VM_KERN_MEMORY_KALLOC;
6776 			}
6777 		}
6778 		flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6779 	}
6780 #endif /* VM_TAG_SIZECLASSES */
6781 
6782 	disable_preemption();
6783 
6784 #if ZALLOC_ENABLE_ZERO_CHECK
6785 	if (zalloc_skip_zero_check()) {
6786 		flags |= Z_NOZZC;
6787 	}
6788 #endif
6789 
6790 	if (zone->z_pcpu_cache) {
6791 		zone_cache_t cache;
6792 		vm_offset_t index, addr, esize;
6793 		int cpu = cpu_number();
6794 
6795 		cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6796 		if (__probable(cache)) {
6797 			esize = zone_elem_inner_size(zone);
6798 			zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6799 			index = --cache->zc_alloc_cur;
6800 			addr  = cache->zc_alloc_elems[index];
6801 			cache->zc_alloc_elems[index] = 0;
6802 			enable_preemption();
6803 			return zalloc_return(zone, addr, flags, esize);
6804 		}
6805 	}
6806 
6807 	__attribute__((musttail))
6808 	return zalloc_item(zone, zstats, flags);
6809 }
6810 
6811 __attribute__((always_inline))
6812 static inline zstack_t
zcache_alloc_stack_from_cpu(zone_id_t zid,zone_cache_t cache,zstack_t stack,uint32_t n,zone_cache_ops_t ops)6813 zcache_alloc_stack_from_cpu(
6814 	zone_id_t               zid,
6815 	zone_cache_t            cache,
6816 	zstack_t                stack,
6817 	uint32_t                n,
6818 	zone_cache_ops_t        ops)
6819 {
6820 	vm_offset_t *p;
6821 
6822 	n = MIN(n, cache->zc_alloc_cur);
6823 	p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6824 	cache->zc_alloc_cur -= n;
6825 	stack.z_count += n;
6826 
6827 	do {
6828 		vm_offset_t e = *--p;
6829 
6830 		*p = 0;
6831 		if (ops) {
6832 			e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6833 		} else {
6834 			e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6835 		}
6836 		zstack_push_no_delta(&stack, (void *)e);
6837 	} while (--n > 0);
6838 
6839 	return stack;
6840 }
6841 
6842 __attribute__((noinline))
6843 static zstack_t
zcache_alloc_fail(zone_id_t zid,zstack_t stack,uint32_t count)6844 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6845 {
6846 	zone_t zone = zone_by_id(zid);
6847 	zone_stats_t zstats = zone->z_stats;
6848 	int cpu;
6849 
6850 	count -= stack.z_count;
6851 
6852 	disable_preemption();
6853 	cpu = cpu_number();
6854 	zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6855 	    count * zone_elem_inner_size(zone);
6856 	zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6857 	enable_preemption();
6858 
6859 	return stack;
6860 }
6861 
6862 __attribute__((always_inline))
6863 static zstack_t
zcache_alloc_n_ext(zone_id_t zid,uint32_t count,zalloc_flags_t flags,zone_cache_ops_t ops)6864 zcache_alloc_n_ext(
6865 	zone_id_t               zid,
6866 	uint32_t                count,
6867 	zalloc_flags_t          flags,
6868 	zone_cache_ops_t        ops)
6869 {
6870 	zstack_t stack = { };
6871 	zone_cache_t cache;
6872 	zone_t zone;
6873 	int cpu;
6874 
6875 	disable_preemption();
6876 	cpu  = cpu_number();
6877 	zone = zone_by_id(zid);
6878 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6879 	    count * zone_elem_inner_size(zone);
6880 
6881 	for (;;) {
6882 		cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6883 		if (__probable(cache)) {
6884 			stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6885 			    count - stack.z_count, ops);
6886 			enable_preemption();
6887 		} else {
6888 			void *o;
6889 
6890 			if (ops) {
6891 				enable_preemption();
6892 				o = ops->zc_op_alloc(zid, flags);
6893 			} else {
6894 				o = zalloc_item(zone, zone->z_stats, flags).addr;
6895 			}
6896 			if (__improbable(o == NULL)) {
6897 				return zcache_alloc_fail(zid, stack, count);
6898 			}
6899 			if (ops) {
6900 				os_atomic_inc(&zone->z_elems_avail, relaxed);
6901 			}
6902 			zstack_push(&stack, o);
6903 		}
6904 
6905 		if (stack.z_count == count) {
6906 			break;
6907 		}
6908 
6909 		disable_preemption();
6910 		cpu = cpu_number();
6911 	}
6912 
6913 	ZALLOC_LOG(zone, stack.z_head, stack.z_count);
6914 
6915 	return stack;
6916 }
6917 
6918 zstack_t
zalloc_n(zone_id_t zid,uint32_t count,zalloc_flags_t flags)6919 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
6920 {
6921 	return zcache_alloc_n_ext(zid, count, flags, NULL);
6922 }
6923 
zstack_t(zcache_alloc_n)6924 zstack_t
6925 (zcache_alloc_n)(
6926 	zone_id_t               zid,
6927 	uint32_t                count,
6928 	zalloc_flags_t          flags,
6929 	zone_cache_ops_t        ops)
6930 {
6931 	__builtin_assume(ops != NULL);
6932 	return zcache_alloc_n_ext(zid, count, flags, ops);
6933 }
6934 
6935 __attribute__((always_inline))
6936 void *
zalloc(zone_t zov)6937 zalloc(zone_t zov)
6938 {
6939 	return zalloc_flags(zov, Z_WAITOK);
6940 }
6941 
6942 __attribute__((always_inline))
6943 void *
zalloc_noblock(zone_t zov)6944 zalloc_noblock(zone_t zov)
6945 {
6946 	return zalloc_flags(zov, Z_NOWAIT);
6947 }
6948 
6949 void *
6950 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
6951 {
6952 	zone_t zone = zov->z_self;
6953 	zone_stats_t zstats = zov->z_stats;
6954 
6955 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6956 	assert(!zone->z_percpu && !zone->z_permanent);
6957 	return zalloc_ext(zone, zstats, flags).addr;
6958 }
6959 
6960 __attribute__((always_inline))
6961 void *
6962 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6963 {
6964 	return (zalloc_flags)(zone_by_id(zid), flags);
6965 }
6966 
6967 void *
6968 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6969 {
6970 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6971 	zone_t zone = zone_by_id(zid);
6972 	zone_stats_t zstats = zone->z_stats;
6973 	struct kalloc_result kr;
6974 
6975 	kr = zalloc_ext(zone, zstats, flags);
6976 #if ZSECURITY_CONFIG(READ_ONLY)
6977 	assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6978 	if (kr.addr) {
6979 		zone_require_ro(zid, kr.size, kr.addr);
6980 	}
6981 #endif
6982 	return kr.addr;
6983 }
6984 
6985 #if ZSECURITY_CONFIG(READ_ONLY)
6986 
6987 __attribute__((always_inline))
6988 static bool
from_current_stack(vm_offset_t addr,vm_size_t size)6989 from_current_stack(vm_offset_t addr, vm_size_t size)
6990 {
6991 	vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6992 	vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6993 
6994 	addr = vm_memtag_canonicalize_address(addr);
6995 
6996 	return (addr >= start) && (addr + size < end);
6997 }
6998 
6999 /*
7000  * Check if an address is from const memory i.e TEXT or DATA CONST segements
7001  * or the SECURITY_READ_ONLY_LATE section.
7002  */
7003 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7004 __attribute__((always_inline))
7005 static bool
from_const_memory(const vm_offset_t addr,vm_size_t size)7006 from_const_memory(const vm_offset_t addr, vm_size_t size)
7007 {
7008 	return rorgn_contains(addr, size, true);
7009 }
7010 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7011 __attribute__((always_inline))
7012 static bool
from_const_memory(const vm_offset_t addr,vm_size_t size)7013 from_const_memory(const vm_offset_t addr, vm_size_t size)
7014 {
7015 #pragma unused(addr, size)
7016 	return true;
7017 }
7018 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7019 
7020 __abortlike
7021 static void
zalloc_ro_mut_validation_panic(zone_id_t zid,void * elem,const vm_offset_t src,vm_size_t src_size)7022 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
7023     const vm_offset_t src, vm_size_t src_size)
7024 {
7025 	vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
7026 	vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
7027 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7028 	extern vm_offset_t rorgn_begin;
7029 	extern vm_offset_t rorgn_end;
7030 #else
7031 	vm_offset_t const rorgn_begin = 0;
7032 	vm_offset_t const rorgn_end = 0;
7033 #endif
7034 
7035 	if (from_ro_map(src, src_size)) {
7036 		zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
7037 		zone_t dst_zone = &zone_array[zid];
7038 		panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
7039 		    " (expected: %s, actual: %s", (void *)src, elem, src_zone->z_name,
7040 		    dst_zone->z_name);
7041 	}
7042 
7043 	panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
7044 	    "current stack (%p - %p) or const memory (phys %p - %p)",
7045 	    (void *)src, (void*)kvtophys(src),
7046 	    (void *)zone_info.zi_ro_range.min_address,
7047 	    (void *)zone_info.zi_ro_range.max_address,
7048 	    (void *)stack_start, (void *)stack_end,
7049 	    (void *)rorgn_begin, (void *)rorgn_end);
7050 }
7051 
7052 __attribute__((always_inline))
7053 static void
zalloc_ro_mut_validate_src(zone_id_t zid,void * elem,const vm_offset_t src,vm_size_t src_size)7054 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
7055     const vm_offset_t src, vm_size_t src_size)
7056 {
7057 	if (from_current_stack(src, src_size) ||
7058 	    (from_ro_map(src, src_size) &&
7059 	    zid == zone_index_from_ptr((void *)src)) ||
7060 	    from_const_memory(src, src_size)) {
7061 		return;
7062 	}
7063 	zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
7064 }
7065 
7066 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7067 
7068 __attribute__((noinline))
7069 void
zalloc_ro_mut(zone_id_t zid,void * elem,vm_offset_t offset,const void * new_data,vm_size_t new_data_size)7070 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
7071     const void *new_data, vm_size_t new_data_size)
7072 {
7073 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7074 
7075 #if ZSECURITY_CONFIG(READ_ONLY)
7076 	bool skip_src_check = false;
7077 
7078 	/*
7079 	 * The OSEntitlements RO-zone is a little differently treated. For more
7080 	 * information: rdar://100518485.
7081 	 */
7082 	if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
7083 		code_signing_config_t cs_config = 0;
7084 
7085 		code_signing_configuration(NULL, &cs_config);
7086 		if (cs_config & CS_CONFIG_CSM_ENABLED) {
7087 			skip_src_check = true;
7088 		}
7089 	}
7090 
7091 	if (skip_src_check == false) {
7092 		zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
7093 		    new_data_size);
7094 	}
7095 	pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
7096 	    (vm_offset_t) new_data, new_data_size);
7097 #else
7098 	(void)zid;
7099 	memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
7100 #endif
7101 }
7102 
7103 __attribute__((noinline))
7104 uint64_t
zalloc_ro_mut_atomic(zone_id_t zid,void * elem,vm_offset_t offset,zro_atomic_op_t op,uint64_t value)7105 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
7106     zro_atomic_op_t op, uint64_t value)
7107 {
7108 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7109 
7110 #if ZSECURITY_CONFIG(READ_ONLY)
7111 	value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
7112 #else
7113 	(void)zid;
7114 	value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
7115 #endif
7116 	return value;
7117 }
7118 
7119 void
zalloc_ro_clear(zone_id_t zid,void * elem,vm_offset_t offset,vm_size_t size)7120 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
7121 {
7122 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7123 #if ZSECURITY_CONFIG(READ_ONLY)
7124 	pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
7125 #else
7126 	(void)zid;
7127 	bzero((void *)((uintptr_t)elem + offset), size);
7128 #endif
7129 }
7130 
7131 /*
7132  * This function will run in the PPL and needs to be robust
7133  * against an attacker with arbitrary kernel write.
7134  */
7135 
7136 #if ZSECURITY_CONFIG(READ_ONLY)
7137 
7138 __abortlike
7139 static void
zone_id_require_ro_panic(zone_id_t zid,void * addr)7140 zone_id_require_ro_panic(zone_id_t zid, void *addr)
7141 {
7142 	struct zone_size_params p = zone_ro_size_params[zid];
7143 	vm_offset_t elem = (vm_offset_t)addr;
7144 	uint32_t zindex;
7145 	zone_t other;
7146 	zone_t zone = &zone_array[zid];
7147 
7148 	if (!from_ro_map(addr, 1)) {
7149 		panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7150 	}
7151 
7152 	if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7153 		panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7154 	}
7155 
7156 	zindex = zone_index_from_ptr(addr);
7157 	other = &zone_array[zindex];
7158 	if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7159 		panic("zone_require_ro failed: invalid zone index %d "
7160 		    "(addr: %p, expected: %s%s)", zindex,
7161 		    addr, zone_heap_name(zone), zone->z_name);
7162 	} else {
7163 		panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7164 		    "(addr: %p, expected: %s%s)",
7165 		    zindex, zone_heap_name(other), other->z_name,
7166 		    addr, zone_heap_name(zone), zone->z_name);
7167 	}
7168 }
7169 
7170 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7171 
7172 __attribute__((always_inline))
7173 void
zone_require_ro(zone_id_t zid,vm_size_t elem_size __unused,void * addr)7174 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7175 {
7176 #if ZSECURITY_CONFIG(READ_ONLY)
7177 	struct zone_size_params p = zone_ro_size_params[zid];
7178 	vm_offset_t elem = (vm_offset_t)addr;
7179 
7180 	if (!from_ro_map(addr, 1) ||
7181 	    !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7182 	    zid != zone_meta_from_addr(elem)->zm_index) {
7183 		zone_id_require_ro_panic(zid, addr);
7184 	}
7185 #else
7186 #pragma unused(zid, addr)
7187 #endif
7188 }
7189 
7190 void *
7191 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7192 {
7193 	zone_t zone = zov.zov_view->zv_zone;
7194 	zone_stats_t zstats = zov.zov_view->zv_stats;
7195 
7196 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7197 	assert(zone->z_percpu);
7198 	flags |= Z_PCPU;
7199 	return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags).addr);
7200 }
7201 
7202 static void *
_zalloc_permanent(zone_t zone,vm_size_t size,vm_offset_t mask)7203 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7204 {
7205 	struct zone_page_metadata *page_meta;
7206 	vm_offset_t offs, addr;
7207 	zone_pva_t pva;
7208 
7209 	assert(ml_get_interrupts_enabled() ||
7210 	    ml_is_quiescing() ||
7211 	    debug_mode_active() ||
7212 	    startup_phase < STARTUP_SUB_EARLY_BOOT);
7213 
7214 	size = (size + mask) & ~mask;
7215 	assert(size <= PAGE_SIZE);
7216 
7217 	zone_lock(zone);
7218 	assert(zone->z_self == zone);
7219 
7220 	for (;;) {
7221 		pva = zone->z_pageq_partial;
7222 		while (!zone_pva_is_null(pva)) {
7223 			page_meta = zone_pva_to_meta(pva);
7224 			if (page_meta->zm_bump + size <= PAGE_SIZE) {
7225 				goto found;
7226 			}
7227 			pva = page_meta->zm_page_next;
7228 		}
7229 
7230 		zone_expand_locked(zone, Z_WAITOK);
7231 	}
7232 
7233 found:
7234 	offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7235 	page_meta->zm_bump = (uint16_t)(offs + size);
7236 	page_meta->zm_alloc_size += size;
7237 	zone->z_elems_free -= size;
7238 	zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7239 
7240 	if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7241 		zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7242 	}
7243 
7244 	zone_unlock(zone);
7245 
7246 	if (zone->z_tbi_tag) {
7247 		addr = vm_memtag_fixup_ptr(offs + zone_pva_to_addr(pva));
7248 	} else {
7249 		addr = offs + zone_pva_to_addr(pva);
7250 	}
7251 
7252 	DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7253 	return (void *)addr;
7254 }
7255 
7256 static void *
_zalloc_permanent_large(size_t size,vm_offset_t mask,vm_tag_t tag)7257 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7258 {
7259 	vm_offset_t addr;
7260 
7261 	kernel_memory_allocate(kernel_map, &addr, size, mask,
7262 	    KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7263 
7264 	return (void *)addr;
7265 }
7266 
7267 void *
zalloc_permanent_tag(vm_size_t size,vm_offset_t mask,vm_tag_t tag)7268 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7269 {
7270 	if (size <= PAGE_SIZE) {
7271 		zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7272 		return _zalloc_permanent(zone, size, mask);
7273 	}
7274 	return _zalloc_permanent_large(size, mask, tag);
7275 }
7276 
7277 void *
zalloc_percpu_permanent(vm_size_t size,vm_offset_t mask)7278 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7279 {
7280 	zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7281 	return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
7282 }
7283 
7284 /*! @} */
7285 #endif /* !ZALLOC_TEST */
7286 #pragma mark zone GC / trimming
7287 #if !ZALLOC_TEST
7288 
7289 static thread_call_data_t zone_trim_callout;
7290 EVENT_DEFINE(ZONE_EXHAUSTED);
7291 
7292 static void
zone_reclaim_chunk(zone_t z,struct zone_page_metadata * meta,uint32_t free_count)7293 zone_reclaim_chunk(
7294 	zone_t                  z,
7295 	struct zone_page_metadata *meta,
7296 	uint32_t                free_count)
7297 {
7298 	vm_address_t page_addr;
7299 	vm_size_t    size_to_free;
7300 	uint32_t     bitmap_ref;
7301 	uint32_t     page_count;
7302 	zone_security_flags_t zsflags = zone_security_config(z);
7303 	bool         sequester = !z->z_destroyed;
7304 	bool         oob_guard = false;
7305 
7306 	if (zone_submap_is_sequestered(zsflags)) {
7307 		/*
7308 		 * If the entire map is sequestered, we can't return the VA.
7309 		 * It stays pinned to the zone forever.
7310 		 */
7311 		sequester = true;
7312 	}
7313 
7314 	zone_meta_queue_pop(z, &z->z_pageq_empty);
7315 
7316 	page_addr  = zone_meta_to_addr(meta);
7317 	page_count = meta->zm_chunk_len;
7318 	oob_guard  = meta->zm_guarded;
7319 
7320 	if (meta->zm_alloc_size) {
7321 		zone_metadata_corruption(z, meta, "alloc_size");
7322 	}
7323 	if (z->z_percpu) {
7324 		if (page_count != 1) {
7325 			zone_metadata_corruption(z, meta, "page_count");
7326 		}
7327 		size_to_free = ptoa(z->z_chunk_pages);
7328 		zone_remove_wired_pages(z, z->z_chunk_pages);
7329 	} else {
7330 		if (page_count > z->z_chunk_pages) {
7331 			zone_metadata_corruption(z, meta, "page_count");
7332 		}
7333 		if (page_count < z->z_chunk_pages) {
7334 			/* Dequeue non populated VA from z_pageq_va */
7335 			zone_meta_remqueue(z, meta + page_count);
7336 		}
7337 		size_to_free = ptoa(page_count);
7338 		zone_remove_wired_pages(z, page_count);
7339 	}
7340 
7341 	zone_counter_sub(z, z_elems_free, free_count);
7342 	zone_counter_sub(z, z_elems_avail, free_count);
7343 	zone_counter_sub(z, z_wired_empty, page_count);
7344 	zone_counter_sub(z, z_wired_cur, page_count);
7345 
7346 	if (z->z_pcpu_cache == NULL) {
7347 		if (z->z_elems_free_min < free_count) {
7348 			z->z_elems_free_min = 0;
7349 		} else {
7350 			z->z_elems_free_min -= free_count;
7351 		}
7352 	}
7353 	if (z->z_elems_free_wma < free_count) {
7354 		z->z_elems_free_wma = 0;
7355 	} else {
7356 		z->z_elems_free_wma -= free_count;
7357 	}
7358 
7359 	bitmap_ref = 0;
7360 	if (sequester) {
7361 		if (meta->zm_inline_bitmap) {
7362 			for (int i = 0; i < meta->zm_chunk_len; i++) {
7363 				meta[i].zm_bitmap = 0;
7364 			}
7365 		} else {
7366 			bitmap_ref = meta->zm_bitmap;
7367 			meta->zm_bitmap = 0;
7368 		}
7369 		meta->zm_chunk_len = 0;
7370 	} else {
7371 		if (!meta->zm_inline_bitmap) {
7372 			bitmap_ref = meta->zm_bitmap;
7373 		}
7374 		zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7375 		bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7376 	}
7377 
7378 #if CONFIG_ZLEAKS
7379 	if (__improbable(zleak_should_disable_for_zone(z) &&
7380 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7381 		thread_call_enter(&zone_leaks_callout);
7382 	}
7383 #endif /* CONFIG_ZLEAKS */
7384 
7385 	zone_unlock(z);
7386 
7387 	if (bitmap_ref) {
7388 		zone_bits_free(bitmap_ref);
7389 	}
7390 
7391 	/* Free the pages for metadata and account for them */
7392 #if KASAN_CLASSIC
7393 	if (z->z_percpu) {
7394 		for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7395 			kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7396 			    zone_elem_outer_size(z),
7397 			    zone_elem_outer_offs(z),
7398 			    zone_elem_redzone(z));
7399 		}
7400 	} else {
7401 		kasan_zmem_remove(page_addr, size_to_free,
7402 		    zone_elem_outer_size(z),
7403 		    zone_elem_outer_offs(z),
7404 		    zone_elem_redzone(z));
7405 	}
7406 #endif /* KASAN_CLASSIC */
7407 
7408 	if (sequester) {
7409 		kernel_memory_depopulate(page_addr, size_to_free,
7410 		    KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7411 	} else {
7412 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7413 		kmem_free(zone_submap(zsflags), page_addr,
7414 		    ptoa(z->z_chunk_pages + oob_guard));
7415 		if (oob_guard) {
7416 			os_atomic_dec(&zone_guard_pages, relaxed);
7417 		}
7418 	}
7419 
7420 	thread_yield_to_preemption();
7421 
7422 	zone_lock(z);
7423 
7424 	if (sequester) {
7425 		zone_meta_queue_push(z, &z->z_pageq_va, meta);
7426 	}
7427 }
7428 
7429 static void
zone_reclaim_elements(zone_t z,uint16_t n,vm_offset_t * elems)7430 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7431 {
7432 	z_debug_assert(n <= zc_mag_size());
7433 
7434 	for (uint16_t i = 0; i < n; i++) {
7435 		vm_offset_t addr = elems[i];
7436 		elems[i] = 0;
7437 		zfree_drop(z, addr);
7438 	}
7439 
7440 	z->z_elems_free += n;
7441 }
7442 
7443 static void
zcache_reclaim_elements(zone_id_t zid,uint16_t n,vm_offset_t * elems)7444 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7445 {
7446 	z_debug_assert(n <= zc_mag_size());
7447 	zone_cache_ops_t ops = zcache_ops[zid];
7448 
7449 	for (uint16_t i = 0; i < n; i++) {
7450 		vm_offset_t addr = elems[i];
7451 		elems[i] = 0;
7452 		addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7453 		ops->zc_op_free(zid, (void *)addr);
7454 	}
7455 
7456 	os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7457 }
7458 
7459 static void
zone_depot_trim(zone_t z,uint32_t target,struct zone_depot * zd)7460 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7461 {
7462 	zpercpu_foreach(zc, z->z_pcpu_cache) {
7463 		zone_depot_lock(zc);
7464 
7465 		if (zc->zc_depot.zd_full > (target + 1) / 2) {
7466 			uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7467 			zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7468 		}
7469 
7470 		if (zc->zc_depot.zd_empty > target / 2) {
7471 			uint32_t n = zc->zc_depot.zd_empty - target / 2;
7472 			zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7473 		}
7474 
7475 		zone_depot_unlock(zc);
7476 	}
7477 }
7478 
7479 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7480 	ZONE_RECLAIM_TRIM,
7481 	ZONE_RECLAIM_DRAIN,
7482 	ZONE_RECLAIM_DESTROY,
7483 });
7484 
7485 static void
zone_reclaim_pcpu(zone_t z,zone_reclaim_mode_t mode,struct zone_depot * zd)7486 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7487 {
7488 	uint32_t depot_max = 0;
7489 	bool cleanup = mode != ZONE_RECLAIM_TRIM;
7490 
7491 	if (z->z_depot_cleanup) {
7492 		z->z_depot_cleanup = false;
7493 		depot_max = z->z_depot_size;
7494 		cleanup = true;
7495 	}
7496 
7497 	if (cleanup) {
7498 		zone_depot_trim(z, depot_max, zd);
7499 	}
7500 
7501 	if (mode == ZONE_RECLAIM_DESTROY) {
7502 		zpercpu_foreach(zc, z->z_pcpu_cache) {
7503 			zone_reclaim_elements(z, zc->zc_alloc_cur,
7504 			    zc->zc_alloc_elems);
7505 			zone_reclaim_elements(z, zc->zc_free_cur,
7506 			    zc->zc_free_elems);
7507 			zc->zc_alloc_cur = zc->zc_free_cur = 0;
7508 		}
7509 
7510 		z->z_recirc_empty_min = 0;
7511 		z->z_recirc_empty_wma = 0;
7512 		z->z_recirc_full_min = 0;
7513 		z->z_recirc_full_wma = 0;
7514 		z->z_recirc_cont_cur = 0;
7515 		z->z_recirc_cont_wma = 0;
7516 	}
7517 }
7518 
7519 static void
zone_reclaim_recirc_drain(zone_t z,struct zone_depot * zd)7520 zone_reclaim_recirc_drain(zone_t z, struct zone_depot *zd)
7521 {
7522 	assert(zd->zd_empty == 0);
7523 	assert(zd->zd_full == 0);
7524 
7525 	zone_recirc_lock_nopreempt(z);
7526 
7527 	*zd = z->z_recirc;
7528 	if (zd->zd_full == 0) {
7529 		zd->zd_tail = &zd->zd_head;
7530 	}
7531 	zone_depot_init(&z->z_recirc);
7532 	z->z_recirc_empty_min = 0;
7533 	z->z_recirc_empty_wma = 0;
7534 	z->z_recirc_full_min = 0;
7535 	z->z_recirc_full_wma = 0;
7536 
7537 	zone_recirc_unlock_nopreempt(z);
7538 }
7539 
7540 static void
zone_reclaim_recirc_trim(zone_t z,struct zone_depot * zd)7541 zone_reclaim_recirc_trim(zone_t z, struct zone_depot *zd)
7542 {
7543 	for (;;) {
7544 		uint32_t budget = zc_free_batch_size();
7545 		uint32_t count;
7546 		bool done = true;
7547 
7548 		zone_recirc_lock_nopreempt(z);
7549 		count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7550 		    z->z_recirc_empty_min);
7551 		assert(count <= z->z_recirc.zd_empty);
7552 
7553 		if (count > budget) {
7554 			count = budget;
7555 			done  = false;
7556 		}
7557 		if (count) {
7558 			budget -= count;
7559 			zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7560 			z->z_recirc_empty_min -= count;
7561 			z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7562 		}
7563 
7564 		count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT,
7565 		    z->z_recirc_full_min);
7566 		assert(count <= z->z_recirc.zd_full);
7567 
7568 		if (count > budget) {
7569 			count = budget;
7570 			done  = false;
7571 		}
7572 		if (count) {
7573 			zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7574 			z->z_recirc_full_min -= count;
7575 			z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7576 		}
7577 
7578 		zone_recirc_unlock_nopreempt(z);
7579 
7580 		if (done) {
7581 			return;
7582 		}
7583 
7584 		/*
7585 		 * If the number of magazines to reclaim is too large,
7586 		 * we might be keeping preemption disabled for too long.
7587 		 *
7588 		 * Drop and retake the lock to allow for preemption to occur.
7589 		 */
7590 		zone_unlock(z);
7591 		zone_lock(z);
7592 	}
7593 }
7594 
7595 /*!
7596  * @function zone_reclaim
7597  *
7598  * @brief
7599  * Drains or trim the zone.
7600  *
7601  * @discussion
7602  * Draining the zone will free it from all its elements.
7603  *
7604  * Trimming the zone tries to respect the working set size, and avoids draining
7605  * the depot when it's not necessary.
7606  *
7607  * @param z             The zone to reclaim from
7608  * @param mode          The purpose of this reclaim.
7609  */
7610 static void
zone_reclaim(zone_t z,zone_reclaim_mode_t mode)7611 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7612 {
7613 	struct zone_depot zd;
7614 
7615 	zone_depot_init(&zd);
7616 
7617 	zone_lock(z);
7618 
7619 	if (mode == ZONE_RECLAIM_DESTROY) {
7620 		if (!z->z_destructible || z->z_elems_rsv) {
7621 			panic("zdestroy: Zone %s%s isn't destructible",
7622 			    zone_heap_name(z), z->z_name);
7623 		}
7624 
7625 		if (!z->z_self || z->z_expander ||
7626 		    z->z_async_refilling || z->z_expanding_wait) {
7627 			panic("zdestroy: Zone %s%s in an invalid state for destruction",
7628 			    zone_heap_name(z), z->z_name);
7629 		}
7630 
7631 #if !KASAN_CLASSIC
7632 		/*
7633 		 * Unset the valid bit. We'll hit an assert failure on further
7634 		 * operations on this zone, until zinit() is called again.
7635 		 *
7636 		 * Leave the zone valid for KASan as we will see zfree's on
7637 		 * quarantined free elements even after the zone is destroyed.
7638 		 */
7639 		z->z_self = NULL;
7640 #endif
7641 		z->z_destroyed = true;
7642 	} else if (z->z_destroyed) {
7643 		return zone_unlock(z);
7644 	} else if (zone_count_free(z) <= z->z_elems_rsv) {
7645 		/* If the zone is under its reserve level, leave it alone. */
7646 		return zone_unlock(z);
7647 	}
7648 
7649 	if (z->z_pcpu_cache) {
7650 		zone_magazine_t mag;
7651 		uint32_t freed = 0;
7652 
7653 		/*
7654 		 * This is all done with the zone lock held on purpose.
7655 		 * The work here is O(ncpu), which should still be short.
7656 		 *
7657 		 * We need to keep the lock held until we have reclaimed
7658 		 * at least a few magazines, otherwise if the zone has no
7659 		 * free elements outside of the depot, a thread performing
7660 		 * a concurrent allocatiuon could try to grow the zone
7661 		 * while we're trying to drain it.
7662 		 */
7663 		if (mode == ZONE_RECLAIM_TRIM) {
7664 			zone_reclaim_recirc_trim(z, &zd);
7665 		} else {
7666 			zone_reclaim_recirc_drain(z, &zd);
7667 		}
7668 		zone_reclaim_pcpu(z, mode, &zd);
7669 
7670 		if (z->z_chunk_elems) {
7671 			zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7672 			smr_t smr = zone_cache_smr(cache);
7673 
7674 			while (zd.zd_full) {
7675 				mag = zone_depot_pop_head_full(&zd, NULL);
7676 				if (smr) {
7677 					smr_wait(smr, mag->zm_seq);
7678 					zalloc_cached_reuse_smr(z, cache, mag);
7679 					freed += zc_mag_size();
7680 				}
7681 				zone_reclaim_elements(z, zc_mag_size(),
7682 				    mag->zm_elems);
7683 				zone_depot_insert_head_empty(&zd, mag);
7684 
7685 				freed += zc_mag_size();
7686 				if (freed >= zc_free_batch_size()) {
7687 					zone_unlock(z);
7688 					zone_magazine_free_list(&zd);
7689 					thread_yield_to_preemption();
7690 					zone_lock(z);
7691 					freed = 0;
7692 				}
7693 			}
7694 		} else {
7695 			zone_id_t zid = zone_index(z);
7696 
7697 			zone_unlock(z);
7698 
7699 			assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7700 
7701 			while (zd.zd_full) {
7702 				mag = zone_depot_pop_head_full(&zd, NULL);
7703 				zcache_reclaim_elements(zid, zc_mag_size(),
7704 				    mag->zm_elems);
7705 				zone_magazine_free(mag);
7706 			}
7707 
7708 			goto cleanup;
7709 		}
7710 	}
7711 
7712 	while (!zone_pva_is_null(z->z_pageq_empty)) {
7713 		struct zone_page_metadata *meta;
7714 		uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7715 
7716 		if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7717 			limit = MAX(limit, z->z_elems_free -
7718 			    MIN(z->z_elems_free_min, z->z_elems_free_wma));
7719 		}
7720 
7721 		meta  = zone_pva_to_meta(z->z_pageq_empty);
7722 		count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7723 
7724 		if (zone_count_free(z) - count < limit) {
7725 			break;
7726 		}
7727 
7728 		zone_reclaim_chunk(z, meta, count);
7729 	}
7730 
7731 	zone_unlock(z);
7732 
7733 cleanup:
7734 	zone_magazine_free_list(&zd);
7735 }
7736 
7737 void
zone_drain(zone_t zone)7738 zone_drain(zone_t zone)
7739 {
7740 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7741 	lck_mtx_lock(&zone_gc_lock);
7742 	zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7743 	lck_mtx_unlock(&zone_gc_lock);
7744 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7745 }
7746 
7747 void
zcache_drain(zone_id_t zid)7748 zcache_drain(zone_id_t zid)
7749 {
7750 	zone_drain(zone_by_id(zid));
7751 }
7752 
7753 static void
zone_reclaim_all(zone_reclaim_mode_t mode)7754 zone_reclaim_all(zone_reclaim_mode_t mode)
7755 {
7756 	/*
7757 	 * Start with zcaches, so that they flow into the regular zones.
7758 	 *
7759 	 * Then the zones with VA sequester since depopulating
7760 	 * pages will not need to allocate vm map entries for holes,
7761 	 * which will give memory back to the system faster.
7762 	 */
7763 	for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7764 		zone_t z = zone_by_id(zid);
7765 
7766 		if (z->z_self && z->z_chunk_elems == 0) {
7767 			zone_reclaim(z, mode);
7768 		}
7769 	}
7770 	zone_index_foreach(zid) {
7771 		zone_t z = zone_by_id(zid);
7772 
7773 		if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7774 			continue;
7775 		}
7776 		if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7777 		    z->collectable) {
7778 			zone_reclaim(z, mode);
7779 		}
7780 	}
7781 
7782 	zone_index_foreach(zid) {
7783 		zone_t z = zone_by_id(zid);
7784 
7785 		if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7786 			continue;
7787 		}
7788 		if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7789 		    z->collectable) {
7790 			zone_reclaim(z, mode);
7791 		}
7792 	}
7793 
7794 	zone_reclaim(zc_magazine_zone, mode);
7795 }
7796 
7797 void
zone_userspace_reboot_checks(void)7798 zone_userspace_reboot_checks(void)
7799 {
7800 	vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7801 	if (label_zone_size != 0) {
7802 		panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7803 		    ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7804 	}
7805 }
7806 
7807 void
zone_gc(zone_gc_level_t level)7808 zone_gc(zone_gc_level_t level)
7809 {
7810 	zone_reclaim_mode_t mode;
7811 	zone_t largest_zone = NULL;
7812 
7813 	switch (level) {
7814 	case ZONE_GC_TRIM:
7815 		mode = ZONE_RECLAIM_TRIM;
7816 		break;
7817 	case ZONE_GC_DRAIN:
7818 		mode = ZONE_RECLAIM_DRAIN;
7819 		break;
7820 	case ZONE_GC_JETSAM:
7821 		largest_zone = kill_process_in_largest_zone();
7822 		mode = ZONE_RECLAIM_TRIM;
7823 		break;
7824 	}
7825 
7826 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7827 	lck_mtx_lock(&zone_gc_lock);
7828 
7829 	zone_reclaim_all(mode);
7830 
7831 	if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7832 		/*
7833 		 * If we possibly killed a process, but we're still critical,
7834 		 * we need to drain harder.
7835 		 */
7836 		zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7837 		zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7838 	}
7839 
7840 	lck_mtx_unlock(&zone_gc_lock);
7841 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7842 }
7843 
7844 void
zone_gc_trim(void)7845 zone_gc_trim(void)
7846 {
7847 	zone_gc(ZONE_GC_TRIM);
7848 }
7849 
7850 void
zone_gc_drain(void)7851 zone_gc_drain(void)
7852 {
7853 	zone_gc(ZONE_GC_DRAIN);
7854 }
7855 
7856 static bool
zone_trim_needed(zone_t z)7857 zone_trim_needed(zone_t z)
7858 {
7859 	if (z->z_depot_cleanup) {
7860 		return true;
7861 	}
7862 
7863 	if (z->z_async_refilling) {
7864 		/* Don't fight with refill */
7865 		return false;
7866 	}
7867 
7868 	if (z->z_pcpu_cache) {
7869 		uint32_t e_n, f_n;
7870 
7871 		e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7872 		f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
7873 
7874 		if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7875 			return true;
7876 		}
7877 
7878 		if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7879 		    f_n * zc_mag_size() * zone_elem_inner_size(z) >
7880 		    zc_autotrim_size() * Z_WMA_UNIT) {
7881 			return true;
7882 		}
7883 
7884 		return false;
7885 	}
7886 
7887 	if (!zone_pva_is_null(z->z_pageq_empty)) {
7888 		uint32_t n;
7889 
7890 		n = MIN(z->z_elems_free_wma, z->z_elems_free_min);
7891 
7892 		return n >= z->z_elems_rsv + z->z_chunk_elems;
7893 	}
7894 
7895 	return false;
7896 }
7897 
7898 static void
zone_trim_async(__unused thread_call_param_t p0,__unused thread_call_param_t p1)7899 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7900 {
7901 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7902 
7903 	zone_foreach(z) {
7904 		if (!z->collectable || z == zc_magazine_zone) {
7905 			continue;
7906 		}
7907 
7908 		if (zone_trim_needed(z)) {
7909 			lck_mtx_lock(&zone_gc_lock);
7910 			zone_reclaim(z, ZONE_RECLAIM_TRIM);
7911 			lck_mtx_unlock(&zone_gc_lock);
7912 		}
7913 	}
7914 
7915 	if (zone_trim_needed(zc_magazine_zone)) {
7916 		lck_mtx_lock(&zone_gc_lock);
7917 		zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7918 		lck_mtx_unlock(&zone_gc_lock);
7919 	}
7920 
7921 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7922 }
7923 
7924 void
compute_zone_working_set_size(__unused void * param)7925 compute_zone_working_set_size(__unused void *param)
7926 {
7927 	uint32_t zc_auto = zc_enable_level();
7928 	bool needs_trim = false;
7929 
7930 	/*
7931 	 * Keep zone caching disabled until the first proc is made.
7932 	 */
7933 	if (__improbable(zone_caching_disabled < 0)) {
7934 		return;
7935 	}
7936 
7937 	zone_caching_disabled = vm_pool_low();
7938 
7939 	if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
7940 		zc_auto = 0;
7941 	}
7942 
7943 	zone_foreach(z) {
7944 		uint32_t old, wma, cur;
7945 		bool needs_caching = false;
7946 
7947 		if (z->z_self != z) {
7948 			continue;
7949 		}
7950 
7951 		zone_lock(z);
7952 
7953 		zone_recirc_lock_nopreempt(z);
7954 
7955 		if (z->z_pcpu_cache) {
7956 			wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
7957 			z->z_recirc_empty_min = z->z_recirc.zd_empty;
7958 			z->z_recirc_empty_wma = wma;
7959 		} else {
7960 			wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
7961 			z->z_elems_free_min = z->z_elems_free;
7962 			z->z_elems_free_wma = wma;
7963 		}
7964 
7965 		wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
7966 		z->z_recirc_full_min = z->z_recirc.zd_full;
7967 		z->z_recirc_full_wma = wma;
7968 
7969 		/* fixed point decimal of contentions per second */
7970 		old = z->z_recirc_cont_wma;
7971 		cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
7972 		    (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
7973 		cur = (3 * old + cur) / 4;
7974 		zone_recirc_unlock_nopreempt(z);
7975 
7976 		if (z->z_pcpu_cache) {
7977 			uint16_t size = z->z_depot_size;
7978 
7979 			if (zone_exhausted(z)) {
7980 				z->z_depot_size = 0;
7981 				z->z_depot_cleanup = true;
7982 			} else if (size < z->z_depot_limit && cur > zc_grow_level()) {
7983 				/*
7984 				 * lose history on purpose now
7985 				 * that we just grew, to give
7986 				 * the sytem time to adjust.
7987 				 */
7988 				cur  = (zc_grow_level() + zc_shrink_level()) / 2;
7989 				size = size ? (3 * size + 2) / 2 : 2;
7990 				z->z_depot_size = MIN(z->z_depot_limit, size);
7991 			} else if (size > 0 && cur <= zc_shrink_level()) {
7992 				/*
7993 				 * lose history on purpose now
7994 				 * that we just shrunk, to give
7995 				 * the sytem time to adjust.
7996 				 */
7997 				cur = (zc_grow_level() + zc_shrink_level()) / 2;
7998 				z->z_depot_size = size - 1;
7999 				z->z_depot_cleanup = true;
8000 			}
8001 		} else if (!z->z_nocaching && !zone_exhaustible(z) && zc_auto &&
8002 		    old >= zc_auto && cur >= zc_auto) {
8003 			needs_caching = true;
8004 		}
8005 
8006 		z->z_recirc_cont_wma = cur;
8007 		z->z_recirc_cont_cur = 0;
8008 
8009 		if (!needs_trim && zone_trim_needed(z)) {
8010 			needs_trim = true;
8011 		}
8012 
8013 		zone_unlock(z);
8014 
8015 		if (needs_caching) {
8016 			zone_enable_caching(z);
8017 		}
8018 	}
8019 
8020 	if (needs_trim) {
8021 		thread_call_enter(&zone_trim_callout);
8022 	}
8023 }
8024 
8025 #endif /* !ZALLOC_TEST */
8026 #pragma mark vm integration, MIG routines
8027 #if !ZALLOC_TEST
8028 
8029 extern unsigned int stack_total;
8030 #if defined (__x86_64__)
8031 extern unsigned int inuse_ptepages_count;
8032 #endif
8033 
8034 static const char *
panic_print_get_typename(kalloc_type_views_t cur,kalloc_type_views_t * next,bool is_kt_var)8035 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
8036     bool is_kt_var)
8037 {
8038 	if (is_kt_var) {
8039 		next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
8040 		return cur.ktv_var->kt_name;
8041 	} else {
8042 		next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
8043 		return cur.ktv_fixed->kt_zv.zv_name;
8044 	}
8045 }
8046 
8047 static void
panic_print_types_in_zone(zone_t z,const char * debug_str)8048 panic_print_types_in_zone(zone_t z, const char* debug_str)
8049 {
8050 	kalloc_type_views_t kt_cur = {};
8051 	const char *prev_type = "";
8052 	size_t skip_over_site = sizeof("site.") - 1;
8053 	zone_security_flags_t zsflags = zone_security_config(z);
8054 	bool is_kt_var = false;
8055 
8056 	if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8057 		uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
8058 		    kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
8059 		kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
8060 		is_kt_var = true;
8061 	} else {
8062 		kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
8063 	}
8064 
8065 	paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
8066 	    is_kt_var? "type arrays" : "types", debug_str, z->z_name);
8067 
8068 	while (kt_cur.ktv_fixed) {
8069 		kalloc_type_views_t kt_next = {};
8070 		const char *typename = panic_print_get_typename(kt_cur, &kt_next,
8071 		    is_kt_var) + skip_over_site;
8072 		if (strcmp(typename, prev_type) != 0) {
8073 			paniclog_append_noflush("\t%-50s\n", typename);
8074 			prev_type = typename;
8075 		}
8076 		kt_cur = kt_next;
8077 	}
8078 	paniclog_append_noflush("\n");
8079 }
8080 
8081 static void
panic_display_kalloc_types(void)8082 panic_display_kalloc_types(void)
8083 {
8084 	if (kalloc_type_src_zone) {
8085 		panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
8086 	}
8087 	if (kalloc_type_dst_zone) {
8088 		panic_print_types_in_zone(kalloc_type_dst_zone,
8089 		    "addr is being freed to");
8090 	}
8091 }
8092 
8093 static void
zone_find_n_largest(const uint32_t n,zone_t * largest_zones,uint64_t * zone_size)8094 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
8095     uint64_t *zone_size)
8096 {
8097 	zone_index_foreach(zid) {
8098 		zone_t z = &zone_array[zid];
8099 		vm_offset_t size = zone_size_wired(z);
8100 
8101 		if (zid == ZONE_ID_VM_PAGES) {
8102 			continue;
8103 		}
8104 		for (uint32_t i = 0; i < n; i++) {
8105 			if (size > zone_size[i]) {
8106 				largest_zones[i] = z;
8107 				zone_size[i] = size;
8108 				break;
8109 			}
8110 		}
8111 	}
8112 }
8113 
8114 #define NUM_LARGEST_ZONES 5
8115 static void
panic_display_largest_zones(void)8116 panic_display_largest_zones(void)
8117 {
8118 	zone_t largest_zones[NUM_LARGEST_ZONES]  = { NULL };
8119 	uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
8120 
8121 	zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
8122 	    (uint64_t *) &largest_size);
8123 
8124 	paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
8125 	    "Zone Name", "Cur Size", "Free Size");
8126 	for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
8127 		zone_t z = largest_zones[i];
8128 		paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
8129 		    zone_heap_name(z), z->z_name,
8130 		    mach_vm_size_pretty(largest_size[i]),
8131 		    mach_vm_size_unit(largest_size[i]),
8132 		    mach_vm_size_pretty(zone_size_free(z)),
8133 		    mach_vm_size_unit(zone_size_free(z)));
8134 	}
8135 }
8136 
8137 static void
panic_display_zprint(void)8138 panic_display_zprint(void)
8139 {
8140 	panic_display_largest_zones();
8141 	paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
8142 	    (uintptr_t)(kernel_stack_size * stack_total));
8143 #if defined (__x86_64__)
8144 	paniclog_append_noflush("%-20s %10lu\n", "PageTables",
8145 	    (uintptr_t)ptoa(inuse_ptepages_count));
8146 #endif
8147 	paniclog_append_noflush("%-20s %10llu\n", "Kalloc.Large",
8148 	    counter_load(&kalloc_large_total));
8149 
8150 	if (panic_kext_memory_info) {
8151 		mach_memory_info_t *mem_info = panic_kext_memory_info;
8152 
8153 		paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
8154 		for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
8155 			if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
8156 				continue;
8157 			}
8158 			if (mem_info[i].size > (1024 * 1024)) {
8159 				paniclog_append_noflush("%-5lld %10lld\n",
8160 				    mem_info[i].site, mem_info[i].size);
8161 			}
8162 		}
8163 	}
8164 }
8165 
8166 static void
panic_display_zone_info(void)8167 panic_display_zone_info(void)
8168 {
8169 	paniclog_append_noflush("Zone info:\n");
8170 	paniclog_append_noflush("  Zone map: %p - %p\n",
8171 	    (void *)zone_info.zi_map_range.min_address,
8172 	    (void *)zone_info.zi_map_range.max_address);
8173 #if CONFIG_PROB_GZALLOC
8174 	if (pgz_submap) {
8175 		paniclog_append_noflush("  . PGZ   : %p - %p\n",
8176 		    (void *)pgz_submap->min_offset,
8177 		    (void *)pgz_submap->max_offset);
8178 	}
8179 #endif /* CONFIG_PROB_GZALLOC */
8180 	for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8181 		vm_map_t map = zone_submaps[i];
8182 
8183 		if (map == VM_MAP_NULL) {
8184 			continue;
8185 		}
8186 		paniclog_append_noflush("  . %-6s: %p - %p\n",
8187 		    zone_submaps_names[i],
8188 		    (void *)map->min_offset,
8189 		    (void *)map->max_offset);
8190 	}
8191 	paniclog_append_noflush("  Metadata: %p - %p\n"
8192 	    "  Bitmaps : %p - %p\n"
8193 	    "  Extra   : %p - %p\n"
8194 	    "\n",
8195 	    (void *)zone_info.zi_meta_range.min_address,
8196 	    (void *)zone_info.zi_meta_range.max_address,
8197 	    (void *)zone_info.zi_bits_range.min_address,
8198 	    (void *)zone_info.zi_bits_range.max_address,
8199 	    (void *)zone_info.zi_xtra_range.min_address,
8200 	    (void *)zone_info.zi_xtra_range.max_address);
8201 }
8202 
8203 static void
panic_display_zone_fault(vm_offset_t addr)8204 panic_display_zone_fault(vm_offset_t addr)
8205 {
8206 	struct zone_page_metadata meta = { };
8207 	vm_map_t map = VM_MAP_NULL;
8208 	vm_offset_t oob_offs = 0, size = 0;
8209 	int map_idx = -1;
8210 	zone_t z = NULL;
8211 	const char *kind = "whild deref";
8212 	bool oob = false;
8213 
8214 	/*
8215 	 * First: look if we bumped into guard pages between submaps
8216 	 */
8217 	for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8218 		map = zone_submaps[i];
8219 		if (map == VM_MAP_NULL) {
8220 			continue;
8221 		}
8222 
8223 		if (addr >= map->min_offset && addr < map->max_offset) {
8224 			map_idx = i;
8225 			break;
8226 		}
8227 	}
8228 
8229 	if (map_idx == -1) {
8230 		/* this really shouldn't happen, submaps are back to back */
8231 		return;
8232 	}
8233 
8234 	paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8235 
8236 	/*
8237 	 * Second: look if there's just no metadata at all
8238 	 */
8239 	if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8240 	    (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8241 	    meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8242 	    zone_array[meta.zm_index].z_self == NULL) {
8243 		paniclog_append_noflush("  Zone    : <unknown>\n");
8244 		kind = "wild deref, missing or invalid metadata";
8245 	} else {
8246 		z = &zone_array[meta.zm_index];
8247 		paniclog_append_noflush("  Zone    : %s%s\n",
8248 		    zone_heap_name(z), zone_name(z));
8249 		if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8250 			kind = "out-of-bounds (high confidence)";
8251 			oob = true;
8252 			size = zone_element_size((void *)addr,
8253 			    &z, false, &oob_offs);
8254 		} else {
8255 			kind = "use-after-free (medium confidence)";
8256 		}
8257 	}
8258 
8259 	paniclog_append_noflush("  Address : %p\n", (void *)addr);
8260 	if (oob) {
8261 		paniclog_append_noflush("  Element : [%p, %p) of size %d\n",
8262 		    (void *)(trunc_page(addr) - (size - oob_offs)),
8263 		    (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8264 	}
8265 	paniclog_append_noflush("  Submap  : %s [%p; %p)\n",
8266 	    zone_submaps_names[map_idx],
8267 	    (void *)map->min_offset, (void *)map->max_offset);
8268 	paniclog_append_noflush("  Kind    : %s\n", kind);
8269 	if (oob) {
8270 		paniclog_append_noflush("  Access  : %d byte(s) past\n",
8271 		    (uint32_t)(addr & PAGE_MASK) + 1);
8272 	}
8273 	paniclog_append_noflush("  Metadata: zid:%d inl:%d cl:0x%x "
8274 	    "0x%04x 0x%08x 0x%08x 0x%08x\n",
8275 	    meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8276 	    meta.zm_alloc_size, meta.zm_bitmap,
8277 	    meta.zm_page_next.packed_address,
8278 	    meta.zm_page_prev.packed_address);
8279 	paniclog_append_noflush("\n");
8280 }
8281 
8282 void
panic_display_zalloc(void)8283 panic_display_zalloc(void)
8284 {
8285 	bool keepsyms = false;
8286 
8287 	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8288 
8289 	panic_display_zone_info();
8290 
8291 	if (panic_fault_address) {
8292 #if CONFIG_PROB_GZALLOC
8293 		if (pgz_owned(panic_fault_address)) {
8294 			panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8295 		} else
8296 #endif /* CONFIG_PROB_GZALLOC */
8297 		if (zone_maps_owned(panic_fault_address, 1)) {
8298 			panic_display_zone_fault(panic_fault_address);
8299 		}
8300 	}
8301 
8302 	if (panic_include_zprint) {
8303 		panic_display_zprint();
8304 	} else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8305 		panic_display_largest_zones();
8306 	}
8307 #if CONFIG_ZLEAKS
8308 	if (zleak_active) {
8309 		panic_display_zleaks(keepsyms);
8310 	}
8311 #endif
8312 	if (panic_include_kalloc_types) {
8313 		panic_display_kalloc_types();
8314 	}
8315 }
8316 
8317 /*
8318  * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8319  * requesting zone information.
8320  * Frees unused pages towards the end of the region, and zero'es out unused
8321  * space on the last page.
8322  */
8323 static vm_map_copy_t
create_vm_map_copy(vm_offset_t start_addr,vm_size_t total_size,vm_size_t used_size)8324 create_vm_map_copy(
8325 	vm_offset_t             start_addr,
8326 	vm_size_t               total_size,
8327 	vm_size_t               used_size)
8328 {
8329 	kern_return_t   kr;
8330 	vm_offset_t             end_addr;
8331 	vm_size_t               free_size;
8332 	vm_map_copy_t   copy;
8333 
8334 	if (used_size != total_size) {
8335 		end_addr = start_addr + used_size;
8336 		free_size = total_size - (round_page(end_addr) - start_addr);
8337 
8338 		if (free_size >= PAGE_SIZE) {
8339 			kmem_free(ipc_kernel_map,
8340 			    round_page(end_addr), free_size);
8341 		}
8342 		bzero((char *) end_addr, round_page(end_addr) - end_addr);
8343 	}
8344 
8345 	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8346 	    (vm_map_size_t)used_size, TRUE, &copy);
8347 	assert(kr == KERN_SUCCESS);
8348 
8349 	return copy;
8350 }
8351 
8352 static boolean_t
get_zone_info(zone_t z,mach_zone_name_t * zn,mach_zone_info_t * zi)8353 get_zone_info(
8354 	zone_t                   z,
8355 	mach_zone_name_t        *zn,
8356 	mach_zone_info_t        *zi)
8357 {
8358 	struct zone zcopy;
8359 	vm_size_t cached = 0;
8360 
8361 	assert(z != ZONE_NULL);
8362 	zone_lock(z);
8363 	if (!z->z_self) {
8364 		zone_unlock(z);
8365 		return FALSE;
8366 	}
8367 	zcopy = *z;
8368 	if (z->z_pcpu_cache) {
8369 		zpercpu_foreach(zc, z->z_pcpu_cache) {
8370 			cached += zc->zc_alloc_cur + zc->zc_free_cur;
8371 			cached += zc->zc_depot.zd_full * zc_mag_size();
8372 		}
8373 	}
8374 	zone_unlock(z);
8375 
8376 	if (zn != NULL) {
8377 		/*
8378 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8379 		 */
8380 		char temp_zone_name[MAX_ZONE_NAME] = "";
8381 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8382 		    zone_heap_name(z), z->z_name);
8383 
8384 		/* assuming here the name data is static */
8385 		(void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8386 		    strlen(temp_zone_name) + 1);
8387 	}
8388 
8389 	if (zi != NULL) {
8390 		*zi = (mach_zone_info_t) {
8391 			.mzi_count = zone_count_allocated(&zcopy) - cached,
8392 			.mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8393 			// max_size for zprint is now high-watermark of pages used
8394 			.mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8395 			.mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8396 			.mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8397 			.mzi_exhaustible = (uint64_t)zone_exhaustible(&zcopy),
8398 		};
8399 		if (zcopy.z_chunk_pages == 0) {
8400 			/* this is a zcache */
8401 			zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8402 		}
8403 		zpercpu_foreach(zs, zcopy.z_stats) {
8404 			zi->mzi_sum_size += zs->zs_mem_allocated;
8405 		}
8406 		if (zcopy.collectable) {
8407 			SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8408 			    ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8409 			SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8410 		}
8411 	}
8412 
8413 	return TRUE;
8414 }
8415 
8416 /* mach_memory_info entitlement */
8417 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8418 
8419 /* macro needed to rate-limit mach_memory_info */
8420 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8421 
8422 /* declarations necessary to call kauth_cred_issuser() */
8423 struct ucred;
8424 extern int kauth_cred_issuser(struct ucred *);
8425 extern struct ucred *kauth_cred_get(void);
8426 
8427 static kern_return_t
8428 mach_memory_info_internal(
8429 	host_t                  host,
8430 	mach_zone_name_array_t  *namesp,
8431 	mach_msg_type_number_t  *namesCntp,
8432 	mach_zone_info_array_t  *infop,
8433 	mach_msg_type_number_t  *infoCntp,
8434 	mach_memory_info_array_t *memoryInfop,
8435 	mach_msg_type_number_t   *memoryInfoCntp,
8436 	bool                     redact_info);
8437 
8438 static kern_return_t
mach_memory_info_security_check(bool redact_info)8439 mach_memory_info_security_check(bool redact_info)
8440 {
8441 	/* If not root, only allow redacted calls. */
8442 	if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8443 		return KERN_NO_ACCESS;
8444 	}
8445 
8446 	if (PE_srd_fused) {
8447 		return KERN_SUCCESS;
8448 	}
8449 
8450 	/* If does not have the memory entitlement, fail. */
8451 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8452 	if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8453 		return KERN_DENIED;
8454 	}
8455 
8456 	/*
8457 	 * On release non-mac arm devices, allow mach_memory_info
8458 	 * to be called twice per day per boot. memorymaintenanced
8459 	 * calls it once per day, which leaves room for a sysdiagnose.
8460 	 * Allow redacted version to be called without rate limit.
8461 	 */
8462 
8463 	if (!redact_info) {
8464 		static uint64_t first_call = 0, second_call = 0;
8465 		uint64_t now = 0;
8466 		absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8467 
8468 		if (!first_call) {
8469 			first_call = now;
8470 		} else if (!second_call) {
8471 			second_call = now;
8472 		} else if (first_call + NSEC_DAY > now) {
8473 			return KERN_DENIED;
8474 		} else if (first_call + NSEC_DAY < now) {
8475 			first_call = now;
8476 			second_call = 0;
8477 		}
8478 	}
8479 #endif
8480 
8481 	return KERN_SUCCESS;
8482 }
8483 
8484 kern_return_t
mach_zone_info(mach_port_t host_port,mach_zone_name_array_t * namesp,mach_msg_type_number_t * namesCntp,mach_zone_info_array_t * infop,mach_msg_type_number_t * infoCntp)8485 mach_zone_info(
8486 	mach_port_t             host_port,
8487 	mach_zone_name_array_t  *namesp,
8488 	mach_msg_type_number_t  *namesCntp,
8489 	mach_zone_info_array_t  *infop,
8490 	mach_msg_type_number_t  *infoCntp)
8491 {
8492 	return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8493 }
8494 
8495 kern_return_t
mach_memory_info(mach_port_t host_port,mach_zone_name_array_t * namesp,mach_msg_type_number_t * namesCntp,mach_zone_info_array_t * infop,mach_msg_type_number_t * infoCntp,mach_memory_info_array_t * memoryInfop,mach_msg_type_number_t * memoryInfoCntp)8496 mach_memory_info(
8497 	mach_port_t             host_port,
8498 	mach_zone_name_array_t  *namesp,
8499 	mach_msg_type_number_t  *namesCntp,
8500 	mach_zone_info_array_t  *infop,
8501 	mach_msg_type_number_t  *infoCntp,
8502 	mach_memory_info_array_t *memoryInfop,
8503 	mach_msg_type_number_t   *memoryInfoCntp)
8504 {
8505 	bool redact_info = false;
8506 	host_t host = HOST_NULL;
8507 
8508 	host = convert_port_to_host_priv(host_port);
8509 	if (host == HOST_NULL) {
8510 		redact_info = true;
8511 		host = convert_port_to_host(host_port);
8512 	}
8513 
8514 	return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8515 }
8516 
8517 static void
zone_info_redact(mach_zone_info_t * zi)8518 zone_info_redact(mach_zone_info_t *zi)
8519 {
8520 	zi->mzi_cur_size = 0;
8521 	zi->mzi_max_size = 0;
8522 	zi->mzi_alloc_size = 0;
8523 	zi->mzi_sum_size = 0;
8524 	zi->mzi_collectable = 0;
8525 }
8526 
8527 static bool
zone_info_needs_to_be_coalesced(int zone_index)8528 zone_info_needs_to_be_coalesced(int zone_index)
8529 {
8530 	zone_security_flags_t zsflags = zone_security_array[zone_index];
8531 	if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8532 		return true;
8533 	}
8534 	return false;
8535 }
8536 
8537 static bool
zone_info_find_coalesce_zone(mach_zone_info_t * zi,mach_zone_info_t * info,int * coalesce,int coalesce_count,int * coalesce_index)8538 zone_info_find_coalesce_zone(
8539 	mach_zone_info_t *zi,
8540 	mach_zone_info_t *info,
8541 	int              *coalesce,
8542 	int              coalesce_count,
8543 	int              *coalesce_index)
8544 {
8545 	for (int i = 0; i < coalesce_count; i++) {
8546 		if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8547 			*coalesce_index = coalesce[i];
8548 			return true;
8549 		}
8550 	}
8551 
8552 	return false;
8553 }
8554 
8555 static void
zone_info_coalesce(mach_zone_info_t * info,int coalesce_index,mach_zone_info_t * zi)8556 zone_info_coalesce(
8557 	mach_zone_info_t *info,
8558 	int coalesce_index,
8559 	mach_zone_info_t *zi)
8560 {
8561 	info[coalesce_index].mzi_count += zi->mzi_count;
8562 }
8563 
8564 static kern_return_t
mach_memory_info_internal(host_t host,mach_zone_name_array_t * namesp,mach_msg_type_number_t * namesCntp,mach_zone_info_array_t * infop,mach_msg_type_number_t * infoCntp,mach_memory_info_array_t * memoryInfop,mach_msg_type_number_t * memoryInfoCntp,bool redact_info)8565 mach_memory_info_internal(
8566 	host_t                  host,
8567 	mach_zone_name_array_t  *namesp,
8568 	mach_msg_type_number_t  *namesCntp,
8569 	mach_zone_info_array_t  *infop,
8570 	mach_msg_type_number_t  *infoCntp,
8571 	mach_memory_info_array_t *memoryInfop,
8572 	mach_msg_type_number_t   *memoryInfoCntp,
8573 	bool                     redact_info)
8574 {
8575 	mach_zone_name_t        *names;
8576 	vm_offset_t             names_addr;
8577 	vm_size_t               names_size;
8578 
8579 	mach_zone_info_t        *info;
8580 	vm_offset_t             info_addr;
8581 	vm_size_t               info_size;
8582 
8583 	int                     *coalesce;
8584 	vm_offset_t             coalesce_addr;
8585 	vm_size_t               coalesce_size;
8586 	int                     coalesce_count = 0;
8587 
8588 	mach_memory_info_t      *memory_info;
8589 	vm_offset_t             memory_info_addr;
8590 	vm_size_t               memory_info_size;
8591 	vm_size_t               memory_info_vmsize;
8592 	unsigned int            num_info;
8593 
8594 	unsigned int            max_zones, used_zones, i;
8595 	mach_zone_name_t        *zn;
8596 	mach_zone_info_t        *zi;
8597 	kern_return_t           kr;
8598 
8599 	uint64_t                zones_collectable_bytes = 0;
8600 
8601 	if (host == HOST_NULL) {
8602 		return KERN_INVALID_HOST;
8603 	}
8604 
8605 	kr = mach_memory_info_security_check(redact_info);
8606 	if (kr != KERN_SUCCESS) {
8607 		return kr;
8608 	}
8609 
8610 	/*
8611 	 *	We assume that zones aren't freed once allocated.
8612 	 *	We won't pick up any zones that are allocated later.
8613 	 */
8614 
8615 	max_zones = os_atomic_load(&num_zones, relaxed);
8616 
8617 	names_size = round_page(max_zones * sizeof *names);
8618 	kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8619 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8620 	if (kr != KERN_SUCCESS) {
8621 		return kr;
8622 	}
8623 	names = (mach_zone_name_t *) names_addr;
8624 
8625 	info_size = round_page(max_zones * sizeof *info);
8626 	kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8627 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8628 	if (kr != KERN_SUCCESS) {
8629 		kmem_free(ipc_kernel_map,
8630 		    names_addr, names_size);
8631 		return kr;
8632 	}
8633 	info = (mach_zone_info_t *) info_addr;
8634 
8635 	if (redact_info) {
8636 		coalesce_size = round_page(max_zones * sizeof *coalesce);
8637 		kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8638 		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8639 		if (kr != KERN_SUCCESS) {
8640 			kmem_free(ipc_kernel_map,
8641 			    names_addr, names_size);
8642 			kmem_free(ipc_kernel_map,
8643 			    info_addr, info_size);
8644 			return kr;
8645 		}
8646 		coalesce = (int *)coalesce_addr;
8647 	}
8648 
8649 	zn = &names[0];
8650 	zi = &info[0];
8651 
8652 	used_zones = 0;
8653 	for (i = 0; i < max_zones; i++) {
8654 		if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8655 			continue;
8656 		}
8657 
8658 		if (!redact_info) {
8659 			zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8660 			zn++;
8661 			zi++;
8662 			used_zones++;
8663 			continue;
8664 		}
8665 
8666 		zone_info_redact(zi);
8667 		if (!zone_info_needs_to_be_coalesced(i)) {
8668 			zn++;
8669 			zi++;
8670 			used_zones++;
8671 			continue;
8672 		}
8673 
8674 		int coalesce_index;
8675 		bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8676 		    coalesce, coalesce_count, &coalesce_index);
8677 
8678 		/* Didn't find a zone to coalesce */
8679 		if (!found_coalesce_zone) {
8680 			/* Updates the zone name */
8681 			__nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8682 			snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8683 			    (int)zi->mzi_elem_size);
8684 
8685 			coalesce[coalesce_count] = used_zones;
8686 			coalesce_count++;
8687 			zn++;
8688 			zi++;
8689 			used_zones++;
8690 			continue;
8691 		}
8692 
8693 		zone_info_coalesce(info, coalesce_index, zi);
8694 	}
8695 
8696 	if (redact_info) {
8697 		kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8698 	}
8699 
8700 	*namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8701 	*namesCntp = used_zones;
8702 
8703 	*infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8704 	*infoCntp = used_zones;
8705 
8706 	num_info = 0;
8707 	memory_info_addr = 0;
8708 
8709 	if (memoryInfop && memoryInfoCntp) {
8710 		vm_map_copy_t           copy;
8711 		num_info = vm_page_diagnose_estimate();
8712 		memory_info_size = num_info * sizeof(*memory_info);
8713 		memory_info_vmsize = round_page(memory_info_size);
8714 		kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8715 		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8716 		if (kr != KERN_SUCCESS) {
8717 			return kr;
8718 		}
8719 
8720 		kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8721 		    VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8722 		assert(kr == KERN_SUCCESS);
8723 
8724 		memory_info = (mach_memory_info_t *) memory_info_addr;
8725 		vm_page_diagnose(memory_info, num_info, zones_collectable_bytes, redact_info);
8726 
8727 		kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8728 		assert(kr == KERN_SUCCESS);
8729 
8730 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8731 		    (vm_map_size_t)memory_info_size, TRUE, &copy);
8732 		assert(kr == KERN_SUCCESS);
8733 
8734 		*memoryInfop = (mach_memory_info_t *) copy;
8735 		*memoryInfoCntp = num_info;
8736 	}
8737 
8738 	return KERN_SUCCESS;
8739 }
8740 
8741 kern_return_t
mach_zone_info_for_zone(host_priv_t host,mach_zone_name_t name,mach_zone_info_t * infop)8742 mach_zone_info_for_zone(
8743 	host_priv_t                     host,
8744 	mach_zone_name_t        name,
8745 	mach_zone_info_t        *infop)
8746 {
8747 	zone_t zone_ptr;
8748 
8749 	if (host == HOST_NULL) {
8750 		return KERN_INVALID_HOST;
8751 	}
8752 
8753 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8754 	if (!PE_i_can_has_debugger(NULL)) {
8755 		return KERN_INVALID_HOST;
8756 	}
8757 #endif
8758 
8759 	if (infop == NULL) {
8760 		return KERN_INVALID_ARGUMENT;
8761 	}
8762 
8763 	zone_ptr = ZONE_NULL;
8764 	zone_foreach(z) {
8765 		/*
8766 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8767 		 */
8768 		char temp_zone_name[MAX_ZONE_NAME] = "";
8769 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8770 		    zone_heap_name(z), z->z_name);
8771 
8772 		/* Find the requested zone by name */
8773 		if (track_this_zone(temp_zone_name, name.mzn_name)) {
8774 			zone_ptr = z;
8775 			break;
8776 		}
8777 	}
8778 
8779 	/* No zones found with the requested zone name */
8780 	if (zone_ptr == ZONE_NULL) {
8781 		return KERN_INVALID_ARGUMENT;
8782 	}
8783 
8784 	if (get_zone_info(zone_ptr, NULL, infop)) {
8785 		return KERN_SUCCESS;
8786 	}
8787 	return KERN_FAILURE;
8788 }
8789 
8790 kern_return_t
mach_zone_info_for_largest_zone(host_priv_t host,mach_zone_name_t * namep,mach_zone_info_t * infop)8791 mach_zone_info_for_largest_zone(
8792 	host_priv_t                     host,
8793 	mach_zone_name_t        *namep,
8794 	mach_zone_info_t        *infop)
8795 {
8796 	if (host == HOST_NULL) {
8797 		return KERN_INVALID_HOST;
8798 	}
8799 
8800 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8801 	if (!PE_i_can_has_debugger(NULL)) {
8802 		return KERN_INVALID_HOST;
8803 	}
8804 #endif
8805 
8806 	if (namep == NULL || infop == NULL) {
8807 		return KERN_INVALID_ARGUMENT;
8808 	}
8809 
8810 	if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8811 		return KERN_SUCCESS;
8812 	}
8813 	return KERN_FAILURE;
8814 }
8815 
8816 uint64_t
get_zones_collectable_bytes(void)8817 get_zones_collectable_bytes(void)
8818 {
8819 	uint64_t zones_collectable_bytes = 0;
8820 	mach_zone_info_t zi;
8821 
8822 	zone_foreach(z) {
8823 		if (get_zone_info(z, NULL, &zi)) {
8824 			zones_collectable_bytes +=
8825 			    GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8826 		}
8827 	}
8828 
8829 	return zones_collectable_bytes;
8830 }
8831 
8832 kern_return_t
mach_zone_get_zlog_zones(host_priv_t host,mach_zone_name_array_t * namesp,mach_msg_type_number_t * namesCntp)8833 mach_zone_get_zlog_zones(
8834 	host_priv_t                             host,
8835 	mach_zone_name_array_t  *namesp,
8836 	mach_msg_type_number_t  *namesCntp)
8837 {
8838 #if ZALLOC_ENABLE_LOGGING
8839 	unsigned int max_zones, logged_zones, i;
8840 	kern_return_t kr;
8841 	zone_t zone_ptr;
8842 	mach_zone_name_t *names;
8843 	vm_offset_t names_addr;
8844 	vm_size_t names_size;
8845 
8846 	if (host == HOST_NULL) {
8847 		return KERN_INVALID_HOST;
8848 	}
8849 
8850 	if (namesp == NULL || namesCntp == NULL) {
8851 		return KERN_INVALID_ARGUMENT;
8852 	}
8853 
8854 	max_zones = os_atomic_load(&num_zones, relaxed);
8855 
8856 	names_size = round_page(max_zones * sizeof *names);
8857 	kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8858 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8859 	if (kr != KERN_SUCCESS) {
8860 		return kr;
8861 	}
8862 	names = (mach_zone_name_t *) names_addr;
8863 
8864 	zone_ptr = ZONE_NULL;
8865 	logged_zones = 0;
8866 	for (i = 0; i < max_zones; i++) {
8867 		zone_t z = &(zone_array[i]);
8868 		assert(z != ZONE_NULL);
8869 
8870 		/* Copy out the zone name if zone logging is enabled */
8871 		if (z->z_btlog) {
8872 			get_zone_info(z, &names[logged_zones], NULL);
8873 			logged_zones++;
8874 		}
8875 	}
8876 
8877 	*namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8878 	*namesCntp = logged_zones;
8879 
8880 	return KERN_SUCCESS;
8881 
8882 #else /* ZALLOC_ENABLE_LOGGING */
8883 #pragma unused(host, namesp, namesCntp)
8884 	return KERN_FAILURE;
8885 #endif /* ZALLOC_ENABLE_LOGGING */
8886 }
8887 
8888 kern_return_t
mach_zone_get_btlog_records(host_priv_t host,mach_zone_name_t name,zone_btrecord_array_t * recsp,mach_msg_type_number_t * numrecs)8889 mach_zone_get_btlog_records(
8890 	host_priv_t             host,
8891 	mach_zone_name_t        name,
8892 	zone_btrecord_array_t  *recsp,
8893 	mach_msg_type_number_t *numrecs)
8894 {
8895 #if ZALLOC_ENABLE_LOGGING
8896 	zone_btrecord_t *recs;
8897 	kern_return_t    kr;
8898 	vm_address_t     addr;
8899 	vm_size_t        size;
8900 	zone_t           zone_ptr;
8901 	vm_map_copy_t    copy;
8902 
8903 	if (host == HOST_NULL) {
8904 		return KERN_INVALID_HOST;
8905 	}
8906 
8907 	if (recsp == NULL || numrecs == NULL) {
8908 		return KERN_INVALID_ARGUMENT;
8909 	}
8910 
8911 	zone_ptr = ZONE_NULL;
8912 	zone_foreach(z) {
8913 		/*
8914 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8915 		 */
8916 		char temp_zone_name[MAX_ZONE_NAME] = "";
8917 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8918 		    zone_heap_name(z), z->z_name);
8919 
8920 		/* Find the requested zone by name */
8921 		if (track_this_zone(temp_zone_name, name.mzn_name)) {
8922 			zone_ptr = z;
8923 			break;
8924 		}
8925 	}
8926 
8927 	/* No zones found with the requested zone name */
8928 	if (zone_ptr == ZONE_NULL) {
8929 		return KERN_INVALID_ARGUMENT;
8930 	}
8931 
8932 	/* Logging not turned on for the requested zone */
8933 	if (!zone_ptr->z_btlog) {
8934 		return KERN_FAILURE;
8935 	}
8936 
8937 	kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8938 	if (kr != KERN_SUCCESS) {
8939 		return kr;
8940 	}
8941 
8942 	addr = (vm_address_t)recs;
8943 	size = sizeof(zone_btrecord_t) * *numrecs;
8944 
8945 	kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8946 	assert(kr == KERN_SUCCESS);
8947 
8948 	*recsp = (zone_btrecord_t *)copy;
8949 	return KERN_SUCCESS;
8950 
8951 #else /* !ZALLOC_ENABLE_LOGGING */
8952 #pragma unused(host, name, recsp, numrecs)
8953 	return KERN_FAILURE;
8954 #endif /* !ZALLOC_ENABLE_LOGGING */
8955 }
8956 
8957 
8958 kern_return_t
mach_zone_force_gc(host_t host)8959 mach_zone_force_gc(
8960 	host_t host)
8961 {
8962 	if (host == HOST_NULL) {
8963 		return KERN_INVALID_HOST;
8964 	}
8965 
8966 #if DEBUG || DEVELOPMENT
8967 	extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8968 	/* Callout to buffer cache GC to drop elements in the apfs zones */
8969 	if (consider_buffer_cache_collect != NULL) {
8970 		(void)(*consider_buffer_cache_collect)(0);
8971 	}
8972 	zone_gc(ZONE_GC_DRAIN);
8973 #endif /* DEBUG || DEVELOPMENT */
8974 	return KERN_SUCCESS;
8975 }
8976 
8977 zone_t
zone_find_largest(uint64_t * zone_size)8978 zone_find_largest(uint64_t *zone_size)
8979 {
8980 	zone_t    largest_zone  = 0;
8981 	uint64_t  largest_zone_size = 0;
8982 	zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8983 	if (zone_size) {
8984 		*zone_size = largest_zone_size;
8985 	}
8986 	return largest_zone;
8987 }
8988 
8989 void
zone_get_stats(zone_t zone,struct zone_basic_stats * stats)8990 zone_get_stats(
8991 	zone_t                  zone,
8992 	struct zone_basic_stats *stats)
8993 {
8994 	stats->zbs_avail = zone->z_elems_avail;
8995 
8996 	stats->zbs_alloc_fail = 0;
8997 	zpercpu_foreach(zs, zone->z_stats) {
8998 		stats->zbs_alloc_fail += zs->zs_alloc_fail;
8999 	}
9000 
9001 	stats->zbs_cached = 0;
9002 	if (zone->z_pcpu_cache) {
9003 		zpercpu_foreach(zc, zone->z_pcpu_cache) {
9004 			stats->zbs_cached += zc->zc_alloc_cur +
9005 			    zc->zc_free_cur +
9006 			    zc->zc_depot.zd_full * zc_mag_size();
9007 		}
9008 	}
9009 
9010 	stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
9011 
9012 	/*
9013 	 * Since we don't take any locks, deal with possible inconsistencies
9014 	 * as the counters may have changed.
9015 	 */
9016 	if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
9017 	    &stats->zbs_alloc)) {
9018 		stats->zbs_avail = stats->zbs_free;
9019 		stats->zbs_alloc = 0;
9020 	}
9021 }
9022 
9023 #endif /* !ZALLOC_TEST */
9024 #pragma mark zone creation, configuration, destruction
9025 #if !ZALLOC_TEST
9026 
9027 static zone_t
zone_init_defaults(zone_id_t zid)9028 zone_init_defaults(zone_id_t zid)
9029 {
9030 	zone_t z = &zone_array[zid];
9031 
9032 	z->z_wired_max = ~0u;
9033 	z->collectable = true;
9034 
9035 	hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
9036 	hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
9037 	zone_depot_init(&z->z_recirc);
9038 	return z;
9039 }
9040 
9041 void
zone_set_exhaustible(zone_t zone,vm_size_t nelems)9042 zone_set_exhaustible(zone_t zone, vm_size_t nelems)
9043 {
9044 	zone_lock(zone);
9045 	zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
9046 	zone_unlock(zone);
9047 }
9048 
9049 void
zone_raise_reserve(union zone_or_view zov,uint16_t min_elements)9050 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
9051 {
9052 	zone_t zone = zov.zov_zone;
9053 
9054 	if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
9055 		zone = zov.zov_view->zv_zone;
9056 	} else {
9057 		zone = zov.zov_zone;
9058 	}
9059 
9060 	os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
9061 }
9062 
9063 /**
9064  * @function zone_create_find
9065  *
9066  * @abstract
9067  * Finds an unused zone for the given name and element size.
9068  *
9069  * @param name          the zone name
9070  * @param size          the element size (including redzones, ...)
9071  * @param flags         the flags passed to @c zone_create*
9072  * @param zid_inout     the desired zone ID or ZONE_ID_ANY
9073  *
9074  * @returns             a zone to initialize further.
9075  */
9076 static zone_t
zone_create_find(const char * name,vm_size_t size,zone_create_flags_t flags,zone_id_t * zid_inout)9077 zone_create_find(
9078 	const char             *name,
9079 	vm_size_t               size,
9080 	zone_create_flags_t     flags,
9081 	zone_id_t              *zid_inout)
9082 {
9083 	zone_id_t nzones, zid = *zid_inout;
9084 	zone_t z;
9085 
9086 	simple_lock(&all_zones_lock, &zone_locks_grp);
9087 
9088 	nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
9089 	assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
9090 
9091 	if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
9092 		/*
9093 		 * The first time around, make sure the reserved zone IDs
9094 		 * have an initialized lock as zone_index_foreach() will
9095 		 * enumerate them.
9096 		 */
9097 		while (nzones < ZONE_ID__FIRST_DYNAMIC) {
9098 			zone_init_defaults(nzones++);
9099 		}
9100 
9101 		os_atomic_store(&num_zones, nzones, release);
9102 	}
9103 
9104 	if (zid != ZONE_ID_ANY) {
9105 		if (zid >= ZONE_ID__FIRST_DYNAMIC) {
9106 			panic("zone_create: invalid desired zone ID %d for %s",
9107 			    zid, name);
9108 		}
9109 		if (flags & ZC_DESTRUCTIBLE) {
9110 			panic("zone_create: ID %d (%s) must be permanent", zid, name);
9111 		}
9112 		if (zone_array[zid].z_self) {
9113 			panic("zone_create: creating zone ID %d (%s) twice", zid, name);
9114 		}
9115 		z = &zone_array[zid];
9116 	} else {
9117 		if (flags & ZC_DESTRUCTIBLE) {
9118 			/*
9119 			 * If possible, find a previously zdestroy'ed zone in the
9120 			 * zone_array that we can reuse.
9121 			 */
9122 			for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
9123 			    i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
9124 				z = &zone_array[i];
9125 
9126 				/*
9127 				 * If the zone name and the element size are the
9128 				 * same, we can just reuse the old zone struct.
9129 				 */
9130 				if (strcmp(z->z_name, name) ||
9131 				    zone_elem_outer_size(z) != size) {
9132 					continue;
9133 				}
9134 				bitmap_clear(zone_destroyed_bitmap, i);
9135 				z->z_destroyed = false;
9136 				z->z_self = z;
9137 				zid = (zone_id_t)i;
9138 				goto out;
9139 			}
9140 		}
9141 
9142 		zid = nzones++;
9143 		z = zone_init_defaults(zid);
9144 
9145 		/*
9146 		 * The release barrier pairs with the acquire in
9147 		 * zone_index_foreach() and makes sure that enumeration loops
9148 		 * always see an initialized zone lock.
9149 		 */
9150 		os_atomic_store(&num_zones, nzones, release);
9151 	}
9152 
9153 out:
9154 	num_zones_in_use++;
9155 	simple_unlock(&all_zones_lock);
9156 
9157 	*zid_inout = zid;
9158 	return z;
9159 }
9160 
9161 __abortlike
9162 static void
zone_create_panic(const char * name,const char * f1,const char * f2)9163 zone_create_panic(const char *name, const char *f1, const char *f2)
9164 {
9165 	panic("zone_create: creating zone %s: flag %s and %s are incompatible",
9166 	    name, f1, f2);
9167 }
9168 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
9169 	if ((flags) & forbidden_flag) { \
9170 	        zone_create_panic(name, #current_flag, #forbidden_flag); \
9171 	}
9172 
9173 /*
9174  * Adjusts the size of the element based on minimum size, alignment
9175  * and kasan redzones
9176  */
9177 static vm_size_t
zone_elem_adjust_size(const char * name __unused,vm_size_t elem_size,zone_create_flags_t flags __unused,uint16_t * redzone __unused)9178 zone_elem_adjust_size(
9179 	const char             *name __unused,
9180 	vm_size_t               elem_size,
9181 	zone_create_flags_t     flags __unused,
9182 	uint16_t               *redzone __unused)
9183 {
9184 	vm_size_t size;
9185 
9186 	/*
9187 	 * Adjust element size for minimum size and pointer alignment
9188 	 */
9189 	size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
9190 	if (size < ZONE_MIN_ELEM_SIZE) {
9191 		size = ZONE_MIN_ELEM_SIZE;
9192 	}
9193 
9194 #if KASAN_CLASSIC
9195 	/*
9196 	 * Expand the zone allocation size to include the redzones.
9197 	 *
9198 	 * For page-multiple zones add a full guard page because they
9199 	 * likely require alignment.
9200 	 */
9201 	uint16_t redzone_tmp;
9202 	if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
9203 		redzone_tmp = 0;
9204 	} else if ((size & PAGE_MASK) == 0) {
9205 		if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
9206 			panic("zone_create: zone %s can't provide more than PAGE_SIZE"
9207 			    "alignment", name);
9208 		}
9209 		redzone_tmp = PAGE_SIZE;
9210 	} else if (flags & ZC_ALIGNMENT_REQUIRED) {
9211 		redzone_tmp = 0;
9212 	} else {
9213 		redzone_tmp = KASAN_GUARD_SIZE;
9214 	}
9215 	size += redzone_tmp;
9216 	if (redzone) {
9217 		*redzone = redzone_tmp;
9218 	}
9219 #endif
9220 	return size;
9221 }
9222 
9223 /*
9224  * Returns the allocation chunk size that has least framentation
9225  */
9226 static vm_size_t
zone_get_min_alloc_granule(vm_size_t elem_size,zone_create_flags_t flags)9227 zone_get_min_alloc_granule(
9228 	vm_size_t               elem_size,
9229 	zone_create_flags_t     flags)
9230 {
9231 	vm_size_t alloc_granule = PAGE_SIZE;
9232 	if (flags & ZC_PERCPU) {
9233 		alloc_granule = PAGE_SIZE * zpercpu_count();
9234 		if (PAGE_SIZE % elem_size > 256) {
9235 			panic("zone_create: per-cpu zone has too much fragmentation");
9236 		}
9237 	} else if (flags & ZC_READONLY) {
9238 		alloc_granule = PAGE_SIZE;
9239 	} else if ((elem_size & PAGE_MASK) == 0) {
9240 		/* zero fragmentation by definition */
9241 		alloc_granule = elem_size;
9242 	} else if (alloc_granule % elem_size == 0) {
9243 		/* zero fragmentation by definition */
9244 	} else {
9245 		vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9246 		vm_size_t alloc_tmp = PAGE_SIZE;
9247 		vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9248 
9249 #if __arm64__
9250 		/*
9251 		 * Increase chunk size to 48K for sizes larger than 4K on 16k
9252 		 * machines, so as to reduce internal fragementation for kalloc
9253 		 * zones with sizes 12K and 24K.
9254 		 */
9255 		if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9256 			max_chunk_size = 48 * 1024;
9257 		}
9258 #endif
9259 		while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9260 			vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9261 			if (frag_tmp < frag) {
9262 				frag = frag_tmp;
9263 				alloc_granule = alloc_tmp;
9264 			}
9265 		}
9266 	}
9267 	return alloc_granule;
9268 }
9269 
9270 vm_size_t
zone_get_early_alloc_size(const char * name __unused,vm_size_t elem_size,zone_create_flags_t flags,vm_size_t min_elems)9271 zone_get_early_alloc_size(
9272 	const char             *name __unused,
9273 	vm_size_t               elem_size,
9274 	zone_create_flags_t     flags,
9275 	vm_size_t               min_elems)
9276 {
9277 	vm_size_t adjusted_size, alloc_granule, chunk_elems;
9278 
9279 	adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9280 	alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9281 	chunk_elems   = alloc_granule / adjusted_size;
9282 
9283 	return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9284 }
9285 
9286 zone_t
9287 zone_create_ext(
9288 	const char             *name,
9289 	vm_size_t               size,
9290 	zone_create_flags_t     flags,
9291 	zone_id_t               zid,
9292 	void                  (^extra_setup)(zone_t))
9293 {
9294 	zone_security_flags_t *zsflags;
9295 	uint16_t redzone;
9296 	zone_t z;
9297 
9298 	if (size > ZONE_MAX_ALLOC_SIZE) {
9299 		panic("zone_create: element size too large: %zd", (size_t)size);
9300 	}
9301 
9302 	if (size < 2 * sizeof(vm_size_t)) {
9303 		/* Elements are too small for kasan. */
9304 		flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9305 	}
9306 
9307 	size = zone_elem_adjust_size(name, size, flags, &redzone);
9308 
9309 	/*
9310 	 * Allocate the zone slot, return early if we found an older match.
9311 	 */
9312 	z = zone_create_find(name, size, flags, &zid);
9313 	if (__improbable(z->z_self)) {
9314 		/* We found a zone to reuse */
9315 		return z;
9316 	}
9317 	zsflags = &zone_security_array[zid];
9318 
9319 	/*
9320 	 * Initialize the zone properly.
9321 	 */
9322 
9323 	/*
9324 	 * If the kernel is post lockdown, copy the zone name passed in.
9325 	 * Else simply maintain a pointer to the name string as it can only
9326 	 * be a core XNU zone (no unloadable kext exists before lockdown).
9327 	 */
9328 	if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9329 		size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9330 		char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9331 		strlcpy(buf, name, nsz);
9332 		z->z_name = buf;
9333 	} else {
9334 		z->z_name = name;
9335 	}
9336 	if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9337 		z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9338 	} else {
9339 		/*
9340 		 * zone_init() hasn't run yet, use the storage provided by
9341 		 * zone_stats_startup(), and zone_init() will replace it
9342 		 * with the final value once the PERCPU zone exists.
9343 		 */
9344 		z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9345 	}
9346 
9347 	if (flags & ZC_OBJ_CACHE) {
9348 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9349 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9350 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9351 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9352 
9353 		z->z_elem_size   = (uint16_t)size;
9354 		z->z_chunk_pages = 0;
9355 		z->z_quo_magic   = 0;
9356 		z->z_align_magic = 0;
9357 		z->z_chunk_elems = 0;
9358 		z->z_elem_offs   = 0;
9359 		z->no_callout    = true;
9360 		zsflags->z_lifo  = true;
9361 	} else {
9362 		vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9363 
9364 		z->z_elem_size   = (uint16_t)(size - redzone);
9365 		z->z_chunk_pages = (uint16_t)atop(alloc);
9366 		z->z_quo_magic   = Z_MAGIC_QUO(size);
9367 		z->z_align_magic = Z_MAGIC_ALIGNED(size);
9368 		if (flags & ZC_PERCPU) {
9369 			z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9370 			z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9371 		} else {
9372 			z->z_chunk_elems = (uint16_t)(alloc / size);
9373 			z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9374 		}
9375 	}
9376 
9377 	/*
9378 	 * Handle KPI flags
9379 	 */
9380 
9381 	/* ZC_CACHING applied after all configuration is done */
9382 	if (flags & ZC_NOCACHING) {
9383 		z->z_nocaching = true;
9384 	}
9385 
9386 	if (flags & ZC_READONLY) {
9387 		zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9388 		zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9389 		assert(zid <= ZONE_ID__LAST_RO);
9390 #if ZSECURITY_CONFIG(READ_ONLY)
9391 		zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9392 #endif
9393 		zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9394 		zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9395 		assert(size <= PAGE_SIZE);
9396 		if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9397 			panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9398 			    (uint32_t)size, zone_heap_name(z), z->z_name);
9399 		}
9400 	}
9401 
9402 	if (flags & ZC_PERCPU) {
9403 		zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9404 		zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9405 		z->z_percpu = true;
9406 	}
9407 	if (flags & ZC_NOGC) {
9408 		z->collectable = false;
9409 	}
9410 	/*
9411 	 * Handle ZC_NOENCRYPT from xnu only
9412 	 */
9413 	if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9414 		zsflags->z_noencrypt = true;
9415 	}
9416 	if (flags & ZC_NOCALLOUT) {
9417 		z->no_callout = true;
9418 	}
9419 	if (flags & ZC_DESTRUCTIBLE) {
9420 		zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9421 		z->z_destructible = true;
9422 	}
9423 	/*
9424 	 * Handle Internal flags
9425 	 */
9426 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9427 	if (flags & ZC_PGZ_USE_GUARDS) {
9428 		/*
9429 		 * Try to turn on guard pages only for zones
9430 		 * with a chance of OOB.
9431 		 */
9432 		if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9433 			zsflags->z_pgz_use_guards = true;
9434 		}
9435 		z->z_pgz_use_guards = true;
9436 	}
9437 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9438 	if (!(flags & ZC_NOTBITAG)) {
9439 		z->z_tbi_tag = true;
9440 	}
9441 	if (flags & ZC_KALLOC_TYPE) {
9442 		zsflags->z_kalloc_type = true;
9443 	}
9444 	if (flags & ZC_VM) {
9445 		zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9446 		zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9447 	}
9448 	if (flags & ZC_DATA) {
9449 		zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9450 	}
9451 #if KASAN_CLASSIC
9452 	if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9453 		z->z_kasan_quarantine = true;
9454 	}
9455 	z->z_kasan_redzone = redzone;
9456 #endif /* KASAN_CLASSIC */
9457 #if KASAN_FAKESTACK
9458 	if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9459 		z->z_kasan_fakestacks = true;
9460 	}
9461 #endif /* KASAN_FAKESTACK */
9462 
9463 	/*
9464 	 * Then if there's extra tuning, do it
9465 	 */
9466 	if (extra_setup) {
9467 		extra_setup(z);
9468 	}
9469 
9470 	/*
9471 	 * Configure debugging features
9472 	 */
9473 #if CONFIG_PROB_GZALLOC
9474 	if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9475 		pgz_zone_init(z);
9476 	}
9477 #endif
9478 	if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9479 #if ZALLOC_ENABLE_LOGGING
9480 		/*
9481 		 * Check for and set up zone leak detection
9482 		 * if requested via boot-args.
9483 		 */
9484 		zone_setup_logging(z);
9485 #endif /* ZALLOC_ENABLE_LOGGING */
9486 #if KASAN_TBI
9487 		zone_setup_kasan_logging(z);
9488 #endif /* KASAN_TBI */
9489 	}
9490 
9491 #if VM_TAG_SIZECLASSES
9492 	if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9493 		static uint16_t sizeclass_idx;
9494 
9495 		assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9496 		z->z_uses_tags = true;
9497 		if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9498 			zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9499 			z->z_tags_sizeclass = sizeclass_idx++;
9500 		} else {
9501 			uint16_t i = 0;
9502 			for (; i < sizeclass_idx; i++) {
9503 				if (size == zone_tags_sizeclasses[i]) {
9504 					z->z_tags_sizeclass = i;
9505 					break;
9506 				}
9507 			}
9508 
9509 			/*
9510 			 * Size class wasn't found, add it to zone_tags_sizeclasses
9511 			 */
9512 			if (i == sizeclass_idx) {
9513 				assert(i < VM_TAG_SIZECLASSES);
9514 				zone_tags_sizeclasses[i] = (uint16_t)size;
9515 				z->z_tags_sizeclass = sizeclass_idx++;
9516 			}
9517 		}
9518 		assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9519 	}
9520 #endif
9521 
9522 	/*
9523 	 * Finally, fixup properties based on security policies, boot-args, ...
9524 	 */
9525 	if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9526 		/*
9527 		 * We use LIFO in the data map, because workloads like network
9528 		 * usage or similar tend to rotate through allocations very
9529 		 * quickly with sometimes epxloding working-sets and using
9530 		 * a FIFO policy might cause massive TLB trashing with rather
9531 		 * dramatic performance impacts.
9532 		 */
9533 		zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9534 		zsflags->z_lifo = true;
9535 	}
9536 
9537 	if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9538 		/*
9539 		 * No zone made before zone_init() can have ZC_CACHING set.
9540 		 */
9541 		assert(zc_magazine_zone);
9542 		zone_enable_caching(z);
9543 	}
9544 
9545 	zone_lock(z);
9546 	z->z_self = z;
9547 	zone_unlock(z);
9548 
9549 	return z;
9550 }
9551 
9552 void
zone_set_sig_eq(zone_t zone,zone_id_t sig_eq)9553 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9554 {
9555 	zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9556 }
9557 
9558 zone_id_t
zone_get_sig_eq(zone_t zone)9559 zone_get_sig_eq(zone_t zone)
9560 {
9561 	return zone_security_array[zone_index(zone)].z_sig_eq;
9562 }
9563 
9564 void
zone_enable_smr(zone_t zone,struct smr * smr,zone_smr_free_cb_t free_cb)9565 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9566 {
9567 	/* moving to SMR must be done before the zone has ever been used */
9568 	assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9569 	assert(!zone_security_array[zone_index(zone)].z_lifo);
9570 	assert((smr->smr_flags & SMR_SLEEPABLE) == 0);
9571 
9572 	if (!zone->z_pcpu_cache) {
9573 		zone_enable_caching(zone);
9574 	}
9575 
9576 	zone_lock(zone);
9577 
9578 	zpercpu_foreach(it, zone->z_pcpu_cache) {
9579 		it->zc_smr = smr;
9580 		it->zc_free = free_cb;
9581 	}
9582 	zone->z_smr = true;
9583 
9584 	zone_unlock(zone);
9585 }
9586 
9587 __startup_func
9588 void
zone_create_startup(struct zone_create_startup_spec * spec)9589 zone_create_startup(struct zone_create_startup_spec *spec)
9590 {
9591 	zone_t z;
9592 
9593 	z = zone_create_ext(spec->z_name, spec->z_size,
9594 	    spec->z_flags, spec->z_zid, spec->z_setup);
9595 	if (spec->z_var) {
9596 		*spec->z_var = z;
9597 	}
9598 }
9599 
9600 /*
9601  * The 4 first field of a zone_view and a zone alias, so that the zone_or_view_t
9602  * union works. trust but verify.
9603  */
9604 #define zalloc_check_zov_alias(f1, f2) \
9605     static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9606 zalloc_check_zov_alias(z_self, zv_zone);
9607 zalloc_check_zov_alias(z_stats, zv_stats);
9608 zalloc_check_zov_alias(z_name, zv_name);
9609 zalloc_check_zov_alias(z_views, zv_next);
9610 #undef zalloc_check_zov_alias
9611 
9612 __startup_func
9613 void
zone_view_startup_init(struct zone_view_startup_spec * spec)9614 zone_view_startup_init(struct zone_view_startup_spec *spec)
9615 {
9616 	struct kalloc_heap *heap = NULL;
9617 	zone_view_t zv = spec->zv_view;
9618 	zone_t z;
9619 	zone_security_flags_t zsflags;
9620 
9621 	switch (spec->zv_heapid) {
9622 	case KHEAP_ID_DATA_BUFFERS:
9623 		heap = KHEAP_DATA_BUFFERS;
9624 		break;
9625 	default:
9626 		heap = NULL;
9627 	}
9628 
9629 	if (heap) {
9630 		z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9631 	} else {
9632 		z = *spec->zv_zone;
9633 		assert(spec->zv_size <= zone_elem_inner_size(z));
9634 	}
9635 
9636 	assert(z);
9637 
9638 	zv->zv_zone  = z;
9639 	zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9640 	zv->zv_next  = z->z_views;
9641 	zsflags = zone_security_config(z);
9642 	if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9643 		/*
9644 		 * count the raw view for zones not in a heap,
9645 		 * kalloc_heap_init() already counts it for its members.
9646 		 */
9647 		zone_view_count += 2;
9648 	} else {
9649 		zone_view_count += 1;
9650 	}
9651 	z->z_views = zv;
9652 }
9653 
9654 zone_t
zone_create(const char * name,vm_size_t size,zone_create_flags_t flags)9655 zone_create(
9656 	const char             *name,
9657 	vm_size_t               size,
9658 	zone_create_flags_t     flags)
9659 {
9660 	return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9661 }
9662 
9663 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9664 
9665 zone_id_t
zone_create_ro(const char * name,vm_size_t size,zone_create_flags_t flags,zone_create_ro_id_t zc_ro_id)9666 zone_create_ro(
9667 	const char             *name,
9668 	vm_size_t               size,
9669 	zone_create_flags_t     flags,
9670 	zone_create_ro_id_t     zc_ro_id)
9671 {
9672 	assert(zc_ro_id <= ZC_RO_ID__LAST);
9673 	zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9674 	(void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9675 	return reserved_zid;
9676 }
9677 
9678 zone_t
zinit(vm_size_t size,vm_size_t max __unused,vm_size_t alloc __unused,const char * name)9679 zinit(
9680 	vm_size_t       size,           /* the size of an element */
9681 	vm_size_t       max __unused,   /* maximum memory to use */
9682 	vm_size_t       alloc __unused, /* allocation size */
9683 	const char      *name)          /* a name for the zone */
9684 {
9685 	return zone_create(name, size, ZC_DESTRUCTIBLE);
9686 }
9687 
9688 void
zdestroy(zone_t z)9689 zdestroy(zone_t z)
9690 {
9691 	unsigned int zindex = zone_index(z);
9692 	zone_security_flags_t zsflags = zone_security_array[zindex];
9693 
9694 	current_thread()->options |= TH_OPT_ZONE_PRIV;
9695 	lck_mtx_lock(&zone_gc_lock);
9696 
9697 	zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9698 
9699 	lck_mtx_unlock(&zone_gc_lock);
9700 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9701 
9702 	zone_lock(z);
9703 
9704 	if (!zone_submap_is_sequestered(zsflags)) {
9705 		while (!zone_pva_is_null(z->z_pageq_va)) {
9706 			struct zone_page_metadata *meta;
9707 
9708 			zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9709 			meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9710 			assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9711 			bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9712 			zone_unlock(z);
9713 			kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9714 			    ptoa(z->z_chunk_pages));
9715 			zone_lock(z);
9716 		}
9717 	}
9718 
9719 #if !KASAN_CLASSIC
9720 	/* Assert that all counts are zero */
9721 	if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9722 	    (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9723 		panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9724 		    zone_heap_name(z), z->z_name);
9725 	}
9726 
9727 	/* consistency check: make sure everything is indeed empty */
9728 	assert(zone_pva_is_null(z->z_pageq_empty));
9729 	assert(zone_pva_is_null(z->z_pageq_partial));
9730 	assert(zone_pva_is_null(z->z_pageq_full));
9731 	if (!zone_submap_is_sequestered(zsflags)) {
9732 		assert(zone_pva_is_null(z->z_pageq_va));
9733 	}
9734 #endif
9735 
9736 	zone_unlock(z);
9737 
9738 	simple_lock(&all_zones_lock, &zone_locks_grp);
9739 
9740 	assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9741 	/* Mark the zone as empty in the bitmap */
9742 	bitmap_set(zone_destroyed_bitmap, zindex);
9743 	num_zones_in_use--;
9744 	assert(num_zones_in_use > 0);
9745 
9746 	simple_unlock(&all_zones_lock);
9747 }
9748 
9749 #endif /* !ZALLOC_TEST */
9750 #pragma mark zalloc module init
9751 #if !ZALLOC_TEST
9752 
9753 /*
9754  *	Initialize the "zone of zones" which uses fixed memory allocated
9755  *	earlier in memory initialization.  zone_bootstrap is called
9756  *	before zone_init.
9757  */
9758 __startup_func
9759 void
zone_bootstrap(void)9760 zone_bootstrap(void)
9761 {
9762 #if DEBUG || DEVELOPMENT
9763 #if __x86_64__
9764 	if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9765 		/*
9766 		 * rdar://79781535 Disable early gaps while running kernPOST on Intel
9767 		 * the fp faulting code gets triggered and deadlocks.
9768 		 */
9769 		zone_caching_disabled = 1;
9770 	}
9771 #endif /* __x86_64__ */
9772 #endif /* DEBUG || DEVELOPMENT */
9773 
9774 	/* Validate struct zone_packed_virtual_address expectations */
9775 	static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9776 	if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9777 		panic("zone_pva_t can't pack a kernel page address in 31 bits");
9778 	}
9779 
9780 	zpercpu_early_count = ml_early_cpu_max_number() + 1;
9781 	if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9782 		/*
9783 		 * Scale zc_mag_size() per machine.
9784 		 *
9785 		 * - wide machines get 128B magazines to avoid all false sharing
9786 		 * - smaller machines but with enough RAM get a bit bigger
9787 		 *   buckets (empirically affects networking performance)
9788 		 */
9789 		if (zpercpu_early_count >= 10) {
9790 			_zc_mag_size = 14;
9791 		} else if ((sane_size >> 30) >= 4) {
9792 			_zc_mag_size = 10;
9793 		}
9794 	}
9795 
9796 	/*
9797 	 * Initialize random used to scramble early allocations
9798 	 */
9799 	zpercpu_foreach_cpu(cpu) {
9800 		random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9801 	}
9802 
9803 #if CONFIG_PROB_GZALLOC
9804 	/*
9805 	 * Set pgz_sample_counter on the boot CPU so that we do not sample
9806 	 * any allocation until PGZ has been properly setup (in pgz_init()).
9807 	 */
9808 	*PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9809 #endif /* CONFIG_PROB_GZALLOC */
9810 
9811 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9812 	/*
9813 	 * Randomly assign zones to one of the 4 general submaps,
9814 	 * and pick whether they allocate from the begining
9815 	 * or the end of it.
9816 	 *
9817 	 * A lot of OOB exploitation relies on precise interleaving
9818 	 * of specific types in the heap.
9819 	 *
9820 	 * Woops, you can't guarantee that anymore.
9821 	 */
9822 	for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9823 		uint32_t r = zalloc_random_uniform32(0,
9824 		    ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9825 
9826 		zone_security_array[i].z_submap_from_end = (r & 1);
9827 		zone_security_array[i].z_submap_idx += (r >> 1);
9828 	}
9829 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9830 
9831 	thread_call_setup_with_options(&zone_expand_callout,
9832 	    zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9833 	    THREAD_CALL_OPTIONS_ONCE);
9834 
9835 	thread_call_setup_with_options(&zone_trim_callout,
9836 	    zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9837 	    THREAD_CALL_OPTIONS_ONCE);
9838 }
9839 
9840 #define ZONE_GUARD_SIZE                 (64UL << 10)
9841 
9842 __startup_func
9843 static void
zone_tunables_fixup(void)9844 zone_tunables_fixup(void)
9845 {
9846 	int wdt = 0;
9847 
9848 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9849 	if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9850 	    PE_parse_boot_argn("pgz1", NULL, 0)) {
9851 		/*
9852 		 * if pgz1= was used, but pgz= was not,
9853 		 * then the more specific pgz1 takes precedence.
9854 		 */
9855 		pgz_all = false;
9856 	}
9857 #endif
9858 
9859 	if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9860 		zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9861 	}
9862 	if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9863 	    !PE_parse_boot_argn("zet", NULL, 0)) {
9864 		zone_exhausted_timeout = -1;
9865 	}
9866 }
9867 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9868 
9869 __startup_func
9870 static void
zone_submap_init(mach_vm_offset_t * submap_min,zone_submap_idx_t idx,uint64_t zone_sub_map_numer,uint64_t * remaining_denom,vm_offset_t * remaining_size)9871 zone_submap_init(
9872 	mach_vm_offset_t       *submap_min,
9873 	zone_submap_idx_t       idx,
9874 	uint64_t                zone_sub_map_numer,
9875 	uint64_t               *remaining_denom,
9876 	vm_offset_t            *remaining_size)
9877 {
9878 	vm_map_create_options_t vmco;
9879 	vm_map_address_t addr;
9880 	vm_offset_t submap_start, submap_end;
9881 	vm_size_t submap_size;
9882 	vm_map_t  submap;
9883 	vm_prot_t prot = VM_PROT_DEFAULT;
9884 	vm_prot_t prot_max = VM_PROT_ALL;
9885 	kern_return_t kr;
9886 
9887 	submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9888 	    *remaining_denom);
9889 	submap_start = *submap_min;
9890 
9891 	if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9892 		vm_offset_t submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9893 		submap_start += submap_padding;
9894 		submap_size = pmap_ro_zone_align(submap_size);
9895 		assert(*remaining_size >= (submap_padding + submap_size));
9896 		*remaining_size -= submap_padding;
9897 		*submap_min = submap_start;
9898 	}
9899 
9900 	submap_end = submap_start + submap_size;
9901 	if (idx == Z_SUBMAP_IDX_VM) {
9902 		vm_packing_verify_range("vm_compressor",
9903 		    submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9904 		vm_packing_verify_range("vm_page",
9905 		    submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9906 	}
9907 
9908 	vmco = VM_MAP_CREATE_NEVER_FAULTS;
9909 	if (!zone_submap_is_sequestered(idx)) {
9910 		vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9911 	}
9912 
9913 	vm_map_will_allocate_early_map(&zone_submaps[idx]);
9914 	submap = kmem_suballoc(kernel_map, submap_min, submap_size, vmco,
9915 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
9916 	    VM_KERN_MEMORY_ZONE).kmr_submap;
9917 
9918 	if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9919 		zone_info.zi_ro_range.min_address = submap_start;
9920 		zone_info.zi_ro_range.max_address = submap_end;
9921 		prot_max = prot = VM_PROT_NONE;
9922 	}
9923 
9924 	addr = submap_start;
9925 	vm_object_t kobject = kernel_object_default;
9926 	kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9927 	    VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9928 	    kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9929 	if (kr != KERN_SUCCESS) {
9930 		panic("ksubmap[%s]: failed to make first entry (%d)",
9931 		    zone_submaps_names[idx], kr);
9932 	}
9933 
9934 	addr = submap_end - ZONE_GUARD_SIZE / 2;
9935 	kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9936 	    VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9937 	    kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9938 	if (kr != KERN_SUCCESS) {
9939 		panic("ksubmap[%s]: failed to make last entry (%d)",
9940 		    zone_submaps_names[idx], kr);
9941 	}
9942 
9943 #if DEBUG || DEVELOPMENT
9944 	printf("zone_init: map %-5s %p:%p (%u%c)\n",
9945 	    zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
9946 	    mach_vm_size_pretty(submap_size), mach_vm_size_unit(submap_size));
9947 #endif /* DEBUG || DEVELOPMENT */
9948 
9949 	zone_submaps[idx] = submap;
9950 	*submap_min       = submap_end;
9951 	*remaining_size  -= submap_size;
9952 	*remaining_denom -= zone_sub_map_numer;
9953 }
9954 
9955 static inline void
zone_pva_relocate(zone_pva_t * pva,uint32_t delta)9956 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
9957 {
9958 	if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
9959 		pva->packed_address += delta;
9960 	}
9961 }
9962 
9963 /*
9964  * Allocate metadata array and migrate bootstrap initial metadata and memory.
9965  */
9966 __startup_func
9967 static void
zone_metadata_init(void)9968 zone_metadata_init(void)
9969 {
9970 	vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
9971 	vm_map_entry_t first;
9972 
9973 	struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
9974 	vm_size_t early_sz;
9975 	vm_offset_t reloc_base;
9976 
9977 	/*
9978 	 * Step 1: Allocate the metadata + bitmaps range
9979 	 *
9980 	 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
9981 	 * of physical memory (16M per 1G).
9982 	 *
9983 	 * Let's preallocate for the worst to avoid weird panics.
9984 	 */
9985 	vm_map_will_allocate_early_map(&zone_meta_map);
9986 	meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
9987 	    zone_meta_size + zone_bits_size + zone_xtra_size,
9988 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9989 	    VM_KERN_MEMORY_ZONE, &zone_meta_map);
9990 	meta_r.min_address += ZONE_GUARD_SIZE;
9991 	meta_r.max_address -= ZONE_GUARD_SIZE;
9992 	if (zone_xtra_size) {
9993 		xtra_r.max_address  = meta_r.max_address;
9994 		meta_r.max_address -= zone_xtra_size;
9995 		xtra_r.min_address  = meta_r.max_address;
9996 	} else {
9997 		xtra_r.min_address  = xtra_r.max_address = 0;
9998 	}
9999 	bits_r.max_address  = meta_r.max_address;
10000 	meta_r.max_address -= zone_bits_size;
10001 	bits_r.min_address  = meta_r.max_address;
10002 
10003 #if DEBUG || DEVELOPMENT
10004 	printf("zone_init: metadata  %p:%p (%u%c)\n",
10005 	    (void *)meta_r.min_address, (void *)meta_r.max_address,
10006 	    mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
10007 	    mach_vm_size_unit(mach_vm_range_size(&meta_r)));
10008 	printf("zone_init: metabits  %p:%p (%u%c)\n",
10009 	    (void *)bits_r.min_address, (void *)bits_r.max_address,
10010 	    mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
10011 	    mach_vm_size_unit(mach_vm_range_size(&bits_r)));
10012 	printf("zone_init: extra     %p:%p (%u%c)\n",
10013 	    (void *)xtra_r.min_address, (void *)xtra_r.max_address,
10014 	    mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
10015 	    mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
10016 #endif /* DEBUG || DEVELOPMENT */
10017 
10018 	bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
10019 	bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
10020 
10021 	/*
10022 	 * Step 2: Install new ranges.
10023 	 *         Relocate metadata and bits.
10024 	 */
10025 	early_r  = zone_info.zi_map_range;
10026 	early_sz = mach_vm_range_size(&early_r);
10027 
10028 	zone_info.zi_map_range  = zone_map_range;
10029 	zone_info.zi_meta_range = meta_r;
10030 	zone_info.zi_bits_range = bits_r;
10031 	zone_info.zi_xtra_range = xtra_r;
10032 	zone_info.zi_meta_base  = (struct zone_page_metadata *)meta_r.min_address -
10033 	    zone_pva_from_addr(zone_map_range.min_address).packed_address;
10034 
10035 	vm_map_lock(vm_map);
10036 	first = vm_map_first_entry(vm_map);
10037 	reloc_base = first->vme_end;
10038 	first->vme_end += early_sz;
10039 	vm_map->size += early_sz;
10040 	vm_map_unlock(vm_map);
10041 
10042 	struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
10043 	struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
10044 	vm_offset_t reloc_delta = reloc_base - early_r.min_address;
10045 	/* this needs to sign extend */
10046 	uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
10047 
10048 	zone_meta_populate(reloc_base, early_sz);
10049 	memcpy(new_meta, early_meta,
10050 	    atop(early_sz) * sizeof(struct zone_page_metadata));
10051 	for (uint32_t i = 0; i < atop(early_sz); i++) {
10052 		zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
10053 		zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
10054 	}
10055 
10056 	static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
10057 	static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
10058 
10059 	for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
10060 		zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
10061 		zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
10062 	}
10063 
10064 	zba_populate(0, false);
10065 	memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
10066 	zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
10067 
10068 	/*
10069 	 * Step 3: Relocate the boostrap VM structs
10070 	 *         (including rewriting their content).
10071 	 */
10072 
10073 #if __x86_64__
10074 	kernel_memory_populate(reloc_base, early_sz,
10075 	    KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL,
10076 	    VM_KERN_MEMORY_OSFMK);
10077 	__nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
10078 #else
10079 	for (vm_address_t addr = early_r.min_address;
10080 	    addr < early_r.max_address; addr += PAGE_SIZE) {
10081 		pmap_paddr_t pa = kvtophys(trunc_page(addr));
10082 		__assert_only kern_return_t kr;
10083 
10084 		unsigned int pmap_flags = 0;
10085 
10086 
10087 		kr = pmap_enter_options_addr(kernel_pmap, addr + reloc_delta,
10088 		    pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, pmap_flags, TRUE,
10089 		    0, NULL);
10090 		assert(kr == KERN_SUCCESS);
10091 	}
10092 #endif
10093 
10094 #if KASAN
10095 	kasan_notify_address(reloc_base, early_sz);
10096 #if KASAN_TBI
10097 	kasan_tbi_copy_tags(reloc_base, early_r.min_address, early_sz);
10098 #endif /* KASAN_TBI */
10099 #endif /* KASAN */
10100 
10101 	vm_map_relocate_early_maps(reloc_delta);
10102 
10103 	for (uint32_t i = 0; i < atop(early_sz); i++) {
10104 		zone_id_t zid = new_meta[i].zm_index;
10105 		zone_t z = &zone_array[zid];
10106 		vm_size_t esize = zone_elem_outer_size(z);
10107 		vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
10108 		vm_address_t addr;
10109 
10110 		if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
10111 			continue;
10112 		}
10113 
10114 		for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
10115 			if (zone_meta_is_free(&new_meta[i], eidx)) {
10116 				continue;
10117 			}
10118 
10119 			addr = vm_memtag_fixup_ptr(base + eidx * esize);
10120 #if KASAN_CLASSIC
10121 			kasan_alloc(addr,
10122 			    zone_elem_inner_size(z), zone_elem_inner_size(z),
10123 			    zone_elem_redzone(z), false,
10124 			    __builtin_frame_address(0));
10125 #endif
10126 			vm_map_relocate_early_elem(zid, addr, reloc_delta);
10127 		}
10128 	}
10129 
10130 #if !__x86_64__
10131 	pmap_remove(kernel_pmap, early_r.min_address, early_r.max_address);
10132 #endif
10133 }
10134 
10135 __startup_data
10136 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
10137 #if ZSECURITY_CONFIG(READ_ONLY)
10138 	[Z_SUBMAP_IDX_VM]               = 15,
10139 	[Z_SUBMAP_IDX_READ_ONLY]        =  5,
10140 #else
10141 	[Z_SUBMAP_IDX_VM]               = 20,
10142 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10143 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10144 	[Z_SUBMAP_IDX_GENERAL_0]        = 15,
10145 	[Z_SUBMAP_IDX_GENERAL_1]        = 15,
10146 	[Z_SUBMAP_IDX_GENERAL_2]        = 15,
10147 	[Z_SUBMAP_IDX_GENERAL_3]        = 15,
10148 	[Z_SUBMAP_IDX_DATA]             = 20,
10149 #else
10150 	[Z_SUBMAP_IDX_GENERAL_0]        = 60,
10151 	[Z_SUBMAP_IDX_DATA]             = 20,
10152 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
10153 };
10154 
10155 __startup_func
10156 static inline uint16_t
zone_submap_ratios_denom(void)10157 zone_submap_ratios_denom(void)
10158 {
10159 	uint16_t denom = 0;
10160 
10161 	for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10162 		denom += submap_ratios[idx];
10163 	}
10164 
10165 	assert(denom == 100);
10166 
10167 	return denom;
10168 }
10169 
10170 __startup_func
10171 static inline vm_offset_t
zone_restricted_va_max(void)10172 zone_restricted_va_max(void)
10173 {
10174 	vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
10175 	vm_offset_t vm_page_max    = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
10176 
10177 	return trunc_page(MIN(compressor_max, vm_page_max));
10178 }
10179 
10180 __startup_func
10181 static void
zone_set_map_sizes(void)10182 zone_set_map_sizes(void)
10183 {
10184 	vm_size_t zsize;
10185 	vm_size_t zsizearg;
10186 
10187 	/*
10188 	 * Compute the physical limits for the zone map
10189 	 */
10190 
10191 	if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
10192 		zsize = zsizearg * (1024ULL * 1024);
10193 	} else {
10194 		/* Set target zone size as 1/4 of physical memory */
10195 		zsize = (vm_size_t)(sane_size >> 2);
10196 		zsize += zsize >> 1;
10197 	}
10198 
10199 	if (zsize < CONFIG_ZONE_MAP_MIN) {
10200 		zsize = CONFIG_ZONE_MAP_MIN;   /* Clamp to min */
10201 	}
10202 	if (zsize > sane_size >> 1) {
10203 		zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
10204 	}
10205 	if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
10206 		/* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
10207 		printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
10208 		    (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
10209 		zsize = ZONE_MAP_MAX;
10210 	}
10211 
10212 	zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
10213 
10214 
10215 	/*
10216 	 * Declare restrictions on zone max
10217 	 */
10218 	vm_offset_t vm_submap_size = round_page(
10219 		(submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10220 		zone_submap_ratios_denom());
10221 
10222 #if CONFIG_PROB_GZALLOC
10223 	vm_submap_size += pgz_get_size();
10224 #endif /* CONFIG_PROB_GZALLOC */
10225 	if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10226 	    &zone_map_range.min_address)) {
10227 		zone_map_range.min_address = 0;
10228 	}
10229 
10230 	zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10231 	    sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10232 
10233 	static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10234 	    ZBA_PTR_MASK + 1);
10235 	zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10236 	    (CHAR_BIT * KALLOC_MINSIZE));
10237 
10238 #if VM_TAG_SIZECLASSES
10239 	if (zone_tagging_on) {
10240 		zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10241 	}
10242 	if (zba_xtra_shift) {
10243 		/*
10244 		 * if we need the extra space range, then limit the size of the
10245 		 * bitmaps to something reasonable instead of a theoretical
10246 		 * worst case scenario of all zones being for the smallest
10247 		 * allocation granule, in order to avoid fake VA pressure on
10248 		 * other parts of the system.
10249 		 */
10250 		zone_bits_size = round_page(zone_bits_size / 8);
10251 		zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10252 	}
10253 #endif /* VM_TAG_SIZECLASSES */
10254 }
10255 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
10256 
10257 /*
10258  * Can't use zone_info.zi_map_range at this point as it is being used to
10259  * store the range of early pmap memory that was stolen to bootstrap the
10260  * necessary VM zones.
10261  */
10262 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10263 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10264 	return zone_meta_size + zone_bits_size + zone_xtra_size;
10265 });
10266 
10267 /*
10268  * Global initialization of Zone Allocator.
10269  * Runs after zone_bootstrap.
10270  */
10271 __startup_func
10272 static void
zone_init(void)10273 zone_init(void)
10274 {
10275 	vm_size_t           remaining_size = ZONE_MAP_VA_SIZE;
10276 	mach_vm_offset_t    submap_min = 0;
10277 	uint64_t            denom = zone_submap_ratios_denom();
10278 	/*
10279 	 * And now allocate the various pieces of VA and submaps.
10280 	 */
10281 
10282 	submap_min = zone_map_range.min_address;
10283 
10284 #if CONFIG_PROB_GZALLOC
10285 	vm_size_t pgz_size = pgz_get_size();
10286 
10287 	vm_map_will_allocate_early_map(&pgz_submap);
10288 	zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10289 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10290 	    VM_KERN_MEMORY_ZONE, &pgz_submap);
10291 
10292 	submap_min     += pgz_size;
10293 	remaining_size -= pgz_size;
10294 #if DEBUG || DEVELOPMENT
10295 	printf("zone_init: pgzalloc  %p:%p (%u%c) [%d slots]\n",
10296 	    (void *)zone_info.zi_pgz_range.min_address,
10297 	    (void *)zone_info.zi_pgz_range.max_address,
10298 	    mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10299 	    pgz_slots);
10300 #endif /* DEBUG || DEVELOPMENT */
10301 #endif /* CONFIG_PROB_GZALLOC */
10302 
10303 	/*
10304 	 * Allocate the submaps
10305 	 */
10306 	for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10307 		if (submap_ratios[idx] == 0) {
10308 			zone_submaps[idx] = VM_MAP_NULL;
10309 		} else {
10310 			zone_submap_init(&submap_min, idx, submap_ratios[idx],
10311 			    &denom, &remaining_size);
10312 		}
10313 	}
10314 
10315 	zone_metadata_init();
10316 
10317 #if VM_TAG_SIZECLASSES
10318 	if (zone_tagging_on) {
10319 		vm_allocation_zones_init();
10320 	}
10321 #endif /* VM_TAG_SIZECLASSES */
10322 
10323 	zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10324 	    ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10325 
10326 	(void)zone_create_ext("vm.permanent", 1, kma_flags | ZC_NOTBITAG,
10327 	    ZONE_ID_PERMANENT, ^(zone_t z) {
10328 		z->z_permanent = true;
10329 		z->z_elem_size = 1;
10330 	});
10331 	(void)zone_create_ext("vm.permanent.percpu", 1,
10332 	    kma_flags | ZC_PERCPU | ZC_NOTBITAG, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10333 		z->z_permanent = true;
10334 		z->z_elem_size = 1;
10335 	});
10336 
10337 	zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10338 	    zc_mag_size() * sizeof(vm_offset_t),
10339 	    ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10340 	zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10341 
10342 	/*
10343 	 * Now migrate the startup statistics into their final storage,
10344 	 * and enable logging for early zones (that zone_create_ext() skipped).
10345 	 */
10346 	int cpu = cpu_number();
10347 	zone_index_foreach(idx) {
10348 		zone_t tz = &zone_array[idx];
10349 
10350 		if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10351 			zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10352 
10353 			*zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10354 			tz->z_stats = zs;
10355 		}
10356 		if (tz->z_self == tz) {
10357 #if ZALLOC_ENABLE_LOGGING
10358 			zone_setup_logging(tz);
10359 #endif /* ZALLOC_ENABLE_LOGGING */
10360 #if KASAN_TBI
10361 			zone_setup_kasan_logging(tz);
10362 #endif /* KASAN_TBI */
10363 		}
10364 	}
10365 }
10366 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10367 
10368 void
zalloc_iokit_lockdown(void)10369 zalloc_iokit_lockdown(void)
10370 {
10371 	zone_share_always = false;
10372 }
10373 
10374 void
zalloc_first_proc_made(void)10375 zalloc_first_proc_made(void)
10376 {
10377 	zone_caching_disabled = 0;
10378 	zone_early_thres_mul = 1;
10379 }
10380 
10381 __startup_func
10382 vm_offset_t
zone_early_mem_init(vm_size_t size)10383 zone_early_mem_init(vm_size_t size)
10384 {
10385 	vm_offset_t mem;
10386 
10387 	assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10388 
10389 	/*
10390 	 * The zone that is used early to bring up the VM is stolen here.
10391 	 *
10392 	 * When the zone subsystem is actually initialized,
10393 	 * zone_metadata_init() will be called, and those pages
10394 	 * and the elements they contain, will be relocated into
10395 	 * the VM submap (even for architectures when those zones
10396 	 * do not live there).
10397 	 */
10398 #if __x86_64__
10399 	assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10400 	mem = (vm_offset_t)zone_early_pages_to_cram;
10401 #else
10402 	mem = (vm_offset_t)pmap_steal_zone_memory(size, PAGE_SIZE);
10403 #endif
10404 
10405 	zone_info.zi_meta_base = zone_early_meta_array_startup -
10406 	    zone_pva_from_addr(mem).packed_address;
10407 	zone_info.zi_map_range.min_address = mem;
10408 	zone_info.zi_map_range.max_address = mem + size;
10409 
10410 	zone_info.zi_bits_range = (struct mach_vm_range){
10411 		.min_address = (mach_vm_offset_t)zba_chunk_startup,
10412 		.max_address = (mach_vm_offset_t)zba_chunk_startup +
10413 	    sizeof(zba_chunk_startup),
10414 	};
10415 
10416 	zba_meta()->zbam_left  = 1;
10417 	zba_meta()->zbam_right = 1;
10418 	zba_init_chunk(0, false);
10419 
10420 	return mem;
10421 }
10422 
10423 #endif /* !ZALLOC_TEST */
10424 #pragma mark - tests
10425 #if DEBUG || DEVELOPMENT
10426 
10427 /*
10428  * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10429  * thread goes through at a time.
10430  *
10431  * Or we can end up with multiple test zones (if a second zinit() comes through
10432  * before zdestroy()), which could lead us to run out of zones.
10433  */
10434 static bool any_zone_test_running = FALSE;
10435 
10436 static uintptr_t *
zone_copy_allocations(zone_t z,uintptr_t * elems,zone_pva_t page_index)10437 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10438 {
10439 	vm_offset_t elem_size = zone_elem_outer_size(z);
10440 	vm_offset_t base;
10441 	struct zone_page_metadata *meta;
10442 
10443 	while (!zone_pva_is_null(page_index)) {
10444 		base  = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10445 		meta  = zone_pva_to_meta(page_index);
10446 
10447 		if (meta->zm_inline_bitmap) {
10448 			for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10449 				uint32_t map = meta[i].zm_bitmap;
10450 
10451 				for (; map; map &= map - 1) {
10452 					*elems++ = INSTANCE_PUT(base +
10453 					    elem_size * __builtin_clz(map));
10454 				}
10455 				base += elem_size * 32;
10456 			}
10457 		} else {
10458 			uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10459 			bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10460 			for (size_t i = 0; i < (1u << order); i++) {
10461 				uint64_t map = bits[i];
10462 
10463 				for (; map; map &= map - 1) {
10464 					*elems++ = INSTANCE_PUT(base +
10465 					    elem_size * __builtin_clzll(map));
10466 				}
10467 				base += elem_size * 64;
10468 			}
10469 		}
10470 
10471 		page_index = meta->zm_page_next;
10472 	}
10473 	return elems;
10474 }
10475 
10476 kern_return_t
zone_leaks(const char * zoneName,uint32_t nameLen,leak_site_proc proc)10477 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10478 {
10479 	zone_t        zone = NULL;
10480 	uintptr_t *   array;
10481 	uintptr_t *   next;
10482 	uintptr_t     element;
10483 	uint32_t      idx, count, found;
10484 	uint32_t      nobtcount;
10485 	uint32_t      elemSize;
10486 	size_t        maxElems;
10487 
10488 	zone_foreach(z) {
10489 		if (!z->z_name) {
10490 			continue;
10491 		}
10492 		if (!strncmp(zoneName, z->z_name, nameLen)) {
10493 			zone = z;
10494 			break;
10495 		}
10496 	}
10497 	if (zone == NULL) {
10498 		return KERN_INVALID_NAME;
10499 	}
10500 
10501 	elemSize = (uint32_t)zone_elem_inner_size(zone);
10502 	maxElems = (zone->z_elems_avail + 1) & ~1ul;
10503 
10504 	array = kalloc_type_tag(vm_offset_t, maxElems, VM_KERN_MEMORY_DIAG);
10505 	if (array == NULL) {
10506 		return KERN_RESOURCE_SHORTAGE;
10507 	}
10508 
10509 	zone_lock(zone);
10510 
10511 	next = array;
10512 	next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10513 	next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10514 	count = (uint32_t)(next - array);
10515 
10516 	zone_unlock(zone);
10517 
10518 	zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10519 	assert(found <= count);
10520 
10521 	for (idx = 0; idx < count; idx++) {
10522 		element = array[idx];
10523 		if (kInstanceFlagReferenced & element) {
10524 			continue;
10525 		}
10526 		element = INSTANCE_PUT(element) & ~kInstanceFlags;
10527 	}
10528 
10529 #if ZALLOC_ENABLE_LOGGING
10530 	if (zone->z_btlog && !corruption_debug_flag) {
10531 		// btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10532 		static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10533 		btlog_copy_backtraces_for_elements(zone->z_btlog,
10534 		    (vm_address_t *)array, &count, elemSize, proc);
10535 	}
10536 #endif /* ZALLOC_ENABLE_LOGGING */
10537 
10538 	for (nobtcount = idx = 0; idx < count; idx++) {
10539 		element = array[idx];
10540 		if (!element) {
10541 			continue;
10542 		}
10543 		if (kInstanceFlagReferenced & element) {
10544 			continue;
10545 		}
10546 		nobtcount++;
10547 	}
10548 	if (nobtcount) {
10549 		proc(nobtcount, elemSize, BTREF_NULL);
10550 	}
10551 
10552 	kfree_type(vm_offset_t, maxElems, array);
10553 	return KERN_SUCCESS;
10554 }
10555 
10556 static int
zone_ro_basic_test_run(__unused int64_t in,int64_t * out)10557 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10558 {
10559 	zone_security_flags_t zsflags;
10560 	uint32_t x = 4;
10561 	uint32_t *test_ptr;
10562 
10563 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10564 		printf("zone_ro_basic_test: Test already running.\n");
10565 		return EALREADY;
10566 	}
10567 
10568 	zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10569 
10570 	for (int i = 0; i < 3; i++) {
10571 #if ZSECURITY_CONFIG(READ_ONLY)
10572 		/* Basic Test: Create int zone, zalloc int, modify value, free int */
10573 		printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10574 		printf("zone_ro_basic_test: create a sub-page size zone\n");
10575 
10576 		printf("zone_ro_basic_test: verify flags were set\n");
10577 		assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10578 
10579 		printf("zone_ro_basic_test: zalloc an element\n");
10580 		test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10581 		assert(test_ptr);
10582 
10583 		printf("zone_ro_basic_test: verify we can't write to it\n");
10584 		assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10585 
10586 		x = 4;
10587 		printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10588 		zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10589 		assert(test_ptr);
10590 		assert(*(uint32_t*)test_ptr == x);
10591 
10592 		x = 5;
10593 		printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10594 		zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10595 		assert(test_ptr);
10596 		assert(*(uint32_t*)test_ptr == x);
10597 
10598 		printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10599 		assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10600 
10601 		printf("zone_ro_basic_test: free elem\n");
10602 		zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10603 		assert(!test_ptr);
10604 #else
10605 		printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10606 
10607 		printf("zone_ro_basic_test: verify flags were set\n");
10608 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10609 
10610 		printf("zone_ro_basic_test: zalloc an element\n");
10611 		test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10612 		assert(test_ptr);
10613 
10614 		x = 4;
10615 		printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10616 		zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10617 		assert(test_ptr);
10618 		assert(*(uint32_t*)test_ptr == x);
10619 
10620 		x = 5;
10621 		printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10622 		zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10623 		assert(test_ptr);
10624 		assert(*(uint32_t*)test_ptr == x);
10625 
10626 		printf("zone_ro_basic_test: free elem\n");
10627 		zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10628 		assert(!test_ptr);
10629 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10630 	}
10631 
10632 	printf("zone_ro_basic_test: garbage collection\n");
10633 	zone_gc(ZONE_GC_DRAIN);
10634 
10635 	printf("zone_ro_basic_test: Test passed\n");
10636 
10637 	*out = 1;
10638 	os_atomic_store(&any_zone_test_running, false, relaxed);
10639 	return 0;
10640 }
10641 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10642 
10643 static int
zone_basic_test_run(__unused int64_t in,int64_t * out)10644 zone_basic_test_run(__unused int64_t in, int64_t *out)
10645 {
10646 	static zone_t test_zone_ptr = NULL;
10647 
10648 	unsigned int i = 0, max_iter = 5;
10649 	void * test_ptr;
10650 	zone_t test_zone;
10651 	int rc = 0;
10652 
10653 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10654 		printf("zone_basic_test: Test already running.\n");
10655 		return EALREADY;
10656 	}
10657 
10658 	printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10659 
10660 	/* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10661 	do {
10662 		test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10663 		assert(test_zone);
10664 
10665 #if KASAN_CLASSIC
10666 		if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10667 #else
10668 		if (test_zone->z_elems_free != 0)
10669 #endif
10670 		{
10671 			printf("zone_basic_test: free count is not zero\n");
10672 			rc = EIO;
10673 			goto out;
10674 		}
10675 
10676 		if (test_zone_ptr == NULL) {
10677 			/* Stash the zone pointer returned on the fist zinit */
10678 			printf("zone_basic_test: zone created for the first time\n");
10679 			test_zone_ptr = test_zone;
10680 		} else if (test_zone != test_zone_ptr) {
10681 			printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10682 			rc = EIO;
10683 			goto out;
10684 		}
10685 
10686 		test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10687 		zfree(test_zone, test_ptr);
10688 
10689 		zdestroy(test_zone);
10690 		i++;
10691 
10692 		printf("zone_basic_test: Iteration %d successful\n", i);
10693 	} while (i < max_iter);
10694 
10695 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
10696 	/* test Z_VA_SEQUESTER */
10697 	{
10698 		zone_t test_pcpu_zone;
10699 		kern_return_t kr;
10700 		int idx, num_allocs = 8;
10701 		vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
10702 		void *allocs[num_allocs];
10703 		void **allocs_pcpu;
10704 		vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
10705 
10706 		test_zone = zone_create("test_zone_sysctl", elem_size,
10707 		    ZC_DESTRUCTIBLE);
10708 		assert(test_zone);
10709 
10710 		test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
10711 		    ZC_DESTRUCTIBLE | ZC_PERCPU);
10712 		assert(test_pcpu_zone);
10713 
10714 		for (idx = 0; idx < num_allocs; idx++) {
10715 			allocs[idx] = zalloc(test_zone);
10716 			assert(NULL != allocs[idx]);
10717 			printf("alloc[%d] %p\n", idx, allocs[idx]);
10718 		}
10719 		for (idx = 0; idx < num_allocs; idx++) {
10720 			zfree(test_zone, allocs[idx]);
10721 		}
10722 		assert(!zone_pva_is_null(test_zone->z_pageq_empty));
10723 
10724 		kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
10725 		    KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
10726 		assert(kr == KERN_SUCCESS);
10727 
10728 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10729 			allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10730 			    Z_WAITOK | Z_ZERO);
10731 			assert(NULL != allocs_pcpu[idx]);
10732 		}
10733 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10734 			zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10735 		}
10736 		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10737 
10738 		printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10739 		    vm_page_wire_count, vm_page_free_count,
10740 		    100L * phys_pages / zone_pages_wired_max);
10741 		zone_gc(ZONE_GC_DRAIN);
10742 		printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
10743 		    vm_page_wire_count, vm_page_free_count,
10744 		    100L * phys_pages / zone_pages_wired_max);
10745 
10746 		unsigned int allva = 0;
10747 
10748 		zone_foreach(z) {
10749 			zone_lock(z);
10750 			allva += z->z_wired_cur;
10751 			if (zone_pva_is_null(z->z_pageq_va)) {
10752 				zone_unlock(z);
10753 				continue;
10754 			}
10755 			unsigned count = 0;
10756 			uint64_t size;
10757 			zone_pva_t pg = z->z_pageq_va;
10758 			struct zone_page_metadata *page_meta;
10759 			while (pg.packed_address) {
10760 				page_meta = zone_pva_to_meta(pg);
10761 				count += z->z_percpu ? 1 : z->z_chunk_pages;
10762 				if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
10763 					count -= page_meta->zm_page_index;
10764 				}
10765 				pg = page_meta->zm_page_next;
10766 			}
10767 			size = zone_size_wired(z);
10768 			if (!size) {
10769 				size = 1;
10770 			}
10771 			printf("%s%s: seq %d, res %d, %qd %%\n",
10772 			    zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
10773 			    z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
10774 			zone_unlock(z);
10775 		}
10776 
10777 		printf("total va: %d\n", allva);
10778 
10779 		assert(zone_pva_is_null(test_zone->z_pageq_empty));
10780 		assert(zone_pva_is_null(test_zone->z_pageq_partial));
10781 		assert(!zone_pva_is_null(test_zone->z_pageq_va));
10782 		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10783 		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
10784 		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
10785 
10786 		for (idx = 0; idx < num_allocs; idx++) {
10787 			assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
10788 		}
10789 
10790 		/* make sure the zone is still usable after a GC */
10791 
10792 		for (idx = 0; idx < num_allocs; idx++) {
10793 			allocs[idx] = zalloc(test_zone);
10794 			assert(allocs[idx]);
10795 			printf("alloc[%d] %p\n", idx, allocs[idx]);
10796 		}
10797 		for (idx = 0; idx < num_allocs; idx++) {
10798 			zfree(test_zone, allocs[idx]);
10799 		}
10800 
10801 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10802 			allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
10803 			    Z_WAITOK | Z_ZERO);
10804 			assert(NULL != allocs_pcpu[idx]);
10805 		}
10806 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
10807 			zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
10808 		}
10809 
10810 		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
10811 
10812 		kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
10813 
10814 		zdestroy(test_zone);
10815 		zdestroy(test_pcpu_zone);
10816 	}
10817 #endif /* KASAN_CLASSIC */
10818 
10819 	printf("zone_basic_test: Test passed\n");
10820 
10821 
10822 	*out = 1;
10823 out:
10824 	os_atomic_store(&any_zone_test_running, false, relaxed);
10825 	return rc;
10826 }
10827 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
10828 
10829 struct zone_stress_obj {
10830 	TAILQ_ENTRY(zone_stress_obj) zso_link;
10831 };
10832 
10833 struct zone_stress_ctx {
10834 	thread_t  zsc_leader;
10835 	lck_mtx_t zsc_lock;
10836 	zone_t    zsc_zone;
10837 	uint64_t  zsc_end;
10838 	uint32_t  zsc_workers;
10839 };
10840 
10841 static void
zone_stress_worker(void * arg,wait_result_t __unused wr)10842 zone_stress_worker(void *arg, wait_result_t __unused wr)
10843 {
10844 	struct zone_stress_ctx *ctx = arg;
10845 	bool leader = ctx->zsc_leader == current_thread();
10846 	TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
10847 	struct zone_bool_gen bg = { };
10848 	struct zone_stress_obj *obj;
10849 	uint32_t allocs = 0;
10850 
10851 	random_bool_init(&bg.zbg_bg);
10852 
10853 	do {
10854 		for (int i = 0; i < 2000; i++) {
10855 			uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
10856 			    bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
10857 			switch (what) {
10858 			case 0:
10859 			case 1:
10860 				if (allocs < 10000) {
10861 					obj = zalloc(ctx->zsc_zone);
10862 					TAILQ_INSERT_HEAD(&head, obj, zso_link);
10863 					allocs++;
10864 				}
10865 				break;
10866 			case 2:
10867 			case 3:
10868 				if (allocs < 10000) {
10869 					obj = zalloc(ctx->zsc_zone);
10870 					TAILQ_INSERT_TAIL(&head, obj, zso_link);
10871 					allocs++;
10872 				}
10873 				break;
10874 			case 4:
10875 				if (leader) {
10876 					zone_gc(ZONE_GC_DRAIN);
10877 				}
10878 				break;
10879 			case 5:
10880 			case 6:
10881 				if (!TAILQ_EMPTY(&head)) {
10882 					obj = TAILQ_FIRST(&head);
10883 					TAILQ_REMOVE(&head, obj, zso_link);
10884 					zfree(ctx->zsc_zone, obj);
10885 					allocs--;
10886 				}
10887 				break;
10888 			case 7:
10889 				if (!TAILQ_EMPTY(&head)) {
10890 					obj = TAILQ_LAST(&head, zone_stress_head);
10891 					TAILQ_REMOVE(&head, obj, zso_link);
10892 					zfree(ctx->zsc_zone, obj);
10893 					allocs--;
10894 				}
10895 				break;
10896 			}
10897 		}
10898 	} while (mach_absolute_time() < ctx->zsc_end);
10899 
10900 	while (!TAILQ_EMPTY(&head)) {
10901 		obj = TAILQ_FIRST(&head);
10902 		TAILQ_REMOVE(&head, obj, zso_link);
10903 		zfree(ctx->zsc_zone, obj);
10904 	}
10905 
10906 	lck_mtx_lock(&ctx->zsc_lock);
10907 	if (--ctx->zsc_workers == 0) {
10908 		thread_wakeup(ctx);
10909 	} else if (leader) {
10910 		while (ctx->zsc_workers) {
10911 			lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
10912 			    THREAD_UNINT);
10913 		}
10914 	}
10915 	lck_mtx_unlock(&ctx->zsc_lock);
10916 
10917 	if (!leader) {
10918 		thread_terminate_self();
10919 		__builtin_unreachable();
10920 	}
10921 }
10922 
10923 static int
zone_stress_test_run(__unused int64_t in,int64_t * out)10924 zone_stress_test_run(__unused int64_t in, int64_t *out)
10925 {
10926 	struct zone_stress_ctx ctx = {
10927 		.zsc_leader  = current_thread(),
10928 		.zsc_workers = 3,
10929 	};
10930 	kern_return_t kr;
10931 	thread_t th;
10932 
10933 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10934 		printf("zone_stress_test: Test already running.\n");
10935 		return EALREADY;
10936 	}
10937 
10938 	lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
10939 	ctx.zsc_zone = zone_create("test_zone_344", 344,
10940 	    ZC_DESTRUCTIBLE | ZC_NOCACHING);
10941 	assert(ctx.zsc_zone->z_chunk_pages > 1);
10942 
10943 	clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
10944 
10945 	printf("zone_stress_test: Starting (leader %p)\n", current_thread());
10946 
10947 	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
10948 
10949 	for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
10950 		kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
10951 		    BASEPRI_DEFAULT, &th);
10952 		if (kr == KERN_SUCCESS) {
10953 			printf("zone_stress_test: thread %d: %p\n", i, th);
10954 			thread_deallocate(th);
10955 		} else {
10956 			ctx.zsc_workers--;
10957 		}
10958 	}
10959 
10960 	zone_stress_worker(&ctx, 0);
10961 
10962 	lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
10963 
10964 	zdestroy(ctx.zsc_zone);
10965 
10966 	printf("zone_stress_test: Done\n");
10967 
10968 	*out = 1;
10969 	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
10970 	os_atomic_store(&any_zone_test_running, false, relaxed);
10971 	return 0;
10972 }
10973 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
10974 
10975 struct zone_gc_stress_obj {
10976 	STAILQ_ENTRY(zone_gc_stress_obj) zgso_link;
10977 	uintptr_t                        zgso_pad[63];
10978 };
10979 STAILQ_HEAD(zone_gc_stress_head, zone_gc_stress_obj);
10980 
10981 #define ZONE_GC_OBJ_PER_PAGE  (PAGE_SIZE / sizeof(struct zone_gc_stress_obj))
10982 
10983 KALLOC_TYPE_DEFINE(zone_gc_stress_zone, struct zone_gc_stress_obj, KT_DEFAULT);
10984 
10985 struct zone_gc_stress_ctx {
10986 	bool      zgsc_done;
10987 	lck_mtx_t zgsc_lock;
10988 	zone_t    zgsc_zone;
10989 	uint64_t  zgsc_end;
10990 	uint32_t  zgsc_workers;
10991 };
10992 
10993 static void
zone_gc_stress_test_alloc_n(struct zone_gc_stress_head * head,size_t n)10994 zone_gc_stress_test_alloc_n(struct zone_gc_stress_head *head, size_t n)
10995 {
10996 	struct zone_gc_stress_obj *obj;
10997 
10998 	for (size_t i = 0; i < n; i++) {
10999 		obj = zalloc_flags(zone_gc_stress_zone, Z_WAITOK);
11000 		STAILQ_INSERT_TAIL(head, obj, zgso_link);
11001 	}
11002 }
11003 
11004 static void
zone_gc_stress_test_free_n(struct zone_gc_stress_head * head)11005 zone_gc_stress_test_free_n(struct zone_gc_stress_head *head)
11006 {
11007 	struct zone_gc_stress_obj *obj;
11008 
11009 	while ((obj = STAILQ_FIRST(head))) {
11010 		STAILQ_REMOVE_HEAD(head, zgso_link);
11011 		zfree(zone_gc_stress_zone, obj);
11012 	}
11013 }
11014 
11015 __dead2
11016 static void
zone_gc_stress_worker(void * arg,wait_result_t __unused wr)11017 zone_gc_stress_worker(void *arg, wait_result_t __unused wr)
11018 {
11019 	struct zone_gc_stress_ctx *ctx = arg;
11020 	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11021 
11022 	while (!ctx->zgsc_done) {
11023 		zone_gc_stress_test_alloc_n(&head, ZONE_GC_OBJ_PER_PAGE * 4);
11024 		zone_gc_stress_test_free_n(&head);
11025 	}
11026 
11027 	lck_mtx_lock(&ctx->zgsc_lock);
11028 	if (--ctx->zgsc_workers == 0) {
11029 		thread_wakeup(ctx);
11030 	}
11031 	lck_mtx_unlock(&ctx->zgsc_lock);
11032 
11033 	thread_terminate_self();
11034 	__builtin_unreachable();
11035 }
11036 
11037 static int
zone_gc_stress_test_run(__unused int64_t in,int64_t * out)11038 zone_gc_stress_test_run(__unused int64_t in, int64_t *out)
11039 {
11040 	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11041 	struct zone_gc_stress_ctx ctx = {
11042 		.zgsc_workers = 3,
11043 	};
11044 	kern_return_t kr;
11045 	thread_t th;
11046 
11047 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
11048 		printf("zone_gc_stress_test: Test already running.\n");
11049 		return EALREADY;
11050 	}
11051 
11052 	lck_mtx_init(&ctx.zgsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
11053 	lck_mtx_lock(&ctx.zgsc_lock);
11054 
11055 	printf("zone_gc_stress_test: Starting (leader %p)\n", current_thread());
11056 
11057 	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
11058 
11059 	for (uint32_t i = 0; i < ctx.zgsc_workers; i++) {
11060 		kr = kernel_thread_start_priority(zone_gc_stress_worker, &ctx,
11061 		    BASEPRI_DEFAULT, &th);
11062 		if (kr == KERN_SUCCESS) {
11063 			printf("zone_gc_stress_test: thread %d: %p\n", i, th);
11064 			thread_deallocate(th);
11065 		} else {
11066 			ctx.zgsc_workers--;
11067 		}
11068 	}
11069 
11070 	for (uint64_t i = 0; i < in; i++) {
11071 		size_t count = zc_mag_size() * zc_free_batch_size() * 10;
11072 
11073 		if (count < ZONE_GC_OBJ_PER_PAGE * 20) {
11074 			count = ZONE_GC_OBJ_PER_PAGE * 20;
11075 		}
11076 
11077 		zone_gc_stress_test_alloc_n(&head, count);
11078 		zone_gc_stress_test_free_n(&head);
11079 
11080 		lck_mtx_lock(&zone_gc_lock);
11081 		zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11082 		    ZONE_RECLAIM_TRIM);
11083 		lck_mtx_unlock(&zone_gc_lock);
11084 
11085 		printf("zone_gc_stress_test: round %lld/%lld\n", i + 1, in);
11086 	}
11087 
11088 	os_atomic_thread_fence(seq_cst);
11089 	ctx.zgsc_done = true;
11090 	lck_mtx_sleep(&ctx.zgsc_lock, LCK_SLEEP_DEFAULT, &ctx, THREAD_UNINT);
11091 	lck_mtx_unlock(&ctx.zgsc_lock);
11092 
11093 	lck_mtx_destroy(&ctx.zgsc_lock, &zone_locks_grp);
11094 
11095 	lck_mtx_lock(&zone_gc_lock);
11096 	zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11097 	    ZONE_RECLAIM_DRAIN);
11098 	lck_mtx_unlock(&zone_gc_lock);
11099 
11100 	printf("zone_gc_stress_test: Done\n");
11101 
11102 	*out = 1;
11103 	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
11104 	os_atomic_store(&any_zone_test_running, false, relaxed);
11105 	return 0;
11106 }
11107 SYSCTL_TEST_REGISTER(zone_gc_stress_test, zone_gc_stress_test_run);
11108 
11109 /*
11110  * Routines to test that zone garbage collection and zone replenish threads
11111  * running at the same time don't cause problems.
11112  */
11113 
11114 static int
zone_gc_replenish_test(__unused int64_t in,int64_t * out)11115 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
11116 {
11117 	zone_gc(ZONE_GC_DRAIN);
11118 	*out = 1;
11119 	return 0;
11120 }
11121 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
11122 
11123 static int
zone_alloc_replenish_test(__unused int64_t in,int64_t * out)11124 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
11125 {
11126 	zone_t z = vm_map_entry_zone;
11127 	struct data { struct data *next; } *node, *list = NULL;
11128 
11129 	if (z == NULL) {
11130 		printf("Couldn't find a replenish zone\n");
11131 		return EIO;
11132 	}
11133 
11134 	/* big enough to go past replenishment */
11135 	for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
11136 		node = zalloc(z);
11137 		node->next = list;
11138 		list = node;
11139 	}
11140 
11141 	/*
11142 	 * release the memory we allocated
11143 	 */
11144 	while (list != NULL) {
11145 		node = list;
11146 		list = list->next;
11147 		zfree(z, node);
11148 	}
11149 
11150 	*out = 1;
11151 	return 0;
11152 }
11153 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
11154 
11155 #endif /* DEBUG || DEVELOPMENT */
11156