xref: /xnu-11417.121.6/osfmk/kern/zalloc.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	kern/zalloc.c
60  *	Author:	Avadis Tevanian, Jr.
61  *
62  *	Zone-based memory allocator.  A zone is a collection of fixed size
63  *	data blocks for which quick allocation/deallocation is possible.
64  */
65 
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80 
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97 
98 #include <prng/random.h>
99 
100 #include <vm/pmap.h>
101 #include <vm/vm_map_internal.h>
102 #include <vm/vm_memtag.h>
103 #include <vm/vm_kern_internal.h>
104 #include <vm/vm_kern_xnu.h>
105 #include <vm/vm_page_internal.h>
106 #include <vm/vm_pageout_internal.h>
107 #include <vm/vm_compressor_xnu.h> /* C_SLOT_PACKED_PTR* */
108 #include <vm/vm_far.h>
109 
110 #include <pexpert/pexpert.h>
111 
112 #include <machine/machparam.h>
113 #include <machine/machine_routines.h>  /* ml_cpu_get_info */
114 
115 #include <os/atomic.h>
116 
117 #include <libkern/OSDebug.h>
118 #include <libkern/OSAtomic.h>
119 #include <libkern/section_keywords.h>
120 #include <sys/kdebug.h>
121 #include <sys/kern_memorystatus_xnu.h>
122 #include <sys/code_signing.h>
123 
124 #include <san/kasan.h>
125 #include <libsa/stdlib.h>
126 #include <sys/errno.h>
127 
128 #include <IOKit/IOBSD.h>
129 #include <arm64/amcc_rorgn.h>
130 
131 #if DEBUG
132 #define z_debug_assert(expr)  assert(expr)
133 #else
134 #define z_debug_assert(expr)  (void)(expr)
135 #endif
136 
137 #if CONFIG_PROB_GZALLOC && CONFIG_SPTM
138 #error This is not a supported configuration
139 #endif
140 
141 /* Returns pid of the task with the largest number of VM map entries.  */
142 extern pid_t find_largest_process_vm_map_entries(void);
143 
144 extern zone_t vm_object_zone;
145 extern zone_t ipc_service_port_label_zone;
146 
147 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
148     ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
149 
150 #if ZSECURITY_CONFIG(ZONE_TAGGING)
151 #define ZONE_MIN_ELEM_SIZE      (sizeof(uint64_t) * 2)
152 #define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
153 #else /* ZSECURITY_CONFIG_ZONE_TAGGING */
154 #define ZONE_MIN_ELEM_SIZE      sizeof(uint64_t)
155 #define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
156 #endif /* ZSECURITY_CONFIG_ZONE_TAGGING */
157 
158 #define ZONE_MAX_ALLOC_SIZE             (32 * 1024)
159 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
160 #define ZONE_CHUNK_ALLOC_SIZE           (256 * 1024)
161 #define ZONE_MAX_CHUNK_ALLOC_NUM        (10)
162 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
163 
164 #if   XNU_PLATFORM_MacOSX
165 #define ZONE_MAP_MAX            (32ULL << 30)
166 #define ZONE_MAP_VA_SIZE        (128ULL << 30)
167 #else
168 #define ZONE_MAP_MAX            (8ULL << 30)
169 #define ZONE_MAP_VA_SIZE        (24ULL << 30)
170 #endif
171 
172 __enum_closed_decl(zm_len_t, uint16_t, {
173 	ZM_CHUNK_FREE           = 0x0,
174 	/* 1 through 8 are valid lengths */
175 	ZM_CHUNK_LEN_MAX        = 0x8,
176 
177 	/* PGZ magical values */
178 	ZM_PGZ_FREE             = 0x0,
179 	ZM_PGZ_ALLOCATED        = 0xa, /* [a]llocated   */
180 	ZM_PGZ_GUARD            = 0xb, /* oo[b]         */
181 	ZM_PGZ_DOUBLE_FREE      = 0xd, /* [d]ouble_free */
182 
183 	/* secondary page markers */
184 	ZM_SECONDARY_PAGE       = 0xe,
185 	ZM_SECONDARY_PCPU_PAGE  = 0xf,
186 });
187 
188 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
189 
190 struct zone_page_metadata {
191 	union {
192 		struct {
193 			/* The index of the zone this metadata page belongs to */
194 			zone_id_t       zm_index : 10;
195 
196 			/*
197 			 * This chunk ends with a guard page.
198 			 */
199 			uint16_t        zm_guarded : 1;
200 
201 			/*
202 			 * Whether `zm_bitmap` is an inline bitmap
203 			 * or a packed bitmap reference
204 			 */
205 			uint16_t        zm_inline_bitmap : 1;
206 
207 			/*
208 			 * Zones allocate in "chunks" of zone_t::z_chunk_pages
209 			 * consecutive pages, or zpercpu_count() pages if the
210 			 * zone is percpu.
211 			 *
212 			 * The first page of it has its metadata set with:
213 			 * - 0 if none of the pages are currently wired
214 			 * - the number of wired pages in the chunk
215 			 *   (not scaled for percpu).
216 			 *
217 			 * Other pages in the chunk have their zm_chunk_len set
218 			 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
219 			 * depending on whether the zone is percpu or not.
220 			 * For those, zm_page_index holds the index of that page
221 			 * in the run, and zm_subchunk_len the remaining length
222 			 * within the chunk.
223 			 *
224 			 * Metadata used for PGZ pages can have 3 values:
225 			 * - ZM_PGZ_FREE:         slot is free
226 			 * - ZM_PGZ_ALLOCATED:    slot holds an allocated element
227 			 *                        at offset (zm_pgz_orig_addr & PAGE_MASK)
228 			 * - ZM_PGZ_DOUBLE_FREE:  slot detected a double free
229 			 *                        (will panic).
230 			 */
231 			zm_len_t        zm_chunk_len : 4;
232 		};
233 		uint16_t zm_bits;
234 	};
235 
236 	union {
237 #define ZM_ALLOC_SIZE_LOCK      1u
238 		uint16_t zm_alloc_size; /* first page only */
239 		struct {
240 			uint8_t zm_page_index;   /* secondary pages only */
241 			uint8_t zm_subchunk_len; /* secondary pages only */
242 		};
243 		uint16_t zm_oob_offs;   /* in guard pages  */
244 	};
245 	union {
246 		uint32_t zm_bitmap;     /* most zones      */
247 		uint32_t zm_bump;       /* permanent zones */
248 	};
249 
250 	union {
251 		struct {
252 			zone_pva_t      zm_page_next;
253 			zone_pva_t      zm_page_prev;
254 		};
255 		vm_offset_t zm_pgz_orig_addr;
256 		struct zone_page_metadata *zm_pgz_slot_next;
257 	};
258 };
259 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
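/*
 * A quick accounting of the 16 bytes asserted above (illustrative, derived
 * from the field widths declared in this struct): the first union is one
 * 16-bit word (zm_index:10 + zm_guarded:1 + zm_inline_bitmap:1 +
 * zm_chunk_len:4 == 16 bits), the second union is another 16-bit word,
 * the third is a 32-bit word, and the last union is 8 bytes (two 32-bit
 * zone_pva_t links, or one pointer-sized field).
 * 2 + 2 + 4 + 8 == 16 bytes of metadata per page of zone memory.
 */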
260 
261 /*!
262  * @typedef zone_magazine_t
263  *
264  * @brief
265  * Magazine of cached allocations.
266  *
267  * @field zm_next       linkage used by magazine depots.
268  * @field zm_elems      an array of @c zc_mag_size() elements.
269  */
270 struct zone_magazine {
271 	zone_magazine_t         zm_next;
272 	smr_seq_t               zm_seq;
273 	vm_offset_t             zm_elems[0];
274 };
275 
276 /*!
277  * @typedef zone_cache_t
278  *
279  * @brief
280  * Magazine of cached allocations.
281  *
282  * @discussion
283  * Below is a diagram of the caching system. This design is inspired by the
284  * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
285  * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
286  * zone allocator (itself derived from this seminal work).
287  *
288  * It is divided into 3 layers:
289  * - the per-cpu layer,
290  * - the recirculation depot layer,
291  * - the Zone Allocator.
292  *
293  * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
294  * which are stacks of up to @c zc_mag_size() elements.
295  *
296  * <h2>CPU layer</h2>
297  *
298  * The CPU layer (@c zone_cache_t) looks like this:
299  *
300  *      ╭─ a ─ f ─┬───────── zm_depot ──────────╮
301  *      │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮         │
302  *      │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│         │
303  *      │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│         │
304  *      │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│         │
305  *      │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯         │
306  *      ╰─────────┴─────────────────────────────╯
307  *
308  * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
309  * or free to. Serialization is achieved through disabling preemption, and only
310  * the current CPU can access those allocations. This is represented on the left
311  * hand side of the diagram above.
312  *
313  * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
314  * full magazines, and is protected by the @c zm_depot_lock for access.
315  * The lock is expected to absolutely never be contended, as only the local CPU
316  * tends to access the local per-cpu depot in regular operation mode.
317  *
318  * However unlike UMA, our implementation allows for the zone GC to reclaim
319  * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
320  *
321  *
322  * <h2>Recirculation Depot</h2>
323  *
324  * The recirculation depot layer is a list similar to the per-cpu depot,
325  * however it is different in two fundamental ways:
326  *
327  * - it is protected by the regular zone lock,
328  * - elements referenced by the magazines in that layer appear free
329  *   to the zone layer.
330  *
331  *
332  * <h2>Magazine circulation and sizing</h2>
333  *
334  * The caching system sizes itself dynamically. Operations that allocate/free
335  * a single element call @c zone_lock_nopreempt_check_contention() which records
336  * contention on the lock by doing a trylock and recording its success.
337  *
338  * This information is stored in the @c z_recirc_cont_cur field of the zone,
339  * and a windowed moving average is maintained in @c z_contention_wma.
340  * The periodically run function @c compute_zone_working_set_size() will then
341  * take this into account to decide to grow the number of buckets allowed
342  * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
343  * thresholds.
344  *
345  * The per-cpu layer will attempt to work with its depot, finding both full and
346  * empty magazines cached there. If it can't get what it needs, then it will
347  * mediate with the zone recirculation layer. Such recirculation is done in
348  * batches in order to amortize lock holds.
349  * (See @c {zalloc,zfree}_cached_depot_recirculate()).
350  *
351  * The recirculation layer keeps track of the minimum number of magazines
352  * it had over time for each of the full and empty queues. This allows for
353  * @c compute_zone_working_set_size() to return memory to the system when a zone
354  * stops being used as much.
355  *
356  * <h2>Security considerations</h2>
357  *
358  * The zone caching layer has been designed to avoid returning elements in
359  * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
360  * and @c zfree() free to the (f) magazine, and only swap them when the
361  * requested operation cannot be fulfilled.
362  *
363  * The per-cpu overflow depot or the recirculation depots are similarly used
364  * in FIFO order.
365  *
366  * @field zc_depot_lock     a lock to access @c zc_depot, @c zc_depot_cur.
367  * @field zc_alloc_cur      denormalized number of elements in the (a) magazine
368  * @field zc_free_cur       denormalized number of elements in the (f) magazine
369  * @field zc_alloc_elems    a pointer to the array of elements in (a)
370  * @field zc_free_elems     a pointer to the array of elements in (f)
371  *
372  * @field zc_depot          a list of @c zc_depot_cur full magazines
373  */
374 typedef struct zone_cache {
375 	hw_lck_ticket_t            zc_depot_lock;
376 	uint16_t                   zc_alloc_cur;
377 	uint16_t                   zc_free_cur;
378 	vm_offset_t               *zc_alloc_elems;
379 	vm_offset_t               *zc_free_elems;
380 	struct zone_depot          zc_depot;
381 	smr_t                      zc_smr;
382 	zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
383 } __attribute__((aligned(64))) * zone_cache_t;
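/*
 * A minimal user-space model of the (a)/(f) magazine discipline described
 * above (a sketch only: the struct names, MAG_SIZE and the depot fallback
 * are assumptions, not the kernel implementation).  Allocations pop from
 * the (a) stack, frees push onto the (f) stack, and the two are swapped
 * only when the requested operation cannot be satisfied, which is what
 * avoids strict LIFO reuse of elements:
 *
 *     #define MAG_SIZE 8
 *     struct mag { unsigned cur; void *elems[MAG_SIZE]; };
 *     struct cpu_cache { struct mag a, f; };
 *
 *     static void *
 *     cache_alloc(struct cpu_cache *cc)
 *     {
 *             if (cc->a.cur == 0) {
 *                     if (cc->f.cur == 0) {
 *                             return NULL;        // go to the depot/zone instead
 *                     }
 *                     struct mag tmp = cc->a;     // swap (a) and (f)
 *                     cc->a = cc->f;
 *                     cc->f = tmp;
 *             }
 *             return cc->a.elems[--cc->a.cur];
 *     }
 *
 *     static bool
 *     cache_free(struct cpu_cache *cc, void *elem)
 *     {
 *             if (cc->f.cur == MAG_SIZE) {
 *                     if (cc->a.cur == MAG_SIZE) {
 *                             return false;       // push a full magazine to the depot
 *                     }
 *                     struct mag tmp = cc->a;     // swap (a) and (f)
 *                     cc->a = cc->f;
 *                     cc->f = tmp;
 *             }
 *             cc->f.elems[cc->f.cur++] = elem;
 *             return true;
 *     }
 */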
384 
385 #if !__x86_64__
386 static
387 #endif
388 __security_const_late struct {
389 	struct mach_vm_range       zi_map_range;  /* all zone submaps     */
390 	struct mach_vm_range       zi_ro_range;   /* read-only range      */
391 	struct mach_vm_range       zi_meta_range; /* debugging only       */
392 	struct mach_vm_range       zi_bits_range; /* bits buddy allocator */
393 	struct mach_vm_range       zi_xtra_range; /* vm tracking metadata */
394 	struct mach_vm_range       zi_pgz_range;
395 	struct zone_page_metadata *zi_pgz_meta;
396 
397 	/*
398 	 * The metadata lives within the zi_meta_range address range.
399 	 *
400 	 * The correct formula to find a metadata index is:
401 	 *     absolute_page_index - page_index(zi_map_range.min_address)
402 	 *
403 	 * And then this index is used to dereference zi_meta_range.min_address
404 	 * as a `struct zone_page_metadata` array.
405 	 *
406 	 * To avoid doing that subtraction all the time in the various fast paths,
407 	 * zi_meta_base is pre-offset with that minimum page index so that the
408 	 * math does not have to be redone every time.
409 	 */
410 	struct zone_page_metadata *zi_meta_base;
411 } zone_info;
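/*
 * A minimal sketch of the pre-offset described above (the function name is
 * an assumption; the arithmetic mirrors zone_pva_to_meta() below): because
 * zi_meta_base has already been shifted down by
 * page_index(zi_map_range.min_address), the fast path can index it with the
 * absolute page index of an element instead of re-subtracting the range
 * start on every lookup:
 *
 *     static struct zone_page_metadata *
 *     meta_for(vm_offset_t addr)
 *     {
 *             // absolute page index, no subtraction needed
 *             return &zone_info.zi_meta_base[atop(addr)];
 *     }
 */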
412 
413 __startup_data static struct mach_vm_range  zone_map_range;
414 __startup_data static vm_map_size_t         zone_meta_size;
415 __startup_data static vm_map_size_t         zone_bits_size;
416 __startup_data static vm_map_size_t         zone_xtra_size;
417 #if MACH_ASSERT
418 __startup_data static vm_map_size_t         vm_submap_restriction_size_debug;
419 #endif /* MACH_ASSERT */
420 
421 /*
422  * Initial array of metadata for stolen memory.
423  *
424  * The numbers here have to be kept in sync with vm_map_steal_memory()
425  * so that we have reserved enough metadata.
426  *
427  * After zone_init() has run (which happens while the kernel is still single
428  * threaded), the metadata is moved to its final dynamic location, and
429  * this array is unmapped with the rest of __startup_data at lockdown.
430  */
431 #define ZONE_EARLY_META_INLINE_COUNT    64
432 __startup_data
433 static struct zone_page_metadata
434     zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
435 
436 
437 __startup_data __attribute__((aligned(PAGE_MAX_SIZE)))
438 static uint8_t zone_early_pages_to_cram[PAGE_MAX_SIZE * 16];
439 
440 /*
441  *	The zone_locks_grp allows for collecting lock statistics.
442  *	All locks are associated with this group in zinit.
443  *	Look at tools/lockstat for debugging lock contention.
444  */
445 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
446 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
447 
448 /*
449  *	The zone metadata lock protects:
450  *	- metadata faulting,
451  *	- VM submap VA allocations,
452  *	- early gap page queue list
453  */
454 #define zone_meta_lock()   lck_mtx_lock(&zone_metadata_region_lck);
455 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
456 
457 /*
458  *	Exclude more than one concurrent garbage collection
459  */
460 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
461 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
462 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
463 
464 /*
465  * Panic logging metadata
466  */
467 bool panic_include_zprint = false;
468 bool panic_include_kalloc_types = false;
469 zone_t kalloc_type_src_zone = ZONE_NULL;
470 zone_t kalloc_type_dst_zone = ZONE_NULL;
471 mach_memory_info_t *panic_kext_memory_info = NULL;
472 vm_size_t panic_kext_memory_size = 0;
473 vm_offset_t panic_fault_address = 0;
474 
475 /*
476  *      Protects zone_array, num_zones, num_zones_in_use, and
477  *      zone_destroyed_bitmap
478  */
479 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
480 static zone_id_t        num_zones_in_use;
481 zone_id_t _Atomic       num_zones;
482 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
483 
484 /*
485  * Initial globals for zone stats until we can allocate the real ones.
486  * Those get migrated inside the per-CPU ones during zone_init() and
487  * this array is unmapped with the rest of __startup_data at lockdown.
488  */
489 
490 /* zone to allocate zone_magazine structs from */
491 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
492 /*
493  * Zone caching stays off until pid 1 is made and
494  * compute_zone_working_set_size() has run for the first time.
495  *
496  * -1 represents the "never enabled yet" value.
497  */
498 static int8_t zone_caching_disabled = -1;
499 
500 __startup_data
501 static struct zone_stats zone_stats_startup[MAX_ZONES];
502 struct zone              zone_array[MAX_ZONES];
503 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
504 	[0 ... MAX_ZONES - 1] = {
505 		.z_kheap_id       = KHEAP_ID_NONE,
506 		.z_noencrypt      = false,
507 		.z_submap_idx     = Z_SUBMAP_IDX_GENERAL_0,
508 		.z_kalloc_type    = false,
509 		.z_sig_eq         = 0,
510 #if ZSECURITY_CONFIG(ZONE_TAGGING)
511 		.z_tag            = 1,
512 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
513 		.z_tag            = 0,
514 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
515 	},
516 };
517 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
518 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
519 
520 #if DEBUG || DEVELOPMENT
521 unsigned int
522 zone_max_zones(void)
523 {
524 	return MAX_ZONES;
525 }
526 #endif
527 
528 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
529 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
530 
531 /* Used to keep track of destroyed slots in the zone_array */
532 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
533 
534 /* number of zone mapped pages used by all zones */
535 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
536 size_t zone_pages_wired;
537 size_t zone_guard_pages;
538 
539 /* Time in (ms) after which we panic for zone exhaustions */
540 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
541 static bool zone_share_always = true;
542 static TUNABLE_WRITEABLE(uint32_t, zone_early_thres_mul, "zone_early_thres_mul", 5);
543 
544 #if VM_TAG_SIZECLASSES
545 /*
546  * Zone tagging allows for per "tag" accounting of allocations for the kalloc
547  * zones only.
548  *
549  * There are 3 kinds of tags that can be used:
550  * - pre-registered VM_KERN_MEMORY_*
551  * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
552  * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
553  *
554  * The VM tracks the statistics in lazily allocated structures.
555  * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
556  *
557  * If for some reason the requested tag cannot be accounted for,
558  * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
559  *
560  * Each allocated element also remembers the tag it was assigned,
561  * which lets zalloc/zfree update statistics correctly.
562  */
563 
564 /* enable tags for zones that ask for it */
565 static TUNABLE(bool, zone_tagging_on, "-zt", false);
566 
567 /*
568  * Array of all sizeclasses used by kalloc variants so that we can
569  * have accounting per size class for each kalloc callsite
570  */
571 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
572 #endif /* VM_TAG_SIZECLASSES */
573 
574 #if DEBUG || DEVELOPMENT
575 static int zalloc_simulate_vm_pressure;
576 #endif /* DEBUG || DEVELOPMENT */
577 
578 #define Z_TUNABLE(t, n, d) \
579 	TUNABLE(t, _##n, #n, d); \
580 	__pure2 static inline t n(void) { return _##n; }
581 
582 /*
583  * Zone caching tunables
584  *
585  * zc_mag_size():
586  *   size of magazines, larger to reduce contention at the expense of memory
587  *
588  * zc_enable_level
589  *   number of contentions per second after which zone caching engages
590  *   automatically.
591  *
592  *   0 to disable.
593  *
594  * zc_grow_level
595  *   number of contentions per second x cpu after which the number of magazines
596  *   allowed in the depot can grow. (in "Z_WMA_UNIT" units).
597  *
598  * zc_shrink_level
599  *   number of contentions per second x cpu below which the number of magazines
600  *   allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
601  *
602  * zc_pcpu_max
603  *   maximum memory size in bytes that can hang from a CPU,
604  *   which will affect how many magazines are allowed in the depot.
605  *
606  *   The alloc/free magazines are assumed to be on average half-empty
607  *   and to count for "1" unit of magazines.
608  *
609  * zc_autotrim_size
610  *   Size allowed to hang extra from the recirculation depot before
611  *   auto-trim kicks in.
612  *
613  * zc_autotrim_buckets
614  *
615  *   How many buckets in excess of the working-set are allowed
616  *   before auto-trim kicks in for empty buckets.
617  *
618  * zc_free_batch_size
619  *   The size of batches of frees/reclaim that can be done keeping
620  *   the zone lock held (and preemption disabled).
621  */
622 Z_TUNABLE(uint16_t, zc_mag_size, 8);
623 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
624 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
625 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
626 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
627 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
628 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
629 static Z_TUNABLE(uint32_t, zc_free_batch_size, 128);
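/*
 * For reference, the first use above, Z_TUNABLE(uint16_t, zc_mag_size, 8),
 * expands (per the Z_TUNABLE definition earlier in this file) to roughly:
 *
 *     TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *     __pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 *
 * i.e. a boot-arg-backed variable plus a pure inline accessor, so call sites
 * read the tunable through a function instead of touching the global.
 */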
630 
631 static SECURITY_READ_ONLY_LATE(size_t)    zone_pages_wired_max;
632 static SECURITY_READ_ONLY_LATE(vm_map_t)  zone_submaps[Z_SUBMAP_IDX_COUNT];
633 static SECURITY_READ_ONLY_LATE(vm_map_t)  zone_meta_map;
634 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
635 	[Z_SUBMAP_IDX_VM]               = "VM",
636 	[Z_SUBMAP_IDX_READ_ONLY]        = "RO",
637 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
638 	[Z_SUBMAP_IDX_GENERAL_0]        = "GEN0",
639 	[Z_SUBMAP_IDX_GENERAL_1]        = "GEN1",
640 	[Z_SUBMAP_IDX_GENERAL_2]        = "GEN2",
641 	[Z_SUBMAP_IDX_GENERAL_3]        = "GEN3",
642 #else
643 	[Z_SUBMAP_IDX_GENERAL_0]        = "GEN",
644 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
645 	[Z_SUBMAP_IDX_DATA]             = "DATA",
646 };
647 
648 #if __x86_64__
649 #define ZONE_ENTROPY_CNT 8
650 #else
651 #define ZONE_ENTROPY_CNT 2
652 #endif
653 static struct zone_bool_gen {
654 	struct bool_gen zbg_bg;
655 	uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
656 } zone_bool_gen[MAX_CPUS];
657 
658 #if CONFIG_PROB_GZALLOC
659 /*
660  * Probabilistic gzalloc
661  * =====================
662  *
663  *
664  * Probabilistic guard zalloc samples allocations and will protect them by
665  * double-mapping the page holding them and returning the secondary virtual
666  * address to its callers.
667  *
668  * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
669  * are set.
670  *
671  *
672  * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
673  * most zalloc/kalloc features:
674  * - zone_require is functional
675  * - zone caching or zone tagging is compatible
676  * - non-blocking allocation work (they will always return NULL with gzalloc).
677  * - non-blocking allocations work (they always return NULL with gzalloc).
678  * PGZ limitations:
679  * - VA sequestering isn't respected, as the slots (which are in limited
680  *   quantity) will be reused for any type, however the PGZ quarantine
681  *   somewhat mitigates the impact.
682  * - zones with elements larger than a page cannot be protected.
683  *
684  *
685  * Tunables:
686  * --------
687  *
688  * pgz=1:
689  *   Turn on probabilistic guard malloc for all zones
690  *
691  *   (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
692  *
693  * pgz_sample_rate=0 to 2^31
694  *   average sample rate between two guarded allocations.
695  *   0 means every allocation.
696  *
697  *   The default is a random number between 1000 and 10,000
698  *
699  * pgz_slots
700  *   how many allocations to protect.
701  *
702  *   Each costs:
703  *   - a PTE in the pmap (when allocated)
704  *   - 2 zone page meta's (every other page is a "guard" one, 32B total)
705  *   - 64 bytes per backtraces.
706  *   On LP64 this is <16K per 100 slots.
707  *
708  *   The default is ~200 slots per G of physical ram (32k / G)
709  *
710  *   TODO:
711  *   - try harder to allocate elements at the "end" to catch OOB more reliably.
712  *
713  * pgz_quarantine
714  *   how many slots should be free at any given time.
715  *
716  *   PGZ will round robin through free slots to be reused, but free slots are
717  *   important to detect use-after-free by acting as a quarantine.
718  *
719  *   By default, PGZ will keep 33% of the slots around at all time.
720  *
721  * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
722  *   Specific zones for which to enable probabilistic guard malloc.
723  *   There must be no numbering gap (names after the gap will be ignored).
724  */
725 #if DEBUG || DEVELOPMENT
726 static TUNABLE(bool, pgz_all, "pgz", true);
727 #else
728 static TUNABLE(bool, pgz_all, "pgz", false);
729 #endif
730 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
731 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
732 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
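/*
 * Example boot-args combining the tunables above (illustrative only: the
 * values are arbitrary and the zone name in pgz1 is hypothetical):
 *
 *     pgz=1 pgz_sample_rate=2000 pgz_slots=1600 pgz_quarantine=512
 *     pgz1=data.kalloc.48
 *
 * Remember that the pgz1, pgz2, ... list must be numbered without gaps,
 * or the names after the gap are ignored.
 */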
733 #endif /* CONFIG_PROB_GZALLOC */
734 
735 static zone_t zone_find_largest(uint64_t *zone_size);
736 
737 #endif /* !ZALLOC_TEST */
738 #pragma mark Zone metadata
739 #if !ZALLOC_TEST
740 
741 static inline bool
742 zone_has_index(zone_t z, zone_id_t zid)
743 {
744 	return zone_array + zid == z;
745 }
746 
747 __abortlike
748 void
749 zone_invalid_panic(zone_t zone)
750 {
751 	panic("zone %p isn't in the zone_array", zone);
752 }
753 
754 __abortlike
755 static void
756 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
757     const char *kind)
758 {
759 	panic("zone metadata corruption: %s (meta %p, zone %s%s)",
760 	    kind, meta, zone_heap_name(zone), zone->z_name);
761 }
762 
763 __abortlike
764 static void
765 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
766 {
767 	panic("zone element pointer validation failed (addr: %p, zone %s%s)",
768 	    (void *)addr, zone_heap_name(zone), zone->z_name);
769 }
770 
771 __abortlike
772 static void
773 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
774     struct zone_page_metadata *meta)
775 {
776 	zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
777 	zone_id_t zidx;
778 	zone_t src_zone;
779 
780 	if (zsflags.z_kalloc_type) {
781 		panic_include_kalloc_types = true;
782 		kalloc_type_dst_zone = zone;
783 	}
784 
785 	zidx = meta->zm_index;
786 	if (zidx >= os_atomic_load(&num_zones, relaxed)) {
787 		panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
788 		    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
789 		    zidx);
790 	}
791 
792 	src_zone = &zone_array[zidx];
793 	src_zsflags = zone_security_array[zidx];
794 	if (src_zsflags.z_kalloc_type) {
795 		panic_include_kalloc_types = true;
796 		kalloc_type_src_zone = src_zone;
797 	}
798 
799 	panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
800 	    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
801 	    zone_heap_name(src_zone), src_zone->z_name, zidx);
802 }
803 
804 __abortlike
805 static void
806 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
807 {
808 	panic("metadata list corruption through element %p detected in zone %s%s",
809 	    meta, zone_heap_name(zone), zone->z_name);
810 }
811 
812 __abortlike
813 static void
814 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
815     const char *kind)
816 {
817 	panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
818 	    zone_heap_name(zone), zone->z_name, meta);
819 }
820 
821 __abortlike
822 static void
823 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
824 {
825 	panic("%s: double free of %p to zone %s%s", caller,
826 	    (void *)addr, zone_heap_name(zone), zone->z_name);
827 }
828 
829 __abortlike
830 static void
831 zone_accounting_panic(zone_t zone, const char *kind)
832 {
833 	panic("accounting mismatch (%s) for zone %s%s", kind,
834 	    zone_heap_name(zone), zone->z_name);
835 }
836 
837 #define zone_counter_sub(z, stat, value)  ({ \
838 	if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
839 	    zone_accounting_panic(z, #stat " wrap-around"); \
840 	} \
841 	(z)->stat; \
842 })
843 
844 static inline uint16_t
845 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
846     vm_offset_t esize)
847 {
848 	if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
849 		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
850 	}
851 	return m->zm_alloc_size;
852 }
853 
854 static inline uint16_t
855 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
856     vm_offset_t esize)
857 {
858 	if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
859 		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
860 	}
861 	return m->zm_alloc_size;
862 }
863 
864 __abortlike
865 static void
866 zone_nofail_panic(zone_t zone)
867 {
868 	panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
869 	    zone_heap_name(zone), zone->z_name);
870 }
871 
872 __header_always_inline bool
873 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
874 {
875 	const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
876 	struct mach_vm_range r = { addr_start, addr_end };
877 
878 	return mach_vm_range_intersects(ro_r, &r);
879 }
880 
881 #define from_range(r, addr, size) \
882 	__builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
883 	mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
884 	mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))
885 
886 #define from_ro_map(addr, size) \
887 	from_range(&zone_info.zi_ro_range, addr, size)
888 
889 #define from_zone_map(addr, size) \
890 	from_range(&zone_info.zi_map_range, addr, size)
891 
892 __header_always_inline bool
893 zone_pva_is_null(zone_pva_t page)
894 {
895 	return page.packed_address == 0;
896 }
897 
898 __header_always_inline bool
899 zone_pva_is_queue(zone_pva_t page)
900 {
901 	// actual kernel pages have the top bit set
902 	return (int32_t)page.packed_address > 0;
903 }
904 
905 __header_always_inline bool
906 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
907 {
908 	return pva1.packed_address == pva2.packed_address;
909 }
910 
911 __header_always_inline zone_pva_t *
912 zone_pageq_base(void)
913 {
914 	extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
915 
916 	/*
917 	 * `-1` so that if the first __DATA variable is a page queue,
918 	 * it gets a non-zero index
919 	 */
920 	return data_seg_start - 1;
921 }
922 
923 __header_always_inline void
924 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
925     struct zone_page_metadata *meta)
926 {
927 	zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
928 
929 	if (!zone_pva_is_equal(*queue_head, oldv)) {
930 		zone_page_metadata_list_corruption(z, meta);
931 	}
932 	*queue_head = meta->zm_page_next;
933 }
934 
935 __header_always_inline zone_pva_t
936 zone_queue_encode(zone_pva_t *headp)
937 {
938 	return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
939 }
940 
941 __header_always_inline zone_pva_t
942 zone_pva_from_addr(vm_address_t addr)
943 {
944 	// cannot use atop() because we want to maintain the sign bit
945 	return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
946 }
947 
948 __header_always_inline vm_address_t
949 zone_pva_to_addr(zone_pva_t page)
950 {
951 	// cause sign extension so that we end up with the right address
952 	return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
953 }
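/*
 * The two helpers above rely on arithmetic shifts so that packed kernel
 * page addresses (top bit set) stay distinguishable from queue indices
 * (small positive values).  A standalone sketch of the round trip, assuming
 * PAGE_SHIFT == 14 and an arbitrary example kernel VA:
 *
 *     uintptr_t addr   = 0xfffffff012340000ULL;
 *     uint32_t  packed = (uint32_t)((intptr_t)addr >> 14);
 *     // top bit of packed is set, so zone_pva_is_queue() would be false
 *     uintptr_t back   = (uintptr_t)(int32_t)packed << 14;
 *     assert(back == addr);    // sign extension restores the original VA
 */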
954 
955 __header_always_inline struct zone_page_metadata *
956 zone_pva_to_meta(zone_pva_t page)
957 {
958 	return VM_FAR_ADD_PTR_UNBOUNDED(
959 		zone_info.zi_meta_base, page.packed_address);
960 }
961 
962 __header_always_inline zone_pva_t
963 zone_pva_from_meta(struct zone_page_metadata *meta)
964 {
965 	return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
966 }
967 
968 __header_always_inline struct zone_page_metadata *
969 zone_meta_from_addr(vm_offset_t addr)
970 {
971 	return zone_pva_to_meta(zone_pva_from_addr(addr));
972 }
973 
974 __header_always_inline zone_id_t
975 zone_index_from_ptr(const void *ptr)
976 {
977 	return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
978 }
979 
980 __header_always_inline vm_offset_t
981 zone_meta_to_addr(struct zone_page_metadata *meta)
982 {
983 	return ptoa((int32_t)(meta - zone_info.zi_meta_base));
984 }
985 
986 __attribute__((overloadable))
987 __header_always_inline void
988 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
989 {
990 	if (!zone_has_index(z, meta->zm_index)) {
991 		zone_page_metadata_index_confusion_panic(z, addr, meta);
992 	}
993 }
994 
995 __attribute__((overloadable))
996 __header_always_inline void
997 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
998 {
999 	zone_meta_validate(z, meta, zone_meta_to_addr(meta));
1000 }
1001 
1002 __header_always_inline void
1003 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
1004     struct zone_page_metadata *meta)
1005 {
1006 	zone_pva_t head = *headp;
1007 	zone_pva_t queue_pva = zone_queue_encode(headp);
1008 	struct zone_page_metadata *tmp;
1009 
1010 	meta->zm_page_next = head;
1011 	if (!zone_pva_is_null(head)) {
1012 		tmp = zone_pva_to_meta(head);
1013 		if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
1014 			zone_page_metadata_list_corruption(z, meta);
1015 		}
1016 		tmp->zm_page_prev = zone_pva_from_meta(meta);
1017 	}
1018 	meta->zm_page_prev = queue_pva;
1019 	*headp = zone_pva_from_meta(meta);
1020 }
1021 
1022 __header_always_inline struct zone_page_metadata *
1023 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1024 {
1025 	zone_pva_t head = *headp;
1026 	struct zone_page_metadata *meta = zone_pva_to_meta(head);
1027 	struct zone_page_metadata *tmp;
1028 
1029 	zone_meta_validate(z, meta);
1030 
1031 	if (!zone_pva_is_null(meta->zm_page_next)) {
1032 		tmp = zone_pva_to_meta(meta->zm_page_next);
1033 		if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1034 			zone_page_metadata_list_corruption(z, meta);
1035 		}
1036 		tmp->zm_page_prev = meta->zm_page_prev;
1037 	}
1038 	*headp = meta->zm_page_next;
1039 
1040 	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1041 
1042 	return meta;
1043 }
1044 
1045 __header_always_inline void
1046 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1047 {
1048 	zone_pva_t meta_pva = zone_pva_from_meta(meta);
1049 	struct zone_page_metadata *tmp;
1050 
1051 	if (!zone_pva_is_null(meta->zm_page_next)) {
1052 		tmp = zone_pva_to_meta(meta->zm_page_next);
1053 		if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1054 			zone_page_metadata_list_corruption(z, meta);
1055 		}
1056 		tmp->zm_page_prev = meta->zm_page_prev;
1057 	}
1058 	if (zone_pva_is_queue(meta->zm_page_prev)) {
1059 		zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1060 	} else {
1061 		tmp = zone_pva_to_meta(meta->zm_page_prev);
1062 		if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1063 			zone_page_metadata_list_corruption(z, meta);
1064 		}
1065 		tmp->zm_page_next = meta->zm_page_next;
1066 	}
1067 
1068 	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1069 }
1070 
1071 __header_always_inline void
1072 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1073     struct zone_page_metadata *meta)
1074 {
1075 	zone_meta_remqueue(z, meta);
1076 	zone_meta_queue_push(z, headp, meta);
1077 }
1078 
1079 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1080 static inline void
1081 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1082 {
1083 	uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1084 
1085 	assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1086 	if (new_size == ZM_ALLOC_SIZE_LOCK) {
1087 		zone_meta_requeue(z, &z->z_pageq_partial, m);
1088 		zone_counter_sub(z, z_wired_empty, len);
1089 	}
1090 }
1091 
1092 /* allows a given metadata to reach the z_pageq_empty queue again */
1093 static inline void
1094 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1095 {
1096 	uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1097 
1098 	assert(new_size % sizeof(vm_offset_t) == 0);
1099 	if (new_size == 0) {
1100 		zone_meta_requeue(z, &z->z_pageq_empty, m);
1101 		z->z_wired_empty += len;
1102 	}
1103 }
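/*
 * A note on the ZM_ALLOC_SIZE_LOCK trick used by the two helpers above:
 * zm_alloc_size otherwise only holds multiples of sizeof(vm_offset_t)
 * (the asserts above check this), so the low bit is free to act as a lock
 * that keeps a chunk off z_pageq_empty even when every element is free.
 * As a worked example, with three 64-byte elements allocated and the lock
 * taken, zm_alloc_size == 3 * 64 + 1 == 193; freeing all three leaves it
 * at 1, which is non-zero, so the chunk stays on z_pageq_partial until
 * the lock bit is subtracted again.
 */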
1104 
1105 /*
1106  * Routine to populate a page backing metadata in the zone_metadata_region.
1107  * Must be called without the zone lock held as it might potentially block.
1108  */
1109 static void
1110 zone_meta_populate(vm_offset_t base, vm_size_t size)
1111 {
1112 	struct zone_page_metadata *from = zone_meta_from_addr(base);
1113 	struct zone_page_metadata *to   = from + atop(size);
1114 	vm_offset_t page_addr = trunc_page(from);
1115 
1116 	for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1117 #if !KASAN
1118 		/*
1119 		 * This can race with another thread doing a populate on the same metadata
1120 		 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1121 		 * fault in the shadow when we first access the metadata page. Avoid this
1122 		 * by always synchronizing on the zone_metadata_region lock with KASan.
1123 		 */
1124 		if (pmap_find_phys(kernel_pmap, page_addr)) {
1125 			continue;
1126 		}
1127 #endif
1128 
1129 		for (;;) {
1130 			kern_return_t ret = KERN_SUCCESS;
1131 
1132 			/*
1133 			 * All updates to the zone_metadata_region are done
1134 			 * under the zone_metadata_region_lck
1135 			 */
1136 			zone_meta_lock();
1137 			if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1138 				ret = kernel_memory_populate(page_addr,
1139 				    PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1140 				    VM_KERN_MEMORY_OSFMK);
1141 			}
1142 			zone_meta_unlock();
1143 
1144 			if (ret == KERN_SUCCESS) {
1145 				break;
1146 			}
1147 
1148 			/*
1149 			 * We can't pass KMA_NOPAGEWAIT under a global lock as it leads
1150 			 * to bad system deadlocks, so if the allocation failed,
1151 			 * we need to do the VM_PAGE_WAIT() outside of the lock.
1152 			 */
1153 			VM_PAGE_WAIT();
1154 		}
1155 	}
1156 }
1157 
1158 __abortlike
1159 static void
1160 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1161 {
1162 	struct zone_page_metadata *meta;
1163 	const char *from_cache = "";
1164 	vm_offset_t page;
1165 
1166 	if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1167 		panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1168 		    (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1169 	}
1170 	page = trunc_page(addr);
1171 	meta = zone_meta_from_addr(addr);
1172 
1173 	if (!zone_has_index(zone, meta->zm_index)) {
1174 		zone_page_metadata_index_confusion_panic(zone, addr, meta);
1175 	}
1176 
1177 	if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1178 		panic("metadata %p corresponding to addr %p being freed to "
1179 		    "zone %s%s%s, is marked as secondary per cpu page",
1180 		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1181 		    from_cache);
1182 	}
1183 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1184 		page -= ptoa(meta->zm_page_index);
1185 		meta -= meta->zm_page_index;
1186 	}
1187 
1188 	if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1189 		panic("metadata %p corresponding to addr %p being freed to "
1190 		    "zone %s%s%s, has chunk len greater than max",
1191 		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1192 		    from_cache);
1193 	}
1194 
1195 	if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1196 		panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1197 		    "zone element size", (void *)addr, zone_heap_name(zone),
1198 		    zone->z_name, from_cache);
1199 	}
1200 
1201 	zone_invalid_element_addr_panic(zone, addr);
1202 }
1203 
1204 __attribute__((always_inline))
1205 static struct zone_page_metadata *
1206 zone_element_resolve(
1207 	zone_t                  zone,
1208 	vm_offset_t             addr,
1209 	vm_offset_t            *idx)
1210 {
1211 	struct zone_page_metadata *meta;
1212 	vm_offset_t offs, eidx;
1213 
1214 	meta = zone_meta_from_addr(addr);
1215 	if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1216 		zone_invalid_element_panic(zone, addr);
1217 	}
1218 
1219 	offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1220 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1221 		offs += ptoa(meta->zm_page_index);
1222 		meta -= meta->zm_page_index;
1223 	}
1224 
1225 	eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1226 	if (eidx * zone_elem_outer_size(zone) != offs) {
1227 		zone_invalid_element_panic(zone, addr);
1228 	}
1229 
1230 	*idx = eidx;
1231 	return meta;
1232 }
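/*
 * The divide-and-recheck above is what catches misaligned interior
 * pointers: Z_FAST_QUO() divides offs by the outer element size using a
 * precomputed magic multiplier, and multiplying the quotient back must
 * reproduce offs exactly.  As a worked example with an assumed 48-byte
 * outer element size: offs == 96 gives eidx == 2 and 2 * 48 == 96, so the
 * address resolves; offs == 100 also gives eidx == 2, but 2 * 48 != 100,
 * so zone_invalid_element_panic() fires.
 */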
1233 
1234 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1235 void *
1236 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1237 {
1238 	vm_offset_t addr = (vm_offset_t)ptr;
1239 	vm_offset_t end = addr + elem_size;
1240 	vm_offset_t offs;
1241 
1242 	/*
1243 	 * 0-sized allocations in a KALLOC_MINSIZE bucket
1244 	 * would be offset to the next allocation which is incorrect.
1245 	 */
1246 	req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1247 
1248 	/*
1249 	 * Given how chunks work, for a zone with PGZ guards on,
1250 	 * there's a single element which ends precisely
1251 	 * at the page boundary: the last one.
1252 	 */
1253 	if (req_size == elem_size ||
1254 	    (end & PAGE_MASK) ||
1255 	    !zone_meta_from_addr(addr)->zm_guarded) {
1256 		return ptr;
1257 	}
1258 
1259 	offs = elem_size - req_size;
1260 	zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1261 
1262 	return (char *)addr + offs;
1263 }
1264 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
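/*
 * A worked example of the adjustment above (sizes assumed for illustration,
 * with KALLOC_MINALIGN taken to be 16): for a guarded chunk whose last
 * element occupies the final 64 bytes of the page and a request of 24
 * bytes, req_size rounds up to 32, offs becomes 64 - 32 == 32, the caller
 * gets back addr + 32, and the guard page's zm_oob_offs records 32 so that
 * zone_element_size()/zfree() can recover the true element start.  The
 * returned buffer now ends flush against the guard page, so a one-byte
 * overrun faults immediately.
 */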
1265 
1266 __abortlike
1267 static void
1268 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1269 {
1270 	struct zone_page_metadata *meta;
1271 	vm_offset_t offs, size, page;
1272 	zone_t      zone;
1273 
1274 	page = trunc_page(addr);
1275 	meta = zone_meta_from_addr(addr);
1276 	zone = &zone_array[meta->zm_index];
1277 
1278 	if (zone->z_percpu) {
1279 		panic("zone bound checks: address %p is a per-cpu allocation",
1280 		    (void *)addr);
1281 	}
1282 
1283 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1284 		page -= ptoa(meta->zm_page_index);
1285 		meta -= meta->zm_page_index;
1286 	}
1287 
1288 	size = zone_elem_outer_size(zone);
1289 	offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1290 	    zone->z_quo_magic, size);
1291 	panic("zone bound checks: buffer %p of length %zd overflows "
1292 	    "object %p of size %zd in zone %p[%s%s]",
1293 	    (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1294 	    zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1295 }
1296 
1297 void
1298 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1299 {
1300 	struct zone_page_metadata *meta;
1301 	vm_offset_t offs, size;
1302 	zone_t      zone;
1303 
1304 	if (!from_zone_map(addr, 1)) {
1305 		return;
1306 	}
1307 
1308 #if CONFIG_PROB_GZALLOC
1309 	if (__improbable(pgz_owned(addr))) {
1310 		meta = zone_meta_from_addr(addr);
1311 		addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1312 	}
1313 #endif /* CONFIG_PROB_GZALLOC */
1314 	meta = zone_meta_from_addr(addr);
1315 	zone = zone_by_id(meta->zm_index);
1316 
1317 	if (zone->z_percpu) {
1318 		zone_element_bounds_check_panic(addr, len);
1319 	}
1320 
1321 	if (zone->z_permanent) {
1322 		/* We don't know bounds for those */
1323 		return;
1324 	}
1325 
1326 	offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1327 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1328 		offs += ptoa(meta->zm_page_index);
1329 	}
1330 	size = zone_elem_outer_size(zone);
1331 	offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1332 	if (len + zone_elem_redzone(zone) > size - offs) {
1333 		zone_element_bounds_check_panic(addr, len);
1334 	}
1335 }
1336 
1337 /*
1338  * Routine to get the size of a zone allocated address.
1339  * If the address doesn't belong to the zone maps, returns 0.
1340  */
1341 vm_size_t
1342 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1343 {
1344 	vm_address_t addr = (vm_address_t)elem;
1345 	struct zone_page_metadata *meta;
1346 	vm_size_t esize, offs, end;
1347 	zone_t zone;
1348 
1349 	if (from_zone_map(addr, sizeof(void *))) {
1350 		meta  = zone_meta_from_addr(addr);
1351 		zone  = zone_by_id(meta->zm_index);
1352 		esize = zone_elem_inner_size(zone);
1353 		end   = vm_memtag_canonicalize_kernel(addr + esize);
1354 		offs  = 0;
1355 
1356 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1357 		/*
1358 		 * If the chunk uses guards and (addr + esize) either
1359 		 * crosses a page boundary or lands exactly on one,
1360 		 * we need to look harder.
1361 		 */
1362 		if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1363 			uint32_t chunk_pages = zone->z_chunk_pages;
1364 
1365 			/*
1366 			 * Because in the vast majority of cases the element
1367 			 * size is sub-page and meta[1] must already be faulted in,
1368 			 * we can quickly peek at whether it's a guard.
1369 			 *
1370 			 * For elements larger than a page, finding the guard
1371 			 * page requires a little more effort.
1372 			 */
1373 			if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1374 				offs = meta[1].zm_oob_offs;
1375 				if (clear_oob) {
1376 					meta[1].zm_oob_offs = 0;
1377 				}
1378 			} else if (esize > PAGE_SIZE) {
1379 				struct zone_page_metadata *gmeta;
1380 
1381 				if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1382 					gmeta = meta + meta->zm_subchunk_len;
1383 				} else {
1384 					gmeta = meta + chunk_pages;
1385 				}
1386 				assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1387 
1388 				if (end >= zone_meta_to_addr(gmeta)) {
1389 					offs = gmeta->zm_oob_offs;
1390 					if (clear_oob) {
1391 						gmeta->zm_oob_offs = 0;
1392 					}
1393 				}
1394 			}
1395 		}
1396 #else
1397 #pragma unused(end, clear_oob)
1398 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1399 
1400 		if (oob_offs) {
1401 			*oob_offs = offs;
1402 		}
1403 		if (z) {
1404 			*z = zone;
1405 		}
1406 		return esize;
1407 	}
1408 
1409 	if (oob_offs) {
1410 		*oob_offs = 0;
1411 	}
1412 
1413 	return 0;
1414 }
1415 
1416 zone_id_t
1417 zone_id_for_element(void *addr, vm_size_t esize)
1418 {
1419 	zone_id_t zid = ZONE_ID_INVALID;
1420 	if (from_zone_map(addr, esize)) {
1421 		zid = zone_index_from_ptr(addr);
1422 		__builtin_assume(zid != ZONE_ID_INVALID);
1423 	}
1424 	return zid;
1425 }
1426 
1427 /* This function just formats the reason for the panics by redoing the checks */
1428 __abortlike
1429 static void
1430 zone_require_panic(zone_t zone, void *addr)
1431 {
1432 	uint32_t zindex;
1433 	zone_t other;
1434 
1435 	if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1436 		panic("zone_require failed: address not in a zone (addr: %p)", addr);
1437 	}
1438 
1439 	zindex = zone_index_from_ptr(addr);
1440 	other = &zone_array[zindex];
1441 	if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1442 		panic("zone_require failed: invalid zone index %d "
1443 		    "(addr: %p, expected: %s%s)", zindex,
1444 		    addr, zone_heap_name(zone), zone->z_name);
1445 	} else {
1446 		panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1447 		    "(addr: %p, expected: %s%s)",
1448 		    zindex, zone_heap_name(other), other->z_name,
1449 		    addr, zone_heap_name(zone), zone->z_name);
1450 	}
1451 }
1452 
1453 __abortlike
1454 static void
1455 zone_id_require_panic(zone_id_t zid, void *addr)
1456 {
1457 	zone_require_panic(&zone_array[zid], addr);
1458 }
1459 
1460 /*
1461  * Routines to panic if a pointer is not mapped to an expected zone.
1462  * This can be used as a means of pinning an object to the zone it is expected
1463  * to be a part of.  Causes a panic if the address does not belong to any
1464  * specified zone, does not belong to any zone, has been freed and therefore
1465  * unmapped from the zone, or the pointer contains an uninitialized value that
1466  * does not belong to any zone.
1467  */
1468 void
1469 zone_require(zone_t zone, void *addr)
1470 {
1471 	vm_size_t esize = zone_elem_inner_size(zone);
1472 
1473 	if (from_zone_map(addr, esize) &&
1474 	    zone_has_index(zone, zone_index_from_ptr(addr))) {
1475 		return;
1476 	}
1477 	zone_require_panic(zone, addr);
1478 }
1479 
1480 void
1481 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1482 {
1483 	if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1484 		return;
1485 	}
1486 	zone_id_require_panic(zid, addr);
1487 }
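/*
 * Typical usage of the requirement checks above (a sketch: the zone id,
 * the widget type and the helper are hypothetical, not part of this file):
 *
 *     static void
 *     widget_consume(struct widget *w)
 *     {
 *             // panics unless w points into the expected zone
 *             zone_id_require(ZONE_ID_WIDGET, sizeof(struct widget), w);
 *             widget_process(w);
 *     }
 */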
1488 
1489 void
1490 zone_id_require_aligned(zone_id_t zid, void *addr)
1491 {
1492 	zone_t zone = zone_by_id(zid);
1493 	vm_offset_t elem, offs;
1494 
1495 	elem = (vm_offset_t)addr;
1496 	offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
1497 
1498 	if (from_zone_map(addr, 1)) {
1499 		struct zone_page_metadata *meta;
1500 
1501 		meta = zone_meta_from_addr(elem);
1502 		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1503 			offs += ptoa(meta->zm_page_index);
1504 		}
1505 
1506 		if (zid == meta->zm_index &&
1507 		    Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
1508 			return;
1509 		}
1510 	}
1511 
1512 	zone_invalid_element_panic(zone, elem);
1513 }
1514 
1515 bool
1516 zone_owns(zone_t zone, void *addr)
1517 {
1518 	vm_size_t esize = zone_elem_inner_size(zone);
1519 
1520 	if (from_zone_map(addr, esize)) {
1521 		return zone_has_index(zone, zone_index_from_ptr(addr));
1522 	}
1523 	return false;
1524 }
1525 
1526 static inline struct mach_vm_range
1527 zone_kmem_suballoc(
1528 	mach_vm_offset_t        addr,
1529 	vm_size_t               size,
1530 	int                     flags,
1531 	vm_tag_t                tag,
1532 	vm_map_t                *new_map)
1533 {
1534 	struct mach_vm_range r;
1535 
1536 	*new_map = kmem_suballoc(kernel_map, &addr, size,
1537 	    VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1538 	    flags, KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT, tag).kmr_submap;
1539 
1540 	r.min_address = addr;
1541 	r.max_address = addr + size;
1542 	return r;
1543 }
1544 
1545 #endif /* !ZALLOC_TEST */
1546 #pragma mark Zone bits allocator
1547 
1548 /*!
1549  * @defgroup Zone Bitmap allocator
1550  * @{
1551  *
1552  * @brief
1553  * Functions implementing the zone bitmap allocator
1554  *
1555  * @discussion
1556  * The zone allocator maintains which elements are allocated or free in bitmaps.
1557  *
1558  * When the number of elements per page is smaller than 32, it is stored inline
1559  * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1560  * and @c zm_bitmap used for storage).
1561  *
1562  * When the number of elements is larger, then a bitmap is allocated from
1563  * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1564  * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1565  * @c zm_bitmap. The top 3 bits encode the scale (order) of the allocation in
1566  * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1567  * cannot be larger than 1024 bytes (8192 bits).
1568  *
1569  * This buddy allocator can actually accommodate allocations as large
1570  * as 8k on 16k systems and 2k on 4k systems.
1571  *
1572  * Note: @c zba_* functions are implementation details not meant to be used
1573  * outside of the implementation of the allocator itself. Interfaces to the rest of
1574  * the zone allocator are documented and not @c zba_* prefixed.
1575  */
1576 
1577 #define ZBA_CHUNK_SIZE          PAGE_MAX_SIZE
1578 #define ZBA_GRANULE             sizeof(uint64_t)
1579 #define ZBA_GRANULE_BITS        (8 * sizeof(uint64_t))
1580 #define ZBA_MAX_ORDER           (PAGE_MAX_SHIFT - 4)
1581 #define ZBA_MAX_ALLOC_ORDER     7
1582 #define ZBA_SLOTS               (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1583 #define ZBA_HEADS_COUNT         (ZBA_MAX_ORDER + 1)
1584 #define ZBA_PTR_MASK            0x0fffffff
1585 #define ZBA_ORDER_SHIFT         29
1586 #define ZBA_HAS_EXTRA_BIT       0x10000000
1587 
1588 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1589 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
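
/*
 * Illustrative sketch (not part of the build): how a packed bitmap
 * reference stored in @c zm_bitmap decodes with the constants above,
 * mirroring zba_bits_ref_order() / zba_bits_ref_ptr() further down:
 *
 *   uint32_t  bref  = meta->zm_bitmap;
 *   uint32_t  order = bref >> ZBA_ORDER_SHIFT;                // top 3 bits
 *   bool      extra = bref & ZBA_HAS_EXTRA_BIT;               // bit 28
 *   uint64_t *bits  = zba_slot_base() + (bref & ZBA_PTR_MASK); // low 28 bits
 *
 * The referenced bitmap spans (ZBA_GRANULE << order) bytes,
 * i.e. (ZBA_GRANULE_BITS << order) element bits.
 */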
1590 
1591 struct zone_bits_chain {
1592 	uint32_t zbc_next;
1593 	uint32_t zbc_prev;
1594 } __attribute__((aligned(ZBA_GRANULE)));
1595 
1596 struct zone_bits_head {
1597 	uint32_t zbh_next;
1598 	uint32_t zbh_unused;
1599 } __attribute__((aligned(ZBA_GRANULE)));
1600 
1601 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1602 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1603 
1604 struct zone_bits_allocator_meta {
1605 	uint32_t  zbam_left;
1606 	uint32_t  zbam_right;
1607 	struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1608 	struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1609 };
1610 
1611 struct zone_bits_allocator_header {
1612 	uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1613 };
1614 
1615 #if ZALLOC_TEST
1616 static struct zalloc_bits_allocator_test_setup {
1617 	vm_offset_t zbats_base;
1618 	void      (*zbats_populate)(vm_address_t addr, vm_size_t size);
1619 } zba_test_info;
1620 
1621 static struct zone_bits_allocator_header *
1622 zba_base_header(void)
1623 {
1624 	return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1625 }
1626 
1627 static kern_return_t
1628 zba_populate(uint32_t n, bool with_extra __unused)
1629 {
1630 	vm_address_t base = zba_test_info.zbats_base;
1631 	zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1632 
1633 	return KERN_SUCCESS;
1634 }
1635 #else
1636 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1637 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1638 
1639 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1640 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1641 
1642 static struct zone_bits_allocator_header *
1643 zba_base_header(void)
1644 {
1645 	return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1646 }
1647 
1648 static void
1649 zba_lock(void)
1650 {
1651 	lck_mtx_lock(&zba_mtx);
1652 }
1653 
1654 static void
1655 zba_unlock(void)
1656 {
1657 	lck_mtx_unlock(&zba_mtx);
1658 }
1659 
1660 __abortlike
1661 static void
1662 zba_memory_exhausted(void)
1663 {
1664 	uint64_t zsize = 0;
1665 	zone_t z = zone_find_largest(&zsize);
1666 	panic("zba_populate: out of bitmap space, "
1667 	    "likely due to memory leak in zone [%s%s] "
1668 	    "(%u%c, %d elements allocated)",
1669 	    zone_heap_name(z), zone_name(z),
1670 	    mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1671 	    zone_count_allocated(z));
1672 }
1673 
1674 
1675 static kern_return_t
1676 zba_populate(uint32_t n, bool with_extra)
1677 {
1678 	vm_size_t bits_size = ZBA_CHUNK_SIZE;
1679 	vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1680 	vm_address_t bits_addr;
1681 	vm_address_t xtra_addr;
1682 	kern_return_t kr;
1683 
1684 	bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1685 	xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1686 
1687 	kr = kernel_memory_populate(bits_addr, bits_size,
1688 	    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1689 	    VM_KERN_MEMORY_OSFMK);
1690 	if (kr != KERN_SUCCESS) {
1691 		return kr;
1692 	}
1693 
1694 
1695 	if (with_extra) {
1696 		kr = kernel_memory_populate(xtra_addr, xtra_size,
1697 		    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1698 		    VM_KERN_MEMORY_OSFMK);
1699 		if (kr != KERN_SUCCESS) {
1700 			kernel_memory_depopulate(bits_addr, bits_size,
1701 			    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1702 			    VM_KERN_MEMORY_OSFMK);
1703 		}
1704 	}
1705 
1706 	return kr;
1707 }
1708 #endif
1709 
1710 __pure2
1711 static struct zone_bits_allocator_meta *
1712 zba_meta(void)
1713 {
1714 	return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1715 }
1716 
1717 __pure2
1718 static uint64_t *
1719 zba_slot_base(void)
1720 {
1721 	return (uint64_t *)zba_base_header();
1722 }
1723 
1724 __pure2
1725 static struct zone_bits_head *
1726 zba_head(uint32_t order, bool with_extra)
1727 {
1728 	if (with_extra) {
1729 		return &zba_meta()->zbam_lists_with_extra[order];
1730 	} else {
1731 		return &zba_meta()->zbam_lists[order];
1732 	}
1733 }
1734 
1735 __pure2
1736 static uint32_t
1737 zba_head_index(struct zone_bits_head *hd)
1738 {
1739 	return (uint32_t)((uint64_t *)hd - zba_slot_base());
1740 }
1741 
1742 __pure2
1743 static struct zone_bits_chain *
1744 zba_chain_for_index(uint32_t index)
1745 {
1746 	return (struct zone_bits_chain *)(zba_slot_base() + index);
1747 }
1748 
1749 __pure2
1750 static uint32_t
1751 zba_chain_to_index(const struct zone_bits_chain *zbc)
1752 {
1753 	return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1754 }
1755 
1756 __abortlike
1757 static void
1758 zba_head_corruption_panic(uint32_t order, bool with_extra)
1759 {
1760 	panic("zone bits allocator head[%d:%d:%p] is corrupt",
1761 	    order, with_extra, zba_head(order, with_extra));
1762 }
1763 
1764 __abortlike
1765 static void
1766 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1767 {
1768 	panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1769 }
1770 
1771 static void
1772 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1773 {
1774 	struct zone_bits_head *hd = zba_head(order, with_extra);
1775 	uint32_t hd_index = zba_head_index(hd);
1776 	uint32_t index = zba_chain_to_index(zbc);
1777 	struct zone_bits_chain *next;
1778 
1779 	if (hd->zbh_next) {
1780 		next = zba_chain_for_index(hd->zbh_next);
1781 		if (next->zbc_prev != hd_index) {
1782 			zba_head_corruption_panic(order, with_extra);
1783 		}
1784 		next->zbc_prev = index;
1785 	}
1786 	zbc->zbc_next = hd->zbh_next;
1787 	zbc->zbc_prev = hd_index;
1788 	hd->zbh_next = index;
1789 }
1790 
1791 static void
1792 zba_remove_block(struct zone_bits_chain *zbc)
1793 {
1794 	struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1795 	uint32_t index = zba_chain_to_index(zbc);
1796 
1797 	if (prev->zbc_next != index) {
1798 		zba_chain_corruption_panic(prev, zbc);
1799 	}
1800 	if ((prev->zbc_next = zbc->zbc_next)) {
1801 		struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1802 		if (next->zbc_prev != index) {
1803 			zba_chain_corruption_panic(zbc, next);
1804 		}
1805 		next->zbc_prev = zbc->zbc_prev;
1806 	}
1807 }
1808 
1809 static vm_address_t
1810 zba_try_pop_block(uint32_t order, bool with_extra)
1811 {
1812 	struct zone_bits_head *hd = zba_head(order, with_extra);
1813 	struct zone_bits_chain *zbc;
1814 
1815 	if (hd->zbh_next == 0) {
1816 		return 0;
1817 	}
1818 
1819 	zbc = zba_chain_for_index(hd->zbh_next);
1820 	zba_remove_block(zbc);
1821 	return (vm_address_t)zbc;
1822 }
1823 
1824 static struct zone_bits_allocator_header *
1825 zba_header(vm_offset_t addr)
1826 {
1827 	addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1828 	return (struct zone_bits_allocator_header *)addr;
1829 }
1830 
1831 static size_t
1832 zba_node_parent(size_t node)
1833 {
1834 	return (node - 1) / 2;
1835 }
1836 
1837 static size_t
1838 zba_node_left_child(size_t node)
1839 {
1840 	return node * 2 + 1;
1841 }
1842 
1843 static size_t
1844 zba_node_buddy(size_t node)
1845 {
1846 	return ((node - 1) ^ 1) + 1;
1847 }
1848 
1849 static size_t
1850 zba_node(vm_offset_t addr, uint32_t order)
1851 {
1852 	vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1853 	return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1854 }
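
/*
 * Worked example (illustration only, assuming ZBA_CHUNK_SIZE == 16K and
 * hence ZBA_MAX_ORDER == 10, i.e. 2048 granules per chunk):
 *
 *   - an order 10 block (8K) maps to node 1 or 2,
 *   - an order 0 block (one granule) at granule offset `g` maps to
 *     node 2047 + g,
 *   - zba_node_parent(1) == zba_node_parent(2) == 0,
 *   - zba_node_buddy(2047) == 2048, i.e. granules 0 and 1 pair up.
 *
 * The bits flipped by zba_node_flip_split() below implement the classic
 * buddy trick of tracking, per parent node, the XOR of its two children's
 * allocation states, which is how zba_free() decides whether a freed
 * block can be merged with its buddy.
 */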
1855 
1856 static struct zone_bits_chain *
1857 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1858 {
1859 	vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1860 	return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1861 }
1862 
1863 static void
1864 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1865 {
1866 	zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1867 }
1868 
1869 static bool
1870 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1871 {
1872 	return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1873 }
1874 
1875 static void
1876 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1877 {
1878 	struct zone_bits_allocator_header *zbah = zba_header(addr);
1879 	struct zone_bits_chain *zbc;
1880 	size_t node = zba_node(addr, order);
1881 
1882 	while (node) {
1883 		size_t parent = zba_node_parent(node);
1884 
1885 		zba_node_flip_split(zbah, parent);
1886 		if (zba_node_is_split(zbah, parent)) {
1887 			break;
1888 		}
1889 
1890 		zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1891 		zba_remove_block(zbc);
1892 		order++;
1893 		node = parent;
1894 	}
1895 
1896 	zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1897 }
1898 
1899 static vm_size_t
1900 zba_chunk_header_size(uint32_t n)
1901 {
1902 	vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1903 	if (n == 0) {
1904 		hdr_size += sizeof(struct zone_bits_allocator_meta);
1905 	}
1906 	return hdr_size;
1907 }
1908 
1909 static void
1910 zba_init_chunk(uint32_t n, bool with_extra)
1911 {
1912 	vm_size_t hdr_size = zba_chunk_header_size(n);
1913 	vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1914 	struct zone_bits_allocator_header *zbah = zba_header(page);
1915 	vm_size_t size = ZBA_CHUNK_SIZE;
1916 	size_t node;
1917 
1918 	for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1919 		if (size < hdr_size + (ZBA_GRANULE << o)) {
1920 			continue;
1921 		}
1922 		size -= ZBA_GRANULE << o;
1923 		node = zba_node(page + size, o);
1924 		zba_node_flip_split(zbah, zba_node_parent(node));
1925 		zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1926 	}
1927 }
1928 
1929 __attribute__((noinline))
1930 static void
1931 zba_grow(bool with_extra)
1932 {
1933 	struct zone_bits_allocator_meta *meta = zba_meta();
1934 	kern_return_t kr = KERN_SUCCESS;
1935 	uint32_t chunk;
1936 
1937 #if !ZALLOC_TEST
1938 	if (meta->zbam_left >= meta->zbam_right) {
1939 		zba_memory_exhausted();
1940 	}
1941 #endif
1942 
1943 	if (with_extra) {
1944 		chunk = meta->zbam_right - 1;
1945 	} else {
1946 		chunk = meta->zbam_left;
1947 	}
1948 
1949 	kr = zba_populate(chunk, with_extra);
1950 	if (kr == KERN_SUCCESS) {
1951 		if (with_extra) {
1952 			meta->zbam_right -= 1;
1953 		} else {
1954 			meta->zbam_left += 1;
1955 		}
1956 
1957 		zba_init_chunk(chunk, with_extra);
1958 #if !ZALLOC_TEST
1959 	} else {
1960 		/*
1961 		 * zba_populate() has to be allowed to fail populating:
1962 		 * since we are under a global lock, we need to do the
1963 		 * VM_PAGE_WAIT() outside of the lock.
1964 		 */
1965 		assert(kr == KERN_RESOURCE_SHORTAGE);
1966 		zba_unlock();
1967 		VM_PAGE_WAIT();
1968 		zba_lock();
1969 #endif
1970 	}
1971 }
1972 
1973 static vm_offset_t
1974 zba_alloc(uint32_t order, bool with_extra)
1975 {
1976 	struct zone_bits_allocator_header *zbah;
1977 	uint32_t cur = order;
1978 	vm_address_t addr;
1979 	size_t node;
1980 
1981 	while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1982 		if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1983 			zba_grow(with_extra);
1984 			cur = order;
1985 		}
1986 	}
1987 
1988 	zbah = zba_header(addr);
1989 	node = zba_node(addr, cur);
1990 	zba_node_flip_split(zbah, zba_node_parent(node));
1991 	while (cur > order) {
1992 		cur--;
1993 		zba_node_flip_split(zbah, node);
1994 		node = zba_node_left_child(node);
1995 		zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1996 		    cur, with_extra);
1997 	}
1998 
1999 	return addr;
2000 }
2001 
2002 #define zba_map_index(type, n)    (n / (8 * sizeof(type)))
2003 #define zba_map_bit(type, n)      ((type)1 << (n % (8 * sizeof(type))))
2004 #define zba_map_mask_lt(type, n)  (zba_map_bit(type, n) - 1)
2005 #define zba_map_mask_ge(type, n)  ((type)-zba_map_bit(type, n))
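
/*
 * Illustration (not part of the build) of the helpers above,
 * for type == uint32_t and n == 5:
 *
 *   zba_map_index(uint32_t, 5)   == 0            (word 0)
 *   zba_map_bit(uint32_t, 5)     == 0x00000020   (bit 5)
 *   zba_map_mask_lt(uint32_t, 5) == 0x0000001f   (bits 0..4)
 *   zba_map_mask_ge(uint32_t, 5) == 0xffffffe0   (bits 5..31)
 */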
2006 
2007 #if !ZALLOC_TEST
2008 #if VM_TAG_SIZECLASSES
2009 
2010 static void *
2011 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
2012 {
2013 	vm_offset_t base = zone_info.zi_xtra_range.min_address;
2014 	vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
2015 
2016 	return (void *)(base + ((offs + idx) << zba_xtra_shift));
2017 }
2018 
2019 #endif /* VM_TAG_SIZECLASSES */
2020 
2021 static uint32_t
2022 zba_bits_ref_order(uint32_t bref)
2023 {
2024 	return bref >> ZBA_ORDER_SHIFT;
2025 }
2026 
2027 static bitmap_t *
2028 zba_bits_ref_ptr(uint32_t bref)
2029 {
2030 	return zba_slot_base() + (bref & ZBA_PTR_MASK);
2031 }
2032 
2033 static vm_offset_t
2034 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
2035     zalloc_flags_t flags, vm_offset_t eidx)
2036 {
2037 	size_t i = eidx / 32;
2038 	uint32_t map;
2039 
2040 	if (eidx % 32) {
2041 		map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
2042 		if (map) {
2043 			eidx = __builtin_ctz(map);
2044 			meta[i].zm_bitmap ^= 1u << eidx;
2045 			return i * 32 + eidx;
2046 		}
2047 		i++;
2048 	}
2049 
2050 	uint32_t chunk_len = meta->zm_chunk_len;
2051 	if (flags & Z_PCPU) {
2052 		chunk_len = zpercpu_count();
2053 	}
2054 	for (int j = 0; j < chunk_len; j++, i++) {
2055 		if (i >= chunk_len) {
2056 			i = 0;
2057 		}
2058 		if (__probable(map = meta[i].zm_bitmap)) {
2059 			meta[i].zm_bitmap &= map - 1;
2060 			return i * 32 + __builtin_ctz(map);
2061 		}
2062 	}
2063 
2064 	zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2065 }
2066 
2067 static vm_offset_t
2068 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2069     vm_offset_t eidx)
2070 {
2071 	uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2072 	bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2073 	size_t i = eidx / 64;
2074 	uint64_t map;
2075 
2076 	if (eidx % 64) {
2077 		map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2078 		if (map) {
2079 			eidx = __builtin_ctzll(map);
2080 			bits[i] ^= 1ull << eidx;
2081 			return i * 64 + eidx;
2082 		}
2083 		i++;
2084 	}
2085 
2086 	for (int j = 0; j < bits_size; i++, j++) {
2087 		if (i >= bits_size) {
2088 			i = 0;
2089 		}
2090 		if (__probable(map = bits[i])) {
2091 			bits[i] &= map - 1;
2092 			return i * 64 + __builtin_ctzll(map);
2093 		}
2094 	}
2095 
2096 	zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2097 }
2098 
2099 /*!
2100  * @function zone_meta_find_and_clear_bit
2101  *
2102  * @brief
2103  * The core of the bitmap allocator: find a bit set in the bitmaps.
2104  *
2105  * @discussion
2106  * This method round-robins through the available allocations,
2107  * with a per-CPU memory of the last element index that was allocated.
2108  *
2109  * This is done in order to avoid a fully LIFO behavior which makes exploiting
2110  * double-free bugs way too practical.
2111  *
2112  * @param zone          The zone we're allocating from.
2113  * @param meta          The main metadata for the chunk being allocated from.
2114  * @param flags         the alloc flags (for @c Z_PCPU).
2115  */
2116 static vm_offset_t
2117 zone_meta_find_and_clear_bit(
2118 	zone_t                  zone,
2119 	zone_stats_t            zs,
2120 	struct zone_page_metadata *meta,
2121 	zalloc_flags_t          flags)
2122 {
2123 	vm_offset_t eidx = zs->zs_alloc_rr + 1;
2124 
2125 	if (meta->zm_inline_bitmap) {
2126 		eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2127 	} else {
2128 		eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2129 	}
2130 	zs->zs_alloc_rr = (uint16_t)eidx;
2131 	return eidx;
2132 }
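
/*
 * Illustration (not part of the build): if zs->zs_alloc_rr is 37, the
 * scan above starts looking at element index 38, wraps around the chunk
 * if needed, and zs_alloc_rr is then set to the index actually handed
 * out, so consecutive allocations walk the chunk round-robin instead of
 * immediately reusing the most recently freed slot.
 */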
2133 
2134 /*!
2135  * @function zone_meta_bits_init_inline
2136  *
2137  * @brief
2138  * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2139  *
2140  * @param meta          The main metadata for the initialized chunk.
2141  * @param count         The number of elements the chunk can hold
2142  *                      (which might be partial for partially populated chunks).
2143  */
2144 static void
2145 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2146 {
2147 	/*
2148 	 * We're called with the metadata zm_bitmap fields already zeroed out.
2149 	 */
2150 	for (size_t i = 0; i < count / 32; i++) {
2151 		meta[i].zm_bitmap = ~0u;
2152 	}
2153 	if (count % 32) {
2154 		meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2155 	}
2156 }
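
/*
 * Example (illustration only): for count == 40, the loop above sets
 * meta[0].zm_bitmap = 0xffffffff (elements 0..31 free) and the tail
 * sets meta[1].zm_bitmap = zba_map_mask_lt(uint32_t, 40) == 0x000000ff
 * (elements 32..39 free); all other bits stay zero.
 */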
2157 
2158 /*!
2159  * @function zone_meta_bits_alloc_init
2160  *
2161  * @brief
2162  * Allocates a zm_bitmap field for a newly assigned chunk.
2163  *
2164  * @param count         The number of elements the chunk can hold
2165  *                      (which might be partial for partially populated chunks).
2166  * @param nbits         The maximum number of bits that will be used.
2167  * @param with_extra    Whether "VM Tracking" metadata needs to be allocated.
2168  */
2169 static uint32_t
2170 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2171 {
2172 	static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2173 	    ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2174 
2175 	uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2176 	uint64_t *bits;
2177 	size_t   i = 0;
2178 
2179 	assert(order <= ZBA_MAX_ALLOC_ORDER);
2180 	assert(count <= ZBA_GRANULE_BITS << order);
2181 
2182 	zba_lock();
2183 	bits = (uint64_t *)zba_alloc(order, with_extra);
2184 	zba_unlock();
2185 
2186 	while (i < count / 64) {
2187 		bits[i++] = ~0ull;
2188 	}
2189 	if (count % 64) {
2190 		bits[i++] = zba_map_mask_lt(uint64_t, count);
2191 	}
2192 	while (i < 1u << order) {
2193 		bits[i++] = 0;
2194 	}
2195 
2196 	return (uint32_t)(bits - zba_slot_base()) +
2197 	       (order << ZBA_ORDER_SHIFT) +
2198 	       (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2199 }
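
/*
 * Example (illustration only): for nbits == 200, order is
 * flsll(199 / ZBA_GRANULE_BITS) == flsll(3) == 2, so a 4-granule
 * (256-bit) bitmap is carved out of the buddy allocator; the first
 * `count` bits are initialized as free and the remainder stay zero.
 */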
2200 
2201 /*!
2202  * @function zone_meta_bits_merge
2203  *
2204  * @brief
2205  * Adds elements <code>[start, end)</code> to a chunk being extended.
2206  *
2207  * @param meta          The main metadata for the extended chunk.
2208  * @param start         The index of the first element to add to the chunk.
2209  * @param end           The index of the last (exclusive) element to add.
2210  */
2211 static void
2212 zone_meta_bits_merge(struct zone_page_metadata *meta,
2213     uint32_t start, uint32_t end)
2214 {
2215 	if (meta->zm_inline_bitmap) {
2216 		while (start < end) {
2217 			size_t s_i = start / 32;
2218 			size_t s_e = end / 32;
2219 
2220 			if (s_i == s_e) {
2221 				meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2222 				    zba_map_mask_ge(uint32_t, start);
2223 				break;
2224 			}
2225 
2226 			meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2227 			start += 32 - (start % 32);
2228 		}
2229 	} else {
2230 		uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2231 
2232 		while (start < end) {
2233 			size_t s_i = start / 64;
2234 			size_t s_e = end / 64;
2235 
2236 			if (s_i == s_e) {
2237 				bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2238 				    zba_map_mask_ge(uint64_t, start);
2239 				break;
2240 			}
2241 			bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2242 			start += 64 - (start % 64);
2243 		}
2244 	}
2245 }
2246 
2247 /*!
2248  * @function zone_bits_free
2249  *
2250  * @brief
2251  * Frees a bitmap to the zone bitmap allocator.
2252  *
2253  * @param bref
2254  * A bitmap reference set by @c zone_meta_bits_init() in a @c zm_bitmap field.
2255  */
2256 static void
2257 zone_bits_free(uint32_t bref)
2258 {
2259 	zba_lock();
2260 	zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2261 	    zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2262 	zba_unlock();
2263 }
2264 
2265 /*!
2266  * @function zone_meta_is_free
2267  *
2268  * @brief
2269  * Returns whether a given element appears free.
2270  */
2271 static bool
2272 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2273 {
2274 	if (meta->zm_inline_bitmap) {
2275 		uint32_t bit = zba_map_bit(uint32_t, eidx);
2276 		return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2277 	} else {
2278 		bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2279 		uint64_t bit = zba_map_bit(uint64_t, eidx);
2280 		return bits[zba_map_index(uint64_t, eidx)] & bit;
2281 	}
2282 }
2283 
2284 /*!
2285  * @function zone_meta_mark_free
2286  *
2287  * @brief
2288  * Marks an element as free and returns whether it was marked as used.
2289  */
2290 static bool
2291 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2292 {
2293 	if (meta->zm_inline_bitmap) {
2294 		uint32_t bit = zba_map_bit(uint32_t, eidx);
2295 		if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2296 			return false;
2297 		}
2298 		meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2299 	} else {
2300 		bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2301 		uint64_t bit = zba_map_bit(uint64_t, eidx);
2302 		if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2303 			return false;
2304 		}
2305 		bits[zba_map_index(uint64_t, eidx)] ^= bit;
2306 	}
2307 	return true;
2308 }
2309 
2310 #if VM_TAG_SIZECLASSES
2311 
2312 __startup_func
2313 void
2314 __zone_site_register(vm_allocation_site_t *site)
2315 {
2316 	if (zone_tagging_on) {
2317 		vm_tag_alloc(site);
2318 	}
2319 }
2320 
2321 uint16_t
2322 zone_index_from_tag_index(uint32_t sizeclass_idx)
2323 {
2324 	return zone_tags_sizeclasses[sizeclass_idx];
2325 }
2326 
2327 #endif /* VM_TAG_SIZECLASSES */
2328 #endif /* !ZALLOC_TEST */
2329 /*! @} */
2330 #pragma mark zalloc helpers
2331 #if !ZALLOC_TEST
2332 
2333 static inline void *
2334 zstack_tbi_fix(vm_offset_t elem)
2335 {
2336 	elem = vm_memtag_load_tag(elem);
2337 	return (void *)elem;
2338 }
2339 
2340 static inline vm_offset_t
2341 zstack_tbi_fill(void *addr)
2342 {
2343 	vm_offset_t elem = (vm_offset_t)addr;
2344 
2345 	return vm_memtag_canonicalize_kernel(elem);
2346 }
2347 
2348 __attribute__((always_inline))
2349 static inline void
2350 zstack_push_no_delta(zstack_t *stack, void *addr)
2351 {
2352 	vm_offset_t elem = zstack_tbi_fill(addr);
2353 
2354 	*(vm_offset_t *)addr = stack->z_head - elem;
2355 	stack->z_head = elem;
2356 }
2357 
2358 __attribute__((always_inline))
2359 void
2360 zstack_push(zstack_t *stack, void *addr)
2361 {
2362 	zstack_push_no_delta(stack, addr);
2363 	stack->z_count++;
2364 }
2365 
2366 __attribute__((always_inline))
2367 static inline void *
2368 zstack_pop_no_delta(zstack_t *stack)
2369 {
2370 	void *addr = zstack_tbi_fix(stack->z_head);
2371 
2372 	stack->z_head += *(vm_offset_t *)addr;
2373 	*(vm_offset_t *)addr = 0;
2374 
2375 	return addr;
2376 }
2377 
2378 __attribute__((always_inline))
2379 void *
2380 zstack_pop(zstack_t *stack)
2381 {
2382 	stack->z_count--;
2383 	return zstack_pop_no_delta(stack);
2384 }
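
/*
 * Illustrative sketch (not part of the build) of the delta encoding used
 * by zstack_push()/zstack_pop() above: instead of a raw "next" pointer,
 * each element stores the difference to the previous head, so popping
 * recomputes the next head by addition and the freelist never holds an
 * absolute pointer:
 *
 *   push(e):  *(vm_offset_t *)e = head - e;  head = e;
 *   pop():    e = head;  head += *(vm_offset_t *)e;  *(vm_offset_t *)e = 0;
 *
 * With an empty stack (head == 0), the first pushed element stores
 * (0 - e), and popping it restores head back to 0.
 */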
2385 
2386 static inline void
2387 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2388 {
2389 	uint32_t ticket;
2390 
2391 	if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2392 	    &ticket, &zone_locks_grp))) {
2393 		return;
2394 	}
2395 
2396 	hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2397 
2398 	/*
2399 	 * If zone caching has been disabled due to memory pressure,
2400 	 * then recording contention is not useful, give the system
2401 	 * time to recover.
2402 	 */
2403 	if (__probable(!zone_caching_disabled && !zone_exhausted(zone))) {
2404 		zone->z_recirc_cont_cur++;
2405 	}
2406 }
2407 
2408 static inline void
2409 zone_recirc_lock_nopreempt(zone_t zone)
2410 {
2411 	hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2412 }
2413 
2414 static inline void
2415 zone_recirc_unlock_nopreempt(zone_t zone)
2416 {
2417 	hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2418 }
2419 
2420 static inline void
2421 zone_lock_nopreempt_check_contention(zone_t zone)
2422 {
2423 	uint32_t ticket;
2424 #if KASAN_FAKESTACK
2425 	spl_t s = 0;
2426 	if (zone->z_kasan_fakestacks) {
2427 		s = splsched();
2428 	}
2429 #endif /* KASAN_FAKESTACK */
2430 
2431 	if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2432 	    &zone_locks_grp))) {
2433 #if KASAN_FAKESTACK
2434 		zone->z_kasan_spl = s;
2435 #endif /* KASAN_FAKESTACK */
2436 		return;
2437 	}
2438 
2439 	hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2440 #if KASAN_FAKESTACK
2441 	zone->z_kasan_spl = s;
2442 #endif /* KASAN_FAKESTACK */
2443 
2444 	/*
2445 	 * If zone caching has been disabled due to memory pressure,
2446 	 * then recording contention is not useful, give the system
2447 	 * time to recover.
2448 	 */
2449 	if (__probable(!zone_caching_disabled &&
2450 	    !zone->z_pcpu_cache && !zone_exhausted(zone))) {
2451 		zone->z_recirc_cont_cur++;
2452 	}
2453 }
2454 
2455 static inline void
2456 zone_lock_nopreempt(zone_t zone)
2457 {
2458 #if KASAN_FAKESTACK
2459 	spl_t s = 0;
2460 	if (zone->z_kasan_fakestacks) {
2461 		s = splsched();
2462 	}
2463 #endif /* KASAN_FAKESTACK */
2464 	hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2465 #if KASAN_FAKESTACK
2466 	zone->z_kasan_spl = s;
2467 #endif /* KASAN_FAKESTACK */
2468 }
2469 
2470 static inline void
2471 zone_unlock_nopreempt(zone_t zone)
2472 {
2473 #if KASAN_FAKESTACK
2474 	spl_t s = zone->z_kasan_spl;
2475 	zone->z_kasan_spl = 0;
2476 #endif /* KASAN_FAKESTACK */
2477 	hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2478 #if KASAN_FAKESTACK
2479 	if (zone->z_kasan_fakestacks) {
2480 		splx(s);
2481 	}
2482 #endif /* KASAN_FAKESTACK */
2483 }
2484 
2485 static inline void
2486 zone_depot_lock_nopreempt(zone_cache_t zc)
2487 {
2488 	hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2489 }
2490 
2491 static inline void
2492 zone_depot_unlock_nopreempt(zone_cache_t zc)
2493 {
2494 	hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2495 }
2496 
2497 static inline void
2498 zone_depot_lock(zone_cache_t zc)
2499 {
2500 	hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2501 }
2502 
2503 static inline void
2504 zone_depot_unlock(zone_cache_t zc)
2505 {
2506 	hw_lck_ticket_unlock(&zc->zc_depot_lock);
2507 }
2508 
2509 zone_t
2510 zone_by_id(size_t zid)
2511 {
2512 	return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2513 }
2514 
2515 static inline bool
2516 zone_supports_vm(zone_t z)
2517 {
2518 	/*
2519 	 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2520 	 * to overcommit because they're used to reclaim memory
2521 	 * (VM support).
2522 	 */
2523 	return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2524 	       z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2525 }
2526 
2527 const char *
2528 zone_name(zone_t z)
2529 {
2530 	return z->z_name;
2531 }
2532 
2533 const char *
2534 zone_heap_name(zone_t z)
2535 {
2536 	zone_security_flags_t zsflags = zone_security_config(z);
2537 	if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2538 		return kalloc_heap_names[zsflags.z_kheap_id];
2539 	}
2540 	return "invalid";
2541 }
2542 
2543 static uint32_t
2544 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2545 {
2546 	vm_size_t elem_count, chunks;
2547 
2548 	elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2549 	    zone_elem_outer_size(z);
2550 	chunks = (max_elems + elem_count - 1) / elem_count;
2551 
2552 	return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2553 }
2554 
2555 static inline vm_size_t
2556 zone_submaps_approx_size(void)
2557 {
2558 	vm_size_t size = 0;
2559 
2560 	for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2561 		if (zone_submaps[idx] != VM_MAP_NULL) {
2562 			size += zone_submaps[idx]->size;
2563 		}
2564 	}
2565 
2566 	return size;
2567 }
2568 
2569 static inline void
2570 zone_depot_init(struct zone_depot *zd)
2571 {
2572 	*zd = (struct zone_depot){
2573 		.zd_tail = &zd->zd_head,
2574 	};
2575 }
2576 
2577 static inline void
2578 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2579 {
2580 	if (zd->zd_full++ == 0) {
2581 		zd->zd_tail = &mag->zm_next;
2582 	}
2583 	mag->zm_next = zd->zd_head;
2584 	zd->zd_head = mag;
2585 }
2586 
2587 static inline void
2588 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2589 {
2590 	zd->zd_full++;
2591 	mag->zm_next = *zd->zd_tail;
2592 	*zd->zd_tail = mag;
2593 	zd->zd_tail = &mag->zm_next;
2594 }
2595 
2596 static inline void
2597 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2598 {
2599 	zd->zd_empty++;
2600 	mag->zm_next = *zd->zd_tail;
2601 	*zd->zd_tail = mag;
2602 }
2603 
2604 static inline zone_magazine_t
2605 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2606 {
2607 	zone_magazine_t mag = zd->zd_head;
2608 
2609 	assert(zd->zd_full);
2610 
2611 	zd->zd_full--;
2612 	if (z && z->z_recirc_full_min > zd->zd_full) {
2613 		z->z_recirc_full_min = zd->zd_full;
2614 	}
2615 	zd->zd_head = mag->zm_next;
2616 	if (zd->zd_full == 0) {
2617 		zd->zd_tail = &zd->zd_head;
2618 	}
2619 
2620 	mag->zm_next = NULL;
2621 	return mag;
2622 }
2623 
2624 static inline zone_magazine_t
2625 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2626 {
2627 	zone_magazine_t mag = *zd->zd_tail;
2628 
2629 	assert(zd->zd_empty);
2630 
2631 	zd->zd_empty--;
2632 	if (z && z->z_recirc_empty_min > zd->zd_empty) {
2633 		z->z_recirc_empty_min = zd->zd_empty;
2634 	}
2635 	*zd->zd_tail = mag->zm_next;
2636 
2637 	mag->zm_next = NULL;
2638 	return mag;
2639 }
2640 
2641 static inline smr_seq_t
2642 zone_depot_move_full(
2643 	struct zone_depot      *dst,
2644 	struct zone_depot      *src,
2645 	uint32_t                n,
2646 	zone_t                  z)
2647 {
2648 	zone_magazine_t head, last;
2649 
2650 	assert(n);
2651 	assert(src->zd_full >= n);
2652 
2653 	src->zd_full -= n;
2654 	if (z && z->z_recirc_full_min > src->zd_full) {
2655 		z->z_recirc_full_min = src->zd_full;
2656 	}
2657 	head = last = src->zd_head;
2658 	for (uint32_t i = n; i-- > 1;) {
2659 		last = last->zm_next;
2660 	}
2661 
2662 	src->zd_head = last->zm_next;
2663 	if (src->zd_full == 0) {
2664 		src->zd_tail = &src->zd_head;
2665 	}
2666 
2667 	if (z && zone_security_array[zone_index(z)].z_lifo) {
2668 		if (dst->zd_full == 0) {
2669 			dst->zd_tail = &last->zm_next;
2670 		}
2671 		last->zm_next = dst->zd_head;
2672 		dst->zd_head = head;
2673 	} else {
2674 		last->zm_next = *dst->zd_tail;
2675 		*dst->zd_tail = head;
2676 		dst->zd_tail = &last->zm_next;
2677 	}
2678 	dst->zd_full += n;
2679 
2680 	return last->zm_seq;
2681 }
2682 
2683 static inline void
2684 zone_depot_move_empty(
2685 	struct zone_depot      *dst,
2686 	struct zone_depot      *src,
2687 	uint32_t                n,
2688 	zone_t                  z)
2689 {
2690 	zone_magazine_t head, last;
2691 
2692 	assert(n);
2693 	assert(src->zd_empty >= n);
2694 
2695 	src->zd_empty -= n;
2696 	if (z && z->z_recirc_empty_min > src->zd_empty) {
2697 		z->z_recirc_empty_min = src->zd_empty;
2698 	}
2699 	head = last = *src->zd_tail;
2700 	for (uint32_t i = n; i-- > 1;) {
2701 		last = last->zm_next;
2702 	}
2703 
2704 	*src->zd_tail = last->zm_next;
2705 
2706 	dst->zd_empty += n;
2707 	last->zm_next = *dst->zd_tail;
2708 	*dst->zd_tail = head;
2709 }
2710 
2711 static inline bool
2712 zone_depot_poll(struct zone_depot *depot, smr_t smr)
2713 {
2714 	if (depot->zd_full == 0) {
2715 		return false;
2716 	}
2717 
2718 	return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
2719 }
2720 
2721 static void
2722 zone_cache_swap_magazines(zone_cache_t cache)
2723 {
2724 	uint16_t count_a = cache->zc_alloc_cur;
2725 	uint16_t count_f = cache->zc_free_cur;
2726 	vm_offset_t *elems_a = cache->zc_alloc_elems;
2727 	vm_offset_t *elems_f = cache->zc_free_elems;
2728 
2729 	z_debug_assert(count_a <= zc_mag_size());
2730 	z_debug_assert(count_f <= zc_mag_size());
2731 
2732 	cache->zc_alloc_cur = count_f;
2733 	cache->zc_free_cur = count_a;
2734 	cache->zc_alloc_elems = elems_f;
2735 	cache->zc_free_elems = elems_a;
2736 }
2737 
2738 __pure2
2739 static smr_t
2740 zone_cache_smr(zone_cache_t cache)
2741 {
2742 	return cache->zc_smr;
2743 }
2744 
2745 /*!
2746  * @function zone_magazine_replace
2747  *
2748  * @brief
2749  * Unload a magazine and load a new one in its place.
2750  */
2751 static zone_magazine_t
2752 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2753 {
2754 	zone_magazine_t old;
2755 	vm_offset_t **elems;
2756 
2757 	mag->zm_seq = SMR_SEQ_INVALID;
2758 
2759 	if (empty) {
2760 		elems = &zc->zc_free_elems;
2761 		zc->zc_free_cur = 0;
2762 	} else {
2763 		elems = &zc->zc_alloc_elems;
2764 		zc->zc_alloc_cur = zc_mag_size();
2765 	}
2766 	old = (zone_magazine_t)((uintptr_t)*elems -
2767 	    offsetof(struct zone_magazine, zm_elems));
2768 	*elems = mag->zm_elems;
2769 
2770 	return old;
2771 }
2772 
2773 static zone_magazine_t
2774 zone_magazine_alloc(zalloc_flags_t flags)
2775 {
2776 	return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2777 }
2778 
2779 static void
2780 zone_magazine_free(zone_magazine_t mag)
2781 {
2782 	(zfree)(zc_magazine_zone, mag);
2783 }
2784 
2785 static void
2786 zone_magazine_free_list(struct zone_depot *zd)
2787 {
2788 	zone_magazine_t tmp, mag = *zd->zd_tail;
2789 
2790 	while (mag) {
2791 		tmp = mag->zm_next;
2792 		zone_magazine_free(mag);
2793 		mag = tmp;
2794 	}
2795 
2796 	*zd->zd_tail = NULL;
2797 	zd->zd_empty = 0;
2798 }
2799 
2800 void
2801 zone_enable_caching(zone_t zone)
2802 {
2803 	size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2804 	zone_cache_t caches;
2805 	size_t depot_limit;
2806 
2807 	depot_limit = zc_pcpu_max() / size_per_mag;
2808 	zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2809 
2810 	caches = zalloc_percpu_permanent_type(struct zone_cache);
2811 	zpercpu_foreach(zc, caches) {
2812 		zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2813 		zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2814 		zone_depot_init(&zc->zc_depot);
2815 		hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2816 	}
2817 
2818 	zone_lock(zone);
2819 	assert(zone->z_pcpu_cache == NULL);
2820 	zone->z_pcpu_cache = caches;
2821 	zone->z_recirc_cont_cur = 0;
2822 	zone->z_recirc_cont_wma = 0;
2823 	zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2824 	zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2825 	zone_unlock(zone);
2826 }
2827 
2828 bool
2829 zone_maps_owned(vm_address_t addr, vm_size_t size)
2830 {
2831 	return from_zone_map(addr, size);
2832 }
2833 
2834 #if KASAN_LIGHT
2835 bool
2836 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2837 {
2838 	return from_zone_map(addr, size) ||
2839 	       mach_vm_range_size(&zone_info.zi_map_range) == 0;
2840 }
2841 #endif /* KASAN_LIGHT */
2842 
2843 void
2844 zone_map_sizes(
2845 	vm_map_size_t    *psize,
2846 	vm_map_size_t    *pfree,
2847 	vm_map_size_t    *plargest_free)
2848 {
2849 	vm_map_size_t size, free, largest;
2850 
2851 	vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2852 
2853 	for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2854 		vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2855 		*psize += size;
2856 		*pfree += free;
2857 		*plargest_free = MAX(*plargest_free, largest);
2858 	}
2859 }
2860 
2861 __attribute__((always_inline))
2862 vm_map_t
2863 zone_submap(zone_security_flags_t zsflags)
2864 {
2865 	return zone_submaps[zsflags.z_submap_idx];
2866 }
2867 
2868 unsigned
2869 zpercpu_count(void)
2870 {
2871 	return zpercpu_early_count;
2872 }
2873 
2874 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2875 /*
2876  * Returns a random number of a given bit-width.
2877  *
2878  * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2879  *
2880  * This uses Intel's rdrand because random() uses FP registers
2881  * which causes FP faults and allocations which isn't something
2882  * we can do from zalloc itself due to reentrancy problems.
2883  *
2884  * For pre-rdrand machines (which we no longer support),
2885  * we use a bad biased random generator that doesn't use FP.
2886  * Such HW is no longer supported, but VM of newer OSes on older
2887  * bare metal is made to limp along (with reduced security) this way.
2888  */
2889 static uint64_t
2890 zalloc_random_mask64(uint32_t bits)
2891 {
2892 	uint64_t mask = ~0ull >> (64 - bits);
2893 	uint64_t v;
2894 
2895 #if __x86_64__
2896 	if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2897 		asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2898 		v &= mask;
2899 	} else {
2900 		disable_preemption();
2901 		int cpu = cpu_number();
2902 		v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2903 		    zone_bool_gen[cpu].zbg_entropy,
2904 		    ZONE_ENTROPY_CNT, bits);
2905 		enable_preemption();
2906 	}
2907 #else
2908 	v = early_random() & mask;
2909 #endif
2910 
2911 	return v;
2912 }
2913 
2914 /*
2915  * Returns a random number within [bound_min, bound_max)
2916  *
2917  * This isn't _exactly_ uniform, but the skew is small enough
2918  * not to matter for the consumers of this interface.
2919  *
2920  * Values within [bound_min, bound_min + 2^64 % (bound_max - bound_min))
2921  * will be returned (bound_max - bound_min) / 2^64 more often
2922  * than values within [bound_min + 2^64 % (bound_max - bound_min), bound_max).
2923  */
2924 static uint32_t
2925 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2926 {
2927 	uint64_t delta = bound_max - bound_min;
2928 
2929 	return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2930 }
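
/*
 * Example (illustration only): zalloc_random_uniform32(4, 10) draws a
 * 64-bit random value r and returns 4 + (r % 6), i.e. a value in [4, 10).
 * Since 2^64 is not a multiple of 6, the first (2^64 % 6) residues are
 * hit one extra time, which is the (negligible) skew described above.
 */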
2931 
2932 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2933 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2934 /*
2935  * Track all kalloc zones of specified size for zlog name
2936  * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2937  *
2938  * Additionally track all early kalloc zones with early.kalloc
2939  */
2940 static bool
2941 track_kalloc_zones(zone_t z, const char *logname)
2942 {
2943 	const char *prefix;
2944 	size_t len;
2945 	zone_security_flags_t zsflags = zone_security_config(z);
2946 
2947 	prefix = "kalloc.type.var.";
2948 	len    = strlen(prefix);
2949 	if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2950 	    strncmp(logname, prefix, len) == 0) {
2951 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2952 
2953 		return zone_elem_inner_size(z) == sizeclass;
2954 	}
2955 
2956 	prefix = "kalloc.type.";
2957 	len    = strlen(prefix);
2958 	if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2959 	    strncmp(logname, prefix, len) == 0) {
2960 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2961 
2962 		return zone_elem_inner_size(z) == sizeclass;
2963 	}
2964 
2965 	prefix = "kalloc.";
2966 	len    = strlen(prefix);
2967 	if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2968 	    strncmp(logname, prefix, len) == 0) {
2969 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2970 
2971 		return zone_elem_inner_size(z) == sizeclass;
2972 	}
2973 
2974 	prefix = "early.kalloc";
2975 	if ((zsflags.z_kheap_id == KHEAP_ID_EARLY) &&
2976 	    (strcmp(logname, prefix) == 0)) {
2977 		return true;
2978 	}
2979 
2980 	return false;
2981 }
2982 #endif
2983 
2984 int
2985 track_this_zone(const char *zonename, const char *logname)
2986 {
2987 	unsigned int len;
2988 	const char *zc = zonename;
2989 	const char *lc = logname;
2990 
2991 	/*
2992 	 * Compare the strings.  We bound the compare by MAX_ZONE_NAME.
2993 	 */
2994 
2995 	for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2996 		/*
2997 		 * If the current characters don't match, check for a space
2998 		 * in the zone name and a corresponding period in the log name.
2999 		 * If that's not there, then the strings don't match.
3000 		 */
3001 
3002 		if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
3003 			break;
3004 		}
3005 
3006 		/*
3007 		 * The strings are equal so far.  If we're at the end, then it's a match.
3008 		 */
3009 
3010 		if (*zc == '\0') {
3011 			return TRUE;
3012 		}
3013 	}
3014 
3015 	return FALSE;
3016 }
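
/*
 * Example (illustration only): track_this_zone("vm objects", "vm.objects")
 * returns TRUE because a '.' in the log name is accepted where the zone
 * name has a ' ', whereas track_this_zone("vm objects", "vm_objects")
 * returns FALSE.
 */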
3017 
3018 #if DEBUG || DEVELOPMENT
3019 
3020 vm_size_t
3021 zone_element_info(void *addr, vm_tag_t * ptag)
3022 {
3023 	vm_size_t     size = 0;
3024 	vm_tag_t      tag = VM_KERN_MEMORY_NONE;
3025 	struct zone *src_zone;
3026 
3027 	if (from_zone_map(addr, sizeof(void *))) {
3028 		src_zone = zone_by_id(zone_index_from_ptr(addr));
3029 		size     = zone_elem_inner_size(src_zone);
3030 #if VM_TAG_SIZECLASSES
3031 		if (__improbable(src_zone->z_uses_tags)) {
3032 			struct zone_page_metadata *meta;
3033 			vm_offset_t eidx;
3034 			vm_tag_t *slot;
3035 
3036 			meta = zone_element_resolve(src_zone,
3037 			    (vm_offset_t)addr, &eidx);
3038 			slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
3039 			tag  = *slot;
3040 		}
3041 #endif /* VM_TAG_SIZECLASSES */
3042 	}
3043 
3044 	*ptag = tag;
3045 	return size;
3046 }
3047 
3048 #endif /* DEBUG || DEVELOPMENT */
3049 #if KASAN_CLASSIC
3050 
3051 vm_size_t
3052 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
3053 {
3054 	zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
3055 
3056 	*zonep = zone;
3057 	return zone_elem_inner_size(zone);
3058 }
3059 
3060 #endif /* KASAN_CLASSIC */
3061 #endif /* !ZALLOC_TEST */
3062 #pragma mark Zone zeroing and early random
3063 #if !ZALLOC_TEST
3064 
3065 /*
3066  * Zone zeroing
3067  *
3068  * All allocations from zones are zeroed on free and are additionally
3069  * checked to still be zero on alloc. The check is always on
3070  * on embedded devices. A perf regression was detected on Intel
3071  * because we can't use the vectorized implementation of
3072  * memcmp_zero_ptr_aligned due to cyclic dependencies between
3073  * initialization and allocation, so there we perform the check
3074  * on only 20% of the allocations.
3075  */
3076 #if ZALLOC_ENABLE_ZERO_CHECK
3077 #if defined(__x86_64__)
3078 /*
3079  * Perform zero validation on every 5th allocation
3080  */
3081 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3082 static uint32_t PERCPU_DATA(zzc_decrementer);
3083 #endif /* defined(__x86_64__) */
3084 
3085 /*
3086  * Determine if zero validation for allocation should be skipped
3087  */
3088 static bool
3089 zalloc_skip_zero_check(void)
3090 {
3091 #if defined(__x86_64__)
3092 	uint32_t *counterp, cnt;
3093 
3094 	counterp = PERCPU_GET(zzc_decrementer);
3095 	cnt = *counterp;
3096 	if (__probable(cnt > 0)) {
3097 		*counterp  = cnt - 1;
3098 		return true;
3099 	}
3100 	*counterp = zzc_rate - 1;
3101 #endif /* defined(__x86_64__) */
3102 	return false;
3103 }
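
/*
 * Illustration (not part of the build): with the default zzc_rate of 5,
 * the per-CPU counter above counts 4, 3, 2, 1 (skipping the check) and
 * then hits 0, at which point the check runs and the counter resets to 4,
 * i.e. roughly 20% of allocations are zero-validated on x86_64.
 */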
3104 
3105 __abortlike
3106 static void
3107 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3108 {
3109 	uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3110 	uint32_t first_offs = ~0u;
3111 	uintptr_t first_bits = 0, v;
3112 	char buf[1024];
3113 	int pos = 0;
3114 
3115 	buf[0] = '\0';
3116 
3117 	for (uint32_t o = 0; o < size; o += sizeof(v)) {
3118 		if ((v = *(uintptr_t *)(elem + o)) == 0) {
3119 			continue;
3120 		}
3121 		pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3122 		    "%5d: 0x%016lx", o, v);
3123 		if (first_offs > o) {
3124 			first_offs = o;
3125 			first_bits = v;
3126 		}
3127 	}
3128 
3129 	(panic)("[%s%s]: element modified after free "
3130 	"(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3131 	zone_heap_name(z), zone_name(z),
3132 	first_offs, first_bits, esize, (void *)elem, buf);
3133 }
3134 
3135 static void
3136 zalloc_validate_element(
3137 	zone_t                  zone,
3138 	vm_offset_t             elem,
3139 	vm_size_t               size,
3140 	zalloc_flags_t          flags)
3141 {
3142 	if (flags & Z_NOZZC) {
3143 		return;
3144 	}
3145 	if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3146 		zalloc_uaf_panic(zone, elem, size);
3147 	}
3148 	if (flags & Z_PCPU) {
3149 		for (size_t i = zpercpu_count(); --i > 0;) {
3150 			elem += PAGE_SIZE;
3151 			if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3152 				zalloc_uaf_panic(zone, elem, size);
3153 			}
3154 		}
3155 	}
3156 }
3157 
3158 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3159 
3160 __attribute__((noinline))
3161 static void
3162 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3163 {
3164 #if KASAN_FAKESTACK
3165 	/*
3166 	 * This can cause re-entrancy with kasan fakestacks
3167 	 */
3168 #pragma unused(zone, cpu, zs)
3169 #else
3170 	uint32_t bits;
3171 
3172 	bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3173 	    zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3174 
3175 	zs->zs_alloc_rr += bits;
3176 	zs->zs_alloc_rr %= zone->z_chunk_elems;
3177 #endif
3178 }
3179 
3180 #endif /* !ZALLOC_TEST */
3181 #pragma mark Zone Leak Detection
3182 #if !ZALLOC_TEST
3183 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3184 
3185 /*
3186  * Zone leak debugging code
3187  *
3188  * When enabled, this code keeps a log to track allocations to a particular
3189  * zone that have not yet been freed.
3190  *
3191  * Examining this log will reveal the source of a zone leak.
3192  *
3193  * The log is allocated only when logging is enabled (it is off by default),
3194  * so there is no effect on the system when it's turned off.
3195  *
3196  * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3197  * zone name to log, with n starting at 1.
3198  *
3199  * Leaks debugging utilizes 2 tunables:
3200  * - zlsize (in kB) which describes how much allocated memory the records
3201  *   should cover (zones with smaller elements get more records, default is 4M).
3202  *
3203  * - zlfreq (in bytes) which describes a sample rate in cumulative allocation
3204  *   size at which automatic leak detection will sample allocations.
3205  *   (default is 8k)
3206  *
3207  *
3208  * Zone corruption logging
3209  *
3210  * Logging can also be used to help identify the source of a zone corruption.
3211  *
3212  * First, identify the zone that is being corrupted,
3213  * then add "-zc zlog<n>=<zone name>" to the boot-args.
3214  *
3215  * When -zc is used in conjunction with zlog,
3216  * it changes the logging style to track both allocations and frees to the zone.
3217  *
3218  * When the corruption is detected, examining the log will show you the stack
3219  * traces of the callers who last allocated and freed any particular element in
3220  * the zone.
3221  *
3222  * Corruption debugging logs will have zrecs records
3223  * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3224  */
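
/*
 * Example boot-args (names are illustrative only):
 *
 *   zlog1=kalloc.48 zlsize=8192        leak-style log for 48-byte kalloc zones
 *   -zc zlog1=vm.objects zrecs=32768   corruption-style log (allocs and frees)
 *
 * Zone names are matched by track_this_zone() / track_kalloc_zones()
 * above, where a '.' in the boot-arg stands for a ' ' in the zone name.
 */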
3225 
3226 #define ZRECORDS_MAX            (256u << 10)
3227 #define ZRECORDS_DEFAULT        (16u  << 10)
3228 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3229 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3230 static TUNABLE(uint32_t, zlfreq, "zlfreq", 8 * 1024);
3231 
3232 __startup_func
3233 static void
3234 zone_leaks_init_zrecs(void)
3235 {
3236 	/*
3237 	 * Don't allow more than ZRECORDS_MAX records,
3238 	 * even if the user asked for more.
3239 	 *
3240 	 * This prevents accidentally hogging too much kernel memory
3241 	 * and making the system unusable.
3242 	 */
3243 	if (zrecs == 0) {
3244 		zrecs = ZRECORDS_DEFAULT *
3245 		    (uint32_t)((max_mem + (1ul << 30)) >> 30);
3246 	}
3247 	if (zrecs > ZRECORDS_MAX) {
3248 		zrecs = ZRECORDS_MAX;
3249 	}
3250 }
3251 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3252 
3253 static uint32_t
3254 zone_leaks_record_count(zone_t z)
3255 {
3256 	uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3257 
3258 	return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3259 }
3260 
3261 static uint32_t
3262 zone_leaks_sample_rate(zone_t z)
3263 {
3264 	return zlfreq / zone_elem_inner_size(z);
3265 }
3266 
3267 #if ZALLOC_ENABLE_LOGGING
3268 /* Log allocations and frees to help debug a zone element corruption */
3269 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3270 
3271 /*
3272  * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3273  */
3274 #define MAX_ZONES_LOG_REQUESTS  10
3275 
3276 /**
3277  * @function zone_setup_logging
3278  *
3279  * @abstract
3280  * Optionally sets up a zone for logging.
3281  *
3282  * @discussion
3283  * We recognize the following boot-args:
3284  *
3285  *	zlog=<zone_to_log>
3286  *	zrecs=<num_records_in_log>
3287  *	zlsize=<memory to cover for leaks>
3288  *
3289  * The zlog arg is used to specify the zone name that should be logged,
3290  * and zrecs/zlsize is used to control the size of the log.
3291  */
3292 static void
3293 zone_setup_logging(zone_t z)
3294 {
3295 	char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3296 	char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3297 	char zlog_val[MAX_ZONE_NAME];  /* the zone name we're logging, if any */
3298 	bool logging_on = false;
3299 
3300 	/*
3301 	 * Append kalloc heap name to zone name (if zone is used by kalloc)
3302 	 */
3303 	snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3304 
3305 	/* zlog0 isn't allowed. */
3306 	for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3307 		snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3308 
3309 		if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3310 			if (track_this_zone(zone_name, zlog_val) ||
3311 			    track_kalloc_zones(z, zlog_val)) {
3312 				logging_on = true;
3313 				break;
3314 			}
3315 		}
3316 	}
3317 
3318 	/*
3319 	 * Backwards compatibility with the old boot-arg used to specify single
3320 	 * zone logging, i.e. zlog. This needs to happen after the newer zlog<n>
3321 	 * checks because the "zlog" prefix would match all of the zlog<n>
3322 	 * boot-args.
3323 	 */
3324 	if (!logging_on &&
3325 	    PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3326 		if (track_this_zone(zone_name, zlog_val) ||
3327 		    track_kalloc_zones(z, zlog_val)) {
3328 			logging_on = true;
3329 		}
3330 	}
3331 
3332 	/*
3333 	 * If we want to log a zone, see if we need to allocate buffer space for
3334 	 * the log.
3335 	 *
3336 	 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3337 	 * we have to defer allocation in that case.
3338 	 *
3339 	 * zone_init() will finish the job.
3340 	 *
3341 	 * If we want to log one of the VM related zones that's set up early on,
3342 	 * we will skip allocation of the log until zinit is called again later
3343 	 * on some other zone.
3344 	 */
3345 	if (logging_on) {
3346 		if (corruption_debug_flag) {
3347 			z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3348 		} else {
3349 			z->z_btlog = btlog_create(BTLOG_HASH,
3350 			    zone_leaks_record_count(z), 0);
3351 		}
3352 		if (z->z_btlog) {
3353 			z->z_log_on = true;
3354 			printf("zone[%s%s]: logging enabled\n",
3355 			    zone_heap_name(z), z->z_name);
3356 		} else {
3357 			printf("zone[%s%s]: failed to enable logging\n",
3358 			    zone_heap_name(z), z->z_name);
3359 		}
3360 	}
3361 }
3362 
3363 #endif /* ZALLOC_ENABLE_LOGGING */
3364 #if KASAN_TBI
3365 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3366 
3367 __startup_func
3368 static void
3369 kasan_tbi_init_zrecs(void)
3370 {
3371 	/*
3372 	 * Don't allow more than ZRECORDS_MAX records,
3373 	 * even if the user asked for more.
3374 	 *
3375 	 * This prevents accidentally hogging too much kernel memory
3376 	 * and making the system unusable.
3377 	 */
3378 	if (kasan_zrecs == 0) {
3379 		kasan_zrecs = ZRECORDS_DEFAULT *
3380 		    (uint32_t)((max_mem + (1ul << 30)) >> 30);
3381 	}
3382 	if (kasan_zrecs > ZRECORDS_MAX) {
3383 		kasan_zrecs = ZRECORDS_MAX;
3384 	}
3385 }
3386 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3387 
3388 static void
3389 zone_setup_kasan_logging(zone_t z)
3390 {
3391 	if (!z->z_tbi_tag) {
3392 		printf("zone[%s%s]: kasan logging disabled for this zone\n",
3393 		    zone_heap_name(z), z->z_name);
3394 		return;
3395 	}
3396 
3397 	z->z_log_on = true;
3398 	z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3399 	if (!z->z_btlog) {
3400 		printf("zone[%s%s]: failed to enable kasan logging\n",
3401 		    zone_heap_name(z), z->z_name);
3402 	}
3403 }
3404 
3405 #endif /* KASAN_TBI */
3406 #if CONFIG_ZLEAKS
3407 
3408 static thread_call_data_t zone_leaks_callout;
3409 
3410 /*
3411  * The zone leak detector, abbreviated 'zleak', keeps track
3412  * of a subset of the currently outstanding allocations
3413  * made by the zone allocator.
3414  *
3415  * Zones that use more than zleak_pages_per_zone_wired_threshold
3416  * pages will get a BTLOG_HASH btlog with sampling to minimize
3417  * perf impact, yet receive statistical data about the backtrace
3418  * that is the most likely to cause the leak.
3419  *
3420  * If the zone drops far enough below the threshold, the log
3421  * is disabled and backtraces freed. Data can be collected
3422  * from userspace with the zlog(1) command.
3423  */
3424 
3425 uint32_t                zleak_active;
3426 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3427 
3428 /* Size a zone will have before we will collect data on it */
3429 static size_t           zleak_pages_per_zone_wired_threshold = ~0;
3430 vm_size_t               zleak_per_zone_tracking_threshold = ~0;
3431 
3432 static inline bool
3433 zleak_should_enable_for_zone(zone_t z)
3434 {
3435 	if (z->z_log_on) {
3436 		return false;
3437 	}
3438 	if (z->z_btlog) {
3439 		return false;
3440 	}
3441 	if (z->z_exhausts) {
3442 		return false;
3443 	}
3444 	if (zone_exhaustible(z)) {
3445 		return z->z_wired_cur * 8 >= z->z_wired_max * 7;
3446 	}
3447 	return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3448 }
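/*
 * Worked example for the exhaustible case above: the check
 * z_wired_cur * 8 >= z_wired_max * 7 enables tracking once a zone has
 * reached 7/8th (87.5%) of its wired page budget, e.g. at 896 pages for
 * a zone capped at 1024 wired pages.
 */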
3449 
3450 static inline bool
3451 zleak_should_disable_for_zone(zone_t z)
3452 {
3453 	if (z->z_log_on) {
3454 		return false;
3455 	}
3456 	if (!z->z_btlog) {
3457 		return false;
3458 	}
3459 	if (zone_exhaustible(z)) {
3460 		return z->z_wired_cur * 8 < z->z_wired_max * 7;
3461 	}
3462 	return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3463 }
3464 
3465 static void
3466 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3467 {
3468 	btlog_t log;
3469 
3470 	zone_foreach(z) {
3471 		if (zleak_should_disable_for_zone(z)) {
3472 			log = z->z_btlog;
3473 			z->z_btlog = NULL;
3474 			assert(z->z_btlog_disabled == NULL);
3475 			btlog_disable(log);
3476 			z->z_btlog_disabled = log;
3477 			os_atomic_dec(&zleak_active, relaxed);
3478 		}
3479 
3480 		if (zleak_should_enable_for_zone(z)) {
3481 			log = z->z_btlog_disabled;
3482 			if (log == NULL) {
3483 				log = btlog_create(BTLOG_HASH,
3484 				    zone_leaks_record_count(z),
3485 				    zone_leaks_sample_rate(z));
3486 			} else if (btlog_enable(log) == KERN_SUCCESS) {
3487 				z->z_btlog_disabled = NULL;
3488 			} else {
3489 				log = NULL;
3490 			}
3491 			os_atomic_store(&z->z_btlog, log, release);
3492 			os_atomic_inc(&zleak_active, relaxed);
3493 		}
3494 	}
3495 }
3496 
3497 __startup_func
3498 static void
3499 zleak_init(void)
3500 {
3501 	zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3502 
3503 	zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3504 	    zleak_max_zonemap_size / 8);
3505 
3506 	thread_call_setup_with_options(&zone_leaks_callout,
3507 	    zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3508 	    THREAD_CALL_OPTIONS_ONCE);
3509 }
3510 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3511 
3512 kern_return_t
3513 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3514 {
3515 	if (value >= zleak_max_zonemap_size) {
3516 		return KERN_INVALID_VALUE;
3517 	}
3518 
3519 	if (arg == &zleak_per_zone_tracking_threshold) {
3520 		zleak_per_zone_tracking_threshold = (vm_size_t)value;
3521 		zleak_pages_per_zone_wired_threshold = atop(value);
3522 		if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3523 			thread_call_enter(&zone_leaks_callout);
3524 		}
3525 		return KERN_SUCCESS;
3526 	}
3527 
3528 	return KERN_INVALID_ARGUMENT;
3529 }
3530 
3531 static void
3532 panic_display_zleaks(bool has_syms)
3533 {
3534 	bool did_header = false;
3535 	vm_address_t bt[BTLOG_MAX_DEPTH];
3536 	uint32_t len, count;
3537 
3538 	zone_foreach(z) {
3539 		btlog_t log = z->z_btlog;
3540 
3541 		if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3542 			continue;
3543 		}
3544 
3545 		count = btlog_guess_top(log, bt, &len);
3546 		if (count == 0) {
3547 			continue;
3548 		}
3549 
3550 		if (!did_header) {
3551 			paniclog_append_noflush("Zone (suspected) leak report:\n");
3552 			did_header = true;
3553 		}
3554 
3555 		paniclog_append_noflush("  Zone:    %s%s\n",
3556 		    zone_heap_name(z), zone_name(z));
3557 		paniclog_append_noflush("  Count:   %d (%ld bytes)\n", count,
3558 		    (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3559 		paniclog_append_noflush("  Size:    %ld\n",
3560 		    (long)zone_size_wired(z));
3561 		paniclog_append_noflush("  Top backtrace:\n");
3562 		for (uint32_t i = 0; i < len; i++) {
3563 			if (has_syms) {
3564 				paniclog_append_noflush("    %p ", (void *)bt[i]);
3565 				panic_print_symbol_name(bt[i]);
3566 				paniclog_append_noflush("\n");
3567 			} else {
3568 				paniclog_append_noflush("    %p\n", (void *)bt[i]);
3569 			}
3570 		}
3571 
3572 		kmod_panic_dump(bt, len);
3573 		paniclog_append_noflush("\n");
3574 	}
3575 }
3576 #endif /* CONFIG_ZLEAKS */
3577 
3578 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3579 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3580 
3581 #if !KASAN_TBI
3582 __cold
3583 #endif
3584 static void
3585 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3586 {
3587 	btlog_t log = zone->z_btlog;
3588 	btref_get_flags_t flags = 0;
3589 	btref_t ref;
3590 
3591 #if !KASAN_TBI
3592 	if (!log || !btlog_sample(log)) {
3593 		return;
3594 	}
3595 #endif
3596 	if (get_preemption_level() || zone_supports_vm(zone)) {
3597 		/*
3598 		 * VM zones can be used by btlog, avoid reentrancy issues.
3599 		 */
3600 		flags = BTREF_GET_NOWAIT;
3601 	}
3602 
3603 	ref = btref_get(fp, flags);
3604 	while (count-- > 0) {
3605 		if (count) {
3606 			btref_retain(ref);
3607 		}
3608 		addr = (vm_offset_t)zstack_tbi_fix(addr);
3609 		btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3610 		addr += *(vm_offset_t *)addr;
3611 	}
3612 }
3613 
3614 #define ZALLOC_LOG(zone, addr, count)  ({ \
3615 	if ((zone)->z_btlog) {                                                 \
3616 	        zalloc_log(zone, addr, count, __builtin_frame_address(0));     \
3617 	}                                                                      \
3618 })
3619 
3620 #if !KASAN_TBI
3621 __cold
3622 #endif
3623 static void
3624 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3625 {
3626 	btlog_t log = zone->z_btlog;
3627 	btref_get_flags_t flags = 0;
3628 	btref_t ref;
3629 
3630 #if !KASAN_TBI
3631 	if (!log) {
3632 		return;
3633 	}
3634 #endif
3635 
3636 	/*
3637 	 * See if we're doing logging on this zone.
3638 	 *
3639 	 * There are two styles of logging used depending on
3640 	 * whether we're trying to catch a leak or corruption.
3641 	 */
3642 #if !KASAN_TBI
3643 	if (btlog_get_type(log) == BTLOG_HASH) {
3644 		/*
3645 		 * We're logging to catch a leak.
3646 		 *
3647 		 * Remove any record we might have for this element
3648 		 * since it's being freed.  Note that we may not find it
3649 		 * if the buffer overflowed and that's OK.
3650 		 *
3651 		 * Since the log is of a limited size, old records get
3652 		 * overwritten if there are more zallocs than zfrees.
3653 		 */
3654 		while (count-- > 0) {
3655 			addr = (vm_offset_t)zstack_tbi_fix(addr);
3656 			btlog_erase(log, (void *)addr);
3657 			addr += *(vm_offset_t *)addr;
3658 		}
3659 		return;
3660 	}
3661 #endif /* !KASAN_TBI */
3662 
3663 	if (get_preemption_level() || zone_supports_vm(zone)) {
3664 		/*
3665 		 * VM zones can be used by btlog, avoid reentrancy issues.
3666 		 */
3667 		flags = BTREF_GET_NOWAIT;
3668 	}
3669 
3670 	ref = btref_get(fp, flags);
3671 	while (count-- > 0) {
3672 		if (count) {
3673 			btref_retain(ref);
3674 		}
3675 		addr = (vm_offset_t)zstack_tbi_fix(addr);
3676 		btlog_record(log, (void *)addr, ZOP_FREE, ref);
3677 		addr += *(vm_offset_t *)addr;
3678 	}
3679 }
3680 
3681 #define ZFREE_LOG(zone, addr, count)  ({ \
3682 	if ((zone)->z_btlog) {                                                 \
3683 	        zfree_log(zone, addr, count, __builtin_frame_address(0));      \
3684 	}                                                                      \
3685 })
3686 
3687 #else
3688 #define ZALLOC_LOG(...)         ((void)0)
3689 #define ZFREE_LOG(...)          ((void)0)
3690 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3691 #endif /* !ZALLOC_TEST */
3692 #pragma mark zone (re)fill
3693 #if !ZALLOC_TEST
3694 
3695 /*!
3696  * @defgroup Zone Refill
3697  * @{
3698  *
3699  * @brief
3700  * Functions handling the zone refill machinery.
3701  *
3702  * @discussion
3703  * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3704  *
3705  * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3706  * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3707  *
3708  * - call @c zone_expand_locked() directly if the caller is allowed to block,
3709  *
3710  * - wake up the asynchronous expansion thread call if the caller is not allowed
3711  *   to block, or if the reserve becomes depleted.
3712  *
3713  *
3714  * <h2>Synchronous expansion</h2>
3715  *
3716  * This mechanism is actually the only one that may refill a zone, and all the
3717  * other ones funnel through this one eventually.
3718  *
3719  * @c zone_expand_locked() implements the core of the expansion mechanism,
3720  * and will do so while a caller specified predicate is true.
3721  *
3722  * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3723  * - one VM privileged thread,
3724  * - one regular thread.
3725  *
3726  * Regular threads that refill will put down their identity in @c z_expander,
3727  * so that priority inversion avoidance can be implemented.
3728  *
3729  * However, VM privileged threads are allowed to use VM page reserves,
3730  * which allows for the system to recover from extreme memory pressure
3731  * situations, allowing for the few allocations that @c zone_gc() or
3732  * killing processes require.
3733  *
3734  * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3735  * is set. @c z_expander is not necessarily the identity of this VM privileged
3736  * thread (it is when the VM privileged thread came in first; otherwise it
3737  * refers to the regular expander, and could even be @c THREAD_NULL).
3738  *
3739  * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3740  * spending a whole pointer on priority inheritance for VM privileged threads
3741  * (and other issues related to having two owners), we use the rwlock boost as
3742  * a stop gap to avoid priority inversions.
3743  *
3744  *
3745  * <h2>Chunk wiring policies</h2>
3746  *
3747  * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3748  * to try to minimize fragmentation relative to element sizes not aligning with
3749  * a chunk size well.  However, this can grow large and be hard to fulfill on
3750  * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3751  * 4k page systems).
3752  *
3753  * This is why, when under memory pressure the system allows chunks to be
3754  * partially populated. The metadata of the first page in the chunk maintains
3755  * the count of actually populated pages.
3756  *
3757  * The metadata for addresses assigned to a zone are found on 4 queues:
3758  * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3759  *   elements (those can be targeted by @c zone_gc()),
3760  * - @c z_pageq_partial has chunk heads with populated pages that are partially
3761  *   used,
3762  * - @c z_pageq_full has chunk heads with populated pages with no free elements
3763  *   left,
3764  * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3765  *   the zone forever, or the first secondary metadata for a chunk whose
3766  *   corresponding page is not populated in the chunk.
3767  *
3768  * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3769  * queue are preferred.
3770  *
3771  *
3772  * <h2>Asynchronous expansion</h2>
3773  *
3774  * This mechanism allows for refilling zones used mostly with non blocking
3775  * callers. It relies on a thread call (@c zone_expand_callout) which will
3776  * iterate all zones and refill the ones marked with @c z_async_refilling.
3777  *
3778  * NOTE: If the calling thread for zalloc_noblock is lower priority than
3779  *       the thread_call, then zalloc_noblock to an empty zone may succeed.
3780  *
3781  *
3782  * <h2>Dealing with zone allocations from the mach VM code</h2>
3783  *
3784  * The implementation of the mach VM itself uses the zone allocator
3785  * for things like the vm_map_entry data structure. In order to prevent
3786  * a recursion problem when adding more pages to a zone, the VM zones
3787  * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3788  * or any VM map functions to allocate.
3789  *
3790  * Instead, a really simple coalescing first-fit allocator is used
3791  * for this submap, and nothing other than zalloc can allocate from it.
3792  *
3793  * Memory is directly populated which doesn't require allocation of
3794  * VM map entries, and avoids recursion. The cost of this scheme however,
3795  * is that `vm_map_lookup_entry` will not function on those addresses
3796  * (nor any API relying on it).
3797  */
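/*
 * A minimal sketch of the refill decision described above, using a
 * hypothetical helper name (the real logic lives in zalloc_ext() and
 * zone_expand_locked() below); this is illustrative only:
 *
 *	static void
 *	zone_refill_sketch(zone_t z, zalloc_flags_t flags)
 *	{
 *		if (z->z_elems_free > z->z_elems_rsv / 2) {
 *			return;	// enough elements left, nothing to do
 *		}
 *		if (flags & Z_NOWAIT) {
 *			// caller can't block: ding the async expansion callout
 *			zone_expand_async_schedule_if_allowed(z);
 *		} else {
 *			// caller may block: grow the zone inline
 *			zone_expand_locked(z, flags);
 *		}
 *	}
 */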
3798 
3799 static void zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems);
3800 static void zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd);
3801 static thread_call_data_t zone_expand_callout;
3802 
3803 __attribute__((overloadable))
3804 static inline bool
3805 zone_submap_is_sequestered(zone_submap_idx_t idx)
3806 {
3807 	return idx != Z_SUBMAP_IDX_DATA;
3808 }
3809 
3810 __attribute__((overloadable))
3811 static inline bool
3812 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3813 {
3814 	return zone_submap_is_sequestered(zsflags.z_submap_idx);
3815 }
3816 
3817 static inline kma_flags_t
3818 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3819 {
3820 	kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3821 
3822 	if (zsflags.z_noencrypt) {
3823 		kmaflags |= KMA_NOENCRYPT;
3824 	}
3825 
3826 	if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
3827 		kmaflags |= KMA_DATA;
3828 	} else if (zsflags.z_submap_idx == Z_SUBMAP_IDX_DATA) {
3829 		/*
3830 		 * assume zones which are manually in the data heap,
3831 		 * like mbufs, are going to be shared somehow.
3832 		 */
3833 		kmaflags |= KMA_DATA_SHARED;
3834 	}
3835 
3836 	if (flags & Z_NOPAGEWAIT) {
3837 		kmaflags |= KMA_NOPAGEWAIT;
3838 	}
3839 	if (z->z_permanent || (!z->z_destructible &&
3840 	    zone_submap_is_sequestered(zsflags))) {
3841 		kmaflags |= KMA_PERMANENT;
3842 	}
3843 	if (zsflags.z_submap_from_end) {
3844 		kmaflags |= KMA_LAST_FREE;
3845 	}
3846 
3847 
3848 	return kmaflags;
3849 }
3850 
3851 static inline void
3852 zone_add_wired_pages(zone_t z, uint32_t pages)
3853 {
3854 	os_atomic_add(&zone_pages_wired, pages, relaxed);
3855 
3856 #if CONFIG_ZLEAKS
3857 	if (__improbable(zleak_should_enable_for_zone(z) &&
3858 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3859 		thread_call_enter(&zone_leaks_callout);
3860 	}
3861 #else
3862 	(void)z;
3863 #endif
3864 }
3865 
3866 static inline void
3867 zone_remove_wired_pages(zone_t z, uint32_t pages)
3868 {
3869 	os_atomic_sub(&zone_pages_wired, pages, relaxed);
3870 
3871 #if CONFIG_ZLEAKS
3872 	if (__improbable(zleak_should_disable_for_zone(z) &&
3873 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3874 		thread_call_enter(&zone_leaks_callout);
3875 	}
3876 #else
3877 	(void)z;
3878 #endif
3879 }
3880 
3881 #if ZSECURITY_CONFIG(ZONE_TAGGING)
3882 static inline caddr_t
3883 zone_tag_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3884 {
3885 	addr = vm_memtag_generate_and_store_tag(addr, elem_size);
3886 
3887 	if (zone->z_percpu) {
3888 		zpercpu_foreach_cpu(index) {
3889 			vm_memtag_store_tag(addr + ptoa(index), elem_size);
3890 		}
3891 	}
3892 
3893 	return addr;
3894 }
3895 
3896 static inline caddr_t
3897 zone_tag_free_element(zone_t zone, caddr_t addr, vm_size_t elem_size)
3898 {
3899 	if (__improbable((uintptr_t)addr > 0xFF00000000000000ULL)) {
3900 		return addr;
3901 	}
3902 
3903 	return zone_tag_element(zone, addr, elem_size);
3904 }
3905 
3906 static inline void
3907 zcram_memtag_init(zone_t zone, vm_offset_t base, uint32_t start, uint32_t end)
3908 {
3909 	zone_security_flags_t *zsflags = &zone_security_array[zone_index(zone)];
3910 
3911 	if (!zsflags->z_tag) {
3912 		return;
3913 	}
3914 
3915 	vm_size_t elem_size = zone_elem_outer_size(zone);
3916 	vm_size_t oob_offs = zone_elem_outer_offs(zone);
3917 
3918 	for (uint32_t i = start; i < end; i++) {
3919 		caddr_t elem_addr = (caddr_t)(base + oob_offs + i * elem_size);
3920 
3921 		(void)zone_tag_element(zone, elem_addr, elem_size);
3922 	}
3923 }
3924 #else /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3925 #define zone_tag_free_element(z, a, s)  (a)
3926 #define zcram_memtag_init(z, b, s, e)   do {} while (0)
3927 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
3928 
3929 /*!
3930  * @function zcram_and_lock()
3931  *
3932  * @brief
3933  * Prepare some memory for being usable for allocation purposes.
3934  *
3935  * @discussion
3936  * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3937  * to be usable in the zone.
3938  *
3939  * This function assumes the metadata is already populated for the range.
3940  *
3941  * Calling this function with @c pg_start being 0 means that the memory
3942  * is either a partial chunk, or a full chunk, that isn't published anywhere
3943  * and the initialization can happen without locks held.
3944  *
3945  * Calling this function with a non zero @c pg_start means that we are extending
3946  * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3947  * is already usable and published in the zone, so extending it requires holding
3948  * the zone lock.
3949  *
3950  * @param zone          The zone to cram new populated pages into
3951  * @param addr          The base address for the chunk(s)
3952  * @param pg_va_new     The number of virtual pages newly assigned to the zone
3953  * @param pg_start      The first newly populated page relative to @a addr.
3954  * @param pg_end        The after-last newly populated page relative to @a addr.
3955  * @param lock          0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3956  */
3957 static void
3958 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3959     uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3960 {
3961 	zone_id_t zindex = zone_index(zone);
3962 	vm_offset_t elem_size = zone_elem_outer_size(zone);
3963 	uint32_t free_start = 0, free_end = 0;
3964 	uint32_t oob_offs = zone_elem_outer_offs(zone);
3965 
3966 	struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3967 	uint32_t chunk_pages = zone->z_chunk_pages;
3968 	bool guarded = meta->zm_guarded;
3969 
3970 	assert(pg_start < pg_end && pg_end <= chunk_pages);
3971 
3972 	if (pg_start == 0) {
3973 		uint16_t chunk_len = (uint16_t)pg_end;
3974 		uint16_t secondary_len = ZM_SECONDARY_PAGE;
3975 		bool inline_bitmap = false;
3976 
3977 		if (zone->z_percpu) {
3978 			chunk_len = 1;
3979 			secondary_len = ZM_SECONDARY_PCPU_PAGE;
3980 			assert(pg_end == zpercpu_count());
3981 		}
3982 		if (!zone->z_permanent && !zone->z_uses_tags) {
3983 			inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3984 		}
3985 
3986 		free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3987 
3988 		meta[0] = (struct zone_page_metadata){
3989 			.zm_index         = zindex,
3990 			.zm_guarded       = guarded,
3991 			.zm_inline_bitmap = inline_bitmap,
3992 			.zm_chunk_len     = chunk_len,
3993 			.zm_alloc_size    = lock,
3994 		};
3995 
3996 		if (!zone->z_permanent && !inline_bitmap) {
3997 			meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3998 			    zone->z_chunk_elems, zone->z_uses_tags);
3999 		}
4000 
4001 		for (uint16_t i = 1; i < chunk_pages; i++) {
4002 			meta[i] = (struct zone_page_metadata){
4003 				.zm_index          = zindex,
4004 				.zm_guarded        = guarded,
4005 				.zm_inline_bitmap  = inline_bitmap,
4006 				.zm_chunk_len      = secondary_len,
4007 				.zm_page_index     = (uint8_t)i,
4008 				.zm_bitmap         = meta[0].zm_bitmap,
4009 				.zm_subchunk_len   = (uint8_t)(chunk_pages - i),
4010 			};
4011 		}
4012 
4013 		if (inline_bitmap) {
4014 			zone_meta_bits_init_inline(meta, free_end);
4015 		}
4016 	} else {
4017 		assert(!zone->z_percpu && !zone->z_permanent);
4018 
4019 		free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
4020 		free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
4021 	}
4022 
4023 	zcram_memtag_init(zone, addr, free_start, free_end);
4024 
4025 #if KASAN_CLASSIC
4026 	assert(pg_start == 0);         /* KASAN_CLASSIC never does partial chunks */
4027 	if (zone->z_permanent) {
4028 		kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
4029 	} else if (zone->z_percpu) {
4030 		for (uint32_t i = 0; i < pg_end; i++) {
4031 			kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
4032 			    zone_elem_outer_size(zone),
4033 			    zone_elem_outer_offs(zone),
4034 			    zone_elem_redzone(zone));
4035 		}
4036 	} else {
4037 		kasan_zmem_add(addr, ptoa(pg_end),
4038 		    zone_elem_outer_size(zone),
4039 		    zone_elem_outer_offs(zone),
4040 		    zone_elem_redzone(zone));
4041 	}
4042 #endif /* KASAN_CLASSIC */
4043 
4044 	/*
4045 	 * Insert the initialized pages / metadatas into the right lists.
4046 	 */
4047 
4048 	zone_lock(zone);
4049 	assert(zone->z_self == zone);
4050 
4051 	if (pg_start != 0) {
4052 		assert(meta->zm_chunk_len == pg_start);
4053 
4054 		zone_meta_bits_merge(meta, free_start, free_end);
4055 		meta->zm_chunk_len = (uint16_t)pg_end;
4056 
4057 		/*
4058 		 * consume the zone_meta_lock_in_partial()
4059 		 * done in zone_expand_locked()
4060 		 */
4061 		zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
4062 		zone_meta_remqueue(zone, meta);
4063 	}
4064 
4065 	if (zone->z_permanent || meta->zm_alloc_size) {
4066 		zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
4067 	} else {
4068 		zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
4069 		zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
4070 	}
4071 	if (pg_end < chunk_pages) {
4072 		/* push any non populated residual VA on z_pageq_va */
4073 		zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
4074 	}
4075 
4076 	zone->z_elems_free  += free_end - free_start;
4077 	zone->z_elems_avail += free_end - free_start;
4078 	zone->z_wired_cur   += zone->z_percpu ? 1 : pg_end - pg_start;
4079 	if (pg_va_new) {
4080 		zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
4081 	}
4082 	if (zone->z_wired_hwm < zone->z_wired_cur) {
4083 		zone->z_wired_hwm = zone->z_wired_cur;
4084 	}
4085 
4086 #if CONFIG_ZLEAKS
4087 	if (__improbable(zleak_should_enable_for_zone(zone) &&
4088 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
4089 		thread_call_enter(&zone_leaks_callout);
4090 	}
4091 #endif /* CONFIG_ZLEAKS */
4092 
4093 	zone_add_wired_pages(zone, pg_end - pg_start);
4094 }
4095 
4096 static void
4097 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4098 {
4099 	uint32_t chunk_pages = zone->z_chunk_pages;
4100 
4101 	assert(pages % chunk_pages == 0);
4102 	for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4103 		zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4104 		zone_unlock(zone);
4105 	}
4106 }
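/*
 * For example, a zone with z_chunk_pages == 4 crammed with 8 pages runs the
 * loop above twice, issuing one zcram_and_lock()/zone_unlock() pair per
 * 4-page chunk.
 */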
4107 
4108 __startup_func
4109 void
4110 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4111 {
4112 	uint32_t pages = (uint32_t)atop(size);
4113 
4114 	assert(from_zone_map(newmem, size));
4115 	assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4116 	assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4117 
4118 	/*
4119 	 * The early pages we move at the pmap layer can't be "depopulated"
4120 	 * because there's no vm_page_t for them.
4121 	 *
4122 	 * "Lock" them so that they never hit z_pageq_empty.
4123 	 */
4124 	vm_memtag_bzero_unchecked((void *)newmem, size);
4125 	zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4126 }
4127 
4128 /*!
4129  * @function zone_submap_alloc_sequestered_va
4130  *
4131  * @brief
4132  * Allocates VA without using vm_find_space().
4133  *
4134  * @discussion
4135  * Allocate VA quickly without using the slower vm_find_space() for cases
4136  * when the submaps are fully sequestered.
4137  *
4138  * The VM submap is used to implement the VM itself so it is always sequestered,
4139  * as it can't kmem_alloc which needs to always allocate vm entries.
4140  * However, it can use vm_map_enter() which tries to coalesce entries, which
4141  * always works, so the VM map only ever needs 2 entries (one for each end).
4142  *
4143  * The RO submap is similarly always sequestered if it exists (as a non
4144  * sequestered RO submap makes very little sense).
4145  *
4146  * The allocator is a very simple bump-allocator
4147  * that allocates from either end.
4148  */
4149 static kern_return_t
4150 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4151     vm_offset_t *addrp)
4152 {
4153 	vm_size_t size = ptoa(pages);
4154 	vm_map_t map = zone_submap(zsflags);
4155 	vm_map_entry_t first, last;
4156 	vm_map_offset_t addr;
4157 
4158 	vm_map_lock(map);
4159 
4160 	first = vm_map_first_entry(map);
4161 	last = vm_map_last_entry(map);
4162 
4163 	if (first->vme_end + size > last->vme_start) {
4164 		vm_map_unlock(map);
4165 		return KERN_NO_SPACE;
4166 	}
4167 
4168 	if (zsflags.z_submap_from_end) {
4169 		last->vme_start -= size;
4170 		addr = last->vme_start;
4171 		VME_OFFSET_SET(last, addr);
4172 	} else {
4173 		addr = first->vme_end;
4174 		first->vme_end += size;
4175 	}
4176 	map->size += size;
4177 
4178 	vm_map_unlock(map);
4179 
4180 	*addrp = addr;
4181 	return KERN_SUCCESS;
4182 }
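/*
 * Illustration with made-up addresses and 4KiB pages: if the submap's first
 * entry ends at 0x2000 and its last entry starts at 0x8000, a 2-page request
 * returns 0x2000 and bumps first->vme_end to 0x4000; with z_submap_from_end
 * set it instead returns 0x6000 after pulling last->vme_start down to 0x6000.
 */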
4183 
4184 void
4185 zone_fill_initially(zone_t zone, vm_size_t nelems)
4186 {
4187 	kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4188 	kern_return_t kr;
4189 	vm_offset_t addr;
4190 	uint32_t pages;
4191 	zone_security_flags_t zsflags = zone_security_config(zone);
4192 
4193 	assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4194 	assert(zone->z_elems_avail == 0);
4195 
4196 	kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4197 	pages = zone_alloc_pages_for_nelems(zone, nelems);
4198 	if (zone_submap_is_sequestered(zsflags)) {
4199 		kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4200 		if (kr != KERN_SUCCESS) {
4201 			panic("zone_submap_alloc_sequestered_va() "
4202 			    "of %u pages failed", pages);
4203 		}
4204 		kernel_memory_populate(addr, ptoa(pages),
4205 		    kmaflags, VM_KERN_MEMORY_ZONE);
4206 	} else {
4207 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4208 		kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4209 		    kmaflags, VM_KERN_MEMORY_ZONE);
4210 	}
4211 
4212 	zone_meta_populate(addr, ptoa(pages));
4213 	zcram(zone, addr, pages, 0);
4214 }
4215 
4216 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4217 __attribute__((noinline))
4218 static void
4219 zone_scramble_va_and_unlock(
4220 	zone_t                      z,
4221 	struct zone_page_metadata  *meta,
4222 	uint32_t                    runs,
4223 	uint32_t                    pages,
4224 	uint32_t                    chunk_pages,
4225 	uint64_t                    guard_mask)
4226 {
4227 	struct zone_page_metadata *arr[ZONE_MAX_CHUNK_ALLOC_NUM];
4228 
4229 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4230 		arr[run] = meta + n;
4231 		n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4232 	}
4233 
4234 	/*
4235 	 * Fisher–Yates shuffle, for an array with indices [0, n)
4236 	 *
4237 	 * for i from n−1 downto 1 do
4238 	 *     j ← random integer such that 0 ≤ j ≤ i
4239 	 *     exchange a[j] and a[i]
4240 	 *
4241 	 * The point here is that early allocations aren't at a fixed
4242 	 * distance from each other.
4243 	 */
4244 	for (uint32_t i = runs - 1; i > 0; i--) {
4245 		uint32_t j = zalloc_random_uniform32(0, i + 1);
4246 
4247 		meta   = arr[j];
4248 		arr[j] = arr[i];
4249 		arr[i] = meta;
4250 	}
4251 
4252 	zone_lock(z);
4253 
4254 	for (uint32_t i = 0; i < runs; i++) {
4255 		zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4256 	}
4257 	z->z_va_cur += z->z_percpu ? runs : pages;
4258 }
4259 
4260 static inline uint32_t
4261 dist_u32(uint32_t a, uint32_t b)
4262 {
4263 	return a < b ? b - a : a - b;
4264 }
4265 
4266 static uint64_t
4267 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4268 {
4269 	for (; n-- > 0; pop--) {
4270 		uint32_t bit = zalloc_random_uniform32(0, pop);
4271 		uint64_t m = mask;
4272 
4273 		for (; bit; bit--) {
4274 			m &= m - 1;
4275 		}
4276 
4277 		mask ^= 1ull << __builtin_ctzll(m);
4278 	}
4279 
4280 	return mask;
4281 }
4282 
4283 /**
4284  * @function zalloc_random_bits
4285  *
4286  * @brief
4287  * Compute a random number with a specified number of bit set in a given width.
4288  *
4289  * @discussion
4290  * This function generates a "uniform" distribution of sets of bits set in
4291  * a given width, with typically less than width/4 calls to random.
4292  *
4293  * @param pop           the target number of bits set.
4294  * @param width         the number of bits in the random integer to generate.
4295  */
4296 static uint64_t
4297 zalloc_random_bits(uint32_t pop, uint32_t width)
4298 {
4299 	uint64_t w_mask = (1ull << width) - 1;
4300 	uint64_t mask;
4301 	uint32_t cur;
4302 
4303 	if (3 * width / 4 <= pop) {
4304 		mask = w_mask;
4305 		cur  = width;
4306 	} else if (pop <= width / 4) {
4307 		mask = 0;
4308 		cur  = 0;
4309 	} else {
4310 		/*
4311 		 * Choosing a random number this way will overwhelmingly
4312 		 * contain about width/2 set bits, +/- a few.
4313 		 */
4314 		mask = zalloc_random_mask64(width);
4315 		cur  = __builtin_popcountll(mask);
4316 
4317 		if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4318 			/*
4319 			 * If the opposite mask has a closer popcount,
4320 			 * then start with that one as the seed.
4321 			 */
4322 			cur = width - cur;
4323 			mask ^= w_mask;
4324 		}
4325 	}
4326 
4327 	if (cur < pop) {
4328 		/*
4329 		 * Setting `pop - cur` bits is really clearing that many from
4330 		 * the opposite mask.
4331 		 */
4332 		mask ^= w_mask;
4333 		mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4334 		mask ^= w_mask;
4335 	} else if (pop < cur) {
4336 		mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4337 	}
4338 
4339 	return mask;
4340 }
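/*
 * For example, zalloc_random_bits(3, 8) returns an 8-bit value with exactly
 * 3 bits set (such as 0b01001010), drawn approximately uniformly from the
 * C(8,3) = 56 possible values.
 */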
4341 #endif
4342 
4343 static void
4344 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4345 {
4346 	zone_security_flags_t zsflags = zone_security_config(z);
4347 	struct zone_page_metadata *meta;
4348 	kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4349 	uint32_t chunk_pages = z->z_chunk_pages;
4350 	uint32_t runs, pages, guards, guard_pages, rnum;
4351 	uint64_t guard_mask = 0;
4352 	bool     lead_guard = false;
4353 	zone_id_t zidx = zone_index(z);
4354 	kern_return_t kr;
4355 	vm_offset_t addr;
4356 
4357 	zone_unlock(z);
4358 
4359 	/*
4360 	 * A lot of OOB exploitation techniques rely on precise placement
4361 	 * and interleaving of zone pages. The layout that is sought
4362 	 * by attackers will be C/P/T types, where:
4363 	 * - (C)ompromised is the type for which attackers have a bug,
4364 	 * - (P)adding is used to pad memory,
4365 	 * - (T)arget is the type that the attacker will attempt to corrupt
4366 	 *   by exploiting (C).
4367 	 *
4368 	 * Note that in some cases C==T and P isn't needed.
4369 	 *
4370 	 * In order to make those placement games much harder,
4371 	 * we grow zones by random runs of memory, up to 10 chunks.
4372 	 * This makes predicting the precise layout of the heap
4373 	 * considerably more complicated.
4374 	 *
4375 	 * Note: this function makes very heavy use of random,
4376 	 *       however, it is mostly limited to sequestered zones,
4377 	 *       and eventually the layout will be fixed,
4378 	 *       and the usage of random vastly reduced.
4379 	 *
4380 	 *       For non sequestered zones, there's a single call
4381 	 *       to random in order to decide whether we want
4382 	 *       a guard page or not.
4383 	 */
4384 	pages  = chunk_pages;
4385 	guards = 0;
4386 	runs   = 1;
4387 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4388 	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4389 		runs  = ZONE_MAX_CHUNK_ALLOC_NUM;
4390 		runs  = zalloc_random_uniform32(1, runs + 1);
4391 		pages = runs * chunk_pages;
4392 	}
4393 	static_assert(ZONE_MAX_CHUNK_ALLOC_NUM <= 10,
4394 	    "make sure that `runs` will never exceed 10");
4395 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4396 
4397 	/*
4398 	 * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4399 	 * guards might be added after each chunk.
4400 	 *
4401 	 * Those guard pages are marked with the ZM_PGZ_GUARD
4402 	 * magical chunk len, and their zm_oob_offs field
4403 	 * is used to remember optional shift applied
4404 	 * to returned elements, in order to right-align them
4405 	 * as much as possible.
4406 	 *
4407 	 * In an adversarial context, while guard pages
4408 	 * are extremely effective against linear overflow,
4409 	 * using a predictable frequency of guard pages feels like
4410 	 * a missed opportunity, which is why we choose to insert
4411 	 * one guard region (chunk_pages guard pages) with 25% probability,
4412 	 * with a goal of having ~20% of the VA allocated consist of guard pages.
4413 	 */
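	/*
	 * Worked example for the ~20% goal: ignoring the forced guards added
	 * in the sequestered case below, one chunk-sized guard region per
	 * four chunk runs on average yields an expected guard fraction of
	 * roughly 0.25 / (1 + 0.25) = 20% of the allocated VA.
	 */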
4414 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4415 	if (!z->z_percpu) {
4416 		/*
4417 		 * Don't bother with adding guard regions for per-CPU zones, as
4418 		 * they're not interesting to attackers.
4419 		 */
4420 		for (uint32_t run = 0; run < runs; run++) {
4421 			rnum = zalloc_random_uniform32(0, 4 * 128);
4422 			guards += (rnum < 128);
4423 		}
4424 	}
4425 	assert3u(guards, <=, runs);
4426 
4427 	guard_mask = 0;
4428 
4429 	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4430 		/*
4431 		 * Several exploitation strategies rely on a C/T (compromised
4432 		 * then target types) ordering of pages with a sub-page reach
4433 		 * from C into T.
4434 		 *
4435 		 * We want to reliably thwart such exploitations
4436 		 * and hence force a guard page between alternating
4437 		 * memory types.
4438 		 *
4439 		 * Note: this counts towards the number of guard pages we want.
4440 		 */
4441 		guard_mask |= 1ull << (runs - 1);
4442 
4443 		if (guards > 1) {
4444 			guard_mask |= zalloc_random_bits(guards - 1, runs - 1);
4445 		} else {
4446 			guards = 1;
4447 		}
4448 
4449 		/*
4450 		 * While we randomize the chunks lengths, an attacker with
4451 		 * precise timing control can guess when overflows happen,
4452 		 * and "measure" the runs, which gives them an indication
4453 		 * of where the next run start offset is.
4454 		 *
4455 		 * In order to make this knowledge unusable, add a guard page
4456 		 * _before_ the new run with a 25% probability, regardless
4457 		 * of whether we had enough guard pages.
4458 		 */
4459 		if ((rnum & 3) == 0) {
4460 			lead_guard = true;
4461 			guards++;
4462 		}
4463 	} else {
4464 		assert3u(runs, ==, 1);
4465 		assert3u(guards, <=, 1);
4466 		guard_mask = guards << (runs - 1);
4467 	}
4468 #else
4469 	(void)rnum;
4470 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4471 
4472 	/* We want guards to be at least the size of the chunk. */
4473 	guard_pages = guards * chunk_pages;
4474 	if (zone_submap_is_sequestered(zsflags)) {
4475 		kr = zone_submap_alloc_sequestered_va(zsflags,
4476 		    pages + guard_pages, &addr);
4477 	} else {
4478 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4479 		kr = kmem_alloc(zone_submap(zsflags), &addr,
4480 		    ptoa(pages + guard_pages), kmaflags, VM_KERN_MEMORY_ZONE);
4481 	}
4482 
4483 	if (kr != KERN_SUCCESS) {
4484 		uint64_t zone_size = 0;
4485 		zone_t zone_largest = zone_find_largest(&zone_size);
4486 		panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4487 		    "likely due to memory leak in zone [%s%s] "
4488 		    "(%u%c, %d elements allocated)",
4489 		    kr, zone_heap_name(z), zone_name(z),
4490 		    zone_heap_name(zone_largest), zone_name(zone_largest),
4491 		    mach_vm_size_pretty(zone_size),
4492 		    mach_vm_size_unit(zone_size),
4493 		    zone_count_allocated(zone_largest));
4494 	}
4495 
4496 	meta = zone_meta_from_addr(addr);
4497 	zone_meta_populate(addr, ptoa(pages + guard_pages));
4498 
4499 	/*
4500 	 * Handle the leading guard page, if any
4501 	 */
4502 	if (lead_guard) {
4503 		for (uint32_t i = 0; i < chunk_pages; i++) {
4504 			meta[i].zm_index = zidx;
4505 			meta[i].zm_chunk_len = ZM_PGZ_GUARD;
4506 			meta[i].zm_guarded = true;
4507 			meta++;
4508 		}
4509 	}
4510 
4511 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4512 		bool guarded = (guard_mask >> run) & 1;
4513 
4514 		for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4515 			meta[n].zm_index = zidx;
4516 			meta[n].zm_guarded = guarded;
4517 		}
4518 		if (guarded) {
4519 			for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4520 				meta[n].zm_index = zidx;
4521 				meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4522 			}
4523 		}
4524 	}
4525 	if (guards) {
4526 		os_atomic_add(&zone_guard_pages, guard_pages, relaxed);
4527 	}
4528 
4529 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4530 	if (__improbable(zone_caching_disabled < 0)) {
4531 		return zone_scramble_va_and_unlock(z, meta, runs, pages,
4532 		           chunk_pages, guard_mask);
4533 	}
4534 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4535 
4536 	zone_lock(z);
4537 
4538 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4539 		zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4540 		n += chunk_pages + ((guard_mask >> run) & 1) * chunk_pages;
4541 	}
4542 	z->z_va_cur += z->z_percpu ? runs : pages;
4543 }
4544 
4545 static inline void
4546 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4547 {
4548 #if DEBUG || DEVELOPMENT
4549 	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_START,
4550 	    size, 0, 0, 0);
4551 #else
4552 	(void)size;
4553 #endif
4554 }
4555 
4556 static inline void
4557 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4558 {
4559 	task_t task = current_task_early();
4560 	if (pages && task) {
4561 		counter_add(&task->pages_grabbed_kern, pages);
4562 	}
4563 	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
4564 	    pages, 0, 0, 0);
4565 }
4566 
4567 __attribute__((noinline))
4568 static void
4569 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4570 {
4571 	uint64_t wait_start = 0;
4572 	long mapped;
4573 
4574 	sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4575 
4576 	if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4577 		return;
4578 	}
4579 
4580 	mapped = os_atomic_load(&zone_pages_wired, relaxed);
4581 
4582 	/*
4583 	 * If the zone map is really exhausted, wait on the GC thread,
4584 	 * donating our priority (which is important because the GC
4585 	 * thread is at a rather low priority).
4586 	 */
4587 	for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
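		/*
		 * Triangular backoff while the zone map remains exhausted:
		 * successive waits of 1, 3, 6, 10, ... ms before rechecking
		 * the wired page count.
		 */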
4588 		uint32_t wait_ms = n * (n + 1) / 2;
4589 		uint64_t interval;
4590 
4591 		if (n == 1) {
4592 			wait_start = mach_absolute_time();
4593 		} else {
4594 			sched_cond_signal(&vm_pageout_gc_cond, vm_pageout_gc_thread);
4595 		}
4596 		if (zone_exhausted_timeout > 0 &&
4597 		    wait_ms > zone_exhausted_timeout) {
4598 			panic("zone map exhaustion: waited for %dms "
4599 			    "(pages: %ld, max: %ld, wanted: %d)",
4600 			    wait_ms, mapped, zone_pages_wired_max, pgs);
4601 		}
4602 
4603 		clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4604 		    &interval);
4605 
4606 		lck_spin_lock(&zone_exhausted_lock);
4607 		lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4608 		    LCK_SLEEP_UNLOCK, &zone_pages_wired,
4609 		    vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4610 
4611 		mapped = os_atomic_load(&zone_pages_wired, relaxed);
4612 	}
4613 }
4614 
4615 static bool
4616 zone_expand_wait_for_pages(bool waited)
4617 {
4618 	if (waited) {
4619 		return false;
4620 	}
4621 #if DEBUG || DEVELOPMENT
4622 	if (zalloc_simulate_vm_pressure) {
4623 		return false;
4624 	}
4625 #endif /* DEBUG || DEVELOPMENT */
4626 	return !vm_pool_low();
4627 }
4628 
4629 static inline void
4630 zone_expand_async_schedule_if_allowed(zone_t zone)
4631 {
4632 	if (zone->z_async_refilling || zone->no_callout) {
4633 		return;
4634 	}
4635 
4636 	if (zone_exhausted(zone)) {
4637 		return;
4638 	}
4639 
4640 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4641 		return;
4642 	}
4643 
4644 	if (!vm_pool_low() || zone_supports_vm(zone)) {
4645 		zone->z_async_refilling = true;
4646 		thread_call_enter(&zone_expand_callout);
4647 	}
4648 }
4649 
4650 __attribute__((noinline))
4651 static bool
4652 zalloc_expand_drain_exhausted_caches_locked(zone_t z)
4653 {
4654 	struct zone_depot zd;
4655 	zone_magazine_t mag = NULL;
4656 
4657 	if (z->z_depot_size) {
4658 		z->z_depot_size = 0;
4659 		z->z_depot_cleanup = true;
4660 
4661 		zone_depot_init(&zd);
4662 		zone_depot_trim(z, 0, &zd);
4663 
4664 		zone_recirc_lock_nopreempt(z);
4665 		if (zd.zd_full) {
4666 			zone_depot_move_full(&z->z_recirc,
4667 			    &zd, zd.zd_full, NULL);
4668 		}
4669 		if (zd.zd_empty) {
4670 			zone_depot_move_empty(&z->z_recirc,
4671 			    &zd, zd.zd_empty, NULL);
4672 		}
4673 		zone_recirc_unlock_nopreempt(z);
4674 	}
4675 
4676 	zone_recirc_lock_nopreempt(z);
4677 	if (z->z_recirc.zd_full) {
4678 		mag = zone_depot_pop_head_full(&z->z_recirc, z);
4679 	}
4680 	zone_recirc_unlock_nopreempt(z);
4681 
4682 	if (mag) {
4683 		zone_reclaim_elements(z, zc_mag_size(), mag->zm_elems);
4684 		zone_magazine_free(mag);
4685 	}
4686 
4687 	return mag != NULL;
4688 }
4689 
4690 static bool
4691 zalloc_needs_refill(zone_t zone, zalloc_flags_t flags)
4692 {
4693 	if (zone->z_elems_free > zone->z_elems_rsv) {
4694 		return false;
4695 	}
4696 	if (!zone_exhausted(zone)) {
4697 		return true;
4698 	}
4699 	if (zone->z_pcpu_cache && zone->z_depot_size) {
4700 		if (zalloc_expand_drain_exhausted_caches_locked(zone)) {
4701 			return false;
4702 		}
4703 	}
4704 	return (flags & Z_NOFAIL) != 0;
4705 }
4706 
4707 static void
4708 zone_wakeup_exhausted_waiters(zone_t z)
4709 {
4710 	z->z_exhausted_wait = false;
4711 	EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, false);
4712 	thread_wakeup(&z->z_expander);
4713 }
4714 
4715 __attribute__((noinline))
4716 static void
4717 __ZONE_EXHAUSTED_AND_WAITING_HARD__(zone_t z)
4718 {
4719 	if (z->z_pcpu_cache && z->z_depot_size &&
4720 	    zalloc_expand_drain_exhausted_caches_locked(z)) {
4721 		return;
4722 	}
4723 
4724 	if (!z->z_exhausted_wait) {
4725 		zone_recirc_lock_nopreempt(z);
4726 		z->z_exhausted_wait = true;
4727 		zone_recirc_unlock_nopreempt(z);
4728 		EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z, true);
4729 	}
4730 
4731 	assert_wait(&z->z_expander, TH_UNINT);
4732 	zone_unlock(z);
4733 	thread_block(THREAD_CONTINUE_NULL);
4734 	zone_lock(z);
4735 }
4736 
4737 static pmap_mapping_type_t
4738 zone_mapping_type(zone_t z)
4739 {
4740 	zone_security_flags_t zsflags = zone_security_config(z);
4741 
4742 	/*
4743 	 * If the zone's z_submap_idx is neither Z_SUBMAP_IDX_DATA nor
4744 	 * Z_SUBMAP_IDX_READ_ONLY, mark the corresponding mapping
4745 	 * type as PMAP_MAPPING_TYPE_RESTRICTED.
4746 	 */
4747 	switch (zsflags.z_submap_idx) {
4748 	case Z_SUBMAP_IDX_DATA:
4749 		return PMAP_MAPPING_TYPE_DEFAULT;
4750 	case Z_SUBMAP_IDX_READ_ONLY:
4751 		return PMAP_MAPPING_TYPE_ROZONE;
4752 	default:
4753 		return PMAP_MAPPING_TYPE_RESTRICTED;
4754 	}
4755 }
4756 
4757 static vm_prot_t
4758 zone_page_prot(zone_security_flags_t zsflags)
4759 {
4760 	switch (zsflags.z_submap_idx) {
4761 	case Z_SUBMAP_IDX_READ_ONLY:
4762 		return VM_PROT_READ;
4763 	default:
4764 		return VM_PROT_READ | VM_PROT_WRITE;
4765 	}
4766 }
4767 
4768 static void
4769 zone_expand_locked(zone_t z, zalloc_flags_t flags)
4770 {
4771 	zone_security_flags_t zsflags = zone_security_config(z);
4772 	struct zone_expand ze = {
4773 		.ze_thread  = current_thread(),
4774 	};
4775 
4776 	if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4777 		ze.ze_thread->options |= TH_OPT_VMPRIV;
4778 		ze.ze_clear_priv = true;
4779 	}
4780 
4781 	if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4782 		/*
4783 		 * When the thread is VM privileged,
4784 		 * vm_page_grab() will call VM_PAGE_WAIT()
4785 		 * without our knowledge, so we must unfortunately
4786 		 * assume that it will be called.
4787 		 *
4788 		 * In practice it's not a big deal because
4789 		 * Z_NOPAGEWAIT is not really used on zones
4790 		 * that VM privileged threads are going to expand.
4791 		 */
4792 		ze.ze_pg_wait = true;
4793 		ze.ze_vm_priv = true;
4794 	}
4795 
4796 	for (;;) {
4797 		if (!z->z_permanent && !zalloc_needs_refill(z, flags)) {
4798 			goto out;
4799 		}
4800 
4801 		if (z->z_expander == NULL) {
4802 			z->z_expander = &ze;
4803 			break;
4804 		}
4805 
4806 		if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4807 			change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4808 			ze.ze_next = z->z_expander;
4809 			z->z_expander = &ze;
4810 			break;
4811 		}
4812 
4813 		if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4814 			goto out;
4815 		}
4816 
4817 		z->z_expanding_wait = true;
4818 		hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4819 		    LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4820 		    TH_UNINT, TIMEOUT_WAIT_FOREVER);
4821 	}
4822 
4823 	do {
4824 		struct zone_page_metadata *meta = NULL;
4825 		uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4826 		vm_page_t page_list = NULL;
4827 		vm_offset_t addr = 0;
4828 		int waited = 0;
4829 
4830 		if ((flags & Z_NOFAIL) && zone_exhausted(z)) {
4831 			__ZONE_EXHAUSTED_AND_WAITING_HARD__(z);
4832 			continue;         /* reevaluate if we really need it */
4833 		}
4834 
4835 		/*
4836 		 * While we hold the zone lock, look if there's VA we can:
4837 		 * - complete from partial pages,
4838 		 * - reuse from the sequester list.
4839 		 *
4840 		 * When the page is being populated we pretend we allocated
4841 		 * an extra element so that zone_gc() can't attempt to free
4842 		 * the chunk (as it could become empty while we wait for pages).
4843 		 */
4844 		if (zone_pva_is_null(z->z_pageq_va)) {
4845 			zone_allocate_va_locked(z, flags);
4846 		}
4847 
4848 		meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4849 		addr = zone_meta_to_addr(meta);
4850 		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4851 			cur_pages = meta->zm_page_index;
4852 			meta -= cur_pages;
4853 			addr -= ptoa(cur_pages);
4854 			zone_meta_lock_in_partial(z, meta, cur_pages);
4855 		}
4856 		zone_unlock(z);
4857 
4858 		/*
4859 		 * And now allocate pages to populate our VA.
4860 		 */
4861 		min_pages = z->z_chunk_pages;
4862 #if !KASAN_CLASSIC
4863 		if (!z->z_percpu) {
4864 			min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4865 			    zone_elem_outer_size(z)));
4866 		}
4867 #endif /* !KASAN_CLASSIC */
4868 
4869 		/*
4870 		 * Trigger jetsams via VM_pageout GC
4871 		 * if we're running out of zone memory
4872 		 */
4873 		if (__improbable(zone_map_nearing_exhaustion())) {
4874 			__ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4875 		}
4876 
4877 		ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4878 
4879 		while (pages < z->z_chunk_pages - cur_pages) {
4880 			uint_t grab_options = VM_PAGE_GRAB_OPTIONS_NONE;
4881 			vm_page_t m = vm_page_grab_options(grab_options);
4882 
4883 			if (m) {
4884 				pages++;
4885 				m->vmp_snext = page_list;
4886 				page_list = m;
4887 				vm_page_zero_fill(m);
4890 				continue;
4891 			}
4892 
4893 			if (pages >= min_pages &&
4894 			    !zone_expand_wait_for_pages(waited)) {
4895 				break;
4896 			}
4897 
4898 			if ((flags & Z_NOPAGEWAIT) == 0) {
4899 				/*
4900 				 * The first time we're about to wait for pages,
4901 				 * mention that to waiters and wake them all.
4902 				 *
4903 				 * Set `ze_pg_wait` in our zone_expand context
4904 				 * so that waiters who care do not wait again.
4905 				 */
4906 				if (!ze.ze_pg_wait) {
4907 					zone_lock(z);
4908 					if (z->z_expanding_wait) {
4909 						z->z_expanding_wait = false;
4910 						wakeup_all_with_inheritor(&z->z_expander,
4911 						    THREAD_AWAKENED);
4912 					}
4913 					ze.ze_pg_wait = true;
4914 					zone_unlock(z);
4915 				}
4916 
4917 				waited++;
4918 				VM_PAGE_WAIT();
4919 				continue;
4920 			}
4921 
4922 			/*
4923 			 * Undo everything and bail out:
4924 			 *
4925 			 * - free pages
4926 			 * - undo the fake allocation if any
4927 			 * - put the VA back on the VA page queue.
4928 			 */
4929 			vm_page_free_list(page_list, FALSE);
4930 			ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4931 
4932 			zone_lock(z);
4933 
4934 			zone_expand_async_schedule_if_allowed(z);
4935 
4936 			if (cur_pages) {
4937 				zone_meta_unlock_from_partial(z, meta, cur_pages);
4938 			}
4939 			if (meta) {
4940 				zone_meta_queue_push(z, &z->z_pageq_va,
4941 				    meta + cur_pages);
4942 			}
4943 			goto page_shortage;
4944 		}
4945 		vm_object_t object;
4946 		object = kernel_object_default;
4947 		vm_object_lock(object);
4948 
4949 		kernel_memory_populate_object_and_unlock(object,
4950 		    addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4951 		    zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4952 		    zone_page_prot(zsflags), zone_mapping_type(z));
4953 
4954 		ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4955 
4956 		zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4957 
4958 		/*
4959 		 * permanent zones only try once,
4960 		 * the retry loop is in the caller
4961 		 */
4962 	} while (!z->z_permanent && zalloc_needs_refill(z, flags));
4963 
4964 page_shortage:
4965 	if (z->z_expander == &ze) {
4966 		z->z_expander = ze.ze_next;
4967 	} else {
4968 		assert(z->z_expander->ze_next == &ze);
4969 		z->z_expander->ze_next = NULL;
4970 	}
4971 	if (z->z_expanding_wait) {
4972 		z->z_expanding_wait = false;
4973 		wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4974 	}
4975 out:
4976 	if (ze.ze_clear_priv) {
4977 		ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4978 	}
4979 }
4980 
4981 static void
4982 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4983 {
4984 	zone_foreach(z) {
4985 		if (z->no_callout) {
4986 			/* z_async_refilling will never be set */
4987 			continue;
4988 		}
4989 
4990 		if (!z->z_async_refilling) {
4991 			/*
4992 			 * avoid locking all zones, because the one(s)
4993 			 * we're looking for have been set _before_
4994 			 * thread_call_enter() was called; if we fail
4995 			 * to observe the bit, it means the thread-call
4996 			 * has been "dinged" again and we'll notice it then.
4997 			 */
4998 			continue;
4999 		}
5000 
5001 		zone_lock(z);
5002 		if (z->z_self && z->z_async_refilling) {
5003 			zone_expand_locked(z, Z_WAITOK);
5004 			/*
5005 			 * clearing _after_ we grow is important,
5006 			 * so that we avoid waking up the thread call
5007 			 * while we grow and causing it to run a second time.
5008 			 */
5009 			z->z_async_refilling = false;
5010 		}
5011 		zone_unlock(z);
5012 	}
5013 }
5014 
5015 #endif /* !ZALLOC_TEST */
5016 #pragma mark zone jetsam integration
5017 #if !ZALLOC_TEST
5018 
5019 /*
5020  * We're being very conservative here and picking a value of 95%. We might need to lower this if
5021  * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
5022  */
5023 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
5024 
5025 /*
5026  * Threshold above which largest zones should be included in the panic log
5027  */
5028 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
5029 
5030 /*
5031  * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
5032  * where X=zone_map_jetsam_limit.
5033  *
5034  * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
5035  */
5036 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
5037     ZONE_MAP_JETSAM_LIMIT_DEFAULT);
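/*
 * For example, booting with "zone_map_jetsam_limit=90" (an illustrative
 * value, not a recommendation) makes zone-map-exhaustion jetsams kick in
 * once 90% of the zone map's wired page budget is in use.
 */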
5038 
5039 kern_return_t
5040 zone_map_jetsam_set_limit(uint32_t value)
5041 {
5042 	if (value <= 0 || value > 100) {
5043 		return KERN_INVALID_VALUE;
5044 	}
5045 
5046 	zone_map_jetsam_limit = value;
5047 	os_atomic_store(&zone_pages_jetsam_threshold,
5048 	    zone_pages_wired_max * value / 100, relaxed);
5049 	return KERN_SUCCESS;
5050 }
5051 
5052 void
5053 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
5054 {
5055 	vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5056 	*current_size = ptoa_64(phys_pages);
5057 	*capacity = ptoa_64(zone_pages_wired_max);
5058 }
5059 
5060 void
5061 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
5062 {
5063 	zone_t largest_zone = zone_find_largest(zone_size);
5064 
5065 	/*
5066 	 * Append kalloc heap name to zone name (if zone is used by kalloc)
5067 	 */
5068 	snprintf(zone_name, zone_name_len, "%s%s",
5069 	    zone_heap_name(largest_zone), largest_zone->z_name);
5070 }
5071 
5072 static bool
5073 zone_map_nearing_threshold(unsigned int threshold)
5074 {
5075 	uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
5076 	return phys_pages * 100 > zone_pages_wired_max * threshold;
5077 }
5078 
5079 bool
5080 zone_map_nearing_exhaustion(void)
5081 {
5082 	vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
5083 
5084 	return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
5085 }
5086 
5087 
5088 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
5089 
5090 /*
5091  * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
5092  * to walk through the jetsam priority bands and kill processes.
5093  */
5094 static zone_t
5095 kill_process_in_largest_zone(void)
5096 {
5097 	pid_t pid = -1;
5098 	uint64_t zone_size = 0;
5099 	zone_t largest_zone = zone_find_largest(&zone_size);
5100 
5101 	printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
5102 	    ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
5103 	    ptoa_64(zone_pages_wired_max),
5104 	    (uint64_t)zone_submaps_approx_size(),
5105 	    (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
5106 	    zone_map_jetsam_limit);
5107 	printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
5108 	    largest_zone->z_name, (uintptr_t)zone_size);
5109 
5110 	/*
5111 	 * We want to make sure we don't call this function from userspace.
5112 	 * Otherwise we could end up trying to synchronously kill the process
5113 	 * whose context we're in, causing the system to hang.
5114 	 */
5115 	assert(current_task() == kernel_task);
5116 
5117 	/*
5118 	 * If vm_object_zone is the largest, check to see if the number of
5119 	 * elements in vm_map_entry_zone is comparable.
5120 	 *
5121 	 * If so, consider vm_map_entry_zone as the largest. This lets us target
5122 	 * a specific process to jetsam to quickly recover from the zone map
5123 	 * bloat.
5124 	 */
5125 	if (largest_zone == vm_object_zone) {
5126 		unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
5127 		unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
5128 		/* Is the VM map entries zone count >= 98% of the VM objects zone count? */
5129 		if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
5130 			largest_zone = vm_map_entry_zone;
5131 			printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
5132 			    (uintptr_t)zone_size_wired(largest_zone));
5133 		}
5134 	}
5135 
5136 	/* TODO: Extend this to check for the largest process in other zones as well. */
5137 	if (largest_zone == vm_map_entry_zone) {
5138 		pid = find_largest_process_vm_map_entries();
5139 	} else {
5140 		printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
5141 		    "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
5142 		    largest_zone->z_name);
5143 	}
5144 	if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
5145 		printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
5146 	}
5147 
5148 	return largest_zone;
5149 }
5150 
5151 #endif /* !ZALLOC_TEST */
5152 #pragma mark probabilistic gzalloc
5153 #if !ZALLOC_TEST
5154 #if CONFIG_PROB_GZALLOC
5155 
5156 extern uint32_t random(void);
5157 struct pgz_backtrace {
5158 	uint32_t  pgz_depth;
5159 	int32_t   pgz_bt[MAX_ZTRACE_DEPTH];
5160 };
5161 
5162 static int32_t  PERCPU_DATA(pgz_sample_counter);
5163 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
5164 static uint32_t pgz_uses;       /* number of zones using PGZ */
5165 static int32_t  pgz_slot_avail;
5166 #if OS_ATOMIC_HAS_LLSC
5167 struct zone_page_metadata *pgz_slot_head;
5168 #else
5169 static struct pgz_slot_head {
5170 	uint32_t psh_count;
5171 	uint32_t psh_slot;
5172 } pgz_slot_head;
5173 #endif
5174 struct zone_page_metadata *pgz_slot_tail;
5175 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5176 
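/*
 * Layout summary (derived from the code below): the PGZ range interleaves
 * guard pages and usable slots, so slot `s' lives at
 * zi_pgz_range.min_address + ptoa(2 * s + 1) and the whole range spans
 * 2 * pgz_slots + 1 pages.  Even metadata entries describe guard pages
 * (ZM_PGZ_GUARD), odd entries describe slots.  Free slots are threaded
 * through zm_pgz_slot_next: pgz_slot_free() appends at pgz_slot_tail with
 * an atomic exchange, pgz_slot_alloc() pops from the head, and
 * pgz_slot_avail bounds how many slots may be handed out (the remainder
 * acts as the quarantine).
 */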
5177 static struct zone_page_metadata *
5178 pgz_meta_raw(uint32_t index)
5179 {
5180 	return VM_FAR_ADD_PTR_UNBOUNDED(zone_info.zi_pgz_meta, index);
5181 }
5182 
5183 static struct zone_page_metadata *
5184 pgz_meta(uint32_t index)
5185 {
5186 	return pgz_meta_raw(2 * index + 1);
5187 }
5188 
5189 static struct pgz_backtrace *
5190 pgz_bt(uint32_t slot, bool free)
5191 {
5192 	/*
5193 	 * While we could use a bounds checked variant, slot is generally
5194 	 * trustworthy and so it isn't necessary.
5195 	 */
5196 	return VM_FAR_ADD_PTR_UNBOUNDED(pgz_backtraces, 2 * slot + free);
5197 }
5198 
5199 static void
5200 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5201 {
5202 	struct backtrace_control ctl = {
5203 		.btc_frame_addr = (uintptr_t)fp,
5204 	};
5205 
5206 	bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5207 	    (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5208 }
5209 
5210 static uint32_t
5211 pgz_slot(vm_offset_t addr)
5212 {
5213 	return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5214 }
5215 
5216 static vm_offset_t
5217 pgz_addr(uint32_t slot)
5218 {
5219 	return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5220 }
5221 
5222 static bool
5223 pgz_sample(vm_offset_t addr, vm_size_t esize)
5224 {
5225 	int32_t *counterp, cnt;
5226 
5227 	if (zone_addr_size_crosses_page(addr, esize)) {
5228 		return false;
5229 	}
5230 
5231 	/*
5232 	 * Note: accessing pgz_sample_counter is racy but this is
5233 	 *       kind of acceptable given that this is not
5234 	 *       a security load bearing feature.
5235 	 */
5236 
5237 	counterp = PERCPU_GET(pgz_sample_counter);
5238 	cnt = *counterp;
5239 	if (__probable(cnt > 0)) {
5240 		*counterp = cnt - 1;
5241 		return false;
5242 	}
5243 
5244 	if (pgz_slot_avail <= 0) {
5245 		return false;
5246 	}
5247 
5248 	/*
5249 	 * zalloc_random_uniform() might block, so when preemption is disabled,
5250 	 * set the counter to `-1` which will cause the next allocation
5251 	 * that can block to generate a new random value.
5252 	 *
5253 	 * No allocation on this CPU will sample until then.
5254 	 */
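	/*
	 * Drawing the counter uniformly in [0, 2 * pgz_sample_rate) makes the
	 * average number of allocations between two samples pgz_sample_rate.
	 */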
5255 	if (get_preemption_level()) {
5256 		*counterp = -1;
5257 	} else {
5258 		*counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5259 	}
5260 
5261 	return cnt == 0;
5262 }
5263 
5264 static inline bool
5265 pgz_slot_alloc(uint32_t *slot)
5266 {
5267 	struct zone_page_metadata *m;
5268 	uint32_t tries = 100;
5269 
5270 	disable_preemption();
5271 
5272 #if OS_ATOMIC_USE_LLSC
5273 	int32_t ov, nv;
5274 	os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5275 		if (__improbable(ov <= 0)) {
5276 		        os_atomic_rmw_loop_give_up({
5277 				enable_preemption();
5278 				return false;
5279 			});
5280 		}
5281 		nv = ov - 1;
5282 	});
5283 #else
5284 	if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5285 		os_atomic_inc(&pgz_slot_avail, relaxed);
5286 		enable_preemption();
5287 		return false;
5288 	}
5289 #endif
5290 
5291 again:
5292 	if (__improbable(tries-- == 0)) {
5293 		/*
5294 		 * Too much contention,
5295 		 * extremely unlikely but do not stay stuck.
5296 		 */
5297 		os_atomic_inc(&pgz_slot_avail, relaxed);
5298 		enable_preemption();
5299 		return false;
5300 	}
5301 
5302 #if OS_ATOMIC_HAS_LLSC
5303 	uint32_t castries = 20;
5304 	do {
5305 		if (__improbable(castries-- == 0)) {
5306 			/*
5307 			 * rdar://115922110 On devices with very many cores,
5308 			 * this can fail for a very long time.
5309 			 */
5310 			goto again;
5311 		}
5312 
5313 		m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5314 		if (__improbable(m->zm_pgz_slot_next == NULL)) {
5315 			/*
5316 			 * Either we are waiting for an enqueuer (unlikely)
5317 			 * or we are competing with another core and
5318 			 * are looking at a popped element.
5319 			 */
5320 			os_atomic_clear_exclusive();
5321 			goto again;
5322 		}
5323 	} while (!os_atomic_store_exclusive(&pgz_slot_head,
5324 	    m->zm_pgz_slot_next, relaxed));
5325 #else
5326 	struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5327 	struct pgz_slot_head ov, nv;
5328 	os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5329 		m = pgz_meta_raw(ov.psh_slot * 2);
5330 		if (__improbable(m->zm_pgz_slot_next == NULL)) {
5331 		        /*
5332 		         * Either we are waiting for an enqueuer (unlikely)
5333 		         * or we are competing with another core and
5334 		         * are looking at a popped element.
5335 		         */
5336 		        os_atomic_rmw_loop_give_up(goto again);
5337 		}
5338 		nv.psh_count = ov.psh_count + 1;
5339 		nv.psh_slot  = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5340 	});
5341 #endif
5342 
5343 	enable_preemption();
5344 
5345 	m->zm_pgz_slot_next = NULL;
5346 	*slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5347 	return true;
5348 }
5349 
5350 static inline bool
5351 pgz_slot_free(uint32_t slot)
5352 {
5353 	struct zone_page_metadata *m = pgz_meta_raw(2 * slot);
5354 	struct zone_page_metadata *t;
5355 
5356 	disable_preemption();
5357 	t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5358 	os_atomic_store(&t->zm_pgz_slot_next, m, release);
5359 	os_atomic_inc(&pgz_slot_avail, relaxed);
5360 	enable_preemption();
5361 
5362 	return true;
5363 }
5364 
5365 /*!
5366  * @function pgz_protect()
5367  *
5368  * @brief
5369  * Try to protect an allocation with PGZ.
5370  *
5371  * @param zone          The zone the allocation was made against.
5372  * @param addr          An allocated element address to protect.
5373  * @param fp            The caller frame pointer (for the backtrace).
5374  * @returns             The new address for the element, or @c addr.
5375  */
5376 __attribute__((noinline))
5377 static vm_offset_t
5378 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5379 {
5380 	kern_return_t kr;
5381 	uint32_t slot;
5382 	uint_t flags = 0;
5383 
5384 	if (!pgz_slot_alloc(&slot)) {
5385 		return addr;
5386 	}
5387 
5388 	/*
5389 	 * Try to double-map the page (may fail if Z_NOWAIT).
5390 	 * We will always find a PA because pgz_init() pre-expanded the pmap.
5391 	 */
5392 	pmap_paddr_t pa = kvtophys(trunc_page(addr));
5393 	vm_offset_t  new_addr = pgz_addr(slot);
5394 	kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5395 	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, flags, TRUE,
5396 	    get_preemption_level() ? (PMAP_OPTIONS_NOWAIT | PMAP_OPTIONS_NOPREEMPT) : 0,
5397 	    NULL, PMAP_MAPPING_TYPE_INFER);
5398 
5399 	if (__improbable(kr != KERN_SUCCESS)) {
5400 		pgz_slot_free(slot);
5401 		return addr;
5402 	}
5403 
5404 	struct zone_page_metadata tmp = {
5405 		.zm_chunk_len = ZM_PGZ_ALLOCATED,
5406 		.zm_index     = zone_index(zone),
5407 	};
5408 	struct zone_page_metadata *meta = pgz_meta(slot);
5409 
5410 	os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5411 	os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5412 	pgz_backtrace(pgz_bt(slot, false), fp);
5413 
5414 	return new_addr + (addr & PAGE_MASK);
5415 }
5416 
5417 /*!
5418  * @function pgz_unprotect()
5419  *
5420  * @brief
5421  * Release a PGZ slot and returns the original address of a freed element.
5422  *
5423  * @param addr          A PGZ protected element address.
5424  * @param fp            The caller frame pointer (for the backtrace).
5425  * @returns             The non protected address for the element
5426  *                      that was passed to @c pgz_protect().
5427  */
5428 __attribute__((noinline))
5429 static vm_offset_t
5430 pgz_unprotect(vm_offset_t addr, void *fp)
5431 {
5432 	struct zone_page_metadata *meta;
5433 	struct zone_page_metadata tmp;
5434 	uint32_t slot;
5435 
5436 	slot = pgz_slot(addr);
5437 	meta = zone_meta_from_addr(addr);
5438 	tmp  = *meta;
5439 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5440 		goto double_free;
5441 	}
5442 
5443 	pmap_remove_options(kernel_pmap, vm_memtag_canonicalize_kernel(trunc_page(addr)),
5444 	    vm_memtag_canonicalize_kernel(trunc_page(addr) + PAGE_SIZE),
5445 	    PMAP_OPTIONS_REMOVE | PMAP_OPTIONS_NOPREEMPT);
5446 
5447 	pgz_backtrace(pgz_bt(slot, true), fp);
5448 
5449 	tmp.zm_chunk_len = ZM_PGZ_FREE;
5450 	tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5451 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5452 		goto double_free;
5453 	}
5454 
5455 	pgz_slot_free(slot);
5456 	return tmp.zm_pgz_orig_addr;
5457 
5458 double_free:
5459 	panic_fault_address = addr;
5460 	meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5461 	panic("probabilistic gzalloc double free: %p", (void *)addr);
5462 }
5463 
5464 bool
5465 pgz_owned(mach_vm_address_t addr)
5466 {
5467 	return mach_vm_range_contains(&zone_info.zi_pgz_range, vm_memtag_canonicalize_kernel(addr));
5468 }
5469 
5470 
5471 __attribute__((always_inline))
5472 vm_offset_t
5473 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5474 {
5475 	struct zone_page_metadata *meta;
5476 
5477 	if (__probable(!pgz_owned(addr))) {
5478 		return (vm_offset_t)addr;
5479 	}
5480 
5481 	if (zone_addr_size_crosses_page(addr, size)) {
5482 		panic("invalid size for PGZ protected address %p:%p",
5483 		    (void *)addr, (void *)(addr + size));
5484 	}
5485 
5486 	meta = zone_meta_from_addr((vm_offset_t)addr);
5487 	if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5488 		panic_fault_address = (vm_offset_t)addr;
5489 		panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5490 	}
5491 
5492 	return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5493 }
5494 
5495 __attribute__((always_inline))
5496 vm_offset_t
5497 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5498 {
5499 	struct zone_page_metadata *meta;
5500 	struct zone_page_metadata tmp;
5501 
5502 	if (__probable(!pgz_owned(addr))) {
5503 		return addr;
5504 	}
5505 
5506 	meta = zone_meta_from_addr(addr);
5507 	tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5508 
5509 	addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5510 
5511 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5512 		return 0;
5513 	}
5514 
5515 	if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5516 		return 0;
5517 	}
5518 
5519 	return addr;
5520 }
5521 
5522 static void
5523 pgz_zone_init(zone_t z)
5524 {
5525 	char zn[MAX_ZONE_NAME];
5526 	char zv[MAX_ZONE_NAME];
5527 	char key[30];
5528 
5529 	if (zone_elem_inner_size(z) > PAGE_SIZE) {
5530 		return;
5531 	}
5532 
5533 	if (pgz_all) {
5534 		os_atomic_inc(&pgz_uses, relaxed);
5535 		z->z_pgz_tracked = true;
5536 		return;
5537 	}
5538 
5539 	snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5540 
5541 	for (int i = 1;; i++) {
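	/*
	 * Illustrative: individual zones are opted in with numbered boot-args
	 * of the form "pgz1=<zone or heap name>", "pgz2=...", matched against
	 * the heap-qualified zone name below; when pgz_all is set (handled
	 * above), every eligible zone is tracked instead.
	 */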
5542 		snprintf(key, sizeof(key), "pgz%d", i);
5543 		if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5544 			break;
5545 		}
5546 		if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5547 			os_atomic_inc(&pgz_uses, relaxed);
5548 			z->z_pgz_tracked = true;
5549 			break;
5550 		}
5551 	}
5552 }
5553 
5554 __startup_func
5555 static vm_size_t
5556 pgz_get_size(void)
5557 {
5558 	if (pgz_slots == UINT32_MAX) {
5559 		/*
5560 		 * Scale with RAM size: ~200 slots per GB
5561 		 */
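		/* e.g. with 4 GB of RAM: (4 << 30) >> 22 = 1024 slots */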
5562 		pgz_slots = (uint32_t)(sane_size >> 22);
5563 	}
5564 
5565 	/*
5566 	 * Make sure that the slot allocation scheme works.
5567 	 * See pgz_slot_alloc() / pgz_slot_free().
5568 	 */
5569 	if (pgz_slots < zpercpu_count() * 4) {
5570 		pgz_slots = zpercpu_count() * 4;
5571 	}
5572 	if (pgz_slots >= UINT16_MAX) {
5573 		pgz_slots = UINT16_MAX - 1;
5574 	}
5575 
5576 	/*
5577 	 * Quarantine is 33% of slots by default, no more than 90%.
5578 	 */
5579 	if (pgz_quarantine == 0) {
5580 		pgz_quarantine = pgz_slots / 3;
5581 	}
5582 	if (pgz_quarantine > pgz_slots * 9 / 10) {
5583 		pgz_quarantine = pgz_slots * 9 / 10;
5584 	}
5585 	pgz_slot_avail = pgz_slots - pgz_quarantine;
5586 
5587 	return ptoa(2 * pgz_slots + 1);
5588 }
5589 
5590 __startup_func
5591 static void
5592 pgz_init(void)
5593 {
5594 	if (!pgz_uses) {
5595 		return;
5596 	}
5597 
5598 	if (pgz_sample_rate == 0) {
5599 		/*
5600 		 * If no rate was provided, pick a random one that scales
5601 		 * with the number of protected zones.
5602 		 *
5603 		 * Average two uniform draws (a roughly triangular distribution)
5604 		 * to avoid having too many really fast sample rates.
5605 		 */
5606 		uint32_t factor = MIN(pgz_uses, 10);
5607 		uint32_t max_rate = 1000 * factor;
5608 		uint32_t min_rate =  100 * factor;
5609 
5610 		pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5611 		    zalloc_random_uniform32(min_rate, max_rate)) / 2;
5612 	}
5613 
5614 	struct mach_vm_range *r = &zone_info.zi_pgz_range;
5615 	zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5616 	zone_meta_populate(r->min_address, mach_vm_range_size(r));
5617 
5618 	for (uint32_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5619 		pgz_meta_raw(i)->zm_chunk_len = ZM_PGZ_GUARD;
5620 	}
5621 
5622 	for (uint32_t i = 1; i < pgz_slots; i++) {
5623 		pgz_meta_raw(2 * i - 1)->zm_pgz_slot_next = pgz_meta_raw(2 * i + 1);
5624 	}
5625 #if OS_ATOMIC_HAS_LLSC
5626 	pgz_slot_head = pgz_meta_raw(1);
5627 #endif
5628 	pgz_slot_tail = pgz_meta_raw(2 * pgz_slots - 1);
5629 
5630 	kernel_memory_allocate(kernel_map, (vm_offset_t *)&pgz_backtraces,
5631 	    /* size */ sizeof(struct pgz_backtrace) * 2 * pgz_slots,
5632 	    /* mask */ ZALIGN_PTR,
5633 	    KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO | KMA_NOSOFTLIMIT,
5634 	    VM_KERN_MEMORY_KALLOC);
5635 
5636 	/*
5637 	 * expand the pmap so that pmap_enter_options_addr()
5638 	 * in pgz_protect() never needs to call pmap_expand().
5639 	 */
5640 	for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5641 		(void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5642 		    VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5643 		    PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5644 	}
5645 
5646 	/* do this last as this will enable pgz */
5647 	percpu_foreach(counter, pgz_sample_counter) {
5648 		*counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5649 	}
5650 }
5651 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
5652 
5653 static void
5654 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5655 {
5656 	struct pgz_backtrace *bt = pgz_bt(slot, free);
5657 	const char *what = free ? "Free" : "Allocation";
5658 	uintptr_t buf[MAX_ZTRACE_DEPTH];
5659 
5660 	if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5661 		paniclog_append_noflush("  Can't decode %s Backtrace\n", what);
5662 		return;
5663 	}
5664 
5665 	backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5666 	    (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5667 
5668 	paniclog_append_noflush("  %s Backtrace:\n", what);
5669 	for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5670 		if (has_syms) {
5671 			paniclog_append_noflush("    %p ", (void *)buf[i]);
5672 			panic_print_symbol_name(buf[i]);
5673 			paniclog_append_noflush("\n");
5674 		} else {
5675 			paniclog_append_noflush("    %p\n", (void *)buf[i]);
5676 		}
5677 	}
5678 	kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5679 }
5680 
5681 static void
5682 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5683 {
5684 	struct zone_page_metadata *meta;
5685 	vm_offset_t elem, esize;
5686 	const char *type;
5687 	const char *prob;
5688 	uint32_t slot;
5689 	zone_t z;
5690 
5691 	slot = pgz_slot(addr);
5692 	meta = pgz_meta(slot);
5693 	elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5694 
5695 	paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5696 
5697 	if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5698 	    meta->zm_index &&
5699 	    meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5700 		z = &zone_array[meta->zm_index];
5701 	} else {
5702 		paniclog_append_noflush("  Zone    : <unknown>\n");
5703 		paniclog_append_noflush("  Address : %p\n", (void *)addr);
5704 		paniclog_append_noflush("\n");
5705 		return;
5706 	}
5707 
5708 	esize = zone_elem_inner_size(z);
5709 	paniclog_append_noflush("  Zone    : %s%s\n",
5710 	    zone_heap_name(z), zone_name(z));
5711 	paniclog_append_noflush("  Address : %p\n", (void *)addr);
5712 	paniclog_append_noflush("  Element : [%p, %p) of size %d\n",
5713 	    (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5714 
5715 	if (addr < elem) {
5716 		type = "out-of-bounds(underflow) + use-after-free";
5717 		prob = "low";
5718 	} else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5719 		type = "double-free";
5720 		prob = "high";
5721 	} else if (addr < elem + esize) {
5722 		type = "use-after-free";
5723 		prob = "high";
5724 	} else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5725 		type = "out-of-bounds + use-after-free";
5726 		prob = "low";
5727 	} else {
5728 		type = "out-of-bounds";
5729 		prob = "high";
5730 	}
5731 	paniclog_append_noflush("  Kind    : %s (%s confidence)\n",
5732 	    type, prob);
5733 	if (addr < elem) {
5734 		paniclog_append_noflush("  Access  : %d byte(s) before\n",
5735 		    (uint32_t)(elem - addr) + 1);
5736 	} else if (addr < elem + esize) {
5737 		paniclog_append_noflush("  Access  : %d byte(s) inside\n",
5738 		    (uint32_t)(addr - elem) + 1);
5739 	} else {
5740 		paniclog_append_noflush("  Access  : %d byte(s) past\n",
5741 		    (uint32_t)(addr - (elem + esize)) + 1);
5742 	}
5743 
5744 	panic_display_pgz_bt(has_syms, slot, false);
5745 	if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5746 		panic_display_pgz_bt(has_syms, slot, true);
5747 	}
5748 
5749 	paniclog_append_noflush("\n");
5750 }
5751 
5752 vm_offset_t pgz_protect_for_testing_only(zone_t zone, vm_offset_t addr, void *fp);
5753 vm_offset_t
5754 pgz_protect_for_testing_only(zone_t zone, vm_offset_t addr, void *fp)
5755 {
5756 	return pgz_protect(zone, addr, fp);
5757 }
5758 
5759 
5760 #endif /* CONFIG_PROB_GZALLOC */
5761 #endif /* !ZALLOC_TEST */
5762 #pragma mark zfree
5763 #if !ZALLOC_TEST
5764 
5765 /*!
5766  * @defgroup zfree
5767  * @{
5768  *
5769  * @brief
5770  * The codepath for zone frees.
5771  *
5772  * @discussion
5773  * There are 4 major ways to free memory that end up in the zone allocator:
5774  * - @c zfree()
5775  * - @c zfree_percpu()
5776  * - @c kfree*()
5777  * - @c zfree_permanent()
5778  *
5779  * While permanent zones have their own allocation scheme, all other codepaths
5780  * will eventually go through the @c zfree_ext() choking point.
5781  */
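
/*
 * Minimal usage sketch (illustrative only; `my_zone' and `struct foo' are
 * hypothetical and not defined in this file):
 *
 *     ZONE_DEFINE_TYPE(my_zone, "example.foo", struct foo, ZC_NONE);
 *
 *     struct foo *p = zalloc_flags(my_zone, Z_WAITOK | Z_ZERO);
 *     ...
 *     zfree(my_zone, p);
 *
 * The zfree() call funnels into zfree_ext() below.
 */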
5782 
5783 __header_always_inline void
5784 zfree_drop(zone_t zone, vm_offset_t addr)
5785 {
5786 	vm_offset_t esize = zone_elem_outer_size(zone);
5787 	struct zone_page_metadata *meta;
5788 	vm_offset_t eidx;
5789 
5790 	meta = zone_element_resolve(zone, addr, &eidx);
5791 
5792 	if (!zone_meta_mark_free(meta, eidx)) {
5793 		zone_meta_double_free_panic(zone, addr, __func__);
5794 	}
5795 
5796 	vm_offset_t old_size = meta->zm_alloc_size;
5797 	vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5798 	vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5799 
5800 	if (new_size == 0) {
5801 		/* whether the page was on the intermediate or all_used queue, move it to free */
5802 		zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5803 		zone->z_wired_empty += meta->zm_chunk_len;
5804 	} else if (old_size + esize > max_size) {
5805 		/* first free element on page, move from all_used */
5806 		zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5807 	}
5808 
5809 	if (__improbable(zone->z_exhausted_wait)) {
5810 		zone_wakeup_exhausted_waiters(zone);
5811 	}
5812 }
5813 
5814 __attribute__((noinline))
5815 static void
5816 zfree_item(zone_t zone, vm_offset_t addr)
5817 {
5818 	/* transfer preemption count to lock */
5819 	zone_lock_nopreempt_check_contention(zone);
5820 
5821 	zfree_drop(zone, addr);
5822 	zone->z_elems_free += 1;
5823 
5824 	zone_unlock(zone);
5825 }
5826 
5827 static void
5828 zfree_cached_depot_recirculate(
5829 	zone_t                  zone,
5830 	uint32_t                depot_max,
5831 	zone_cache_t            cache)
5832 {
5833 	smr_t smr = zone_cache_smr(cache);
5834 	smr_seq_t seq;
5835 	uint32_t n;
5836 
5837 	zone_recirc_lock_nopreempt_check_contention(zone);
5838 
5839 	n = cache->zc_depot.zd_full;
5840 	if (n >= depot_max) {
5841 		/*
5842 		 * If SMR is in use, rotate the entire chunk of magazines.
5843 		 *
5844 		 * If the head of the recirculation layer is ready to be
5845 		 * reused, pull them back to refill a little.
5846 		 */
5847 		seq = zone_depot_move_full(&zone->z_recirc,
5848 		    &cache->zc_depot, smr ? n : n - depot_max / 2, NULL);
5849 
5850 		if (smr) {
5851 			smr_deferred_advance_commit(smr, seq);
5852 			if (depot_max > 1 && zone_depot_poll(&zone->z_recirc, smr)) {
5853 				zone_depot_move_full(&cache->zc_depot,
5854 				    &zone->z_recirc, depot_max / 2, NULL);
5855 			}
5856 		}
5857 	}
5858 
5859 	n = depot_max - cache->zc_depot.zd_full;
5860 	if (n > zone->z_recirc.zd_empty) {
5861 		n = zone->z_recirc.zd_empty;
5862 	}
5863 	if (n) {
5864 		zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5865 		    n, zone);
5866 	}
5867 
5868 	zone_recirc_unlock_nopreempt(zone);
5869 }
5870 
5871 static zone_cache_t
5872 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5873 {
5874 	zone_magazine_t mag = NULL, tmp = NULL;
5875 	smr_t smr = zone_cache_smr(cache);
5876 	bool wakeup_exhausted = false;
5877 
5878 	if (zone->z_recirc.zd_empty == 0) {
5879 		mag = zone_magazine_alloc(Z_NOWAIT);
5880 	}
5881 
5882 	zone_recirc_lock_nopreempt_check_contention(zone);
5883 
5884 	if (mag == NULL && zone->z_recirc.zd_empty) {
5885 		mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5886 		__builtin_assume(mag);
5887 	}
5888 	if (mag) {
5889 		tmp = zone_magazine_replace(cache, mag, true);
5890 		if (smr) {
5891 			smr_deferred_advance_commit(smr, tmp->zm_seq);
5892 		}
5893 		if (zone_security_array[zone_index(zone)].z_lifo) {
5894 			zone_depot_insert_head_full(&zone->z_recirc, tmp);
5895 		} else {
5896 			zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5897 		}
5898 
5899 		wakeup_exhausted = zone->z_exhausted_wait;
5900 	}
5901 
5902 	zone_recirc_unlock_nopreempt(zone);
5903 
5904 	if (__improbable(wakeup_exhausted)) {
5905 		zone_lock_nopreempt(zone);
5906 		if (zone->z_exhausted_wait) {
5907 			zone_wakeup_exhausted_waiters(zone);
5908 		}
5909 		zone_unlock_nopreempt(zone);
5910 	}
5911 
5912 	return mag ? cache : NULL;
5913 }
5914 
5915 __attribute__((noinline))
5916 static zone_cache_t
5917 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5918 {
5919 	zone_magazine_t mag = NULL, tmp = NULL;
5920 	uint32_t depot_max;
5921 
5922 	depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5923 	if (depot_max) {
5924 		zone_depot_lock_nopreempt(cache);
5925 
5926 		if (cache->zc_depot.zd_empty == 0) {
5927 			zfree_cached_depot_recirculate(zone, depot_max, cache);
5928 		}
5929 
5930 		if (__probable(cache->zc_depot.zd_empty)) {
5931 			mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5932 			__builtin_assume(mag);
5933 		} else {
5934 			mag = zone_magazine_alloc(Z_NOWAIT);
5935 		}
5936 		if (mag) {
5937 			tmp = zone_magazine_replace(cache, mag, true);
5938 			zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5939 		}
5940 
5941 		zone_depot_unlock_nopreempt(cache);
5942 
5943 		return mag ? cache : NULL;
5944 	}
5945 
5946 	return zfree_cached_recirculate(zone, cache);
5947 }
5948 
5949 __attribute__((always_inline))
5950 static inline zone_cache_t
5951 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5952 {
5953 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5954 
5955 	if (__probable(cache->zc_free_cur < zc_mag_size())) {
5956 		return cache;
5957 	}
5958 
5959 	if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5960 		zone_cache_swap_magazines(cache);
5961 		return cache;
5962 	}
5963 
5964 	return zfree_cached_trim(zone, cache);
5965 }
5966 
5967 __attribute__((always_inline))
5968 static inline zone_cache_t
5969 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5970 {
5971 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5972 	size_t idx = cache->zc_free_cur;
5973 
5974 	if (__probable(idx + 1 < zc_mag_size())) {
5975 		return cache;
5976 	}
5977 
5978 	/*
5979 	 * when SMR is in use, the bucket is tagged early with
5980 	 * @c smr_deferred_advance(), which costs a full barrier,
5981 	 * but performs no store.
5982 	 *
5983 	 * When zones hit the recirculation layer, the advance is committed,
5984 	 * under the recirculation lock (see zfree_cached_recirculate()).
5985 	 *
5986 	 * When done this way, the zone contention detection mechanism
5987 	 * will adjust the size of the per-cpu depots gracefully, which
5988 	 * mechanically reduces the pace of these commits as usage increases.
5989 	 */
5990 
5991 	if (__probable(idx + 1 == zc_mag_size())) {
5992 		zone_magazine_t mag;
5993 
5994 		mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5995 		    offsetof(struct zone_magazine, zm_elems));
5996 		mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5997 		return cache;
5998 	}
5999 
6000 	return zfree_cached_trim(zone, cache);
6001 }
6002 
6003 __attribute__((always_inline))
6004 static inline vm_offset_t
6005 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
6006 {
6007 	struct zone_page_metadata *meta;
6008 	vm_offset_t offs;
6009 
6010 #pragma unused(combined_size)
6011 #if CONFIG_PROB_GZALLOC
6012 	if (__improbable(pgz_owned(elem))) {
6013 		elem = pgz_unprotect(elem, __builtin_frame_address(0));
6014 	}
6015 #endif /* CONFIG_PROB_GZALLOC */
6016 
6017 	meta = zone_meta_from_addr(elem);
6018 	if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
6019 		zone_invalid_element_panic(zone, elem);
6020 	}
6021 
6022 	offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
6023 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6024 		offs += ptoa(meta->zm_page_index);
6025 	}
6026 
6027 	if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
6028 		zone_invalid_element_panic(zone, elem);
6029 	}
6030 
6031 #if VM_TAG_SIZECLASSES
6032 	if (__improbable(zone->z_uses_tags)) {
6033 		vm_tag_t *slot;
6034 
6035 		slot = zba_extra_ref_ptr(meta->zm_bitmap,
6036 		    Z_FAST_QUO(offs, zone->z_quo_magic));
6037 		vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
6038 		    -(long)ZFREE_ELEM_SIZE(combined_size));
6039 		*slot = VM_KERN_MEMORY_NONE;
6040 	}
6041 #endif /* VM_TAG_SIZECLASSES */
6042 
6043 #if KASAN_CLASSIC
6044 	kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
6045 	    ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
6046 	    zone->z_percpu, __builtin_frame_address(0));
6047 #endif
6048 
6049 	elem = (vm_offset_t)zone_tag_free_element(zone, (caddr_t)elem, ZFREE_ELEM_SIZE(combined_size));
6050 	return elem;
6051 }
6052 
6053 __attribute__((always_inline))
6054 void *
6055 zcache_mark_invalid(zone_t zone, void *elem)
6056 {
6057 	vm_size_t esize = zone_elem_inner_size(zone);
6058 
6059 	ZFREE_LOG(zone, (vm_offset_t)elem, 1);
6060 	return (void *)__zcache_mark_invalid(zone, (vm_offset_t)elem, ZFREE_PACK_SIZE(esize, esize));
6061 }
6062 
6063 /*
6064  *     This function is noinline when zlog can be used so that backtracing
6065  *     can reliably skip the boring zfree_ext() and zfree_log()
6066  *     frames.
6067  */
6068 #if ZALLOC_ENABLE_LOGGING
6069 __attribute__((noinline))
6070 #endif /* ZALLOC_ENABLE_LOGGING */
6071 void
6072 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
6073 {
6074 	vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
6075 	vm_offset_t elem = (vm_offset_t)addr;
6076 	int cpu;
6077 
6078 	DTRACE_VM2(zfree, zone_t, zone, void*, elem);
6079 
6080 	ZFREE_LOG(zone, elem, 1);
6081 	elem = __zcache_mark_invalid(zone, elem, combined_size);
6082 
6083 	disable_preemption();
6084 	cpu = cpu_number();
6085 	zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
6086 
6087 #if KASAN_CLASSIC
6088 	if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
6089 		struct kasan_quarantine_result kqr;
6090 
6091 		kqr  = kasan_quarantine(elem, esize);
6092 		elem = kqr.addr;
6093 		zone = kqr.zone;
6094 		if (elem == 0) {
6095 			return enable_preemption();
6096 		}
6097 	}
6098 #endif
6099 
6100 	if (zone->z_pcpu_cache) {
6101 		zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
6102 
6103 		if (__probable(cache)) {
6104 			cache->zc_free_elems[cache->zc_free_cur++] = elem;
6105 			return enable_preemption();
6106 		}
6107 	}
6108 
6109 	return zfree_item(zone, elem);
6110 }
6111 
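/*
 * Helper for the batched free paths: moves as many elements as fit from the
 * passed-in stack into the current CPU's free magazine, marking each one
 * invalid on the way (via the cache ops or __zcache_mark_invalid()), and
 * returns whatever part of the stack could not be absorbed.
 */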
6112 __attribute__((always_inline))
6113 static inline zstack_t
6114 zcache_free_stack_to_cpu(
6115 	zone_id_t               zid,
6116 	zone_cache_t            cache,
6117 	zstack_t                stack,
6118 	vm_size_t               esize,
6119 	zone_cache_ops_t        ops,
6120 	bool                    zero)
6121 {
6122 	size_t       n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
6123 	vm_offset_t *p;
6124 
6125 	stack.z_count -= n;
6126 	cache->zc_free_cur += n;
6127 	p = cache->zc_free_elems + cache->zc_free_cur;
6128 
6129 	do {
6130 		void *o = zstack_pop_no_delta(&stack);
6131 
6132 		if (ops) {
6133 			o = ops->zc_op_mark_invalid(zid, o);
6134 		} else {
6135 			if (zero) {
6136 				bzero(o, esize);
6137 			}
6138 			o = (void *)__zcache_mark_invalid(zone_by_id(zid),
6139 			    (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
6140 		}
6141 		*--p  = (vm_offset_t)o;
6142 	} while (--n > 0);
6143 
6144 	return stack;
6145 }
6146 
6147 __attribute__((always_inline))
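/*
 * Common single-element free path for zcache_free() and zfree_nozero():
 * charges the per-CPU freed-bytes statistics, marks the element invalid,
 * and stashes it in the per-CPU free magazine, falling back to
 * ops->zc_op_free() or zfree_item() when no magazine space is available.
 */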
6148 static inline void
6149 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6150 {
6151 	vm_offset_t elem = (vm_offset_t)addr;
6152 	zone_cache_t cache;
6153 	vm_size_t esize;
6154 	zone_t zone = zone_by_id(zid);
6155 	int cpu;
6156 
6157 	ZFREE_LOG(zone, elem, 1);
6158 
6159 	disable_preemption();
6160 	cpu = cpu_number();
6161 	esize = zone_elem_inner_size(zone);
6162 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6163 	if (!ops) {
6164 		addr = (void *)__zcache_mark_invalid(zone, elem,
6165 		    ZFREE_PACK_SIZE(esize, esize));
6166 	}
6167 	cache = zfree_cached_get_pcpu_cache(zone, cpu);
6168 	if (__probable(cache)) {
6169 		if (ops) {
6170 			addr = ops->zc_op_mark_invalid(zid, addr);
6171 		}
6172 		cache->zc_free_elems[cache->zc_free_cur++] = elem;
6173 		enable_preemption();
6174 	} else if (ops) {
6175 		enable_preemption();
6176 		os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
6177 		ops->zc_op_free(zid, addr);
6178 	} else {
6179 		zfree_item(zone, elem);
6180 	}
6181 }
6182 
6183 __attribute__((always_inline))
6184 static inline void
6185 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
6186 {
6187 	zone_t zone = zone_by_id(zid);
6188 	zone_cache_t cache;
6189 	vm_size_t esize;
6190 	int cpu;
6191 
6192 	ZFREE_LOG(zone, stack.z_head, stack.z_count);
6193 
6194 	disable_preemption();
6195 	cpu = cpu_number();
6196 	esize = zone_elem_inner_size(zone);
6197 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
6198 	    stack.z_count * esize;
6199 
6200 	for (;;) {
6201 		cache = zfree_cached_get_pcpu_cache(zone, cpu);
6202 		if (__probable(cache)) {
6203 			stack = zcache_free_stack_to_cpu(zid, cache,
6204 			    stack, esize, ops, zero);
6205 			enable_preemption();
6206 		} else if (ops) {
6207 			enable_preemption();
6208 			os_atomic_dec(&zone->z_elems_avail, relaxed);
6209 			ops->zc_op_free(zid, zstack_pop(&stack));
6210 		} else {
6211 			vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
6212 
6213 			if (zero) {
6214 				bzero((void *)addr, esize);
6215 			}
6216 			addr = __zcache_mark_invalid(zone, addr,
6217 			    ZFREE_PACK_SIZE(esize, esize));
6218 			zfree_item(zone, addr);
6219 		}
6220 
6221 		if (stack.z_count == 0) {
6222 			break;
6223 		}
6224 
6225 		disable_preemption();
6226 		cpu = cpu_number();
6227 	}
6228 }
6229 
6230 void
6231 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6232 {
6233 	__builtin_assume(ops != NULL);
6234 	zcache_free_1_ext(zid, addr, ops);
6235 }
6236 
6237 void
6238 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6239 {
6240 	__builtin_assume(ops != NULL);
6241 	zcache_free_n_ext(zid, stack, ops, false);
6242 }
6243 
6244 void
6245 (zfree_n)(zone_id_t zid, zstack_t stack)
6246 {
6247 	zcache_free_n_ext(zid, stack, NULL, true);
6248 }
6249 
6250 void
6251 (zfree_nozero)(zone_id_t zid, void *addr)
6252 {
6253 	zcache_free_1_ext(zid, addr, NULL);
6254 }
6255 
6256 void
6257 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6258 {
6259 	zcache_free_n_ext(zid, stack, NULL, false);
6260 }
6261 
6262 void
6263 (zfree)(zone_t zov, void *addr)
6264 {
6265 	zone_t zone = zov->z_self;
6266 	zone_stats_t zstats = zov->z_stats;
6267 	vm_offset_t esize = zone_elem_inner_size(zone);
6268 
6269 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6270 	assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6271 	vm_memtag_bzero_fast_checked(addr, esize);
6272 
6273 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6274 }
6275 
6276 __attribute__((noinline))
6277 void
6278 zfree_percpu(union zone_or_view zov, void *addr)
6279 {
6280 	zone_t zone = zov.zov_view->zv_zone;
6281 	zone_stats_t zstats = zov.zov_view->zv_stats;
6282 	vm_offset_t esize = zone_elem_inner_size(zone);
6283 
6284 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6285 	assert(zone->z_percpu);
6286 	zpercpu_foreach_cpu(i) {
6287 		vm_memtag_bzero_fast_checked((char *)addr + ptoa(i), esize);
6288 	}
6289 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6290 }
6291 
6292 void
6293 (zfree_id)(zone_id_t zid, void *addr)
6294 {
6295 	(zfree)(&zone_array[zid], addr);
6296 }
6297 
6298 void
6299 (zfree_ro)(zone_id_t zid, void *addr)
6300 {
6301 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6302 	zone_t zone = zone_by_id(zid);
6303 	zone_stats_t zstats = zone->z_stats;
6304 	vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6305 
6306 #if ZSECURITY_CONFIG(READ_ONLY)
6307 	assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6308 	pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6309 #else
6310 	(void)zid;
6311 	bzero(addr, esize);
6312 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6313 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6314 }
6315 
6316 __attribute__((noinline))
6317 static void
6318 zfree_item_smr(zone_t zone, vm_offset_t addr)
6319 {
6320 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6321 	vm_size_t esize = zone_elem_inner_size(zone);
6322 
6323 	/*
6324 	 * This path should be taken extremely rarely:
6325 	 * it happens only if we failed to allocate an empty bucket.
6326 	 */
6327 	smr_synchronize(zone_cache_smr(cache));
6328 
6329 	cache->zc_free((void *)addr, esize);
6330 	addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6331 
6332 	zfree_item(zone, addr);
6333 }
6334 
6335 void
6336 (zfree_smr)(zone_t zone, void *addr)
6337 {
6338 	vm_offset_t elem = (vm_offset_t)addr;
6339 	vm_offset_t esize;
6340 	zone_cache_t cache;
6341 	int cpu;
6342 
6343 	ZFREE_LOG(zone, elem, 1);
6344 
6345 	disable_preemption();
6346 	cpu   = cpu_number();
6347 #if MACH_ASSERT
6348 	cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6349 	assert(!smr_entered_cpu_noblock(cache->zc_smr, cpu));
6350 #endif
6351 	esize = zone_elem_inner_size(zone);
6352 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6353 	cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6354 	if (__probable(cache)) {
6355 		cache->zc_free_elems[cache->zc_free_cur++] = elem;
6356 		enable_preemption();
6357 	} else {
6358 		zfree_item_smr(zone, elem);
6359 	}
6360 }
6361 
6362 void
6363 (zfree_id_smr)(zone_id_t zid, void *addr)
6364 {
6365 	(zfree_smr)(&zone_array[zid], addr);
6366 }
6367 
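/*
 * Free path for typed kalloc views: if the element's backing zone matches
 * the view's zone (or the view lives in a data heap), free it there;
 * otherwise, if the backing zone belongs to the same signature equivalence
 * class as the view's signature zone (kt_zsig), free directly to the
 * backing zone; as a last resort fall back to the view's early zone
 * (kt_zearly).
 */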
6368 void
6369 kfree_type_impl_internal(
6370 	kalloc_type_view_t  kt_view,
6371 	void               *ptr __unsafe_indexable)
6372 {
6373 	zone_t zsig = kt_view->kt_zsig;
6374 	zone_t z = kt_view->kt_zv.zv_zone;
6375 	struct zone_page_metadata *meta;
6376 	zone_id_t zidx_meta;
6377 	zone_security_flags_t zsflags_meta;
6378 	zone_security_flags_t zsflags_z = zone_security_config(z);
6379 	zone_security_flags_t zsflags_zsig;
6380 
6381 	if (NULL == ptr) {
6382 		return;
6383 	}
6384 
6385 	meta = zone_meta_from_addr((vm_offset_t) ptr);
6386 	zidx_meta = meta->zm_index;
6387 	zsflags_meta = zone_security_array[zidx_meta];
6388 
6389 	if (zone_is_data_kheap(zsflags_z.z_kheap_id) ||
6390 	    zone_has_index(z, zidx_meta)) {
6391 		return (zfree)(&kt_view->kt_zv, ptr);
6392 	}
6393 	zsflags_zsig = zone_security_config(zsig);
6394 	if (zsflags_meta.z_sig_eq == zsflags_zsig.z_sig_eq) {
6395 		z = zone_array + zidx_meta;
6396 		return (zfree)(z, ptr);
6397 	}
6398 
6399 	return (zfree)(kt_view->kt_zearly, ptr);
6400 }
6401 
6402 /*! @} */
6403 #endif /* !ZALLOC_TEST */
6404 #pragma mark zalloc
6405 #if !ZALLOC_TEST
6406 
6407 /*!
6408  * @defgroup zalloc
6409  * @{
6410  *
6411  * @brief
6412  * The codepath for zone allocations.
6413  *
6414  * @discussion
6415  * There are 4 major ways to allocate memory that end up in the zone allocator:
6416  * - @c zalloc(), @c zalloc_flags(), ...
6417  * - @c zalloc_percpu()
6418  * - @c kalloc*()
6419  * - @c zalloc_permanent()
6420  *
6421  * While permanent zones have their own allocation scheme, all other codepaths
6422  * will eventually go through the @c zalloc_ext() choking point.
6423  *
6424  * @c zalloc_return() is the final function everyone tail calls into,
6425  * which prepares the element for consumption by the caller and deals with
6426  * common treatment (zone logging, tags, kasan, validation, ...).
6427  */
6428 
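
/*
 * Minimal usage sketch (illustrative only; ZONE_ID_EXAMPLE and `struct foo'
 * are hypothetical):
 *
 *     struct foo *p = zalloc_id(ZONE_ID_EXAMPLE, Z_WAITOK | Z_ZERO);
 *     ...
 *     zfree_id(ZONE_ID_EXAMPLE, p);
 *
 * Both calls eventually reach zalloc_ext() / zfree_ext().
 */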
6429 /*!
6430  * @function zalloc_import
6431  *
6432  * @brief
6433  * Import @c n elements into the specified array, the opposite of @c zfree_drop().
6434  *
6435  * @param zone          The zone to import elements from
6436  * @param elems         The array to import into
6437  * @param n             The number of elements to import. Must be non zero,
6438  *                      and smaller than @c zone->z_elems_free.
6439  */
6440 __header_always_inline vm_size_t
6441 zalloc_import(
6442 	zone_t                  zone,
6443 	vm_offset_t            *elems,
6444 	zalloc_flags_t          flags,
6445 	uint32_t                n)
6446 {
6447 	vm_offset_t esize = zone_elem_outer_size(zone);
6448 	vm_offset_t offs  = zone_elem_inner_offs(zone);
6449 	zone_stats_t zs;
6450 	int cpu = cpu_number();
6451 	uint32_t i = 0;
6452 
6453 	zs = zpercpu_get_cpu(zone->z_stats, cpu);
6454 
6455 	if (__improbable(zone_caching_disabled < 0)) {
6456 		/*
6457 		 * In the first 10s after boot, mess with
6458 		 * the scan position in order to make early
6459 		 * allocation patterns less predictable.
6460 		 */
6461 		zone_early_scramble_rr(zone, cpu, zs);
6462 	}
6463 
6464 	do {
6465 		vm_offset_t page, eidx, size = 0;
6466 		struct zone_page_metadata *meta;
6467 
6468 		if (!zone_pva_is_null(zone->z_pageq_partial)) {
6469 			meta = zone_pva_to_meta(zone->z_pageq_partial);
6470 			page = zone_pva_to_addr(zone->z_pageq_partial);
6471 		} else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6472 			meta = zone_pva_to_meta(zone->z_pageq_empty);
6473 			page = zone_pva_to_addr(zone->z_pageq_empty);
6474 			zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6475 		} else {
6476 			zone_accounting_panic(zone, "z_elems_free corruption");
6477 		}
6478 
6479 		zone_meta_validate(zone, meta, page);
6480 
6481 		vm_offset_t old_size = meta->zm_alloc_size;
6482 		vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6483 
6484 		do {
6485 			eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6486 			elems[i++] = page + offs + eidx * esize;
6487 			size += esize;
6488 		} while (i < n && old_size + size + esize <= max_size);
6489 
6490 		vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6491 
6492 		if (new_size + esize > max_size) {
6493 			zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6494 		} else if (old_size == 0) {
6495 			/* remove from free, move to intermediate */
6496 			zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6497 		}
6498 	} while (i < n);
6499 
6500 	n = zone_counter_sub(zone, z_elems_free, n);
6501 	if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6502 		zone->z_elems_free_min = n;
6503 	}
6504 
6505 	return zone_elem_inner_size(zone);
6506 }
6507 
6508 __attribute__((always_inline))
6509 static inline vm_offset_t
6510 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6511 {
6512 #pragma unused(zone, flags)
6513 #if KASAN_CLASSIC || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6514 	vm_offset_t esize = zone_elem_inner_size(zone);
6515 #endif
6516 
6517 	addr = vm_memtag_load_tag(addr);
6518 
6519 #if VM_TAG_SIZECLASSES
6520 	if (__improbable(zone->z_uses_tags)) {
6521 		struct zone_page_metadata *meta;
6522 		vm_offset_t offs;
6523 		vm_tag_t *slot;
6524 		vm_tag_t tag;
6525 
6526 		tag  = zalloc_flags_get_tag(flags);
6527 		meta = zone_meta_from_addr(addr);
6528 		offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6529 		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6530 			offs += ptoa(meta->zm_page_index);
6531 		}
6532 
6533 		slot = zba_extra_ref_ptr(meta->zm_bitmap,
6534 		    Z_FAST_QUO(offs, zone->z_quo_magic));
6535 		*slot = tag;
6536 
6537 		vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6538 		    (long)esize);
6539 	}
6540 #endif /* VM_TAG_SIZECLASSES */
6541 
6542 #if CONFIG_PROB_GZALLOC
6543 	if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6544 		addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6545 	}
6546 #endif
6547 
6548 #if KASAN_CLASSIC
6549 	/*
6550 	 * KASAN_CLASSIC integration of kalloc heaps is handled by kalloc_ext()
6551 	 */
6552 	if ((flags & Z_SKIP_KASAN) == 0) {
6553 		kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6554 		    (flags & Z_PCPU), __builtin_frame_address(0));
6555 	}
6556 #endif /* KASAN_CLASSIC */
6557 
6558 	return addr;
6559 }
6560 
6561 __attribute__((always_inline))
6562 void *
6563 zcache_mark_valid(zone_t zone, void *addr)
6564 {
6565 	addr = (void *)__zcache_mark_valid(zone, (vm_offset_t)addr, 0);
6566 	ZALLOC_LOG(zone, (vm_offset_t)addr, 1);
6567 	return addr;
6568 }
6569 
6570 /*!
6571  * @function zalloc_return
6572  *
6573  * @brief
6574  * Performs the tail-end of the work required on allocations before the caller
6575  * uses them.
6576  *
6577  * @discussion
6578  * This function is called without any zone lock held,
6579  * and with preemption restored to the state it had when @c zalloc_ext() was called.
6580  *
6581  * @param zone          The zone we're allocating from.
6582  * @param addr          The element we just allocated.
6583  * @param flags         The flags passed to @c zalloc_ext() (for Z_ZERO).
6584  * @param elem_size     The element size for this zone.
6585  */
6586 __attribute__((always_inline))
6587 static struct kalloc_result
6588 zalloc_return(
6589 	zone_t                  zone,
6590 	vm_offset_t             addr,
6591 	zalloc_flags_t          flags,
6592 	vm_offset_t             elem_size)
6593 {
6594 	addr = __zcache_mark_valid(zone, addr, flags);
6595 #if ZALLOC_ENABLE_ZERO_CHECK
6596 	zalloc_validate_element(zone, addr, elem_size, flags);
6597 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6598 	ZALLOC_LOG(zone, addr, 1);
6599 
6600 	DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6601 	return (struct kalloc_result){ (void *)addr, elem_size };
6602 }
6603 
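/*
 * Threshold tiers below (scaled by zone_early_thres_mul): roughly a quarter
 * page worth of allocations for small elements (<= 512 bytes), 8 elements'
 * worth for medium ones (< 2048 bytes), and a full chunk's worth otherwise.
 */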
6604 static vm_size_t
6605 zalloc_get_shared_threshold(zone_t zone, vm_size_t esize)
6606 {
6607 	if (esize <= 512) {
6608 		return zone_early_thres_mul * page_size / 4;
6609 	} else if (esize < 2048) {
6610 		return zone_early_thres_mul * esize * 8;
6611 	}
6612 	return zone_early_thres_mul * zone->z_chunk_elems * esize;
6613 }
6614 
6615 __attribute__((noinline))
6616 static struct kalloc_result
6617 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6618 {
6619 	vm_offset_t esize, addr;
6620 	zone_stats_t zs;
6621 
6622 	zone_lock_nopreempt_check_contention(zone);
6623 
6624 	zs = zpercpu_get(zstats);
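	/*
	 * Below half of the reserve: kick the async expander if this
	 * allocation can still be satisfied (or must not block), otherwise
	 * grow the zone synchronously before importing.
	 */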
6625 	if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6626 		if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6627 			zone_expand_async_schedule_if_allowed(zone);
6628 		} else {
6629 			zone_expand_locked(zone, flags);
6630 		}
6631 		if (__improbable(zone->z_elems_free == 0)) {
6632 			zs->zs_alloc_fail++;
6633 			zone_unlock(zone);
6634 			if (__improbable(flags & Z_NOFAIL)) {
6635 				zone_nofail_panic(zone);
6636 			}
6637 			DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6638 			return (struct kalloc_result){ };
6639 		}
6640 	}
6641 
6642 	esize = zalloc_import(zone, &addr, flags, 1);
6643 	zs->zs_mem_allocated += esize;
6644 
6645 	if (__improbable(!zone_share_always &&
6646 	    !os_atomic_load(&zs->zs_alloc_not_early, relaxed))) {
6647 		if (flags & Z_SET_NOTEARLY) {
6648 			vm_size_t shared_threshold = zalloc_get_shared_threshold(zone, esize);
6649 
6650 			if (zs->zs_mem_allocated >= shared_threshold) {
6651 				zpercpu_foreach(zs_cpu, zstats) {
6652 					os_atomic_store(&zs_cpu->zs_alloc_not_early, 1, relaxed);
6653 				}
6654 			}
6655 		}
6656 	}
6657 	zone_unlock(zone);
6658 
6659 	return zalloc_return(zone, addr, flags, esize);
6660 }
6661 
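/*
 * Refills the per-CPU alloc magazine straight from the zone's page queues:
 * imports up to one magazine's worth of elements (less if the zone is low),
 * and backs off entirely when caching is disabled or the zone is at or
 * below half of its reserve.
 */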
6662 static void
6663 zalloc_cached_import(
6664 	zone_t                  zone,
6665 	zalloc_flags_t          flags,
6666 	zone_cache_t            cache)
6667 {
6668 	uint16_t n_elems = zc_mag_size();
6669 
6670 	zone_lock_nopreempt(zone);
6671 
6672 	if (__probable(!zone_caching_disabled &&
6673 	    zone->z_elems_free > zone->z_elems_rsv / 2)) {
6674 		if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6675 			zone_expand_async_schedule_if_allowed(zone);
6676 		}
6677 		if (zone->z_elems_free < n_elems) {
6678 			n_elems = (uint16_t)zone->z_elems_free;
6679 		}
6680 		zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6681 		cache->zc_alloc_cur = n_elems;
6682 	}
6683 
6684 	zone_unlock_nopreempt(zone);
6685 }
6686 
6687 static void
6688 zalloc_cached_depot_recirculate(
6689 	zone_t                  zone,
6690 	uint32_t                depot_max,
6691 	zone_cache_t            cache,
6692 	smr_t                   smr)
6693 {
6694 	smr_seq_t seq;
6695 	uint32_t n;
6696 
6697 	zone_recirc_lock_nopreempt_check_contention(zone);
6698 
6699 	n = cache->zc_depot.zd_empty;
6700 	if (n >= depot_max) {
6701 		zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6702 		    n - depot_max / 2, NULL);
6703 	}
6704 
6705 	n = cache->zc_depot.zd_full;
6706 	if (smr && n) {
6707 		/*
6708 		 * if SMR is in use, it means smr_poll() failed,
6709 		 * so rotate the entire chunk of magazines in order
6710 		 * to let the sequence numbers age.
6711 		 */
6712 		seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot,
6713 		    n, NULL);
6714 		smr_deferred_advance_commit(smr, seq);
6715 	}
6716 
6717 	n = depot_max - cache->zc_depot.zd_empty;
6718 	if (n > zone->z_recirc.zd_full) {
6719 		n = zone->z_recirc.zd_full;
6720 	}
6721 
6722 	if (n && zone_depot_poll(&zone->z_recirc, smr)) {
6723 		zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6724 		    n, zone);
6725 	}
6726 
6727 	zone_recirc_unlock_nopreempt(zone);
6728 }
6729 
6730 static void
6731 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6732 {
6733 	zone_smr_free_cb_t zc_free = cache->zc_free;
6734 	vm_size_t esize = zone_elem_inner_size(z);
6735 
6736 	for (uint16_t i = 0; i < zc_mag_size(); i++) {
6737 		vm_offset_t elem = mag->zm_elems[i];
6738 
6739 		zc_free((void *)elem, zone_elem_inner_size(z));
6740 		elem = __zcache_mark_invalid(z, elem,
6741 		    ZFREE_PACK_SIZE(esize, esize));
6742 		mag->zm_elems[i] = elem;
6743 	}
6744 }
6745 
6746 static void
6747 zalloc_cached_recirculate(
6748 	zone_t                  zone,
6749 	zone_cache_t            cache)
6750 {
6751 	zone_magazine_t mag = NULL;
6752 
6753 	zone_recirc_lock_nopreempt_check_contention(zone);
6754 
6755 	if (zone_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6756 		mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6757 		if (zone_cache_smr(cache)) {
6758 			zalloc_cached_reuse_smr(zone, cache, mag);
6759 		}
6760 		mag = zone_magazine_replace(cache, mag, false);
6761 		zone_depot_insert_head_empty(&zone->z_recirc, mag);
6762 	}
6763 
6764 	zone_recirc_unlock_nopreempt(zone);
6765 }
6766 
6767 __attribute__((noinline))
6768 static zone_cache_t
6769 zalloc_cached_prime(
6770 	zone_t                  zone,
6771 	zone_cache_ops_t        ops,
6772 	zalloc_flags_t          flags,
6773 	zone_cache_t            cache)
6774 {
6775 	zone_magazine_t mag = NULL;
6776 	uint32_t depot_max;
6777 	smr_t smr;
6778 
6779 	depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6780 	if (depot_max) {
6781 		smr = zone_cache_smr(cache);
6782 
6783 		zone_depot_lock_nopreempt(cache);
6784 
6785 		if (!zone_depot_poll(&cache->zc_depot, smr)) {
6786 			zalloc_cached_depot_recirculate(zone, depot_max, cache,
6787 			    smr);
6788 		}
6789 
6790 		if (__probable(cache->zc_depot.zd_full)) {
6791 			mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6792 			if (zone_cache_smr(cache)) {
6793 				zalloc_cached_reuse_smr(zone, cache, mag);
6794 			}
6795 			mag = zone_magazine_replace(cache, mag, false);
6796 			zone_depot_insert_head_empty(&cache->zc_depot, mag);
6797 		}
6798 
6799 		zone_depot_unlock_nopreempt(cache);
6800 	} else if (zone->z_recirc.zd_full) {
6801 		zalloc_cached_recirculate(zone, cache);
6802 	}
6803 
6804 	if (__probable(cache->zc_alloc_cur)) {
6805 		return cache;
6806 	}
6807 
6808 	if (ops == NULL) {
6809 		zalloc_cached_import(zone, flags, cache);
6810 		if (__probable(cache->zc_alloc_cur)) {
6811 			return cache;
6812 		}
6813 	}
6814 
6815 	return NULL;
6816 }
6817 
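/*
 * Per-CPU cache lookup for the allocation path: use the allocation
 * magazine if it still holds elements, otherwise swap in the free
 * magazine (never done for SMR zones), and as a last resort prime the
 * cache from the depots.
 */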
6818 __attribute__((always_inline))
6819 static inline zone_cache_t
6820 zalloc_cached_get_pcpu_cache(
6821 	zone_t                  zone,
6822 	zone_cache_ops_t        ops,
6823 	int                     cpu,
6824 	zalloc_flags_t          flags)
6825 {
6826 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6827 
6828 	if (__probable(cache->zc_alloc_cur != 0)) {
6829 		return cache;
6830 	}
6831 
6832 	if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6833 		zone_cache_swap_magazines(cache);
6834 		return cache;
6835 	}
6836 
6837 	return zalloc_cached_prime(zone, ops, flags, cache);
6838 }
6839 
6840 
6841 /*!
6842  * @function zalloc_ext
6843  *
6844  * @brief
6845  * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6846  */
6847 struct kalloc_result
6848 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6849 {
6850 	/*
6851 	 * KASan uses zalloc() for fakestack, which can be called anywhere.
6852 	 * However, we make sure these calls can never block.
6853 	 */
6854 	assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6855 #if KASAN_FAKESTACK
6856 	    zone->z_kasan_fakestacks ||
6857 #endif /* KASAN_FAKESTACK */
6858 	    ml_get_interrupts_enabled() ||
6859 	    ml_is_quiescing() ||
6860 	    debug_mode_active(),
6861 	    "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6862 
6863 	/*
6864 	 * Make sure Z_NOFAIL was not obviously misused
6865 	 */
6866 	if (flags & Z_NOFAIL) {
6867 		assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6868 	}
6869 
6870 #if VM_TAG_SIZECLASSES
6871 	if (__improbable(zone->z_uses_tags)) {
6872 		vm_tag_t tag = zalloc_flags_get_tag(flags);
6873 
6874 		if (flags & Z_VM_TAG_BT_BIT) {
6875 			tag = vm_tag_bt() ?: tag;
6876 		}
6877 		if (tag != VM_KERN_MEMORY_NONE) {
6878 			tag = vm_tag_will_update_zone(tag,
6879 			    flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6880 		}
6881 		if (tag == VM_KERN_MEMORY_NONE) {
6882 			zone_security_flags_t zsflags = zone_security_config(zone);
6883 
6884 			if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6885 				tag = VM_KERN_MEMORY_KALLOC_DATA;
6886 			} else if (zsflags.z_kheap_id == KHEAP_ID_DATA_SHARED) {
6887 				tag = VM_KERN_MEMORY_KALLOC_SHARED;
6888 			} else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6889 			    zsflags.z_kalloc_type) {
6890 				tag = VM_KERN_MEMORY_KALLOC_TYPE;
6891 			} else {
6892 				tag = VM_KERN_MEMORY_KALLOC;
6893 			}
6894 		}
6895 		flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6896 	}
6897 #endif /* VM_TAG_SIZECLASSES */
6898 
6899 	disable_preemption();
6900 
6901 #if ZALLOC_ENABLE_ZERO_CHECK
6902 	if (zalloc_skip_zero_check()) {
6903 		flags |= Z_NOZZC;
6904 	}
6905 #endif
6906 
6907 	if (zone->z_pcpu_cache) {
6908 		zone_cache_t cache;
6909 		vm_offset_t index, addr, esize;
6910 		int cpu = cpu_number();
6911 
6912 		cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6913 		if (__probable(cache)) {
6914 			esize = zone_elem_inner_size(zone);
6915 			zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6916 			index = --cache->zc_alloc_cur;
6917 			addr  = cache->zc_alloc_elems[index];
6918 			cache->zc_alloc_elems[index] = 0;
6919 			enable_preemption();
6920 			return zalloc_return(zone, addr, flags, esize);
6921 		}
6922 	}
6923 
6924 	__attribute__((musttail))
6925 	return zalloc_item(zone, zstats, flags);
6926 }
6927 
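/*
 * Pop up to @c n elements from the per-CPU allocation magazine, mark them
 * valid (through the zcache ops when provided, through the zone layer
 * otherwise) and push them onto the element stack being built.
 */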
6928 __attribute__((always_inline))
6929 static inline zstack_t
6930 zcache_alloc_stack_from_cpu(
6931 	zone_id_t               zid,
6932 	zone_cache_t            cache,
6933 	zstack_t                stack,
6934 	uint32_t                n,
6935 	zone_cache_ops_t        ops)
6936 {
6937 	vm_offset_t *p;
6938 
6939 	n = MIN(n, cache->zc_alloc_cur);
6940 	p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6941 	cache->zc_alloc_cur -= n;
6942 	stack.z_count += n;
6943 
6944 	do {
6945 		vm_offset_t e = *--p;
6946 
6947 		*p = 0;
6948 		if (ops) {
6949 			e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6950 		} else {
6951 			e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6952 		}
6953 		zstack_push_no_delta(&stack, (void *)e);
6954 	} while (--n > 0);
6955 
6956 	return stack;
6957 }
6958 
6959 __attribute__((noinline))
6960 static zstack_t
6961 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6962 {
6963 	zone_t zone = zone_by_id(zid);
6964 	zone_stats_t zstats = zone->z_stats;
6965 	int cpu;
6966 
6967 	count -= stack.z_count;
6968 
6969 	disable_preemption();
6970 	cpu = cpu_number();
6971 	zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6972 	    count * zone_elem_inner_size(zone);
6973 	zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6974 	enable_preemption();
6975 
6976 	return stack;
6977 }
6978 
6979 #define ZCACHE_ALLOC_RETRY  ((void *)-1)
6980 
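/*
 * Out-of-line single-element allocation for zcaches: ask the zcache ops to
 * allocate with Z_NOFAIL cleared first; if that fails and Z_NOFAIL was
 * requested, retry from the per-CPU cache under the zone lock, possibly
 * waiting in __ZONE_EXHAUSTED_AND_WAITING_HARD__().  ZCACHE_ALLOC_RETRY
 * tells the caller to loop and try again.
 */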
6981 __attribute__((noinline))
6982 static void *
6983 zcache_alloc_one(
6984 	zone_id_t               zid,
6985 	zalloc_flags_t          flags,
6986 	zone_cache_ops_t        ops)
6987 {
6988 	zone_t zone = zone_by_id(zid);
6989 	void *o;
6990 
6991 	/*
6992 	 * First try to allocate in rudimentary zones without ever going into
6993 	 * __ZONE_EXHAUSTED_AND_WAITING_HARD__() by clearing Z_NOFAIL.
6994 	 */
6995 	enable_preemption();
6996 	o = ops->zc_op_alloc(zid, flags & ~Z_NOFAIL);
6997 	if (__probable(o)) {
6998 		os_atomic_inc(&zone->z_elems_avail, relaxed);
6999 	} else if (__probable(flags & Z_NOFAIL)) {
7000 		zone_cache_t cache;
7001 		vm_offset_t index;
7002 		int cpu;
7003 
7004 		zone_lock(zone);
7005 
7006 		cpu   = cpu_number();
7007 		cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
7008 		o     = ZCACHE_ALLOC_RETRY;
7009 		if (__probable(cache)) {
7010 			index = --cache->zc_alloc_cur;
7011 			o     = (void *)cache->zc_alloc_elems[index];
7012 			cache->zc_alloc_elems[index] = 0;
7013 			o = ops->zc_op_mark_valid(zid, o);
7014 		} else if (zone->z_elems_free == 0) {
7015 			__ZONE_EXHAUSTED_AND_WAITING_HARD__(zone);
7016 		}
7017 
7018 		zone_unlock(zone);
7019 	}
7020 
7021 	return o;
7022 }
7023 
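/*
 * Common implementation behind zalloc_n() and zcache_alloc_n(): repeatedly
 * drain the per-CPU caches into the element stack, falling back to
 * zcache_alloc_one() or zalloc_item() for the remainder, until @c count
 * elements have been gathered or an allocation fails.
 */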
7024 __attribute__((always_inline))
7025 static zstack_t
7026 zcache_alloc_n_ext(
7027 	zone_id_t               zid,
7028 	uint32_t                count,
7029 	zalloc_flags_t          flags,
7030 	zone_cache_ops_t        ops)
7031 {
7032 	zstack_t stack = { };
7033 	zone_cache_t cache;
7034 	zone_t zone;
7035 	int cpu;
7036 
7037 	disable_preemption();
7038 	cpu  = cpu_number();
7039 	zone = zone_by_id(zid);
7040 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
7041 	    count * zone_elem_inner_size(zone);
7042 
7043 	for (;;) {
7044 		cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
7045 		if (__probable(cache)) {
7046 			stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
7047 			    count - stack.z_count, ops);
7048 			enable_preemption();
7049 		} else {
7050 			void *o;
7051 
7052 			if (ops) {
7053 				o = zcache_alloc_one(zid, flags, ops);
7054 			} else {
7055 				o = zalloc_item(zone, zone->z_stats, flags).addr;
7056 			}
7057 			if (__improbable(o == NULL)) {
7058 				return zcache_alloc_fail(zid, stack, count);
7059 			}
7060 			if (ops == NULL || o != ZCACHE_ALLOC_RETRY) {
7061 				zstack_push(&stack, o);
7062 			}
7063 		}
7064 
7065 		if (stack.z_count == count) {
7066 			break;
7067 		}
7068 
7069 		disable_preemption();
7070 		cpu = cpu_number();
7071 	}
7072 
7073 	ZALLOC_LOG(zone, stack.z_head, stack.z_count);
7074 
7075 	return stack;
7076 }
7077 
7078 zstack_t
7079 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
7080 {
7081 	return zcache_alloc_n_ext(zid, count, flags, NULL);
7082 }
7083 
7084 zstack_t
7085 (zcache_alloc_n)(
7086 	zone_id_t               zid,
7087 	uint32_t                count,
7088 	zalloc_flags_t          flags,
7089 	zone_cache_ops_t        ops)
7090 {
7091 	__builtin_assume(ops != NULL);
7092 	return zcache_alloc_n_ext(zid, count, flags, ops);
7093 }
7094 
7095 __attribute__((always_inline))
7096 void *
7097 zalloc(zone_t zov)
7098 {
7099 	return zalloc_flags(zov, Z_WAITOK);
7100 }
7101 
7102 __attribute__((always_inline))
7103 void *
7104 zalloc_noblock(zone_t zov)
7105 {
7106 	return zalloc_flags(zov, Z_NOWAIT);
7107 }
7108 
7109 void *
7110 (zalloc_flags)(zone_t zov, zalloc_flags_t flags)
7111 {
7112 	zone_t zone = zov->z_self;
7113 	zone_stats_t zstats = zov->z_stats;
7114 
7115 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7116 	assert(!zone->z_percpu && !zone->z_permanent);
7117 	return zalloc_ext(zone, zstats, flags).addr;
7118 }
7119 
7120 __attribute__((always_inline))
7121 void *
7122 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
7123 {
7124 	return (zalloc_flags)(zone_by_id(zid), flags);
7125 }
7126 
7127 void *
7128 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
7129 {
7130 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7131 	zone_t zone = zone_by_id(zid);
7132 	zone_stats_t zstats = zone->z_stats;
7133 	struct kalloc_result kr;
7134 
7135 	kr = zalloc_ext(zone, zstats, flags);
7136 #if ZSECURITY_CONFIG(READ_ONLY)
7137 	assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
7138 	if (kr.addr) {
7139 		zone_require_ro(zid, kr.size, kr.addr);
7140 	}
7141 #endif
7142 	return kr.addr;
7143 }
7144 
7145 #if ZSECURITY_CONFIG(READ_ONLY)
7146 
7147 __attribute__((always_inline))
7148 static bool
7149 from_current_stack(vm_offset_t addr, vm_size_t size)
7150 {
7151 	vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
7152 	vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
7153 
7154 	addr = vm_memtag_canonicalize_kernel(addr);
7155 
7156 	return (addr >= start) && (addr + size < end);
7157 }
7158 
7159 /*
7160  * Check if an address is from const memory, i.e. the TEXT or DATA CONST segments,
7161  * or the SECURITY_READ_ONLY_LATE section.
7162  */
7163 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7164 __attribute__((always_inline))
7165 static bool
7166 from_const_memory(const vm_offset_t addr, vm_size_t size)
7167 {
7168 	return rorgn_contains(addr, size, true);
7169 }
7170 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7171 __attribute__((always_inline))
7172 static bool
7173 from_const_memory(const vm_offset_t addr, vm_size_t size)
7174 {
7175 #pragma unused(addr, size)
7176 	return true;
7177 }
7178 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
7179 
7180 __abortlike
7181 static void
7182 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
7183     const vm_offset_t src, vm_size_t src_size)
7184 {
7185 	vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
7186 	vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
7187 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
7188 	extern vm_offset_t rorgn_begin;
7189 	extern vm_offset_t rorgn_end;
7190 #else
7191 	vm_offset_t const rorgn_begin = 0;
7192 	vm_offset_t const rorgn_end = 0;
7193 #endif
7194 
7195 	if (from_ro_map(src, src_size)) {
7196 		zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
7197 		zone_t dst_zone = &zone_array[zid];
7198 		panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
7199 		    " (expected: %s, actual: %s)", (void *)src, elem, src_zone->z_name,
7200 		    dst_zone->z_name);
7201 	}
7202 
7203 	panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
7204 	    "current stack (%p - %p) or const memory (phys %p - %p)",
7205 	    (void *)src, (void*)kvtophys(src),
7206 	    (void *)zone_info.zi_ro_range.min_address,
7207 	    (void *)zone_info.zi_ro_range.max_address,
7208 	    (void *)stack_start, (void *)stack_end,
7209 	    (void *)rorgn_begin, (void *)rorgn_end);
7210 }
7211 
7212 __attribute__((always_inline))
7213 static void
7214 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
7215     const vm_offset_t src, vm_size_t src_size)
7216 {
7217 	if (from_current_stack(src, src_size) ||
7218 	    (from_ro_map(src, src_size) &&
7219 	    zid == zone_index_from_ptr((void *)src)) ||
7220 	    from_const_memory(src, src_size)) {
7221 		return;
7222 	}
7223 	zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
7224 }
7225 
7226 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7227 
7228 __attribute__((noinline))
7229 void
7230 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
7231     const void *new_data, vm_size_t new_data_size)
7232 {
7233 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7234 
7235 #if ZSECURITY_CONFIG(READ_ONLY)
7236 	bool skip_src_check = false;
7237 
7238 	/*
7239 	 * The OSEntitlements RO-zone is treated a little differently. For more
7240 	 * information, see rdar://100518485.
7241 	 */
7242 	if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
7243 		code_signing_config_t cs_config = 0;
7244 
7245 		code_signing_configuration(NULL, &cs_config);
7246 		if (cs_config & CS_CONFIG_CSM_ENABLED) {
7247 			skip_src_check = true;
7248 		}
7249 	}
7250 
7251 	if (skip_src_check == false) {
7252 		zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
7253 		    new_data_size);
7254 	}
7255 	pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
7256 	    (vm_offset_t) new_data, new_data_size);
7257 #else
7258 	(void)zid;
7259 	memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
7260 #endif
7261 }
7262 
7263 __attribute__((noinline))
7264 uint64_t
7265 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
7266     zro_atomic_op_t op, uint64_t value)
7267 {
7268 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7269 
7270 #if ZSECURITY_CONFIG(READ_ONLY)
7271 	value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
7272 #else
7273 	(void)zid;
7274 	value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
7275 #endif
7276 	return value;
7277 }
7278 
7279 void
7280 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
7281 {
7282 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
7283 #if ZSECURITY_CONFIG(READ_ONLY)
7284 	pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
7285 #else
7286 	(void)zid;
7287 	bzero((void *)((uintptr_t)elem + offset), size);
7288 #endif
7289 }
7290 
7291 /*
7292  * This function will run in the PPL and needs to be robust
7293  * against an attacker with arbitrary kernel write.
7294  */
7295 
7296 #if ZSECURITY_CONFIG(READ_ONLY)
7297 
7298 __abortlike
7299 static void
7300 zone_id_require_ro_panic(zone_id_t zid, void *addr)
7301 {
7302 	struct zone_size_params p = zone_ro_size_params[zid];
7303 	vm_offset_t elem = (vm_offset_t)addr;
7304 	uint32_t zindex;
7305 	zone_t other;
7306 	zone_t zone = &zone_array[zid];
7307 
7308 	if (!from_ro_map(addr, 1)) {
7309 		panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
7310 	}
7311 
7312 	if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
7313 		panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
7314 	}
7315 
7316 	zindex = zone_index_from_ptr(addr);
7317 	other = &zone_array[zindex];
7318 	if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
7319 		panic("zone_require_ro failed: invalid zone index %d "
7320 		    "(addr: %p, expected: %s%s)", zindex,
7321 		    addr, zone_heap_name(zone), zone->z_name);
7322 	} else {
7323 		panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7324 		    "(addr: %p, expected: %s%s)",
7325 		    zindex, zone_heap_name(other), other->z_name,
7326 		    addr, zone_heap_name(zone), zone->z_name);
7327 	}
7328 }
7329 
7330 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7331 
7332 __attribute__((always_inline))
7333 void
7334 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7335 {
7336 #if ZSECURITY_CONFIG(READ_ONLY)
7337 	struct zone_size_params p = zone_ro_size_params[zid];
7338 	vm_offset_t elem = (vm_offset_t)addr;
7339 
7340 	if (!from_ro_map(addr, 1) ||
7341 	    !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7342 	    zid != zone_meta_from_addr(elem)->zm_index) {
7343 		zone_id_require_ro_panic(zid, addr);
7344 	}
7345 #else
7346 #pragma unused(zid, addr)
7347 #endif
7348 }
7349 
7350 void *
7351 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7352 {
7353 	zone_t zone = zov.zov_view->zv_zone;
7354 	zone_stats_t zstats = zov.zov_view->zv_stats;
7355 
7356 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7357 	assert(zone->z_percpu);
7358 	flags |= Z_PCPU;
7359 	return zalloc_ext(zone, zstats, flags).addr;
7360 }
7361 
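/*
 * Permanent allocations are never freed: they are carved out of partially
 * used chunks with a simple bump pointer (zm_bump), growing the zone when
 * no partial page can satisfy the request.
 */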
7362 static void *
7363 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7364 {
7365 	struct zone_page_metadata *page_meta;
7366 	vm_offset_t offs, addr;
7367 	zone_pva_t pva;
7368 
7369 	assert(ml_get_interrupts_enabled() ||
7370 	    ml_is_quiescing() ||
7371 	    debug_mode_active() ||
7372 	    startup_phase < STARTUP_SUB_EARLY_BOOT);
7373 
7374 	size = (size + mask) & ~mask;
7375 	assert(size <= PAGE_SIZE);
7376 
7377 	zone_lock(zone);
7378 	assert(zone->z_self == zone);
7379 
7380 	for (;;) {
7381 		pva = zone->z_pageq_partial;
7382 		while (!zone_pva_is_null(pva)) {
7383 			page_meta = zone_pva_to_meta(pva);
7384 			if (page_meta->zm_bump + size <= PAGE_SIZE) {
7385 				goto found;
7386 			}
7387 			pva = page_meta->zm_page_next;
7388 		}
7389 
7390 		zone_expand_locked(zone, Z_WAITOK);
7391 	}
7392 
7393 found:
7394 	offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7395 	page_meta->zm_bump = (uint16_t)(offs + size);
7396 	page_meta->zm_alloc_size += size;
7397 	zone->z_elems_free -= size;
7398 	zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7399 
7400 	if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7401 		zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7402 	}
7403 
7404 	zone_unlock(zone);
7405 
7406 	if (zone->z_tbi_tag) {
7407 		addr = vm_memtag_load_tag(offs + zone_pva_to_addr(pva));
7408 	} else {
7409 		addr = offs + zone_pva_to_addr(pva);
7410 	}
7411 
7412 	DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7413 	return (void *)addr;
7414 }
7415 
7416 static void *
7417 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7418 {
7419 	vm_offset_t addr;
7420 
7421 	kernel_memory_allocate(kernel_map, &addr, size, mask,
7422 	    KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7423 
7424 	return (void *)addr;
7425 }
7426 
7427 void *
7428 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7429 {
7430 	if (size <= PAGE_SIZE) {
7431 		zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7432 		return _zalloc_permanent(zone, size, mask);
7433 	}
7434 	return _zalloc_permanent_large(size, mask, tag);
7435 }
7436 
7437 void *
7438 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7439 {
7440 	zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7441 	return _zalloc_permanent(zone, size, mask);
7442 }
7443 
7444 /*! @} */
7445 #endif /* !ZALLOC_TEST */
7446 #pragma mark zone GC / trimming
7447 #if !ZALLOC_TEST
7448 
7449 static thread_call_data_t zone_trim_callout;
7450 EVENT_DEFINE(ZONE_EXHAUSTED);
7451 
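/*
 * Give one empty chunk back to the system: unlink its metadata, fix up the
 * zone counters, then either depopulate the pages while keeping the VA
 * sequestered on z_pageq_va, or free the whole mapping back to the submap.
 * The zone lock is dropped around the expensive kernel memory operations.
 */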
7452 static void
7453 zone_reclaim_chunk(
7454 	zone_t                  z,
7455 	struct zone_page_metadata *meta,
7456 	uint32_t                free_count)
7457 {
7458 	vm_address_t page_addr;
7459 	vm_size_t    size_to_free;
7460 	uint32_t     bitmap_ref;
7461 	uint32_t     page_count;
7462 	zone_security_flags_t zsflags = zone_security_config(z);
7463 	bool         sequester = !z->z_destroyed;
7464 	bool         oob_guard = false;
7465 
7466 	if (zone_submap_is_sequestered(zsflags)) {
7467 		/*
7468 		 * If the entire map is sequestered, we can't return the VA.
7469 		 * It stays pinned to the zone forever.
7470 		 */
7471 		sequester = true;
7472 	}
7473 
7474 	zone_meta_queue_pop(z, &z->z_pageq_empty);
7475 
7476 	page_addr  = zone_meta_to_addr(meta);
7477 	page_count = meta->zm_chunk_len;
7478 	oob_guard  = meta->zm_guarded;
7479 
7480 	if (meta->zm_alloc_size) {
7481 		zone_metadata_corruption(z, meta, "alloc_size");
7482 	}
7483 	if (z->z_percpu) {
7484 		if (page_count != 1) {
7485 			zone_metadata_corruption(z, meta, "page_count");
7486 		}
7487 		size_to_free = ptoa(z->z_chunk_pages);
7488 		zone_remove_wired_pages(z, z->z_chunk_pages);
7489 	} else {
7490 		if (page_count > z->z_chunk_pages) {
7491 			zone_metadata_corruption(z, meta, "page_count");
7492 		}
7493 		if (page_count < z->z_chunk_pages) {
7494 			/* Dequeue non populated VA from z_pageq_va */
7495 			zone_meta_remqueue(z, meta + page_count);
7496 		}
7497 		size_to_free = ptoa(page_count);
7498 		zone_remove_wired_pages(z, page_count);
7499 	}
7500 
7501 	zone_counter_sub(z, z_elems_free, free_count);
7502 	zone_counter_sub(z, z_elems_avail, free_count);
7503 	zone_counter_sub(z, z_wired_empty, page_count);
7504 	zone_counter_sub(z, z_wired_cur, page_count);
7505 
7506 	if (z->z_pcpu_cache == NULL) {
7507 		if (z->z_elems_free_min < free_count) {
7508 			z->z_elems_free_min = 0;
7509 		} else {
7510 			z->z_elems_free_min -= free_count;
7511 		}
7512 	}
7513 	if (z->z_elems_free_wma < free_count) {
7514 		z->z_elems_free_wma = 0;
7515 	} else {
7516 		z->z_elems_free_wma -= free_count;
7517 	}
7518 
7519 	bitmap_ref = 0;
7520 	if (sequester) {
7521 		if (meta->zm_inline_bitmap) {
7522 			for (int i = 0; i < meta->zm_chunk_len; i++) {
7523 				meta[i].zm_bitmap = 0;
7524 			}
7525 		} else {
7526 			bitmap_ref = meta->zm_bitmap;
7527 			meta->zm_bitmap = 0;
7528 		}
7529 		meta->zm_chunk_len = 0;
7530 	} else {
7531 		if (!meta->zm_inline_bitmap) {
7532 			bitmap_ref = meta->zm_bitmap;
7533 		}
7534 		zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7535 		bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7536 	}
7537 
7538 #if CONFIG_ZLEAKS
7539 	if (__improbable(zleak_should_disable_for_zone(z) &&
7540 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7541 		thread_call_enter(&zone_leaks_callout);
7542 	}
7543 #endif /* CONFIG_ZLEAKS */
7544 
7545 	zone_unlock(z);
7546 
7547 	if (bitmap_ref) {
7548 		zone_bits_free(bitmap_ref);
7549 	}
7550 
7551 	/* Free the pages for metadata and account for them */
7552 #if KASAN_CLASSIC
7553 	if (z->z_percpu) {
7554 		for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7555 			kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7556 			    zone_elem_outer_size(z),
7557 			    zone_elem_outer_offs(z),
7558 			    zone_elem_redzone(z));
7559 		}
7560 	} else {
7561 		kasan_zmem_remove(page_addr, size_to_free,
7562 		    zone_elem_outer_size(z),
7563 		    zone_elem_outer_offs(z),
7564 		    zone_elem_redzone(z));
7565 	}
7566 #endif /* KASAN_CLASSIC */
7567 
7568 	if (sequester) {
7569 		kma_flags_t flags = zone_kma_flags(z, zsflags, 0) | KMA_KOBJECT;
7570 		kernel_memory_depopulate(page_addr, size_to_free,
7571 		    flags, VM_KERN_MEMORY_ZONE);
7572 	} else {
7573 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7574 		kmem_free(zone_submap(zsflags), page_addr,
7575 		    ptoa(z->z_chunk_pages + oob_guard));
7576 		if (oob_guard) {
7577 			os_atomic_dec(&zone_guard_pages, relaxed);
7578 		}
7579 	}
7580 
7581 	thread_yield_to_preemption();
7582 
7583 	zone_lock(z);
7584 
7585 	if (sequester) {
7586 		zone_meta_queue_push(z, &z->z_pageq_va, meta);
7587 	}
7588 }
7589 
7590 static void
7591 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7592 {
7593 	z_debug_assert(n <= zc_mag_size());
7594 
7595 	for (uint16_t i = 0; i < n; i++) {
7596 		vm_offset_t addr = elems[i];
7597 		elems[i] = 0;
7598 		zfree_drop(z, addr);
7599 	}
7600 
7601 	z->z_elems_free += n;
7602 }
7603 
7604 static void
7605 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7606 {
7607 	z_debug_assert(n <= zc_mag_size());
7608 	zone_cache_ops_t ops = zcache_ops[zid];
7609 
7610 	for (uint16_t i = 0; i < n; i++) {
7611 		vm_offset_t addr = elems[i];
7612 		elems[i] = 0;
7613 		addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7614 		ops->zc_op_free(zid, (void *)addr);
7615 	}
7616 
7617 	os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7618 }
7619 
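/*
 * Move the per-CPU depot magazines in excess of @c target (split between
 * full and empty magazines) into the caller-supplied depot so they can be
 * reclaimed outside the per-CPU locks.
 */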
7620 static void
7621 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7622 {
7623 	zpercpu_foreach(zc, z->z_pcpu_cache) {
7624 		zone_depot_lock(zc);
7625 
7626 		if (zc->zc_depot.zd_full > (target + 1) / 2) {
7627 			uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7628 			zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7629 		}
7630 
7631 		if (zc->zc_depot.zd_empty > target / 2) {
7632 			uint32_t n = zc->zc_depot.zd_empty - target / 2;
7633 			zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7634 		}
7635 
7636 		zone_depot_unlock(zc);
7637 	}
7638 }
7639 
7640 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7641 	ZONE_RECLAIM_TRIM,
7642 	ZONE_RECLAIM_DRAIN,
7643 	ZONE_RECLAIM_DESTROY,
7644 });
7645 
7646 static void
7647 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7648 {
7649 	uint32_t depot_max = 0;
7650 	bool cleanup = mode != ZONE_RECLAIM_TRIM;
7651 
7652 	if (z->z_depot_cleanup) {
7653 		z->z_depot_cleanup = false;
7654 		depot_max = z->z_depot_size;
7655 		cleanup = true;
7656 	}
7657 
7658 	if (cleanup) {
7659 		zone_depot_trim(z, depot_max, zd);
7660 	}
7661 
7662 	if (mode == ZONE_RECLAIM_DESTROY) {
7663 		zpercpu_foreach(zc, z->z_pcpu_cache) {
7664 			zone_reclaim_elements(z, zc->zc_alloc_cur,
7665 			    zc->zc_alloc_elems);
7666 			zone_reclaim_elements(z, zc->zc_free_cur,
7667 			    zc->zc_free_elems);
7668 			zc->zc_alloc_cur = zc->zc_free_cur = 0;
7669 		}
7670 
7671 		z->z_recirc_empty_min = 0;
7672 		z->z_recirc_empty_wma = 0;
7673 		z->z_recirc_full_min = 0;
7674 		z->z_recirc_full_wma = 0;
7675 		z->z_recirc_cont_cur = 0;
7676 		z->z_recirc_cont_wma = 0;
7677 	}
7678 }
7679 
7680 static void
7681 zone_reclaim_recirc_drain(zone_t z, struct zone_depot *zd)
7682 {
7683 	assert(zd->zd_empty == 0);
7684 	assert(zd->zd_full == 0);
7685 
7686 	zone_recirc_lock_nopreempt(z);
7687 
7688 	*zd = z->z_recirc;
7689 	if (zd->zd_full == 0) {
7690 		zd->zd_tail = &zd->zd_head;
7691 	}
7692 	zone_depot_init(&z->z_recirc);
7693 	z->z_recirc_empty_min = 0;
7694 	z->z_recirc_empty_wma = 0;
7695 	z->z_recirc_full_min = 0;
7696 	z->z_recirc_full_wma = 0;
7697 
7698 	zone_recirc_unlock_nopreempt(z);
7699 }
7700 
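/*
 * Trim the recirculation depot down to its measured working set, moving at
 * most zc_free_batch_size() magazines per pass so that the zone lock can
 * periodically be dropped to let preemption occur.
 */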
7701 static void
7702 zone_reclaim_recirc_trim(zone_t z, struct zone_depot *zd)
7703 {
7704 	for (;;) {
7705 		uint32_t budget = zc_free_batch_size();
7706 		uint32_t count;
7707 		bool done = true;
7708 
7709 		zone_recirc_lock_nopreempt(z);
7710 		count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7711 		    z->z_recirc_empty_min);
7712 		assert(count <= z->z_recirc.zd_empty);
7713 
7714 		if (count > budget) {
7715 			count = budget;
7716 			done  = false;
7717 		}
7718 		if (count) {
7719 			budget -= count;
7720 			zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7721 			z->z_recirc_empty_min -= count;
7722 			z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7723 		}
7724 
7725 		count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT,
7726 		    z->z_recirc_full_min);
7727 		assert(count <= z->z_recirc.zd_full);
7728 
7729 		if (count > budget) {
7730 			count = budget;
7731 			done  = false;
7732 		}
7733 		if (count) {
7734 			zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7735 			z->z_recirc_full_min -= count;
7736 			z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7737 		}
7738 
7739 		zone_recirc_unlock_nopreempt(z);
7740 
7741 		if (done) {
7742 			return;
7743 		}
7744 
7745 		/*
7746 		 * If the number of magazines to reclaim is too large,
7747 		 * we might be keeping preemption disabled for too long.
7748 		 *
7749 		 * Drop and retake the lock to allow for preemption to occur.
7750 		 */
7751 		zone_unlock(z);
7752 		zone_lock(z);
7753 	}
7754 }
7755 
7756 /*!
7757  * @function zone_reclaim
7758  *
7759  * @brief
7760  * Drains or trims the zone.
7761  *
7762  * @discussion
7763  * Draining the zone frees all of its elements.
7764  *
7765  * Trimming the zone tries to respect the working set size, and avoids draining
7766  * the depot when it's not necessary.
7767  *
7768  * @param z             The zone to reclaim from
7769  * @param mode          The purpose of this reclaim.
7770  */
7771 static void
7772 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7773 {
7774 	struct zone_depot zd;
7775 
7776 	zone_depot_init(&zd);
7777 
7778 	zone_lock(z);
7779 
7780 	if (mode == ZONE_RECLAIM_DESTROY) {
7781 		if (!z->z_destructible || z->z_elems_rsv) {
7782 			panic("zdestroy: Zone %s%s isn't destructible",
7783 			    zone_heap_name(z), z->z_name);
7784 		}
7785 
7786 		if (!z->z_self || z->z_expander ||
7787 		    z->z_async_refilling || z->z_expanding_wait) {
7788 			panic("zdestroy: Zone %s%s in an invalid state for destruction",
7789 			    zone_heap_name(z), z->z_name);
7790 		}
7791 
7792 #if !KASAN_CLASSIC
7793 		/*
7794 		 * Unset the valid bit. We'll hit an assert failure on further
7795 		 * operations on this zone, until zinit() is called again.
7796 		 *
7797 		 * Leave the zone valid for KASan as we will see zfree's on
7798 		 * quarantined free elements even after the zone is destroyed.
7799 		 */
7800 		z->z_self = NULL;
7801 #endif
7802 		z->z_destroyed = true;
7803 	} else if (z->z_destroyed) {
7804 		return zone_unlock(z);
7805 	} else if (zone_count_free(z) <= z->z_elems_rsv) {
7806 		/* If the zone is under its reserve level, leave it alone. */
7807 		return zone_unlock(z);
7808 	}
7809 
7810 	if (z->z_pcpu_cache) {
7811 		zone_magazine_t mag;
7812 		uint32_t freed = 0;
7813 
7814 		/*
7815 		 * This is all done with the zone lock held on purpose.
7816 		 * The work here is O(ncpu), which should still be short.
7817 		 *
7818 		 * We need to keep the lock held until we have reclaimed
7819 		 * at least a few magazines, otherwise if the zone has no
7820 		 * free elements outside of the depot, a thread performing
7821 		 * a concurrent allocation could try to grow the zone
7822 		 * while we're trying to drain it.
7823 		 */
7824 		if (mode == ZONE_RECLAIM_TRIM) {
7825 			zone_reclaim_recirc_trim(z, &zd);
7826 		} else {
7827 			zone_reclaim_recirc_drain(z, &zd);
7828 		}
7829 		zone_reclaim_pcpu(z, mode, &zd);
7830 
7831 		if (z->z_chunk_elems) {
7832 			zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7833 			smr_t smr = zone_cache_smr(cache);
7834 
7835 			while (zd.zd_full) {
7836 				mag = zone_depot_pop_head_full(&zd, NULL);
7837 				if (smr) {
7838 					smr_wait(smr, mag->zm_seq);
7839 					zalloc_cached_reuse_smr(z, cache, mag);
7840 					freed += zc_mag_size();
7841 				}
7842 				zone_reclaim_elements(z, zc_mag_size(),
7843 				    mag->zm_elems);
7844 				zone_depot_insert_head_empty(&zd, mag);
7845 
7846 				freed += zc_mag_size();
7847 				if (freed >= zc_free_batch_size()) {
7848 					zone_unlock(z);
7849 					zone_magazine_free_list(&zd);
7850 					thread_yield_to_preemption();
7851 					zone_lock(z);
7852 					freed = 0;
7853 				}
7854 			}
7855 		} else {
7856 			zone_id_t zid = zone_index(z);
7857 
7858 			zone_unlock(z);
7859 
7860 			assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7861 
7862 			while (zd.zd_full) {
7863 				mag = zone_depot_pop_head_full(&zd, NULL);
7864 				zcache_reclaim_elements(zid, zc_mag_size(),
7865 				    mag->zm_elems);
7866 				zone_magazine_free(mag);
7867 			}
7868 
7869 			goto cleanup;
7870 		}
7871 	}
7872 
7873 	while (!zone_pva_is_null(z->z_pageq_empty)) {
7874 		struct zone_page_metadata *meta;
7875 		uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7876 
7877 		if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7878 			limit = MAX(limit, z->z_elems_free -
7879 			    MIN(z->z_elems_free_min, z->z_elems_free_wma / Z_WMA_UNIT));
7880 		}
7881 
7882 		meta  = zone_pva_to_meta(z->z_pageq_empty);
7883 		count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7884 
7885 		if (zone_count_free(z) - count < limit) {
7886 			break;
7887 		}
7888 
7889 		zone_reclaim_chunk(z, meta, count);
7890 	}
7891 
7892 	zone_unlock(z);
7893 
7894 cleanup:
7895 	zone_magazine_free_list(&zd);
7896 }
7897 
7898 void
7899 zone_drain(zone_t zone)
7900 {
7901 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7902 	lck_mtx_lock(&zone_gc_lock);
7903 	zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7904 	lck_mtx_unlock(&zone_gc_lock);
7905 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7906 }
7907 
7908 void
7909 zcache_drain(zone_id_t zid)
7910 {
7911 	zone_drain(zone_by_id(zid));
7912 }
7913 
7914 static void
7915 zone_reclaim_all(zone_reclaim_mode_t mode)
7916 {
7917 	/*
7918 	 * Start with zcaches, so that they flow into the regular zones.
7919 	 *
7920 	 * Then reclaim the zones whose VA is sequestered: depopulating
7921 	 * their pages does not need to allocate vm map entries for holes,
7922 	 * which gives memory back to the system faster.
7923 	 */
7924 	for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7925 		zone_t z = zone_by_id(zid);
7926 
7927 		if (z->z_self && z->z_chunk_elems == 0) {
7928 			zone_reclaim(z, mode);
7929 		}
7930 	}
7931 	zone_index_foreach(zid) {
7932 		zone_t z = zone_by_id(zid);
7933 
7934 		if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7935 			continue;
7936 		}
7937 		if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7938 		    z->collectable) {
7939 			zone_reclaim(z, mode);
7940 		}
7941 	}
7942 
7943 	zone_index_foreach(zid) {
7944 		zone_t z = zone_by_id(zid);
7945 
7946 		if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7947 			continue;
7948 		}
7949 		if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7950 		    z->collectable) {
7951 			zone_reclaim(z, mode);
7952 		}
7953 	}
7954 
7955 	zone_reclaim(zc_magazine_zone, mode);
7956 }
7957 
7958 void
7959 zone_userspace_reboot_checks(void)
7960 {
7961 	vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7962 	if (label_zone_size != 0) {
7963 		panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7964 		    ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7965 	}
7966 }
7967 
7968 void
7969 zone_gc(zone_gc_level_t level)
7970 {
7971 	zone_reclaim_mode_t mode;
7972 	zone_t largest_zone = NULL;
7973 
7974 	switch (level) {
7975 	case ZONE_GC_TRIM:
7976 		mode = ZONE_RECLAIM_TRIM;
7977 		break;
7978 	case ZONE_GC_DRAIN:
7979 		mode = ZONE_RECLAIM_DRAIN;
7980 		break;
7981 	case ZONE_GC_JETSAM:
7982 		largest_zone = kill_process_in_largest_zone();
7983 		mode = ZONE_RECLAIM_TRIM;
7984 		break;
7985 	}
7986 
7987 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7988 	lck_mtx_lock(&zone_gc_lock);
7989 
7990 	zone_reclaim_all(mode);
7991 
7992 	if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7993 		/*
7994 		 * If we possibly killed a process, but we're still critical,
7995 		 * we need to drain harder.
7996 		 */
7997 		zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7998 		zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7999 	}
8000 
8001 	lck_mtx_unlock(&zone_gc_lock);
8002 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
8003 }
8004 
8005 void
8006 zone_gc_trim(void)
8007 {
8008 	zone_gc(ZONE_GC_TRIM);
8009 }
8010 
8011 void
8012 zone_gc_drain(void)
8013 {
8014 	zone_gc(ZONE_GC_DRAIN);
8015 }
8016 
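/*
 * Decide whether a zone is worth trimming: a depot resize left the per-CPU
 * depots over budget, the recirculation depot holds more idle magazines
 * than the autotrim thresholds allow, or (for uncached zones) enough free
 * elements sit on empty pages to return memory beyond the reserve.
 */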
8017 static bool
8018 zone_trim_needed(zone_t z)
8019 {
8020 	if (z->z_depot_cleanup) {
8021 		return true;
8022 	}
8023 
8024 	if (z->z_async_refilling) {
8025 		/* Don't fight with refill */
8026 		return false;
8027 	}
8028 
8029 	if (z->z_pcpu_cache) {
8030 		uint32_t e_n, f_n;
8031 
8032 		e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
8033 		f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
8034 
8035 		if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
8036 			return true;
8037 		}
8038 
8039 		if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
8040 		    f_n * zc_mag_size() * zone_elem_inner_size(z) >
8041 		    zc_autotrim_size() * Z_WMA_UNIT) {
8042 			return true;
8043 		}
8044 
8045 		return false;
8046 	}
8047 
8048 	if (!zone_pva_is_null(z->z_pageq_empty)) {
8049 		uint32_t n;
8050 
8051 		n = MIN(z->z_elems_free_wma / Z_WMA_UNIT, z->z_elems_free_min);
8052 
8053 		return n >= z->z_elems_rsv + z->z_chunk_elems;
8054 	}
8055 
8056 	return false;
8057 }
8058 
8059 static void
8060 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
8061 {
8062 	current_thread()->options |= TH_OPT_ZONE_PRIV;
8063 
8064 	zone_foreach(z) {
8065 		if (!z->collectable || z == zc_magazine_zone) {
8066 			continue;
8067 		}
8068 
8069 		if (zone_trim_needed(z)) {
8070 			lck_mtx_lock(&zone_gc_lock);
8071 			zone_reclaim(z, ZONE_RECLAIM_TRIM);
8072 			lck_mtx_unlock(&zone_gc_lock);
8073 		}
8074 	}
8075 
8076 	if (zone_trim_needed(zc_magazine_zone)) {
8077 		lck_mtx_lock(&zone_gc_lock);
8078 		zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
8079 		lck_mtx_unlock(&zone_gc_lock);
8080 	}
8081 
8082 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
8083 }
8084 
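/*
 * Periodic working-set housekeeping: fold the current free and depot
 * levels into their weighted moving averages, grow or shrink each zone's
 * per-CPU depot based on the observed contention rate, enable caching on
 * zones that stay contended, and kick the trim callout when needed.
 */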
8085 void
8086 compute_zone_working_set_size(__unused void *param)
8087 {
8088 	uint32_t zc_auto = zc_enable_level();
8089 	bool needs_trim = false;
8090 
8091 	/*
8092 	 * Keep zone caching disabled until the first proc is made.
8093 	 */
8094 	if (__improbable(zone_caching_disabled < 0)) {
8095 		return;
8096 	}
8097 
8098 	zone_caching_disabled = vm_pool_low();
8099 
8100 	if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
8101 		zc_auto = 0;
8102 	}
8103 
8104 	zone_foreach(z) {
8105 		uint32_t old, wma, cur;
8106 		bool needs_caching = false;
8107 
8108 		if (z->z_self != z) {
8109 			continue;
8110 		}
8111 
8112 		zone_lock(z);
8113 
8114 		zone_recirc_lock_nopreempt(z);
8115 
8116 		if (z->z_pcpu_cache) {
8117 			wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
8118 			z->z_recirc_empty_min = z->z_recirc.zd_empty;
8119 			z->z_recirc_empty_wma = wma;
8120 		} else {
8121 			wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
8122 			z->z_elems_free_min = z->z_elems_free;
8123 			z->z_elems_free_wma = wma;
8124 		}
8125 
8126 		wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
8127 		z->z_recirc_full_min = z->z_recirc.zd_full;
8128 		z->z_recirc_full_wma = wma;
8129 
8130 		/* fixed point decimal of contentions per second */
8131 		old = z->z_recirc_cont_wma;
8132 		cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
8133 		    (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
8134 		cur = (3 * old + cur) / 4;
8135 		zone_recirc_unlock_nopreempt(z);
8136 
8137 		if (z->z_pcpu_cache) {
8138 			uint16_t size = z->z_depot_size;
8139 
8140 			if (zone_exhausted(z)) {
8141 				if (z->z_depot_size) {
8142 					z->z_depot_size = 0;
8143 					z->z_depot_cleanup = true;
8144 				}
8145 			} else if (size < z->z_depot_limit && cur > zc_grow_level()) {
8146 				/*
8147 				 * lose history on purpose now
8148 				 * that we just grew, to give
8149 				 * the system time to adjust.
8150 				 */
8151 				cur  = (zc_grow_level() + zc_shrink_level()) / 2;
8152 				size = size ? (3 * size + 2) / 2 : 2;
8153 				z->z_depot_size = MIN(z->z_depot_limit, size);
8154 			} else if (size > 0 && cur <= zc_shrink_level()) {
8155 				/*
8156 				 * lose history on purpose now
8157 				 * that we just shrunk, to give
8158 				 * the system time to adjust.
8159 				 */
8160 				cur = (zc_grow_level() + zc_shrink_level()) / 2;
8161 				z->z_depot_size = size - 1;
8162 				z->z_depot_cleanup = true;
8163 			}
8164 		} else if (!z->z_nocaching && !zone_exhaustible(z) && zc_auto &&
8165 		    old >= zc_auto && cur >= zc_auto) {
8166 			needs_caching = true;
8167 		}
8168 
8169 		z->z_recirc_cont_wma = cur;
8170 		z->z_recirc_cont_cur = 0;
8171 
8172 		if (!needs_trim && zone_trim_needed(z)) {
8173 			needs_trim = true;
8174 		}
8175 
8176 		zone_unlock(z);
8177 
8178 		if (needs_caching) {
8179 			zone_enable_caching(z);
8180 		}
8181 	}
8182 
8183 	if (needs_trim) {
8184 		thread_call_enter(&zone_trim_callout);
8185 	}
8186 }
8187 
8188 #endif /* !ZALLOC_TEST */
8189 #pragma mark vm integration, MIG routines
8190 #if !ZALLOC_TEST
8191 
8192 extern unsigned int stack_total;
8193 #if defined (__x86_64__)
8194 extern unsigned int inuse_ptepages_count;
8195 #endif
8196 
8197 static const char *
8198 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
8199     bool is_kt_var)
8200 {
8201 	if (is_kt_var) {
8202 		next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
8203 		return cur.ktv_var->kt_name;
8204 	} else {
8205 		next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
8206 		return cur.ktv_fixed->kt_zv.zv_name;
8207 	}
8208 }
8209 
8210 static void
8211 panic_print_types_in_zone(zone_t z, const char* debug_str)
8212 {
8213 	kalloc_type_views_t kt_cur = {};
8214 	const char *prev_type = "";
8215 	size_t skip_over_site = sizeof("site.") - 1;
8216 	zone_security_flags_t zsflags = zone_security_config(z);
8217 	bool is_kt_var = false;
8218 
8219 	if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8220 		uint32_t heap_id = KT_VAR_PTR_HEAP0 + ((zone_index(z) -
8221 		    kalloc_type_heap_array[KT_VAR_PTR_HEAP0].kh_zstart) / KHEAP_NUM_ZONES);
8222 		kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
8223 		is_kt_var = true;
8224 	} else {
8225 		kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
8226 	}
8227 
8228 	paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
8229 	    is_kt_var? "type arrays" : "types", debug_str, z->z_name);
8230 
8231 	while (kt_cur.ktv_fixed) {
8232 		kalloc_type_views_t kt_next = {};
8233 		const char *typename = panic_print_get_typename(kt_cur, &kt_next,
8234 		    is_kt_var) + skip_over_site;
8235 		if (strcmp(typename, prev_type) != 0) {
8236 			paniclog_append_noflush("\t%-50s\n", typename);
8237 			prev_type = typename;
8238 		}
8239 		kt_cur = kt_next;
8240 	}
8241 	paniclog_append_noflush("\n");
8242 }
8243 
8244 static void
8245 panic_display_kalloc_types(void)
8246 {
8247 	if (kalloc_type_src_zone) {
8248 		panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
8249 	}
8250 	if (kalloc_type_dst_zone) {
8251 		panic_print_types_in_zone(kalloc_type_dst_zone,
8252 		    "addr is being freed to");
8253 	}
8254 }
8255 
8256 static void
8257 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
8258     uint64_t *zone_size)
8259 {
8260 	zone_index_foreach(zid) {
8261 		zone_t z = &zone_array[zid];
8262 		vm_offset_t size = zone_size_wired(z);
8263 
8264 		if (zid == ZONE_ID_VM_PAGES) {
8265 			continue;
8266 		}
8267 		for (uint32_t i = 0; i < n; i++) {
8268 			if (size > zone_size[i]) {
8269 				largest_zones[i] = z;
8270 				zone_size[i] = size;
8271 				break;
8272 			}
8273 		}
8274 	}
8275 }
8276 
8277 #define NUM_LARGEST_ZONES 5
8278 static void
8279 panic_display_largest_zones(void)
8280 {
8281 	zone_t largest_zones[NUM_LARGEST_ZONES]  = { NULL };
8282 	uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
8283 
8284 	zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
8285 	    (uint64_t *) &largest_size);
8286 
8287 	paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
8288 	    "Zone Name", "Cur Size", "Free Size");
8289 	for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
8290 		zone_t z = largest_zones[i];
8291 		paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
8292 		    zone_heap_name(z), z->z_name,
8293 		    mach_vm_size_pretty(largest_size[i]),
8294 		    mach_vm_size_unit(largest_size[i]),
8295 		    mach_vm_size_pretty(zone_size_free(z)),
8296 		    mach_vm_size_unit(zone_size_free(z)));
8297 	}
8298 }
8299 
8300 static void
8301 panic_display_zprint(void)
8302 {
8303 	panic_display_largest_zones();
8304 	paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
8305 	    (uintptr_t)(kernel_stack_size * stack_total));
8306 #if defined (__x86_64__)
8307 	paniclog_append_noflush("%-20s %10lu\n", "PageTables",
8308 	    (uintptr_t)ptoa(inuse_ptepages_count));
8309 #endif
8310 	paniclog_append_noflush("%-20s %10llu\n", "Kalloc.Large",
8311 	    counter_load(&kalloc_large_total));
8312 
8313 	if (panic_kext_memory_info) {
8314 		mach_memory_info_t *mem_info = panic_kext_memory_info;
8315 
8316 		paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
8317 		for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
8318 			if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
8319 				continue;
8320 			}
8321 			if (mem_info[i].size > (1024 * 1024)) {
8322 				paniclog_append_noflush("%-5lld %10lld\n",
8323 				    mem_info[i].site, mem_info[i].size);
8324 			}
8325 		}
8326 	}
8327 }
8328 
8329 static void
8330 panic_display_zone_info(void)
8331 {
8332 	paniclog_append_noflush("Zone info:\n");
8333 	paniclog_append_noflush("  Zone map: %p - %p\n",
8334 	    (void *)zone_info.zi_map_range.min_address,
8335 	    (void *)zone_info.zi_map_range.max_address);
8336 #if CONFIG_PROB_GZALLOC
8337 	if (pgz_submap) {
8338 		paniclog_append_noflush("  . PGZ   : %p - %p\n",
8339 		    (void *)pgz_submap->min_offset,
8340 		    (void *)pgz_submap->max_offset);
8341 	}
8342 #endif /* CONFIG_PROB_GZALLOC */
8343 	for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8344 		vm_map_t map = zone_submaps[i];
8345 
8346 		if (map == VM_MAP_NULL) {
8347 			continue;
8348 		}
8349 		paniclog_append_noflush("  . %-6s: %p - %p\n",
8350 		    zone_submaps_names[i],
8351 		    (void *)map->min_offset,
8352 		    (void *)map->max_offset);
8353 	}
8354 	paniclog_append_noflush("  Metadata: %p - %p\n"
8355 	    "  Bitmaps : %p - %p\n"
8356 	    "  Extra   : %p - %p\n"
8357 	    "\n",
8358 	    (void *)zone_info.zi_meta_range.min_address,
8359 	    (void *)zone_info.zi_meta_range.max_address,
8360 	    (void *)zone_info.zi_bits_range.min_address,
8361 	    (void *)zone_info.zi_bits_range.max_address,
8362 	    (void *)zone_info.zi_xtra_range.min_address,
8363 	    (void *)zone_info.zi_xtra_range.max_address);
8364 }
8365 
8366 static void
8367 panic_display_zone_fault(vm_offset_t addr)
8368 {
8369 	struct zone_page_metadata meta = { };
8370 	vm_map_t map = VM_MAP_NULL;
8371 	vm_offset_t oob_offs = 0, size = 0;
8372 	int map_idx = -1;
8373 	zone_t z = NULL;
8374 	const char *kind = "wild deref";
8375 	bool oob = false;
8376 
8377 	/*
8378 	 * First: look if we bumped into guard pages between submaps
8379 	 */
8380 	for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8381 		map = zone_submaps[i];
8382 		if (map == VM_MAP_NULL) {
8383 			continue;
8384 		}
8385 
8386 		if (addr >= map->min_offset && addr < map->max_offset) {
8387 			map_idx = i;
8388 			break;
8389 		}
8390 	}
8391 
8392 	if (map_idx == -1) {
8393 		/* this really shouldn't happen, submaps are back to back */
8394 		return;
8395 	}
8396 
8397 	paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8398 
8399 	/*
8400 	 * Second: look if there's just no metadata at all
8401 	 */
8402 	if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8403 	    (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8404 	    meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8405 	    zone_array[meta.zm_index].z_self == NULL) {
8406 		paniclog_append_noflush("  Zone    : <unknown>\n");
8407 		kind = "wild deref, missing or invalid metadata";
8408 	} else {
8409 		z = &zone_array[meta.zm_index];
8410 		paniclog_append_noflush("  Zone    : %s%s\n",
8411 		    zone_heap_name(z), zone_name(z));
8412 		if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8413 			kind = "out-of-bounds (high confidence)";
8414 			oob = true;
8415 			size = zone_element_size((void *)addr,
8416 			    &z, false, &oob_offs);
8417 		} else {
8418 			kind = "use-after-free (medium confidence)";
8419 		}
8420 	}
8421 
8422 	paniclog_append_noflush("  Address : %p\n", (void *)addr);
8423 	if (oob) {
8424 		paniclog_append_noflush("  Element : [%p, %p) of size %d\n",
8425 		    (void *)(trunc_page(addr) - (size - oob_offs)),
8426 		    (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8427 	}
8428 	paniclog_append_noflush("  Submap  : %s [%p; %p)\n",
8429 	    zone_submaps_names[map_idx],
8430 	    (void *)map->min_offset, (void *)map->max_offset);
8431 	paniclog_append_noflush("  Kind    : %s\n", kind);
8432 	if (oob) {
8433 		paniclog_append_noflush("  Access  : %d byte(s) past\n",
8434 		    (uint32_t)(addr & PAGE_MASK) + 1);
8435 	}
8436 	paniclog_append_noflush("  Metadata: zid:%d inl:%d cl:0x%x "
8437 	    "0x%04x 0x%08x 0x%08x 0x%08x\n",
8438 	    meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8439 	    meta.zm_alloc_size, meta.zm_bitmap,
8440 	    meta.zm_page_next.packed_address,
8441 	    meta.zm_page_prev.packed_address);
8442 	paniclog_append_noflush("\n");
8443 }
8444 
8445 void
8446 panic_display_zalloc(void)
8447 {
8448 	bool keepsyms = false;
8449 
8450 	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8451 
8452 	panic_display_zone_info();
8453 
8454 	if (panic_fault_address) {
8455 #if CONFIG_PROB_GZALLOC
8456 		if (pgz_owned(panic_fault_address)) {
8457 			panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8458 		} else
8459 #endif /* CONFIG_PROB_GZALLOC */
8460 		if (zone_maps_owned(panic_fault_address, 1)) {
8461 			panic_display_zone_fault(panic_fault_address);
8462 		}
8463 	}
8464 
8465 	if (panic_include_zprint) {
8466 		panic_display_zprint();
8467 	} else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8468 		panic_display_largest_zones();
8469 	}
8470 #if CONFIG_ZLEAKS
8471 	if (zleak_active) {
8472 		panic_display_zleaks(keepsyms);
8473 	}
8474 #endif
8475 	if (panic_include_kalloc_types) {
8476 		panic_display_kalloc_types();
8477 	}
8478 }
8479 
8480 /*
8481  * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8482  * requesting zone information.
8483  * Frees unused pages towards the end of the region, and zero'es out unused
8484  * space on the last page.
8485  */
8486 static vm_map_copy_t
8487 create_vm_map_copy(
8488 	vm_offset_t             start_addr,
8489 	vm_size_t               total_size,
8490 	vm_size_t               used_size)
8491 {
8492 	kern_return_t   kr;
8493 	vm_offset_t             end_addr;
8494 	vm_size_t               free_size;
8495 	vm_map_copy_t   copy;
8496 
8497 	if (used_size != total_size) {
8498 		end_addr = start_addr + used_size;
8499 		free_size = total_size - (round_page(end_addr) - start_addr);
8500 
8501 		if (free_size >= PAGE_SIZE) {
8502 			kmem_free(ipc_kernel_map,
8503 			    round_page(end_addr), free_size);
8504 		}
8505 		bzero((char *) end_addr, round_page(end_addr) - end_addr);
8506 	}
8507 
8508 	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8509 	    (vm_map_size_t)used_size, TRUE, &copy);
8510 	assert(kr == KERN_SUCCESS);
8511 
8512 	return copy;
8513 }
8514 
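/*
 * Snapshot a zone under its lock and convert its counters into the
 * mach_zone_name_t / mach_zone_info_t representation used by the MIG
 * interfaces; elements sitting in per-CPU caches are reported as free.
 */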
8515 static boolean_t
8516 get_zone_info(
8517 	zone_t                   z,
8518 	mach_zone_name_t        *zn,
8519 	mach_zone_info_t        *zi)
8520 {
8521 	struct zone zcopy;
8522 	vm_size_t cached = 0;
8523 
8524 	assert(z != ZONE_NULL);
8525 	zone_lock(z);
8526 	if (!z->z_self) {
8527 		zone_unlock(z);
8528 		return FALSE;
8529 	}
8530 	zcopy = *z;
8531 	if (z->z_pcpu_cache) {
8532 		zpercpu_foreach(zc, z->z_pcpu_cache) {
8533 			cached += zc->zc_alloc_cur + zc->zc_free_cur;
8534 			cached += zc->zc_depot.zd_full * zc_mag_size();
8535 		}
8536 	}
8537 	zone_unlock(z);
8538 
8539 	if (zn != NULL) {
8540 		/*
8541 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8542 		 */
8543 		char temp_zone_name[MAX_ZONE_NAME] = "";
8544 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8545 		    zone_heap_name(z), z->z_name);
8546 
8547 		/* assuming here the name data is static */
8548 		(void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8549 		    strlen(temp_zone_name) + 1);
8550 	}
8551 
8552 	if (zi != NULL) {
8553 		*zi = (mach_zone_info_t) {
8554 			.mzi_count = zone_count_allocated(&zcopy) - cached,
8555 			.mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8556 			// max_size for zprint is now high-watermark of pages used
8557 			.mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8558 			.mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8559 			.mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8560 			.mzi_exhaustible = (uint64_t)zone_exhaustible(&zcopy),
8561 		};
8562 		if (zcopy.z_chunk_pages == 0) {
8563 			/* this is a zcache */
8564 			zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8565 		}
8566 		zpercpu_foreach(zs, zcopy.z_stats) {
8567 			zi->mzi_sum_size += zs->zs_mem_allocated;
8568 		}
8569 		if (zcopy.collectable) {
8570 			SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8571 			    ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8572 			SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8573 		}
8574 	}
8575 
8576 	return TRUE;
8577 }
8578 
8579 /* mach_memory_info entitlement */
8580 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8581 
8582 /* macro needed to rate-limit mach_memory_info */
8583 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8584 
8585 /* declarations necessary to call kauth_cred_issuser() */
8586 struct ucred;
8587 extern int kauth_cred_issuser(struct ucred *);
8588 extern struct ucred *kauth_cred_get(void);
8589 
8590 static kern_return_t
8591 mach_memory_info_internal(
8592 	host_t                  host,
8593 	mach_zone_name_array_t  *namesp,
8594 	mach_msg_type_number_t  *namesCntp,
8595 	mach_zone_info_array_t  *infop,
8596 	mach_msg_type_number_t  *infoCntp,
8597 	mach_memory_info_array_t *memoryInfop,
8598 	mach_msg_type_number_t   *memoryInfoCntp,
8599 	bool                     redact_info);
8600 
8601 static kern_return_t
8602 mach_memory_info_security_check(bool redact_info)
8603 {
8604 	/* If not root, only allow redacted calls. */
8605 	if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8606 		return KERN_NO_ACCESS;
8607 	}
8608 
8609 	if (PE_srd_fused) {
8610 		return KERN_SUCCESS;
8611 	}
8612 
8613 	/* If the caller does not have the memory entitlement, fail. */
8614 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8615 	task_t task = current_task();
8616 	if (task != kernel_task && !IOTaskHasEntitlement(task, MEMORYINFO_ENTITLEMENT)) {
8617 		return KERN_DENIED;
8618 	}
8619 
8620 	/*
8621 	 * On release non-mac arm devices, allow mach_memory_info
8622 	 * to be called twice per day per boot. memorymaintenanced
8623 	 * calls it once per day, which leaves room for a sysdiagnose.
8624 	 * Allow redacted version to be called without rate limit.
8625 	 */
8626 
8627 	if (!redact_info) {
8628 		static uint64_t first_call = 0, second_call = 0;
8629 		uint64_t now = 0;
8630 		absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8631 
8632 		if (!first_call) {
8633 			first_call = now;
8634 		} else if (!second_call) {
8635 			second_call = now;
8636 		} else if (first_call + NSEC_DAY > now) {
8637 			return KERN_DENIED;
8638 		} else if (first_call + NSEC_DAY < now) {
8639 			first_call = now;
8640 			second_call = 0;
8641 		}
8642 	}
8643 #endif
8644 
8645 	return KERN_SUCCESS;
8646 }
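
/*
 * Illustrative timeline for the rate limit above (assuming
 * CONFIG_DEBUGGER_FOR_ZONE_INFO and an unredacted caller):
 *
 *   t0        call #1 -> first_call = t0, allowed
 *   t0 + 1h   call #2 -> second_call = t0 + 1h, allowed
 *   t0 + 2h   call #3 -> first_call + NSEC_DAY > now, KERN_DENIED
 *   t0 + 25h  call #4 -> window expired: first_call = now, second_call = 0,
 *                        and the call is allowed again
 */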
8647 
8648 kern_return_t
8649 mach_zone_info(
8650 	mach_port_t             host_port,
8651 	mach_zone_name_array_t  *namesp,
8652 	mach_msg_type_number_t  *namesCntp,
8653 	mach_zone_info_array_t  *infop,
8654 	mach_msg_type_number_t  *infoCntp)
8655 {
8656 	return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8657 }
8658 
8659 kern_return_t
8660 mach_memory_info(
8661 	mach_port_t             host_port,
8662 	mach_zone_name_array_t  *namesp,
8663 	mach_msg_type_number_t  *namesCntp,
8664 	mach_zone_info_array_t  *infop,
8665 	mach_msg_type_number_t  *infoCntp,
8666 	mach_memory_info_array_t *memoryInfop,
8667 	mach_msg_type_number_t   *memoryInfoCntp)
8668 {
8669 	bool redact_info = false;
8670 	host_t host = HOST_NULL;
8671 
8672 	host = convert_port_to_host_priv(host_port);
8673 	if (host == HOST_NULL) {
8674 		redact_info = true;
8675 		host = convert_port_to_host(host_port);
8676 	}
8677 
8678 	return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8679 }
8680 
8681 static void
8682 zone_info_redact(mach_zone_info_t *zi)
8683 {
8684 	zi->mzi_cur_size = 0;
8685 	zi->mzi_max_size = 0;
8686 	zi->mzi_alloc_size = 0;
8687 	zi->mzi_sum_size = 0;
8688 	zi->mzi_collectable = 0;
8689 }
8690 
8691 static bool
8692 zone_info_needs_to_be_coalesced(int zone_index)
8693 {
8694 	zone_security_flags_t zsflags = zone_security_array[zone_index];
8695 	if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8696 		return true;
8697 	}
8698 	return false;
8699 }
8700 
8701 static bool
8702 zone_info_find_coalesce_zone(
8703 	mach_zone_info_t *zi,
8704 	mach_zone_info_t *info,
8705 	int              *coalesce,
8706 	int              coalesce_count,
8707 	int              *coalesce_index)
8708 {
8709 	for (int i = 0; i < coalesce_count; i++) {
8710 		if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8711 			*coalesce_index = coalesce[i];
8712 			return true;
8713 		}
8714 	}
8715 
8716 	return false;
8717 }
8718 
8719 static void
8720 zone_info_coalesce(
8721 	mach_zone_info_t *info,
8722 	int coalesce_index,
8723 	mach_zone_info_t *zi)
8724 {
8725 	info[coalesce_index].mzi_count += zi->mzi_count;
8726 }
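
/*
 * For illustration: with redact_info set, the three helpers above fold
 * kalloc_type and KHEAP_ID_KT_VAR zones of equal element size into a single
 * synthetic entry.  Two hypothetical 48-byte kalloc type zones would thus be
 * reported as one "kalloc.48" row whose mzi_count is the sum of both, with
 * the size fields already zeroed by zone_info_redact().
 */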
8727 
8728 kern_return_t
8729 mach_memory_info_sample(
8730 	mach_zone_name_t *names,
8731 	mach_zone_info_t *info,
8732 	int              *coalesce,
8733 	unsigned int     *zonesCnt,
8734 	mach_memory_info_t *memoryInfo,
8735 	unsigned int       memoryInfoCnt,
8736 	bool               redact_info)
8737 {
8738 	int                     coalesce_count = 0;
8739 	unsigned int            max_zones, used_zones = 0;
8740 	mach_zone_name_t        *zn;
8741 	mach_zone_info_t        *zi;
8742 	kern_return_t           kr;
8743 
8744 	uint64_t                zones_collectable_bytes = 0;
8745 
8746 	kr = mach_memory_info_security_check(redact_info);
8747 	if (kr != KERN_SUCCESS) {
8748 		return kr;
8749 	}
8750 
8751 	max_zones = *zonesCnt;
8752 
8753 	bzero(names, max_zones * sizeof(*names));
8754 	bzero(info, max_zones * sizeof(*info));
8755 	if (redact_info) {
8756 		bzero(coalesce, max_zones * sizeof(*coalesce));
8757 	}
8758 
8759 	zn = &names[0];
8760 	zi = &info[0];
8761 
8762 	zone_index_foreach(i) {
8763 		if (used_zones >= max_zones) {
8764 			break;
8765 		}
8766 
8767 		if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8768 			continue;
8769 		}
8770 
8771 		if (!redact_info) {
8772 			zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8773 			zn++;
8774 			zi++;
8775 			used_zones++;
8776 			continue;
8777 		}
8778 
8779 		zone_info_redact(zi);
8780 		if (!zone_info_needs_to_be_coalesced(i)) {
8781 			zn++;
8782 			zi++;
8783 			used_zones++;
8784 			continue;
8785 		}
8786 
8787 		int coalesce_index;
8788 		bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8789 		    coalesce, coalesce_count, &coalesce_index);
8790 
8791 		/* Didn't find a zone to coalesce */
8792 		if (!found_coalesce_zone) {
8793 			/* Updates the zone name */
8794 			__nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8795 			snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8796 			    (int)zi->mzi_elem_size);
8797 
8798 			coalesce[coalesce_count] = used_zones;
8799 			coalesce_count++;
8800 			zn++;
8801 			zi++;
8802 			used_zones++;
8803 			continue;
8804 		}
8805 
8806 		zone_info_coalesce(info, coalesce_index, zi);
8807 	}
8808 
8809 	*zonesCnt = used_zones;
8810 
8811 	if (memoryInfo) {
8812 		bzero(memoryInfo, memoryInfoCnt * sizeof(*memoryInfo));
8813 		kr = vm_page_diagnose(memoryInfo, memoryInfoCnt, zones_collectable_bytes, redact_info);
8814 		if (kr != KERN_SUCCESS) {
8815 			return kr;
8816 		}
8817 	}
8818 
8819 	return kr;
8820 }
8821 
8822 static kern_return_t
8823 mach_memory_info_internal(
8824 	host_t                  host,
8825 	mach_zone_name_array_t  *namesp,
8826 	mach_msg_type_number_t  *namesCntp,
8827 	mach_zone_info_array_t  *infop,
8828 	mach_msg_type_number_t  *infoCntp,
8829 	mach_memory_info_array_t *memoryInfop,
8830 	mach_msg_type_number_t   *memoryInfoCntp,
8831 	bool                     redact_info)
8832 {
8833 	mach_zone_name_t        *names;
8834 	vm_offset_t             names_addr;
8835 	vm_size_t               names_size;
8836 
8837 	mach_zone_info_t        *info;
8838 	vm_offset_t             info_addr;
8839 	vm_size_t               info_size;
8840 
8841 	int                     *coalesce = NULL;
8842 	vm_offset_t             coalesce_addr;
8843 	vm_size_t               coalesce_size;
8844 
8845 	mach_memory_info_t      *memory_info = NULL;
8846 	vm_offset_t             memory_info_addr = 0;
8847 	vm_size_t               memory_info_size;
8848 	vm_size_t               memory_info_vmsize;
8849 	vm_map_copy_t           memory_info_copy;
8850 	unsigned int            num_info = 0;
8851 
8852 	unsigned int            max_zones, used_zones;
8853 	kern_return_t           kr;
8854 
8855 	if (host == HOST_NULL) {
8856 		return KERN_INVALID_HOST;
8857 	}
8858 
8859 	/*
8860 	 *	We assume that zones aren't freed once allocated.
8861 	 *	We won't pick up any zones that are allocated later.
8862 	 */
8863 
8864 	max_zones = os_atomic_load(&num_zones, relaxed);
8865 
8866 	names_size = round_page(max_zones * sizeof *names);
8867 	kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8868 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8869 	if (kr != KERN_SUCCESS) {
8870 		return kr;
8871 	}
8872 	names = (mach_zone_name_t *) names_addr;
8873 
8874 	info_size = round_page(max_zones * sizeof *info);
8875 	kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8876 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8877 	if (kr != KERN_SUCCESS) {
8878 		kmem_free(ipc_kernel_map,
8879 		    names_addr, names_size);
8880 		return kr;
8881 	}
8882 	info = (mach_zone_info_t *) info_addr;
8883 
8884 	if (redact_info) {
8885 		coalesce_size = round_page(max_zones * sizeof *coalesce);
8886 		kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8887 		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8888 		if (kr != KERN_SUCCESS) {
8889 			kmem_free(ipc_kernel_map,
8890 			    names_addr, names_size);
8891 			kmem_free(ipc_kernel_map,
8892 			    info_addr, info_size);
8893 			return kr;
8894 		}
8895 		coalesce = (int *)coalesce_addr;
8896 	}
8897 
8898 	if (memoryInfop && memoryInfoCntp) {
8899 		num_info = vm_page_diagnose_estimate();
8900 		memory_info_size = num_info * sizeof(*memory_info);
8901 		memory_info_vmsize = round_page(memory_info_size);
8902 		kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8903 		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8904 		if (kr != KERN_SUCCESS) {
8905 			return kr;
8906 		}
8907 
8908 		kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8909 		    VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8910 		assert(kr == KERN_SUCCESS);
8911 
8912 		memory_info = (mach_memory_info_t *) memory_info_addr;
8913 	}
8914 
8915 	used_zones = max_zones;
8916 	mach_memory_info_sample(names, info, coalesce, &used_zones, memory_info, num_info, redact_info);
8917 
8918 	if (redact_info) {
8919 		kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8920 	}
8921 
8922 	*namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8923 	*namesCntp = used_zones;
8924 
8925 	*infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8926 	*infoCntp = used_zones;
8927 
8928 	if (memoryInfop && memoryInfoCntp) {
8929 		kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8930 		assert(kr == KERN_SUCCESS);
8931 
8932 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8933 		    (vm_map_size_t)memory_info_size, TRUE, &memory_info_copy);
8934 		assert(kr == KERN_SUCCESS);
8935 
8936 		*memoryInfop = (mach_memory_info_t *) memory_info_copy;
8937 		*memoryInfoCntp = num_info;
8938 	}
8939 
8940 	return KERN_SUCCESS;
8941 }
8942 
8943 kern_return_t
8944 mach_zone_info_for_zone(
8945 	host_priv_t                     host,
8946 	mach_zone_name_t        name,
8947 	mach_zone_info_t        *infop)
8948 {
8949 	zone_t zone_ptr;
8950 
8951 	if (host == HOST_NULL) {
8952 		return KERN_INVALID_HOST;
8953 	}
8954 
8955 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8956 	if (!PE_i_can_has_debugger(NULL)) {
8957 		return KERN_INVALID_HOST;
8958 	}
8959 #endif
8960 
8961 	if (infop == NULL) {
8962 		return KERN_INVALID_ARGUMENT;
8963 	}
8964 
8965 	zone_ptr = ZONE_NULL;
8966 	zone_foreach(z) {
8967 		/*
8968 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8969 		 */
8970 		char temp_zone_name[MAX_ZONE_NAME] = "";
8971 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8972 		    zone_heap_name(z), z->z_name);
8973 
8974 		/* Find the requested zone by name */
8975 		if (track_this_zone(temp_zone_name, name.mzn_name)) {
8976 			zone_ptr = z;
8977 			break;
8978 		}
8979 	}
8980 
8981 	/* No zones found with the requested zone name */
8982 	if (zone_ptr == ZONE_NULL) {
8983 		return KERN_INVALID_ARGUMENT;
8984 	}
8985 
8986 	if (get_zone_info(zone_ptr, NULL, infop)) {
8987 		return KERN_SUCCESS;
8988 	}
8989 	return KERN_FAILURE;
8990 }
8991 
8992 kern_return_t
8993 mach_zone_info_for_largest_zone(
8994 	host_priv_t                     host,
8995 	mach_zone_name_t        *namep,
8996 	mach_zone_info_t        *infop)
8997 {
8998 	if (host == HOST_NULL) {
8999 		return KERN_INVALID_HOST;
9000 	}
9001 
9002 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
9003 	if (!PE_i_can_has_debugger(NULL)) {
9004 		return KERN_INVALID_HOST;
9005 	}
9006 #endif
9007 
9008 	if (namep == NULL || infop == NULL) {
9009 		return KERN_INVALID_ARGUMENT;
9010 	}
9011 
9012 	if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
9013 		return KERN_SUCCESS;
9014 	}
9015 	return KERN_FAILURE;
9016 }
9017 
9018 uint64_t
9019 get_zones_collectable_bytes(void)
9020 {
9021 	uint64_t zones_collectable_bytes = 0;
9022 	mach_zone_info_t zi;
9023 
9024 	zone_foreach(z) {
9025 		if (get_zone_info(z, NULL, &zi)) {
9026 			zones_collectable_bytes +=
9027 			    GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
9028 		}
9029 	}
9030 
9031 	return zones_collectable_bytes;
9032 }
9033 
9034 kern_return_t
9035 mach_zone_get_zlog_zones(
9036 	host_priv_t                             host,
9037 	mach_zone_name_array_t  *namesp,
9038 	mach_msg_type_number_t  *namesCntp)
9039 {
9040 #if ZALLOC_ENABLE_LOGGING
9041 	unsigned int max_zones, logged_zones, i;
9042 	kern_return_t kr;
9043 	zone_t zone_ptr;
9044 	mach_zone_name_t *names;
9045 	vm_offset_t names_addr;
9046 	vm_size_t names_size;
9047 
9048 	if (host == HOST_NULL) {
9049 		return KERN_INVALID_HOST;
9050 	}
9051 
9052 	if (namesp == NULL || namesCntp == NULL) {
9053 		return KERN_INVALID_ARGUMENT;
9054 	}
9055 
9056 	max_zones = os_atomic_load(&num_zones, relaxed);
9057 
9058 	names_size = round_page(max_zones * sizeof *names);
9059 	kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
9060 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
9061 	if (kr != KERN_SUCCESS) {
9062 		return kr;
9063 	}
9064 	names = (mach_zone_name_t *) names_addr;
9065 
9066 	zone_ptr = ZONE_NULL;
9067 	logged_zones = 0;
9068 	for (i = 0; i < max_zones; i++) {
9069 		zone_t z = &(zone_array[i]);
9070 		assert(z != ZONE_NULL);
9071 
9072 		/* Copy out the zone name if zone logging is enabled */
9073 		if (z->z_btlog) {
9074 			get_zone_info(z, &names[logged_zones], NULL);
9075 			logged_zones++;
9076 		}
9077 	}
9078 
9079 	*namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
9080 	*namesCntp = logged_zones;
9081 
9082 	return KERN_SUCCESS;
9083 
9084 #else /* ZALLOC_ENABLE_LOGGING */
9085 #pragma unused(host, namesp, namesCntp)
9086 	return KERN_FAILURE;
9087 #endif /* ZALLOC_ENABLE_LOGGING */
9088 }
9089 
9090 kern_return_t
9091 mach_zone_get_btlog_records(
9092 	host_priv_t             host,
9093 	mach_zone_name_t        name,
9094 	zone_btrecord_array_t  *recsp,
9095 	mach_msg_type_number_t *numrecs)
9096 {
9097 #if ZALLOC_ENABLE_LOGGING
9098 	zone_btrecord_t *recs;
9099 	kern_return_t    kr;
9100 	vm_address_t     addr;
9101 	vm_size_t        size;
9102 	zone_t           zone_ptr;
9103 	vm_map_copy_t    copy;
9104 
9105 	if (host == HOST_NULL) {
9106 		return KERN_INVALID_HOST;
9107 	}
9108 
9109 	if (recsp == NULL || numrecs == NULL) {
9110 		return KERN_INVALID_ARGUMENT;
9111 	}
9112 
9113 	zone_ptr = ZONE_NULL;
9114 	zone_foreach(z) {
9115 		/*
9116 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
9117 		 */
9118 		char temp_zone_name[MAX_ZONE_NAME] = "";
9119 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
9120 		    zone_heap_name(z), z->z_name);
9121 
9122 		/* Find the requested zone by name */
9123 		if (track_this_zone(temp_zone_name, name.mzn_name)) {
9124 			zone_ptr = z;
9125 			break;
9126 		}
9127 	}
9128 
9129 	/* No zones found with the requested zone name */
9130 	if (zone_ptr == ZONE_NULL) {
9131 		return KERN_INVALID_ARGUMENT;
9132 	}
9133 
9134 	/* Logging not turned on for the requested zone */
9135 	if (!zone_ptr->z_btlog) {
9136 		return KERN_FAILURE;
9137 	}
9138 
9139 	kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
9140 	if (kr != KERN_SUCCESS) {
9141 		return kr;
9142 	}
9143 
9144 	addr = (vm_address_t)recs;
9145 	size = sizeof(zone_btrecord_t) * *numrecs;
9146 
9147 	kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
9148 	assert(kr == KERN_SUCCESS);
9149 
9150 	*recsp = (zone_btrecord_t *)copy;
9151 	return KERN_SUCCESS;
9152 
9153 #else /* !ZALLOC_ENABLE_LOGGING */
9154 #pragma unused(host, name, recsp, numrecs)
9155 	return KERN_FAILURE;
9156 #endif /* !ZALLOC_ENABLE_LOGGING */
9157 }
9158 
9159 
9160 kern_return_t
9161 mach_zone_force_gc(
9162 	host_t host)
9163 {
9164 	if (host == HOST_NULL) {
9165 		return KERN_INVALID_HOST;
9166 	}
9167 
9168 #if DEBUG || DEVELOPMENT
9169 	extern boolean_t(*volatile consider_buffer_cache_collect)(int);
9170 	/* Callout to buffer cache GC to drop elements in the apfs zones */
9171 	if (consider_buffer_cache_collect != NULL) {
9172 		(void)(*consider_buffer_cache_collect)(0);
9173 	}
9174 	zone_gc(ZONE_GC_DRAIN);
9175 #endif /* DEBUG || DEVELOPMENT */
9176 	return KERN_SUCCESS;
9177 }
9178 
9179 zone_t
9180 zone_find_largest(uint64_t *zone_size)
9181 {
9182 	zone_t    largest_zone  = 0;
9183 	uint64_t  largest_zone_size = 0;
9184 	zone_find_n_largest(1, &largest_zone, &largest_zone_size);
9185 	if (zone_size) {
9186 		*zone_size = largest_zone_size;
9187 	}
9188 	return largest_zone;
9189 }
9190 
9191 void
9192 zone_get_stats(
9193 	zone_t                  zone,
9194 	struct zone_basic_stats *stats)
9195 {
9196 	stats->zbs_avail = zone->z_elems_avail;
9197 
9198 	stats->zbs_alloc_fail = 0;
9199 	zpercpu_foreach(zs, zone->z_stats) {
9200 		stats->zbs_alloc_fail += zs->zs_alloc_fail;
9201 	}
9202 
9203 	stats->zbs_cached = 0;
9204 	if (zone->z_pcpu_cache) {
9205 		zpercpu_foreach(zc, zone->z_pcpu_cache) {
9206 			stats->zbs_cached += zc->zc_alloc_cur +
9207 			    zc->zc_free_cur +
9208 			    zc->zc_depot.zd_full * zc_mag_size();
9209 		}
9210 	}
9211 
9212 	stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
9213 
9214 	/*
9215 	 * Since we don't take any locks, deal with possible inconsistencies
9216 	 * as the counters may have changed.
9217 	 */
9218 	if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
9219 	    &stats->zbs_alloc)) {
9220 		stats->zbs_avail = stats->zbs_free;
9221 		stats->zbs_alloc = 0;
9222 	}
9223 }
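
/*
 * For illustration: because no locks are taken above, a burst of frees
 * between reading z_elems_avail and summing the per-CPU caches can make
 * zbs_free exceed zbs_avail (say avail reads 100 while free later sums to
 * 120).  The os_sub_overflow() guard then clamps zbs_alloc to 0 and
 * zbs_avail to zbs_free instead of reporting an unsigned underflow.
 */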
9224 
9225 #endif /* !ZALLOC_TEST */
9226 #pragma mark zone creation, configuration, destruction
9227 #if !ZALLOC_TEST
9228 
9229 static zone_t
9230 zone_init_defaults(zone_id_t zid)
9231 {
9232 	zone_t z = &zone_array[zid];
9233 
9234 	z->z_wired_max = ~0u;
9235 	z->collectable = true;
9236 
9237 	hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
9238 	hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
9239 	zone_depot_init(&z->z_recirc);
9240 	return z;
9241 }
9242 
9243 void
9244 zone_set_exhaustible(zone_t zone, vm_size_t nelems, bool exhausts_by_design)
9245 {
9246 	zone_lock(zone);
9247 	zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
9248 	zone->z_exhausts = exhausts_by_design;
9249 	zone_unlock(zone);
9250 }
9251 
9252 void
9253 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
9254 {
9255 	zone_t zone = zov.zov_zone;
9256 
9257 	if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
9258 		zone = zov.zov_view->zv_zone;
9259 	} else {
9260 		zone = zov.zov_zone;
9261 	}
9262 
9263 	os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
9264 }
9265 
9266 /**
9267  * @function zone_create_find
9268  *
9269  * @abstract
9270  * Finds an unused zone for the given name and element size.
9271  *
9272  * @param name          the zone name
9273  * @param size          the element size (including redzones, ...)
9274  * @param flags         the flags passed to @c zone_create*
9275  * @param zid_inout     the desired zone ID or ZONE_ID_ANY
9276  *
9277  * @returns             a zone to initialize further.
9278  */
9279 static zone_t
9280 zone_create_find(
9281 	const char             *name,
9282 	vm_size_t               size,
9283 	zone_create_flags_t     flags,
9284 	zone_id_t              *zid_inout)
9285 {
9286 	zone_id_t nzones, zid = *zid_inout;
9287 	zone_t z;
9288 
9289 	simple_lock(&all_zones_lock, &zone_locks_grp);
9290 
9291 	nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
9292 	assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
9293 
9294 	if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
9295 		/*
9296 		 * The first time around, make sure the reserved zone IDs
9297 		 * have an initialized lock as zone_index_foreach() will
9298 		 * enumerate them.
9299 		 */
9300 		while (nzones < ZONE_ID__FIRST_DYNAMIC) {
9301 			zone_init_defaults(nzones++);
9302 		}
9303 
9304 		os_atomic_store(&num_zones, nzones, release);
9305 	}
9306 
9307 	if (zid != ZONE_ID_ANY) {
9308 		if (zid >= ZONE_ID__FIRST_DYNAMIC) {
9309 			panic("zone_create: invalid desired zone ID %d for %s",
9310 			    zid, name);
9311 		}
9312 		if (flags & ZC_DESTRUCTIBLE) {
9313 			panic("zone_create: ID %d (%s) must be permanent", zid, name);
9314 		}
9315 		if (zone_array[zid].z_self) {
9316 			panic("zone_create: creating zone ID %d (%s) twice", zid, name);
9317 		}
9318 		z = &zone_array[zid];
9319 	} else {
9320 		if (flags & ZC_DESTRUCTIBLE) {
9321 			/*
9322 			 * If possible, find a previously zdestroy'ed zone in the
9323 			 * zone_array that we can reuse.
9324 			 */
9325 			for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
9326 			    i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
9327 				z = &zone_array[i];
9328 
9329 				/*
9330 				 * If the zone name and the element size are the
9331 				 * same, we can just reuse the old zone struct.
9332 				 */
9333 				if (strcmp(z->z_name, name) ||
9334 				    zone_elem_outer_size(z) != size) {
9335 					continue;
9336 				}
9337 				bitmap_clear(zone_destroyed_bitmap, i);
9338 				z->z_destroyed = false;
9339 				z->z_self = z;
9340 				zid = (zone_id_t)i;
9341 				goto out;
9342 			}
9343 		}
9344 
9345 		zid = nzones++;
9346 		z = zone_init_defaults(zid);
9347 
9348 		/*
9349 		 * The release barrier pairs with the acquire in
9350 		 * zone_index_foreach() and makes sure that enumeration loops
9351 		 * always see an initialized zone lock.
9352 		 */
9353 		os_atomic_store(&num_zones, nzones, release);
9354 	}
9355 
9356 out:
9357 	num_zones_in_use++;
9358 	simple_unlock(&all_zones_lock);
9359 
9360 	*zid_inout = zid;
9361 	return z;
9362 }
9363 
9364 __abortlike
9365 static void
9366 zone_create_panic(const char *name, const char *f1, const char *f2)
9367 {
9368 	panic("zone_create: creating zone %s: flag %s and %s are incompatible",
9369 	    name, f1, f2);
9370 }
9371 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
9372 	if ((flags) & forbidden_flag) { \
9373 	        zone_create_panic(name, #current_flag, #forbidden_flag); \
9374 	}
9375 
9376 /*
9377  * Adjusts the size of the element based on minimum size, alignment
9378  * and kasan redzones
9379  */
9380 static vm_size_t
9381 zone_elem_adjust_size(
9382 	const char             *name __unused,
9383 	vm_size_t               elem_size,
9384 	zone_create_flags_t     flags __unused,
9385 	uint16_t               *redzone __unused)
9386 {
9387 	vm_size_t size;
9388 
9389 	/*
9390 	 * Adjust element size for minimum size and pointer alignment
9391 	 */
9392 	size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
9393 	if (size < ZONE_MIN_ELEM_SIZE) {
9394 		size = ZONE_MIN_ELEM_SIZE;
9395 	}
9396 
9397 #if KASAN_CLASSIC
9398 	/*
9399 	 * Expand the zone allocation size to include the redzones.
9400 	 *
9401 	 * For page-multiple zones add a full guard page because they
9402 	 * likely require alignment.
9403 	 */
9404 	uint16_t redzone_tmp;
9405 	if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
9406 		redzone_tmp = 0;
9407 	} else if ((size & PAGE_MASK) == 0) {
9408 		if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
9409 			panic("zone_create: zone %s can't provide more than PAGE_SIZE "
9410 			    "alignment", name);
9411 		}
9412 		redzone_tmp = PAGE_SIZE;
9413 	} else if (flags & ZC_ALIGNMENT_REQUIRED) {
9414 		redzone_tmp = 0;
9415 	} else {
9416 		redzone_tmp = KASAN_GUARD_SIZE;
9417 	}
9418 	size += redzone_tmp;
9419 	if (redzone) {
9420 		*redzone = redzone_tmp;
9421 	}
9422 #endif
9423 	return size;
9424 }
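
/*
 * Worked example (hypothetical numbers): if ZONE_ALIGN_SIZE were 16 and no
 * KASAN redzone applied, a 41-byte element would round up as
 * (41 + 15) & -16 == 48; anything below ZONE_MIN_ELEM_SIZE is bumped to that
 * floor, and under KASAN_CLASSIC the redzone chosen above is added on top.
 */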
9425 
9426 /*
9427  * Returns the allocation chunk size that has the least fragmentation
9428  */
9429 static vm_size_t
9430 zone_get_min_alloc_granule(
9431 	vm_size_t               elem_size,
9432 	zone_create_flags_t     flags)
9433 {
9434 	vm_size_t alloc_granule = PAGE_SIZE;
9435 	if (flags & ZC_PERCPU) {
9436 		alloc_granule = PAGE_SIZE * zpercpu_count();
9437 		if (PAGE_SIZE % elem_size > 256) {
9438 			panic("zone_create: per-cpu zone has too much fragmentation");
9439 		}
9440 	} else if (flags & ZC_READONLY) {
9441 		alloc_granule = PAGE_SIZE;
9442 	} else if ((elem_size & PAGE_MASK) == 0) {
9443 		/* zero fragmentation by definition */
9444 		alloc_granule = elem_size;
9445 	} else if (alloc_granule % elem_size == 0) {
9446 		/* zero fragmentation by definition */
9447 	} else {
9448 		vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9449 		vm_size_t alloc_tmp = PAGE_SIZE;
9450 		vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9451 
9452 #if __arm64__
9453 		/*
9454 		 * Increase chunk size to 48K for sizes larger than 4K on 16k
9455 		 * machines, so as to reduce internal fragmentation for kalloc
9456 		 * zones with sizes 12K and 24K.
9457 		 */
9458 		if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9459 			max_chunk_size = 48 * 1024;
9460 		}
9461 #endif
9462 		while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9463 			vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9464 			if (frag_tmp < frag) {
9465 				frag = frag_tmp;
9466 				alloc_granule = alloc_tmp;
9467 			}
9468 		}
9469 	}
9470 	return alloc_granule;
9471 }
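
/*
 * Worked example (hypothetical element size, 4K pages): for a 3072-byte
 * element, one page wastes 4096 % 3072 = 1024 bytes (25%), two pages waste
 * 2048 of 8192 (25%), and three pages waste 0 of 12288, so the search above
 * settles on a 12K chunk.
 */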
9472 
9473 vm_size_t
9474 zone_get_early_alloc_size(
9475 	const char             *name __unused,
9476 	vm_size_t               elem_size,
9477 	zone_create_flags_t     flags,
9478 	vm_size_t               min_elems)
9479 {
9480 	vm_size_t adjusted_size, alloc_granule, chunk_elems;
9481 
9482 	adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9483 	alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9484 	chunk_elems   = alloc_granule / adjusted_size;
9485 
9486 	return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9487 }
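
/*
 * Worked example (continuing the hypothetical 3072-byte element with a 12K
 * granule): each chunk holds 12288 / 3072 = 4 elements, so min_elems = 10
 * rounds up to 3 chunks, i.e. 36864 bytes of early VA.
 */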
9488 
9489 zone_t
9490 zone_create_ext(
9491 	const char             *name,
9492 	vm_size_t               size,
9493 	zone_create_flags_t     flags,
9494 	zone_id_t               zid,
9495 	void                  (^extra_setup)(zone_t))
9496 {
9497 	zone_security_flags_t *zsflags;
9498 	uint16_t redzone;
9499 	zone_t z;
9500 
9501 	if (size > ZONE_MAX_ALLOC_SIZE) {
9502 		panic("zone_create: element size too large: %zd", (size_t)size);
9503 	}
9504 
9505 	if (size < 2 * sizeof(vm_size_t)) {
9506 		/* Elements are too small for kasan. */
9507 		flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9508 	}
9509 
9510 	size = zone_elem_adjust_size(name, size, flags, &redzone);
9511 
9512 	/*
9513 	 * Allocate the zone slot, return early if we found an older match.
9514 	 */
9515 	z = zone_create_find(name, size, flags, &zid);
9516 	if (__improbable(z->z_self)) {
9517 		/* We found a zone to reuse */
9518 		return z;
9519 	}
9520 	zsflags = &zone_security_array[zid];
9521 
9522 	/*
9523 	 * Initialize the zone properly.
9524 	 */
9525 
9526 	/*
9527 	 * If the kernel is post lockdown, copy the zone name passed in.
9528 	 * Else simply maintain a pointer to the name string as it can only
9529 	 * be a core XNU zone (no unloadable kext exists before lockdown).
9530 	 */
9531 	if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9532 		size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9533 		char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9534 		strlcpy(buf, name, nsz);
9535 		z->z_name = buf;
9536 	} else {
9537 		z->z_name = name;
9538 	}
9539 	if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9540 		z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9541 	} else {
9542 		/*
9543 		 * zone_init() hasn't run yet, use the storage provided by
9544 		 * zone_stats_startup(), and zone_init() will replace it
9545 		 * with the final value once the PERCPU zone exists.
9546 		 */
9547 		z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9548 	}
9549 
9550 	if (flags & ZC_OBJ_CACHE) {
9551 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9552 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9553 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9554 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9555 
9556 		z->z_elem_size   = (uint16_t)size;
9557 		z->z_chunk_pages = 0;
9558 		z->z_quo_magic   = 0;
9559 		z->z_align_magic = 0;
9560 		z->z_chunk_elems = 0;
9561 		z->z_elem_offs   = 0;
9562 		z->no_callout    = true;
9563 		zsflags->z_lifo  = true;
9564 	} else {
9565 		vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9566 
9567 		z->z_elem_size   = (uint16_t)(size - redzone);
9568 		z->z_chunk_pages = (uint16_t)atop(alloc);
9569 		z->z_quo_magic   = Z_MAGIC_QUO(size);
9570 		z->z_align_magic = Z_MAGIC_ALIGNED(size);
9571 		if (flags & ZC_PERCPU) {
9572 			z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9573 			z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9574 		} else {
9575 			z->z_chunk_elems = (uint16_t)(alloc / size);
9576 			z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9577 		}
9578 	}
9579 
9580 	/*
9581 	 * Handle KPI flags
9582 	 */
9583 
9584 	/* ZC_CACHING applied after all configuration is done */
9585 	if (flags & ZC_NOCACHING) {
9586 		z->z_nocaching = true;
9587 	}
9588 
9589 	if (flags & ZC_READONLY) {
9590 		zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9591 		zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9592 		assert(zid <= ZONE_ID__LAST_RO);
9593 #if ZSECURITY_CONFIG(READ_ONLY)
9594 		zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9595 #endif
9596 		zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9597 		zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9598 		assert(size <= PAGE_SIZE);
9599 		if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9600 			panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9601 			    (uint32_t)size, zone_heap_name(z), z->z_name);
9602 		}
9603 	}
9604 
9605 	if (flags & ZC_PERCPU) {
9606 		zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9607 		zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9608 		z->z_percpu = true;
9609 	}
9610 	if (flags & ZC_NOGC) {
9611 		z->collectable = false;
9612 	}
9613 	/*
9614 	 * Handle ZC_NOENCRYPT from xnu only
9615 	 */
9616 	if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9617 		zsflags->z_noencrypt = true;
9618 	}
9619 	if (flags & ZC_NOCALLOUT) {
9620 		z->no_callout = true;
9621 	}
9622 	if (flags & ZC_DESTRUCTIBLE) {
9623 		zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9624 		z->z_destructible = true;
9625 	}
9626 	/*
9627 	 * Handle Internal flags
9628 	 */
9629 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9630 	if (flags & ZC_PGZ_USE_GUARDS) {
9631 		/*
9632 		 * Try to turn on guard pages only for zones
9633 		 * with a chance of OOB.
9634 		 */
9635 		if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9636 			zsflags->z_pgz_use_guards = true;
9637 		}
9638 		z->z_pgz_use_guards = true;
9639 	}
9640 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9641 
9642 #if ZSECURITY_CONFIG(ZONE_TAGGING)
9643 	if (flags & (ZC_NO_TBI_TAG)) {
9644 		zsflags->z_tag = false;
9645 	}
9646 
9647 #if KASAN_TBI
9648 	/*
9649 	 * Maintain for now the old behavior of not tagging DATA. Remove once
9650 	 * we move to the new DATA-tagging behavior.
9651 	 */
9652 	if (flags & ZC_DATA || flags & ZC_SHARED_DATA) {
9653 		zsflags->z_tag = false;
9654 	}
9655 #endif /* KASAN_TBI */
9656 
9657 
9658 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
9659 
9660 	if (flags & ZC_KALLOC_TYPE) {
9661 		zsflags->z_kalloc_type = true;
9662 	}
9663 	if (flags & ZC_VM) {
9664 		zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9665 		zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9666 	}
9667 	if (flags & ZC_DATA) {
9668 		zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9669 	}
9670 	if (flags & ZC_SHARED_DATA) {
9671 		zsflags->z_kheap_id = KHEAP_ID_DATA_SHARED;
9672 	}
9673 
9674 #if KASAN_CLASSIC
9675 	if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9676 		z->z_kasan_quarantine = true;
9677 	}
9678 	z->z_kasan_redzone = redzone;
9679 #endif /* KASAN_CLASSIC */
9680 #if KASAN_FAKESTACK
9681 	if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9682 		z->z_kasan_fakestacks = true;
9683 	}
9684 #endif /* KASAN_FAKESTACK */
9685 
9686 	/*
9687 	 * Then if there's extra tuning, do it
9688 	 */
9689 	if (extra_setup) {
9690 		extra_setup(z);
9691 	}
9692 
9693 	/*
9694 	 * Configure debugging features
9695 	 */
9696 #if CONFIG_PROB_GZALLOC
9697 	if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9698 		pgz_zone_init(z);
9699 	}
9700 #endif
9701 	if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9702 #if ZALLOC_ENABLE_LOGGING
9703 		/*
9704 		 * Check for and set up zone leak detection
9705 		 * if requested via boot-args.
9706 		 */
9707 		zone_setup_logging(z);
9708 #endif /* ZALLOC_ENABLE_LOGGING */
9709 #if KASAN_TBI
9710 		zone_setup_kasan_logging(z);
9711 #endif /* KASAN_TBI */
9712 	}
9713 
9714 #if VM_TAG_SIZECLASSES
9715 	if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9716 		static uint16_t sizeclass_idx;
9717 
9718 		assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9719 		z->z_uses_tags = true;
9720 		if (zone_is_data_kheap(zsflags->z_kheap_id)) {
9721 			zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9722 			z->z_tags_sizeclass = sizeclass_idx++;
9723 		} else {
9724 			uint16_t i = 0;
9725 			for (; i < sizeclass_idx; i++) {
9726 				if (size == zone_tags_sizeclasses[i]) {
9727 					z->z_tags_sizeclass = i;
9728 					break;
9729 				}
9730 			}
9731 
9732 			/*
9733 			 * Size class wasn't found, add it to zone_tags_sizeclasses
9734 			 */
9735 			if (i == sizeclass_idx) {
9736 				assert(i < VM_TAG_SIZECLASSES);
9737 				zone_tags_sizeclasses[i] = (uint16_t)size;
9738 				z->z_tags_sizeclass = sizeclass_idx++;
9739 			}
9740 		}
9741 		assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9742 	}
9743 #endif
9744 
9745 	/*
9746 	 * Finally, fixup properties based on security policies, boot-args, ...
9747 	 */
9748 	if (zone_is_data_kheap(zsflags->z_kheap_id)) {
9749 		/*
9750 		 * We use LIFO in the data map, because workloads like network
9751 		 * usage or similar tend to rotate through allocations very
9752 		 * quickly with sometimes exploding working sets, and using
9753 		 * a FIFO policy might cause massive TLB thrashing with rather
9754 		 * dramatic performance impacts.
9755 		 */
9756 		zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9757 		zsflags->z_lifo = true;
9758 	}
9759 
9760 	if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9761 		/*
9762 		 * No zone made before zone_init() can have ZC_CACHING set.
9763 		 */
9764 		assert(zc_magazine_zone);
9765 		zone_enable_caching(z);
9766 	}
9767 
9768 	zone_lock(z);
9769 	z->z_self = z;
9770 	zone_unlock(z);
9771 
9772 	return z;
9773 }
9774 
9775 void
9776 zone_set_sig_eq(zone_t zone, zone_id_t sig_eq)
9777 {
9778 	zone_security_array[zone_index(zone)].z_sig_eq = sig_eq;
9779 }
9780 
9781 zone_id_t
9782 zone_get_sig_eq(zone_t zone)
9783 {
9784 	return zone_security_array[zone_index(zone)].z_sig_eq;
9785 }
9786 
9787 void
9788 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9789 {
9790 	/* moving to SMR must be done before the zone has ever been used */
9791 	assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9792 	assert(!zone_security_array[zone_index(zone)].z_lifo);
9793 	assert((smr->smr_flags & SMR_SLEEPABLE) == 0);
9794 
9795 	if (!zone->z_pcpu_cache) {
9796 		zone_enable_caching(zone);
9797 	}
9798 
9799 	zone_lock(zone);
9800 
9801 	zpercpu_foreach(it, zone->z_pcpu_cache) {
9802 		it->zc_smr = smr;
9803 		it->zc_free = free_cb;
9804 	}
9805 	zone->z_smr = true;
9806 
9807 	zone_unlock(zone);
9808 }
9809 
9810 __startup_func
9811 void
9812 zone_create_startup(struct zone_create_startup_spec *spec)
9813 {
9814 	zone_t z;
9815 
9816 	z = zone_create_ext(spec->z_name, spec->z_size,
9817 	    spec->z_flags, spec->z_zid, spec->z_setup);
9818 	if (spec->z_var) {
9819 		*spec->z_var = z;
9820 	}
9821 }
9822 
9823 /*
9824  * The first 4 fields of a zone_view and a zone alias each other, so that the
9825  * zone_or_view_t union works. Trust but verify.
9826  */
9827 #define zalloc_check_zov_alias(f1, f2) \
9828     static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9829 zalloc_check_zov_alias(z_self, zv_zone);
9830 zalloc_check_zov_alias(z_stats, zv_stats);
9831 zalloc_check_zov_alias(z_name, zv_name);
9832 zalloc_check_zov_alias(z_views, zv_next);
9833 #undef zalloc_check_zov_alias
9834 
9835 __startup_func
9836 void
9837 zone_view_startup_init(struct zone_view_startup_spec *spec)
9838 {
9839 	struct kalloc_heap *heap = NULL;
9840 	zone_view_t zv = spec->zv_view;
9841 	zone_t z;
9842 	zone_security_flags_t zsflags;
9843 
9844 	switch (spec->zv_heapid) {
9845 	case KHEAP_ID_DATA_BUFFERS:
9846 		heap = KHEAP_DATA_BUFFERS;
9847 		break;
9848 	case KHEAP_ID_DATA_SHARED:
9849 		heap = KHEAP_DATA_SHARED;
9850 		break;
9851 	default:
9852 		heap = NULL;
9853 	}
9854 
9855 	if (heap) {
9856 		z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9857 	} else {
9858 		z = *spec->zv_zone;
9859 		assert(spec->zv_size <= zone_elem_inner_size(z));
9860 	}
9861 
9862 	assert(z);
9863 
9864 	zv->zv_zone  = z;
9865 	zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9866 	zv->zv_next  = z->z_views;
9867 	zsflags = zone_security_config(z);
9868 	if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9869 		/*
9870 		 * count the raw view for zones not in a heap,
9871 		 * kalloc_heap_init() already counts it for its members.
9872 		 */
9873 		zone_view_count += 2;
9874 	} else {
9875 		zone_view_count += 1;
9876 	}
9877 	z->z_views = zv;
9878 }
9879 
9880 zone_t
9881 zone_create(
9882 	const char             *name,
9883 	vm_size_t               size,
9884 	zone_create_flags_t     flags)
9885 {
9886 	return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9887 }
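
/*
 * Illustrative sketch (not compiled): typical zone_create() usage by a
 * hypothetical client.  struct example_widget and example_zone are made-up
 * names for the example.
 */
#if 0 /* illustration only */
struct example_widget {
	uint64_t ew_id;
	uint64_t ew_refs;
};

static SECURITY_READ_ONLY_LATE(zone_t) example_zone;

static void
example_widget_zone_init(void)
{
	/* a plain, collectable, cacheable zone sized for the widget struct */
	example_zone = zone_create("example.widget",
	    sizeof(struct example_widget), ZC_NONE);
}

static struct example_widget *
example_widget_alloc(uint64_t id)
{
	/* Z_WAITOK may block for memory, Z_ZERO returns zeroed storage */
	struct example_widget *ew = zalloc_flags(example_zone,
	    Z_WAITOK | Z_ZERO);

	ew->ew_id = id;
	return ew;
}

static void
example_widget_free(struct example_widget *ew)
{
	zfree(example_zone, ew);
}
#endif /* illustration only */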
9888 
9889 vm_size_t
9890 zone_get_elem_size(zone_t zone)
9891 {
9892 	return zone->z_elem_size;
9893 }
9894 
9895 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9896 
9897 zone_id_t
9898 zone_create_ro(
9899 	const char             *name,
9900 	vm_size_t               size,
9901 	zone_create_flags_t     flags,
9902 	zone_create_ro_id_t     zc_ro_id)
9903 {
9904 	assert(zc_ro_id <= ZC_RO_ID__LAST);
9905 	zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9906 	(void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9907 	return reserved_zid;
9908 }
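
/*
 * For illustration (API names as assumed from osfmk/kern/zalloc.h, treat the
 * exact signatures as an assumption): a read-only zone client keeps the
 * returned zone_id_t, allocates with zalloc_ro(zid, Z_WAITOK), updates
 * element contents through zalloc_ro_mut() because the element pages are not
 * directly writable, and releases elements with zfree_ro(zid, elem).
 */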
9909 
9910 zone_t
9911 zinit(
9912 	vm_size_t       size,           /* the size of an element */
9913 	vm_size_t       max __unused,   /* maximum memory to use */
9914 	vm_size_t       alloc __unused, /* allocation size */
9915 	const char      *name)          /* a name for the zone */
9916 {
9917 	return zone_create(name, size, ZC_DESTRUCTIBLE);
9918 }
9919 
9920 void
9921 zdestroy(zone_t z)
9922 {
9923 	unsigned int zindex = zone_index(z);
9924 	zone_security_flags_t zsflags = zone_security_array[zindex];
9925 
9926 	current_thread()->options |= TH_OPT_ZONE_PRIV;
9927 	lck_mtx_lock(&zone_gc_lock);
9928 
9929 	zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9930 
9931 	lck_mtx_unlock(&zone_gc_lock);
9932 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9933 
9934 	zone_lock(z);
9935 
9936 	if (!zone_submap_is_sequestered(zsflags)) {
9937 		while (!zone_pva_is_null(z->z_pageq_va)) {
9938 			struct zone_page_metadata *meta;
9939 
9940 			zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9941 			meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9942 			assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9943 			bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9944 			zone_unlock(z);
9945 			kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9946 			    ptoa(z->z_chunk_pages));
9947 			zone_lock(z);
9948 		}
9949 	}
9950 
9951 #if !KASAN_CLASSIC
9952 	/* Assert that all counts are zero */
9953 	if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9954 	    (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9955 		panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9956 		    zone_heap_name(z), z->z_name);
9957 	}
9958 
9959 	/* consistency check: make sure everything is indeed empty */
9960 	assert(zone_pva_is_null(z->z_pageq_empty));
9961 	assert(zone_pva_is_null(z->z_pageq_partial));
9962 	assert(zone_pva_is_null(z->z_pageq_full));
9963 	if (!zone_submap_is_sequestered(zsflags)) {
9964 		assert(zone_pva_is_null(z->z_pageq_va));
9965 	}
9966 #endif
9967 
9968 	zone_unlock(z);
9969 
9970 	simple_lock(&all_zones_lock, &zone_locks_grp);
9971 
9972 	assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9973 	/* Mark the zone as empty in the bitmap */
9974 	bitmap_set(zone_destroyed_bitmap, zindex);
9975 	num_zones_in_use--;
9976 	assert(num_zones_in_use > 0);
9977 
9978 	simple_unlock(&all_zones_lock);
9979 }
9980 
9981 #endif /* !ZALLOC_TEST */
9982 #pragma mark zalloc module init
9983 #if !ZALLOC_TEST
9984 
9985 /*
9986  *	Initialize the "zone of zones" which uses fixed memory allocated
9987  *	earlier in memory initialization.  zone_bootstrap is called
9988  *	before zone_init.
9989  */
9990 __startup_func
9991 void
9992 zone_bootstrap(void)
9993 {
9994 #if DEBUG || DEVELOPMENT
9995 #if __x86_64__
9996 	if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9997 		 * rdar://79781535 Disable early gaps while running kernPOST on Intel;
9998 		 * rdar://79781535 Disable early gaps while running kernPOST on Intel
9999 		 * the fp faulting code gets triggered and deadlocks.
10000 		 */
10001 		zone_caching_disabled = 1;
10002 	}
10003 #endif /* __x86_64__ */
10004 #endif /* DEBUG || DEVELOPMENT */
10005 
10006 	/* Validate struct zone_packed_virtual_address expectations */
10007 	static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
10008 	if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
10009 		panic("zone_pva_t can't pack a kernel page address in 31 bits");
10010 	}
10011 
10012 	zpercpu_early_count = ml_early_cpu_max_number() + 1;
10013 	if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
10014 		/*
10015 		 * Scale zc_mag_size() per machine.
10016 		 *
10017 		 * - wide machines get 128B magazines to avoid all false sharing
10018 		 * - smaller machines but with enough RAM get a bit bigger
10019 		 *   buckets (empirically affects networking performance)
10020 		 */
10021 		if (zpercpu_early_count >= 10) {
10022 			_zc_mag_size = 14;
10023 		} else if ((sane_size >> 30) >= 4) {
10024 			_zc_mag_size = 10;
10025 		}
10026 	}
10027 
10028 	/*
10029 	 * Initialize random used to scramble early allocations
10030 	 */
10031 	zpercpu_foreach_cpu(cpu) {
10032 		random_bool_init(&zone_bool_gen[cpu].zbg_bg);
10033 	}
10034 
10035 #if CONFIG_PROB_GZALLOC
10036 	/*
10037 	 * Set pgz_sample_counter on the boot CPU so that we do not sample
10038 	 * any allocation until PGZ has been properly setup (in pgz_init()).
10039 	 */
10040 	*PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
10041 #endif /* CONFIG_PROB_GZALLOC */
10042 
10043 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10044 	/*
10045 	 * Randomly assign zones to one of the 4 general submaps,
10046 	 * and pick whether they allocate from the beginning
10047 	 * or the end of it.
10048 	 *
10049 	 * A lot of OOB exploitation relies on precise interleaving
10050 	 * of specific types in the heap.
10051 	 *
10052 	 * Woops, you can't guarantee that anymore.
10053 	 */
10054 	for (zone_id_t i = 1; i < MAX_ZONES; i++) {
10055 		uint32_t r = zalloc_random_uniform32(0,
10056 		    ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
10057 
10058 		zone_security_array[i].z_submap_from_end = (r & 1);
10059 		zone_security_array[i].z_submap_idx += (r >> 1);
10060 	}
10061 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
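
	/*
	 * For illustration: the draw above is uniform in
	 * [0, 2 * ZSECURITY_CONFIG_GENERAL_SUBMAPS); with 4 general submaps, a
	 * hypothetical r == 5 (binary 101) sets z_submap_from_end to 1 and
	 * advances z_submap_idx by 2, so that zone allocates from the end of
	 * the third general submap.
	 */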
10062 
10063 
10064 	thread_call_setup_with_options(&zone_expand_callout,
10065 	    zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
10066 	    THREAD_CALL_OPTIONS_ONCE);
10067 
10068 	thread_call_setup_with_options(&zone_trim_callout,
10069 	    zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
10070 	    THREAD_CALL_OPTIONS_ONCE);
10071 }
10072 
10073 #define ZONE_GUARD_SIZE                 (64UL << 10)
10074 
10075 __startup_func
10076 static void
10077 zone_tunables_fixup(void)
10078 {
10079 	int wdt = 0;
10080 
10081 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
10082 	if (!PE_parse_boot_argn("pgz", NULL, 0) &&
10083 	    PE_parse_boot_argn("pgz1", NULL, 0)) {
10084 		/*
10085 		 * if pgz1= was used, but pgz= was not,
10086 		 * then the more specific pgz1 takes precedence.
10087 		 */
10088 		pgz_all = false;
10089 	}
10090 #endif
10091 
10092 	if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
10093 		zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
10094 	}
10095 	if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
10096 	    !PE_parse_boot_argn("zet", NULL, 0)) {
10097 		zone_exhausted_timeout = -1;
10098 	}
10099 }
10100 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
10101 
10102 /** Get the left zone guard size for the submap at IDX */
10103 __pure2
10104 __startup_func
10105 static vm_map_size_t
10106 zone_submap_left_guard_size(zone_submap_idx_t __unused idx)
10107 {
10108 	return ZONE_GUARD_SIZE / 2;
10109 }
10110 
10111 /** Get the right zone guard size for the submap at IDX */
10112 __pure2
10113 __startup_func
10114 static vm_map_size_t
10115 zone_submap_right_guard_size(zone_submap_idx_t __unused idx)
10116 {
10117 	return ZONE_GUARD_SIZE / 2;
10118 }
10119 
10120 __startup_func
10121 static void
10122 zone_submap_init(
10123 	mach_vm_offset_t       *submap_min,
10124 	zone_submap_idx_t       idx,
10125 	uint64_t                zone_sub_map_numer,
10126 	uint64_t               *remaining_denom,
10127 	vm_offset_t            *remaining_size)
10128 {
10129 	vm_map_create_options_t vmco;
10130 	vm_map_address_t addr;
10131 	vm_offset_t submap_start, submap_end;
10132 	vm_size_t submap_actual_size, submap_usable_size;
10133 	vm_map_t  submap;
10134 	vm_map_size_t left_guard_size = 0, right_guard_size = 0;
10135 	vm_prot_t prot = VM_PROT_DEFAULT;
10136 	vm_prot_t prot_max = VM_PROT_ALL;
10137 	kern_return_t kr;
10138 
10139 	submap_usable_size =
10140 	    zone_sub_map_numer * *remaining_size / *remaining_denom;
10141 	submap_usable_size = trunc_page(submap_usable_size);
10142 
10143 	submap_start = *submap_min;
10144 
10145 	left_guard_size = zone_submap_left_guard_size(idx);
10146 	right_guard_size = zone_submap_right_guard_size(idx);
10147 
10148 	/*
10149 	 * Compute the final submap size.
10150 	 *
10151 	 * The usable size does not include the zone guards, so add them now. This
10152 	 * VA is paid for in zone_init ahead of time.
10153 	 */
10154 
10155 	submap_actual_size =
10156 	    submap_usable_size + left_guard_size + right_guard_size;
10157 
10158 	if (idx == Z_SUBMAP_IDX_READ_ONLY) {
10159 		/*
10160 		 * The RO zone has special alignment requirements, so snap to the
10161 		 * required boundary and reflow based on the available space.
10162 		 *
10163 		 * This operation only increases the amount of VA used by the submap,
10164 		 * and so the guards will always still fit.
10165 		 */
10166 		vm_offset_t submap_padding = 0;
10167 
10168 		submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
10169 		submap_start += submap_padding;
10170 
10171 		submap_actual_size = pmap_ro_zone_align(submap_actual_size);
10172 		submap_usable_size =
10173 		    submap_actual_size - left_guard_size - right_guard_size;
10174 
10175 		assert(*remaining_size >= (submap_padding + submap_usable_size));
10176 
10177 		*remaining_size -= submap_padding;
10178 		*submap_min = submap_start;
10179 	}
10180 
10181 	submap_end = submap_start + submap_actual_size;
10182 
10183 	if (idx == Z_SUBMAP_IDX_VM) {
10184 		vm_packing_verify_range("vm_compressor",
10185 		    submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
10186 		vm_packing_verify_range("vm_page",
10187 		    submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
10188 
10189 #if MACH_ASSERT
10190 		/*
10191 		 * vm_submap_restriction_size_debug gives the size passed to the kmem
10192 		 * claim placer to ensure that the packing behaves correctly. If this
10193 		 * size is smaller than what we actually end up using for the VM submap,
10194 		 * the packing may be probabilistically invalid. Assert on this
10195 		 * condition to catch this type of failure deterministically rather than
10196 		 * relying on the above assertions catching it when we actually hit that
10197 		 * rare case and the packing is invalid.
10198 		 */
10199 		assert(submap_actual_size <= vm_submap_restriction_size_debug);
10200 #endif /* MACH_ASSERT */
10201 	}
10202 
10203 	vmco = VM_MAP_CREATE_NEVER_FAULTS;
10204 	if (!zone_submap_is_sequestered(idx)) {
10205 		vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
10206 	}
10207 
10208 	vm_map_will_allocate_early_map(&zone_submaps[idx]);
10209 	submap = kmem_suballoc(kernel_map, submap_min, submap_actual_size, vmco,
10210 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10211 	    KMS_PERMANENT | KMS_NOFAIL | KMS_NOSOFTLIMIT,
10212 	    VM_KERN_MEMORY_ZONE).kmr_submap;
10213 
10214 	if (idx == Z_SUBMAP_IDX_READ_ONLY) {
10215 		zone_info.zi_ro_range.min_address = submap_start;
10216 		zone_info.zi_ro_range.max_address = submap_end;
10217 		prot_max = prot = VM_PROT_NONE;
10218 	}
10219 
10220 	addr = submap_start;
10221 	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(
10222 		.vmkf_no_soft_limit = true,
10223 		.vm_tag = VM_KERN_MEMORY_ZONE);
10224 	vm_object_t kobject = kernel_object_default;
10225 
10226 	kr = vm_map_enter(submap, &addr, left_guard_size, 0,
10227 	    vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10228 	if (kr != KERN_SUCCESS) {
10229 		panic("ksubmap[%s]: failed to make first entry (%d)",
10230 		    zone_submaps_names[idx], kr);
10231 	}
10232 
10233 	addr = submap_end - right_guard_size;
10234 	kr = vm_map_enter(submap, &addr, right_guard_size, 0,
10235 	    vmk_flags, kobject, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
10236 	if (kr != KERN_SUCCESS) {
10237 		panic("ksubmap[%s]: failed to make last entry (%d)",
10238 		    zone_submaps_names[idx], kr);
10239 	}
10240 
10241 #if DEBUG || DEVELOPMENT
10242 	printf("zone_init: map %-5s %p:%p (%u%c, %u%c usable)\n",
10243 	    zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
10244 	    mach_vm_size_pretty(submap_actual_size),
10245 	    mach_vm_size_unit(submap_actual_size),
10246 	    mach_vm_size_pretty(submap_usable_size),
10247 	    mach_vm_size_unit(submap_usable_size));
10248 #endif /* DEBUG || DEVELOPMENT */
10249 
10250 	zone_submaps[idx] = submap;
10251 	*submap_min       = submap_end;
10252 	*remaining_size  -= submap_usable_size;
10253 	*remaining_denom -= zone_sub_map_numer;
10254 }
10255 
10256 static inline void
10257 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
10258 {
10259 	if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
10260 		pva->packed_address += delta;
10261 	}
10262 }
10263 
10264 /*
10265  * Allocate metadata array and migrate bootstrap initial metadata and memory.
10266  */
10267 __startup_func
10268 static void
10269 zone_metadata_init(void)
10270 {
10271 	vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
10272 	vm_map_entry_t first;
10273 
10274 	struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
10275 	vm_size_t early_sz;
10276 	vm_offset_t reloc_base;
10277 
10278 	/*
10279 	 * Step 1: Allocate the metadata + bitmaps range
10280 	 *
10281 	 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
10282 	 * of physical memory (16M per 1G).
10283 	 *
10284 	 * Let's preallocate for the worst to avoid weird panics.
10285 	 */
10286 	vm_map_will_allocate_early_map(&zone_meta_map);
10287 	meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
10288 	    zone_meta_size + zone_bits_size + zone_xtra_size,
10289 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10290 	    VM_KERN_MEMORY_ZONE, &zone_meta_map);
10291 	meta_r.min_address += ZONE_GUARD_SIZE;
10292 	meta_r.max_address -= ZONE_GUARD_SIZE;
10293 	if (zone_xtra_size) {
10294 		xtra_r.max_address  = meta_r.max_address;
10295 		meta_r.max_address -= zone_xtra_size;
10296 		xtra_r.min_address  = meta_r.max_address;
10297 	} else {
10298 		xtra_r.min_address  = xtra_r.max_address = 0;
10299 	}
10300 	bits_r.max_address  = meta_r.max_address;
10301 	meta_r.max_address -= zone_bits_size;
10302 	bits_r.min_address  = meta_r.max_address;
10303 
10304 #if DEBUG || DEVELOPMENT
10305 	printf("zone_init: metadata  %p:%p (%u%c)\n",
10306 	    (void *)meta_r.min_address, (void *)meta_r.max_address,
10307 	    mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
10308 	    mach_vm_size_unit(mach_vm_range_size(&meta_r)));
10309 	printf("zone_init: metabits  %p:%p (%u%c)\n",
10310 	    (void *)bits_r.min_address, (void *)bits_r.max_address,
10311 	    mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
10312 	    mach_vm_size_unit(mach_vm_range_size(&bits_r)));
10313 	printf("zone_init: extra     %p:%p (%u%c)\n",
10314 	    (void *)xtra_r.min_address, (void *)xtra_r.max_address,
10315 	    mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
10316 	    mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
10317 #endif /* DEBUG || DEVELOPMENT */
10318 
10319 	bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
10320 	bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
10321 
10322 	/*
10323 	 * Step 2: Install new ranges.
10324 	 *         Relocate metadata and bits.
10325 	 */
10326 	early_r  = zone_info.zi_map_range;
10327 	early_sz = mach_vm_range_size(&early_r);
10328 
10329 	zone_info.zi_map_range  = zone_map_range;
10330 	zone_info.zi_meta_range = meta_r;
10331 	zone_info.zi_bits_range = bits_r;
10332 	zone_info.zi_xtra_range = xtra_r;
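	/*
	 * zi_meta_base is biased backwards by the packed page index of the
	 * start of the zone map, so that indexing it with the packed page
	 * index of any zone address lands directly on that page's metadata
	 * entry.
	 */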
10333 	zone_info.zi_meta_base  = VM_FAR_ADD_PTR_UNBOUNDED(
10334 		(struct zone_page_metadata *)meta_r.min_address,
10335 		-(ptrdiff_t)zone_pva_from_addr(zone_map_range.min_address).packed_address);
10336 
10337 	vm_map_lock(vm_map);
10338 	first = vm_map_first_entry(vm_map);
10339 	reloc_base = first->vme_end;
10340 	first->vme_end += early_sz;
10341 	vm_map->size += early_sz;
10342 	vm_map_unlock(vm_map);
10343 
10344 	struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
10345 	struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
10346 	vm_offset_t reloc_delta = reloc_base - early_r.min_address;
10347 	/* this needs to sign extend */
10348 	uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
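	/*
	 * Packed queue links hold page indexes, so the delta is applied in
	 * units of pages; the arithmetic (signed) shift lets the 32-bit
	 * addition in zone_pva_relocate() wrap correctly when the relocated
	 * pages land at a lower address than the bootstrap range.
	 */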
10349 
10350 	zone_meta_populate(reloc_base, early_sz);
10351 	memcpy(new_meta, early_meta,
10352 	    atop(early_sz) * sizeof(struct zone_page_metadata));
10353 	for (uint32_t i = 0; i < atop(early_sz); i++) {
10354 		zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
10355 		zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
10356 	}
10357 
10358 	static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
10359 	static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
10360 
10361 	for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
10362 		zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
10363 		zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
10364 	}
10365 
10366 	zba_populate(0, false);
10367 	memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
10368 	zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
10369 
10370 	/*
10371 	 * Step 3: Relocate the bootstrap VM structs
10372 	 *         (including rewriting their content).
10373 	 */
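	/*
	 * Concretely: populate the relocated range, copy the stolen
	 * bootstrap pages into it (fixing up memory tags and KASAN state as
	 * needed), then walk every live element of the early VM zones and
	 * let the VM layer rewrite the pointers they contain through
	 * vm_map_relocate_early_maps() and vm_map_relocate_early_elem().
	 */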
10374 	kma_flags_t flags = KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL;
10375 
10376 #if ZSECURITY_CONFIG(ZONE_TAGGING)
10377 	flags |= KMA_TAG;
10378 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
10379 
10380 
10381 	kernel_memory_populate(reloc_base, early_sz, flags,
10382 	    VM_KERN_MEMORY_OSFMK);
10383 
10384 	vm_memtag_disable_checking();
10385 	__nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
10386 	vm_memtag_enable_checking();
10387 
10388 #if ZSECURITY_CONFIG(ZONE_TAGGING)
10389 	vm_memtag_relocate_tags(reloc_base, early_r.min_address, early_sz);
10390 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
10391 
10392 #if KASAN
10393 	kasan_notify_address(reloc_base, early_sz);
10394 #endif /* KASAN */
10395 
10396 	vm_map_relocate_early_maps(reloc_delta);
10397 
10398 	for (uint32_t i = 0; i < atop(early_sz); i++) {
10399 		zone_id_t zid = new_meta[i].zm_index;
10400 		zone_t z = &zone_array[zid];
10401 		vm_size_t esize = zone_elem_outer_size(z);
10402 		vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
10403 		vm_address_t addr;
10404 
10405 		if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
10406 			continue;
10407 		}
10408 
10409 		for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
10410 			if (zone_meta_is_free(&new_meta[i], eidx)) {
10411 				continue;
10412 			}
10413 
10414 			addr = vm_memtag_load_tag(base + eidx * esize);
10415 #if KASAN_CLASSIC
10416 			kasan_alloc(addr,
10417 			    zone_elem_inner_size(z), zone_elem_inner_size(z),
10418 			    zone_elem_redzone(z), false,
10419 			    __builtin_frame_address(0));
10420 #endif
10421 			vm_map_relocate_early_elem(zid, addr, reloc_delta);
10422 		}
10423 	}
10424 
10425 }
10426 
10427 
10428 __startup_data
10429 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
10430 #if ZSECURITY_CONFIG(READ_ONLY)
10431 	[Z_SUBMAP_IDX_VM]               = 15,
10432 	[Z_SUBMAP_IDX_READ_ONLY]        =  5,
10433 #else
10434 	[Z_SUBMAP_IDX_VM]               = 20,
10435 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10436 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
10437 	[Z_SUBMAP_IDX_GENERAL_0]        = 15,
10438 	[Z_SUBMAP_IDX_GENERAL_1]        = 15,
10439 	[Z_SUBMAP_IDX_GENERAL_2]        = 15,
10440 	[Z_SUBMAP_IDX_GENERAL_3]        = 15,
10441 	[Z_SUBMAP_IDX_DATA]             = 20,
10442 #else
10443 	[Z_SUBMAP_IDX_GENERAL_0]        = 60,
10444 	[Z_SUBMAP_IDX_DATA]             = 20,
10445 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
10446 };
10447 
10448 __startup_func
10449 static inline uint16_t
10450 zone_submap_ratios_denom(void)
10451 {
10452 	uint16_t denom = 0;
10453 
10454 	for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10455 		denom += submap_ratios[idx];
10456 	}
10457 
10458 	assert(denom == 100);
10459 
10460 	return denom;
10461 }
10462 
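/*
 * Highest VA that both pointer packing schemes (compressor slots and
 * packed vm_page pointers) can still encode; zone_set_map_sizes() positions
 * the zone map so that the VM submap, whose elements get pointer-packed,
 * ends below this limit.
 */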
10463 __startup_func
10464 static inline vm_offset_t
10465 zone_restricted_va_max(void)
10466 {
10467 	vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
10468 	vm_offset_t vm_page_max    = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
10469 
10470 	return trunc_page(MIN(compressor_max, vm_page_max));
10471 }
10472 
10473 __startup_func
10474 static void
10475 zone_set_map_sizes(void)
10476 {
10477 	vm_size_t zsize;
10478 	vm_size_t zsizearg;
10479 	vm_size_t zsizearg = 0;   /* stays 0 if the "zsize" boot-arg is absent */
10480 	/*
10481 	 * Compute the physical limits for the zone map
10482 	 */
10483 
10484 	if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
10485 		zsize = zsizearg * (1024ULL * 1024);
10486 	} else {
10487 		/* Set target zone size to 1/4 of physical memory, plus half of that again (3/8 of RAM) */
10488 		zsize = (vm_size_t)(sane_size >> 2);
10489 		zsize += zsize >> 1;
10490 	}
10491 
10492 	if (zsize < CONFIG_ZONE_MAP_MIN) {
10493 		zsize = CONFIG_ZONE_MAP_MIN;   /* Clamp to min */
10494 	}
10495 	if (zsize > sane_size >> 1) {
10496 		zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
10497 	}
10498 	if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
10499 		/* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
10500 		printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
10501 		    (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
10502 		zsize = ZONE_MAP_MAX;
10503 	}
10504 
10505 	zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
10506 
10507 
10508 	/*
10509 	 * Declare restrictions on zone max
10510 	 */
10511 	vm_offset_t vm_submap_size = round_page(
10512 		(submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10513 		zone_submap_ratios_denom()) +
10514 	    zone_submap_left_guard_size(Z_SUBMAP_IDX_VM) +
10515 	    zone_submap_right_guard_size(Z_SUBMAP_IDX_VM);
10516 
10517 #if CONFIG_PROB_GZALLOC
10518 	vm_submap_size += pgz_get_size();
10519 #endif /* CONFIG_PROB_GZALLOC */
10520 	if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10521 	    &zone_map_range.min_address)) {
10522 		zone_map_range.min_address = 0;
10523 	}
10524 
10525 #if MACH_ASSERT
10526 	vm_submap_restriction_size_debug = vm_submap_size;
10527 #endif /* MACH_ASSERT */
10528 
10529 	zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10530 	    sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10531 
10532 	static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10533 	    ZBA_PTR_MASK + 1);
10534 	zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10535 	    (CHAR_BIT * KALLOC_MINSIZE));
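	/*
	 * i.e. one bitmap bit per smallest possible (KALLOC_MINSIZE) element
	 * of wired zone memory, rounded up to a page.
	 */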
10536 
10537 #if VM_TAG_SIZECLASSES
10538 	if (zone_tagging_on) {
10539 		zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10540 	}
10541 	if (zba_xtra_shift) {
10542 		/*
10543 		 * If we need the extra space range, limit the size of the
10544 		 * bitmaps to something reasonable instead of the theoretical
10545 		 * worst case of every zone using the smallest allocation
10546 		 * granule, in order to avoid artificial VA pressure on
10547 		 * other parts of the system.
10548 		 */
10549 		zone_bits_size = round_page(zone_bits_size / 8);
10550 		zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10551 	}
10552 #endif /* VM_TAG_SIZECLASSES */
10553 }
10554 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
10555 
10556 /*
10557  * Can't use zone_info.zi_map_range at this point as it is being used to
10558  * store the range of early pmap memory that was stolen to bootstrap the
10559  * necessary VM zones.
10560  */
10561 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10562 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10563 	return zone_meta_size + zone_bits_size + zone_xtra_size;
10564 });
10565 
10566 /*
10567  * Global initialization of Zone Allocator.
10568  * Runs after zone_bootstrap.
10569  */
10570 __startup_func
10571 static void
10572 zone_init(void)
10573 {
10574 	vm_size_t           remaining_size = ZONE_MAP_VA_SIZE;
10575 	mach_vm_offset_t    submap_min = 0;
10576 	uint64_t            denom = zone_submap_ratios_denom();
10577 	/*
10578 	 * And now allocate the various pieces of VA and submaps.
10579 	 */
10580 
10581 	submap_min = zone_map_range.min_address;
10582 
10583 #if CONFIG_PROB_GZALLOC
10584 	vm_size_t pgz_size = pgz_get_size();
10585 
10586 	vm_map_will_allocate_early_map(&pgz_submap);
10587 	zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10588 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10589 	    VM_KERN_MEMORY_ZONE, &pgz_submap);
10590 
10591 	submap_min     += pgz_size;
10592 	remaining_size -= pgz_size;
10593 #if DEBUG || DEVELOPMENT
10594 	printf("zone_init: pgzalloc  %p:%p (%u%c) [%d slots]\n",
10595 	    (void *)zone_info.zi_pgz_range.min_address,
10596 	    (void *)zone_info.zi_pgz_range.max_address,
10597 	    mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10598 	    pgz_slots);
10599 #endif /* DEBUG || DEVELOPMENT */
10600 #endif /* CONFIG_PROB_GZALLOC */
10601 
10602 	/*
10603 	 * Allocate the submaps
10604 	 */
10605 
10606 	/*
10607 	 * In order to prevent us from throwing off the ratios, deduct VA for the
10608 	 * zone guards ahead of time.
10609 	 */
10610 	for (uint32_t i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
10611 		remaining_size -= zone_submap_left_guard_size(i);
10612 		remaining_size -= zone_submap_right_guard_size(i);
10613 	}
10614 
10615 	for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10616 		if (submap_ratios[idx] == 0) {
10617 			zone_submaps[idx] = VM_MAP_NULL;
10618 		} else {
10619 			zone_submap_init(&submap_min, idx, submap_ratios[idx],
10620 			    &denom, &remaining_size);
10621 		}
10622 	}
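	/*
	 * For illustration, with the non SAD_FENG_SHUI, non READ_ONLY ratios
	 * above, the VM submap gets 20/100 of the remaining VA, the general
	 * submap 60/100 and the data submap 20/100; each zone_submap_init()
	 * call consumes its share and shrinks both the remaining size and
	 * the remaining denominator, so rounding never leaves VA unused.
	 */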
10623 
10624 	zone_metadata_init();
10625 
10626 #if VM_TAG_SIZECLASSES
10627 	if (zone_tagging_on) {
10628 		vm_allocation_zones_init();
10629 	}
10630 #endif /* VM_TAG_SIZECLASSES */
10631 
10632 	zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10633 	    ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10634 
10635 	(void)zone_create_ext("vm.permanent", 1, kma_flags | ZC_NO_TBI_TAG,
10636 	    ZONE_ID_PERMANENT, ^(zone_t z) {
10637 		z->z_permanent = true;
10638 		z->z_elem_size = 1;
10639 	});
10640 	(void)zone_create_ext("vm.permanent.percpu", 1,
10641 	    kma_flags | ZC_PERCPU | ZC_NO_TBI_TAG, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10642 		z->z_permanent = true;
10643 		z->z_elem_size = 1;
10644 	});
10645 
10646 	zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10647 	    zc_mag_size() * sizeof(vm_offset_t),
10648 	    ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10649 	zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10650 
10651 	/*
10652 	 * Now migrate the startup statistics into their final storage,
10653 	 * and enable logging for early zones (that zone_create_ext() skipped).
10654 	 */
10655 	int cpu = cpu_number();
10656 	zone_index_foreach(idx) {
10657 		zone_t tz = &zone_array[idx];
10658 
10659 		if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10660 			zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10661 
10662 			*zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10663 			tz->z_stats = zs;
10664 		}
10665 		if (tz->z_self == tz) {
10666 #if ZALLOC_ENABLE_LOGGING
10667 			zone_setup_logging(tz);
10668 #endif /* ZALLOC_ENABLE_LOGGING */
10669 #if KASAN_TBI
10670 			zone_setup_kasan_logging(tz);
10671 #endif /* KASAN_TBI */
10672 		}
10673 	}
10674 }
10675 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10676 
10677 void
10678 zalloc_iokit_lockdown(void)
10679 {
10680 	zone_share_always = false;
10681 }
10682 
10683 void
10684 zalloc_first_proc_made(void)
10685 {
10686 	zone_caching_disabled = 0;
10687 	zone_early_thres_mul = 1;
10688 }
10689 
10690 __startup_func
10691 vm_offset_t
10692 zone_early_mem_init(vm_size_t size)
10693 {
10694 	vm_offset_t mem;
10695 
10696 	assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10697 
10698 	/*
10699 	 * The zone that is used early to bring up the VM is stolen here.
10700 	 *
10701 	 * When the zone subsystem is actually initialized,
10702 	 * zone_metadata_init() will be called, and those pages
10703 	 * and the elements they contain, will be relocated into
10704 	 * the VM submap (even on architectures where those zones
10705 	 * do not live there).
10706 	 */
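	/*
	 * Until then, the static zone_early_meta_array_startup and
	 * zba_chunk_startup arrays stand in for the real metadata and
	 * bitmap ranges, as set up below.
	 */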
10707 	assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10708 	mem = (vm_offset_t)zone_early_pages_to_cram;
10709 
10710 
10711 	zone_info.zi_meta_base = VM_FAR_ADD_PTR_UNBOUNDED(
10712 		(struct zone_page_metadata *)zone_early_meta_array_startup,
10713 		-(ptrdiff_t)zone_pva_from_addr(mem).packed_address);
10714 	zone_info.zi_map_range.min_address = mem;
10715 	zone_info.zi_map_range.max_address = mem + size;
10716 
10717 	zone_info.zi_bits_range = (struct mach_vm_range){
10718 		.min_address = (mach_vm_offset_t)zba_chunk_startup,
10719 		.max_address = (mach_vm_offset_t)zba_chunk_startup +
10720 	    sizeof(zba_chunk_startup),
10721 	};
10722 
10723 	zba_meta()->zbam_left  = 1;
10724 	zba_meta()->zbam_right = 1;
10725 	zba_init_chunk(0, false);
10726 
10727 	return mem;
10728 }
10729 
10730 #endif /* !ZALLOC_TEST */
10731 #pragma mark - tests
10732 #if DEBUG || DEVELOPMENT
10733 
10734 /*
10735  * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10736  * thread goes through at a time.
10737  *
10738  * Otherwise we can end up with multiple test zones (if a second zinit() comes through
10739  * before zdestroy()), which could lead us to run out of zones.
10740  */
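/*
 * Tests below claim the flag with os_atomic_xchg() on entry (returning
 * EALREADY if it is already set) and clear it with os_atomic_store() on
 * the way out.
 */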
10741 static bool any_zone_test_running = false;
10742 
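/*
 * Gather candidate element addresses for zone_leaks(): walk a page queue
 * and emit one INSTANCE_PUT() pointer per bit set in each chunk's inline
 * (32-bit) or external (64-bit) bitmaps.
 */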
10743 static uintptr_t *
10744 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10745 {
10746 	vm_offset_t elem_size = zone_elem_outer_size(z);
10747 	vm_offset_t base;
10748 	struct zone_page_metadata *meta;
10749 
10750 	while (!zone_pva_is_null(page_index)) {
10751 		base  = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10752 		meta  = zone_pva_to_meta(page_index);
10753 
10754 		if (meta->zm_inline_bitmap) {
10755 			for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10756 				uint32_t map = meta[i].zm_bitmap;
10757 
10758 				for (; map; map &= map - 1) {
10759 					*elems++ = INSTANCE_PUT(base +
10760 					    elem_size * __builtin_clz(map));
10761 				}
10762 				base += elem_size * 32;
10763 			}
10764 		} else {
10765 			uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10766 			bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10767 			for (size_t i = 0; i < (1u << order); i++) {
10768 				uint64_t map = bits[i];
10769 
10770 				for (; map; map &= map - 1) {
10771 					*elems++ = INSTANCE_PUT(base +
10772 					    elem_size * __builtin_clzll(map));
10773 				}
10774 				base += elem_size * 64;
10775 			}
10776 		}
10777 
10778 		page_index = meta->zm_page_next;
10779 	}
10780 	return elems;
10781 }
10782 
10783 kern_return_t
10784 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10785 {
10786 	zone_t        zone = NULL;
10787 	uintptr_t *   array;
10788 	uintptr_t *   next;
10789 	uintptr_t     element;
10790 	uint32_t      idx, count, found;
10791 	uint32_t      nobtcount;
10792 	uint32_t      elemSize;
10793 	size_t        maxElems;
10794 
10795 	zone_foreach(z) {
10796 		if (!z->z_name) {
10797 			continue;
10798 		}
10799 		if (!strncmp(zoneName, z->z_name, nameLen)) {
10800 			zone = z;
10801 			break;
10802 		}
10803 	}
10804 	if (zone == NULL) {
10805 		return KERN_INVALID_NAME;
10806 	}
10807 
10808 	elemSize = (uint32_t)zone_elem_inner_size(zone);
10809 	maxElems = (zone->z_elems_avail + 1) & ~1ul;
10810 
10811 	array = kalloc_type_tag(vm_offset_t, maxElems, Z_WAITOK, VM_KERN_MEMORY_DIAG);
10812 	if (array == NULL) {
10813 		return KERN_RESOURCE_SHORTAGE;
10814 	}
10815 
10816 	zone_lock(zone);
10817 
10818 	next = array;
10819 	next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10820 	next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10821 	count = (uint32_t)(next - array);
10822 
10823 	zone_unlock(zone);
10824 
10825 	zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10826 	assert(found <= count);
10827 
10828 	for (idx = 0; idx < count; idx++) {
10829 		element = array[idx];
10830 		if (kInstanceFlagReferenced & element) {
10831 			continue;
10832 		}
10833 		element = INSTANCE_PUT(element) & ~kInstanceFlags;
10834 	}
10835 
10836 #if ZALLOC_ENABLE_LOGGING
10837 	if (zone->z_btlog && !corruption_debug_flag) {
10838 		// btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10839 		static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10840 		btlog_copy_backtraces_for_elements(zone->z_btlog,
10841 		    (vm_address_t *)array, &count, elemSize, proc);
10842 	}
10843 #endif /* ZALLOC_ENABLE_LOGGING */
10844 
10845 	for (nobtcount = idx = 0; idx < count; idx++) {
10846 		element = array[idx];
10847 		if (!element) {
10848 			continue;
10849 		}
10850 		if (kInstanceFlagReferenced & element) {
10851 			continue;
10852 		}
10853 		nobtcount++;
10854 	}
10855 	if (nobtcount) {
10856 		proc(nobtcount, elemSize, BTREF_NULL);
10857 	}
10858 
10859 	kfree_type(vm_offset_t, maxElems, array);
10860 	return KERN_SUCCESS;
10861 }
10862 
10863 static int
10864 zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
10865 {
10866 	zone_security_flags_t zsflags;
10867 	uint32_t x = 4;
10868 	uint32_t *test_ptr;
10869 
10870 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10871 		printf("zone_ro_basic_test: Test already running.\n");
10872 		return EALREADY;
10873 	}
10874 
10875 	zsflags = zone_security_array[ZONE_ID__FIRST_RO];
10876 
10877 	for (int i = 0; i < 3; i++) {
10878 #if ZSECURITY_CONFIG(READ_ONLY)
10879 		/* Basic Test: Create int zone, zalloc int, modify value, free int */
10880 		printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
10881 		printf("zone_ro_basic_test: create a sub-page size zone\n");
10882 
10883 		printf("zone_ro_basic_test: verify flags were set\n");
10884 		assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
10885 
10886 		printf("zone_ro_basic_test: zalloc an element\n");
10887 		test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10888 		assert(test_ptr);
10889 
10890 		printf("zone_ro_basic_test: verify we can't write to it\n");
10891 		assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10892 
10893 		x = 4;
10894 		printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10895 		zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10896 		assert(test_ptr);
10897 		assert(*(uint32_t*)test_ptr == x);
10898 
10899 		x = 5;
10900 		printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10901 		zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10902 		assert(test_ptr);
10903 		assert(*(uint32_t*)test_ptr == x);
10904 
10905 		printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
10906 		assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);
10907 
10908 		printf("zone_ro_basic_test: free elem\n");
10909 		zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10910 		assert(!test_ptr);
10911 #else
10912 		printf("zone_ro_basic_test: Read-only allocator n/a on 32bit platforms, test functionality of API\n");
10913 
10914 		printf("zone_ro_basic_test: verify flags were set\n");
10915 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
10916 
10917 		printf("zone_ro_basic_test: zalloc an element\n");
10918 		test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
10919 		assert(test_ptr);
10920 
10921 		x = 4;
10922 		printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
10923 		zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
10924 		assert(test_ptr);
10925 		assert(*(uint32_t*)test_ptr == x);
10926 
10927 		x = 5;
10928 		printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
10929 		zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
10930 		assert(test_ptr);
10931 		assert(*(uint32_t*)test_ptr == x);
10932 
10933 		printf("zone_ro_basic_test: free elem\n");
10934 		zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
10935 		assert(!test_ptr);
10936 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
10937 	}
10938 
10939 	printf("zone_ro_basic_test: garbage collection\n");
10940 	zone_gc(ZONE_GC_DRAIN);
10941 
10942 	printf("zone_ro_basic_test: Test passed\n");
10943 
10944 	*out = 1;
10945 	os_atomic_store(&any_zone_test_running, false, relaxed);
10946 	return 0;
10947 }
10948 SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
10949 
10950 static int
10951 zone_basic_test_run(__unused int64_t in, int64_t *out)
10952 {
10953 	static zone_t test_zone_ptr = NULL;
10954 
10955 	unsigned int i = 0, max_iter = 5;
10956 	void * test_ptr;
10957 	zone_t test_zone;
10958 	int rc = 0;
10959 
10960 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
10961 		printf("zone_basic_test: Test already running.\n");
10962 		return EALREADY;
10963 	}
10964 
10965 	printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");
10966 
10967 	/* zinit() and zdestroy() a zone with the same name a bunch of times, verify that we get back the same zone each time */
10968 	do {
10969 		test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
10970 		assert(test_zone);
10971 
10972 #if KASAN_CLASSIC
10973 		if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
10974 #else
10975 		if (test_zone->z_elems_free != 0)
10976 #endif
10977 		{
10978 			printf("zone_basic_test: free count is not zero\n");
10979 			rc = EIO;
10980 			goto out;
10981 		}
10982 
10983 		if (test_zone_ptr == NULL) {
10984 			/* Stash the zone pointer returned on the first zinit */
10985 			printf("zone_basic_test: zone created for the first time\n");
10986 			test_zone_ptr = test_zone;
10987 		} else if (test_zone != test_zone_ptr) {
10988 			printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
10989 			rc = EIO;
10990 			goto out;
10991 		}
10992 
10993 		test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
10994 		zfree(test_zone, test_ptr);
10995 
10996 		zdestroy(test_zone);
10997 		i++;
10998 
10999 		printf("zone_basic_test: Iteration %d successful\n", i);
11000 	} while (i < max_iter);
11001 
11002 #if !KASAN_CLASSIC /* because of the quarantine and redzones */
11003 	/* test Z_VA_SEQUESTER */
11004 	{
11005 		zone_t test_pcpu_zone;
11006 		kern_return_t kr;
11007 		const int num_allocs = 8;
11008 		int idx;
11009 		vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
11010 		void *allocs[num_allocs];
11011 		void **allocs_pcpu;
11012 		vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
11013 
11014 		test_zone = zone_create("test_zone_sysctl", elem_size,
11015 		    ZC_DESTRUCTIBLE);
11016 		assert(test_zone);
11017 
11018 		test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
11019 		    ZC_DESTRUCTIBLE | ZC_PERCPU);
11020 		assert(test_pcpu_zone);
11021 
11022 		for (idx = 0; idx < num_allocs; idx++) {
11023 			allocs[idx] = zalloc(test_zone);
11024 			assert(NULL != allocs[idx]);
11025 			printf("alloc[%d] %p\n", idx, allocs[idx]);
11026 		}
11027 		for (idx = 0; idx < num_allocs; idx++) {
11028 			zfree(test_zone, allocs[idx]);
11029 		}
11030 		assert(!zone_pva_is_null(test_zone->z_pageq_empty));
11031 
11032 		kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
11033 		    KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
11034 		assert(kr == KERN_SUCCESS);
11035 
11036 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11037 			allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
11038 			    Z_WAITOK | Z_ZERO);
11039 			assert(NULL != allocs_pcpu[idx]);
11040 		}
11041 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11042 			zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
11043 		}
11044 		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
11045 
11046 		printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
11047 		    vm_page_wire_count, vm_page_free_count,
11048 		    100L * phys_pages / zone_pages_wired_max);
11049 		zone_gc(ZONE_GC_DRAIN);
11050 		printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
11051 		    vm_page_wire_count, vm_page_free_count,
11052 		    100L * phys_pages / zone_pages_wired_max);
11053 
11054 		unsigned int allva = 0;
11055 
11056 		zone_foreach(z) {
11057 			zone_lock(z);
11058 			allva += z->z_wired_cur;
11059 			if (zone_pva_is_null(z->z_pageq_va)) {
11060 				zone_unlock(z);
11061 				continue;
11062 			}
11063 			unsigned count = 0;
11064 			uint64_t size;
11065 			zone_pva_t pg = z->z_pageq_va;
11066 			struct zone_page_metadata *page_meta;
11067 			while (pg.packed_address) {
11068 				page_meta = zone_pva_to_meta(pg);
11069 				count += z->z_percpu ? 1 : z->z_chunk_pages;
11070 				if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
11071 					count -= page_meta->zm_page_index;
11072 				}
11073 				pg = page_meta->zm_page_next;
11074 			}
11075 			size = zone_size_wired(z);
11076 			if (!size) {
11077 				size = 1;
11078 			}
11079 			printf("%s%s: seq %d, res %d, %qd %%\n",
11080 			    zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
11081 			    z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
11082 			zone_unlock(z);
11083 		}
11084 
11085 		printf("total va: %d\n", allva);
11086 
11087 		assert(zone_pva_is_null(test_zone->z_pageq_empty));
11088 		assert(zone_pva_is_null(test_zone->z_pageq_partial));
11089 		assert(!zone_pva_is_null(test_zone->z_pageq_va));
11090 		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
11091 		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
11092 		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));
11093 
11094 		for (idx = 0; idx < num_allocs; idx++) {
11095 			assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
11096 		}
11097 
11098 		/* make sure the zone is still usable after a GC */
11099 
11100 		for (idx = 0; idx < num_allocs; idx++) {
11101 			allocs[idx] = zalloc(test_zone);
11102 			assert(allocs[idx]);
11103 			printf("alloc[%d] %p\n", idx, allocs[idx]);
11104 		}
11105 		for (idx = 0; idx < num_allocs; idx++) {
11106 			zfree(test_zone, allocs[idx]);
11107 		}
11108 
11109 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11110 			allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
11111 			    Z_WAITOK | Z_ZERO);
11112 			assert(NULL != allocs_pcpu[idx]);
11113 		}
11114 		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
11115 			zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
11116 		}
11117 
11118 		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
11119 
11120 		kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);
11121 
11122 		zdestroy(test_zone);
11123 		zdestroy(test_pcpu_zone);
11124 	}
11125 #endif /* !KASAN_CLASSIC */
11126 
11127 	printf("zone_basic_test: Test passed\n");
11128 
11129 
11130 	*out = 1;
11131 out:
11132 	os_atomic_store(&any_zone_test_running, false, relaxed);
11133 	return rc;
11134 }
11135 SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
11136 
11137 #define N_ALLOCATIONS 100
11138 
11139 static int
11140 run_kalloc_guard_insertion_test(int64_t in __unused, int64_t *out)
11141 {
11142 	size_t alloc_size = 24576;
11143 	uint64_t *ptrs[N_ALLOCATIONS] = { NULL };
11144 	uint32_t n_guard_regions = 0;
11145 	zalloc_flags_t flags = Z_WAITOK | Z_FULLSIZE;
11146 	int retval = 1;
11147 
11148 	*out = 0;
11149 
11150 	for (uint i = 0; i < N_ALLOCATIONS; ++i) {
11151 		uint64_t *data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size,
11152 		    flags, &data_ptr).addr;
11153 		if (!data_ptr) {
11154 			printf("%s: kalloc_ext %zu with owner and Z_FULLSIZE returned null\n",
11155 			    __func__, alloc_size);
11156 			goto cleanup;
11157 		}
11158 		ptrs[i] = data_ptr;
11159 	}
11160 
11161 	/* We don't know where there are guard regions, but let's try to find one. */
11162 	for (uint i = 0; i < N_ALLOCATIONS; i++) {
11163 		vm_address_t addr;
11164 		zone_t z;
11165 		struct zone_page_metadata *meta;
11166 		struct zone_page_metadata *gmeta;
11167 		uint32_t chunk_pages;
11168 
11169 		addr = (vm_address_t)ptrs[i];
11170 		meta = zone_meta_from_addr(addr);
11171 		z = &zone_array[meta->zm_index];
11172 		chunk_pages = z->z_chunk_pages;
11173 
11174 		if (meta->zm_guarded) {
11175 			n_guard_regions++;
11176 			if (meta->zm_chunk_len == chunk_pages) {
11177 				gmeta = meta + chunk_pages;
11178 			} else if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
11179 				gmeta = meta + meta->zm_subchunk_len;
11180 			} else if (meta->zm_chunk_len == ZM_PGZ_GUARD) {
11181 				printf("%s: kalloc_ext gave us address 0x%lx for a guard region.\n",
11182 				    __func__, addr);
11183 				goto cleanup;
11184 			} else if ((meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) && !z->z_percpu) {
11185 				printf("%s: zone [%s%s] is not per-CPU.\n",
11186 				    __func__, zone_heap_name(z), zone_name(z));
11187 				goto cleanup;
11188 			} else {
11189 				printf("%s: zm_chunk_len value not recognized for 0x%lx.\n",
11190 				    __func__, addr);
11191 				goto cleanup;
11192 			}
11193 
11194 			assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
11195 			/* Now check that we have chunk_len of guard pages. */
11196 			for (uint j = 0; j < chunk_pages; j++) {
11197 				if (gmeta->zm_chunk_len != ZM_PGZ_GUARD) {
11198 					printf("%s: page %u / %u is not a guard page.\n",
11199 					    __func__, j + 1, chunk_pages);
11200 					goto cleanup;
11201 				}
11202 				gmeta++;
11203 			}
11204 
11205 			/* The metadata following the guard region should not be a guard page. */
11206 			if (gmeta->zm_chunk_len == ZM_PGZ_GUARD) {
11207 				printf("%s: zone page following guard region is a guard page.\n",
11208 				    __func__);
11209 				goto cleanup;
11210 			}
11211 		}
11212 	}
11213 
11214 	printf("%s: there were %u guard regions in %d allocations.\n",
11215 	    __func__, n_guard_regions, N_ALLOCATIONS);
11216 
11217 	*out = 1;
11218 	retval = 0;
11219 
11220 cleanup:
11221 	for (uint i = 0; i < N_ALLOCATIONS && ptrs[i]; ++i) {
11222 		kfree_ext(KHEAP_DATA_BUFFERS, ptrs[i], alloc_size);
11223 	}
11224 
11225 	return retval;
11226 }
11227 SYSCTL_TEST_REGISTER(kalloc_guard_regions, run_kalloc_guard_insertion_test);
11228 
11229 
11230 struct zone_stress_obj {
11231 	TAILQ_ENTRY(zone_stress_obj) zso_link;
11232 };
11233 
11234 struct zone_stress_ctx {
11235 	thread_t  zsc_leader;
11236 	lck_mtx_t zsc_lock;
11237 	zone_t    zsc_zone;
11238 	uint64_t  zsc_end;
11239 	uint32_t  zsc_workers;
11240 };
11241 
11242 static void
11243 zone_stress_worker(void *arg, wait_result_t __unused wr)
11244 {
11245 	struct zone_stress_ctx *ctx = arg;
11246 	bool leader = ctx->zsc_leader == current_thread();
11247 	TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
11248 	struct zone_bool_gen bg = { };
11249 	struct zone_stress_obj *obj;
11250 	uint32_t allocs = 0;
11251 
11252 	random_bool_init(&bg.zbg_bg);
11253 
11254 	do {
11255 		for (int i = 0; i < 2000; i++) {
11256 			uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
11257 			    bg.zbg_entropy, ZONE_ENTROPY_CNT, 1);
11258 			switch (what) {
11259 			case 0:
11260 			case 1:
11261 				if (allocs < 10000) {
11262 					obj = zalloc(ctx->zsc_zone);
11263 					TAILQ_INSERT_HEAD(&head, obj, zso_link);
11264 					allocs++;
11265 				}
11266 				break;
11267 			case 2:
11268 			case 3:
11269 				if (allocs < 10000) {
11270 					obj = zalloc(ctx->zsc_zone);
11271 					TAILQ_INSERT_TAIL(&head, obj, zso_link);
11272 					allocs++;
11273 				}
11274 				break;
11275 			case 4:
11276 				if (leader) {
11277 					zone_gc(ZONE_GC_DRAIN);
11278 				}
11279 				break;
11280 			case 5:
11281 			case 6:
11282 				if (!TAILQ_EMPTY(&head)) {
11283 					obj = TAILQ_FIRST(&head);
11284 					TAILQ_REMOVE(&head, obj, zso_link);
11285 					zfree(ctx->zsc_zone, obj);
11286 					allocs--;
11287 				}
11288 				break;
11289 			case 7:
11290 				if (!TAILQ_EMPTY(&head)) {
11291 					obj = TAILQ_LAST(&head, zone_stress_head);
11292 					TAILQ_REMOVE(&head, obj, zso_link);
11293 					zfree(ctx->zsc_zone, obj);
11294 					allocs--;
11295 				}
11296 				break;
11297 			}
11298 		}
11299 	} while (mach_absolute_time() < ctx->zsc_end);
11300 
11301 	while (!TAILQ_EMPTY(&head)) {
11302 		obj = TAILQ_FIRST(&head);
11303 		TAILQ_REMOVE(&head, obj, zso_link);
11304 		zfree(ctx->zsc_zone, obj);
11305 	}
11306 
11307 	lck_mtx_lock(&ctx->zsc_lock);
11308 	if (--ctx->zsc_workers == 0) {
11309 		thread_wakeup(ctx);
11310 	} else if (leader) {
11311 		while (ctx->zsc_workers) {
11312 			lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
11313 			    THREAD_UNINT);
11314 		}
11315 	}
11316 	lck_mtx_unlock(&ctx->zsc_lock);
11317 
11318 	if (!leader) {
11319 		thread_terminate_self();
11320 		__builtin_unreachable();
11321 	}
11322 }
11323 
11324 static int
11325 zone_stress_test_run(__unused int64_t in, int64_t *out)
11326 {
11327 	struct zone_stress_ctx ctx = {
11328 		.zsc_leader  = current_thread(),
11329 		.zsc_workers = 3,
11330 	};
11331 	kern_return_t kr;
11332 	thread_t th;
11333 
11334 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
11335 		printf("zone_stress_test: Test already running.\n");
11336 		return EALREADY;
11337 	}
11338 
11339 	lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
11340 	ctx.zsc_zone = zone_create("test_zone_344", 344,
11341 	    ZC_DESTRUCTIBLE | ZC_NOCACHING);
11342 	assert(ctx.zsc_zone->z_chunk_pages > 1);
11343 
11344 	clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);
11345 
11346 	printf("zone_stress_test: Starting (leader %p)\n", current_thread());
11347 
11348 	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
11349 
11350 	for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
11351 		kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
11352 		    BASEPRI_DEFAULT, &th);
11353 		if (kr == KERN_SUCCESS) {
11354 			printf("zone_stress_test: thread %d: %p\n", i, th);
11355 			thread_deallocate(th);
11356 		} else {
11357 			ctx.zsc_workers--;
11358 		}
11359 	}
11360 
11361 	zone_stress_worker(&ctx, 0);
11362 
11363 	lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);
11364 
11365 	zdestroy(ctx.zsc_zone);
11366 
11367 	printf("zone_stress_test: Done\n");
11368 
11369 	*out = 1;
11370 	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
11371 	os_atomic_store(&any_zone_test_running, false, relaxed);
11372 	return 0;
11373 }
11374 SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);
11375 
11376 struct zone_gc_stress_obj {
11377 	STAILQ_ENTRY(zone_gc_stress_obj) zgso_link;
11378 	uintptr_t                        zgso_pad[63];
11379 };
11380 STAILQ_HEAD(zone_gc_stress_head, zone_gc_stress_obj);
11381 
11382 #define ZONE_GC_OBJ_PER_PAGE  (PAGE_SIZE / sizeof(struct zone_gc_stress_obj))
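/*
 * Each object is 64 pointers wide (512 bytes on LP64), so this works out
 * to 8 objects per 4K page, or 32 per 16K page.
 */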
11383 
11384 KALLOC_TYPE_DEFINE(zone_gc_stress_zone, struct zone_gc_stress_obj, KT_DEFAULT);
11385 
11386 struct zone_gc_stress_ctx {
11387 	bool      zgsc_done;
11388 	lck_mtx_t zgsc_lock;
11389 	zone_t    zgsc_zone;
11390 	uint64_t  zgsc_end;
11391 	uint32_t  zgsc_workers;
11392 };
11393 
11394 static void
11395 zone_gc_stress_test_alloc_n(struct zone_gc_stress_head *head, size_t n)
11396 {
11397 	struct zone_gc_stress_obj *obj;
11398 
11399 	for (size_t i = 0; i < n; i++) {
11400 		obj = zalloc_flags(zone_gc_stress_zone, Z_WAITOK);
11401 		STAILQ_INSERT_TAIL(head, obj, zgso_link);
11402 	}
11403 }
11404 
11405 static void
11406 zone_gc_stress_test_free_n(struct zone_gc_stress_head *head)
11407 {
11408 	struct zone_gc_stress_obj *obj;
11409 
11410 	while ((obj = STAILQ_FIRST(head))) {
11411 		STAILQ_REMOVE_HEAD(head, zgso_link);
11412 		zfree(zone_gc_stress_zone, obj);
11413 	}
11414 }
11415 
11416 __dead2
11417 static void
11418 zone_gc_stress_worker(void *arg, wait_result_t __unused wr)
11419 {
11420 	struct zone_gc_stress_ctx *ctx = arg;
11421 	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11422 
11423 	while (!ctx->zgsc_done) {
11424 		zone_gc_stress_test_alloc_n(&head, ZONE_GC_OBJ_PER_PAGE * 4);
11425 		zone_gc_stress_test_free_n(&head);
11426 	}
11427 
11428 	lck_mtx_lock(&ctx->zgsc_lock);
11429 	if (--ctx->zgsc_workers == 0) {
11430 		thread_wakeup(ctx);
11431 	}
11432 	lck_mtx_unlock(&ctx->zgsc_lock);
11433 
11434 	thread_terminate_self();
11435 	__builtin_unreachable();
11436 }
11437 
11438 static int
11439 zone_gc_stress_test_run(__unused int64_t in, int64_t *out)
11440 {
11441 	struct zone_gc_stress_head head = STAILQ_HEAD_INITIALIZER(head);
11442 	struct zone_gc_stress_ctx ctx = {
11443 		.zgsc_workers = 3,
11444 	};
11445 	kern_return_t kr;
11446 	thread_t th;
11447 
11448 	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
11449 		printf("zone_gc_stress_test: Test already running.\n");
11450 		return EALREADY;
11451 	}
11452 
11453 	lck_mtx_init(&ctx.zgsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
11454 	lck_mtx_lock(&ctx.zgsc_lock);
11455 
11456 	printf("zone_gc_stress_test: Starting (leader %p)\n", current_thread());
11457 
11458 	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);
11459 
11460 	for (uint32_t i = 0; i < ctx.zgsc_workers; i++) {
11461 		kr = kernel_thread_start_priority(zone_gc_stress_worker, &ctx,
11462 		    BASEPRI_DEFAULT, &th);
11463 		if (kr == KERN_SUCCESS) {
11464 			printf("zone_gc_stress_test: thread %d: %p\n", i, th);
11465 			thread_deallocate(th);
11466 		} else {
11467 			ctx.zgsc_workers--;
11468 		}
11469 	}
11470 
11471 	for (uint64_t i = 0; i < in; i++) {
11472 		size_t count = zc_mag_size() * zc_free_batch_size() * 10;
11473 
11474 		if (count < ZONE_GC_OBJ_PER_PAGE * 20) {
11475 			count = ZONE_GC_OBJ_PER_PAGE * 20;
11476 		}
11477 
11478 		zone_gc_stress_test_alloc_n(&head, count);
11479 		zone_gc_stress_test_free_n(&head);
11480 
11481 		lck_mtx_lock(&zone_gc_lock);
11482 		zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11483 		    ZONE_RECLAIM_TRIM);
11484 		lck_mtx_unlock(&zone_gc_lock);
11485 
11486 		printf("zone_gc_stress_test: round %lld/%lld\n", i + 1, in);
11487 	}
11488 
11489 	os_atomic_thread_fence(seq_cst);
11490 	ctx.zgsc_done = true;
11491 	lck_mtx_sleep(&ctx.zgsc_lock, LCK_SLEEP_DEFAULT, &ctx, THREAD_UNINT);
11492 	lck_mtx_unlock(&ctx.zgsc_lock);
11493 
11494 	lck_mtx_destroy(&ctx.zgsc_lock, &zone_locks_grp);
11495 
11496 	lck_mtx_lock(&zone_gc_lock);
11497 	zone_reclaim(zone_gc_stress_zone->kt_zv.zv_zone,
11498 	    ZONE_RECLAIM_DRAIN);
11499 	lck_mtx_unlock(&zone_gc_lock);
11500 
11501 	printf("zone_gc_stress_test: Done\n");
11502 
11503 	*out = 1;
11504 	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
11505 	os_atomic_store(&any_zone_test_running, false, relaxed);
11506 	return 0;
11507 }
11508 SYSCTL_TEST_REGISTER(zone_gc_stress_test, zone_gc_stress_test_run);
11509 
11510 /*
11511  * Routines to test that zone garbage collection and zone replenish threads
11512  * running at the same time don't cause problems.
11513  */
11514 
11515 static int
11516 zone_gc_replenish_test(__unused int64_t in, int64_t *out)
11517 {
11518 	zone_gc(ZONE_GC_DRAIN);
11519 	*out = 1;
11520 	return 0;
11521 }
11522 SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);
11523 
11524 static int
11525 zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
11526 {
11527 	zone_t z = vm_map_entry_zone;
11528 	struct data { struct data *next; } *node, *list = NULL;
11529 
11530 	if (z == NULL) {
11531 		printf("Couldn't find a replenish zone\n");
11532 		return EIO;
11533 	}
11534 
11535 	/* big enough to go past replenishment */
11536 	for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
11537 		node = zalloc(z);
11538 		node->next = list;
11539 		list = node;
11540 	}
11541 
11542 	/*
11543 	 * release the memory we allocated
11544 	 */
11545 	while (list != NULL) {
11546 		node = list;
11547 		list = list->next;
11548 		zfree(z, node);
11549 	}
11550 
11551 	*out = 1;
11552 	return 0;
11553 }
11554 SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);
11555 
11556 
11557 #endif /* DEBUG || DEVELOPMENT */
11558