xref: /xnu-8796.101.5/osfmk/kern/zalloc.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	kern/zalloc.c
60  *	Author:	Avadis Tevanian, Jr.
61  *
62  *	Zone-based memory allocator.  A zone is a collection of fixed size
63  *	data blocks for which quick allocation/deallocation is possible.
64  */
65 
66 #define ZALLOC_ALLOW_DEPRECATED 1
67 #if !ZALLOC_TEST
68 #include <mach/mach_types.h>
69 #include <mach/vm_param.h>
70 #include <mach/kern_return.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/task_server.h>
73 #include <mach/machine/vm_types.h>
74 #include <machine/machine_routines.h>
75 #include <mach/vm_map.h>
76 #include <mach/sdt.h>
77 #if __x86_64__
78 #include <i386/cpuid.h>
79 #endif
80 
81 #include <kern/bits.h>
82 #include <kern/btlog.h>
83 #include <kern/startup.h>
84 #include <kern/kern_types.h>
85 #include <kern/assert.h>
86 #include <kern/backtrace.h>
87 #include <kern/host.h>
88 #include <kern/macro_help.h>
89 #include <kern/sched.h>
90 #include <kern/locks.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/thread_call.h>
94 #include <kern/zalloc_internal.h>
95 #include <kern/kalloc.h>
96 #include <kern/debug.h>
97 
98 #include <prng/random.h>
99 
100 #include <vm/pmap.h>
101 #include <vm/vm_map.h>
102 #include <vm/vm_kern.h>
103 #include <vm/vm_page.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_compressor.h> /* C_SLOT_PACKED_PTR* */
106 
107 #include <pexpert/pexpert.h>
108 
109 #include <machine/machparam.h>
110 #include <machine/machine_routines.h>  /* ml_cpu_get_info */
111 
112 #include <os/atomic.h>
113 
114 #include <libkern/OSDebug.h>
115 #include <libkern/OSAtomic.h>
116 #include <libkern/section_keywords.h>
117 #include <sys/kdebug.h>
118 #include <sys/code_signing.h>
119 
120 #include <san/kasan.h>
121 #include <libsa/stdlib.h>
122 #include <sys/errno.h>
123 
124 #include <IOKit/IOBSD.h>
125 #include <arm64/amcc_rorgn.h>
126 
127 #if DEBUG
128 #define z_debug_assert(expr)  assert(expr)
129 #else
130 #define z_debug_assert(expr)  (void)(expr)
131 #endif
132 
133 /* Returns pid of the task with the largest number of VM map entries.  */
134 extern pid_t find_largest_process_vm_map_entries(void);
135 
136 /*
137  * Callout to jetsam. If pid is -1, we wake up the memorystatus thread to do asynchronous kills.
138  * For any other pid we try to kill that process synchronously.
139  */
140 extern boolean_t memorystatus_kill_on_zone_map_exhaustion(pid_t pid);
141 
142 extern zone_t vm_object_zone;
143 extern zone_t ipc_service_port_label_zone;
144 
145 ZONE_DEFINE_TYPE(percpu_u64_zone, "percpu.64", uint64_t,
146     ZC_PERCPU | ZC_ALIGNMENT_REQUIRED | ZC_KASAN_NOREDZONE);
147 
148 #if KASAN_TBI
149 #define ZONE_MIN_ELEM_SIZE      (sizeof(uint64_t) * 2)
150 #define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
151 #else /* KASAN_TBI */
152 #define ZONE_MIN_ELEM_SIZE      sizeof(uint64_t)
153 #define ZONE_ALIGN_SIZE         ZONE_MIN_ELEM_SIZE
154 #endif /* KASAN_TBI */
155 
156 #define ZONE_MAX_ALLOC_SIZE     (32 * 1024)
157 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
158 #define ZONE_CHUNK_ALLOC_SIZE   (256 * 1024)
159 #define ZONE_GUARD_DENSE        (32  * 1024)
160 #define ZONE_GUARD_SPARSE       (64  * 1024)
161 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
162 
163 #if XNU_PLATFORM_MacOSX
164 #define ZONE_MAP_MAX            (32ULL << 30)
165 #define ZONE_MAP_VA_SIZE        (128ULL << 30)
166 #else /* XNU_PLATFORM_MacOSX */
167 #define ZONE_MAP_MAX            (8ULL << 30)
168 #define ZONE_MAP_VA_SIZE        (24ULL << 30)
169 #endif /* !XNU_PLATFORM_MacOSX */
170 
171 __enum_closed_decl(zm_len_t, uint16_t, {
172 	ZM_CHUNK_FREE           = 0x0,
173 	/* 1 through 8 are valid lengths */
174 	ZM_CHUNK_LEN_MAX        = 0x8,
175 
176 	/* PGZ magical values */
177 	ZM_PGZ_FREE             = 0x0,
178 	ZM_PGZ_ALLOCATED        = 0xa, /* [a]llocated   */
179 	ZM_PGZ_GUARD            = 0xb, /* oo[b]         */
180 	ZM_PGZ_DOUBLE_FREE      = 0xd, /* [d]ouble_free */
181 
182 	/* secondary page markers */
183 	ZM_SECONDARY_PAGE       = 0xe,
184 	ZM_SECONDARY_PCPU_PAGE  = 0xf,
185 });
186 
187 static_assert(MAX_ZONES < (1u << 10), "MAX_ZONES must fit in zm_index");
188 
189 struct zone_page_metadata {
190 	union {
191 		struct {
192 			/* The index of the zone this metadata page belongs to */
193 			zone_id_t       zm_index : 10;
194 
195 			/*
196 			 * This chunk ends with a guard page.
197 			 */
198 			uint16_t        zm_guarded : 1;
199 
200 			/*
201 			 * Whether `zm_bitmap` is an inline bitmap
202 			 * or a packed bitmap reference
203 			 */
204 			uint16_t        zm_inline_bitmap : 1;
205 
206 			/*
207 			 * Zones allocate in "chunks" of zone_t::z_chunk_pages
208 			 * consecutive pages, or zpercpu_count() pages if the
209 			 * zone is percpu.
210 			 *
211 			 * The first page of it has its metadata set with:
212 			 * - 0 if none of the pages are currently wired
213 			 * - the number of wired pages in the chunk
214 			 *   (not scaled for percpu).
215 			 *
216 			 * Other pages in the chunk have their zm_chunk_len set
217 			 * to ZM_SECONDARY_PAGE or ZM_SECONDARY_PCPU_PAGE
218 			 * depending on whether the zone is percpu or not.
219 			 * For those, zm_page_index holds the index of that page
220 			 * in the run, and zm_subchunk_len the remaining length
221 			 * within the chunk.
222 			 *
223 			 * Metadata used for PGZ pages can have 3 values:
224 			 * - ZM_PGZ_FREE:         slot is free
225 			 * - ZM_PGZ_ALLOCATED:    slot holds an allocated element
226 			 *                        at offset (zm_pgz_orig_addr & PAGE_MASK)
227 			 * - ZM_PGZ_DOUBLE_FREE:  slot detected a double free
228 			 *                        (will panic).
229 			 */
230 			zm_len_t        zm_chunk_len : 4;
231 		};
232 		uint16_t zm_bits;
233 	};
234 
235 	union {
236 #define ZM_ALLOC_SIZE_LOCK      1u
237 		uint16_t zm_alloc_size; /* first page only */
238 		struct {
239 			uint8_t zm_page_index;   /* secondary pages only */
240 			uint8_t zm_subchunk_len; /* secondary pages only */
241 		};
242 		uint16_t zm_oob_offs;   /* in guard pages  */
243 	};
244 	union {
245 		uint32_t zm_bitmap;     /* most zones      */
246 		uint32_t zm_bump;       /* permanent zones */
247 	};
248 
249 	union {
250 		struct {
251 			zone_pva_t      zm_page_next;
252 			zone_pva_t      zm_page_prev;
253 		};
254 		vm_offset_t zm_pgz_orig_addr;
255 		struct zone_page_metadata *zm_pgz_slot_next;
256 	};
257 };
258 static_assert(sizeof(struct zone_page_metadata) == 16, "validate packing");
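
/*
 * Editor's note: the helper below is an illustrative sketch, not part of
 * the original source. It shows how the encoding rules documented above
 * let any page's metadata be walked back to the head of its chunk.
 */
#if 0 /* example only */
static struct zone_page_metadata *
zm_chunk_head_example(struct zone_page_metadata *meta)
{
	/* secondary pages record their position in the run in zm_page_index */
	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE ||
	    meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
		meta -= meta->zm_page_index;
	}
	/* the head page has zm_chunk_len in 0..ZM_CHUNK_LEN_MAX */
	assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
	return meta;
}
#endif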
259 
260 /*!
261  * @typedef zone_magazine_t
262  *
263  * @brief
264  * Magazine of cached allocations.
265  *
266  * @field zm_next       linkage used by magazine depots.
267  * @field zm_elems      an array of @c zc_mag_size() elements.
268  */
269 struct zone_magazine {
270 	zone_magazine_t         zm_next;
271 	smr_seq_t               zm_seq;
272 	vm_offset_t             zm_elems[0];
273 };
274 
275 /*!
276  * @typedef zone_cache_t
277  *
278  * @brief
279  * Per-CPU caching layer for zone allocations.
280  *
281  * @discussion
282  * Below is a diagram of the caching system. This design is inspired by the
283  * paper "Magazines and Vmem: Extending the Slab Allocator to Many CPUs and
284  * Arbitrary Resources" by Jeff Bonwick and Jonathan Adams and the FreeBSD UMA
285  * zone allocator (itself derived from this seminal work).
286  *
287  * It is divided into 3 layers:
288  * - the per-cpu layer,
289  * - the recirculation depot layer,
290  * - the Zone Allocator.
291  *
292  * The per-cpu and recirculation depot layer use magazines (@c zone_magazine_t),
293  * which are stacks of up to @c zc_mag_size() elements.
294  *
295  * <h2>CPU layer</h2>
296  *
297  * The CPU layer (@c zone_cache_t) looks like this:
298  *
299  *      ╭─ a ─ f ─┬───────── zm_depot ──────────╮
300  *      │ ╭─╮ ╭─╮ │ ╭─╮ ╭─╮ ╭─╮ ╭─╮ ╭─╮         │
301  *      │ │#│ │#│ │ │#│ │#│ │#│ │#│ │#│         │
302  *      │ │#│ │ │ │ │#│ │#│ │#│ │#│ │#│         │
303  *      │ │ │ │ │ │ │#│ │#│ │#│ │#│ │#│         │
304  *      │ ╰─╯ ╰─╯ │ ╰─╯ ╰─╯ ╰─╯ ╰─╯ ╰─╯         │
305  *      ╰─────────┴─────────────────────────────╯
306  *
307  * It has two pre-loaded magazines (a)lloc and (f)ree which we allocate from,
308  * or free to. Serialization is achieved through disabling preemption, and only
309  * the current CPU can access those allocations. This is represented on the left
310  * hand side of the diagram above.
311  *
312  * The right hand side is the per-cpu depot. It consists of @c zm_depot_count
313  * full magazines, and is protected by the @c zm_depot_lock for access.
314  * The lock is expected to absolutely never be contended, as only the local CPU
315  * tends to access the local per-cpu depot in regular operation mode.
316  *
317  * However unlike UMA, our implementation allows for the zone GC to reclaim
318  * per-CPU magazines aggressively, which is serialized with the @c zm_depot_lock.
319  *
320  *
321  * <h2>Recirculation Depot</h2>
322  *
323  * The recirculation depot layer is a list similar to the per-cpu depot,
324  * however it is different in two fundamental ways:
325  *
326  * - it is protected by the regular zone lock,
327  * - elements referenced by the magazines in that layer appear free
328  *   to the zone layer.
329  *
330  *
331  * <h2>Magazine circulation and sizing</h2>
332  *
333  * The caching system sizes itself dynamically. Operations that allocate/free
334  * a single element call @c zone_lock_nopreempt_check_contention() which records
335  * contention on the lock by doing a trylock and recording its success.
336  *
337  * This information is stored in the @c z_recirc_cont_cur field of the zone,
338  * and a windowed moving average is maintained in @c z_contention_wma.
339  * The periodically run function @c compute_zone_working_set_size() will then
340  * take this into account to decide to grow the number of buckets allowed
341  * in the depot or shrink it based on the @c zc_grow_level and @c zc_shrink_level
342  * thresholds.
343  *
344  * The per-cpu layer will attempt to work with its depot, finding both full and
345  * empty magazines cached there. If it can't get what it needs, then it will
346  * mediate with the zone recirculation layer. Such recirculation is done in
347  * batches in order to amortize lock holds.
348  * (See @c {zalloc,zfree}_cached_depot_recirculate()).
349  *
350  * The recirculation layer keeps track of the minimum number of magazines
351  * it held over time for each of the full and empty queues. This allows
352  * @c compute_zone_working_set_size() to return memory to the system when a zone
353  * stops being used as much.
354  *
355  * <h2>Security considerations</h2>
356  *
357  * The zone caching layer has been designed to avoid returning elements in
358  * a strict LIFO behavior: @c zalloc() will allocate from the (a) magazine,
359  * and @c zfree() free to the (f) magazine, and only swap them when the
360  * requested operation cannot be fulfilled.
361  *
362  * The per-cpu overflow depot or the recirculation depots are similarly used
363  * in FIFO order.
364  *
365  * @field zc_depot_lock     a lock to access @c zc_depot, @c zc_depot_cur.
366  * @field zc_alloc_cur      denormalized number of elements in the (a) magazine
367  * @field zc_free_cur       denormalized number of elements in the (f) magazine
368  * @field zc_alloc_elems    a pointer to the array of elements in (a)
369  * @field zc_free_elems     a pointer to the array of elements in (f)
370  *
371  * @field zc_depot          a list of @c zc_depot_cur full magazines
372  */
373 typedef struct zone_cache {
374 	hw_lck_ticket_t            zc_depot_lock;
375 	uint16_t                   zc_alloc_cur;
376 	uint16_t                   zc_free_cur;
377 	vm_offset_t               *zc_alloc_elems;
378 	vm_offset_t               *zc_free_elems;
379 	struct zone_depot          zc_depot;
380 	smr_t                      zc_smr;
381 	zone_smr_free_cb_t XNU_PTRAUTH_SIGNED_FUNCTION_PTR("zc_free") zc_free;
382 } __attribute__((aligned(64))) * zone_cache_t;
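
/*
 * Editor's note: hypothetical sketch (not in the original source) of the
 * per-CPU fast path described above, showing the (a)/(f) magazine swap
 * that avoids strict LIFO reuse. It runs with preemption disabled; the
 * real logic lives in zalloc/zfree.
 */
#if 0 /* example only */
static vm_offset_t
zone_cache_alloc_example(zone_cache_t zc)
{
	if (zc->zc_alloc_cur == 0) {
		if (zc->zc_free_cur == 0) {
			return 0; /* fall back to the depot / zone layer */
		}
		/* swap the exhausted (a) magazine with the (f) magazine */
		vm_offset_t *elems = zc->zc_alloc_elems;
		uint16_t     cur   = zc->zc_alloc_cur;

		zc->zc_alloc_elems = zc->zc_free_elems;
		zc->zc_alloc_cur   = zc->zc_free_cur;
		zc->zc_free_elems  = elems;
		zc->zc_free_cur    = cur;
	}
	return zc->zc_alloc_elems[--zc->zc_alloc_cur];
}
#endif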
383 
384 #if !__x86_64__
385 static
386 #endif
387 __security_const_late struct {
388 	struct mach_vm_range       zi_map_range;  /* all zone submaps     */
389 	struct mach_vm_range       zi_ro_range;   /* read-only range      */
390 	struct mach_vm_range       zi_meta_range; /* debugging only       */
391 	struct mach_vm_range       zi_bits_range; /* bits buddy allocator */
392 	struct mach_vm_range       zi_xtra_range; /* vm tracking metadata */
393 	struct mach_vm_range       zi_pgz_range;
394 	struct zone_page_metadata *zi_pgz_meta;
395 
396 	/*
397 	 * The metadata lives within the zi_meta_range address range.
398 	 *
399 	 * The correct formula to find a metadata index is:
400 	 *     absolute_page_index - page_index(zi_map_range.min_address)
401 	 *
402 	 * And then this index is used to dereference zi_meta_range.min_address
403 	 * as a `struct zone_page_metadata` array.
404 	 *
405 	 * To avoid redoing that subtraction in the various fast-paths,
406 	 * zi_meta_base is pre-offset with that minimum page index, so the
407 	 * math doesn't have to be repeated on every lookup.
408 	 */
409 	struct zone_page_metadata *zi_meta_base;
410 } zone_info;
411 
412 __startup_data static struct mach_vm_range  zone_map_range;
413 __startup_data static vm_map_size_t         zone_meta_size;
414 __startup_data static vm_map_size_t         zone_bits_size;
415 __startup_data static vm_map_size_t         zone_xtra_size;
416 
417 /*
418  * Initial array of metadata for stolen memory.
419  *
420  * The numbers here have to be kept in sync with vm_map_steal_memory()
421  * so that we have reserved enough metadata.
422  *
423  * After zone_init() has run (which happens while the kernel is still single
424  * threaded), the metadata is moved to its final dynamic location, and
425  * this array is unmapped with the rest of __startup_data at lockdown.
426  */
427 #define ZONE_EARLY_META_INLINE_COUNT    64
428 __startup_data
429 static struct zone_page_metadata
430     zone_early_meta_array_startup[ZONE_EARLY_META_INLINE_COUNT];
431 
432 #if __x86_64__
433 /*
434  * On Intel we can't "free" pmap stolen pages,
435  * so instead we use a static array in __KLDDATA
436  * which gets reclaimed at lockdown time.
437  */
438 __startup_data __attribute__((aligned(PAGE_SIZE)))
439 static uint8_t zone_early_pages_to_cram[PAGE_SIZE * 16];
440 #endif
441 
442 /*
443  *	The zone_locks_grp allows for collecting lock statistics.
444  *	All locks are associated with this group in zinit.
445  *	Look at tools/lockstat for debugging lock contention.
446  */
447 LCK_GRP_DECLARE(zone_locks_grp, "zone_locks");
448 static LCK_MTX_DECLARE(zone_metadata_region_lck, &zone_locks_grp);
449 
450 /*
451  *	The zone metadata lock protects:
452  *	- metadata faulting,
453  *	- VM submap VA allocations,
454  *	- early gap page queue list
455  */
456 #define zone_meta_lock()   lck_mtx_lock(&zone_metadata_region_lck);
457 #define zone_meta_unlock() lck_mtx_unlock(&zone_metadata_region_lck);
458 
459 /*
460  *	Exclude more than one concurrent garbage collection
461  */
462 static LCK_GRP_DECLARE(zone_gc_lck_grp, "zone_gc");
463 static LCK_MTX_DECLARE(zone_gc_lock, &zone_gc_lck_grp);
464 static LCK_SPIN_DECLARE(zone_exhausted_lock, &zone_gc_lck_grp);
465 
466 /*
467  * Panic logging metadata
468  */
469 bool panic_include_zprint = false;
470 bool panic_include_kalloc_types = false;
471 zone_t kalloc_type_src_zone = ZONE_NULL;
472 zone_t kalloc_type_dst_zone = ZONE_NULL;
473 mach_memory_info_t *panic_kext_memory_info = NULL;
474 vm_size_t panic_kext_memory_size = 0;
475 vm_offset_t panic_fault_address = 0;
476 
477 /*
478  *      Protects zone_array, num_zones, num_zones_in_use, and
479  *      zone_destroyed_bitmap
480  */
481 static SIMPLE_LOCK_DECLARE(all_zones_lock, 0);
482 static zone_id_t        num_zones_in_use;
483 zone_id_t _Atomic       num_zones;
484 SECURITY_READ_ONLY_LATE(unsigned int) zone_view_count;
485 
486 /*
487  * Initial globals for zone stats until we can allocate the real ones.
488  * Those get migrated into the per-CPU ones during zone_init() and
489  * this array is unmapped with the rest of __startup_data at lockdown.
490  */
491 
492 /* zone to allocate zone_magazine structs from */
493 static SECURITY_READ_ONLY_LATE(zone_t) zc_magazine_zone;
494 /*
495  * Zone caching stays off until pid 1 is made, and until
496  * compute_zone_working_set_size() has run for the first time.
497  *
498  * -1 represents the "never enabled yet" value.
499  */
500 static int8_t zone_caching_disabled = -1;
501 
502 __startup_data
503 static struct zone_stats zone_stats_startup[MAX_ZONES];
504 struct zone              zone_array[MAX_ZONES];
505 SECURITY_READ_ONLY_LATE(zone_security_flags_t) zone_security_array[MAX_ZONES] = {
506 	[0 ... MAX_ZONES - 1] = {
507 		.z_kheap_id       = KHEAP_ID_NONE,
508 		.z_noencrypt      = false,
509 		.z_submap_idx     = Z_SUBMAP_IDX_GENERAL_0,
510 		.z_kalloc_type    = false,
511 	},
512 };
513 SECURITY_READ_ONLY_LATE(struct zone_size_params) zone_ro_size_params[ZONE_ID__LAST_RO + 1];
514 SECURITY_READ_ONLY_LATE(zone_cache_ops_t) zcache_ops[ZONE_ID__FIRST_DYNAMIC];
515 
516 /* Initialized in zone_bootstrap(), how many "copies" the per-cpu system does */
517 static SECURITY_READ_ONLY_LATE(unsigned) zpercpu_early_count;
518 
519 /* Used to keep track of destroyed slots in the zone_array */
520 static bitmap_t zone_destroyed_bitmap[BITMAP_LEN(MAX_ZONES)];
521 
522 /* number of zone mapped pages used by all zones */
523 static size_t _Atomic zone_pages_jetsam_threshold = ~0;
524 size_t zone_pages_wired;
525 size_t zone_guard_pages;
526 
527 /* Time (in ms) after which we panic for zone exhaustions */
528 TUNABLE(int, zone_exhausted_timeout, "zet", 5000);
529 
530 #if VM_TAG_SIZECLASSES
531 /*
532  * Zone tagging allows for per "tag" accounting of allocations for the kalloc
533  * zones only.
534  *
535  * There are 3 kinds of tags that can be used:
536  * - pre-registered VM_KERN_MEMORY_*
537  * - dynamic tags allocated per call sites in core-kernel (using vm_tag_alloc())
538  * - per-kext tags computed by IOKit (using the magic Z_VM_TAG_BT_BIT marker).
539  *
540  * The VM tracks the statistics in lazily allocated structures.
541  * See vm_tag_will_update_zone(), vm_tag_update_zone_size().
542  *
543  * If for some reason the requested tag cannot be accounted for,
544  * the tag is forced to VM_KERN_MEMORY_KALLOC which is pre-allocated.
545  *
546  * Each allocated element also remembers the tag it was assigned,
547  * which lets zalloc/zfree update statistics correctly.
548  */
549 
550 /* enable tags for zones that ask for it */
551 static TUNABLE(bool, zone_tagging_on, "-zt", false);
552 
553 /*
554  * Array of all sizeclasses used by kalloc variants so that we can
555  * have accounting per size class for each kalloc callsite
556  */
557 static uint16_t zone_tags_sizeclasses[VM_TAG_SIZECLASSES];
558 #endif /* VM_TAG_SIZECLASSES */
559 
560 #if DEBUG || DEVELOPMENT
561 static int zalloc_simulate_vm_pressure;
562 #endif /* DEBUG || DEVELOPMENT */
563 
564 #define Z_TUNABLE(t, n, d) \
565 	TUNABLE(t, _##n, #n, d); \
566 	__pure2 static inline t n(void) { return _##n; }
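
/*
 * Editor's note: for example, Z_TUNABLE(uint16_t, zc_mag_size, 8) below
 * expands (per the macro above) to:
 *
 *     TUNABLE(uint16_t, _zc_mag_size, "zc_mag_size", 8);
 *     __pure2 static inline uint16_t zc_mag_size(void) { return _zc_mag_size; }
 *
 * i.e. a boot-arg backed variable plus a constant-foldable accessor.
 */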
567 
568 /*
569  * Zone caching tunables
570  *
571  * zc_mag_size():
572  *   size of magazines, larger to reduce contention at the expense of memory
573  *
574  * zc_enable_level
575  *   number of contentions per second after which zone caching engages
576  *   automatically.
577  *
578  *   0 to disable.
579  *
580  * zc_grow_level
581  *   number of contentions per second x cpu after which the number of magazines
582  *   allowed in the depot can grow. (in "Z_WMA_UNIT" units).
583  *
584  * zc_shrink_level
585  *   number of contentions per second x cpu below which the number of magazines
586  *   allowed in the depot will shrink. (in "Z_WMA_UNIT" units).
587  *
588  * zc_pcpu_max
589  *   maximum memory size in bytes that can hang from a CPU,
590  *   which will affect how many magazines are allowed in the depot.
591  *
592  *   The alloc/free magazines are assumed to be on average half-empty
593  *   and to count for "1" unit of magazines.
594  *
595  * zc_autotrim_size
596  *   Size allowed to hang extra from the recirculation depot before
597  *   auto-trim kicks in.
598  *
599  * zc_autotrim_buckets
600  *
601  *   How many buckets in excess of the working-set are allowed
602  *   before auto-trim kicks in for empty buckets.
603  *
604  * zc_free_batch_size
605  *   The size of batches of frees/reclaim that can be done keeping
606  *   the zone lock held (and preemption disabled).
607  */
608 Z_TUNABLE(uint16_t, zc_mag_size, 8);
609 static Z_TUNABLE(uint32_t, zc_enable_level, 10);
610 static Z_TUNABLE(uint32_t, zc_grow_level, 5 * Z_WMA_UNIT);
611 static Z_TUNABLE(uint32_t, zc_shrink_level, Z_WMA_UNIT / 2);
612 static Z_TUNABLE(uint32_t, zc_pcpu_max, 128 << 10);
613 static Z_TUNABLE(uint32_t, zc_autotrim_size, 16 << 10);
614 static Z_TUNABLE(uint32_t, zc_autotrim_buckets, 8);
615 static Z_TUNABLE(uint32_t, zc_free_batch_size, 256);
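
/*
 * Editor's note, illustrative boot-args (values hypothetical):
 *
 *     zc_mag_size=16 zc_pcpu_max=262144 zc_free_batch_size=512
 *
 * each name matches the strings generated by Z_TUNABLE above.
 */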
616 
617 static SECURITY_READ_ONLY_LATE(size_t)    zone_pages_wired_max;
618 static SECURITY_READ_ONLY_LATE(vm_map_t)  zone_submaps[Z_SUBMAP_IDX_COUNT];
619 static SECURITY_READ_ONLY_LATE(vm_map_t)  zone_meta_map;
620 static char const * const zone_submaps_names[Z_SUBMAP_IDX_COUNT] = {
621 	[Z_SUBMAP_IDX_VM]               = "VM",
622 	[Z_SUBMAP_IDX_READ_ONLY]        = "RO",
623 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
624 	[Z_SUBMAP_IDX_GENERAL_0]        = "GEN0",
625 	[Z_SUBMAP_IDX_GENERAL_1]        = "GEN1",
626 	[Z_SUBMAP_IDX_GENERAL_2]        = "GEN2",
627 	[Z_SUBMAP_IDX_GENERAL_3]        = "GEN3",
628 #else
629 	[Z_SUBMAP_IDX_GENERAL_0]        = "GEN",
630 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
631 	[Z_SUBMAP_IDX_DATA]             = "DATA",
632 };
633 
634 #if __x86_64__
635 #define ZONE_ENTROPY_CNT 8
636 #else
637 #define ZONE_ENTROPY_CNT 2
638 #endif
639 static struct zone_bool_gen {
640 	struct bool_gen zbg_bg;
641 	uint32_t zbg_entropy[ZONE_ENTROPY_CNT];
642 } zone_bool_gen[MAX_CPUS];
643 
644 #if CONFIG_PROB_GZALLOC
645 /*
646  * Probabilistic gzalloc
647  * =====================
648  *
649  *
650  * Probabilistic guard zalloc samples allocations and will protect them by
651  * double-mapping the page holding them and returning the secondary virtual
652  * address to its callers.
653  *
654  * Its data structures are lazily allocated if the `pgz` or `pgz1` boot-args
655  * are set.
656  *
657  *
658  * Unlike GZalloc, PGZ uses a fixed amount of memory, and is compatible with
659  * most zalloc/kalloc features:
660  * - zone_require is functional
661  * - zone caching or zone tagging is compatible
662  * - non-blocking allocations work (with gzalloc they always return NULL).
663  *
664  * PGZ limitations:
665  * - VA sequestering isn't respected, as the slots (which are in limited
666  *   quantity) will be reused for any type, however the PGZ quarantine
667  *   somewhat mitigates the impact.
668  * - zones with elements larger than a page cannot be protected.
669  *
670  *
671  * Tunables:
672  * --------
673  *
674  * pgz=1:
675  *   Turn on probabilistic guard malloc for all zones
676  *
677  *   (default on for DEVELOPMENT, off for RELEASE, or if pgz1... are specified)
678  *
679  * pgz_sample_rate=0 to 2^31
680  *   average sample rate between two guarded allocations.
681  *   0 means every allocation.
682  *
683  *   The default is a random number between 1000 and 10,000
684  *
685  * pgz_slots
686  *   how many allocations to protect.
687  *
688  *   Each costs:
689  *   - a PTE in the pmap (when allocated)
690  *   - 2 zone page meta's (every other page is a "guard" one, 32B total)
691  *   - 64 bytes per backtrace.
692  *   On LP64 this is <16K per 100 slots.
693  *
694  *   The default is ~200 slots per G of physical ram (32k / G)
695  *
696  *   TODO:
697  *   - try harder to allocate elements at the "end" to catch OOB more reliably.
698  *
699  * pgz_quarantine
700  *   how many slots should be free at any given time.
701  *
702  *   PGZ will round robin through free slots to be reused, but free slots are
703  *   important to detect use-after-free by acting as a quarantine.
704  *
705  *   By default, PGZ will keep 33% of the slots around at all times.
706  *
707  * pgz1=<name>, pgz2=<name>, ..., pgzn=<name>...
708  *   Specific zones for which to enable probabilistic guard malloc.
709  *   There must be no numbering gap (names after the gap will be ignored).
710  */
711 #if DEBUG || DEVELOPMENT
712 static TUNABLE(bool, pgz_all, "pgz", true);
713 #else
714 static TUNABLE(bool, pgz_all, "pgz", false);
715 #endif
716 static TUNABLE(uint32_t, pgz_sample_rate, "pgz_sample_rate", 0);
717 static TUNABLE(uint32_t, pgz_slots, "pgz_slots", UINT32_MAX);
718 static TUNABLE(uint32_t, pgz_quarantine, "pgz_quarantine", 0);
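
/*
 * Editor's note, illustrative boot-args (names/values hypothetical): to
 * protect two specific zones with a 1-in-500 sampling rate, one could use:
 *
 *     pgz1=<zone name> pgz2=<zone name> pgz_sample_rate=500
 *
 * whereas pgz=1 samples all eligible zones.
 */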
719 #endif /* CONFIG_PROB_GZALLOC */
720 
721 static zone_t zone_find_largest(uint64_t *zone_size);
722 
723 #endif /* !ZALLOC_TEST */
724 #pragma mark Zone metadata
725 #if !ZALLOC_TEST
726 
727 static inline bool
728 zone_has_index(zone_t z, zone_id_t zid)
729 {
730 	return zone_array + zid == z;
731 }
732 
733 __abortlike
734 void
735 zone_invalid_panic(zone_t zone)
736 {
737 	panic("zone %p isn't in the zone_array", zone);
738 }
739 
740 __abortlike
741 static void
742 zone_metadata_corruption(zone_t zone, struct zone_page_metadata *meta,
743     const char *kind)
744 {
745 	panic("zone metadata corruption: %s (meta %p, zone %s%s)",
746 	    kind, meta, zone_heap_name(zone), zone->z_name);
747 }
748 
749 __abortlike
750 static void
751 zone_invalid_element_addr_panic(zone_t zone, vm_offset_t addr)
752 {
753 	panic("zone element pointer validation failed (addr: %p, zone %s%s)",
754 	    (void *)addr, zone_heap_name(zone), zone->z_name);
755 }
756 
757 __abortlike
758 static void
759 zone_page_metadata_index_confusion_panic(zone_t zone, vm_offset_t addr,
760     struct zone_page_metadata *meta)
761 {
762 	zone_security_flags_t zsflags = zone_security_config(zone), src_zsflags;
763 	zone_id_t zidx;
764 	zone_t src_zone;
765 
766 	if (zsflags.z_kalloc_type) {
767 		panic_include_kalloc_types = true;
768 		kalloc_type_dst_zone = zone;
769 	}
770 
771 	zidx = meta->zm_index;
772 	if (zidx >= os_atomic_load(&num_zones, relaxed)) {
773 		panic("%p expected in zone %s%s[%d], but metadata has invalid zidx: %d",
774 		    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
775 		    zidx);
776 	}
777 
778 	src_zone = &zone_array[zidx];
779 	src_zsflags = zone_security_array[zidx];
780 	if (src_zsflags.z_kalloc_type) {
781 		panic_include_kalloc_types = true;
782 		kalloc_type_src_zone = src_zone;
783 	}
784 
785 	panic("%p not in the expected zone %s%s[%d], but found in %s%s[%d]",
786 	    (void *)addr, zone_heap_name(zone), zone->z_name, zone_index(zone),
787 	    zone_heap_name(src_zone), src_zone->z_name, zidx);
788 }
789 
790 __abortlike
791 static void
792 zone_page_metadata_list_corruption(zone_t zone, struct zone_page_metadata *meta)
793 {
794 	panic("metadata list corruption through element %p detected in zone %s%s",
795 	    meta, zone_heap_name(zone), zone->z_name);
796 }
797 
798 __abortlike
799 static void
800 zone_page_meta_accounting_panic(zone_t zone, struct zone_page_metadata *meta,
801     const char *kind)
802 {
803 	panic("accounting mismatch (%s) for zone %s%s, meta %p", kind,
804 	    zone_heap_name(zone), zone->z_name, meta);
805 }
806 
807 __abortlike
808 static void
809 zone_meta_double_free_panic(zone_t zone, vm_offset_t addr, const char *caller)
810 {
811 	panic("%s: double free of %p to zone %s%s", caller,
812 	    (void *)addr, zone_heap_name(zone), zone->z_name);
813 }
814 
815 __abortlike
816 static void
817 zone_accounting_panic(zone_t zone, const char *kind)
818 {
819 	panic("accounting mismatch (%s) for zone %s%s", kind,
820 	    zone_heap_name(zone), zone->z_name);
821 }
822 
823 #define zone_counter_sub(z, stat, value)  ({ \
824 	if (os_sub_overflow((z)->stat, value, &(z)->stat)) { \
825 	    zone_accounting_panic(z, #stat " wrap-around"); \
826 	} \
827 	(z)->stat; \
828 })
829 
830 static inline uint16_t
831 zone_meta_alloc_size_add(zone_t z, struct zone_page_metadata *m,
832     vm_offset_t esize)
833 {
834 	if (os_add_overflow(m->zm_alloc_size, (uint16_t)esize, &m->zm_alloc_size)) {
835 		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
836 	}
837 	return m->zm_alloc_size;
838 }
839 
840 static inline uint16_t
841 zone_meta_alloc_size_sub(zone_t z, struct zone_page_metadata *m,
842     vm_offset_t esize)
843 {
844 	if (os_sub_overflow(m->zm_alloc_size, esize, &m->zm_alloc_size)) {
845 		zone_page_meta_accounting_panic(z, m, "alloc_size wrap-around");
846 	}
847 	return m->zm_alloc_size;
848 }
849 
850 __abortlike
851 static void
852 zone_nofail_panic(zone_t zone)
853 {
854 	panic("zalloc(Z_NOFAIL) can't be satisfied for zone %s%s (potential leak)",
855 	    zone_heap_name(zone), zone->z_name);
856 }
857 
858 __header_always_inline bool
859 zone_spans_ro_va(vm_offset_t addr_start, vm_offset_t addr_end)
860 {
861 	const struct mach_vm_range *ro_r = &zone_info.zi_ro_range;
862 	struct mach_vm_range r = { addr_start, addr_end };
863 
864 	return mach_vm_range_intersects(ro_r, &r);
865 }
866 
867 #define from_range(r, addr, size) \
868 	__builtin_choose_expr(__builtin_constant_p(size) ? (size) == 1 : 0, \
869 	mach_vm_range_contains(r, (mach_vm_offset_t)(addr)), \
870 	mach_vm_range_contains(r, (mach_vm_offset_t)(addr), size))
871 
872 #define from_ro_map(addr, size) \
873 	from_range(&zone_info.zi_ro_range, addr, size)
874 
875 #define from_zone_map(addr, size) \
876 	from_range(&zone_info.zi_map_range, addr, size)
877 
878 __header_always_inline bool
879 zone_pva_is_null(zone_pva_t page)
880 {
881 	return page.packed_address == 0;
882 }
883 
884 __header_always_inline bool
885 zone_pva_is_queue(zone_pva_t page)
886 {
887 	// actual kernel pages have the top bit set
888 	return (int32_t)page.packed_address > 0;
889 }
890 
891 __header_always_inline bool
892 zone_pva_is_equal(zone_pva_t pva1, zone_pva_t pva2)
893 {
894 	return pva1.packed_address == pva2.packed_address;
895 }
896 
897 __header_always_inline zone_pva_t *
898 zone_pageq_base(void)
899 {
900 	extern zone_pva_t data_seg_start[] __SEGMENT_START_SYM("__DATA");
901 
902 	/*
903 	 * `-1` so that if the first __DATA variable is a page queue,
904 	 * it gets a non-zero index
905 	 */
906 	return data_seg_start - 1;
907 }
908 
909 __header_always_inline void
910 zone_queue_set_head(zone_t z, zone_pva_t queue, zone_pva_t oldv,
911     struct zone_page_metadata *meta)
912 {
913 	zone_pva_t *queue_head = &zone_pageq_base()[queue.packed_address];
914 
915 	if (!zone_pva_is_equal(*queue_head, oldv)) {
916 		zone_page_metadata_list_corruption(z, meta);
917 	}
918 	*queue_head = meta->zm_page_next;
919 }
920 
921 __header_always_inline zone_pva_t
922 zone_queue_encode(zone_pva_t *headp)
923 {
924 	return (zone_pva_t){ (uint32_t)(headp - zone_pageq_base()) };
925 }
926 
927 __header_always_inline zone_pva_t
928 zone_pva_from_addr(vm_address_t addr)
929 {
930 	// cannot use atop() because we want to maintain the sign bit
931 	return (zone_pva_t){ (uint32_t)((intptr_t)addr >> PAGE_SHIFT) };
932 }
933 
934 __header_always_inline vm_address_t
935 zone_pva_to_addr(zone_pva_t page)
936 {
937 	// cause sign extension so that we end up with the right address
938 	return (vm_offset_t)(int32_t)page.packed_address << PAGE_SHIFT;
939 }
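
/*
 * Editor's worked example (assuming a 64-bit kernel and 4K pages):
 *   addr = 0xffffff8012345000
 *   encode: (intptr_t)addr >> 12   = 0xfffffffff8012345 (arithmetic shift)
 *           (uint32_t) truncation  = 0xf8012345 (top bit set: a real page)
 *   decode: (int32_t)0xf8012345 sign-extends, and << 12 restores
 *           0xffffff8012345000.
 * Queue heads instead pack small positive __DATA offsets, which is what
 * the "> 0" test in zone_pva_is_queue() distinguishes.
 */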
940 
941 __header_always_inline struct zone_page_metadata *
942 zone_pva_to_meta(zone_pva_t page)
943 {
944 	return &zone_info.zi_meta_base[page.packed_address];
945 }
946 
947 __header_always_inline zone_pva_t
948 zone_pva_from_meta(struct zone_page_metadata *meta)
949 {
950 	return (zone_pva_t){ (uint32_t)(meta - zone_info.zi_meta_base) };
951 }
952 
953 __header_always_inline struct zone_page_metadata *
954 zone_meta_from_addr(vm_offset_t addr)
955 {
956 	return zone_pva_to_meta(zone_pva_from_addr(addr));
957 }
958 
959 __header_always_inline zone_id_t
960 zone_index_from_ptr(const void *ptr)
961 {
962 	return zone_pva_to_meta(zone_pva_from_addr((vm_offset_t)ptr))->zm_index;
963 }
964 
965 __header_always_inline vm_offset_t
966 zone_meta_to_addr(struct zone_page_metadata *meta)
967 {
968 	return ptoa((int32_t)(meta - zone_info.zi_meta_base));
969 }
970 
971 __attribute__((overloadable))
972 __header_always_inline void
973 zone_meta_validate(zone_t z, struct zone_page_metadata *meta, vm_address_t addr)
974 {
975 	if (!zone_has_index(z, meta->zm_index)) {
976 		zone_page_metadata_index_confusion_panic(z, addr, meta);
977 	}
978 }
979 
980 __attribute__((overloadable))
981 __header_always_inline void
982 zone_meta_validate(zone_t z, struct zone_page_metadata *meta)
983 {
984 	zone_meta_validate(z, meta, zone_meta_to_addr(meta));
985 }
986 
987 __header_always_inline void
988 zone_meta_queue_push(zone_t z, zone_pva_t *headp,
989     struct zone_page_metadata *meta)
990 {
991 	zone_pva_t head = *headp;
992 	zone_pva_t queue_pva = zone_queue_encode(headp);
993 	struct zone_page_metadata *tmp;
994 
995 	meta->zm_page_next = head;
996 	if (!zone_pva_is_null(head)) {
997 		tmp = zone_pva_to_meta(head);
998 		if (!zone_pva_is_equal(tmp->zm_page_prev, queue_pva)) {
999 			zone_page_metadata_list_corruption(z, meta);
1000 		}
1001 		tmp->zm_page_prev = zone_pva_from_meta(meta);
1002 	}
1003 	meta->zm_page_prev = queue_pva;
1004 	*headp = zone_pva_from_meta(meta);
1005 }
1006 
1007 __header_always_inline struct zone_page_metadata *
1008 zone_meta_queue_pop(zone_t z, zone_pva_t *headp)
1009 {
1010 	zone_pva_t head = *headp;
1011 	struct zone_page_metadata *meta = zone_pva_to_meta(head);
1012 	struct zone_page_metadata *tmp;
1013 
1014 	zone_meta_validate(z, meta);
1015 
1016 	if (!zone_pva_is_null(meta->zm_page_next)) {
1017 		tmp = zone_pva_to_meta(meta->zm_page_next);
1018 		if (!zone_pva_is_equal(tmp->zm_page_prev, head)) {
1019 			zone_page_metadata_list_corruption(z, meta);
1020 		}
1021 		tmp->zm_page_prev = meta->zm_page_prev;
1022 	}
1023 	*headp = meta->zm_page_next;
1024 
1025 	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1026 
1027 	return meta;
1028 }
1029 
1030 __header_always_inline void
1031 zone_meta_remqueue(zone_t z, struct zone_page_metadata *meta)
1032 {
1033 	zone_pva_t meta_pva = zone_pva_from_meta(meta);
1034 	struct zone_page_metadata *tmp;
1035 
1036 	if (!zone_pva_is_null(meta->zm_page_next)) {
1037 		tmp = zone_pva_to_meta(meta->zm_page_next);
1038 		if (!zone_pva_is_equal(tmp->zm_page_prev, meta_pva)) {
1039 			zone_page_metadata_list_corruption(z, meta);
1040 		}
1041 		tmp->zm_page_prev = meta->zm_page_prev;
1042 	}
1043 	if (zone_pva_is_queue(meta->zm_page_prev)) {
1044 		zone_queue_set_head(z, meta->zm_page_prev, meta_pva, meta);
1045 	} else {
1046 		tmp = zone_pva_to_meta(meta->zm_page_prev);
1047 		if (!zone_pva_is_equal(tmp->zm_page_next, meta_pva)) {
1048 			zone_page_metadata_list_corruption(z, meta);
1049 		}
1050 		tmp->zm_page_next = meta->zm_page_next;
1051 	}
1052 
1053 	meta->zm_page_next = meta->zm_page_prev = (zone_pva_t){ 0 };
1054 }
1055 
1056 __header_always_inline void
1057 zone_meta_requeue(zone_t z, zone_pva_t *headp,
1058     struct zone_page_metadata *meta)
1059 {
1060 	zone_meta_remqueue(z, meta);
1061 	zone_meta_queue_push(z, headp, meta);
1062 }
1063 
1064 /* prevents a given metadata from ever reaching the z_pageq_empty queue */
1065 static inline void
1066 zone_meta_lock_in_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1067 {
1068 	uint16_t new_size = zone_meta_alloc_size_add(z, m, ZM_ALLOC_SIZE_LOCK);
1069 
1070 	assert(new_size % sizeof(vm_offset_t) == ZM_ALLOC_SIZE_LOCK);
1071 	if (new_size == ZM_ALLOC_SIZE_LOCK) {
1072 		zone_meta_requeue(z, &z->z_pageq_partial, m);
1073 		zone_counter_sub(z, z_wired_empty, len);
1074 	}
1075 }
1076 
1077 /* allows a given metadata to reach the z_pageq_empty queue again */
1078 static inline void
1079 zone_meta_unlock_from_partial(zone_t z, struct zone_page_metadata *m, uint32_t len)
1080 {
1081 	uint16_t new_size = zone_meta_alloc_size_sub(z, m, ZM_ALLOC_SIZE_LOCK);
1082 
1083 	assert(new_size % sizeof(vm_offset_t) == 0);
1084 	if (new_size == 0) {
1085 		zone_meta_requeue(z, &z->z_pageq_empty, m);
1086 		z->z_wired_empty += len;
1087 	}
1088 }
1089 
1090 /*
1091  * Routine to populate a page backing metadata in the zone_metadata_region.
1092  * Must be called without the zone lock held as it might potentially block.
1093  */
1094 static void
1095 zone_meta_populate(vm_offset_t base, vm_size_t size)
1096 {
1097 	struct zone_page_metadata *from = zone_meta_from_addr(base);
1098 	struct zone_page_metadata *to   = from + atop(size);
1099 	vm_offset_t page_addr = trunc_page(from);
1100 
1101 	for (; page_addr < (vm_offset_t)to; page_addr += PAGE_SIZE) {
1102 #if !KASAN
1103 		/*
1104 		 * This can race with another thread doing a populate on the same metadata
1105 		 * page, where we see an updated pmap but unmapped KASan shadow, causing a
1106 		 * fault in the shadow when we first access the metadata page. Avoid this
1107 		 * by always synchronizing on the zone_metadata_region lock with KASan.
1108 		 */
1109 		if (pmap_find_phys(kernel_pmap, page_addr)) {
1110 			continue;
1111 		}
1112 #endif
1113 
1114 		for (;;) {
1115 			kern_return_t ret = KERN_SUCCESS;
1116 
1117 			/*
1118 			 * All updates to the zone_metadata_region are done
1119 			 * under the zone_metadata_region_lck
1120 			 */
1121 			zone_meta_lock();
1122 			if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
1123 				ret = kernel_memory_populate(page_addr,
1124 				    PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
1125 				    VM_KERN_MEMORY_OSFMK);
1126 			}
1127 			zone_meta_unlock();
1128 
1129 			if (ret == KERN_SUCCESS) {
1130 				break;
1131 			}
1132 
1133 			/*
1134 			 * Waiting for pages under a global lock leads to bad system
1135 			 * deadlocks (hence KMA_NOPAGEWAIT above), so if the allocation failed,
1136 			 * we need to do the VM_PAGE_WAIT() outside of the lock.
1137 			 */
1138 			VM_PAGE_WAIT();
1139 		}
1140 	}
1141 }
1142 
1143 __abortlike
1144 static void
1145 zone_invalid_element_panic(zone_t zone, vm_offset_t addr)
1146 {
1147 	struct zone_page_metadata *meta;
1148 	const char *from_cache = "";
1149 	vm_offset_t page;
1150 
1151 	if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1152 		panic("addr %p being freed to zone %s%s%s, isn't from zone map",
1153 		    (void *)addr, zone_heap_name(zone), zone->z_name, from_cache);
1154 	}
1155 	page = trunc_page(addr);
1156 	meta = zone_meta_from_addr(addr);
1157 
1158 	if (!zone_has_index(zone, meta->zm_index)) {
1159 		zone_page_metadata_index_confusion_panic(zone, addr, meta);
1160 	}
1161 
1162 	if (meta->zm_chunk_len == ZM_SECONDARY_PCPU_PAGE) {
1163 		panic("metadata %p corresponding to addr %p being freed to "
1164 		    "zone %s%s%s, is marked as secondary per cpu page",
1165 		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1166 		    from_cache);
1167 	}
1168 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1169 		page -= ptoa(meta->zm_page_index);
1170 		meta -= meta->zm_page_index;
1171 	}
1172 
1173 	if (meta->zm_chunk_len > ZM_CHUNK_LEN_MAX) {
1174 		panic("metadata %p corresponding to addr %p being freed to "
1175 		    "zone %s%s%s, has chunk len greater than max",
1176 		    meta, (void *)addr, zone_heap_name(zone), zone->z_name,
1177 		    from_cache);
1178 	}
1179 
1180 	if ((addr - zone_elem_inner_offs(zone) - page) % zone_elem_outer_size(zone)) {
1181 		panic("addr %p being freed to zone %s%s%s, isn't aligned to "
1182 		    "zone element size", (void *)addr, zone_heap_name(zone),
1183 		    zone->z_name, from_cache);
1184 	}
1185 
1186 	zone_invalid_element_addr_panic(zone, addr);
1187 }
1188 
1189 __attribute__((always_inline))
1190 static struct zone_page_metadata *
1191 zone_element_resolve(
1192 	zone_t                  zone,
1193 	vm_offset_t             addr,
1194 	vm_offset_t            *idx)
1195 {
1196 	struct zone_page_metadata *meta;
1197 	vm_offset_t offs, eidx;
1198 
1199 	meta = zone_meta_from_addr(addr);
1200 	if (!from_zone_map(addr, 1) || !zone_has_index(zone, meta->zm_index)) {
1201 		zone_invalid_element_panic(zone, addr);
1202 	}
1203 
1204 	offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1205 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1206 		offs += ptoa(meta->zm_page_index);
1207 		meta -= meta->zm_page_index;
1208 	}
1209 
1210 	eidx = Z_FAST_QUO(offs, zone->z_quo_magic);
1211 	if (eidx * zone_elem_outer_size(zone) != offs) {
1212 		zone_invalid_element_panic(zone, addr);
1213 	}
1214 
1215 	*idx = eidx;
1216 	return meta;
1217 }
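
/*
 * Editor's worked example: for a zone with a 48-byte outer element size,
 * offs == 96 yields eidx == 2 and 2 * 48 == 96, so the address resolves;
 * an interior pointer with offs == 100 also yields eidx == 2, but
 * 2 * 48 != 100, so zone_invalid_element_panic() fires. Z_FAST_QUO divides
 * by the element size with a precomputed reciprocal (z_quo_magic), so the
 * fast path never issues a hardware divide.
 */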
1218 
1219 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1220 void *
1221 zone_element_pgz_oob_adjust(void *ptr, vm_size_t req_size, vm_size_t elem_size)
1222 {
1223 	vm_offset_t addr = (vm_offset_t)ptr;
1224 	vm_offset_t end = addr + elem_size;
1225 	vm_offset_t offs;
1226 
1227 	/*
1228 	 * 0-sized allocations in a KALLOC_MINSIZE bucket
1229 	 * would be offset to the next allocation, which is incorrect.
1230 	 */
1231 	req_size = MAX(roundup(req_size, KALLOC_MINALIGN), KALLOC_MINALIGN);
1232 
1233 	/*
1234 	 * Given how chunks work, for a zone with PGZ guards on,
1235 	 * there's a single element which ends precisely
1236 	 * at the page boundary: the last one.
1237 	 */
1238 	if (req_size == elem_size ||
1239 	    (end & PAGE_MASK) ||
1240 	    !zone_meta_from_addr(addr)->zm_guarded) {
1241 		return ptr;
1242 	}
1243 
1244 	offs = elem_size - req_size;
1245 	zone_meta_from_addr(end)->zm_oob_offs = (uint16_t)offs;
1246 
1247 	return (char *)addr + offs;
1248 }
1249 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
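
/*
 * Editor's worked example (assuming KALLOC_MINALIGN == 16): for a guarded
 * 512-byte element whose end sits exactly on a page boundary, a 200-byte
 * request rounds up to 208, so offs = 512 - 208 = 304 and the caller gets
 * addr + 304. The allocation now ends flush against the guard page, and
 * the shift is stashed in the guard metadata's zm_oob_offs so that
 * zone_element_size() can recover the element start on free.
 */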
1250 
1251 __abortlike
1252 static void
1253 zone_element_bounds_check_panic(vm_address_t addr, vm_size_t len)
1254 {
1255 	struct zone_page_metadata *meta;
1256 	vm_offset_t offs, size, page;
1257 	zone_t      zone;
1258 
1259 	page = trunc_page(addr);
1260 	meta = zone_meta_from_addr(addr);
1261 	zone = &zone_array[meta->zm_index];
1262 
1263 	if (zone->z_percpu) {
1264 		panic("zone bound checks: address %p is a per-cpu allocation",
1265 		    (void *)addr);
1266 	}
1267 
1268 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1269 		page -= ptoa(meta->zm_page_index);
1270 		meta -= meta->zm_page_index;
1271 	}
1272 
1273 	size = zone_elem_outer_size(zone);
1274 	offs = Z_FAST_MOD(addr - zone_elem_inner_offs(zone) - page + size,
1275 	    zone->z_quo_magic, size);
1276 	panic("zone bound checks: buffer %p of length %zd overflows "
1277 	    "object %p of size %zd in zone %p[%s%s]",
1278 	    (void *)addr, len, (void *)(addr - offs - zone_elem_redzone(zone)),
1279 	    zone_elem_inner_size(zone), zone, zone_heap_name(zone), zone_name(zone));
1280 }
1281 
1282 void
1283 zone_element_bounds_check(vm_address_t addr, vm_size_t len)
1284 {
1285 	struct zone_page_metadata *meta;
1286 	vm_offset_t offs, size;
1287 	zone_t      zone;
1288 
1289 	if (!from_zone_map(addr, 1)) {
1290 		return;
1291 	}
1292 
1293 #if CONFIG_PROB_GZALLOC
1294 	if (__improbable(pgz_owned(addr))) {
1295 		meta = zone_meta_from_addr(addr);
1296 		addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
1297 	}
1298 #endif /* CONFIG_PROB_GZALLOC */
1299 	meta = zone_meta_from_addr(addr);
1300 	zone = zone_by_id(meta->zm_index);
1301 
1302 	if (zone->z_percpu) {
1303 		zone_element_bounds_check_panic(addr, len);
1304 	}
1305 
1306 	if (zone->z_permanent) {
1307 		/* We don't know bounds for those */
1308 		return;
1309 	}
1310 
1311 	offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
1312 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1313 		offs += ptoa(meta->zm_page_index);
1314 	}
1315 	size = zone_elem_outer_size(zone);
1316 	offs = Z_FAST_MOD(offs + size, zone->z_quo_magic, size);
1317 	if (len + zone_elem_redzone(zone) > size - offs) {
1318 		zone_element_bounds_check_panic(addr, len);
1319 	}
1320 }
1321 
1322 /*
1323  * Routine to get the size of a zone allocated address.
1324  * If the address doesnt belong to the zone maps, returns 0.
1325  */
1326 vm_size_t
1327 zone_element_size(void *elem, zone_t *z, bool clear_oob, vm_offset_t *oob_offs)
1328 {
1329 	vm_address_t addr = (vm_address_t)elem;
1330 	struct zone_page_metadata *meta;
1331 	vm_size_t esize, offs, end;
1332 	zone_t zone;
1333 
1334 	if (from_zone_map(addr, sizeof(void *))) {
1335 		meta  = zone_meta_from_addr(addr);
1336 		zone  = zone_by_id(meta->zm_index);
1337 		esize = zone_elem_inner_size(zone);
1338 		end   = addr + esize;
1339 		offs  = 0;
1340 
1341 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1342 		/*
1343 		 * If the chunk uses guards, and (addr + esize)
1344 		 * either crosses a page boundary or is at the boundary,
1345 		 * we need to look harder.
1346 		 */
1347 		if (oob_offs && meta->zm_guarded && atop(addr ^ end)) {
1348 			/*
1349 			 * Because in the vast majority of cases the element
1350 			 * size is sub-page, and meta[1] is necessarily faulted,
1351 			 * we can quickly peek at whether it's a guard.
1352 			 *
1353 			 * For elements larger than a page, finding the guard
1354 			 * page requires a little more effort.
1355 			 */
1356 			if (meta[1].zm_chunk_len == ZM_PGZ_GUARD) {
1357 				offs = meta[1].zm_oob_offs;
1358 				if (clear_oob) {
1359 					meta[1].zm_oob_offs = 0;
1360 				}
1361 			} else if (esize > PAGE_SIZE) {
1362 				struct zone_page_metadata *gmeta;
1363 
1364 				if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
1365 					gmeta = meta + meta->zm_subchunk_len;
1366 				} else {
1367 					gmeta = meta + zone->z_chunk_pages;
1368 				}
1369 				assert(gmeta->zm_chunk_len == ZM_PGZ_GUARD);
1370 
1371 				if (end >= zone_meta_to_addr(gmeta)) {
1372 					offs = gmeta->zm_oob_offs;
1373 					if (clear_oob) {
1374 						gmeta->zm_oob_offs = 0;
1375 					}
1376 				}
1377 			}
1378 		}
1379 #else
1380 #pragma unused(end, clear_oob)
1381 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1382 
1383 		if (oob_offs) {
1384 			*oob_offs = offs;
1385 		}
1386 		if (z) {
1387 			*z = zone;
1388 		}
1389 		return esize;
1390 	}
1391 
1392 	if (oob_offs) {
1393 		*oob_offs = 0;
1394 	}
1395 
1396 	return 0;
1397 }
1398 
1399 zone_id_t
1400 zone_id_for_element(void *addr, vm_size_t esize)
1401 {
1402 	zone_id_t zid = ZONE_ID_INVALID;
1403 	if (from_zone_map(addr, esize)) {
1404 		zid = zone_index_from_ptr(addr);
1405 		__builtin_assume(zid != ZONE_ID_INVALID);
1406 	}
1407 	return zid;
1408 }
1409 
1410 /* This function just formats the reason for the panics by redoing the checks */
1411 __abortlike
1412 static void
1413 zone_require_panic(zone_t zone, void *addr)
1414 {
1415 	uint32_t zindex;
1416 	zone_t other;
1417 
1418 	if (!from_zone_map(addr, zone_elem_inner_size(zone))) {
1419 		panic("zone_require failed: address not in a zone (addr: %p)", addr);
1420 	}
1421 
1422 	zindex = zone_index_from_ptr(addr);
1423 	other = &zone_array[zindex];
1424 	if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
1425 		panic("zone_require failed: invalid zone index %d "
1426 		    "(addr: %p, expected: %s%s)", zindex,
1427 		    addr, zone_heap_name(zone), zone->z_name);
1428 	} else {
1429 		panic("zone_require failed: address in unexpected zone id %d (%s%s) "
1430 		    "(addr: %p, expected: %s%s)",
1431 		    zindex, zone_heap_name(other), other->z_name,
1432 		    addr, zone_heap_name(zone), zone->z_name);
1433 	}
1434 }
1435 
1436 __abortlike
1437 static void
1438 zone_id_require_panic(zone_id_t zid, void *addr)
1439 {
1440 	zone_require_panic(&zone_array[zid], addr);
1441 }
1442 
1443 /*
1444  * Routines to panic if a pointer is not mapped to an expected zone.
1445  * This can be used as a means of pinning an object to the zone it is expected
1446  * to be a part of.  Causes a panic if the address does not belong to any
1447  * specified zone, does not belong to any zone, has been freed and therefore
1448  * unmapped from the zone, or the pointer contains an uninitialized value that
1449  * does not belong to any zone.
1450  */
1451 void
1452 zone_require(zone_t zone, void *addr)
1453 {
1454 	vm_size_t esize = zone_elem_inner_size(zone);
1455 
1456 	if (from_zone_map(addr, esize) &&
1457 	    zone_has_index(zone, zone_index_from_ptr(addr))) {
1458 		return;
1459 	}
1460 	zone_require_panic(zone, addr);
1461 }
1462 
1463 void
1464 zone_id_require(zone_id_t zid, vm_size_t esize, void *addr)
1465 {
1466 	if (from_zone_map(addr, esize) && zid == zone_index_from_ptr(addr)) {
1467 		return;
1468 	}
1469 	zone_id_require_panic(zid, addr);
1470 }
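
/*
 * Editor's note: illustrative usage (zone id and type are hypothetical),
 * pinning a pointer to its expected zone before trusting it:
 *
 *     struct widget *w = (struct widget *)ptr;
 *     zone_id_require(ZONE_ID_WIDGET, sizeof(struct widget), w);
 *     // w provably came from the widget zone, or we panicked above
 */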
1471 
1472 bool
1473 zone_owns(zone_t zone, void *addr)
1474 {
1475 	vm_size_t esize = zone_elem_inner_size(zone);
1476 
1477 	if (from_zone_map(addr, esize)) {
1478 		return zone_has_index(zone, zone_index_from_ptr(addr));
1479 	}
1480 	return false;
1481 }
1482 
1483 static inline struct mach_vm_range
1484 zone_kmem_suballoc(
1485 	mach_vm_offset_t        addr,
1486 	vm_size_t               size,
1487 	int                     flags,
1488 	vm_tag_t                tag,
1489 	vm_map_t                *new_map)
1490 {
1491 	struct mach_vm_range r;
1492 
1493 	*new_map = kmem_suballoc(kernel_map, &addr, size,
1494 	    VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
1495 	    flags, KMS_PERMANENT | KMS_NOFAIL, tag).kmr_submap;
1496 
1497 	r.min_address = addr;
1498 	r.max_address = addr + size;
1499 	return r;
1500 }
1501 
1502 #endif /* !ZALLOC_TEST */
1503 #pragma mark Zone bits allocator
1504 
1505 /*!
1506  * @defgroup Zone Bitmap allocator
1507  * @{
1508  *
1509  * @brief
1510  * Functions implementing the zone bitmap allocator
1511  *
1512  * @discussion
1513  * The zone allocator maintains which elements are allocated or free in bitmaps.
1514  *
1515  * When the number of elements per page is smaller than 32, it is stored inline
1516  * on the @c zone_page_metadata structure (@c zm_inline_bitmap is set,
1517  * and @c zm_bitmap used for storage).
1518  *
1519  * When the number of elements is larger, then a bitmap is allocated from
1520  * a buddy allocator (implemented under the @c zba_* namespace). Pointers
1521  * to bitmaps are implemented as a packed 32 bit bitmap reference, stored in
1522  * @c zm_bitmap. The top 3 bits encode the scale (order) of the allocation in
1523  * @c ZBA_GRANULE units, and hence actual allocations encoded with that scheme
1524  * cannot be larger than 1024 bytes (8192 bits).
1525  *
1526  * This buddy allocator can actually accommodate allocations as large
1527  * as 8k on 16k systems and 2k on 4k systems.
1528  *
1529  * Note: @c zba_* functions are implementation details not meant to be used
1530  * outside of the allocation of the allocator itself. Interfaces to the rest of
1531  * the zone allocator are documented and not @c zba_* prefixed.
1532  */
1533 
1534 #define ZBA_CHUNK_SIZE          PAGE_MAX_SIZE
1535 #define ZBA_GRANULE             sizeof(uint64_t)
1536 #define ZBA_GRANULE_BITS        (8 * sizeof(uint64_t))
1537 #define ZBA_MAX_ORDER           (PAGE_MAX_SHIFT - 4)
1538 #define ZBA_MAX_ALLOC_ORDER     7
1539 #define ZBA_SLOTS               (ZBA_CHUNK_SIZE / ZBA_GRANULE)
1540 #define ZBA_HEADS_COUNT         (ZBA_MAX_ORDER + 1)
1541 #define ZBA_PTR_MASK            0x0fffffff
1542 #define ZBA_ORDER_SHIFT         29
1543 #define ZBA_HAS_EXTRA_BIT       0x10000000
1544 
1545 static_assert(2ul * ZBA_GRANULE << ZBA_MAX_ORDER == ZBA_CHUNK_SIZE, "chunk sizes");
1546 static_assert(ZBA_MAX_ALLOC_ORDER <= ZBA_MAX_ORDER, "ZBA_MAX_ORDER is enough");
1547 
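/*
 * A minimal sketch (hypothetical helper, not part of the build) of how
 * a packed bitmap reference decodes, given the constants above:
 */
#if 0
static void
zba_bref_decode_sketch(uint32_t bref)
{
	uint32_t order = bref >> ZBA_ORDER_SHIFT;    /* scale of the allocation */
	bool     extra = (bref & ZBA_HAS_EXTRA_BIT); /* has VM tracking storage */
	uint32_t index = (bref & ZBA_PTR_MASK);      /* offset from zba_slot_base() */

	/* the bitmap spans (ZBA_GRANULE << order) bytes at zba_slot_base() + index */
	(void)order; (void)extra; (void)index;
}
#endif
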
1548 struct zone_bits_chain {
1549 	uint32_t zbc_next;
1550 	uint32_t zbc_prev;
1551 } __attribute__((aligned(ZBA_GRANULE)));
1552 
1553 struct zone_bits_head {
1554 	uint32_t zbh_next;
1555 	uint32_t zbh_unused;
1556 } __attribute__((aligned(ZBA_GRANULE)));
1557 
1558 static_assert(sizeof(struct zone_bits_chain) == ZBA_GRANULE, "zbc size");
1559 static_assert(sizeof(struct zone_bits_head) == ZBA_GRANULE, "zbh size");
1560 
1561 struct zone_bits_allocator_meta {
1562 	uint32_t  zbam_left;
1563 	uint32_t  zbam_right;
1564 	struct zone_bits_head zbam_lists[ZBA_HEADS_COUNT];
1565 	struct zone_bits_head zbam_lists_with_extra[ZBA_HEADS_COUNT];
1566 };
1567 
1568 struct zone_bits_allocator_header {
1569 	uint64_t zbah_bits[ZBA_SLOTS / (8 * sizeof(uint64_t))];
1570 };
1571 
1572 #if ZALLOC_TEST
1573 static struct zalloc_bits_allocator_test_setup {
1574 	vm_offset_t zbats_base;
1575 	void      (*zbats_populate)(vm_address_t addr, vm_size_t size);
1576 } zba_test_info;
1577 
1578 static struct zone_bits_allocator_header *
1579 zba_base_header(void)
1580 {
1581 	return (struct zone_bits_allocator_header *)zba_test_info.zbats_base;
1582 }
1583 
1584 static kern_return_t
1585 zba_populate(uint32_t n, bool with_extra __unused)
1586 {
1587 	vm_address_t base = zba_test_info.zbats_base;
1588 	zba_test_info.zbats_populate(base + n * ZBA_CHUNK_SIZE, ZBA_CHUNK_SIZE);
1589 
1590 	return KERN_SUCCESS;
1591 }
1592 #else
1593 __startup_data __attribute__((aligned(ZBA_CHUNK_SIZE)))
1594 static uint8_t zba_chunk_startup[ZBA_CHUNK_SIZE];
1595 
1596 static SECURITY_READ_ONLY_LATE(uint8_t) zba_xtra_shift;
1597 static LCK_MTX_DECLARE(zba_mtx, &zone_locks_grp);
1598 
1599 static struct zone_bits_allocator_header *
1600 zba_base_header(void)
1601 {
1602 	return (struct zone_bits_allocator_header *)zone_info.zi_bits_range.min_address;
1603 }
1604 
1605 static void
1606 zba_lock(void)
1607 {
1608 	lck_mtx_lock(&zba_mtx);
1609 }
1610 
1611 static void
1612 zba_unlock(void)
1613 {
1614 	lck_mtx_unlock(&zba_mtx);
1615 }
1616 
1617 __abortlike
1618 static void
1619 zba_memory_exhausted(void)
1620 {
1621 	uint64_t zsize = 0;
1622 	zone_t z = zone_find_largest(&zsize);
1623 	panic("zba_populate: out of bitmap space, "
1624 	    "likely due to memory leak in zone [%s%s] "
1625 	    "(%u%c, %d elements allocated)",
1626 	    zone_heap_name(z), zone_name(z),
1627 	    mach_vm_size_pretty(zsize), mach_vm_size_unit(zsize),
1628 	    zone_count_allocated(z));
1629 }
1630 
1631 
1632 static kern_return_t
1633 zba_populate(uint32_t n, bool with_extra)
1634 {
1635 	vm_size_t bits_size = ZBA_CHUNK_SIZE;
1636 	vm_size_t xtra_size = bits_size * CHAR_BIT << zba_xtra_shift;
1637 	vm_address_t bits_addr;
1638 	vm_address_t xtra_addr;
1639 	kern_return_t kr;
1640 
1641 	bits_addr = zone_info.zi_bits_range.min_address + n * bits_size;
1642 	xtra_addr = zone_info.zi_xtra_range.min_address + n * xtra_size;
1643 
1644 	kr = kernel_memory_populate(bits_addr, bits_size,
1645 	    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1646 	    VM_KERN_MEMORY_OSFMK);
1647 	if (kr != KERN_SUCCESS) {
1648 		return kr;
1649 	}
1650 
1651 
1652 	if (with_extra) {
1653 		kr = kernel_memory_populate(xtra_addr, xtra_size,
1654 		    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1655 		    VM_KERN_MEMORY_OSFMK);
1656 		if (kr != KERN_SUCCESS) {
1657 			kernel_memory_depopulate(bits_addr, bits_size,
1658 			    KMA_ZERO | KMA_KOBJECT | KMA_NOPAGEWAIT,
1659 			    VM_KERN_MEMORY_OSFMK);
1660 		}
1661 	}
1662 
1663 	return kr;
1664 }
1665 #endif
1666 
1667 __pure2
1668 static struct zone_bits_allocator_meta *
1669 zba_meta(void)
1670 {
1671 	return (struct zone_bits_allocator_meta *)&zba_base_header()[1];
1672 }
1673 
1674 __pure2
1675 static uint64_t *
1676 zba_slot_base(void)
1677 {
1678 	return (uint64_t *)zba_base_header();
1679 }
1680 
1681 __pure2
1682 static struct zone_bits_head *
1683 zba_head(uint32_t order, bool with_extra)
1684 {
1685 	if (with_extra) {
1686 		return &zba_meta()->zbam_lists_with_extra[order];
1687 	} else {
1688 		return &zba_meta()->zbam_lists[order];
1689 	}
1690 }
1691 
1692 __pure2
1693 static uint32_t
1694 zba_head_index(struct zone_bits_head *hd)
1695 {
1696 	return (uint32_t)((uint64_t *)hd - zba_slot_base());
1697 }
1698 
1699 __pure2
1700 static struct zone_bits_chain *
1701 zba_chain_for_index(uint32_t index)
1702 {
1703 	return (struct zone_bits_chain *)(zba_slot_base() + index);
1704 }
1705 
1706 __pure2
1707 static uint32_t
1708 zba_chain_to_index(const struct zone_bits_chain *zbc)
1709 {
1710 	return (uint32_t)((const uint64_t *)zbc - zba_slot_base());
1711 }
1712 
1713 __abortlike
1714 static void
1715 zba_head_corruption_panic(uint32_t order, bool with_extra)
1716 {
1717 	panic("zone bits allocator head[%d:%d:%p] is corrupt",
1718 	    order, with_extra, zba_head(order, with_extra));
1719 }
1720 
1721 __abortlike
1722 static void
1723 zba_chain_corruption_panic(struct zone_bits_chain *a, struct zone_bits_chain *b)
1724 {
1725 	panic("zone bits allocator freelist is corrupt (%p <-> %p)", a, b);
1726 }
1727 
1728 static void
1729 zba_push_block(struct zone_bits_chain *zbc, uint32_t order, bool with_extra)
1730 {
1731 	struct zone_bits_head *hd = zba_head(order, with_extra);
1732 	uint32_t hd_index = zba_head_index(hd);
1733 	uint32_t index = zba_chain_to_index(zbc);
1734 	struct zone_bits_chain *next;
1735 
1736 	if (hd->zbh_next) {
1737 		next = zba_chain_for_index(hd->zbh_next);
1738 		if (next->zbc_prev != hd_index) {
1739 			zba_head_corruption_panic(order, with_extra);
1740 		}
1741 		next->zbc_prev = index;
1742 	}
1743 	zbc->zbc_next = hd->zbh_next;
1744 	zbc->zbc_prev = hd_index;
1745 	hd->zbh_next = index;
1746 }
1747 
1748 static void
1749 zba_remove_block(struct zone_bits_chain *zbc)
1750 {
1751 	struct zone_bits_chain *prev = zba_chain_for_index(zbc->zbc_prev);
1752 	uint32_t index = zba_chain_to_index(zbc);
1753 
1754 	if (prev->zbc_next != index) {
1755 		zba_chain_corruption_panic(prev, zbc);
1756 	}
1757 	if ((prev->zbc_next = zbc->zbc_next)) {
1758 		struct zone_bits_chain *next = zba_chain_for_index(zbc->zbc_next);
1759 		if (next->zbc_prev != index) {
1760 			zba_chain_corruption_panic(zbc, next);
1761 		}
1762 		next->zbc_prev = zbc->zbc_prev;
1763 	}
1764 }
1765 
1766 static vm_address_t
1767 zba_try_pop_block(uint32_t order, bool with_extra)
1768 {
1769 	struct zone_bits_head *hd = zba_head(order, with_extra);
1770 	struct zone_bits_chain *zbc;
1771 
1772 	if (hd->zbh_next == 0) {
1773 		return 0;
1774 	}
1775 
1776 	zbc = zba_chain_for_index(hd->zbh_next);
1777 	zba_remove_block(zbc);
1778 	return (vm_address_t)zbc;
1779 }
1780 
1781 static struct zone_bits_allocator_header *
1782 zba_header(vm_offset_t addr)
1783 {
1784 	addr &= -(vm_offset_t)ZBA_CHUNK_SIZE;
1785 	return (struct zone_bits_allocator_header *)addr;
1786 }
1787 
1788 static size_t
1789 zba_node_parent(size_t node)
1790 {
1791 	return (node - 1) / 2;
1792 }
1793 
1794 static size_t
1795 zba_node_left_child(size_t node)
1796 {
1797 	return node * 2 + 1;
1798 }
1799 
1800 static size_t
1801 zba_node_buddy(size_t node)
1802 {
1803 	return ((node - 1) ^ 1) + 1;
1804 }
1805 
1806 static size_t
1807 zba_node(vm_offset_t addr, uint32_t order)
1808 {
1809 	vm_offset_t offs = (addr % ZBA_CHUNK_SIZE) / ZBA_GRANULE;
1810 	return (offs >> order) + (1 << (ZBA_MAX_ORDER - order + 1)) - 1;
1811 }
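/*
 * e.g. when ZBA_CHUNK_SIZE is 16k (ZBA_MAX_ORDER == 10, 2048 granules
 * per chunk), order-o blocks map to nodes [2^(11-o) - 1, 2^(12-o) - 2]
 * of an implicit binary tree: the two order-10 halves are nodes 1 and 2
 * (each other's buddy), and node 0 only carries their shared split bit,
 * which is why the loops below terminate at node 0.
 */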
1812 
1813 static struct zone_bits_chain *
1814 zba_chain_for_node(struct zone_bits_allocator_header *zbah, size_t node, uint32_t order)
1815 {
1816 	vm_offset_t offs = (node - (1 << (ZBA_MAX_ORDER - order + 1)) + 1) << order;
1817 	return (struct zone_bits_chain *)((vm_offset_t)zbah + offs * ZBA_GRANULE);
1818 }
1819 
1820 static void
1821 zba_node_flip_split(struct zone_bits_allocator_header *zbah, size_t node)
1822 {
1823 	zbah->zbah_bits[node / 64] ^= 1ull << (node % 64);
1824 }
1825 
1826 static bool
1827 zba_node_is_split(struct zone_bits_allocator_header *zbah, size_t node)
1828 {
1829 	return zbah->zbah_bits[node / 64] & (1ull << (node % 64));
1830 }
1831 
1832 static void
1833 zba_free(vm_offset_t addr, uint32_t order, bool with_extra)
1834 {
1835 	struct zone_bits_allocator_header *zbah = zba_header(addr);
1836 	struct zone_bits_chain *zbc;
1837 	size_t node = zba_node(addr, order);
1838 
1839 	while (node) {
1840 		size_t parent = zba_node_parent(node);
1841 
1842 		zba_node_flip_split(zbah, parent);
1843 		if (zba_node_is_split(zbah, parent)) {
1844 			break;
1845 		}
1846 
1847 		zbc = zba_chain_for_node(zbah, zba_node_buddy(node), order);
1848 		zba_remove_block(zbc);
1849 		order++;
1850 		node = parent;
1851 	}
1852 
1853 	zba_push_block(zba_chain_for_node(zbah, node, order), order, with_extra);
1854 }
1855 
1856 static vm_size_t
1857 zba_chunk_header_size(uint32_t n)
1858 {
1859 	vm_size_t hdr_size = sizeof(struct zone_bits_allocator_header);
1860 	if (n == 0) {
1861 		hdr_size += sizeof(struct zone_bits_allocator_meta);
1862 	}
1863 	return hdr_size;
1864 }
1865 
1866 static void
1867 zba_init_chunk(uint32_t n, bool with_extra)
1868 {
1869 	vm_size_t hdr_size = zba_chunk_header_size(n);
1870 	vm_offset_t page = (vm_offset_t)zba_base_header() + n * ZBA_CHUNK_SIZE;
1871 	struct zone_bits_allocator_header *zbah = zba_header(page);
1872 	vm_size_t size = ZBA_CHUNK_SIZE;
1873 	size_t node;
1874 
1875 	for (uint32_t o = ZBA_MAX_ORDER + 1; o-- > 0;) {
1876 		if (size < hdr_size + (ZBA_GRANULE << o)) {
1877 			continue;
1878 		}
1879 		size -= ZBA_GRANULE << o;
1880 		node = zba_node(page + size, o);
1881 		zba_node_flip_split(zbah, zba_node_parent(node));
1882 		zba_push_block(zba_chain_for_node(zbah, node, o), o, with_extra);
1883 	}
1884 }
1885 
1886 __attribute__((noinline))
1887 static void
1888 zba_grow(bool with_extra)
1889 {
1890 	struct zone_bits_allocator_meta *meta = zba_meta();
1891 	kern_return_t kr = KERN_SUCCESS;
1892 	uint32_t chunk;
1893 
1894 #if !ZALLOC_TEST
1895 	if (meta->zbam_left >= meta->zbam_right) {
1896 		zba_memory_exhausted();
1897 	}
1898 #endif
1899 
1900 	if (with_extra) {
1901 		chunk = meta->zbam_right - 1;
1902 	} else {
1903 		chunk = meta->zbam_left;
1904 	}
1905 
1906 	kr = zba_populate(chunk, with_extra);
1907 	if (kr == KERN_SUCCESS) {
1908 		if (with_extra) {
1909 			meta->zbam_right -= 1;
1910 		} else {
1911 			meta->zbam_left += 1;
1912 		}
1913 
1914 		zba_init_chunk(chunk, with_extra);
1915 #if !ZALLOC_TEST
1916 	} else {
1917 		/*
1918 		 * zba_populate() has to be allowed to fail populating:
1919 		 * since we are under a global lock, we need to do the
1920 		 * VM_PAGE_WAIT() outside of the lock.
1921 		 */
1922 		assert(kr == KERN_RESOURCE_SHORTAGE);
1923 		zba_unlock();
1924 		VM_PAGE_WAIT();
1925 		zba_lock();
1926 #endif
1927 	}
1928 }
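/*
 * Note: chunks without extra (VM tag) storage are populated left to
 * right (zbam_left), chunks with it right to left (zbam_right), so the
 * bitmap space is exhausted once the two cursors meet.
 */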
1929 
1930 static vm_offset_t
1931 zba_alloc(uint32_t order, bool with_extra)
1932 {
1933 	struct zone_bits_allocator_header *zbah;
1934 	uint32_t cur = order;
1935 	vm_address_t addr;
1936 	size_t node;
1937 
1938 	while ((addr = zba_try_pop_block(cur, with_extra)) == 0) {
1939 		if (__improbable(cur++ >= ZBA_MAX_ORDER)) {
1940 			zba_grow(with_extra);
1941 			cur = order;
1942 		}
1943 	}
1944 
1945 	zbah = zba_header(addr);
1946 	node = zba_node(addr, cur);
1947 	zba_node_flip_split(zbah, zba_node_parent(node));
1948 	while (cur > order) {
1949 		cur--;
1950 		zba_node_flip_split(zbah, node);
1951 		node = zba_node_left_child(node);
1952 		zba_push_block(zba_chain_for_node(zbah, node + 1, cur),
1953 		    cur, with_extra);
1954 	}
1955 
1956 	return addr;
1957 }
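/*
 * e.g. asking for order 2 when only an order-4 block is free: the block
 * is popped, its upper order-3 half and then the second order-2 quarter
 * are pushed back on the free lists, and the lowest order-2 quarter is
 * returned.
 */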
1958 
1959 #define zba_map_index(type, n)    (n / (8 * sizeof(type)))
1960 #define zba_map_bit(type, n)      ((type)1 << (n % (8 * sizeof(type))))
1961 #define zba_map_mask_lt(type, n)  (zba_map_bit(type, n) - 1)
1962 #define zba_map_mask_ge(type, n)  ((type)-zba_map_bit(type, n))
1963 
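/*
 * For example, for n == 37 with 32-bit words:
 *   zba_map_index(uint32_t, 37)   == 1           (word 1 covers bits 32..63)
 *   zba_map_bit(uint32_t, 37)     == 0x00000020  (1u << (37 % 32))
 *   zba_map_mask_lt(uint32_t, 37) == 0x0000001f  (bits 32..36)
 *   zba_map_mask_ge(uint32_t, 37) == 0xffffffe0  (bits 37..63)
 */
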
1964 #if !ZALLOC_TEST
1965 #if VM_TAG_SIZECLASSES
1966 
1967 static void *
1968 zba_extra_ref_ptr(uint32_t bref, vm_offset_t idx)
1969 {
1970 	vm_offset_t base = zone_info.zi_xtra_range.min_address;
1971 	vm_offset_t offs = (bref & ZBA_PTR_MASK) * ZBA_GRANULE * CHAR_BIT;
1972 
1973 	return (void *)(base + ((offs + idx) << zba_xtra_shift));
1974 }
1975 
1976 #endif /* VM_TAG_SIZECLASSES */
1977 
1978 static uint32_t
1979 zba_bits_ref_order(uint32_t bref)
1980 {
1981 	return bref >> ZBA_ORDER_SHIFT;
1982 }
1983 
1984 static bitmap_t *
1985 zba_bits_ref_ptr(uint32_t bref)
1986 {
1987 	return zba_slot_base() + (bref & ZBA_PTR_MASK);
1988 }
1989 
1990 static vm_offset_t
1991 zba_scan_bitmap_inline(zone_t zone, struct zone_page_metadata *meta,
1992     zalloc_flags_t flags, vm_offset_t eidx)
1993 {
1994 	size_t i = eidx / 32;
1995 	uint32_t map;
1996 
1997 	if (eidx % 32) {
1998 		map = meta[i].zm_bitmap & zba_map_mask_ge(uint32_t, eidx);
1999 		if (map) {
2000 			eidx = __builtin_ctz(map);
2001 			meta[i].zm_bitmap ^= 1u << eidx;
2002 			return i * 32 + eidx;
2003 		}
2004 		i++;
2005 	}
2006 
2007 	uint32_t chunk_len = meta->zm_chunk_len;
2008 	if (flags & Z_PCPU) {
2009 		chunk_len = zpercpu_count();
2010 	}
2011 	for (int j = 0; j < chunk_len; j++, i++) {
2012 		if (i >= chunk_len) {
2013 			i = 0;
2014 		}
2015 		if (__probable(map = meta[i].zm_bitmap)) {
2016 			meta[i].zm_bitmap &= map - 1;
2017 			return i * 32 + __builtin_ctz(map);
2018 		}
2019 	}
2020 
2021 	zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2022 }
2023 
2024 static vm_offset_t
2025 zba_scan_bitmap_ref(zone_t zone, struct zone_page_metadata *meta,
2026     vm_offset_t eidx)
2027 {
2028 	uint32_t bits_size = 1 << zba_bits_ref_order(meta->zm_bitmap);
2029 	bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2030 	size_t i = eidx / 64;
2031 	uint64_t map;
2032 
2033 	if (eidx % 64) {
2034 		map = bits[i] & zba_map_mask_ge(uint64_t, eidx);
2035 		if (map) {
2036 			eidx = __builtin_ctzll(map);
2037 			bits[i] ^= 1ull << eidx;
2038 			return i * 64 + eidx;
2039 		}
2040 		i++;
2041 	}
2042 
2043 	for (int j = 0; j < bits_size; i++, j++) {
2044 		if (i >= bits_size) {
2045 			i = 0;
2046 		}
2047 		if (__probable(map = bits[i])) {
2048 			bits[i] &= map - 1;
2049 			return i * 64 + __builtin_ctzll(map);
2050 		}
2051 	}
2052 
2053 	zone_page_meta_accounting_panic(zone, meta, "zm_bitmap");
2054 }
2055 
2056 /*!
2057  * @function zone_meta_find_and_clear_bit
2058  *
2059  * @brief
2060  * The core of the bitmap allocator: find a bit set in the bitmaps.
2061  *
2062  * @discussion
2063  * This method will round robin through available allocations,
2064  * with a per-core memory of the last allocated element index.
2065  *
2066  * This is done in order to avoid a fully LIFO behavior which makes exploiting
2067  * double-free bugs way too practical.
2068  *
2069  * @param zone          The zone we're allocating from.
2070  * @param meta          The main metadata for the chunk being allocated from.
2071  * @param flags         the alloc flags (for @c Z_PCPU).
2072  */
2073 static vm_offset_t
2074 zone_meta_find_and_clear_bit(
2075 	zone_t                  zone,
2076 	zone_stats_t            zs,
2077 	struct zone_page_metadata *meta,
2078 	zalloc_flags_t          flags)
2079 {
2080 	vm_offset_t eidx = zs->zs_alloc_rr + 1;
2081 
2082 	if (meta->zm_inline_bitmap) {
2083 		eidx = zba_scan_bitmap_inline(zone, meta, flags, eidx);
2084 	} else {
2085 		eidx = zba_scan_bitmap_ref(zone, meta, eidx);
2086 	}
2087 	zs->zs_alloc_rr = (uint16_t)eidx;
2088 	return eidx;
2089 }
2090 
2091 /*!
2092  * @function zone_meta_bits_init_inline
2093  *
2094  * @brief
2095  * Initializes the inline zm_bitmap field(s) for a newly assigned chunk.
2096  *
2097  * @param meta          The main metadata for the initialized chunk.
2098  * @param count         The number of elements the chunk can hold
2099  *                      (which might be partial for partially populated chunks).
2100  */
2101 static void
2102 zone_meta_bits_init_inline(struct zone_page_metadata *meta, uint32_t count)
2103 {
2104 	/*
2105 	 * We're called with the metadata zm_bitmap fields already zeroed out.
2106 	 */
2107 	for (size_t i = 0; i < count / 32; i++) {
2108 		meta[i].zm_bitmap = ~0u;
2109 	}
2110 	if (count % 32) {
2111 		meta[count / 32].zm_bitmap = zba_map_mask_lt(uint32_t, count);
2112 	}
2113 }
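/*
 * e.g. count == 40: meta[0].zm_bitmap == 0xffffffff and
 * meta[1].zm_bitmap == 0x000000ff, all 40 elements initially free.
 */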
2114 
2115 /*!
2116  * @function zone_meta_bits_alloc_init
2117  *
2118  * @brief
2119  * Allocates a zm_bitmap field for a newly assigned chunk.
2120  *
2121  * @param count         The number of elements the chunk can hold
2122  *                      (which might be partial for partially populated chunks).
2123  * @param nbits         The maximum number of bits that will be used.
2124  * @param with_extra    Whether "VM Tracking" metadata needs to be allocated.
2125  */
2126 static uint32_t
2127 zone_meta_bits_alloc_init(uint32_t count, uint32_t nbits, bool with_extra)
2128 {
2129 	static_assert(ZONE_MAX_ALLOC_SIZE / ZONE_MIN_ELEM_SIZE <=
2130 	    ZBA_GRANULE_BITS << ZBA_MAX_ORDER, "bitmaps will be large enough");
2131 
2132 	uint32_t order = flsll((nbits - 1) / ZBA_GRANULE_BITS);
2133 	uint64_t *bits;
2134 	size_t   i = 0;
2135 
2136 	assert(order <= ZBA_MAX_ALLOC_ORDER);
2137 	assert(count <= ZBA_GRANULE_BITS << order);
2138 
2139 	zba_lock();
2140 	bits = (uint64_t *)zba_alloc(order, with_extra);
2141 	zba_unlock();
2142 
2143 	while (i < count / 64) {
2144 		bits[i++] = ~0ull;
2145 	}
2146 	if (count % 64) {
2147 		bits[i++] = zba_map_mask_lt(uint64_t, count);
2148 	}
2149 	while (i < 1u << order) {
2150 		bits[i++] = 0;
2151 	}
2152 
2153 	return (uint32_t)(bits - zba_slot_base()) +
2154 	       (order << ZBA_ORDER_SHIFT) +
2155 	       (with_extra ? ZBA_HAS_EXTRA_BIT : 0);
2156 }
2157 
2158 /*!
2159  * @function zone_meta_bits_merge
2160  *
2161  * @brief
2162  * Adds elements <code>[start, end)</code> to a chunk being extended.
2163  *
2164  * @param meta          The main metadata for the extended chunk.
2165  * @param start         The index of the first element to add to the chunk.
2166  * @param end           The index one past the last element to add (exclusive).
2167  */
2168 static void
2169 zone_meta_bits_merge(struct zone_page_metadata *meta,
2170     uint32_t start, uint32_t end)
2171 {
2172 	if (meta->zm_inline_bitmap) {
2173 		while (start < end) {
2174 			size_t s_i = start / 32;
2175 			size_t s_e = end / 32;
2176 
2177 			if (s_i == s_e) {
2178 				meta[s_i].zm_bitmap |= zba_map_mask_lt(uint32_t, end) &
2179 				    zba_map_mask_ge(uint32_t, start);
2180 				break;
2181 			}
2182 
2183 			meta[s_i].zm_bitmap |= zba_map_mask_ge(uint32_t, start);
2184 			start += 32 - (start % 32);
2185 		}
2186 	} else {
2187 		uint64_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2188 
2189 		while (start < end) {
2190 			size_t s_i = start / 64;
2191 			size_t s_e = end / 64;
2192 
2193 			if (s_i == s_e) {
2194 				bits[s_i] |= zba_map_mask_lt(uint64_t, end) &
2195 				    zba_map_mask_ge(uint64_t, start);
2196 				break;
2197 			}
2198 			bits[s_i] |= zba_map_mask_ge(uint64_t, start);
2199 			start += 64 - (start % 64);
2200 		}
2201 	}
2202 }
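/*
 * e.g. merging [30, 40) into an inline bitmap: the first pass sets bits
 * 30-31 of meta[0].zm_bitmap and advances start to 32; the second pass
 * hits s_i == s_e, sets bits 0-7 of meta[1].zm_bitmap and breaks.
 */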
2203 
2204 /*!
2205  * @function zone_bits_free
2206  *
2207  * @brief
2208  * Frees a bitmap to the zone bitmap allocator.
2209  *
2210  * @param bref
2211  * A bitmap reference set by @c zone_meta_bits_alloc_init() in a @c zm_bitmap field.
2212  */
2213 static void
2214 zone_bits_free(uint32_t bref)
2215 {
2216 	zba_lock();
2217 	zba_free((vm_offset_t)zba_bits_ref_ptr(bref),
2218 	    zba_bits_ref_order(bref), (bref & ZBA_HAS_EXTRA_BIT));
2219 	zba_unlock();
2220 }
2221 
2222 /*!
2223  * @function zone_meta_is_free
2224  *
2225  * @brief
2226  * Returns whether a given element appears free.
2227  */
2228 static bool
2229 zone_meta_is_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2230 {
2231 	if (meta->zm_inline_bitmap) {
2232 		uint32_t bit = zba_map_bit(uint32_t, eidx);
2233 		return meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit;
2234 	} else {
2235 		bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2236 		uint64_t bit = zba_map_bit(uint64_t, eidx);
2237 		return bits[zba_map_index(uint64_t, eidx)] & bit;
2238 	}
2239 }
2240 
2241 /*!
2242  * @function zone_meta_mark_free
2243  *
2244  * @brief
2245  * Marks an element as free and returns whether it was marked as used.
2246  */
2247 static bool
2248 zone_meta_mark_free(struct zone_page_metadata *meta, vm_offset_t eidx)
2249 {
2250 	if (meta->zm_inline_bitmap) {
2251 		uint32_t bit = zba_map_bit(uint32_t, eidx);
2252 		if (meta[zba_map_index(uint32_t, eidx)].zm_bitmap & bit) {
2253 			return false;
2254 		}
2255 		meta[zba_map_index(uint32_t, eidx)].zm_bitmap ^= bit;
2256 	} else {
2257 		bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
2258 		uint64_t bit = zba_map_bit(uint64_t, eidx);
2259 		if (bits[zba_map_index(uint64_t, eidx)] & bit) {
2260 			return false;
2261 		}
2262 		bits[zba_map_index(uint64_t, eidx)] ^= bit;
2263 	}
2264 	return true;
2265 }
2266 
2267 #if VM_TAG_SIZECLASSES
2268 
2269 __startup_func
2270 void
2271 __zone_site_register(vm_allocation_site_t *site)
2272 {
2273 	if (zone_tagging_on) {
2274 		vm_tag_alloc(site);
2275 	}
2276 }
2277 
2278 uint16_t
2279 zone_index_from_tag_index(uint32_t sizeclass_idx)
2280 {
2281 	return zone_tags_sizeclasses[sizeclass_idx];
2282 }
2283 
2284 #endif /* VM_TAG_SIZECLASSES */
2285 #endif /* !ZALLOC_TEST */
2286 /*! @} */
2287 #pragma mark zalloc helpers
2288 #if !ZALLOC_TEST
2289 
2290 static inline void *
2291 zstack_tbi_fix(vm_offset_t elem)
2292 {
2293 #if KASAN_TBI
2294 	elem = kasan_tbi_fix_address_tag(elem);
2295 #endif
2296 	return (void *)elem;
2297 }
2298 
2299 static inline vm_offset_t
2300 zstack_tbi_fill(void *addr)
2301 {
2302 	vm_offset_t elem = (vm_offset_t)addr;
2303 
2304 #if KASAN_TBI
2305 	elem = VM_KERNEL_TBI_FILL(elem);
2306 #endif
2307 	return elem;
2308 }
2309 
2310 __attribute__((always_inline))
2311 static inline void
2312 zstack_push_no_delta(zstack_t *stack, void *addr)
2313 {
2314 	vm_offset_t elem = zstack_tbi_fill(addr);
2315 
2316 	*(vm_offset_t *)addr = stack->z_head - elem;
2317 	stack->z_head = elem;
2318 }
2319 
2320 __attribute__((always_inline))
2321 void
2322 zstack_push(zstack_t *stack, void *addr)
2323 {
2324 	zstack_push_no_delta(stack, addr);
2325 	stack->z_count++;
2326 }
2327 
2328 __attribute__((always_inline))
2329 static inline void *
2330 zstack_pop_no_delta(zstack_t *stack)
2331 {
2332 	void *addr = zstack_tbi_fix(stack->z_head);
2333 
2334 	stack->z_head += *(vm_offset_t *)addr;
2335 	*(vm_offset_t *)addr = 0;
2336 
2337 	return addr;
2338 }
2339 
2340 __attribute__((always_inline))
2341 void *
2342 zstack_pop(zstack_t *stack)
2343 {
2344 	stack->z_count--;
2345 	return zstack_pop_no_delta(stack);
2346 }
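/*
 * A sketch (hypothetical, not part of the build) of the delta encoding
 * above: each free element stores the difference to the previous head
 * rather than a raw pointer, so a zeroed zstack_t round-trips as a LIFO
 * (ignoring KASAN_TBI tag fixups):
 */
#if 0
static void
zstack_sketch(void)
{
	zstack_t st = { };
	uint64_t a[8], b[8];                  /* stand-ins for two free elements */

	zstack_push(&st, a);                  /* a[0] = 0 - (vm_offset_t)a */
	zstack_push(&st, b);                  /* b[0] = (vm_offset_t)a - (vm_offset_t)b */
	assert(zstack_pop(&st) == (void *)b); /* z_head += a - b, yielding a */
	assert(zstack_pop(&st) == (void *)a); /* z_head += 0 - a, back to 0 */
}
#endif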
2347 
2348 static inline void
2349 zone_recirc_lock_nopreempt_check_contention(zone_t zone)
2350 {
2351 	uint32_t ticket;
2352 
2353 	if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_recirc_lock,
2354 	    &ticket, &zone_locks_grp))) {
2355 		return;
2356 	}
2357 
2358 	hw_lck_ticket_wait(&zone->z_recirc_lock, ticket, NULL, &zone_locks_grp);
2359 
2360 	/*
2361 	 * If zone caching has been disabled due to memory pressure,
2362 	 * then recording contention is not useful, give the system
2363 	 * time to recover.
2364 	 */
2365 	if (__probable(!zone_caching_disabled)) {
2366 		zone->z_recirc_cont_cur++;
2367 	}
2368 }
2369 
2370 static inline void
2371 zone_recirc_lock_nopreempt(zone_t zone)
2372 {
2373 	hw_lck_ticket_lock_nopreempt(&zone->z_recirc_lock, &zone_locks_grp);
2374 }
2375 
2376 static inline void
2377 zone_recirc_unlock_nopreempt(zone_t zone)
2378 {
2379 	hw_lck_ticket_unlock_nopreempt(&zone->z_recirc_lock);
2380 }
2381 
2382 static inline void
2383 zone_lock_nopreempt_check_contention(zone_t zone)
2384 {
2385 	uint32_t ticket;
2386 #if KASAN_FAKESTACK
2387 	spl_t s = 0;
2388 	if (zone->z_kasan_fakestacks) {
2389 		s = splsched();
2390 	}
2391 #endif /* KASAN_FAKESTACK */
2392 
2393 	if (__probable(hw_lck_ticket_reserve_nopreempt(&zone->z_lock, &ticket,
2394 	    &zone_locks_grp))) {
2395 #if KASAN_FAKESTACK
2396 		zone->z_kasan_spl = s;
2397 #endif /* KASAN_FAKESTACK */
2398 		return;
2399 	}
2400 
2401 	hw_lck_ticket_wait(&zone->z_lock, ticket, NULL, &zone_locks_grp);
2402 #if KASAN_FAKESTACK
2403 	zone->z_kasan_spl = s;
2404 #endif /* KASAN_FAKESTACK */
2405 
2406 	/*
2407 	 * If zone caching has been disabled due to memory pressure,
2408 	 * then recording contention is not useful, give the system
2409 	 * time to recover.
2410 	 */
2411 	if (__probable(!zone_caching_disabled && !zone->z_pcpu_cache)) {
2412 		zone->z_recirc_cont_cur++;
2413 	}
2414 }
2415 
2416 static inline void
2417 zone_lock_nopreempt(zone_t zone)
2418 {
2419 #if KASAN_FAKESTACK
2420 	spl_t s = 0;
2421 	if (zone->z_kasan_fakestacks) {
2422 		s = splsched();
2423 	}
2424 #endif /* KASAN_FAKESTACK */
2425 	hw_lck_ticket_lock_nopreempt(&zone->z_lock, &zone_locks_grp);
2426 #if KASAN_FAKESTACK
2427 	zone->z_kasan_spl = s;
2428 #endif /* KASAN_FAKESTACK */
2429 }
2430 
2431 static inline void
2432 zone_unlock_nopreempt(zone_t zone)
2433 {
2434 #if KASAN_FAKESTACK
2435 	spl_t s = zone->z_kasan_spl;
2436 	zone->z_kasan_spl = 0;
2437 #endif /* KASAN_FAKESTACK */
2438 	hw_lck_ticket_unlock_nopreempt(&zone->z_lock);
2439 #if KASAN_FAKESTACK
2440 	if (zone->z_kasan_fakestacks) {
2441 		splx(s);
2442 	}
2443 #endif /* KASAN_FAKESTACK */
2444 }
2445 
2446 static inline void
2447 zone_depot_lock_nopreempt(zone_cache_t zc)
2448 {
2449 	hw_lck_ticket_lock_nopreempt(&zc->zc_depot_lock, &zone_locks_grp);
2450 }
2451 
2452 static inline void
2453 zone_depot_unlock_nopreempt(zone_cache_t zc)
2454 {
2455 	hw_lck_ticket_unlock_nopreempt(&zc->zc_depot_lock);
2456 }
2457 
2458 static inline void
2459 zone_depot_lock(zone_cache_t zc)
2460 {
2461 	hw_lck_ticket_lock(&zc->zc_depot_lock, &zone_locks_grp);
2462 }
2463 
2464 static inline void
2465 zone_depot_unlock(zone_cache_t zc)
2466 {
2467 	hw_lck_ticket_unlock(&zc->zc_depot_lock);
2468 }
2469 
2470 zone_t
2471 zone_by_id(size_t zid)
2472 {
2473 	return (zone_t)((uintptr_t)zone_array + zid * sizeof(struct zone));
2474 }
2475 
2476 static inline bool
2477 zone_supports_vm(zone_t z)
2478 {
2479 	/*
2480 	 * VM_MAP_ENTRY and VM_MAP_HOLES zones are allowed
2481 	 * to overcommit because they're used to reclaim memory
2482 	 * (VM support).
2483 	 */
2484 	return z >= &zone_array[ZONE_ID_VM_MAP_ENTRY] &&
2485 	       z <= &zone_array[ZONE_ID_VM_MAP_HOLES];
2486 }
2487 
2488 const char *
2489 zone_name(zone_t z)
2490 {
2491 	return z->z_name;
2492 }
2493 
2494 const char *
2495 zone_heap_name(zone_t z)
2496 {
2497 	zone_security_flags_t zsflags = zone_security_config(z);
2498 	if (__probable(zsflags.z_kheap_id < KHEAP_ID_COUNT)) {
2499 		return kalloc_heap_names[zsflags.z_kheap_id];
2500 	}
2501 	return "invalid";
2502 }
2503 
2504 static uint32_t
2505 zone_alloc_pages_for_nelems(zone_t z, vm_size_t max_elems)
2506 {
2507 	vm_size_t elem_count, chunks;
2508 
2509 	elem_count = ptoa(z->z_percpu ? 1 : z->z_chunk_pages) /
2510 	    zone_elem_outer_size(z);
2511 	chunks = (max_elems + elem_count - 1) / elem_count;
2512 
2513 	return (uint32_t)MIN(UINT32_MAX, chunks * z->z_chunk_pages);
2514 }
2515 
2516 static inline vm_size_t
2517 zone_submaps_approx_size(void)
2518 {
2519 	vm_size_t size = 0;
2520 
2521 	for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
2522 		if (zone_submaps[idx] != VM_MAP_NULL) {
2523 			size += zone_submaps[idx]->size;
2524 		}
2525 	}
2526 
2527 	return size;
2528 }
2529 
2530 static inline void
2531 zone_depot_init(struct zone_depot *zd)
2532 {
2533 	*zd = (struct zone_depot){
2534 		.zd_tail = &zd->zd_head,
2535 	};
2536 }
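/*
 * Depot layout, as implied by the insert/pop functions below: the
 * zd_head chain holds the zd_full full magazines first, then the
 * zd_empty empty ones, and zd_tail points at the link between the two
 * sublists so either side can be updated without walking the chain.
 */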
2537 
2538 static inline void
2539 zone_depot_insert_head_full(struct zone_depot *zd, zone_magazine_t mag)
2540 {
2541 	if (zd->zd_full++ == 0) {
2542 		zd->zd_tail = &mag->zm_next;
2543 	}
2544 	mag->zm_next = zd->zd_head;
2545 	zd->zd_head = mag;
2546 }
2547 
2548 static inline void
2549 zone_depot_insert_tail_full(struct zone_depot *zd, zone_magazine_t mag)
2550 {
2551 	zd->zd_full++;
2552 	mag->zm_next = *zd->zd_tail;
2553 	*zd->zd_tail = mag;
2554 	zd->zd_tail = &mag->zm_next;
2555 }
2556 
2557 static inline void
2558 zone_depot_insert_head_empty(struct zone_depot *zd, zone_magazine_t mag)
2559 {
2560 	zd->zd_empty++;
2561 	mag->zm_next = *zd->zd_tail;
2562 	*zd->zd_tail = mag;
2563 }
2564 
2565 static inline zone_magazine_t
2566 zone_depot_pop_head_full(struct zone_depot *zd, zone_t z)
2567 {
2568 	zone_magazine_t mag = zd->zd_head;
2569 
2570 	assert(zd->zd_full);
2571 
2572 	zd->zd_full--;
2573 	if (z && z->z_recirc_full_min > zd->zd_full) {
2574 		z->z_recirc_full_min = zd->zd_full;
2575 	}
2576 	zd->zd_head = mag->zm_next;
2577 	if (zd->zd_full == 0) {
2578 		zd->zd_tail = &zd->zd_head;
2579 	}
2580 
2581 	mag->zm_next = NULL;
2582 	return mag;
2583 }
2584 
2585 static inline zone_magazine_t
2586 zone_depot_pop_head_empty(struct zone_depot *zd, zone_t z)
2587 {
2588 	zone_magazine_t mag = *zd->zd_tail;
2589 
2590 	assert(zd->zd_empty);
2591 
2592 	zd->zd_empty--;
2593 	if (z && z->z_recirc_empty_min > zd->zd_empty) {
2594 		z->z_recirc_empty_min = zd->zd_empty;
2595 	}
2596 	*zd->zd_tail = mag->zm_next;
2597 
2598 	mag->zm_next = NULL;
2599 	return mag;
2600 }
2601 
2602 static inline smr_seq_t
2603 zone_depot_move_full(
2604 	struct zone_depot      *dst,
2605 	struct zone_depot      *src,
2606 	uint32_t                n,
2607 	zone_t                  z)
2608 {
2609 	zone_magazine_t head, last;
2610 
2611 	assert(n);
2612 	assert(src->zd_full >= n);
2613 
2614 	src->zd_full -= n;
2615 	if (z && z->z_recirc_full_min > src->zd_full) {
2616 		z->z_recirc_full_min = src->zd_full;
2617 	}
2618 	head = last = src->zd_head;
2619 	for (uint32_t i = n; i-- > 1;) {
2620 		last = last->zm_next;
2621 	}
2622 
2623 	src->zd_head = last->zm_next;
2624 	if (src->zd_full == 0) {
2625 		src->zd_tail = &src->zd_head;
2626 	}
2627 
2628 	if (z && zone_security_array[zone_index(z)].z_lifo) {
2629 		if (dst->zd_full == 0) {
2630 			dst->zd_tail = &last->zm_next;
2631 		}
2632 		last->zm_next = dst->zd_head;
2633 		dst->zd_head = head;
2634 	} else {
2635 		last->zm_next = *dst->zd_tail;
2636 		*dst->zd_tail = head;
2637 		dst->zd_tail = &last->zm_next;
2638 	}
2639 	dst->zd_full += n;
2640 
2641 	return last->zm_seq;
2642 }
2643 
2644 static inline void
2645 zone_depot_move_empty(
2646 	struct zone_depot      *dst,
2647 	struct zone_depot      *src,
2648 	uint32_t                n,
2649 	zone_t                  z)
2650 {
2651 	zone_magazine_t head, last;
2652 
2653 	assert(n);
2654 	assert(src->zd_empty >= n);
2655 
2656 	src->zd_empty -= n;
2657 	if (z && z->z_recirc_empty_min > src->zd_empty) {
2658 		z->z_recirc_empty_min = src->zd_empty;
2659 	}
2660 	head = last = *src->zd_tail;
2661 	for (uint32_t i = n; i-- > 1;) {
2662 		last = last->zm_next;
2663 	}
2664 
2665 	*src->zd_tail = last->zm_next;
2666 
2667 	dst->zd_empty += n;
2668 	last->zm_next = *dst->zd_tail;
2669 	*dst->zd_tail = head;
2670 }
2671 
2672 static void
2673 zone_cache_swap_magazines(zone_cache_t cache)
2674 {
2675 	uint16_t count_a = cache->zc_alloc_cur;
2676 	uint16_t count_f = cache->zc_free_cur;
2677 	vm_offset_t *elems_a = cache->zc_alloc_elems;
2678 	vm_offset_t *elems_f = cache->zc_free_elems;
2679 
2680 	z_debug_assert(count_a <= zc_mag_size());
2681 	z_debug_assert(count_f <= zc_mag_size());
2682 
2683 	cache->zc_alloc_cur = count_f;
2684 	cache->zc_free_cur = count_a;
2685 	cache->zc_alloc_elems = elems_f;
2686 	cache->zc_free_elems = elems_a;
2687 }
2688 
2689 __pure2
2690 static smr_t
2691 zone_cache_smr(zone_cache_t cache)
2692 {
2693 	return cache->zc_smr;
2694 }
2695 
2696 /*!
2697  * @function zone_magazine_replace
2698  *
2699  * @brief
2700  * Unload a magazine and load a new one instead.
2701  */
2702 static zone_magazine_t
2703 zone_magazine_replace(zone_cache_t zc, zone_magazine_t mag, bool empty)
2704 {
2705 	zone_magazine_t old;
2706 	vm_offset_t **elems;
2707 
2708 	mag->zm_seq = SMR_SEQ_INVALID;
2709 
2710 	if (empty) {
2711 		elems = &zc->zc_free_elems;
2712 		zc->zc_free_cur = 0;
2713 	} else {
2714 		elems = &zc->zc_alloc_elems;
2715 		zc->zc_alloc_cur = zc_mag_size();
2716 	}
2717 	old = (zone_magazine_t)((uintptr_t)*elems -
2718 	    offsetof(struct zone_magazine, zm_elems));
2719 	*elems = mag->zm_elems;
2720 
2721 	return old;
2722 }
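/*
 * Note: the outgoing magazine is recovered from the cache's elems
 * pointer with a container_of-style offsetof() computation, since the
 * per-CPU cache stores only the zm_elems array, not the magazine itself.
 */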
2723 
2724 static zone_magazine_t
2725 zone_magazine_alloc(zalloc_flags_t flags)
2726 {
2727 	return zalloc_flags(zc_magazine_zone, flags | Z_ZERO);
2728 }
2729 
2730 static void
2731 zone_magazine_free(zone_magazine_t mag)
2732 {
2733 	(zfree)(zc_magazine_zone, mag);
2734 }
2735 
2736 static void
2737 zone_magazine_free_list(struct zone_depot *zd)
2738 {
2739 	zone_magazine_t tmp, mag = *zd->zd_tail;
2740 
2741 	while (mag) {
2742 		tmp = mag->zm_next;
2743 		zone_magazine_free(mag);
2744 		mag = tmp;
2745 	}
2746 
2747 	*zd->zd_tail = NULL;
2748 	zd->zd_empty = 0;
2749 }
2750 
2751 void
2752 zone_enable_caching(zone_t zone)
2753 {
2754 	size_t size_per_mag = zone_elem_inner_size(zone) * zc_mag_size();
2755 	zone_cache_t caches;
2756 	size_t depot_limit;
2757 
2758 	depot_limit = zc_pcpu_max() / size_per_mag;
2759 	zone->z_depot_limit = (uint16_t)MIN(depot_limit, INT16_MAX);
2760 
2761 	caches = zalloc_percpu_permanent_type(struct zone_cache);
2762 	zpercpu_foreach(zc, caches) {
2763 		zc->zc_alloc_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2764 		zc->zc_free_elems = zone_magazine_alloc(Z_WAITOK | Z_NOFAIL)->zm_elems;
2765 		zone_depot_init(&zc->zc_depot);
2766 		hw_lck_ticket_init(&zc->zc_depot_lock, &zone_locks_grp);
2767 	}
2768 
2769 	zone_lock(zone);
2770 	assert(zone->z_pcpu_cache == NULL);
2771 	zone->z_pcpu_cache = caches;
2772 	zone->z_recirc_cont_cur = 0;
2773 	zone->z_recirc_cont_wma = 0;
2774 	zone->z_elems_free_min = 0; /* becomes z_recirc_empty_min */
2775 	zone->z_elems_free_wma = 0; /* becomes z_recirc_empty_wma */
2776 	zone_unlock(zone);
2777 }
2778 
2779 bool
2780 zone_maps_owned(vm_address_t addr, vm_size_t size)
2781 {
2782 	return from_zone_map(addr, size);
2783 }
2784 
2785 #if KASAN_LIGHT
2786 bool
2787 kasan_zone_maps_owned(vm_address_t addr, vm_size_t size)
2788 {
2789 	return from_zone_map(addr, size) ||
2790 	       mach_vm_range_size(&zone_info.zi_map_range) == 0;
2791 }
2792 #endif /* KASAN_LIGHT */
2793 
2794 void
2795 zone_map_sizes(
2796 	vm_map_size_t    *psize,
2797 	vm_map_size_t    *pfree,
2798 	vm_map_size_t    *plargest_free)
2799 {
2800 	vm_map_size_t size, free, largest;
2801 
2802 	vm_map_sizes(zone_submaps[0], psize, pfree, plargest_free);
2803 
2804 	for (uint32_t i = 1; i < Z_SUBMAP_IDX_COUNT; i++) {
2805 		vm_map_sizes(zone_submaps[i], &size, &free, &largest);
2806 		*psize += size;
2807 		*pfree += free;
2808 		*plargest_free = MAX(*plargest_free, largest);
2809 	}
2810 }
2811 
2812 __attribute__((always_inline))
2813 vm_map_t
2814 zone_submap(zone_security_flags_t zsflags)
2815 {
2816 	return zone_submaps[zsflags.z_submap_idx];
2817 }
2818 
2819 unsigned
2820 zpercpu_count(void)
2821 {
2822 	return zpercpu_early_count;
2823 }
2824 
2825 #if ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC
2826 /*
2827  * Returns a random number of a given bit-width.
2828  *
2829  * DO NOT COPY THIS CODE OUTSIDE OF ZALLOC
2830  *
2831  * This uses Intel's rdrand because random() uses FP registers,
2832  * which causes FP faults and allocations, neither of which is
2833  * something we can do from zalloc itself due to reentrancy problems.
2834  *
2835  * For pre-rdrand machines (which we no longer support),
2836  * we use a bad biased random generator that doesn't use FP.
2837  * Such HW is no longer supported, but VMs of newer OSes on older
2838  * bare metal are made to limp along (with reduced security) this way.
2839  */
2840 static uint64_t
2841 zalloc_random_mask64(uint32_t bits)
2842 {
2843 	uint64_t mask = ~0ull >> (64 - bits);
2844 	uint64_t v;
2845 
2846 #if __x86_64__
2847 	if (__probable(cpuid_features() & CPUID_FEATURE_RDRAND)) {
2848 		asm volatile ("1: rdrand %0; jnc 1b\n" : "=r" (v) :: "cc");
2849 		v &= mask;
2850 	} else {
2851 		disable_preemption();
2852 		int cpu = cpu_number();
2853 		v = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
2854 		    zone_bool_gen[cpu].zbg_entropy,
2855 		    ZONE_ENTROPY_CNT, bits);
2856 		enable_preemption();
2857 	}
2858 #else
2859 	v = early_random() & mask;
2860 #endif
2861 
2862 	return v;
2863 }
2864 
2865 /*
2866  * Returns a random number within [bound_min, bound_max)
2867  *
2868  * This isn't _exactly_ uniform, but the skew is small enough
2869  * not to matter for the consumers of this interface.
2870  *
2871  * Values within [bound_min, 2^64 % (bound_max - bound_min))
2872  * will be returned (bound_max - bound_min) / 2^64 more often
2873  * than values within [2^64 % (bound_max - bound_min), bound_max).
2874  */
2875 static uint32_t
2876 zalloc_random_uniform32(uint32_t bound_min, uint32_t bound_max)
2877 {
2878 	uint64_t delta = bound_max - bound_min;
2879 
2880 	return bound_min + (uint32_t)(zalloc_random_mask64(64) % delta);
2881 }
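/*
 * e.g. for a delta of 3: 2^64 % 3 == 1, so bound_min comes up
 * (2^64 + 2) / 3 times while the other two values come up
 * (2^64 - 1) / 3 times each, a relative skew of 3 / 2^64 as
 * described above.
 */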
2882 
2883 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) || CONFIG_PROB_GZALLOC */
2884 #if ZALLOC_ENABLE_LOGGING || CONFIG_PROB_GZALLOC
2885 /*
2886  * Track all kalloc zones of specified size for zlog name
2887  * kalloc.type.<size> or kalloc.type.var.<size> or kalloc.<size>
2888  */
2889 static bool
2890 track_kalloc_zones(zone_t z, const char *logname)
2891 {
2892 	const char *prefix;
2893 	size_t len;
2894 	zone_security_flags_t zsflags = zone_security_config(z);
2895 
2896 	prefix = "kalloc.type.var.";
2897 	len    = strlen(prefix);
2898 	if (zsflags.z_kalloc_type && zsflags.z_kheap_id == KHEAP_ID_KT_VAR &&
2899 	    strncmp(logname, prefix, len) == 0) {
2900 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2901 
2902 		return zone_elem_inner_size(z) == sizeclass;
2903 	}
2904 
2905 	prefix = "kalloc.type.";
2906 	len    = strlen(prefix);
2907 	if (zsflags.z_kalloc_type && zsflags.z_kheap_id != KHEAP_ID_KT_VAR &&
2908 	    strncmp(logname, prefix, len) == 0) {
2909 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2910 
2911 		return zone_elem_inner_size(z) == sizeclass;
2912 	}
2913 
2914 	prefix = "kalloc.";
2915 	len    = strlen(prefix);
2916 	if ((zsflags.z_kheap_id || zsflags.z_kalloc_type) &&
2917 	    strncmp(logname, prefix, len) == 0) {
2918 		vm_size_t sizeclass = strtoul(logname + len, NULL, 0);
2919 
2920 		return zone_elem_inner_size(z) == sizeclass;
2921 	}
2922 
2923 	return false;
2924 }
2925 #endif
2926 
2927 int
2928 track_this_zone(const char *zonename, const char *logname)
2929 {
2930 	unsigned int len;
2931 	const char *zc = zonename;
2932 	const char *lc = logname;
2933 
2934 	/*
2935 	 * Compare the strings.  We bound the compare by MAX_ZONE_NAME.
2936 	 */
2937 
2938 	for (len = 1; len <= MAX_ZONE_NAME; zc++, lc++, len++) {
2939 		/*
2940 	 * If the current characters don't match, check for a space
2941 	 * in the zone name and a corresponding period in the log name.
2942 		 * If that's not there, then the strings don't match.
2943 		 */
2944 
2945 		if (*zc != *lc && !(*zc == ' ' && *lc == '.')) {
2946 			break;
2947 		}
2948 
2949 		/*
2950 		 * The strings are equal so far.  If we're at the end, then it's a match.
2951 		 */
2952 
2953 		if (*zc == '\0') {
2954 			return TRUE;
2955 		}
2956 	}
2957 
2958 	return FALSE;
2959 }
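/*
 * For example (hypothetical names): track_this_zone("vm objects",
 * "vm.objects") returns TRUE because a space in the zone name may match
 * a period in the log name, while track_this_zone("vm objects",
 * "vm object") returns FALSE.
 */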
2960 
2961 #if DEBUG || DEVELOPMENT
2962 
2963 vm_size_t
2964 zone_element_info(void *addr, vm_tag_t * ptag)
2965 {
2966 	vm_size_t     size = 0;
2967 	vm_tag_t      tag = VM_KERN_MEMORY_NONE;
2968 	struct zone *src_zone;
2969 
2970 	if (from_zone_map(addr, sizeof(void *))) {
2971 		src_zone = zone_by_id(zone_index_from_ptr(addr));
2972 		size     = zone_elem_inner_size(src_zone);
2973 #if VM_TAG_SIZECLASSES
2974 		if (__improbable(src_zone->z_uses_tags)) {
2975 			struct zone_page_metadata *meta;
2976 			vm_offset_t eidx;
2977 			vm_tag_t *slot;
2978 
2979 			meta = zone_element_resolve(src_zone,
2980 			    (vm_offset_t)addr, &eidx);
2981 			slot = zba_extra_ref_ptr(meta->zm_bitmap, eidx);
2982 			tag  = *slot;
2983 		}
2984 #endif /* VM_TAG_SIZECLASSES */
2985 	}
2986 
2987 	*ptag = tag;
2988 	return size;
2989 }
2990 
2991 #endif /* DEBUG || DEVELOPMENT */
2992 #if KASAN_CLASSIC
2993 
2994 vm_size_t
2995 kasan_quarantine_resolve(vm_address_t addr, zone_t *zonep)
2996 {
2997 	zone_t zone = zone_by_id(zone_index_from_ptr((void *)addr));
2998 
2999 	*zonep = zone;
3000 	return zone_elem_inner_size(zone);
3001 }
3002 
3003 #endif /* KASAN_CLASSIC */
3004 #endif /* !ZALLOC_TEST */
3005 #pragma mark Zone zeroing and early random
3006 #if !ZALLOC_TEST
3007 
3008 /*
3009  * Zone zeroing
3010  *
3011  * All allocations from zones are zeroed on free and are additionally
3012  * checked to still be zero on alloc. The check is
3013  * always on, on embedded devices. A perf regression was detected
3014  * on Intel as we can't use the vectorized implementation of
3015  * memcmp_zero_ptr_aligned due to cyclic dependencies between
3016  * initialization and allocation. Therefore we perform the check
3017  * on 20% of the allocations.
3018  */
3019 #if ZALLOC_ENABLE_ZERO_CHECK
3020 #if defined(__x86_64__)
3021 /*
3022  * Perform zero validation on every 5th allocation
3023  */
3024 static TUNABLE(uint32_t, zzc_rate, "zzc_rate", 5);
3025 static uint32_t PERCPU_DATA(zzc_decrementer);
3026 #endif /* defined(__x86_64__) */
3027 
3028 /*
3029  * Determine if zero validation for allocation should be skipped
3030  */
3031 static bool
3032 zalloc_skip_zero_check(void)
3033 {
3034 #if defined(__x86_64__)
3035 	uint32_t *counterp, cnt;
3036 
3037 	counterp = PERCPU_GET(zzc_decrementer);
3038 	cnt = *counterp;
3039 	if (__probable(cnt > 0)) {
3040 		*counterp  = cnt - 1;
3041 		return true;
3042 	}
3043 	*counterp = zzc_rate - 1;
3044 #endif /* defined(__x86_64__) */
3045 	return false;
3046 }
3047 
3048 __abortlike
3049 static void
3050 zalloc_uaf_panic(zone_t z, uintptr_t elem, size_t size)
3051 {
3052 	uint32_t esize = (uint32_t)zone_elem_inner_size(z);
3053 	uint32_t first_offs = ~0u;
3054 	uintptr_t first_bits = 0, v;
3055 	char buf[1024];
3056 	int pos = 0;
3057 
3058 	buf[0] = '\0';
3059 
3060 	for (uint32_t o = 0; o < size; o += sizeof(v)) {
3061 		if ((v = *(uintptr_t *)(elem + o)) == 0) {
3062 			continue;
3063 		}
3064 		pos += scnprintf(buf + pos, sizeof(buf) - pos, "\n"
3065 		    "%5d: 0x%016lx", o, v);
3066 		if (first_offs > o) {
3067 			first_offs = o;
3068 			first_bits = v;
3069 		}
3070 	}
3071 
3072 	(panic)("[%s%s]: element modified after free "
3073 	"(off:%d, val:0x%016lx, sz:%d, ptr:%p)%s",
3074 	zone_heap_name(z), zone_name(z),
3075 	first_offs, first_bits, esize, (void *)elem, buf);
3076 }
3077 
3078 static void
3079 zalloc_validate_element(
3080 	zone_t                  zone,
3081 	vm_offset_t             elem,
3082 	vm_size_t               size,
3083 	zalloc_flags_t          flags)
3084 {
3085 	if (flags & Z_NOZZC) {
3086 		return;
3087 	}
3088 	if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3089 		zalloc_uaf_panic(zone, elem, size);
3090 	}
3091 	if (flags & Z_PCPU) {
3092 		for (size_t i = zpercpu_count(); --i > 0;) {
3093 			elem += PAGE_SIZE;
3094 			if (memcmp_zero_ptr_aligned((void *)elem, size)) {
3095 				zalloc_uaf_panic(zone, elem, size);
3096 			}
3097 		}
3098 	}
3099 }
3100 
3101 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
3102 
3103 __attribute__((noinline))
3104 static void
3105 zone_early_scramble_rr(zone_t zone, int cpu, zone_stats_t zs)
3106 {
3107 #if KASAN_FAKESTACK
3108 	/*
3109 	 * This can cause re-entrancy with kasan fakestacks
3110 	 */
3111 #pragma unused(zone, cpu, zs)
3112 #else
3113 	uint32_t bits;
3114 
3115 	bits = random_bool_gen_bits(&zone_bool_gen[cpu].zbg_bg,
3116 	    zone_bool_gen[cpu].zbg_entropy, ZONE_ENTROPY_CNT, 8);
3117 
3118 	zs->zs_alloc_rr += bits;
3119 	zs->zs_alloc_rr %= zone->z_chunk_elems;
3120 #endif
3121 }
3122 
3123 #endif /* !ZALLOC_TEST */
3124 #pragma mark Zone Leak Detection
3125 #if !ZALLOC_TEST
3126 #if ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS
3127 
3128 /*
3129  * Zone leak debugging code
3130  *
3131  * When enabled, this code keeps a log to track allocations to a particular
3132  * zone that have not yet been freed.
3133  *
3134  * Examining this log will reveal the source of a zone leak.
3135  *
3136  * The log is allocated only when logging is enabled (it is off by default),
3137  * so there is no effect on the system when it's turned off.
3138  *
3139  * Zone logging is enabled with the `zlog<n>=<zone>` boot-arg for each
3140  * zone name to log, with n starting at 1.
3141  *
3142  * Leaks debugging utilizes 2 tunables:
3143  * - zlsize (in kB) which describes how much "size" the record covers
3144  *   (zones with smaller elements get more records, default is 4M).
3145  *
3146  * - zlfreq (in kB) which describes a sample rate in cumulative allocation
3147  *   size at which automatic leak detection will sample allocations.
3148  *   (default is 16k)
3149  *
3150  *
3151  * Zone corruption logging
3152  *
3153  * Logging can also be used to help identify the source of a zone corruption.
3154  *
3155  * First, identify the zone that is being corrupted,
3156  * then add "-zc zlog<n>=<zone name>" to the boot-args.
3157  *
3158  * When -zc is used in conjunction with zlog,
3159  * it changes the logging style to track both allocations and frees to the zone.
3160  *
3161  * When the corruption is detected, examining the log will show you the stack
3162  * traces of the callers who last allocated and freed any particular element in
3163  * the zone.
3164  *
3165  * Corruption debugging logs will have zrecs records
3166  * (tuned by the zrecs= boot-arg, 16k elements per G of RAM by default).
3167  */
3168 
3169 #define ZRECORDS_MAX            (256u << 10)
3170 #define ZRECORDS_DEFAULT        (16u  << 10)
3171 static TUNABLE(uint32_t, zrecs, "zrecs", 0);
3172 static TUNABLE(uint32_t, zlsize, "zlsize", 4 * 1024);
3173 static TUNABLE(uint32_t, zlfreq, "zlfreq", 16);
3174 
3175 __startup_func
3176 static void
3177 zone_leaks_init_zrecs(void)
3178 {
3179 	/*
3180 	 * Don't allow more than ZRECORDS_MAX records,
3181 	 * even if the user asked for more.
3182 	 *
3183 	 * This prevents accidentally hogging too much kernel memory
3184 	 * and making the system unusable.
3185 	 */
3186 	if (zrecs == 0) {
3187 		zrecs = ZRECORDS_DEFAULT *
3188 		    (uint32_t)((max_mem + (1ul << 30)) >> 30);
3189 	}
3190 	if (zrecs > ZRECORDS_MAX) {
3191 		zrecs = ZRECORDS_MAX;
3192 	}
3193 }
3194 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_leaks_init_zrecs);
3195 
3196 static uint32_t
3197 zone_leaks_record_count(zone_t z)
3198 {
3199 	uint32_t recs = (zlsize << 10) / zone_elem_inner_size(z);
3200 
3201 	return MIN(MAX(recs, ZRECORDS_DEFAULT), ZRECORDS_MAX);
3202 }
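/*
 * e.g. with the default zlsize of 4MB, a zone of 64-byte elements gets
 * MIN(MAX(4M / 64, 16k), 256k) == 64k records.
 */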
3203 
3204 static uint32_t
3205 zone_leaks_sample_rate(zone_t z)
3206 {
3207 	return (zlfreq << 10) / zone_elem_inner_size(z);
3208 }
3209 
3210 #if ZALLOC_ENABLE_LOGGING
3211 /* Log allocations and frees to help debug a zone element corruption */
3212 static TUNABLE(bool, corruption_debug_flag, "-zc", false);
3213 
3214 /*
3215  * A maximum of 10 zlog<n> boot args can be provided (zlog1 -> zlog10)
3216  */
3217 #define MAX_ZONES_LOG_REQUESTS  10
3218 
3219 /**
3220  * @function zone_setup_logging
3221  *
3222  * @abstract
3223  * Optionally sets up a zone for logging.
3224  *
3225  * @discussion
3226  * We recognize the following boot-args:
3227  *
3228  *	zlog=<zone_to_log>
3229  *	zrecs=<num_records_in_log>
3230  *	zlsize=<memory to cover for leaks>
3231  *
3232  * The zlog arg is used to specify the zone name that should be logged,
3233  * and zrecs/zlsize is used to control the size of the log.
3234  */
3235 static void
3236 zone_setup_logging(zone_t z)
3237 {
3238 	char zone_name[MAX_ZONE_NAME]; /* Temp. buffer for the zone name */
3239 	char zlog_name[MAX_ZONE_NAME]; /* Temp. buffer to create the strings zlog1, zlog2 etc... */
3240 	char zlog_val[MAX_ZONE_NAME];  /* the zone name we're logging, if any */
3241 	bool logging_on = false;
3242 
3243 	/*
3244 	 * Append kalloc heap name to zone name (if zone is used by kalloc)
3245 	 */
3246 	snprintf(zone_name, MAX_ZONE_NAME, "%s%s", zone_heap_name(z), z->z_name);
3247 
3248 	/* zlog0 isn't allowed. */
3249 	for (int i = 1; i <= MAX_ZONES_LOG_REQUESTS; i++) {
3250 		snprintf(zlog_name, MAX_ZONE_NAME, "zlog%d", i);
3251 
3252 		if (PE_parse_boot_argn(zlog_name, zlog_val, sizeof(zlog_val))) {
3253 			if (track_this_zone(zone_name, zlog_val) ||
3254 			    track_kalloc_zones(z, zlog_val)) {
3255 				logging_on = true;
3256 				break;
3257 			}
3258 		}
3259 	}
3260 
3261 	/*
3262 	 * Backwards compat. with the old boot-arg used to specify single zone
3263 	 * logging, i.e. zlog. This needs to happen after the newer zlog<n>
3264 	 * checks because the prefix will match all the zlog<n>
3265 	 * boot-args.
3266 	 */
3267 	if (!logging_on &&
3268 	    PE_parse_boot_argn("zlog", zlog_val, sizeof(zlog_val))) {
3269 		if (track_this_zone(zone_name, zlog_val) ||
3270 		    track_kalloc_zones(z, zlog_val)) {
3271 			logging_on = true;
3272 		}
3273 	}
3274 
3275 	/*
3276 	 * If we want to log a zone, see if we need to allocate buffer space for
3277 	 * the log.
3278 	 *
3279 	 * Some vm related zones are zinit'ed before we can do a kmem_alloc, so
3280 	 * we have to defer allocation in that case.
3281 	 *
3282 	 * zone_init() will finish the job.
3283 	 *
3284 	 * If we want to log one of the VM related zones that's set up early on,
3285 	 * we will skip allocation of the log until zinit is called again later
3286 	 * on some other zone.
3287 	 */
3288 	if (logging_on) {
3289 		if (corruption_debug_flag) {
3290 			z->z_btlog = btlog_create(BTLOG_LOG, zrecs, 0);
3291 		} else {
3292 			z->z_btlog = btlog_create(BTLOG_HASH,
3293 			    zone_leaks_record_count(z), 0);
3294 		}
3295 		if (z->z_btlog) {
3296 			z->z_log_on = true;
3297 			printf("zone[%s%s]: logging enabled\n",
3298 			    zone_heap_name(z), z->z_name);
3299 		} else {
3300 			printf("zone[%s%s]: failed to enable logging\n",
3301 			    zone_heap_name(z), z->z_name);
3302 		}
3303 	}
3304 }
3305 
3306 #endif /* ZALLOC_ENABLE_LOGGING */
3307 #if KASAN_TBI
3308 static TUNABLE(uint32_t, kasan_zrecs, "kasan_zrecs", 0);
3309 
3310 __startup_func
3311 static void
3312 kasan_tbi_init_zrecs(void)
3313 {
3314 	/*
3315 	 * Don't allow more than ZRECORDS_MAX records,
3316 	 * even if the user asked for more.
3317 	 *
3318 	 * This prevents accidentally hogging too much kernel memory
3319 	 * and making the system unusable.
3320 	 */
3321 	if (kasan_zrecs == 0) {
3322 		kasan_zrecs = ZRECORDS_DEFAULT *
3323 		    (uint32_t)((max_mem + (1ul << 30)) >> 30);
3324 	}
3325 	if (kasan_zrecs > ZRECORDS_MAX) {
3326 		kasan_zrecs = ZRECORDS_MAX;
3327 	}
3328 }
3329 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, kasan_tbi_init_zrecs);
3330 
3331 static void
3332 zone_setup_kasan_logging(zone_t z)
3333 {
3334 	if (!z->z_tbi_tag) {
3335 		printf("zone[%s%s]: kasan logging disabled for this zone\n",
3336 		    zone_heap_name(z), z->z_name);
3337 		return;
3338 	}
3339 
3340 	z->z_log_on = true;
3341 	z->z_btlog = btlog_create(BTLOG_LOG, kasan_zrecs, 0);
3342 	if (!z->z_btlog) {
3343 		printf("zone[%s%s]: failed to enable kasan logging\n",
3344 		    zone_heap_name(z), z->z_name);
3345 	}
3346 }
3347 
3348 #endif /* KASAN_TBI */
3349 #if CONFIG_ZLEAKS
3350 
3351 static thread_call_data_t zone_leaks_callout;
3352 
3353 /*
3354  * The zone leak detector, abbreviated 'zleak', keeps track
3355  * of a subset of the currently outstanding allocations
3356  * made by the zone allocator.
3357  *
3358  * It will engage itself automatically if the zone map usage
3359  * goes above zleak_pages_global_wired_threshold pages.
3360  *
3361  * When that threshold is reached, zones that use more than
3362  * zleak_pages_per_zone_wired_threshold pages will get
3363  * a BTLOG_HASH btlog with sampling to minimize perf impact,
3364  * while still gathering statistical data about the backtrace
3365  * most likely to be causing the leak.
3366  *
3367  * If the zone falls far enough below the threshold, the log
3368  * is disabled and its backtraces freed. Data can be collected
3369  * from userspace with the zlog(1) command.
3370  */
3371 
3372 /* whether the zleaks subsystem thinks the map is under pressure */
3373 uint32_t                zleak_active;
3374 SECURITY_READ_ONLY_LATE(vm_size_t) zleak_max_zonemap_size;
3375 
3376 /* Size of zone map at which to start collecting data */
3377 static size_t           zleak_pages_global_wired_threshold = ~0;
3378 vm_size_t               zleak_global_tracking_threshold = ~0;
3379 
3380 /* Size a zone will have before we will collect data on it */
3381 static size_t           zleak_pages_per_zone_wired_threshold = ~0;
3382 vm_size_t               zleak_per_zone_tracking_threshold = ~0;
3383 
3384 static inline bool
3385 zleak_should_enable_for_zone(zone_t z)
3386 {
3387 	if (z->z_log_on) {
3388 		return false;
3389 	}
3390 	if (z->z_btlog) {
3391 		return false;
3392 	}
3393 	if (!zleak_active) {
3394 		return false;
3395 	}
3396 	return z->z_wired_cur >= zleak_pages_per_zone_wired_threshold;
3397 }
3398 
3399 static inline bool
3400 zleak_should_disable_for_zone(zone_t z)
3401 {
3402 	if (z->z_log_on) {
3403 		return false;
3404 	}
3405 	if (!z->z_btlog) {
3406 		return false;
3407 	}
3408 	if (!zleak_active) {
3409 		return true;
3410 	}
3411 	return z->z_wired_cur < zleak_pages_per_zone_wired_threshold / 2;
3412 }
3413 
3414 static inline bool
3415 zleak_should_activate(size_t pages)
3416 {
3417 	return !zleak_active && pages >= zleak_pages_global_wired_threshold;
3418 }
3419 
3420 static inline bool
3421 zleak_should_deactivate(size_t pages)
3422 {
3423 	return zleak_active && pages < zleak_pages_global_wired_threshold / 2;
3424 }
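
/*
 * Together, the predicates above implement a hysteresis: with a global
 * threshold of, say, 100000 pages, zleak engages once zone_pages_wired
 * reaches 100000 and only disengages once it drops below 50000, so a
 * zone map hovering near the threshold doesn't flap logging on and off.
 * The per-zone predicates apply the same half-threshold rule per zone.
 */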
3425 
3426 static void
3427 zleaks_enable_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
3428 {
3429 	size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
3430 	btlog_t log;
3431 
3432 	if (zleak_should_activate(pages)) {
3433 		zleak_active = 1;
3434 	} else if (zleak_should_deactivate(pages)) {
3435 		zleak_active = 0;
3436 	}
3437 
3438 	zone_foreach(z) {
3439 		if (zleak_should_disable_for_zone(z)) {
3440 			log = z->z_btlog;
3441 			z->z_btlog = NULL;
3442 			assert(z->z_btlog_disabled == NULL);
3443 			btlog_disable(log);
3444 			z->z_btlog_disabled = log;
3445 		}
3446 
3447 		if (zleak_should_enable_for_zone(z)) {
3448 			log = z->z_btlog_disabled;
3449 			if (log == NULL) {
3450 				log = btlog_create(BTLOG_HASH,
3451 				    zone_leaks_record_count(z),
3452 				    zone_leaks_sample_rate(z));
3453 			} else if (btlog_enable(log) == KERN_SUCCESS) {
3454 				z->z_btlog_disabled = NULL;
3455 			} else {
3456 				log = NULL;
3457 			}
3458 			os_atomic_store(&z->z_btlog, log, release);
3459 		}
3460 	}
3461 }
3462 
3463 __startup_func
3464 static void
3465 zleak_init(void)
3466 {
3467 	zleak_max_zonemap_size = ptoa(zone_pages_wired_max);
3468 
3469 	zleak_update_threshold(&zleak_global_tracking_threshold,
3470 	    zleak_max_zonemap_size / 2);
3471 	zleak_update_threshold(&zleak_per_zone_tracking_threshold,
3472 	    zleak_global_tracking_threshold / 8);
3473 
3474 	thread_call_setup_with_options(&zone_leaks_callout,
3475 	    zleaks_enable_async, NULL, THREAD_CALL_PRIORITY_USER,
3476 	    THREAD_CALL_OPTIONS_ONCE);
3477 }
3478 STARTUP(ZALLOC, STARTUP_RANK_SECOND, zleak_init);
3479 
3480 kern_return_t
3481 zleak_update_threshold(vm_size_t *arg, uint64_t value)
3482 {
3483 	if (value >= zleak_max_zonemap_size) {
3484 		return KERN_INVALID_VALUE;
3485 	}
3486 
3487 	if (arg == &zleak_global_tracking_threshold) {
3488 		zleak_global_tracking_threshold = (vm_size_t)value;
3489 		zleak_pages_global_wired_threshold = atop(value);
3490 		if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3491 			thread_call_enter(&zone_leaks_callout);
3492 		}
3493 		return KERN_SUCCESS;
3494 	}
3495 
3496 	if (arg == &zleak_per_zone_tracking_threshold) {
3497 		zleak_per_zone_tracking_threshold = (vm_size_t)value;
3498 		zleak_pages_per_zone_wired_threshold = atop(value);
3499 		if (startup_phase >= STARTUP_SUB_THREAD_CALL) {
3500 			thread_call_enter(&zone_leaks_callout);
3501 		}
3502 		return KERN_SUCCESS;
3503 	}
3504 
3505 	return KERN_INVALID_ARGUMENT;
3506 }
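
/*
 * A minimal usage sketch (hypothetical values, purely illustrative):
 * shrinking both thresholds makes zleak engage earlier, and each
 * successful update lets zone_leaks_callout re-evaluate zleak state:
 *
 *	kern_return_t kr;
 *
 *	kr = zleak_update_threshold(&zleak_global_tracking_threshold,
 *	    zleak_max_zonemap_size / 4);
 *	if (kr == KERN_SUCCESS) {
 *		kr = zleak_update_threshold(&zleak_per_zone_tracking_threshold,
 *		    zleak_max_zonemap_size / 32);
 *	}
 */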
3507 
3508 static void
3509 panic_display_zleaks(bool has_syms)
3510 {
3511 	bool did_header = false;
3512 	vm_address_t bt[BTLOG_MAX_DEPTH];
3513 	uint32_t len, count;
3514 
3515 	zone_foreach(z) {
3516 		btlog_t log = z->z_btlog;
3517 
3518 		if (log == NULL || btlog_get_type(log) != BTLOG_HASH) {
3519 			continue;
3520 		}
3521 
3522 		count = btlog_guess_top(log, bt, &len);
3523 		if (count == 0) {
3524 			continue;
3525 		}
3526 
3527 		if (!did_header) {
3528 			paniclog_append_noflush("Zone (suspected) leak report:\n");
3529 			did_header = true;
3530 		}
3531 
3532 		paniclog_append_noflush("  Zone:    %s%s\n",
3533 		    zone_heap_name(z), zone_name(z));
3534 		paniclog_append_noflush("  Count:   %d (%ld bytes)\n", count,
3535 		    (long)count * zone_scale_for_percpu(z, zone_elem_inner_size(z)));
3536 		paniclog_append_noflush("  Size:    %ld\n",
3537 		    (long)zone_size_wired(z));
3538 		paniclog_append_noflush("  Top backtrace:\n");
3539 		for (uint32_t i = 0; i < len; i++) {
3540 			if (has_syms) {
3541 				paniclog_append_noflush("    %p ", (void *)bt[i]);
3542 				panic_print_symbol_name(bt[i]);
3543 				paniclog_append_noflush("\n");
3544 			} else {
3545 				paniclog_append_noflush("    %p\n", (void *)bt[i]);
3546 			}
3547 		}
3548 
3549 		kmod_panic_dump(bt, len);
3550 		paniclog_append_noflush("\n");
3551 	}
3552 }
3553 #endif /* CONFIG_ZLEAKS */
3554 
3555 #endif /* ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS */
3556 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
3557 
3558 #if !KASAN_TBI
3559 __cold
3560 #endif
3561 static void
3562 zalloc_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3563 {
3564 	btlog_t log = zone->z_btlog;
3565 	btref_get_flags_t flags = 0;
3566 	btref_t ref;
3567 
3568 #if !KASAN_TBI
3569 	if (!log || !btlog_sample(log)) {
3570 		return;
3571 	}
3572 #endif
3573 	if (get_preemption_level() || zone_supports_vm(zone)) {
3574 		/*
3575 		 * VM zones can be used by btlog, avoid reentrancy issues.
3576 		 */
3577 		flags = BTREF_GET_NOWAIT;
3578 	}
3579 
3580 	ref = btref_get(fp, flags);
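	/*
	 * Note: when count > 1, the batch is chained through the elements
	 * themselves: the first word of each element holds the offset to
	 * the next one, which is what the walk below relies on (zfree_log()
	 * below uses the same scheme).
	 */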
3581 	while (count-- > 0) {
3582 		if (count) {
3583 			btref_retain(ref);
3584 		}
3585 		btlog_record(log, (void *)addr, ZOP_ALLOC, ref);
3586 		addr += *(vm_offset_t *)addr;
3587 	}
3588 }
3589 
3590 #define ZALLOC_LOG(zone, addr, count)  ({ \
3591 	if ((zone)->z_btlog) {                                                 \
3592 	        zalloc_log(zone, addr, count, __builtin_frame_address(0));     \
3593 	}                                                                      \
3594 })
3595 
3596 #if !KASAN_TBI
3597 __cold
3598 #endif
3599 static void
3600 zfree_log(zone_t zone, vm_offset_t addr, uint32_t count, void *fp)
3601 {
3602 	btlog_t log = zone->z_btlog;
3603 	btref_get_flags_t flags = 0;
3604 	btref_t ref;
3605 
3606 #if !KASAN_TBI
3607 	if (!log) {
3608 		return;
3609 	}
3610 #endif
3611 
3612 	/*
3613 	 * See if we're doing logging on this zone.
3614 	 *
3615 	 * There are two styles of logging used depending on
3616 	 * whether we're trying to catch a leak or corruption.
3617 	 */
3618 #if !KASAN_TBI
3619 	if (btlog_get_type(log) == BTLOG_HASH) {
3620 		/*
3621 		 * We're logging to catch a leak.
3622 		 *
3623 		 * Remove any record we might have for this element
3624 		 * since it's being freed.  Note that we may not find it
3625 		 * if the buffer overflowed and that's OK.
3626 		 *
3627 		 * Since the log is of a limited size, old records get
3628 		 * overwritten if there are more zallocs than zfrees.
3629 		 */
3630 		while (count-- > 0) {
3631 			btlog_erase(log, (void *)addr);
3632 			addr += *(vm_offset_t *)addr;
3633 		}
3634 		return;
3635 	}
3636 #endif /* !KASAN_TBI */
3637 
3638 	if (get_preemption_level() || zone_supports_vm(zone)) {
3639 		/*
3640 		 * VM zones can be used by btlog, avoid reentrancy issues.
3641 		 */
3642 		flags = BTREF_GET_NOWAIT;
3643 	}
3644 
3645 	ref = btref_get(fp, flags);
3646 	while (count-- > 0) {
3647 		if (count) {
3648 			btref_retain(ref);
3649 		}
3650 		btlog_record(log, (void *)addr, ZOP_FREE, ref);
3651 		addr += *(vm_offset_t *)addr;
3652 	}
3653 }
3654 
3655 #define ZFREE_LOG(zone, addr, count)  ({ \
3656 	if ((zone)->z_btlog) {                                                 \
3657 	        zfree_log(zone, addr, count, __builtin_frame_address(0));      \
3658 	}                                                                      \
3659 })
3660 
3661 #else
3662 #define ZALLOC_LOG(...)         ((void)0)
3663 #define ZFREE_LOG(...)          ((void)0)
3664 #endif /* ZALLOC_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI */
3665 #endif /* !ZALLOC_TEST */
3666 #pragma mark zone (re)fill
3667 #if !ZALLOC_TEST
3668 
3669 /*!
3670  * @defgroup Zone Refill
3671  * @{
3672  *
3673  * @brief
3674  * Functions handling the zone refill machinery.
3675  *
3676  * @discussion
3677  * Zones are refilled based on 2 mechanisms: direct expansion, async expansion.
3678  *
3679  * @c zalloc_ext() is the codepath that kicks the zone refill when the zone is
3680  * dropping below half of its @c z_elems_rsv (0 for most zones) and will:
3681  *
3682  * - call @c zone_expand_locked() directly if the caller is allowed to block,
3683  *
3684  * - wake up the asynchronous expansion thread call if the caller is not allowed
3685  *   to block, or if the reserve becomes depleted.
3686  *
3687  *
3688  * <h2>Synchronous expansion</h2>
3689  *
3690  * This mechanism is actually the only one that may refill a zone, and all the
3691  * other ones funnel through this one eventually.
3692  *
3693  * @c zone_expand_locked() implements the core of the expansion mechanism,
3694  * and will do so while a caller specified predicate is true.
3695  *
3696  * Zone expansion allows for up to 2 threads to concurrently refill the zone:
3697  * - one VM privileged thread,
3698  * - one regular thread.
3699  *
3700  * Regular threads that refill will put down their identity in @c z_expander,
3701  * so that priority inversion avoidance can be implemented.
3702  *
3703  * However, VM privileged threads are allowed to use VM page reserves,
3704  * which allows for the system to recover from extreme memory pressure
3705  * situations, allowing for the few allocations that @c zone_gc() or
3706  * killing processes require.
3707  *
3708  * When a VM privileged thread is also expanding, the @c z_expander_vm_priv bit
3709  * is set. @c z_expander is not necessarily the identity of this VM privileged
3710  * thread (it is if the VM privileged thread came in first; otherwise it is some
3711  * other thread, and could even be @c THREAD_NULL).
3712  *
3713  * Note that the pageout-scan daemon might be BG and is VM privileged. To avoid
3714  * spending a whole pointer on priority inheritance for VM privileged threads
3715  * (and other issues related to having two owners), we use the rwlock boost as
3716  * a stop gap to avoid priority inversions.
3717  *
3718  *
3719  * <h2>Chunk wiring policies</h2>
3720  *
3721  * Zones allocate memory in chunks of @c zone_t::z_chunk_pages pages at a time
3722  * to minimize the space wasted when element sizes don't align well with the
3723  * page size.  However, such chunks can grow large and be hard to fulfill on
3724  * a system under a lot of memory pressure (chunks can be as long as 8 pages on
3725  * 4k page systems).
3726  *
3727  * This is why, when under memory pressure the system allows chunks to be
3728  * partially populated. The metadata of the first page in the chunk maintains
3729  * the count of actually populated pages.
3730  *
3731  * The metadata for addresses assigned to a zone is found on one of 4 queues:
3732  * - @c z_pageq_empty has chunk heads with populated pages and no allocated
3733  *   elements (those can be targeted by @c zone_gc()),
3734  * - @c z_pageq_partial has chunk heads with populated pages that are partially
3735  *   used,
3736  * - @c z_pageq_full has chunk heads with populated pages with no free elements
3737  *   left,
3738  * - @c z_pageq_va has either chunk heads for sequestered VA space assigned to
3739  *   the zone forever, or the first secondary metadata for a chunk whose
3740  *   corresponding page is not populated in the chunk.
3741  *
3742  * When new pages need to be wired/populated, chunks from the @c z_pageq_va
3743  * queues are preferred.
3744  *
3745  *
3746  * <h2>Asynchronous expansion</h2>
3747  *
3748  * This mechanism allows for refilling zones used mostly with non blocking
3749  * callers. It relies on a thread call (@c zone_expand_callout) which will
3750  * iterate all zones and refill the ones marked with @c z_async_refilling.
3751  *
3752  * NOTE: If the calling thread for zalloc_noblock is lower priority than
3753  *       the thread_call, then zalloc_noblock to an empty zone may succeed.
3754  *
3755  *
3756  * <h2>Dealing with zone allocations from the mach VM code</h2>
3757  *
3758  * The implementation of the mach VM itself uses the zone allocator
3759  * for things like the vm_map_entry data structure. In order to prevent
3760  * a recursion problem when adding more pages to a zone, the VM zones
3761  * use the Z_SUBMAP_IDX_VM submap which doesn't use kmem_alloc()
3762  * or any VM map functions to allocate.
3763  *
3764  * Instead, a really simple coalescing first-fit allocator is used
3765  * for this submap, and nothing but zalloc is allowed to allocate from it.
3766  *
3767  * Memory is directly populated which doesn't require allocation of
3768  * VM map entries, and avoids recursion. The cost of this scheme however,
3769  * is that `vm_map_lookup_entry` will not function on those addresses
3770  * (nor any API relying on it).
3771  */
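
/*
 * In pseudo-C, the refill decision made by @c zalloc_ext() roughly
 * looks like this (a simplified sketch, not the actual code; the
 * @c zalloc_can_block() helper stands in for the real Z_NOWAIT /
 * Z_NOPAGEWAIT flag checks):
 *
 *	if (z->z_elems_free <= z->z_elems_rsv / 2) {
 *		if (zalloc_can_block(flags)) {
 *			zone_expand_locked(z, flags, pred);
 *		} else {
 *			zone_expand_async_schedule_if_allowed(z);
 *		}
 *	}
 */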
3772 
3773 static thread_call_data_t zone_expand_callout;
3774 
3775 __attribute__((overloadable))
3776 static inline bool
3777 zone_submap_is_sequestered(zone_submap_idx_t idx)
3778 {
3779 	return idx != Z_SUBMAP_IDX_DATA;
3780 }
3781 
3782 __attribute__((overloadable))
3783 static inline bool
3784 zone_submap_is_sequestered(zone_security_flags_t zsflags)
3785 {
3786 	return zone_submap_is_sequestered(zsflags.z_submap_idx);
3787 }
3788 
3789 static inline kma_flags_t
3790 zone_kma_flags(zone_t z, zone_security_flags_t zsflags, zalloc_flags_t flags)
3791 {
3792 	kma_flags_t kmaflags = KMA_KOBJECT | KMA_ZERO;
3793 
3794 	if (zsflags.z_noencrypt) {
3795 		kmaflags |= KMA_NOENCRYPT;
3796 	}
3797 	if (flags & Z_NOPAGEWAIT) {
3798 		kmaflags |= KMA_NOPAGEWAIT;
3799 	}
3800 	if (z->z_permanent || (!z->z_destructible &&
3801 	    zone_submap_is_sequestered(zsflags))) {
3802 		kmaflags |= KMA_PERMANENT;
3803 	}
3804 	if (zsflags.z_submap_from_end) {
3805 		kmaflags |= KMA_LAST_FREE;
3806 	}
3807 
3808 	return kmaflags;
3809 }
3810 
3811 static inline void
3812 zone_add_wired_pages(uint32_t pages)
3813 {
3814 	size_t count = os_atomic_add(&zone_pages_wired, pages, relaxed);
3815 
3816 #if CONFIG_ZLEAKS
3817 	if (__improbable(zleak_should_activate(count) &&
3818 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3819 		thread_call_enter(&zone_leaks_callout);
3820 	}
3821 #else
3822 	(void)count;
3823 #endif
3824 }
3825 
3826 static inline void
3827 zone_remove_wired_pages(uint32_t pages)
3828 {
3829 	size_t count = os_atomic_sub(&zone_pages_wired, pages, relaxed);
3830 
3831 #if CONFIG_ZLEAKS
3832 	if (__improbable(zleak_should_deactivate(count) &&
3833 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3834 		thread_call_enter(&zone_leaks_callout);
3835 	}
3836 #else
3837 	(void)count;
3838 #endif
3839 }
3840 
3841 /*!
3842  * @function zcram_and_lock()
3843  *
3844  * @brief
3845  * Prepare some memory for being usable for allocation purposes.
3846  *
3847  * @discussion
3848  * Prepare memory in <code>[addr + ptoa(pg_start), addr + ptoa(pg_end))</code>
3849  * to be usable in the zone.
3850  *
3851  * This function assumes the metadata is already populated for the range.
3852  *
3853  * Calling this function with @c pg_start being 0 means that the memory
3854  * is either a partial chunk, or a full chunk, that isn't published anywhere
3855  * and the initialization can happen without locks held.
3856  *
3857  * Calling this function with a non zero @c pg_start means that we are extending
3858  * an existing chunk: the memory in <code>[addr, addr + ptoa(pg_start))</code>,
3859  * is already usable and published in the zone, so extending it requires holding
3860  * the zone lock.
3861  *
3862  * @param zone          The zone to cram new populated pages into
3863  * @param addr          The base address for the chunk(s)
3864  * @param pg_va_new     The number of virtual pages newly assigned to the zone
3865  * @param pg_start      The first newly populated page relative to @a addr.
3866  * @param pg_end        The after-last newly populated page relative to @a addr.
3867  * @param lock          0 or ZM_ALLOC_SIZE_LOCK (used by early crams)
3868  */
3869 static void
3870 zcram_and_lock(zone_t zone, vm_offset_t addr, uint32_t pg_va_new,
3871     uint32_t pg_start, uint32_t pg_end, uint16_t lock)
3872 {
3873 	zone_id_t zindex = zone_index(zone);
3874 	vm_offset_t elem_size = zone_elem_outer_size(zone);
3875 	uint32_t free_start = 0, free_end = 0;
3876 	uint32_t oob_offs = zone_elem_outer_offs(zone);
3877 
3878 	struct zone_page_metadata *meta = zone_meta_from_addr(addr);
3879 	uint32_t chunk_pages = zone->z_chunk_pages;
3880 	bool guarded = meta->zm_guarded;
3881 
3882 	assert(pg_start < pg_end && pg_end <= chunk_pages);
3883 
3884 	if (pg_start == 0) {
3885 		uint16_t chunk_len = (uint16_t)pg_end;
3886 		uint16_t secondary_len = ZM_SECONDARY_PAGE;
3887 		bool inline_bitmap = false;
3888 
3889 		if (zone->z_percpu) {
3890 			chunk_len = 1;
3891 			secondary_len = ZM_SECONDARY_PCPU_PAGE;
3892 			assert(pg_end == zpercpu_count());
3893 		}
3894 		if (!zone->z_permanent && !zone->z_uses_tags) {
3895 			inline_bitmap = zone->z_chunk_elems <= 32 * chunk_pages;
3896 		}
3897 
3898 		free_end = (uint32_t)(ptoa(chunk_len) - oob_offs) / elem_size;
3899 
3900 		meta[0] = (struct zone_page_metadata){
3901 			.zm_index         = zindex,
3902 			.zm_guarded       = guarded,
3903 			.zm_inline_bitmap = inline_bitmap,
3904 			.zm_chunk_len     = chunk_len,
3905 			.zm_alloc_size    = lock,
3906 		};
3907 
3908 		if (!zone->z_permanent && !inline_bitmap) {
3909 			meta[0].zm_bitmap = zone_meta_bits_alloc_init(free_end,
3910 			    zone->z_chunk_elems, zone->z_uses_tags);
3911 		}
3912 
3913 		for (uint16_t i = 1; i < chunk_pages; i++) {
3914 			meta[i] = (struct zone_page_metadata){
3915 				.zm_index          = zindex,
3916 				.zm_guarded        = guarded,
3917 				.zm_inline_bitmap  = inline_bitmap,
3918 				.zm_chunk_len      = secondary_len,
3919 				.zm_page_index     = (uint8_t)i,
3920 				.zm_bitmap         = meta[0].zm_bitmap,
3921 				.zm_subchunk_len   = (uint8_t)(chunk_pages - i),
3922 			};
3923 		}
3924 
3925 		if (inline_bitmap) {
3926 			zone_meta_bits_init_inline(meta, free_end);
3927 		}
3928 	} else {
3929 		assert(!zone->z_percpu && !zone->z_permanent);
3930 
3931 		free_end = (uint32_t)(ptoa(pg_end) - oob_offs) / elem_size;
3932 		free_start = (uint32_t)(ptoa(pg_start) - oob_offs) / elem_size;
3933 	}
3934 
3935 #if KASAN_CLASSIC
3936 	assert(pg_start == 0); /* KASAN_CLASSIC never does partial chunks */
3937 	if (zone->z_permanent) {
3938 		kasan_poison_range(addr, ptoa(pg_end), ASAN_VALID);
3939 	} else if (zone->z_percpu) {
3940 		for (uint32_t i = 0; i < pg_end; i++) {
3941 			kasan_zmem_add(addr + ptoa(i), PAGE_SIZE,
3942 			    zone_elem_outer_size(zone),
3943 			    zone_elem_outer_offs(zone),
3944 			    zone_elem_redzone(zone));
3945 		}
3946 	} else {
3947 		kasan_zmem_add(addr, ptoa(pg_end),
3948 		    zone_elem_outer_size(zone),
3949 		    zone_elem_outer_offs(zone),
3950 		    zone_elem_redzone(zone));
3951 	}
3952 #endif /* KASAN_CLASSIC */
3953 
3954 	/*
3955 	 * Insert the initialized pages / metadatas into the right lists.
3956 	 */
3957 
3958 	zone_lock(zone);
3959 	assert(zone->z_self == zone);
3960 
3961 	if (pg_start != 0) {
3962 		assert(meta->zm_chunk_len == pg_start);
3963 
3964 		zone_meta_bits_merge(meta, free_start, free_end);
3965 		meta->zm_chunk_len = (uint16_t)pg_end;
3966 
3967 		/*
3968 		 * consume the zone_meta_lock_in_partial()
3969 		 * done in zone_expand_locked()
3970 		 */
3971 		zone_meta_alloc_size_sub(zone, meta, ZM_ALLOC_SIZE_LOCK);
3972 		zone_meta_remqueue(zone, meta);
3973 	}
3974 
3975 	if (zone->z_permanent || meta->zm_alloc_size) {
3976 		zone_meta_queue_push(zone, &zone->z_pageq_partial, meta);
3977 	} else {
3978 		zone_meta_queue_push(zone, &zone->z_pageq_empty, meta);
3979 		zone->z_wired_empty += zone->z_percpu ? 1 : pg_end;
3980 	}
3981 	if (pg_end < chunk_pages) {
3982 		/* push any non populated residual VA on z_pageq_va */
3983 		zone_meta_queue_push(zone, &zone->z_pageq_va, meta + pg_end);
3984 	}
3985 
3986 	zone->z_elems_free  += free_end - free_start;
3987 	zone->z_elems_avail += free_end - free_start;
3988 	zone->z_wired_cur   += zone->z_percpu ? 1 : pg_end - pg_start;
3989 	if (pg_va_new) {
3990 		zone->z_va_cur += zone->z_percpu ? 1 : pg_va_new;
3991 	}
3992 	if (zone->z_wired_hwm < zone->z_wired_cur) {
3993 		zone->z_wired_hwm = zone->z_wired_cur;
3994 	}
3995 
3996 #if CONFIG_ZLEAKS
3997 	if (__improbable(zleak_should_enable_for_zone(zone) &&
3998 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
3999 		thread_call_enter(&zone_leaks_callout);
4000 	}
4001 #endif /* CONFIG_ZLEAKS */
4002 
4003 	zone_add_wired_pages(pg_end - pg_start);
4004 }
4005 
4006 static void
4007 zcram(zone_t zone, vm_offset_t addr, uint32_t pages, uint16_t lock)
4008 {
4009 	uint32_t chunk_pages = zone->z_chunk_pages;
4010 
4011 	assert(pages % chunk_pages == 0);
4012 	for (; pages > 0; pages -= chunk_pages, addr += ptoa(chunk_pages)) {
4013 		zcram_and_lock(zone, addr, chunk_pages, 0, chunk_pages, lock);
4014 		zone_unlock(zone);
4015 	}
4016 }
4017 
4018 __startup_func
4019 void
4020 zone_cram_early(zone_t zone, vm_offset_t newmem, vm_size_t size)
4021 {
4022 	uint32_t pages = (uint32_t)atop(size);
4023 
4024 	assert(from_zone_map(newmem, size));
4025 	assert3u(size % ptoa(zone->z_chunk_pages), ==, 0);
4026 	assert3u(startup_phase, <, STARTUP_SUB_ZALLOC);
4027 
4028 	/*
4029 	 * The early pages we move at the pmap layer can't be "depopulated"
4030 	 * because there's no vm_page_t for them.
4031 	 *
4032 	 * "Lock" them so that they never hit z_pageq_empty.
4033 	 */
4034 	bzero((void *)newmem, size);
4035 	zcram(zone, newmem, pages, ZM_ALLOC_SIZE_LOCK);
4036 }
4037 
4038 /*!
4039  * @function zone_submap_alloc_sequestered_va
4040  *
4041  * @brief
4042  * Allocates VA without using vm_find_space().
4043  *
4044  * @discussion
4045  * Allocate VA quickly without using the slower vm_find_space() for cases
4046  * when the submaps are fully sequestered.
4047  *
4048  * The VM submap is used to implement the VM itself so it is always sequestered,
4049  * as it can't use kmem_alloc(), which always needs to allocate VM map entries.
4050  * However, it can use vm_map_enter(), which tries to coalesce entries; here
4051  * that always succeeds, so the VM submap only ever needs 2 entries (one for each end).
4052  *
4053  * The RO submap is similarly always sequestered if it exists (as a non
4054  * sequestered RO submap makes very little sense).
4055  *
4056  * The allocator is a very simple bump-allocator
4057  * that allocates from either end.
4058  */
4059 static kern_return_t
4060 zone_submap_alloc_sequestered_va(zone_security_flags_t zsflags, uint32_t pages,
4061     vm_offset_t *addrp)
4062 {
4063 	vm_size_t size = ptoa(pages);
4064 	vm_map_t map = zone_submap(zsflags);
4065 	vm_map_entry_t first, last;
4066 	vm_map_offset_t addr;
4067 
4068 	vm_map_lock(map);
4069 
4070 	first = vm_map_first_entry(map);
4071 	last = vm_map_last_entry(map);
4072 
4073 	if (first->vme_end + size > last->vme_start) {
4074 		vm_map_unlock(map);
4075 		return KERN_NO_SPACE;
4076 	}
4077 
4078 	if (zsflags.z_submap_from_end) {
4079 		last->vme_start -= size;
4080 		addr = last->vme_start;
4081 		VME_OFFSET_SET(last, addr);
4082 	} else {
4083 		addr = first->vme_end;
4084 		first->vme_end += size;
4085 	}
4086 	map->size += size;
4087 
4088 	vm_map_unlock(map);
4089 
4090 	*addrp = addr;
4091 	return KERN_SUCCESS;
4092 }
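
/*
 * Pictorially, the two boundary entries bracket the free space, and an
 * allocation simply moves one of their edges:
 *
 *	[ first |......... free .........| last ]
 *	        ^                        ^
 *	 first->vme_end            last->vme_start
 *
 * growing first->vme_end for bottom-up allocations, or lowering
 * last->vme_start when z_submap_from_end is set.
 */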
4093 
4094 void
4095 zone_fill_initially(zone_t zone, vm_size_t nelems)
4096 {
4097 	kma_flags_t kmaflags = KMA_NOFAIL | KMA_PERMANENT;
4098 	kern_return_t kr;
4099 	vm_offset_t addr;
4100 	uint32_t pages;
4101 	zone_security_flags_t zsflags = zone_security_config(zone);
4102 
4103 	assert(!zone->z_permanent && !zone->collectable && !zone->z_destructible);
4104 	assert(zone->z_elems_avail == 0);
4105 
4106 	kmaflags |= zone_kma_flags(zone, zsflags, Z_WAITOK);
4107 	pages = zone_alloc_pages_for_nelems(zone, nelems);
4108 	if (zone_submap_is_sequestered(zsflags)) {
4109 		kr = zone_submap_alloc_sequestered_va(zsflags, pages, &addr);
4110 		if (kr != KERN_SUCCESS) {
4111 			panic("zone_submap_alloc_sequestered_va() "
4112 			    "of %u pages failed", pages);
4113 		}
4114 		kernel_memory_populate(addr, ptoa(pages),
4115 		    kmaflags, VM_KERN_MEMORY_ZONE);
4116 	} else {
4117 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4118 		kmem_alloc(zone_submap(zsflags), &addr, ptoa(pages),
4119 		    kmaflags, VM_KERN_MEMORY_ZONE);
4120 	}
4121 
4122 	zone_meta_populate(addr, ptoa(pages));
4123 	zcram(zone, addr, pages, 0);
4124 }
4125 
4126 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4127 __attribute__((noinline))
4128 static void
4129 zone_scramble_va_and_unlock(
4130 	zone_t                      z,
4131 	struct zone_page_metadata  *meta,
4132 	uint32_t                    runs,
4133 	uint32_t                    pages,
4134 	uint32_t                    chunk_pages,
4135 	uint64_t                    guard_mask)
4136 {
4137 	struct zone_page_metadata *arr[ZONE_CHUNK_ALLOC_SIZE / 4096];
4138 
4139 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4140 		arr[run] = meta + n;
4141 		n += chunk_pages + ((guard_mask >> run) & 1);
4142 	}
4143 
4144 	/*
4145 	 * Fisher–Yates shuffle, for an array with indices [0, n)
4146 	 *
4147 	 * for i from n−1 downto 1 do
4148 	 *     j ← random integer such that 0 ≤ j ≤ i
4149 	 *     exchange a[j] and a[i]
4150 	 *
4151 	 * The point here is that early allocations aren't at a fixed
4152 	 * distance from each other.
4153 	 */
4154 	for (uint32_t i = runs - 1; i > 0; i--) {
4155 		uint32_t j = zalloc_random_uniform32(0, i + 1);
4156 
4157 		meta   = arr[j];
4158 		arr[j] = arr[i];
4159 		arr[i] = meta;
4160 	}
4161 
4162 	zone_lock(z);
4163 
4164 	for (uint32_t i = 0; i < runs; i++) {
4165 		zone_meta_queue_push(z, &z->z_pageq_va, arr[i]);
4166 	}
4167 	z->z_va_cur += z->z_percpu ? runs : pages;
4168 }
4169 
4170 static inline uint32_t
4171 dist_u32(uint32_t a, uint32_t b)
4172 {
4173 	return a < b ? b - a : a - b;
4174 }
4175 
4176 static uint64_t
4177 zalloc_random_clear_n_bits(uint64_t mask, uint32_t pop, uint32_t n)
4178 {
4179 	for (; n-- > 0; pop--) {
4180 		uint32_t bit = zalloc_random_uniform32(0, pop);
4181 		uint64_t m = mask;
4182 
4183 		for (; bit; bit--) {
4184 			m &= m - 1;
4185 		}
4186 
4187 		mask ^= 1ull << __builtin_ctzll(m);
4188 	}
4189 
4190 	return mask;
4191 }
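
/*
 * Worked example: with mask = 0b101101 (pop = 4) and a draw of bit = 2,
 * the inner loop strips the two lowest set bits of the copy (m becomes
 * 0b101000), and the final xor then clears bit 3, the third lowest set
 * bit of mask, leaving mask = 0b100101.
 */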
4192 
4193 /**
4194  * @function zalloc_random_bits
4195  *
4196  * @brief
4197  * Compute a random number with a specified number of bit set in a given width.
4198  *
4199  * @discussion
4200  * This function generates a "uniform" distribution of sets of bits set in
4201  * a given width, with typically less than width/4 calls to random.
4202  *
4203  * @param pop           the target number of bits set.
4204  * @param width         the number of bits in the random integer to generate.
4205  */
4206 static uint64_t
4207 zalloc_random_bits(uint32_t pop, uint32_t width)
4208 {
4209 	uint64_t w_mask = (1ull << width) - 1;
4210 	uint64_t mask;
4211 	uint32_t cur;
4212 
4213 	if (3 * width / 4 <= pop) {
4214 		mask = w_mask;
4215 		cur  = width;
4216 	} else if (pop <= width / 4) {
4217 		mask = 0;
4218 		cur  = 0;
4219 	} else {
4220 		/*
4221 		 * Choosing a random number this way will overwhelmingly
4222 		 * yield a popcount close to `width / 2`, +/- a few bits.
4223 		 */
4224 		mask = zalloc_random_mask64(width);
4225 		cur  = __builtin_popcountll(mask);
4226 
4227 		if (dist_u32(cur, pop) > dist_u32(width - cur, pop)) {
4228 			/*
4229 			 * If the opposite mask has a closer popcount,
4230 			 * then start with that one as the seed.
4231 			 */
4232 			cur = width - cur;
4233 			mask ^= w_mask;
4234 		}
4235 	}
4236 
4237 	if (cur < pop) {
4238 		/*
4239 		 * Setting `pop - cur` bits is really clearing that many from
4240 		 * the opposite mask.
4241 		 */
4242 		mask ^= w_mask;
4243 		mask = zalloc_random_clear_n_bits(mask, width - cur, pop - cur);
4244 		mask ^= w_mask;
4245 	} else if (pop < cur) {
4246 		mask = zalloc_random_clear_n_bits(mask, cur, cur - pop);
4247 	}
4248 
4249 	return mask;
4250 }
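
/*
 * Worked example: for pop = 20, width = 48, neither fast path applies,
 * so a random 48-bit mask is drawn. Its popcount concentrates around
 * 24, so after possibly switching to the complement, only ~4 bits need
 * to be cleared to land exactly on 20, which takes far fewer random
 * draws than placing all 20 bits one by one.
 */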
4251 #endif
4252 
4253 static void
4254 zone_allocate_va_locked(zone_t z, zalloc_flags_t flags)
4255 {
4256 	zone_security_flags_t zsflags = zone_security_config(z);
4257 	struct zone_page_metadata *meta;
4258 	kma_flags_t kmaflags = zone_kma_flags(z, zsflags, flags) | KMA_VAONLY;
4259 	uint32_t chunk_pages = z->z_chunk_pages;
4260 	uint32_t runs, pages, guards, rnum;
4261 	uint64_t guard_mask = 0;
4262 	bool     lead_guard = false;
4263 	kern_return_t kr;
4264 	vm_offset_t addr;
4265 
4266 	zone_unlock(z);
4267 
4268 	/*
4269 	 * A lot of OOB exploitation techniques rely on precise placement
4270 	 * and interleaving of zone pages. The layout that is sought
4271 	 * by attackers will be C/P/T types, where:
4272 	 * - (C)ompromised is the type for which attackers have a bug,
4273 	 * - (P)adding is used to pad memory,
4274 	 * - (T)arget is the type that the attacker will attempt to corrupt
4275 	 *   by exploiting (C).
4276 	 *
4277 	 * Note that in some cases C==T and P isn't needed.
4278 	 *
4279 	 * In order to make those placement games much harder,
4280 	 * we grow zones by random runs of memory, up to 256k.
4281 	 * This makes predicting the precise layout of the heap
4282 	 * significantly more complicated.
4283 	 *
4284 	 * Note: this function makes very heavy use of random,
4285 	 *       however, it is mostly limited to sequestered zones,
4286 	 *       and eventually the layout will be fixed,
4287 	 *       and the usage of random vastly reduced.
4288 	 *
4289 	 *       For non sequestered zones, there's a single call
4290 	 *       to random in order to decide whether we want
4291 	 *       a guard page or not.
4292 	 */
4293 	pages  = chunk_pages;
4294 	guards = 0;
4295 	runs   = 1;
4296 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4297 	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4298 		pages = atop(ZONE_CHUNK_ALLOC_SIZE);
4299 		runs  = (pages + chunk_pages - 1) / chunk_pages;
4300 		runs  = zalloc_random_uniform32(1, runs + 1);
4301 		pages = runs * chunk_pages;
4302 	}
4303 	static_assert(ZONE_CHUNK_ALLOC_SIZE / 4096 <= 64,
4304 	    "make sure that `runs` will never be larger than 64");
4305 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4306 
4307 	/*
4308 	 * For zones that are susceptible to OOB (kalloc, ZC_PGZ_USE_GUARDS),
4309 	 * guard pages might be added after each chunk.
4310 	 *
4311 	 * Those guard pages are marked with the ZM_PGZ_GUARD
4312 	 * magical chunk len, and their zm_oob_offs field
4313 	 * is used to remember optional shift applied
4314 	 * to returned elements, in order to right-align-them
4315 	 * as much as possible.
4316 	 *
4317 	 * In an adversarial context, while guard pages
4318 	 * are extremely effective against linear overflow,
4319 	 * using a predictable density of guard pages would be
4320 	 * a missed opportunity, which is why we chose to insert
4321 	 * one guard page for about every 32k of memory, placed
4322 	 * randomly.
4323 	 */
4324 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4325 	if (z->z_percpu) {
4326 		/*
4327 		 * For per-cpu runs, have a 75% chance to have a guard.
4328 		 */
4329 		rnum = zalloc_random_uniform32(0, 4 * 128);
4330 		guards = rnum >= 128;
4331 	} else if (!zsflags.z_pgz_use_guards && !z->z_pgz_use_guards) {
4332 		vm_offset_t rest;
4333 
4334 		/*
4335 		 * For types that are less susceptible to have OOBs,
4336 		 * have a density of 1 guard every 64k, with a uniform
4337 		 * distribution.
4338 		 */
4339 		rnum   = zalloc_random_uniform32(0, ZONE_GUARD_SPARSE);
4340 		guards = (uint32_t)ptoa(pages) / ZONE_GUARD_SPARSE;
4341 		rest   = (uint32_t)ptoa(pages) % ZONE_GUARD_SPARSE;
4342 		guards += rnum < rest;
4343 	} else if (ptoa(chunk_pages) >= ZONE_GUARD_DENSE) {
4344 		/*
4345 		 * For chunks >= 32k, have a 75% chance of guard pages
4346 		 * between chunks.
4347 		 */
4348 		rnum = zalloc_random_uniform32(65, 129);
4349 		guards = runs * rnum / 128;
4350 	} else {
4351 		vm_offset_t rest;
4352 
4353 		/*
4354 		 * Otherwise, aim at 1 guard every 32k,
4355 		 * with a uniform distribution.
4356 		 */
4357 		rnum   = zalloc_random_uniform32(0, ZONE_GUARD_DENSE);
4358 		guards = (uint32_t)ptoa(pages) / ZONE_GUARD_DENSE;
4359 		rest   = (uint32_t)ptoa(pages) % ZONE_GUARD_DENSE;
4360 		guards += rnum < rest;
4361 	}
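
	/*
	 * Worked example for the uniform-density branches above: assuming
	 * ZONE_GUARD_DENSE is 32k, an 80k run yields 2 guards from the
	 * division, plus a 3rd with probability 16k / 32k = 1/2 from the
	 * remainder test, i.e. 2.5 guards on average: exactly 80k / 32k.
	 */
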
4362 	assert3u(guards, <=, runs);
4363 
4364 	guard_mask = 0;
4365 
4366 	if (!z->z_percpu && zone_submap_is_sequestered(zsflags)) {
4367 		uint32_t g = 0;
4368 
4369 		/*
4370 		 * Several exploitation strategies rely on a C/T (compromised
4371 		 * then target types) ordering of pages with a sub-page reach
4372 		 * from C into T.
4373 		 *
4374 		 * We want to reliably thwart such exploitations
4375 		 * and hence force a guard page between alternating
4376 		 * memory types.
4377 		 */
4378 		guard_mask |= 1ull << (runs - 1);
4379 		g++;
4380 
4381 		/*
4382 		 * While we randomize the chunks lengths, an attacker with
4383 		 * precise timing control can guess when overflows happen,
4384 		 * and "measure" the runs, which gives them an indication
4385 		 * of where the next run start offset is.
4386 		 *
4387 		 * In order to make this knowledge unusable, add a guard page
4388 		 * _before_ the new run with a 25% probability, regardless
4389 		 * of whether we had enough guard pages.
4390 		 */
4391 		if ((rnum & 3) == 0) {
4392 			lead_guard = true;
4393 			g++;
4394 		}
4395 		if (guards > g) {
4396 			guard_mask |= zalloc_random_bits(guards - g, runs - 1);
4397 		} else {
4398 			guards = g;
4399 		}
4400 	} else {
4401 		assert3u(runs, ==, 1);
4402 		assert3u(guards, <=, 1);
4403 		guard_mask = guards << (runs - 1);
4404 	}
4405 #else
4406 	(void)rnum;
4407 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4408 
4409 	if (zone_submap_is_sequestered(zsflags)) {
4410 		kr = zone_submap_alloc_sequestered_va(zsflags,
4411 		    pages + guards, &addr);
4412 	} else {
4413 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);
4414 		kr = kmem_alloc(zone_submap(zsflags), &addr,
4415 		    ptoa(pages + guards), kmaflags, VM_KERN_MEMORY_ZONE);
4416 	}
4417 
4418 	if (kr != KERN_SUCCESS) {
4419 		uint64_t zone_size = 0;
4420 		zone_t zone_largest = zone_find_largest(&zone_size);
4421 		panic("zalloc[%d]: zone map exhausted while allocating from zone [%s%s], "
4422 		    "likely due to memory leak in zone [%s%s] "
4423 		    "(%u%c, %d elements allocated)",
4424 		    kr, zone_heap_name(z), zone_name(z),
4425 		    zone_heap_name(zone_largest), zone_name(zone_largest),
4426 		    mach_vm_size_pretty(zone_size),
4427 		    mach_vm_size_unit(zone_size),
4428 		    zone_count_allocated(zone_largest));
4429 	}
4430 
4431 	meta = zone_meta_from_addr(addr);
4432 	zone_meta_populate(addr, ptoa(pages + guards));
4433 
4434 	/*
4435 	 * Handle the leading guard page if any
4436 	 */
4437 	if (lead_guard) {
4438 		meta[0].zm_index = zone_index(z);
4439 		meta[0].zm_chunk_len = ZM_PGZ_GUARD;
4440 		meta[0].zm_guarded = true;
4441 		meta++;
4442 	}
4443 
4444 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4445 		bool guarded = (guard_mask >> run) & 1;
4446 
4447 		for (uint32_t i = 0; i < chunk_pages; i++, n++) {
4448 			meta[n].zm_index = zone_index(z);
4449 			meta[n].zm_guarded = guarded;
4450 		}
4451 		if (guarded) {
4452 			meta[n].zm_index = zone_index(z);
4453 			meta[n].zm_chunk_len = ZM_PGZ_GUARD;
4454 			n++;
4455 		}
4456 	}
4457 	if (guards) {
4458 		os_atomic_add(&zone_guard_pages, guards, relaxed);
4459 	}
4460 
4461 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
4462 	if (__improbable(zone_caching_disabled < 0)) {
4463 		return zone_scramble_va_and_unlock(z, meta, runs, pages,
4464 		           chunk_pages, guard_mask);
4465 	}
4466 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
4467 
4468 	zone_lock(z);
4469 
4470 	for (uint32_t run = 0, n = 0; run < runs; run++) {
4471 		zone_meta_queue_push(z, &z->z_pageq_va, meta + n);
4472 		n += chunk_pages + ((guard_mask >> run) & 1);
4473 	}
4474 	z->z_va_cur += z->z_percpu ? runs : pages;
4475 }
4476 
4477 static bool
4478 zone_expand_pred_nope(__unused zone_t z)
4479 {
4480 	return false;
4481 }
4482 
4483 static inline void
4484 ZONE_TRACE_VM_KERN_REQUEST_START(vm_size_t size)
4485 {
4486 #if DEBUG || DEVELOPMENT
4487 	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_START,
4488 	    size, 0, 0, 0);
4489 #else
4490 	(void)size;
4491 #endif
4492 }
4493 
4494 static inline void
4495 ZONE_TRACE_VM_KERN_REQUEST_END(uint32_t pages)
4496 {
4497 #if DEBUG || DEVELOPMENT
4498 	task_t task = current_task_early();
4499 	if (pages && task) {
4500 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, pages);
4501 	}
4502 	VM_DEBUG_CONSTANT_EVENT(vm_kern_request, VM_KERN_REQUEST, DBG_FUNC_END,
4503 	    pages, 0, 0, 0);
4504 #else
4505 	(void)pages;
4506 #endif
4507 }
4508 
4509 __attribute__((noinline))
4510 static void
4511 __ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(zone_t z, uint32_t pgs)
4512 {
4513 	uint64_t wait_start = 0;
4514 	long mapped;
4515 
4516 	thread_wakeup(VM_PAGEOUT_GC_EVENT);
4517 
4518 	if (zone_supports_vm(z) || (current_thread()->options & TH_OPT_VMPRIV)) {
4519 		return;
4520 	}
4521 
4522 	mapped = os_atomic_load(&zone_pages_wired, relaxed);
4523 
4524 	/*
4525 	 * If the zone map is really exhausted, wait on the GC thread,
4526 	 * donating our priority (which is important because the GC
4527 	 * thread is at a rather low priority).
4528 	 */
4529 	for (uint32_t n = 1; mapped >= zone_pages_wired_max - pgs; n++) {
4530 		uint32_t wait_ms = n * (n + 1) / 2;
4531 		uint64_t interval;
4532 
4533 		if (n == 1) {
4534 			wait_start = mach_absolute_time();
4535 		} else {
4536 			thread_wakeup(VM_PAGEOUT_GC_EVENT);
4537 		}
4538 		if (zone_exhausted_timeout > 0 &&
4539 		    wait_ms > zone_exhausted_timeout) {
4540 			panic("zone map exhaustion: waited for %dms "
4541 			    "(pages: %ld, max: %ld, wanted: %d)",
4542 			    wait_ms, mapped, zone_pages_wired_max, pgs);
4543 		}
4544 
4545 		clock_interval_to_absolutetime_interval(wait_ms, NSEC_PER_MSEC,
4546 		    &interval);
4547 
4548 		lck_spin_lock(&zone_exhausted_lock);
4549 		lck_spin_sleep_with_inheritor(&zone_exhausted_lock,
4550 		    LCK_SLEEP_UNLOCK, &zone_pages_wired,
4551 		    vm_pageout_gc_thread, THREAD_UNINT, wait_start + interval);
4552 
4553 		mapped = os_atomic_load(&zone_pages_wired, relaxed);
4554 	}
4555 }
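
/*
 * The wait loop above uses a triangular backoff: the n-th deadline is
 * n * (n + 1) / 2 ms past the first wait (1, 3, 6, 10, ... ms), so
 * successive sleeps stretch linearly instead of hammering the GC
 * thread while the map remains exhausted.
 */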
4556 
4557 static bool
4558 zone_expand_wait_for_pages(bool waited)
4559 {
4560 	if (waited) {
4561 		return false;
4562 	}
4563 #if DEBUG || DEVELOPMENT
4564 	if (zalloc_simulate_vm_pressure) {
4565 		return false;
4566 	}
4567 #endif /* DEBUG || DEVELOPMENT */
4568 	return !vm_pool_low();
4569 }
4570 
4571 static inline void
4572 zone_expand_async_schedule_if_allowed(zone_t zone)
4573 {
4574 	if (zone->z_async_refilling || zone->no_callout) {
4575 		return;
4576 	}
4577 
4578 	if (zone->exhaustible && zone->z_wired_cur >= zone->z_wired_max) {
4579 		return;
4580 	}
4581 
4582 	if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
4583 		return;
4584 	}
4585 
4586 	if (!vm_pool_low() || zone_supports_vm(zone)) {
4587 		zone->z_async_refilling = true;
4588 		thread_call_enter(&zone_expand_callout);
4589 	}
4590 }
4591 
4592 static void
4593 zone_expand_locked(zone_t z, zalloc_flags_t flags, bool (*pred)(zone_t))
4594 {
4595 	zone_security_flags_t zsflags = zone_security_config(z);
4596 	struct zone_expand ze = {
4597 		.ze_thread  = current_thread(),
4598 	};
4599 
4600 	if (!(ze.ze_thread->options & TH_OPT_VMPRIV) && zone_supports_vm(z)) {
4601 		ze.ze_thread->options |= TH_OPT_VMPRIV;
4602 		ze.ze_clear_priv = true;
4603 	}
4604 
4605 	if (ze.ze_thread->options & TH_OPT_VMPRIV) {
4606 		/*
4607 		 * When the thread is VM privileged,
4608 		 * vm_page_grab() will call VM_PAGE_WAIT()
4609 		 * without our knowledge, so we must unfortunately
4610 		 * assume that it will be called.
4611 		 *
4612 		 * In practice it's not a big deal because
4613 		 * Z_NOPAGEWAIT is not really used on zones
4614 		 * that VM privileged threads are going to expand.
4615 		 */
4616 		ze.ze_pg_wait = true;
4617 		ze.ze_vm_priv = true;
4618 	}
4619 
4620 	for (;;) {
4621 		if (!pred) {
4622 			/* NULL pred means "try just once" */
4623 			pred = zone_expand_pred_nope;
4624 		} else if (!pred(z)) {
4625 			goto out;
4626 		}
4627 
4628 		if (z->z_expander == NULL) {
4629 			z->z_expander = &ze;
4630 			break;
4631 		}
4632 
4633 		if (ze.ze_vm_priv && !z->z_expander->ze_vm_priv) {
4634 			change_sleep_inheritor(&z->z_expander, ze.ze_thread);
4635 			ze.ze_next = z->z_expander;
4636 			z->z_expander = &ze;
4637 			break;
4638 		}
4639 
4640 		if ((flags & Z_NOPAGEWAIT) && z->z_expander->ze_pg_wait) {
4641 			goto out;
4642 		}
4643 
4644 		z->z_expanding_wait = true;
4645 		hw_lck_ticket_sleep_with_inheritor(&z->z_lock, &zone_locks_grp,
4646 		    LCK_SLEEP_DEFAULT, &z->z_expander, z->z_expander->ze_thread,
4647 		    TH_UNINT, TIMEOUT_WAIT_FOREVER);
4648 	}
4649 
4650 	do {
4651 		struct zone_page_metadata *meta = NULL;
4652 		uint32_t new_va = 0, cur_pages = 0, min_pages = 0, pages = 0;
4653 		vm_page_t page_list = NULL;
4654 		vm_offset_t addr = 0;
4655 		int waited = 0;
4656 
4657 		/*
4658 		 * While we hold the zone lock, look if there's VA we can:
4659 		 * - complete from partial pages,
4660 		 * - reuse from the sequester list.
4661 		 *
4662 		 * When the page is being populated we pretend we allocated
4663 		 * an extra element so that zone_gc() can't attempt to free
4664 		 * the chunk (as it could become empty while we wait for pages).
4665 		 */
4666 		if (zone_pva_is_null(z->z_pageq_va)) {
4667 			zone_allocate_va_locked(z, flags);
4668 		}
4669 
4670 		meta = zone_meta_queue_pop(z, &z->z_pageq_va);
4671 		addr = zone_meta_to_addr(meta);
4672 		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
4673 			cur_pages = meta->zm_page_index;
4674 			meta -= cur_pages;
4675 			addr -= ptoa(cur_pages);
4676 			zone_meta_lock_in_partial(z, meta, cur_pages);
4677 		}
4678 		zone_unlock(z);
4679 
4680 		/*
4681 		 * And now allocate pages to populate our VA.
4682 		 */
4683 		min_pages = z->z_chunk_pages;
4684 #if !KASAN_CLASSIC
4685 		if (!z->z_percpu) {
4686 			min_pages = (uint32_t)atop(round_page(zone_elem_outer_offs(z) +
4687 			    zone_elem_outer_size(z)));
4688 		}
4689 #endif /* !KASAN_CLASSIC */
4690 
4691 		/*
4692 		 * Trigger jetsams via VM_PAGEOUT_GC_EVENT
4693 		 * if we're running out of zone memory
4694 		 */
4695 		if (__improbable(zone_map_nearing_exhaustion())) {
4696 			__ZONE_MAP_EXHAUSTED_AND_WAITING_FOR_GC__(z, min_pages);
4697 		}
4698 
4699 		ZONE_TRACE_VM_KERN_REQUEST_START(ptoa(z->z_chunk_pages - cur_pages));
4700 
4701 		while (pages < z->z_chunk_pages - cur_pages) {
4702 			vm_page_t m = vm_page_grab();
4703 
4704 			if (m) {
4705 				pages++;
4706 				m->vmp_snext = page_list;
4707 				page_list = m;
4708 				vm_page_zero_fill(m);
4709 				continue;
4710 			}
4711 
4712 			if (pages >= min_pages &&
4713 			    !zone_expand_wait_for_pages(waited)) {
4714 				break;
4715 			}
4716 
4717 			if ((flags & Z_NOPAGEWAIT) == 0) {
4718 				/*
4719 				 * The first time we're about to wait for pages,
4720 				 * mention that to waiters and wake them all.
4721 				 *
4722 				 * Set `ze_pg_wait` in our zone_expand context
4723 				 * so that waiters who care do not wait again.
4724 				 */
4725 				if (!ze.ze_pg_wait) {
4726 					zone_lock(z);
4727 					if (z->z_expanding_wait) {
4728 						z->z_expanding_wait = false;
4729 						wakeup_all_with_inheritor(&z->z_expander,
4730 						    THREAD_AWAKENED);
4731 					}
4732 					ze.ze_pg_wait = true;
4733 					zone_unlock(z);
4734 				}
4735 
4736 				waited++;
4737 				VM_PAGE_WAIT();
4738 				continue;
4739 			}
4740 
4741 			/*
4742 			 * Undo everything and bail out:
4743 			 *
4744 			 * - free pages
4745 			 * - undo the fake allocation if any
4746 			 * - put the VA back on the VA page queue.
4747 			 */
4748 			vm_page_free_list(page_list, FALSE);
4749 			ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4750 
4751 			zone_lock(z);
4752 
4753 			zone_expand_async_schedule_if_allowed(z);
4754 
4755 			if (cur_pages) {
4756 				zone_meta_unlock_from_partial(z, meta, cur_pages);
4757 			}
4758 			if (meta) {
4759 				zone_meta_queue_push(z, &z->z_pageq_va,
4760 				    meta + cur_pages);
4761 			}
4762 			goto page_shortage;
4763 		}
4764 
4765 		vm_object_lock(kernel_object);
4766 		kernel_memory_populate_object_and_unlock(kernel_object,
4767 		    addr + ptoa(cur_pages), addr + ptoa(cur_pages), ptoa(pages), page_list,
4768 		    zone_kma_flags(z, zsflags, flags), VM_KERN_MEMORY_ZONE,
4769 		    (zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY)
4770 		    ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE);
4771 
4772 		ZONE_TRACE_VM_KERN_REQUEST_END(pages);
4773 
4774 		zcram_and_lock(z, addr, new_va, cur_pages, cur_pages + pages, 0);
4775 
4776 		if (z->z_wired_cur == z->z_wired_max) {
4777 			zone_unlock(z);
4778 			EVENT_INVOKE(ZONE_EXHAUSTED, zone_index(z), z);
4779 			zone_lock(z);
4780 		}
4781 	} while (pred(z));
4782 
4783 page_shortage:
4784 	if (z->z_expander == &ze) {
4785 		z->z_expander = ze.ze_next;
4786 	} else {
4787 		assert(z->z_expander->ze_next == &ze);
4788 		z->z_expander->ze_next = NULL;
4789 	}
4790 	if (z->z_expanding_wait) {
4791 		z->z_expanding_wait = false;
4792 		wakeup_all_with_inheritor(&z->z_expander, THREAD_AWAKENED);
4793 	}
4794 out:
4795 	if (ze.ze_clear_priv) {
4796 		ze.ze_thread->options &= ~TH_OPT_VMPRIV;
4797 	}
4798 }
4799 
4800 static bool
4801 zalloc_needs_refill(zone_t zone)
4802 {
4803 	if (zone->z_elems_free > zone->z_elems_rsv) {
4804 		return false;
4805 	}
4806 	if (zone->z_wired_cur < zone->z_wired_max) {
4807 		return true;
4808 	}
4809 	return !zone->exhaustible;
4810 }
4811 
4812 static void
4813 zone_expand_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
4814 {
4815 	zone_foreach(z) {
4816 		if (z->no_callout) {
4817 			/* z_async_refilling will never be set */
4818 			continue;
4819 		}
4820 
4821 		if (!z->z_async_refilling) {
4822 			/*
4823 			 * Avoid locking all zones: the bit for the zone(s)
4824 			 * we're looking for was set _before_
4825 			 * thread_call_enter() was called; if we fail
4826 			 * to observe the bit, it means the thread-call
4827 			 * has been "dinged" again and we'll notice it then.
4828 			 */
4829 			continue;
4830 		}
4831 
4832 		zone_lock(z);
4833 		if (z->z_self && z->z_async_refilling) {
4834 			zone_expand_locked(z, Z_WAITOK, zalloc_needs_refill);
4835 			/*
4836 			 * clearing _after_ we grow is important,
4837 			 * so that we avoid waking up the thread call
4838 			 * while we grow and causing it to run a second time.
4839 			 */
4840 			z->z_async_refilling = false;
4841 		}
4842 		zone_unlock(z);
4843 	}
4844 }
4845 
4846 #endif /* !ZALLOC_TEST */
4847 #pragma mark zone jetsam integration
4848 #if !ZALLOC_TEST
4849 
4850 /*
4851  * We're being very conservative here and picking a value of 95%. We might need to lower this if
4852  * we find that we're not catching the problem and are still hitting zone map exhaustion panics.
4853  */
4854 #define ZONE_MAP_JETSAM_LIMIT_DEFAULT 95
4855 
4856 /*
4857  * Threshold above which largest zones should be included in the panic log
4858  */
4859 #define ZONE_MAP_EXHAUSTION_PRINT_PANIC 80
4860 
4861 /*
4862  * Trigger zone-map-exhaustion jetsams if the zone map is X% full,
4863  * where X=zone_map_jetsam_limit.
4864  *
4865  * Can be set via boot-arg "zone_map_jetsam_limit". Set to 95% by default.
4866  */
4867 TUNABLE_WRITEABLE(unsigned int, zone_map_jetsam_limit, "zone_map_jetsam_limit",
4868     ZONE_MAP_JETSAM_LIMIT_DEFAULT);
4869 
4870 kern_return_t
4871 zone_map_jetsam_set_limit(uint32_t value)
4872 {
4873 	if (value <= 0 || value > 100) {
4874 		return KERN_INVALID_VALUE;
4875 	}
4876 
4877 	zone_map_jetsam_limit = value;
4878 	os_atomic_store(&zone_pages_jetsam_threshold,
4879 	    zone_pages_wired_max * value / 100, relaxed);
4880 	return KERN_SUCCESS;
4881 }
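
/*
 * Example (illustrative numbers): with a zone map of 131072 wirable
 * pages and the default 95% limit, the threshold stored above is
 * 131072 * 95 / 100 = 124518 wired pages.
 */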
4882 
4883 void
4884 get_zone_map_size(uint64_t *current_size, uint64_t *capacity)
4885 {
4886 	vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4887 	*current_size = ptoa_64(phys_pages);
4888 	*capacity = ptoa_64(zone_pages_wired_max);
4889 }
4890 
4891 void
4892 get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size)
4893 {
4894 	zone_t largest_zone = zone_find_largest(zone_size);
4895 
4896 	/*
4897 	 * Append kalloc heap name to zone name (if zone is used by kalloc)
4898 	 */
4899 	snprintf(zone_name, zone_name_len, "%s%s",
4900 	    zone_heap_name(largest_zone), largest_zone->z_name);
4901 }
4902 
4903 static bool
4904 zone_map_nearing_threshold(unsigned int threshold)
4905 {
4906 	uint64_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);
4907 	return phys_pages * 100 > zone_pages_wired_max * threshold;
4908 }
4909 
4910 bool
4911 zone_map_nearing_exhaustion(void)
4912 {
4913 	vm_size_t pages = os_atomic_load(&zone_pages_wired, relaxed);
4914 
4915 	return pages >= os_atomic_load(&zone_pages_jetsam_threshold, relaxed);
4916 }
4917 
4918 
4919 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
4920 
4921 /*
4922  * Tries to kill a single process if it can attribute one to the largest zone. If not, wakes up the memorystatus thread
4923  * to walk through the jetsam priority bands and kill processes.
4924  */
4925 static zone_t
4926 kill_process_in_largest_zone(void)
4927 {
4928 	pid_t pid = -1;
4929 	uint64_t zone_size = 0;
4930 	zone_t largest_zone = zone_find_largest(&zone_size);
4931 
4932 	printf("zone_map_exhaustion: Zone mapped %lld of %lld, used %lld, capacity %lld [jetsam limit %d%%]\n",
4933 	    ptoa_64(os_atomic_load(&zone_pages_wired, relaxed)),
4934 	    ptoa_64(zone_pages_wired_max),
4935 	    (uint64_t)zone_submaps_approx_size(),
4936 	    (uint64_t)mach_vm_range_size(&zone_info.zi_map_range),
4937 	    zone_map_jetsam_limit);
4938 	printf("zone_map_exhaustion: Largest zone %s%s, size %lu\n", zone_heap_name(largest_zone),
4939 	    largest_zone->z_name, (uintptr_t)zone_size);
4940 
4941 	/*
4942 	 * We want to make sure we don't call this function from userspace,
4943 	 * or we could end up trying to synchronously kill the process
4944 	 * whose context we're in, causing the system to hang.
4945 	 */
4946 	assert(current_task() == kernel_task);
4947 
4948 	/*
4949 	 * If vm_object_zone is the largest, check to see if the number of
4950 	 * elements in vm_map_entry_zone is comparable.
4951 	 *
4952 	 * If so, consider vm_map_entry_zone as the largest. This lets us target
4953 	 * a specific process to jetsam to quickly recover from the zone map
4954 	 * bloat.
4955 	 */
4956 	if (largest_zone == vm_object_zone) {
4957 		unsigned int vm_object_zone_count = zone_count_allocated(vm_object_zone);
4958 		unsigned int vm_map_entry_zone_count = zone_count_allocated(vm_map_entry_zone);
4959 		/* Is the VM map entries zone count >= 98% of the VM objects zone count? */
4960 		if (vm_map_entry_zone_count >= ((vm_object_zone_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
4961 			largest_zone = vm_map_entry_zone;
4962 			printf("zone_map_exhaustion: Picking VM map entries as the zone to target, size %lu\n",
4963 			    (uintptr_t)zone_size_wired(largest_zone));
4964 		}
4965 	}
4966 
4967 	/* TODO: Extend this to check for the largest process in other zones as well. */
4968 	if (largest_zone == vm_map_entry_zone) {
4969 		pid = find_largest_process_vm_map_entries();
4970 	} else {
4971 		printf("zone_map_exhaustion: Nothing to do for the largest zone [%s%s]. "
4972 		    "Waking up memorystatus thread.\n", zone_heap_name(largest_zone),
4973 		    largest_zone->z_name);
4974 	}
4975 	if (!memorystatus_kill_on_zone_map_exhaustion(pid)) {
4976 		printf("zone_map_exhaustion: Call to memorystatus failed, victim pid: %d\n", pid);
4977 	}
4978 
4979 	return largest_zone;
4980 }
4981 
4982 #endif /* !ZALLOC_TEST */
4983 #pragma mark probabilistic gzalloc
4984 #if !ZALLOC_TEST
4985 #if CONFIG_PROB_GZALLOC
4986 
4987 extern uint32_t random(void);
4988 struct pgz_backtrace {
4989 	uint32_t  pgz_depth;
4990 	int32_t   pgz_bt[MAX_ZTRACE_DEPTH];
4991 };
4992 
4993 static int32_t  PERCPU_DATA(pgz_sample_counter);
4994 static SECURITY_READ_ONLY_LATE(struct pgz_backtrace *) pgz_backtraces;
4995 static uint32_t pgz_uses;       /* number of zones using PGZ */
4996 static int32_t  pgz_slot_avail;
4997 #if OS_ATOMIC_HAS_LLSC
4998 struct zone_page_metadata *pgz_slot_head;
4999 #else
5000 static struct pgz_slot_head {
5001 	uint32_t psh_count;
5002 	uint32_t psh_slot;
5003 } pgz_slot_head;
5004 #endif
5005 struct zone_page_metadata *pgz_slot_tail;
5006 static SECURITY_READ_ONLY_LATE(vm_map_t) pgz_submap;
5007 
5008 static struct zone_page_metadata *
5009 pgz_meta(uint32_t index)
5010 {
5011 	return &zone_info.zi_pgz_meta[2 * index + 1];
5012 }
5013 
5014 static struct pgz_backtrace *
5015 pgz_bt(uint32_t slot, bool free)
5016 {
5017 	return &pgz_backtraces[2 * slot + free];
5018 }
5019 
5020 static void
5021 pgz_backtrace(struct pgz_backtrace *bt, void *fp)
5022 {
5023 	struct backtrace_control ctl = {
5024 		.btc_frame_addr = (uintptr_t)fp,
5025 	};
5026 
5027 	bt->pgz_depth = (uint32_t)backtrace_packed(BTP_KERN_OFFSET_32,
5028 	    (uint8_t *)bt->pgz_bt, sizeof(bt->pgz_bt), &ctl, NULL) / 4;
5029 }
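
/*
 * Note: BTP_KERN_OFFSET_32 packs each frame as a 32-bit offset from the
 * kernel base, so backtrace_packed() returns a byte count and dividing
 * by 4 yields the frame count.  panic_display_pgz_bt() mirrors this
 * with backtrace_unpack(..., 4 * bt->pgz_depth).
 */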
5030 
5031 static uint32_t
5032 pgz_slot(vm_offset_t addr)
5033 {
5034 	return (uint32_t)((addr - zone_info.zi_pgz_range.min_address) >> (PAGE_SHIFT + 1));
5035 }
5036 
5037 static vm_offset_t
5038 pgz_addr(uint32_t slot)
5039 {
5040 	return zone_info.zi_pgz_range.min_address + ptoa(2 * slot + 1);
5041 }
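
/*
 * PGZ address space layout (sketch): data pages alternate with guard
 * pages, so slot `s` lives at page index `2 * s + 1` and is surrounded
 * by guards.  pgz_addr() and pgz_slot() are inverses:
 *
 *     pgz_slot(pgz_addr(s)) == ptoa(2 * s + 1) >> (PAGE_SHIFT + 1) == s
 *
 * Likewise pgz_meta() resolves the data-page metadata (2 * index + 1),
 * while pgz_bt() keeps an (allocation, free) backtrace pair per slot.
 */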
5042 
5043 static bool
5044 pgz_sample(vm_offset_t addr, vm_size_t esize)
5045 {
5046 	int32_t *counterp, cnt;
5047 
5048 	if (zone_addr_size_crosses_page(addr, esize)) {
5049 		return false;
5050 	}
5051 
5052 	/*
5053 	 * Note: accessing pgz_sample_counter is racy, but this is
5054 	 *       acceptable given that this is not
5055 	 *       a security load-bearing feature.
5056 	 */
5057 
5058 	counterp = PERCPU_GET(pgz_sample_counter);
5059 	cnt = *counterp;
5060 	if (__probable(cnt > 0)) {
5061 		*counterp = cnt - 1;
5062 		return false;
5063 	}
5064 
5065 	if (pgz_slot_avail <= 0) {
5066 		return false;
5067 	}
5068 
5069 	/*
5070 	 * zalloc_random_uniform() might block, so when preemption is disabled,
5071 	 * set the counter to `-1` which will cause the next allocation
5072 	 * that can block to generate a new random value.
5073 	 *
5074 	 * No allocation on this CPU will sample until then.
5075 	 */
5076 	if (get_preemption_level()) {
5077 		*counterp = -1;
5078 	} else {
5079 		*counterp = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5080 	}
5081 
5082 	return cnt == 0;
5083 }
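
/*
 * Since the counter is (re)armed uniformly in [0, 2 * pgz_sample_rate),
 * its mean is pgz_sample_rate: roughly one allocation out of
 * pgz_sample_rate gets protected, without a predictable period.
 */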
5084 
5085 static inline bool
5086 pgz_slot_alloc(uint32_t *slot)
5087 {
5088 	struct zone_page_metadata *m;
5089 	uint32_t tries = 100;
5090 
5091 	disable_preemption();
5092 
5093 #if OS_ATOMIC_USE_LLSC
5094 	int32_t ov, nv;
5095 	os_atomic_rmw_loop(&pgz_slot_avail, ov, nv, relaxed, {
5096 		if (__improbable(ov <= 0)) {
5097 		        os_atomic_rmw_loop_give_up({
5098 				enable_preemption();
5099 				return false;
5100 			});
5101 		}
5102 		nv = ov - 1;
5103 	});
5104 #else
5105 	if (__improbable(os_atomic_dec_orig(&pgz_slot_avail, relaxed) <= 0)) {
5106 		os_atomic_inc(&pgz_slot_avail, relaxed);
5107 		enable_preemption();
5108 		return false;
5109 	}
5110 #endif
5111 
5112 again:
5113 	if (__improbable(tries-- == 0)) {
5114 		/*
5115 		 * Too much contention; extremely unlikely,
5116 		 * but do not stay stuck.
5117 		 */
5118 		os_atomic_inc(&pgz_slot_avail, relaxed);
5119 		enable_preemption();
5120 		return false;
5121 	}
5122 
5123 #if OS_ATOMIC_HAS_LLSC
5124 	do {
5125 		m = os_atomic_load_exclusive(&pgz_slot_head, dependency);
5126 		if (__improbable(m->zm_pgz_slot_next == NULL)) {
5127 			/*
5128 			 * Either we are waiting for an enqueuer (unlikely)
5129 			 * or we are competing with another core and
5130 			 * are looking at a popped element.
5131 			 */
5132 			os_atomic_clear_exclusive();
5133 			goto again;
5134 		}
5135 	} while (!os_atomic_store_exclusive(&pgz_slot_head,
5136 	    m->zm_pgz_slot_next, relaxed));
5137 #else
5138 	struct zone_page_metadata *base = zone_info.zi_pgz_meta;
5139 	struct pgz_slot_head ov, nv;
5140 	os_atomic_rmw_loop(&pgz_slot_head, ov, nv, dependency, {
5141 		m = &base[ov.psh_slot * 2];
5142 		if (__improbable(m->zm_pgz_slot_next == NULL)) {
5143 		        /*
5144 		         * Either we are waiting for an enqueuer (unlikely)
5145 		         * or we are competing with another core and
5146 		         * are looking at a popped element.
5147 		         */
5148 		        os_atomic_rmw_loop_give_up(goto again);
5149 		}
5150 		nv.psh_count = ov.psh_count + 1;
5151 		nv.psh_slot  = (uint32_t)((m->zm_pgz_slot_next - base) / 2);
5152 	});
5153 #endif
5154 
5155 	enable_preemption();
5156 
5157 	m->zm_pgz_slot_next = NULL;
5158 	*slot = (uint32_t)((m - zone_info.zi_pgz_meta) / 2);
5159 	return true;
5160 }
5161 
5162 static inline bool
5163 pgz_slot_free(uint32_t slot)
5164 {
5165 	struct zone_page_metadata *m = &zone_info.zi_pgz_meta[2 * slot];
5166 	struct zone_page_metadata *t;
5167 
5168 	disable_preemption();
5169 	t = os_atomic_xchg(&pgz_slot_tail, m, relaxed);
5170 	os_atomic_store(&t->zm_pgz_slot_next, m, release);
5171 	os_atomic_inc(&pgz_slot_avail, relaxed);
5172 	enable_preemption();
5173 
5174 	return true;
5175 }
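
/*
 * Free slots form an MPSC queue: pgz_slot_free() swaps the tail
 * unconditionally, then links the old tail with a release store, while
 * pgz_slot_alloc() pops from the head, retrying when it observes a NULL
 * zm_pgz_slot_next (a racing enqueuer, or a popped element).  Because
 * pgz_slot_avail is kept pgz_quarantine below the number of queued
 * slots, a freed slot ages in the queue before it can be handed out
 * again, preserving its metadata and backtraces for UAF reports.
 */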
5176 
5177 /*!
5178  * @function pgz_protect()
5179  *
5180  * @brief
5181  * Try to protect an allocation with PGZ.
5182  *
5183  * @param zone          The zone the allocation was made against.
5184  * @param addr          An allocated element address to protect.
5185  * @param fp            The caller frame pointer (for the backtrace).
5186  * @returns             The new address for the element, or @c addr.
5187  */
5188 __attribute__((noinline))
5189 static vm_offset_t
5190 pgz_protect(zone_t zone, vm_offset_t addr, void *fp)
5191 {
5192 	kern_return_t kr;
5193 	uint32_t slot;
5194 
5195 	if (!pgz_slot_alloc(&slot)) {
5196 		return addr;
5197 	}
5198 
5199 	/*
5200 	 * Try to double-map the page (may fail if Z_NOWAIT).
5201 	 * We will always find a PA because pgz_init() pre-expanded the pmap.
5202 	 */
5203 	vm_offset_t  new_addr = pgz_addr(slot);
5204 	pmap_paddr_t pa = kvtophys(trunc_page(addr));
5205 
5206 	kr = pmap_enter_options_addr(kernel_pmap, new_addr, pa,
5207 	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
5208 	    get_preemption_level() ? PMAP_OPTIONS_NOWAIT : 0, NULL);
5209 
5210 	if (__improbable(kr != KERN_SUCCESS)) {
5211 		pgz_slot_free(slot);
5212 		return addr;
5213 	}
5214 
5215 	struct zone_page_metadata tmp = {
5216 		.zm_chunk_len = ZM_PGZ_ALLOCATED,
5217 		.zm_index     = zone_index(zone),
5218 	};
5219 	struct zone_page_metadata *meta = pgz_meta(slot);
5220 
5221 	os_atomic_store(&meta->zm_bits, tmp.zm_bits, relaxed);
5222 	os_atomic_store(&meta->zm_pgz_orig_addr, addr, relaxed);
5223 	pgz_backtrace(pgz_bt(slot, false), fp);
5224 
5225 	return new_addr + (addr & PAGE_MASK);
5226 }
5227 
5228 /*!
5229  * @function pgz_unprotect()
5230  *
5231  * @brief
5232  * Releases a PGZ slot and returns the original address of a freed element.
5233  *
5234  * @param addr          A PGZ protected element address.
5235  * @param fp            The caller frame pointer (for the backtrace).
5236  * @returns             The unprotected address for the element
5237  *                      that was passed to @c pgz_protect().
5238  */
5239 __attribute__((noinline))
5240 static vm_offset_t
5241 pgz_unprotect(vm_offset_t addr, void *fp)
5242 {
5243 	struct zone_page_metadata *meta;
5244 	struct zone_page_metadata tmp;
5245 	uint32_t slot;
5246 
5247 	slot = pgz_slot(addr);
5248 	meta = zone_meta_from_addr(addr);
5249 	tmp  = *meta;
5250 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5251 		goto double_free;
5252 	}
5253 
5254 	pmap_remove(kernel_pmap, trunc_page(addr), trunc_page(addr) + PAGE_SIZE);
5255 
5256 	pgz_backtrace(pgz_bt(slot, true), fp);
5257 
5258 	tmp.zm_chunk_len = ZM_PGZ_FREE;
5259 	tmp.zm_bits = os_atomic_xchg(&meta->zm_bits, tmp.zm_bits, relaxed);
5260 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5261 		goto double_free;
5262 	}
5263 
5264 	pgz_slot_free(slot);
5265 	return tmp.zm_pgz_orig_addr;
5266 
5267 double_free:
5268 	panic_fault_address = addr;
5269 	meta->zm_chunk_len = ZM_PGZ_DOUBLE_FREE;
5270 	panic("probabilistic gzalloc double free: %p", (void *)addr);
5271 }
5272 
5273 bool
5274 pgz_owned(mach_vm_address_t addr)
5275 {
5276 #if CONFIG_KERNEL_TBI
5277 	addr = VM_KERNEL_TBI_FILL(addr);
5278 #endif /* CONFIG_KERNEL_TBI */
5279 
5280 	return mach_vm_range_contains(&zone_info.zi_pgz_range, addr);
5281 }
5282 
5283 
5284 __attribute__((always_inline))
5285 vm_offset_t
5286 __pgz_decode(mach_vm_address_t addr, mach_vm_size_t size)
5287 {
5288 	struct zone_page_metadata *meta;
5289 
5290 	if (__probable(!pgz_owned(addr))) {
5291 		return (vm_offset_t)addr;
5292 	}
5293 
5294 	if (zone_addr_size_crosses_page(addr, size)) {
5295 		panic("invalid size for PGZ protected address %p:%p",
5296 		    (void *)addr, (void *)(addr + size));
5297 	}
5298 
5299 	meta = zone_meta_from_addr((vm_offset_t)addr);
5300 	if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5301 		panic_fault_address = (vm_offset_t)addr;
5302 		panic("probabilistic gzalloc use-after-free: %p", (void *)addr);
5303 	}
5304 
5305 	return trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5306 }
5307 
5308 __attribute__((always_inline))
5309 vm_offset_t
5310 __pgz_decode_allow_invalid(vm_offset_t addr, zone_id_t zid)
5311 {
5312 	struct zone_page_metadata *meta;
5313 	struct zone_page_metadata tmp;
5314 
5315 	if (__probable(!pgz_owned(addr))) {
5316 		return addr;
5317 	}
5318 
5319 	meta = zone_meta_from_addr(addr);
5320 	tmp.zm_bits = os_atomic_load(&meta->zm_bits, relaxed);
5321 
5322 	addr = trunc_page(meta->zm_pgz_orig_addr) + (addr & PAGE_MASK);
5323 
5324 	if (tmp.zm_chunk_len != ZM_PGZ_ALLOCATED) {
5325 		return 0;
5326 	}
5327 
5328 	if (zid != ZONE_ID_ANY && tmp.zm_index != zid) {
5329 		return 0;
5330 	}
5331 
5332 	return addr;
5333 }
5334 
5335 static void
5336 pgz_zone_init(zone_t z)
5337 {
5338 	char zn[MAX_ZONE_NAME];
5339 	char zv[MAX_ZONE_NAME];
5340 	char key[30];
5341 
5342 	if (zone_elem_inner_size(z) > PAGE_SIZE) {
5343 		return;
5344 	}
5345 
5346 	if (pgz_all) {
5347 		os_atomic_inc(&pgz_uses, relaxed);
5348 		z->z_pgz_tracked = true;
5349 		return;
5350 	}
5351 
5352 	snprintf(zn, sizeof(zn), "%s%s", zone_heap_name(z), zone_name(z));
5353 
5354 	for (int i = 1;; i++) {
5355 		snprintf(key, sizeof(key), "pgz%d", i);
5356 		if (!PE_parse_boot_argn(key, zv, sizeof(zv))) {
5357 			break;
5358 		}
5359 		if (track_this_zone(zn, zv) || track_kalloc_zones(z, zv)) {
5360 			os_atomic_inc(&pgz_uses, relaxed);
5361 			z->z_pgz_tracked = true;
5362 			break;
5363 		}
5364 	}
5365 }
5366 
5367 __startup_func
5368 static vm_size_t
5369 pgz_get_size(void)
5370 {
5371 	if (pgz_slots == UINT32_MAX) {
5372 		/*
5373 	 * Scale with RAM size: ~256 slots per GiB
5374 		 */
5375 		pgz_slots = (uint32_t)(sane_size >> 22);
5376 	}
5377 
5378 	/*
5379 	 * Make sure that the slot allocation scheme works;
5380 	 * see pgz_slot_alloc() / pgz_slot_free().
5381 	 */
5382 	if (pgz_slots < zpercpu_count() * 4) {
5383 		pgz_slots = zpercpu_count() * 4;
5384 	}
5385 	if (pgz_slots >= UINT16_MAX) {
5386 		pgz_slots = UINT16_MAX - 1;
5387 	}
5388 
5389 	/*
5390 	 * Quarantine is 33% of slots by default, no more than 90%.
5391 	 */
5392 	if (pgz_quarantine == 0) {
5393 		pgz_quarantine = pgz_slots / 3;
5394 	}
5395 	if (pgz_quarantine > pgz_slots * 9 / 10) {
5396 		pgz_quarantine = pgz_slots * 9 / 10;
5397 	}
5398 	pgz_slot_avail = pgz_slots - pgz_quarantine;
5399 
5400 	return ptoa(2 * pgz_slots + 1);
5401 }
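
/*
 * Worked example (sketch): with 8G of RAM and no overriding boot-args,
 * pgz_slots = 8G >> 22 = 2048, pgz_quarantine = 2048 / 3 = 682 and
 * pgz_slot_avail = 1366; the reservation is ptoa(2 * 2048 + 1) pages:
 * one data page per slot, interleaved with guard pages, plus a leading
 * guard.
 */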
5402 
5403 __startup_func
5404 static void
5405 pgz_init(void)
5406 {
5407 	if (!pgz_uses) {
5408 		return;
5409 	}
5410 
5411 	if (pgz_sample_rate == 0) {
5412 		/*
5413 		 * If no rate was provided, pick a random one that scales
5414 		 * with the number of protected zones.
5415 		 *
5416 		 * Average two uniform draws (a triangular distribution)
5417 		 * to avoid having too many really fast sample rates.
5418 		 */
5419 		uint32_t factor = MIN(pgz_uses, 10);
5420 		uint32_t max_rate = 1000 * factor;
5421 		uint32_t min_rate =  100 * factor;
5422 
5423 		pgz_sample_rate = (zalloc_random_uniform32(min_rate, max_rate) +
5424 		    zalloc_random_uniform32(min_rate, max_rate)) / 2;
5425 	}
5426 
5427 	struct mach_vm_range *r = &zone_info.zi_pgz_range;
5428 	zone_info.zi_pgz_meta = zone_meta_from_addr(r->min_address);
5429 	zone_meta_populate(r->min_address, mach_vm_range_size(r));
5430 
5431 	for (size_t i = 0; i < 2 * pgz_slots + 1; i += 2) {
5432 		zone_info.zi_pgz_meta[i].zm_chunk_len = ZM_PGZ_GUARD;
5433 	}
5434 
5435 	for (size_t i = 1; i < pgz_slots; i++) {
5436 		zone_info.zi_pgz_meta[2 * i - 1].zm_pgz_slot_next =
5437 		    &zone_info.zi_pgz_meta[2 * i + 1];
5438 	}
5439 #if OS_ATOMIC_HAS_LLSC
5440 	pgz_slot_head = &zone_info.zi_pgz_meta[1];
5441 #endif
5442 	pgz_slot_tail = &zone_info.zi_pgz_meta[2 * pgz_slots - 1];
5443 
5444 	pgz_backtraces = zalloc_permanent(sizeof(struct pgz_backtrace) *
5445 	    2 * pgz_slots, ZALIGN_PTR);
5446 
5447 	/*
5448 	 * Expand the pmap so that pmap_enter_options_addr()
5449 	 * in pgz_protect() never needs to call pmap_expand().
5450 	 */
5451 	for (uint32_t slot = 0; slot < pgz_slots; slot++) {
5452 		(void)pmap_enter_options_addr(kernel_pmap, pgz_addr(slot), 0,
5453 		    VM_PROT_NONE, VM_PROT_NONE, 0, FALSE,
5454 		    PMAP_OPTIONS_NOENTER, NULL);
5455 	}
5456 
5457 	/* do this last as this will enable pgz */
5458 	percpu_foreach(counter, pgz_sample_counter) {
5459 		*counter = zalloc_random_uniform32(0, 2 * pgz_sample_rate);
5460 	}
5461 }
5462 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, pgz_init);
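
/*
 * After pgz_init() runs, the free queue holds every slot, chained
 * through the data-page metadata: meta[1] -> meta[3] -> ... ->
 * meta[2 * pgz_slots - 1], with guards at even indices.  Only
 * pgz_slot_avail of them may be popped at any given time; the rest
 * form the quarantine.
 */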
5463 
5464 static void
5465 panic_display_pgz_bt(bool has_syms, uint32_t slot, bool free)
5466 {
5467 	struct pgz_backtrace *bt = pgz_bt(slot, free);
5468 	const char *what = free ? "Free" : "Allocation";
5469 	uintptr_t buf[MAX_ZTRACE_DEPTH];
5470 
5471 	if (!ml_validate_nofault((vm_offset_t)bt, sizeof(*bt))) {
5472 		paniclog_append_noflush("  Can't decode %s Backtrace\n", what);
5473 		return;
5474 	}
5475 
5476 	backtrace_unpack(BTP_KERN_OFFSET_32, buf, MAX_ZTRACE_DEPTH,
5477 	    (uint8_t *)bt->pgz_bt, 4 * bt->pgz_depth);
5478 
5479 	paniclog_append_noflush("  %s Backtrace:\n", what);
5480 	for (uint32_t i = 0; i < bt->pgz_depth && i < MAX_ZTRACE_DEPTH; i++) {
5481 		if (has_syms) {
5482 			paniclog_append_noflush("    %p ", (void *)buf[i]);
5483 			panic_print_symbol_name(buf[i]);
5484 			paniclog_append_noflush("\n");
5485 		} else {
5486 			paniclog_append_noflush("    %p\n", (void *)buf[i]);
5487 		}
5488 	}
5489 	kmod_panic_dump((vm_offset_t *)buf, bt->pgz_depth);
5490 }
5491 
5492 static void
5493 panic_display_pgz_uaf_info(bool has_syms, vm_offset_t addr)
5494 {
5495 	struct zone_page_metadata *meta;
5496 	vm_offset_t elem, esize;
5497 	const char *type;
5498 	const char *prob;
5499 	uint32_t slot;
5500 	zone_t z;
5501 
5502 	slot = pgz_slot(addr);
5503 	meta = pgz_meta(slot);
5504 	elem = pgz_addr(slot) + (meta->zm_pgz_orig_addr & PAGE_MASK);
5505 
5506 	paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
5507 
5508 	if (ml_validate_nofault((vm_offset_t)meta, sizeof(*meta)) &&
5509 	    meta->zm_index &&
5510 	    meta->zm_index < os_atomic_load(&num_zones, relaxed)) {
5511 		z = &zone_array[meta->zm_index];
5512 	} else {
5513 		paniclog_append_noflush("  Zone    : <unknown>\n");
5514 		paniclog_append_noflush("  Address : %p\n", (void *)addr);
5515 		paniclog_append_noflush("\n");
5516 		return;
5517 	}
5518 
5519 	esize = zone_elem_inner_size(z);
5520 	paniclog_append_noflush("  Zone    : %s%s\n",
5521 	    zone_heap_name(z), zone_name(z));
5522 	paniclog_append_noflush("  Address : %p\n", (void *)addr);
5523 	paniclog_append_noflush("  Element : [%p, %p) of size %d\n",
5524 	    (void *)elem, (void *)(elem + esize), (uint32_t)esize);
5525 
5526 	if (addr < elem) {
5527 		type = "out-of-bounds(underflow) + use-after-free";
5528 		prob = "low";
5529 	} else if (meta->zm_chunk_len == ZM_PGZ_DOUBLE_FREE) {
5530 		type = "double-free";
5531 		prob = "high";
5532 	} else if (addr < elem + esize) {
5533 		type = "use-after-free";
5534 		prob = "high";
5535 	} else if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5536 		type = "out-of-bounds + use-after-free";
5537 		prob = "low";
5538 	} else {
5539 		type = "out-of-bounds";
5540 		prob = "high";
5541 	}
5542 	paniclog_append_noflush("  Kind    : %s (%s confidence)\n",
5543 	    type, prob);
5544 	if (addr < elem) {
5545 		paniclog_append_noflush("  Access  : %d byte(s) before\n",
5546 		    (uint32_t)(elem - addr) + 1);
5547 	} else if (addr < elem + esize) {
5548 		paniclog_append_noflush("  Access  : %d byte(s) inside\n",
5549 		    (uint32_t)(addr - elem) + 1);
5550 	} else {
5551 		paniclog_append_noflush("  Access  : %d byte(s) past\n",
5552 		    (uint32_t)(addr - (elem + esize)) + 1);
5553 	}
5554 
5555 	panic_display_pgz_bt(has_syms, slot, false);
5556 	if (meta->zm_chunk_len != ZM_PGZ_ALLOCATED) {
5557 		panic_display_pgz_bt(has_syms, slot, true);
5558 	}
5559 
5560 	paniclog_append_noflush("\n");
5561 }
5562 
5563 #endif /* CONFIG_PROB_GZALLOC */
5564 #endif /* !ZALLOC_TEST */
5565 #pragma mark zfree
5566 #if !ZALLOC_TEST
5567 
5568 /*!
5569  * @defgroup zfree
5570  * @{
5571  *
5572  * @brief
5573  * The codepath for zone frees.
5574  *
5575  * @discussion
5576  * There are 4 major ways to free memory back to the zone allocator:
5577  * - @c zfree()
5578  * - @c zfree_percpu()
5579  * - @c kfree*()
5580  * - @c zfree_permanent()
5581  *
5582  * While permanent zones have their own allocation scheme, all other codepaths
5583  * will eventually go through the @c zfree_ext() choking point.
5584  */
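
/*
 * Typical usage (a minimal sketch; `my_zone` and `struct my_obj` are
 * placeholders, the zone being created elsewhere with zone_create()):
 *
 *     zone_t my_zone = zone_create("my.objects",
 *         sizeof(struct my_obj), ZC_NONE);
 *
 *     struct my_obj *o = zalloc_flags(my_zone, Z_WAITOK | Z_ZERO);
 *     ...
 *     zfree(my_zone, o);
 *
 * zfree() zeroes the element before handing it to zfree_ext() below.
 */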
5585 
5586 __header_always_inline void
5587 zfree_drop(zone_t zone, vm_offset_t addr)
5588 {
5589 	vm_offset_t esize = zone_elem_outer_size(zone);
5590 	struct zone_page_metadata *meta;
5591 	vm_offset_t eidx;
5592 
5593 	meta = zone_element_resolve(zone, addr, &eidx);
5594 
5595 	if (!zone_meta_mark_free(meta, eidx)) {
5596 		zone_meta_double_free_panic(zone, addr, __func__);
5597 	}
5598 
5599 	vm_offset_t old_size = meta->zm_alloc_size;
5600 	vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
5601 	vm_offset_t new_size = zone_meta_alloc_size_sub(zone, meta, esize);
5602 
5603 	if (new_size == 0) {
5604 		/* whether the page was on the intermediate or all_used queue, move it to free */
5605 		zone_meta_requeue(zone, &zone->z_pageq_empty, meta);
5606 		zone->z_wired_empty += meta->zm_chunk_len;
5607 	} else if (old_size + esize > max_size) {
5608 		/* first free element on page, move from all_used */
5609 		zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
5610 	}
5611 }
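
/*
 * zfree_drop() thus performs the page queue transitions of
 * zalloc_import() in reverse, using zm_alloc_size against the chunk
 * capacity to detect both edges:
 *
 *     all_used --(first element freed)--> partial
 *     partial  --(last element freed)---> empty
 */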
5612 
5613 __attribute__((noinline))
5614 static void
5615 zfree_item(zone_t zone, vm_offset_t addr)
5616 {
5617 	/* transfer preemption count to lock */
5618 	zone_lock_nopreempt_check_contention(zone);
5619 
5620 	zfree_drop(zone, addr);
5621 	zone->z_elems_free += 1;
5622 
5623 	zone_unlock(zone);
5624 }
5625 
5626 static void
5627 zfree_cached_depot_recirculate(
5628 	zone_t                  zone,
5629 	uint32_t                depot_max,
5630 	zone_cache_t            cache)
5631 {
5632 	smr_t smr = zone_cache_smr(cache);
5633 	smr_seq_t seq;
5634 	uint32_t n;
5635 
5636 	zone_recirc_lock_nopreempt_check_contention(zone);
5637 
5638 	n = cache->zc_depot.zd_full;
5639 	if (n >= depot_max) {
5640 		n  -= depot_max / 2;
5641 		seq = zone_depot_move_full(&zone->z_recirc, &cache->zc_depot, n, NULL);
5642 		if (smr) {
5643 			smr_deferred_advance_commit(smr, seq);
5644 		}
5645 	}
5646 
5647 	n = depot_max - cache->zc_depot.zd_full;
5648 	if (n > zone->z_recirc.zd_empty) {
5649 		n = zone->z_recirc.zd_empty;
5650 	}
5651 	if (n) {
5652 		zone_depot_move_empty(&cache->zc_depot, &zone->z_recirc,
5653 		    n, zone);
5654 	}
5655 
5656 	zone_recirc_unlock_nopreempt(zone);
5657 }
5658 
5659 static zone_cache_t
5660 zfree_cached_recirculate(zone_t zone, zone_cache_t cache)
5661 {
5662 	zone_magazine_t mag = NULL, tmp = NULL;
5663 	smr_t smr = zone_cache_smr(cache);
5664 
5665 	if (zone->z_recirc.zd_empty == 0) {
5666 		mag = zone_magazine_alloc(Z_NOWAIT);
5667 	}
5668 
5669 	zone_recirc_lock_nopreempt_check_contention(zone);
5670 
5671 	if (mag == NULL && zone->z_recirc.zd_empty) {
5672 		mag = zone_depot_pop_head_empty(&zone->z_recirc, zone);
5673 		__builtin_assume(mag);
5674 	}
5675 	if (mag) {
5676 		tmp = zone_magazine_replace(cache, mag, true);
5677 		if (smr) {
5678 			smr_deferred_advance_commit(smr, tmp->zm_seq);
5679 		}
5680 		if (zone_security_array[zone_index(zone)].z_lifo) {
5681 			zone_depot_insert_head_full(&zone->z_recirc, tmp);
5682 		} else {
5683 			zone_depot_insert_tail_full(&zone->z_recirc, tmp);
5684 		}
5685 	}
5686 
5687 	zone_recirc_unlock_nopreempt(zone);
5688 
5689 	return mag ? cache : NULL;
5690 }
5691 
5692 __attribute__((noinline))
5693 static zone_cache_t
5694 zfree_cached_trim(zone_t zone, zone_cache_t cache)
5695 {
5696 	zone_magazine_t mag = NULL, tmp = NULL;
5697 	uint32_t depot_max;
5698 
5699 	depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
5700 	if (depot_max) {
5701 		zone_depot_lock_nopreempt(cache);
5702 
5703 		if (cache->zc_depot.zd_empty == 0) {
5704 			zfree_cached_depot_recirculate(zone, depot_max, cache);
5705 		}
5706 
5707 		if (__probable(cache->zc_depot.zd_empty)) {
5708 			mag = zone_depot_pop_head_empty(&cache->zc_depot, NULL);
5709 			__builtin_assume(mag);
5710 		} else {
5711 			mag = zone_magazine_alloc(Z_NOWAIT);
5712 		}
5713 		if (mag) {
5714 			tmp = zone_magazine_replace(cache, mag, true);
5715 			zone_depot_insert_tail_full(&cache->zc_depot, tmp);
5716 		}
5717 		zone_depot_unlock_nopreempt(cache);
5718 
5719 		return mag ? cache : NULL;
5720 	}
5721 
5722 	return zfree_cached_recirculate(zone, cache);
5723 }
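
/*
 * Trim strategy: zones with a per-CPU depot (z_depot_size != 0) stage
 * full magazines in the local depot and exchange batches with the
 * global recirculation depot only when the local one runs out of empty
 * magazines; zones without one push each full magazine straight to
 * zone->z_recirc.  Either way the caller gets back a cache with room in
 * its free magazine, or NULL when no empty magazine could be found or
 * allocated.
 */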
5724 
5725 __attribute__((always_inline))
5726 static inline zone_cache_t
5727 zfree_cached_get_pcpu_cache(zone_t zone, int cpu)
5728 {
5729 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5730 
5731 	if (__probable(cache->zc_free_cur < zc_mag_size())) {
5732 		return cache;
5733 	}
5734 
5735 	if (__probable(cache->zc_alloc_cur < zc_mag_size())) {
5736 		zone_cache_swap_magazines(cache);
5737 		return cache;
5738 	}
5739 
5740 	return zfree_cached_trim(zone, cache);
5741 }
5742 
5743 __attribute__((always_inline))
5744 static inline zone_cache_t
5745 zfree_cached_get_pcpu_cache_smr(zone_t zone, int cpu)
5746 {
5747 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
5748 	size_t idx = cache->zc_free_cur;
5749 
5750 	if (__probable(idx + 1 < zc_mag_size())) {
5751 		return cache;
5752 	}
5753 
5754 	/*
5755 	 * When SMR is in use, the bucket is tagged early with
5756 	 * @c smr_deferred_advance(), which costs a full barrier
5757 	 * but performs no store.
5758 	 *
5759 	 * When full magazines hit the recirculation layer, the advance is
5760 	 * committed under the recirculation lock (see zfree_cached_recirculate()).
5761 	 *
5762 	 * When done this way, the zone contention detection mechanism
5763 	 * will adjust the size of the per-cpu depots gracefully, which
5764 	 * mechanically reduces the pace of these commits as usage increases.
5765 	 */
5766 
5767 	if (__probable(idx + 1 == zc_mag_size())) {
5768 		zone_magazine_t mag;
5769 
5770 		mag = (zone_magazine_t)((uintptr_t)cache->zc_free_elems -
5771 		    offsetof(struct zone_magazine, zm_elems));
5772 		mag->zm_seq = smr_deferred_advance(zone_cache_smr(cache));
5773 		return cache;
5774 	}
5775 
5776 	return zfree_cached_trim(zone, cache);
5777 }
5778 
5779 __attribute__((always_inline))
5780 static inline vm_offset_t
5781 __zcache_mark_invalid(zone_t zone, vm_offset_t elem, uint64_t combined_size)
5782 {
5783 	struct zone_page_metadata *meta;
5784 	vm_offset_t offs;
5785 
5786 #pragma unused(combined_size)
5787 #if CONFIG_PROB_GZALLOC
5788 	if (__improbable(pgz_owned(elem))) {
5789 		elem = pgz_unprotect(elem, __builtin_frame_address(0));
5790 	}
5791 #endif /* CONFIG_PROB_GZALLOC */
5792 
5793 	meta = zone_meta_from_addr(elem);
5794 	if (!from_zone_map(elem, 1) || !zone_has_index(zone, meta->zm_index)) {
5795 		zone_invalid_element_panic(zone, elem);
5796 	}
5797 
5798 	offs = (elem & PAGE_MASK) - zone_elem_inner_offs(zone);
5799 	if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
5800 		offs += ptoa(meta->zm_page_index);
5801 	}
5802 
5803 	if (!Z_FAST_ALIGNED(offs, zone->z_align_magic)) {
5804 		zone_invalid_element_panic(zone, elem);
5805 	}
5806 
5807 #if VM_TAG_SIZECLASSES
5808 	if (__improbable(zone->z_uses_tags)) {
5809 		vm_tag_t *slot;
5810 
5811 		slot = zba_extra_ref_ptr(meta->zm_bitmap,
5812 		    Z_FAST_QUO(offs, zone->z_quo_magic));
5813 		vm_tag_update_zone_size(*slot, zone->z_tags_sizeclass,
5814 		    -(long)ZFREE_ELEM_SIZE(combined_size));
5815 		*slot = VM_KERN_MEMORY_NONE;
5816 	}
5817 #endif /* VM_TAG_SIZECLASSES */
5818 
5819 #if KASAN_CLASSIC
5820 	kasan_free(elem, ZFREE_ELEM_SIZE(combined_size),
5821 	    ZFREE_USER_SIZE(combined_size), zone_elem_redzone(zone),
5822 	    zone->z_percpu, __builtin_frame_address(0));
5823 #endif
5824 #if KASAN_TBI
5825 	elem = kasan_tbi_tag_zfree(elem, ZFREE_ELEM_SIZE(combined_size),
5826 	    zone->z_percpu);
5827 #endif
5828 
5829 	return elem;
5830 }
5831 
5832 __attribute__((always_inline))
5833 vm_offset_t
5834 (zcache_mark_invalid)(zone_t zone, vm_offset_t elem)
5835 {
5836 	vm_size_t esize = zone_elem_inner_size(zone);
5837 
5838 	ZFREE_LOG(zone, elem, 1);
5839 	return __zcache_mark_invalid(zone, elem, ZFREE_PACK_SIZE(esize, esize));
5840 }
5841 
5842 /*
5843  * This function is noinline when zone logging can be used, so that
5844  * backtracing can reliably skip the uninteresting zfree_ext() and
5845  * zfree_log() frames.
5846  */
5847 #if ZALLOC_ENABLE_LOGGING
5848 __attribute__((noinline))
5849 #endif /* ZALLOC_ENABLE_LOGGING */
5850 void
5851 zfree_ext(zone_t zone, zone_stats_t zstats, void *addr, uint64_t combined_size)
5852 {
5853 	vm_offset_t esize = ZFREE_ELEM_SIZE(combined_size);
5854 	vm_offset_t elem = (vm_offset_t)addr;
5855 	int cpu;
5856 
5857 	DTRACE_VM2(zfree, zone_t, zone, void*, elem);
5858 
5859 	ZFREE_LOG(zone, elem, 1);
5860 	elem = __zcache_mark_invalid(zone, elem, combined_size);
5861 
5862 	disable_preemption();
5863 	cpu = cpu_number();
5864 	zpercpu_get_cpu(zstats, cpu)->zs_mem_freed += esize;
5865 
5866 #if KASAN_CLASSIC
5867 	if (zone->z_kasan_quarantine && startup_phase >= STARTUP_SUB_ZALLOC) {
5868 		struct kasan_quarantine_result kqr;
5869 
5870 		kqr  = kasan_quarantine(elem, esize);
5871 		elem = kqr.addr;
5872 		zone = kqr.zone;
5873 		if (elem == 0) {
5874 			return enable_preemption();
5875 		}
5876 	}
5877 #endif
5878 
5879 	if (zone->z_pcpu_cache) {
5880 		zone_cache_t cache = zfree_cached_get_pcpu_cache(zone, cpu);
5881 
5882 		if (__probable(cache)) {
5883 			cache->zc_free_elems[cache->zc_free_cur++] = elem;
5884 			return enable_preemption();
5885 		}
5886 	}
5887 
5888 	return zfree_item(zone, elem);
5889 }
5890 
5891 __attribute__((always_inline))
5892 static inline zstack_t
5893 zcache_free_stack_to_cpu(
5894 	zone_id_t               zid,
5895 	zone_cache_t            cache,
5896 	zstack_t                stack,
5897 	vm_size_t               esize,
5898 	zone_cache_ops_t        ops,
5899 	bool                    zero)
5900 {
5901 	size_t       n = MIN(zc_mag_size() - cache->zc_free_cur, stack.z_count);
5902 	vm_offset_t *p;
5903 
5904 	stack.z_count -= n;
5905 	cache->zc_free_cur += n;
5906 	p = cache->zc_free_elems + cache->zc_free_cur;
5907 
5908 	do {
5909 		void *o = zstack_pop_no_delta(&stack);
5910 
5911 		if (ops) {
5912 			o = ops->zc_op_mark_invalid(zid, o);
5913 		} else {
5914 			if (zero) {
5915 				bzero(o, esize);
5916 			}
5917 			o = (void *)__zcache_mark_invalid(zone_by_id(zid),
5918 			    (vm_offset_t)o, ZFREE_PACK_SIZE(esize, esize));
5919 		}
5920 		*--p  = (vm_offset_t)o;
5921 	} while (--n > 0);
5922 
5923 	return stack;
5924 }
5925 
5926 __attribute__((always_inline))
5927 static inline void
5928 zcache_free_1_ext(zone_id_t zid, void *addr, zone_cache_ops_t ops)
5929 {
5930 	vm_offset_t elem = (vm_offset_t)addr;
5931 	zone_cache_t cache;
5932 	vm_size_t esize;
5933 	zone_t zone = zone_by_id(zid);
5934 	int cpu;
5935 
5936 	ZFREE_LOG(zone, elem, 1);
5937 
5938 	disable_preemption();
5939 	cpu = cpu_number();
5940 	esize = zone_elem_inner_size(zone);
5941 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
5942 	if (!ops) {
5943 		elem = __zcache_mark_invalid(zone, elem,
5944 		    ZFREE_PACK_SIZE(esize, esize));
5945 	}
5946 	cache = zfree_cached_get_pcpu_cache(zone, cpu);
5947 	if (__probable(cache)) {
5948 		if (ops) {
5949 			elem = (vm_offset_t)ops->zc_op_mark_invalid(zid, addr);
5950 		}
5951 		cache->zc_free_elems[cache->zc_free_cur++] = elem;
5952 		enable_preemption();
5953 	} else if (ops) {
5954 		enable_preemption();
5955 		os_atomic_dec(&zone_by_id(zid)->z_elems_avail, relaxed);
5956 		ops->zc_op_free(zid, addr);
5957 	} else {
5958 		zfree_item(zone, elem);
5959 	}
5960 }
5961 
5962 __attribute__((always_inline))
5963 static inline void
5964 zcache_free_n_ext(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops, bool zero)
5965 {
5966 	zone_t zone = zone_by_id(zid);
5967 	zone_cache_t cache;
5968 	vm_size_t esize;
5969 	int cpu;
5970 
5971 	ZFREE_LOG(zone, stack.z_head, stack.z_count);
5972 
5973 	disable_preemption();
5974 	cpu = cpu_number();
5975 	esize = zone_elem_inner_size(zone);
5976 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed +=
5977 	    stack.z_count * esize;
5978 
5979 	for (;;) {
5980 		cache = zfree_cached_get_pcpu_cache(zone, cpu);
5981 		if (__probable(cache)) {
5982 			stack = zcache_free_stack_to_cpu(zid, cache,
5983 			    stack, esize, ops, zero);
5984 			enable_preemption();
5985 		} else if (ops) {
5986 			enable_preemption();
5987 			os_atomic_dec(&zone->z_elems_avail, relaxed);
5988 			ops->zc_op_free(zid, zstack_pop(&stack));
5989 		} else {
5990 			vm_offset_t addr = (vm_offset_t)zstack_pop(&stack);
5991 
5992 			if (zero) {
5993 				bzero((void *)addr, esize);
5994 			}
5995 			addr = __zcache_mark_invalid(zone, addr,
5996 			    ZFREE_PACK_SIZE(esize, esize));
5997 			zfree_item(zone, addr);
5998 		}
5999 
6000 		if (stack.z_count == 0) {
6001 			break;
6002 		}
6003 
6004 		disable_preemption();
6005 		cpu = cpu_number();
6006 	}
6007 }
6008 
6009 void
6010 (zcache_free)(zone_id_t zid, void *addr, zone_cache_ops_t ops)
6011 {
6012 	__builtin_assume(ops != NULL);
6013 	zcache_free_1_ext(zid, addr, ops);
6014 }
6015 
6016 void
6017 (zcache_free_n)(zone_id_t zid, zstack_t stack, zone_cache_ops_t ops)
6018 {
6019 	__builtin_assume(ops != NULL);
6020 	zcache_free_n_ext(zid, stack, ops, false);
6021 }
6022 
6023 void
6024 (zfree_n)(zone_id_t zid, zstack_t stack)
6025 {
6026 	zcache_free_n_ext(zid, stack, NULL, true);
6027 }
6028 
6029 void
6030 (zfree_nozero)(zone_id_t zid, void *addr)
6031 {
6032 	zcache_free_1_ext(zid, addr, NULL);
6033 }
6034 
6035 void
6036 (zfree_nozero_n)(zone_id_t zid, zstack_t stack)
6037 {
6038 	zcache_free_n_ext(zid, stack, NULL, false);
6039 }
6040 
6041 void
6042 (zfree)(union zone_or_view zov, void *addr)
6043 {
6044 	zone_t zone = zov.zov_view->zv_zone;
6045 	zone_stats_t zstats = zov.zov_view->zv_stats;
6046 	vm_offset_t esize = zone_elem_inner_size(zone);
6047 
6048 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6049 	assert(!zone->z_percpu && !zone->z_permanent && !zone->z_smr);
6050 	bzero(addr, esize);
6051 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6052 }
6053 
6054 __attribute__((noinline))
6055 void
6056 zfree_percpu(union zone_or_view zov, void *addr)
6057 {
6058 	zone_t zone = zov.zov_view->zv_zone;
6059 	zone_stats_t zstats = zov.zov_view->zv_stats;
6060 	vm_offset_t esize = zone_elem_inner_size(zone);
6061 
6062 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6063 	assert(zone->z_percpu);
6064 	addr = (void *)__zpcpu_demangle(addr);
6065 	zpercpu_foreach_cpu(i) {
6066 		bzero((char *)addr + ptoa(i), esize);
6067 	}
6068 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6069 }
6070 
6071 void
6072 (zfree_id)(zone_id_t zid, void *addr)
6073 {
6074 	(zfree)(&zone_array[zid], addr);
6075 }
6076 
6077 void
6078 (zfree_ro)(zone_id_t zid, void *addr)
6079 {
6080 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6081 	zone_t zone = zone_by_id(zid);
6082 	zone_stats_t zstats = zone->z_stats;
6083 	vm_offset_t esize = zone_ro_size_params[zid].z_elem_size;
6084 
6085 #if ZSECURITY_CONFIG(READ_ONLY)
6086 	assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6087 	pmap_ro_zone_bzero(zid, (vm_offset_t)addr, 0, esize);
6088 #else
6089 	(void)zid;
6090 	bzero(addr, esize);
6091 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6092 	zfree_ext(zone, zstats, addr, ZFREE_PACK_SIZE(esize, esize));
6093 }
6094 
6095 __attribute__((noinline))
6096 static void
6097 zfree_item_smr(zone_t zone, vm_offset_t addr)
6098 {
6099 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, 0);
6100 	vm_size_t esize = zone_elem_inner_size(zone);
6101 
6102 	/*
6103 	 * This path should be taken extremely rarely:
6104 	 * it happens only when we failed to allocate an empty bucket.
6105 	 */
6106 	smr_synchronize(zone_cache_smr(cache));
6107 
6108 	cache->zc_free((void *)addr, esize);
6109 	addr = __zcache_mark_invalid(zone, addr, ZFREE_PACK_SIZE(esize, esize));
6110 
6111 	zfree_item(zone, addr);
6112 }
6113 
6114 void
6115 (zfree_smr)(zone_t zone, void *addr)
6116 {
6117 	vm_offset_t elem = (vm_offset_t)addr;
6118 	vm_offset_t esize;
6119 	zone_cache_t cache;
6120 	int cpu;
6121 
6122 	ZFREE_LOG(zone, elem, 1);
6123 
6124 	disable_preemption();
6125 	cpu   = cpu_number();
6126 #if MACH_ASSERT
6127 	cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6128 	assert(!smr_entered_cpu(cache->zc_smr, cpu));
6129 #endif
6130 	esize = zone_elem_inner_size(zone);
6131 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_freed += esize;
6132 	cache = zfree_cached_get_pcpu_cache_smr(zone, cpu);
6133 	if (__probable(cache)) {
6134 		cache->zc_free_elems[cache->zc_free_cur++] = elem;
6135 		enable_preemption();
6136 	} else {
6137 		zfree_item_smr(zone, elem);
6138 	}
6139 }
6140 
6141 void
6142 (zfree_id_smr)(zone_id_t zid, void *addr)
6143 {
6144 	(zfree_smr)(&zone_array[zid], addr);
6145 }
6146 
6147 /*! @} */
6148 #endif /* !ZALLOC_TEST */
6149 #pragma mark zalloc
6150 #if !ZALLOC_TEST
6151 
6152 /*!
6153  * @defgroup zalloc
6154  * @{
6155  *
6156  * @brief
6157  * The codepath for zone allocations.
6158  *
6159  * @discussion
6160  * There are 4 major ways to allocate memory that end up in the zone allocator:
6161  * - @c zalloc(), @c zalloc_flags(), ...
6162  * - @c zalloc_percpu()
6163  * - @c kalloc*()
6164  * - @c zalloc_permanent()
6165  *
6166  * While permanent zones have their own allocation scheme, all other codepaths
6167  * will eventually go through the @c zalloc_ext() choking point.
6168  *
6169  * @c zalloc_return() is the final function everyone tail calls into,
6170  * which prepares the element for consumption by the caller and deals with
6171  * common treatment (zone logging, tags, kasan, validation, ...).
6172  */
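
/*
 * Fast/slow path overview (sketch):
 *
 *     zalloc() / kalloc*() / zalloc_percpu()
 *       -> zalloc_ext()
 *          -> per-CPU magazine hit .................. zalloc_return() [fast]
 *          -> zalloc_item()
 *             -> zalloc_import() under the zone
 *                lock, expanding the zone if needed .. zalloc_return() [slow]
 */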
6173 
6174 /*!
6175  * @function zalloc_import
6176  *
6177  * @brief
6178  * Import @c n elements into the specified array; the opposite of @c zfree_drop().
6179  *
6180  * @param zone          The zone to import elements from
6181  * @param elems         The array to import into
6182  * @param n             The number of elements to import. Must be non-zero,
6183  *                      and smaller than @c zone->z_elems_free.
6184  */
6185 __header_always_inline vm_size_t
6186 zalloc_import(
6187 	zone_t                  zone,
6188 	vm_offset_t            *elems,
6189 	zalloc_flags_t          flags,
6190 	uint32_t                n)
6191 {
6192 	vm_offset_t esize = zone_elem_outer_size(zone);
6193 	vm_offset_t offs  = zone_elem_inner_offs(zone);
6194 	zone_stats_t zs;
6195 	int cpu = cpu_number();
6196 	uint32_t i = 0;
6197 
6198 	zs = zpercpu_get_cpu(zone->z_stats, cpu);
6199 
6200 	if (__improbable(zone_caching_disabled < 0)) {
6201 		/*
6202 		 * In the first 10s after boot, mess with
6203 		 * the scan position in order to make early
6204 		 * allocation patterns less predictable.
6205 		 */
6206 		zone_early_scramble_rr(zone, cpu, zs);
6207 	}
6208 
6209 	do {
6210 		vm_offset_t page, eidx, size = 0;
6211 		struct zone_page_metadata *meta;
6212 
6213 		if (!zone_pva_is_null(zone->z_pageq_partial)) {
6214 			meta = zone_pva_to_meta(zone->z_pageq_partial);
6215 			page = zone_pva_to_addr(zone->z_pageq_partial);
6216 		} else if (!zone_pva_is_null(zone->z_pageq_empty)) {
6217 			meta = zone_pva_to_meta(zone->z_pageq_empty);
6218 			page = zone_pva_to_addr(zone->z_pageq_empty);
6219 			zone_counter_sub(zone, z_wired_empty, meta->zm_chunk_len);
6220 		} else {
6221 			zone_accounting_panic(zone, "z_elems_free corruption");
6222 		}
6223 
6224 		zone_meta_validate(zone, meta, page);
6225 
6226 		vm_offset_t old_size = meta->zm_alloc_size;
6227 		vm_offset_t max_size = ptoa(meta->zm_chunk_len) + ZM_ALLOC_SIZE_LOCK;
6228 
6229 		do {
6230 			eidx = zone_meta_find_and_clear_bit(zone, zs, meta, flags);
6231 			elems[i++] = page + offs + eidx * esize;
6232 			size += esize;
6233 		} while (i < n && old_size + size + esize <= max_size);
6234 
6235 		vm_offset_t new_size = zone_meta_alloc_size_add(zone, meta, size);
6236 
6237 		if (new_size + esize > max_size) {
6238 			zone_meta_requeue(zone, &zone->z_pageq_full, meta);
6239 		} else if (old_size == 0) {
6240 			/* remove from free, move to intermediate */
6241 			zone_meta_requeue(zone, &zone->z_pageq_partial, meta);
6242 		}
6243 	} while (i < n);
6244 
6245 	n = zone_counter_sub(zone, z_elems_free, n);
6246 	if (zone->z_pcpu_cache == NULL && zone->z_elems_free_min > n) {
6247 		zone->z_elems_free_min = n;
6248 	}
6249 
6250 	return zone_elem_inner_size(zone);
6251 }
6252 
6253 __attribute__((always_inline))
6254 static inline vm_offset_t
6255 __zcache_mark_valid(zone_t zone, vm_offset_t addr, zalloc_flags_t flags)
6256 {
6257 #pragma unused(zone, flags)
6258 #if KASAN || CONFIG_PROB_GZALLOC || VM_TAG_SIZECLASSES
6259 	vm_offset_t esize = zone_elem_inner_size(zone);
6260 #endif
6261 
6262 #if VM_TAG_SIZECLASSES
6263 	if (__improbable(zone->z_uses_tags)) {
6264 		struct zone_page_metadata *meta;
6265 		vm_offset_t offs;
6266 		vm_tag_t *slot;
6267 		vm_tag_t tag;
6268 
6269 		tag  = zalloc_flags_get_tag(flags);
6270 		meta = zone_meta_from_addr(addr);
6271 		offs = (addr & PAGE_MASK) - zone_elem_inner_offs(zone);
6272 		if (meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
6273 			offs += ptoa(meta->zm_page_index);
6274 		}
6275 
6276 		slot = zba_extra_ref_ptr(meta->zm_bitmap,
6277 		    Z_FAST_QUO(offs, zone->z_quo_magic));
6278 		*slot = tag;
6279 
6280 		vm_tag_update_zone_size(tag, zone->z_tags_sizeclass,
6281 		    (long)esize);
6282 	}
6283 #endif /* VM_TAG_SIZECLASSES */
6284 
6285 #if CONFIG_PROB_GZALLOC
6286 	if (zone->z_pgz_tracked && pgz_sample(addr, esize)) {
6287 		addr = pgz_protect(zone, addr, __builtin_frame_address(0));
6288 	}
6289 #endif
6290 
6291 	/*
6292 	 * KASan integration of kalloc heaps is handled by kalloc_ext().
6293 	 */
6294 	if ((flags & Z_SKIP_KASAN) == 0) {
6295 #if KASAN_CLASSIC
6296 		kasan_alloc(addr, esize, esize, zone_elem_redzone(zone),
6297 		    (flags & Z_PCPU), __builtin_frame_address(0));
6298 #endif /* KASAN_CLASSIC */
6299 #if KASAN_TBI
6300 		if (__probable(zone->z_tbi_tag)) {
6301 			addr = kasan_tbi_tag_zalloc(addr, esize, esize,
6302 			    (flags & Z_PCPU));
6303 		} else {
6304 			addr = kasan_tbi_tag_zalloc_default(addr, esize,
6305 			    (flags & Z_PCPU));
6306 		}
6307 #endif /* KASAN_TBI */
6308 	}
6309 
6310 	return addr;
6311 }
6312 
6313 __attribute__((always_inline))
6314 vm_offset_t
6315 (zcache_mark_valid)(zone_t zone, vm_offset_t addr)
6316 {
6317 	addr = __zcache_mark_valid(zone, addr, 0);
6318 	ZALLOC_LOG(zone, addr, 1);
6319 	return addr;
6320 }
6321 
6322 /*!
6323  * @function zalloc_return
6324  *
6325  * @brief
6326  * Performs the tail-end of the work required on allocations before the caller
6327  * uses them.
6328  *
6329  * @discussion
6330  * This function is called without any zone lock held,
6331  * and with preemption restored to the state it had when @c zalloc_ext() was called.
6332  *
6333  * @param zone          The zone we're allocating from.
6334  * @param addr          The element we just allocated.
6335  * @param flags         The flags passed to @c zalloc_ext() (for Z_ZERO).
6336  * @param elem_size     The element size for this zone.
6337  */
6338 __attribute__((always_inline))
6339 static struct kalloc_result
6340 zalloc_return(
6341 	zone_t                  zone,
6342 	vm_offset_t             addr,
6343 	zalloc_flags_t          flags,
6344 	vm_offset_t             elem_size)
6345 {
6346 	addr = __zcache_mark_valid(zone, addr, flags);
6347 #if ZALLOC_ENABLE_ZERO_CHECK
6348 	zalloc_validate_element(zone, addr, elem_size, flags);
6349 #endif /* ZALLOC_ENABLE_ZERO_CHECK */
6350 	ZALLOC_LOG(zone, addr, 1);
6351 
6352 	DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
6353 	return (struct kalloc_result){ (void *)addr, elem_size };
6354 }
6355 
6356 __attribute__((noinline))
6357 static struct kalloc_result
6358 zalloc_item(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6359 {
6360 	vm_offset_t esize, addr;
6361 
6362 	zone_lock_nopreempt_check_contention(zone);
6363 
6364 	if (__improbable(zone->z_elems_free <= zone->z_elems_rsv / 2)) {
6365 		if ((flags & Z_NOWAIT) || zone->z_elems_free) {
6366 			zone_expand_async_schedule_if_allowed(zone);
6367 		} else {
6368 			zone_expand_locked(zone, flags, zalloc_needs_refill);
6369 		}
6370 		if (__improbable(zone->z_elems_free == 0)) {
6371 			zpercpu_get(zstats)->zs_alloc_fail++;
6372 			zone_unlock(zone);
6373 			if (__improbable(flags & Z_NOFAIL)) {
6374 				zone_nofail_panic(zone);
6375 			}
6376 			DTRACE_VM2(zalloc, zone_t, zone, void*, NULL);
6377 			return (struct kalloc_result){ };
6378 		}
6379 	}
6380 
6381 	esize = zalloc_import(zone, &addr, flags, 1);
6382 	zpercpu_get(zstats)->zs_mem_allocated += esize;
6383 	zone_unlock(zone);
6384 
6385 	return zalloc_return(zone, addr, flags, esize);
6386 }
6387 
6388 static void
6389 zalloc_cached_import(
6390 	zone_t                  zone,
6391 	zalloc_flags_t          flags,
6392 	zone_cache_t            cache)
6393 {
6394 	uint16_t n_elems = zc_mag_size();
6395 
6396 	zone_lock_nopreempt(zone);
6397 
6398 	if (__probable(!zone_caching_disabled &&
6399 	    zone->z_elems_free > zone->z_elems_rsv / 2)) {
6400 		if (__improbable(zone->z_elems_free <= zone->z_elems_rsv)) {
6401 			zone_expand_async_schedule_if_allowed(zone);
6402 		}
6403 		if (zone->z_elems_free < n_elems) {
6404 			n_elems = (uint16_t)zone->z_elems_free;
6405 		}
6406 		zalloc_import(zone, cache->zc_alloc_elems, flags, n_elems);
6407 		cache->zc_alloc_cur = n_elems;
6408 	}
6409 
6410 	zone_unlock_nopreempt(zone);
6411 }
6412 
6413 static void
6414 zalloc_cached_depot_recirculate(
6415 	zone_t                  zone,
6416 	uint32_t                depot_max,
6417 	zone_cache_t            cache)
6418 {
6419 	uint32_t n;
6420 
6421 	zone_recirc_lock_nopreempt_check_contention(zone);
6422 
6423 	n = cache->zc_depot.zd_empty;
6424 	if (n >= depot_max) {
6425 		zone_depot_move_empty(&zone->z_recirc, &cache->zc_depot,
6426 		    n - depot_max / 2, NULL);
6427 	}
6428 
6429 	n = depot_max - cache->zc_depot.zd_empty;
6430 	if (n > zone->z_recirc.zd_full) {
6431 		n = zone->z_recirc.zd_full;
6432 	}
6433 	if (n) {
6434 		zone_depot_move_full(&cache->zc_depot, &zone->z_recirc,
6435 		    n, zone);
6436 	}
6437 
6438 	zone_recirc_unlock_nopreempt(zone);
6439 }
6440 
6441 static inline bool
6442 zalloc_cached_depot_poll(struct zone_depot *depot, smr_t smr)
6443 {
6444 	if (depot->zd_full == 0) {
6445 		return false;
6446 	}
6447 
6448 	return smr == NULL || smr_poll(smr, depot->zd_head->zm_seq);
6449 }
6450 
6451 static void
6452 zalloc_cached_reuse_smr(zone_t z, zone_cache_t cache, zone_magazine_t mag)
6453 {
6454 	zone_smr_free_cb_t zc_free = cache->zc_free;
6455 	vm_size_t esize = zone_elem_inner_size(z);
6456 
6457 	for (uint16_t i = 0; i < zc_mag_size(); i++) {
6458 		vm_offset_t elem = mag->zm_elems[i];
6459 
6460 		zc_free((void *)elem, zone_elem_inner_size(z));
6461 		elem = __zcache_mark_invalid(z, elem,
6462 		    ZFREE_PACK_SIZE(esize, esize));
6463 		mag->zm_elems[i] = elem;
6464 	}
6465 }
6466 
6467 static void
6468 zalloc_cached_recirculate(
6469 	zone_t                  zone,
6470 	zone_cache_t            cache)
6471 {
6472 	zone_magazine_t mag = NULL;
6473 
6474 	zone_recirc_lock_nopreempt_check_contention(zone);
6475 
6476 	if (zalloc_cached_depot_poll(&zone->z_recirc, zone_cache_smr(cache))) {
6477 		mag = zone_depot_pop_head_full(&zone->z_recirc, zone);
6478 		if (zone_cache_smr(cache)) {
6479 			zalloc_cached_reuse_smr(zone, cache, mag);
6480 		}
6481 		mag = zone_magazine_replace(cache, mag, false);
6482 		zone_depot_insert_head_empty(&zone->z_recirc, mag);
6483 	}
6484 
6485 	zone_recirc_unlock_nopreempt(zone);
6486 }
6487 
6488 __attribute__((noinline))
6489 static zone_cache_t
6490 zalloc_cached_prime(
6491 	zone_t                  zone,
6492 	zone_cache_ops_t        ops,
6493 	zalloc_flags_t          flags,
6494 	zone_cache_t            cache)
6495 {
6496 	zone_magazine_t mag = NULL;
6497 	uint32_t depot_max;
6498 
6499 	depot_max = os_atomic_load(&zone->z_depot_size, relaxed);
6500 	if (depot_max) {
6501 		zone_depot_lock_nopreempt(cache);
6502 
6503 		if (cache->zc_depot.zd_full == 0) {
6504 			zalloc_cached_depot_recirculate(zone, depot_max, cache);
6505 		}
6506 
6507 		if (__probable(zalloc_cached_depot_poll(&cache->zc_depot,
6508 		    zone_cache_smr(cache)))) {
6509 			mag = zone_depot_pop_head_full(&cache->zc_depot, NULL);
6510 			if (zone_cache_smr(cache)) {
6511 				zalloc_cached_reuse_smr(zone, cache, mag);
6512 			}
6513 			mag = zone_magazine_replace(cache, mag, false);
6514 			zone_depot_insert_head_empty(&cache->zc_depot, mag);
6515 		}
6516 
6517 		zone_depot_unlock_nopreempt(cache);
6518 	} else if (zone->z_recirc.zd_full) {
6519 		zalloc_cached_recirculate(zone, cache);
6520 	}
6521 
6522 	if (__probable(cache->zc_alloc_cur)) {
6523 		return cache;
6524 	}
6525 
6526 	if (ops == NULL) {
6527 		zalloc_cached_import(zone, flags, cache);
6528 		if (__probable(cache->zc_alloc_cur)) {
6529 			return cache;
6530 		}
6531 	}
6532 
6533 	return NULL;
6534 }
6535 
6536 __attribute__((always_inline))
6537 static inline zone_cache_t
6538 zalloc_cached_get_pcpu_cache(
6539 	zone_t                  zone,
6540 	zone_cache_ops_t        ops,
6541 	int                     cpu,
6542 	zalloc_flags_t          flags)
6543 {
6544 	zone_cache_t cache = zpercpu_get_cpu(zone->z_pcpu_cache, cpu);
6545 
6546 	if (__probable(cache->zc_alloc_cur != 0)) {
6547 		return cache;
6548 	}
6549 
6550 	if (__probable(cache->zc_free_cur != 0 && !cache->zc_smr)) {
6551 		zone_cache_swap_magazines(cache);
6552 		return cache;
6553 	}
6554 
6555 	return zalloc_cached_prime(zone, ops, flags, cache);
6556 }
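
/*
 * The per-CPU cache is a pair of magazines: allocations pop from
 * zc_alloc_elems, frees push onto zc_free_elems.  When the alloc side
 * runs dry but the free side has elements, the two are simply swapped.
 * SMR zones cannot take that shortcut: freed elements may still be
 * visible to readers, so they must age through the depot layer and
 * zalloc_cached_reuse_smr() instead.
 */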
6557 
6558 
6559 /*!
6560  * @function zalloc_ext
6561  *
6562  * @brief
6563  * The core implementation of @c zalloc(), @c zalloc_flags(), @c zalloc_percpu().
6564  */
6565 struct kalloc_result
6566 zalloc_ext(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
6567 {
6568 	/*
6569 	 * KASan uses zalloc() for fakestack, which can be called anywhere.
6570 	 * However, we make sure these calls can never block.
6571 	 */
6572 	assertf(startup_phase < STARTUP_SUB_EARLY_BOOT ||
6573 #if KASAN_FAKESTACK
6574 	    zone->z_kasan_fakestacks ||
6575 #endif /* KASAN_FAKESTACK */
6576 	    ml_get_interrupts_enabled() ||
6577 	    ml_is_quiescing() ||
6578 	    debug_mode_active(),
6579 	    "Calling {k,z}alloc from interrupt disabled context isn't allowed");
6580 
6581 	/*
6582 	 * Make sure Z_NOFAIL was not obviously misused
6583 	 */
6584 	if (flags & Z_NOFAIL) {
6585 		assert(!zone->exhaustible &&
6586 		    (flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
6587 	}
6588 
6589 #if VM_TAG_SIZECLASSES
6590 	if (__improbable(zone->z_uses_tags)) {
6591 		vm_tag_t tag = zalloc_flags_get_tag(flags);
6592 
6593 		if (flags & Z_VM_TAG_BT_BIT) {
6594 			tag = vm_tag_bt() ?: tag;
6595 		}
6596 		if (tag != VM_KERN_MEMORY_NONE) {
6597 			tag = vm_tag_will_update_zone(tag, zone->z_tags_sizeclass,
6598 			    flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
6599 		}
6600 		if (tag == VM_KERN_MEMORY_NONE) {
6601 			zone_security_flags_t zsflags = zone_security_config(zone);
6602 
6603 			if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
6604 				tag = VM_KERN_MEMORY_KALLOC_DATA;
6605 			} else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR ||
6606 			    zsflags.z_kalloc_type) {
6607 				tag = VM_KERN_MEMORY_KALLOC_TYPE;
6608 			} else {
6609 				tag = VM_KERN_MEMORY_KALLOC;
6610 			}
6611 		}
6612 		flags = Z_VM_TAG(flags & ~Z_VM_TAG_MASK, tag);
6613 	}
6614 #endif /* VM_TAG_SIZECLASSES */
6615 
6616 	disable_preemption();
6617 
6618 #if ZALLOC_ENABLE_ZERO_CHECK
6619 	if (zalloc_skip_zero_check()) {
6620 		flags |= Z_NOZZC;
6621 	}
6622 #endif
6623 
6624 	if (zone->z_pcpu_cache) {
6625 		zone_cache_t cache;
6626 		vm_offset_t index, addr, esize;
6627 		int cpu = cpu_number();
6628 
6629 		cache = zalloc_cached_get_pcpu_cache(zone, NULL, cpu, flags);
6630 		if (__probable(cache)) {
6631 			esize = zone_elem_inner_size(zone);
6632 			zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated += esize;
6633 			index = --cache->zc_alloc_cur;
6634 			addr  = cache->zc_alloc_elems[index];
6635 			cache->zc_alloc_elems[index] = 0;
6636 			enable_preemption();
6637 			return zalloc_return(zone, addr, flags, esize);
6638 		}
6639 	}
6640 
6641 	__attribute__((musttail))
6642 	return zalloc_item(zone, zstats, flags);
6643 }
6644 
6645 __attribute__((always_inline))
6646 static inline zstack_t
6647 zcache_alloc_stack_from_cpu(
6648 	zone_id_t               zid,
6649 	zone_cache_t            cache,
6650 	zstack_t                stack,
6651 	uint32_t                n,
6652 	zone_cache_ops_t        ops)
6653 {
6654 	vm_offset_t *p;
6655 
6656 	n = MIN(n, cache->zc_alloc_cur);
6657 	p = cache->zc_alloc_elems + cache->zc_alloc_cur;
6658 	cache->zc_alloc_cur -= n;
6659 	stack.z_count += n;
6660 
6661 	do {
6662 		vm_offset_t e = *--p;
6663 
6664 		*p = 0;
6665 		if (ops) {
6666 			e = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)e);
6667 		} else {
6668 			e = __zcache_mark_valid(zone_by_id(zid), e, 0);
6669 		}
6670 		zstack_push_no_delta(&stack, (void *)e);
6671 	} while (--n > 0);
6672 
6673 	return stack;
6674 }
6675 
6676 __attribute__((noinline))
6677 static zstack_t
6678 zcache_alloc_fail(zone_id_t zid, zstack_t stack, uint32_t count)
6679 {
6680 	zone_t zone = zone_by_id(zid);
6681 	zone_stats_t zstats = zone->z_stats;
6682 	int cpu;
6683 
6684 	count -= stack.z_count;
6685 
6686 	disable_preemption();
6687 	cpu = cpu_number();
6688 	zpercpu_get_cpu(zstats, cpu)->zs_mem_allocated -=
6689 	    count * zone_elem_inner_size(zone);
6690 	zpercpu_get_cpu(zstats, cpu)->zs_alloc_fail += 1;
6691 	enable_preemption();
6692 
6693 	return stack;
6694 }
6695 
6696 __attribute__((always_inline))
6697 static zstack_t
6698 zcache_alloc_n_ext(
6699 	zone_id_t               zid,
6700 	uint32_t                count,
6701 	zalloc_flags_t          flags,
6702 	zone_cache_ops_t        ops)
6703 {
6704 	zstack_t stack = { };
6705 	zone_cache_t cache;
6706 	zone_t zone;
6707 	int cpu;
6708 
6709 	disable_preemption();
6710 	cpu  = cpu_number();
6711 	zone = zone_by_id(zid);
6712 	zpercpu_get_cpu(zone->z_stats, cpu)->zs_mem_allocated +=
6713 	    count * zone_elem_inner_size(zone);
6714 
6715 	for (;;) {
6716 		cache = zalloc_cached_get_pcpu_cache(zone, ops, cpu, flags);
6717 		if (__probable(cache)) {
6718 			stack = zcache_alloc_stack_from_cpu(zid, cache, stack,
6719 			    count - stack.z_count, ops);
6720 			enable_preemption();
6721 		} else {
6722 			void *o;
6723 
6724 			if (ops) {
6725 				enable_preemption();
6726 				o = ops->zc_op_alloc(zid, flags);
6727 			} else {
6728 				o = zalloc_item(zone, zone->z_stats, flags).addr;
6729 			}
6730 			if (__improbable(o == NULL)) {
6731 				return zcache_alloc_fail(zid, stack, count);
6732 			}
6733 			if (ops) {
6734 				os_atomic_inc(&zone->z_elems_avail, relaxed);
6735 			}
6736 			zstack_push(&stack, o);
6737 		}
6738 
6739 		if (stack.z_count == count) {
6740 			break;
6741 		}
6742 
6743 		disable_preemption();
6744 		cpu = cpu_number();
6745 	}
6746 
6747 	ZALLOC_LOG(zone, stack.z_head, stack.z_count);
6748 
6749 	return stack;
6750 }
6751 
6752 zstack_t
6753 zalloc_n(zone_id_t zid, uint32_t count, zalloc_flags_t flags)
6754 {
6755 	return zcache_alloc_n_ext(zid, count, flags, NULL);
6756 }
6757 
6758 zstack_t
6759 (zcache_alloc_n)(
6760 	zone_id_t               zid,
6761 	uint32_t                count,
6762 	zalloc_flags_t          flags,
6763 	zone_cache_ops_t        ops)
6764 {
6765 	__builtin_assume(ops != NULL);
6766 	return zcache_alloc_n_ext(zid, count, flags, ops);
6767 }
6768 
6769 __attribute__((always_inline))
6770 void *
6771 zalloc(union zone_or_view zov)
6772 {
6773 	return zalloc_flags(zov, Z_WAITOK);
6774 }
6775 
6776 __attribute__((always_inline))
6777 void *
6778 zalloc_noblock(union zone_or_view zov)
6779 {
6780 	return zalloc_flags(zov, Z_NOWAIT);
6781 }
6782 
6783 void *
6784 (zalloc_flags)(union zone_or_view zov, zalloc_flags_t flags)
6785 {
6786 	zone_t zone = zov.zov_view->zv_zone;
6787 	zone_stats_t zstats = zov.zov_view->zv_stats;
6788 
6789 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
6790 	assert(!zone->z_percpu && !zone->z_permanent);
6791 	return zalloc_ext(zone, zstats, flags).addr;
6792 }
6793 
6794 __attribute__((always_inline))
6795 void *
6796 (zalloc_id)(zone_id_t zid, zalloc_flags_t flags)
6797 {
6798 	return (zalloc_flags)(zone_by_id(zid), flags);
6799 }
6800 
6801 void *
6802 (zalloc_ro)(zone_id_t zid, zalloc_flags_t flags)
6803 {
6804 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6805 	zone_t zone = zone_by_id(zid);
6806 	zone_stats_t zstats = zone->z_stats;
6807 	struct kalloc_result kr;
6808 
6809 	kr = zalloc_ext(zone, zstats, flags);
6810 #if ZSECURITY_CONFIG(READ_ONLY)
6811 	assert(zone_security_array[zid].z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);
6812 	if (kr.addr) {
6813 		zone_require_ro(zid, kr.size, kr.addr);
6814 	}
6815 #endif
6816 	return kr.addr;
6817 }
6818 
6819 #if ZSECURITY_CONFIG(READ_ONLY)
6820 
6821 __attribute__((always_inline))
6822 static bool
6823 from_current_stack(vm_offset_t addr, vm_size_t size)
6824 {
6825 	vm_offset_t start = (vm_offset_t)__builtin_frame_address(0);
6826 	vm_offset_t end = (start + kernel_stack_size - 1) & -kernel_stack_size;
6827 
6828 #if CONFIG_KERNEL_TBI
6829 	addr = VM_KERNEL_TBI_FILL(addr);
6830 #endif /* CONFIG_KERNEL_TBI */
6831 
6832 	return (addr >= start) && (addr + size < end);
6833 }
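
/*
 * Kernel stacks are kernel_stack_size aligned, so rounding the current
 * frame address up to the next kernel_stack_size boundary yields the
 * top of the current stack; [start, end) therefore bounds the caller's
 * stack.
 */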
6834 
6835 /*
6836  * Check if an address is from const memory, i.e. TEXT or DATA CONST segments,
6837  * or the SECURITY_READ_ONLY_LATE section.
6838  */
6839 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6840 __attribute__((always_inline))
6841 static bool
6842 from_const_memory(const vm_offset_t addr, vm_size_t size)
6843 {
6844 	return rorgn_contains(addr, size, true);
6845 }
6846 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6847 __attribute__((always_inline))
6848 static bool
6849 from_const_memory(const vm_offset_t addr, vm_size_t size)
6850 {
6851 #pragma unused(addr, size)
6852 	return true;
6853 }
6854 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
6855 
6856 __abortlike
6857 static void
6858 zalloc_ro_mut_validation_panic(zone_id_t zid, void *elem,
6859     const vm_offset_t src, vm_size_t src_size)
6860 {
6861 	vm_offset_t stack_start = (vm_offset_t)__builtin_frame_address(0);
6862 	vm_offset_t stack_end = (stack_start + kernel_stack_size - 1) & -kernel_stack_size;
6863 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
6864 	extern vm_offset_t rorgn_begin;
6865 	extern vm_offset_t rorgn_end;
6866 #else
6867 	vm_offset_t const rorgn_begin = 0;
6868 	vm_offset_t const rorgn_end = 0;
6869 #endif
6870 
6871 	if (from_ro_map(src, src_size)) {
6872 		zone_t src_zone = &zone_array[zone_index_from_ptr((void *)src)];
6873 		zone_t dst_zone = &zone_array[zid];
6874 		panic("zalloc_ro_mut failed: source (%p) not from same zone as dst (%p)"
6875 		    " (expected: %s, actual: %s)", (void *)src, elem, src_zone->z_name,
6876 		    dst_zone->z_name);
6877 	}
6878 
6879 	panic("zalloc_ro_mut failed: source (%p, phys %p) not from RO zone map (%p - %p), "
6880 	    "current stack (%p - %p) or const memory (phys %p - %p)",
6881 	    (void *)src, (void*)kvtophys(src),
6882 	    (void *)zone_info.zi_ro_range.min_address,
6883 	    (void *)zone_info.zi_ro_range.max_address,
6884 	    (void *)stack_start, (void *)stack_end,
6885 	    (void *)rorgn_begin, (void *)rorgn_end);
6886 }
6887 
6888 __attribute__((always_inline))
6889 static void
6890 zalloc_ro_mut_validate_src(zone_id_t zid, void *elem,
6891     const vm_offset_t src, vm_size_t src_size)
6892 {
6893 	if (from_current_stack(src, src_size) ||
6894 	    (from_ro_map(src, src_size) &&
6895 	    zid == zone_index_from_ptr((void *)src)) ||
6896 	    from_const_memory(src, src_size)) {
6897 		return;
6898 	}
6899 	zalloc_ro_mut_validation_panic(zid, elem, src, src_size);
6900 }
6901 
6902 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
6903 
6904 __attribute__((noinline))
6905 void
6906 zalloc_ro_mut(zone_id_t zid, void *elem, vm_offset_t offset,
6907     const void *new_data, vm_size_t new_data_size)
6908 {
6909 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6910 
6911 #if ZSECURITY_CONFIG(READ_ONLY)
6912 	bool skip_src_check = false;
6913 
6914 	/*
6915 	 * The OSEntitlements RO-zone is treated a little differently. For more
6916 	 * information, see rdar://100518485.
6917 	 */
6918 	if (zid == ZONE_ID_AMFI_OSENTITLEMENTS) {
6919 		code_signing_config_t cs_config = 0;
6920 
6921 		code_signing_configuration(NULL, &cs_config);
6922 		if (cs_config & CS_CONFIG_CSM_ENABLED) {
6923 			skip_src_check = true;
6924 		}
6925 	}
6926 
6927 	if (skip_src_check == false) {
6928 		zalloc_ro_mut_validate_src(zid, elem, (vm_offset_t)new_data,
6929 		    new_data_size);
6930 	}
6931 	pmap_ro_zone_memcpy(zid, (vm_offset_t) elem, offset,
6932 	    (vm_offset_t) new_data, new_data_size);
6933 #else
6934 	(void)zid;
6935 	memcpy((void *)((uintptr_t)elem + offset), new_data, new_data_size);
6936 #endif
6937 }
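
/*
 * Example: a sketch of updating one field of a read-only element, assuming
 * a hypothetical `struct foo` with a `flags` member allocated from an RO
 * zone `ZONE_ID_FOO`:
 *
 *	uint32_t new_flags = FOO_F_ACTIVE;
 *
 *	zalloc_ro_mut(ZONE_ID_FOO, foo, offsetof(struct foo, flags),
 *	    &new_flags, sizeof(new_flags));
 *
 * The source buffer lives on the caller's stack, which satisfies
 * zalloc_ro_mut_validate_src(); the write itself is carried out by the
 * pmap when ZSECURITY_CONFIG(READ_ONLY) is on.
 */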
6938 
6939 __attribute__((noinline))
6940 uint64_t
6941 zalloc_ro_mut_atomic(zone_id_t zid, void *elem, vm_offset_t offset,
6942     zro_atomic_op_t op, uint64_t value)
6943 {
6944 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6945 
6946 #if ZSECURITY_CONFIG(READ_ONLY)
6947 	value = pmap_ro_zone_atomic_op(zid, (vm_offset_t)elem, offset, op, value);
6948 #else
6949 	(void)zid;
6950 	value = __zalloc_ro_mut_atomic((vm_offset_t)elem + offset, op, value);
6951 #endif
6952 	return value;
6953 }
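
/*
 * Example: a sketch of an atomic read-modify-write on an RO element, e.g.
 * bumping a 64-bit counter on the hypothetical `struct foo` above (the op
 * constant is illustrative; see zro_atomic_op_t for the real names):
 *
 *	uint64_t prev = zalloc_ro_mut_atomic(ZONE_ID_FOO, foo,
 *	    offsetof(struct foo, refcnt), ZRO_ATOMIC_ADD_64, 1);
 */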
6954 
6955 void
6956 zalloc_ro_clear(zone_id_t zid, void *elem, vm_offset_t offset, vm_size_t size)
6957 {
6958 	assert(zid >= ZONE_ID__FIRST_RO && zid <= ZONE_ID__LAST_RO);
6959 #if ZSECURITY_CONFIG(READ_ONLY)
6960 	pmap_ro_zone_bzero(zid, (vm_offset_t)elem, offset, size);
6961 #else
6962 	(void)zid;
6963 	bzero((void *)((uintptr_t)elem + offset), size);
6964 #endif
6965 }
6966 
6967 /*
6968  * This function will run in the PPL and needs to be robust
6969  * against an attacker with arbitrary kernel write.
6970  */
6971 
6972 #if ZSECURITY_CONFIG(READ_ONLY)
6973 
6974 __abortlike
6975 static void
6976 zone_id_require_ro_panic(zone_id_t zid, void *addr)
6977 {
6978 	struct zone_size_params p = zone_ro_size_params[zid];
6979 	vm_offset_t elem = (vm_offset_t)addr;
6980 	uint32_t zindex;
6981 	zone_t other;
6982 	zone_t zone = &zone_array[zid];
6983 
6984 	if (!from_ro_map(addr, 1)) {
6985 		panic("zone_require_ro failed: address not in a ro zone (addr: %p)", addr);
6986 	}
6987 
6988 	if (!Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic)) {
6989 		panic("zone_require_ro failed: element improperly aligned (addr: %p)", addr);
6990 	}
6991 
6992 	zindex = zone_index_from_ptr(addr);
6993 	other = &zone_array[zindex];
6994 	if (zindex >= os_atomic_load(&num_zones, relaxed) || !other->z_self) {
6995 		panic("zone_require_ro failed: invalid zone index %d "
6996 		    "(addr: %p, expected: %s%s)", zindex,
6997 		    addr, zone_heap_name(zone), zone->z_name);
6998 	} else {
6999 		panic("zone_require_ro failed: address in unexpected zone id %d (%s%s) "
7000 		    "(addr: %p, expected: %s%s)",
7001 		    zindex, zone_heap_name(other), other->z_name,
7002 		    addr, zone_heap_name(zone), zone->z_name);
7003 	}
7004 }
7005 
7006 #endif /* ZSECURITY_CONFIG(READ_ONLY) */
7007 
7008 __attribute__((always_inline))
7009 void
7010 zone_require_ro(zone_id_t zid, vm_size_t elem_size __unused, void *addr)
7011 {
7012 #if ZSECURITY_CONFIG(READ_ONLY)
7013 	struct zone_size_params p = zone_ro_size_params[zid];
7014 	vm_offset_t elem = (vm_offset_t)addr;
7015 
7016 	if (!from_ro_map(addr, 1) ||
7017 	    !Z_FAST_ALIGNED(PAGE_SIZE - (elem & PAGE_MASK), p.z_align_magic) ||
7018 	    zid != zone_meta_from_addr(elem)->zm_index) {
7019 		zone_id_require_ro_panic(zid, addr);
7020 	}
7021 #else
7022 #pragma unused(zid, addr)
7023 #endif
7024 }
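
/*
 * Note: the fast path above checks three cheap predicates: the address is
 * inside the RO map, it is element-aligned for this zone (Z_FAST_ALIGNED
 * against zone_ro_size_params), and the per-page metadata records the
 * expected zone id. The __abortlike slow path re-runs each predicate to
 * produce a precise panic message.
 */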
7025 
7026 void *
7027 (zalloc_percpu)(union zone_or_view zov, zalloc_flags_t flags)
7028 {
7029 	zone_t zone = zov.zov_view->zv_zone;
7030 	zone_stats_t zstats = zov.zov_view->zv_stats;
7031 
7032 	assert(zone > &zone_array[ZONE_ID__LAST_RO]);
7033 	assert(zone->z_percpu);
7034 	flags |= Z_PCPU;
7035 	return (void *)__zpcpu_mangle(zalloc_ext(zone, zstats, flags).addr);
7036 }
7037 
7038 static void *
7039 _zalloc_permanent(zone_t zone, vm_size_t size, vm_offset_t mask)
7040 {
7041 	struct zone_page_metadata *page_meta;
7042 	vm_offset_t offs, addr;
7043 	zone_pva_t pva;
7044 
7045 	assert(ml_get_interrupts_enabled() ||
7046 	    ml_is_quiescing() ||
7047 	    debug_mode_active() ||
7048 	    startup_phase < STARTUP_SUB_EARLY_BOOT);
7049 
7050 	size = (size + mask) & ~mask;
7051 	assert(size <= PAGE_SIZE);
7052 
7053 	zone_lock(zone);
7054 	assert(zone->z_self == zone);
7055 
7056 	for (;;) {
7057 		pva = zone->z_pageq_partial;
7058 		while (!zone_pva_is_null(pva)) {
7059 			page_meta = zone_pva_to_meta(pva);
7060 			if (page_meta->zm_bump + size <= PAGE_SIZE) {
7061 				goto found;
7062 			}
7063 			pva = page_meta->zm_page_next;
7064 		}
7065 
7066 		zone_expand_locked(zone, Z_WAITOK, NULL);
7067 	}
7068 
7069 found:
7070 	offs = (uint16_t)((page_meta->zm_bump + mask) & ~mask);
7071 	page_meta->zm_bump = (uint16_t)(offs + size);
7072 	page_meta->zm_alloc_size += size;
7073 	zone->z_elems_free -= size;
7074 	zpercpu_get(zone->z_stats)->zs_mem_allocated += size;
7075 
7076 	if (page_meta->zm_alloc_size >= PAGE_SIZE - sizeof(vm_offset_t)) {
7077 		zone_meta_requeue(zone, &zone->z_pageq_full, page_meta);
7078 	}
7079 
7080 	zone_unlock(zone);
7081 
7082 	addr = offs + zone_pva_to_addr(pva);
7083 
7084 	DTRACE_VM2(zalloc, zone_t, zone, void*, addr);
7085 	return (void *)addr;
7086 }
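
/*
 * Note: `(size + mask) & ~mask` rounds the request up to the requested
 * alignment, e.g. size 20 with mask 7 (8-byte alignment) becomes 24.
 * Permanent zones never free, so pages carry only a bump pointer
 * (zm_bump) and a running zm_alloc_size instead of a free-element bitmap.
 */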
7087 
7088 static void *
7089 _zalloc_permanent_large(size_t size, vm_offset_t mask, vm_tag_t tag)
7090 {
7091 	vm_offset_t addr;
7092 
7093 	kernel_memory_allocate(kernel_map, &addr, size, mask,
7094 	    KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_ZERO, tag);
7095 
7096 	return (void *)addr;
7097 }
7098 
7099 void *
7100 zalloc_permanent_tag(vm_size_t size, vm_offset_t mask, vm_tag_t tag)
7101 {
7102 	if (size <= PAGE_SIZE) {
7103 		zone_t zone = &zone_array[ZONE_ID_PERMANENT];
7104 		return _zalloc_permanent(zone, size, mask);
7105 	}
7106 	return _zalloc_permanent_large(size, mask, tag);
7107 }
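
/*
 * Example: a sketch of a boot-time permanent allocation, assuming the
 * zalloc_permanent()/ZALIGN() convenience macros from the zalloc headers
 * (the table names are hypothetical). The memory is wired, zero-filled,
 * and can never be freed, so this is reserved for data that lives as long
 * as the kernel:
 *
 *	my_table = zalloc_permanent(ntables * sizeof(*my_table),
 *	    ZALIGN(struct my_entry));
 */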
7108 
7109 void *
7110 zalloc_percpu_permanent(vm_size_t size, vm_offset_t mask)
7111 {
7112 	zone_t zone = &zone_array[ZONE_ID_PERCPU_PERMANENT];
7113 	return (void *)__zpcpu_mangle(_zalloc_permanent(zone, size, mask));
7114 }
7115 
7116 /*! @} */
7117 #endif /* !ZALLOC_TEST */
7118 #pragma mark zone GC / trimming
7119 #if !ZALLOC_TEST
7120 
7121 static thread_call_data_t zone_trim_callout;
7122 EVENT_DEFINE(ZONE_EXHAUSTED);
7123 
7124 static void
7125 zone_reclaim_chunk(
7126 	zone_t                  z,
7127 	struct zone_page_metadata *meta,
7128 	uint32_t                free_count)
7129 {
7130 	vm_address_t page_addr;
7131 	vm_size_t    size_to_free;
7132 	uint32_t     bitmap_ref;
7133 	uint32_t     page_count;
7134 	zone_security_flags_t zsflags = zone_security_config(z);
7135 	bool         sequester = !z->z_destroyed;
7136 	bool         oob_guard = false;
7137 
7138 	if (zone_submap_is_sequestered(zsflags)) {
7139 		/*
7140 		 * If the entire map is sequestered, we can't return the VA.
7141 		 * It stays pinned to the zone forever.
7142 		 */
7143 		sequester = true;
7144 	}
7145 
7146 	zone_meta_queue_pop(z, &z->z_pageq_empty);
7147 
7148 	page_addr  = zone_meta_to_addr(meta);
7149 	page_count = meta->zm_chunk_len;
7150 	oob_guard  = meta->zm_guarded;
7151 
7152 	if (meta->zm_alloc_size) {
7153 		zone_metadata_corruption(z, meta, "alloc_size");
7154 	}
7155 	if (z->z_percpu) {
7156 		if (page_count != 1) {
7157 			zone_metadata_corruption(z, meta, "page_count");
7158 		}
7159 		size_to_free = ptoa(z->z_chunk_pages);
7160 		zone_remove_wired_pages(z->z_chunk_pages);
7161 	} else {
7162 		if (page_count > z->z_chunk_pages) {
7163 			zone_metadata_corruption(z, meta, "page_count");
7164 		}
7165 		if (page_count < z->z_chunk_pages) {
7166 			/* Dequeue non-populated VA from z_pageq_va */
7167 			zone_meta_remqueue(z, meta + page_count);
7168 		}
7169 		size_to_free = ptoa(page_count);
7170 		zone_remove_wired_pages(page_count);
7171 	}
7172 
7173 	zone_counter_sub(z, z_elems_free, free_count);
7174 	zone_counter_sub(z, z_elems_avail, free_count);
7175 	zone_counter_sub(z, z_wired_empty, page_count);
7176 	zone_counter_sub(z, z_wired_cur, page_count);
7177 
7178 	if (z->z_pcpu_cache == NULL) {
7179 		if (z->z_elems_free_min < free_count) {
7180 			z->z_elems_free_min = 0;
7181 		} else {
7182 			z->z_elems_free_min -= free_count;
7183 		}
7184 	}
7185 	if (z->z_elems_free_wma < free_count) {
7186 		z->z_elems_free_wma = 0;
7187 	} else {
7188 		z->z_elems_free_wma -= free_count;
7189 	}
7190 
7191 	bitmap_ref = 0;
7192 	if (sequester) {
7193 		if (meta->zm_inline_bitmap) {
7194 			for (int i = 0; i < meta->zm_chunk_len; i++) {
7195 				meta[i].zm_bitmap = 0;
7196 			}
7197 		} else {
7198 			bitmap_ref = meta->zm_bitmap;
7199 			meta->zm_bitmap = 0;
7200 		}
7201 		meta->zm_chunk_len = 0;
7202 	} else {
7203 		if (!meta->zm_inline_bitmap) {
7204 			bitmap_ref = meta->zm_bitmap;
7205 		}
7206 		zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
7207 		bzero(meta, sizeof(*meta) * (z->z_chunk_pages + oob_guard));
7208 	}
7209 
7210 #if CONFIG_ZLEAKS
7211 	if (__improbable(zleak_should_disable_for_zone(z) &&
7212 	    startup_phase >= STARTUP_SUB_THREAD_CALL)) {
7213 		thread_call_enter(&zone_leaks_callout);
7214 	}
7215 #endif /* CONFIG_ZLEAKS */
7216 
7217 	zone_unlock(z);
7218 
7219 	if (bitmap_ref) {
7220 		zone_bits_free(bitmap_ref);
7221 	}
7222 
7223 	/* Free the pages for metadata and account for them */
7224 #if KASAN_CLASSIC
7225 	if (z->z_percpu) {
7226 		for (uint32_t i = 0; i < z->z_chunk_pages; i++) {
7227 			kasan_zmem_remove(page_addr + ptoa(i), PAGE_SIZE,
7228 			    zone_elem_outer_size(z),
7229 			    zone_elem_outer_offs(z),
7230 			    zone_elem_redzone(z));
7231 		}
7232 	} else {
7233 		kasan_zmem_remove(page_addr, size_to_free,
7234 		    zone_elem_outer_size(z),
7235 		    zone_elem_outer_offs(z),
7236 		    zone_elem_redzone(z));
7237 	}
7238 #endif /* KASAN_CLASSIC */
7239 
7240 	if (sequester) {
7241 		kernel_memory_depopulate(page_addr, size_to_free,
7242 		    KMA_KOBJECT, VM_KERN_MEMORY_ZONE);
7243 	} else {
7244 		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_VM);
7245 		kmem_free(zone_submap(zsflags), page_addr,
7246 		    ptoa(z->z_chunk_pages + oob_guard));
7247 		if (oob_guard) {
7248 			os_atomic_dec(&zone_guard_pages, relaxed);
7249 		}
7250 	}
7251 
7252 	thread_yield_to_preemption();
7253 
7254 	zone_lock(z);
7255 
7256 	if (sequester) {
7257 		zone_meta_queue_push(z, &z->z_pageq_va, meta);
7258 	}
7259 }
7260 
7261 static void
7262 zone_reclaim_elements(zone_t z, uint16_t n, vm_offset_t *elems)
7263 {
7264 	z_debug_assert(n <= zc_mag_size());
7265 
7266 	for (uint16_t i = 0; i < n; i++) {
7267 		vm_offset_t addr = elems[i];
7268 		elems[i] = 0;
7269 		zfree_drop(z, addr);
7270 	}
7271 
7272 	z->z_elems_free += n;
7273 }
7274 
7275 static void
7276 zcache_reclaim_elements(zone_id_t zid, uint16_t n, vm_offset_t *elems)
7277 {
7278 	z_debug_assert(n <= zc_mag_size());
7279 	zone_cache_ops_t ops = zcache_ops[zid];
7280 
7281 	for (uint16_t i = 0; i < n; i++) {
7282 		vm_offset_t addr = elems[i];
7283 		elems[i] = 0;
7284 		addr = (vm_offset_t)ops->zc_op_mark_valid(zid, (void *)addr);
7285 		ops->zc_op_free(zid, (void *)addr);
7286 	}
7287 
7288 	os_atomic_sub(&zone_by_id(zid)->z_elems_avail, n, relaxed);
7289 }
7290 
7291 static void
7292 zone_depot_trim(zone_t z, uint32_t target, struct zone_depot *zd)
7293 {
7294 	zpercpu_foreach(zc, z->z_pcpu_cache) {
7295 		zone_depot_lock(zc);
7296 
7297 		if (zc->zc_depot.zd_full > (target + 1) / 2) {
7298 			uint32_t n = zc->zc_depot.zd_full - (target + 1) / 2;
7299 			zone_depot_move_full(zd, &zc->zc_depot, n, NULL);
7300 		}
7301 
7302 		if (zc->zc_depot.zd_empty > target / 2) {
7303 			uint32_t n = zc->zc_depot.zd_empty - target / 2;
7304 			zone_depot_move_empty(zd, &zc->zc_depot, n, NULL);
7305 		}
7306 
7307 		zone_depot_unlock(zc);
7308 	}
7309 }
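
/*
 * Note: the per-CPU target is split between magazine kinds: each CPU keeps
 * at most (target + 1) / 2 full and target / 2 empty magazines, so e.g. a
 * target of 5 leaves up to 3 full and 2 empty per CPU; the excess is moved
 * to the caller's depot `zd` for reclamation.
 */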
7310 
7311 __enum_decl(zone_reclaim_mode_t, uint32_t, {
7312 	ZONE_RECLAIM_TRIM,
7313 	ZONE_RECLAIM_DRAIN,
7314 	ZONE_RECLAIM_DESTROY,
7315 });
7316 
7317 static void
7318 zone_reclaim_pcpu(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7319 {
7320 	uint32_t depot_max = 0;
7321 	bool cleanup = mode != ZONE_RECLAIM_TRIM;
7322 
7323 	if (z->z_depot_cleanup) {
7324 		z->z_depot_cleanup = false;
7325 		depot_max = z->z_depot_size;
7326 		cleanup = true;
7327 	}
7328 
7329 	if (cleanup) {
7330 		zone_depot_trim(z, depot_max, zd);
7331 	}
7332 
7333 	if (mode == ZONE_RECLAIM_DESTROY) {
7334 		zpercpu_foreach(zc, z->z_pcpu_cache) {
7335 			zone_reclaim_elements(z, zc->zc_alloc_cur,
7336 			    zc->zc_alloc_elems);
7337 			zone_reclaim_elements(z, zc->zc_free_cur,
7338 			    zc->zc_free_elems);
7339 			zc->zc_alloc_cur = zc->zc_free_cur = 0;
7340 		}
7341 
7342 		z->z_recirc_empty_min = 0;
7343 		z->z_recirc_empty_wma = 0;
7344 		z->z_recirc_full_min = 0;
7345 		z->z_recirc_full_wma = 0;
7346 		z->z_recirc_cont_cur = 0;
7347 		z->z_recirc_cont_wma = 0;
7348 	}
7349 }
7350 
7351 static void
7352 zone_reclaim_recirc(zone_t z, zone_reclaim_mode_t mode, struct zone_depot *zd)
7353 {
7354 	assert(zd->zd_empty == 0);
7355 	assert(zd->zd_full == 0);
7356 
7357 	zone_recirc_lock_nopreempt(z);
7358 
7359 	if (mode == ZONE_RECLAIM_TRIM) {
7360 		uint32_t count;
7361 
7362 		count = MIN(z->z_recirc_empty_wma / Z_WMA_UNIT,
7363 		    z->z_recirc_empty_min);
7364 		assert(count <= z->z_recirc.zd_empty);
7365 
7366 		if (count) {
7367 			zone_depot_move_empty(zd, &z->z_recirc, count, NULL);
7368 			z->z_recirc_empty_min -= count;
7369 			z->z_recirc_empty_wma -= count * Z_WMA_UNIT;
7370 		}
7371 
7372 		count = MIN(z->z_recirc_full_wma / Z_WMA_UNIT, z->z_recirc_full_min);
7373 		assert(count <= z->z_recirc.zd_full);
7374 		if (count) {
7375 			zone_depot_move_full(zd, &z->z_recirc, count, NULL);
7376 			z->z_recirc_full_min -= count;
7377 			z->z_recirc_full_wma -= count * Z_WMA_UNIT;
7378 		}
7379 	} else {
7380 		*zd = z->z_recirc;
7381 		if (zd->zd_full == 0) {
7382 			zd->zd_tail = &zd->zd_head;
7383 		}
7384 		zone_depot_init(&z->z_recirc);
7385 		z->z_recirc_empty_min = 0;
7386 		z->z_recirc_empty_wma = 0;
7387 		z->z_recirc_full_min = 0;
7388 		z->z_recirc_full_wma = 0;
7389 	}
7390 
7391 	zone_recirc_unlock_nopreempt(z);
7392 }
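
/*
 * Note: in TRIM mode, the harvest is bounded by both the weighted moving
 * average (z_recirc_*_wma, in Z_WMA_UNIT fixed point) and the observed
 * minimum (z_recirc_*_min), so only the depot surplus that stayed idle
 * across the whole observation window is taken. DRAIN and DESTROY steal
 * the entire recirculation depot in one move.
 */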
7393 
7394 /*!
7395  * @function zone_reclaim
7396  *
7397  * @brief
7398  * Drains or trims the zone.
7399  *
7400  * @discussion
7401  * Draining the zone frees all of its elements.
7402  *
7403  * Trimming the zone tries to respect the working set size, and avoids draining
7404  * the depot when it's not necessary.
7405  *
7406  * @param z             The zone to reclaim from
7407  * @param mode          The purpose of this reclaim.
7408  */
7409 static void
7410 zone_reclaim(zone_t z, zone_reclaim_mode_t mode)
7411 {
7412 	struct zone_depot zd;
7413 
7414 	zone_depot_init(&zd);
7415 
7416 	zone_lock(z);
7417 
7418 	if (mode == ZONE_RECLAIM_DESTROY) {
7419 		if (!z->z_destructible || z->z_elems_rsv) {
7420 			panic("zdestroy: Zone %s%s isn't destructible",
7421 			    zone_heap_name(z), z->z_name);
7422 		}
7423 
7424 		if (!z->z_self || z->z_expander ||
7425 		    z->z_async_refilling || z->z_expanding_wait) {
7426 			panic("zdestroy: Zone %s%s in an invalid state for destruction",
7427 			    zone_heap_name(z), z->z_name);
7428 		}
7429 
7430 #if !KASAN_CLASSIC
7431 		/*
7432 		 * Unset the valid bit. We'll hit an assert failure on further
7433 		 * operations on this zone, until zinit() is called again.
7434 		 *
7435 		 * Leave the zone valid for KASan as we will see zfree's on
7436 		 * quarantined free elements even after the zone is destroyed.
7437 		 */
7438 		z->z_self = NULL;
7439 #endif
7440 		z->z_destroyed = true;
7441 	} else if (z->z_destroyed) {
7442 		return zone_unlock(z);
7443 	} else if (zone_count_free(z) <= z->z_elems_rsv) {
7444 		/* If the zone is under its reserve level, leave it alone. */
7445 		return zone_unlock(z);
7446 	}
7447 
7448 	if (z->z_pcpu_cache) {
7449 		zone_magazine_t mag;
7450 		uint32_t freed = 0;
7451 
7452 		/*
7453 		 * This is all done with the zone lock held on purpose.
7454 		 * The work here is O(ncpu), which should still be short.
7455 		 *
7456 		 * We need to keep the lock held until we have reclaimed
7457 		 * at least a few magazines, otherwise if the zone has no
7458 		 * free elements outside of the depot, a thread performing
7459 		 * a concurrent allocation could try to grow the zone
7460 		 * while we're trying to drain it.
7461 		 */
7462 		zone_reclaim_recirc(z, mode, &zd);
7463 		zone_reclaim_pcpu(z, mode, &zd);
7464 
7465 		if (z->z_chunk_elems) {
7466 			zone_cache_t cache = zpercpu_get_cpu(z->z_pcpu_cache, 0);
7467 			smr_t smr = zone_cache_smr(cache);
7468 
7469 			while (zd.zd_full) {
7470 				mag = zone_depot_pop_head_full(&zd, NULL);
7471 				if (smr) {
7472 					smr_wait(smr, mag->zm_seq);
7473 					zalloc_cached_reuse_smr(z, cache, mag);
7474 					freed += zc_mag_size();
7475 				}
7476 				zone_reclaim_elements(z, zc_mag_size(),
7477 				    mag->zm_elems);
7478 				zone_depot_insert_head_empty(&zd, mag);
7479 
7480 				freed += zc_mag_size();
7481 				if (freed >= zc_free_batch_size()) {
7482 					zone_unlock(z);
7483 					zone_magazine_free_list(&zd);
7484 					thread_yield_to_preemption();
7485 					zone_lock(z);
7486 					freed = 0;
7487 				}
7488 			}
7489 		} else {
7490 			zone_id_t zid = zone_index(z);
7491 
7492 			zone_unlock(z);
7493 
7494 			assert(zid <= ZONE_ID__FIRST_DYNAMIC && zcache_ops[zid]);
7495 
7496 			while (zd.zd_full) {
7497 				mag = zone_depot_pop_head_full(&zd, NULL);
7498 				zcache_reclaim_elements(zid, zc_mag_size(),
7499 				    mag->zm_elems);
7500 				zone_magazine_free(mag);
7501 			}
7502 
7503 			goto cleanup;
7504 		}
7505 	}
7506 
7507 	while (!zone_pva_is_null(z->z_pageq_empty)) {
7508 		struct zone_page_metadata *meta;
7509 		uint32_t count, limit = z->z_elems_rsv * 5 / 4;
7510 
7511 		if (mode == ZONE_RECLAIM_TRIM && z->z_pcpu_cache == NULL) {
7512 			limit = MAX(limit, z->z_elems_free -
7513 			    MIN(z->z_elems_free_min, z->z_elems_free_wma));
7514 		}
7515 
7516 		meta  = zone_pva_to_meta(z->z_pageq_empty);
7517 		count = (uint32_t)ptoa(meta->zm_chunk_len) / zone_elem_outer_size(z);
7518 
7519 		if (zone_count_free(z) - count < limit) {
7520 			break;
7521 		}
7522 
7523 		zone_reclaim_chunk(z, meta, count);
7524 	}
7525 
7526 	zone_unlock(z);
7527 
7528 cleanup:
7529 	zone_magazine_free_list(&zd);
7530 }
7531 
7532 void
7533 zone_drain(zone_t zone)
7534 {
7535 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7536 	lck_mtx_lock(&zone_gc_lock);
7537 	zone_reclaim(zone, ZONE_RECLAIM_DRAIN);
7538 	lck_mtx_unlock(&zone_gc_lock);
7539 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7540 }
7541 
7542 void
7543 zcache_drain(zone_id_t zid)
7544 {
7545 	zone_drain(zone_by_id(zid));
7546 }
7547 
7548 static void
7549 zone_reclaim_all(zone_reclaim_mode_t mode)
7550 {
7551 	/*
7552 	 * Start with zcaches, so that they flow into the regular zones.
7553 	 *
7554 	 * Then the zones whose VA is sequestered, since depopulating their
7555 	 * pages does not require allocating vm map entries for holes,
7556 	 * which gives memory back to the system faster.
7557 	 */
7558 	for (zone_id_t zid = ZONE_ID__LAST_RO + 1; zid < ZONE_ID__FIRST_DYNAMIC; zid++) {
7559 		zone_t z = zone_by_id(zid);
7560 
7561 		if (z->z_self && z->z_chunk_elems == 0) {
7562 			zone_reclaim(z, mode);
7563 		}
7564 	}
7565 	zone_index_foreach(zid) {
7566 		zone_t z = zone_by_id(zid);
7567 
7568 		if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7569 			continue;
7570 		}
7571 		if (zone_submap_is_sequestered(zone_security_array[zid]) &&
7572 		    z->collectable) {
7573 			zone_reclaim(z, mode);
7574 		}
7575 	}
7576 
7577 	zone_index_foreach(zid) {
7578 		zone_t z = zone_by_id(zid);
7579 
7580 		if (z == zc_magazine_zone || z->z_chunk_elems == 0) {
7581 			continue;
7582 		}
7583 		if (!zone_submap_is_sequestered(zone_security_array[zid]) &&
7584 		    z->collectable) {
7585 			zone_reclaim(z, mode);
7586 		}
7587 	}
7588 
7589 	zone_reclaim(zc_magazine_zone, mode);
7590 }
7591 
7592 void
7593 zone_userspace_reboot_checks(void)
7594 {
7595 	vm_size_t label_zone_size = zone_size_allocated(ipc_service_port_label_zone);
7596 	if (label_zone_size != 0) {
7597 		panic("Zone %s should be empty upon userspace reboot. Actual size: %lu.",
7598 		    ipc_service_port_label_zone->z_name, (unsigned long)label_zone_size);
7599 	}
7600 }
7601 
7602 void
7603 zone_gc(zone_gc_level_t level)
7604 {
7605 	zone_reclaim_mode_t mode;
7606 	zone_t largest_zone = NULL;
7607 
7608 	switch (level) {
7609 	case ZONE_GC_TRIM:
7610 		mode = ZONE_RECLAIM_TRIM;
7611 		break;
7612 	case ZONE_GC_DRAIN:
7613 		mode = ZONE_RECLAIM_DRAIN;
7614 		break;
7615 	case ZONE_GC_JETSAM:
7616 		largest_zone = kill_process_in_largest_zone();
7617 		mode = ZONE_RECLAIM_TRIM;
7618 		break;
7619 	}
7620 
7621 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7622 	lck_mtx_lock(&zone_gc_lock);
7623 
7624 	zone_reclaim_all(mode);
7625 
7626 	if (level == ZONE_GC_JETSAM && zone_map_nearing_exhaustion()) {
7627 		/*
7628 		 * If we possibly killed a process, but we're still critical,
7629 		 * we need to drain harder.
7630 		 */
7631 		zone_reclaim(largest_zone, ZONE_RECLAIM_DRAIN);
7632 		zone_reclaim_all(ZONE_RECLAIM_DRAIN);
7633 	}
7634 
7635 	lck_mtx_unlock(&zone_gc_lock);
7636 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7637 }
7638 
7639 void
7640 zone_gc_trim(void)
7641 {
7642 	zone_gc(ZONE_GC_TRIM);
7643 }
7644 
7645 void
7646 zone_gc_drain(void)
7647 {
7648 	zone_gc(ZONE_GC_DRAIN);
7649 }
7650 
7651 static bool
7652 zone_trim_needed(zone_t z)
7653 {
7654 	if (z->z_depot_cleanup) {
7655 		return true;
7656 	}
7657 
7658 	if (z->z_async_refilling) {
7659 		/* Don't fight with refill */
7660 		return false;
7661 	}
7662 
7663 	if (z->z_pcpu_cache) {
7664 		uint32_t e_n, f_n;
7665 
7666 		e_n = MIN(z->z_recirc_empty_wma, z->z_recirc_empty_min * Z_WMA_UNIT);
7667 		f_n = MIN(z->z_recirc_full_wma, z->z_recirc_full_min * Z_WMA_UNIT);
7668 
7669 		if (e_n > zc_autotrim_buckets() * Z_WMA_UNIT) {
7670 			return true;
7671 		}
7672 
7673 		if (f_n * zc_mag_size() > z->z_elems_rsv * Z_WMA_UNIT &&
7674 		    f_n * zc_mag_size() * zone_elem_inner_size(z) >
7675 		    zc_autotrim_size() * Z_WMA_UNIT) {
7676 			return true;
7677 		}
7678 
7679 		return false;
7680 	}
7681 
7682 	if (!zone_pva_is_null(z->z_pageq_empty)) {
7683 		uint32_t n;
7684 
7685 		n = MIN(z->z_elems_free_wma, z->z_elems_free_min);
7686 
7687 		return n >= z->z_elems_rsv + z->z_chunk_elems;
7688 	}
7689 
7690 	return false;
7691 }
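
/*
 * Note: both comparisons above carry the Z_WMA_UNIT fixed-point scale on
 * each side. A cached zone qualifies for trimming when its idle empty
 * magazines exceed zc_autotrim_buckets(), or when its idle full magazines
 * hold more elements than the reserve and more bytes than
 * zc_autotrim_size().
 */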
7692 
7693 static void
7694 zone_trim_async(__unused thread_call_param_t p0, __unused thread_call_param_t p1)
7695 {
7696 	current_thread()->options |= TH_OPT_ZONE_PRIV;
7697 
7698 	zone_foreach(z) {
7699 		if (!z->collectable || z == zc_magazine_zone) {
7700 			continue;
7701 		}
7702 
7703 		if (zone_trim_needed(z)) {
7704 			lck_mtx_lock(&zone_gc_lock);
7705 			zone_reclaim(z, ZONE_RECLAIM_TRIM);
7706 			lck_mtx_unlock(&zone_gc_lock);
7707 		}
7708 	}
7709 
7710 	if (zone_trim_needed(zc_magazine_zone)) {
7711 		lck_mtx_lock(&zone_gc_lock);
7712 		zone_reclaim(zc_magazine_zone, ZONE_RECLAIM_TRIM);
7713 		lck_mtx_unlock(&zone_gc_lock);
7714 	}
7715 
7716 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
7717 }
7718 
7719 void
7720 compute_zone_working_set_size(__unused void *param)
7721 {
7722 	uint32_t zc_auto = zc_enable_level();
7723 	bool needs_trim = false;
7724 
7725 	/*
7726 	 * Keep zone caching disabled until the first proc is made.
7727 	 */
7728 	if (__improbable(zone_caching_disabled < 0)) {
7729 		return;
7730 	}
7731 
7732 	zone_caching_disabled = vm_pool_low();
7733 
7734 	if (os_mul_overflow(zc_auto, Z_WMA_UNIT, &zc_auto)) {
7735 		zc_auto = 0;
7736 	}
7737 
7738 	zone_foreach(z) {
7739 		uint32_t old, wma, cur;
7740 		bool needs_caching = false;
7741 
7742 		if (z->z_self != z) {
7743 			continue;
7744 		}
7745 
7746 		zone_lock(z);
7747 
7748 		zone_recirc_lock_nopreempt(z);
7749 
7750 		if (z->z_pcpu_cache) {
7751 			wma = Z_WMA_MIX(z->z_recirc_empty_wma, z->z_recirc_empty_min);
7752 			z->z_recirc_empty_min = z->z_recirc.zd_empty;
7753 			z->z_recirc_empty_wma = wma;
7754 		} else {
7755 			wma = Z_WMA_MIX(z->z_elems_free_wma, z->z_elems_free_min);
7756 			z->z_elems_free_min = z->z_elems_free;
7757 			z->z_elems_free_wma = wma;
7758 		}
7759 
7760 		wma = Z_WMA_MIX(z->z_recirc_full_wma, z->z_recirc_full_min);
7761 		z->z_recirc_full_min = z->z_recirc.zd_full;
7762 		z->z_recirc_full_wma = wma;
7763 
7764 		/* fixed point decimal of contentions per second */
7765 		old = z->z_recirc_cont_wma;
7766 		cur = z->z_recirc_cont_cur * Z_WMA_UNIT /
7767 		    (zpercpu_count() * ZONE_WSS_UPDATE_PERIOD);
7768 		cur = (3 * old + cur) / 4;
7769 		zone_recirc_unlock_nopreempt(z);
7770 
7771 		if (z->z_pcpu_cache) {
7772 			uint16_t size = z->z_depot_size;
7773 
7774 			if (size < z->z_depot_limit && cur > zc_grow_level()) {
7775 				/*
7776 				 * lose history on purpose now
7777 				 * that we just grew, to give
7778 				 * the system time to adjust.
7779 				 */
7780 				cur  = (zc_grow_level() + zc_shrink_level()) / 2;
7781 				size = size ? (3 * size + 2) / 2 : 2;
7782 				z->z_depot_size = MIN(z->z_depot_limit, size);
7783 			} else if (size > 0 && cur <= zc_shrink_level()) {
7784 				/*
7785 				 * lose history on purpose now
7786 				 * that we just shrunk, to give
7787 				 * the system time to adjust.
7788 				 */
7789 				cur = (zc_grow_level() + zc_shrink_level()) / 2;
7790 				z->z_depot_size = size - 1;
7791 				z->z_depot_cleanup = true;
7792 			}
7793 		} else if (!z->z_nocaching && !z->exhaustible && zc_auto &&
7794 		    old >= zc_auto && cur >= zc_auto) {
7795 			needs_caching = true;
7796 		}
7797 
7798 		z->z_recirc_cont_wma = cur;
7799 		z->z_recirc_cont_cur = 0;
7800 
7801 		if (!needs_trim && zone_trim_needed(z)) {
7802 			needs_trim = true;
7803 		}
7804 
7805 		zone_unlock(z);
7806 
7807 		if (needs_caching) {
7808 			zone_enable_caching(z);
7809 		}
7810 	}
7811 
7812 	if (needs_trim) {
7813 		thread_call_enter(&zone_trim_callout);
7814 	}
7815 }
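
/*
 * Note: z_recirc_cont_wma is an exponential moving average in Z_WMA_UNIT
 * fixed point: new = (3 * old + cur) / 4. For example, old = 400 with a
 * fresh sample of 800 moves the average to 500, so a single noisy period
 * only shifts the estimate by a quarter of the difference.
 */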
7816 
7817 #endif /* !ZALLOC_TEST */
7818 #pragma mark vm integration, MIG routines
7819 #if !ZALLOC_TEST
7820 
7821 extern unsigned int stack_total;
7822 #if defined (__x86_64__)
7823 extern unsigned int inuse_ptepages_count;
7824 #endif
7825 
7826 static const char *
7827 panic_print_get_typename(kalloc_type_views_t cur, kalloc_type_views_t *next,
7828     bool is_kt_var)
7829 {
7830 	if (is_kt_var) {
7831 		next->ktv_var = (kalloc_type_var_view_t) cur.ktv_var->kt_next;
7832 		return cur.ktv_var->kt_name;
7833 	} else {
7834 		next->ktv_fixed = (kalloc_type_view_t) cur.ktv_fixed->kt_zv.zv_next;
7835 		return cur.ktv_fixed->kt_zv.zv_name;
7836 	}
7837 }
7838 
7839 static void
7840 panic_print_types_in_zone(zone_t z, const char* debug_str)
7841 {
7842 	kalloc_type_views_t kt_cur = {};
7843 	const char *prev_type = "";
7844 	size_t skip_over_site = sizeof("site.") - 1;
7845 	zone_security_flags_t zsflags = zone_security_config(z);
7846 	bool is_kt_var = false;
7847 
7848 	if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
7849 		uint32_t heap_id = KT_VAR_PTR_HEAP + ((zone_index(z) -
7850 		    kalloc_type_heap_array[KT_VAR_PTR_HEAP].kh_zstart) / KHEAP_NUM_ZONES);
7851 		kt_cur.ktv_var = kalloc_type_heap_array[heap_id].kt_views;
7852 		is_kt_var = true;
7853 	} else {
7854 		kt_cur.ktv_fixed = (kalloc_type_view_t) z->z_views;
7855 	}
7856 
7857 	paniclog_append_noflush("kalloc %s in zone, %s (%s):\n",
7858 	    is_kt_var? "type arrays" : "types", debug_str, z->z_name);
7859 
7860 	while (kt_cur.ktv_fixed) {
7861 		kalloc_type_views_t kt_next = {};
7862 		const char *typename = panic_print_get_typename(kt_cur, &kt_next,
7863 		    is_kt_var) + skip_over_site;
7864 		if (strcmp(typename, prev_type) != 0) {
7865 			paniclog_append_noflush("\t%-50s\n", typename);
7866 			prev_type = typename;
7867 		}
7868 		kt_cur = kt_next;
7869 	}
7870 	paniclog_append_noflush("\n");
7871 }
7872 
7873 static void
7874 panic_display_kalloc_types(void)
7875 {
7876 	if (kalloc_type_src_zone) {
7877 		panic_print_types_in_zone(kalloc_type_src_zone, "addr belongs to");
7878 	}
7879 	if (kalloc_type_dst_zone) {
7880 		panic_print_types_in_zone(kalloc_type_dst_zone,
7881 		    "addr is being freed to");
7882 	}
7883 }
7884 
7885 static void
7886 zone_find_n_largest(const uint32_t n, zone_t *largest_zones,
7887     uint64_t *zone_size)
7888 {
7889 	zone_index_foreach(zid) {
7890 		zone_t z = &zone_array[zid];
7891 		vm_offset_t size = zone_size_wired(z);
7892 
7893 		if (zid == ZONE_ID_VM_PAGES) {
7894 			continue;
7895 		}
7896 		for (uint32_t i = 0; i < n; i++) {
7897 			if (size > zone_size[i]) {
7898 				largest_zones[i] = z;
7899 				zone_size[i] = size;
7900 				break;
7901 			}
7902 		}
7903 	}
7904 }
7905 
7906 #define NUM_LARGEST_ZONES 5
7907 static void
7908 panic_display_largest_zones(void)
7909 {
7910 	zone_t largest_zones[NUM_LARGEST_ZONES]  = { NULL };
7911 	uint64_t largest_size[NUM_LARGEST_ZONES] = { 0 };
7912 
7913 	zone_find_n_largest(NUM_LARGEST_ZONES, (zone_t *) &largest_zones,
7914 	    (uint64_t *) &largest_size);
7915 
7916 	paniclog_append_noflush("Largest zones:\n%-28s %10s %10s\n",
7917 	    "Zone Name", "Cur Size", "Free Size");
7918 	for (uint32_t i = 0; i < NUM_LARGEST_ZONES; i++) {
7919 		zone_t z = largest_zones[i];
7920 		paniclog_append_noflush("%-8s%-20s %9u%c %9u%c\n",
7921 		    zone_heap_name(z), z->z_name,
7922 		    mach_vm_size_pretty(largest_size[i]),
7923 		    mach_vm_size_unit(largest_size[i]),
7924 		    mach_vm_size_pretty(zone_size_free(z)),
7925 		    mach_vm_size_unit(zone_size_free(z)));
7926 	}
7927 }
7928 
7929 static void
7930 panic_display_zprint(void)
7931 {
7932 	panic_display_largest_zones();
7933 	paniclog_append_noflush("%-20s %10lu\n", "Kernel Stacks",
7934 	    (uintptr_t)(kernel_stack_size * stack_total));
7935 #if defined (__x86_64__)
7936 	paniclog_append_noflush("%-20s %10lu\n", "PageTables",
7937 	    (uintptr_t)ptoa(inuse_ptepages_count));
7938 #endif
7939 	paniclog_append_noflush("%-20s %10lu\n", "Kalloc.Large",
7940 	    (uintptr_t)kalloc_large_total);
7941 
7942 	if (panic_kext_memory_info) {
7943 		mach_memory_info_t *mem_info = panic_kext_memory_info;
7944 
7945 		paniclog_append_noflush("\n%-5s %10s\n", "Kmod", "Size");
7946 		for (uint32_t i = 0; i < panic_kext_memory_size / sizeof(mem_info[0]); i++) {
7947 			if ((mem_info[i].flags & VM_KERN_SITE_TYPE) != VM_KERN_SITE_KMOD) {
7948 				continue;
7949 			}
7950 			if (mem_info[i].size > (1024 * 1024)) {
7951 				paniclog_append_noflush("%-5lld %10lld\n",
7952 				    mem_info[i].site, mem_info[i].size);
7953 			}
7954 		}
7955 	}
7956 }
7957 
7958 static void
7959 panic_display_zone_info(void)
7960 {
7961 	paniclog_append_noflush("Zone info:\n");
7962 	paniclog_append_noflush("  Zone map: %p - %p\n",
7963 	    (void *)zone_info.zi_map_range.min_address,
7964 	    (void *)zone_info.zi_map_range.max_address);
7965 #if CONFIG_PROB_GZALLOC
7966 	if (pgz_submap) {
7967 		paniclog_append_noflush("  . PGZ   : %p - %p\n",
7968 		    (void *)pgz_submap->min_offset,
7969 		    (void *)pgz_submap->max_offset);
7970 	}
7971 #endif /* CONFIG_PROB_GZALLOC */
7972 	for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
7973 		vm_map_t map = zone_submaps[i];
7974 
7975 		if (map == VM_MAP_NULL) {
7976 			continue;
7977 		}
7978 		paniclog_append_noflush("  . %-6s: %p - %p\n",
7979 		    zone_submaps_names[i],
7980 		    (void *)map->min_offset,
7981 		    (void *)map->max_offset);
7982 	}
7983 	paniclog_append_noflush("  Metadata: %p - %p\n"
7984 	    "  Bitmaps : %p - %p\n"
7985 	    "  Extra   : %p - %p\n"
7986 	    "\n",
7987 	    (void *)zone_info.zi_meta_range.min_address,
7988 	    (void *)zone_info.zi_meta_range.max_address,
7989 	    (void *)zone_info.zi_bits_range.min_address,
7990 	    (void *)zone_info.zi_bits_range.max_address,
7991 	    (void *)zone_info.zi_xtra_range.min_address,
7992 	    (void *)zone_info.zi_xtra_range.max_address);
7993 }
7994 
7995 static void
7996 panic_display_zone_fault(vm_offset_t addr)
7997 {
7998 	struct zone_page_metadata meta = { };
7999 	vm_map_t map = VM_MAP_NULL;
8000 	vm_offset_t oob_offs = 0, size = 0;
8001 	int map_idx = -1;
8002 	zone_t z = NULL;
8003 	const char *kind = "wild deref";
8004 	bool oob = false;
8005 
8006 	/*
8007 	 * First: look if we bumped into guard pages between submaps
8008 	 */
8009 	for (int i = 0; i < Z_SUBMAP_IDX_COUNT; i++) {
8010 		map = zone_submaps[i];
8011 		if (map == VM_MAP_NULL) {
8012 			continue;
8013 		}
8014 
8015 		if (addr >= map->min_offset && addr < map->max_offset) {
8016 			map_idx = i;
8017 			break;
8018 		}
8019 	}
8020 
8021 	if (map_idx == -1) {
8022 		/* this really shouldn't happen, submaps are back to back */
8023 		return;
8024 	}
8025 
8026 	paniclog_append_noflush("Probabilistic GZAlloc Report:\n");
8027 
8028 	/*
8029 	 * Second: look if there's just no metadata at all
8030 	 */
8031 	if (ml_nofault_copy((vm_offset_t)zone_meta_from_addr(addr),
8032 	    (vm_offset_t)&meta, sizeof(meta)) != sizeof(meta) ||
8033 	    meta.zm_index == 0 || meta.zm_index >= MAX_ZONES ||
8034 	    zone_array[meta.zm_index].z_self == NULL) {
8035 		paniclog_append_noflush("  Zone    : <unknown>\n");
8036 		kind = "wild deref, missing or invalid metadata";
8037 	} else {
8038 		z = &zone_array[meta.zm_index];
8039 		paniclog_append_noflush("  Zone    : %s%s\n",
8040 		    zone_heap_name(z), zone_name(z));
8041 		if (meta.zm_chunk_len == ZM_PGZ_GUARD) {
8042 			kind = "out-of-bounds (high confidence)";
8043 			oob = true;
8044 			size = zone_element_size((void *)addr,
8045 			    &z, false, &oob_offs);
8046 		} else {
8047 			kind = "use-after-free (medium confidence)";
8048 		}
8049 	}
8050 
8051 	paniclog_append_noflush("  Address : %p\n", (void *)addr);
8052 	if (oob) {
8053 		paniclog_append_noflush("  Element : [%p, %p) of size %d\n",
8054 		    (void *)(trunc_page(addr) - (size - oob_offs)),
8055 		    (void *)trunc_page(addr), (uint32_t)(size - oob_offs));
8056 	}
8057 	paniclog_append_noflush("  Submap  : %s [%p; %p)\n",
8058 	    zone_submaps_names[map_idx],
8059 	    (void *)map->min_offset, (void *)map->max_offset);
8060 	paniclog_append_noflush("  Kind    : %s\n", kind);
8061 	if (oob) {
8062 		paniclog_append_noflush("  Access  : %d byte(s) past\n",
8063 		    (uint32_t)(addr & PAGE_MASK) + 1);
8064 	}
8065 	paniclog_append_noflush("  Metadata: zid:%d inl:%d cl:0x%x "
8066 	    "0x%04x 0x%08x 0x%08x 0x%08x\n",
8067 	    meta.zm_index, meta.zm_inline_bitmap, meta.zm_chunk_len,
8068 	    meta.zm_alloc_size, meta.zm_bitmap,
8069 	    meta.zm_page_next.packed_address,
8070 	    meta.zm_page_prev.packed_address);
8071 	paniclog_append_noflush("\n");
8072 }
8073 
8074 void
8075 panic_display_zalloc(void)
8076 {
8077 	bool keepsyms = false;
8078 
8079 	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
8080 
8081 	panic_display_zone_info();
8082 
8083 	if (panic_fault_address) {
8084 #if CONFIG_PROB_GZALLOC
8085 		if (pgz_owned(panic_fault_address)) {
8086 			panic_display_pgz_uaf_info(keepsyms, panic_fault_address);
8087 		} else
8088 #endif /* CONFIG_PROB_GZALLOC */
8089 		if (zone_maps_owned(panic_fault_address, 1)) {
8090 			panic_display_zone_fault(panic_fault_address);
8091 		}
8092 	}
8093 
8094 	if (panic_include_zprint) {
8095 		panic_display_zprint();
8096 	} else if (zone_map_nearing_threshold(ZONE_MAP_EXHAUSTION_PRINT_PANIC)) {
8097 		panic_display_largest_zones();
8098 	}
8099 #if CONFIG_ZLEAKS
8100 	if (zleak_active) {
8101 		panic_display_zleaks(keepsyms);
8102 	}
8103 #endif
8104 	if (panic_include_kalloc_types) {
8105 		panic_display_kalloc_types();
8106 	}
8107 }
8108 
8109 /*
8110  * Creates a vm_map_copy_t to return to the caller of mach_* MIG calls
8111  * requesting zone information.
8112  * Frees unused pages towards the end of the region, and zeroes out unused
8113  * space on the last page.
8114  */
8115 static vm_map_copy_t
8116 create_vm_map_copy(
8117 	vm_offset_t             start_addr,
8118 	vm_size_t               total_size,
8119 	vm_size_t               used_size)
8120 {
8121 	kern_return_t   kr;
8122 	vm_offset_t             end_addr;
8123 	vm_size_t               free_size;
8124 	vm_map_copy_t   copy;
8125 
8126 	if (used_size != total_size) {
8127 		end_addr = start_addr + used_size;
8128 		free_size = total_size - (round_page(end_addr) - start_addr);
8129 
8130 		if (free_size >= PAGE_SIZE) {
8131 			kmem_free(ipc_kernel_map,
8132 			    round_page(end_addr), free_size);
8133 		}
8134 		bzero((char *) end_addr, round_page(end_addr) - end_addr);
8135 	}
8136 
8137 	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)start_addr,
8138 	    (vm_map_size_t)used_size, TRUE, &copy);
8139 	assert(kr == KERN_SUCCESS);
8140 
8141 	return copy;
8142 }
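
/*
 * Note: only whole unused pages past round_page(end_addr) are returned
 * with kmem_free(); the tail of the last used page is bzero()ed so that
 * no stale kernel data leaks into the copy handed back to userspace.
 */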
8143 
8144 static boolean_t
8145 get_zone_info(
8146 	zone_t                   z,
8147 	mach_zone_name_t        *zn,
8148 	mach_zone_info_t        *zi)
8149 {
8150 	struct zone zcopy;
8151 	vm_size_t cached = 0;
8152 
8153 	assert(z != ZONE_NULL);
8154 	zone_lock(z);
8155 	if (!z->z_self) {
8156 		zone_unlock(z);
8157 		return FALSE;
8158 	}
8159 	zcopy = *z;
8160 	if (z->z_pcpu_cache) {
8161 		zpercpu_foreach(zc, z->z_pcpu_cache) {
8162 			cached += zc->zc_alloc_cur + zc->zc_free_cur;
8163 			cached += zc->zc_depot.zd_full * zc_mag_size();
8164 		}
8165 	}
8166 	zone_unlock(z);
8167 
8168 	if (zn != NULL) {
8169 		/*
8170 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8171 		 */
8172 		char temp_zone_name[MAX_ZONE_NAME] = "";
8173 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8174 		    zone_heap_name(z), z->z_name);
8175 
8176 		/* assuming here the name data is static */
8177 		(void) __nosan_strlcpy(zn->mzn_name, temp_zone_name,
8178 		    strlen(temp_zone_name) + 1);
8179 	}
8180 
8181 	if (zi != NULL) {
8182 		*zi = (mach_zone_info_t) {
8183 			.mzi_count = zone_count_allocated(&zcopy) - cached,
8184 			.mzi_cur_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_cur)),
8185 			// max_size for zprint is now high-watermark of pages used
8186 			.mzi_max_size = ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_hwm)),
8187 			.mzi_elem_size = zone_scale_for_percpu(&zcopy, zcopy.z_elem_size),
8188 			.mzi_alloc_size = ptoa_64(zcopy.z_chunk_pages),
8189 			.mzi_exhaustible = (uint64_t)zcopy.exhaustible,
8190 		};
8191 		if (zcopy.z_chunk_pages == 0) {
8192 			/* this is a zcache */
8193 			zi->mzi_cur_size = zcopy.z_elems_avail * zcopy.z_elem_size;
8194 		}
8195 		zpercpu_foreach(zs, zcopy.z_stats) {
8196 			zi->mzi_sum_size += zs->zs_mem_allocated;
8197 		}
8198 		if (zcopy.collectable) {
8199 			SET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable,
8200 			    ptoa_64(zone_scale_for_percpu(&zcopy, zcopy.z_wired_empty)));
8201 			SET_MZI_COLLECTABLE_FLAG(zi->mzi_collectable, TRUE);
8202 		}
8203 	}
8204 
8205 	return TRUE;
8206 }
8207 
8208 /* mach_memory_info entitlement */
8209 #define MEMORYINFO_ENTITLEMENT "com.apple.private.memoryinfo"
8210 
8211 /* macro needed to rate-limit mach_memory_info */
8212 #define NSEC_DAY (NSEC_PER_SEC * 60 * 60 * 24)
8213 
8214 /* declarations necessary to call kauth_cred_issuser() */
8215 struct ucred;
8216 extern int kauth_cred_issuser(struct ucred *);
8217 extern struct ucred *kauth_cred_get(void);
8218 
8219 static kern_return_t
8220 mach_memory_info_internal(
8221 	host_t                  host,
8222 	mach_zone_name_array_t  *namesp,
8223 	mach_msg_type_number_t  *namesCntp,
8224 	mach_zone_info_array_t  *infop,
8225 	mach_msg_type_number_t  *infoCntp,
8226 	mach_memory_info_array_t *memoryInfop,
8227 	mach_msg_type_number_t   *memoryInfoCntp,
8228 	bool                     redact_info);
8229 
8230 static kern_return_t
8231 mach_memory_info_security_check(bool redact_info)
8232 {
8233 	/* If not root, only allow redacted calls. */
8234 	if (!kauth_cred_issuser(kauth_cred_get()) && !redact_info) {
8235 		return KERN_NO_ACCESS;
8236 	}
8237 
8238 	/* If does not have the memory entitlement, fail. */
8239 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8240 	if (!IOTaskHasEntitlement(current_task(), MEMORYINFO_ENTITLEMENT)) {
8241 		return KERN_DENIED;
8242 	}
8243 
8244 	/*
8245 	 * On release non-Mac ARM devices, allow mach_memory_info
8246 	 * to be called twice per day per boot. memorymaintenanced
8247 	 * calls it once per day, which leaves room for a sysdiagnose.
8248 	 * Allow redacted version to be called without rate limit.
8249 	 */
8250 
8251 	if (!redact_info) {
8252 		static uint64_t first_call = 0, second_call = 0;
8253 		uint64_t now = 0;
8254 		absolutetime_to_nanoseconds(ml_get_timebase(), &now);
8255 
8256 		if (!first_call) {
8257 			first_call = now;
8258 		} else if (!second_call) {
8259 			second_call = now;
8260 		} else if (first_call + NSEC_DAY > now) {
8261 			return KERN_DENIED;
8262 		} else if (first_call + NSEC_DAY < now) {
8263 			first_call = now;
8264 			second_call = 0;
8265 		}
8266 	}
8267 #endif
8268 
8269 	return KERN_SUCCESS;
8270 }
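
/*
 * Worked example of the rate limit above: the first two unredacted calls
 * after boot record first_call and second_call. A third call within 24
 * hours of first_call is denied; once more than a day has elapsed, the
 * window resets, with the current call becoming the new first_call.
 */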
8271 
8272 kern_return_t
8273 mach_zone_info(
8274 	mach_port_t             host_port,
8275 	mach_zone_name_array_t  *namesp,
8276 	mach_msg_type_number_t  *namesCntp,
8277 	mach_zone_info_array_t  *infop,
8278 	mach_msg_type_number_t  *infoCntp)
8279 {
8280 	return mach_memory_info(host_port, namesp, namesCntp, infop, infoCntp, NULL, NULL);
8281 }
8282 
8283 kern_return_t
8284 mach_memory_info(
8285 	mach_port_t             host_port,
8286 	mach_zone_name_array_t  *namesp,
8287 	mach_msg_type_number_t  *namesCntp,
8288 	mach_zone_info_array_t  *infop,
8289 	mach_msg_type_number_t  *infoCntp,
8290 	mach_memory_info_array_t *memoryInfop,
8291 	mach_msg_type_number_t   *memoryInfoCntp)
8292 {
8293 	bool redact_info = false;
8294 	host_t host = HOST_NULL;
8295 
8296 	host = convert_port_to_host_priv(host_port);
8297 	if (host == HOST_NULL) {
8298 		redact_info = true;
8299 		host = convert_port_to_host(host_port);
8300 	}
8301 
8302 	return mach_memory_info_internal(host, namesp, namesCntp, infop, infoCntp, memoryInfop, memoryInfoCntp, redact_info);
8303 }
8304 
8305 static void
8306 zone_info_redact(mach_zone_info_t *zi)
8307 {
8308 	zi->mzi_cur_size = 0;
8309 	zi->mzi_max_size = 0;
8310 	zi->mzi_alloc_size = 0;
8311 	zi->mzi_sum_size = 0;
8312 	zi->mzi_collectable = 0;
8313 }
8314 
8315 static bool
8316 zone_info_needs_to_be_coalesced(int zone_index)
8317 {
8318 	zone_security_flags_t zsflags = zone_security_array[zone_index];
8319 	if (zsflags.z_kalloc_type || zsflags.z_kheap_id == KHEAP_ID_DEFAULT ||
8320 	    zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
8321 		return true;
8322 	}
8323 	return false;
8324 }
8325 
8326 static bool
8327 zone_info_find_coalesce_zone(
8328 	mach_zone_info_t *zi,
8329 	mach_zone_info_t *info,
8330 	int              *coalesce,
8331 	int              coalesce_count,
8332 	int              *coalesce_index)
8333 {
8334 	for (int i = 0; i < coalesce_count; i++) {
8335 		if (zi->mzi_elem_size == info[coalesce[i]].mzi_elem_size) {
8336 			*coalesce_index = coalesce[i];
8337 			return true;
8338 		}
8339 	}
8340 
8341 	return false;
8342 }
8343 
8344 static void
8345 zone_info_coalesce(
8346 	mach_zone_info_t *info,
8347 	int coalesce_index,
8348 	mach_zone_info_t *zi)
8349 {
8350 	info[coalesce_index].mzi_count += zi->mzi_count;
8351 }
8352 
8353 static kern_return_t
8354 mach_memory_info_internal(
8355 	host_t                  host,
8356 	mach_zone_name_array_t  *namesp,
8357 	mach_msg_type_number_t  *namesCntp,
8358 	mach_zone_info_array_t  *infop,
8359 	mach_msg_type_number_t  *infoCntp,
8360 	mach_memory_info_array_t *memoryInfop,
8361 	mach_msg_type_number_t   *memoryInfoCntp,
8362 	bool                     redact_info)
8363 {
8364 	mach_zone_name_t        *names;
8365 	vm_offset_t             names_addr;
8366 	vm_size_t               names_size;
8367 
8368 	mach_zone_info_t        *info;
8369 	vm_offset_t             info_addr;
8370 	vm_size_t               info_size;
8371 
8372 	int                     *coalesce;
8373 	vm_offset_t             coalesce_addr;
8374 	vm_size_t               coalesce_size;
8375 	int                     coalesce_count = 0;
8376 
8377 	mach_memory_info_t      *memory_info;
8378 	vm_offset_t             memory_info_addr;
8379 	vm_size_t               memory_info_size;
8380 	vm_size_t               memory_info_vmsize;
8381 	unsigned int            num_info;
8382 
8383 	unsigned int            max_zones, used_zones, i;
8384 	mach_zone_name_t        *zn;
8385 	mach_zone_info_t        *zi;
8386 	kern_return_t           kr;
8387 
8388 	uint64_t                zones_collectable_bytes = 0;
8389 
8390 	if (host == HOST_NULL) {
8391 		return KERN_INVALID_HOST;
8392 	}
8393 
8394 	kr = mach_memory_info_security_check(redact_info);
8395 	if (kr != KERN_SUCCESS) {
8396 		return kr;
8397 	}
8398 
8399 	/*
8400 	 *	We assume that zones aren't freed once allocated.
8401 	 *	We won't pick up any zones that are allocated later.
8402 	 */
8403 
8404 	max_zones = os_atomic_load(&num_zones, relaxed);
8405 
8406 	names_size = round_page(max_zones * sizeof *names);
8407 	kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8408 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8409 	if (kr != KERN_SUCCESS) {
8410 		return kr;
8411 	}
8412 	names = (mach_zone_name_t *) names_addr;
8413 
8414 	info_size = round_page(max_zones * sizeof *info);
8415 	kr = kmem_alloc(ipc_kernel_map, &info_addr, info_size,
8416 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8417 	if (kr != KERN_SUCCESS) {
8418 		kmem_free(ipc_kernel_map,
8419 		    names_addr, names_size);
8420 		return kr;
8421 	}
8422 	info = (mach_zone_info_t *) info_addr;
8423 
8424 	if (redact_info) {
8425 		coalesce_size = round_page(max_zones * sizeof *coalesce);
8426 		kr = kmem_alloc(ipc_kernel_map, &coalesce_addr, coalesce_size,
8427 		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8428 		if (kr != KERN_SUCCESS) {
8429 			kmem_free(ipc_kernel_map,
8430 			    names_addr, names_size);
8431 			kmem_free(ipc_kernel_map,
8432 			    info_addr, info_size);
8433 			return kr;
8434 		}
8435 		coalesce = (int *)coalesce_addr;
8436 	}
8437 
8438 	zn = &names[0];
8439 	zi = &info[0];
8440 
8441 	used_zones = 0;
8442 	for (i = 0; i < max_zones; i++) {
8443 		if (!get_zone_info(&(zone_array[i]), zn, zi)) {
8444 			continue;
8445 		}
8446 
8447 		if (!redact_info) {
8448 			zones_collectable_bytes += GET_MZI_COLLECTABLE_BYTES(zi->mzi_collectable);
8449 			zn++;
8450 			zi++;
8451 			used_zones++;
8452 			continue;
8453 		}
8454 
8455 		zone_info_redact(zi);
8456 		if (!zone_info_needs_to_be_coalesced(i)) {
8457 			zn++;
8458 			zi++;
8459 			used_zones++;
8460 			continue;
8461 		}
8462 
8463 		int coalesce_index;
8464 		bool found_coalesce_zone = zone_info_find_coalesce_zone(zi, info,
8465 		    coalesce, coalesce_count, &coalesce_index);
8466 
8467 		/* Didn't find a zone to coalesce */
8468 		if (!found_coalesce_zone) {
8469 			/* Updates the zone name */
8470 			__nosan_bzero(zn->mzn_name, MAX_ZONE_NAME);
8471 			snprintf(zn->mzn_name, MAX_ZONE_NAME, "kalloc.%d",
8472 			    (int)zi->mzi_elem_size);
8473 
8474 			coalesce[coalesce_count] = used_zones;
8475 			coalesce_count++;
8476 			zn++;
8477 			zi++;
8478 			used_zones++;
8479 			continue;
8480 		}
8481 
8482 		zone_info_coalesce(info, coalesce_index, zi);
8483 	}
8484 
8485 	if (redact_info) {
8486 		kmem_free(ipc_kernel_map, coalesce_addr, coalesce_size);
8487 	}
8488 
8489 	*namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, used_zones * sizeof *names);
8490 	*namesCntp = used_zones;
8491 
8492 	*infop = (mach_zone_info_t *) create_vm_map_copy(info_addr, info_size, used_zones * sizeof *info);
8493 	*infoCntp = used_zones;
8494 
8495 	num_info = 0;
8496 	memory_info_addr = 0;
8497 
8498 	if (memoryInfop && memoryInfoCntp) {
8499 		vm_map_copy_t           copy;
8500 		num_info = vm_page_diagnose_estimate();
8501 		memory_info_size = num_info * sizeof(*memory_info);
8502 		memory_info_vmsize = round_page(memory_info_size);
8503 		kr = kmem_alloc(ipc_kernel_map, &memory_info_addr, memory_info_vmsize,
8504 		    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8505 		if (kr != KERN_SUCCESS) {
8506 			return kr;
8507 		}
8508 
8509 		kr = vm_map_wire_kernel(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize,
8510 		    VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
8511 		assert(kr == KERN_SUCCESS);
8512 
8513 		memory_info = (mach_memory_info_t *) memory_info_addr;
8514 		vm_page_diagnose(memory_info, num_info, zones_collectable_bytes, redact_info);
8515 
8516 		kr = vm_map_unwire(ipc_kernel_map, memory_info_addr, memory_info_addr + memory_info_vmsize, FALSE);
8517 		assert(kr == KERN_SUCCESS);
8518 
8519 		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)memory_info_addr,
8520 		    (vm_map_size_t)memory_info_size, TRUE, &copy);
8521 		assert(kr == KERN_SUCCESS);
8522 
8523 		*memoryInfop = (mach_memory_info_t *) copy;
8524 		*memoryInfoCntp = num_info;
8525 	}
8526 
8527 	return KERN_SUCCESS;
8528 }
8529 
8530 kern_return_t
8531 mach_zone_info_for_zone(
8532 	host_priv_t                     host,
8533 	mach_zone_name_t        name,
8534 	mach_zone_info_t        *infop)
8535 {
8536 	zone_t zone_ptr;
8537 
8538 	if (host == HOST_NULL) {
8539 		return KERN_INVALID_HOST;
8540 	}
8541 
8542 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8543 	if (!PE_i_can_has_debugger(NULL)) {
8544 		return KERN_INVALID_HOST;
8545 	}
8546 #endif
8547 
8548 	if (infop == NULL) {
8549 		return KERN_INVALID_ARGUMENT;
8550 	}
8551 
8552 	zone_ptr = ZONE_NULL;
8553 	zone_foreach(z) {
8554 		/*
8555 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8556 		 */
8557 		char temp_zone_name[MAX_ZONE_NAME] = "";
8558 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8559 		    zone_heap_name(z), z->z_name);
8560 
8561 		/* Find the requested zone by name */
8562 		if (track_this_zone(temp_zone_name, name.mzn_name)) {
8563 			zone_ptr = z;
8564 			break;
8565 		}
8566 	}
8567 
8568 	/* No zones found with the requested zone name */
8569 	if (zone_ptr == ZONE_NULL) {
8570 		return KERN_INVALID_ARGUMENT;
8571 	}
8572 
8573 	if (get_zone_info(zone_ptr, NULL, infop)) {
8574 		return KERN_SUCCESS;
8575 	}
8576 	return KERN_FAILURE;
8577 }
8578 
8579 kern_return_t
8580 mach_zone_info_for_largest_zone(
8581 	host_priv_t                     host,
8582 	mach_zone_name_t        *namep,
8583 	mach_zone_info_t        *infop)
8584 {
8585 	if (host == HOST_NULL) {
8586 		return KERN_INVALID_HOST;
8587 	}
8588 
8589 #if CONFIG_DEBUGGER_FOR_ZONE_INFO
8590 	if (!PE_i_can_has_debugger(NULL)) {
8591 		return KERN_INVALID_HOST;
8592 	}
8593 #endif
8594 
8595 	if (namep == NULL || infop == NULL) {
8596 		return KERN_INVALID_ARGUMENT;
8597 	}
8598 
8599 	if (get_zone_info(zone_find_largest(NULL), namep, infop)) {
8600 		return KERN_SUCCESS;
8601 	}
8602 	return KERN_FAILURE;
8603 }
8604 
8605 uint64_t
8606 get_zones_collectable_bytes(void)
8607 {
8608 	uint64_t zones_collectable_bytes = 0;
8609 	mach_zone_info_t zi;
8610 
8611 	zone_foreach(z) {
8612 		if (get_zone_info(z, NULL, &zi)) {
8613 			zones_collectable_bytes +=
8614 			    GET_MZI_COLLECTABLE_BYTES(zi.mzi_collectable);
8615 		}
8616 	}
8617 
8618 	return zones_collectable_bytes;
8619 }
8620 
8621 kern_return_t
8622 mach_zone_get_zlog_zones(
8623 	host_priv_t                             host,
8624 	mach_zone_name_array_t  *namesp,
8625 	mach_msg_type_number_t  *namesCntp)
8626 {
8627 #if ZALLOC_ENABLE_LOGGING
8628 	unsigned int max_zones, logged_zones, i;
8629 	kern_return_t kr;
8630 	zone_t zone_ptr;
8631 	mach_zone_name_t *names;
8632 	vm_offset_t names_addr;
8633 	vm_size_t names_size;
8634 
8635 	if (host == HOST_NULL) {
8636 		return KERN_INVALID_HOST;
8637 	}
8638 
8639 	if (namesp == NULL || namesCntp == NULL) {
8640 		return KERN_INVALID_ARGUMENT;
8641 	}
8642 
8643 	max_zones = os_atomic_load(&num_zones, relaxed);
8644 
8645 	names_size = round_page(max_zones * sizeof *names);
8646 	kr = kmem_alloc(ipc_kernel_map, &names_addr, names_size,
8647 	    KMA_PAGEABLE | KMA_DATA, VM_KERN_MEMORY_IPC);
8648 	if (kr != KERN_SUCCESS) {
8649 		return kr;
8650 	}
8651 	names = (mach_zone_name_t *) names_addr;
8652 
8653 	zone_ptr = ZONE_NULL;
8654 	logged_zones = 0;
8655 	for (i = 0; i < max_zones; i++) {
8656 		zone_t z = &(zone_array[i]);
8657 		assert(z != ZONE_NULL);
8658 
8659 		/* Copy out the zone name if zone logging is enabled */
8660 		if (z->z_btlog) {
8661 			get_zone_info(z, &names[logged_zones], NULL);
8662 			logged_zones++;
8663 		}
8664 	}
8665 
8666 	*namesp = (mach_zone_name_t *) create_vm_map_copy(names_addr, names_size, logged_zones * sizeof *names);
8667 	*namesCntp = logged_zones;
8668 
8669 	return KERN_SUCCESS;
8670 
8671 #else /* ZALLOC_ENABLE_LOGGING */
8672 #pragma unused(host, namesp, namesCntp)
8673 	return KERN_FAILURE;
8674 #endif /* ZALLOC_ENABLE_LOGGING */
8675 }
8676 
8677 kern_return_t
8678 mach_zone_get_btlog_records(
8679 	host_priv_t             host,
8680 	mach_zone_name_t        name,
8681 	zone_btrecord_array_t  *recsp,
8682 	mach_msg_type_number_t *numrecs)
8683 {
8684 #if ZALLOC_ENABLE_LOGGING
8685 	zone_btrecord_t *recs;
8686 	kern_return_t    kr;
8687 	vm_address_t     addr;
8688 	vm_size_t        size;
8689 	zone_t           zone_ptr;
8690 	vm_map_copy_t    copy;
8691 
8692 	if (host == HOST_NULL) {
8693 		return KERN_INVALID_HOST;
8694 	}
8695 
8696 	if (recsp == NULL || numrecs == NULL) {
8697 		return KERN_INVALID_ARGUMENT;
8698 	}
8699 
8700 	zone_ptr = ZONE_NULL;
8701 	zone_foreach(z) {
8702 		/*
8703 		 * Append kalloc heap name to zone name (if zone is used by kalloc)
8704 		 */
8705 		char temp_zone_name[MAX_ZONE_NAME] = "";
8706 		snprintf(temp_zone_name, MAX_ZONE_NAME, "%s%s",
8707 		    zone_heap_name(z), z->z_name);
8708 
8709 		/* Find the requested zone by name */
8710 		if (track_this_zone(temp_zone_name, name.mzn_name)) {
8711 			zone_ptr = z;
8712 			break;
8713 		}
8714 	}
8715 
8716 	/* No zones found with the requested zone name */
8717 	if (zone_ptr == ZONE_NULL) {
8718 		return KERN_INVALID_ARGUMENT;
8719 	}
8720 
8721 	/* Logging not turned on for the requested zone */
8722 	if (!zone_ptr->z_btlog) {
8723 		return KERN_FAILURE;
8724 	}
8725 
8726 	kr = btlog_get_records(zone_ptr->z_btlog, &recs, numrecs);
8727 	if (kr != KERN_SUCCESS) {
8728 		return kr;
8729 	}
8730 
8731 	addr = (vm_address_t)recs;
8732 	size = sizeof(zone_btrecord_t) * *numrecs;
8733 
8734 	kr = vm_map_copyin(ipc_kernel_map, addr, size, TRUE, &copy);
8735 	assert(kr == KERN_SUCCESS);
8736 
8737 	*recsp = (zone_btrecord_t *)copy;
8738 	return KERN_SUCCESS;
8739 
8740 #else /* !ZALLOC_ENABLE_LOGGING */
8741 #pragma unused(host, name, recsp, numrecs)
8742 	return KERN_FAILURE;
8743 #endif /* !ZALLOC_ENABLE_LOGGING */
8744 }
8745 
8746 
8747 kern_return_t
8748 mach_zone_force_gc(
8749 	host_t host)
8750 {
8751 	if (host == HOST_NULL) {
8752 		return KERN_INVALID_HOST;
8753 	}
8754 
8755 #if DEBUG || DEVELOPMENT
8756 	extern boolean_t(*volatile consider_buffer_cache_collect)(int);
8757 	/* Callout to buffer cache GC to drop elements in the apfs zones */
8758 	if (consider_buffer_cache_collect != NULL) {
8759 		(void)(*consider_buffer_cache_collect)(0);
8760 	}
8761 	zone_gc(ZONE_GC_DRAIN);
8762 #endif /* DEBUG || DEVELOPMENT */
8763 	return KERN_SUCCESS;
8764 }
8765 
8766 zone_t
8767 zone_find_largest(uint64_t *zone_size)
8768 {
8769 	zone_t    largest_zone  = 0;
8770 	uint64_t  largest_zone_size = 0;
8771 	zone_find_n_largest(1, &largest_zone, &largest_zone_size);
8772 	if (zone_size) {
8773 		*zone_size = largest_zone_size;
8774 	}
8775 	return largest_zone;
8776 }
8777 
8778 void
8779 zone_get_stats(
8780 	zone_t                  zone,
8781 	struct zone_basic_stats *stats)
8782 {
8783 	stats->zbs_avail = zone->z_elems_avail;
8784 
8785 	stats->zbs_alloc_fail = 0;
8786 	zpercpu_foreach(zs, zone->z_stats) {
8787 		stats->zbs_alloc_fail += zs->zs_alloc_fail;
8788 	}
8789 
8790 	stats->zbs_cached = 0;
8791 	if (zone->z_pcpu_cache) {
8792 		zpercpu_foreach(zc, zone->z_pcpu_cache) {
8793 			stats->zbs_cached += zc->zc_alloc_cur +
8794 			    zc->zc_free_cur +
8795 			    zc->zc_depot.zd_full * zc_mag_size();
8796 		}
8797 	}
8798 
8799 	stats->zbs_free = zone_count_free(zone) + stats->zbs_cached;
8800 
8801 	/*
8802 	 * Since we don't take any locks, deal with possible inconsistencies
8803 	 * as the counters may have changed.
8804 	 */
8805 	if (os_sub_overflow(stats->zbs_avail, stats->zbs_free,
8806 	    &stats->zbs_alloc)) {
8807 		stats->zbs_avail = stats->zbs_free;
8808 		stats->zbs_alloc = 0;
8809 	}
8810 }
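
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * how a hypothetical caller could consume zone_get_stats().  The function
 * name, format string and field rendering below are assumptions chosen
 * purely for illustration.
 */
#if 0 /* example only */
static void
zone_print_basic_stats(zone_t z)
{
	struct zone_basic_stats stats;

	zone_get_stats(z, &stats);
	/* zbs_avail >= zbs_free holds thanks to the overflow check above */
	printf("%s%s: %llu allocated, %llu free (%llu cached)\n",
	    zone_heap_name(z), z->z_name,
	    stats.zbs_alloc, stats.zbs_free, stats.zbs_cached);
}
#endif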
8811 
8812 #endif /* !ZALLOC_TEST */
8813 #pragma mark zone creation, configuration, destruction
8814 #if !ZALLOC_TEST
8815 
8816 static zone_t
8817 zone_init_defaults(zone_id_t zid)
8818 {
8819 	zone_t z = &zone_array[zid];
8820 
8821 	z->z_wired_max = ~0u;
8822 	z->collectable = true;
8823 
8824 	hw_lck_ticket_init(&z->z_lock, &zone_locks_grp);
8825 	hw_lck_ticket_init(&z->z_recirc_lock, &zone_locks_grp);
8826 	zone_depot_init(&z->z_recirc);
8827 	return z;
8828 }
8829 
8830 void
8831 zone_set_exhaustible(zone_t zone, vm_size_t nelems)
8832 {
8833 	zone_lock(zone);
8834 	zone->exhaustible = true;
8835 	zone->z_wired_max = zone_alloc_pages_for_nelems(zone, nelems);
8836 	zone_unlock(zone);
8837 }
8838 
8839 void
8840 zone_raise_reserve(union zone_or_view zov, uint16_t min_elements)
8841 {
8842 	zone_t zone = zov.zov_zone;
8843 
8844 	if (zone < zone_array || zone > &zone_array[MAX_ZONES]) {
8845 		zone = zov.zov_view->zv_zone;
8846 	} else {
8847 		zone = zov.zov_zone;
8848 	}
8849 
8850 	os_atomic_max(&zone->z_elems_rsv, min_elements, relaxed);
8851 }
8852 
8853 /**
8854  * @function zone_create_find
8855  *
8856  * @abstract
8857  * Finds an unused zone for the given name and element size.
8858  *
8859  * @param name          the zone name
8860  * @param size          the element size (including redzones, ...)
8861  * @param flags         the flags passed to @c zone_create*
8862  * @param zid_inout     the desired zone ID or ZONE_ID_ANY
8863  *
8864  * @returns             a zone to initialize further.
8865  */
8866 static zone_t
8867 zone_create_find(
8868 	const char             *name,
8869 	vm_size_t               size,
8870 	zone_create_flags_t     flags,
8871 	zone_id_t              *zid_inout)
8872 {
8873 	zone_id_t nzones, zid = *zid_inout;
8874 	zone_t z;
8875 
8876 	simple_lock(&all_zones_lock, &zone_locks_grp);
8877 
8878 	nzones = (zone_id_t)os_atomic_load(&num_zones, relaxed);
8879 	assert(num_zones_in_use <= nzones && nzones < MAX_ZONES);
8880 
8881 	if (__improbable(nzones < ZONE_ID__FIRST_DYNAMIC)) {
8882 		/*
8883 		 * The first time around, make sure the reserved zone IDs
8884 		 * have an initialized lock as zone_index_foreach() will
8885 		 * enumerate them.
8886 		 */
8887 		while (nzones < ZONE_ID__FIRST_DYNAMIC) {
8888 			zone_init_defaults(nzones++);
8889 		}
8890 
8891 		os_atomic_store(&num_zones, nzones, release);
8892 	}
8893 
8894 	if (zid != ZONE_ID_ANY) {
8895 		if (zid >= ZONE_ID__FIRST_DYNAMIC) {
8896 			panic("zone_create: invalid desired zone ID %d for %s",
8897 			    zid, name);
8898 		}
8899 		if (flags & ZC_DESTRUCTIBLE) {
8900 			panic("zone_create: ID %d (%s) must be permanent", zid, name);
8901 		}
8902 		if (zone_array[zid].z_self) {
8903 			panic("zone_create: creating zone ID %d (%s) twice", zid, name);
8904 		}
8905 		z = &zone_array[zid];
8906 	} else {
8907 		if (flags & ZC_DESTRUCTIBLE) {
8908 			/*
8909 			 * If possible, find a previously zdestroy'ed zone in the
8910 			 * zone_array that we can reuse.
8911 			 */
8912 			for (int i = bitmap_first(zone_destroyed_bitmap, MAX_ZONES);
8913 			    i >= 0; i = bitmap_next(zone_destroyed_bitmap, i)) {
8914 				z = &zone_array[i];
8915 
8916 				/*
8917 				 * If the zone name and the element size are the
8918 				 * same, we can just reuse the old zone struct.
8919 				 */
8920 				if (strcmp(z->z_name, name) ||
8921 				    zone_elem_outer_size(z) != size) {
8922 					continue;
8923 				}
8924 				bitmap_clear(zone_destroyed_bitmap, i);
8925 				z->z_destroyed = false;
8926 				z->z_self = z;
8927 				zid = (zone_id_t)i;
8928 				goto out;
8929 			}
8930 		}
8931 
8932 		zid = nzones++;
8933 		z = zone_init_defaults(zid);
8934 
8935 		/*
8936 		 * The release barrier pairs with the acquire in
8937 		 * zone_index_foreach() and makes sure that enumeration loops
8938 		 * always see an initialized zone lock.
8939 		 */
8940 		os_atomic_store(&num_zones, nzones, release);
8941 	}
8942 
8943 out:
8944 	num_zones_in_use++;
8945 	simple_unlock(&all_zones_lock);
8946 
8947 	*zid_inout = zid;
8948 	return z;
8949 }
8950 
8951 __abortlike
8952 static void
8953 zone_create_panic(const char *name, const char *f1, const char *f2)
8954 {
8955 	panic("zone_create: creating zone %s: flag %s and %s are incompatible",
8956 	    name, f1, f2);
8957 }
8958 #define zone_create_assert_not_both(name, flags, current_flag, forbidden_flag) \
8959 	if ((flags) & forbidden_flag) { \
8960 	        zone_create_panic(name, #current_flag, #forbidden_flag); \
8961 	}
8962 
8963 /*
8964  * Adjusts the size of the element based on minimum size, alignment
8965  * and kasan redzones
8966  */
8967 static vm_size_t
8968 zone_elem_adjust_size(
8969 	const char             *name __unused,
8970 	vm_size_t               elem_size,
8971 	zone_create_flags_t     flags __unused,
8972 	uint16_t               *redzone __unused)
8973 {
8974 	vm_size_t size;
8975 
8976 	/*
8977 	 * Adjust element size for minimum size and pointer alignment
8978 	 */
8979 	size = (elem_size + ZONE_ALIGN_SIZE - 1) & -ZONE_ALIGN_SIZE;
8980 	if (size < ZONE_MIN_ELEM_SIZE) {
8981 		size = ZONE_MIN_ELEM_SIZE;
8982 	}
8983 
8984 #if KASAN_CLASSIC
8985 	/*
8986 	 * Expand the zone allocation size to include the redzones.
8987 	 *
8988 	 * For page-multiple zones add a full guard page because they
8989 	 * likely require alignment.
8990 	 */
8991 	uint16_t redzone_tmp;
8992 	if (flags & (ZC_KASAN_NOREDZONE | ZC_PERCPU | ZC_OBJ_CACHE)) {
8993 		redzone_tmp = 0;
8994 	} else if ((size & PAGE_MASK) == 0) {
8995 		if (size != PAGE_SIZE && (flags & ZC_ALIGNMENT_REQUIRED)) {
8996 			panic("zone_create: zone %s can't provide more than PAGE_SIZE "
8997 			    "alignment", name);
8998 		}
8999 		redzone_tmp = PAGE_SIZE;
9000 	} else if (flags & ZC_ALIGNMENT_REQUIRED) {
9001 		redzone_tmp = 0;
9002 	} else {
9003 		redzone_tmp = KASAN_GUARD_SIZE;
9004 	}
9005 	size += redzone_tmp;
9006 	if (redzone) {
9007 		*redzone = redzone_tmp;
9008 	}
9009 #endif
9010 	return size;
9011 }
9012 
9013 /*
9014  * Returns the allocation chunk size with the least fragmentation
9015  */
9016 static vm_size_t
9017 zone_get_min_alloc_granule(
9018 	vm_size_t               elem_size,
9019 	zone_create_flags_t     flags)
9020 {
9021 	vm_size_t alloc_granule = PAGE_SIZE;
9022 	if (flags & ZC_PERCPU) {
9023 		alloc_granule = PAGE_SIZE * zpercpu_count();
9024 		if (PAGE_SIZE % elem_size > 256) {
9025 			panic("zone_create: per-cpu zone has too much fragmentation");
9026 		}
9027 	} else if (flags & ZC_READONLY) {
9028 		alloc_granule = PAGE_SIZE;
9029 	} else if ((elem_size & PAGE_MASK) == 0) {
9030 		/* zero fragmentation by definition */
9031 		alloc_granule = elem_size;
9032 	} else if (alloc_granule % elem_size == 0) {
9033 		/* zero fragmentation by definition */
9034 	} else {
9035 		vm_size_t frag = (alloc_granule % elem_size) * 100 / alloc_granule;
9036 		vm_size_t alloc_tmp = PAGE_SIZE;
9037 		vm_size_t max_chunk_size = ZONE_MAX_ALLOC_SIZE;
9038 
9039 #if __arm64__
9040 		/*
9041 		 * Increase chunk size to 48K for sizes larger than 4K on 16k
9042 		 * machines, so as to reduce internal fragmentation for kalloc
9043 		 * zones with sizes 12K and 24K.
9044 		 */
9045 		if (elem_size > 4 * 1024 && PAGE_SIZE == 16 * 1024) {
9046 			max_chunk_size = 48 * 1024;
9047 		}
9048 #endif
9049 		while ((alloc_tmp += PAGE_SIZE) <= max_chunk_size) {
9050 			vm_size_t frag_tmp = (alloc_tmp % elem_size) * 100 / alloc_tmp;
9051 			if (frag_tmp < frag) {
9052 				frag = frag_tmp;
9053 				alloc_granule = alloc_tmp;
9054 			}
9055 		}
9056 	}
9057 	return alloc_granule;
9058 }
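
/*
 * Worked example (editorial addition): with 16K pages, a 12K element wastes
 * 4K per page, i.e. 25% fragmentation.  The search above finds that a 48K
 * chunk holds exactly four 12K elements (48K % 12K == 0), so the granule
 * grows to 48K and the per-chunk waste drops to zero.
 */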
9059 
9060 vm_size_t
9061 zone_get_early_alloc_size(
9062 	const char             *name __unused,
9063 	vm_size_t               elem_size,
9064 	zone_create_flags_t     flags,
9065 	vm_size_t               min_elems)
9066 {
9067 	vm_size_t adjusted_size, alloc_granule, chunk_elems;
9068 
9069 	adjusted_size = zone_elem_adjust_size(name, elem_size, flags, NULL);
9070 	alloc_granule = zone_get_min_alloc_granule(adjusted_size, flags);
9071 	chunk_elems   = alloc_granule / adjusted_size;
9072 
9073 	return ((min_elems + chunk_elems - 1) / chunk_elems) * alloc_granule;
9074 }
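
/*
 * Worked example (editorial addition): for an adjusted element size of
 * 512 bytes and a granule of one 4K page, chunk_elems is 8; a request for
 * at least 20 elements rounds up to ceil(20 / 8) == 3 chunks, i.e. 12K
 * of early memory.
 */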
9075 
9076 zone_t
9077 zone_create_ext(
9078 	const char             *name,
9079 	vm_size_t               size,
9080 	zone_create_flags_t     flags,
9081 	zone_id_t               zid,
9082 	void                  (^extra_setup)(zone_t))
9083 {
9084 	zone_security_flags_t *zsflags;
9085 	uint16_t redzone;
9086 	zone_t z;
9087 
9088 	if (size > ZONE_MAX_ALLOC_SIZE) {
9089 		panic("zone_create: element size too large: %zd", (size_t)size);
9090 	}
9091 
9092 	if (size < 2 * sizeof(vm_size_t)) {
9093 		/* Elements are too small for kasan. */
9094 		flags |= ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE;
9095 	}
9096 
9097 	size = zone_elem_adjust_size(name, size, flags, &redzone);
9098 
9099 	/*
9100 	 * Allocate the zone slot, return early if we found an older match.
9101 	 */
9102 	z = zone_create_find(name, size, flags, &zid);
9103 	if (__improbable(z->z_self)) {
9104 		/* We found a zone to reuse */
9105 		return z;
9106 	}
9107 	zsflags = &zone_security_array[zid];
9108 
9109 	/*
9110 	 * Initialize the zone properly.
9111 	 */
9112 
9113 	/*
9114 	 * If the kernel is post lockdown, copy the zone name passed in.
9115 	 * Else simply maintain a pointer to the name string as it can only
9116 	 * be a core XNU zone (no unloadable kext exists before lockdown).
9117 	 */
9118 	if (startup_phase >= STARTUP_SUB_LOCKDOWN) {
9119 		size_t nsz = MIN(strlen(name) + 1, MACH_ZONE_NAME_MAX_LEN);
9120 		char *buf = zalloc_permanent(nsz, ZALIGN_NONE);
9121 		strlcpy(buf, name, nsz);
9122 		z->z_name = buf;
9123 	} else {
9124 		z->z_name = name;
9125 	}
9126 	if (__probable(zone_array[ZONE_ID_PERCPU_PERMANENT].z_self)) {
9127 		z->z_stats = zalloc_percpu_permanent_type(struct zone_stats);
9128 	} else {
9129 		/*
9130 		 * zone_init() hasn't run yet, use the storage provided by
9131 		 * zone_stats_startup(), and zone_init() will replace it
9132 		 * with the final value once the PERCPU zone exists.
9133 		 */
9134 		z->z_stats = __zpcpu_mangle_for_boot(&zone_stats_startup[zone_index(z)]);
9135 	}
9136 
9137 	if (flags & ZC_OBJ_CACHE) {
9138 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOCACHING);
9139 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_PERCPU);
9140 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_NOGC);
9141 		zone_create_assert_not_both(name, flags, ZC_OBJ_CACHE, ZC_DESTRUCTIBLE);
9142 
9143 		z->z_elem_size   = (uint16_t)size;
9144 		z->z_chunk_pages = 0;
9145 		z->z_quo_magic   = 0;
9146 		z->z_align_magic = 0;
9147 		z->z_chunk_elems = 0;
9148 		z->z_elem_offs   = 0;
9149 		z->no_callout    = true;
9150 		zsflags->z_lifo  = true;
9151 	} else {
9152 		vm_size_t alloc = zone_get_min_alloc_granule(size, flags);
9153 
9154 		z->z_elem_size   = (uint16_t)(size - redzone);
9155 		z->z_chunk_pages = (uint16_t)atop(alloc);
9156 		z->z_quo_magic   = Z_MAGIC_QUO(size);
9157 		z->z_align_magic = Z_MAGIC_ALIGNED(size);
9158 		if (flags & ZC_PERCPU) {
9159 			z->z_chunk_elems = (uint16_t)(PAGE_SIZE / size);
9160 			z->z_elem_offs = (uint16_t)(PAGE_SIZE % size) + redzone;
9161 		} else {
9162 			z->z_chunk_elems = (uint16_t)(alloc / size);
9163 			z->z_elem_offs = (uint16_t)(alloc % size) + redzone;
9164 		}
9165 	}
9166 
9167 	/*
9168 	 * Handle KPI flags
9169 	 */
9170 
9171 	/* ZC_CACHING applied after all configuration is done */
9172 	if (flags & ZC_NOCACHING) {
9173 		z->z_nocaching = true;
9174 	}
9175 
9176 	if (flags & ZC_READONLY) {
9177 		zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_VM);
9178 		zone_create_assert_not_both(name, flags, ZC_READONLY, ZC_DATA);
9179 		assert(zid <= ZONE_ID__LAST_RO);
9180 #if ZSECURITY_CONFIG(READ_ONLY)
9181 		zsflags->z_submap_idx = Z_SUBMAP_IDX_READ_ONLY;
9182 #endif
9183 		zone_ro_size_params[zid].z_elem_size = z->z_elem_size;
9184 		zone_ro_size_params[zid].z_align_magic = z->z_align_magic;
9185 		assert(size <= PAGE_SIZE);
9186 		if ((PAGE_SIZE % size) * 10 >= PAGE_SIZE) {
9187 			panic("Fragmentation greater than 10%% with elem size %d zone %s%s",
9188 			    (uint32_t)size, zone_heap_name(z), z->z_name);
9189 		}
9190 	}
9191 
9192 	if (flags & ZC_PERCPU) {
9193 		zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_READONLY);
9194 		zone_create_assert_not_both(name, flags, ZC_PERCPU, ZC_PGZ_USE_GUARDS);
9195 		z->z_percpu = true;
9196 	}
9197 	if (flags & ZC_NOGC) {
9198 		z->collectable = false;
9199 	}
9200 	/*
9201 	 * Handle ZC_NOENCRYPT from xnu only
9202 	 */
9203 	if (startup_phase < STARTUP_SUB_LOCKDOWN && flags & ZC_NOENCRYPT) {
9204 		zsflags->z_noencrypt = true;
9205 	}
9206 	if (flags & ZC_NOCALLOUT) {
9207 		z->no_callout = true;
9208 	}
9209 	if (flags & ZC_DESTRUCTIBLE) {
9210 		zone_create_assert_not_both(name, flags, ZC_DESTRUCTIBLE, ZC_READONLY);
9211 		z->z_destructible = true;
9212 	}
9213 	/*
9214 	 * Handle Internal flags
9215 	 */
9216 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9217 	if (flags & ZC_PGZ_USE_GUARDS) {
9218 		/*
9219 		 * Try to turn on guard pages only for zones
9220 		 * with a chance of OOB.
9221 		 */
9222 		if (startup_phase < STARTUP_SUB_LOCKDOWN) {
9223 			zsflags->z_pgz_use_guards = true;
9224 		}
9225 		z->z_pgz_use_guards = true;
9226 	}
9227 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9228 	if (!(flags & ZC_NOTBITAG)) {
9229 		z->z_tbi_tag = true;
9230 	}
9231 	if (flags & ZC_KALLOC_TYPE) {
9232 		zsflags->z_kalloc_type = true;
9233 	}
9234 	if (flags & ZC_VM) {
9235 		zone_create_assert_not_both(name, flags, ZC_VM, ZC_DATA);
9236 		zsflags->z_submap_idx = Z_SUBMAP_IDX_VM;
9237 	}
9238 	if (flags & ZC_DATA) {
9239 		zsflags->z_kheap_id = KHEAP_ID_DATA_BUFFERS;
9240 	}
9241 #if KASAN_CLASSIC
9242 	if (redzone && !(flags & ZC_KASAN_NOQUARANTINE)) {
9243 		z->z_kasan_quarantine = true;
9244 	}
9245 	z->z_kasan_redzone = redzone;
9246 #endif /* KASAN_CLASSIC */
9247 #if KASAN_FAKESTACK
9248 	if (strncmp(name, "fakestack.", sizeof("fakestack.") - 1) == 0) {
9249 		z->z_kasan_fakestacks = true;
9250 	}
9251 #endif /* KASAN_FAKESTACK */
9252 
9253 	/*
9254 	 * Then if there's extra tuning, do it
9255 	 */
9256 	if (extra_setup) {
9257 		extra_setup(z);
9258 	}
9259 
9260 	/*
9261 	 * Configure debugging features
9262 	 */
9263 #if CONFIG_PROB_GZALLOC
9264 	if ((flags & (ZC_READONLY | ZC_PERCPU | ZC_OBJ_CACHE | ZC_NOPGZ)) == 0) {
9265 		pgz_zone_init(z);
9266 	}
9267 #endif
9268 	if (zc_magazine_zone) { /* proxy for "has zone_init run" */
9269 #if ZALLOC_ENABLE_LOGGING
9270 		/*
9271 		 * Check for and set up zone leak detection
9272 		 * if requested via boot-args.
9273 		 */
9274 		zone_setup_logging(z);
9275 #endif /* ZALLOC_ENABLE_LOGGING */
9276 #if KASAN_TBI
9277 		zone_setup_kasan_logging(z);
9278 #endif /* KASAN_TBI */
9279 	}
9280 
9281 #if VM_TAG_SIZECLASSES
9282 	if ((zsflags->z_kheap_id || zsflags->z_kalloc_type) && zone_tagging_on) {
9283 		static uint16_t sizeclass_idx;
9284 
9285 		assert(startup_phase < STARTUP_SUB_LOCKDOWN);
9286 		z->z_uses_tags = true;
9287 		if (zsflags->z_kheap_id == KHEAP_ID_DEFAULT) {
9288 			zone_tags_sizeclasses[sizeclass_idx] = (uint16_t)size;
9289 			z->z_tags_sizeclass = sizeclass_idx++;
9290 		} else {
9291 			uint16_t i = 0;
9292 			for (; i < sizeclass_idx; i++) {
9293 				if (size == zone_tags_sizeclasses[i]) {
9294 					z->z_tags_sizeclass = i;
9295 					break;
9296 				}
9297 			}
9298 
9299 			/*
9300 			 * Size class wasn't found, add it to zone_tags_sizeclasses
9301 			 */
9302 			if (i == sizeclass_idx) {
9303 				assert(i < VM_TAG_SIZECLASSES);
9304 				zone_tags_sizeclasses[i] = (uint16_t)size;
9305 				z->z_tags_sizeclass = sizeclass_idx++;
9306 			}
9307 		}
9308 		assert(z->z_tags_sizeclass < VM_TAG_SIZECLASSES);
9309 	}
9310 #endif
9311 
9312 	/*
9313 	 * Finally, fixup properties based on security policies, boot-args, ...
9314 	 */
9315 	if (zsflags->z_kheap_id == KHEAP_ID_DATA_BUFFERS) {
9316 		/*
9317 		 * We use LIFO in the data map, because workloads like network
9318 		 * usage or similar tend to rotate through allocations very
9319 		 * quickly with sometimes epxloding working-sets and using
9320 		 * quickly with sometimes exploding working-sets, and using
9321 		 * a FIFO policy might cause massive TLB thrashing with rather
9322 		 */
9323 		zsflags->z_submap_idx = Z_SUBMAP_IDX_DATA;
9324 		zsflags->z_lifo = true;
9325 	}
9326 
9327 	if ((flags & (ZC_CACHING | ZC_OBJ_CACHE)) && !z->z_nocaching) {
9328 		/*
9329 		 * No zone made before zone_init() can have ZC_CACHING set.
9330 		 */
9331 		assert(zc_magazine_zone);
9332 		zone_enable_caching(z);
9333 	}
9334 
9335 	zone_lock(z);
9336 	z->z_self = z;
9337 	zone_unlock(z);
9338 
9339 	return z;
9340 }
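
/*
 * Minimal usage sketch (editorial addition): creating a zone with extra
 * tuning through the setup block.  The zone name, element size and the
 * wired cap below are assumptions chosen purely for illustration.
 */
#if 0 /* example only */
static zone_t demo_zone;

static void
demo_zone_init(void)
{
	demo_zone = zone_create_ext("demo.objects", 64, ZC_NONE,
	    ZONE_ID_ANY, ^(zone_t z) {
		/* runs once the zone is initialized, before first use */
		z->z_wired_max = zone_alloc_pages_for_nelems(z, 1024);
	});
}
#endif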
9341 
9342 void
9343 zone_enable_smr(zone_t zone, struct smr *smr, zone_smr_free_cb_t free_cb)
9344 {
9345 	/* moving to SMR must be done before the zone has ever been used */
9346 	assert(zone->z_va_cur == 0 && !zone->z_smr && !zone->z_nocaching);
9347 	assert(!zone_security_array[zone_index(zone)].z_lifo);
9348 
9349 	if (!zone->z_pcpu_cache) {
9350 		zone_enable_caching(zone);
9351 	}
9352 
9353 	zone_lock(zone);
9354 
9355 	zpercpu_foreach(it, zone->z_pcpu_cache) {
9356 		it->zc_smr = smr;
9357 		it->zc_free = free_cb;
9358 	}
9359 	zone->z_smr = true;
9360 
9361 	zone_unlock(zone);
9362 }
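
/*
 * Minimal usage sketch (editorial addition): wiring a freshly created zone
 * to SMR.  demo_retire, demo_zone and demo_smr are hypothetical names, and
 * the callback signature shown is an assumption based on the usage above.
 */
#if 0 /* example only */
static void
demo_retire(void *elem, size_t size)
{
	/* last chance to scrub the element before it can be reused */
	bzero(elem, size);
}

static void
demo_enable_smr(zone_t demo_zone, struct smr *demo_smr)
{
	/* must happen before the zone is ever used */
	zone_enable_smr(demo_zone, demo_smr, &demo_retire);
}
#endif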
9363 
9364 __startup_func
9365 void
9366 zone_create_startup(struct zone_create_startup_spec *spec)
9367 {
9368 	zone_t z;
9369 
9370 	z = zone_create_ext(spec->z_name, spec->z_size,
9371 	    spec->z_flags, spec->z_zid, spec->z_setup);
9372 	if (spec->z_var) {
9373 		*spec->z_var = z;
9374 	}
9375 }
9376 
9377 /*
9378  * The first 4 fields of a zone_view and a zone alias, so that the
9379  * zone_or_view_t union works. Trust but verify.
9380  */
9381 #define zalloc_check_zov_alias(f1, f2) \
9382     static_assert(offsetof(struct zone, f1) == offsetof(struct zone_view, f2))
9383 zalloc_check_zov_alias(z_self, zv_zone);
9384 zalloc_check_zov_alias(z_stats, zv_stats);
9385 zalloc_check_zov_alias(z_name, zv_name);
9386 zalloc_check_zov_alias(z_views, zv_next);
9387 #undef zalloc_check_zov_alias
9388 
9389 __startup_func
9390 void
9391 zone_view_startup_init(struct zone_view_startup_spec *spec)
9392 {
9393 	struct kalloc_heap *heap = NULL;
9394 	zone_view_t zv = spec->zv_view;
9395 	zone_t z;
9396 	zone_security_flags_t zsflags;
9397 
9398 	switch (spec->zv_heapid) {
9399 	case KHEAP_ID_DEFAULT:
9400 		panic("%s: Use KALLOC_TYPE_DEFINE for zone view %s instead "
9401 		    "of ZONE_VIEW_DEFINE as it is from the default kalloc heap",
9402 		    __func__, zv->zv_name);
9403 		__builtin_unreachable();
9404 	case KHEAP_ID_DATA_BUFFERS:
9405 		heap = KHEAP_DATA_BUFFERS;
9406 		break;
9407 	default:
9408 		heap = NULL;
9409 	}
9410 
9411 	if (heap) {
9412 		z = kalloc_zone_for_size(heap->kh_zstart, spec->zv_size);
9413 	} else {
9414 		z = *spec->zv_zone;
9415 		assert(spec->zv_size <= zone_elem_inner_size(z));
9416 	}
9417 
9418 	assert(z);
9419 
9420 	zv->zv_zone  = z;
9421 	zv->zv_stats = zalloc_percpu_permanent_type(struct zone_stats);
9422 	zv->zv_next  = z->z_views;
9423 	zsflags = zone_security_config(z);
9424 	if (z->z_views == NULL && zsflags.z_kheap_id == KHEAP_ID_NONE) {
9425 		/*
9426 		 * count the raw view for zones not in a heap,
9427 		 * kalloc_heap_init() already counts it for its members.
9428 		 */
9429 		zone_view_count += 2;
9430 	} else {
9431 		zone_view_count += 1;
9432 	}
9433 	z->z_views = zv;
9434 }
9435 
9436 zone_t
9437 zone_create(
9438 	const char             *name,
9439 	vm_size_t               size,
9440 	zone_create_flags_t     flags)
9441 {
9442 	return zone_create_ext(name, size, flags, ZONE_ID_ANY, NULL);
9443 }
9444 
9445 static_assert(ZONE_ID__LAST_RO_EXT - ZONE_ID__FIRST_RO_EXT == ZC_RO_ID__LAST);
9446 
9447 zone_id_t
9448 zone_create_ro(
9449 	const char             *name,
9450 	vm_size_t               size,
9451 	zone_create_flags_t     flags,
9452 	zone_create_ro_id_t     zc_ro_id)
9453 {
9454 	assert(zc_ro_id <= ZC_RO_ID__LAST);
9455 	zone_id_t reserved_zid = ZONE_ID__FIRST_RO_EXT + zc_ro_id;
9456 	(void)zone_create_ext(name, size, ZC_READONLY | flags, reserved_zid, NULL);
9457 	return reserved_zid;
9458 }
9459 
9460 zone_t
9461 zinit(
9462 	vm_size_t       size,           /* the size of an element */
9463 	vm_size_t       max,            /* maximum memory to use */
9464 	vm_size_t       alloc __unused, /* allocation size */
9465 	const char      *name)          /* a name for the zone */
9466 {
9467 	zone_t z = zone_create(name, size, ZC_DESTRUCTIBLE);
9468 	z->z_wired_max = zone_alloc_pages_for_nelems(z, max / size);
9469 	return z;
9470 }
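
/*
 * Editorial sketch: zinit() is the legacy interface; the calls below are
 * roughly equivalent (names, sizes and limits are illustrative only).
 */
#if 0 /* example only */
	zone_t legacy = zinit(64, 64 * 1024, 0, "demo.legacy");
	zone_t modern = zone_create("demo.modern", 64, ZC_DESTRUCTIBLE);
#endif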
9471 
9472 void
9473 zdestroy(zone_t z)
9474 {
9475 	unsigned int zindex = zone_index(z);
9476 	zone_security_flags_t zsflags = zone_security_array[zindex];
9477 
9478 	current_thread()->options |= TH_OPT_ZONE_PRIV;
9479 	lck_mtx_lock(&zone_gc_lock);
9480 
9481 	zone_reclaim(z, ZONE_RECLAIM_DESTROY);
9482 
9483 	lck_mtx_unlock(&zone_gc_lock);
9484 	current_thread()->options &= ~TH_OPT_ZONE_PRIV;
9485 
9486 	zone_lock(z);
9487 
9488 	if (!zone_submap_is_sequestered(zsflags)) {
9489 		while (!zone_pva_is_null(z->z_pageq_va)) {
9490 			struct zone_page_metadata *meta;
9491 
9492 			zone_counter_sub(z, z_va_cur, z->z_percpu ? 1 : z->z_chunk_pages);
9493 			meta = zone_meta_queue_pop(z, &z->z_pageq_va);
9494 			assert(meta->zm_chunk_len <= ZM_CHUNK_LEN_MAX);
9495 			bzero(meta, sizeof(*meta) * z->z_chunk_pages);
9496 			zone_unlock(z);
9497 			kmem_free(zone_submap(zsflags), zone_meta_to_addr(meta),
9498 			    ptoa(z->z_chunk_pages));
9499 			zone_lock(z);
9500 		}
9501 	}
9502 
9503 #if !KASAN_CLASSIC
9504 	/* Assert that all counts are zero */
9505 	if (z->z_elems_avail || z->z_elems_free || zone_size_wired(z) ||
9506 	    (z->z_va_cur && !zone_submap_is_sequestered(zsflags))) {
9507 		panic("zdestroy: Zone %s%s isn't empty at zdestroy() time",
9508 		    zone_heap_name(z), z->z_name);
9509 	}
9510 
9511 	/* consistency check: make sure everything is indeed empty */
9512 	assert(zone_pva_is_null(z->z_pageq_empty));
9513 	assert(zone_pva_is_null(z->z_pageq_partial));
9514 	assert(zone_pva_is_null(z->z_pageq_full));
9515 	if (!zone_submap_is_sequestered(zsflags)) {
9516 		assert(zone_pva_is_null(z->z_pageq_va));
9517 	}
9518 #endif
9519 
9520 	zone_unlock(z);
9521 
9522 	simple_lock(&all_zones_lock, &zone_locks_grp);
9523 
9524 	assert(!bitmap_test(zone_destroyed_bitmap, zindex));
9525 	/* Mark the zone as empty in the bitmap */
9526 	bitmap_set(zone_destroyed_bitmap, zindex);
9527 	num_zones_in_use--;
9528 	assert(num_zones_in_use > 0);
9529 
9530 	simple_unlock(&all_zones_lock);
9531 }
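
/*
 * Minimal lifecycle sketch (editorial addition): only zones created with
 * ZC_DESTRUCTIBLE may be passed to zdestroy(); the zone name and element
 * size are illustrative.
 */
#if 0 /* example only */
static void
demo_zone_lifecycle(void)
{
	zone_t z = zone_create("demo.transient", 128, ZC_DESTRUCTIBLE);

	/* ... allocate and free every element ... */
	zdestroy(z);    /* panics if live elements remain */
}
#endif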
9532 
9533 #endif /* !ZALLOC_TEST */
9534 #pragma mark zalloc module init
9535 #if !ZALLOC_TEST
9536 
9537 /*
9538  *	Initialize the "zone of zones" which uses fixed memory allocated
9539  *	earlier in memory initialization.  zone_bootstrap is called
9540  *	before zone_init.
9541  */
9542 __startup_func
9543 void
9544 zone_bootstrap(void)
9545 {
9546 #if DEBUG || DEVELOPMENT
9547 #if __x86_64__
9548 	if (PE_parse_boot_argn("kernPOST", NULL, 0)) {
9549 		/*
9550 		 * rdar://79781535 Disable early gaps while running kernPOST on
9551 		 * Intel; the fp faulting code gets triggered and deadlocks.
9552 		 */
9553 		zone_caching_disabled = 1;
9554 	}
9555 #endif /* __x86_64__ */
9556 #endif /* DEBUG || DEVELOPMENT */
9557 
9558 	/* Validate struct zone_packed_virtual_address expectations */
9559 	static_assert((intptr_t)VM_MIN_KERNEL_ADDRESS < 0, "the top bit must be 1");
9560 	if (VM_KERNEL_POINTER_SIGNIFICANT_BITS - PAGE_SHIFT > 31) {
9561 		panic("zone_pva_t can't pack a kernel page address in 31 bits");
9562 	}
9563 
9564 	zpercpu_early_count = ml_early_cpu_max_number() + 1;
9565 	if (!PE_parse_boot_argn("zc_mag_size", NULL, 0)) {
9566 		/*
9567 		 * Scale zc_mag_size() per machine.
9568 		 *
9569 		 * - wide machines get 128B magazines to avoid all false sharing
9570 		 * - smaller machines but with enough RAM get a bit bigger
9571 		 *   buckets (empirically affects networking performance)
9572 		 */
9573 		if (zpercpu_early_count >= 10) {
9574 			_zc_mag_size = 14;
9575 		} else if ((sane_size >> 30) >= 4) {
9576 			_zc_mag_size = 10;
9577 		}
9578 	}
9579 
9580 	/*
9581 	 * Initialize random used to scramble early allocations
9582 	 */
9583 	zpercpu_foreach_cpu(cpu) {
9584 		random_bool_init(&zone_bool_gen[cpu].zbg_bg);
9585 	}
9586 
9587 #if CONFIG_PROB_GZALLOC
9588 	/*
9589 	 * Set pgz_sample_counter on the boot CPU so that we do not sample
9590 	 * any allocation until PGZ has been properly setup (in pgz_init()).
9591 	 */
9592 	*PERCPU_GET_MASTER(pgz_sample_counter) = INT32_MAX;
9593 #endif /* CONFIG_PROB_GZALLOC */
9594 
9595 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9596 	/*
9597 	 * Randomly assign zones to one of the 4 general submaps,
9598 	 * and pick whether they allocate from the beginning
9599 	 * or the end of it.
9600 	 *
9601 	 * A lot of OOB exploitation relies on precise interleaving
9602 	 * of specific types in the heap.
9603 	 *
9604 	 * Woops, you can't guarantee that anymore.
9605 	 */
9606 	for (zone_id_t i = 1; i < MAX_ZONES; i++) {
9607 		uint32_t r = zalloc_random_uniform32(0,
9608 		    ZSECURITY_CONFIG_GENERAL_SUBMAPS * 2);
9609 
9610 		zone_security_array[i].z_submap_from_end = (r & 1);
9611 		zone_security_array[i].z_submap_idx += (r >> 1);
9612 	}
9613 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9614 
9615 	thread_call_setup_with_options(&zone_expand_callout,
9616 	    zone_expand_async, NULL, THREAD_CALL_PRIORITY_HIGH,
9617 	    THREAD_CALL_OPTIONS_ONCE);
9618 
9619 	thread_call_setup_with_options(&zone_trim_callout,
9620 	    zone_trim_async, NULL, THREAD_CALL_PRIORITY_USER,
9621 	    THREAD_CALL_OPTIONS_ONCE);
9622 }
9623 
9624 #define ZONE_GUARD_SIZE                 (64UL << 10)
9625 
9626 __startup_func
9627 static void
9628 zone_tunables_fixup(void)
9629 {
9630 	int wdt = 0;
9631 
9632 #if CONFIG_PROB_GZALLOC && (DEVELOPMENT || DEBUG)
9633 	if (!PE_parse_boot_argn("pgz", NULL, 0) &&
9634 	    PE_parse_boot_argn("pgz1", NULL, 0)) {
9635 		/*
9636 		 * if pgz1= was used, but pgz= was not,
9637 		 * then the more specific pgz1 takes precedence.
9638 		 */
9639 		pgz_all = false;
9640 	}
9641 #endif
9642 
9643 	if (zone_map_jetsam_limit == 0 || zone_map_jetsam_limit > 100) {
9644 		zone_map_jetsam_limit = ZONE_MAP_JETSAM_LIMIT_DEFAULT;
9645 	}
9646 	if (PE_parse_boot_argn("wdt", &wdt, sizeof(wdt)) && wdt == -1 &&
9647 	    !PE_parse_boot_argn("zet", NULL, 0)) {
9648 		zone_exhausted_timeout = -1;
9649 	}
9650 }
9651 STARTUP(TUNABLES, STARTUP_RANK_MIDDLE, zone_tunables_fixup);
9652 
9653 __startup_func
9654 static void
9655 zone_submap_init(
9656 	mach_vm_offset_t       *submap_min,
9657 	zone_submap_idx_t       idx,
9658 	uint64_t                zone_sub_map_numer,
9659 	uint64_t               *remaining_denom,
9660 	vm_offset_t            *remaining_size)
9661 {
9662 	vm_map_create_options_t vmco;
9663 	vm_map_address_t addr;
9664 	vm_offset_t submap_start, submap_end;
9665 	vm_size_t submap_size;
9666 	vm_map_t  submap;
9667 	vm_prot_t prot = VM_PROT_DEFAULT;
9668 	vm_prot_t prot_max = VM_PROT_ALL;
9669 	kern_return_t kr;
9670 
9671 	submap_size = trunc_page(zone_sub_map_numer * *remaining_size /
9672 	    *remaining_denom);
9673 	submap_start = *submap_min;
9674 
9675 	if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9676 		vm_offset_t submap_padding = pmap_ro_zone_align(submap_start) - submap_start;
9677 		submap_start += submap_padding;
9678 		submap_size = pmap_ro_zone_align(submap_size);
9679 		assert(*remaining_size >= (submap_padding + submap_size));
9680 		*remaining_size -= submap_padding;
9681 		*submap_min = submap_start;
9682 	}
9683 
9684 	submap_end = submap_start + submap_size;
9685 	if (idx == Z_SUBMAP_IDX_VM) {
9686 		vm_packing_verify_range("vm_compressor",
9687 		    submap_start, submap_end, VM_PACKING_PARAMS(C_SLOT_PACKED_PTR));
9688 		vm_packing_verify_range("vm_page",
9689 		    submap_start, submap_end, VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR));
9690 	}
9691 
9692 	vmco = VM_MAP_CREATE_NEVER_FAULTS;
9693 	if (!zone_submap_is_sequestered(idx)) {
9694 		vmco |= VM_MAP_CREATE_DISABLE_HOLELIST;
9695 	}
9696 
9697 	vm_map_will_allocate_early_map(&zone_submaps[idx]);
9698 	submap = kmem_suballoc(kernel_map, submap_min, submap_size, vmco,
9699 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
9700 	    VM_KERN_MEMORY_ZONE).kmr_submap;
9701 
9702 	if (idx == Z_SUBMAP_IDX_READ_ONLY) {
9703 		zone_info.zi_ro_range.min_address = submap_start;
9704 		zone_info.zi_ro_range.max_address = submap_end;
9705 		prot_max = prot = VM_PROT_NONE;
9706 	}
9707 
9708 	addr = submap_start;
9709 	kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9710 	    VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9711 	    kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9712 	if (kr != KERN_SUCCESS) {
9713 		panic("ksubmap[%s]: failed to make first entry (%d)",
9714 		    zone_submaps_names[idx], kr);
9715 	}
9716 
9717 	addr = submap_end - ZONE_GUARD_SIZE / 2;
9718 	kr = vm_map_enter(submap, &addr, ZONE_GUARD_SIZE / 2, 0,
9719 	    VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vm_tag = VM_KERN_MEMORY_ZONE),
9720 	    kernel_object, addr, FALSE, prot, prot_max, VM_INHERIT_NONE);
9721 	if (kr != KERN_SUCCESS) {
9722 		panic("ksubmap[%s]: failed to make last entry (%d)",
9723 		    zone_submaps_names[idx], kr);
9724 	}
9725 
9726 #if DEBUG || DEVELOPMENT
9727 	printf("zone_init: map %-5s %p:%p (%u%c)\n",
9728 	    zone_submaps_names[idx], (void *)submap_start, (void *)submap_end,
9729 	    mach_vm_size_pretty(submap_size), mach_vm_size_unit(submap_size));
9730 #endif /* DEBUG || DEVELOPMENT */
9731 
9732 	zone_submaps[idx] = submap;
9733 	*submap_min       = submap_end;
9734 	*remaining_size  -= submap_size;
9735 	*remaining_denom -= zone_sub_map_numer;
9736 }
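
/*
 * Worked example (editorial addition): with the default ratios defined
 * below and a hypothetical 1G ZONE_MAP_VA_SIZE, the Z_SUBMAP_IDX_VM call
 * gets trunc_page(15 * 1G / 100), about 154M; the denominator then drops
 * by 15, so the remaining submaps split the leftover VA proportionally.
 */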
9737 
9738 static inline void
9739 zone_pva_relocate(zone_pva_t *pva, uint32_t delta)
9740 {
9741 	if (!zone_pva_is_null(*pva) && !zone_pva_is_queue(*pva)) {
9742 		pva->packed_address += delta;
9743 	}
9744 }
9745 
9746 /*
9747  * Allocate metadata array and migrate bootstrap initial metadata and memory.
9748  */
9749 __startup_func
9750 static void
9751 zone_metadata_init(void)
9752 {
9753 	vm_map_t vm_map = zone_submaps[Z_SUBMAP_IDX_VM];
9754 	vm_map_entry_t first;
9755 
9756 	struct mach_vm_range meta_r, bits_r, xtra_r, early_r;
9757 	vm_size_t early_sz;
9758 	vm_offset_t reloc_base;
9759 
9760 	/*
9761 	 * Step 1: Allocate the metadata + bitmaps range
9762 	 *
9763 	 * Allocations can't be smaller than 8 bytes, which is 128b / 16B per 1k
9764 	 * of physical memory (16M per 1G).
9765 	 *
9766 	 * Let's preallocate for the worst case to avoid weird panics.
9767 	 */
9768 	vm_map_will_allocate_early_map(&zone_meta_map);
9769 	meta_r = zone_kmem_suballoc(zone_info.zi_meta_range.min_address,
9770 	    zone_meta_size + zone_bits_size + zone_xtra_size,
9771 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
9772 	    VM_KERN_MEMORY_ZONE, &zone_meta_map);
9773 	meta_r.min_address += ZONE_GUARD_SIZE;
9774 	meta_r.max_address -= ZONE_GUARD_SIZE;
9775 	if (zone_xtra_size) {
9776 		xtra_r.max_address  = meta_r.max_address;
9777 		meta_r.max_address -= zone_xtra_size;
9778 		xtra_r.min_address  = meta_r.max_address;
9779 	} else {
9780 		xtra_r.min_address  = xtra_r.max_address = 0;
9781 	}
9782 	bits_r.max_address  = meta_r.max_address;
9783 	meta_r.max_address -= zone_bits_size;
9784 	bits_r.min_address  = meta_r.max_address;
9785 
9786 #if DEBUG || DEVELOPMENT
9787 	printf("zone_init: metadata  %p:%p (%u%c)\n",
9788 	    (void *)meta_r.min_address, (void *)meta_r.max_address,
9789 	    mach_vm_size_pretty(mach_vm_range_size(&meta_r)),
9790 	    mach_vm_size_unit(mach_vm_range_size(&meta_r)));
9791 	printf("zone_init: metabits  %p:%p (%u%c)\n",
9792 	    (void *)bits_r.min_address, (void *)bits_r.max_address,
9793 	    mach_vm_size_pretty(mach_vm_range_size(&bits_r)),
9794 	    mach_vm_size_unit(mach_vm_range_size(&bits_r)));
9795 	printf("zone_init: extra     %p:%p (%u%c)\n",
9796 	    (void *)xtra_r.min_address, (void *)xtra_r.max_address,
9797 	    mach_vm_size_pretty(mach_vm_range_size(&xtra_r)),
9798 	    mach_vm_size_unit(mach_vm_range_size(&xtra_r)));
9799 #endif /* DEBUG || DEVELOPMENT */
9800 
9801 	bits_r.min_address = (bits_r.min_address + ZBA_CHUNK_SIZE - 1) & -ZBA_CHUNK_SIZE;
9802 	bits_r.max_address = bits_r.max_address & -ZBA_CHUNK_SIZE;
9803 
9804 	/*
9805 	 * Step 2: Install new ranges.
9806 	 *         Relocate metadata and bits.
9807 	 */
9808 	early_r  = zone_info.zi_map_range;
9809 	early_sz = mach_vm_range_size(&early_r);
9810 
9811 	zone_info.zi_map_range  = zone_map_range;
9812 	zone_info.zi_meta_range = meta_r;
9813 	zone_info.zi_bits_range = bits_r;
9814 	zone_info.zi_xtra_range = xtra_r;
9815 	zone_info.zi_meta_base  = (struct zone_page_metadata *)meta_r.min_address -
9816 	    zone_pva_from_addr(zone_map_range.min_address).packed_address;
9817 
9818 	vm_map_lock(vm_map);
9819 	first = vm_map_first_entry(vm_map);
9820 	reloc_base = first->vme_end;
9821 	first->vme_end += early_sz;
9822 	vm_map->size += early_sz;
9823 	vm_map_unlock(vm_map);
9824 
9825 	struct zone_page_metadata *early_meta = zone_early_meta_array_startup;
9826 	struct zone_page_metadata *new_meta = zone_meta_from_addr(reloc_base);
9827 	vm_offset_t reloc_delta = reloc_base - early_r.min_address;
9828 	/* this needs to sign extend */
9829 	uint32_t pva_delta = (uint32_t)((intptr_t)reloc_delta >> PAGE_SHIFT);
9830 
9831 	zone_meta_populate(reloc_base, early_sz);
9832 	memcpy(new_meta, early_meta,
9833 	    atop(early_sz) * sizeof(struct zone_page_metadata));
9834 	for (uint32_t i = 0; i < atop(early_sz); i++) {
9835 		zone_pva_relocate(&new_meta[i].zm_page_next, pva_delta);
9836 		zone_pva_relocate(&new_meta[i].zm_page_prev, pva_delta);
9837 	}
9838 
9839 	static_assert(ZONE_ID_VM_MAP_ENTRY == ZONE_ID_VM_MAP + 1);
9840 	static_assert(ZONE_ID_VM_MAP_HOLES == ZONE_ID_VM_MAP + 2);
9841 
9842 	for (zone_id_t zid = ZONE_ID_VM_MAP; zid <= ZONE_ID_VM_MAP_HOLES; zid++) {
9843 		zone_pva_relocate(&zone_array[zid].z_pageq_partial, pva_delta);
9844 		zone_pva_relocate(&zone_array[zid].z_pageq_full, pva_delta);
9845 	}
9846 
9847 	zba_populate(0, false);
9848 	memcpy(zba_base_header(), zba_chunk_startup, sizeof(zba_chunk_startup));
9849 	zba_meta()->zbam_right = (uint32_t)atop(zone_bits_size);
9850 
9851 	/*
9852 	 * Step 3: Relocate the bootstrap VM structs
9853 	 *         (including rewriting their content).
9854 	 */
9855 
9856 #if __x86_64__
9857 	kernel_memory_populate(reloc_base, early_sz,
9858 	    KMA_KOBJECT | KMA_NOENCRYPT | KMA_NOFAIL,
9859 	    VM_KERN_MEMORY_OSFMK);
9860 	__nosan_memcpy((void *)reloc_base, (void *)early_r.min_address, early_sz);
9861 #else
9862 	for (vm_address_t addr = early_r.min_address;
9863 	    addr < early_r.max_address; addr += PAGE_SIZE) {
9864 		pmap_paddr_t pa = kvtophys(trunc_page(addr));
9865 		__assert_only kern_return_t kr;
9866 
9867 		kr = pmap_enter_options_addr(kernel_pmap, addr + reloc_delta,
9868 		    pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
9869 		    0, NULL);
9870 		assert(kr == KERN_SUCCESS);
9871 	}
9872 #endif
9873 
9874 #if KASAN
9875 	kasan_notify_address(reloc_base, early_sz);
9876 #if KASAN_TBI
9877 	kasan_tbi_copy_tags(reloc_base, early_r.min_address, early_sz);
9878 #endif /* KASAN_TBI */
9879 #endif /* KASAN */
9880 
9881 	vm_map_relocate_early_maps(reloc_delta);
9882 
9883 	for (uint32_t i = 0; i < atop(early_sz); i++) {
9884 		zone_id_t zid = new_meta[i].zm_index;
9885 		zone_t z = &zone_array[zid];
9886 		vm_size_t esize = zone_elem_outer_size(z);
9887 		vm_address_t base = reloc_base + ptoa(i) + zone_elem_inner_offs(z);
9888 		vm_address_t addr;
9889 
9890 		if (new_meta[i].zm_chunk_len >= ZM_SECONDARY_PAGE) {
9891 			continue;
9892 		}
9893 
9894 		for (uint32_t eidx = 0; eidx < z->z_chunk_elems; eidx++) {
9895 			if (zone_meta_is_free(&new_meta[i], eidx)) {
9896 				continue;
9897 			}
9898 
9899 			addr = base + eidx * esize;
9900 #if KASAN_CLASSIC
9901 			kasan_alloc(addr,
9902 			    zone_elem_inner_size(z), zone_elem_inner_size(z),
9903 			    zone_elem_redzone(z), false,
9904 			    __builtin_frame_address(0));
9905 #endif
9906 			vm_map_relocate_early_elem(zid, addr, reloc_delta);
9907 		}
9908 	}
9909 
9910 #if !__x86_64__
9911 	pmap_remove(kernel_pmap, early_r.min_address, early_r.max_address);
9912 #endif
9913 }
9914 
9915 __startup_data
9916 static uint16_t submap_ratios[Z_SUBMAP_IDX_COUNT] = {
9917 #if ZSECURITY_CONFIG(READ_ONLY)
9918 	[Z_SUBMAP_IDX_VM]               = 15,
9919 	[Z_SUBMAP_IDX_READ_ONLY]        =  5,
9920 #else
9921 	[Z_SUBMAP_IDX_VM]               = 20,
9922 #endif /* !ZSECURITY_CONFIG(READ_ONLY) */
9923 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
9924 	[Z_SUBMAP_IDX_GENERAL_0]        = 15,
9925 	[Z_SUBMAP_IDX_GENERAL_1]        = 15,
9926 	[Z_SUBMAP_IDX_GENERAL_2]        = 15,
9927 	[Z_SUBMAP_IDX_GENERAL_3]        = 15,
9928 	[Z_SUBMAP_IDX_DATA]             = 20,
9929 #else
9930 	[Z_SUBMAP_IDX_GENERAL_0]        = 60,
9931 	[Z_SUBMAP_IDX_DATA]             = 20,
9932 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
9933 };
9934 
9935 __startup_func
9936 static inline uint16_t
9937 zone_submap_ratios_denom(void)
9938 {
9939 	uint16_t denom = 0;
9940 
9941 	for (unsigned idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
9942 		denom += submap_ratios[idx];
9943 	}
9944 
9945 	assert(denom == 100);
9946 
9947 	return denom;
9948 }
9949 
9950 __startup_func
9951 static inline vm_offset_t
9952 zone_restricted_va_max(void)
9953 {
9954 	vm_offset_t compressor_max = VM_PACKING_MAX_PACKABLE(C_SLOT_PACKED_PTR);
9955 	vm_offset_t vm_page_max    = VM_PACKING_MAX_PACKABLE(VM_PAGE_PACKED_PTR);
9956 
9957 	return trunc_page(MIN(compressor_max, vm_page_max));
9958 }
9959 
9960 __startup_func
9961 static void
9962 zone_set_map_sizes(void)
9963 {
9964 	vm_size_t zsize;
9965 	vm_size_t zsizearg;
9966 
9967 	/*
9968 	 * Compute the physical limits for the zone map
9969 	 */
9970 
9971 	if (PE_parse_boot_argn("zsize", &zsizearg, sizeof(zsizearg))) {
9972 		zsize = zsizearg * (1024ULL * 1024);
9973 	} else {
9974 		/* Set target zone size as 1/4 of physical memory */
9975 		zsize = (vm_size_t)(sane_size >> 2);
9976 		zsize += zsize >> 1;
9977 	}
9978 
9979 	if (zsize < CONFIG_ZONE_MAP_MIN) {
9980 		zsize = CONFIG_ZONE_MAP_MIN;   /* Clamp to min */
9981 	}
9982 	if (zsize > sane_size >> 1) {
9983 		zsize = (vm_size_t)(sane_size >> 1); /* Clamp to half of RAM max */
9984 	}
9985 	if (zsizearg == 0 && zsize > ZONE_MAP_MAX) {
9986 		/* if zsize boot-arg not present and zsize exceeds platform maximum, clip zsize */
9987 		printf("NOTE: zonemap size reduced from 0x%lx to 0x%lx\n",
9988 		    (uintptr_t)zsize, (uintptr_t)ZONE_MAP_MAX);
9989 		zsize = ZONE_MAP_MAX;
9990 	}
9991 
9992 	zone_pages_wired_max = (uint32_t)atop(trunc_page(zsize));
9993 
9994 
9995 	/*
9996 	 * Declare restrictions on zone max
9997 	 */
9998 	vm_offset_t vm_submap_size = round_page(
9999 		(submap_ratios[Z_SUBMAP_IDX_VM] * ZONE_MAP_VA_SIZE) /
10000 		zone_submap_ratios_denom());
10001 
10002 #if CONFIG_PROB_GZALLOC
10003 	vm_submap_size += pgz_get_size();
10004 #endif /* CONFIG_PROB_GZALLOC */
10005 	if (os_sub_overflow(zone_restricted_va_max(), vm_submap_size,
10006 	    &zone_map_range.min_address)) {
10007 		zone_map_range.min_address = 0;
10008 	}
10009 
10010 	zone_meta_size = round_page(atop(ZONE_MAP_VA_SIZE) *
10011 	    sizeof(struct zone_page_metadata)) + ZONE_GUARD_SIZE * 2;
10012 
10013 	static_assert(ZONE_MAP_MAX / (CHAR_BIT * KALLOC_MINSIZE) <=
10014 	    ZBA_PTR_MASK + 1);
10015 	zone_bits_size = round_page(ptoa(zone_pages_wired_max) /
10016 	    (CHAR_BIT * KALLOC_MINSIZE));
10017 
10018 #if VM_TAG_SIZECLASSES
10019 	if (zone_tagging_on) {
10020 		zba_xtra_shift = (uint8_t)fls(sizeof(vm_tag_t) - 1);
10021 	}
10022 	if (zba_xtra_shift) {
10023 		/*
10024 		 * if we need the extra space range, then limit the size of the
10025 		 * bitmaps to something reasonable instead of a theoretical
10026 		 * worst case scenario of all zones being for the smallest
10027 		 * allocation granule, in order to avoid fake VA pressure on
10028 		 * other parts of the system.
10029 		 */
10030 		zone_bits_size = round_page(zone_bits_size / 8);
10031 		zone_xtra_size = round_page(zone_bits_size * CHAR_BIT << zba_xtra_shift);
10032 	}
10033 #endif /* VM_TAG_SIZECLASSES */
10034 }
10035 STARTUP(KMEM, STARTUP_RANK_FIRST, zone_set_map_sizes);
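
/*
 * Worked example (editorial addition): on a hypothetical 8G device with no
 * "zsize" boot-arg, zsize starts at 2G (sane_size >> 2), grows by half to
 * 3G (3/8 of RAM), and is then subject to the CONFIG_ZONE_MAP_MIN,
 * half-of-RAM and ZONE_MAP_MAX clamps above.
 */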
10036 
10037 /*
10038  * Can't use zone_info.zi_map_range at this point as it is being used to
10039  * store the range of early pmap memory that was stolen to bootstrap the
10040  * necessary VM zones.
10041  */
10042 KMEM_RANGE_REGISTER_STATIC(zones, &zone_map_range, ZONE_MAP_VA_SIZE);
10043 KMEM_RANGE_REGISTER_DYNAMIC(zone_meta, &zone_info.zi_meta_range, ^{
10044 	return zone_meta_size + zone_bits_size + zone_xtra_size;
10045 });
10046 
10047 /*
10048  * Global initialization of Zone Allocator.
10049  * Runs after zone_bootstrap.
10050  */
10051 __startup_func
10052 static void
10053 zone_init(void)
10054 {
10055 	vm_size_t           remaining_size = ZONE_MAP_VA_SIZE;
10056 	mach_vm_offset_t    submap_min = 0;
10057 	uint64_t            denom = zone_submap_ratios_denom();
10058 	/*
10059 	 * And now allocate the various pieces of VA and submaps.
10060 	 */
10061 
10062 	submap_min = zone_map_range.min_address;
10063 
10064 #if CONFIG_PROB_GZALLOC
10065 	vm_size_t pgz_size = pgz_get_size();
10066 
10067 	vm_map_will_allocate_early_map(&pgz_submap);
10068 	zone_info.zi_pgz_range = zone_kmem_suballoc(submap_min, pgz_size,
10069 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
10070 	    VM_KERN_MEMORY_ZONE, &pgz_submap);
10071 
10072 	submap_min     += pgz_size;
10073 	remaining_size -= pgz_size;
10074 #if DEBUG || DEVELOPMENT
10075 	printf("zone_init: pgzalloc  %p:%p (%u%c) [%d slots]\n",
10076 	    (void *)zone_info.zi_pgz_range.min_address,
10077 	    (void *)zone_info.zi_pgz_range.max_address,
10078 	    mach_vm_size_pretty(pgz_size), mach_vm_size_unit(pgz_size),
10079 	    pgz_slots);
10080 #endif /* DEBUG || DEVELOPMENT */
10081 #endif /* CONFIG_PROB_GZALLOC */
10082 
10083 	/*
10084 	 * Allocate the submaps
10085 	 */
10086 	for (zone_submap_idx_t idx = 0; idx < Z_SUBMAP_IDX_COUNT; idx++) {
10087 		if (submap_ratios[idx] == 0) {
10088 			zone_submaps[idx] = VM_MAP_NULL;
10089 		} else {
10090 			zone_submap_init(&submap_min, idx, submap_ratios[idx],
10091 			    &denom, &remaining_size);
10092 		}
10093 	}
10094 
10095 	zone_metadata_init();
10096 
10097 #if VM_TAG_SIZECLASSES
10098 	if (zone_tagging_on) {
10099 		vm_allocation_zones_init();
10100 	}
10101 #endif /* VM_TAG_SIZECLASSES */
10102 
10103 	zone_create_flags_t kma_flags = ZC_NOCACHING | ZC_NOGC | ZC_NOCALLOUT |
10104 	    ZC_KASAN_NOQUARANTINE | ZC_KASAN_NOREDZONE | ZC_VM;
10105 
10106 	(void)zone_create_ext("vm.permanent", 1, kma_flags,
10107 	    ZONE_ID_PERMANENT, ^(zone_t z) {
10108 		z->z_permanent = true;
10109 		z->z_elem_size = 1;
10110 	});
10111 	(void)zone_create_ext("vm.permanent.percpu", 1,
10112 	    kma_flags | ZC_PERCPU, ZONE_ID_PERCPU_PERMANENT, ^(zone_t z) {
10113 		z->z_permanent = true;
10114 		z->z_elem_size = 1;
10115 	});
10116 
10117 	zc_magazine_zone = zone_create("zcc_magazine_zone", sizeof(struct zone_magazine) +
10118 	    zc_mag_size() * sizeof(vm_offset_t),
10119 	    ZC_VM | ZC_NOCACHING | ZC_ZFREE_CLEARMEM | ZC_PGZ_USE_GUARDS);
10120 	zone_raise_reserve(zc_magazine_zone, (uint16_t)(2 * zpercpu_count()));
10121 
10122 	/*
10123 	 * Now migrate the startup statistics into their final storage,
10124 	 * and enable logging for early zones (that zone_create_ext() skipped).
10125 	 */
10126 	int cpu = cpu_number();
10127 	zone_index_foreach(idx) {
10128 		zone_t tz = &zone_array[idx];
10129 
10130 		if (tz->z_stats == __zpcpu_mangle_for_boot(&zone_stats_startup[idx])) {
10131 			zone_stats_t zs = zalloc_percpu_permanent_type(struct zone_stats);
10132 
10133 			*zpercpu_get_cpu(zs, cpu) = *zpercpu_get_cpu(tz->z_stats, cpu);
10134 			tz->z_stats = zs;
10135 		}
10136 		if (tz->z_self == tz) {
10137 #if ZALLOC_ENABLE_LOGGING
10138 			zone_setup_logging(tz);
10139 #endif /* ZALLOC_ENABLE_LOGGING */
10140 #if KASAN_TBI
10141 			zone_setup_kasan_logging(tz);
10142 #endif /* KASAN_TBI */
10143 		}
10144 	}
10145 }
10146 STARTUP(ZALLOC, STARTUP_RANK_FIRST, zone_init);
10147 
10148 void
10149 zalloc_first_proc_made(void)
10150 {
10151 	zone_caching_disabled = 0;
10152 }
10153 
10154 __startup_func
10155 vm_offset_t
10156 zone_early_mem_init(vm_size_t size)
10157 {
10158 	vm_offset_t mem;
10159 
10160 	assert3u(atop(size), <=, ZONE_EARLY_META_INLINE_COUNT);
10161 
10162 	/*
10163 	 * The zone that is used early to bring up the VM is stolen here.
10164 	 *
10165 	 * When the zone subsystem is actually initialized,
10166 	 * zone_metadata_init() will be called, and those pages
10167 	 * and the elements they contain, will be relocated into
10168 	 * the VM submap (even on architectures where those zones
10169 	 * do not live there).
10170 	 */
10171 #if __x86_64__
10172 	assert3u(size, <=, sizeof(zone_early_pages_to_cram));
10173 	mem = (vm_offset_t)zone_early_pages_to_cram;
10174 #else
10175 	mem = (vm_offset_t)pmap_steal_memory(size, PAGE_SIZE);
10176 #endif
10177 
10178 	zone_info.zi_meta_base = zone_early_meta_array_startup -
10179 	    zone_pva_from_addr(mem).packed_address;
10180 	zone_info.zi_map_range.min_address = mem;
10181 	zone_info.zi_map_range.max_address = mem + size;
10182 
10183 	zone_info.zi_bits_range = (struct mach_vm_range){
10184 		.min_address = (mach_vm_offset_t)zba_chunk_startup,
10185 		.max_address = (mach_vm_offset_t)zba_chunk_startup +
10186 	    sizeof(zba_chunk_startup),
10187 	};
10188 
10189 	zba_meta()->zbam_left  = 1;
10190 	zba_meta()->zbam_right = 1;
10191 	zba_init_chunk(0, false);
10192 
10193 	return mem;
10194 }
10195 
10196 #endif /* !ZALLOC_TEST */
10197 #pragma mark - tests
10198 #if DEBUG || DEVELOPMENT
10199 
10200 /*
10201  * Used for sysctl zone tests that aren't thread-safe. Ensure only one
10202  * thread goes through at a time.
10203  *
10204  * Otherwise we can end up with multiple test zones (if a second zinit()
10205  * comes through before zdestroy()), which could lead us to run out of zones.
10206  */
10207 static bool any_zone_test_running = false;
10208 
10209 static uintptr_t *
10210 zone_copy_allocations(zone_t z, uintptr_t *elems, zone_pva_t page_index)
10211 {
10212 	vm_offset_t elem_size = zone_elem_outer_size(z);
10213 	vm_offset_t base;
10214 	struct zone_page_metadata *meta;
10215 
10216 	while (!zone_pva_is_null(page_index)) {
10217 		base  = zone_pva_to_addr(page_index) + zone_elem_inner_offs(z);
10218 		meta  = zone_pva_to_meta(page_index);
10219 
10220 		if (meta->zm_inline_bitmap) {
10221 			for (size_t i = 0; i < meta->zm_chunk_len; i++) {
10222 				uint32_t map = meta[i].zm_bitmap;
10223 
10224 				for (; map; map &= map - 1) {
10225 					*elems++ = INSTANCE_PUT(base +
10226 					    elem_size * __builtin_clz(map));
10227 				}
10228 				base += elem_size * 32;
10229 			}
10230 		} else {
10231 			uint32_t order = zba_bits_ref_order(meta->zm_bitmap);
10232 			bitmap_t *bits = zba_bits_ref_ptr(meta->zm_bitmap);
10233 			for (size_t i = 0; i < (1u << order); i++) {
10234 				uint64_t map = bits[i];
10235 
10236 				for (; map; map &= map - 1) {
10237 					*elems++ = INSTANCE_PUT(base +
10238 					    elem_size * __builtin_clzll(map));
10239 				}
10240 				base += elem_size * 64;
10241 			}
10242 		}
10243 
10244 		page_index = meta->zm_page_next;
10245 	}
10246 	return elems;
10247 }
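
/*
 * Editorial sketch: the inner loops above use the classic "clear lowest
 * set bit" idiom (map &= map - 1), visiting each set bit exactly once.
 * A self-contained illustration, independent of zone metadata:
 */
#if 0 /* example only */
static unsigned
count_set_bits_demo(uint32_t map)
{
	unsigned n = 0;

	for (; map; map &= map - 1) {
		n++;    /* each iteration consumes one set bit */
	}
	return n;
}
#endif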
10248 
10249 kern_return_t
10250 zone_leaks(const char * zoneName, uint32_t nameLen, leak_site_proc proc)
10251 {
10252 	zone_t        zone = NULL;
10253 	uintptr_t *   array;
10254 	uintptr_t *   next;
10255 	uintptr_t     element;
10256 	uint32_t      idx, count, found;
10257 	uint32_t      nobtcount;
10258 	uint32_t      elemSize;
10259 	size_t        maxElems;
10260 
10261 	zone_foreach(z) {
10262 		if (!strncmp(zoneName, z->z_name, nameLen)) {
10263 			zone = z;
10264 			break;
10265 		}
10266 	}
10267 	if (zone == NULL) {
10268 		return KERN_INVALID_NAME;
10269 	}
10270 
10271 	elemSize = (uint32_t)zone_elem_inner_size(zone);
10272 	maxElems = (zone->z_elems_avail + 1) & ~1ul;
10273 
10274 	array = kalloc_type_tag(vm_offset_t, maxElems, VM_KERN_MEMORY_DIAG);
10275 	if (array == NULL) {
10276 		return KERN_RESOURCE_SHORTAGE;
10277 	}
10278 
10279 	zone_lock(zone);
10280 
10281 	next = array;
10282 	next = zone_copy_allocations(zone, next, zone->z_pageq_partial);
10283 	next = zone_copy_allocations(zone, next, zone->z_pageq_full);
10284 	count = (uint32_t)(next - array);
10285 
10286 	zone_unlock(zone);
10287 
10288 	zone_leaks_scan(array, count, (uint32_t)zone_elem_outer_size(zone), &found);
10289 	assert(found <= count);
10290 
10291 	for (idx = 0; idx < count; idx++) {
10292 		element = array[idx];
10293 		if (kInstanceFlagReferenced & element) {
10294 			continue;
10295 		}
10296 		element = INSTANCE_PUT(element) & ~kInstanceFlags;
10297 	}
10298 
10299 #if ZALLOC_ENABLE_LOGGING
10300 	if (zone->z_btlog && !corruption_debug_flag) {
10301 		// btlog_copy_backtraces_for_elements will set kInstanceFlagReferenced on elements it found
10302 		static_assert(sizeof(vm_address_t) == sizeof(uintptr_t));
10303 		btlog_copy_backtraces_for_elements(zone->z_btlog,
10304 		    (vm_address_t *)array, &count, elemSize, proc);
10305 	}
10306 #endif /* ZALLOC_ENABLE_LOGGING */
10307 
10308 	for (nobtcount = idx = 0; idx < count; idx++) {
10309 		element = array[idx];
10310 		if (!element) {
10311 			continue;
10312 		}
10313 		if (kInstanceFlagReferenced & element) {
10314 			continue;
10315 		}
10316 		nobtcount++;
10317 	}
10318 	if (nobtcount) {
10319 		proc(nobtcount, elemSize, BTREF_NULL);
10320 	}
10321 
10322 	kfree_type(vm_offset_t, maxElems, array);
10323 	return KERN_SUCCESS;
10324 }
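
/*
 * The shape of the proc callback can be read off the call sites above:
 * a count of elements, the element size, and a backtrace ref
 * (BTREF_NULL for allocations with no recorded backtrace). A
 * hypothetical consumer, inferred from those call sites only:
 *
 *	static void
 *	leak_site_printer(uint32_t count, uint32_t elem_size, btref_t btref)
 *	{
 *		if (btref == BTREF_NULL) {
 *			printf("%u leaked elements of %u bytes, no backtrace\n",
 *			    count, elem_size);
 *		}
 *	}
 */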

static int
zone_ro_basic_test_run(__unused int64_t in, int64_t *out)
{
	zone_security_flags_t zsflags;
	uint32_t x = 4;
	uint32_t *test_ptr;

	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
		printf("zone_ro_basic_test: Test already running.\n");
		return EALREADY;
	}

	zsflags = zone_security_array[ZONE_ID__FIRST_RO];

	for (int i = 0; i < 3; i++) {
#if ZSECURITY_CONFIG(READ_ONLY)
		/* Basic Test: Create int zone, zalloc int, modify value, free int */
		printf("zone_ro_basic_test: Basic Test iteration %d\n", i);
		printf("zone_ro_basic_test: create a sub-page size zone\n");

		printf("zone_ro_basic_test: verify flags were set\n");
		assert(zsflags.z_submap_idx == Z_SUBMAP_IDX_READ_ONLY);

		printf("zone_ro_basic_test: zalloc an element\n");
		test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
		assert(test_ptr);

		printf("zone_ro_basic_test: verify we can't write to it\n");
		assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);

		x = 4;
		printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
		zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
		assert(test_ptr);
		assert(*(uint32_t*)test_ptr == x);

		x = 5;
		printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
		zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
		assert(test_ptr);
		assert(*(uint32_t*)test_ptr == x);

		printf("zone_ro_basic_test: verify we can't write to it after assigning value\n");
		assert(verify_write(&x, test_ptr, sizeof(x)) == EFAULT);

		printf("zone_ro_basic_test: free elem\n");
		zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
		assert(!test_ptr);
#else
		printf("zone_ro_basic_test: Read-only allocator n/a on 32-bit platforms, test functionality of API\n");

		printf("zone_ro_basic_test: verify flags were set\n");
		assert(zsflags.z_submap_idx != Z_SUBMAP_IDX_READ_ONLY);

		printf("zone_ro_basic_test: zalloc an element\n");
		test_ptr = (zalloc_ro)(ZONE_ID__FIRST_RO, Z_WAITOK);
		assert(test_ptr);

		x = 4;
		printf("zone_ro_basic_test: test zalloc_ro_mut to assign value\n");
		zalloc_ro_mut(ZONE_ID__FIRST_RO, test_ptr, 0, &x, sizeof(uint32_t));
		assert(test_ptr);
		assert(*(uint32_t*)test_ptr == x);

		x = 5;
		printf("zone_ro_basic_test: test zalloc_ro_update_elem to assign value\n");
		zalloc_ro_update_elem(ZONE_ID__FIRST_RO, test_ptr, &x);
		assert(test_ptr);
		assert(*(uint32_t*)test_ptr == x);

		printf("zone_ro_basic_test: free elem\n");
		zfree_ro(ZONE_ID__FIRST_RO, test_ptr);
		assert(!test_ptr);
#endif /* !ZSECURITY_CONFIG(READ_ONLY) */
	}

	printf("zone_ro_basic_test: garbage collection\n");
	zone_gc(ZONE_GC_DRAIN);

	printf("zone_ro_basic_test: Test passed\n");

	*out = 1;
	os_atomic_store(&any_zone_test_running, false, relaxed);
	return 0;
}
SYSCTL_TEST_REGISTER(zone_ro_basic_test, zone_ro_basic_test_run);
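
/*
 * The read-only zone contract the test above exercises, condensed:
 * elements come from zalloc_ro(), direct stores through the returned
 * pointer fault, and updates go through the mutation helpers. A sketch
 * of the lifecycle, using the same calls as above:
 *
 *	uint32_t v = 4;
 *	uint32_t *p = zalloc_ro(ZONE_ID__FIRST_RO, Z_WAITOK);
 *	zalloc_ro_mut(ZONE_ID__FIRST_RO, p, 0, &v, sizeof(v)); // offset/size update
 *	v = 5;
 *	zalloc_ro_update_elem(ZONE_ID__FIRST_RO, p, &v);       // whole element
 *	zfree_ro(ZONE_ID__FIRST_RO, p);                        // also nils out p
 */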

static int
zone_basic_test_run(__unused int64_t in, int64_t *out)
{
	static zone_t test_zone_ptr = NULL;

	unsigned int i = 0, max_iter = 5;
	void * test_ptr;
	zone_t test_zone;
	int rc = 0;

	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
		printf("zone_basic_test: Test already running.\n");
		return EALREADY;
	}

	printf("zone_basic_test: Testing zinit(), zalloc(), zfree() and zdestroy() on zone \"test_zone_sysctl\"\n");

	/*
	 * zinit() and zdestroy() a zone with the same name a bunch of times,
	 * verifying that we get back the same zone each time.
	 */
	do {
		test_zone = zinit(sizeof(uint64_t), 100 * sizeof(uint64_t), sizeof(uint64_t), "test_zone_sysctl");
		assert(test_zone);

#if KASAN_CLASSIC
		if (test_zone_ptr == NULL && test_zone->z_elems_free != 0)
#else
		if (test_zone->z_elems_free != 0)
#endif
		{
			printf("zone_basic_test: free count is not zero\n");
			rc = EIO;
			goto out;
		}

		if (test_zone_ptr == NULL) {
			/* Stash the zone pointer returned on the first zinit */
			printf("zone_basic_test: zone created for the first time\n");
			test_zone_ptr = test_zone;
		} else if (test_zone != test_zone_ptr) {
			printf("zone_basic_test: old zone pointer and new zone pointer don't match\n");
			rc = EIO;
			goto out;
		}

		test_ptr = zalloc_flags(test_zone, Z_WAITOK | Z_NOFAIL);
		zfree(test_zone, test_ptr);

		zdestroy(test_zone);
		i++;

		printf("zone_basic_test: Iteration %d successful\n", i);
	} while (i < max_iter);

#if !KASAN_CLASSIC /* because of the quarantine and redzones */
	/* test Z_VA_SEQUESTER */
	{
		zone_t test_pcpu_zone;
		kern_return_t kr;
		int idx, num_allocs = 8;
		vm_size_t elem_size = 2 * PAGE_SIZE / num_allocs;
		void *allocs[num_allocs];
		void **allocs_pcpu;
		vm_offset_t phys_pages = os_atomic_load(&zone_pages_wired, relaxed);

		test_zone = zone_create("test_zone_sysctl", elem_size,
		    ZC_DESTRUCTIBLE);
		assert(test_zone);

		test_pcpu_zone = zone_create("test_zone_sysctl.pcpu", sizeof(uint64_t),
		    ZC_DESTRUCTIBLE | ZC_PERCPU);
		assert(test_pcpu_zone);

		for (idx = 0; idx < num_allocs; idx++) {
			allocs[idx] = zalloc(test_zone);
			assert(NULL != allocs[idx]);
			printf("alloc[%d] %p\n", idx, allocs[idx]);
		}
		for (idx = 0; idx < num_allocs; idx++) {
			zfree(test_zone, allocs[idx]);
		}
		assert(!zone_pva_is_null(test_zone->z_pageq_empty));

		kr = kmem_alloc(kernel_map, (vm_address_t *)&allocs_pcpu, PAGE_SIZE,
		    KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
		assert(kr == KERN_SUCCESS);

		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
			allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
			    Z_WAITOK | Z_ZERO);
			assert(NULL != allocs_pcpu[idx]);
		}
		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
			zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
		}
		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));

		printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
		    vm_page_wire_count, vm_page_free_count,
		    100L * phys_pages / zone_pages_wired_max);
		zone_gc(ZONE_GC_DRAIN);
		printf("vm_page_wire_count %d, vm_page_free_count %d, p to v %ld%%\n",
		    vm_page_wire_count, vm_page_free_count,
		    100L * phys_pages / zone_pages_wired_max);

		unsigned int allva = 0;

		zone_foreach(z) {
			zone_lock(z);
			allva += z->z_wired_cur;
			if (zone_pva_is_null(z->z_pageq_va)) {
				zone_unlock(z);
				continue;
			}
			unsigned count = 0;
			uint64_t size;
			zone_pva_t pg = z->z_pageq_va;
			struct zone_page_metadata *page_meta;
			while (pg.packed_address) {
				page_meta = zone_pva_to_meta(pg);
				count += z->z_percpu ? 1 : z->z_chunk_pages;
				if (page_meta->zm_chunk_len == ZM_SECONDARY_PAGE) {
					count -= page_meta->zm_page_index;
				}
				pg = page_meta->zm_page_next;
			}
			size = zone_size_wired(z);
			if (!size) {
				size = 1;
			}
			printf("%s%s: seq %d, res %d, %qd %%\n",
			    zone_heap_name(z), z->z_name, z->z_va_cur - z->z_wired_cur,
			    z->z_wired_cur, zone_size_allocated(z) * 100ULL / size);
			zone_unlock(z);
		}

		printf("total va: %d\n", allva);

		assert(zone_pva_is_null(test_zone->z_pageq_empty));
		assert(zone_pva_is_null(test_zone->z_pageq_partial));
		assert(!zone_pva_is_null(test_zone->z_pageq_va));
		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_empty));
		assert(zone_pva_is_null(test_pcpu_zone->z_pageq_partial));
		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_va));

		for (idx = 0; idx < num_allocs; idx++) {
			assert(0 == pmap_find_phys(kernel_pmap, (addr64_t)(uintptr_t) allocs[idx]));
		}

		/* make sure the zone is still usable after a GC */

		for (idx = 0; idx < num_allocs; idx++) {
			allocs[idx] = zalloc(test_zone);
			assert(allocs[idx]);
			printf("alloc[%d] %p\n", idx, allocs[idx]);
		}
		for (idx = 0; idx < num_allocs; idx++) {
			zfree(test_zone, allocs[idx]);
		}

		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
			allocs_pcpu[idx] = zalloc_percpu(test_pcpu_zone,
			    Z_WAITOK | Z_ZERO);
			assert(NULL != allocs_pcpu[idx]);
		}
		for (idx = 0; idx < PAGE_SIZE / sizeof(uint64_t); idx++) {
			zfree_percpu(test_pcpu_zone, allocs_pcpu[idx]);
		}

		assert(!zone_pva_is_null(test_pcpu_zone->z_pageq_empty));

		kmem_free(kernel_map, (vm_address_t)allocs_pcpu, PAGE_SIZE);

		zdestroy(test_zone);
		zdestroy(test_pcpu_zone);
	}
#endif /* !KASAN_CLASSIC */

	printf("zone_basic_test: Test passed\n");

	*out = 1;
out:
	os_atomic_store(&any_zone_test_running, false, relaxed);
	return rc;
}
SYSCTL_TEST_REGISTER(zone_basic_test, zone_basic_test_run);
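
/*
 * These tests are driven from userspace on DEBUG/DEVELOPMENT kernels;
 * assuming SYSCTL_TEST_REGISTER publishes them under the usual
 * debug.test node, a minimal caller would look like:
 *
 *	#include <sys/sysctl.h>
 *
 *	int64_t in = 1, out = 0;
 *	size_t outlen = sizeof(out);
 *	// out becomes 1 on success; EALREADY means another test is running
 *	sysctlbyname("debug.test.zone_basic_test", &out, &outlen,
 *	    &in, sizeof(in));
 */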

struct zone_stress_obj {
	TAILQ_ENTRY(zone_stress_obj) zso_link;
};

struct zone_stress_ctx {
	thread_t  zsc_leader;
	lck_mtx_t zsc_lock;
	zone_t    zsc_zone;
	uint64_t  zsc_end;
	uint32_t  zsc_workers;
};

static void
zone_stress_worker(void *arg, wait_result_t __unused wr)
{
	struct zone_stress_ctx *ctx = arg;
	bool leader = ctx->zsc_leader == current_thread();
	TAILQ_HEAD(zone_stress_head, zone_stress_obj) head = TAILQ_HEAD_INITIALIZER(head);
	struct zone_bool_gen bg = { };
	struct zone_stress_obj *obj;
	uint32_t allocs = 0;

	random_bool_init(&bg.zbg_bg);

	do {
		for (int i = 0; i < 2000; i++) {
			uint32_t what = random_bool_gen_bits(&bg.zbg_bg,
			    bg.zbg_entropy, ZONE_ENTROPY_CNT, 3);
			switch (what) {
			case 0:
			case 1:
				if (allocs < 10000) {
					obj = zalloc(ctx->zsc_zone);
					TAILQ_INSERT_HEAD(&head, obj, zso_link);
					allocs++;
				}
				break;
			case 2:
			case 3:
				if (allocs < 10000) {
					obj = zalloc(ctx->zsc_zone);
					TAILQ_INSERT_TAIL(&head, obj, zso_link);
					allocs++;
				}
				break;
			case 4:
				if (leader) {
					zone_gc(ZONE_GC_DRAIN);
				}
				break;
			case 5:
			case 6:
				if (!TAILQ_EMPTY(&head)) {
					obj = TAILQ_FIRST(&head);
					TAILQ_REMOVE(&head, obj, zso_link);
					zfree(ctx->zsc_zone, obj);
					allocs--;
				}
				break;
			case 7:
				if (!TAILQ_EMPTY(&head)) {
					obj = TAILQ_LAST(&head, zone_stress_head);
					TAILQ_REMOVE(&head, obj, zso_link);
					zfree(ctx->zsc_zone, obj);
					allocs--;
				}
				break;
			}
		}
	} while (mach_absolute_time() < ctx->zsc_end);

	while (!TAILQ_EMPTY(&head)) {
		obj = TAILQ_FIRST(&head);
		TAILQ_REMOVE(&head, obj, zso_link);
		zfree(ctx->zsc_zone, obj);
	}

	lck_mtx_lock(&ctx->zsc_lock);
	if (--ctx->zsc_workers == 0) {
		thread_wakeup(ctx);
	} else if (leader) {
		while (ctx->zsc_workers) {
			lck_mtx_sleep(&ctx->zsc_lock, LCK_SLEEP_DEFAULT, ctx,
			    THREAD_UNINT);
		}
	}
	lck_mtx_unlock(&ctx->zsc_lock);

	if (!leader) {
		thread_terminate_self();
		__builtin_unreachable();
	}
}
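
/*
 * Note on the exit path above: ctx lives on the leader's stack in
 * zone_stress_test_run(), so this is a last-one-out rendezvous. The
 * final thread to decrement zsc_workers issues the wakeup, and the
 * leader sleeps until the count drains to zero, guaranteeing no worker
 * can still touch ctx (or the zone about to be zdestroy()ed) once the
 * leader returns. Non-leader threads then self-terminate.
 */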

static int
zone_stress_test_run(__unused int64_t in, int64_t *out)
{
	struct zone_stress_ctx ctx = {
		.zsc_leader  = current_thread(),
		.zsc_workers = 3,
	};
	kern_return_t kr;
	thread_t th;

	if (os_atomic_xchg(&any_zone_test_running, true, relaxed)) {
		printf("zone_stress_test: Test already running.\n");
		return EALREADY;
	}

	lck_mtx_init(&ctx.zsc_lock, &zone_locks_grp, LCK_ATTR_NULL);
	ctx.zsc_zone = zone_create("test_zone_344", 344,
	    ZC_DESTRUCTIBLE | ZC_NOCACHING);
	assert(ctx.zsc_zone->z_chunk_pages > 1);

	clock_interval_to_deadline(5, NSEC_PER_SEC, &ctx.zsc_end);

	printf("zone_stress_test: Starting (leader %p)\n", current_thread());

	os_atomic_inc(&zalloc_simulate_vm_pressure, relaxed);

	for (uint32_t i = 1; i < ctx.zsc_workers; i++) {
		kr = kernel_thread_start_priority(zone_stress_worker, &ctx,
		    BASEPRI_DEFAULT, &th);
		if (kr == KERN_SUCCESS) {
			printf("zone_stress_test: thread %d: %p\n", i, th);
			thread_deallocate(th);
		} else {
			ctx.zsc_workers--;
		}
	}

	zone_stress_worker(&ctx, 0);

	lck_mtx_destroy(&ctx.zsc_lock, &zone_locks_grp);

	zdestroy(ctx.zsc_zone);

	printf("zone_stress_test: Done\n");

	*out = 1;
	os_atomic_dec(&zalloc_simulate_vm_pressure, relaxed);
	os_atomic_store(&any_zone_test_running, false, relaxed);
	return 0;
}
SYSCTL_TEST_REGISTER(zone_stress_test, zone_stress_test_run);

/*
 * Routines to test that zone garbage collection and zone replenish threads
 * running at the same time don't cause problems.
 */

static int
zone_gc_replenish_test(__unused int64_t in, int64_t *out)
{
	zone_gc(ZONE_GC_DRAIN);
	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(zone_gc_replenish_test, zone_gc_replenish_test);

static int
zone_alloc_replenish_test(__unused int64_t in, int64_t *out)
{
	zone_t z = vm_map_entry_zone;
	struct data { struct data *next; } *node, *list = NULL;

	if (z == NULL) {
		printf("Couldn't find a replenish zone\n");
		return EIO;
	}

	/* big enough to go past replenishment */
	for (uint32_t i = 0; i < 10 * z->z_elems_rsv; ++i) {
		node = zalloc(z);
		node->next = list;
		list = node;
	}

	/* release the memory we allocated */
	while (list != NULL) {
		node = list;
		list = list->next;
		zfree(z, node);
	}

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(zone_alloc_replenish_test, zone_alloc_replenish_test);

#endif /* DEBUG || DEVELOPMENT */