xref: /xnu-8020.121.3/osfmk/kern/kalloc.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	kern/kalloc.c
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	1985
62  *
63  *	General kernel memory allocator.  This allocator is designed
64  *	to be used by the kernel to manage dynamic memory fast.
65  */
66 
67 #include <mach/boolean.h>
68 #include <mach/sdt.h>
69 #include <mach/machine/vm_types.h>
70 #include <mach/vm_param.h>
71 #include <kern/misc_protos.h>
72 #include <kern/counter.h>
73 #include <kern/zalloc_internal.h>
74 #include <kern/kalloc.h>
75 #include <kern/ledger.h>
76 #include <kern/backtrace.h>
77 #include <vm/vm_kern.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_map_internal.h>
80 #include <sys/kdebug.h>
81 
82 #include <san/kasan.h>
83 #include <libkern/section_keywords.h>
84 #include <libkern/prelink.h>
85 
/*
 * Per-CPU scalable counters for "large" kalloc allocations, i.e. those
 * served directly by the VM rather than a sized zone (allocations bigger
 * than KHEAP_MAX_SIZE, see the heap comment below).
 * NOTE(review): the increment sites are not in this chunk; presumably
 * these track the number and total bytes of such allocations — confirm
 * at the update sites.
 */
SCALABLE_COUNTER_DEFINE(kalloc_large_count);
SCALABLE_COUNTER_DEFINE(kalloc_large_total);
88 
89 #pragma mark initialization
90 
91 /*
92  * All allocations of size less than KHEAP_MAX_SIZE are rounded to the next nearest
93  * sized zone.  This allocator is built on top of the zone allocator.  A zone
94  * is created for each potential size that we are willing to get in small
95  * blocks.
96  *
97  * Allocations of size greater than KHEAP_MAX_SIZE, are allocated from the VM.
98  */
99 
100 /*
101  * The k_zone_cfg table defines the configuration of zones on various platforms.
102  * The currently defined list of zones and their per-CPU caching behavior are as
103  * follows
104  *
105  *     X:zone not present
106  *     N:zone present no cpu-caching
107  *     Y:zone present with cpu-caching
108  *
109  * Size       macOS(64-bit)       embedded(32-bit)    embedded(64-bit)
110  *--------    ----------------    ----------------    ----------------
111  *
112  * 8          X                    Y                   X
113  * 16         Y                    Y                   Y
114  * 24         X                    Y                   X
115  * 32         Y                    Y                   Y
116  * 40         X                    Y                   X
117  * 48         Y                    Y                   Y
118  * 64         Y                    Y                   Y
119  * 72         X                    Y                   X
120  * 80         Y                    X                   Y
121  * 88         X                    Y                   X
122  * 96         Y                    X                   Y
123  * 112        X                    Y                   X
124  * 128        Y                    Y                   Y
125  * 160        Y                    X                   Y
126  * 192        Y                    Y                   Y
127  * 224        Y                    X                   Y
128  * 256        Y                    Y                   Y
129  * 288        Y                    Y                   Y
130  * 368        Y                    X                   Y
131  * 384        X                    Y                   X
132  * 400        Y                    X                   Y
133  * 440        X                    Y                   X
134  * 512        Y                    Y                   Y
135  * 576        Y                    N                   N
136  * 768        Y                    N                   N
137  * 1024       Y                    Y                   Y
138  * 1152       N                    N                   N
139  * 1280       N                    N                   N
140  * 1536       X                    N                   X
141  * 1664       N                    X                   N
142  * 2048       Y                    N                   N
143  * 2128       X                    N                   X
144  * 3072       X                    N                   X
145  * 4096       Y                    N                   N
146  * 6144       N                    N                   N
147  * 8192       Y                    N                   N
148  * 12288      N                    X                   X
149  * 16384      N                    X                   N
150  * 32768      X                    X                   N
151  *
152  */
/*
 * One entry of a kalloc heap's sized-zone configuration table.
 * kalloc_zones_init() may rewrite kzc_size/kzc_name after zone creation
 * if the zone layer rounds the element size up (see the elem_size
 * write-back there), so this data is only read-only *late*.
 */
struct kalloc_zone_cfg {
	bool kzc_caching;       /* create the zone with ZC_CACHING (per-CPU caches) */
	uint32_t kzc_size;      /* element size in bytes for this zone */
	char kzc_name[MAX_ZONE_NAME];   /* zone name, "kalloc.<size>" */
};

/*
 * Table-entry constructor used to populate the config arrays below;
 * #undef'd once the tables are built.
 */
#define KZC_ENTRY(SIZE, caching) { \
	.kzc_caching = (caching), \
	.kzc_size = (SIZE), \
	.kzc_name = "kalloc." #SIZE \
}
/*
 * Per-platform sized-zone table for the default heap (see the matrix in
 * the comment above for which sizes exist where).
 * Invariants relied upon elsewhere in this file:
 *   - entries are sorted ascending by size (the DLUT build in
 *     kalloc_zones_init() walks the table with a "< size" scan);
 *   - the last entry equals KHEAP_MAX_SIZE (asserted in
 *     kalloc_zones_init()).
 */
static SECURITY_READ_ONLY_LATE(struct kalloc_zone_cfg) k_zone_cfg[] = {
#if !defined(XNU_TARGET_OS_OSX)

#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4
	/* Zone config for embedded 64-bit platforms */
	KZC_ENTRY(16, true),
	KZC_ENTRY(32, true),
	KZC_ENTRY(48, true),
	KZC_ENTRY(64, true),
	KZC_ENTRY(80, true),
	KZC_ENTRY(96, true),
	KZC_ENTRY(128, true),
	KZC_ENTRY(160, true),
	KZC_ENTRY(192, true),
	KZC_ENTRY(224, true),
	KZC_ENTRY(256, true),
	KZC_ENTRY(288, true),
	KZC_ENTRY(368, true),
	KZC_ENTRY(400, true),
	KZC_ENTRY(512, true),
	KZC_ENTRY(576, false),
	KZC_ENTRY(768, false),
	KZC_ENTRY(1024, true),
	KZC_ENTRY(1152, false),
	KZC_ENTRY(1280, false),
	KZC_ENTRY(1664, false),
	KZC_ENTRY(2048, false),
	KZC_ENTRY(4096, false),
	KZC_ENTRY(6144, false),
	KZC_ENTRY(8192, false),
	KZC_ENTRY(16384, false),
	KZC_ENTRY(32768, false),

#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3
	/* Zone config for embedded 32-bit platforms */
	KZC_ENTRY(8, true),
	KZC_ENTRY(16, true),
	KZC_ENTRY(24, true),
	KZC_ENTRY(32, true),
	KZC_ENTRY(40, true),
	KZC_ENTRY(48, true),
	KZC_ENTRY(64, true),
	KZC_ENTRY(72, true),
	KZC_ENTRY(88, true),
	KZC_ENTRY(112, true),
	KZC_ENTRY(128, true),
	KZC_ENTRY(192, true),
	KZC_ENTRY(256, true),
	KZC_ENTRY(288, true),
	KZC_ENTRY(384, true),
	KZC_ENTRY(440, true),
	KZC_ENTRY(512, true),
	KZC_ENTRY(576, false),
	KZC_ENTRY(768, false),
	KZC_ENTRY(1024, true),
	KZC_ENTRY(1152, false),
	KZC_ENTRY(1280, false),
	KZC_ENTRY(1536, false),
	KZC_ENTRY(2048, false),
	KZC_ENTRY(2128, false),
	KZC_ENTRY(3072, false),
	KZC_ENTRY(4096, false),
	KZC_ENTRY(6144, false),
	KZC_ENTRY(8192, false),
	/* To limit internal fragmentation, only add the following zones if the
	 * page size is greater than 4K.
	 * Note that we use ARM_PGBYTES here (instead of one of the VM macros)
	 * since it's guaranteed to be a compile time constant.
	 */
#if ARM_PGBYTES > 4096
	KZC_ENTRY(16384, false),
	KZC_ENTRY(32768, false),
#endif /* ARM_PGBYTES > 4096 */

#else
#error missing or invalid zone size parameters for kalloc
#endif

#else /* !defined(XNU_TARGET_OS_OSX) */

	/* Zone config for macOS 64-bit platforms */
	KZC_ENTRY(16, true),
	KZC_ENTRY(32, true),
	KZC_ENTRY(48, true),
	KZC_ENTRY(64, true),
	KZC_ENTRY(80, true),
	KZC_ENTRY(96, true),
	KZC_ENTRY(128, true),
	KZC_ENTRY(160, true),
	KZC_ENTRY(192, true),
	KZC_ENTRY(224, true),
	KZC_ENTRY(256, true),
	KZC_ENTRY(288, true),
	KZC_ENTRY(368, true),
	KZC_ENTRY(400, true),
	KZC_ENTRY(512, true),
	KZC_ENTRY(576, true),
	KZC_ENTRY(768, true),
	KZC_ENTRY(1024, true),
	KZC_ENTRY(1152, false),
	KZC_ENTRY(1280, false),
	KZC_ENTRY(1664, false),
	KZC_ENTRY(2048, true),
	KZC_ENTRY(4096, true),
	KZC_ENTRY(6144, false),
	KZC_ENTRY(8192, true),
#if __x86_64__
	KZC_ENTRY(12288, false),
#endif /* __x86_64__ */
	KZC_ENTRY(16384, false),
#if __arm64__
	KZC_ENTRY(32768, false),
#endif
#endif /* !defined(XNU_TARGET_OS_OSX) */
};
279 
280 
/*
 * Sized-zone table for the "bag of bytes" data-buffers heap
 * (KHEAP_DATA_BUFFERS).  Deliberately coarser than k_zone_cfg: data-only
 * allocations don't need fine-grained size segregation.
 * Same invariants as k_zone_cfg: ascending sizes, last entry must be
 * KHEAP_MAX_SIZE (asserted in kalloc_zones_init()).
 */
static SECURITY_READ_ONLY_LATE(struct kalloc_zone_cfg) k_zone_cfg_data[] = {
	KZC_ENTRY(16, true),
	KZC_ENTRY(32, true),
	KZC_ENTRY(48, true),
	KZC_ENTRY(64, true),
	KZC_ENTRY(96, true),
	KZC_ENTRY(128, true),
	KZC_ENTRY(160, true),
	KZC_ENTRY(192, true),
	KZC_ENTRY(256, true),
	KZC_ENTRY(368, true),
	KZC_ENTRY(512, true),
	KZC_ENTRY(768, false),
	KZC_ENTRY(1024, true),
	KZC_ENTRY(1152, false),
	KZC_ENTRY(1664, false),
	KZC_ENTRY(2048, false),
	KZC_ENTRY(4096, false),
	KZC_ENTRY(6144, false),
	KZC_ENTRY(8192, false),
	KZC_ENTRY(16384, false),
#if __arm64__
	KZC_ENTRY(32768, false),
#endif
};
#undef KZC_ENTRY

/* Number of entries in a k_zone_cfg-style table (array only, not pointers). */
#define MAX_K_ZONE(kzc) (uint32_t)(sizeof(kzc) / sizeof(kzc[0]))

/*
 * Many kalloc() allocations are for small structures containing a few
 * pointers and longs - the dlut[] direct lookup table, indexed by
 * size normalized to the minimum alignment, finds the right zone index
 * for them in one dereference.
 */

/* Size -> DLUT slot: round up to KALLOC_MINALIGN granules. */
#define INDEX_ZDLUT(size)  (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
/* Largest size servable through the DLUT; bigger sizes scan the cfg table. */
#define MAX_SIZE_ZDLUT     ((KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN)

/* Backing zone pointers, filled in by kalloc_zones_init(). */
static SECURITY_READ_ONLY_LATE(zone_t) k_zone_default[MAX_K_ZONE(k_zone_cfg)];
static SECURITY_READ_ONLY_LATE(zone_t) k_zone_data[MAX_K_ZONE(k_zone_cfg_data)];

#if VM_TAG_SIZECLASSES
/* Tag-based accounting needs one size class slot per kalloc zone. */
static_assert(VM_TAG_SIZECLASSES >= MAX_K_ZONE(k_zone_cfg));
#endif

/* Heap-id -> name prefix used when composing zone/view names. */
const char * const kalloc_heap_names[] = {
	[KHEAP_ID_NONE]          = "",
	[KHEAP_ID_DEFAULT]       = "default.",
	[KHEAP_ID_DATA_BUFFERS]  = "data.",
	[KHEAP_ID_KT_VAR]        = "",
};
333 
/*
 * Default kalloc heap configuration: ties the k_zone_cfg table to the
 * k_zone_default zone array under KHEAP_ID_DEFAULT, and exposes it to
 * the rest of the kernel as KHEAP_DEFAULT.
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_default = {
	.cfg         = k_zone_cfg,
	.heap_id     = KHEAP_ID_DEFAULT,
	.k_zone      = k_zone_default,
	.max_k_zone  = MAX_K_ZONE(k_zone_cfg)
};
/* Declared as a 1-element array so KHEAP_DEFAULT behaves as a pointer. */
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DEFAULT[1] = {
	{
		.kh_zones    = &kalloc_zones_default,
		.kh_name     = "default.",
		.kh_heap_id  = KHEAP_ID_DEFAULT,
		.kh_tag      = VM_KERN_MEMORY_KALLOC
	}
};
351 
352 
/*
 * Bag of bytes heap configuration: pointer-free ("data only")
 * allocations land here, backed by k_zone_cfg_data / k_zone_data under
 * KHEAP_ID_DATA_BUFFERS, exposed as KHEAP_DATA_BUFFERS.
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_data = {
	.cfg         = k_zone_cfg_data,
	.heap_id     = KHEAP_ID_DATA_BUFFERS,
	.k_zone      = k_zone_data,
	.max_k_zone  = MAX_K_ZONE(k_zone_cfg_data)
};
/* 1-element array so KHEAP_DATA_BUFFERS behaves as a pointer. */
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DATA_BUFFERS[1] = {
	{
		.kh_zones    = &kalloc_zones_data,
		.kh_name     = "data.",
		.kh_heap_id  = KHEAP_ID_DATA_BUFFERS,
		.kh_tag      = VM_KERN_MEMORY_KALLOC_DATA,
	}
};
370 
/*
 * Configuration of variable kalloc type heaps.  The per-heap zone ranges
 * in kalloc_type_heap_array are populated later at startup (empty
 * initializer here); KHEAP_KT_VAR is the public handle.  Note it has no
 * kh_zones: variable kalloc_type resolves zones through
 * kalloc_type_heap_array instead.
 */
SECURITY_READ_ONLY_LATE(struct kt_heap_zones)
kalloc_type_heap_array[KT_VAR_MAX_HEAPS] = {};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_KT_VAR[1] = {
	{
		.kh_name     = "kalloc.type.var",
		.kh_heap_id  = KHEAP_ID_KT_VAR,
		.kh_tag      = VM_KERN_MEMORY_KALLOC_TYPE
	}
};
383 
/*
 * Initialize kalloc heap: Create zones, generate direct lookup table and
 * do a quick test on lookups
 *
 * For every entry of the heap's config table, create the backing zone
 * (tagging it with the heap id), write any size rounded up by the zone
 * layer back into the config, then build the per-heap DLUT and record
 * k_zindex_start, the first index to scan for sizes past the DLUT.
 */
__startup_func
static void
kalloc_zones_init(struct kalloc_heap *kheap)
{
	struct kheap_zones *zones = kheap->kh_zones;
	struct kalloc_zone_cfg *cfg = zones->cfg;
	zone_t *k_zone = zones->k_zone;
	vm_size_t size;

	/*
	 * Allocate a zone for each size we are going to handle.
	 */
	assert(zones->cfg[zones->max_k_zone - 1].kzc_size == KHEAP_MAX_SIZE);

	for (uint32_t i = 0; i < zones->max_k_zone &&
	    (size = cfg[i].kzc_size) <= KHEAP_MAX_SIZE; i++) {
		zone_create_flags_t flags = ZC_KASAN_NOREDZONE |
		    ZC_KASAN_NOQUARANTINE | ZC_KALLOC_HEAP | ZC_PGZ_USE_GUARDS;
		if (cfg[i].kzc_caching) {
			flags |= ZC_CACHING;
		}

		/* The block runs at creation time and brands the zone
		 * with this heap's id in the security array. */
		k_zone[i] = zone_create_ext(cfg[i].kzc_name, size, flags,
		    ZONE_ID_ANY, ^(zone_t z){
			zone_security_array[zone_index(z)].z_kheap_id = (uint8_t)zones->heap_id;
		});
		/*
		 * Set the updated elem size back to the config
		 * (the zone layer may round sizes up); rename the zone
		 * config entry to match the effective size.
		 */
		uint32_t elem_size = k_zone[i]->z_elem_size;
		if (cfg[i].kzc_size != elem_size) {
			cfg[i].kzc_size = elem_size;
			snprintf(cfg[i].kzc_name, MAX_ZONE_NAME, "kalloc.%u", elem_size);
		}
	}

	/*
	 * Count all the "raw" views for zones in the heap.
	 */
	zone_view_count += zones->max_k_zone;

	/*
	 * Build the Direct LookUp Table for small allocations
	 * As k_zone_cfg is shared between the heaps the
	 * Direct LookUp Table is also shared and doesn't need to
	 * be rebuilt per heap.
	 *
	 * The loop intentionally runs one past the table (i ==
	 * KALLOC_DLUT_SIZE): that extra pass computes k_zindex_start,
	 * the first cfg index whose zone size exceeds the DLUT range.
	 */
	size = 0;
	for (int i = 0; i <= KALLOC_DLUT_SIZE; i++, size += KALLOC_MINALIGN) {
		uint8_t zindex = 0;

		/* relies on cfg[] sizes being sorted ascending */
		while ((vm_size_t)(cfg[zindex].kzc_size) < size) {
			zindex++;
		}

		if (i == KALLOC_DLUT_SIZE) {
			zones->k_zindex_start = zindex;
			break;
		}
		zones->dlut[i] = zindex;
	}
}
450 
/*
 * Startup entry point (STARTUP, rank third in the ZALLOC subsystem):
 * brings up the default heap, and either brings up the separate data
 * heap or — when the user-data submap is configured out — aliases
 * KHEAP_DATA_BUFFERS to the default heap so callers see one heap.
 */
__startup_func
static void
kalloc_init(void)
{
	/* Initialize kalloc default heap */
	kalloc_zones_init(KHEAP_DEFAULT);
	static_assert(KHEAP_MAX_SIZE >= KALLOC_SAFE_ALLOC_SIZE);

#if ZSECURITY_CONFIG(SUBMAP_USER_DATA)
	/* Initialize kalloc data buffers heap */
	kalloc_zones_init(KHEAP_DATA_BUFFERS);
#else
	/* no separate data submap: data heap is the default heap */
	*KHEAP_DATA_BUFFERS = *KHEAP_DEFAULT;
#endif
}
STARTUP(ZALLOC, STARTUP_RANK_THIRD, kalloc_init);
467 
/* Alignment of kext ranges handled below (64-byte granules). */
#define KEXT_ALIGN_SHIFT           6
#define KEXT_ALIGN_BYTES           (1<< KEXT_ALIGN_SHIFT)
#define KEXT_ALIGN_MASK            (KEXT_ALIGN_BYTES-1)
/* Size of the startup scratch buffer (kt_buffer): 256KiB. */
#define kt_scratch_size            (256ul << 10)
/* Mach-O section name holding views of the given variant. */
#define KALLOC_TYPE_SECTION(type) \
	(type == KTV_FIXED? "__kalloc_type": "__kalloc_var")

/*
 * Enum to specify the kalloc_type variant being used.
 */
__options_decl(kalloc_type_variant_t, uint16_t, {
	KTV_FIXED     = 0x0001,
	KTV_VAR       = 0x0002,
});

/*
 * Macros that generate the appropriate kalloc_type variant (i.e fixed or
 * variable) of the desired variable/function.
 * kalloc_type_var(type, v)  -> kalloc_type_v_fixed or kalloc_type_v_var
 *                              (as a vm_offset_t)
 * kalloc_type_func(type, f) -> calls kalloc_type_f_fixed or _var
 */
#define kalloc_type_var(type, var)              \
	((type) == KTV_FIXED?                       \
	(vm_offset_t) kalloc_type_##var##_fixed:    \
	(vm_offset_t) kalloc_type_##var##_var)
#define kalloc_type_func(type, func, ...)       \
	((type) == KTV_FIXED?                       \
	kalloc_type_##func##_fixed(__VA_ARGS__):    \
	kalloc_type_##func##_var(__VA_ARGS__))

/*
 * Fields of kalloc_type views that are required to make a redirection
 * decision i.e VM or data-only
 */
struct kalloc_type_atom {
	kalloc_type_flags_t  kt_flags;   /* view flags (KT_CHANGED, KT_DATA_ONLY, ...) */
	vm_size_t            kt_size;    /* total type size (hdr + type for var views) */
	const char          *kt_sig_hdr;  /* header signature; NULL for fixed views */
	const char          *kt_sig_type; /* type signature string of granule digits */
};
506 
/* Boot-args: "kt" option flags and "kt_var_heaps" variable-heap budget. */
TUNABLE(kalloc_type_options_t, kt_options, "kt", KT_OPTIONS_LOOSE_FREE);
TUNABLE(uint16_t, kt_var_heaps, "kt_var_heaps",
    ZSECURITY_CONFIG_KT_VAR_BUDGET);
/*
 * Section start/end for fixed kalloc_type views
 */
extern struct kalloc_type_view kalloc_type_sec_start_fixed[]
__SECTION_START_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_type");

extern struct kalloc_type_view kalloc_type_sec_end_fixed[]
__SECTION_END_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_type");

/*
 * Section start/end for variable kalloc_type views
 */
extern struct kalloc_type_var_view kalloc_type_sec_start_var[]
__SECTION_START_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_var");

extern struct kalloc_type_var_view kalloc_type_sec_end_var[]
__SECTION_END_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_var");

/* A view pointer of either variant, used in the shared scratch buffer. */
typedef union kalloc_type_views {
	struct kalloc_type_view     *ktv_fixed;
	struct kalloc_type_var_view *ktv_var;
} kalloc_type_views_t;

/* Startup-only scratch buffer of collected views and its capacity. */
__startup_data
static kalloc_type_views_t *kt_buffer = NULL;
__startup_data
static uint64_t kt_count;

_Static_assert(__builtin_popcount(KT_SUMMARY_MASK_TYPE_BITS) == (KT_GRANULE_MAX + 1),
    "KT_SUMMARY_MASK_TYPE_BITS doesn't match KT_GRANULE_MAX");

#if DEBUG || DEVELOPMENT
/*
 * For use by lldb to iterate over kalloc types
 */
uint64_t num_kt_sizeclass = MAX_K_ZONE(k_zone_cfg);
SECURITY_READ_ONLY_LATE(zone_t) kalloc_type_zarray[MAX_K_ZONE(k_zone_cfg)];
#endif

static_assert(KT_VM_TAG_MASK == Z_VM_TAG_MASK, "validate vm tag mask");
static_assert(MAX_K_ZONE(k_zone_cfg) < KALLOC_TYPE_IDX_MASK,
    "validate idx mask");
/* qsort routines */
typedef int (*cmpfunc_t)(const void *a, const void *b);
extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
555 
556 static uint32_t
kalloc_idx_for_size(kalloc_heap_t kheap,uint32_t size)557 kalloc_idx_for_size(kalloc_heap_t kheap, uint32_t size)
558 {
559 	struct kheap_zones *khz = kheap->kh_zones;
560 	uint16_t idx;
561 
562 	assert(size <= KHEAP_MAX_SIZE);
563 
564 	if (size < MAX_SIZE_ZDLUT) {
565 		idx = khz->dlut[INDEX_ZDLUT(size)];
566 		return kalloc_type_set_idx(size, idx);
567 	}
568 
569 	idx = khz->k_zindex_start;
570 	while (khz->cfg[idx].kzc_size < size) {
571 		idx++;
572 	}
573 	return kalloc_type_set_idx(size, idx);
574 }
575 
576 static zone_t
kalloc_heap_zone_for_idx(kalloc_heap_t kheap,uint16_t zindex)577 kalloc_heap_zone_for_idx(kalloc_heap_t kheap, uint16_t zindex)
578 {
579 	struct kheap_zones *khz = kheap->kh_zones;
580 	return khz->k_zone[zindex];
581 }
582 
/*
 * Point every fixed kalloc_type view in [cur, end) at zone `z` and link
 * it into the zone's view list.  Views marked KT_SLID had their string
 * pointers slid earlier (see kalloc_type_get_atom_fixed) and are
 * un-slid here before the kernel slide is meaningful to readers.
 * Views requesting private accounting get their own per-CPU stats;
 * everyone else shares the zone's stats.
 */
static void
kalloc_type_assign_zone_fixed(kalloc_type_view_t *cur, kalloc_type_view_t *end,
    zone_t z)
{
	/*
	 * Assign the zone created for every kalloc_type_view
	 * of the same unique signature
	 */
	bool need_raw_view = false;
	while (cur < end) {
		kalloc_type_view_t kt = *cur;
		struct zone_view *zv = &kt->kt_zv;
		zv->zv_zone = z;
		kalloc_type_flags_t kt_flags = kt->kt_flags;

		if (kt_flags & KT_SLID) {
			/* undo the temporary slide applied during parsing */
			kt->kt_signature -= vm_kernel_slide;
			kt->kt_zv.zv_name -= vm_kernel_slide;
		}

		if ((kt_flags & KT_PRIV_ACCT) ||
		    ((kt_options & KT_OPTIONS_ACCT) && (kt_flags & KT_DEFAULT))) {
			/* private accounting: dedicated per-CPU stats */
			zv->zv_stats = zalloc_percpu_permanent_type(
				struct zone_stats);
			need_raw_view = true;
			zone_view_count += 1;
		} else {
			zv->zv_stats = z->z_stats;
		}
		/* push the view onto the zone's singly-linked view list */
		zv->zv_next = (zone_view_t) z->z_views;
		zv->zv_zone->z_views = (zone_view_t) kt;
		cur++;
	}
	/* one extra raw view for the zone when any view got private stats */
	if (need_raw_view) {
		zone_view_count += 1;
	}
}
620 
/*
 * Attach every variable kalloc_type view in [cur, end) to the variable
 * heap `heap_idx`: record the heap's starting zone id in the view,
 * un-slide KT_SLID string pointers, optionally allocate private per-CPU
 * stats, and push the view onto the heap's view list.
 */
__startup_func
static void
kalloc_type_assign_zone_var(kalloc_type_var_view_t *cur,
    kalloc_type_var_view_t *end, uint32_t heap_idx)
{
	struct kt_heap_zones *cfg = &kalloc_type_heap_array[heap_idx];
	while (cur < end) {
		kalloc_type_var_view_t kt = *cur;
		zone_id_t zid = cfg->kh_zstart;
		kt->kt_heap_start = zid;
		kalloc_type_flags_t kt_flags = kt->kt_flags;

		if (kt_flags & KT_SLID) {
			/* undo the temporary slide applied during parsing;
			 * kt_sig_hdr may legitimately be NULL */
			if (kt->kt_sig_hdr) {
				kt->kt_sig_hdr -= vm_kernel_slide;
			}
			kt->kt_sig_type -= vm_kernel_slide;
			kt->kt_name -= vm_kernel_slide;
		}

		if ((kt_flags & KT_PRIV_ACCT) ||
		    ((kt_options & KT_OPTIONS_ACCT) && (kt_flags & KT_DEFAULT))) {
			/* private accounting: dedicated per-CPU stats */
			kt->kt_stats = zalloc_percpu_permanent_type(struct zone_stats);
			zone_view_count += 1;
		}

		/* push onto the heap's singly-linked view list */
		kt->kt_next = (zone_view_t) cfg->views;
		cfg->views = kt;
		cur++;
	}
}
652 
653 static inline char
kalloc_type_granule_to_char(kt_granule_t granule)654 kalloc_type_granule_to_char(kt_granule_t granule)
655 {
656 	return (char) (granule + '0');
657 }
658 
659 static bool
kalloc_type_sig_check(const char * sig,const kt_granule_t gr)660 kalloc_type_sig_check(const char *sig, const kt_granule_t gr)
661 {
662 	while (*sig == kalloc_type_granule_to_char(gr & KT_GRANULE_PADDING) ||
663 	    *sig == kalloc_type_granule_to_char(gr & KT_GRANULE_POINTER) ||
664 	    *sig == kalloc_type_granule_to_char(gr & KT_GRANULE_DATA) ||
665 	    *sig == kalloc_type_granule_to_char(gr & KT_GRANULE_PAC)) {
666 		sig++;
667 	}
668 	return *sig == '\0';
669 }
670 
671 /*
672  * Check if signature of type is made up of only the specified granules
673  */
674 static bool
kalloc_type_check(struct kalloc_type_atom kt_atom,kalloc_type_flags_t change_flag,kalloc_type_flags_t check_flag,const kt_granule_t check_gr)675 kalloc_type_check(struct kalloc_type_atom kt_atom,
676     kalloc_type_flags_t change_flag, kalloc_type_flags_t check_flag,
677     const kt_granule_t check_gr)
678 {
679 	kalloc_type_flags_t flags = kt_atom.kt_flags;
680 	if (flags & change_flag) {
681 		return flags & check_flag;
682 	} else {
683 		bool kt_hdr_check = kt_atom.kt_sig_hdr?
684 		    kalloc_type_sig_check(kt_atom.kt_sig_hdr, check_gr): true;
685 		bool kt_type_check = kalloc_type_sig_check(kt_atom.kt_sig_type, check_gr);
686 		return kt_hdr_check && kt_type_check;
687 	}
688 }
689 
690 /*
691  * Check if signature of type is made up of only data and padding
692  */
693 static bool
kalloc_type_is_data(struct kalloc_type_atom kt_atom)694 kalloc_type_is_data(struct kalloc_type_atom kt_atom)
695 {
696 	return kalloc_type_check(kt_atom, KT_CHANGED, KT_DATA_ONLY,
697 	           KT_GRANULE_DATA);
698 }
699 
700 /*
701  * Use this version after early boot as sites from kexts that haven't yet
702  * been recompiled with the latest kalloc type defintions, have been fixed
703  * up.
704  */
705 static bool
kalloc_type_is_data_fast(struct kalloc_type_atom kt_atom)706 kalloc_type_is_data_fast(struct kalloc_type_atom kt_atom)
707 {
708 	return kt_atom.kt_flags & KT_DATA_ONLY;
709 }
710 
711 /*
712  * Check if signature of type is made up of only pointers
713  */
714 static bool
kalloc_type_is_ptr_array(struct kalloc_type_atom kt_atom)715 kalloc_type_is_ptr_array(struct kalloc_type_atom kt_atom)
716 {
717 	return kalloc_type_check(kt_atom, KT_CHANGED2, KT_PTR_ARRAY,
718 	           KT_GRANULE_POINTER | KT_GRANULE_PAC);
719 }
720 
721 static bool
kalloc_type_from_vm(struct kalloc_type_atom kt_atom)722 kalloc_type_from_vm(struct kalloc_type_atom kt_atom)
723 {
724 	kalloc_type_flags_t flags = kt_atom.kt_flags;
725 	if (flags & KT_CHANGED) {
726 		return flags & KT_VM;
727 	} else {
728 		return kt_atom.kt_size > KHEAP_MAX_SIZE;
729 	}
730 }
731 
732 __startup_func
733 static inline vm_size_t
kalloc_type_view_sz_fixed(void)734 kalloc_type_view_sz_fixed(void)
735 {
736 	return sizeof(struct kalloc_type_view);
737 }
738 
739 __startup_func
740 static inline vm_size_t
kalloc_type_view_sz_var(void)741 kalloc_type_view_sz_var(void)
742 {
743 	return sizeof(struct kalloc_type_var_view);
744 }
745 
746 __startup_func
747 static inline uint64_t
kalloc_type_view_count(kalloc_type_variant_t type,vm_offset_t start,vm_offset_t end)748 kalloc_type_view_count(kalloc_type_variant_t type, vm_offset_t start,
749     vm_offset_t end)
750 {
751 	return (end - start) / kalloc_type_func(type, view_sz);
752 }
753 
/*
 * Extract the redirection-relevant fields of a fixed kalloc_type view
 * into a kalloc_type_atom.  When `slide` is set, the view's string
 * pointers are slid in place and KT_SLID recorded, so that
 * kalloc_type_assign_zone_fixed() can un-slide them later.  Note the
 * signature pointer is captured *after* the optional slide.
 */
static inline struct kalloc_type_atom
kalloc_type_get_atom_fixed(vm_offset_t addr, bool slide)
{
	struct kalloc_type_atom kt_atom = {};
	kalloc_type_view_t ktv = (struct kalloc_type_view *) addr;
	kt_atom.kt_flags = ktv->kt_flags;
	kt_atom.kt_size = ktv->kt_size;
	if (slide) {
		ktv->kt_signature += vm_kernel_slide;
		ktv->kt_zv.zv_name += vm_kernel_slide;
		ktv->kt_flags |= KT_SLID;
	}
	kt_atom.kt_sig_type = ktv->kt_signature;
	return kt_atom;
}
769 
/*
 * Variable-view counterpart of kalloc_type_get_atom_fixed(): the atom's
 * size is hdr + type, and both signature pointers (header may be NULL)
 * are captured after the optional in-place slide.
 */
static inline struct kalloc_type_atom
kalloc_type_get_atom_var(vm_offset_t addr, bool slide)
{
	struct kalloc_type_atom kt_atom = {};
	kalloc_type_var_view_t ktv = (struct kalloc_type_var_view *) addr;
	kt_atom.kt_flags = ktv->kt_flags;
	kt_atom.kt_size = ktv->kt_size_hdr + ktv->kt_size_type;
	if (slide) {
		if (ktv->kt_sig_hdr) {
			ktv->kt_sig_hdr += vm_kernel_slide;
		}
		ktv->kt_sig_type += vm_kernel_slide;
		ktv->kt_name += vm_kernel_slide;
		ktv->kt_flags |= KT_SLID;
	}
	kt_atom.kt_sig_hdr = ktv->kt_sig_hdr;
	kt_atom.kt_sig_type = ktv->kt_sig_type;
	return kt_atom;
}
789 
790 __startup_func
791 static inline void
kalloc_type_buffer_copy_fixed(kalloc_type_views_t * buffer,vm_offset_t ktv)792 kalloc_type_buffer_copy_fixed(kalloc_type_views_t *buffer, vm_offset_t ktv)
793 {
794 	buffer->ktv_fixed = (kalloc_type_view_t) ktv;
795 }
796 
797 __startup_func
798 static inline void
kalloc_type_buffer_copy_var(kalloc_type_views_t * buffer,vm_offset_t ktv)799 kalloc_type_buffer_copy_var(kalloc_type_views_t *buffer, vm_offset_t ktv)
800 {
801 	buffer->ktv_var = (kalloc_type_var_view_t) ktv;
802 }
803 
/*
 * Redirect one data-only fixed view to KHEAP_DATA_BUFFERS: re-encode its
 * size with the data heap's zone index, resolve the backing zone, and
 * assign it (the &view, &view + 1 pair forms a one-element range for
 * kalloc_type_assign_zone_fixed()).
 */
__startup_func
static void
kalloc_type_handle_data_view_fixed(vm_offset_t addr)
{
	kalloc_type_view_t cur_data_view = (kalloc_type_view_t) addr;
	cur_data_view->kt_size = kalloc_idx_for_size(KHEAP_DATA_BUFFERS,
	    cur_data_view->kt_size);
	uint16_t kt_idx = kalloc_type_get_idx(cur_data_view->kt_size);
	zone_t z = kalloc_heap_zone_for_idx(KHEAP_DATA_BUFFERS, kt_idx);
	kalloc_type_assign_zone_fixed(&cur_data_view, &cur_data_view + 1, z);
}
815 
/*
 * Redirect one data-only variable view to the fake KT_VAR_DATA_HEAP
 * (variable kalloc_type performs the real data redirection in its entry
 * points).  Old views that predate KT_CHANGED get KT_DATA_ONLY stamped
 * here so later fast checks work.
 */
__startup_func
static void
kalloc_type_handle_data_view_var(vm_offset_t addr)
{
	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
	kalloc_type_flags_t kt_flags = ktv->kt_flags;

	/*
	 * To avoid having to recompute this until rdar://85182551 lands
	 * in the build and kexts are rebuilt.
	 */
	if (!(kt_flags & KT_CHANGED)) {
		ktv->kt_flags |= (KT_CHANGED | KT_DATA_ONLY);
	}

	/* one-element range: [&ktv, &ktv + 1) */
	kalloc_type_assign_zone_var(&ktv, &ktv + 1, KT_VAR_DATA_HEAP);
}
833 
834 __startup_func
835 static void
kalloc_type_handle_parray_var(vm_offset_t addr)836 kalloc_type_handle_parray_var(vm_offset_t addr)
837 {
838 	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
839 	kalloc_type_assign_zone_var(&ktv, &ktv + 1, KT_VAR_PTR_HEAP);
840 }
841 
842 __startup_func
843 static void
kalloc_type_mark_processed_fixed(vm_offset_t addr)844 kalloc_type_mark_processed_fixed(vm_offset_t addr)
845 {
846 	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
847 	ktv->kt_flags |= KT_PROCESSED;
848 }
849 
850 __startup_func
851 static void
kalloc_type_mark_processed_var(vm_offset_t addr)852 kalloc_type_mark_processed_var(vm_offset_t addr)
853 {
854 	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
855 	ktv->kt_flags |= KT_PROCESSED;
856 }
857 
858 __startup_func
859 static void
kalloc_type_update_view_fixed(vm_offset_t addr)860 kalloc_type_update_view_fixed(vm_offset_t addr)
861 {
862 	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
863 	ktv->kt_size = kalloc_idx_for_size(KHEAP_DEFAULT, ktv->kt_size);
864 }
865 
866 __startup_func
867 static void
kalloc_type_update_view_var(vm_offset_t addr)868 kalloc_type_update_view_var(vm_offset_t addr)
869 {
870 	(void) addr;
871 }
872 
/*
 * Walk the kalloc_type views of the given variant in [start, end):
 * mark each view processed, immediately dispatch VM-backed, data-only
 * and (variable-only) pointer-array views to their final homes, and
 * append every remaining view to the kt_buffer scratch array,
 * advancing *cur_count.  `slide` requests in-place sliding of the
 * views' string pointers (see kalloc_type_get_atom_*).
 * Panics if the scratch buffer (capacity kt_count) would overflow.
 */
__startup_func
static void
kalloc_type_view_copy(const kalloc_type_variant_t type, vm_offset_t start,
    vm_offset_t end, uint64_t *cur_count, bool slide)
{
	uint64_t count = kalloc_type_view_count(type, start, end);
	if (count + *cur_count >= kt_count) {
		panic("kalloc_type_view_copy: Insufficient space in scratch buffer");
	}
	vm_offset_t cur = start;
	while (cur < end) {
		struct kalloc_type_atom kt_atom = kalloc_type_func(type, get_atom, cur,
		    slide);
		kalloc_type_func(type, mark_processed, cur);
		/*
		 * Skip views that go to the VM
		 */
		if (kalloc_type_from_vm(kt_atom)) {
			cur += kalloc_type_func(type, view_sz);
			continue;
		}

		/*
		 * If signature indicates that the entire allocation is data move it to
		 * KHEAP_DATA_BUFFERS. Note that KT_VAR_DATA_HEAP is a fake "data" heap,
		 * variable kalloc_type handles the actual redirection in the entry points
		 * kalloc/kfree_type_var_impl.
		 */
		if (kalloc_type_is_data(kt_atom)) {
			kalloc_type_func(type, handle_data_view, cur);
			cur += kalloc_type_func(type, view_sz);
			continue;
		}

		/*
		 * Redirect variable sized pointer arrays to KT_VAR_PTR_HEAP
		 */
		if (type == KTV_VAR && kalloc_type_is_ptr_array(kt_atom)) {
			kalloc_type_handle_parray_var(cur);
			cur += kalloc_type_func(type, view_sz);
			continue;
		}

		/* everything else is deferred: stash it for later grouping */
		kalloc_type_func(type, update_view, cur);
		kalloc_type_func(type, buffer_copy, &kt_buffer[*cur_count], cur);
		cur += kalloc_type_func(type, view_sz);
		*cur_count = *cur_count + 1;
	}
}
922 
/*
 * Locate and process every __kalloc_type section relevant to the given
 * view variant, according to the kernelcache (KC) format:
 *
 *   - KCFormatStatic:   xnu and kext sections are coalesced; parse the
 *                       single section directly.
 *   - KCFormatFileset:  iterate LC_FILESET_ENTRY load commands of the
 *                       primary KC and parse each macho's section.
 *   - KCFormatKCGEN:    parse xnu's own section, then (LP64 only) walk
 *                       the __PRELINK_TEXT segment macho-by-macho to find
 *                       each kext's section.
 *   - anything else:    unsupported; panic.
 *
 * Returns the number of views copied into kt_buffer.
 */
__startup_func
static uint64_t
kalloc_type_view_parse(const kalloc_type_variant_t type)
{
	kc_format_t kc_format;
	uint64_t cur_count = 0;

	if (!PE_get_primary_kc_format(&kc_format)) {
		panic("kalloc_type_view_parse: wasn't able to determine kc format");
	}

	if (kc_format == KCFormatStatic) {
		/*
		 * If kc is static or KCGEN, __kalloc_type sections from kexts and
		 * xnu are coalesced.
		 */
		kalloc_type_view_copy(type,
		    kalloc_type_var(type, sec_start),
		    kalloc_type_var(type, sec_end),
		    &cur_count, 0);
	} else if (kc_format == KCFormatFileset) {
		/*
		 * If kc uses filesets, traverse __kalloc_type section for each
		 * macho in the BootKC.
		 */
		kernel_mach_header_t *kc_mh = NULL;
		kernel_mach_header_t *kext_mh = NULL;

		kc_mh = (kernel_mach_header_t *)PE_get_kc_header(KCKindPrimary);
		struct load_command *lc =
		    (struct load_command *)((vm_offset_t)kc_mh + sizeof(*kc_mh));
		for (uint32_t i = 0; i < kc_mh->ncmds;
		    i++, lc = (struct load_command *)((vm_offset_t)lc + lc->cmdsize)) {
			if (lc->cmd != LC_FILESET_ENTRY) {
				continue;
			}
			struct fileset_entry_command *fse =
			    (struct fileset_entry_command *)(vm_offset_t)lc;
			kext_mh = (kernel_mach_header_t *)fse->vmaddr;
			kernel_section_t *sect = (kernel_section_t *)getsectbynamefromheader(
				kext_mh, KALLOC_TYPE_SEGMENT, KALLOC_TYPE_SECTION(type));
			if (sect != NULL) {
				kalloc_type_view_copy(type, sect->addr, sect->addr + sect->size,
				    &cur_count, false);
			}
		}
	} else if (kc_format == KCFormatKCGEN) {
		/*
		 * Parse __kalloc_type section from xnu
		 */
		kalloc_type_view_copy(type,
		    kalloc_type_var(type, sec_start),
		    kalloc_type_var(type, sec_end), &cur_count, false);

#if defined(__LP64__)
		/*
		 * Parse __kalloc_type section for kexts
		 *
		 * Note: We don't process the kalloc_type_views for kexts on armv7
		 * as this platform has insufficient memory for type based
		 * segregation. kalloc_type_impl_external will direct callsites
		 * based on their size.
		 */
		kernel_mach_header_t *xnu_mh = &_mh_execute_header;
		vm_offset_t cur = 0;
		vm_offset_t end = 0;

		/*
		 * Kext machos are in the __PRELINK_TEXT segment. Extract the segment
		 * and traverse it.
		 */
		kernel_section_t *prelink_sect = getsectbynamefromheader(
			xnu_mh, kPrelinkTextSegment, kPrelinkTextSection);
		assert(prelink_sect);
		cur = prelink_sect->addr;
		end = prelink_sect->addr + prelink_sect->size;

		while (cur < end) {
			uint64_t kext_text_sz = 0;
			kernel_mach_header_t *kext_mh = (kernel_mach_header_t *) cur;

			if (kext_mh->magic == 0) {
				/*
				 * Assert that we have processed all kexts and all that is left
				 * is padding
				 */
				assert(memcmp_zero_ptr_aligned((void *)kext_mh, end - cur) == 0);
				break;
			} else if (kext_mh->magic != MH_MAGIC_64 &&
			    kext_mh->magic != MH_CIGAM_64) {
				panic("kalloc_type_view_parse: couldn't find kext @ offset:%lx",
				    cur);
			}

			/*
			 * Kext macho found, iterate through its segments
			 */
			struct load_command *lc =
			    (struct load_command *)(cur + sizeof(kernel_mach_header_t));
			bool isSplitKext = false;

			for (uint32_t i = 0; i < kext_mh->ncmds && (vm_offset_t)lc < end;
			    i++, lc = (struct load_command *)((vm_offset_t)lc + lc->cmdsize)) {
				if (lc->cmd == LC_SEGMENT_SPLIT_INFO) {
					isSplitKext = true;
					continue;
				} else if (lc->cmd != LC_SEGMENT_64) {
					continue;
				}

				kernel_segment_command_t *seg_cmd =
				    (struct segment_command_64 *)(vm_offset_t)lc;
				/*
				 * Parse kalloc_type section
				 */
				if (strcmp(seg_cmd->segname, KALLOC_TYPE_SEGMENT) == 0) {
					kernel_section_t *kt_sect = getsectbynamefromseg(seg_cmd,
					    KALLOC_TYPE_SEGMENT, KALLOC_TYPE_SECTION(type));
					if (kt_sect) {
						/* KCGEN section addresses are unslid: pass slide = true */
						kalloc_type_view_copy(type, kt_sect->addr + vm_kernel_slide,
						    kt_sect->addr + kt_sect->size + vm_kernel_slide, &cur_count,
						    true);
					}
				}
				/*
				 * If the kext has a __TEXT segment, that is the only thing that
				 * will be in the special __PRELINK_TEXT KC segment, so the next
				 * macho is right after.
				 */
				if (strcmp(seg_cmd->segname, "__TEXT") == 0) {
					kext_text_sz = seg_cmd->filesize;
				}
			}
			/*
			 * If the kext did not have a __TEXT segment (special xnu kexts with
			 * only a __LINKEDIT segment) then the next macho will be after all the
			 * header commands.
			 */
			if (!kext_text_sz) {
				kext_text_sz = kext_mh->sizeofcmds;
			} else if (!isSplitKext) {
				/* NOTE(review): panic does not return; the break below is dead code */
				panic("kalloc_type_view_parse: No support for non-split seg KCs");
				break;
			}

			/* Round up to the next kext alignment boundary to find the next macho */
			cur += ((kext_text_sz + (KEXT_ALIGN_BYTES - 1)) & (~KEXT_ALIGN_MASK));
		}

#endif
	} else {
		/*
		 * When kc_format is KCFormatDynamic or KCFormatUnknown, we don't handle
		 * parsing kalloc_type_view structs during startup.
		 */
		panic("kalloc_type_view_parse: couldn't parse kalloc_type_view structs"
		    " for kc_format = %d\n", kc_format);
	}
	return cur_count;
}
1082 
1083 __startup_func
1084 static int
kalloc_type_cmp_fixed(const void * a,const void * b)1085 kalloc_type_cmp_fixed(const void *a, const void *b)
1086 {
1087 	const kalloc_type_view_t ktA = *(const kalloc_type_view_t *)a;
1088 	const kalloc_type_view_t ktB = *(const kalloc_type_view_t *)b;
1089 
1090 	const uint16_t idxA = kalloc_type_get_idx(ktA->kt_size);
1091 	const uint16_t idxB = kalloc_type_get_idx(ktB->kt_size);
1092 	/*
1093 	 * If the kalloc_type_views are in the same kalloc bucket, sort by
1094 	 * signature else sort by size
1095 	 */
1096 	if (idxA == idxB) {
1097 		int result = strcmp(ktA->kt_signature, ktB->kt_signature);
1098 		/*
1099 		 * If the kalloc_type_views have the same signature sort by site
1100 		 * name
1101 		 */
1102 		if (result == 0) {
1103 			return strcmp(ktA->kt_zv.zv_name, ktB->kt_zv.zv_name);
1104 		}
1105 		return result;
1106 	}
1107 	const uint32_t sizeA = kalloc_type_get_size(ktA->kt_size);
1108 	const uint32_t sizeB = kalloc_type_get_size(ktB->kt_size);
1109 	return (int)(sizeA - sizeB);
1110 }
1111 
1112 __startup_func
1113 static int
kalloc_type_cmp_var(const void * a,const void * b)1114 kalloc_type_cmp_var(const void *a, const void *b)
1115 {
1116 	const kalloc_type_var_view_t ktA = *(const kalloc_type_var_view_t *)a;
1117 	const kalloc_type_var_view_t ktB = *(const kalloc_type_var_view_t *)b;
1118 
1119 	const char *ktA_hdr = ktA->kt_sig_hdr ?: "";
1120 	const char *ktB_hdr = ktB->kt_sig_hdr ?: "";
1121 
1122 	int result = strcmp(ktA->kt_sig_type, ktB->kt_sig_type);
1123 	if (result == 0) {
1124 		return strcmp(ktA_hdr, ktB_hdr);
1125 	}
1126 	return result;
1127 }
1128 
/*
 * Scan the sorted kt_buffer of fixed-size views and build:
 *   - a skip list of indices where each run of "unique" signatures starts
 *     (signatures are considered collapsible when one is a prefix of the
 *     next, since the sort groups substrings together),
 *   - kt_freq_list[idx]:       number of unique signatures per size class,
 *   - kt_freq_list_total[idx]: number of distinct site names per size class.
 *
 * Returns a pointer one past the end of the skip list, which the caller
 * reuses as scratch space (shuffle buffer).
 *
 * NOTE(review): the loop index is uint16_t while count is uint64_t; this
 * relies on the caller's assert(kt_count < KALLOC_TYPE_SIZE_MASK) keeping
 * count within 16 bits — confirm if that mask ever exceeds 0xffff.
 */
__startup_func
static uint16_t *
kalloc_type_create_iterators_fixed(uint16_t *kt_skip_list_start,
    uint16_t *kt_freq_list, uint16_t *kt_freq_list_total, uint64_t count)
{
	uint16_t *kt_skip_list = kt_skip_list_start;
	/*
	 * cur and prev kalloc size bucket
	 */
	uint16_t p_idx = 0;
	uint16_t c_idx = 0;

	/*
	 * Init values
	 */
	uint16_t unique_sig = 1;
	uint16_t total_sig = 0;
	kt_skip_list++;
	const char *p_sig = "";
	const char *p_name = "";

	/*
	 * Walk over each kalloc_type_view
	 */
	for (uint16_t i = 0; i < count; i++) {
		kalloc_type_view_t kt = kt_buffer[i].ktv_fixed;
		c_idx = kalloc_type_get_idx(kt->kt_size);
		/*
		 * When current kalloc_type_view is in a different kalloc size
		 * bucket than the previous, it means we have processed all in
		 * the previous size bucket, so store the accumulated values
		 * and advance the indices.
		 */
		if (c_idx != p_idx) {
			/*
			 * Updates for frequency lists
			 */
			kt_freq_list[p_idx] = unique_sig;
			unique_sig = 1;
			kt_freq_list_total[p_idx] = total_sig;
			total_sig = 1;
			p_idx = c_idx;

			/*
			 * Updates to signature skip list
			 */
			*kt_skip_list = i;
			kt_skip_list++;
			p_sig = kt->kt_signature;
			continue;
		}

		/*
		 * When current kalloc_type_views is in the kalloc size bucket as
		 * previous, analyze the siganture to see if it is unique.
		 *
		 * Signatures are collapsible if one is a substring of the next.
		 */
		const char *c_sig = kt->kt_signature;
		if (strncmp(c_sig, p_sig, strlen(p_sig)) != 0) {
			/*
			 * Unique signature detected. Update counts and advance index
			 */
			unique_sig++;
			*kt_skip_list = i;
			kt_skip_list++;
		}

		/*
		 * Check if current kalloc_type_view corresponds to a new type
		 */
		const char *c_name = kt->kt_zv.zv_name;
		if (strlen(p_name) != strlen(c_name) || strcmp(p_name, c_name) != 0) {
			total_sig++;
		}
		p_name = c_name;
		p_sig = c_sig;
	}
	/*
	 * Final update
	 */
	assert(c_idx == p_idx);
	assert(kt_freq_list[c_idx] == 0);
	kt_freq_list[c_idx] = unique_sig;
	kt_freq_list_total[c_idx] = (uint16_t) total_sig;
	*kt_skip_list = (uint16_t) count;
	return ++kt_skip_list;
}
1217 
1218 #if ZSECURITY_CONFIG(KALLOC_TYPE)
1219 __startup_func
1220 static uint32_t
kalloc_type_create_iterators_var(uint32_t * kt_skip_list_start)1221 kalloc_type_create_iterators_var(uint32_t *kt_skip_list_start)
1222 {
1223 	uint32_t *kt_skip_list = kt_skip_list_start;
1224 	uint32_t n = 0;
1225 	kt_skip_list[n] = 0;
1226 	assert(kt_count > 1);
1227 	for (uint32_t i = 1; i < kt_count; i++) {
1228 		kalloc_type_var_view_t ktA = kt_buffer[i - 1].ktv_var;
1229 		kalloc_type_var_view_t ktB = kt_buffer[i].ktv_var;
1230 		const char *ktA_hdr = ktA->kt_sig_hdr ?: "";
1231 		const char *ktB_hdr = ktB->kt_sig_hdr ?: "";
1232 		if (strcmp(ktA_hdr, ktB_hdr) != 0 ||
1233 		    strcmp(ktA->kt_sig_type, ktB->kt_sig_type) != 0) {
1234 			n++;
1235 			kt_skip_list[n] = i;
1236 		}
1237 	}
1238 	/*
1239 	 * Final update
1240 	 */
1241 	n++;
1242 	kt_skip_list[n] = (uint32_t) kt_count;
1243 	return n;
1244 }
1245 
1246 __startup_func
1247 static uint16_t
kalloc_type_apply_policy(uint16_t * kt_freq_list,uint16_t * kt_zones,uint16_t zone_budget)1248 kalloc_type_apply_policy(uint16_t *kt_freq_list, uint16_t *kt_zones,
1249     uint16_t zone_budget)
1250 {
1251 	uint16_t total_sig = 0;
1252 	uint16_t min_sig = 0;
1253 	uint16_t assigned_zones = 0;
1254 	uint16_t remaining_zones = zone_budget;
1255 	uint16_t min_zones_per_size = 2;
1256 
1257 #if DEBUG || DEVELOPMENT
1258 	if (startup_phase < STARTUP_SUB_LOCKDOWN) {
1259 		uint16_t current_zones = os_atomic_load(&num_zones, relaxed);
1260 		assert(zone_budget + current_zones <= MAX_ZONES);
1261 	}
1262 #endif
1263 
1264 	for (uint16_t i = 0; i < MAX_K_ZONE(k_zone_cfg); i++) {
1265 		uint16_t sig_freq = kt_freq_list[i];
1266 		uint16_t min_zones = min_zones_per_size;
1267 		if (sig_freq < min_zones_per_size) {
1268 			min_zones = sig_freq;
1269 		}
1270 		total_sig += sig_freq;
1271 		kt_zones[i] = min_zones;
1272 		min_sig += min_zones;
1273 	}
1274 	if (remaining_zones > total_sig) {
1275 		remaining_zones = total_sig;
1276 	}
1277 	assert(remaining_zones >= min_sig);
1278 	remaining_zones -= min_sig;
1279 	total_sig -= min_sig;
1280 	assigned_zones += min_sig;
1281 	uint16_t modulo = 0;
1282 	for (uint16_t i = 0; i < MAX_K_ZONE(k_zone_cfg); i++) {
1283 		uint16_t freq = kt_freq_list[i];
1284 		if (freq < min_zones_per_size) {
1285 			continue;
1286 		}
1287 		uint32_t numer = (freq - min_zones_per_size) * remaining_zones;
1288 		uint16_t n_zones = (uint16_t) numer / total_sig;
1289 
1290 		/*
1291 		 * Accumulate remainder and increment n_zones when it goes above
1292 		 * denominator
1293 		 */
1294 		modulo += numer % total_sig;
1295 		if (modulo >= total_sig) {
1296 			n_zones++;
1297 			modulo -= total_sig;
1298 		}
1299 
1300 		/*
1301 		 * Cap the total number of zones to the unique signatures
1302 		 */
1303 		if ((n_zones + min_zones_per_size) > freq) {
1304 			uint16_t extra_zones = n_zones + min_zones_per_size - freq;
1305 			modulo += (extra_zones * total_sig);
1306 			n_zones -= extra_zones;
1307 		}
1308 		kt_zones[i] += n_zones;
1309 		assigned_zones += n_zones;
1310 	}
1311 
1312 	if (kt_options & KT_OPTIONS_DEBUG) {
1313 		printf("kalloc_type_apply_policy: assigned %u zones wasted %u zones\n",
1314 		    assigned_zones, remaining_zones + min_sig - assigned_zones);
1315 	}
1316 	return remaining_zones + min_sig - assigned_zones;
1317 }
1318 
/*
 * Create kt_zones zones of element size z_size, named
 * "kalloc.type<i>.<size>", and store them in kt_zones_for_size.
 *
 * On DEBUG/DEVELOPMENT builds the zones for a given size are chained
 * through z_kt_next (p_zone tracks the previously created zone); on
 * release builds p_zone is written but never read.
 */
__startup_func
static void
kalloc_type_create_zone_for_size(zone_t *kt_zones_for_size,
    uint16_t kt_zones, vm_size_t z_size)
{
	zone_t p_zone = NULL;

	for (uint16_t i = 0; i < kt_zones; i++) {
		char *z_name = zalloc_permanent(MAX_ZONE_NAME, ZALIGN_NONE);
		snprintf(z_name, MAX_ZONE_NAME, "kalloc.type%u.%zu", i,
		    (size_t) z_size);
		zone_t z = zone_create(z_name, z_size, ZC_KALLOC_TYPE);
#if DEBUG || DEVELOPMENT
		if (i != 0) {
			p_zone->z_kt_next = z;
		}
#endif
		p_zone = z;
		kt_zones_for_size[i] = z;
	}
}
1340 #endif /* ZSECURITY_CONFIG(KALLOC_TYPE) */
1341 
/*
 * Create the fixed-size kalloc.type zones and randomly assign every
 * unique-signature run of views (delimited by the skip list built in
 * kalloc_type_create_iterators_fixed) to one of its size class's zones.
 *
 * @param kt_skip_list_start  starts of unique-signature runs in kt_buffer
 * @param kt_freq_list        unique signature count per size class
 * @param kt_freq_list_total  distinct type count per size class (stats only)
 * @param kt_shuffle_buf      scratch area used to shuffle run indices
 */
__startup_func
static void
kalloc_type_create_zones_fixed(uint16_t *kt_skip_list_start,
    uint16_t *kt_freq_list, uint16_t *kt_freq_list_total,
    uint16_t *kt_shuffle_buf)
{
	uint16_t *kt_skip_list = kt_skip_list_start;
	uint16_t p_j = 0;

	uint16_t kt_zones[MAX_K_ZONE(k_zone_cfg)] = {};

#if DEBUG || DEVELOPMENT
	/* Number of uint16_t slots available in the shuffle scratch area */
	uint64_t kt_shuffle_count = ((vm_address_t) kt_shuffle_buf -
	    (vm_address_t) kt_buffer) / sizeof(uint16_t);
#endif
	/*
	 * Apply policy to determine how many zones to create for each size
	 * class.
	 */
#if ZSECURITY_CONFIG(KALLOC_TYPE)
	kalloc_type_apply_policy(kt_freq_list, kt_zones,
	    ZSECURITY_CONFIG_KT_BUDGET);
	/*
	 * Print stats when KT_OPTIONS_DEBUG boot-arg present
	 */
	if (kt_options & KT_OPTIONS_DEBUG) {
		printf("Size\ttotal_sig\tunique_signatures\tzones\n");
		for (uint32_t i = 0; i < MAX_K_ZONE(k_zone_cfg); i++) {
			printf("%u\t%u\t%u\t%u\n", k_zone_cfg[i].kzc_size,
			    kt_freq_list_total[i], kt_freq_list[i], kt_zones[i]);
		}
	}
#else /* ZSECURITY_CONFIG(KALLOC_TYPE) */
#pragma unused(kt_freq_list_total)
#endif /* !ZSECURITY_CONFIG(KALLOC_TYPE) */

	for (uint16_t i = 0; i < MAX_K_ZONE(k_zone_cfg); i++) {
		uint16_t n_unique_sig = kt_freq_list[i];
		vm_size_t z_size = k_zone_cfg[i].kzc_size;
		uint16_t n_zones = kt_zones[i];

		if (n_unique_sig == 0) {
			continue;
		}

		assert(n_zones <= 20);
		zone_t kt_zones_for_size[20] = {};
#if ZSECURITY_CONFIG(KALLOC_TYPE)
		kalloc_type_create_zone_for_size(kt_zones_for_size,
		    n_zones, z_size);
#else /* ZSECURITY_CONFIG(KALLOC_TYPE) */
		/*
		 * Default to using KHEAP_DEFAULT if this feature is off
		 */
		n_zones = 1;
		kt_zones_for_size[0] = kalloc_heap_zone_for_size(
			KHEAP_DEFAULT, z_size);
#endif /* !ZSECURITY_CONFIG(KALLOC_TYPE) */

#if DEBUG || DEVELOPMENT
		kalloc_type_zarray[i] = kt_zones_for_size[0];
		/*
		 * Ensure that there is enough space to shuffle n_unique_sig
		 * indices
		 */
		assert(n_unique_sig < kt_shuffle_count);
#endif

		/*
		 * Get a shuffled set of signature indices
		 */
		*kt_shuffle_buf = 0;
		if (n_unique_sig > 1) {
			kmem_shuffle(kt_shuffle_buf, n_unique_sig);
		}

		for (uint16_t j = 0; j < n_unique_sig; j++) {
			/*
			 * For every size that has unique types
			 */
			uint16_t shuffle_idx = kt_shuffle_buf[j];
			/* p_j offsets into the skip list past runs of earlier size classes */
			uint16_t cur = kt_skip_list[shuffle_idx + p_j];
			uint16_t end = kt_skip_list[shuffle_idx + p_j + 1];
			zone_t zone = kt_zones_for_size[j % n_zones];
			kalloc_type_assign_zone_fixed(&kt_buffer[cur].ktv_fixed,
			    &kt_buffer[end].ktv_fixed, zone);
		}
		p_j += n_unique_sig;
	}
}
1432 
1433 #if ZSECURITY_CONFIG(KALLOC_TYPE)
/*
 * Create the zones backing the variable-size kalloc.type heaps.
 *
 * Computes the geometric ladder of element sizes (kheap_zsize), then for
 * each variable heap creates one zone per size, tagging each zone with
 * KHEAP_ID_KT_VAR and recording the first zone's index as the heap's
 * kh_zstart.  All heaps share a single zone_stats accounting block.
 */
__startup_func
static void
kalloc_type_create_zones_var(void)
{
	size_t kheap_zsize[KHEAP_NUM_ZONES] = {};
	size_t step = KHEAP_STEP_START;
	uint32_t start = 0;
	/*
	 * Manually initialize extra initial zones
	 */
#if !__LP64__
	kheap_zsize[start] = 8;
	start++;
#endif
	kheap_zsize[start] = 16;
	kheap_zsize[start + 1] = KHEAP_START_SIZE;

	/*
	 * Compute sizes for remaining zones
	 */
	for (uint32_t i = 0; i < KHEAP_NUM_STEPS; i++) {
		/* Two zones per step; the step size doubles each iteration */
		uint32_t step_idx = (i * 2) + KHEAP_EXTRA_ZONES;
		kheap_zsize[step_idx] = kheap_zsize[step_idx - 1] + step;
		kheap_zsize[step_idx + 1] = kheap_zsize[step_idx] + step;
		step *= 2;
	}

	/*
	 * Create zones
	 */
	assert(kt_var_heaps + 1 <= KT_VAR_MAX_HEAPS);
	for (uint32_t i = KT_VAR_PTR_HEAP; i < kt_var_heaps + 1; i++) {
		for (uint32_t j = 0; j < KHEAP_NUM_ZONES; j++) {
			char *z_name = zalloc_permanent(MAX_ZONE_NAME, ZALIGN_NONE);
			snprintf(z_name, MAX_ZONE_NAME, "%s%u.%zu", KHEAP_KT_VAR->kh_name, i,
			    kheap_zsize[j]);
			zone_create_flags_t flags = ZC_KASAN_NOREDZONE |
			    ZC_KASAN_NOQUARANTINE | ZC_KALLOC_TYPE |
			    ZC_PGZ_USE_GUARDS;

			zone_t z_ptr = zone_create_ext(z_name, kheap_zsize[j], flags,
			    ZONE_ID_ANY, ^(zone_t z){
				zone_security_array[zone_index(z)].z_kheap_id = KHEAP_ID_KT_VAR;
			});
			if (j == 0) {
				/* Remember where this heap's zone sequence begins */
				kalloc_type_heap_array[i].kh_zstart = zone_index(z_ptr);
			}
		}
	}

	/*
	 * All variable kalloc type allocations are collapsed into a single
	 * stat. Individual accounting can be requested via KT_PRIV_ACCT
	 */
	KHEAP_KT_VAR->kh_stats = zalloc_percpu_permanent_type(struct zone_stats);
	zone_view_count += 1;
}
1491 #endif /* !ZSECURITY_CONFIG(KALLOC_TYPE) */
1492 
1493 
/*
 * Startup pipeline for fixed-size kalloc_type views:
 * parse them from the kernelcache into kt_buffer, sort by size class and
 * signature, build the skip/frequency lists, then create zones and assign
 * each signature run to one.
 */
__startup_func
static void
kalloc_type_view_init_fixed(void)
{
	/*
	 * Parse __kalloc_type sections and build array of pointers to
	 * all kalloc type views in kt_buffer.
	 */
	kt_count = kalloc_type_view_parse(KTV_FIXED);
	assert(kt_count < KALLOC_TYPE_SIZE_MASK);

#if DEBUG || DEVELOPMENT
	vm_size_t sig_slist_size = (size_t) kt_count * sizeof(uint16_t);
	vm_size_t kt_buffer_size = (size_t) kt_count * sizeof(kalloc_type_view_t);
	assert(kt_scratch_size >= kt_buffer_size + sig_slist_size);
#endif

	/*
	 * Sort based on size class and signature
	 */
	qsort(kt_buffer, (size_t) kt_count, sizeof(kalloc_type_view_t),
	    kalloc_type_cmp_fixed);

	/*
	 * Build a skip list that holds starts of unique signatures and a
	 * frequency list of number of unique and total signatures per kalloc
	 * size class
	 */
	uint16_t *kt_skip_list_start = (uint16_t *)(kt_buffer + kt_count);
	uint16_t kt_freq_list[MAX_K_ZONE(k_zone_cfg)] = { 0 };
	uint16_t kt_freq_list_total[MAX_K_ZONE(k_zone_cfg)] = { 0 };
	uint16_t *kt_shuffle_buf = kalloc_type_create_iterators_fixed(
		kt_skip_list_start, kt_freq_list, kt_freq_list_total, kt_count);

	/*
	 * Create zones based on signatures
	 */
	kalloc_type_create_zones_fixed(kt_skip_list_start, kt_freq_list,
	    kt_freq_list_total, kt_shuffle_buf);
}
1534 
1535 #if ZSECURITY_CONFIG(KALLOC_TYPE)
/*
 * Startup pipeline for variable-size kalloc_type views:
 * create the per-heap zone ladders first (redirections discovered while
 * parsing need them), then parse, sort by signature, group into unique
 * signature runs, and assign each run a randomly chosen flexible heap.
 */
__startup_func
static void
kalloc_type_view_init_var(void)
{
	/*
	 * Zones are created prior to parsing the views as zone budget is fixed
	 * per sizeclass and special types identified while parsing are redirected
	 * as they are discovered.
	 */
	kalloc_type_create_zones_var();

	/*
	 * Parse __kalloc_var sections and build array of pointers to views that
	 * aren't rediected in kt_buffer.
	 */
	kt_count = kalloc_type_view_parse(KTV_VAR);
	assert(kt_count < UINT32_MAX);

#if DEBUG || DEVELOPMENT
	vm_size_t sig_slist_size = (size_t) kt_count * sizeof(uint32_t);
	vm_size_t kt_buffer_size = (size_t) kt_count * sizeof(kalloc_type_views_t);
	assert(kt_scratch_size >= kt_buffer_size + sig_slist_size);
#endif

	/*
	 * Sort based on size class and signature
	 */
	qsort(kt_buffer, (size_t) kt_count, sizeof(kalloc_type_var_view_t),
	    kalloc_type_cmp_var);

	/*
	 * Build a skip list that holds starts of unique signatures
	 */
	uint32_t *kt_skip_list_start = (uint32_t *)(kt_buffer + kt_count);
	uint32_t unique_sig = kalloc_type_create_iterators_var(kt_skip_list_start);
	uint16_t fixed_heaps = KT_VAR__FIRST_FLEXIBLE_HEAP;
	/*
	 * If we have only one heap then other elements share heap with pointer
	 * arrays
	 */
	if (kt_var_heaps < KT_VAR__FIRST_FLEXIBLE_HEAP) {
		fixed_heaps = KT_VAR_PTR_HEAP;
	}

	for (uint32_t i = 1; i <= unique_sig; i++) {
		/* Pick a random flexible heap for this signature run */
		uint32_t heap_id = kmem_get_random16(kt_var_heaps - fixed_heaps) +
		    fixed_heaps;
		uint32_t start = kt_skip_list_start[i - 1];
		uint32_t end = kt_skip_list_start[i];
		kalloc_type_assign_zone_var(&kt_buffer[start].ktv_var,
		    &kt_buffer[end].ktv_var, heap_id);
	}
}
1589 #else /* ZSECURITY_CONFIG(KALLOC_TYPE) */
/*
 * Fallback when kalloc_type segregation is compiled out: alias the
 * variable kalloc_type heap to the default heap by struct copy, so
 * variable typed allocations go through KHEAP_DEFAULT.
 */
__startup_func
static void
kalloc_type_view_init_var(void)
{
	*KHEAP_KT_VAR = *KHEAP_DEFAULT;
}
1596 #endif /* !ZSECURITY_CONFIG(KALLOC_TYPE) */
1597 
/*
 * Top-level startup entry (STARTUP(ZALLOC, STARTUP_RANK_FOURTH)):
 * allocates a temporary scratch buffer, runs the fixed-size then the
 * variable-size kalloc_type pipelines over it, and frees it.
 */
__startup_func
static void
kalloc_type_views_init(void)
{
	/*
	 * Allocate scratch space to parse kalloc_type_views and create
	 * other structures necessary to process them.
	 */
	uint64_t max_count = kt_count = kt_scratch_size / sizeof(kalloc_type_views_t);

	kmem_alloc(kernel_map, (vm_offset_t *)&kt_buffer, kt_scratch_size,
	    KMA_NOFAIL | KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_KALLOC);

	/*
	 * Handle fixed size views
	 */
	kalloc_type_view_init_fixed();

	/*
	 * Reset
	 */
	bzero(kt_buffer, kt_scratch_size);
	kt_count = max_count;

	/*
	 * Handle variable size views
	 */
	kalloc_type_view_init_var();

	/*
	 * Free resources used
	 */
	kmem_free(kernel_map, (vm_offset_t) kt_buffer, kt_scratch_size);
}
1632 STARTUP(ZALLOC, STARTUP_RANK_FOURTH, kalloc_type_views_init);
1633 
1634 #pragma mark accessors
1635 
1636 #define KFREE_ABSURD_SIZE \
1637 	((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_AND_KEXT_ADDRESS) / 2)
1638 
1639 /*
1640  * This is used to sanitize size for kasan or OOB adjustments.
1641  * It assumes that size is [0, KFREE_ABSURD_SIZE) valued.
1642  */
1643 __header_always_inline vm_size_t
K_SANE_SIZE(vm_size_t size)1644 K_SANE_SIZE(vm_size_t size)
1645 {
1646 	return MAX(size, 1);
1647 }
1648 
1649 static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)1650 KALLOC_ZINFO_SALLOC(vm_size_t bytes)
1651 {
1652 	thread_t thr = current_thread();
1653 	ledger_debit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
1654 }
1655 
1656 static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)1657 KALLOC_ZINFO_SFREE(vm_size_t bytes)
1658 {
1659 	thread_t thr = current_thread();
1660 	ledger_credit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
1661 }
1662 
1663 zone_t
kalloc_heap_zone_for_size(kalloc_heap_t kheap,vm_size_t size)1664 kalloc_heap_zone_for_size(kalloc_heap_t kheap, vm_size_t size)
1665 {
1666 	struct kheap_zones *khz = kheap->kh_zones;
1667 	zone_t z = ZONE_NULL;
1668 
1669 	if (size < MAX_SIZE_ZDLUT) {
1670 		z = khz->k_zone[khz->dlut[INDEX_ZDLUT(size)]];
1671 		__builtin_assume(z != ZONE_NULL);
1672 	} else if (size <= KHEAP_MAX_SIZE) {
1673 		uint32_t zindex = khz->k_zindex_start;
1674 		while (khz->cfg[zindex].kzc_size < size) {
1675 			zindex++;
1676 		}
1677 		z = khz->k_zone[zindex];
1678 		__builtin_assume(z != ZONE_NULL);
1679 	}
1680 
1681 	return z;
1682 }
1683 
/*
 * Map a request size onto a zone of the variable kalloc_type heap the
 * view was assigned to.  The heap's zones form a ladder starting at
 * kt_view->kt_heap_start: a couple of small fixed sizes, then two zones
 * per power-of-two step.  Returns ZONE_NULL above KHEAP_MAX_SIZE so the
 * caller falls back to kalloc_large().
 */
static zone_t
kalloc_type_zone_for_size(kalloc_type_var_view_t kt_view, size_t size)
{
	uint32_t zid = kt_view->kt_heap_start, idx;

	if (size <= KHEAP_START_SIZE) {
		/* Small fixed sizes: step past the 8- and 16-byte zones as needed */
#if !__LP64__
		zid  += (size > 8);
#endif
		zid  += (size > 16);
	} else if (size <= KHEAP_MAX_SIZE) {
		/*
		 * . log2down(size - 1) is log2up(size) - 1
		 * . (size - 1) >> (log2down(size - 1) - 1) is either 0x2 or 0x3
		 */
		size -= 1;
		idx   = kmem_log2down((uint32_t)size);
		/* Two zones per step: pick the step, then the half within it */
		zid  += KHEAP_EXTRA_ZONES +
		    2 * (idx - KHEAP_START_IDX) +
		    ((uint32_t)size >> (idx - 1)) - 2;
	} else {
		return ZONE_NULL;
	}

	return zone_array + zid;
}
1710 
1711 static zone_t
kalloc_zone_for_size(kalloc_heap_t kheap,kalloc_type_var_view_t kt_view,vm_size_t size)1712 kalloc_zone_for_size(kalloc_heap_t kheap, kalloc_type_var_view_t kt_view, vm_size_t size)
1713 {
1714 	if (kt_view && kheap->kh_heap_id == KHEAP_ID_KT_VAR) {
1715 		return kalloc_type_zone_for_size(kt_view, size);
1716 	}
1717 
1718 	return kalloc_heap_zone_for_size(kheap, size);
1719 }
1720 
/*
 * Look up the vm_map entry backing a kalloc_large allocation.
 *
 * The map lock must be held by the caller.  Panics (rather than returning
 * NULL) if the pointer was not allocated via kalloc: no entry at that
 * address, the pointer is not the start of its entry, or the entry is not
 * atomic (i.e. not a dedicated kalloc entry).
 */
static vm_map_entry_t
vm_map_lookup_kalloc_entry_locked(vm_map_t map, void *ptr)
{
	vm_map_entry_t vm_entry = NULL;
	vm_offset_t addr = (vm_offset_t)ptr;

#if CONFIG_KERNEL_TBI
	/* Canonicalize the top byte before comparing against map addresses */
	addr = VM_KERNEL_TBI_FILL(addr);
#endif /* CONFIG_KERNEL_TBI */

	if (!vm_map_lookup_entry(map, addr, &vm_entry)) {
		panic("address %p not allocated via kalloc, map %p",
		    ptr, map);
	}
	if (vm_entry->vme_start != addr) {
		panic("address %p inside vm entry %p [%p:%p), map %p",
		    ptr, vm_entry, (void *)vm_entry->vme_start,
		    (void *)vm_entry->vme_end, map);
	}
	if (!vm_entry->vme_atomic) {
		panic("address %p not managed by kalloc (entry %p, map %p)",
		    ptr, vm_entry, map);
	}
	return vm_entry;
}
1746 
1747 
1748 #pragma mark kalloc
1749 
1750 static inline kalloc_heap_t
kalloc_type_get_heap(kalloc_type_var_view_t kt_view,bool kt_free __unused)1751 kalloc_type_get_heap(kalloc_type_var_view_t kt_view, bool kt_free __unused)
1752 {
1753 	kalloc_heap_t fallback = KHEAP_DEFAULT;
1754 
1755 	/*
1756 	 * Redirect data-only views
1757 	 */
1758 	if (kalloc_type_is_data_fast(kalloc_type_func(KTV_VAR, get_atom,
1759 	    (vm_offset_t) kt_view, false))) {
1760 		return KHEAP_DATA_BUFFERS;
1761 	}
1762 
1763 	if (kt_view->kt_flags & KT_PROCESSED) {
1764 		return KHEAP_KT_VAR;
1765 	}
1766 
1767 	/*
1768 	 * Views from kexts not in BootKC on macOS
1769 	 */
1770 #if !ZSECURITY_CONFIG(STRICT_IOKIT_FREE)
1771 	if (kt_free) {
1772 		fallback = KHEAP_ANY;
1773 	}
1774 #endif
1775 
1776 	return fallback;
1777 }
1778 
/*
 * Slow path for allocations too large for any kalloc zone: carve an
 * atomic entry out of kernel_map via kernel_memory_allocate().
 *
 * Returns a zeroed kalloc_result on refusal: Z_NOWAIT (kmem can block),
 * or a request size so large it exceeds the kernel address space.
 * Z_NOFAIL is not supported here and panics.
 *
 * @param kheap    originating heap (KHEAP_DATA_BUFFERS gets KMA_DATA)
 * @param req_size caller-requested byte count
 * @param flags    zalloc flags (Z_ZERO, Z_NOPAGEWAIT, tags, ...)
 * @param site     allocation site cookie (unused here)
 */
__attribute__((noinline))
static struct kalloc_result
kalloc_large(
	kalloc_heap_t         kheap,
	vm_size_t             req_size,
	zalloc_flags_t        flags,
	void                 *site __unused)
{
	int kma_flags = KMA_ATOMIC;
	vm_tag_t tag;
	vm_offset_t addr, size;

	if (flags & Z_NOFAIL) {
		panic("trying to kalloc(Z_NOFAIL) with a large size (%zd)",
		    (size_t)req_size);
	}

	/*
	 * kmem_alloc could block so we return if noblock
	 *
	 * also, reject sizes larger than our address space is quickly,
	 * as kt_size or IOMallocArraySize() expect this.
	 */
	if ((flags & Z_NOWAIT) ||
	    (req_size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS)) {
		return (struct kalloc_result){ };
	}

#ifndef __x86_64__
	/*
	 * (73465472) on Intel we didn't use to pass this flag,
	 * which in turned allowed kalloc_large() memory to be shared
	 * with user directly.
	 *
	 * We're bound by this unfortunate ABI.
	 */
	kma_flags |= KMA_KOBJECT;
#endif
	if (flags & Z_NOPAGEWAIT) {
		kma_flags |= KMA_NOPAGEWAIT;
	}
	if (flags & Z_ZERO) {
		kma_flags |= KMA_ZERO;
	}
	if (kheap == KHEAP_DATA_BUFFERS) {
		kma_flags |= KMA_DATA;
	}

#if KASAN_KALLOC
	/* large allocation - use guard pages instead of small redzones */
	size = round_page(kasan_alloc_resize(req_size) + ptoa(2));
#else
	size = round_page(req_size);
#endif

	/* Resolve the VM tag: explicit flag, then backtrace, then heap default */
	tag = zalloc_flags_get_tag(flags);
	if (flags & Z_VM_TAG_BT_BIT) {
		tag = vm_tag_bt() ?: tag;
	}
	if (tag == VM_KERN_MEMORY_NONE) {
		tag = kheap->kh_tag;
	}

	if (kernel_memory_allocate(kernel_map, &addr, size, 0, kma_flags, tag) !=
	    KERN_SUCCESS) {
		addr = 0;
	} else {
		/* Account the allocation in counters and the thread ledger */
		counter_inc(&kalloc_large_count);
		counter_add(&kalloc_large_total, size);
		KALLOC_ZINFO_SALLOC(size);
	}

#if KASAN_KALLOC
	/* fixup the return address to skip the redzone */
	addr = kasan_alloc(addr, size, req_size, PAGE_SIZE);
#else
	if (flags & Z_FULLSIZE) {
		/* Caller asked for the rounded-up usable size, not the requested one */
		req_size = size;
	}
#endif

	DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr);
	return (struct kalloc_result){ .addr = (void *)addr, .size = req_size };
}
1863 
/*
 * Zone-backed kalloc path: allocate one element from zone `z`, charging
 * `zstats` (or the zone's own stats), then apply the KASAN / PGZ / TBI
 * adjustments that depend on the build configuration.
 *
 * Returns { addr, size }: addr is NULL on failure; size is req_size,
 * or the full element size when Z_FULLSIZE is set (non-KASAN builds).
 */
static inline struct kalloc_result
kalloc_zone(
	zone_t                  z,
	zone_stats_t            zstats,
	zalloc_flags_t          flags,
	vm_size_t               req_size)
{
	struct kalloc_result kr;
	vm_size_t esize;

	/* Z_SKIP_KASAN: the kasan bookkeeping is done below, not in zalloc_ext */
	esize   = zone_elem_size(z);
	kr.addr = zalloc_ext(z, zstats ?: z->z_stats, flags | Z_SKIP_KASAN, esize);
	kr.size = req_size;

	if (__probable(kr.addr)) {
#if !KASAN_KALLOC
		if (flags & Z_FULLSIZE) {
			kr.size = req_size = esize;
		}
#endif /* !KASAN_KALLOC */
#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
		kr.addr = zone_element_pgz_oob_adjust(kr, esize);
#endif /* !ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
#if KASAN_KALLOC
		kr.addr = (void *)kasan_alloc((vm_offset_t)kr.addr, esize,
		    kr.size, KASAN_GUARD_SIZE);
		__nosan_bzero((char *)kr.addr, kr.size);
#endif /* KASAN_KALLOC */
#if CONFIG_KERNEL_TBI && KASAN_TBI
		/*
		 * Kasan-TBI at least needs to tag one byte so that
		 * we can prove the allocation was live at kfree_ext()
		 * time by doing a manual __asan_loadN check.
		 */
		kr.addr = (void *)kasan_tbi_tag_zalloc((vm_offset_t)kr.addr,
		    esize, K_SANE_SIZE(kr.size), false);
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
	}

	DTRACE_VM3(kalloc, vm_size_t, kr.size, vm_size_t, req_size, void*, kr.addr);
	return kr;
}
1906 
/*
 * Main kalloc entry point.
 *
 * `kheap_or_kt_view` is either a kalloc_heap_t or a (mangled) typed
 * variable-size view; the view case is demangled and resolved to its
 * backing heap and stats. The request is then routed either to a sizeclass
 * zone (kalloc_zone) or, when no zone covers the size, to the VM-backed
 * large path (kalloc_large).
 */
struct kalloc_result
kalloc_ext(
	void                   *kheap_or_kt_view,
	vm_size_t               req_size,
	zalloc_flags_t          flags,
	void                   *site)
{
	kalloc_type_var_view_t kt_view;
	kalloc_heap_t kheap;
	zone_stats_t zstats;
	vm_size_t size;
	zone_t z;

	if (kt_is_var_view(kheap_or_kt_view)) {
		kt_view = kt_demangle_var_view(kheap_or_kt_view);
		kheap   = kalloc_type_get_heap(kt_view, false);
		zstats  = kt_view->kt_stats;
	} else {
		kt_view = NULL;
		kheap   = kheap_or_kt_view;
		zstats  = kheap->kh_stats;
	}

#if KASAN_KALLOC
	/*
	 * Kasan for kalloc heaps will put the redzones *inside*
	 * the allocation, and hence augment its size.
	 *
	 * kalloc heaps do not use zone_t::z_kasan_redzone.
	 */
	size = kasan_alloc_resize(req_size);
#else
	size = req_size;
#endif

	z = kalloc_zone_for_size(kheap, kt_view, size);
	if (z) {
		return kalloc_zone(z, zstats, flags, req_size);
	} else {
		return kalloc_large(kheap, req_size, flags, site);
	}
}
1949 
1950 void *
1951 kalloc_external(vm_size_t size);
1952 void *
kalloc_external(vm_size_t size)1953 kalloc_external(vm_size_t size)
1954 {
1955 	zalloc_flags_t flags = Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_KALLOC);
1956 	return kheap_alloc(KHEAP_DEFAULT, size, flags);
1957 }
1958 
1959 void *
1960 kalloc_data_external(vm_size_t size, zalloc_flags_t flags);
1961 void *
kalloc_data_external(vm_size_t size,zalloc_flags_t flags)1962 kalloc_data_external(vm_size_t size, zalloc_flags_t flags)
1963 {
1964 	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC_DATA);
1965 	return kheap_alloc(KHEAP_DATA_BUFFERS, size, flags);
1966 }
1967 
1968 #if ZSECURITY_CONFIG(SUBMAP_USER_DATA)
1969 
/*
 * Diagnostic panic for kalloc_data_require() failures: reports whether the
 * address lives in a non-data zone, is too large for its zone's element
 * size, or is outside the zone native map entirely.
 */
__abortlike
static void
kalloc_data_require_panic(void *addr, vm_size_t size)
{
	zone_id_t zid = zone_id_for_element(addr, size);

	if (zid != ZONE_ID_INVALID) {
		zone_t z = &zone_array[zid];
		zone_security_flags_t zsflags = zone_security_array[zid];

		if (zsflags.z_kheap_id != KHEAP_ID_DATA_BUFFERS) {
			panic("kalloc_data_require failed: address %p in [%s%s]",
			    addr, zone_heap_name(z), zone_name(z));
		}

		/* right heap, but the claimed size exceeds the element size */
		panic("kalloc_data_require failed: address %p in [%s%s], "
		    "size too large %zd > %zd", addr,
		    zone_heap_name(z), zone_name(z),
		    (size_t)size, (size_t)zone_elem_size(z));
	} else {
		panic("kalloc_data_require failed: address %p not in zone native map",
		    addr);
	}
}
1994 
/*
 * Diagnostic panic for kalloc_non_data_require() failures: the address is
 * either in a heap that non-data pointers must not come from (none/data/
 * kt-var), too large for its zone, or outside the zone native map.
 */
__abortlike
static void
kalloc_non_data_require_panic(void *addr, vm_size_t size)
{
	zone_id_t zid = zone_id_for_element(addr, size);

	if (zid != ZONE_ID_INVALID) {
		zone_t z = &zone_array[zid];
		zone_security_flags_t zsflags = zone_security_array[zid];

		switch (zsflags.z_kheap_id) {
		case KHEAP_ID_NONE:
		case KHEAP_ID_DATA_BUFFERS:
		case KHEAP_ID_KT_VAR:
			panic("kalloc_non_data_require failed: address %p in [%s%s]",
			    addr, zone_heap_name(z), zone_name(z));
		default:
			break;
		}

		/* acceptable heap, but the claimed size exceeds the element size */
		panic("kalloc_non_data_require failed: address %p in [%s%s], "
		    "size too large %zd > %zd", addr,
		    zone_heap_name(z), zone_name(z),
		    (size_t)size, (size_t)zone_elem_size(z));
	} else {
		panic("kalloc_non_data_require failed: address %p not in zone native map",
		    addr);
	}
}
2024 
2025 #endif /* ZSECURITY_CONFIG(SUBMAP_USER_DATA) */
2026 
/*
 * Assert that [addr, addr+size) is a data-heap allocation: either an
 * element of a KHEAP_ID_DATA_BUFFERS zone large enough for `size`, or
 * memory inside the KMEM data range. Panics otherwise. Compiled out when
 * the user-data submap security config is disabled.
 */
void
kalloc_data_require(void *addr, vm_size_t size)
{
#if ZSECURITY_CONFIG(SUBMAP_USER_DATA)
	zone_id_t zid = zone_id_for_element(addr, size);

	if (zid != ZONE_ID_INVALID) {
		zone_t z = &zone_array[zid];
		zone_security_flags_t zsflags = zone_security_array[zid];
		if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS &&
		    size <= zone_elem_size(z)) {
			return;
		}
	} else if (kmem_range_id_contains(KMEM_RANGE_ID_DATA,
	    (vm_address_t)pgz_decode(addr, size), size)) {
		/* pgz_decode: translate a possible PGZ alias back first */
		return;
	}

	kalloc_data_require_panic(addr, size);
#else
#pragma unused(addr, size)
#endif
}
2050 
/*
 * Assert that [addr, addr+size) is NOT a data-heap allocation: it must be
 * a kalloc.type / default / kt-var zone element large enough for `size`,
 * or memory outside the KMEM data range. Panics otherwise. Compiled out
 * when the user-data submap security config is disabled.
 */
void
kalloc_non_data_require(void *addr, vm_size_t size)
{
#if ZSECURITY_CONFIG(SUBMAP_USER_DATA)
	zone_id_t zid = zone_id_for_element(addr, size);

	if (zid != ZONE_ID_INVALID) {
		zone_t z = &zone_array[zid];
		zone_security_flags_t zsflags = zone_security_array[zid];
		switch (zsflags.z_kheap_id) {
		case KHEAP_ID_NONE:
			/* KHEAP_ID_NONE is only acceptable for kalloc.type zones */
			if (!zsflags.z_kalloc_type) {
				break;
			}
			OS_FALLTHROUGH;
		case KHEAP_ID_DEFAULT:
		case KHEAP_ID_KT_VAR:
			if (size < zone_elem_size(z)) {
				return;
			}
			break;
		default:
			break;
		}
	} else if (!kmem_range_id_contains(KMEM_RANGE_ID_DATA,
	    (vm_address_t)pgz_decode(addr, size), size)) {
		return;
	}

	kalloc_non_data_require_panic(addr, size);
#else
#pragma unused(addr, size)
#endif
}
2085 
2086 void *
kalloc_type_impl_external(kalloc_type_view_t kt_view,zalloc_flags_t flags)2087 kalloc_type_impl_external(kalloc_type_view_t kt_view, zalloc_flags_t flags)
2088 {
2089 	/*
2090 	 * Callsites from a kext that aren't in the BootKC on macOS or
2091 	 * any callsites on armv7 are not processed during startup,
2092 	 * default to using kheap_alloc
2093 	 *
2094 	 * Additionally when size is greater KHEAP_MAX_SIZE zone is left
2095 	 * NULL as we need to use the vm for the allocation
2096 	 *
2097 	 */
2098 	if (__improbable(kt_view->kt_zv.zv_zone == ZONE_NULL)) {
2099 		vm_size_t size = kalloc_type_get_size(kt_view->kt_size);
2100 		flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2101 		return kalloc_ext(KHEAP_DEFAULT, size, flags, NULL).addr;
2102 	}
2103 
2104 	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2105 	return zalloc_flags(kt_view, flags);
2106 }
2107 
2108 void *
2109 kalloc_type_var_impl_external(
2110 	kalloc_type_var_view_t  kt_view,
2111 	vm_size_t               size,
2112 	zalloc_flags_t          flags,
2113 	void                   *site);
/*
 * KPI entry point for variable-size typed allocations from kexts:
 * masks caller flags to the KPI subset, applies the kalloc VM tag
 * (or a backtrace-derived one), then forwards to the internal impl.
 */
void *
kalloc_type_var_impl_external(
	kalloc_type_var_view_t  kt_view,
	vm_size_t               size,
	zalloc_flags_t          flags,
	void                   *site)
{
	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
	return kalloc_type_var_impl(kt_view, size, flags, site);
}
2124 
2125 #pragma mark kfree
2126 
/*
 * Panic for frees targeting the wrong heap: the element at `data` belongs
 * to zone `z`, whose heap does not match the heap the caller freed to.
 * Distinguishes kalloc.type zones (records extra panic context), plain
 * zones, and heap-vs-heap confusion.
 */
__abortlike
static void
kfree_heap_confusion_panic(kalloc_heap_t kheap, void *data, size_t size, zone_t z)
{
	zone_security_flags_t zsflags = zone_security_config(z);
	const char *kheap_name = "";

	if (kheap == KHEAP_ANY) {
		kheap_name = "KHEAP_ANY (default/kalloc type var/data)";
	} else {
		kheap_name = kalloc_heap_names[kheap->kh_heap_id];
	}

	if (zsflags.z_kalloc_type) {
		/* make the kalloc.type mapping available in the panic log */
		panic_include_kalloc_types = true;
		kalloc_type_src_zone = z;
		panic("kfree: addr %p found in kalloc type zone '%s'"
		    "but being freed to %s heap", data, z->z_name, kheap_name);
	}

	if (zsflags.z_kheap_id == KHEAP_ID_NONE) {
		panic("kfree: addr %p, size %zd found in regular zone '%s%s'",
		    data, size, zone_heap_name(z), z->z_name);
	} else {
		panic("kfree: addr %p, size %zd found in heap %s* instead of %s*",
		    data, size, zone_heap_name(z), kheap_name);
	}
}
2155 
/*
 * Panic when the size passed to kfree doesn't fit the zone element that
 * backs `data` (or when `data` isn't backed by any zone at all).
 */
__abortlike
static void
kfree_size_confusion_panic(zone_t z, void *data,
    size_t oob_offs, size_t size, size_t zsize)
{
	if (z) {
		panic("kfree: addr %p, size %zd (offs:%zd) found in zone '%s%s' "
		    "with elem_size %zd",
		    data, size, oob_offs, zone_heap_name(z), z->z_name, zsize);
	} else {
		panic("kfree: addr %p, size %zd (offs:%zd) not found in any zone",
		    data, size, oob_offs);
	}
}
2170 
/* Panic when a kfree size exceeds KFREE_ABSURD_SIZE. */
__abortlike
static void
kfree_size_invalid_panic(void *data, size_t size)
{
	panic("kfree: addr %p trying to free with nonsensical size %zd",
	    data, size);
}
2178 
/* Panic when a krealloc old_size exceeds KFREE_ABSURD_SIZE. */
__abortlike
static void
krealloc_size_invalid_panic(void *data, size_t size)
{
	panic("krealloc: addr %p trying to free with nonsensical size %zd",
	    data, size);
}
2186 
/* Panic when an element's actual size falls outside the caller's bounds. */
__abortlike
static void
kfree_size_require_panic(void *data, size_t size, size_t min_size,
    size_t max_size)
{
	panic("kfree: addr %p has size %zd, not in specified bounds [%zd - %zd]",
	    data, size, min_size, max_size);
}
2195 
/*
 * Verify that the zone element backing `addr` has a size within
 * [min_size, elem_size of the zone that max_size would map to] for the
 * given heap; panics on violation. Used by kheap_free_bounded().
 */
static void
kfree_size_require(
	kalloc_heap_t kheap,
	void *addr,
	vm_size_t min_size,
	vm_size_t max_size)
{
	assert3u(min_size, <=, max_size);
#if KASAN_KALLOC
	/* account for in-band redzones before sizeclass lookup */
	max_size = kasan_alloc_resize(max_size);
#endif
	zone_t max_zone = kalloc_heap_zone_for_size(kheap, max_size);
	vm_size_t max_zone_size = max_zone->z_elem_size;
	vm_size_t elem_size = zone_element_size(addr, NULL, false, NULL);
	if (elem_size > max_zone_size || elem_size < min_size) {
		kfree_size_require_panic(addr, elem_size, min_size, max_zone_size);
	}
}
2214 
/*
 * Free a VM-backed (larger than any sizeclass) kalloc allocation.
 *
 * Looks up the map entry for `addr` in the kernel map, cross-checks the
 * caller-provided size against the entry's extent (size 0 skips the
 * check, see kfree_addr_ext), then removes and unwires the range and
 * updates the large-allocation counters.
 */
__attribute__((noinline))
static void
kfree_large(kalloc_heap_t kheap __unused, void *addr, vm_size_t size)
{
	vm_map_entry_t entry;
	vm_map_t map = kernel_map;

	vm_map_lock(map);
	entry = vm_map_lookup_kalloc_entry_locked(map, addr);

	if (size != 0 &&
	    round_page(size) != entry->vme_end - entry->vme_start) {
		panic("address %p has size %lld not %lld, map %p", addr,
		    (uint64_t)(entry->vme_end - entry->vme_start),
		    (uint64_t)round_page(size), map);
	}

	size = entry->vme_end - entry->vme_start;

	/* drops the map lock */
	vm_map_remove_and_unlock(map, entry->vme_start, entry->vme_end,
	    VM_MAP_REMOVE_KUNWIRE);

	counter_dec(&kalloc_large_count);
	counter_add(&kalloc_large_total, -(uint64_t)size);
	KALLOC_ZINFO_SFREE(size);
	DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, addr);
}
2242 
2243 static void
kfree_zone(void * kheap_or_kt_view __unsafe_indexable,void * data,vm_size_t size,zone_t z,vm_size_t zsize)2244 kfree_zone(
2245 	void                   *kheap_or_kt_view __unsafe_indexable,
2246 	void                   *data,
2247 	vm_size_t               size,
2248 	zone_t                  z,
2249 	vm_size_t               zsize)
2250 {
2251 	zone_security_flags_t zsflags = zone_security_config(z);
2252 	kalloc_type_var_view_t kt_view;
2253 	kalloc_heap_t kheap;
2254 	zone_stats_t zstats;
2255 
2256 	if (kt_is_var_view(kheap_or_kt_view)) {
2257 		kt_view = kt_demangle_var_view(kheap_or_kt_view);
2258 		kheap   = kalloc_type_get_heap(kt_view, true);
2259 		zstats  = kt_view->kt_stats;
2260 	} else {
2261 		kt_view = NULL;
2262 		kheap   = kheap_or_kt_view;
2263 		zstats  = kheap ? kheap->kh_stats : NULL;
2264 	}
2265 
2266 	zsflags = zone_security_config(z);
2267 	if (kheap != KHEAP_ANY && kheap != KHEAP_KT_VAR) {
2268 		if (kheap->kh_heap_id != zsflags.z_kheap_id) {
2269 			kfree_heap_confusion_panic(kheap, data, size, z);
2270 		}
2271 	} else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
2272 		if (zstats == NULL) {
2273 			zstats = KHEAP_KT_VAR->kh_stats;
2274 		}
2275 	} else if (zsflags.z_kheap_id != KHEAP_ID_DEFAULT &&
2276 	    (kt_options & KT_OPTIONS_LOOSE_FREE &&
2277 	    zsflags.z_kheap_id != KHEAP_ID_DATA_BUFFERS)) {
2278 		kfree_heap_confusion_panic(kheap, data, size, z);
2279 	}
2280 
2281 	DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, zsize, void*, data);
2282 	__nosan_bzero(data, zsize);
2283 	zfree_ext(z, zstats ?: z->z_stats, data, zsize);
2284 }
2285 
/*
 * Main kfree entry point with an explicit size.
 *
 * NULL is a no-op; absurd sizes panic. Under KASAN the pointer is first
 * resized back to its real allocation and may be absorbed by the
 * quarantine. Sizes within the sizeclass range are validated against the
 * backing zone element and freed via kfree_zone(); anything larger goes
 * through the VM-backed kfree_large() path.
 */
void
kfree_ext(
	void                   *kheap_or_kt_view,
	void                   *data,
	vm_size_t               size)
{
	vm_size_t zsize = 0;
	zone_t z;

	if (data == NULL) {
		return;
	}

	if (size > KFREE_ABSURD_SIZE) {
		kfree_size_invalid_panic(data, size);
	}

#if KASAN_KALLOC
	/*
	 * Resize back to the real allocation size and hand off to the KASan
	 * quarantine. `data` may then point to a different allocation.
	 */
	vm_size_t user_size = size;
	kasan_check_free((vm_address_t)data, size, KASAN_HEAP_KALLOC);
	data = (void *)kasan_dealloc((vm_address_t)data, &size);
	kasan_free(&data, &size, KASAN_HEAP_KALLOC, NULL, user_size, true);
	if (!data) {
		return;
	}
#endif /* KASAN_KALLOC */
#if CONFIG_KERNEL_TBI && KASAN_TBI
	/* prove the allocation is still live/tag-valid before freeing */
	__asan_loadN((vm_offset_t)data, K_SANE_SIZE(size));
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

	if (size <= KHEAP_MAX_SIZE) {
		vm_offset_t oob_offs;

		zsize = zone_element_size(data, &z, true, &oob_offs);
		if (size + oob_offs > zsize || zsize == 0) {
			kfree_size_confusion_panic(z, data,
			    oob_offs, size, zsize);
		}
		/* undo any PGZ OOB shift so the zone sees the element start */
		kfree_zone(kheap_or_kt_view, (char *)data - oob_offs,
		    size, z, zsize);
	} else {
		kfree_large(kheap_or_kt_view, data, size);
	}
}
2334 
/*
 * kfree without a caller-supplied size: the size is recovered from the
 * backing zone element (or, for large allocations, from the VM map entry
 * inside kfree_large, which skips its size check when passed 0).
 */
void
kfree_addr_ext(kalloc_heap_t kheap, void *data)
{
	if (data == NULL) {
		return;
	}

#if KASAN_KALLOC
	/* KASAN tracks the user size; recover it and use the sized path */
	kfree_ext(kheap, data, kasan_user_size((vm_offset_t)data));
#else
	vm_offset_t oob_offs;
	vm_size_t size;
	zone_t z;

#if CONFIG_KERNEL_TBI && KASAN_TBI
	__asan_loadN((vm_offset_t)data, KALLOC_MINSIZE);
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */

	size = zone_element_size(data, &z, true, &oob_offs);
	if (size) {
		data = (char *)data - oob_offs;
		kfree_zone(kheap, data, size - oob_offs, z, size);
	} else {
		/* not zone-backed: must be a large allocation */
		kfree_large(kheap, data, 0);
	}
#endif /* !KASAN_KALLOC */
}
2362 
/*
 * KPI wrapper for kexts: free to any heap (heap confusion checks are
 * relaxed to the KHEAP_ANY rules in kfree_zone).
 */
void
kfree_external(void *addr, vm_size_t size);
void
kfree_external(void *addr, vm_size_t size)
{
	kfree_ext(KHEAP_ANY, addr, size);
}
2370 
/*
 * Free `addr` after verifying its element size lies within
 * [min_sz, max_sz] for this heap (panics otherwise). NULL is a no-op.
 * Parenthesized name prevents macro expansion of kheap_free_bounded.
 */
void
(kheap_free_bounded)(kalloc_heap_t kheap, void *addr,
    vm_size_t min_sz, vm_size_t max_sz)
{
	if (__improbable(addr == NULL)) {
		return;
	}
	kfree_size_require(kheap, addr, min_sz, max_sz);
	kfree_addr_ext(kheap, addr);
}
2381 
/*
 * Reallocate a kalloc allocation.
 *
 * Degenerate cases: new_size == 0 frees and returns an empty result;
 * addr == NULL is a plain allocation. Otherwise the new size is mapped
 * to its bucket (zone element size or page-rounded large size); when the
 * bucket is unchanged the original element is reused in place (with PGZ /
 * KASAN / TBI bookkeeping redone), and when it changes a fresh allocation
 * is made, min(old,new) bytes are copied, and the old element is freed
 * (also freed on allocation failure when Z_REALLOCF is set).
 */
struct kalloc_result
krealloc_ext(
	void                   *kheap_or_kt_view,
	void                   *addr,
	vm_size_t               old_size,
	vm_size_t               new_size,
	zalloc_flags_t          flags,
	void                   *site)
{
	vm_size_t old_bucket_size, new_bucket_size, min_size;
	vm_size_t adj_new_size, adj_old_size;
	kalloc_type_var_view_t kt_view;
	kalloc_heap_t kheap;
	zone_stats_t zstats;
	struct kalloc_result kr;
	vm_offset_t oob_offs = 0;
	zone_t z;

	if (__improbable(new_size == 0)) {
		if (addr) {
			kfree_ext(kheap_or_kt_view, addr, old_size);
		}
		return (struct kalloc_result){ };
	}

	if (__improbable(addr == NULL)) {
		return kalloc_ext(kheap_or_kt_view, new_size, flags, site);
	}

	if (old_size > KFREE_ABSURD_SIZE) {
		krealloc_size_invalid_panic(addr, old_size);
	}

#if KASAN_KALLOC
	/*
	 * Adjust sizes to account for kasan redzones
	 */
	adj_new_size = kasan_alloc_resize(new_size);
	adj_old_size = kasan_alloc_resize(old_size);
#else
	adj_old_size = old_size;
	adj_new_size = new_size;
#endif /* KASAN_KALLOC */

	/* resolve a typed variable-size view to its heap and stats */
	if (kt_is_var_view(kheap_or_kt_view)) {
		kt_view = kt_demangle_var_view(kheap_or_kt_view);
		kheap   = kalloc_type_get_heap(kt_view, false);
		zstats  = kt_view->kt_stats;
	} else {
		kt_view = NULL;
		kheap   = kheap_or_kt_view;
		zstats  = kheap ? kheap->kh_stats : NULL;
	}

	/*
	 * Find out the size of the bucket in which the new sized allocation
	 * would land. If it matches the bucket of the original allocation,
	 * simply return the same address.
	 */
	z = kalloc_zone_for_size(kheap, kt_view, adj_new_size);
	new_bucket_size = z ? zone_elem_size(z) : round_page(adj_new_size);

	if (adj_old_size <= KHEAP_MAX_SIZE) {
		/* validate old_size against the backing zone element */
		old_bucket_size = zone_element_size(addr, NULL, true, &oob_offs);
		if (old_size + oob_offs > old_bucket_size || old_bucket_size == 0) {
			kfree_size_confusion_panic(z, addr,
			    oob_offs, old_size, old_bucket_size);
		}
	} else {
		old_bucket_size = round_page(adj_old_size);
	}
	min_size = MIN(old_size, new_size);

	if (old_bucket_size == new_bucket_size) {
		/* in-place: same bucket, element start unshifted by oob_offs */
		kr.addr = (char *)addr - oob_offs;
		kr.size = new_size;
#if !KASAN_KALLOC
		if (flags & Z_FULLSIZE) {
			kr.size = new_bucket_size;
		}
#endif /* !KASAN_KALLOC */
#if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
		if (z) {
			kr.addr = zone_element_pgz_oob_adjust(kr, new_bucket_size);
			if (kr.addr != addr) {
				/* element moved within its slot: shift + clear tail */
				memmove(kr.addr, addr, min_size);
				bzero((char *)kr.addr + min_size,
				    kr.size - min_size);
			}
		}
#endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
#if KASAN_KALLOC
		/*
		 * Adjust right redzone in the element and poison it correctly
		 */
		kasan_check_free((vm_address_t)addr, old_size, KASAN_HEAP_KALLOC);
		kr.addr = (void *)kasan_realloc((vm_offset_t)addr,
		    new_bucket_size + ptoa(z ? 0 : 2), kr.size,
		    z ? KASAN_GUARD_SIZE : PAGE_SIZE);
#endif /* KASAN_KALLOC */
#if CONFIG_KERNEL_TBI && KASAN_TBI
		/*
		 * Validate the current buffer, then generate a new tag,
		 * even if the address is stable, it's a "new" allocation.
		 */
		__asan_loadN((vm_offset_t)addr, K_SANE_SIZE(adj_old_size));
		kr.addr = (void *)VM_KERNEL_TBI_FILL((vm_offset_t)kr.addr);
		kr.addr = (void *)kasan_tbi_tag_zalloc((vm_offset_t)kr.addr,
		    kr.size, new_bucket_size, false);
#endif /* CONFIG_KERNEL_TBI && KASAN_TBI */
	} else {
		/* different bucket: allocate, copy, free the old element */
		if (z) {
			kr = kalloc_zone(z, zstats, flags, new_size);
		} else {
			kr = kalloc_large(kheap, new_size, flags, site);
		}
		if (kr.addr != NULL) {
			__nosan_memcpy(kr.addr, addr, min_size);
		}
		if (kr.addr != NULL || (flags & Z_REALLOCF)) {
			kfree_ext(kheap_or_kt_view, (char *)addr - oob_offs, old_size);
		}
	}
	return kr;
}
2507 
/*
 * KPI entry point for fixed-size typed frees from kexts; mirrors
 * kalloc_type_impl_external(): views without a bound zone fall back to
 * the default heap, otherwise the element is freed to its typed zone.
 */
void
kfree_type_impl_external(kalloc_type_view_t kt_view, void *ptr)
{
	/*
	 * If callsite is from a kext that isn't in the BootKC, it wasn't
	 * processed during startup so default to using kheap_alloc
	 *
	 * Additionally when size is greater KHEAP_MAX_SIZE zone is left
	 * NULL as we need to use the vm for the allocation/free
	 */
	if (kt_view->kt_zv.zv_zone == ZONE_NULL) {
		return kheap_free(KHEAP_DEFAULT, ptr,
		           kalloc_type_get_size(kt_view->kt_size));
	}
	if (__improbable(ptr == NULL)) {
		return;
	}
	return zfree(kt_view, ptr);
}
2527 
/*
 * KPI entry point for variable-size typed frees from kexts; thin
 * forwarder to the internal implementation.
 */
void
kfree_type_var_impl_external(
	kalloc_type_var_view_t  kt_view,
	void                   *ptr,
	vm_size_t               size);
void
kfree_type_var_impl_external(
	kalloc_type_var_view_t  kt_view,
	void                   *ptr,
	vm_size_t               size)
{
	return kfree_type_var_impl(kt_view, ptr, size);
}
2541 
/* KPI wrapper for kexts: sized free to the data-buffers heap. */
void
kfree_data_external(void *ptr, vm_size_t size);
void
kfree_data_external(void *ptr, vm_size_t size)
{
	return kheap_free(KHEAP_DATA_BUFFERS, ptr, size);
}
2549 
/* KPI wrapper for kexts: size-less free to the data-buffers heap. */
void
kfree_data_addr_external(void *ptr);
void
kfree_data_addr_external(void *ptr)
{
	return kheap_free_addr(KHEAP_DATA_BUFFERS, ptr);
}
2557 
/*
 * KPI wrapper for kexts: realloc within the data-buffers heap. Caller
 * flags are masked to the KPI subset and tagged for kalloc-data.
 */
void *
krealloc_data_external(
	void               *ptr,
	vm_size_t           old_size,
	vm_size_t           new_size,
	zalloc_flags_t      flags);
void *
krealloc_data_external(
	void               *ptr,
	vm_size_t           old_size,
	vm_size_t           new_size,
	zalloc_flags_t      flags)
{
	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC_DATA);
	return krealloc_ext(KHEAP_DATA_BUFFERS, ptr, old_size, new_size, flags, NULL).addr;
}
2574 
/*
 * Startup initializer for a kalloc heap view: binds the view to the
 * zones and tag of its base heap (default or data-buffers), allocates
 * per-cpu stats, and links the view into the base heap's view list.
 * Panics on any other heap id.
 */
__startup_func
void
kheap_startup_init(kalloc_heap_t kheap)
{
	struct kheap_zones *zones;
	vm_tag_t tag;

	switch (kheap->kh_heap_id) {
	case KHEAP_ID_DEFAULT:
		zones = KHEAP_DEFAULT->kh_zones;
		tag = KHEAP_DEFAULT->kh_tag;
		break;
	case KHEAP_ID_DATA_BUFFERS:
		zones = KHEAP_DATA_BUFFERS->kh_zones;
		tag = KHEAP_DATA_BUFFERS->kh_tag;
		break;
	default:
		panic("kalloc_heap_startup_init: invalid KHEAP_ID: %d",
		    kheap->kh_heap_id);
	}

	kheap->kh_heap_id = zones->heap_id;
	kheap->kh_zones = zones;
	kheap->kh_stats = zalloc_percpu_permanent_type(struct zone_stats);
	/* push this view onto the base heap's view list */
	kheap->kh_next = zones->views;
	zones->views = kheap;
	kheap->kh_tag = tag;
	zone_view_count += 1;
}
2604 
2605 #pragma mark IOKit/libkern helpers
2606 
2607 #if PLATFORM_MacOSX
2608 
2609 void *
2610 kern_os_malloc_external(size_t size);
2611 void *
kern_os_malloc_external(size_t size)2612 kern_os_malloc_external(size_t size)
2613 {
2614 	if (size == 0) {
2615 		return NULL;
2616 	}
2617 
2618 	return kheap_alloc(KERN_OS_MALLOC, size,
2619 	           Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_LIBKERN));
2620 }
2621 
/* libkern operator-delete backend: size-less free to KERN_OS_MALLOC. */
void
kern_os_free_external(void *addr);
void
kern_os_free_external(void *addr)
{
	kheap_free_addr(KERN_OS_MALLOC, addr);
}
2629 
/*
 * libkern realloc backend: recovers the old allocation size (from KASAN
 * metadata, the backing zone element, or the kernel-map entry for large
 * allocations) and forwards to krealloc_ext on KERN_OS_MALLOC.
 */
void *
kern_os_realloc_external(void *addr, size_t nsize);
void *
kern_os_realloc_external(void *addr, size_t nsize)
{
	zalloc_flags_t flags = Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_LIBKERN);
	vm_size_t osize, oob_offs = 0;

	if (addr == NULL) {
		return kern_os_malloc_external(nsize);
	}

#if KASAN_KALLOC
	osize = kasan_user_size((vm_offset_t)addr);
#else
	osize = zone_element_size(addr, NULL, false, &oob_offs);
	if (osize == 0) {
		/* not zone-backed: derive size from the VM map entry */
		vm_map_entry_t entry;

		vm_map_lock_read(kernel_map);
		entry = vm_map_lookup_kalloc_entry_locked(kernel_map, addr);
		osize = entry->vme_end - entry->vme_start;
		vm_map_unlock_read(kernel_map);
	}
#endif
	return krealloc_ext(KERN_OS_MALLOC, addr, osize - oob_offs, nsize, flags, NULL).addr;
}
2657 
2658 #endif /* PLATFORM_MacOSX */
2659 
/*
 * Free an object allegedly from `zone`. Under strict IOKit free policy
 * this is a plain zfree; otherwise, objects not owned by the zone
 * (e.g. operator-new allocations from third-party kexts) are diverted
 * to the default kalloc heap with a diagnostic printf.
 */
void
kern_os_zfree(zone_t zone, void *addr, vm_size_t size)
{
#if ZSECURITY_CONFIG(STRICT_IOKIT_FREE)
#pragma unused(size)
	zfree(zone, addr);
#else
	if (zone_owns(zone, addr)) {
		zfree(zone, addr);
	} else {
		/*
		 * Third party kexts might not know about the operator new
		 * and be allocated from the default heap
		 */
		printf("kern_os_zfree: kheap_free called for object from zone %s\n",
		    zone->z_name);
		kheap_free(KHEAP_DEFAULT, addr, size);
	}
#endif
}
2680 
/*
 * Report whether a fixed-size kalloc_type view is backed by the VM
 * (rather than a zone), based on its kalloc_type atom.
 */
bool
IOMallocType_from_vm(kalloc_type_view_t ktv)
{
	struct kalloc_type_atom kt_atom = kalloc_type_func(KTV_FIXED, get_atom,
	    (vm_offset_t)ktv, false);
	return kalloc_type_from_vm(kt_atom);
}
2688 
/*
 * Typed free used by libkern operator delete: normally forwards to
 * kfree_type_impl_external, but (when strict freeing is off) tolerates
 * objects that legacy kexts allocated from the default heap.
 */
void
kern_os_typed_free(kalloc_type_view_t ktv, void *addr, vm_size_t esize)
{
#if ZSECURITY_CONFIG(STRICT_IOKIT_FREE) || !ZSECURITY_CONFIG(KALLOC_TYPE)
#pragma unused(esize)
#else
	/*
	 * For third party kexts that have been compiled with sdk pre macOS 11,
	 * an allocation of an OSObject that is defined in xnu or first pary
	 * kexts, by directly calling new will lead to using the default heap
	 * as it will call OSObject_operator_new_external. If this object
	 * is freed by xnu, it panics as xnu uses the typed free which
	 * requires the object to have been allocated in a kalloc.type zone.
	 * To workaround this issue, detect if the allocation being freed is
	 * from the default heap and allow freeing to it.
	 */
	zone_id_t zid = zone_id_for_element(addr, esize);
	if (__probable(zid < MAX_ZONES)) {
		zone_security_flags_t zsflags = zone_security_array[zid];
		if (zsflags.z_kheap_id == KHEAP_ID_DEFAULT) {
			return kheap_free(KHEAP_DEFAULT, addr, esize);
		}
	}
#endif
	kfree_type_impl_external(ktv, addr);
}
2715 
2716 #pragma mark tests
2717 #if DEBUG || DEVELOPMENT
2718 
2719 #include <sys/random.h>
2720 /*
2721  * Ensure that the feature is on when the ZSECURITY_CONFIG is present.
2722  *
2723  * Note: Presence of zones with name kalloc.type* is used to
2724  * determine if the feature is on.
2725  */
/*
 * Test helper: returns 1 when the kalloc_type feature is effectively on
 * (every populated kalloc_type_zarray chain consists of "kalloc.type*"
 * zones), 0 otherwise. Trivially passes when the feature is compiled out.
 */
static int
kalloc_type_feature_on(void)
{
	/*
	 * ZSECURITY_CONFIG not present
	 */
#if !ZSECURITY_CONFIG(KALLOC_TYPE)
	return 1;
#endif /* !ZSECURITY_CONFIG(KALLOC_TYPE) */

	boolean_t zone_found = false;
	const char kalloc_type_str[] = "kalloc.type";
	for (uint16_t i = 0; i < MAX_K_ZONE(k_zone_cfg); i++) {
		zone_t z = kalloc_type_zarray[i];
		while (z != NULL) {
			zone_found = true;
			/* every chained zone must carry the kalloc.type prefix */
			if (strncmp(z->z_name, kalloc_type_str,
			    strlen(kalloc_type_str)) != 0) {
				return 0;
			}
			z = z->z_kt_next;
		}
	}

	if (!zone_found) {
		return 0;
	}

	return 1;
}
2756 
2757 /*
2758  * Ensure that the policy uses the zone budget completely
2759  */
#if ZSECURITY_CONFIG(KALLOC_TYPE)
/*
 * Test helper: feed kalloc_type_apply_policy a random bucket-frequency
 * distribution and check it consumes the entire zone budget `in`
 * (returns 1 on success). Budgets below 2 zones per size class are
 * rejected up front.
 */
static int
kalloc_type_test_policy(int64_t in)
{
	uint16_t zone_budget = (uint16_t) in;
	uint16_t max_bucket_freq = 25;
	uint16_t freq_list[MAX_K_ZONE(k_zone_cfg)] = {};
	uint16_t zones_per_bucket[MAX_K_ZONE(k_zone_cfg)] = {};
	uint16_t random[MAX_K_ZONE(k_zone_cfg)];
	int ret = 0;

	/*
	 * Need a minimum of 2 zones per size class
	 */
	if (zone_budget < MAX_K_ZONE(k_zone_cfg) * 2) {
		return ret;
	}
	read_random((void *)&random[0], sizeof(random));
	for (uint16_t i = 0; i < MAX_K_ZONE(k_zone_cfg); i++) {
		freq_list[i] = random[i] % max_bucket_freq;
	}
	uint16_t wasted_zone_budget = kalloc_type_apply_policy(freq_list,
	    zones_per_bucket, zone_budget);
	if (wasted_zone_budget == 0) {
		ret = 1;
	}
	return ret;
}
#else /* ZSECURITY_CONFIG(KALLOC_TYPE) */
/* Feature compiled out: policy check trivially passes. */
static int
kalloc_type_test_policy(int64_t in)
{
#pragma unused(in)
	return 1;
}
#endif /* !ZSECURITY_CONFIG(KALLOC_TYPE) */
2796 
2797 /*
2798  * Ensure that size of adopters of kalloc_type fit in the zone
2799  * they have been assigned.
2800  */
/*
 * Test helper: walk every kalloc_type view attached to zone `z` and
 * verify its declared type size fits the zone's element size.
 * Returns 1 on success, 0 on the first oversized view.
 */
static int
kalloc_type_check_size(zone_t z)
{
	uint16_t elem_size = z->z_elem_size;
	kalloc_type_view_t kt_cur = (kalloc_type_view_t) z->z_views;
	const char site_str[] = "site.";
	const size_t site_str_len = strlen(site_str);
	while (kt_cur != NULL) {
		/*
		 * Process only kalloc_type_views and skip the zone_views when
		 * feature is off.
		 */
#if !ZSECURITY_CONFIG(KALLOC_TYPE)
		if (strncmp(kt_cur->kt_zv.zv_name, site_str, site_str_len) != 0) {
			kt_cur = (kalloc_type_view_t) kt_cur->kt_zv.zv_next;
			continue;
		}
#else /* !ZSECURITY_CONFIG(KALLOC_TYPE) */
#pragma unused(site_str, site_str_len)
#endif /* ZSECURITY_CONFIG(KALLOC_TYPE) */
		if (kalloc_type_get_size(kt_cur->kt_size) > elem_size) {
			return 0;
		}
		kt_cur = (kalloc_type_view_t) kt_cur->kt_zv.zv_next;
	}
	return 1;
}
2828 
/* Fixture type for the all-data signature redirect test below. */
struct test_kt_data {
	int a;
};
2832 
/*
 * Test helper: a type whose signature is all-data (struct test_kt_data)
 * must be classified as data by kalloc_type_is_data(); returns 1 when
 * the redirect works, 0 otherwise.
 */
static int
kalloc_type_test_data_redirect()
{
	struct kalloc_type_view ktv_data = {
		.kt_signature = __builtin_xnu_type_signature(struct test_kt_data)
	};
	if (!kalloc_type_is_data(kalloc_type_func(KTV_FIXED, get_atom,
	    (vm_offset_t)&ktv_data, false))) {
		printf("%s: data redirect failed\n", __func__);
		return 0;
	}
	return 1;
}
2846 
/*
 * sysctl-driven kalloc_type self-test: checks view sizes against their
 * zones, the zone-budget policy (seeded with `in`), feature-on state,
 * and the all-data signature redirect. Sets *out to 1 on full success.
 */
static int
run_kalloc_type_test(int64_t in, int64_t *out)
{
	*out = 0;
	for (uint16_t i = 0; i < MAX_K_ZONE(k_zone_cfg); i++) {
		zone_t z = kalloc_type_zarray[i];
		while (z != NULL) {
			if (!kalloc_type_check_size(z)) {
				printf("%s: size check failed\n", __func__);
				return 0;
			}
			z = z->z_kt_next;
		}
	}

	if (!kalloc_type_test_policy(in)) {
		printf("%s: policy check failed\n", __func__);
		return 0;
	}

	if (!kalloc_type_feature_on()) {
		printf("%s: boot-arg is on but feature isn't\n", __func__);
		return 0;
	}

	if (!kalloc_type_test_data_redirect()) {
		printf("%s: kalloc_type redirect for all data signature failed\n",
		    __func__);
		return 0;
	}

	printf("%s: test passed\n", __func__);

	*out = 1;
	return 0;
}
2883 SYSCTL_TEST_REGISTER(kalloc_type, run_kalloc_type_test);
2884 
2885 static vm_size_t
test_bucket_size(kalloc_heap_t kheap,vm_size_t size)2886 test_bucket_size(kalloc_heap_t kheap, vm_size_t size)
2887 {
2888 	zone_t z = kalloc_heap_zone_for_size(kheap, size);
2889 
2890 	return z ? zone_elem_size(z) : round_page(size);
2891 }
2892 
2893 static int
run_kalloc_test(int64_t in __unused,int64_t * out)2894 run_kalloc_test(int64_t in __unused, int64_t *out)
2895 {
2896 	*out = 0;
2897 	uint64_t * data_ptr;
2898 	size_t alloc_size, old_alloc_size;
2899 
2900 	printf("%s: test running\n", __func__);
2901 
2902 	alloc_size = sizeof(uint64_t) + 1;
2903 	data_ptr = kalloc_ext(KHEAP_DEFAULT, alloc_size, Z_WAITOK, NULL).addr;
2904 	if (!data_ptr) {
2905 		printf("%s: kalloc sizeof(uint64_t) returned null\n", __func__);
2906 		return 0;
2907 	}
2908 
2909 	struct kalloc_result kr = {};
2910 	old_alloc_size = alloc_size;
2911 	alloc_size++;
2912 	kr = krealloc_ext(KHEAP_DEFAULT, data_ptr, old_alloc_size, alloc_size,
2913 	    Z_WAITOK | Z_NOFAIL, NULL);
2914 	if (!kr.addr || kr.addr != data_ptr ||
2915 	    test_bucket_size(KHEAP_DEFAULT, kr.size) !=
2916 	    test_bucket_size(KHEAP_DEFAULT, old_alloc_size)) {
2917 		printf("%s: same size class realloc failed\n", __func__);
2918 		return 0;
2919 	}
2920 
2921 	old_alloc_size = alloc_size;
2922 	alloc_size *= 2;
2923 	kr = krealloc_ext(KHEAP_DEFAULT, kr.addr, old_alloc_size, alloc_size,
2924 	    Z_WAITOK | Z_NOFAIL, NULL);
2925 	if (!kr.addr || test_bucket_size(KHEAP_DEFAULT, kr.size) ==
2926 	    test_bucket_size(KHEAP_DEFAULT, old_alloc_size)) {
2927 		printf("%s: new size class realloc failed\n", __func__);
2928 		return 0;
2929 	}
2930 
2931 	old_alloc_size = alloc_size;
2932 	alloc_size *= 2;
2933 	data_ptr = krealloc_ext(KHEAP_DEFAULT, kr.addr, old_alloc_size,
2934 	    alloc_size, Z_WAITOK | Z_NOFAIL, NULL).addr;
2935 	if (!data_ptr) {
2936 		printf("%s: realloc without old size returned null\n", __func__);
2937 		return 0;
2938 	}
2939 	kheap_free(KHEAP_DEFAULT, data_ptr, alloc_size);
2940 
2941 	alloc_size = 3544;
2942 	data_ptr = kalloc_ext(KHEAP_DEFAULT, alloc_size, Z_WAITOK, NULL).addr;
2943 	if (!data_ptr) {
2944 		printf("%s: kalloc 3544 returned not null\n", __func__);
2945 		return 0;
2946 	}
2947 	kheap_free(KHEAP_DEFAULT, data_ptr, alloc_size);
2948 
2949 	printf("%s: test passed\n", __func__);
2950 	*out = 1;
2951 	return 0;
2952 }
2953 SYSCTL_TEST_REGISTER(kalloc, run_kalloc_test);
2954 
2955 #endif
2956