/*
 * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */

#include "mach/vm_types.h"
#include <mach/boolean.h>
#include <mach/sdt.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/counter.h>
#include <kern/zalloc_internal.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <kern/backtrace.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <sys/kdebug.h>

#include <os/hash.h>
#include <san/kasan.h>
#include <libkern/section_keywords.h>
#include <libkern/prelink.h>

SCALABLE_COUNTER_DEFINE(kalloc_large_count);
SCALABLE_COUNTER_DEFINE(kalloc_large_total);

#pragma mark initialization

/*
 * All allocations of size less than KHEAP_MAX_SIZE are rounded up to the
 * next sized zone.  This allocator is built on top of the zone allocator.
 * A zone is created for each potential size that we are willing to get in
 * small blocks.
 *
 * Allocations of size greater than KHEAP_MAX_SIZE are allocated from the VM.
 */

/*
 * The kt_zone_cfg table defines the configuration of zones on various
 * platforms for kalloc_type fixed size allocations.
 */

#if KASAN_CLASSIC
#define K_SIZE_CLASS(size) \
	(((size) & PAGE_MASK) == 0 ? (size) : \
	((size) <= 1024 ? (size) : (size) - KASAN_GUARD_SIZE))
#else
#define K_SIZE_CLASS(size)      (size)
#endif
static_assert(K_SIZE_CLASS(KHEAP_MAX_SIZE) == KHEAP_MAX_SIZE);

static const uint16_t kt_zone_cfg[] = {
	K_SIZE_CLASS(16),
	K_SIZE_CLASS(32),
	K_SIZE_CLASS(48),
	K_SIZE_CLASS(64),
	K_SIZE_CLASS(80),
	K_SIZE_CLASS(96),
	K_SIZE_CLASS(128),
	K_SIZE_CLASS(160),
	K_SIZE_CLASS(192),
	K_SIZE_CLASS(224),
	K_SIZE_CLASS(256),
	K_SIZE_CLASS(288),
	K_SIZE_CLASS(368),
	K_SIZE_CLASS(400),
	K_SIZE_CLASS(512),
	K_SIZE_CLASS(576),
	K_SIZE_CLASS(768),
	K_SIZE_CLASS(1024),
	K_SIZE_CLASS(1152),
	K_SIZE_CLASS(1280),
	K_SIZE_CLASS(1664),
	K_SIZE_CLASS(2048),
	K_SIZE_CLASS(4096),
	K_SIZE_CLASS(6144),
	K_SIZE_CLASS(8192),
	K_SIZE_CLASS(12288),
	K_SIZE_CLASS(16384),
#if __arm64__
	K_SIZE_CLASS(24576),
	K_SIZE_CLASS(32768),
#endif /* __arm64__ */
};

#define MAX_K_ZONE(kzc) (uint32_t)(sizeof(kzc) / sizeof(kzc[0]))

/*
 * kalloc_type callsites are assigned a zone during early boot. They
 * use the dlut[] (direct lookup table), indexed by size normalized
 * to the minimum alignment to find the right zone index quickly.
 */
#define INDEX_ZDLUT(size)       (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
#define KALLOC_DLUT_SIZE        (KHEAP_MAX_SIZE / KALLOC_MINALIGN)
#define MAX_SIZE_ZDLUT          ((KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN)
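/*
 * Illustrative example (not part of the build): assuming KALLOC_MINALIGN
 * is 16 bytes, a 40-byte request normalizes to
 * INDEX_ZDLUT(40) == (40 + 15) / 16 == 3, and kalloc_type_dlut[3]
 * (populated by kalloc_type_build_dlut() below) names the first
 * kt_zone_cfg entry of at least 48 bytes.
 */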
static __startup_data uint8_t kalloc_type_dlut[KALLOC_DLUT_SIZE];
static __startup_data uint32_t kheap_zsize[KHEAP_NUM_ZONES];

#if VM_TAG_SIZECLASSES
static_assert(VM_TAG_SIZECLASSES >= MAX_K_ZONE(kt_zone_cfg));
#endif

const char * const kalloc_heap_names[] = {
	[KHEAP_ID_NONE]         = "",
	[KHEAP_ID_DEFAULT]      = "default.",
	[KHEAP_ID_DATA_BUFFERS] = "data.",
	[KHEAP_ID_KT_VAR]       = "",
};

/*
 * Default kalloc heap configuration
 */
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DEFAULT[1] = {
	{
		.kh_name    = "default.",
		.kh_heap_id = KHEAP_ID_DEFAULT,
		.kh_tag     = VM_KERN_MEMORY_KALLOC
	}
};


/*
 * Bag of bytes heap configuration
 */
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DATA_BUFFERS[1] = {
	{
		.kh_name    = "data.",
		.kh_heap_id = KHEAP_ID_DATA_BUFFERS,
		.kh_tag     = VM_KERN_MEMORY_KALLOC_DATA,
	}
};

/*
 * Configuration of variable kalloc type heaps
 */
SECURITY_READ_ONLY_LATE(struct kheap_info)
kalloc_type_heap_array[KT_VAR_MAX_HEAPS] = {};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_KT_VAR[1] = {
	{
		.kh_name    = "kalloc.type.var",
		.kh_heap_id = KHEAP_ID_KT_VAR,
		.kh_tag     = VM_KERN_MEMORY_KALLOC_TYPE
	}
};

__startup_func
static void
kalloc_zsize_compute(void)
{
	uint32_t step = KHEAP_STEP_START;
	uint32_t size = KHEAP_START_SIZE;

	/*
	 * Manually initialize extra initial zones
	 */
	kheap_zsize[0] = size / 2;
	kheap_zsize[1] = size;
	static_assert(KHEAP_EXTRA_ZONES == 2);

	/*
	 * Compute sizes for remaining zones
	 */
	for (uint32_t i = 0; i < KHEAP_NUM_STEPS; i++) {
		uint32_t step_idx = (i * 2) + KHEAP_EXTRA_ZONES;

		kheap_zsize[step_idx] = K_SIZE_CLASS(size + step);
		kheap_zsize[step_idx + 1] = K_SIZE_CLASS(size + 2 * step);

		step *= 2;
		size += step;
	}
}
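/*
 * For illustration (assuming KHEAP_START_SIZE == 32 and
 * KHEAP_STEP_START == 16, and ignoring the KASAN_CLASSIC redzone
 * adjustment), the loop above yields the ladder:
 *
 *	16, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, ...
 *
 * i.e. each power-of-two bucket is split by one midpoint (1.5x) bucket.
 */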

static zone_t
kalloc_zone_for_size_with_flags(
	zone_id_t               zid,
	vm_size_t               size,
	zalloc_flags_t          flags)
{
	vm_size_t max_size = KHEAP_MAX_SIZE;
	bool forcopyin = flags & Z_MAY_COPYINMAP;
	zone_t zone;

	if (flags & Z_KALLOC_ARRAY) {
		size = roundup(size, KALLOC_ARRAY_GRANULE);
	}

	if (forcopyin) {
#if __x86_64__
		/*
		 * On Intel, the OSData() ABI used to allocate
		 * from the kernel map starting at PAGE_SIZE.
		 *
		 * If only vm_map_copyin() or a wrapper is used,
		 * then everything will work fine because vm_map_copy_t
		 * will perform an actual copy if the data is smaller
		 * than msg_ool_size_small (== KHEAP_MAX_SIZE).
		 *
		 * However, if anyone is trying to call mach_vm_remap(),
		 * then bad things (TM) happen.
		 *
		 * Avoid this by preserving the ABI and moving
		 * to kalloc_large() earlier.
		 *
		 * Any recent code really ought to use IOMemoryDescriptor
		 * for this purpose however.
		 */
		max_size = PAGE_SIZE - 1;
#endif
	}

	if (size <= max_size) {
		uint32_t idx;

		if (size <= KHEAP_START_SIZE) {
			zid += (size > 16);
		} else {
			/*
			 * . log2down(size - 1) is log2up(size) - 1
			 * . (size - 1) >> (log2down(size - 1) - 1)
			 *   is either 0x2 or 0x3
			 */
			idx = kalloc_log2down((uint32_t)(size - 1));
			zid += KHEAP_EXTRA_ZONES +
			    2 * (idx - KHEAP_START_IDX) +
			    ((uint32_t)(size - 1) >> (idx - 1)) - 2;
		}

		zone = zone_by_id(zid);
#if KASAN_CLASSIC
		/*
		 * Under kasan classic, certain size classes are a redzone
		 * away from the mathematical formula above, and we need
		 * to "go to the next zone".
		 *
		 * Because the KHEAP_MAX_SIZE bucket _does_ exist however,
		 * this will never go to an "invalid" zone that doesn't
		 * belong to the kheap.
		 */
		if (size > zone_elem_inner_size(zone)) {
			zone++;
		}
#endif
		return zone;
	}

	return ZONE_NULL;
}
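/*
 * Worked example for the bucket math above (illustrative only, assuming
 * KHEAP_EXTRA_ZONES == 2, KHEAP_START_SIZE == 32, and KHEAP_START_IDX == 5,
 * i.e. log2(32)): for size == 100, idx = log2down(99) = 6, so the offset
 * from the heap start is 2 + 2 * (6 - 5) + (99 >> 5) - 2 == 5, which
 * selects the 128-byte zone in the ladder 16, 32, 48, 64, 96, 128, ...
 */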

zone_t
kalloc_zone_for_size(zone_id_t zid, size_t size)
{
	return kalloc_zone_for_size_with_flags(zid, size, Z_WAITOK);
}

static inline bool
kheap_size_from_zone(
	void                   *addr,
	vm_size_t               size,
	zalloc_flags_t          flags)
{
	vm_size_t max_size = KHEAP_MAX_SIZE;
	bool forcopyin = flags & Z_MAY_COPYINMAP;

#if __x86_64__
	/*
	 * If Z_FULLSIZE is used, then due to kalloc_zone_for_size_with_flags()
	 * behavior, the element could have a PAGE_SIZE reported size,
	 * yet still be from a zone for Z_MAY_COPYINMAP.
	 */
	if (forcopyin) {
		if (size == PAGE_SIZE &&
		    zone_id_for_element(addr, size) != ZONE_ID_INVALID) {
			return true;
		}

		max_size = PAGE_SIZE - 1;
	}
#else
#pragma unused(addr, forcopyin)
#endif

	return size <= max_size;
}

__startup_func
static void
kalloc_zone_init(
	const char             *kheap_name,
	zone_kheap_id_t         kheap_id,
	zone_id_t              *kheap_zstart,
	zone_create_flags_t     zc_flags)
{
	zc_flags |= ZC_PGZ_USE_GUARDS;

	for (uint32_t i = 0; i < KHEAP_NUM_ZONES; i++) {
		uint32_t size = kheap_zsize[i];
		char buf[MAX_ZONE_NAME], *z_name;
		int len;

		len = scnprintf(buf, MAX_ZONE_NAME, "%s.%u", kheap_name, size);
		z_name = zalloc_permanent(len + 1, ZALIGN_NONE);
		strlcpy(z_name, buf, len + 1);

		(void)zone_create_ext(z_name, size, zc_flags, ZONE_ID_ANY, ^(zone_t z){
#if __arm64e__ || KASAN_TBI
			uint32_t scale = kalloc_log2down(size / 32);

			if (size == 32 << scale) {
			        z->z_array_size_class = scale;
			} else {
			        z->z_array_size_class = scale | 0x10;
			}
#endif
			zone_security_array[zone_index(z)].z_kheap_id = kheap_id;
			if (i == 0) {
			        *kheap_zstart = zone_index(z);
			}
		});
	}
}

__startup_func
static void
kalloc_heap_init(struct kalloc_heap *kheap)
{
	kalloc_zone_init("kalloc", kheap->kh_heap_id, &kheap->kh_zstart,
	    ZC_NONE);
	/*
	 * Count all the "raw" views for zones in the heap.
	 */
	zone_view_count += KHEAP_NUM_ZONES;
}

#define KEXT_ALIGN_SHIFT        6
#define KEXT_ALIGN_BYTES        (1 << KEXT_ALIGN_SHIFT)
#define KEXT_ALIGN_MASK         (KEXT_ALIGN_BYTES - 1)
#define kt_scratch_size         (256ul << 10)
#define KALLOC_TYPE_SECTION(type) \
	(type == KTV_FIXED ? "__kalloc_type" : "__kalloc_var")

/*
 * Enum to specify the kalloc_type variant being used.
 */
__options_decl(kalloc_type_variant_t, uint16_t, {
	KTV_FIXED = 0x0001,
	KTV_VAR   = 0x0002,
});

/*
 * Macros that generate the appropriate kalloc_type variant (i.e. fixed or
 * variable) of the desired variable/function.
 */
#define kalloc_type_var(type, var) \
	((type) == KTV_FIXED ? \
	(vm_offset_t) kalloc_type_##var##_fixed : \
	(vm_offset_t) kalloc_type_##var##_var)
#define kalloc_type_func(type, func, ...) \
	((type) == KTV_FIXED ? \
	kalloc_type_##func##_fixed(__VA_ARGS__) : \
	kalloc_type_##func##_var(__VA_ARGS__))

TUNABLE(kalloc_type_options_t, kt_options, "kt", 0);
TUNABLE(uint16_t, kt_var_heaps, "kt_var_heaps",
    ZSECURITY_CONFIG_KT_VAR_BUDGET);

/*
 * Section start/end for fixed kalloc_type views
 */
extern struct kalloc_type_view kalloc_type_sec_start_fixed[]
__SECTION_START_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_type");

extern struct kalloc_type_view kalloc_type_sec_end_fixed[]
__SECTION_END_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_type");

/*
 * Section start/end for variable kalloc_type views
 */
extern struct kalloc_type_var_view kalloc_type_sec_start_var[]
__SECTION_START_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_var");

extern struct kalloc_type_var_view kalloc_type_sec_end_var[]
__SECTION_END_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_var");

__startup_data
static kalloc_type_views_t *kt_buffer = NULL;
__startup_data
static uint64_t kt_count;
__startup_data
uint32_t kalloc_type_hash_seed;

_Static_assert(__builtin_popcount(KT_SUMMARY_MASK_TYPE_BITS) == (KT_GRANULE_MAX + 1),
    "KT_SUMMARY_MASK_TYPE_BITS doesn't match KT_GRANULE_MAX");

/*
 * For use by lldb to iterate over kalloc types
 */
SECURITY_READ_ONLY_LATE(uint64_t) num_kt_sizeclass = MAX_K_ZONE(kt_zone_cfg);
SECURITY_READ_ONLY_LATE(zone_t) kalloc_type_zarray[MAX_K_ZONE(kt_zone_cfg)];

#define KT_GET_HASH(flags) (uint16_t)((flags & KT_HASH) >> 16)
static_assert(KT_HASH >> 16 == (KMEM_RANGE_MASK | KMEM_HASH_SET |
    KMEM_DIRECTION_MASK),
    "Insufficient bits to represent range and dir for VM allocations");
static_assert(MAX_K_ZONE(kt_zone_cfg) < KALLOC_TYPE_IDX_MASK,
    "validate idx mask");
/* qsort routines */
typedef int (*cmpfunc_t)(const void *a, const void *b);
extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);

static inline uint16_t
kalloc_type_get_idx(uint32_t kt_size)
{
	return (uint16_t) (kt_size >> KALLOC_TYPE_IDX_SHIFT);
}

static inline uint32_t
kalloc_type_set_idx(uint32_t kt_size, uint16_t idx)
{
	return kt_size | ((uint32_t) idx << KALLOC_TYPE_IDX_SHIFT);
}

static void
kalloc_type_build_dlut(void)
{
	vm_size_t size = 0;
	for (int i = 0; i < KALLOC_DLUT_SIZE; i++, size += KALLOC_MINALIGN) {
		uint8_t zindex = 0;
		while (kt_zone_cfg[zindex] < size) {
			zindex++;
		}
		kalloc_type_dlut[i] = zindex;
	}
}
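/*
 * Illustration (assuming KALLOC_MINALIGN == 16): entry i of the dlut is
 * computed for size i * 16, so kalloc_type_dlut[3] corresponds to size 48
 * and holds index 2, the first kt_zone_cfg entry (48) that is >= 48.
 * A 40-byte request then resolves via INDEX_ZDLUT(40) == 3 to the same
 * 48-byte size class.
 */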

static uint32_t
kalloc_type_idx_for_size(uint32_t size)
{
	assert(size <= KHEAP_MAX_SIZE);
	uint16_t idx = kalloc_type_dlut[INDEX_ZDLUT(size)];
	return kalloc_type_set_idx(size, idx);
}

static void
kalloc_type_assign_zone_fixed(kalloc_type_view_t *cur, kalloc_type_view_t *end,
    zone_t z)
{
	/*
	 * Assign the zone created for every kalloc_type_view
	 * of the same unique signature
	 */
	bool need_raw_view = false;
	while (cur < end) {
		kalloc_type_view_t kt = *cur;
		struct zone_view *zv = &kt->kt_zv;
		zv->zv_zone = z;
		kalloc_type_flags_t kt_flags = kt->kt_flags;

		if (kt_flags & KT_SLID) {
			kt->kt_signature -= vm_kernel_slide;
			kt->kt_zv.zv_name -= vm_kernel_slide;
		}

		if ((kt_flags & KT_PRIV_ACCT) ||
		    ((kt_options & KT_OPTIONS_ACCT) && (kt_flags & KT_DEFAULT))) {
			zv->zv_stats = zalloc_percpu_permanent_type(
				struct zone_stats);
			need_raw_view = true;
			zone_view_count += 1;
		} else {
			zv->zv_stats = z->z_stats;
		}
		zv->zv_next = (zone_view_t) z->z_views;
		zv->zv_zone->z_views = (zone_view_t) kt;
		cur++;
	}
	if (need_raw_view) {
		zone_view_count += 1;
	}
}

__startup_func
static void
kalloc_type_assign_zone_var(kalloc_type_var_view_t *cur,
    kalloc_type_var_view_t *end, uint32_t heap_idx)
{
	struct kheap_info *cfg = &kalloc_type_heap_array[heap_idx];
	while (cur < end) {
		kalloc_type_var_view_t kt = *cur;
		kt->kt_heap_start = cfg->kh_zstart;
		kalloc_type_flags_t kt_flags = kt->kt_flags;

		if (kt_flags & KT_SLID) {
			if (kt->kt_sig_hdr) {
				kt->kt_sig_hdr -= vm_kernel_slide;
			}
			kt->kt_sig_type -= vm_kernel_slide;
			kt->kt_name -= vm_kernel_slide;
		}

		if ((kt_flags & KT_PRIV_ACCT) ||
		    ((kt_options & KT_OPTIONS_ACCT) && (kt_flags & KT_DEFAULT))) {
			kt->kt_stats = zalloc_percpu_permanent_type(struct zone_stats);
			zone_view_count += 1;
		}

		kt->kt_next = (zone_view_t) cfg->kt_views;
		cfg->kt_views = kt;
		cur++;
	}
}

__startup_func
static inline void
kalloc_type_slide_fixed(vm_offset_t addr)
{
	kalloc_type_view_t ktv = (struct kalloc_type_view *) addr;
	ktv->kt_signature += vm_kernel_slide;
	ktv->kt_zv.zv_name += vm_kernel_slide;
	ktv->kt_flags |= KT_SLID;
}

__startup_func
static inline void
kalloc_type_slide_var(vm_offset_t addr)
{
	kalloc_type_var_view_t ktv = (struct kalloc_type_var_view *) addr;
	if (ktv->kt_sig_hdr) {
		ktv->kt_sig_hdr += vm_kernel_slide;
	}
	ktv->kt_sig_type += vm_kernel_slide;
	ktv->kt_name += vm_kernel_slide;
	ktv->kt_flags |= KT_SLID;
}

__startup_func
static void
kalloc_type_validate_flags(
	kalloc_type_flags_t     kt_flags,
	const char             *kt_name,
	uuid_string_t           kext_uuid)
{
	if (!(kt_flags & KT_CHANGED) || !(kt_flags & KT_CHANGED2)) {
		panic("kalloc_type_view(%s) from kext(%s) hasn't been rebuilt with "
		    "required xnu headers", kt_name, kext_uuid);
	}
}

static kalloc_type_flags_t
kalloc_type_get_flags_fixed(vm_offset_t addr, uuid_string_t kext_uuid)
{
	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
	kalloc_type_validate_flags(ktv->kt_flags, ktv->kt_zv.zv_name, kext_uuid);
	return ktv->kt_flags;
}

static kalloc_type_flags_t
kalloc_type_get_flags_var(vm_offset_t addr, uuid_string_t kext_uuid)
{
	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
	kalloc_type_validate_flags(ktv->kt_flags, ktv->kt_name, kext_uuid);
	return ktv->kt_flags;
}

/*
 * Check if signature of type is made up of only data and padding
 */
static bool
kalloc_type_is_data(kalloc_type_flags_t kt_flags)
{
	assert(kt_flags & KT_CHANGED);
	return kt_flags & KT_DATA_ONLY;
}

/*
 * Check if signature of type is made up of only pointers
 */
static bool
kalloc_type_is_ptr_array(kalloc_type_flags_t kt_flags)
{
	assert(kt_flags & KT_CHANGED2);
	return kt_flags & KT_PTR_ARRAY;
}

static bool
kalloc_type_from_vm(kalloc_type_flags_t kt_flags)
{
	assert(kt_flags & KT_CHANGED);
	return kt_flags & KT_VM;
}

__startup_func
static inline vm_size_t
kalloc_type_view_sz_fixed(void)
{
	return sizeof(struct kalloc_type_view);
}

__startup_func
static inline vm_size_t
kalloc_type_view_sz_var(void)
{
	return sizeof(struct kalloc_type_var_view);
}

__startup_func
static inline uint64_t
kalloc_type_view_count(kalloc_type_variant_t type, vm_offset_t start,
    vm_offset_t end)
{
	return (end - start) / kalloc_type_func(type, view_sz);
}

__startup_func
static inline void
kalloc_type_buffer_copy_fixed(kalloc_type_views_t *buffer, vm_offset_t ktv)
{
	buffer->ktv_fixed = (kalloc_type_view_t) ktv;
}

__startup_func
static inline void
kalloc_type_buffer_copy_var(kalloc_type_views_t *buffer, vm_offset_t ktv)
{
	buffer->ktv_var = (kalloc_type_var_view_t) ktv;
}

__startup_func
static void
kalloc_type_handle_data_view_fixed(vm_offset_t addr)
{
	kalloc_type_view_t cur_data_view = (kalloc_type_view_t) addr;
	zone_t z = kalloc_zone_for_size(KHEAP_DATA_BUFFERS->kh_zstart,
	    cur_data_view->kt_size);
	kalloc_type_assign_zone_fixed(&cur_data_view, &cur_data_view + 1, z);
}

__startup_func
static void
kalloc_type_handle_data_view_var(vm_offset_t addr)
{
	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
	kalloc_type_assign_zone_var(&ktv, &ktv + 1, KT_VAR_DATA_HEAP);
}

__startup_func
static void
kalloc_type_handle_parray_var(vm_offset_t addr)
{
	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
	kalloc_type_assign_zone_var(&ktv, &ktv + 1, KT_VAR_PTR_HEAP);
}

__startup_func
static uint32_t
kalloc_hash_adjust(uint32_t hash, uint32_t shift)
{
	/*
	 * Limit range_id to ptr ranges
	 */
	uint32_t range_id = kmem_adjust_range_id(hash);
	uint32_t direction = hash & 0x8000;
	return (range_id | KMEM_HASH_SET | direction) << shift;
}
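/*
 * For illustration: kalloc_type_set_type_hash() below folds the type
 * signature through a Jenkins hash, keeps only the range and direction
 * bits, and kalloc_hash_adjust(hash, 16) shifts the result into the
 * upper half of kt_flags, which is exactly what KT_GET_HASH() extracts
 * again (bits 16..31 of the flags word).
 */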

__startup_func
static void
kalloc_type_set_type_hash(const char *sig_ty, const char *sig_hdr,
    kalloc_type_flags_t *kt_flags)
{
	uint32_t hash = 0;

	assert(sig_ty != NULL);
	hash = os_hash_jenkins_update(sig_ty, strlen(sig_ty),
	    kalloc_type_hash_seed);
	if (sig_hdr) {
		hash = os_hash_jenkins_update(sig_hdr, strlen(sig_hdr), hash);
	}
	os_hash_jenkins_finish(hash);
	hash &= (KMEM_RANGE_MASK | KMEM_DIRECTION_MASK);

	*kt_flags = *kt_flags | kalloc_hash_adjust(hash, 16);
}

__startup_func
static void
kalloc_type_set_type_hash_fixed(vm_offset_t addr)
{
	/*
	 * Use backtraces on fixed as we don't have signatures for types that go
	 * to the VM due to rdar://85182551.
	 */
	(void) addr;
}

__startup_func
static void
kalloc_type_set_type_hash_var(vm_offset_t addr)
{
	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
	kalloc_type_set_type_hash(ktv->kt_sig_type, ktv->kt_sig_hdr,
	    &ktv->kt_flags);
}

__startup_func
static void
kalloc_type_mark_processed_fixed(vm_offset_t addr)
{
	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
	ktv->kt_flags |= KT_PROCESSED;
}

__startup_func
static void
kalloc_type_mark_processed_var(vm_offset_t addr)
{
	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
	ktv->kt_flags |= KT_PROCESSED;
}

__startup_func
static void
kalloc_type_update_view_fixed(vm_offset_t addr)
{
	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
	ktv->kt_size = kalloc_type_idx_for_size(ktv->kt_size);
}

__startup_func
static void
kalloc_type_update_view_var(vm_offset_t addr)
{
	(void) addr;
}

__startup_func
static void
kalloc_type_view_copy(
	const kalloc_type_variant_t type,
	vm_offset_t             start,
	vm_offset_t             end,
	uint64_t               *cur_count,
	bool                    slide,
	uuid_string_t           kext_uuid)
{
	uint64_t count = kalloc_type_view_count(type, start, end);
	if (count + *cur_count >= kt_count) {
		panic("kalloc_type_view_copy: Insufficient space in scratch buffer");
	}
	vm_offset_t cur = start;
	while (cur < end) {
		if (slide) {
			kalloc_type_func(type, slide, cur);
		}
		kalloc_type_flags_t kt_flags = kalloc_type_func(type, get_flags, cur,
		    kext_uuid);
		kalloc_type_func(type, mark_processed, cur);
		/*
		 * Skip views that go to the VM
		 */
		if (kalloc_type_from_vm(kt_flags)) {
			cur += kalloc_type_func(type, view_sz);
			continue;
		}

		/*
		 * If the signature indicates that the entire allocation is data,
		 * move it to KHEAP_DATA_BUFFERS. Note that KT_VAR_DATA_HEAP is a
		 * fake "data" heap; variable kalloc_type handles the actual
		 * redirection in the entry points kalloc/kfree_type_var_impl.
		 */
		if (kalloc_type_is_data(kt_flags)) {
			kalloc_type_func(type, handle_data_view, cur);
			cur += kalloc_type_func(type, view_sz);
			continue;
		}

		/*
		 * Set type hash that is used by kmem_*_guard
		 */
		kalloc_type_func(type, set_type_hash, cur);

		/*
		 * Redirect variable sized pointer arrays to KT_VAR_PTR_HEAP
		 */
		if (type == KTV_VAR && kalloc_type_is_ptr_array(kt_flags)) {
			kalloc_type_handle_parray_var(cur);
			cur += kalloc_type_func(type, view_sz);
			continue;
		}

		kalloc_type_func(type, update_view, cur);
		kalloc_type_func(type, buffer_copy, &kt_buffer[*cur_count], cur);
		cur += kalloc_type_func(type, view_sz);
		*cur_count = *cur_count + 1;
	}
}

__startup_func
static uint64_t
kalloc_type_view_parse(const kalloc_type_variant_t type)
{
	kc_format_t kc_format;
	uint64_t cur_count = 0;

	if (!PE_get_primary_kc_format(&kc_format)) {
		panic("kalloc_type_view_parse: wasn't able to determine kc format");
	}

	if (kc_format == KCFormatStatic) {
		/*
		 * If kc is static or KCGEN, __kalloc_type sections from kexts and
		 * xnu are coalesced.
		 */
		kalloc_type_view_copy(type,
		    kalloc_type_var(type, sec_start),
		    kalloc_type_var(type, sec_end),
		    &cur_count, false, NULL);
	} else if (kc_format == KCFormatFileset) {
		/*
		 * If kc uses filesets, traverse __kalloc_type section for each
		 * macho in the BootKC.
		 */
		kernel_mach_header_t *kc_mh = NULL;
		kernel_mach_header_t *kext_mh = NULL;

		kc_mh = (kernel_mach_header_t *)PE_get_kc_header(KCKindPrimary);
		struct load_command *lc =
		    (struct load_command *)((vm_offset_t)kc_mh + sizeof(*kc_mh));
		for (uint32_t i = 0; i < kc_mh->ncmds;
		    i++, lc = (struct load_command *)((vm_offset_t)lc + lc->cmdsize)) {
			if (lc->cmd != LC_FILESET_ENTRY) {
				continue;
			}
			struct fileset_entry_command *fse =
			    (struct fileset_entry_command *)(vm_offset_t)lc;
			kext_mh = (kernel_mach_header_t *)fse->vmaddr;
			kernel_section_t *sect = (kernel_section_t *)getsectbynamefromheader(
				kext_mh, KALLOC_TYPE_SEGMENT, KALLOC_TYPE_SECTION(type));
			if (sect != NULL) {
				unsigned long uuidlen = 0;
				void *kext_uuid = getuuidfromheader(kext_mh, &uuidlen);
				uuid_string_t kext_uuid_str;
				if ((kext_uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
					uuid_unparse_upper(*(uuid_t *)kext_uuid, kext_uuid_str);
				}
				kalloc_type_view_copy(type, sect->addr, sect->addr + sect->size,
				    &cur_count, false, kext_uuid_str);
			}
		}
	} else if (kc_format == KCFormatKCGEN) {
		/*
		 * Parse __kalloc_type section from xnu
		 */
		kalloc_type_view_copy(type,
		    kalloc_type_var(type, sec_start),
		    kalloc_type_var(type, sec_end), &cur_count, false, NULL);

		/*
		 * Parse __kalloc_type section for kexts
		 *
		 * Note: We don't process the kalloc_type_views for kexts on armv7
		 * as this platform has insufficient memory for type based
		 * segregation. kalloc_type_impl_external will direct callsites
		 * based on their size.
		 */
		kernel_mach_header_t *xnu_mh = &_mh_execute_header;
		vm_offset_t cur = 0;
		vm_offset_t end = 0;

		/*
		 * Kext machos are in the __PRELINK_TEXT segment. Extract the segment
		 * and traverse it.
		 */
		kernel_section_t *prelink_sect = getsectbynamefromheader(
			xnu_mh, kPrelinkTextSegment, kPrelinkTextSection);
		assert(prelink_sect);
		cur = prelink_sect->addr;
		end = prelink_sect->addr + prelink_sect->size;

		while (cur < end) {
			uint64_t kext_text_sz = 0;
			kernel_mach_header_t *kext_mh = (kernel_mach_header_t *) cur;

			if (kext_mh->magic == 0) {
				/*
				 * Assert that we have processed all kexts and all that is left
				 * is padding
				 */
				assert(memcmp_zero_ptr_aligned((void *)kext_mh, end - cur) == 0);
				break;
			} else if (kext_mh->magic != MH_MAGIC_64 &&
			    kext_mh->magic != MH_CIGAM_64) {
				panic("kalloc_type_view_parse: couldn't find kext @ offset:%lx",
				    cur);
			}

			/*
			 * Kext macho found, iterate through its segments
			 */
			struct load_command *lc =
			    (struct load_command *)(cur + sizeof(kernel_mach_header_t));
			bool isSplitKext = false;

			for (uint32_t i = 0; i < kext_mh->ncmds && (vm_offset_t)lc < end;
			    i++, lc = (struct load_command *)((vm_offset_t)lc + lc->cmdsize)) {
				if (lc->cmd == LC_SEGMENT_SPLIT_INFO) {
					isSplitKext = true;
					continue;
				} else if (lc->cmd != LC_SEGMENT_64) {
					continue;
				}

				kernel_segment_command_t *seg_cmd =
				    (struct segment_command_64 *)(vm_offset_t)lc;
				/*
				 * Parse kalloc_type section
				 */
				if (strcmp(seg_cmd->segname, KALLOC_TYPE_SEGMENT) == 0) {
					kernel_section_t *kt_sect = getsectbynamefromseg(seg_cmd,
					    KALLOC_TYPE_SEGMENT, KALLOC_TYPE_SECTION(type));
					if (kt_sect) {
						kalloc_type_view_copy(type, kt_sect->addr + vm_kernel_slide,
						    kt_sect->addr + kt_sect->size + vm_kernel_slide, &cur_count,
						    true, NULL);
					}
				}
				/*
				 * If the kext has a __TEXT segment, that is the only thing that
				 * will be in the special __PRELINK_TEXT KC segment, so the next
				 * macho is right after.
				 */
				if (strcmp(seg_cmd->segname, "__TEXT") == 0) {
					kext_text_sz = seg_cmd->filesize;
				}
			}
			/*
			 * If the kext did not have a __TEXT segment (special xnu kexts with
			 * only a __LINKEDIT segment) then the next macho will be after all the
			 * header commands.
			 */
			if (!kext_text_sz) {
				kext_text_sz = kext_mh->sizeofcmds;
			} else if (!isSplitKext) {
				panic("kalloc_type_view_parse: No support for non-split seg KCs");
				break;
			}

			cur += ((kext_text_sz + (KEXT_ALIGN_BYTES - 1)) & (~KEXT_ALIGN_MASK));
		}
	} else {
		/*
		 * When kc_format is KCFormatDynamic or KCFormatUnknown, we don't handle
		 * parsing kalloc_type_view structs during startup.
		 */
		panic("kalloc_type_view_parse: couldn't parse kalloc_type_view structs"
		    " for kc_format = %d\n", kc_format);
	}
	return cur_count;
}

__startup_func
static int
kalloc_type_cmp_fixed(const void *a, const void *b)
{
	const kalloc_type_view_t ktA = *(const kalloc_type_view_t *)a;
	const kalloc_type_view_t ktB = *(const kalloc_type_view_t *)b;

	const uint16_t idxA = kalloc_type_get_idx(ktA->kt_size);
	const uint16_t idxB = kalloc_type_get_idx(ktB->kt_size);
	/*
	 * If the kalloc_type_views are in the same kalloc bucket, sort by
	 * signature; otherwise sort by size.
	 */
	if (idxA == idxB) {
		int result = strcmp(ktA->kt_signature, ktB->kt_signature);
		/*
		 * If the kalloc_type_views have the same signature, sort by site
		 * name.
		 */
		if (result == 0) {
			return strcmp(ktA->kt_zv.zv_name, ktB->kt_zv.zv_name);
		}
		return result;
	}
	const uint32_t sizeA = kalloc_type_get_size(ktA->kt_size);
	const uint32_t sizeB = kalloc_type_get_size(ktB->kt_size);
	return (int)(sizeA - sizeB);
}

__startup_func
static int
kalloc_type_cmp_var(const void *a, const void *b)
{
	const kalloc_type_var_view_t ktA = *(const kalloc_type_var_view_t *)a;
	const kalloc_type_var_view_t ktB = *(const kalloc_type_var_view_t *)b;

	const char *ktA_hdr = ktA->kt_sig_hdr ?: "";
	const char *ktB_hdr = ktB->kt_sig_hdr ?: "";

	int result = strcmp(ktA->kt_sig_type, ktB->kt_sig_type);
	if (result == 0) {
		return strcmp(ktA_hdr, ktB_hdr);
	}
	return result;
}

__startup_func
static uint16_t *
kalloc_type_create_iterators_fixed(uint16_t *kt_skip_list_start,
    uint16_t *kt_freq_list, uint16_t *kt_freq_list_total, uint64_t count)
{
	uint16_t *kt_skip_list = kt_skip_list_start;
	/*
	 * cur and prev kalloc size bucket
	 */
	uint16_t p_idx = 0;
	uint16_t c_idx = 0;

	/*
	 * Init values
	 */
	uint16_t unique_sig = 1;
	uint16_t total_sig = 0;
	kt_skip_list++;
	const char *p_sig = "";
	const char *p_name = "";

	/*
	 * Walk over each kalloc_type_view
	 */
	for (uint16_t i = 0; i < count; i++) {
		kalloc_type_view_t kt = kt_buffer[i].ktv_fixed;
		c_idx = kalloc_type_get_idx(kt->kt_size);
		/*
		 * When the current kalloc_type_view is in a different kalloc size
		 * bucket than the previous, it means we have processed all in
		 * the previous size bucket, so store the accumulated values
		 * and advance the indices.
		 */
		if (c_idx != p_idx) {
			/*
			 * Updates for frequency lists
			 */
			kt_freq_list[p_idx] = unique_sig;
			unique_sig = 1;
			kt_freq_list_total[p_idx] = total_sig;
			total_sig = 1;
			p_idx = c_idx;

			/*
			 * Updates to signature skip list
			 */
			*kt_skip_list = i;
			kt_skip_list++;
			p_sig = kt->kt_signature;
			continue;
		}

		/*
		 * When the current kalloc_type_view is in the same kalloc size
		 * bucket as the previous, analyze the signature to see if it is
		 * unique.
		 *
		 * Signatures are collapsible if one is a substring of the next.
		 */
		const char *c_sig = kt->kt_signature;
		if (strncmp(c_sig, p_sig, strlen(p_sig)) != 0) {
			/*
			 * Unique signature detected. Update counts and advance index
			 */
			unique_sig++;
			*kt_skip_list = i;
			kt_skip_list++;
		}

		/*
		 * Check if current kalloc_type_view corresponds to a new type
		 */
		const char *c_name = kt->kt_zv.zv_name;
		if (strlen(p_name) != strlen(c_name) || strcmp(p_name, c_name) != 0) {
			total_sig++;
		}
		p_name = c_name;
		p_sig = c_sig;
	}
	/*
	 * Final update
	 */
	assert(c_idx == p_idx);
	assert(kt_freq_list[c_idx] == 0);
	kt_freq_list[c_idx] = unique_sig;
	kt_freq_list_total[c_idx] = (uint16_t) total_sig;
	*kt_skip_list = (uint16_t) count;
	return ++kt_skip_list;
}
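/*
 * Illustrative example of the iterators (hypothetical input): if the
 * sorted kt_buffer holds five views in one size bucket with signatures
 * "12", "12", "121", "2", "2", then "12" -> "121" collapses (prefix)
 * while "121" -> "2" does not, so the bucket has 2 unique signatures,
 * and the skip list records the start indices { 0, 3, 5 } delimiting
 * the two signature runs.
 */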

__startup_func
static uint32_t
kalloc_type_create_iterators_var(uint32_t *kt_skip_list_start)
{
	uint32_t *kt_skip_list = kt_skip_list_start;
	uint32_t n = 0;
	kt_skip_list[n] = 0;
	assert(kt_count > 1);
	for (uint32_t i = 1; i < kt_count; i++) {
		kalloc_type_var_view_t ktA = kt_buffer[i - 1].ktv_var;
		kalloc_type_var_view_t ktB = kt_buffer[i].ktv_var;
		const char *ktA_hdr = ktA->kt_sig_hdr ?: "";
		const char *ktB_hdr = ktB->kt_sig_hdr ?: "";
		if (strcmp(ktA_hdr, ktB_hdr) != 0 ||
		    strcmp(ktA->kt_sig_type, ktB->kt_sig_type) != 0) {
			n++;
			kt_skip_list[n] = i;
		}
	}
	/*
	 * Final update
	 */
	n++;
	kt_skip_list[n] = (uint32_t) kt_count;
	return n;
}

__startup_func
static uint16_t
kalloc_type_apply_policy(uint16_t *kt_freq_list, uint16_t *kt_zones,
    uint16_t zone_budget)
{
	uint16_t total_sig = 0;
	uint16_t min_sig = 0;
	uint16_t assigned_zones = 0;
	uint16_t remaining_zones = zone_budget;
	uint16_t min_zones_per_size = 2;

#if DEBUG || DEVELOPMENT
	if (startup_phase < STARTUP_SUB_LOCKDOWN) {
		uint16_t current_zones = os_atomic_load(&num_zones, relaxed);
		assert(zone_budget + current_zones <= MAX_ZONES);
	}
#endif

	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
		uint16_t sig_freq = kt_freq_list[i];
		uint16_t min_zones = min_zones_per_size;
		if (sig_freq < min_zones_per_size) {
			min_zones = sig_freq;
		}
		total_sig += sig_freq;
		kt_zones[i] = min_zones;
		min_sig += min_zones;
	}
	if (remaining_zones > total_sig) {
		remaining_zones = total_sig;
	}
	assert(remaining_zones >= min_sig);
	remaining_zones -= min_sig;
	total_sig -= min_sig;
	assigned_zones += min_sig;
	uint16_t modulo = 0;
	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
		uint16_t freq = kt_freq_list[i];
		if (freq < min_zones_per_size) {
			continue;
		}
		uint32_t numer = (freq - min_zones_per_size) * remaining_zones;
		uint16_t n_zones = (uint16_t) numer / total_sig;

		/*
		 * Accumulate remainder and increment n_zones when it goes above
		 * denominator
		 */
		modulo += numer % total_sig;
		if (modulo >= total_sig) {
			n_zones++;
			modulo -= total_sig;
		}

		/*
		 * Cap the total number of zones to the unique signatures
		 */
		if ((n_zones + min_zones_per_size) > freq) {
			uint16_t extra_zones = n_zones + min_zones_per_size - freq;
			modulo += (extra_zones * total_sig);
			n_zones -= extra_zones;
		}
		kt_zones[i] += n_zones;
		assigned_zones += n_zones;
	}

	if (kt_options & KT_OPTIONS_DEBUG) {
		printf("kalloc_type_apply_policy: assigned %u zones wasted %u zones\n",
		    assigned_zones, remaining_zones + min_sig - assigned_zones);
	}
	return remaining_zones + min_sig - assigned_zones;
}
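/*
 * Worked example of the policy (hypothetical numbers): with two size
 * classes whose unique-signature counts are {2, 6} and a budget of 8
 * zones, the first pass reserves min(freq, 2) zones each, so
 * min_sig == 4 and 4 zones remain for proportional distribution over
 * the 4 signatures above the minimum. Class 0 gets (2-2)*4/4 == 0
 * extra zones and class 1 gets (6-2)*4/4 == 4, for a final split of
 * {2, 6} with nothing wasted.
 */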

__startup_func
static void
kalloc_type_create_zone_for_size(zone_t *kt_zones_for_size,
    uint16_t kt_zones, vm_size_t z_size)
{
	zone_t p_zone = NULL;

	for (uint16_t i = 0; i < kt_zones; i++) {
		char *z_name = zalloc_permanent(MAX_ZONE_NAME, ZALIGN_NONE);
		snprintf(z_name, MAX_ZONE_NAME, "kalloc.type%u.%zu", i,
		    (size_t) z_size);
		zone_t z = zone_create(z_name, z_size, ZC_KALLOC_TYPE);
		if (i != 0) {
			p_zone->z_kt_next = z;
		}
		p_zone = z;
		kt_zones_for_size[i] = z;
	}
}

__startup_func
static void
kalloc_type_create_zones_fixed(uint16_t *kt_skip_list_start,
    uint16_t *kt_freq_list, uint16_t *kt_freq_list_total,
    uint16_t *kt_shuffle_buf)
{
	uint16_t *kt_skip_list = kt_skip_list_start;
	uint16_t p_j = 0;

	uint16_t kt_zones[MAX_K_ZONE(kt_zone_cfg)] = {};

#if DEBUG || DEVELOPMENT
	uint64_t kt_shuffle_count = ((vm_address_t) kt_shuffle_buf -
	    (vm_address_t) kt_buffer) / sizeof(uint16_t);
#endif
	/*
	 * Apply policy to determine how many zones to create for each size
	 * class.
	 */
	kalloc_type_apply_policy(kt_freq_list, kt_zones,
	    ZSECURITY_CONFIG_KT_BUDGET);
	/*
	 * Print stats when KT_OPTIONS_DEBUG boot-arg present
	 */
	if (kt_options & KT_OPTIONS_DEBUG) {
		printf("Size\ttotal_sig\tunique_signatures\tzones\n");
		for (uint32_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
			printf("%u\t%u\t%u\t%u\n", kt_zone_cfg[i],
			    kt_freq_list_total[i], kt_freq_list[i], kt_zones[i]);
		}
	}

	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
		uint16_t n_unique_sig = kt_freq_list[i];
		vm_size_t z_size = kt_zone_cfg[i];
		uint16_t n_zones = kt_zones[i];

		if (n_unique_sig == 0) {
			continue;
		}

		assert(n_zones <= 20);
		zone_t kt_zones_for_size[20] = {};
		kalloc_type_create_zone_for_size(kt_zones_for_size,
		    n_zones, z_size);

		kalloc_type_zarray[i] = kt_zones_for_size[0];
		/*
		 * Ensure that there is enough space to shuffle n_unique_sig
		 * indices
		 */
		assert(n_unique_sig < kt_shuffle_count);

		/*
		 * Get a shuffled set of signature indices
		 */
		*kt_shuffle_buf = 0;
		if (n_unique_sig > 1) {
			kmem_shuffle(kt_shuffle_buf, n_unique_sig);
		}

		for (uint16_t j = 0; j < n_unique_sig; j++) {
			/*
			 * For every size that has unique types
			 */
			uint16_t shuffle_idx = kt_shuffle_buf[j];
			uint16_t cur = kt_skip_list[shuffle_idx + p_j];
			uint16_t end = kt_skip_list[shuffle_idx + p_j + 1];
			zone_t zone = kt_zones_for_size[j % n_zones];
			kalloc_type_assign_zone_fixed(&kt_buffer[cur].ktv_fixed,
			    &kt_buffer[end].ktv_fixed, zone);
		}
		p_j += n_unique_sig;
	}
}

__startup_func
static void
kalloc_type_view_init_fixed(void)
{
	kalloc_type_hash_seed = (uint32_t) early_random();
	kalloc_type_build_dlut();
	/*
	 * Parse __kalloc_type sections and build array of pointers to
	 * all kalloc type views in kt_buffer.
	 */
	kt_count = kalloc_type_view_parse(KTV_FIXED);
	assert(kt_count < KALLOC_TYPE_SIZE_MASK);

#if DEBUG || DEVELOPMENT
	vm_size_t sig_slist_size = (size_t) kt_count * sizeof(uint16_t);
	vm_size_t kt_buffer_size = (size_t) kt_count * sizeof(kalloc_type_view_t);
	assert(kt_scratch_size >= kt_buffer_size + sig_slist_size);
#endif

	/*
	 * Sort based on size class and signature
	 */
	qsort(kt_buffer, (size_t) kt_count, sizeof(kalloc_type_view_t),
	    kalloc_type_cmp_fixed);

	/*
	 * Build a skip list that holds starts of unique signatures and a
	 * frequency list of number of unique and total signatures per kalloc
	 * size class
	 */
	uint16_t *kt_skip_list_start = (uint16_t *)(kt_buffer + kt_count);
	uint16_t kt_freq_list[MAX_K_ZONE(kt_zone_cfg)] = { 0 };
	uint16_t kt_freq_list_total[MAX_K_ZONE(kt_zone_cfg)] = { 0 };
	uint16_t *kt_shuffle_buf = kalloc_type_create_iterators_fixed(
		kt_skip_list_start, kt_freq_list, kt_freq_list_total, kt_count);

	/*
	 * Create zones based on signatures
	 */
	kalloc_type_create_zones_fixed(kt_skip_list_start, kt_freq_list,
	    kt_freq_list_total, kt_shuffle_buf);
}

__startup_func
static void
kalloc_type_heap_init(void)
{
	assert(kt_var_heaps + 1 <= KT_VAR_MAX_HEAPS);
	char kh_name[MAX_ZONE_NAME];

	for (uint32_t i = KT_VAR_PTR_HEAP; i < KT_VAR_PTR_HEAP + kt_var_heaps; i++) {
		snprintf(&kh_name[0], MAX_ZONE_NAME, "%s%u", KHEAP_KT_VAR->kh_name, i);
		kalloc_zone_init((const char *)&kh_name[0], KHEAP_ID_KT_VAR,
		    &kalloc_type_heap_array[i].kh_zstart, ZC_KALLOC_TYPE);
	}
	/*
	 * All variable kalloc type allocations are collapsed into a single
	 * stat. Individual accounting can be requested via KT_PRIV_ACCT.
	 */
	KHEAP_KT_VAR->kh_stats = zalloc_percpu_permanent_type(struct zone_stats);
	zone_view_count += 1;
}

__startup_func
static void
kalloc_type_view_init_var(void)
{
	/*
	 * Zones are created prior to parsing the views as the zone budget is
	 * fixed per sizeclass, and special types identified while parsing are
	 * redirected as they are discovered.
	 */
	kalloc_type_heap_init();

	/*
	 * Parse __kalloc_var sections and build array of pointers to views that
	 * aren't redirected in kt_buffer.
	 */
	kt_count = kalloc_type_view_parse(KTV_VAR);
	assert(kt_count < UINT32_MAX);

#if DEBUG || DEVELOPMENT
	vm_size_t sig_slist_size = (size_t) kt_count * sizeof(uint32_t);
	vm_size_t kt_buffer_size = (size_t) kt_count * sizeof(kalloc_type_views_t);
	assert(kt_scratch_size >= kt_buffer_size + sig_slist_size);
#endif

	/*
	 * Sort based on size class and signature
	 */
	qsort(kt_buffer, (size_t) kt_count, sizeof(kalloc_type_var_view_t),
	    kalloc_type_cmp_var);

	/*
	 * Build a skip list that holds starts of unique signatures
	 */
	uint32_t *kt_skip_list_start = (uint32_t *)(kt_buffer + kt_count);
	uint32_t unique_sig = kalloc_type_create_iterators_var(kt_skip_list_start);
	uint16_t fixed_heaps = KT_VAR__FIRST_FLEXIBLE_HEAP;
	/*
	 * If we have only one flexible heap, then other elements share the
	 * heap with pointer arrays.
	 */
	if (kt_var_heaps < KT_VAR__FIRST_FLEXIBLE_HEAP) {
		fixed_heaps = KT_VAR_PTR_HEAP;
	}

	for (uint32_t i = 1; i <= unique_sig; i++) {
		uint32_t heap_id = kmem_get_random16(kt_var_heaps - fixed_heaps) +
		    fixed_heaps;
		uint32_t start = kt_skip_list_start[i - 1];
		uint32_t end = kt_skip_list_start[i];
		kalloc_type_assign_zone_var(&kt_buffer[start].ktv_var,
		    &kt_buffer[end].ktv_var, heap_id);
	}
}

__startup_func
static void
kalloc_init(void)
{
	/*
	 * Allocate scratch space to parse kalloc_type_views and create
	 * other structures necessary to process them.
	 */
	uint64_t max_count = kt_count = kt_scratch_size / sizeof(kalloc_type_views_t);

	static_assert(KHEAP_MAX_SIZE >= KALLOC_SAFE_ALLOC_SIZE);
	kalloc_zsize_compute();

	/* Initialize kalloc default heap */
	kalloc_heap_init(KHEAP_DEFAULT);

	/* Initialize kalloc data buffers heap */
	kalloc_heap_init(KHEAP_DATA_BUFFERS);

	kmem_alloc(kernel_map, (vm_offset_t *)&kt_buffer, kt_scratch_size,
	    KMA_NOFAIL | KMA_ZERO | KMA_KOBJECT, VM_KERN_MEMORY_KALLOC);

	/*
	 * Handle fixed size views
	 */
	kalloc_type_view_init_fixed();

	/*
	 * Reset
	 */
	bzero(kt_buffer, kt_scratch_size);
	kt_count = max_count;

	/*
	 * Handle variable size views
	 */
	kalloc_type_view_init_var();

	/*
	 * Free resources used
	 */
	kmem_free(kernel_map, (vm_offset_t) kt_buffer, kt_scratch_size);
}
STARTUP(ZALLOC, STARTUP_RANK_THIRD, kalloc_init);

#pragma mark accessors

#define KFREE_ABSURD_SIZE \
	((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_AND_KEXT_ADDRESS) / 2)

static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
	thread_t thr = current_thread();
	ledger_debit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
	thread_t thr = current_thread();
	ledger_credit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

static kmem_guard_t
kalloc_guard(vm_tag_t tag, uint16_t type_hash, const void *owner)
{
	kmem_guard_t guard = {
		.kmg_atomic    = true,
		.kmg_tag       = tag,
		.kmg_type_hash = type_hash,
		.kmg_context   = os_hash_kernel_pointer(owner),
	};

	/*
	 * TODO: this use is really not sufficiently smart.
	 */

	return guard;
}

#if __arm64e__ || KASAN_TBI

#if __arm64e__
#define KALLOC_ARRAY_TYPE_SHIFT (64 - T1SZ_BOOT - 1)

/*
 * Zone encoding is:
 *
 *   <PAC SIG><1><1><PTR value><5 bits of size class>
 *
 * VM encoding is:
 *
 *   <PAC SIG><1><0><PTR value><14 bits of page count>
 *
 * The <1> is precisely placed so that <PAC SIG><1> is T1SZ worth of bits,
 * so that PAC authentication extends the proper sign bit.
 */

static_assert(T1SZ_BOOT + 1 + VM_KERNEL_POINTER_SIGNIFICANT_BITS <= 64);
#else
#define KALLOC_ARRAY_TYPE_SHIFT (64 - 8 - 1)

/*
 * Zone encoding is:
 *
 *   <TBI><1><PTR value><5 bits of size class>
 *
 * VM encoding is:
 *
 *   <TBI><0><PTR value><14 bits of page count>
 */

static_assert(8 + 1 + 1 + VM_KERNEL_POINTER_SIGNIFICANT_BITS <= 64);
#endif

SECURITY_READ_ONLY_LATE(uint32_t) kalloc_array_type_shift = KALLOC_ARRAY_TYPE_SHIFT;

__attribute__((always_inline))
struct kalloc_result
__kalloc_array_decode(vm_address_t ptr)
{
	struct kalloc_result kr;
	vm_address_t zone_mask = 1ul << KALLOC_ARRAY_TYPE_SHIFT;

	if (ptr & zone_mask) {
		kr.size = (32 + (ptr & 0x10)) << (ptr & 0xf);
		ptr &= ~0x1full;
	} else if (__probable(ptr)) {
		kr.size = (ptr & PAGE_MASK) << PAGE_SHIFT;
		ptr &= ~PAGE_MASK;
		ptr |= zone_mask;
	} else {
		kr.size = 0;
	}

	kr.addr = (void *)ptr;
	return kr;
}
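/*
 * Decode example (illustrative): the low 5 bits hold the size class set
 * in kalloc_zone_init() above. A 384-byte zone element is tagged with
 * scale 3 | 0x10 (since 384 != 32 << 3), so the decode computes
 * (32 + 0x10) << 3 == 384; a 256-byte element is tagged 3 and decodes
 * as (32 + 0) << 3 == 256.
 */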

static inline void *
__kalloc_array_encode_zone(zone_t z, void *ptr, vm_size_t size __unused)
{
	return (void *)((vm_address_t)ptr | z->z_array_size_class);
}

static inline vm_address_t
__kalloc_array_encode_vm(vm_address_t addr, vm_size_t size)
{
	addr &= ~(0x1ull << KALLOC_ARRAY_TYPE_SHIFT);

	return addr | atop(size);
}

#else

SECURITY_READ_ONLY_LATE(uint32_t) kalloc_array_type_shift = 0;

/*
 * Encoding is:
 * bits  0..46: pointer value
 * bits 47..47: 0: zones, 1: VM
 * bits 48..63: zones: elem size, VM: number of pages
 */

#define KALLOC_ARRAY_TYPE_BIT 47
static_assert(KALLOC_ARRAY_TYPE_BIT > VM_KERNEL_POINTER_SIGNIFICANT_BITS + 1);
static_assert(__builtin_clzll(KHEAP_MAX_SIZE) > KALLOC_ARRAY_TYPE_BIT);

__attribute__((always_inline))
struct kalloc_result
__kalloc_array_decode(vm_address_t ptr)
{
	struct kalloc_result kr;
	uint32_t shift = 64 - KALLOC_ARRAY_TYPE_BIT;

	kr.size = ptr >> (KALLOC_ARRAY_TYPE_BIT + 1);
	if (ptr & (1ull << KALLOC_ARRAY_TYPE_BIT)) {
		kr.size <<= PAGE_SHIFT;
	}
	/* sign extend, so that it also works with NULL */
	kr.addr = (void *)((long)(ptr << shift) >> shift);

	return kr;
}

static inline void *
__kalloc_array_encode_zone(zone_t z __unused, void *ptr, vm_size_t size)
{
	vm_address_t addr = (vm_address_t)ptr;

	addr &= (1ull << KALLOC_ARRAY_TYPE_BIT) - 1; /* clear bit */
	addr |= size << (KALLOC_ARRAY_TYPE_BIT + 1);

	return (void *)addr;
}

static inline vm_address_t
__kalloc_array_encode_vm(vm_address_t addr, vm_size_t size)
{
	addr &= (2ull << KALLOC_ARRAY_TYPE_BIT) - 1; /* keep bit */
	addr |= size << (KALLOC_ARRAY_TYPE_BIT + 1 - PAGE_SHIFT);

	return addr;
}
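/*
 * Worked example for the 47-bit scheme (illustrative): encoding a
 * 48-byte zone element stores 48 in bits 48..63 with bit 47 cleared,
 * so the decode returns size 48 directly; encoding a 3-page VM
 * allocation stores the page count 3 in the same field with bit 47
 * preserved (1 for VM per the layout above), so the decode shifts the
 * page count back up by PAGE_SHIFT to get the size in bytes.
 */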

#endif

vm_size_t
kalloc_next_good_size(vm_size_t size, uint32_t period)
{
	uint32_t scale = kalloc_log2down((uint32_t)size);
	vm_size_t step, size_class;

	if (size < KHEAP_STEP_START) {
		return KHEAP_STEP_START;
	}
	if (size < 2 * KHEAP_STEP_START) {
		return 2 * KHEAP_STEP_START;
	}

	if (size < KHEAP_MAX_SIZE) {
		step = 1ul << (scale - 1);
	} else {
		step = round_page(1ul << (scale - kalloc_log2down(period)));
	}

	size_class = (size + step) & -step;
#if KASAN_CLASSIC
	if (size > K_SIZE_CLASS(size_class)) {
		return kalloc_next_good_size(size_class, period);
	}
	size_class = K_SIZE_CLASS(size_class);
#endif
	return size_class;
}
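/*
 * Example (illustrative, assuming KHEAP_STEP_START == 16): growing from
 * size 100, scale = log2down(100) = 6 and step = 1 << 5 = 32, so the
 * next good size is (100 + 32) & -32 == 128; growing again from 130
 * gives step 64 and (130 + 64) & -64 == 192, matching the
 * 16/32/48/64/96/128/192/... ladder computed in kalloc_zsize_compute().
 */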


#pragma mark kalloc

static inline kalloc_heap_t
kalloc_type_get_heap(kalloc_type_var_view_t kt_view, bool kt_free __unused)
{
	kalloc_heap_t fallback = KHEAP_DEFAULT;

	/*
	 * Redirect data-only views
	 */
	if (kalloc_type_is_data(kt_view->kt_flags)) {
		return KHEAP_DATA_BUFFERS;
	}

	if (kt_view->kt_flags & KT_PROCESSED) {
		return KHEAP_KT_VAR;
	}

	/*
	 * Views from kexts not in BootKC on macOS
	 */
#if !ZSECURITY_CONFIG(STRICT_IOKIT_FREE)
	if (kt_free) {
		fallback = KHEAP_ANY;
	}
#endif

	return fallback;
}
1740
1741 __attribute__((noinline))
1742 static struct kalloc_result
kalloc_large(kalloc_heap_t kheap,vm_size_t req_size,zalloc_flags_t flags,uint16_t kt_hash,void * owner __unused)1743 kalloc_large(
1744 kalloc_heap_t kheap,
1745 vm_size_t req_size,
1746 zalloc_flags_t flags,
1747 uint16_t kt_hash,
1748 void *owner __unused)
1749 {
1750 kma_flags_t kma_flags = KMA_KASAN_GUARD | KMA_TAG;
1751 vm_tag_t tag;
1752 vm_offset_t addr, size;
1753
1754 if (flags & Z_NOFAIL) {
1755 panic("trying to kalloc(Z_NOFAIL) with a large size (%zd)",
1756 (size_t)req_size);
1757 }
1758
1759 /*
1760 * kmem_alloc could block so we return if noblock
1761 *
1762 * also, reject sizes larger than our address space is quickly,
1763 * as kt_size or IOMallocArraySize() expect this.
1764 */
1765 if ((flags & Z_NOWAIT) ||
1766 (req_size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS)) {
1767 return (struct kalloc_result){ };
1768 }
1769
1770 if ((flags & Z_KALLOC_ARRAY) && req_size > KALLOC_ARRAY_SIZE_MAX) {
1771 return (struct kalloc_result){ };
1772 }
1773
1774 /*
1775 * (73465472) on Intel we didn't use to pass this flag,
1776 * which in turned allowed kalloc_large() memory to be shared
1777 * with user directly.
1778 *
1779 * We're bound by this unfortunate ABI.
1780 */
1781 if ((flags & Z_MAY_COPYINMAP) == 0) {
1782 #ifndef __x86_64__
1783 kma_flags |= KMA_KOBJECT;
1784 #endif
1785 } else {
1786 assert(kheap == KHEAP_DATA_BUFFERS);
1787 kma_flags &= ~KMA_TAG;
1788 }
1789 if (flags & Z_NOPAGEWAIT) {
1790 kma_flags |= KMA_NOPAGEWAIT;
1791 }
1792 if (flags & Z_ZERO) {
1793 kma_flags |= KMA_ZERO;
1794 }
1795 if (kheap == KHEAP_DATA_BUFFERS) {
1796 kma_flags |= KMA_DATA;
1797 } else if (flags & (Z_KALLOC_ARRAY | Z_SPRAYQTN)) {
1798 kma_flags |= KMA_SPRAYQTN;
1799 }
1800
1801 tag = zalloc_flags_get_tag(flags);
1802 if (flags & Z_VM_TAG_BT_BIT) {
1803 tag = vm_tag_bt() ?: tag;
1804 }
1805 if (tag == VM_KERN_MEMORY_NONE) {
1806 tag = kheap->kh_tag;
1807 }
1808
1809 size = round_page(req_size);
1810 if (flags & (Z_FULLSIZE | Z_KALLOC_ARRAY)) {
1811 req_size = round_page(size);
1812 }
1813
1814 addr = kmem_alloc_guard(kernel_map, req_size, 0,
1815 kma_flags, kalloc_guard(tag, kt_hash, owner)).kmr_address;
1816
1817 if (addr != 0) {
1818 counter_inc(&kalloc_large_count);
1819 counter_add(&kalloc_large_total, size);
1820 KALLOC_ZINFO_SALLOC(size);
1821 if (flags & Z_KALLOC_ARRAY) {
1822 addr = __kalloc_array_encode_vm(addr, req_size);
1823 }
1824 } else {
1825 addr = 0;
1826 }
1827
1828 DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr);
1829 return (struct kalloc_result){ .addr = (void *)addr, .size = req_size };
1830 }
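/*
 * Usage sketch (illustrative, not part of this file): kalloc_large()
 * is never called directly; requests above KHEAP_MAX_SIZE reach it
 * through kalloc_ext(). The heap and size below are example values.
 *
 *	struct kalloc_result kr;
 *
 *	kr = kalloc_ext(KHEAP_DATA_BUFFERS, KHEAP_MAX_SIZE * 2,
 *	    Z_WAITOK | Z_ZERO, NULL);
 *	if (kr.addr) {
 *		// kr.size echoes the request unless Z_FULLSIZE or
 *		// Z_KALLOC_ARRAY asked for the page-rounded size
 *		kfree_ext(KHEAP_DATA_BUFFERS, kr.addr, kr.size);
 *	}
 */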
1831
1832 static inline struct kalloc_result
1833 kalloc_zone(
1834 zone_t z,
1835 zone_stats_t zstats,
1836 zalloc_flags_t flags,
1837 vm_size_t req_size)
1838 {
1839 struct kalloc_result kr;
1840 vm_size_t esize;
1841
1842 kr = zalloc_ext(z, zstats ?: z->z_stats, flags | Z_SKIP_KASAN);
1843 esize = kr.size;
1844
1845 if (__probable(kr.addr)) {
1846 if (flags & (Z_FULLSIZE | Z_KALLOC_ARRAY)) {
1847 req_size = esize;
1848 } else {
1849 kr.size = req_size;
1850 }
1851 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
1852 kr.addr = zone_element_pgz_oob_adjust(kr.addr, req_size, esize);
1853 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
1854 #if KASAN_CLASSIC
1855 kasan_alloc((vm_offset_t)kr.addr, esize, kr.size,
1856 KASAN_GUARD_SIZE, false, __builtin_frame_address(0));
1857 #endif /* KASAN_CLASSIC */
1858 #if KASAN_TBI
1859 /*
1860 * Kasan-TBI at least needs to tag one byte so that
1861 * we can prove the allocation was live at kfree_ext()
1862 * time by doing a manual __asan_loadN check.
1863 */
1864 kr.addr = (void *)kasan_tbi_tag_zalloc((vm_offset_t)kr.addr,
1865 esize, kr.size ?: 1, false);
1866 #endif /* KASAN_TBI */
1867
1868 if (flags & Z_KALLOC_ARRAY) {
1869 kr.addr = __kalloc_array_encode_zone(z, kr.addr, kr.size);
1870 }
1871 }
1872
1873 DTRACE_VM3(kalloc, vm_size_t, req_size, vm_size_t, kr.size, void*, kr.addr);
1874 return kr;
1875 }
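/*
 * Note (illustrative): on the zone path the returned kr.size is
 * trimmed back to the request unless Z_FULLSIZE or Z_KALLOC_ARRAY
 * asked for the whole bucket; e.g. a 100-byte Z_FULLSIZE request
 * served from a 128-byte zone would report kr.size == 128.
 */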
1876
1877 #undef kalloc_ext
1878
1879 struct kalloc_result
1880 kalloc_ext(
1881 void *kheap_or_kt_view,
1882 vm_size_t size,
1883 zalloc_flags_t flags,
1884 void *owner)
1885 {
1886 kalloc_type_var_view_t kt_view;
1887 kalloc_heap_t kheap;
1888 zone_stats_t zstats = NULL;
1889 zone_t z;
1890 uint16_t kt_hash;
1891 zone_id_t zstart;
1892
1893 if (kt_is_var_view(kheap_or_kt_view)) {
1894 kt_view = kt_demangle_var_view(kheap_or_kt_view);
1895 kheap = kalloc_type_get_heap(kt_view, false);
1896 /*
1897 * Use stats from view if present, else use stats from kheap.
1898 * KHEAP_KT_VAR accumulates stats for all allocations going to
1899 * kalloc.type.var zones, while KHEAP_DEFAULT and KHEAP_DATA_BUFFERS
1900 * use stats from the respective zones.
1901 */
1902 zstats = kt_view->kt_stats;
1903 kt_hash = (uint16_t) KT_GET_HASH(kt_view->kt_flags);
1904 zstart = kt_view->kt_heap_start ?: kheap->kh_zstart;
1905 } else {
1906 kt_view = NULL;
1907 kheap = kheap_or_kt_view;
1908 kt_hash = kheap->kh_type_hash;
1909 zstart = kheap->kh_zstart;
1910 }
1911
1912 if (!zstats) {
1913 zstats = kheap->kh_stats;
1914 }
1915
1916 z = kalloc_zone_for_size_with_flags(zstart, size, flags);
1917 if (z) {
1918 return kalloc_zone(z, zstats, flags, size);
1919 } else {
1920 return kalloc_large(kheap, size, flags, kt_hash, owner);
1921 }
1922 }
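/*
 * Usage sketch (illustrative): a plain heap allocation paired with its
 * sized free. The heap, size and flags are example values.
 *
 *	void *buf;
 *
 *	buf = kalloc_ext(KHEAP_DEFAULT, 128, Z_WAITOK | Z_ZERO, NULL).addr;
 *	if (buf) {
 *		// ... use buf ...
 *		kfree_ext(KHEAP_DEFAULT, buf, 128);
 *	}
 */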
1923
1924 #if XNU_PLATFORM_MacOSX
1925 void *
1926 kalloc_external(vm_size_t size);
1927 void *
1928 kalloc_external(vm_size_t size)
1929 {
1930 zalloc_flags_t flags = Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_KALLOC);
1931 return kheap_alloc(KHEAP_DEFAULT, size, flags);
1932 }
1933 #endif /* XNU_PLATFORM_MacOSX */
1934
1935 void *
1936 kalloc_data_external(vm_size_t size, zalloc_flags_t flags);
1937 void *
1938 kalloc_data_external(vm_size_t size, zalloc_flags_t flags)
1939 {
1940 flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC_DATA);
1941 return kheap_alloc(KHEAP_DATA_BUFFERS, size, flags);
1942 }
1943
1944 __abortlike
1945 static void
1946 kalloc_data_require_panic(void *addr, vm_size_t size)
1947 {
1948 zone_id_t zid = zone_id_for_element(addr, size);
1949
1950 if (zid != ZONE_ID_INVALID) {
1951 zone_t z = &zone_array[zid];
1952 zone_security_flags_t zsflags = zone_security_array[zid];
1953
1954 if (zsflags.z_kheap_id != KHEAP_ID_DATA_BUFFERS) {
1955 panic("kalloc_data_require failed: address %p in [%s%s]",
1956 addr, zone_heap_name(z), zone_name(z));
1957 }
1958
1959 panic("kalloc_data_require failed: address %p in [%s%s], "
1960 "size too large %zd > %zd", addr,
1961 zone_heap_name(z), zone_name(z),
1962 (size_t)size, (size_t)zone_elem_inner_size(z));
1963 } else {
1964 panic("kalloc_data_require failed: address %p not in zone native map",
1965 addr);
1966 }
1967 }
1968
1969 __abortlike
1970 static void
1971 kalloc_non_data_require_panic(void *addr, vm_size_t size)
1972 {
1973 zone_id_t zid = zone_id_for_element(addr, size);
1974
1975 if (zid != ZONE_ID_INVALID) {
1976 zone_t z = &zone_array[zid];
1977 zone_security_flags_t zsflags = zone_security_array[zid];
1978
1979 switch (zsflags.z_kheap_id) {
1980 case KHEAP_ID_NONE:
1981 case KHEAP_ID_DATA_BUFFERS:
1982 case KHEAP_ID_KT_VAR:
1983 panic("kalloc_non_data_require failed: address %p in [%s%s]",
1984 addr, zone_heap_name(z), zone_name(z));
1985 default:
1986 break;
1987 }
1988
1989 panic("kalloc_non_data_require failed: address %p in [%s%s], "
1990 "size too large %zd > %zd", addr,
1991 zone_heap_name(z), zone_name(z),
1992 (size_t)size, (size_t)zone_elem_inner_size(z));
1993 } else {
1994 panic("kalloc_non_data_require failed: address %p not in zone native map",
1995 addr);
1996 }
1997 }
1998
1999 void
2000 kalloc_data_require(void *addr, vm_size_t size)
2001 {
2002 zone_id_t zid = zone_id_for_element(addr, size);
2003
2004 if (zid != ZONE_ID_INVALID) {
2005 zone_t z = &zone_array[zid];
2006 zone_security_flags_t zsflags = zone_security_array[zid];
2007 if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS &&
2008 size <= zone_elem_inner_size(z)) {
2009 return;
2010 }
2011 } else if (kmem_range_id_contains(KMEM_RANGE_ID_DATA,
2012 (vm_address_t)pgz_decode(addr, size), size)) {
2013 return;
2014 }
2015
2016 kalloc_data_require_panic(addr, size);
2017 }
2018
2019 void
2020 kalloc_non_data_require(void *addr, vm_size_t size)
2021 {
2022 zone_id_t zid = zone_id_for_element(addr, size);
2023
2024 if (zid != ZONE_ID_INVALID) {
2025 zone_t z = &zone_array[zid];
2026 zone_security_flags_t zsflags = zone_security_array[zid];
2027 switch (zsflags.z_kheap_id) {
2028 case KHEAP_ID_NONE:
2029 if (!zsflags.z_kalloc_type) {
2030 break;
2031 }
2032 OS_FALLTHROUGH;
2033 case KHEAP_ID_DEFAULT:
2034 case KHEAP_ID_KT_VAR:
2035 if (size < zone_elem_inner_size(z)) {
2036 return;
2037 }
2038 break;
2039 default:
2040 break;
2041 }
2042 } else if (!kmem_range_id_contains(KMEM_RANGE_ID_DATA,
2043 (vm_address_t)pgz_decode(addr, size), size)) {
2044 return;
2045 }
2046
2047 kalloc_non_data_require_panic(addr, size);
2048 }
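/*
 * Usage sketch (illustrative): subsystems handing buffers to copyout
 * or DMA paths can assert their provenance first. `buf`, `len` and
 * `obj` are hypothetical.
 *
 *	// panics unless buf is a data-heap allocation (or sits in the
 *	// VM data range) whose element is at least len bytes
 *	kalloc_data_require(buf, len);
 *
 *	// converse check for pointer-bearing allocations
 *	kalloc_non_data_require(obj, sizeof(*obj));
 */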
2049
2050 void *
2051 kalloc_type_impl_external(kalloc_type_view_t kt_view, zalloc_flags_t flags)
2052 {
2053 /*
2054 * Callsites from kexts that aren't in the BootKC on macOS, or
2055 * any callsites on armv7, are not processed during startup and
2056 * therefore default to using kheap_alloc.
2057 *
2058 * Additionally, when the size is greater than KHEAP_MAX_SIZE, the
2059 * zone is left NULL as we need to use the VM for the allocation.
2060 *
2061 */
2062 if (__improbable(kt_view->kt_zv.zv_zone == ZONE_NULL)) {
2063 vm_size_t size = kalloc_type_get_size(kt_view->kt_size);
2064 flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2065 return kalloc_ext(KHEAP_DEFAULT, size, flags, NULL).addr;
2066 }
2067
2068 flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2069 return zalloc_flags(kt_view, flags);
2070 }
2071
2072 void *
2073 kalloc_type_var_impl_external(
2074 kalloc_type_var_view_t kt_view,
2075 vm_size_t size,
2076 zalloc_flags_t flags,
2077 void *owner);
2078 void *
2079 kalloc_type_var_impl_external(
2080 kalloc_type_var_view_t kt_view,
2081 vm_size_t size,
2082 zalloc_flags_t flags,
2083 void *owner)
2084 {
2085 flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2086 return kalloc_type_var_impl(kt_view, size, flags, owner);
2087 }
2088
2089 #pragma mark kfree
2090
2091 __abortlike
2092 static void
2093 kfree_heap_confusion_panic(kalloc_heap_t kheap, void *data, size_t size, zone_t z)
2094 {
2095 zone_security_flags_t zsflags = zone_security_config(z);
2096 const char *kheap_name = "";
2097
2098 if (kheap == KHEAP_ANY) {
2099 kheap_name = "KHEAP_ANY (default/kalloc type var)";
2100 } else {
2101 kheap_name = kalloc_heap_names[kheap->kh_heap_id];
2102 }
2103
2104 if (zsflags.z_kalloc_type) {
2105 panic_include_kalloc_types = true;
2106 kalloc_type_src_zone = z;
2107 panic("kfree: addr %p found in kalloc type zone '%s'"
2108 "but being freed to %s heap", data, z->z_name, kheap_name);
2109 }
2110
2111 if (zsflags.z_kheap_id == KHEAP_ID_NONE) {
2112 panic("kfree: addr %p, size %zd found in regular zone '%s%s'",
2113 data, size, zone_heap_name(z), z->z_name);
2114 } else {
2115 panic("kfree: addr %p, size %zd found in heap %s* instead of %s*",
2116 data, size, zone_heap_name(z), kheap_name);
2117 }
2118 }
2119
2120 __abortlike
2121 static void
2122 kfree_size_confusion_panic(zone_t z, void *data,
2123 size_t oob_offs, size_t size, size_t zsize)
2124 {
2125 if (z) {
2126 panic("kfree: addr %p, size %zd (offs:%zd) found in zone '%s%s' "
2127 "with elem_size %zd",
2128 data, size, oob_offs, zone_heap_name(z), z->z_name, zsize);
2129 } else {
2130 panic("kfree: addr %p, size %zd (offs:%zd) not found in any zone",
2131 data, size, oob_offs);
2132 }
2133 }
2134
2135 __abortlike
2136 static void
2137 kfree_size_invalid_panic(void *data, size_t size)
2138 {
2139 panic("kfree: addr %p trying to free with nonsensical size %zd",
2140 data, size);
2141 }
2142
2143 __abortlike
2144 static void
2145 kfree_size_require_panic(void *data, size_t size, size_t min_size,
2146 size_t max_size)
2147 {
2148 panic("kfree: addr %p has size %zd, not in specified bounds [%zd - %zd]",
2149 data, size, min_size, max_size);
2150 }
2151
2152 static void
2153 kfree_size_require(
2154 kalloc_heap_t kheap,
2155 void *addr,
2156 vm_size_t min_size,
2157 vm_size_t max_size)
2158 {
2159 assert3u(min_size, <=, max_size);
2160 zone_t max_zone = kalloc_zone_for_size(kheap->kh_zstart, max_size);
2161 vm_size_t max_zone_size = zone_elem_inner_size(max_zone);
2162 vm_size_t elem_size = zone_element_size(addr, NULL, false, NULL);
2163 if (elem_size > max_zone_size || elem_size < min_size) {
2164 kfree_size_require_panic(addr, elem_size, min_size, max_zone_size);
2165 }
2166 }
2167
2168 static void
2169 kfree_large(
2170 vm_offset_t addr,
2171 vm_size_t size,
2172 kmf_flags_t flags,
2173 void *owner)
2174 {
2175 size = kmem_free_guard(kernel_map, addr, size,
2176 flags | KMF_TAG | KMF_KASAN_GUARD,
2177 kalloc_guard(VM_KERN_MEMORY_NONE, 0, owner));
2178
2179 counter_dec(&kalloc_large_count);
2180 counter_add(&kalloc_large_total, -(uint64_t)size);
2181 KALLOC_ZINFO_SFREE(size);
2182 DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, addr);
2183 }
2184
2185 static void
2186 kfree_zone(
2187 void *kheap_or_kt_view __unsafe_indexable,
2188 void *data,
2189 vm_size_t size,
2190 zone_t z,
2191 vm_size_t zsize)
2192 {
2193 zone_security_flags_t zsflags = zone_security_config(z);
2194 kalloc_type_var_view_t kt_view;
2195 kalloc_heap_t kheap;
2196 zone_stats_t zstats;
2197
2198 if (kt_is_var_view(kheap_or_kt_view)) {
2199 kt_view = kt_demangle_var_view(kheap_or_kt_view);
2200 kheap = kalloc_type_get_heap(kt_view, true);
2201 zstats = kt_view->kt_stats;
2202 } else {
2203 kt_view = NULL;
2204 kheap = kheap_or_kt_view;
2205 zstats = kheap ? kheap->kh_stats : NULL;
2206 }
2207
2208 zsflags = zone_security_config(z);
2209 if (kheap != KHEAP_ANY && kheap != KHEAP_KT_VAR) {
2210 if (kheap->kh_heap_id != zsflags.z_kheap_id) {
2211 kfree_heap_confusion_panic(kheap, data, size, z);
2212 }
2213 } else if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
2214 /*
2215 * Allocations from kalloc.type.var zones use stats from KHEAP_KT_VAR
2216 * if they don't have private accounting. We need to use the kheap_id here
2217 * as we allow cross frees between the default heap and KT_VAR and may be
2218 * passed KHEAP_ANY, which would skew the stats if we relied on kheap.
2219 */
2220 if (zstats == NULL) {
2221 zstats = KHEAP_KT_VAR->kh_stats;
2222 }
2223 } else if (zsflags.z_kheap_id != KHEAP_ID_DEFAULT) {
2224 kfree_heap_confusion_panic(kheap, data, size, z);
2225 }
2226
2227 DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, zsize, void*, data);
2228
2229 /* needs to be __nosan because the user size might be partial */
2230 __nosan_bzero(data, zsize);
2231 zfree_ext(z, zstats ?: z->z_stats, data, ZFREE_PACK_SIZE(zsize, size));
2232 }
2233
2234 void
2235 kfree_ext(void *kheap_or_kt_view, void *data, vm_size_t size)
2236 {
2237 vm_size_t bucket_size;
2238 zone_t z;
2239
2240 if (data == NULL) {
2241 return;
2242 }
2243
2244 if (size > KFREE_ABSURD_SIZE) {
2245 kfree_size_invalid_panic(data, size);
2246 }
2247
2248 if (size <= KHEAP_MAX_SIZE) {
2249 vm_size_t oob_offs;
2250
2251 bucket_size = zone_element_size(data, &z, true, &oob_offs);
2252 if (size + oob_offs > bucket_size || bucket_size == 0) {
2253 kfree_size_confusion_panic(z, data,
2254 oob_offs, size, bucket_size);
2255 }
2256
2257 data = (char *)data - oob_offs;
2258 kfree_zone(kheap_or_kt_view, data, size, z, bucket_size);
2259 } else {
2260 kfree_large((vm_offset_t)data, size, KMF_NONE, NULL);
2261 }
2262 }
2263
2264 void
2265 kfree_addr_ext(kalloc_heap_t kheap, void *data)
2266 {
2267 vm_offset_t oob_offs;
2268 vm_size_t size, usize = 0;
2269 zone_t z;
2270
2271 if (data == NULL) {
2272 return;
2273 }
2274
2275 size = zone_element_size(data, &z, true, &oob_offs);
2276 if (size) {
2277 #if KASAN_CLASSIC
2278 usize = kasan_user_size((vm_offset_t)data);
2279 #endif
2280 data = (char *)data - oob_offs;
2281 kfree_zone(kheap, data, usize, z, size);
2282 } else {
2283 kfree_large((vm_offset_t)data, 0, KMF_GUESS_SIZE, NULL);
2284 }
2285 }
2286
2287 #if XNU_PLATFORM_MacOSX
2288 void
2289 kfree_external(void *addr, vm_size_t size);
2290 void
2291 kfree_external(void *addr, vm_size_t size)
2292 {
2293 kfree_ext(KHEAP_ANY, addr, size);
2294 }
2295 #endif /* XNU_PLATFORM_MacOSX */
2296
2297 void
2298 (kheap_free_bounded)(kalloc_heap_t kheap, void *addr,
2299 vm_size_t min_sz, vm_size_t max_sz)
2300 {
2301 if (__improbable(addr == NULL)) {
2302 return;
2303 }
2304 kfree_size_require(kheap, addr, min_sz, max_sz);
2305 kfree_addr_ext(kheap, addr);
2306 }
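/*
 * Usage sketch (illustrative): freeing an allocation whose exact size
 * wasn't tracked but whose possible range is known; anything outside
 * the bounds panics. The names below are hypothetical.
 *
 *	kheap_free_bounded(KHEAP_DEFAULT, elem,
 *	    sizeof(struct elem_small), sizeof(struct elem_large));
 */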
2307
2308 void
2309 kfree_type_impl_external(kalloc_type_view_t kt_view, void *ptr)
2310 {
2311 /*
2312 * If the callsite is from a kext that isn't in the BootKC, it wasn't
2313 * processed during startup, so default to using kheap_free.
2314 *
2315 * Additionally, when the size is greater than KHEAP_MAX_SIZE, the zone
2316 * is left NULL as we need to use the VM for the allocation/free.
2317 */
2318 if (kt_view->kt_zv.zv_zone == ZONE_NULL) {
2319 return kheap_free(KHEAP_DEFAULT, ptr,
2320 kalloc_type_get_size(kt_view->kt_size));
2321 }
2322 if (__improbable(ptr == NULL)) {
2323 return;
2324 }
2325 return zfree(kt_view, ptr);
2326 }
2327
2328 void
2329 kfree_type_var_impl_external(
2330 kalloc_type_var_view_t kt_view,
2331 void *ptr,
2332 vm_size_t size);
2333 void
2334 kfree_type_var_impl_external(
2335 kalloc_type_var_view_t kt_view,
2336 void *ptr,
2337 vm_size_t size)
2338 {
2339 return kfree_type_var_impl(kt_view, ptr, size);
2340 }
2341
2342 void
2343 kfree_data_external(void *ptr, vm_size_t size);
2344 void
2345 kfree_data_external(void *ptr, vm_size_t size)
2346 {
2347 return kheap_free(KHEAP_DATA_BUFFERS, ptr, size);
2348 }
2349
2350 void
2351 kfree_data_addr_external(void *ptr);
2352 void
2353 kfree_data_addr_external(void *ptr)
2354 {
2355 return kheap_free_addr(KHEAP_DATA_BUFFERS, ptr);
2356 }
2357
2358 #pragma mark krealloc
2359
2360 __abortlike
2361 static void
2362 krealloc_size_invalid_panic(void *data, size_t size)
2363 {
2364 panic("krealloc: addr %p trying to free with nonsensical size %zd",
2365 data, size);
2366 }
2367
2368 __attribute__((noinline))
2369 static struct kalloc_result
2370 krealloc_large(
2371 kalloc_heap_t kheap,
2372 vm_offset_t addr,
2373 vm_size_t old_size,
2374 vm_size_t new_size,
2375 zalloc_flags_t flags,
2376 uint16_t kt_hash,
2377 void *owner __unused)
2378 {
2379 kmr_flags_t kmr_flags = KMR_FREEOLD | KMR_TAG | KMR_KASAN_GUARD;
2380 vm_size_t new_req_size = new_size;
2381 vm_size_t old_req_size = old_size;
2382 uint64_t delta;
2383 kmem_return_t kmr;
2384 vm_tag_t tag;
2385
2386 if (flags & Z_NOFAIL) {
2387 panic("trying to kalloc(Z_NOFAIL) with a large size (%zd)",
2388 (size_t)new_req_size);
2389 }
2390
2391 /*
2392 * kmem_alloc could block, so return early if Z_NOWAIT was passed.
2393 *
2394 * Also quickly reject sizes larger than our address space,
2395 * as kt_size or IOMallocArraySize() expect this.
2396 */
2397 if ((flags & Z_NOWAIT) ||
2398 (new_req_size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS)) {
2399 return (struct kalloc_result){ };
2400 }
2401
2402 /*
2403 * (73465472) on Intel we didn't use to pass this flag,
2404 * which in turn allowed kalloc_large() memory to be shared
2405 * with userspace directly.
2406 *
2407 * We're bound by this unfortunate ABI.
2408 */
2409 if ((flags & Z_MAY_COPYINMAP) == 0) {
2410 #ifndef __x86_64__
2411 kmr_flags |= KMR_KOBJECT;
2412 #endif
2413 } else {
2414 assert(kheap == KHEAP_DATA_BUFFERS);
2415 kmr_flags &= ~KMR_TAG;
2416 }
2417 if (flags & Z_NOPAGEWAIT) {
2418 kmr_flags |= KMR_NOPAGEWAIT;
2419 }
2420 if (flags & Z_ZERO) {
2421 kmr_flags |= KMR_ZERO;
2422 }
2423 if (kheap == KHEAP_DATA_BUFFERS) {
2424 kmr_flags |= KMR_DATA;
2425 } else if (flags & (Z_KALLOC_ARRAY | Z_SPRAYQTN)) {
2426 kmr_flags |= KMR_SPRAYQTN;
2427 }
2428 if (flags & Z_REALLOCF) {
2429 kmr_flags |= KMR_REALLOCF;
2430 }
2431
2432 tag = zalloc_flags_get_tag(flags);
2433 if (flags & Z_VM_TAG_BT_BIT) {
2434 tag = vm_tag_bt() ?: tag;
2435 }
2436 if (tag == VM_KERN_MEMORY_NONE) {
2437 tag = kheap->kh_tag;
2438 }
2439
2440 kmr = kmem_realloc_guard(kernel_map, addr, old_req_size, new_req_size,
2441 kmr_flags, kalloc_guard(tag, kt_hash, owner));
2442
2443 new_size = round_page(new_req_size);
2444 old_size = round_page(old_req_size);
2445
2446 if (kmr.kmr_address != 0) {
2447 delta = (uint64_t)(new_size - old_size);
2448 } else if (flags & Z_REALLOCF) {
2449 counter_dec(&kalloc_large_count);
2450 delta = (uint64_t)(-old_size);
2451 } else {
2452 delta = 0;
2453 }
2454
2455 counter_add(&kalloc_large_total, delta);
2456 KALLOC_ZINFO_SALLOC(delta);
2457
2458 if (addr != 0 || (flags & Z_REALLOCF)) {
2459 DTRACE_VM3(kfree, vm_size_t, old_size, vm_size_t, old_req_size,
2460 void*, addr);
2461 }
2462 if (__improbable(kmr.kmr_address == 0)) {
2463 return (struct kalloc_result){ };
2464 }
2465
2466 DTRACE_VM3(kalloc, vm_size_t, new_size, vm_size_t, new_req_size,
2467 void*, kmr.kmr_address);
2468
2469 if (flags & Z_KALLOC_ARRAY) {
2470 kmr.kmr_address = __kalloc_array_encode_vm(kmr.kmr_address,
2471 new_req_size);
2472 }
2473 return (struct kalloc_result){ .addr = kmr.kmr_ptr, .size = new_req_size };
2474 }
2475
2476 #undef krealloc_ext
2477
2478 struct kalloc_result
2479 krealloc_ext(
2480 void *kheap_or_kt_view __unsafe_indexable,
2481 void *addr,
2482 vm_size_t old_size,
2483 vm_size_t new_size,
2484 zalloc_flags_t flags,
2485 void *owner)
2486 {
2487 vm_size_t old_bucket_size, new_bucket_size, min_size;
2488 kalloc_type_var_view_t kt_view;
2489 kalloc_heap_t kheap;
2490 zone_stats_t zstats = NULL;
2491 struct kalloc_result kr;
2492 vm_offset_t oob_offs = 0;
2493 zone_t old_z, new_z;
2494 uint16_t kt_hash = 0;
2495 zone_id_t zstart;
2496
2497 if (old_size > KFREE_ABSURD_SIZE) {
2498 krealloc_size_invalid_panic(addr, old_size);
2499 }
2500
2501 if (addr == NULL && new_size == 0) {
2502 return (struct kalloc_result){ };
2503 }
2504
2505 if (kt_is_var_view(kheap_or_kt_view)) {
2506 kt_view = kt_demangle_var_view(kheap_or_kt_view);
2507 kheap = kalloc_type_get_heap(kt_view, false);
2508 /*
2509 * Similar to kalloc_ext: Use stats from view if present,
2510 * else use stats from kheap.
2511 *
2512 * krealloc_type isn't exposed to kexts, so we don't need to
2513 * handle cross frees or KHEAP_ANY for typed allocations and
2514 * can rely on stats from view or kheap.
2515 */
2516 zstats = kt_view->kt_stats;
2517 kt_hash = KT_GET_HASH(kt_view->kt_flags);
2518 zstart = kt_view->kt_heap_start ?: kheap->kh_zstart;
2519 } else {
2520 kt_view = NULL;
2521 kheap = kheap_or_kt_view;
2522 kt_hash = kheap->kh_type_hash;
2523 zstart = kheap->kh_zstart;
2524 }
2525
2526 if (!zstats && kheap) {
2527 zstats = kheap->kh_stats;
2528 }
2529 /*
2530 * Find out the size of the bucket in which the new sized allocation
2531 * would land. If it matches the bucket of the original allocation,
2532 * simply return the same address.
2533 */
2534 if (new_size == 0) {
2535 new_z = ZONE_NULL;
2536 new_bucket_size = new_size = 0;
2537 } else {
2538 new_z = kalloc_zone_for_size_with_flags(zstart, new_size, flags);
2539 new_bucket_size = new_z ? zone_elem_inner_size(new_z) : round_page(new_size);
2540 }
2541 #if !KASAN_CLASSIC
2542 if (flags & Z_FULLSIZE) {
2543 new_size = new_bucket_size;
2544 }
2545 #endif /* !KASAN_CLASSIC */
2546
2547 if (addr == NULL) {
2548 old_z = ZONE_NULL;
2549 old_size = old_bucket_size = 0;
2550 } else if (kheap_size_from_zone(addr, old_size, flags)) {
2551 old_bucket_size = zone_element_size(addr, &old_z, true, &oob_offs);
2552 if (old_size + oob_offs > old_bucket_size || old_bucket_size == 0) {
2553 kfree_size_confusion_panic(old_z, addr,
2554 oob_offs, old_size, old_bucket_size);
2555 }
2556 __builtin_assume(old_z != ZONE_NULL);
2557 } else {
2558 old_z = ZONE_NULL;
2559 old_bucket_size = round_page(old_size);
2560 }
2561 min_size = MIN(old_size, new_size);
2562
2563 if (old_bucket_size == new_bucket_size && old_z) {
2564 kr.addr = (char *)addr - oob_offs;
2565 kr.size = new_size;
2566 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
2567 kr.addr = zone_element_pgz_oob_adjust(kr.addr,
2568 new_size, new_bucket_size);
2569 if (kr.addr != addr) {
2570 memmove(kr.addr, addr, min_size);
2571 bzero((char *)kr.addr + min_size,
2572 kr.size - min_size);
2573 }
2574 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
2575 #if KASAN_CLASSIC
2576 kasan_check_alloc((vm_offset_t)addr, old_bucket_size, old_size);
2577 kasan_alloc((vm_offset_t)addr, new_bucket_size, kr.size,
2578 KASAN_GUARD_SIZE, false, __builtin_frame_address(0));
2579 #endif /* KASAN_CLASSIC */
2580 #if KASAN_TBI
2581 /*
2582 * Validate the current buffer, then generate a new tag,
2583 * even if the address is stable, it's a "new" allocation.
2584 */
2585 __asan_loadN((vm_offset_t)addr, old_size);
2586 kr.addr = (void *)kasan_tbi_tag_zalloc((vm_offset_t)kr.addr,
2587 new_bucket_size, kr.size, false);
2588 #endif /* KASAN_TBI */
2589 goto out_success;
2590 }
2591
2592 #if !KASAN
2593 /*
2594 * Fall through to krealloc_large() for KASAN,
2595 * because we can't use kasan_check_alloc()
2596 * on kalloc_large() memory.
2597 *
2598 * kmem_realloc_guard() will perform all the validations
2599 * and re-tagging.
2600 */
2601 if (old_bucket_size == new_bucket_size) {
2602 kr.addr = (char *)addr - oob_offs;
2603 kr.size = new_size;
2604 goto out_success;
2605 }
2606 #endif
2607
2608 if (addr && !old_z && new_size && !new_z) {
2609 return krealloc_large(kheap, (vm_offset_t)addr,
2610 old_size, new_size, flags, kt_hash, owner);
2611 }
2612
2613 if (!new_size) {
2614 kr.addr = NULL;
2615 kr.size = 0;
2616 } else if (new_z) {
2617 kr = kalloc_zone(new_z, zstats,
2618 flags & ~Z_KALLOC_ARRAY, new_size);
2619 } else if (old_z || addr == NULL) {
2620 kr = kalloc_large(kheap, new_size,
2621 flags & ~Z_KALLOC_ARRAY, kt_hash, owner);
2622 }
2623
2624 if (addr && kr.addr) {
2625 __nosan_memcpy(kr.addr, addr, min_size);
2626 }
2627
2628 if (addr && (kr.addr || (flags & Z_REALLOCF) || !new_size)) {
2629 if (old_z) {
2630 kfree_zone(kheap_or_kt_view,
2631 (char *)addr - oob_offs, old_size,
2632 old_z, old_bucket_size);
2633 } else {
2634 kfree_large((vm_offset_t)addr, old_size, KMF_NONE, owner);
2635 }
2636 }
2637
2638 if (__improbable(kr.addr == NULL)) {
2639 return kr;
2640 }
2641
2642 out_success:
2643 if ((flags & Z_KALLOC_ARRAY) == 0) {
2644 return kr;
2645 }
2646
2647 if (new_z) {
2648 kr.addr = __kalloc_array_encode_zone(new_z,
2649 kr.addr, kr.size);
2650 } else {
2651 kr.addr = (void *)__kalloc_array_encode_vm((vm_offset_t)kr.addr,
2652 kr.size);
2653 }
2654 return kr;
2655 }
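/*
 * Usage sketch (illustrative): the usual grow-by-doubling pattern on
 * top of krealloc_ext(). Names are example-local; Z_REALLOCF frees the
 * old buffer even when the reallocation fails.
 *
 *	struct kalloc_result kr;
 *
 *	kr = krealloc_ext(KHEAP_DATA_BUFFERS, buf, cur_size,
 *	    cur_size * 2, Z_WAITOK | Z_REALLOCF, NULL);
 *	if (kr.addr == NULL) {
 *		return ENOMEM;  // old buffer already freed
 *	}
 *	buf = kr.addr;
 *	cur_size = kr.size;
 */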
2656
2657 void *
2658 krealloc_data_external(
2659 void *ptr,
2660 vm_size_t old_size,
2661 vm_size_t new_size,
2662 zalloc_flags_t flags);
2663 void *
2664 krealloc_data_external(
2665 void *ptr,
2666 vm_size_t old_size,
2667 vm_size_t new_size,
2668 zalloc_flags_t flags)
2669 {
2670 flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC_DATA);
2671 return krealloc_ext(KHEAP_DATA_BUFFERS, ptr, old_size, new_size, flags, NULL).addr;
2672 }
2673
2674 __startup_func
2675 void
2676 kheap_startup_init(kalloc_heap_t kheap)
2677 {
2678 kalloc_heap_t parent_heap;
2679
2680 switch (kheap->kh_heap_id) {
2681 case KHEAP_ID_DEFAULT:
2682 parent_heap = KHEAP_DEFAULT;
2683 break;
2684 case KHEAP_ID_DATA_BUFFERS:
2685 parent_heap = KHEAP_DATA_BUFFERS;
2686 break;
2687 default:
2688 panic("kalloc_heap_startup_init: invalid KHEAP_ID: %d",
2689 kheap->kh_heap_id);
2690 }
2691
2692 kheap->kh_zstart = parent_heap->kh_zstart;
2693 kheap->kh_heap_id = parent_heap->kh_heap_id;
2694 kheap->kh_tag = parent_heap->kh_tag;
2695 kheap->kh_stats = zalloc_percpu_permanent_type(struct zone_stats);
2696 kheap->kh_views = parent_heap->kh_views;
2697 parent_heap->kh_views = kheap;
2698 zone_view_count += 1;
2699
2700 #if XNU_PLATFORM_MacOSX
2701 if (kheap == KERN_OS_MALLOC) {
2702 kheap->kh_type_hash = (uint16_t) kalloc_hash_adjust(
2703 (uint32_t) early_random(), 0);
2704 }
2705 #endif
2706 }
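/*
 * Example (sketch): heaps reach this initializer via the
 * KALLOC_HEAP_DEFINE startup macro, which links the new view into its
 * parent heap; the heap name below is hypothetical.
 *
 *	KALLOC_HEAP_DEFINE(KHEAP_EXAMPLE, "example heap",
 *	    KHEAP_ID_DATA_BUFFERS);
 */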
2707
2708 #pragma mark IOKit/libkern helpers
2709
2710 #if XNU_PLATFORM_MacOSX
2711
2712 void *
2713 kern_os_malloc_external(size_t size);
2714 void *
2715 kern_os_malloc_external(size_t size)
2716 {
2717 if (size == 0) {
2718 return NULL;
2719 }
2720
2721 return kheap_alloc(KERN_OS_MALLOC, size,
2722 Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_LIBKERN));
2723 }
2724
2725 void
2726 kern_os_free_external(void *addr);
2727 void
2728 kern_os_free_external(void *addr)
2729 {
2730 kheap_free_addr(KERN_OS_MALLOC, addr);
2731 }
2732
2733 void *
2734 kern_os_realloc_external(void *addr, size_t nsize);
2735 void *
2736 kern_os_realloc_external(void *addr, size_t nsize)
2737 {
2738 zalloc_flags_t flags = Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_LIBKERN);
2739 vm_size_t osize, oob_offs = 0;
2740
2741 if (addr == NULL) {
2742 return kern_os_malloc_external(nsize);
2743 }
2744
2745 osize = zone_element_size(addr, NULL, false, &oob_offs);
2746 if (osize == 0) {
2747 osize = kmem_size_guard(kernel_map, (vm_offset_t)addr,
2748 kalloc_guard(VM_KERN_MEMORY_LIBKERN, 0, NULL));
2749 #if KASAN_CLASSIC
2750 } else {
2751 osize = kasan_user_size((vm_offset_t)addr);
2752 #endif
2753 }
2754 return __kheap_realloc(KERN_OS_MALLOC, addr, osize - oob_offs, nsize, flags, NULL);
2755 }
2756
2757 #endif /* XNU_PLATFORM_MacOSX */
2758
2759 void
2760 kern_os_zfree(zone_t zone, void *addr, vm_size_t size)
2761 {
2762 #if ZSECURITY_CONFIG(STRICT_IOKIT_FREE)
2763 #pragma unused(size)
2764 zfree(zone, addr);
2765 #else
2766 if (zone_owns(zone, addr)) {
2767 zfree(zone, addr);
2768 } else {
2769 /*
2770 * Third party kexts might not know about the operator new
2771 * overrides and may have allocated the object from the default heap.
2772 */
2773 printf("kern_os_zfree: kheap_free called for object from zone %s\n",
2774 zone->z_name);
2775 kheap_free(KHEAP_DEFAULT, addr, size);
2776 }
2777 #endif
2778 }
2779
2780 bool
2781 IOMallocType_from_vm(kalloc_type_view_t ktv)
2782 {
2783 return kalloc_type_from_vm(ktv->kt_flags);
2784 }
2785
2786 void
2787 kern_os_typed_free(kalloc_type_view_t ktv, void *addr, vm_size_t esize)
2788 {
2789 #if ZSECURITY_CONFIG(STRICT_IOKIT_FREE)
2790 #pragma unused(esize)
2791 #else
2792 /*
2793 * For third party kexts compiled with an SDK that predates macOS 11,
2794 * allocating an OSObject defined in xnu or first party kexts by
2795 * directly calling new will land in the default heap, as it calls
2796 * OSObject_operator_new_external. If such an object is freed by xnu,
2797 * it panics, as xnu uses the typed free, which requires the object to
2798 * have been allocated in a kalloc.type zone. To work around this
2799 * issue, detect whether the allocation being freed came from the
2800 * default heap and allow freeing to it.
2801 */
2802 zone_id_t zid = zone_id_for_element(addr, esize);
2803 if (__probable(zid < MAX_ZONES)) {
2804 zone_security_flags_t zsflags = zone_security_array[zid];
2805 if (zsflags.z_kheap_id == KHEAP_ID_DEFAULT) {
2806 return kheap_free(KHEAP_DEFAULT, addr, esize);
2807 }
2808 }
2809 #endif
2810 kfree_type_impl_external(ktv, addr);
2811 }
2812
2813 #pragma mark tests
2814 #if DEBUG || DEVELOPMENT
2815
2816 #include <sys/random.h>
2817
2818 /*
2819 * Ensure that the feature is on when the ZSECURITY_CONFIG is present.
2820 *
2821 * Note: Presence of zones with name kalloc.type* is used to
2822 * determine if the feature is on.
2823 */
2824 static int
2825 kalloc_type_feature_on(void)
2826 {
2827 boolean_t zone_found = false;
2828 const char kalloc_type_str[] = "kalloc.type";
2829 for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
2830 zone_t z = kalloc_type_zarray[i];
2831 while (z != NULL) {
2832 zone_found = true;
2833 if (strncmp(z->z_name, kalloc_type_str,
2834 strlen(kalloc_type_str)) != 0) {
2835 return 0;
2836 }
2837 z = z->z_kt_next;
2838 }
2839 }
2840
2841 if (!zone_found) {
2842 return 0;
2843 }
2844
2845 return 1;
2846 }
2847
2848 /*
2849 * Ensure that the policy uses the zone budget completely
2850 */
2851 static int
2852 kalloc_type_test_policy(int64_t in)
2853 {
2854 uint16_t zone_budget = (uint16_t) in;
2855 uint16_t max_bucket_freq = 25;
2856 uint16_t freq_list[MAX_K_ZONE(kt_zone_cfg)] = {};
2857 uint16_t zones_per_bucket[MAX_K_ZONE(kt_zone_cfg)] = {};
2858 uint16_t random[MAX_K_ZONE(kt_zone_cfg)];
2859 int ret = 0;
2860
2861 /*
2862 * Need a minimum of 2 zones per size class
2863 */
2864 if (zone_budget < MAX_K_ZONE(kt_zone_cfg) * 2) {
2865 return ret;
2866 }
2867 read_random((void *)&random[0], sizeof(random));
2868 for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
2869 freq_list[i] = random[i] % max_bucket_freq;
2870 }
2871 uint16_t wasted_zone_budget = kalloc_type_apply_policy(freq_list,
2872 zones_per_bucket, zone_budget);
2873 if (wasted_zone_budget == 0) {
2874 ret = 1;
2875 }
2876 return ret;
2877 }
2878
2879 /*
2880 * Ensure that size of adopters of kalloc_type fit in the zone
2881 * they have been assigned.
2882 */
2883 static int
2884 kalloc_type_check_size(zone_t z)
2885 {
2886 kalloc_type_view_t kt_cur = (kalloc_type_view_t) z->z_views;
2887
2888 while (kt_cur != NULL) {
2889 if (kalloc_type_get_size(kt_cur->kt_size) > z->z_elem_size) {
2890 return 0;
2891 }
2892 kt_cur = (kalloc_type_view_t) kt_cur->kt_zv.zv_next;
2893 }
2894
2895 return 1;
2896 }
2897
2898 struct test_kt_data {
2899 int a;
2900 };
2901
2902 static int
2903 kalloc_type_test_data_redirect(void)
2904 {
2905 struct kalloc_type_view ktv_data = {
2906 .kt_flags = KALLOC_TYPE_ADJUST_FLAGS(KT_SHARED_ACCT, struct test_kt_data),
2907 .kt_signature = KALLOC_TYPE_EMIT_SIG(struct test_kt_data),
2908 };
2909 if (!kalloc_type_is_data(ktv_data.kt_flags)) {
2910 printf("%s: data redirect failed\n", __func__);
2911 return 0;
2912 }
2913 return 1;
2914 }
2915
2916 static int
2917 run_kalloc_type_test(int64_t in, int64_t *out)
2918 {
2919 *out = 0;
2920 for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
2921 zone_t z = kalloc_type_zarray[i];
2922 while (z != NULL) {
2923 if (!kalloc_type_check_size(z)) {
2924 printf("%s: size check failed\n", __func__);
2925 return 0;
2926 }
2927 z = z->z_kt_next;
2928 }
2929 }
2930
2931 if (!kalloc_type_test_policy(in)) {
2932 printf("%s: policy check failed\n", __func__);
2933 return 0;
2934 }
2935
2936 if (!kalloc_type_feature_on()) {
2937 printf("%s: boot-arg is on but feature isn't\n", __func__);
2938 return 0;
2939 }
2940
2941 if (!kalloc_type_test_data_redirect()) {
2942 printf("%s: kalloc_type redirect for all data signature failed\n",
2943 __func__);
2944 return 0;
2945 }
2946
2947 printf("%s: test passed\n", __func__);
2948
2949 *out = 1;
2950 return 0;
2951 }
2952 SYSCTL_TEST_REGISTER(kalloc_type, run_kalloc_type_test);
2953
2954 static vm_size_t
2955 test_bucket_size(kalloc_heap_t kheap, vm_size_t size)
2956 {
2957 zone_t z = kalloc_zone_for_size(kheap->kh_zstart, size);
2958
2959 return z ? zone_elem_inner_size(z) : round_page(size);
2960 }
2961
2962 static int
2963 run_kalloc_test(int64_t in __unused, int64_t *out)
2964 {
2965 *out = 0;
2966 uint64_t *data_ptr;
2967 void *strippedp_old, *strippedp_new;
2968 size_t alloc_size = 0, old_alloc_size = 0;
2969 struct kalloc_result kr = {};
2970
2971 printf("%s: test running\n", __func__);
2972
2973 /*
2974 * Test size 0: alloc, free, realloc
2975 */
2976 data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size, Z_WAITOK | Z_NOFAIL,
2977 NULL).addr;
2978 if (!data_ptr) {
2979 printf("%s: kalloc 0 returned null\n", __func__);
2980 return 0;
2981 }
2982 kheap_free(KHEAP_DATA_BUFFERS, data_ptr, alloc_size);
2983
2984 data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size, Z_WAITOK | Z_NOFAIL,
2985 NULL).addr;
2986 alloc_size = sizeof(uint64_t) + 1;
2987 data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, old_alloc_size,
2988 alloc_size, Z_WAITOK | Z_NOFAIL, NULL).addr;
2989 if (!data_ptr) {
2990 printf("%s: krealloc -> old size 0 failed\n", __func__);
2991 return 0;
2992 }
2993 *data_ptr = 0;
2994
2995 /*
2996 * Test krealloc: same sizeclass, different size classes, 2pgs,
2997 * VM (with owner)
2998 */
2999 old_alloc_size = alloc_size;
3000 alloc_size++;
3001 kr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, old_alloc_size, alloc_size,
3002 Z_WAITOK | Z_NOFAIL, NULL);
3003
3004 #if CONFIG_KERNEL_TBI
3005 strippedp_old = VM_KERNEL_TBI_FILL(data_ptr);
3006 strippedp_new = VM_KERNEL_TBI_FILL(kr.addr);
3007 #else /* CONFIG_KERNEL_TBI */
3008 strippedp_old = data_ptr;
3009 strippedp_new = kr.addr;
3010 #endif /* !CONFIG_KERNEL_TBI */
3011
3012 if (!kr.addr || (strippedp_old != strippedp_new) ||
3013 (test_bucket_size(KHEAP_DATA_BUFFERS, kr.size) !=
3014 test_bucket_size(KHEAP_DATA_BUFFERS, old_alloc_size))) {
3015 printf("%s: krealloc -> same size class failed\n", __func__);
3016 return 0;
3017 }
3018 data_ptr = kr.addr;
3019 *data_ptr = 0;
3020
3021 old_alloc_size = alloc_size;
3022 alloc_size *= 2;
3023 kr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, old_alloc_size, alloc_size,
3024 Z_WAITOK | Z_NOFAIL, NULL);
3025
3026 #if CONFIG_KERNEL_TBI
3027 strippedp_old = VM_KERNEL_TBI_FILL(data_ptr);
3028 strippedp_new = VM_KERNEL_TBI_FILL(kr.addr);
3029 #else /* CONFIG_KERNEL_TBI */
3030 strippedp_old = data_ptr;
3031 strippedp_new = kr.addr;
3032 #endif /* !CONFIG_KERNEL_TBI */
3033
3034 if (!kr.addr || (strippedp_old == strippedp_new) ||
3035 (test_bucket_size(KHEAP_DATA_BUFFERS, kr.size) ==
3036 test_bucket_size(KHEAP_DATA_BUFFERS, old_alloc_size))) {
3037 printf("%s: krealloc -> different size class failed\n", __func__);
3038 return 0;
3039 }
3040 data_ptr = kr.addr;
3041 *data_ptr = 0;
3042
3043 kheap_free(KHEAP_DATA_BUFFERS, kr.addr, alloc_size);
3044
3045 alloc_size = 3544;
3046 data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size,
3047 Z_WAITOK | Z_FULLSIZE, &data_ptr).addr;
3048 if (!data_ptr) {
3049 printf("%s: kalloc 3544 with owner and Z_FULLSIZE returned not null\n",
3050 __func__);
3051 return 0;
3052 }
3053 *data_ptr = 0;
3054
3055 data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, alloc_size,
3056 PAGE_SIZE * 2, Z_REALLOCF | Z_WAITOK, &data_ptr).addr;
3057 if (!data_ptr) {
3058 printf("%s: krealloc -> 2pgs returned not null\n", __func__);
3059 return 0;
3060 }
3061 *data_ptr = 0;
3062
3063 data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, PAGE_SIZE * 2,
3064 KHEAP_MAX_SIZE * 2, Z_REALLOCF | Z_WAITOK, &data_ptr).addr;
3065 if (!data_ptr) {
3066 printf("%s: krealloc -> VM1 returned not null\n", __func__);
3067 return 0;
3068 }
3069 *data_ptr = 0;
3070
3071 data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, KHEAP_MAX_SIZE * 2,
3072 KHEAP_MAX_SIZE * 4, Z_REALLOCF | Z_WAITOK, &data_ptr).addr;
3073 if (!data_ptr) {
3074 printf("%s: krealloc -> VM2 returned null\n", __func__);
3075 return 0;
3076 }
3077 *data_ptr = 0;
3078
3079 krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, KHEAP_MAX_SIZE * 4,
3080 0, Z_REALLOCF | Z_WAITOK, &data_ptr);
3081
3082 printf("%s: test passed\n", __func__);
3083 *out = 1;
3084 return 0;
3085 }
3086 SYSCTL_TEST_REGISTER(kalloc, run_kalloc_test);
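/*
 * Note (assumption about the test harness): SYSCTL_TEST_REGISTER is
 * expected to expose these under the debug.test sysctl namespace on
 * DEVELOPMENT/DEBUG kernels, so they can be driven roughly as:
 *
 *	sysctl debug.test.kalloc=1
 *	sysctl debug.test.kalloc_type=<zone budget>
 */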
3087
3088 #endif
3089