1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_kern.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 * Date: 1985
62 *
63 * Kernel memory management.
64 */
65
66 #include <mach/kern_return.h>
67 #include <mach/vm_param.h>
68 #include <kern/assert.h>
69 #include <kern/thread.h>
70 #include <vm/vm_kern_internal.h>
71 #include <vm/vm_map_internal.h>
72 #include <vm/vm_object_internal.h>
73 #include <vm/vm_page_internal.h>
74 #include <vm/vm_compressor_xnu.h>
75 #include <vm/vm_pageout_xnu.h>
76 #include <vm/vm_init_xnu.h>
77 #include <vm/vm_fault.h>
78 #include <vm/vm_memtag.h>
79 #include <kern/misc_protos.h>
80 #include <vm/cpm_internal.h>
81 #include <kern/ledger.h>
82 #include <kern/bits.h>
83 #include <kern/startup.h>
84
85 #include <string.h>
86
87 #include <libkern/OSDebug.h>
88 #include <libkern/crypto/sha2.h>
89 #include <libkern/section_keywords.h>
90 #include <sys/kdebug.h>
91 #include <sys/kdebug_triage.h>
92
93 #include <san/kasan.h>
94 #include <kern/kext_alloc.h>
95 #include <kern/backtrace.h>
96 #include <os/hash.h>
97 #include <kern/zalloc_internal.h>
98 #include <libkern/crypto/rand.h>
99
100 /*
101 * Variables exported by this module.
102 */
103
104 SECURITY_READ_ONLY_LATE(vm_map_t) kernel_map;
105 SECURITY_READ_ONLY_LATE(struct mach_vm_range) kmem_ranges[KMEM_RANGE_COUNT];
106 SECURITY_READ_ONLY_LATE(struct mach_vm_range) kmem_large_ranges[KMEM_RANGE_COUNT];
107
108 static TUNABLE(uint32_t, kmem_ptr_ranges, "kmem_ptr_ranges",
109 KMEM_RANGE_ID_NUM_PTR);
110 #define KMEM_GOBJ_THRESHOLD (32ULL << 20)
111 #if DEBUG || DEVELOPMENT
112 #define KMEM_OUTLIER_LOG_SIZE (16ULL << 10)
113 #define KMEM_OUTLIER_SIZE 0
114 #define KMEM_OUTLIER_ALIGN 1
115 btlog_t kmem_outlier_log;
116 #endif /* DEBUG || DEVELOPMENT */
117
118 __startup_data static vm_map_size_t data_range_size;
119 __startup_data static vm_map_size_t ptr_range_size;
120 __startup_data static vm_map_size_t sprayqtn_range_size;
121
122 #pragma mark helpers
123
124 __attribute__((overloadable))
125 __header_always_inline kmem_flags_t
ANYF(kma_flags_t flags)
127 {
128 return (kmem_flags_t)flags;
129 }
130
131 __attribute__((overloadable))
132 __header_always_inline kmem_flags_t
ANYF(kmr_flags_t flags)
134 {
135 return (kmem_flags_t)flags;
136 }
137
138 __attribute__((overloadable))
139 __header_always_inline kmem_flags_t
ANYF(kmf_flags_t flags)
141 {
142 return (kmem_flags_t)flags;
143 }
144
145 __abortlike
146 static void
__kmem_invalid_size_panic(
148 vm_map_t map,
149 vm_size_t size,
150 uint32_t flags)
151 {
152 panic("kmem(map=%p, flags=0x%x): invalid size %zd",
153 map, flags, (size_t)size);
154 }
155
156 __abortlike
157 static void
__kmem_invalid_arguments_panic(
159 const char *what,
160 vm_map_t map,
161 vm_address_t address,
162 vm_size_t size,
163 uint32_t flags)
164 {
165 panic("kmem_%s(map=%p, addr=%p, size=%zd, flags=0x%x): "
166 "invalid arguments passed",
167 what, map, (void *)address, (size_t)size, flags);
168 }
169
170 __abortlike
171 static void
__kmem_failed_panic(
173 vm_map_t map,
174 vm_size_t size,
175 uint32_t flags,
176 kern_return_t kr,
177 const char *what)
178 {
179 panic("kmem_%s(%p, %zd, 0x%x): failed with %d",
180 what, map, (size_t)size, flags, kr);
181 }
182
183 __abortlike
184 static void
__kmem_entry_not_found_panic(
186 vm_map_t map,
187 vm_offset_t addr)
188 {
189 panic("kmem(map=%p) no entry found at %p", map, (void *)addr);
190 }
191
192 static inline vm_object_t
__kmem_object(kmem_flags_t flags)
194 {
195 if (flags & KMEM_COMPRESSOR) {
196 if (flags & KMEM_KOBJECT) {
197 panic("both KMEM_KOBJECT and KMEM_COMPRESSOR specified");
198 }
199 return compressor_object;
200 }
201 if (!(flags & KMEM_KOBJECT)) {
202 panic("KMEM_KOBJECT or KMEM_COMPRESSOR is required");
203 }
204 return kernel_object_default;
205 }
206
207 static inline pmap_mapping_type_t
__kmem_mapping_type(kmem_flags_t flags)
209 {
210 if (flags & (KMEM_DATA | KMEM_COMPRESSOR)) {
211 return PMAP_MAPPING_TYPE_DEFAULT;
212 } else {
213 return PMAP_MAPPING_TYPE_RESTRICTED;
214 }
215 }
216
217 static inline vm_size_t
__kmem_guard_left(kmem_flags_t flags)
219 {
220 return (flags & KMEM_GUARD_FIRST) ? PAGE_SIZE : 0;
221 }
222
223 static inline vm_size_t
__kmem_guard_right(kmem_flags_t flags)
225 {
226 return (flags & KMEM_GUARD_LAST) ? PAGE_SIZE : 0;
227 }
228
229 static inline vm_size_t
__kmem_guard_size(kmem_flags_t flags)
231 {
232 return __kmem_guard_left(flags) + __kmem_guard_right(flags);
233 }
234
235 __pure2
236 static inline vm_size_t
__kmem_entry_orig_size(vm_map_entry_t entry)
238 {
239 vm_object_t object = VME_OBJECT(entry);
240
241 if (entry->vme_kernel_object) {
242 return entry->vme_end - entry->vme_start -
243 entry->vme_object_or_delta;
244 } else {
245 return object->vo_size - object->vo_size_delta;
246 }
247 }
248
249
250 #pragma mark kmem range methods
251
252 #if __arm64__
253 // <rdar://problem/48304934> arm64 doesn't use ldp when I'd expect it to
254 #define mach_vm_range_load(r, r_min, r_max) \
255 asm("ldp %[rmin], %[rmax], [%[range]]" \
256 : [rmin] "=r"(r_min), [rmax] "=r"(r_max) \
257 : [range] "r"(r), "m"((r)->min_address), "m"((r)->max_address))
258 #else
259 #define mach_vm_range_load(r, rmin, rmax) \
260 ({ rmin = (r)->min_address; rmax = (r)->max_address; })
261 #endif
262
263 __abortlike
264 static void
__mach_vm_range_overflow(
266 mach_vm_offset_t addr,
267 mach_vm_offset_t size)
268 {
269 panic("invalid vm range: [0x%llx, 0x%llx + 0x%llx) wraps around",
270 addr, addr, size);
271 }
272
273 __abortlike
274 static void
__mach_vm_range_invalid(
276 mach_vm_offset_t min_address,
277 mach_vm_offset_t max_address)
278 {
279 panic("invalid vm range: [0x%llx, 0x%llx) wraps around",
280 min_address, max_address);
281 }
282
283 __header_always_inline mach_vm_size_t
mach_vm_range_size(const struct mach_vm_range *r)
285 {
286 mach_vm_offset_t rmin, rmax;
287
288 mach_vm_range_load(r, rmin, rmax);
289 return rmax - rmin;
290 }
291
292 __attribute__((overloadable))
293 __header_always_inline bool
mach_vm_range_contains(const struct mach_vm_range *r, mach_vm_offset_t addr)
295 {
296 mach_vm_offset_t rmin, rmax;
297
298 #if CONFIG_KERNEL_TAGGING
299 if (VM_KERNEL_ADDRESS(addr)) {
300 addr = vm_memtag_canonicalize_address(addr);
301 }
302 #endif /* CONFIG_KERNEL_TAGGING */
303
304 /*
305 * The `&` is not a typo: we really expect the check to pass,
306 * so encourage the compiler to eagerly load and test without branches
307 */
308 mach_vm_range_load(r, rmin, rmax);
309 return (addr >= rmin) & (addr < rmax);
310 }
311
312 __attribute__((overloadable))
313 __header_always_inline bool
mach_vm_range_contains(
315 const struct mach_vm_range *r,
316 mach_vm_offset_t addr,
317 mach_vm_offset_t size)
318 {
319 mach_vm_offset_t rmin, rmax;
320
321 #if CONFIG_KERNEL_TAGGING
322 if (VM_KERNEL_ADDRESS(addr)) {
323 addr = vm_memtag_canonicalize_address(addr);
324 }
325 #endif /* CONFIG_KERNEL_TAGGING */
326
327 /*
328 * The `&` is not a typo: we really expect the check to pass,
329 * so encourage the compiler to eagerly load and test without branches
330 */
331 mach_vm_range_load(r, rmin, rmax);
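	/*
	 * Note: the middle term also rejects arithmetic overflow of
	 * `addr + size`: a wrapped sum falls below a non-zero rmin and
	 * fails the check.
	 */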
332 return (addr >= rmin) & (addr + size >= rmin) & (addr + size <= rmax);
333 }
334
335 __attribute__((overloadable))
336 __header_always_inline bool
mach_vm_range_intersects(
338 const struct mach_vm_range *r1,
339 const struct mach_vm_range *r2)
340 {
341 mach_vm_offset_t r1_min, r1_max;
342 mach_vm_offset_t r2_min, r2_max;
343
344 mach_vm_range_load(r1, r1_min, r1_max);
345 r2_min = r2->min_address;
346 r2_max = r2->max_address;
347
348 if (r1_min > r1_max) {
349 __mach_vm_range_invalid(r1_min, r1_max);
350 }
351
352 if (r2_min > r2_max) {
353 __mach_vm_range_invalid(r2_min, r2_max);
354 }
355
356 return r1_max > r2_min && r1_min < r2_max;
357 }
358
359 __attribute__((overloadable))
360 __header_always_inline bool
mach_vm_range_intersects(
362 const struct mach_vm_range *r1,
363 mach_vm_offset_t addr,
364 mach_vm_offset_t size)
365 {
366 struct mach_vm_range r2;
367
368 addr = VM_KERNEL_STRIP_UPTR(addr);
369 r2.min_address = addr;
370 if (os_add_overflow(addr, size, &r2.max_address)) {
371 __mach_vm_range_overflow(addr, size);
372 }
373
374 return mach_vm_range_intersects(r1, &r2);
375 }
376
377 bool
kmem_range_id_contains(
379 kmem_range_id_t range_id,
380 vm_map_offset_t addr,
381 vm_map_size_t size)
382 {
383 return mach_vm_range_contains(&kmem_ranges[range_id], addr, size);
384 }
385
386 __abortlike
387 static void
kmem_range_invalid_panic(
389 kmem_range_id_t range_id,
390 vm_map_offset_t addr,
391 vm_map_size_t size)
392 {
393 const struct mach_vm_range *r = &kmem_ranges[range_id];
394 mach_vm_offset_t rmin, rmax;
395
396 mach_vm_range_load(r, rmin, rmax);
397 if (addr + size < rmin) {
398 panic("addr %p + size %llu overflows %p", (void *)addr, size,
399 (void *)(addr + size));
400 }
	panic("addr %p + size %llu doesn't fit in one range (id: %u min: %p max: %p)",
402 (void *)addr, size, range_id, (void *)rmin, (void *)rmax);
403 }
404
405 /*
406 * Return whether the entire allocation is contained in the given range
407 */
408 static bool
kmem_range_contains_fully(
410 kmem_range_id_t range_id,
411 vm_map_offset_t addr,
412 vm_map_size_t size)
413 {
414 const struct mach_vm_range *r = &kmem_ranges[range_id];
415 mach_vm_offset_t rmin, rmax;
416 bool result = false;
417
418 if (VM_KERNEL_ADDRESS(addr)) {
419 addr = vm_memtag_canonicalize_address(addr);
420 }
421
422 /*
423 * The `&` is not a typo: we really expect the check to pass,
424 * so encourage the compiler to eagerly load and test without branches
425 */
426 mach_vm_range_load(r, rmin, rmax);
427 result = (addr >= rmin) & (addr < rmax);
428 if (__improbable(result
429 && ((addr + size < rmin) || (addr + size > rmax)))) {
430 kmem_range_invalid_panic(range_id, addr, size);
431 }
432 return result;
433 }
434
435 vm_map_size_t
kmem_range_id_size(kmem_range_id_t range_id)
437 {
438 return mach_vm_range_size(&kmem_ranges[range_id]);
439 }
440
441 kmem_range_id_t
kmem_addr_get_range(vm_map_offset_t addr, vm_map_size_t size)
443 {
444 kmem_range_id_t range_id = KMEM_RANGE_ID_FIRST;
445
446 for (; range_id < KMEM_RANGE_COUNT; range_id++) {
447 if (kmem_range_contains_fully(range_id, addr, size)) {
448 return range_id;
449 }
450 }
451 return KMEM_RANGE_ID_NONE;
452 }
453
454 bool
kmem_is_ptr_range(vm_map_range_id_t range_id)
456 {
457 return (range_id >= KMEM_RANGE_ID_FIRST) &&
458 (range_id <= KMEM_RANGE_ID_NUM_PTR);
459 }
460
461 __abortlike
462 static void
kmem_range_invalid_for_overwrite(vm_map_offset_t addr)
464 {
465 panic("Can't overwrite mappings (addr: %p) in kmem ptr ranges",
466 (void *)addr);
467 }
468
469 mach_vm_range_t
kmem_validate_range_for_overwrite(
471 vm_map_offset_t addr,
472 vm_map_size_t size)
473 {
474 vm_map_range_id_t range_id = kmem_addr_get_range(addr, size);
475
476 if (kmem_is_ptr_range(range_id)) {
477 kmem_range_invalid_for_overwrite(addr);
478 }
479
480 return &kmem_ranges[range_id];
481 }
482
483
484 #pragma mark entry parameters
485
486
487 __abortlike
488 static void
__kmem_entry_validate_panic(
490 vm_map_t map,
491 vm_map_entry_t entry,
492 vm_offset_t addr,
493 vm_size_t size,
494 uint32_t flags,
495 kmem_guard_t guard)
496 {
497 const char *what = "???";
498
499 if (entry->vme_atomic != guard.kmg_atomic) {
500 what = "atomicity";
501 } else if (entry->is_sub_map != guard.kmg_submap) {
502 what = "objectness";
503 } else if (addr != entry->vme_start) {
504 what = "left bound";
505 } else if ((flags & KMF_GUESS_SIZE) == 0 && addr + size != entry->vme_end) {
506 what = "right bound";
507 } else if (guard.kmg_context != entry->vme_context) {
508 what = "guard";
509 }
510
511 panic("kmem(map=%p, addr=%p, size=%zd, flags=0x%x): "
512 "entry:%p %s mismatch guard(0x%08x)",
513 map, (void *)addr, size, flags, entry,
514 what, guard.kmg_context);
515 }
516
517 static bool
__kmem_entry_validate_guard(
519 vm_map_entry_t entry,
520 vm_offset_t addr,
521 vm_size_t size,
522 kmem_flags_t flags,
523 kmem_guard_t guard)
524 {
525 if (entry->vme_atomic != guard.kmg_atomic) {
526 return false;
527 }
528
529 if (!guard.kmg_atomic) {
530 return true;
531 }
532
533 if (entry->is_sub_map != guard.kmg_submap) {
534 return false;
535 }
536
537 if (addr != entry->vme_start) {
538 return false;
539 }
540
541 if ((flags & KMEM_GUESS_SIZE) == 0 && addr + size != entry->vme_end) {
542 return false;
543 }
544
545 if (!guard.kmg_submap && guard.kmg_context != entry->vme_context) {
546 return false;
547 }
548
549 return true;
550 }
551
552 void
kmem_entry_validate_guard(
554 vm_map_t map,
555 vm_map_entry_t entry,
556 vm_offset_t addr,
557 vm_size_t size,
558 kmem_guard_t guard)
559 {
560 if (!__kmem_entry_validate_guard(entry, addr, size, KMEM_NONE, guard)) {
561 __kmem_entry_validate_panic(map, entry, addr, size, KMEM_NONE, guard);
562 }
563 }
564
565 __abortlike
566 static void
__kmem_entry_validate_object_panic(
568 vm_map_t map,
569 vm_map_entry_t entry,
570 kmem_flags_t flags)
571 {
572 const char *what;
573 const char *verb;
574
575 if (entry->is_sub_map) {
576 panic("kmem(map=%p) entry %p is a submap", map, entry);
577 }
578
579 if (flags & KMEM_KOBJECT) {
580 what = "kernel";
581 verb = "isn't";
582 } else if (flags & KMEM_COMPRESSOR) {
583 what = "compressor";
584 verb = "isn't";
585 } else if (entry->vme_kernel_object) {
586 what = "kernel";
587 verb = "is unexpectedly";
588 } else {
589 what = "compressor";
590 verb = "is unexpectedly";
591 }
592
593 panic("kmem(map=%p, flags=0x%x): entry %p %s for the %s object",
594 map, flags, entry, verb, what);
595 }
596
597 static bool
__kmem_entry_validate_object(
599 vm_map_entry_t entry,
600 kmem_flags_t flags)
601 {
602 if (entry->is_sub_map) {
603 return false;
604 }
605 if ((bool)(flags & KMEM_KOBJECT) != entry->vme_kernel_object) {
606 return false;
607 }
608
609 return (bool)(flags & KMEM_COMPRESSOR) ==
610 (VME_OBJECT(entry) == compressor_object);
611 }
612
613 vm_size_t
kmem_size_guard(
615 vm_map_t map,
616 vm_offset_t addr,
617 kmem_guard_t guard)
618 {
619 kmem_flags_t flags = KMEM_GUESS_SIZE;
620 vm_map_entry_t entry;
621 vm_size_t size;
622
623 vm_map_lock_read(map);
624
625 #if KASAN_CLASSIC
626 addr -= PAGE_SIZE;
627 #endif /* KASAN_CLASSIC */
628 addr = vm_memtag_canonicalize_address(addr);
629
630 if (!vm_map_lookup_entry(map, addr, &entry)) {
631 __kmem_entry_not_found_panic(map, addr);
632 }
633
634 if (!__kmem_entry_validate_guard(entry, addr, 0, flags, guard)) {
635 __kmem_entry_validate_panic(map, entry, addr, 0, flags, guard);
636 }
637
638 size = __kmem_entry_orig_size(entry);
639
640 vm_map_unlock_read(map);
641
642 return size;
643 }
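
/*
 * Illustrative sketch (not a definition from this file): callers that only
 * track the start address of an atomic allocation can recover its original
 * size before freeing it, along the lines of:
 *
 *	vm_size_t size = kmem_size_guard(kernel_map, addr, guard);
 *	kmem_free_guard(kernel_map, addr, size, KMF_NONE, guard);
 */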
644
645 static inline uint16_t
kmem_hash_backtrace(
647 void *fp)
648 {
649 uint64_t bt_count;
650 uintptr_t bt[8] = {};
651
652 struct backtrace_control ctl = {
653 .btc_frame_addr = (uintptr_t)fp,
654 };
655
656 bt_count = backtrace(bt, sizeof(bt) / sizeof(bt[0]), &ctl, NULL);
657 return (uint16_t) os_hash_jenkins(bt, bt_count * sizeof(bt[0]));
658 }
659
660 static_assert(KMEM_RANGE_ID_DATA - 1 <= KMEM_RANGE_MASK,
661 "Insufficient bits to represent ptr ranges");
662
663 kmem_range_id_t
kmem_adjust_range_id(
665 uint32_t hash)
666 {
667 return (kmem_range_id_t) (KMEM_RANGE_ID_PTR_0 +
668 (hash & KMEM_RANGE_MASK) % kmem_ptr_ranges);
669 }
670
671 static bool
kmem_use_sprayqtn(
673 kma_flags_t kma_flags,
674 vm_map_size_t map_size,
675 vm_offset_t mask)
676 {
	/*
	 * Pointer allocations that exceed the guard-object threshold, or that
	 * request leading guard pages with a non-standard alignment, are
	 * redirected to the sprayqtn range.
	 */
682 #if DEBUG || DEVELOPMENT
683 btref_get_flags_t flags = (kma_flags & KMA_NOPAGEWAIT) ?
684 BTREF_GET_NOWAIT : 0;
685
686 if ((kma_flags & KMA_SPRAYQTN) == 0) {
687 if (map_size > KMEM_GOBJ_THRESHOLD) {
688 btlog_record(kmem_outlier_log, (void *)map_size, KMEM_OUTLIER_SIZE,
689 btref_get(__builtin_frame_address(0), flags));
690 } else if ((kma_flags & KMA_GUARD_FIRST) && (mask > PAGE_MASK)) {
691 btlog_record(kmem_outlier_log, (void *)mask, KMEM_OUTLIER_ALIGN,
692 btref_get(__builtin_frame_address(0), flags));
693 }
694 }
695 #endif /* DEBUG || DEVELOPMENT */
696
697 return (kma_flags & KMA_SPRAYQTN) ||
698 (map_size > KMEM_GOBJ_THRESHOLD) ||
699 ((kma_flags & KMA_GUARD_FIRST) && (mask > PAGE_MASK));
700 }
701
702 static void
kmem_apply_security_policy(
704 vm_map_t map,
705 kma_flags_t kma_flags,
706 kmem_guard_t guard,
707 vm_map_size_t map_size,
708 vm_offset_t mask,
709 vm_map_kernel_flags_t *vmk_flags,
710 bool assert_dir __unused)
711 {
712 kmem_range_id_t range_id;
713 bool from_right;
714 uint16_t type_hash = guard.kmg_type_hash;
715
716 if (startup_phase < STARTUP_SUB_KMEM || map != kernel_map) {
717 return;
718 }
719
720 /*
721 * A non-zero type-hash must be passed by krealloc_type
722 */
723 #if (DEBUG || DEVELOPMENT)
724 if (assert_dir && !(kma_flags & KMA_DATA)) {
725 assert(type_hash != 0);
726 }
727 #endif
728
729 if (kma_flags & KMA_DATA) {
730 range_id = KMEM_RANGE_ID_DATA;
		/*
		 * As an optimization for KMA_DATA, reduce fragmentation by
		 * allocating static (KMA_PERMANENT) carveouts at the end of
		 * the DATA range.
		 */
735 from_right = (bool)(kma_flags & KMA_PERMANENT);
736 } else if (kmem_use_sprayqtn(kma_flags, map_size, mask)) {
737 range_id = KMEM_RANGE_ID_SPRAYQTN;
738 from_right = (bool)(kma_flags & KMA_PERMANENT);
739 } else if (type_hash) {
740 range_id = (kmem_range_id_t)(type_hash & KMEM_RANGE_MASK);
741 from_right = type_hash & KMEM_DIRECTION_MASK;
742 } else {
743 /*
744 * Range id needs to correspond to one of the PTR ranges
745 */
746 type_hash = (uint16_t) kmem_hash_backtrace(__builtin_frame_address(0));
747 range_id = kmem_adjust_range_id(type_hash);
748 from_right = type_hash & KMEM_DIRECTION_MASK;
749 }
750
751 vmk_flags->vmkf_range_id = range_id;
752 vmk_flags->vmkf_last_free = from_right;
753 }
754
755 #pragma mark allocation
756
757 static kmem_return_t
758 kmem_alloc_guard_internal(
759 vm_map_t map,
760 vm_size_t size,
761 vm_offset_t mask,
762 kma_flags_t flags,
763 kmem_guard_t guard,
764 kern_return_t (^alloc_pages)(vm_size_t, kma_flags_t, vm_page_t *))
765 {
766 vm_object_t object;
767 vm_offset_t delta = 0;
768 vm_map_entry_t entry = NULL;
769 vm_map_offset_t map_addr, fill_start;
770 vm_map_size_t map_size, fill_size;
771 vm_page_t guard_left = VM_PAGE_NULL;
772 vm_page_t guard_right = VM_PAGE_NULL;
773 vm_page_t wired_page_list = VM_PAGE_NULL;
774 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_ANYWHERE();
775 bool skip_guards;
776 kmem_return_t kmr = { };
777
778 assert(kernel_map && map->pmap == kernel_pmap);
779
780 #if DEBUG || DEVELOPMENT
781 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_START,
782 size, 0, 0, 0);
783 #endif
784
785
786 if (size == 0 ||
787 (size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS) ||
788 (size < __kmem_guard_size(ANYF(flags)))) {
789 __kmem_invalid_size_panic(map, size, flags);
790 }
791
	/*
	 * Limit the size of a single extent of wired memory, to try and
	 * limit the damage to the system if too many pages get wired down.
	 * The limit is 2GB on systems with up to 128GB of physical memory,
	 * and scales with installed memory above that (sane_size / 64; e.g.
	 * a 256GB system allows 4GB per extent).
	 *
	 * Note: kmem_alloc_contig_guard() is immune to this check.
	 */
801 if (__improbable(!(flags & (KMA_VAONLY | KMA_PAGEABLE)) &&
802 alloc_pages == NULL &&
803 size > MAX(1ULL << 31, sane_size / 64))) {
804 kmr.kmr_return = KERN_RESOURCE_SHORTAGE;
805 goto out_error;
806 }
807
808 /*
809 * Guard pages:
810 *
811 * Guard pages are implemented as fictitious pages.
812 *
813 * However, some maps, and some objects are known
814 * to manage their memory explicitly, and do not need
815 * those to be materialized, which saves memory.
816 *
817 * By placing guard pages on either end of a stack,
818 * they can help detect cases where a thread walks
819 * off either end of its stack.
820 *
821 * They are allocated and set up here and attempts
822 * to access those pages are trapped in vm_fault_page().
823 *
824 * The map_size we were passed may include extra space for
825 * guard pages. fill_size represents the actual size to populate.
826 * Similarly, fill_start indicates where the actual pages
827 * will begin in the range.
828 */
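
	/*
	 * Illustrative sketch (hypothetical caller): a wired allocation with
	 * guard pages on both ends would be requested roughly as
	 *
	 *	kmem_return_t kmr = kmem_alloc_guard(kernel_map, size, 0,
	 *	    KMA_KOBJECT | KMA_GUARD_FIRST | KMA_GUARD_LAST,
	 *	    KMEM_GUARD_NONE);
	 *
	 * in which case fill_start/fill_size below carve the guard pages out
	 * of the populated portion.
	 */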
829
830 map_size = round_page(size);
831 fill_start = 0;
832 fill_size = map_size - __kmem_guard_size(ANYF(flags));
833
834 #if KASAN_CLASSIC
835 if (flags & KMA_KASAN_GUARD) {
836 assert((flags & (KMA_GUARD_FIRST | KMA_GUARD_LAST)) == 0);
		flags |= KMA_GUARD_FIRST | KMA_GUARD_LAST;
838 delta = ptoa(2);
839 map_size += delta;
840 }
841 #else
842 (void)delta;
843 #endif /* KASAN_CLASSIC */
844
845 skip_guards = (flags & (KMA_KOBJECT | KMA_COMPRESSOR)) ||
846 map->never_faults;
847
848 if (flags & KMA_GUARD_FIRST) {
849 vmk_flags.vmkf_guard_before = true;
850 fill_start += PAGE_SIZE;
851 }
852 if ((flags & KMA_GUARD_FIRST) && !skip_guards) {
853 guard_left = vm_page_grab_guard((flags & KMA_NOPAGEWAIT) == 0);
854 if (__improbable(guard_left == VM_PAGE_NULL)) {
855 kmr.kmr_return = KERN_RESOURCE_SHORTAGE;
856 goto out_error;
857 }
858 }
859 if ((flags & KMA_GUARD_LAST) && !skip_guards) {
860 guard_right = vm_page_grab_guard((flags & KMA_NOPAGEWAIT) == 0);
861 if (__improbable(guard_right == VM_PAGE_NULL)) {
862 kmr.kmr_return = KERN_RESOURCE_SHORTAGE;
863 goto out_error;
864 }
865 }
866
867 if (!(flags & (KMA_VAONLY | KMA_PAGEABLE))) {
868 if (alloc_pages) {
869 kmr.kmr_return = alloc_pages(fill_size, flags,
870 &wired_page_list);
871 } else {
872 kmr.kmr_return = vm_page_alloc_list(atop(fill_size), flags,
873 &wired_page_list);
874 }
875 if (__improbable(kmr.kmr_return != KERN_SUCCESS)) {
876 goto out_error;
877 }
878 }
879
880 /*
881 * Allocate a new object (if necessary). We must do this before
882 * locking the map, or risk deadlock with the default pager.
883 */
884 if (flags & KMA_KOBJECT) {
885 {
886 object = kernel_object_default;
887 }
888 vm_object_reference(object);
889 } else if (flags & KMA_COMPRESSOR) {
890 object = compressor_object;
891 vm_object_reference(object);
892 } else {
893 object = vm_object_allocate(map_size);
894 vm_object_lock(object);
895 vm_object_set_size(object, map_size, size);
896 /* stabilize the object to prevent shadowing */
897 object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
898 VM_OBJECT_SET_TRUE_SHARE(object, TRUE);
899 vm_object_unlock(object);
900 }
901
902 if (flags & KMA_LAST_FREE) {
903 vmk_flags.vmkf_last_free = true;
904 }
905 if (flags & KMA_PERMANENT) {
906 vmk_flags.vmf_permanent = true;
907 }
908 kmem_apply_security_policy(map, flags, guard, map_size, mask, &vmk_flags,
909 false);
910
911 kmr.kmr_return = vm_map_find_space(map, 0, map_size, mask,
912 vmk_flags, &entry);
913 if (__improbable(KERN_SUCCESS != kmr.kmr_return)) {
914 vm_object_deallocate(object);
915 goto out_error;
916 }
917
918 map_addr = entry->vme_start;
919 VME_OBJECT_SET(entry, object, guard.kmg_atomic, guard.kmg_context);
920 VME_ALIAS_SET(entry, guard.kmg_tag);
921 if (flags & (KMA_KOBJECT | KMA_COMPRESSOR)) {
922 VME_OFFSET_SET(entry, map_addr);
923 }
924
925 #if KASAN
926 if ((flags & KMA_KOBJECT) && guard.kmg_atomic) {
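		/*
		 * Record the last-page rounding slack (plus any KASAN guard
		 * delta) so that __kmem_entry_orig_size() can recover the
		 * size that was actually requested.
		 */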
927 entry->vme_object_or_delta = (-size & PAGE_MASK) + delta;
928 }
929 #endif /* KASAN */
930
931 if (!(flags & (KMA_COMPRESSOR | KMA_PAGEABLE))) {
932 entry->wired_count = 1;
933 vme_btref_consider_and_set(entry, __builtin_frame_address(0));
934 }
935
936 if (guard_left || guard_right || wired_page_list) {
937 vm_object_offset_t offset = 0ull;
938
939 vm_object_lock(object);
940 vm_map_unlock(map);
941
942 if (flags & (KMA_KOBJECT | KMA_COMPRESSOR)) {
943 offset = map_addr;
944 }
945
946 if (guard_left) {
947 vm_page_insert(guard_left, object, offset);
948 guard_left->vmp_busy = FALSE;
949 guard_left = VM_PAGE_NULL;
950 }
951
952 if (guard_right) {
953 vm_page_insert(guard_right, object,
954 offset + fill_start + fill_size);
955 guard_right->vmp_busy = FALSE;
956 guard_right = VM_PAGE_NULL;
957 }
958
959 if (wired_page_list) {
960 kernel_memory_populate_object_and_unlock(object,
961 map_addr + fill_start, offset + fill_start, fill_size,
962 wired_page_list, flags, guard.kmg_tag, VM_PROT_DEFAULT,
963 __kmem_mapping_type(ANYF(flags)));
964 } else {
965 vm_object_unlock(object);
966 }
967 } else {
968 vm_map_unlock(map);
969 }
970
	/*
	 * now that the pages are wired, we no longer have to fear coalescing
	 */
974 if (flags & (KMA_KOBJECT | KMA_COMPRESSOR)) {
975 vm_map_simplify(map, map_addr);
976 }
977
978 #if DEBUG || DEVELOPMENT
979 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
980 atop(fill_size), 0, 0, 0);
981 #endif /* DEBUG || DEVELOPMENT */
982 kmr.kmr_address = CAST_DOWN(vm_offset_t, map_addr);
983
984 #if KASAN
985 if (flags & (KMA_KASAN_GUARD | KMA_PAGEABLE)) {
986 /*
987 * We need to allow the range for pageable memory,
988 * or faulting will not be allowed.
989 */
990 kasan_notify_address(map_addr, map_size);
991 }
992 #endif /* KASAN */
993 #if KASAN_CLASSIC
994 if (flags & KMA_KASAN_GUARD) {
995 kmr.kmr_address += PAGE_SIZE;
996 kasan_alloc_large(kmr.kmr_address, size);
997 }
998 #endif /* KASAN_CLASSIC */
999 #if CONFIG_KERNEL_TAGGING
1000 if (!(flags & KMA_VAONLY) && (flags & KMA_TAG)) {
1001 kmr.kmr_address = vm_memtag_assign_tag(kmr.kmr_address, size);
1002 vm_memtag_set_tag((vm_offset_t)kmr.kmr_address, size);
1003 #if KASAN_TBI
1004 kasan_tbi_retag_unused_space((vm_offset_t)kmr.kmr_address, map_size, size);
1005 #endif /* KASAN_TBI */
1006 }
1007 #endif /* CONFIG_KERNEL_TAGGING */
1008 return kmr;
1009
1010 out_error:
1011 if (flags & KMA_NOFAIL) {
1012 __kmem_failed_panic(map, size, flags, kmr.kmr_return, "alloc");
1013 }
1014 if (guard_left) {
1015 guard_left->vmp_snext = wired_page_list;
1016 wired_page_list = guard_left;
1017 }
1018 if (guard_right) {
1019 guard_right->vmp_snext = wired_page_list;
1020 wired_page_list = guard_right;
1021 }
1022 if (wired_page_list) {
1023 vm_page_free_list(wired_page_list, FALSE);
1024 }
1025
1026 #if DEBUG || DEVELOPMENT
1027 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
1028 0, 0, 0, 0);
1029 #endif /* DEBUG || DEVELOPMENT */
1030
1031 return kmr;
1032 }
1033
1034 kmem_return_t
kmem_alloc_guard(
1036 vm_map_t map,
1037 vm_size_t size,
1038 vm_offset_t mask,
1039 kma_flags_t flags,
1040 kmem_guard_t guard)
1041 {
1042 return kmem_alloc_guard_internal(map, size, mask, flags, guard, NULL);
1043 }
1044
1045 kmem_return_t
kmem_alloc_contig_guard(
1047 vm_map_t map,
1048 vm_size_t size,
1049 vm_offset_t mask,
1050 ppnum_t max_pnum,
1051 ppnum_t pnum_mask,
1052 kma_flags_t flags,
1053 kmem_guard_t guard)
1054 {
1055 __auto_type alloc_pages = ^(vm_size_t fill_size, kma_flags_t kma_flags, vm_page_t *pages) {
1056 return cpm_allocate(fill_size, pages, max_pnum, pnum_mask, FALSE, kma_flags);
1057 };
1058
1059 return kmem_alloc_guard_internal(map, size, mask, flags, guard, alloc_pages);
1060 }
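
/*
 * Illustrative sketch (hypothetical caller): a wired, physically contiguous
 * allocation is requested roughly as
 *
 *	kmem_return_t kmr = kmem_alloc_contig_guard(kernel_map, size, 0,
 *	    max_pnum, pnum_mask, KMA_KOBJECT, KMEM_GUARD_NONE);
 *
 * where max_pnum and pnum_mask are forwarded to cpm_allocate() to constrain
 * which physical pages are acceptable.
 */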
1061
1062 kmem_return_t
kmem_suballoc(
1064 vm_map_t parent,
1065 mach_vm_offset_t *addr,
1066 vm_size_t size,
1067 vm_map_create_options_t vmc_options,
1068 int vm_flags,
1069 kms_flags_t flags,
1070 vm_tag_t tag)
1071 {
1072 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1073 vm_map_offset_t map_addr = 0;
1074 kmem_return_t kmr = { };
1075 vm_map_t map;
1076
1077 assert(page_aligned(size));
1078 assert(parent->pmap == kernel_pmap);
1079
1080 vm_map_kernel_flags_set_vmflags(&vmk_flags, vm_flags, tag);
1081
1082 if (parent == kernel_map) {
1083 assert(vmk_flags.vmf_overwrite || (flags & KMS_DATA));
1084 }
1085
1086 if (vmk_flags.vmf_fixed) {
1087 map_addr = trunc_page(*addr);
1088 }
1089
1090 pmap_reference(vm_map_pmap(parent));
1091 map = vm_map_create_options(vm_map_pmap(parent), 0, size, vmc_options);
1092
1093 /*
1094 * 1. vm_map_enter() will consume one ref on success.
1095 *
1096 * 2. make the entry atomic as kernel submaps should never be split.
1097 *
1098 * 3. instruct vm_map_enter() that it is a fresh submap
1099 * that needs to be taught its bounds as it inserted.
1100 */
1101 vm_map_reference(map);
1102
1103 vmk_flags.vmkf_submap = true;
1104 if ((flags & KMS_DATA) == 0) {
1105 /* FIXME: IOKit submaps get fragmented and can't be atomic */
1106 vmk_flags.vmkf_submap_atomic = true;
1107 }
1108 vmk_flags.vmkf_submap_adjust = true;
1109 if (flags & KMS_LAST_FREE) {
1110 vmk_flags.vmkf_last_free = true;
1111 }
1112 if (flags & KMS_PERMANENT) {
1113 vmk_flags.vmf_permanent = true;
1114 }
1115 if (flags & KMS_DATA) {
1116 vmk_flags.vmkf_range_id = KMEM_RANGE_ID_DATA;
1117 }
1118
1119 kmr.kmr_return = vm_map_enter(parent, &map_addr, size, 0,
1120 vmk_flags, (vm_object_t)map, 0, FALSE,
1121 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
1122
1123 if (kmr.kmr_return != KERN_SUCCESS) {
1124 if (flags & KMS_NOFAIL) {
1125 panic("kmem_suballoc(map=%p, size=%zd) failed with %d",
1126 parent, size, kmr.kmr_return);
1127 }
1128 assert(os_ref_get_count_raw(&map->map_refcnt) == 2);
1129 vm_map_deallocate(map);
1130 vm_map_deallocate(map); /* also removes ref to pmap */
1131 return kmr;
1132 }
1133
1134 /*
1135 * For kmem_suballocs that register a claim and are assigned a range, ensure
1136 * that the exact same range is returned.
1137 */
1138 if (*addr != 0 && parent == kernel_map &&
1139 startup_phase > STARTUP_SUB_KMEM) {
1140 assert(CAST_DOWN(vm_offset_t, map_addr) == *addr);
1141 } else {
1142 *addr = map_addr;
1143 }
1144
1145 kmr.kmr_submap = map;
1146 return kmr;
1147 }
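
/*
 * Illustrative sketch (hypothetical caller; the create options and tag shown
 * are assumptions): carving a submap out of kernel_map might look like
 *
 *	mach_vm_offset_t addr = 0;
 *	kmem_return_t kmr;
 *
 *	kmr = kmem_suballoc(kernel_map, &addr, size, VM_MAP_CREATE_DEFAULT,
 *	    VM_FLAGS_ANYWHERE, KMS_PERMANENT | KMS_DATA, VM_KERN_MEMORY_NONE);
 *	if (kmr.kmr_return == KERN_SUCCESS) {
 *		my_submap = kmr.kmr_submap;
 *	}
 */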
1148
1149 /*
1150 * kmem_alloc:
1151 *
1152 * Allocate wired-down memory in the kernel's address map
1153 * or a submap. The memory is not zero-filled.
1154 */
1155
1156 __exported kern_return_t
1157 kmem_alloc_external(
1158 vm_map_t map,
1159 vm_offset_t *addrp,
1160 vm_size_t size);
1161 kern_return_t
kmem_alloc_external(
1163 vm_map_t map,
1164 vm_offset_t *addrp,
1165 vm_size_t size)
1166 {
1167 if (size && (size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS) == 0) {
1168 return kmem_alloc(map, addrp, size, KMA_NONE, vm_tag_bt());
1169 }
1170 /* Maintain ABI compatibility: invalid sizes used to be allowed */
1171 return size ? KERN_NO_SPACE: KERN_INVALID_ARGUMENT;
1172 }
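
/*
 * Illustrative sketch (hypothetical in-kernel caller): the usual
 * allocate/free pairing for wired kernel memory looks roughly like
 *
 *	vm_offset_t addr;
 *	kern_return_t kr;
 *
 *	kr = kmem_alloc(kernel_map, &addr, size, KMA_DATA | KMA_ZERO, tag);
 *	if (kr == KERN_SUCCESS) {
 *		...
 *		kmem_free(kernel_map, addr, size);
 *	}
 *
 * (kmem_free() is part of the kmem interfaces defined elsewhere; KMA_ZERO
 * is assumed here to request zero-filled memory.)
 */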
1173
1174
1175 /*
1176 * kmem_alloc_kobject:
1177 *
1178 * Allocate wired-down memory in the kernel's address map
1179 * or a submap. The memory is not zero-filled.
1180 *
1181 * The memory is allocated in the kernel_object.
1182 * It may not be copied with vm_map_copy, and
1183 * it may not be reallocated with kmem_realloc.
1184 */
1185
1186 __exported kern_return_t
1187 kmem_alloc_kobject_external(
1188 vm_map_t map,
1189 vm_offset_t *addrp,
1190 vm_size_t size);
1191 kern_return_t
kmem_alloc_kobject_external(
1193 vm_map_t map,
1194 vm_offset_t *addrp,
1195 vm_size_t size)
1196 {
1197 if (size && (size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS) == 0) {
1198 return kmem_alloc(map, addrp, size, KMA_KOBJECT, vm_tag_bt());
1199 }
1200 /* Maintain ABI compatibility: invalid sizes used to be allowed */
1201 return size ? KERN_NO_SPACE: KERN_INVALID_ARGUMENT;
1202 }
1203
1204 /*
1205 * kmem_alloc_pageable:
1206 *
1207 * Allocate pageable memory in the kernel's address map.
1208 */
1209
1210 __exported kern_return_t
1211 kmem_alloc_pageable_external(
1212 vm_map_t map,
1213 vm_offset_t *addrp,
1214 vm_size_t size);
1215 kern_return_t
kmem_alloc_pageable_external(
1217 vm_map_t map,
1218 vm_offset_t *addrp,
1219 vm_size_t size)
1220 {
1221 if (size && (size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS) == 0) {
1222 return kmem_alloc(map, addrp, size, KMA_PAGEABLE | KMA_DATA, vm_tag_bt());
1223 }
1224 /* Maintain ABI compatibility: invalid sizes used to be allowed */
1225 return size ? KERN_NO_SPACE: KERN_INVALID_ARGUMENT;
1226 }
1227
1228 static inline kern_return_t
mach_vm_allocate_kernel_sanitize(
1230 vm_map_t map,
1231 mach_vm_offset_ut addr_u,
1232 mach_vm_size_ut size_u,
1233 vm_map_kernel_flags_t vmk_flags,
1234 vm_map_offset_t *map_addr,
1235 vm_map_size_t *map_size)
1236 {
1237 kern_return_t result;
1238 vm_map_offset_t map_end;
1239
1240 if (vmk_flags.vmf_fixed) {
1241 result = vm_sanitize_addr_size(addr_u, size_u,
1242 VM_SANITIZE_CALLER_VM_ALLOCATE_FIXED,
1243 map,
1244 VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS | VM_SANITIZE_FLAGS_REALIGN_START,
1245 map_addr, &map_end, map_size);
1246 if (__improbable(result != KERN_SUCCESS)) {
1247 return result;
1248 }
1249 } else {
1250 *map_addr = 0;
1251 result = vm_sanitize_size(0, size_u,
1252 VM_SANITIZE_CALLER_VM_ALLOCATE_ANYWHERE, map,
1253 VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS,
1254 map_size);
1255 if (__improbable(result != KERN_SUCCESS)) {
1256 return result;
1257 }
1258 }
1259
1260 return KERN_SUCCESS;
1261 }
1262
1263 kern_return_t
mach_vm_allocate_kernel(
1265 vm_map_t map,
1266 mach_vm_offset_ut *addr_u,
1267 mach_vm_size_ut size_u,
1268 vm_map_kernel_flags_t vmk_flags)
1269 {
1270 vm_map_offset_t map_addr;
1271 vm_map_size_t map_size;
1272 kern_return_t result;
1273
1274 if (map == VM_MAP_NULL) {
1275 ktriage_record(thread_tid(current_thread()),
1276 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
1277 KDBG_TRIAGE_RESERVED,
1278 KDBG_TRIAGE_VM_ALLOCATE_KERNEL_BADMAP_ERROR),
1279 KERN_INVALID_ARGUMENT /* arg */);
1280 return KERN_INVALID_ARGUMENT;
1281 }
1282
1283 if (!vm_map_kernel_flags_check_vm_and_kflags(vmk_flags,
1284 VM_FLAGS_USER_ALLOCATE)) {
1285 return KERN_INVALID_ARGUMENT;
1286 }
1287
1288 result = mach_vm_allocate_kernel_sanitize(map,
1289 *addr_u,
1290 size_u,
1291 vmk_flags,
1292 &map_addr,
1293 &map_size);
1294 if (__improbable(result != KERN_SUCCESS)) {
1295 result = vm_sanitize_get_kr(result);
1296 if (result == KERN_SUCCESS) {
1297 *addr_u = vm_sanitize_wrap_addr(0);
1298 } else {
1299 ktriage_record(thread_tid(current_thread()),
1300 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
1301 KDBG_TRIAGE_RESERVED,
1302 KDBG_TRIAGE_VM_ALLOCATE_KERNEL_BADSIZE_ERROR),
1303 KERN_INVALID_ARGUMENT /* arg */);
1304 }
1305 return result;
1306 }
1307
1308 vm_map_kernel_flags_update_range_id(&vmk_flags, map, map_size);
1309
1310 result = vm_map_enter(
1311 map,
1312 &map_addr,
1313 map_size,
1314 (vm_map_offset_t)0,
1315 vmk_flags,
1316 VM_OBJECT_NULL,
1317 (vm_object_offset_t)0,
1318 FALSE,
1319 VM_PROT_DEFAULT,
1320 VM_PROT_ALL,
1321 VM_INHERIT_DEFAULT);
1322
1323 if (result == KERN_SUCCESS) {
1324 #if KASAN
1325 if (map->pmap == kernel_pmap) {
1326 kasan_notify_address(map_addr, map_size);
1327 }
1328 #endif
1329 *addr_u = vm_sanitize_wrap_addr(map_addr);
1330 } else {
1331 ktriage_record(thread_tid(current_thread()),
1332 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
1333 KDBG_TRIAGE_RESERVED,
1334 KDBG_TRIAGE_VM_ALLOCATE_KERNEL_VMMAPENTER_ERROR),
1335 result /* arg */);
1336 }
1337 return result;
1338 }
1339
1340 #pragma mark population
1341
1342 static void
kernel_memory_populate_pmap_enter(
1344 vm_object_t object,
1345 vm_address_t addr,
1346 vm_object_offset_t offset,
1347 vm_page_t mem,
1348 vm_prot_t prot,
1349 int pe_flags,
1350 pmap_mapping_type_t mapping_type)
1351 {
1352 kern_return_t pe_result;
1353 int pe_options;
1354
1355 if (VMP_ERROR_GET(mem)) {
1356 panic("VM page %p should not have an error", mem);
1357 }
1358
1359 pe_options = PMAP_OPTIONS_NOWAIT;
1360 if (object->internal) {
1361 pe_options |= PMAP_OPTIONS_INTERNAL;
1362 }
1363 if (mem->vmp_reusable || object->all_reusable) {
1364 pe_options |= PMAP_OPTIONS_REUSABLE;
1365 }
1366
1367 pe_result = pmap_enter_options(kernel_pmap, addr + offset,
1368 VM_PAGE_GET_PHYS_PAGE(mem), prot, VM_PROT_NONE,
1369 pe_flags, /* wired */ TRUE, pe_options, NULL, mapping_type);
1370
1371 if (pe_result == KERN_RESOURCE_SHORTAGE) {
1372 vm_object_unlock(object);
1373
1374 pe_options &= ~PMAP_OPTIONS_NOWAIT;
1375
1376 pe_result = pmap_enter_options(kernel_pmap, addr + offset,
1377 VM_PAGE_GET_PHYS_PAGE(mem), prot, VM_PROT_NONE,
1378 pe_flags, /* wired */ TRUE, pe_options, NULL, mapping_type);
1379
1380 vm_object_lock(object);
1381 }
1382
1383 assert(pe_result == KERN_SUCCESS);
1384 }
1385
1386 void
kernel_memory_populate_object_and_unlock(
1388 vm_object_t object, /* must be locked */
1389 vm_address_t addr,
1390 vm_offset_t offset,
1391 vm_size_t size,
1392 vm_page_t page_list,
1393 kma_flags_t flags,
1394 vm_tag_t tag,
1395 vm_prot_t prot,
1396 pmap_mapping_type_t mapping_type)
1397 {
1398 vm_page_t mem;
1399 int pe_flags;
1400 bool gobbled_list = page_list && page_list->vmp_gobbled;
1401
1402 assert(((flags & KMA_KOBJECT) != 0) == (is_kernel_object(object) != 0));
1403 assert3u((bool)(flags & KMA_COMPRESSOR), ==, object == compressor_object);
1404
1405
1406 if (flags & (KMA_KOBJECT | KMA_COMPRESSOR)) {
1407 assert3u(offset, ==, addr);
1408 } else {
1409 /*
1410 * kernel_memory_populate_pmap_enter() might drop the object
1411 * lock, and the caller might not own a reference anymore
1412 * and rely on holding the vm object lock for liveness.
1413 */
1414 vm_object_reference_locked(object);
1415 }
1416
1417 if (flags & KMA_KSTACK) {
1418 pe_flags = VM_MEM_STACK;
1419 } else {
1420 pe_flags = 0;
1421 }
1422
1423
1424 for (vm_object_offset_t pg_offset = 0;
1425 pg_offset < size;
1426 pg_offset += PAGE_SIZE_64) {
1427 if (page_list == NULL) {
1428 panic("%s: page_list too short", __func__);
1429 }
1430
1431 mem = page_list;
1432 page_list = mem->vmp_snext;
1433 mem->vmp_snext = NULL;
1434
1435 assert(mem->vmp_wire_count == 0);
1436 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
1437 assert(!mem->vmp_fictitious && !mem->vmp_private);
1438
1439 if (flags & KMA_COMPRESSOR) {
1440 mem->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
1441 /*
1442 * Background processes doing I/O accounting can call
1443 * into NVME driver to do some work which results in
1444 * an allocation here and so we want to make sure
1445 * that the pages used by compressor, regardless of
1446 * process context, are never on the special Q.
1447 */
1448 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
1449
1450 vm_page_insert(mem, object, offset + pg_offset);
1451 } else {
1452 mem->vmp_q_state = VM_PAGE_IS_WIRED;
1453 mem->vmp_wire_count = 1;
1454
1455 vm_page_insert_wired(mem, object, offset + pg_offset, tag);
1456 }
1457
1458 mem->vmp_gobbled = false;
1459 mem->vmp_busy = false;
1460 mem->vmp_pmapped = true;
1461 mem->vmp_wpmapped = true;
1462
1463 /*
1464 * Manual PMAP_ENTER_OPTIONS() with shortcuts
1465 * for the kernel and compressor objects.
1466 */
1467 kernel_memory_populate_pmap_enter(object, addr, pg_offset,
1468 mem, prot, pe_flags, mapping_type);
1469
1470 if (flags & KMA_NOENCRYPT) {
1471 pmap_set_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
1472 }
1473 }
1474
1475 if (page_list) {
1476 panic("%s: page_list too long", __func__);
1477 }
1478
1479 vm_object_unlock(object);
1480 if ((flags & (KMA_KOBJECT | KMA_COMPRESSOR)) == 0) {
1481 vm_object_deallocate(object);
1482 }
1483
1484 /*
1485 * Update the accounting:
1486 * - the compressor "wired" pages don't really count as wired
1487 * - kmem_alloc_contig_guard() gives gobbled pages,
1488 * which already count as wired but need to be ungobbled.
1489 */
1490 if (gobbled_list) {
1491 vm_page_lockspin_queues();
1492 if (flags & KMA_COMPRESSOR) {
1493 vm_page_wire_count -= atop(size);
1494 }
1495 vm_page_gobble_count -= atop(size);
1496 vm_page_unlock_queues();
1497 } else if ((flags & KMA_COMPRESSOR) == 0) {
1498 vm_page_lockspin_queues();
1499 vm_page_wire_count += atop(size);
1500 vm_page_unlock_queues();
1501 }
1502
1503 if (flags & KMA_KOBJECT) {
1504 /* vm_page_insert_wired() handles regular objects already */
1505 vm_tag_update_size(tag, size, NULL);
1506 }
1507
1508 #if KASAN
1509 if (flags & KMA_COMPRESSOR) {
1510 kasan_notify_address_nopoison(addr, size);
1511 } else {
1512 kasan_notify_address(addr, size);
1513 }
1514 #endif /* KASAN */
1515 }
1516
1517
1518 kern_return_t
kernel_memory_populate(
1520 vm_offset_t addr,
1521 vm_size_t size,
1522 kma_flags_t flags,
1523 vm_tag_t tag)
1524 {
1525 kern_return_t kr = KERN_SUCCESS;
1526 vm_page_t page_list = NULL;
1527 vm_size_t page_count = atop_64(size);
1528 vm_object_t object = __kmem_object(ANYF(flags));
1529
1530 #if DEBUG || DEVELOPMENT
1531 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_START,
1532 size, 0, 0, 0);
1533 #endif /* DEBUG || DEVELOPMENT */
1534
1535
1536 kr = vm_page_alloc_list(page_count, flags, &page_list);
1537 if (kr == KERN_SUCCESS) {
1538 vm_object_lock(object);
1539 kernel_memory_populate_object_and_unlock(object, addr,
1540 addr, size, page_list, flags, tag, VM_PROT_DEFAULT,
1541 __kmem_mapping_type(ANYF(flags)));
1542 }
1543
1544 #if DEBUG || DEVELOPMENT
1545 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
1546 page_count, 0, 0, 0);
1547 #endif /* DEBUG || DEVELOPMENT */
1548 return kr;
1549 }
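
/*
 * Illustrative sketch (hypothetical caller): KMA_VAONLY allocations are
 * typically populated and depopulated page by page, e.g.
 *
 *	kmem_alloc(kernel_map, &va, size, KMA_KOBJECT | KMA_VAONLY, tag);
 *	...
 *	kernel_memory_populate(va + off, PAGE_SIZE, KMA_KOBJECT, tag);
 *	...
 *	kernel_memory_depopulate(va + off, PAGE_SIZE, KMA_KOBJECT, tag);
 */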
1550
1551 void
kernel_memory_depopulate(
1553 vm_offset_t addr,
1554 vm_size_t size,
1555 kma_flags_t flags,
1556 vm_tag_t tag)
1557 {
1558 vm_object_t object = __kmem_object(ANYF(flags));
1559 vm_object_offset_t offset = addr;
1560 vm_page_t mem;
1561 vm_page_t local_freeq = NULL;
1562 unsigned int pages_unwired = 0;
1563
1564 vm_object_lock(object);
1565
1566 pmap_protect(kernel_pmap, offset, offset + size, VM_PROT_NONE);
1567
1568 for (vm_object_offset_t pg_offset = 0;
1569 pg_offset < size;
1570 pg_offset += PAGE_SIZE_64) {
1571 mem = vm_page_lookup(object, offset + pg_offset);
1572
1573 assert(mem);
1574
1575 if (flags & KMA_COMPRESSOR) {
1576 assert(mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
1577 } else {
1578 assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
1579 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
1580 pages_unwired++;
1581 }
1582
1583 mem->vmp_busy = TRUE;
1584
1585 assert(mem->vmp_tabled);
1586 vm_page_remove(mem, TRUE);
1587 assert(mem->vmp_busy);
1588
1589 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
1590
1591 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
1592 mem->vmp_snext = local_freeq;
1593 local_freeq = mem;
1594 }
1595
1596 vm_object_unlock(object);
1597
1598 vm_page_free_list(local_freeq, TRUE);
1599
1600 if (!(flags & KMA_COMPRESSOR)) {
1601 vm_page_lockspin_queues();
1602 vm_page_wire_count -= pages_unwired;
1603 vm_page_unlock_queues();
1604 }
1605
1606 if (flags & KMA_KOBJECT) {
1607 /* vm_page_remove() handles regular objects already */
1608 vm_tag_update_size(tag, -ptoa_64(pages_unwired), NULL);
1609 }
1610 }
1611
1612 #pragma mark reallocation
1613
1614 __abortlike
1615 static void
__kmem_realloc_invalid_object_size_panic(
1617 vm_map_t map,
1618 vm_address_t address,
1619 vm_size_t size,
1620 vm_map_entry_t entry)
1621 {
1622 vm_object_t object = VME_OBJECT(entry);
1623 vm_size_t objsize = __kmem_entry_orig_size(entry);
1624
1625 panic("kmem_realloc(map=%p, addr=%p, size=%zd, entry=%p): "
1626 "object %p has unexpected size %ld",
1627 map, (void *)address, (size_t)size, entry, object, objsize);
1628 }
1629
1630 __abortlike
1631 static void
__kmem_realloc_invalid_pager_panic(
1633 vm_map_t map,
1634 vm_address_t address,
1635 vm_size_t size,
1636 vm_map_entry_t entry)
1637 {
1638 vm_object_t object = VME_OBJECT(entry);
1639 memory_object_t pager = object->pager;
1640 bool pager_created = object->pager_created;
1641 bool pager_initialized = object->pager_initialized;
1642 bool pager_ready = object->pager_ready;
1643
1644 panic("kmem_realloc(map=%p, addr=%p, size=%zd, entry=%p): "
1645 "object %p has unexpected pager %p (%d,%d,%d)",
1646 map, (void *)address, (size_t)size, entry, object,
1647 pager, pager_created, pager_initialized, pager_ready);
1648 }
1649
1650 static kmem_return_t
kmem_realloc_shrink_guard(
1652 vm_map_t map,
1653 vm_offset_t req_oldaddr,
1654 vm_size_t req_oldsize,
1655 vm_size_t req_newsize,
1656 kmr_flags_t flags,
1657 kmem_guard_t guard,
1658 vm_map_entry_t entry)
1659 {
1660 vmr_flags_t vmr_flags = VM_MAP_REMOVE_KUNWIRE;
1661 vm_object_t object;
1662 vm_offset_t delta = 0;
1663 kmem_return_t kmr;
1664 bool was_atomic;
1665 vm_size_t oldsize = round_page(req_oldsize);
1666 vm_size_t newsize = round_page(req_newsize);
1667 vm_address_t oldaddr = req_oldaddr;
1668
1669 #if KASAN_CLASSIC
1670 if (flags & KMR_KASAN_GUARD) {
1671 assert((flags & (KMR_GUARD_FIRST | KMR_GUARD_LAST)) == 0);
1672 flags |= KMR_GUARD_FIRST | KMR_GUARD_LAST;
1673 oldaddr -= PAGE_SIZE;
1674 delta = ptoa(2);
1675 oldsize += delta;
1676 newsize += delta;
1677 }
1678 #endif /* KASAN_CLASSIC */
1679
1680 if (flags & KMR_TAG) {
1681 oldaddr = vm_memtag_canonicalize_address(req_oldaddr);
1682 }
1683
1684 vm_map_lock_assert_exclusive(map);
1685
1686 if ((flags & KMR_KOBJECT) == 0) {
1687 object = VME_OBJECT(entry);
1688 vm_object_reference(object);
1689 }
1690
1691 /*
1692 * Shrinking an atomic entry starts with splitting it,
1693 * and removing the second half.
1694 */
1695 was_atomic = entry->vme_atomic;
1696 entry->vme_atomic = false;
1697 vm_map_clip_end(map, entry, entry->vme_start + newsize);
1698 entry->vme_atomic = was_atomic;
1699
1700 #if KASAN
1701 if (entry->vme_kernel_object && was_atomic) {
1702 entry->vme_object_or_delta = (-req_newsize & PAGE_MASK) + delta;
1703 }
1704 #if KASAN_CLASSIC
1705 if (flags & KMR_KASAN_GUARD) {
1706 kasan_poison_range(oldaddr + newsize, oldsize - newsize,
1707 ASAN_VALID);
1708 }
1709 #endif
1710 #if KASAN_TBI
1711 if (flags & KMR_TAG) {
1712 kasan_tbi_mark_free_space(req_oldaddr + newsize, oldsize - newsize);
1713 }
1714 #endif /* KASAN_TBI */
1715 #endif /* KASAN */
1716 (void)vm_map_remove_and_unlock(map,
1717 oldaddr + newsize, oldaddr + oldsize,
1718 vmr_flags, KMEM_GUARD_NONE);
1719
1720
1721 /*
1722 * Lastly, if there are guard pages, deal with them.
1723 *
1724 * The kernel object just needs to depopulate,
1725 * regular objects require freeing the last page
1726 * and replacing it with a guard.
1727 */
1728 if (flags & KMR_KOBJECT) {
1729 if (flags & KMR_GUARD_LAST) {
1730 kernel_memory_depopulate(oldaddr + newsize - PAGE_SIZE,
1731 PAGE_SIZE, KMA_KOBJECT, guard.kmg_tag);
1732 }
1733 } else {
1734 vm_page_t guard_right = VM_PAGE_NULL;
1735 vm_offset_t remove_start = newsize;
1736
1737 if (flags & KMR_GUARD_LAST) {
1738 if (!map->never_faults) {
1739 guard_right = vm_page_grab_guard(true);
1740 }
1741 remove_start -= PAGE_SIZE;
1742 }
1743
1744 vm_object_lock(object);
1745
1746 if (object->vo_size != oldsize) {
1747 __kmem_realloc_invalid_object_size_panic(map,
1748 req_oldaddr, req_oldsize + delta, entry);
1749 }
1750 vm_object_set_size(object, newsize, req_newsize);
1751
1752 vm_object_page_remove(object, remove_start, oldsize);
1753
1754 if (guard_right) {
1755 vm_page_insert(guard_right, object, newsize - PAGE_SIZE);
1756 guard_right->vmp_busy = false;
1757 }
1758 vm_object_unlock(object);
1759 vm_object_deallocate(object);
1760 }
1761
1762 kmr.kmr_address = req_oldaddr;
1763 kmr.kmr_return = 0;
1764 #if KASAN_CLASSIC
1765 if (flags & KMA_KASAN_GUARD) {
1766 kasan_alloc_large(kmr.kmr_address, req_newsize);
1767 }
1768 #endif /* KASAN_CLASSIC */
1769 #if KASAN_TBI
1770 if ((flags & KMR_TAG) && (flags & KMR_FREEOLD)) {
1771 kmr.kmr_address = vm_memtag_assign_tag(kmr.kmr_address, req_newsize);
1772 vm_memtag_set_tag(kmr.kmr_address, req_newsize);
1773 kasan_tbi_retag_unused_space(kmr.kmr_address, newsize, req_newsize);
1774 }
1775 #endif /* KASAN_TBI */
1776
1777 return kmr;
1778 }
1779
1780 kmem_return_t
kmem_realloc_guard(
1782 vm_map_t map,
1783 vm_offset_t req_oldaddr,
1784 vm_size_t req_oldsize,
1785 vm_size_t req_newsize,
1786 kmr_flags_t flags,
1787 kmem_guard_t guard)
1788 {
1789 vm_object_t object;
1790 vm_size_t oldsize;
1791 vm_size_t newsize;
1792 vm_offset_t delta = 0;
1793 vm_map_offset_t oldaddr;
1794 vm_map_offset_t newaddr;
1795 vm_object_offset_t newoffs;
1796 vm_map_entry_t oldentry;
1797 vm_map_entry_t newentry;
1798 vm_page_t page_list = NULL;
1799 bool needs_wakeup = false;
1800 kmem_return_t kmr = { };
1801 unsigned int last_timestamp;
1802 vm_map_kernel_flags_t vmk_flags = {
1803 .vmkf_last_free = (bool)(flags & KMR_LAST_FREE),
1804 };
1805
1806 assert(KMEM_REALLOC_FLAGS_VALID(flags));
1807 if (!guard.kmg_atomic && (flags & (KMR_DATA | KMR_KOBJECT)) != KMR_DATA) {
1808 __kmem_invalid_arguments_panic("realloc", map, req_oldaddr,
1809 req_oldsize, flags);
1810 }
1811
1812 if (req_oldaddr == 0ul) {
1813 return kmem_alloc_guard(map, req_newsize, 0, (kma_flags_t)flags, guard);
1814 }
1815
1816 if (req_newsize == 0ul) {
1817 kmem_free_guard(map, req_oldaddr, req_oldsize,
1818 (kmf_flags_t)flags, guard);
1819 return kmr;
1820 }
1821
1822 if (req_newsize >> VM_KERNEL_POINTER_SIGNIFICANT_BITS) {
1823 __kmem_invalid_size_panic(map, req_newsize, flags);
1824 }
1825 if (req_newsize < __kmem_guard_size(ANYF(flags))) {
1826 __kmem_invalid_size_panic(map, req_newsize, flags);
1827 }
1828
1829 oldsize = round_page(req_oldsize);
1830 newsize = round_page(req_newsize);
1831 oldaddr = req_oldaddr;
1832 #if KASAN_CLASSIC
1833 if (flags & KMR_KASAN_GUARD) {
1834 flags |= KMR_GUARD_FIRST | KMR_GUARD_LAST;
1835 oldaddr -= PAGE_SIZE;
1836 delta = ptoa(2);
1837 oldsize += delta;
1838 newsize += delta;
1839 }
1840 #endif /* KASAN_CLASSIC */
1841 #if CONFIG_KERNEL_TAGGING
1842 if (flags & KMR_TAG) {
1843 vm_memtag_verify_tag(req_oldaddr);
1844 oldaddr = vm_memtag_canonicalize_address(req_oldaddr);
1845 }
1846 #endif /* CONFIG_KERNEL_TAGGING */
1847
1848 #if !KASAN
1849 /*
1850 * If not on a KASAN variant and no difference in requested size,
1851 * just return.
1852 *
1853 * Otherwise we want to validate the size and re-tag for KASAN_TBI.
1854 */
1855 if (oldsize == newsize) {
1856 kmr.kmr_address = req_oldaddr;
1857 return kmr;
1858 }
1859 #endif /* !KASAN */
1860
1861 /*
1862 * If we're growing the allocation,
1863 * reserve the pages we'll need,
1864 * and find a spot for the new mapping.
1865 */
1866 if (oldsize < newsize) {
1867 #if DEBUG || DEVELOPMENT
1868 VM_DEBUG_CONSTANT_EVENT(vm_kern_request,
1869 DBG_VM_KERN_REQUEST, DBG_FUNC_START,
1870 newsize - oldsize, 0, 0, 0);
1871 #endif /* DEBUG || DEVELOPMENT */
1872 kmr.kmr_return = vm_page_alloc_list(atop(newsize - oldsize),
1873 (kma_flags_t)flags, &page_list);
1874 if (kmr.kmr_return == KERN_SUCCESS) {
1875 kmem_apply_security_policy(map, (kma_flags_t)flags, guard,
1876 newsize, 0, &vmk_flags, true);
1877 kmr.kmr_return = vm_map_find_space(map, 0, newsize, 0,
1878 vmk_flags, &newentry);
1879 }
1880 if (__improbable(kmr.kmr_return != KERN_SUCCESS)) {
1881 if (flags & KMR_REALLOCF) {
1882 kmem_free_guard(map, req_oldaddr, req_oldsize,
1883 KMF_NONE, guard);
1884 }
1885 if (page_list) {
1886 vm_page_free_list(page_list, FALSE);
1887 }
1888 #if DEBUG || DEVELOPMENT
1889 VM_DEBUG_CONSTANT_EVENT(vm_kern_request,
1890 DBG_VM_KERN_REQUEST, DBG_FUNC_END,
1891 0, 0, 0, 0);
1892 #endif /* DEBUG || DEVELOPMENT */
1893 return kmr;
1894 }
1895
1896 /* map is locked */
1897 } else {
1898 vm_map_lock(map);
1899 }
1900
1901
1902 /*
1903 * Locate the entry:
1904 * - wait for it to quiesce,
1905 * - validate its guard,
1906 * - learn its correct tag.
1907 */
1908 again:
1909 if (!vm_map_lookup_entry(map, oldaddr, &oldentry)) {
1910 __kmem_entry_not_found_panic(map, req_oldaddr);
1911 }
1912 if ((flags & KMR_KOBJECT) && oldentry->in_transition) {
1913 oldentry->needs_wakeup = true;
1914 vm_map_entry_wait(map, THREAD_UNINT);
1915 goto again;
1916 }
1917 kmem_entry_validate_guard(map, oldentry, oldaddr, oldsize, guard);
1918 if (!__kmem_entry_validate_object(oldentry, ANYF(flags))) {
1919 __kmem_entry_validate_object_panic(map, oldentry, ANYF(flags));
1920 }
1921 /*
1922 * TODO: We should validate for non-atomic entries that the range
1923 * we are acting on is what we expect here.
1924 */
1925 #if KASAN
1926 if (__kmem_entry_orig_size(oldentry) != req_oldsize) {
1927 __kmem_realloc_invalid_object_size_panic(map,
1928 req_oldaddr, req_oldsize + delta, oldentry);
1929 }
1930
1931 if (oldsize == newsize) {
1932 kmr.kmr_address = req_oldaddr;
1933 if (oldentry->vme_kernel_object) {
1934 oldentry->vme_object_or_delta = delta +
1935 (-req_newsize & PAGE_MASK);
1936 } else {
1937 object = VME_OBJECT(oldentry);
1938 vm_object_lock(object);
1939 vm_object_set_size(object, newsize, req_newsize);
1940 vm_object_unlock(object);
1941 }
1942 vm_map_unlock(map);
1943
1944 #if KASAN_CLASSIC
1945 if (flags & KMA_KASAN_GUARD) {
1946 kasan_alloc_large(kmr.kmr_address, req_newsize);
1947 }
1948 #endif /* KASAN_CLASSIC */
1949 #if KASAN_TBI
1950 if ((flags & KMR_TAG) && (flags & KMR_FREEOLD)) {
1951 kmr.kmr_address = vm_memtag_assign_tag(kmr.kmr_address, req_newsize);
1952 vm_memtag_set_tag(kmr.kmr_address, req_newsize);
1953 kasan_tbi_retag_unused_space(kmr.kmr_address, newsize, req_newsize);
1954 }
1955 #endif /* KASAN_TBI */
1956 return kmr;
1957 }
1958 #endif /* KASAN */
1959
1960 guard.kmg_tag = VME_ALIAS(oldentry);
1961
1962 if (newsize < oldsize) {
1963 return kmem_realloc_shrink_guard(map, req_oldaddr,
1964 req_oldsize, req_newsize, flags, guard, oldentry);
1965 }
1966
1967
1968 /*
1969 * We are growing the entry.
1970 *
1971 * For regular objects we use the object `vo_size` update
1972 * as a guarantee that no two kmem_realloc() calls can happen
1973 * concurrently (by doing it before the map is unlocked).
1974 *
1975 * For the kernel object, prevent the entry from being
1976 * reallocated or changed by marking it "in_transition".
1977 */
1978
1979 object = VME_OBJECT(oldentry);
1980 vm_object_lock(object);
1981 vm_object_reference_locked(object);
1982
1983 newaddr = newentry->vme_start;
1984 newoffs = oldsize;
1985
1986 VME_OBJECT_SET(newentry, object, guard.kmg_atomic, guard.kmg_context);
1987 VME_ALIAS_SET(newentry, guard.kmg_tag);
1988 if (flags & KMR_KOBJECT) {
1989 oldentry->in_transition = true;
1990 VME_OFFSET_SET(newentry, newaddr);
1991 newentry->wired_count = 1;
1992 vme_btref_consider_and_set(newentry, __builtin_frame_address(0));
1993 newoffs = newaddr + oldsize;
1994 #if KASAN
1995 newentry->vme_object_or_delta = delta +
1996 (-req_newsize & PAGE_MASK);
1997 #endif /* KASAN */
1998 } else {
1999 if (object->pager_created || object->pager) {
2000 /*
2001 * We can't "realloc/grow" the pager, so pageable
2002 * allocations should not go through this path.
2003 */
2004 __kmem_realloc_invalid_pager_panic(map,
2005 req_oldaddr, req_oldsize + delta, oldentry);
2006 }
2007 if (object->vo_size != oldsize) {
2008 __kmem_realloc_invalid_object_size_panic(map,
2009 req_oldaddr, req_oldsize + delta, oldentry);
2010 }
2011 vm_object_set_size(object, newsize, req_newsize);
2012 }
2013
2014 last_timestamp = map->timestamp;
2015 vm_map_unlock(map);
2016
2017
2018 /*
2019 * Now proceed with the population of pages.
2020 *
2021 * Kernel objects can use the kmem population helpers.
2022 *
2023 * Regular objects will insert pages manually,
2024 * then wire the memory into the new range.
2025 */
2026
2027 vm_size_t guard_right_size = __kmem_guard_right(ANYF(flags));
2028
2029 if (flags & KMR_KOBJECT) {
2030 pmap_mapping_type_t mapping_type = __kmem_mapping_type(ANYF(flags));
2031
2032 pmap_protect(kernel_pmap,
2033 oldaddr, oldaddr + oldsize - guard_right_size,
2034 VM_PROT_NONE);
2035
2036 for (vm_object_offset_t offset = 0;
2037 offset < oldsize - guard_right_size;
2038 offset += PAGE_SIZE_64) {
2039 vm_page_t mem;
2040
2041 mem = vm_page_lookup(object, oldaddr + offset);
2042 if (mem == VM_PAGE_NULL) {
2043 continue;
2044 }
2045
2046 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
2047
2048 mem->vmp_busy = true;
2049 vm_page_remove(mem, true);
2050 vm_page_insert_wired(mem, object, newaddr + offset,
2051 guard.kmg_tag);
2052 mem->vmp_busy = false;
2053
2054 kernel_memory_populate_pmap_enter(object, newaddr,
2055 offset, mem, VM_PROT_DEFAULT, 0, mapping_type);
2056 }
2057
2058 kernel_memory_populate_object_and_unlock(object,
2059 newaddr + oldsize - guard_right_size,
2060 newoffs - guard_right_size,
2061 newsize - oldsize,
2062 page_list, (kma_flags_t)flags,
2063 guard.kmg_tag, VM_PROT_DEFAULT, mapping_type);
2064 } else {
2065 vm_page_t guard_right = VM_PAGE_NULL;
2066
2067 /*
2068 * Note: we are borrowing the new entry reference
2069 * on the object for the duration of this code,
2070 * which works because we keep the object locked
2071 * throughout.
2072 */
2073 if ((flags & KMR_GUARD_LAST) && !map->never_faults) {
2074 guard_right = vm_page_lookup(object, oldsize - PAGE_SIZE);
2075 assert(guard_right->vmp_fictitious);
2076 guard_right->vmp_busy = true;
2077 vm_page_remove(guard_right, true);
2078 }
2079
2080 if (flags & KMR_FREEOLD) {
2081 /*
2082 * Freeing the old mapping will make
2083 * the old pages become pageable until
2084 * the new mapping makes them wired again.
2085 * Let's take an extra "wire_count" to
2086 * prevent any accidental "page out".
2087 * We'll have to undo that after wiring
2088 * the new mapping.
2089 */
2090 vm_object_reference_locked(object); /* keep object alive */
2091 for (vm_object_offset_t offset = 0;
2092 offset < oldsize - guard_right_size;
2093 offset += PAGE_SIZE_64) {
2094 vm_page_t mem;
2095
2096 mem = vm_page_lookup(object, offset);
2097 assert(mem != VM_PAGE_NULL);
2098 assertf(!VM_PAGE_PAGEABLE(mem),
2099 "mem %p qstate %d",
2100 mem, mem->vmp_q_state);
2101 if (VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr) {
2102 /* guard pages are not wired */
2103 } else {
2104 assertf(VM_PAGE_WIRED(mem),
2105 "mem %p qstate %d wirecount %d",
2106 mem,
2107 mem->vmp_q_state,
2108 mem->vmp_wire_count);
2109 assertf(mem->vmp_wire_count >= 1,
2110 "mem %p wirecount %d",
2111 mem, mem->vmp_wire_count);
2112 mem->vmp_wire_count++;
2113 }
2114 }
2115 }
2116
2117 for (vm_object_offset_t offset = oldsize - guard_right_size;
2118 offset < newsize - guard_right_size;
2119 offset += PAGE_SIZE_64) {
2120 vm_page_t mem = page_list;
2121
2122 page_list = mem->vmp_snext;
2123 mem->vmp_snext = VM_PAGE_NULL;
2124 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
2125 assert(!VM_PAGE_PAGEABLE(mem));
2126
2127 vm_page_insert(mem, object, offset);
2128 mem->vmp_busy = false;
2129 }
2130
2131 if (guard_right) {
2132 vm_page_insert(guard_right, object, newsize - PAGE_SIZE);
2133 guard_right->vmp_busy = false;
2134 }
2135
2136 vm_object_unlock(object);
2137 }
2138
2139 /*
2140 * Mark the entry as idle again,
2141 * and honor KMR_FREEOLD if needed.
2142 */
2143
2144 vm_map_lock(map);
2145 if (last_timestamp + 1 != map->timestamp &&
2146 !vm_map_lookup_entry(map, oldaddr, &oldentry)) {
2147 __kmem_entry_not_found_panic(map, req_oldaddr);
2148 }
2149
2150 if (flags & KMR_KOBJECT) {
2151 assert(oldentry->in_transition);
2152 oldentry->in_transition = false;
2153 if (oldentry->needs_wakeup) {
2154 needs_wakeup = true;
2155 oldentry->needs_wakeup = false;
2156 }
2157 }
2158
2159 if (flags & KMR_FREEOLD) {
2160 vmr_flags_t vmr_flags = VM_MAP_REMOVE_KUNWIRE;
2161
2162 #if KASAN_CLASSIC
2163 if (flags & KMR_KASAN_GUARD) {
2164 kasan_poison_range(oldaddr, oldsize, ASAN_VALID);
2165 }
2166 #endif
2167 #if KASAN_TBI
2168 if (flags & KMR_TAG) {
2169 kasan_tbi_mark_free_space(req_oldaddr, oldsize);
2170 }
2171 #endif /* KASAN_TBI */
2172 if (flags & KMR_GUARD_LAST) {
2173 vmr_flags |= VM_MAP_REMOVE_NOKUNWIRE_LAST;
2174 }
2175 (void)vm_map_remove_and_unlock(map,
2176 oldaddr, oldaddr + oldsize,
2177 vmr_flags, guard);
2178 } else {
2179 vm_map_unlock(map);
2180 }
2181
2182 if ((flags & KMR_KOBJECT) == 0) {
2183 kern_return_t kr;
2184 /*
2185 * This must happen _after_ we do the KMR_FREEOLD,
2186 * because wiring the pages will call into the pmap,
2187 * and if the pages are typed XNU_KERNEL_RESTRICTED,
2188 * this would cause a second mapping of the page and panic.
2189 */
2190 kr = vm_map_wire_kernel(map,
2191 vm_sanitize_wrap_addr(newaddr),
2192 vm_sanitize_wrap_addr(newaddr + newsize),
2193 vm_sanitize_wrap_prot(VM_PROT_DEFAULT),
2194 guard.kmg_tag, FALSE);
2195 assert(kr == KERN_SUCCESS);
2196
2197 if (flags & KMR_FREEOLD) {
2198 /*
2199 * Undo the extra "wiring" we made above
2200 * and release the extra reference we took
2201 * on the object.
2202 */
2203 vm_object_lock(object);
2204 for (vm_object_offset_t offset = 0;
2205 offset < oldsize - guard_right_size;
2206 offset += PAGE_SIZE_64) {
2207 vm_page_t mem;
2208
2209 mem = vm_page_lookup(object, offset);
2210 assert(mem != VM_PAGE_NULL);
2211 assertf(!VM_PAGE_PAGEABLE(mem),
2212 "mem %p qstate %d",
2213 mem, mem->vmp_q_state);
2214 if (VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr) {
2215 /* guard pages are not wired */
2216 } else {
2217 assertf(VM_PAGE_WIRED(mem),
2218 "mem %p qstate %d wirecount %d",
2219 mem,
2220 mem->vmp_q_state,
2221 mem->vmp_wire_count);
2222 assertf(mem->vmp_wire_count >= 2,
2223 "mem %p wirecount %d",
2224 mem, mem->vmp_wire_count);
2225 mem->vmp_wire_count--;
2226 assert(VM_PAGE_WIRED(mem));
2227 assert(mem->vmp_wire_count >= 1);
2228 }
2229 }
2230 vm_object_unlock(object);
2231 vm_object_deallocate(object); /* release extra ref */
2232 }
2233 }
2234
2235 if (needs_wakeup) {
2236 vm_map_entry_wakeup(map);
2237 }
2238
2239 #if DEBUG || DEVELOPMENT
2240 VM_DEBUG_CONSTANT_EVENT(vm_kern_request, DBG_VM_KERN_REQUEST, DBG_FUNC_END,
2241 atop(newsize - oldsize), 0, 0, 0);
2242 #endif /* DEBUG || DEVELOPMENT */
2243 kmr.kmr_address = newaddr;
2244
2245 #if KASAN
2246 kasan_notify_address(kmr.kmr_address, newsize);
2247 #endif /* KASAN */
2248 #if KASAN_CLASSIC
2249 if (flags & KMR_KASAN_GUARD) {
2250 kmr.kmr_address += PAGE_SIZE;
2251 kasan_alloc_large(kmr.kmr_address, req_newsize);
2252 }
2253 #endif /* KASAN_CLASSIC */
2254 #if KASAN_TBI
2255 if (flags & KMR_TAG) {
2256 kmr.kmr_address = vm_memtag_assign_tag(kmr.kmr_address, req_newsize);
2257 vm_memtag_set_tag(kmr.kmr_address, req_newsize);
2258 kasan_tbi_retag_unused_space(kmr.kmr_address, newsize, req_newsize);
2259 }
2260 #endif /* KASAN_TBI */
2261
2262 return kmr;
2263 }
2264
2265 #pragma mark map/remap/wire
2266
2267 kern_return_t
2268 mach_vm_map_kernel(
2269 vm_map_t target_map,
2270 mach_vm_offset_ut *address,
2271 mach_vm_size_ut initial_size,
2272 mach_vm_offset_ut mask,
2273 vm_map_kernel_flags_t vmk_flags,
2274 ipc_port_t port,
2275 memory_object_offset_ut offset,
2276 boolean_t copy,
2277 vm_prot_ut cur_protection,
2278 vm_prot_ut max_protection,
2279 vm_inherit_ut inheritance)
2280 {
2281 /* range_id is set by vm_map_enter_mem_object */
2282 return vm_map_enter_mem_object(target_map,
2283 address,
2284 initial_size,
2285 mask,
2286 vmk_flags,
2287 port,
2288 offset,
2289 copy,
2290 cur_protection,
2291 max_protection,
2292 inheritance,
2293 NULL,
2294 0);
2295 }
2296
2297 kern_return_t
2298 mach_vm_remap_new_kernel(
2299 vm_map_t target_map,
2300 mach_vm_offset_ut *address,
2301 mach_vm_size_ut size,
2302 mach_vm_offset_ut mask,
2303 vm_map_kernel_flags_t vmk_flags,
2304 vm_map_t src_map,
2305 mach_vm_offset_ut memory_address,
2306 boolean_t copy,
2307 vm_prot_ut *cur_protection, /* IN/OUT */
2308 vm_prot_ut *max_protection, /* IN/OUT */
2309 vm_inherit_ut inheritance)
2310 {
2311 if (!vm_map_kernel_flags_check_vm_and_kflags(vmk_flags,
2312 VM_FLAGS_USER_REMAP)) {
2313 return KERN_INVALID_ARGUMENT;
2314 }
2315
2316
2317 vmk_flags.vmf_return_data_addr = true;
2318
2319 /* range_id is set by vm_map_remap */
2320 return vm_map_remap(target_map,
2321 address,
2322 size,
2323 mask,
2324 vmk_flags,
2325 src_map,
2326 memory_address,
2327 copy,
2328 cur_protection,
2329 max_protection,
2330 inheritance);
2331 }
2332
2333 #pragma mark free
2334
2335 #if KASAN
2336
2337 __abortlike
2338 static void
2339 __kmem_free_invalid_object_size_panic(
2340 vm_map_t map,
2341 vm_address_t address,
2342 vm_size_t size,
2343 vm_map_entry_t entry)
2344 {
2345 vm_object_t object = VME_OBJECT(entry);
2346 vm_size_t objsize = __kmem_entry_orig_size(entry);
2347
2348 panic("kmem_free(map=%p, addr=%p, size=%zd, entry=%p): "
2349 "object %p has unexpected size %ld",
2350 map, (void *)address, (size_t)size, entry, object, objsize);
2351 }
2352
2353 #endif /* KASAN */
2354
2355 vm_size_t
2356 kmem_free_guard(
2357 vm_map_t map,
2358 vm_offset_t req_addr,
2359 vm_size_t req_size,
2360 kmf_flags_t flags,
2361 kmem_guard_t guard)
2362 {
2363 vmr_flags_t vmr_flags = VM_MAP_REMOVE_KUNWIRE;
2364 vm_address_t addr = req_addr;
2365 vm_offset_t delta = 0;
2366 vm_size_t size;
2367 #if KASAN
2368 vm_map_entry_t entry;
2369 #endif /* KASAN */
2370
2371 assert(map->pmap == kernel_pmap);
2372
2373 #if KASAN_CLASSIC
2374 if (flags & KMF_KASAN_GUARD) {
2375 addr -= PAGE_SIZE;
2376 delta = ptoa(2);
2377 }
2378 #endif /* KASAN_CLASSIC */
2379 #if CONFIG_KERNEL_TAGGING
2380 if (flags & KMF_TAG) {
2381 vm_memtag_verify_tag(req_addr);
2382 addr = vm_memtag_canonicalize_address(req_addr);
2383 }
2384 #endif /* CONFIG_KERNEL_TAGGING */
2385
2386 if (flags & KMF_GUESS_SIZE) {
2387 vmr_flags |= VM_MAP_REMOVE_GUESS_SIZE;
2388 size = PAGE_SIZE;
2389 } else if (req_size == 0) {
2390 __kmem_invalid_size_panic(map, req_size, flags);
2391 } else {
2392 size = round_page(req_size) + delta;
2393 }
2394
2395 vm_map_lock(map);
2396
2397 #if KASAN
2398 if (!vm_map_lookup_entry(map, addr, &entry)) {
2399 __kmem_entry_not_found_panic(map, req_addr);
2400 }
2401 if (flags & KMF_GUESS_SIZE) {
2402 vmr_flags &= ~VM_MAP_REMOVE_GUESS_SIZE;
2403 req_size = __kmem_entry_orig_size(entry);
2404 size = round_page(req_size + delta);
2405 } else if (guard.kmg_atomic && entry->vme_kernel_object &&
2406 __kmem_entry_orig_size(entry) != req_size) {
2407 /*
2408 * We can't make a strict check for regular
2409 * VM objects because it could be:
2410 *
2411 * - the kmem_free_guard() of a kmem_realloc_guard() without
2412 * KMR_FREEOLD, and in that case the object size won't match.
2413 *
2414 * - a submap, in which case there is no "orig size".
2415 */
2416 __kmem_free_invalid_object_size_panic(map,
2417 req_addr, req_size + delta, entry);
2418 }
2419 #endif /* KASAN */
2420 #if KASAN_CLASSIC
2421 if (flags & KMR_KASAN_GUARD) {
2422 kasan_poison_range(addr, size, ASAN_VALID);
2423 }
2424 #endif
2425 #if KASAN_TBI
2426 if (flags & KMF_TAG) {
2427 kasan_tbi_mark_free_space(req_addr, size);
2428 }
2429 #endif /* KASAN_TBI */
2430
2431 /*
2432 * vm_map_remove_and_unlock is called with VM_MAP_REMOVE_KUNWIRE, which
2433 * unwires the kernel mapping. The page won't be mapped any longer, so
2434 * no extra step is required for memory tagging to "clear"
2435 * it -- the page will be laundered later, when it is reused.
2436 */
2437 return vm_map_remove_and_unlock(map, addr, addr + size,
2438 vmr_flags, guard).kmr_size - delta;
2439 }
2440
2441 __exported void
2442 kmem_free_external(
2443 vm_map_t map,
2444 vm_offset_t addr,
2445 vm_size_t size);
2446 void
2447 kmem_free_external(
2448 vm_map_t map,
2449 vm_offset_t addr,
2450 vm_size_t size)
2451 {
2452 if (size) {
2453 kmem_free(map, trunc_page(addr), size);
2454 #if MACH_ASSERT
2455 } else {
2456 printf("kmem_free(map=%p, addr=%p) called with size=0, lr: %p\n",
2457 map, (void *)addr, __builtin_return_address(0));
2458 #endif
2459 }
2460 }
2461
2462 #pragma mark kmem metadata
2463
2464 /*
2465 * Guard objects for kmem pointer allocation:
2466 *
2467 * Guard objects introduce sizeclass slabs to kmem pointer allocations:
2468 * allocations are carved out of chunks of n * sizeclass. When an
2469 * allocation of a specific sizeclass is requested, a random free slot
2470 * from [0, n) is returned. Allocations keep being served from that chunk
2471 * until only m slots remain. Those remaining m slots are the guard
2472 * objects: they are never handed out, and the chunk is then considered
2473 * full. When an allocation is freed back to the chunk, one of the now
2474 * m + 1 free slots can again be handed out to the next allocation of
2475 * that sizeclass.
2476 *
2477 * Guard objects are intended to make exploitation of use-after-frees
2478 * harder, as freed allocations can no longer be reliably reallocated.
2479 * They also make exploitation of OOBs harder, as overflowing out of an
2480 * allocation can no longer be safe even with sufficient spraying.
 */
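/*
 * For illustration (assuming a 16K-page config, where KMEM_CHUNK_SIZE_MIN
 * is 16 * 16K = 256K): the 16K sizeclass allocates single chunks of
 * n = 16 slots, and with KMEM_NUM_GUARDS = 2 a chunk is considered full
 * once only m = 2 free slots remain, so at most 14 of its slots are ever
 * handed out at the same time.
 */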
2481
2482 #define KMEM_META_PRIMARY UINT8_MAX
2483 #define KMEM_META_START (UINT8_MAX - 1)
2484 #define KMEM_META_FREE (UINT8_MAX - 2)
2485 #if __ARM_16K_PG__
2486 #define KMEM_MIN_SIZE PAGE_SIZE
2487 #define KMEM_CHUNK_SIZE_MIN (KMEM_MIN_SIZE * 16)
2488 #else /* __ARM_16K_PG__ */
2489 /*
2490 * PAGE_SIZE isn't a compile time constant on some arm64 devices. Those
2491 * devices use 4k page size when their RAM is <= 1GB and 16k otherwise.
2492 * Therefore populate sizeclasses from 4k for those devices.
2493 */
2494 #define KMEM_MIN_SIZE (4 * 1024)
2495 #define KMEM_CHUNK_SIZE_MIN (KMEM_MIN_SIZE * 32)
2496 #endif /* __ARM_16K_PG__ */
2497 #define KMEM_MAX_SIZE (32ULL << 20)
2498 #define KMEM_START_IDX (kmem_log2down(KMEM_MIN_SIZE))
2499 #define KMEM_LAST_IDX (kmem_log2down(KMEM_MAX_SIZE))
2500 #define KMEM_NUM_SIZECLASS (KMEM_LAST_IDX - KMEM_START_IDX + 1)
2501 #define KMEM_FRONTS (KMEM_RANGE_ID_NUM_PTR * 2)
2502 #define KMEM_NUM_GUARDS 2
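/*
 * With the definitions above, the pointer sizeclasses are the powers of two
 * from KMEM_MIN_SIZE up to KMEM_MAX_SIZE (32MB): 12 sizeclasses on a
 * 16K-page config, 14 when starting from 4K.
 */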
2503
2504 struct kmem_page_meta {
2505 union {
2506 /*
2507 * On primary allocated chunk with KMEM_META_PRIMARY marker
2508 */
2509 uint32_t km_bitmap;
2510 /*
2511 * On start and end of free chunk with KMEM_META_FREE marker
2512 */
2513 uint32_t km_free_chunks;
2514 };
2515 /*
2516 * KMEM_META_PRIMARY: Start meta of allocated chunk
2517 * KMEM_META_FREE : Start and end meta of free chunk
2518 * KMEM_META_START : Meta region start and end
2519 */
2520 uint8_t km_page_marker;
2521 uint8_t km_sizeclass;
2522 union {
2523 /*
2524 * On primary allocated chunk with KMEM_META_PRIMARY marker
2525 */
2526 uint16_t km_chunk_len;
2527 /*
2528 * On secondary allocated chunks
2529 */
2530 uint16_t km_page_idx;
2531 };
2532 LIST_ENTRY(kmem_page_meta) km_link;
2533 } kmem_page_meta_t;
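/*
 * Only the first metadata of an allocated chunk (marked KMEM_META_PRIMARY)
 * holds the slot bitmap and the chunk length; the trailing metadata of the
 * chunk only record the sizeclass and their index back to that primary
 * entry.
 */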
2534
2535 typedef LIST_HEAD(kmem_list_head, kmem_page_meta) kmem_list_head_t;
2536 struct kmem_sizeclass {
2537 vm_map_size_t ks_size;
2538 uint32_t ks_num_chunk;
2539 uint32_t ks_num_elem;
2540 crypto_random_ctx_t __zpercpu ks_rng_ctx;
2541 kmem_list_head_t ks_allfree_head[KMEM_FRONTS];
2542 kmem_list_head_t ks_partial_head[KMEM_FRONTS];
2543 kmem_list_head_t ks_full_head[KMEM_FRONTS];
2544 };
2545
2546 static struct kmem_sizeclass kmem_size_array[KMEM_NUM_SIZECLASS];
2547
2548 /*
2549 * Locks to synchronize metadata population
2550 */
2551 static LCK_GRP_DECLARE(kmem_locks_grp, "kmem_locks");
2552 static LCK_MTX_DECLARE(kmem_meta_region_lck, &kmem_locks_grp);
2553 #define kmem_meta_lock() lck_mtx_lock(&kmem_meta_region_lck)
2554 #define kmem_meta_unlock() lck_mtx_unlock(&kmem_meta_region_lck)
2555
2556 static SECURITY_READ_ONLY_LATE(struct mach_vm_range)
2557 kmem_meta_range[KMEM_RANGE_ID_NUM_PTR + 1];
2558 static SECURITY_READ_ONLY_LATE(struct kmem_page_meta *)
2559 kmem_meta_base[KMEM_RANGE_ID_NUM_PTR + 1];
2560 /*
2561 * Keeps track of metadata high water mark for each front
2562 */
2563 static struct kmem_page_meta *kmem_meta_hwm[KMEM_FRONTS];
2564 static SECURITY_READ_ONLY_LATE(vm_map_t)
2565 kmem_meta_map[KMEM_RANGE_ID_NUM_PTR + 1];
2566 static vm_map_size_t kmem_meta_size;
2567
2568 static uint32_t
2569 kmem_get_front(
2570 kmem_range_id_t range_id,
2571 bool from_right)
2572 {
2573 assert((range_id >= KMEM_RANGE_ID_FIRST) &&
2574 (range_id <= KMEM_RANGE_ID_NUM_PTR));
2575 return (range_id - KMEM_RANGE_ID_FIRST) * 2 + from_right;
2576 }
2577
2578 static inline uint32_t
2579 kmem_slot_idx_to_bit(
2580 uint32_t slot_idx,
2581 uint32_t size_idx __unused)
2582 {
2583 assert(slot_idx < kmem_size_array[size_idx].ks_num_elem);
2584 return 1ull << slot_idx;
2585 }
2586
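/*
 * Map an allocation size to its sizeclass index: the smallest power of two
 * >= size. E.g. (assuming a 16K-page config, KMEM_START_IDX == 14),
 * kmem_get_idx_from_size(16K) == 0 and kmem_get_idx_from_size(20K) == 1,
 * i.e. a 20K request is served from the 32K sizeclass.
 */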
2587 static uint32_t
2588 kmem_get_idx_from_size(vm_map_size_t size)
2589 {
2590 assert(size >= KMEM_MIN_SIZE && size <= KMEM_MAX_SIZE);
2591 return kmem_log2down(size - 1) - KMEM_START_IDX + 1;
2592 }
2593
2594 __abortlike
2595 static void
2596 kmem_invalid_size_idx(uint32_t idx)
2597 {
2598 panic("Invalid sizeclass idx %u", idx);
2599 }
2600
2601 static vm_map_size_t
2602 kmem_get_size_from_idx(uint32_t idx)
2603 {
2604 if (__improbable(idx >= KMEM_NUM_SIZECLASS)) {
2605 kmem_invalid_size_idx(idx);
2606 }
2607 return 1ul << (idx + KMEM_START_IDX);
2608 }
2609
2610 static inline uint16_t
2611 kmem_get_page_idx(struct kmem_page_meta *meta)
2612 {
2613 uint8_t page_marker = meta->km_page_marker;
2614
2615 return (page_marker == KMEM_META_PRIMARY) ? 0 : meta->km_page_idx;
2616 }
2617
2618 __abortlike
2619 static void
2620 kmem_invalid_chunk_len(struct kmem_page_meta *meta)
2621 {
2622 panic("Reading free chunks for meta %p where marker != KMEM_META_PRIMARY",
2623 meta);
2624 }
2625
2626 static inline uint16_t
2627 kmem_get_chunk_len(struct kmem_page_meta *meta)
2628 {
2629 if (__improbable(meta->km_page_marker != KMEM_META_PRIMARY)) {
2630 kmem_invalid_chunk_len(meta);
2631 }
2632
2633 return meta->km_chunk_len;
2634 }
2635
2636 __abortlike
2637 static void
2638 kmem_invalid_free_chunk_len(struct kmem_page_meta *meta)
2639 {
2640 panic("Reading free chunks for meta %p where marker != KMEM_META_FREE",
2641 meta);
2642 }
2643
2644 static inline uint32_t
2645 kmem_get_free_chunk_len(struct kmem_page_meta *meta)
2646 {
2647 if (__improbable(meta->km_page_marker != KMEM_META_FREE)) {
2648 kmem_invalid_free_chunk_len(meta);
2649 }
2650
2651 return meta->km_free_chunks;
2652 }
2653
2654 /*
2655 * Return the metadata corresponding to the specified address
2656 */
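/*
 * The metadata array is parallel to the pointer range: chunk N of the range
 * (counted in KMEM_CHUNK_SIZE_MIN units) is described by
 * kmem_meta_base[range_id][N], so the lookup below is pure arithmetic.
 */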
2657 static struct kmem_page_meta *
2658 kmem_addr_to_meta(
2659 vm_map_offset_t addr,
2660 vm_map_range_id_t range_id,
2661 vm_map_offset_t *range_start,
2662 uint64_t *meta_idx)
2663 {
2664 struct kmem_page_meta *meta_base = kmem_meta_base[range_id];
2665
2666 *range_start = kmem_ranges[range_id].min_address;
2667 *meta_idx = (addr - *range_start) / KMEM_CHUNK_SIZE_MIN;
2668 return &meta_base[*meta_idx];
2669 }
2670
2671 /*
2672 * Return the metadata start of the chunk that the address belongs to
2673 */
2674 static struct kmem_page_meta *
2675 kmem_addr_to_meta_start(
2676 vm_address_t addr,
2677 vm_map_range_id_t range_id,
2678 vm_map_offset_t *chunk_start)
2679 {
2680 vm_map_offset_t range_start;
2681 uint64_t meta_idx;
2682 struct kmem_page_meta *meta;
2683
2684 meta = kmem_addr_to_meta(addr, range_id, &range_start, &meta_idx);
2685 meta_idx -= kmem_get_page_idx(meta);
2686 meta -= kmem_get_page_idx(meta);
2687 assert(meta->km_page_marker == KMEM_META_PRIMARY);
2688 *chunk_start = range_start + (meta_idx * KMEM_CHUNK_SIZE_MIN);
2689 return meta;
2690 }
2691
2692 __startup_func
2693 static void
2694 kmem_init_meta_front(
2695 struct kmem_page_meta *meta,
2696 kmem_range_id_t range_id,
2697 bool from_right)
2698 {
2699 kernel_memory_populate(trunc_page((vm_map_offset_t) meta), PAGE_SIZE,
2700 KMA_KOBJECT | KMA_ZERO | KMA_NOFAIL, VM_KERN_MEMORY_OSFMK);
2701 meta->km_page_marker = KMEM_META_START;
2702 if (!from_right) {
2703 meta++;
2704 kmem_meta_base[range_id] = meta;
2705 }
2706 kmem_meta_hwm[kmem_get_front(range_id, from_right)] = meta;
2707 }
2708
2709 __startup_func
2710 static void
2711 kmem_metadata_init(void)
2712 {
2713 for (kmem_range_id_t i = KMEM_RANGE_ID_FIRST; i <= kmem_ptr_ranges; i++) {
2714 vm_map_offset_t addr = kmem_meta_range[i].min_address;
2715 struct kmem_page_meta *meta;
2716 uint64_t meta_idx;
2717
2718 vm_map_will_allocate_early_map(&kmem_meta_map[i]);
2719 kmem_meta_map[i] = kmem_suballoc(kernel_map, &addr, kmem_meta_size,
2720 VM_MAP_CREATE_NEVER_FAULTS | VM_MAP_CREATE_DISABLE_HOLELIST,
2721 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, KMS_PERMANENT | KMS_NOFAIL,
2722 VM_KERN_MEMORY_OSFMK).kmr_submap;
2723
2724 kmem_meta_range[i].min_address = addr;
2725 kmem_meta_range[i].max_address = addr + kmem_meta_size;
2726
2727 meta = (struct kmem_page_meta *) kmem_meta_range[i].min_address;
2728 kmem_init_meta_front(meta, i, 0);
2729
2730 meta = kmem_addr_to_meta(kmem_ranges[i].max_address, i, &addr,
2731 &meta_idx);
2732 kmem_init_meta_front(meta, i, 1);
2733 }
2734 }
2735
2736 __startup_func
2737 static void
2738 kmem_init_front_head(
2739 struct kmem_sizeclass *ks,
2740 uint32_t front)
2741 {
2742 LIST_INIT(&ks->ks_allfree_head[front]);
2743 LIST_INIT(&ks->ks_partial_head[front]);
2744 LIST_INIT(&ks->ks_full_head[front]);
2745 }
2746
2747 __startup_func
2748 static void
2749 kmem_sizeclass_init(void)
2750 {
2751 for (uint32_t i = 0; i < KMEM_NUM_SIZECLASS; i++) {
2752 struct kmem_sizeclass *ks = &kmem_size_array[i];
2753 kmem_range_id_t range_id = KMEM_RANGE_ID_FIRST;
2754
2755 ks->ks_size = kmem_get_size_from_idx(i);
2756 ks->ks_num_chunk = roundup(8 * ks->ks_size, KMEM_CHUNK_SIZE_MIN) /
2757 KMEM_CHUNK_SIZE_MIN;
2758 ks->ks_num_elem = (ks->ks_num_chunk * KMEM_CHUNK_SIZE_MIN) / ks->ks_size;
2759 assert(ks->ks_num_elem <=
2760 (sizeof(((struct kmem_page_meta *)0)->km_bitmap) * 8));
2761 for (; range_id <= KMEM_RANGE_ID_NUM_PTR; range_id++) {
2762 kmem_init_front_head(ks, kmem_get_front(range_id, 0));
2763 kmem_init_front_head(ks, kmem_get_front(range_id, 1));
2764 }
2765 }
2766 }
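/*
 * For example, with KMEM_MIN_SIZE = 4K (so KMEM_CHUNK_SIZE_MIN = 128K),
 * the loop above gives the 4K sizeclass ks_num_chunk = 1 and
 * ks_num_elem = 32 (which is why ks_num_elem must fit in the 32-bit
 * km_bitmap), while the 1MB sizeclass gets ks_num_chunk = 64 (8MB)
 * and ks_num_elem = 8.
 */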
2767
2768 /*
2769 * This is done during EARLY_BOOT as it needs the corecrypto module to be
2770 * set up.
2771 */
2772 __startup_func
2773 static void
2774 kmem_crypto_init(void)
2775 {
2776 vm_size_t ctx_size = crypto_random_kmem_ctx_size();
2777
2778 for (uint32_t i = 0; i < KMEM_NUM_SIZECLASS; i++) {
2779 struct kmem_sizeclass *ks = &kmem_size_array[i];
2780
2781 ks->ks_rng_ctx = zalloc_percpu_permanent(ctx_size, ZALIGN_PTR);
2782 zpercpu_foreach(ctx, ks->ks_rng_ctx) {
2783 crypto_random_kmem_init(ctx);
2784 }
2785 }
2786 }
2787 STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, kmem_crypto_init);
2788
2789 __abortlike
2790 static void
2791 kmem_validate_slot_panic(
2792 vm_map_offset_t addr,
2793 struct kmem_page_meta *meta,
2794 uint32_t slot_idx,
2795 uint32_t size_idx)
2796 {
2797 if (meta->km_page_marker != KMEM_META_PRIMARY) {
2798 panic("Metadata (%p) for addr (%p) not primary", meta, (void *)addr);
2799 }
2800 if (meta->km_sizeclass != size_idx) {
2801 panic("Metadata's (%p) sizeclass (%u != %u) changed during deletion",
2802 meta, meta->km_sizeclass, size_idx);
2803 }
2804 panic("Double free detected: Slot (%u) in meta (%p) for addr %p marked free",
2805 slot_idx, meta, (void *)addr);
2806 }
2807
2808 __abortlike
2809 static void
2810 kmem_invalid_slot_for_addr(
2811 mach_vm_range_t slot,
2812 vm_map_offset_t start,
2813 vm_map_offset_t end)
2814 {
2815 panic("Invalid kmem ptr slot [%p:%p] for allocation [%p:%p]",
2816 (void *)slot->min_address, (void *)slot->max_address,
2817 (void *)start, (void *)end);
2818 }
2819
2820 void
2821 kmem_validate_slot(
2822 vm_map_offset_t addr,
2823 struct kmem_page_meta *meta,
2824 uint32_t size_idx,
2825 uint32_t slot_idx)
2826 {
2827 if ((meta->km_page_marker != KMEM_META_PRIMARY) ||
2828 (meta->km_sizeclass != size_idx) ||
2829 ((meta->km_bitmap & kmem_slot_idx_to_bit(slot_idx, size_idx)) != 0)) {
2830 kmem_validate_slot_panic(addr, meta, size_idx, slot_idx);
2831 }
2832 }
2833
2834 static void
2835 kmem_validate_slot_initial(
2836 mach_vm_range_t slot,
2837 vm_map_offset_t start,
2838 vm_map_offset_t end,
2839 struct kmem_page_meta *meta,
2840 uint32_t size_idx,
2841 uint32_t slot_idx)
2842 {
2843 if ((slot->min_address == 0) || (slot->max_address == 0) ||
2844 (start < slot->min_address) || (start >= slot->max_address) ||
2845 (end > slot->max_address)) {
2846 kmem_invalid_slot_for_addr(slot, start, end);
2847 }
2848
2849 kmem_validate_slot(start, meta, size_idx, slot_idx);
2850 }
2851
2852 uint32_t
2853 kmem_addr_get_slot_idx(
2854 vm_map_offset_t start,
2855 vm_map_offset_t end,
2856 vm_map_range_id_t range_id,
2857 struct kmem_page_meta **meta,
2858 uint32_t *size_idx,
2859 mach_vm_range_t slot)
2860 {
2861 vm_map_offset_t chunk_start;
2862 vm_map_size_t slot_size;
2863 uint32_t slot_idx;
2864
2865 *meta = kmem_addr_to_meta_start(start, range_id, &chunk_start);
2866 *size_idx = (*meta)->km_sizeclass;
2867 slot_size = kmem_get_size_from_idx(*size_idx);
2868 slot_idx = (start - chunk_start) / slot_size;
2869 slot->min_address = chunk_start + slot_idx * slot_size;
2870 slot->max_address = slot->min_address + slot_size;
2871
2872 kmem_validate_slot_initial(slot, start, end, *meta, *size_idx, slot_idx);
2873
2874 return slot_idx;
2875 }
2876
2877 static bool
2878 kmem_populate_needed(vm_offset_t from, vm_offset_t to)
2879 {
2880 #if KASAN
2881 #pragma unused(from, to)
2882 return true;
2883 #else
2884 vm_offset_t page_addr = trunc_page(from);
2885
2886 for (; page_addr < to; page_addr += PAGE_SIZE) {
2887 /*
2888 * This can race with another thread doing a populate on the same metadata
2889 * page, where we see an updated pmap but unmapped KASan shadow, causing a
2890 * fault in the shadow when we first access the metadata page. Avoid this
2891 * by always synchronizing on the kmem_meta_lock with KASan.
2892 */
2893 if (!pmap_find_phys(kernel_pmap, page_addr)) {
2894 return true;
2895 }
2896 }
2897
2898 return false;
2899 #endif /* !KASAN */
2900 }
2901
2902 static void
2903 kmem_populate_meta_locked(vm_offset_t from, vm_offset_t to)
2904 {
2905 vm_offset_t page_addr = trunc_page(from);
2906
2907 vm_map_unlock(kernel_map);
2908
2909 for (; page_addr < to; page_addr += PAGE_SIZE) {
2910 for (;;) {
2911 kern_return_t ret = KERN_SUCCESS;
2912
2913 /*
2914 * All updates to kmem metadata are done under the kmem_meta_lock
2915 */
2916 kmem_meta_lock();
2917 if (0 == pmap_find_phys(kernel_pmap, page_addr)) {
2918 ret = kernel_memory_populate(page_addr,
2919 PAGE_SIZE, KMA_NOPAGEWAIT | KMA_KOBJECT | KMA_ZERO,
2920 VM_KERN_MEMORY_OSFMK);
2921 }
2922 kmem_meta_unlock();
2923
2924 if (ret == KERN_SUCCESS) {
2925 break;
2926 }
2927
2928 /*
2929 * We can't block waiting for pages under a global lock as it
2930 * leads to bad system deadlocks, hence KMA_NOPAGEWAIT above; if
2931 * the allocation failed, do the VM_PAGE_WAIT() outside of the lock.
2932 */
2933 VM_PAGE_WAIT();
2934 }
2935 }
2936
2937 vm_map_lock(kernel_map);
2938 }
2939
2940 __abortlike
2941 static void
2942 kmem_invalid_meta_panic(
2943 struct kmem_page_meta *meta,
2944 uint32_t slot_idx,
2945 struct kmem_sizeclass sizeclass)
2946 {
2947 uint32_t size_idx = kmem_get_idx_from_size(sizeclass.ks_size);
2948
2949 if (slot_idx >= sizeclass.ks_num_elem) {
2950 panic("Invalid slot idx %u [0:%u] for meta %p", slot_idx,
2951 sizeclass.ks_num_elem, meta);
2952 }
2953 if (meta->km_sizeclass != size_idx) {
2954 panic("Invalid size_idx (%u != %u) in meta %p", size_idx,
2955 meta->km_sizeclass, meta);
2956 }
2957 panic("page_marker %u not primary in meta %p", meta->km_page_marker, meta);
2958 }
2959
2960 __abortlike
2961 static void
2962 kmem_slot_has_entry_panic(
2963 vm_map_entry_t entry,
2964 vm_map_offset_t addr)
2965 {
2966 panic("Entry (%p) already exists for addr (%p) being returned",
2967 entry, (void *)addr);
2968 }
2969
2970 __abortlike
2971 static void
2972 kmem_slot_not_found(
2973 struct kmem_page_meta *meta,
2974 uint32_t slot_idx)
2975 {
2976 panic("%uth free slot not found for meta %p bitmap %u", slot_idx, meta,
2977 meta->km_bitmap);
2978 }
2979
2980 /*
2981 * Returns a 16-bit random number between 0 and
2982 * upper_limit (inclusive)
2983 */
2984 __startup_func
2985 uint16_t
2986 kmem_get_random16(
2987 uint16_t upper_limit)
2988 {
2989 static uint64_t random_entropy;
2990 assert(upper_limit < UINT16_MAX);
2991 if (random_entropy == 0) {
2992 random_entropy = early_random();
2993 }
2994 uint32_t result = random_entropy & UINT32_MAX;
2995 random_entropy >>= 32;
2996 return (uint16_t)(result % (upper_limit + 1));
2997 }
2998
2999 static uint32_t
3000 kmem_get_nth_free_slot(
3001 struct kmem_page_meta *meta,
3002 uint32_t n,
3003 uint32_t bitmap)
3004 {
3005 uint32_t zeros_seen = 0, ones_seen = 0;
3006
3007 while (bitmap) {
3008 uint32_t count = __builtin_ctz(bitmap);
3009
3010 zeros_seen += count;
3011 bitmap >>= count;
3012 if (__probable(~bitmap)) {
3013 count = __builtin_ctz(~bitmap);
3014 } else {
3015 count = 32;
3016 }
3017 if (count + ones_seen > n) {
3018 return zeros_seen + n;
3019 }
3020 ones_seen += count;
3021 bitmap >>= count;
3022 }
3023
3024 kmem_slot_not_found(meta, n);
3025 }
3026
3027
3028 static uint32_t
3029 kmem_get_next_slot(
3030 struct kmem_page_meta *meta,
3031 struct kmem_sizeclass sizeclass,
3032 uint32_t bitmap)
3033 {
3034 uint32_t num_slots = __builtin_popcount(bitmap);
3035 uint64_t slot_idx = 0;
3036
3037 assert(num_slots > 0);
3038 if (__improbable(startup_phase < STARTUP_SUB_EARLY_BOOT)) {
3039 /*
3040 * Use early random prior to early boot as the ks_rng_ctx requires
3041 * the corecrypto module to be set up before it is initialized and
3042 * used.
3043 *
3044 * num_slots can't be 0 as we take this path when we have more than
3045 * one slot left.
3046 */
3047 slot_idx = kmem_get_random16((uint16_t)num_slots - 1);
3048 } else {
3049 crypto_random_uniform(zpercpu_get(sizeclass.ks_rng_ctx), num_slots,
3050 &slot_idx);
3051 }
3052
3053 return kmem_get_nth_free_slot(meta, slot_idx, bitmap);
3054 }
3055
3056 /*
3057 * Returns an unallocated slot from the given metadata
3058 */
3059 static vm_map_offset_t
3060 kmem_get_addr_from_meta(
3061 struct kmem_page_meta *meta,
3062 vm_map_range_id_t range_id,
3063 struct kmem_sizeclass sizeclass,
3064 vm_map_entry_t *entry)
3065 {
3066 vm_map_offset_t addr;
3067 vm_map_size_t size = sizeclass.ks_size;
3068 uint32_t size_idx = kmem_get_idx_from_size(size);
3069 uint64_t meta_idx = meta - kmem_meta_base[range_id];
3070 mach_vm_offset_t range_start = kmem_ranges[range_id].min_address;
3071 uint32_t slot_bit;
3072 uint32_t slot_idx = kmem_get_next_slot(meta, sizeclass, meta->km_bitmap);
3073
3074 if ((slot_idx >= sizeclass.ks_num_elem) ||
3075 (meta->km_sizeclass != size_idx) ||
3076 (meta->km_page_marker != KMEM_META_PRIMARY)) {
3077 kmem_invalid_meta_panic(meta, slot_idx, sizeclass);
3078 }
3079
3080 slot_bit = kmem_slot_idx_to_bit(slot_idx, size_idx);
3081 meta->km_bitmap &= ~slot_bit;
3082
3083 addr = range_start + (meta_idx * KMEM_CHUNK_SIZE_MIN) + (slot_idx * size);
3084 assert(kmem_range_contains_fully(range_id, addr, size));
3085 if (vm_map_lookup_entry(kernel_map, addr, entry)) {
3086 kmem_slot_has_entry_panic(*entry, addr);
3087 }
3088 if ((*entry != vm_map_to_entry(kernel_map)) &&
3089 ((*entry)->vme_next != vm_map_to_entry(kernel_map)) &&
3090 ((*entry)->vme_next->vme_start < (addr + size))) {
3091 kmem_slot_has_entry_panic(*entry, addr);
3092 }
3093 return addr;
3094 }
3095
3096 __abortlike
3097 static void
3098 kmem_range_out_of_va(
3099 kmem_range_id_t range_id,
3100 uint32_t num_chunks)
3101 {
3102 panic("No more VA to allocate %u chunks in range %u", num_chunks, range_id);
3103 }
3104
3105 static void
3106 kmem_init_allocated_chunk(
3107 struct kmem_page_meta *meta,
3108 struct kmem_sizeclass sizeclass,
3109 uint32_t size_idx)
3110 {
3111 uint32_t meta_num = sizeclass.ks_num_chunk;
3112 uint32_t num_elem = sizeclass.ks_num_elem;
3113
3114 meta->km_bitmap = (1ull << num_elem) - 1;
3115 meta->km_chunk_len = (uint16_t)meta_num;
3116 assert(LIST_NEXT(meta, km_link) == NULL);
3117 assert(meta->km_link.le_prev == NULL);
3118 meta->km_sizeclass = (uint8_t)size_idx;
3119 meta->km_page_marker = KMEM_META_PRIMARY;
3120 meta++;
3121 for (uint32_t i = 1; i < meta_num; i++) {
3122 meta->km_page_idx = (uint16_t)i;
3123 meta->km_sizeclass = (uint8_t)size_idx;
3124 meta->km_page_marker = 0;
3125 meta->km_bitmap = 0;
3126 meta++;
3127 }
3128 }
3129
3130 static uint32_t
3131 kmem_get_additional_meta(
3132 struct kmem_page_meta *meta,
3133 uint32_t meta_req,
3134 bool from_right,
3135 struct kmem_page_meta **adj_free_meta)
3136 {
3137 struct kmem_page_meta *meta_prev = from_right ? meta : (meta - 1);
3138
3139 if (meta_prev->km_page_marker == KMEM_META_FREE) {
3140 uint32_t chunk_len = kmem_get_free_chunk_len(meta_prev);
3141
3142 *adj_free_meta = from_right ? meta_prev : (meta_prev - chunk_len + 1);
3143 meta_req -= chunk_len;
3144 } else {
3145 *adj_free_meta = NULL;
3146 }
3147
3148 return meta_req;
3149 }
3150
3151
3152 static struct kmem_page_meta *
3153 kmem_get_new_chunk(
3154 vm_map_range_id_t range_id,
3155 bool from_right,
3156 uint32_t size_idx)
3157 {
3158 struct kmem_sizeclass sizeclass = kmem_size_array[size_idx];
3159 struct kmem_page_meta *start, *end, *meta_update;
3160 struct kmem_page_meta *adj_free_meta = NULL;
3161 uint32_t meta_req = sizeclass.ks_num_chunk;
3162
3163 for (;;) {
3164 struct kmem_page_meta *metaf = kmem_meta_hwm[kmem_get_front(range_id, 0)];
3165 struct kmem_page_meta *metab = kmem_meta_hwm[kmem_get_front(range_id, 1)];
3166 struct kmem_page_meta *meta;
3167 vm_offset_t start_addr, end_addr;
3168 uint32_t meta_num;
3169
3170 meta = from_right ? metab : metaf;
3171 meta_num = kmem_get_additional_meta(meta, meta_req, from_right,
3172 &adj_free_meta);
3173
3174 if (metaf + meta_num >= metab) {
3175 kmem_range_out_of_va(range_id, meta_num);
3176 }
3177
3178 start = from_right ? (metab - meta_num) : metaf;
3179 end = from_right ? metab : (metaf + meta_num);
3180
3181 start_addr = (vm_offset_t)start;
3182 end_addr = (vm_offset_t)end;
3183
3184 /*
3185 * If the new high watermark stays on the same page,
3186 * no need to populate and drop the lock.
3187 */
3188 if (!page_aligned(from_right ? end_addr : start_addr) &&
3189 trunc_page(start_addr) == trunc_page(end_addr - 1)) {
3190 break;
3191 }
3192 if (!kmem_populate_needed(start_addr, end_addr)) {
3193 break;
3194 }
3195
3196 kmem_populate_meta_locked(start_addr, end_addr);
3197
3198 /*
3199 * Since we dropped the lock, reassess whether the conditions still hold:
3200 * - the HWM we are changing must not have moved
3201 * - the other HWM must not intersect with ours
3202 * - in case of coalescing, the adjacent free meta must still
3203 *   be free and of the same size.
3204 *
3205 * If any of these no longer holds, return NULL so that the caller
3206 * reevaluates whether the freelists have entries now.
3207 */
3208 metaf = kmem_meta_hwm[kmem_get_front(range_id, 0)];
3209 metab = kmem_meta_hwm[kmem_get_front(range_id, 1)];
3210 if (meta != (from_right ? metab : metaf)) {
3211 return NULL;
3212 }
3213 if (metaf + meta_num >= metab) {
3214 kmem_range_out_of_va(range_id, meta_num);
3215 }
3216 if (adj_free_meta) {
3217 if (adj_free_meta->km_page_marker != KMEM_META_FREE ||
3218 kmem_get_free_chunk_len(adj_free_meta) !=
3219 meta_req - meta_num) {
3220 return NULL;
3221 }
3222 }
3223
3224 break;
3225 }
3226
3227 /*
3228 * If there is an adjacent free chunk, remove it from the free list
3229 */
3230 if (adj_free_meta) {
3231 LIST_REMOVE(adj_free_meta, km_link);
3232 LIST_NEXT(adj_free_meta, km_link) = NULL;
3233 adj_free_meta->km_link.le_prev = NULL;
3234 }
3235
3236 /*
3237 * Update hwm
3238 */
3239 meta_update = from_right ? start : end;
3240 kmem_meta_hwm[kmem_get_front(range_id, from_right)] = meta_update;
3241
3242 /*
3243 * Initialize metadata
3244 */
3245 start = from_right ? start : (end - meta_req);
3246 kmem_init_allocated_chunk(start, sizeclass, size_idx);
3247
3248 return start;
3249 }
3250
3251 static void
3252 kmem_requeue_meta(
3253 struct kmem_page_meta *meta,
3254 struct kmem_list_head *head)
3255 {
3256 LIST_REMOVE(meta, km_link);
3257 LIST_INSERT_HEAD(head, meta, km_link);
3258 }
3259
3260 /*
3261 * Return corresponding sizeclass to stash free chunks in
3262 */
3263 __abortlike
3264 static void
3265 kmem_invalid_chunk_num(uint32_t chunks)
3266 {
3267 panic("Invalid number of chunks %u\n", chunks);
3268 }
3269
3270 static uint32_t
3271 kmem_get_size_idx_for_chunks(uint32_t chunks)
3272 {
3273 for (uint32_t i = KMEM_NUM_SIZECLASS - 1; i > 0; i--) {
3274 if (chunks >= kmem_size_array[i].ks_num_chunk) {
3275 return i;
3276 }
3277 }
3278 kmem_invalid_chunk_num(chunks);
3279 }
3280
3281 static void
3282 kmem_clear_meta_range(struct kmem_page_meta *meta, uint32_t count)
3283 {
3284 bzero(meta, count * sizeof(struct kmem_page_meta));
3285 }
3286
3287 static void
3288 kmem_check_meta_range_is_clear(struct kmem_page_meta *meta, uint32_t count)
3289 {
3290 #if MACH_ASSERT
3291 size_t size = count * sizeof(struct kmem_page_meta);
3292
3293 assert(memcmp_zero_ptr_aligned(meta, size) == 0);
3294 #else
3295 #pragma unused(meta, count)
3296 #endif
3297 }
3298
3299 /*!
3300 * @function kmem_init_free_chunk()
3301 *
3302 * @discussion
3303 * This function prepares a range of chunks to be put on a free list.
3304 * The first and last metadata might be dirty, but the "inner" ones
3305 * must be zero filled by the caller prior to calling this function.
3306 */
3307 static void
3308 kmem_init_free_chunk(
3309 struct kmem_page_meta *meta,
3310 uint32_t num_chunks,
3311 uint32_t front)
3312 {
3313 struct kmem_sizeclass *sizeclass;
3314 uint32_t size_idx = kmem_get_size_idx_for_chunks(num_chunks);
3315
3316 if (num_chunks > 2) {
3317 kmem_check_meta_range_is_clear(meta + 1, num_chunks - 2);
3318 }
3319
3320 meta[0] = (struct kmem_page_meta){
3321 .km_free_chunks = num_chunks,
3322 .km_page_marker = KMEM_META_FREE,
3323 .km_sizeclass = (uint8_t)size_idx,
3324 };
3325 if (num_chunks > 1) {
3326 meta[num_chunks - 1] = (struct kmem_page_meta){
3327 .km_free_chunks = num_chunks,
3328 .km_page_marker = KMEM_META_FREE,
3329 .km_sizeclass = (uint8_t)size_idx,
3330 };
3331 }
3332
3333 sizeclass = &kmem_size_array[size_idx];
3334 LIST_INSERT_HEAD(&sizeclass->ks_allfree_head[front], meta, km_link);
3335 }
3336
3337 static struct kmem_page_meta *
3338 kmem_get_free_chunk_from_list(
3339 struct kmem_sizeclass *org_sizeclass,
3340 uint32_t size_idx,
3341 uint32_t front)
3342 {
3343 struct kmem_sizeclass *sizeclass;
3344 uint32_t num_chunks = org_sizeclass->ks_num_chunk;
3345 struct kmem_page_meta *meta;
3346 uint32_t idx = size_idx;
3347
3348 while (idx < KMEM_NUM_SIZECLASS) {
3349 sizeclass = &kmem_size_array[idx];
3350 meta = LIST_FIRST(&sizeclass->ks_allfree_head[front]);
3351 if (meta) {
3352 break;
3353 }
3354 idx++;
3355 }
3356
3357 /*
3358 * Trim if larger in size
3359 */
3360 if (meta) {
3361 uint32_t num_chunks_free = kmem_get_free_chunk_len(meta);
3362
3363 assert(meta->km_page_marker == KMEM_META_FREE);
3364 LIST_REMOVE(meta, km_link);
3365 LIST_NEXT(meta, km_link) = NULL;
3366 meta->km_link.le_prev = NULL;
3367 if (num_chunks_free > num_chunks) {
3368 num_chunks_free -= num_chunks;
3369 kmem_init_free_chunk(meta + num_chunks, num_chunks_free, front);
3370 }
3371
3372 kmem_init_allocated_chunk(meta, *org_sizeclass, size_idx);
3373 }
3374
3375 return meta;
3376 }
3377
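/*
 * Pick a slot for an allocation of `size` in the given pointer range:
 * prefer a partially used chunk of the matching sizeclass, then an entirely
 * free chunk (trimming it if it is larger), and only then carve a brand new
 * chunk out of the range's high-water mark.
 */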
3378 kern_return_t
3379 kmem_locate_space(
3380 vm_map_size_t size,
3381 vm_map_range_id_t range_id,
3382 bool from_right,
3383 vm_map_offset_t *start_inout,
3384 vm_map_entry_t *entry_out)
3385 {
3386 vm_map_entry_t entry;
3387 uint32_t size_idx = kmem_get_idx_from_size(size);
3388 uint32_t front = kmem_get_front(range_id, from_right);
3389 struct kmem_sizeclass *sizeclass = &kmem_size_array[size_idx];
3390 struct kmem_page_meta *meta;
3391
3392 assert(size <= sizeclass->ks_size);
3393 again:
3394 if ((meta = LIST_FIRST(&sizeclass->ks_partial_head[front])) != NULL) {
3395 *start_inout = kmem_get_addr_from_meta(meta, range_id, *sizeclass, &entry);
3396 /*
3397 * Requeue to full if necessary
3398 */
3399 assert(meta->km_page_marker == KMEM_META_PRIMARY);
3400 if (__builtin_popcount(meta->km_bitmap) == KMEM_NUM_GUARDS) {
3401 kmem_requeue_meta(meta, &sizeclass->ks_full_head[front]);
3402 }
3403 } else if ((meta = kmem_get_free_chunk_from_list(sizeclass, size_idx,
3404 front)) != NULL) {
3405 *start_inout = kmem_get_addr_from_meta(meta, range_id, *sizeclass, &entry);
3406 /*
3407 * Queue to partial
3408 */
3409 assert(meta->km_page_marker == KMEM_META_PRIMARY);
3410 assert(__builtin_popcount(meta->km_bitmap) > KMEM_NUM_GUARDS);
3411 LIST_INSERT_HEAD(&sizeclass->ks_partial_head[front], meta, km_link);
3412 } else {
3413 meta = kmem_get_new_chunk(range_id, from_right, size_idx);
3414 if (meta == NULL) {
3415 goto again;
3416 }
3417 *start_inout = kmem_get_addr_from_meta(meta, range_id, *sizeclass, &entry);
3418 assert(meta->km_page_marker == KMEM_META_PRIMARY);
3419 LIST_INSERT_HEAD(&sizeclass->ks_partial_head[front], meta, km_link);
3420 }
3421
3422 if (entry_out) {
3423 *entry_out = entry;
3424 }
3425
3426 return KERN_SUCCESS;
3427 }
3428
3429 /*
3430 * Determine whether the given metadata was allocated from the right
3431 */
3432 static bool
3433 kmem_meta_is_from_right(
3434 kmem_range_id_t range_id,
3435 struct kmem_page_meta *meta)
3436 {
3437 struct kmem_page_meta *metaf = kmem_meta_hwm[kmem_get_front(range_id, 0)];
3438 __assert_only struct kmem_page_meta *metab = kmem_meta_hwm[kmem_get_front(range_id, 1)];
3439 struct kmem_page_meta *meta_base = kmem_meta_base[range_id];
3440 struct kmem_page_meta *meta_end;
3441
3442 meta_end = (struct kmem_page_meta *)kmem_meta_range[range_id].max_address;
3443
3444 if ((meta >= meta_base) && (meta < metaf)) {
3445 return false;
3446 }
3447
3448 assert(meta >= metab && meta < meta_end);
3449 return true;
3450 }
3451
3452 static void
3453 kmem_free_chunk(
3454 kmem_range_id_t range_id,
3455 struct kmem_page_meta *meta,
3456 bool from_right)
3457 {
3458 struct kmem_page_meta *meta_coalesce = meta - 1;
3459 struct kmem_page_meta *meta_start = meta;
3460 uint32_t num_chunks = kmem_get_chunk_len(meta);
3461 uint32_t add_chunks;
3462 struct kmem_page_meta *meta_end = meta + num_chunks;
3463 struct kmem_page_meta *meta_hwm_l, *meta_hwm_r;
3464 uint32_t front = kmem_get_front(range_id, from_right);
3465
3466 meta_hwm_l = kmem_meta_hwm[kmem_get_front(range_id, 0)];
3467 meta_hwm_r = kmem_meta_hwm[kmem_get_front(range_id, 1)];
3468
3469 LIST_REMOVE(meta, km_link);
3470 kmem_clear_meta_range(meta, num_chunks);
3471
3472 /*
3473 * Coalesce left
3474 */
3475 if (((from_right && (meta_coalesce >= meta_hwm_r)) || !from_right) &&
3476 (meta_coalesce->km_page_marker == KMEM_META_FREE)) {
3477 meta_start = meta_coalesce - kmem_get_free_chunk_len(meta_coalesce) + 1;
3478 add_chunks = kmem_get_free_chunk_len(meta_start);
3479 num_chunks += add_chunks;
3480 LIST_REMOVE(meta_start, km_link);
3481 kmem_clear_meta_range(meta_start + add_chunks - 1, 1);
3482 }
3483
3484 /*
3485 * Coalesce right
3486 */
3487 if (((!from_right && (meta_end < meta_hwm_l)) || from_right) &&
3488 (meta_end->km_page_marker == KMEM_META_FREE)) {
3489 add_chunks = kmem_get_free_chunk_len(meta_end);
3490 LIST_REMOVE(meta_end, km_link);
3491 kmem_clear_meta_range(meta_end, 1);
3492 meta_end = meta_end + add_chunks;
3493 num_chunks += add_chunks;
3494 }
3495
3496 kmem_init_free_chunk(meta_start, num_chunks, front);
3497 }
3498
3499 static void
3500 kmem_free_slot(
3501 kmem_range_id_t range_id,
3502 mach_vm_range_t slot)
3503 {
3504 struct kmem_page_meta *meta;
3505 vm_map_offset_t chunk_start;
3506 uint32_t size_idx, chunk_elem, slot_idx, num_elem;
3507 struct kmem_sizeclass *sizeclass;
3508 vm_map_size_t slot_size;
3509
3510 meta = kmem_addr_to_meta_start(slot->min_address, range_id, &chunk_start);
3511 size_idx = meta->km_sizeclass;
3512 slot_size = kmem_get_size_from_idx(size_idx);
3513 slot_idx = (slot->min_address - chunk_start) / slot_size;
3514 assert((meta->km_bitmap & kmem_slot_idx_to_bit(slot_idx, size_idx)) == 0);
3515 meta->km_bitmap |= kmem_slot_idx_to_bit(slot_idx, size_idx);
3516
3517 sizeclass = &kmem_size_array[size_idx];
3518 chunk_elem = sizeclass->ks_num_elem;
3519 num_elem = __builtin_popcount(meta->km_bitmap);
3520
3521 if (num_elem == chunk_elem) {
3522 /*
3523 * If the entire chunk is empty, add it to the all-free list
3524 */
3525 bool from_right = kmem_meta_is_from_right(range_id, meta);
3526
3527 kmem_free_chunk(range_id, meta, from_right);
3528 } else if (num_elem == KMEM_NUM_GUARDS + 1) {
3529 /*
3530 * If we freed a slot of a full chunk, move it to the partial list
3531 */
3532 uint32_t front = kmem_get_front(range_id,
3533 kmem_meta_is_from_right(range_id, meta));
3534
3535 kmem_requeue_meta(meta, &sizeclass->ks_partial_head[front]);
3536 }
3537 }
3538
3539 void
3540 kmem_free_space(
3541 vm_map_offset_t start,
3542 vm_map_offset_t end,
3543 vm_map_range_id_t range_id,
3544 mach_vm_range_t slot)
3545 {
3546 bool entry_present = false;
3547 vm_map_entry_t prev_entry;
3548 vm_map_entry_t next_entry;
3549
3550 if ((slot->min_address == start) && (slot->max_address == end)) {
3551 /*
3552 * Entire slot is being freed at once
3553 */
3554 return kmem_free_slot(range_id, slot);
3555 }
3556
3557 entry_present = vm_map_lookup_entry(kernel_map, start, &prev_entry);
3558 assert(!entry_present);
3559 next_entry = prev_entry->vme_next;
3560
3561 if (((prev_entry == vm_map_to_entry(kernel_map) ||
3562 prev_entry->vme_end <= slot->min_address)) &&
3563 (next_entry == vm_map_to_entry(kernel_map) ||
3564 (next_entry->vme_start >= slot->max_address))) {
3565 /*
3566 * Free entire slot
3567 */
3568 kmem_free_slot(range_id, slot);
3569 }
3570 }
3571
3572 #pragma mark kmem init
3573
3574 /*
3575 * The default percentage of memory that can be mlocked is scaled based on the total
3576 * amount of memory in the system. These percentages are calculated
3577 * offline and stored in this table. We index this table by
3578 * log2(max_mem) - VM_USER_WIREABLE_MIN_CONFIG. We clamp this index in the range
3579 * [0, sizeof(wire_limit_percents) / sizeof(vm_map_size_t))
3580 *
3581 * Note that these values were picked for mac.
3582 * If we ever have arm devices with very large memory configs, we may want to revisit this,
3583 * since the kernel overhead is smaller there due to the larger page size.
3584 */
3585
3586 /* Start scaling iff we're managing > 2^32 = 4GB of RAM. */
3587 #define VM_USER_WIREABLE_MIN_CONFIG 32
3588 #if CONFIG_JETSAM
3589 /* Systems with jetsam can wire a bit more because the system can relieve wired
3590 * pressure.
3591 */
3592 static vm_map_size_t wire_limit_percents[] =
3593 { 80, 80, 80, 80, 82, 85, 88, 91, 94, 97};
3594 #else
3595 static vm_map_size_t wire_limit_percents[] =
3596 { 70, 73, 76, 79, 82, 85, 88, 91, 94, 97};
3597 #endif /* CONFIG_JETSAM */
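/*
 * Worked example (assuming, per the comment above, that the table is
 * indexed by log2 of the configured memory size): a 16GB configuration
 * gives index 34 - VM_USER_WIREABLE_MIN_CONFIG = 2, i.e. 76% wireable
 * (80% with jetsam), before the VM_NOT_USER_WIREABLE_MAX cap below is
 * applied.
 */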
3598
3599 /*
3600 * Sets the default global user wire limit which limits the amount of
3601 * memory that can be locked via mlock() based on the above algorithm.
3602 * This can be overridden via a sysctl.
3603 */
3604 static void
3605 kmem_set_user_wire_limits(void)
3606 {
3607 uint64_t available_mem_log;
3608 uint64_t max_wire_percent;
3609 size_t wire_limit_percents_length = sizeof(wire_limit_percents) /
3610 sizeof(vm_map_size_t);
3611 vm_map_size_t limit;
3612 uint64_t config_memsize = max_mem;
3613 #if defined(XNU_TARGET_OS_OSX)
3614 config_memsize = max_mem_actual;
3615 #endif /* defined(XNU_TARGET_OS_OSX) */
3616
3617 available_mem_log = bit_floor(config_memsize);
3618
3619 if (available_mem_log < VM_USER_WIREABLE_MIN_CONFIG) {
3620 available_mem_log = 0;
3621 } else {
3622 available_mem_log -= VM_USER_WIREABLE_MIN_CONFIG;
3623 }
3624 if (available_mem_log >= wire_limit_percents_length) {
3625 available_mem_log = wire_limit_percents_length - 1;
3626 }
3627 max_wire_percent = wire_limit_percents[available_mem_log];
3628
3629 limit = config_memsize * max_wire_percent / 100;
3630 /* Cap the number of non-lockable bytes at VM_NOT_USER_WIREABLE_MAX */
3631 if (config_memsize - limit > VM_NOT_USER_WIREABLE_MAX) {
3632 limit = config_memsize - VM_NOT_USER_WIREABLE_MAX;
3633 }
3634
3635 vm_global_user_wire_limit = limit;
3636 /* the default per task limit is the same as the global limit */
3637 vm_per_task_user_wire_limit = limit;
3638 vm_add_wire_count_over_global_limit = 0;
3639 vm_add_wire_count_over_user_limit = 0;
3640 }
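/*
 * Worked example (editor's illustration only; it assumes bit_floor()
 * yields floor(log2(x)) as the indexing comment above implies, and uses a
 * made-up memory size):
 *
 *	config_memsize = 16GB        -> log2 = 34
 *	index          = 34 - VM_USER_WIREABLE_MIN_CONFIG = 2
 *	percent        = wire_limit_percents[2] = 76 (80 with CONFIG_JETSAM)
 *	limit          = 16GB * 76 / 100 ~= 12.2GB
 *
 * If the non-wireable remainder (16GB - limit) exceeded
 * VM_NOT_USER_WIREABLE_MAX, the limit would instead be raised to
 * config_memsize - VM_NOT_USER_WIREABLE_MAX.
 */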
3641
3642 #define KMEM_MAX_CLAIMS 50
3643 __startup_data
3644 struct kmem_range_startup_spec kmem_claims[KMEM_MAX_CLAIMS] = {};
3645 __startup_data
3646 uint32_t kmem_claim_count = 0;
3647
3648 __startup_func
3649 void
3650 kmem_range_startup_init(
3651 struct kmem_range_startup_spec *sp)
3652 {
3653 assert(kmem_claim_count < KMEM_MAX_CLAIMS - KMEM_RANGE_COUNT);
3654 if (sp->kc_calculate_sz) {
3655 sp->kc_size = (sp->kc_calculate_sz)();
3656 }
3657 if (sp->kc_size) {
3658 kmem_claims[kmem_claim_count] = *sp;
3659 kmem_claim_count++;
3660 }
3661 }
3662
3663 static vm_offset_t
3664 kmem_fuzz_start(void)
3665 {
3666 vm_offset_t kmapoff_kaddr = 0;
3667 uint32_t kmapoff_pgcnt = (early_random() & 0x1ff) + 1; /* 9 bits */
3668 vm_map_size_t kmapoff_size = ptoa(kmapoff_pgcnt);
3669
3670 kmem_alloc(kernel_map, &kmapoff_kaddr, kmapoff_size,
3671 KMA_NOFAIL | KMA_KOBJECT | KMA_PERMANENT | KMA_VAONLY,
3672 VM_KERN_MEMORY_OSFMK);
3673 return kmapoff_kaddr + kmapoff_size;
3674 }
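/*
 * Illustrative note (editor's sketch): kmapoff_pgcnt is 1..512 pages, and
 * the allocation is KMA_VAONLY, so only kernel_map VA is consumed here,
 * no physical pages are grabbed. The returned value is the first address
 * past the random slide.
 */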
3675
3676 /*
3677 * Generate a randomly shuffled array of indices from 0 to count - 1
3678 */
3679 __startup_func
3680 void
3681 kmem_shuffle(
3682 uint16_t *shuffle_buf,
3683 uint16_t count)
3684 {
3685 for (uint16_t i = 0; i < count; i++) {
3686 uint16_t j = kmem_get_random16(i);
3687 if (j != i) {
3688 shuffle_buf[i] = shuffle_buf[j];
3689 }
3690 shuffle_buf[j] = i;
3691 }
3692 }
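/*
 * Illustrative note (editor's sketch): this is the "inside-out"
 * Fisher-Yates construction, assuming kmem_get_random16(i) returns a
 * uniform value in [0, i]. For example, with count == 4 one possible run
 * is:
 *
 *	i = 0: j = 0                              -> { 0 }
 *	i = 1: j = 0, buf[1] = buf[0], buf[0] = 1 -> { 1, 0 }
 *	i = 2: j = 2                              -> { 1, 0, 2 }
 *	i = 3: j = 1, buf[3] = buf[1], buf[1] = 3 -> { 1, 3, 2, 0 }
 *
 * which yields a uniformly random permutation of 0..count-1.
 */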
3693
3694 __startup_func
3695 static void
3696 kmem_shuffle_claims(void)
3697 {
3698 uint16_t shuffle_buf[KMEM_MAX_CLAIMS] = {};
3699 uint16_t limit = (uint16_t)kmem_claim_count;
3700
3701 kmem_shuffle(&shuffle_buf[0], limit);
3702 for (uint16_t i = 0; i < limit; i++) {
3703 struct kmem_range_startup_spec tmp = kmem_claims[i];
3704 kmem_claims[i] = kmem_claims[shuffle_buf[i]];
3705 kmem_claims[shuffle_buf[i]] = tmp;
3706 }
3707 }
3708
3709 __startup_func
3710 static void
3711 kmem_readjust_ranges(
3712 uint32_t cur_idx)
3713 {
3714 assert(cur_idx != 0);
3715 uint32_t j = cur_idx - 1, random;
3716 struct kmem_range_startup_spec sp = kmem_claims[cur_idx];
3717 struct mach_vm_range *sp_range = sp.kc_range;
3718
3719 /*
3720 * Find max index where restriction is met
3721 */
3722 for (; j > 0; j--) {
3723 struct kmem_range_startup_spec spj = kmem_claims[j];
3724 vm_map_offset_t max_start = spj.kc_range->min_address;
3725 if (spj.kc_flags & KC_NO_MOVE) {
3726 panic("kmem_range_init: Can't scramble with multiple constraints");
3727 }
3728 if (max_start <= sp_range->min_address) {
3729 break;
3730 }
3731 }
3732
3733 /*
3734 * Pick a random index from 0 to max index and shift claims to the right
3735 * to make room for restricted claim
3736 */
3737 random = kmem_get_random16((uint16_t)j);
3738 assert(random <= j);
3739
3740 sp_range->min_address = kmem_claims[random].kc_range->min_address;
3741 sp_range->max_address = sp_range->min_address + sp.kc_size;
3742
3743 for (j = cur_idx - 1; j >= random && j != UINT32_MAX; j--) {
3744 struct kmem_range_startup_spec spj = kmem_claims[j];
3745 struct mach_vm_range *range = spj.kc_range;
3746 range->min_address += sp.kc_size;
3747 range->max_address += sp.kc_size;
3748 kmem_claims[j + 1] = spj;
3749 }
3750
3751 sp.kc_flags = KC_NO_MOVE;
3752 kmem_claims[random] = sp;
3753 }
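/*
 * Worked example (editor's illustration with made-up claims): suppose
 * claims A, B and C were already placed at increasing addresses and the
 * claim at cur_idx carries a restriction that it must start at or below
 * B's current start. The loop above walks left until it finds the last
 * claim whose start already satisfies that restriction, a random slot in
 * [0, j] is then chosen, the restricted claim takes that slot's start
 * address, and every claim from the chosen slot through cur_idx - 1 is
 * shifted up by the restricted claim's size to make room for it.
 */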
3754
3755 __startup_func
3756 static vm_map_size_t
3757 kmem_add_ptr_claims(void)
3758 {
3759 uint64_t kmem_meta_num, kmem_ptr_chunks;
3760 vm_map_size_t org_ptr_range_size = ptr_range_size;
3761
3762 ptr_range_size -= PAGE_SIZE;
3763 ptr_range_size *= KMEM_CHUNK_SIZE_MIN;
3764 ptr_range_size /= (KMEM_CHUNK_SIZE_MIN + sizeof(struct kmem_page_meta));
3765
3766 kmem_ptr_chunks = ptr_range_size / KMEM_CHUNK_SIZE_MIN;
3767 ptr_range_size = kmem_ptr_chunks * KMEM_CHUNK_SIZE_MIN;
3768
3769 kmem_meta_num = kmem_ptr_chunks + 2;
3770 kmem_meta_size = round_page(kmem_meta_num * sizeof(struct kmem_page_meta));
3771
3772 assert(kmem_meta_size + ptr_range_size <= org_ptr_range_size);
3773 /*
3774 * Add claims for kmem's ranges
3775 */
3776 for (uint32_t i = 0; i < kmem_ptr_ranges; i++) {
3777 struct kmem_range_startup_spec kmem_spec = {
3778 .kc_name = "kmem_ptr_range",
3779 .kc_range = &kmem_ranges[KMEM_RANGE_ID_PTR_0 + i],
3780 .kc_size = ptr_range_size,
3781 .kc_flags = KC_NO_ENTRY,
3782 };
3783 kmem_claims[kmem_claim_count++] = kmem_spec;
3784
3785 struct kmem_range_startup_spec kmem_meta_spec = {
3786 .kc_name = "kmem_ptr_range_meta",
3787 .kc_range = &kmem_meta_range[KMEM_RANGE_ID_PTR_0 + i],
3788 .kc_size = kmem_meta_size,
3789 .kc_flags = KC_NONE,
3790 };
3791 kmem_claims[kmem_claim_count++] = kmem_meta_spec;
3792 }
3793 return (org_ptr_range_size - ptr_range_size - kmem_meta_size) *
3794 kmem_ptr_ranges;
3795 }
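/*
 * Worked example (editor's sketch; CHUNK stands for KMEM_CHUNK_SIZE_MIN
 * and META for sizeof(struct kmem_page_meta), whose actual values are not
 * assumed here): starting from an original ptr_range_size R, one page is
 * set aside and the remainder is split so that each CHUNK bytes of usable
 * VA is paired with one metadata element:
 *
 *	tmp    = (R - PAGE_SIZE) * CHUNK / (CHUNK + META)
 *	chunks = tmp / CHUNK                    (rounded down)
 *	usable = chunks * CHUNK
 *	meta   = round_page((chunks + 2) * META)
 *
 * The per-range slack (R - usable - meta), multiplied by the number of
 * pointer ranges, is returned so the caller can fold it into the data
 * range.
 */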
3796
3797 __startup_func
3798 static void
3799 kmem_add_extra_claims(void)
3800 {
3801 vm_map_size_t largest_free_size = 0, total_claims = 0;
3802
3803 vm_map_sizes(kernel_map, NULL, NULL, &largest_free_size);
3804 largest_free_size = trunc_page(largest_free_size);
3805
3806 /*
3807 * KASAN and configurations without *TRR need to have just one ptr range due to
3808 * resource constraints.
3809 */
3810 #if !ZSECURITY_CONFIG(KERNEL_PTR_SPLIT)
3811 kmem_ptr_ranges = 1;
3812 #endif
3813 /*
3814 * Determine size of data and pointer kmem_ranges
3815 */
3816 for (uint32_t i = 0; i < kmem_claim_count; i++) {
3817 total_claims += kmem_claims[i].kc_size;
3818 }
3819 assert((total_claims & PAGE_MASK) == 0);
3820 largest_free_size -= total_claims;
3821
3822 /*
3823 * Use half the total available VA for all pointer allocations (this
3824 * includes the kmem_sprayqtn range). Given that we have 4 total
3825 * ranges divide the available VA by 8.
3826 */
3827 ptr_range_size = largest_free_size / ((kmem_ptr_ranges + 1) * 2);
3828 sprayqtn_range_size = ptr_range_size;
3829
3830 if (sprayqtn_range_size > (sane_size / 2)) {
3831 sprayqtn_range_size = sane_size / 2;
3832 }
3833
3834 ptr_range_size = round_page(ptr_range_size);
3835 sprayqtn_range_size = round_page(sprayqtn_range_size);
3836
3837
3838 data_range_size = largest_free_size
3839 - (ptr_range_size * kmem_ptr_ranges)
3840 - sprayqtn_range_size;
3841
3842 /*
3843 * Add claims for kmem's ranges
3844 */
3845 data_range_size += kmem_add_ptr_claims();
3846 assert(data_range_size + sprayqtn_range_size +
3847 ((ptr_range_size + kmem_meta_size) * kmem_ptr_ranges) <=
3848 largest_free_size);
3849
3850 struct kmem_range_startup_spec kmem_spec_sprayqtn = {
3851 .kc_name = "kmem_sprayqtn_range",
3852 .kc_range = &kmem_ranges[KMEM_RANGE_ID_SPRAYQTN],
3853 .kc_size = sprayqtn_range_size,
3854 .kc_flags = KC_NO_ENTRY,
3855 };
3856 kmem_claims[kmem_claim_count++] = kmem_spec_sprayqtn;
3857
3858 struct kmem_range_startup_spec kmem_spec_data = {
3859 .kc_name = "kmem_data_range",
3860 .kc_range = &kmem_ranges[KMEM_RANGE_ID_DATA],
3861 .kc_size = data_range_size,
3862 .kc_flags = KC_NO_ENTRY,
3863 };
3864 kmem_claims[kmem_claim_count++] = kmem_spec_data;
3865 }
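/*
 * Worked example (editor's illustration with made-up numbers): if the
 * claims registered so far leave 512G of free VA and kmem_ptr_ranges is 3,
 * half of that VA is set aside for pointer-style allocations spread across
 * the 3 pointer ranges plus the spray-quarantine range, so each initially
 * gets 512G / ((3 + 1) * 2) = 64G (the spray-quarantine range is further
 * capped at sane_size / 2). Whatever remains, plus the slack recovered by
 * kmem_add_ptr_claims(), becomes the data range.
 */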
3866
3867 __startup_func
3868 static void
3869 kmem_scramble_ranges(void)
3870 {
3871 vm_map_offset_t start = 0;
3872
3873 /*
3874 * Initialize the KMEM_RANGE_ID_NONE range to use the entire map so that
3875 * the VM can find the requested ranges.
3876 */
3877 kmem_ranges[KMEM_RANGE_ID_NONE].min_address = MAX(kernel_map->min_offset,
3878 VM_MAP_PAGE_SIZE(kernel_map));
3879 kmem_ranges[KMEM_RANGE_ID_NONE].max_address = kernel_map->max_offset;
3880
3881 /*
3882 * Allocate the g_kext_map prior to randomizing the remaining submaps:
3883 * this map is 2G in size and starts at the end of kernel_text on x86, so
3884 * it could otherwise overflow into the heap.
3885 */
3886 kext_alloc_init();
3887
3888 /*
3889 * Eat a random amount of kernel_map to fuzz subsequent heap, zone and
3890 * stack addresses. (With a 4K page and 9 bits of randomness, this
3891 * eats about 2M of VA from the map)
3892 *
3893 * Note that we always need to slide by at least one page because the VM
3894 * pointer packing schemes using KERNEL_PMAP_HEAP_RANGE_START as a base
3895 * do not admit this address to be part of any zone submap.
3896 */
3897 start = kmem_fuzz_start();
3898
3899 /*
3900 * Add claims for ptr and data kmem_ranges
3901 */
3902 kmem_add_extra_claims();
3903
3904 /*
3905 * Shuffle registered claims
3906 */
3907 assert(kmem_claim_count < UINT16_MAX);
3908 kmem_shuffle_claims();
3909
3910 /*
3911 * Apply restrictions and determine range for each claim
3912 */
3913 for (uint32_t i = 0; i < kmem_claim_count; i++) {
3914 vm_map_offset_t end = 0;
3915 struct kmem_range_startup_spec sp = kmem_claims[i];
3916 struct mach_vm_range *sp_range = sp.kc_range;
3917
3918 if (vm_map_locate_space_anywhere(kernel_map, sp.kc_size, 0,
3919 VM_MAP_KERNEL_FLAGS_ANYWHERE(), &start, NULL) != KERN_SUCCESS) {
3920 panic("kmem_range_init: vm_map_locate_space failing for claim %s",
3921 sp.kc_name);
3922 }
3923
3924 end = start + sp.kc_size;
3925 /*
3926 * Re-adjust ranges if restriction not met
3927 */
3928 if (sp_range->min_address && start > sp_range->min_address) {
3929 kmem_readjust_ranges(i);
3930 } else {
3931 sp_range->min_address = start;
3932 sp_range->max_address = end;
3933 }
3934 start = end;
3935 }
3936
3937 /*
3938 * We have settled on the ranges; now create temporary entries for the
3939 * claims.
3940 */
3941 for (uint32_t i = 0; i < kmem_claim_count; i++) {
3942 struct kmem_range_startup_spec sp = kmem_claims[i];
3943 vm_map_entry_t entry = NULL;
3944 if (sp.kc_flags & KC_NO_ENTRY) {
3945 continue;
3946 }
3947 if (vm_map_find_space(kernel_map, sp.kc_range->min_address, sp.kc_size, 0,
3948 VM_MAP_KERNEL_FLAGS_ANYWHERE(), &entry) != KERN_SUCCESS) {
3949 panic("kmem_range_init: vm_map_find_space failing for claim %s",
3950 sp.kc_name);
3951 }
3952 vm_object_reference(kernel_object_default);
3953 VME_OBJECT_SET(entry, kernel_object_default, false, 0);
3954 VME_OFFSET_SET(entry, entry->vme_start);
3955 vm_map_unlock(kernel_map);
3956 }
3957 /*
3958 * Now that we are done assigning all the ranges, reset
3959 * kmem_ranges[KMEM_RANGE_ID_NONE]
3960 */
3961 kmem_ranges[KMEM_RANGE_ID_NONE] = (struct mach_vm_range) {};
3962
3963 #if DEBUG || DEVELOPMENT
3964 for (uint32_t i = 0; i < kmem_claim_count; i++) {
3965 struct kmem_range_startup_spec sp = kmem_claims[i];
3966
3967 printf("%-24s: %p - %p (%u%c)\n", sp.kc_name,
3968 (void *)sp.kc_range->min_address,
3969 (void *)sp.kc_range->max_address,
3970 mach_vm_size_pretty(sp.kc_size),
3971 mach_vm_size_unit(sp.kc_size));
3972 }
3973 #endif /* DEBUG || DEVELOPMENT */
3974 }
3975
3976 __startup_func
3977 static void
3978 kmem_range_init(void)
3979 {
3980 vm_size_t range_adjustment;
3981
3982 kmem_scramble_ranges();
3983
3984 range_adjustment = sprayqtn_range_size >> 3;
3985 kmem_large_ranges[KMEM_RANGE_ID_SPRAYQTN].min_address =
3986 kmem_ranges[KMEM_RANGE_ID_SPRAYQTN].min_address + range_adjustment;
3987 kmem_large_ranges[KMEM_RANGE_ID_SPRAYQTN].max_address =
3988 kmem_ranges[KMEM_RANGE_ID_SPRAYQTN].max_address;
3989
3990 range_adjustment = data_range_size >> 3;
3991 kmem_large_ranges[KMEM_RANGE_ID_DATA].min_address =
3992 kmem_ranges[KMEM_RANGE_ID_DATA].min_address + range_adjustment;
3993 kmem_large_ranges[KMEM_RANGE_ID_DATA].max_address =
3994 kmem_ranges[KMEM_RANGE_ID_DATA].max_address;
3995
3996 pmap_init();
3997 kmem_metadata_init();
3998 kmem_sizeclass_init();
3999
4000 #if DEBUG || DEVELOPMENT
4001 for (kmem_range_id_t i = 1; i < KMEM_RANGE_COUNT; i++) {
4002 vm_size_t range_size = mach_vm_range_size(&kmem_large_ranges[i]);
4003 printf("kmem_large_ranges[%d] : %p - %p (%u%c)\n", i,
4004 (void *)kmem_large_ranges[i].min_address,
4005 (void *)kmem_large_ranges[i].max_address,
4006 mach_vm_size_pretty(range_size),
4007 mach_vm_size_unit(range_size));
4008 }
4009 #endif
4010 }
4011 STARTUP(KMEM, STARTUP_RANK_THIRD, kmem_range_init);
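/*
 * Illustrative note (editor's sketch): range_adjustment is one eighth of
 * each range, so the "large" variants of the spray-quarantine and data
 * ranges exclude the bottom 1/8th of the corresponding full range; e.g. a
 * hypothetical 64G data range would take large allocations only in its
 * upper 56G.
 */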
4012
4013 #if DEBUG || DEVELOPMENT
4014 __startup_func
4015 static void
4016 kmem_log_init(void)
4017 {
4018 /*
4019 * The log can only be created after the kmem subsystem is initialized, as
4020 * btlog creation uses kmem.
4021 */
4022 kmem_outlier_log = btlog_create(BTLOG_LOG, KMEM_OUTLIER_LOG_SIZE, 0);
4023 }
4024 STARTUP(ZALLOC, STARTUP_RANK_FIRST, kmem_log_init);
4025
4026 kmem_gobj_stats
4027 kmem_get_gobj_stats(void)
4028 {
4029 kmem_gobj_stats stats = {};
4030
4031 vm_map_lock(kernel_map);
4032 for (uint8_t i = 0; i < kmem_ptr_ranges; i++) {
4033 kmem_range_id_t range_id = KMEM_RANGE_ID_FIRST + i;
4034 struct mach_vm_range range = kmem_ranges[range_id];
4035 struct kmem_page_meta *meta = kmem_meta_hwm[kmem_get_front(range_id, 0)];
4036 struct kmem_page_meta *meta_end;
4037 uint64_t meta_idx = meta - kmem_meta_base[range_id];
4038 vm_map_size_t used = 0, va = 0, meta_sz = 0, pte_sz = 0;
4039 vm_map_offset_t addr;
4040 vm_map_entry_t entry;
4041
4042 /*
4043 * Left front
4044 */
4045 va = (meta_idx * KMEM_CHUNK_SIZE_MIN);
4046 meta_sz = round_page(meta_idx * sizeof(struct kmem_page_meta));
4047
4048 /*
4049 * Right front
4050 */
4051 meta = kmem_meta_hwm[kmem_get_front(range_id, 1)];
4052 meta_end = kmem_addr_to_meta(range.max_address, range_id, &addr,
4053 &meta_idx);
4054 meta_idx = meta_end - meta;
4055 meta_sz += round_page(meta_idx * sizeof(struct kmem_page_meta));
4056 va += (meta_idx * KMEM_CHUNK_SIZE_MIN);
4057
4058 /*
4059 * Compute VA allocated in entire range
4060 */
4061 if (vm_map_lookup_entry(kernel_map, range.min_address, &entry) == false) {
4062 entry = entry->vme_next;
4063 }
4064 while (entry != vm_map_to_entry(kernel_map) &&
4065 entry->vme_start < range.max_address) {
4066 used += (entry->vme_end - entry->vme_start);
4067 entry = entry->vme_next;
4068 }
4069
4070 pte_sz = round_page(atop(va - used) * 8);
4071
4072 stats.total_used += used;
4073 stats.total_va += va;
4074 stats.pte_sz += pte_sz;
4075 stats.meta_sz += meta_sz;
4076 }
4077 vm_map_unlock(kernel_map);
4078
4079 return stats;
4080 }
4081
4082 #endif /* DEBUG || DEVELOPMENT */
4083
4084 /*
4085 * kmem_init:
4086 *
4087 * Initialize the kernel's virtual memory map, taking
4088 * into account all memory allocated up to this time.
4089 */
4090 __startup_func
4091 void
4092 kmem_init(
4093 vm_offset_t start,
4094 vm_offset_t end)
4095 {
4096 vm_map_offset_t map_start;
4097 vm_map_offset_t map_end;
4098
4099 map_start = vm_map_trunc_page(start,
4100 VM_MAP_PAGE_MASK(kernel_map));
4101 map_end = vm_map_round_page(end,
4102 VM_MAP_PAGE_MASK(kernel_map));
4103
4104 vm_map_will_allocate_early_map(&kernel_map);
4105 #if defined(__arm64__)
4106 kernel_map = vm_map_create_options(pmap_kernel(),
4107 VM_MIN_KERNEL_AND_KEXT_ADDRESS,
4108 VM_MAX_KERNEL_ADDRESS,
4109 VM_MAP_CREATE_DEFAULT);
4110 /*
4111 * Reserve virtual memory allocated up to this time.
4112 */
4113 {
4114 unsigned int region_select = 0;
4115 vm_map_offset_t region_start;
4116 vm_map_size_t region_size;
4117 vm_map_offset_t map_addr;
4118 kern_return_t kr;
4119
4120 while (pmap_virtual_region(region_select, &region_start, &region_size)) {
4121 map_addr = region_start;
4122 kr = vm_map_enter(kernel_map, &map_addr,
4123 vm_map_round_page(region_size,
4124 VM_MAP_PAGE_MASK(kernel_map)),
4125 (vm_map_offset_t) 0,
4126 VM_MAP_KERNEL_FLAGS_FIXED_PERMANENT(.vmkf_no_pmap_check = true),
4127 VM_OBJECT_NULL,
4128 (vm_object_offset_t) 0, FALSE, VM_PROT_NONE, VM_PROT_NONE,
4129 VM_INHERIT_DEFAULT);
4130
4131 if (kr != KERN_SUCCESS) {
4132 panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x",
4133 (uint64_t) start, (uint64_t) end, (uint64_t) region_start,
4134 (uint64_t) region_size, kr);
4135 }
4136
4137 region_select++;
4138 }
4139 }
4140 #else
4141 kernel_map = vm_map_create_options(pmap_kernel(),
4142 VM_MIN_KERNEL_AND_KEXT_ADDRESS, map_end,
4143 VM_MAP_CREATE_DEFAULT);
4144 /*
4145 * Reserve virtual memory allocated up to this time.
4146 */
4147 if (start != VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
4148 vm_map_offset_t map_addr;
4149 kern_return_t kr;
4150
4151 map_addr = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
4152 kr = vm_map_enter(kernel_map,
4153 &map_addr,
4154 (vm_map_size_t)(map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
4155 (vm_map_offset_t) 0,
4156 VM_MAP_KERNEL_FLAGS_FIXED(.vmkf_no_pmap_check = true),
4157 VM_OBJECT_NULL,
4158 (vm_object_offset_t) 0, FALSE,
4159 VM_PROT_NONE, VM_PROT_NONE,
4160 VM_INHERIT_DEFAULT);
4161
4162 if (kr != KERN_SUCCESS) {
4163 panic("kmem_init(0x%llx,0x%llx): vm_map_enter(0x%llx,0x%llx) error 0x%x",
4164 (uint64_t) start, (uint64_t) end,
4165 (uint64_t) VM_MIN_KERNEL_AND_KEXT_ADDRESS,
4166 (uint64_t) (map_start - VM_MIN_KERNEL_AND_KEXT_ADDRESS),
4167 kr);
4168 }
4169 }
4170 #endif
4171
4172 kmem_set_user_wire_limits();
4173 }
4174
4175
4176 #pragma mark map copyio
4177 /*
4178 * Note: semantic types aren't used as `copyio` already validates.
4179 */
4180
4181 kern_return_t
4182 copyinmap(
4183 vm_map_t map,
4184 vm_map_offset_t fromaddr,
4185 void *todata,
4186 vm_size_t length)
4187 {
4188 kern_return_t kr = KERN_SUCCESS;
4189 vm_map_t oldmap;
4190
4191 if (vm_map_pmap(map) == pmap_kernel()) {
4192 /* assume a correct copy */
4193 memcpy(todata, CAST_DOWN(void *, fromaddr), length);
4194 } else if (current_map() == map) {
4195 if (copyin(fromaddr, todata, length) != 0) {
4196 kr = KERN_INVALID_ADDRESS;
4197 }
4198 } else {
4199 vm_map_reference(map);
4200 oldmap = vm_map_switch(map);
4201 if (copyin(fromaddr, todata, length) != 0) {
4202 kr = KERN_INVALID_ADDRESS;
4203 }
4204 vm_map_switch(oldmap);
4205 vm_map_deallocate(map);
4206 }
4207 return kr;
4208 }
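/*
 * Usage sketch (hypothetical caller, editor's illustration): copyinmap()
 * reads from a map that may not be the current one, falling back to
 * memcpy() for kernel pmaps, to copyin() when the target map is already
 * current, and otherwise temporarily switching maps:
 *
 *	uint32_t value;
 *	if (copyinmap(task_map, user_addr, &value, sizeof(value)) !=
 *	    KERN_SUCCESS) {
 *		// user_addr was not readable in task_map
 *	}
 */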
4209
4210 kern_return_t
4211 copyoutmap(
4212 vm_map_t map,
4213 void *fromdata,
4214 vm_map_address_t toaddr,
4215 vm_size_t length)
4216 {
4217 kern_return_t kr = KERN_SUCCESS;
4218 vm_map_t oldmap;
4219
4220 if (vm_map_pmap(map) == pmap_kernel()) {
4221 /* assume a correct copy */
4222 memcpy(CAST_DOWN(void *, toaddr), fromdata, length);
4223 } else if (current_map() == map) {
4224 if (copyout(fromdata, toaddr, length) != 0) {
4225 ktriage_record(thread_tid(current_thread()),
4226 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
4227 KDBG_TRIAGE_RESERVED,
4228 KDBG_TRIAGE_VM_COPYOUTMAP_SAMEMAP_ERROR),
4229 KERN_INVALID_ADDRESS /* arg */);
4230 kr = KERN_INVALID_ADDRESS;
4231 }
4232 } else {
4233 vm_map_reference(map);
4234 oldmap = vm_map_switch(map);
4235 if (copyout(fromdata, toaddr, length) != 0) {
4236 ktriage_record(thread_tid(current_thread()),
4237 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
4238 KDBG_TRIAGE_RESERVED,
4239 KDBG_TRIAGE_VM_COPYOUTMAP_DIFFERENTMAP_ERROR),
4240 KERN_INVALID_ADDRESS /* arg */);
4241 kr = KERN_INVALID_ADDRESS;
4242 }
4243 vm_map_switch(oldmap);
4244 vm_map_deallocate(map);
4245 }
4246 return kr;
4247 }
4248
4249 kern_return_t
4250 copyoutmap_atomic32(
4251 vm_map_t map,
4252 uint32_t value,
4253 vm_map_address_t toaddr)
4254 {
4255 kern_return_t kr = KERN_SUCCESS;
4256 vm_map_t oldmap;
4257
4258 if (vm_map_pmap(map) == pmap_kernel()) {
4259 /* assume a correct toaddr */
4260 *(uint32_t *)toaddr = value;
4261 } else if (current_map() == map) {
4262 if (copyout_atomic32(value, toaddr) != 0) {
4263 kr = KERN_INVALID_ADDRESS;
4264 }
4265 } else {
4266 vm_map_reference(map);
4267 oldmap = vm_map_switch(map);
4268 if (copyout_atomic32(value, toaddr) != 0) {
4269 kr = KERN_INVALID_ADDRESS;
4270 }
4271 vm_map_switch(oldmap);
4272 vm_map_deallocate(map);
4273 }
4274 return kr;
4275 }
4276
4277 kern_return_t
4278 copyoutmap_atomic64(
4279 vm_map_t map,
4280 uint64_t value,
4281 vm_map_address_t toaddr)
4282 {
4283 kern_return_t kr = KERN_SUCCESS;
4284 vm_map_t oldmap;
4285
4286 if (vm_map_pmap(map) == pmap_kernel()) {
4287 /* assume a correct toaddr */
4288 *(uint64_t *)toaddr = value;
4289 } else if (current_map() == map) {
4290 if (copyout_atomic64(value, toaddr) != 0) {
4291 kr = KERN_INVALID_ADDRESS;
4292 }
4293 } else {
4294 vm_map_reference(map);
4295 oldmap = vm_map_switch(map);
4296 if (copyout_atomic64(value, toaddr) != 0) {
4297 kr = KERN_INVALID_ADDRESS;
4298 }
4299 vm_map_switch(oldmap);
4300 vm_map_deallocate(map);
4301 }
4302 return kr;
4303 }
4304
4305
4306 #pragma mark pointer obfuscation / packing
4307
4308 /*
4309 *
4310 * The following two functions are to be used when exposing kernel
4311 * addresses to userspace via any of the various debug or info
4312 * facilities that exist. These are basically the same as VM_KERNEL_ADDRPERM()
4313 * and VM_KERNEL_UNSLIDE_OR_PERM() except they use a different random seed and
4314 * are exported to KEXTs.
4315 *
4316 * NOTE: USE THE MACRO VERSIONS OF THESE FUNCTIONS (in vm_param.h) FROM WITHIN THE KERNEL
4317 */
4318
4319 vm_offset_t
4320 vm_kernel_addrhash_internal(vm_offset_t addr, uint64_t salt)
4321 {
4322 assert(salt != 0);
4323
4324 if (addr == 0) {
4325 return 0ul;
4326 }
4327
4328 if (VM_KERNEL_IS_SLID(addr)) {
4329 return VM_KERNEL_UNSLIDE(addr);
4330 }
4331
4332 vm_offset_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(vm_offset_t)];
4333 SHA256_CTX sha_ctx;
4334
4335 SHA256_Init(&sha_ctx);
4336 SHA256_Update(&sha_ctx, &salt, sizeof(salt));
4337 SHA256_Update(&sha_ctx, &addr, sizeof(addr));
4338 SHA256_Final(sha_digest, &sha_ctx);
4339
4340 return sha_digest[0];
4341 }
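/*
 * Usage sketch (editor's illustration): a debug or info interface that
 * must expose a kernel pointer to userspace would report the hashed value
 * rather than the raw address, e.g. something along the lines of
 *
 *	info->object_id = VM_KERNEL_ADDRHASH((vm_offset_t)object);
 *
 * using the macro form from vm_param.h as the note above requires; slid
 * addresses are unslid instead of hashed.
 */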
4342
4343 __exported vm_offset_t
4344 vm_kernel_addrhash_external(vm_offset_t addr);
4345 vm_offset_t
4346 vm_kernel_addrhash_external(vm_offset_t addr)
4347 {
4348 return vm_kernel_addrhash_internal(addr, vm_kernel_addrhash_salt_ext);
4349 }
4350
4351 void
4352 vm_kernel_addrhide(
4353 vm_offset_t addr,
4354 vm_offset_t *hide_addr)
4355 {
4356 *hide_addr = VM_KERNEL_ADDRHIDE(addr);
4357 }
4358
4359 void
4360 vm_kernel_addrperm_external(
4361 vm_offset_t addr,
4362 vm_offset_t *perm_addr)
4363 {
4364 if (VM_KERNEL_IS_SLID(addr)) {
4365 *perm_addr = VM_KERNEL_UNSLIDE(addr);
4366 } else if (VM_KERNEL_ADDRESS(addr)) {
4367 *perm_addr = addr + vm_kernel_addrperm_ext;
4368 } else {
4369 *perm_addr = addr;
4370 }
4371 }
4372
4373 void
4374 vm_kernel_unslide_or_perm_external(
4375 vm_offset_t addr,
4376 vm_offset_t *up_addr)
4377 {
4378 vm_kernel_addrperm_external(addr, up_addr);
4379 }
4380
4381 void
4382 vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params)
4383 {
4384 if (ptr & ((1ul << params.vmpp_shift) - 1)) {
4385 panic("pointer %p can't be packed: low %d bits aren't 0",
4386 (void *)ptr, params.vmpp_shift);
4387 } else if (ptr <= params.vmpp_base) {
4388 panic("pointer %p can't be packed: below base %p",
4389 (void *)ptr, (void *)params.vmpp_base);
4390 } else {
4391 panic("pointer %p can't be packed: maximum encodable pointer is %p",
4392 (void *)ptr, (void *)vm_packing_max_packable(params));
4393 }
4394 }
4395
4396 void
4397 vm_packing_verify_range(
4398 const char *subsystem,
4399 vm_offset_t min_address,
4400 vm_offset_t max_address,
4401 vm_packing_params_t params)
4402 {
4403 if (min_address > max_address) {
4404 panic("%s: %s range invalid min:%p > max:%p",
4405 __func__, subsystem, (void *)min_address, (void *)max_address);
4406 }
4407
4408 if (!params.vmpp_base_relative) {
4409 return;
4410 }
4411
4412 if (min_address <= params.vmpp_base) {
4413 panic("%s: %s range invalid min:%p <= base:%p",
4414 __func__, subsystem, (void *)min_address, (void *)params.vmpp_base);
4415 }
4416
4417 if (max_address > vm_packing_max_packable(params)) {
4418 panic("%s: %s range invalid max:%p >= max packable:%p",
4419 __func__, subsystem, (void *)max_address,
4420 (void *)vm_packing_max_packable(params));
4421 }
4422 }
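/*
 * Worked example (editor's illustration with made-up parameters): for a
 * packing scheme with vmpp_shift == 6 and a non-zero, base-relative
 * vmpp_base B, a pointer is packable only if its low 6 bits are clear
 * (64-byte aligned), it is strictly greater than B, and it does not
 * exceed vm_packing_max_packable(params). vm_packing_verify_range()
 * applies the same constraints to a whole [min_address, max_address]
 * range up front, so the per-pointer panics above should only fire for
 * stray values.
 */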
4423
4424 #pragma mark tests
4425 #if MACH_ASSERT
4426 #include <sys/errno.h>
4427
4428 static void
4429 kmem_test_for_entry(
4430 vm_map_t map,
4431 vm_offset_t addr,
4432 void (^block)(vm_map_entry_t))
4433 {
4434 vm_map_entry_t entry;
4435
4436 vm_map_lock(map);
4437 block(vm_map_lookup_entry(map, addr, &entry) ? entry : NULL);
4438 vm_map_unlock(map);
4439 }
4440
4441 #define kmem_test_assert_map(map, pg, entries) ({ \
4442 assert3u((map)->size, ==, ptoa(pg)); \
4443 assert3u((map)->hdr.nentries, ==, entries); \
4444 })
4445
4446 static bool
4447 can_write_at(vm_offset_t offs, uint32_t page)
4448 {
4449 static const int zero;
4450
4451 return verify_write(&zero, (void *)(offs + ptoa(page) + 128), 1) == 0;
4452 }
4453 #define assert_writeable(offs, page) \
4454 assertf(can_write_at(offs, page), \
4455 "can write at %p + ptoa(%d)", (void *)offs, page)
4456
4457 #define assert_faults(offs, page) \
4458 assertf(!can_write_at(offs, page), \
4459 "can write at %p + ptoa(%d)", (void *)offs, page)
4460
4461 #define peek(offs, page) \
4462 (*(uint32_t *)((offs) + ptoa(page)))
4463
4464 #define poke(offs, page, v) \
4465 (*(uint32_t *)((offs) + ptoa(page)) = (v))
4466
4467 __attribute__((noinline))
4468 static void
4469 kmem_alloc_basic_test(vm_map_t map)
4470 {
4471 kmem_guard_t guard = {
4472 .kmg_tag = VM_KERN_MEMORY_DIAG,
4473 };
4474 vm_offset_t addr;
4475
4476 /*
4477 * Test wired basics:
4478 * - KMA_KOBJECT
4479 * - KMA_GUARD_FIRST, KMA_GUARD_LAST
4480 * - allocation alignment
4481 */
4482 addr = kmem_alloc_guard(map, ptoa(10), ptoa(2) - 1,
4483 KMA_KOBJECT | KMA_GUARD_FIRST | KMA_GUARD_LAST, guard).kmr_address;
4484 assertf(addr != 0ull, "kma(%p, 10p, 0, KO | GF | GL)", map);
4485 assert3u((addr + PAGE_SIZE) % ptoa(2), ==, 0);
4486 kmem_test_assert_map(map, 10, 1);
4487
4488 kmem_test_for_entry(map, addr, ^(__assert_only vm_map_entry_t e){
4489 assertf(e, "unable to find address %p in map %p", (void *)addr, map);
4490 assert(e->vme_kernel_object);
4491 assert(!e->vme_atomic);
4492 assert3u(e->vme_start, <=, addr);
4493 assert3u(addr + ptoa(10), <=, e->vme_end);
4494 });
4495
4496 assert_faults(addr, 0);
4497 for (int i = 1; i < 9; i++) {
4498 assert_writeable(addr, i);
4499 }
4500 assert_faults(addr, 9);
4501
4502 kmem_free(map, addr, ptoa(10));
4503 kmem_test_assert_map(map, 0, 0);
4504
4505 /*
4506 * Test pageable basics.
4507 */
4508 addr = kmem_alloc_guard(map, ptoa(10), 0,
4509 KMA_PAGEABLE, guard).kmr_address;
4510 assertf(addr != 0ull, "kma(%p, 10p, 0, KO | PG)", map);
4511 kmem_test_assert_map(map, 10, 1);
4512
4513 for (int i = 0; i < 9; i++) {
4514 assert_faults(addr, i);
4515 poke(addr, i, 42);
4516 assert_writeable(addr, i);
4517 }
4518
4519 kmem_free(map, addr, ptoa(10));
4520 kmem_test_assert_map(map, 0, 0);
4521 }
4522
4523 __attribute__((noinline))
4524 static void
4525 kmem_realloc_basic_test(vm_map_t map, kmr_flags_t kind)
4526 {
4527 kmem_guard_t guard = {
4528 .kmg_atomic = !(kind & KMR_DATA),
4529 .kmg_tag = VM_KERN_MEMORY_DIAG,
4530 .kmg_context = 0xefface,
4531 };
4532 vm_offset_t addr, newaddr;
4533 const int N = 10;
4534
4535 /*
4536 * This isn't something kmem_realloc_guard() _needs_ to do;
4537 * one could conceive an implementation that grows in place
4538 * if there's space after it.
4539 *
4540 * However, this is what the implementation does today.
4541 */
4542 bool realloc_growth_changes_address = true;
4543 bool GL = (kind & KMR_GUARD_LAST);
4544
4545 /*
4546 * Initial N page allocation
4547 */
4548 addr = kmem_alloc_guard(map, ptoa(N), 0,
4549 (kind & (KMA_KOBJECT | KMA_GUARD_LAST | KMA_DATA)) | KMA_ZERO,
4550 guard).kmr_address;
4551 assert3u(addr, !=, 0);
4552 kmem_test_assert_map(map, N, 1);
4553 for (int pg = 0; pg < N - GL; pg++) {
4554 poke(addr, pg, 42 + pg);
4555 }
4556 for (int pg = N - GL; pg < N; pg++) {
4557 assert_faults(addr, pg);
4558 }
4559
4560
4561 /*
4562 * Grow to N + 3 pages
4563 */
4564 newaddr = kmem_realloc_guard(map, addr, ptoa(N), ptoa(N + 3),
4565 kind | KMR_ZERO, guard).kmr_address;
4566 assert3u(newaddr, !=, 0);
4567 if (realloc_growth_changes_address) {
4568 assert3u(addr, !=, newaddr);
4569 }
4570 if ((kind & KMR_FREEOLD) || (addr == newaddr)) {
4571 kmem_test_assert_map(map, N + 3, 1);
4572 } else {
4573 kmem_test_assert_map(map, 2 * N + 3, 2);
4574 }
4575 for (int pg = 0; pg < N - GL; pg++) {
4576 assert3u(peek(newaddr, pg), ==, 42 + pg);
4577 }
4578 if ((kind & KMR_FREEOLD) == 0) {
4579 for (int pg = 0; pg < N - GL; pg++) {
4580 assert3u(peek(addr, pg), ==, 42 + pg);
4581 }
4582 /* check that the old and new mappings truly share their backing memory */
4583 poke(addr + 16, 0, 1234);
4584 assert3u(peek(newaddr + 16, 0), ==, 1234);
4585 kmem_free_guard(map, addr, ptoa(N), KMF_NONE, guard);
4586 kmem_test_assert_map(map, N + 3, 1);
4587 }
4588 if (addr != newaddr) {
4589 for (int pg = 0; pg < N - GL; pg++) {
4590 assert_faults(addr, pg);
4591 }
4592 }
4593 for (int pg = N - GL; pg < N + 3 - GL; pg++) {
4594 assert3u(peek(newaddr, pg), ==, 0);
4595 }
4596 for (int pg = N + 3 - GL; pg < N + 3; pg++) {
4597 assert_faults(newaddr, pg);
4598 }
4599 addr = newaddr;
4600
4601
4602 /*
4603 * Shrink to N - 2 pages
4604 */
4605 newaddr = kmem_realloc_guard(map, addr, ptoa(N + 3), ptoa(N - 2),
4606 kind | KMR_ZERO, guard).kmr_address;
4607 assert3u(map->size, ==, ptoa(N - 2));
4608 assert3u(newaddr, ==, addr);
4609 kmem_test_assert_map(map, N - 2, 1);
4610
4611 for (int pg = 0; pg < N - 2 - GL; pg++) {
4612 assert3u(peek(addr, pg), ==, 42 + pg);
4613 }
4614 for (int pg = N - 2 - GL; pg < N + 3; pg++) {
4615 assert_faults(addr, pg);
4616 }
4617
4618 kmem_free_guard(map, addr, ptoa(N - 2), KMF_NONE, guard);
4619 kmem_test_assert_map(map, 0, 0);
4620 }
4621
4622 static int
4623 kmem_basic_test(__unused int64_t in, int64_t *out)
4624 {
4625 mach_vm_offset_t addr;
4626 vm_map_t map;
4627
4628 printf("%s: test running\n", __func__);
4629
4630 map = kmem_suballoc(kernel_map, &addr, 64U << 20,
4631 VM_MAP_CREATE_DEFAULT, VM_FLAGS_ANYWHERE,
4632 KMS_NOFAIL | KMS_DATA, VM_KERN_MEMORY_DIAG).kmr_submap;
4633
4634 printf("%s: kmem_alloc ...\n", __func__);
4635 kmem_alloc_basic_test(map);
4636 printf("%s: PASS\n", __func__);
4637
4638 printf("%s: kmem_realloc (KMR_KOBJECT | KMR_FREEOLD) ...\n", __func__);
4639 kmem_realloc_basic_test(map, KMR_KOBJECT | KMR_FREEOLD);
4640 printf("%s: PASS\n", __func__);
4641
4642 printf("%s: kmem_realloc (KMR_FREEOLD) ...\n", __func__);
4643 kmem_realloc_basic_test(map, KMR_FREEOLD);
4644 printf("%s: PASS\n", __func__);
4645
4646 printf("%s: kmem_realloc (KMR_KOBJECT | KMR_FREEOLD | KMR_GUARD_FIRST) ...\n", __func__);
4647 kmem_realloc_basic_test(map, KMR_KOBJECT | KMR_FREEOLD | KMR_GUARD_FIRST);
4648 printf("%s: PASS\n", __func__);
4649
4650 printf("%s: kmem_realloc (KMR_KOBJECT | KMR_FREEOLD | KMR_GUARD_LAST) ...\n", __func__);
4651 kmem_realloc_basic_test(map, KMR_KOBJECT | KMR_FREEOLD | KMR_GUARD_LAST);
4652 printf("%s: PASS\n", __func__);
4653
4654 printf("%s: kmem_realloc (KMR_KOBJECT | KMR_FREEOLD | KMR_GUARD_FIRST | KMR_GUARD_LAST) ...\n", __func__);
4655 kmem_realloc_basic_test(map, KMR_KOBJECT | KMR_FREEOLD | KMR_GUARD_FIRST | KMR_GUARD_LAST);
4656 printf("%s: PASS\n", __func__);
4657
4658 printf("%s: kmem_realloc (KMR_FREEOLD | KMR_GUARD_FIRST) ...\n", __func__);
4659 kmem_realloc_basic_test(map, KMR_FREEOLD | KMR_GUARD_FIRST);
4660 printf("%s: PASS\n", __func__);
4661
4662 printf("%s: kmem_realloc (KMR_FREEOLD | KMR_GUARD_LAST) ...\n", __func__);
4663 kmem_realloc_basic_test(map, KMR_FREEOLD | KMR_GUARD_LAST);
4664 printf("%s: PASS\n", __func__);
4665
4666 printf("%s: kmem_realloc (KMR_FREEOLD | KMR_GUARD_FIRST | KMR_GUARD_LAST) ...\n", __func__);
4667 kmem_realloc_basic_test(map, KMR_FREEOLD | KMR_GUARD_FIRST | KMR_GUARD_LAST);
4668 printf("%s: PASS\n", __func__);
4669
4670 /* using KMR_DATA signals to test the non-atomic realloc path */
4671 printf("%s: kmem_realloc (KMR_DATA | KMR_FREEOLD) ...\n", __func__);
4672 kmem_realloc_basic_test(map, KMR_DATA | KMR_FREEOLD);
4673 printf("%s: PASS\n", __func__);
4674
4675 printf("%s: kmem_realloc (KMR_DATA) ...\n", __func__);
4676 kmem_realloc_basic_test(map, KMR_DATA);
4677 printf("%s: PASS\n", __func__);
4678
4679 kmem_free_guard(kernel_map, addr, 64U << 20, KMF_NONE, KMEM_GUARD_SUBMAP);
4680 vm_map_deallocate(map);
4681
4682 printf("%s: test passed\n", __func__);
4683 *out = 1;
4684 return 0;
4685 }
4686 SYSCTL_TEST_REGISTER(kmem_basic, kmem_basic_test);
4687
4688 static void
4689 kmem_test_get_size_idx_for_chunks(uint32_t chunks)
4690 {
4691 __assert_only uint32_t idx = kmem_get_size_idx_for_chunks(chunks);
4692
4693 assert(chunks >= kmem_size_array[idx].ks_num_chunk);
4694 }
4695
4696 __attribute__((noinline))
4697 static void
4698 kmem_test_get_size_idx_for_all_chunks()
4699 {
4700 for (uint32_t i = 0; i < KMEM_NUM_SIZECLASS; i++) {
4701 uint32_t chunks = kmem_size_array[i].ks_num_chunk;
4702
4703 if (chunks != 1) {
4704 kmem_test_get_size_idx_for_chunks(chunks - 1);
4705 }
4706 kmem_test_get_size_idx_for_chunks(chunks);
4707 kmem_test_get_size_idx_for_chunks(chunks + 1);
4708 }
4709 }
4710
4711 static int
4712 kmem_guard_obj_test(__unused int64_t in, int64_t *out)
4713 {
4714 printf("%s: test running\n", __func__);
4715
4716 printf("%s: kmem_get_size_idx_for_chunks\n", __func__);
4717 kmem_test_get_size_idx_for_all_chunks();
4718 printf("%s: PASS\n", __func__);
4719
4720 printf("%s: test passed\n", __func__);
4721 *out = 1;
4722 return 0;
4723 }
4724 SYSCTL_TEST_REGISTER(kmem_guard_obj, kmem_guard_obj_test);
4725 #endif /* MACH_ASSERT */
4726