/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>

#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/memory_object.h>
#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <mach/vm32_map_server.h>
#include <mach/mach_host.h>
#include <mach/host_priv.h>
#include <mach/upl.h>

#include <kern/ledger.h>
#include <kern/host.h>

#include <device/device_port.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_fault.h>
#include <vm/vm_fault_internal.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/vm_protos.h>
#include <vm/vm_memtag.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_iokit.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_shared_region_xnu.h>
#include <vm/vm_far.h>
#include <vm/vm_upl.h>

#include <kern/zalloc.h>
#include <kern/zalloc_internal.h>

#include <sys/code_signing.h>
#include <sys/errno.h> /* for the sysctl tests */

#include <tests/xnupost.h> /* for testing-related functions and macros */

#if HAS_MTE
#include <arm_acle.h>
#endif /* HAS_MTE */

extern ledger_template_t        task_ledger_template;

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);

#define VM_TEST_COLLAPSE_COMPRESSOR             0
#define VM_TEST_WIRE_AND_EXTRACT                0
#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC        0
#if __arm64__
#define VM_TEST_KERNEL_OBJECT_FAULT             0
#endif /* __arm64__ */
#define VM_TEST_DEVICE_PAGER_TRANSPOSE          (DEVELOPMENT || DEBUG)

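/*
 * The VM_TEST_* macros above are compile-time switches.  Tests defined to
 * 0 are disabled by default and only built when manually flipped to 1
 * (e.g. #define VM_TEST_WIRE_AND_EXTRACT 1); VM_TEST_DEVICE_PAGER_TRANSPOSE
 * is built on DEVELOPMENT and DEBUG kernels.  A disabled test collapses to
 * an empty macro in the #else branch of its #if block, so vm_tests() below
 * can invoke every test unconditionally.
 */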
#if VM_TEST_COLLAPSE_COMPRESSOR
extern boolean_t vm_object_collapse_compressor_allowed;
#include <IOKit/IOLib.h>
static void
vm_test_collapse_compressor(void)
{
	vm_object_size_t        backing_size, top_size;
	vm_object_t             backing_object, top_object;
	vm_map_offset_t         backing_offset, top_offset;
	unsigned char           *backing_address, *top_address;
	kern_return_t           kr;

	printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");

	/* create backing object */
	backing_size = 15 * PAGE_SIZE;
	backing_object = vm_object_allocate(backing_size, kernel_map->serial_id);
	assert(backing_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
	    backing_object);
	/* map backing object */
	backing_offset = 0;
	kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(),
	    backing_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	backing_address = (unsigned char *) backing_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped backing object %p at 0x%llx\n",
	    backing_object, (uint64_t) backing_offset);
	/* populate with pages to be compressed in backing object */
	backing_address[0x1 * PAGE_SIZE] = 0xB1;
	backing_address[0x4 * PAGE_SIZE] = 0xB4;
	backing_address[0x7 * PAGE_SIZE] = 0xB7;
	backing_address[0xa * PAGE_SIZE] = 0xBA;
	backing_address[0xd * PAGE_SIZE] = 0xBD;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "backing_object %p\n", backing_object);
	/* compress backing object */
	vm_object_pageout(backing_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
	    backing_object);
	/* wait for all the pages to be gone */
	while (*(volatile int *)&backing_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
	    backing_object);
	/* populate with pages to be resident in backing object */
	backing_address[0x0 * PAGE_SIZE] = 0xB0;
	backing_address[0x3 * PAGE_SIZE] = 0xB3;
	backing_address[0x6 * PAGE_SIZE] = 0xB6;
	backing_address[0x9 * PAGE_SIZE] = 0xB9;
	backing_address[0xc * PAGE_SIZE] = 0xBC;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "backing_object %p\n", backing_object);
	/* leave the other pages absent */
	/* mess with the paging_offset of the backing_object */
	assert(backing_object->paging_offset == 0);
	backing_object->paging_offset = 3 * PAGE_SIZE;

	/* create top object */
	top_size = 9 * PAGE_SIZE;
	top_object = vm_object_allocate(top_size, backing_object->vmo_provenance);
	assert(top_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
	    top_object);
	/* map top object */
	top_offset = 0;
	kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(),
	    top_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	top_address = (unsigned char *) top_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped top object %p at 0x%llx\n",
	    top_object, (uint64_t) top_offset);
	/* populate with pages to be compressed in top object */
	top_address[0x3 * PAGE_SIZE] = 0xA3;
	top_address[0x4 * PAGE_SIZE] = 0xA4;
	top_address[0x5 * PAGE_SIZE] = 0xA5;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "top_object %p\n", top_object);
	/* compress top object */
	vm_object_pageout(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
	    top_object);
	/* wait for all the pages to be gone */
	while (*(volatile int *)&top_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
	    top_object);
	/* populate with pages to be resident in top object */
	top_address[0x0 * PAGE_SIZE] = 0xA0;
	top_address[0x1 * PAGE_SIZE] = 0xA1;
	top_address[0x2 * PAGE_SIZE] = 0xA2;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "top_object %p\n", top_object);
	/* leave the other pages absent */

	/* link the 2 objects */
	vm_object_reference(backing_object);
	top_object->shadow = backing_object;
	top_object->vo_shadow_offset = 3 * PAGE_SIZE;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
	    top_object, backing_object);

	/* unmap backing object */
	vm_map_remove(kernel_map,
	    backing_offset,
	    backing_offset + backing_size,
	    VM_MAP_REMOVE_NO_FLAGS);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "unmapped backing_object %p [0x%llx:0x%llx]\n",
	    backing_object,
	    (uint64_t) backing_offset,
	    (uint64_t) (backing_offset + backing_size));

	/* collapse */
	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
	vm_object_lock(top_object);
	vm_object_collapse(top_object, 0, FALSE);
	vm_object_unlock(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);

	/* did it work? */
	if (top_object->shadow != VM_OBJECT_NULL) {
		printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
		printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		if (vm_object_collapse_compressor_allowed) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
		}
	} else {
		/* check the contents of the mapping */
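		/*
		 * Expected bytes, derived from the setup above: top pages
		 * 0-2 are resident (0xA0-0xA2) and pages 3-5 are compressed
		 * (0xA3-0xA5).  Pages 6-8 are absent in the top object, so
		 * the fault drops to the backing object at vo_shadow_offset
		 * (3 pages): page 6 hits resident backing page 9 (0xB9);
		 * page 7 hits backing page 10, which is not resident but
		 * whose compressor slot (shifted another 3 pages by
		 * paging_offset) holds 0xBD; page 8 hits backing page 11,
		 * absent everywhere, so it zero-fills.
		 */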
		unsigned char expect[9] =
		{ 0xA0, 0xA1, 0xA2,             /* resident in top */
		  0xA3, 0xA4, 0xA5,             /* compressed in top */
		  0xB9,         /* resident in backing + shadow_offset */
		  0xBD,         /* compressed in backing + shadow_offset + paging_offset */
		  0x00 };                       /* absent in both */
		unsigned char actual[9];
		unsigned int i, errors;

		errors = 0;
		for (i = 0; i < sizeof(actual); i++) {
			actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
			if (actual[i] != expect[i]) {
				errors++;
			}
		}
		printf("VM_TEST_COLLAPSE_COMPRESSOR: "
		    "actual [%x %x %x %x %x %x %x %x %x] "
		    "expect [%x %x %x %x %x %x %x %x %x] "
		    "%d errors\n",
		    actual[0], actual[1], actual[2], actual[3],
		    actual[4], actual[5], actual[6], actual[7],
		    actual[8],
		    expect[0], expect[1], expect[2], expect[3],
		    expect[4], expect[5], expect[6], expect[7],
		    expect[8],
		    errors);
		if (errors) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
		} else {
			printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
		}
	}
}
#else /* VM_TEST_COLLAPSE_COMPRESSOR */
#define vm_test_collapse_compressor()
#endif /* VM_TEST_COLLAPSE_COMPRESSOR */

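/*
 * vm_test_wire_and_extract() builds a pageable "user" map backed by its own
 * pmap, shares a 64KB allocation from it into a second map via
 * mach_vm_remap(), then wires every page through the remapping with
 * vm_map_wire_and_extract().  The physical page extracted while wiring must
 * match what vm_map_get_phys_page() reports for the source mapping, proving
 * that wiring through an alias resolves to the same physical page.
 */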
#if VM_TEST_WIRE_AND_EXTRACT
extern ppnum_t vm_map_get_phys_page(vm_map_t map,
    vm_offset_t offset);
static void
vm_test_wire_and_extract(void)
{
	ledger_t                ledger;
	vm_map_t                user_map, wire_map;
	mach_vm_address_t       user_addr, wire_addr;
	mach_vm_size_t          user_size, wire_size;
	mach_vm_offset_t        cur_offset;
	vm_prot_t               cur_prot, max_prot;
	ppnum_t                 user_ppnum, wire_ppnum;
	kern_return_t           kr;

	ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);
	pmap_t user_pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
	assert(user_pmap);
	user_map = vm_map_create_options(user_pmap,
	    0x100000000ULL,
	    0x200000000ULL,
	    VM_MAP_CREATE_PAGEABLE);
	wire_map = vm_map_create_options(NULL,
	    0x100000000ULL,
	    0x200000000ULL,
	    VM_MAP_CREATE_PAGEABLE);
	user_addr = 0;
	user_size = 0x10000;
	kr = mach_vm_allocate(user_map,
	    &user_addr,
	    user_size,
	    VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);
	wire_addr = 0;
	wire_size = user_size;
	kr = mach_vm_remap(wire_map,
	    &wire_addr,
	    wire_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    user_map,
	    user_addr,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_NONE);
	assert(kr == KERN_SUCCESS);
	for (cur_offset = 0;
	    cur_offset < wire_size;
	    cur_offset += PAGE_SIZE) {
		kr = vm_map_wire_and_extract(wire_map,
		    wire_addr + cur_offset,
		    VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
		    TRUE,
		    &wire_ppnum);
		assert(kr == KERN_SUCCESS);
		user_ppnum = vm_map_get_phys_page(user_map,
		    user_addr + cur_offset);
		printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
		    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
		    kr,
		    user_map, user_addr + cur_offset, user_ppnum,
		    wire_map, wire_addr + cur_offset, wire_ppnum);
		if (kr != KERN_SUCCESS ||
		    wire_ppnum == 0 ||
		    wire_ppnum != user_ppnum) {
			panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
		}
	}
	cur_offset -= PAGE_SIZE;
	kr = vm_map_wire_and_extract(wire_map,
	    wire_addr + cur_offset,
	    VM_PROT_DEFAULT,
	    TRUE,
	    &wire_ppnum);
	assert(kr == KERN_SUCCESS);
	printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
	    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
	    kr,
	    user_map, user_addr + cur_offset, user_ppnum,
	    wire_map, wire_addr + cur_offset, wire_ppnum);
	if (kr != KERN_SUCCESS ||
	    wire_ppnum == 0 ||
	    wire_ppnum != user_ppnum) {
		panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
	}

	printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
#else /* VM_TEST_WIRE_AND_EXTRACT */
#define vm_test_wire_and_extract()
#endif /* VM_TEST_WIRE_AND_EXTRACT */

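/*
 * vm_test_page_wire_overflow_panic() wires the same page in a loop until
 * its fixed-width wire_count wraps around.  The expected outcome is that
 * vm_page_wire() itself panics on the overflow; reaching the explicit
 * panic() at the end of this function (i.e. wire_count silently wrapped
 * back to 0) means the overflow check is broken.
 */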
#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
static void
vm_test_page_wire_overflow_panic(void)
{
	vm_object_t object;
	vm_page_t page;

	printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");

	object = vm_object_allocate(PAGE_SIZE, VM_MAP_SERIAL_NONE);
	while ((page = vm_page_grab()) == VM_PAGE_NULL) {
		VM_PAGE_WAIT();
	}
	vm_object_lock(object);
	vm_page_insert(page, object, 0);
	vm_page_lock_queues();
	do {
		vm_page_wire(page, 1, FALSE);
	} while (page->wire_count != 0);
	vm_page_unlock_queues();
	vm_object_unlock(object);
	panic("FBDP(%p,%p): wire_count overflow not detected",
	    object, page);
}
#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
#define vm_test_page_wire_overflow_panic()
#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */

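/*
 * vm_test_kernel_object_fault() allocates a kernel stack with guard pages
 * at both ends, then asks copyinframe() to read a frame starting at the
 * first guard page.  The copy must fail cleanly (nonzero return) instead
 * of taking an unhandled fault on the kernel object; a zero return means
 * the guarded access was wrongly satisfied.
 */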
#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
static void
vm_test_kernel_object_fault(void)
{
	vm_offset_t stack;
	uintptr_t frameb[2];
	int ret;

	kmem_alloc(kernel_map, &stack,
	    kernel_stack_size + ptoa(2),
	    KMA_NOFAIL | KMA_KSTACK | KMA_KOBJECT |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST,
	    VM_KERN_MEMORY_STACK);

	ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
	if (ret != 0) {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
	} else {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
	}

	kmem_free_guard(kernel_map, stack, kernel_stack_size + ptoa(2),
	    KMF_GUARD_FIRST | KMF_GUARD_LAST, KMEM_GUARD_NONE);
	stack = 0;
}
#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
#define vm_test_kernel_object_fault()
#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */

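/*
 * vm_test_device_pager_transpose() creates an anonymous VM object and a
 * device-pager-backed object of the same size, blocks access on both, and
 * calls vm_object_transpose() to swap their contents.  The reference counts
 * and "named" bits observed before the transpose (1/unnamed for the
 * anonymous object, 2/named for the device object) must have traded places
 * afterwards, which is what the asserts below verify.
 */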
#if VM_TEST_DEVICE_PAGER_TRANSPOSE
static void
vm_test_device_pager_transpose(void)
{
	memory_object_t device_pager;
	vm_object_t     anon_object, device_object;
	vm_size_t       size;
	vm_map_offset_t device_mapping;
	kern_return_t   kr;

	size = 3 * PAGE_SIZE;
	anon_object = vm_object_allocate(size, kernel_map->serial_id);
	assert(anon_object != VM_OBJECT_NULL);
	device_pager = device_pager_setup(NULL, 0, size, 0);
	assert(device_pager != NULL);
	device_object = memory_object_to_vm_object(device_pager);
	assert(device_object != VM_OBJECT_NULL);
#if 0
	/*
	 * Can't actually map this, since another thread might do a
	 * vm_map_enter() that gets coalesced into this object, which
	 * would cause the test to fail.
	 */
	vm_map_offset_t anon_mapping = 0;
	kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
#endif
	device_mapping = 0;
	kr = mach_vm_map_kernel(kernel_map,
	    vm_sanitize_wrap_addr_ref(&device_mapping),
	    size,
	    0,
	    VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(),
	    (void *)device_pager,
	    0,
	    FALSE,
	    VM_PROT_DEFAULT,
	    VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	memory_object_deallocate(device_pager);

	vm_object_lock(anon_object);
	vm_object_activity_begin(anon_object);
	anon_object->blocked_access = TRUE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_begin(device_object);
	device_object->blocked_access = TRUE;
	vm_object_unlock(device_object);

	assert(os_ref_get_count_raw(&anon_object->ref_count) == 1);
	assert(!anon_object->named);
	assert(os_ref_get_count_raw(&device_object->ref_count) == 2);
	assert(device_object->named);

	kr = vm_object_transpose(device_object, anon_object, size);
	assert(kr == KERN_SUCCESS);

	vm_object_lock(anon_object);
	vm_object_activity_end(anon_object);
	anon_object->blocked_access = FALSE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_end(device_object);
	device_object->blocked_access = FALSE;
	vm_object_unlock(device_object);

	assert(os_ref_get_count_raw(&anon_object->ref_count) == 2);
	assert(anon_object->named);
#if 0
	kr = vm_deallocate(kernel_map, anon_mapping, size);
	assert(kr == KERN_SUCCESS);
#endif
	assert(os_ref_get_count_raw(&device_object->ref_count) == 1);
	assert(!device_object->named);
	kr = vm_deallocate(kernel_map, device_mapping, size);
	assert(kr == KERN_SUCCESS);

	printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
#define vm_test_device_pager_transpose()
#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */

extern kern_return_t vm_allocate_external(vm_map_t        map,
    vm_offset_t     *addr,
    vm_size_t       size,
    int             flags);
extern kern_return_t vm_remap_external(vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_t                src_map,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance);
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
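/*
 * vm_test_4k() exercises a 4K-page map on a kernel whose native page size
 * is larger (16K): back-to-back allocations must land at consecutive
 * 4K-aligned addresses, protections and faults must behave with 4K
 * granularity, and remapping between the 4K map and the 16K kernel_map
 * must only succeed when the sharing boundaries can be expressed in both
 * page sizes (misaligned sharing is expected to fail cleanly).
 */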
extern int debug4k_panic_on_misaligned_sharing;
void vm_test_4k(void);
void
vm_test_4k(void)
{
	pmap_t test_pmap;
	vm_map_t test_map;
	kern_return_t kr;
	vm_address_t expected_addr;
	vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr;
	vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr;
	vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size;
	vm_size_t alloc5_size, remap_src_size;
	vm_address_t fault_addr;
	vm_prot_t cur_prot, max_prot;
	int saved_debug4k_panic_on_misaligned_sharing;

	printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__);
	test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
	assert(test_pmap != NULL);
	test_map = vm_map_create_options(test_pmap,
	    MACH_VM_MIN_ADDRESS,
	    MACH_VM_MAX_ADDRESS,
	    VM_MAP_CREATE_PAGEABLE);
	assert(test_map != VM_MAP_NULL);
	vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT);
	printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map));

	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_deallocate(test_map, alloc1_addr, alloc1_size);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);

	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	alloc2_addr = 0;
	alloc2_size = 3 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size);
	kr = vm_allocate_external(test_map,
	    &alloc2_addr,
	    alloc2_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr);
	expected_addr += alloc2_size;

	alloc3_addr = 0;
	alloc3_size = 18 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size);
	kr = vm_allocate_external(test_map,
	    &alloc3_addr,
	    alloc3_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
	expected_addr += alloc3_size;

	alloc4_addr = 0;
	alloc4_size = 1 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size);
	kr = vm_allocate_external(test_map,
	    &alloc4_addr,
	    alloc4_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc4_addr);
	expected_addr += alloc4_size;

	printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE));
	kr = vm_protect(test_map,
	    alloc2_addr,
	    (1UL * FOURK_PAGE_SIZE),
	    FALSE,
	    VM_PROT_READ);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);

	for (fault_addr = alloc1_addr;
	    fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE);
	    fault_addr += FOURK_PAGE_SIZE) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		if (fault_addr == alloc2_addr) {
			assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_PROTECTION_FAILURE);
			printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr);
			kr = vm_fault(test_map,
			    fault_addr,
			    VM_PROT_READ,
			    FALSE,
			    VM_KERN_MEMORY_NONE,
			    THREAD_UNINT,
			    NULL,
			    0);
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
			printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		} else if (fault_addr >= alloc4_addr + alloc4_size) {
			assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS);
		} else {
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
		}
	}

	alloc5_addr = 0;
	alloc5_size = 7 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size);
	kr = vm_allocate_external(test_map,
	    &alloc5_addr,
	    alloc5_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr);
	expected_addr += alloc5_size;

	dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_MASK);
	dealloc_addr += FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE);
	kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);

	remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_MASK);
	remap_src_addr += FOURK_PAGE_SIZE;
	remap_src_size = 2 * FOURK_PAGE_SIZE;
	remap_dst_addr = 0;
	printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr);
	kr = vm_remap_external(test_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE,
	    test_map,
	    remap_src_addr,
	    FALSE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr);
	expected_addr += remap_src_size;

	for (fault_addr = remap_dst_addr;
	    fault_addr < remap_dst_addr + remap_src_size;
	    fault_addr += 4096) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
	}

	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	printf("VM_TEST_4K:\n");
	saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing;
	debug4k_panic_on_misaligned_sharing = 0;
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
	debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing;

	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
#if 000
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#else /* 000 */
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#endif /* 000 */


#if 00
	printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset);
	vm_map_remove(test_map, test_map->min_offset, test_map->max_offset);
#endif

	printf("VM_TEST_4K: PASS\n\n\n\n");
}
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */

#if MACH_ASSERT
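/*
 * The first case below: the copy map was extracted from a 16K map, and the
 * requested range starts at offset 0xfff with size 0x1002, i.e. bytes
 * 0xfff..0x2000 inclusive.  Re-expressed in the 4K target map's page size,
 * that range touches 4K pages 0 through 2, so the adjusted copy is
 * expected to be exactly 0x3000 bytes with nothing overmapped or trimmed.
 */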
static void
vm_test_map_copy_adjust_to_target_one(
	vm_map_copy_t copy_map,
	vm_map_t target_map)
{
	kern_return_t kr;
	vm_map_copy_t target_copy;
	vm_map_offset_t overmap_start, overmap_end, trimmed_start;

	target_copy = VM_MAP_COPY_NULL;
	/* size is 2 (4k) pages but range covers 3 pages */
	kr = vm_map_copy_adjust_to_target(copy_map,
	    0x0 + 0xfff,
	    0x1002,
	    target_map,
	    FALSE,
	    &target_copy,
	    &overmap_start,
	    &overmap_end,
	    &trimmed_start);
	assert(kr == KERN_SUCCESS);
	assert(overmap_start == 0);
	assert(overmap_end == 0);
	assert(trimmed_start == 0);
	assertf(target_copy->size == 0x3000,
	    "target_copy %p size 0x%llx\n",
	    target_copy, (uint64_t)target_copy->size);
	vm_map_copy_discard(target_copy);

	/* 1. adjust_to_target() for bad offset -> error */
	/* 2. adjust_to_target() for bad size -> error */
	/* 3. adjust_to_target() for the whole thing -> unchanged */
	/* 4. adjust_to_target() to trim start by less than 1 page */
	/* 5. adjust_to_target() to trim end by less than 1 page */
	/* 6. adjust_to_target() to trim start and end by less than 1 page */
	/* 7. adjust_to_target() to trim start by more than 1 page */
	/* 8. adjust_to_target() to trim end by more than 1 page */
	/* 9. adjust_to_target() to trim start and end by more than 1 page */
	/* 10. adjust_to_target() to trim start by more than 1 entry */
	/* 11. adjust_to_target() to trim end by more than 1 entry */
	/* 12. adjust_to_target() to trim start and end by more than 1 entry */
	/* 13. adjust_to_target() to trim start and end down to 1 entry */
}

static void
vm_test_map_copy_adjust_to_target(void)
{
	kern_return_t kr;
	vm_map_t map4k, map16k;
	vm_object_t obj1, obj2, obj3, obj4;
	vm_map_offset_t addr4k, addr16k;
	vm_map_size_t size4k, size16k;
	vm_map_copy_t copy4k, copy16k;
	vm_prot_t curprot, maxprot;
	vm_map_kernel_flags_t vmk_flags;

	/* create a 4k map */
	map4k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
	    VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(map4k, 12);

	/* create a 16k map */
	map16k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
	    VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(map16k, 14);

	/* create 4 VM objects */
	obj1 = vm_object_allocate(0x100000, map4k->serial_id);
	obj2 = vm_object_allocate(0x100000, map4k->serial_id);
	obj3 = vm_object_allocate(0x100000, map4k->serial_id);
	obj4 = vm_object_allocate(0x100000, map4k->serial_id);

	/* map objects in 4k map */
	vm_object_reference(obj1);
	addr4k = 0x1000;
	size4k = 0x3000;
	kr = vm_map_enter(map4k, &addr4k, size4k, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(), obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	assert(addr4k == 0x1000);

	/* map objects in 16k map */
	vm_object_reference(obj1);
	addr16k = 0x4000;
	size16k = 0x8000;
	kr = vm_map_enter(map16k, &addr16k, size16k, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(), obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	assert(addr16k == 0x4000);

	/* test for <rdar://60959809> */
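	/*
	 * The sizes asserted below follow from the page rounding: the
	 * entry covers bytes 0x6fff..0x8000 of the 16K map, which rounds
	 * out to 16K pages 0x4000..0xc000, so the memory entry reports
	 * 0x8000 - 0x2fff = 0x5001 bytes of data.  Mapping 0x1002 bytes of
	 * it into the 4K map touches three 4K pages, hence 0x3000.
	 */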
	ipc_port_t mem_entry;
	memory_object_size_t mem_entry_size;
	mach_vm_size_t map_size;
	mem_entry_size = 0x1002;
	mem_entry = IPC_PORT_NULL;
	kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff,
	    MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ,
	    &mem_entry, IPC_PORT_NULL);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size);
	map_size = 0;
	kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size);
	mach_memory_entry_port_release(mem_entry);

	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_remap_legacy_mode = true;

	/* create 4k copy map */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map4k, addr4k, 0x3000,
	    FALSE, &copy4k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, vmk_flags);
	assert(kr == KERN_SUCCESS);
	assert(copy4k->size == 0x3000);

	/* create 16k copy map */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map16k, addr16k, 0x4000,
	    FALSE, &copy16k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, vmk_flags);
	assert(kr == KERN_SUCCESS);
	assert(copy16k->size == 0x4000);

	/* test each combination */
//	vm_test_map_copy_adjust_to_target_one(copy4k, map4k);
//	vm_test_map_copy_adjust_to_target_one(copy16k, map16k);
//	vm_test_map_copy_adjust_to_target_one(copy4k, map16k);
	vm_test_map_copy_adjust_to_target_one(copy16k, map4k);

	/* assert 1 ref on 4k map */
	assert(os_ref_get_count_raw(&map4k->map_refcnt) == 1);
	/* release 4k map */
	vm_map_deallocate(map4k);
	/* assert 1 ref on 16k map */
	assert(os_ref_get_count_raw(&map16k->map_refcnt) == 1);
	/* release 16k map */
	vm_map_deallocate(map16k);
	/* deallocate copy maps */
	vm_map_copy_discard(copy4k);
	vm_map_copy_discard(copy16k);
	/* assert 1 ref on all VM objects */
	assert(os_ref_get_count_raw(&obj1->ref_count) == 1);
	assert(os_ref_get_count_raw(&obj2->ref_count) == 1);
	assert(os_ref_get_count_raw(&obj3->ref_count) == 1);
	assert(os_ref_get_count_raw(&obj4->ref_count) == 1);
	/* release all VM objects */
	vm_object_deallocate(obj1);
	vm_object_deallocate(obj2);
	vm_object_deallocate(obj3);
	vm_object_deallocate(obj4);
}
#endif /* MACH_ASSERT */

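/*
 * vm_test_per_mapping_internal_accounting() checks that the task's
 * "internal" ledger entry is charged per mapping rather than per physical
 * page: a page that ends up exposed through a device object mapped in the
 * kernel must not be billed to the user task, but faulting the page back
 * in through the task's own remapping must charge exactly one page, and
 * deallocating that remapping must bring the balance back to zero.
 */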
#if __arm64__ && !KASAN
__attribute__((noinline))
static void
vm_test_per_mapping_internal_accounting(void)
{
	ledger_t ledger;
	pmap_t user_pmap;
	vm_map_t user_map;
	kern_return_t kr;
	ledger_amount_t balance;
	mach_vm_address_t user_addr, user_remap;
	vm_map_offset_t device_addr;
	mach_vm_size_t user_size;
	vm_prot_t cur_prot, max_prot;
	upl_size_t upl_size;
	upl_t upl;
	unsigned int upl_count;
	upl_control_flags_t upl_flags;
	upl_page_info_t *pl;
	ppnum_t ppnum;
	vm_object_t device_object;
	vm_map_offset_t map_start, map_end;
	int pmap_flags;

	pmap_flags = 0;
	if (sizeof(vm_map_offset_t) == 4) {
		map_start = 0x10000000;
		map_end = 0x20000000;
	} else {
		map_start = 0x100000000ULL;
		map_end = 0x200000000ULL;
		pmap_flags |= PMAP_CREATE_64BIT;
	}
	/* create a user address space */
	ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);
	assert(ledger);
	user_pmap = pmap_create_options(ledger, 0, pmap_flags);
	assert(user_pmap);
	user_map = vm_map_create_options(user_pmap,
	    map_start,
	    map_end,
	    VM_MAP_CREATE_PAGEABLE);
	assert(user_map);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* allocate 1 page in that address space */
	user_addr = 0;
	user_size = PAGE_SIZE;
	kr = mach_vm_allocate(user_map,
	    &user_addr,
	    user_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* remap the original mapping */
	user_remap = 0;
	kr = mach_vm_remap(user_map,
	    &user_remap,
	    PAGE_SIZE,
	    0,
	    VM_FLAGS_ANYWHERE,
	    user_map,
	    user_addr,
	    FALSE,                /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* create a UPL from the original mapping */
	upl_size = PAGE_SIZE;
	upl = NULL;
	upl_count = 0;
	upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
	kr = vm_map_create_upl(user_map,
	    (vm_map_offset_t)user_addr,
	    &upl_size,
	    &upl,
	    NULL,
	    &upl_count,
	    &upl_flags,
	    VM_KERN_MEMORY_DIAG);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	assert(upl_page_present(pl, 0));
	ppnum = upl_phys_page(pl, 0);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	device_object = vm_object_allocate(PAGE_SIZE, kernel_map->serial_id);
	assert(device_object);
	vm_object_lock(device_object);
	VM_OBJECT_SET_PRIVATE(device_object, TRUE);
	VM_OBJECT_SET_PHYS_CONTIGUOUS(device_object, TRUE);
	device_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
	vm_object_unlock(device_object);
	kr = vm_object_populate_with_private(device_object, 0,
	    ppnum, PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);

	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* deallocate the original mapping */
	kr = mach_vm_deallocate(user_map, user_addr, PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* map the device_object in the kernel */
	device_addr = 0;
	vm_object_reference(device_object);
	kr = vm_map_enter(kernel_map,
	    &device_addr,
	    PAGE_SIZE,
	    0,
	    VM_MAP_KERNEL_FLAGS_DATA_SHARED_ANYWHERE(),
	    device_object,
	    0,
	    FALSE,               /* copy */
	    VM_PROT_DEFAULT,
	    VM_PROT_DEFAULT,
	    VM_INHERIT_NONE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* access the device pager mapping */
	*(char *)device_addr = 'x';
	printf("%s:%d 0x%llx: 0x%x\n", __FUNCTION__, __LINE__, (uint64_t)device_addr, *(uint32_t *)device_addr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* fault in the remap addr */
	kr = vm_fault(user_map, (vm_map_offset_t)user_remap, VM_PROT_READ,
	    FALSE, 0, TRUE, NULL, 0);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == PAGE_SIZE, "balance=0x%llx", balance);
	/* deallocate remapping */
	kr = mach_vm_deallocate(user_map, user_remap, PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* TODO: cleanup... */
	printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
}
#endif /* __arm64__ && !KASAN */

static void
vm_test_kernel_tag_accounting_kma(kma_flags_t base, kma_flags_t bit)
{
	vm_tag_t tag = VM_KERN_MEMORY_REASON; /* unused during POST */
	uint64_t init_size = vm_tag_get_size(tag);
	__assert_only uint64_t final_size = init_size + PAGE_SIZE;
	vm_address_t  address;
	kern_return_t kr;

	/*
	 * Test the matrix of:
	 *  - born with or without bit
	 *  - bit flipped or not
	 *  - dies with or without bit
	 */
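	/*
	 * Concretely, for each iteration i: bit 0 of i selects whether the
	 * allocation is born with "bit" set (KMA_VAONLY or KMA_PAGEABLE),
	 * and bit 1 selects whether it should die with "bit" set.  When
	 * the two differ, the state is flipped mid-life by populating or
	 * depopulating (KMA_VAONLY) or by wiring or unwiring
	 * (KMA_PAGEABLE), and the tag's accounted size must track whether
	 * physical pages are currently charged.
	 */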
	for (uint32_t i = 0; i < 4; i++) {
		kma_flags_t flags1 = base | ((i & 1) ? bit : KMA_NONE);
		kma_flags_t flags2 = base | ((i & 2) ? bit : KMA_NONE);

		kr = kmem_alloc(kernel_map, &address, PAGE_SIZE, flags1, tag);
		assert3u(kr, ==, KERN_SUCCESS);

		if (flags1 & (KMA_VAONLY | KMA_PAGEABLE)) {
			assert3u(init_size, ==, vm_tag_get_size(tag));
		} else {
			assert3u(final_size, ==, vm_tag_get_size(tag));
		}

		if ((flags1 ^ flags2) == KMA_VAONLY) {
			if (flags1 & KMA_VAONLY) {
				kernel_memory_populate(address, PAGE_SIZE,
				    KMA_KOBJECT | KMA_NOFAIL, tag);
			} else {
				kernel_memory_depopulate(address, PAGE_SIZE,
				    KMA_KOBJECT, tag);
			}
		}

		if ((flags1 ^ flags2) == KMA_PAGEABLE) {
			if (flags1 & KMA_PAGEABLE) {
				kr = vm_map_wire_kernel(kernel_map,
				    address, address + PAGE_SIZE,
				    VM_PROT_DEFAULT, tag, false);
				assert3u(kr, ==, KERN_SUCCESS);
			} else {
				kr = vm_map_unwire(kernel_map,
				    address, address + PAGE_SIZE, false);
				assert3u(kr, ==, KERN_SUCCESS);
			}
		}

		if (flags2 & (KMA_VAONLY | KMA_PAGEABLE)) {
			assert3u(init_size, ==, vm_tag_get_size(tag));
		} else {
			assert3u(final_size, ==, vm_tag_get_size(tag));
		}

		kmem_free(kernel_map, address, PAGE_SIZE);
		assert3u(init_size, ==, vm_tag_get_size(tag));
	}
}

__attribute__((noinline))
static void
vm_test_kernel_tag_accounting(void)
{
	printf("%s: test running\n", __func__);

	printf("%s: account (KMA_KOBJECT + populate)...\n", __func__);
	vm_test_kernel_tag_accounting_kma(KMA_KOBJECT, KMA_VAONLY);
	printf("%s:     PASS\n", __func__);

	printf("%s: account (regular object + wiring)...\n", __func__);
	vm_test_kernel_tag_accounting_kma(KMA_NONE, KMA_PAGEABLE);
	printf("%s:     PASS\n", __func__);

	printf("%s: test passed\n", __func__);

#undef if_bit
}

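/*
 * vm_test_collapse_overflow() guards against a 32-bit truncation in the
 * collapse/bypass logic: with a 0x400000000000-byte object, the page count
 * (size >> PAGE_SHIFT) truncates to 0 when narrowed to an int.  A bypass
 * decision based on the truncated count could wrongly conclude the backing
 * object is empty, so the test first verifies that a backing object which
 * still holds a page is NOT bypassed, then that it IS bypassed once the
 * page is removed.
 */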
__attribute__((noinline))
static void
vm_test_collapse_overflow(void)
{
	vm_object_t object, backing_object;
	vm_object_size_t size;
	vm_page_t m;

	/* create an object for which (int)(size>>PAGE_SHIFT) = 0 */
	size = 0x400000000000ULL;
	assert((int)(size >> PAGE_SHIFT) == 0);
	backing_object = vm_object_allocate(size + PAGE_SIZE, VM_MAP_SERIAL_NONE);
	assert(backing_object);
	vm_object_reference(backing_object);
	/* insert a page */
	m = VM_PAGE_NULL;
	while (m == VM_PAGE_NULL) {
		m = vm_page_grab();
		if (m == VM_PAGE_NULL) {
			VM_PAGE_WAIT();
		}
	}
	assert(m);
	vm_object_lock(backing_object);
	vm_page_insert(m, backing_object, 0);
	vm_object_unlock(backing_object);
	/* make it back another object */
	object = vm_object_allocate(size, VM_MAP_SERIAL_NONE);
	assert(object);
	vm_object_reference(object);
	object->shadow = backing_object;
	vm_object_reference(backing_object);
	/* trigger a bypass */
	vm_object_lock(object);
	vm_object_collapse(object, 0, TRUE);
	/* check that it did not bypass the backing object */
	if (object->shadow != backing_object) {
		panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
	}
	vm_object_unlock(object);

	/* remove the page from the backing object */
	vm_object_lock(backing_object);
	vm_page_remove(m, TRUE);
	vm_object_unlock(backing_object);
	/* trigger a bypass */
	vm_object_lock(object);
	vm_object_collapse(object, 0, TRUE);
	/* check that it did bypass the backing object */
	if (object->shadow == backing_object) {
		panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
	}
	vm_page_insert(m, object, 0);
	vm_object_unlock(object);

	/* cleanup */
	vm_object_deallocate(object);
	/* "backing_object" already lost its reference during the bypass */
//	vm_object_deallocate(backing_object);

	printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
}

__attribute__((noinline))
static void
vm_test_physical_size_overflow(void)
{
	vm_map_address_t start;
	mach_vm_size_t size;
	kern_return_t kr;
	mach_vm_size_t phys_size;
	bool fail;
	int failures = 0;

	/* size == 0 */
	start = 0x100000;
	size = 0x0;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_SUCCESS || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	/* plain wraparound */
	start = 0x100000;
	size = 0xffffffffffffffff - 0x10000;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	/* wraparound after rounding */
	start = 0xffffffffffffff00;
	size = 0xf0;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	/* wraparound to start after rounding */
	start = 0x100000;
	size = 0xffffffffffffffff;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	if (failures) {
		panic("%s: FAIL (failures=%d)", __FUNCTION__, failures);
	}
	printf("%s: PASS\n", __FUNCTION__);
}

#define PTR_UPPER_SHIFT 60
#define PTR_TAG_SHIFT 56
#define PTR_BITS_MASK (((1ULL << PTR_TAG_SHIFT) - 1) | (0xfULL << PTR_UPPER_SHIFT))

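/*
 * PTR_BITS_MASK keeps the low 56 address bits plus the top nibble, i.e.
 * everything except the 4-bit memory tag at PTR_TAG_SHIFT.  The test below
 * plants an arbitrary tag (0x5) in an address and expects
 * vm_memtag_canonicalize() to restore the canonical tag bits for the given
 * map: all-ones (0xf) when canonicalizing against the kernel map, zero
 * against a user map.
 */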
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
static inline vm_map_t
create_map(mach_vm_address_t map_start, mach_vm_address_t map_end);

static inline void
cleanup_map(vm_map_t *map);
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */

__attribute__((noinline))
static void
vm_test_address_canonicalization(void)
{
#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
	kern_return_t kr;
	mach_vm_address_t kernel_addr, user_addr;
	mach_vm_address_t canonicalized_addr;
	mach_vm_address_t intended_result;
	vm_address_t const tag = 0x5;
	T_SETUPBEGIN;
	T_LOG("%s: Allocating an address in the kernel map", __func__);
	kr = mach_vm_allocate(kernel_map, &kernel_addr, PAGE_SIZE, VM_FLAGS_ANYWHERE);
	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "mach_vm_allocate in kernel map");
	T_LOG("%s: Allocated kernel addr: 0x%llx", __func__, kernel_addr);
	mach_vm_address_t const tagged_kernel_addr = (kernel_addr & PTR_BITS_MASK) |
	    (tag << PTR_TAG_SHIFT);
	T_LOG("%s: Tagged kernel address: 0x%llx", __func__, tagged_kernel_addr);

	/* Create userland VM map and vm allocate an address from there */
	vm_map_t user_map = create_map(MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS);
	T_ASSERT_NOTNULL(user_map, "VM map creation");
	kr = mach_vm_allocate(user_map, &user_addr, PAGE_SIZE, VM_FLAGS_ANYWHERE);
	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "mach_vm_allocate in user map");
	T_LOG("%s: Allocated user address: 0x%llx", __func__, user_addr);
	mach_vm_address_t const tagged_user_addr = (user_addr & PTR_BITS_MASK) |
	    (tag << PTR_TAG_SHIFT);
	T_LOG("%s: Tagged user address: 0x%llx", __func__, tagged_user_addr);
	T_SETUPEND;

	T_BEGIN("VM address canonicalization test");
	/* canonicalize kernel address with kernel map */
	intended_result = kernel_addr;
	canonicalized_addr = (mach_vm_address_t)vm_memtag_canonicalize(kernel_map, tagged_kernel_addr);
	T_EXPECT_EQ_ULLONG(canonicalized_addr, intended_result,
	    "kernel address with kernel map: canonicalized kernel addr: 0x%llx, intended addr: 0x%llx",
	    canonicalized_addr, intended_result);

	/* canonicalize kernel address with user map */
	intended_result = (kernel_addr & PTR_BITS_MASK) | ((mach_vm_address_t)0x0 << PTR_TAG_SHIFT);
	canonicalized_addr = (mach_vm_address_t)vm_memtag_canonicalize(user_map, tagged_kernel_addr);
	T_EXPECT_EQ_ULLONG(canonicalized_addr, intended_result,
	    "kernel address with user map: canonicalized kernel addr: 0x%llx, intended addr: 0x%llx",
	    canonicalized_addr, intended_result);

	/* canonicalize user address with kernel map */
	intended_result = (user_addr & PTR_BITS_MASK) | ((mach_vm_address_t)0xf << PTR_TAG_SHIFT);
	canonicalized_addr = (mach_vm_address_t)vm_memtag_canonicalize(kernel_map, tagged_user_addr);
	T_EXPECT_EQ_ULLONG(canonicalized_addr, intended_result,
	    "user address with kernel map: canonicalized user addr: 0x%llx, intended addr: 0x%llx",
	    canonicalized_addr, intended_result);

	/* canonicalize user address with user map */
	intended_result = user_addr;
	canonicalized_addr = (mach_vm_address_t)vm_memtag_canonicalize(user_map, tagged_user_addr);
	T_EXPECT_EQ_ULLONG(canonicalized_addr, intended_result,
	    "user address with user map: canonicalized user addr: 0x%llx, intended addr: 0x%llx",
	    canonicalized_addr, intended_result);
	cleanup_map(&user_map);
#else /* !HAS_MTE && !HAS_MTE_EMULATION_SHIMS */
	T_SKIP("System not designed to support this test, skipping...");
#endif /* !HAS_MTE && !HAS_MTE_EMULATION_SHIMS */
}


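/*
 * vm_tests() is the entry point for this file's tests run by the xnupost
 * harness (xnupost.h is included above for the harness macros); it simply
 * invokes every test that is compiled in, the disabled ones having been
 * reduced to empty macros.
 */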
1432 kern_return_t
vm_tests(void)1433 vm_tests(void)
1434 {
1435 	kern_return_t kr = KERN_SUCCESS;
1436 
1437 	/* Avoid VM panics because some of our test vm_maps don't have a pmap. */
1438 	thread_test_context_t ctx CLEANUP_THREAD_TEST_CONTEXT = {
1439 		.test_option_vm_map_allow_null_pmap = true,
1440 	};
1441 	thread_set_test_context(&ctx);
1442 
1443 	vm_test_collapse_compressor();
1444 	vm_test_wire_and_extract();
1445 	vm_test_page_wire_overflow_panic();
1446 	vm_test_kernel_object_fault();
1447 	vm_test_device_pager_transpose();
1448 #if MACH_ASSERT
1449 	vm_test_map_copy_adjust_to_target();
1450 #endif /* MACH_ASSERT */
1451 #if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
1452 	vm_test_4k();
1453 #endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
1454 #if __arm64__ && !KASAN
1455 	vm_test_per_mapping_internal_accounting();
1456 #endif /* __arm64__ && !KASAN */
1457 	vm_test_kernel_tag_accounting();
1458 	vm_test_collapse_overflow();
1459 	vm_test_physical_size_overflow();
1460 	vm_test_address_canonicalization();
1461 
1462 	return kr;
1463 }
1464 
1465 static inline vm_map_t
create_map(mach_vm_address_t map_start,mach_vm_address_t map_end)1466 create_map(mach_vm_address_t map_start, mach_vm_address_t map_end)
1467 {
1468 	ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1469 	pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
1470 	assert(pmap);
1471 	ledger_dereference(ledger);  // now retained by pmap
1472 	vm_map_t map = vm_map_create_options(pmap, map_start, map_end, VM_MAP_CREATE_PAGEABLE); /* map_end is typically vm_compute_max_offset() */
1473 	assert(map);
1474 
1475 #if CONFIG_SPTM
1476 	/* Ensure the map and its pmap share the same serial ID */
1477 	if (map->serial_id != pmap->associated_vm_map_serial_id) {
1478 		panic("Expected a map and its pmap to have exactly the same serial");
1479 	}
1480 #endif /* CONFIG_SPTM */
1481 
1482 	return map;
1483 }
1484 
1485 static inline void
1486 cleanup_map(vm_map_t *map)
1487 {
1488 	assert(*map);
1489 	kern_return_t kr = vm_map_terminate(*map);
1490 	assert(kr == 0);
1491 	vm_map_deallocate(*map);  // also destroys pmap
1492 }
1493 
1494 kern_return_t
1495 mach_vm_remap_new_external(
1496 	vm_map_t                target_map,
1497 	mach_vm_offset_ut      *address,
1498 	mach_vm_size_ut         size,
1499 	mach_vm_offset_ut       mask,
1500 	int                     flags,
1501 	mach_port_t             src_tport,
1502 	mach_vm_offset_ut       memory_address,
1503 	boolean_t               copy,
1504 	vm_prot_ut             *cur_protection_u,
1505 	vm_prot_ut             *max_protection_u,
1506 	vm_inherit_ut           inheritance);
1507 kern_return_t
1508 vm_remap_new_external(
1509 	vm_map_t                target_map,
1510 	vm_offset_ut           *address,
1511 	vm_size_ut              size,
1512 	vm_offset_ut            mask,
1513 	int                     flags,
1514 	mach_port_t             src_tport,
1515 	vm_offset_ut            memory_address,
1516 	boolean_t               copy,
1517 	vm_prot_ut             *cur_protection,
1518 	vm_prot_ut             *max_protection,
1519 	vm_inherit_ut           inheritance);
1520 kern_return_t
1521 mach_vm_remap_external(
1522 	vm_map_t                target_map,
1523 	mach_vm_offset_ut      *address,
1524 	mach_vm_size_ut         size,
1525 	mach_vm_offset_ut       mask,
1526 	int                     flags,
1527 	vm_map_t                src_map,
1528 	mach_vm_offset_ut       memory_address,
1529 	boolean_t               copy,
1530 	vm_prot_ut             *cur_protection,
1531 	vm_prot_ut             *max_protection,
1532 	vm_inherit_ut           inheritance);
1533 kern_return_t
1534 mach_vm_map_external(
1535 	vm_map_t                target_map,
1536 	mach_vm_offset_ut      *address,
1537 	mach_vm_size_ut         initial_size,
1538 	mach_vm_offset_ut       mask,
1539 	int                     flags,
1540 	ipc_port_t              port,
1541 	memory_object_offset_ut offset,
1542 	boolean_t               copy,
1543 	vm_prot_ut              cur_protection,
1544 	vm_prot_ut              max_protection,
1545 	vm_inherit_ut           inheritance);
1546 kern_return_t
1547 mach_vm_wire_external(
1548 	host_priv_t             host_priv,
1549 	vm_map_t                map,
1550 	mach_vm_address_ut      start,
1551 	mach_vm_size_ut         size,
1552 	vm_prot_ut              access);
1553 kern_return_t
1554 mach_vm_purgable_control_external(
1555 	mach_port_t             target_tport,
1556 	mach_vm_offset_ut       address_u,
1557 	vm_purgable_t           control,
1558 	int                    *state);
1559 kern_return_t
1560 vm_purgable_control_external(
1561 	mach_port_t             target_tport,
1562 	vm_offset_ut            address,
1563 	vm_purgable_t           control,
1564 	int                     *state);
1565 
1566 static int
1567 vm_map_null_tests(__unused int64_t in, int64_t *out)
1568 {
1569 	kern_return_t kr;
1570 
1571 	mach_vm_address_t alloced_addr, throwaway_addr;
1572 	mach_vm_address_ut throwaway_addr_ut;
1573 	vm_address_t vm_throwaway_addr;
1574 	vm_address_ut vm_throwaway_addr_ut;
1575 	vm32_address_ut alloced_addr32, throwaway_addr32_u;
1576 	mach_vm_size_t throwaway_size, size_16kb, read_overwrite_data_size;
1577 	vm_size_t vm_size, vm_read_overwrite_data_size, vm_throwaway_size;
1578 	vm_size_ut throwaway_size_ut;
1579 	vm32_size_t data_size32, size32_16kb;
1580 	vm32_size_ut data_size32_u, throwaway_size32_u;
1581 	mach_msg_type_number_t read_data_size;
1582 	mach_port_t mem_entry_result;
1583 	pointer_t read_data;
1584 	pointer_ut read_data_u;
1585 	vm_prot_t prot_default;
1586 	vm_prot_ut prot_allexec_u, prot_default_ut;
1587 	vm_map_t map64, map32;
1588 	vm_machine_attribute_val_t vm_throwaway_attr_val;
1589 	vm_region_extended_info_data_t vm_throwaway_region_extended_info;
1590 	vm_region_recurse_info_t vm_throwaway_region_recurse_info;
1591 	vm_region_recurse_info_64_t vm_throwaway_region_recurse_info_64;
1592 	int throwaway_state;
1593 	uint32_t throwaway_depth;
1594 	vm_page_info_t page_info;
1595 
1596 	page_info = 0;
1597 	throwaway_state = VM_PURGABLE_STATE_MAX;
1598 	vm_throwaway_region_recurse_info_64 = 0;
1599 	vm_throwaway_region_recurse_info = 0;
1600 	vm_throwaway_attr_val = MATTR_VAL_OFF;
1601 
1602 	map64 = create_map(0, vm_compute_max_offset(true));
1603 	map32 = create_map(0, vm_compute_max_offset(false));
1604 
1605 	prot_allexec_u = vm_sanitize_wrap_prot(VM_PROT_ALLEXEC);
1606 	prot_default_ut = vm_sanitize_wrap_prot(VM_PROT_DEFAULT);
1607 	prot_default = VM_PROT_DEFAULT;
1608 
1609 	size_16kb = 16 * 1024;
1610 	size32_16kb = (vm32_size_t) size_16kb;
1611 
1612 	/*
1613 	 * Allocate some memory in each map so we can pass valid-looking addresses to the calls below;
1614 	 * otherwise they might return early, before ever checking for VM_MAP_NULL.
1615 	 */
1616 	kr = mach_vm_allocate(map64, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
1617 	assert(kr == KERN_SUCCESS);
1618 	kr = vm32_vm_allocate(map32, &alloced_addr32, size32_16kb, VM_FLAGS_ANYWHERE);
1619 	assert(kr == KERN_SUCCESS);
1620 
1621 	/*
1622 	 * Call a bunch of MIG entrypoints with VM_MAP_NULL. The goal is to verify they check map != VM_MAP_NULL.
1623 	 * No particular return value is required, so we only assert that each call fails; mainly we verify no crash occurs.
1624 	 */
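	/*
	 * For reference, the guard each of these entrypoints is expected to
	 * implement is just an early bail-out; a minimal sketch (the exact
	 * error code varies by entrypoint, KERN_INVALID_ARGUMENT is typical):
	 *
	 *	if (map == VM_MAP_NULL) {
	 *		return KERN_INVALID_ARGUMENT;
	 *	}
	 */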
1625 	throwaway_size = size_16kb;
1626 	kr = _mach_make_memory_entry(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1627 	assert(kr != KERN_SUCCESS);
1628 	throwaway_size32_u = vm32_sanitize_wrap_size(size32_16kb);
1629 	kr = vm32_mach_make_memory_entry(VM_MAP_NULL, &throwaway_size32_u, alloced_addr32, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1630 	assert(kr != KERN_SUCCESS);
1631 	throwaway_size_ut = vm_sanitize_wrap_size(size_16kb);
1632 	kr = vm32_mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size_ut, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1633 	assert(kr != KERN_SUCCESS);
1634 	throwaway_size = size_16kb;
1635 	kr = mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1636 	assert(kr != KERN_SUCCESS);
1637 	vm_size = size_16kb;
1638 	kr = mach_make_memory_entry(VM_MAP_NULL, &vm_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1639 	assert(kr != KERN_SUCCESS);
1640 
1641 	kr = mach_memory_object_memory_entry(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
1642 	assert(kr != KERN_SUCCESS);
1643 	kr = mach_memory_object_memory_entry_64(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
1644 	assert(kr != KERN_SUCCESS);
1645 
1646 	throwaway_addr = alloced_addr;
1647 	kr = mach_vm_allocate(VM_MAP_NULL, &throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
1648 	assert(kr != KERN_SUCCESS);
1649 	throwaway_addr32_u = alloced_addr32;
1650 	kr = vm32_vm_allocate(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, VM_FLAGS_ANYWHERE);
1651 	assert(kr != KERN_SUCCESS);
1652 	kr = vm_allocate_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
1653 	assert(kr != KERN_SUCCESS);
1654 
1655 	kr = mach_vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
1656 	assert(kr != KERN_SUCCESS);
1657 	kr = vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
1658 	assert(kr != KERN_SUCCESS);
1659 	kr = vm32_vm_deallocate(VM_MAP_NULL, throwaway_addr32_u, size32_16kb);
1660 	assert(kr != KERN_SUCCESS);
1661 
1662 	kr = mach_vm_map(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1663 	assert(kr != KERN_SUCCESS);
1664 	kr = mach_vm_map_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1665 	assert(kr != KERN_SUCCESS);
1666 
1667 	vm_throwaway_addr = alloced_addr;
1668 	kr = vm_map(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1669 	assert(kr != KERN_SUCCESS);
1670 	kr = vm32_vm_map(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1671 	assert(kr != KERN_SUCCESS);
1672 	kr = vm32_vm_map_64(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1673 	assert(kr != KERN_SUCCESS);
1674 
1675 	kr = mach_vm_remap(map64, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1676 	assert(kr != KERN_SUCCESS);
1677 	kr = mach_vm_remap(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1678 	assert(kr != KERN_SUCCESS);
1679 	kr = mach_vm_remap_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1680 	assert(kr != KERN_SUCCESS);
1681 	kr = mach_vm_remap_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1682 	assert(kr != KERN_SUCCESS);
1683 	kr = vm_remap_external(map64, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1684 	assert(kr != KERN_SUCCESS);
1685 	kr = vm_remap_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1686 	assert(kr != KERN_SUCCESS);
1687 	kr = vm32_vm_remap(map32, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1688 	assert(kr != KERN_SUCCESS);
1689 	kr = vm32_vm_remap(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, map32, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1690 	assert(kr != KERN_SUCCESS);
1691 
1692 	kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1693 	assert(kr != KERN_SUCCESS);
1694 	kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1695 	assert(kr != KERN_SUCCESS);
1696 
1697 	kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
1698 	assert(kr != KERN_SUCCESS);
1699 	kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
1700 	assert(kr != KERN_SUCCESS);
1701 
1702 	kr = vm_remap_new_external(VM_MAP_NULL, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1703 	assert(kr != KERN_SUCCESS);
1704 	kr = vm_remap_new_external(map64, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1705 	assert(kr != KERN_SUCCESS);
1706 
1707 	kr = mach_vm_wire_external(host_priv_self(), VM_MAP_NULL, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
1708 	assert(kr != KERN_SUCCESS);
1709 	kr = mach_vm_wire_external(HOST_PRIV_NULL, map64, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
1710 	assert(kr != KERN_SUCCESS);
1711 
1712 	kr = vm_wire(host_priv_self(), VM_MAP_NULL, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
1713 	assert(kr != KERN_SUCCESS);
1714 	kr = vm_wire(HOST_PRIV_NULL, map64, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
1715 	assert(kr != KERN_SUCCESS);
1716 
1717 	kr = task_wire(VM_MAP_NULL, false);
1718 	assert(kr != KERN_SUCCESS);
1719 	kr = vm32_task_wire(VM_MAP_NULL, false);
1720 	assert(kr != KERN_SUCCESS);
1721 
1722 	kr = mach_vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
1723 	assert(kr != KERN_SUCCESS);
1724 	kr = vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
1725 	assert(kr != KERN_SUCCESS);
1726 	kr = vm32_vm_read(VM_MAP_NULL, alloced_addr32, size32_16kb, &read_data_u, &data_size32);
1727 	assert(kr != KERN_SUCCESS);
1728 
1729 	mach_vm_read_entry_t * mach_re = kalloc_type(mach_vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1730 	(*mach_re)[0].address = alloced_addr;
1731 	(*mach_re)[0].size = size_16kb;
1732 
1733 	vm_read_entry_t * re = kalloc_type(vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1734 	(*re)[0].address = alloced_addr;
1735 	(*re)[0].size = (vm_size_t) size_16kb;
1736 
1737 	vm32_read_entry_t * re_32 = kalloc_type(vm32_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1738 	(*re_32)[0].address = (vm32_address_t) alloced_addr;
1739 	(*re_32)[0].size = (vm32_size_t) size_16kb;
1740 
1741 	kr = mach_vm_read_list(VM_MAP_NULL, *mach_re, 1);
1742 	assert(kr != KERN_SUCCESS);
1743 	kr = vm_read_list(VM_MAP_NULL, *re, 1);
1744 	assert(kr != KERN_SUCCESS);
1745 	kr = vm32_vm_read_list(VM_MAP_NULL, *re_32, 1);
1746 	assert(kr != KERN_SUCCESS);
1747 
1748 	kfree_type(mach_vm_read_entry_t, mach_re);
1749 	kfree_type(vm_read_entry_t, re);
1750 	kfree_type(vm32_read_entry_t, re_32);
1751 
1752 	kr = mach_vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &read_overwrite_data_size);
1753 	assert(kr != KERN_SUCCESS);
1754 	kr = vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &vm_read_overwrite_data_size);
1755 	assert(kr != KERN_SUCCESS);
1756 	kr = vm32_vm_read_overwrite(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32, &data_size32_u);
1757 	assert(kr != KERN_SUCCESS);
1758 
1759 	kr = mach_vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
1760 	assert(kr != KERN_SUCCESS);
1761 	kr = vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
1762 	assert(kr != KERN_SUCCESS);
1763 	kr = vm32_vm_copy(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32);
1764 	assert(kr != KERN_SUCCESS);
1765 
1766 	kr = mach_vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
1767 	assert(kr != KERN_SUCCESS);
1768 	kr = vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
1769 	assert(kr != KERN_SUCCESS);
1770 	kr = vm32_vm_write(VM_MAP_NULL, alloced_addr32, alloced_addr, (mach_msg_type_number_t) size_16kb);
1771 	assert(kr != KERN_SUCCESS);
1772 
1773 	kr = mach_vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
1774 	assert(kr != KERN_SUCCESS);
1775 	kr = vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
1776 	assert(kr != KERN_SUCCESS);
1777 	kr = vm32_vm_inherit(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
1778 
1779 	kr = mach_vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
1780 	assert(kr != KERN_SUCCESS);
1781 	kr = vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
1782 	assert(kr != KERN_SUCCESS);
1783 	kr = vm32_vm_protect(VM_MAP_NULL, alloced_addr32, size32_16kb, FALSE, VM_PROT_DEFAULT);
1784 	assert(kr != KERN_SUCCESS);
1785 
1786 	kr = mach_vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
1787 	assert(kr != KERN_SUCCESS);
1788 	kr = vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
1789 	assert(kr != KERN_SUCCESS);
1790 	kr = vm32_vm_behavior_set(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_BEHAVIOR_DEFAULT);
1791 	assert(kr != KERN_SUCCESS);
1792 
1793 	kr = mach_vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
1794 	assert(kr != KERN_SUCCESS);
1795 	kr = vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
1796 	assert(kr != KERN_SUCCESS);
1797 	kr = vm32_vm_msync(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_SYNC_ASYNCHRONOUS);
1798 	assert(kr != KERN_SUCCESS);
1799 
1800 	kr = mach_vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1801 	assert(kr != KERN_SUCCESS);
1802 	kr = vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1803 	assert(kr != KERN_SUCCESS);
1804 	kr = vm32_vm_machine_attribute(VM_MAP_NULL, alloced_addr32, size32_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1805 	assert(kr != KERN_SUCCESS);
1806 
1807 	kr = mach_vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1808 	assert(kr != KERN_SUCCESS);
1809 	kr = vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1810 	assert(kr != KERN_SUCCESS);
1811 	kr = vm32_vm_purgable_control(VM_MAP_NULL, alloced_addr32, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1812 	assert(kr != KERN_SUCCESS);
1813 
1814 	kr = mach_vm_region(VM_MAP_NULL, &throwaway_addr, &throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1815 	assert(kr != KERN_SUCCESS);
1816 	kr = vm_region(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1817 	assert(kr != KERN_SUCCESS);
1818 	kr = vm_region_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1819 	assert(kr != KERN_SUCCESS);
1820 	kr = vm32_vm_region(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1821 	assert(kr != KERN_SUCCESS);
1822 	kr = vm32_vm_region_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1823 	assert(kr != KERN_SUCCESS);
1824 
1825 	kr = mach_vm_region_recurse(VM_MAP_NULL, &throwaway_addr, &throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1826 	assert(kr != KERN_SUCCESS);
1827 	kr = vm_region_recurse(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1828 	assert(kr != KERN_SUCCESS);
1829 	kr = vm_region_recurse_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
1830 	assert(kr != KERN_SUCCESS);
1831 	kr = vm32_vm_region_recurse(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1832 	assert(kr != KERN_SUCCESS);
1833 	kr = vm32_vm_region_recurse_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
1834 	assert(kr != KERN_SUCCESS);
1835 
1836 	kr = mach_vm_page_info(VM_MAP_NULL, alloced_addr, VM_PAGE_INFO_BASIC, page_info, &read_data_size);
1837 	assert(kr != KERN_SUCCESS);
1838 	kr = mach_vm_page_query(VM_MAP_NULL, alloced_addr, &throwaway_state, &throwaway_state);
1839 	assert(kr != KERN_SUCCESS);
1840 	kr = vm_map_page_query(VM_MAP_NULL, vm_throwaway_addr, &throwaway_state, &throwaway_state);
1841 	assert(kr != KERN_SUCCESS);
1842 	kr = vm32_vm_map_page_query(VM_MAP_NULL, throwaway_addr32_u, &throwaway_state, &throwaway_state);
1843 	assert(kr != KERN_SUCCESS);
1844 
1845 	/*
1846 	 * Clean up our allocations and maps
1847 	 */
1848 	kr = mach_vm_deallocate(map64, alloced_addr, size_16kb);
1849 	assert(kr == KERN_SUCCESS);
1850 	kr = vm32_vm_deallocate(map32, alloced_addr32, size32_16kb);
1851 	assert(kr == KERN_SUCCESS);
1852 
1853 	cleanup_map(&map64);
1854 	cleanup_map(&map32);
1855 
1856 	/*
1857 	 * If we made it this far without crashing, the test passes.
1858 	 */
1859 
1860 	*out = 1;
1861 	return 0;
1862 }
1863 SYSCTL_TEST_REGISTER(vm_map_null, vm_map_null_tests);
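
/*
 * Usage sketch (assuming the usual xnupost sysctl conventions on a
 * DEVELOPMENT/DEBUG kernel): a userspace harness drives a test registered
 * with SYSCTL_TEST_REGISTER() through the debug.test tree, roughly like:
 *
 *	int64_t in = 0, out = 0;
 *	size_t len = sizeof(out);
 *	sysctlbyname("debug.test.vm_map_null", &out, &len, &in, sizeof(in));
 *	assert(out == 1);
 */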
1864 
1865 #if HAS_MTE
1866 static unsigned int const MTE_GRANULE_SIZE = 16;
1867 
1868 static inline unsigned int
1869 extract_mte_tag(void *ptr)
1870 {
1871 	/* TODO: Eventually refactor this symbol entirely */
1872 	return vm_memtag_extract_tag((vm_offset_t)ptr);
1873 }
1874 #endif /* HAS_MTE */
1875 
1876 static int
1877 vm_map_copyio_test(__unused int64_t in, int64_t *out)
1878 {
1879 #if HAS_MTE
1880 	T_SETUPBEGIN;
1881 	uint64_t const test_buf_size = (32 * 1024) + 1; /* 32K + 1 */
1882 
1883 	/* Allocate a tagged kernel buffer to copy from */
1884 	vm_offset_t kern_addr;
1885 	kern_return_t kr = kmem_alloc(
1886 		kernel_map,
1887 		&kern_addr,
1888 		test_buf_size,
1889 		KMA_ZERO | KMA_TAG | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
1890 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kmem_alloc(KMA_TAG) - allocate an MTE enabled page");
1891 	char *tagged_ptr = (char *)kern_addr;
1892 	T_ASSERT_NOTNULL(tagged_ptr, "kmem_alloc(KMA_TAG) ptr not null");
1893 	unsigned int tag = extract_mte_tag(tagged_ptr);
1894 	T_LOG("Allocated ptr: %p, tag assigned: %x", tagged_ptr, tag);
1895 
1896 	/* Put some data in the kernel buffer */
1897 	for (size_t i = 0; i < test_buf_size; ++i) {
1898 		tagged_ptr[i] = (char)i;
1899 	}
1900 	T_SETUPEND;
1901 
1902 	T_BEGIN("vm_map_copy test");
1903 	/* Do a vm_map_copyin from a kernel buffer */
1904 	vm_map_address_ut tagged_ptr_u;
1905 	vm_map_size_ut len_u;
1906 	vm_map_copy_t copy;
1907 	VM_SANITIZE_UT_SET(tagged_ptr_u, (vm_map_address_t)tagged_ptr);
1908 	VM_SANITIZE_UT_SET(len_u, msg_ool_size_small);
1909 	kr = vm_map_copyin(kernel_map, tagged_ptr_u, len_u, false, &copy);
1910 	T_EXPECT_EQ_INT(kr, KERN_SUCCESS, "vm_map_copyin on 32K");
1911 
1912 	/* Do a vm_map_copyout back into the kernel map */
1913 	vm_map_address_t dst_addr;
1914 	kr = vm_map_copyout(kernel_map, &dst_addr, copy);
1915 	T_EXPECT_EQ_INT(kr, KERN_SUCCESS, "vm_map_copyout");
1916 
1917 	/* Make sure we read back the same data */
1918 	char *dst_ptr = (char *)dst_addr;
1919 	T_LOG("dst_ptr: %p", dst_ptr);
1920 	int ret = memcmp(tagged_ptr, dst_ptr, msg_ool_size_small);
1921 	T_EXPECT_EQ_INT(0, ret, "memcmp");
1922 
1923 	/* Do a vm_map_copyin that's > msg_ool_size_small, should fail */
1924 	vm_map_size_ut len_large_u;
1925 	VM_SANITIZE_UT_SET(len_large_u, test_buf_size);
1926 	kr = vm_map_copyin(kernel_map, tagged_ptr_u, len_large_u, false, &copy);
1927 	T_EXPECT_EQ_INT(kr, KERN_NOT_SUPPORTED, "vm_map_copyin on 32K+1");
1928 
1929 	/* Clean up */
1930 	kmem_free(kernel_map, kern_addr, test_buf_size, KMF_TAG);
1931 	T_END;
1932 	*out = 1;
1933 #else /* !HAS_MTE */
1934 	/* Test is not supported */
1935 	*out = ENOTSUP;
1936 #endif /* HAS_MTE */
1937 	return 0;
1938 }
1939 SYSCTL_TEST_REGISTER(vm_map_copyio, vm_map_copyio_test);
1940 
1941 static int
1942 vm_page_relocate_test(__unused int64_t in, int64_t *out)
1943 {
1944 #if HAS_MTE
1945 	vm_map_t map = current_map();
1946 
1947 	/* `in` will contain the address of the memory we have written to */
1948 	vm_map_entry_t entry = NULL;
1949 	vm_map_offset_t tagged_addr = (vm_map_offset_t)in;
1950 	vm_map_offset_t canonical_addr = vm_memtag_canonicalize(map, tagged_addr);
1951 
1952 	vm_page_t m = VM_PAGE_NULL;
1953 	vm_object_t object = VM_OBJECT_NULL;
1954 	ppnum_t old_phys_page = 0;
1955 	kern_return_t kr;
1956 	while (true) {
1957 		if (!pmap_find_phys(map->pmap, canonical_addr)) {
1958 			/*
1959 			 * There's no physical page associated with the memory;
1960 			 * fault in the page holding the data we care about.
1961 			 */
1963 			kr = vm_fault(
1964 				map,
1965 				tagged_addr,
1966 				VM_PROT_WRITE,
1967 				FALSE, /* change_wiring */
1968 				VM_KERN_MEMORY_NONE,
1969 				THREAD_INTERRUPTIBLE, /* interruptible */
1970 				NULL, /* caller pmap */
1971 				0);
1972 			T_EXPECT_EQ_INT(kr, 0, "vm_fault");
1973 		}
1974 
1975 		/* Look up page */
1976 		vm_map_lock(map);
1977 		bool result = vm_map_lookup_entry(map, canonical_addr, &entry);
1978 		T_ASSERT_EQ_INT(result, true, "vm_map_lookup_entry");
1979 		object = VME_OBJECT(entry);
1980 		T_ASSERT_NOTNULL(object, "vm object should not be null");
1981 		vm_object_lock(object);
1982 		/* There shouldn't be a shadow chain for MTE objects */
1983 		T_ASSERT_EQ_INT(object->shadowed, FALSE, "vm object should not have a shadow");
1984 		m = vm_page_lookup(
1985 			object,
1986 			(VME_OFFSET(entry) + (canonical_addr - entry->vme_start)));
		if (m == VM_PAGE_NULL) {
			/* No resident page after all; drop the locks and retry */
			vm_object_unlock(object);
			vm_map_unlock(map);
			continue;
		}
		old_phys_page = VM_PAGE_GET_PHYS_PAGE(m);
		T_QUIET; T_ASSERT_NE_UINT(old_phys_page, 0, "physical page should not be 0");
		T_LOG("old physical page: 0x%x, vm_page_t: %p", old_phys_page, m);
		break;
1995 	}
1996 	vm_object_lock_assert_held(object);
1997 	int compressed_pages = 0;
1998 	vm_page_lock_queues();
1999 	kr = vm_page_relocate(m, &compressed_pages, VM_RELOCATE_REASON_CONTIGUOUS, NULL);
2000 	vm_page_unlock_queues();
2001 	T_EXPECT_EQ_INT(kr, 0, "vm_page_relocate");
2002 	vm_page_t new_m = vm_page_lookup(
2003 		object,
2004 		(VME_OFFSET(entry) + (canonical_addr - entry->vme_start)));
2005 	T_EXPECT_NOTNULL(new_m, "new VM page is not null");
2006 	ppnum_t new_phys_page = VM_PAGE_GET_PHYS_PAGE(new_m);
2007 	T_LOG("ppnum of relocated page: %u", new_phys_page);
2008 	T_EXPECT_NE_UINT(old_phys_page, new_phys_page,
2009 	    "old and new physical pages should be different");
2010 	vm_object_unlock(object);
2011 	vm_map_unlock(map);
2012 
2013 	/* There shouldn't be a PTE associated with addr at the moment */
2014 	ppnum_t phys_page = pmap_find_phys(map->pmap, canonical_addr);
2015 	T_EXPECT_EQ_UINT(phys_page, 0, "pmap_find_phys should return 0");
2016 
2017 	/* Kernel touches the page, faulting in a new page if one is not already resident */
2018 	char c = 'b';
2019 	int result = copyout((void *)&c, (user_addr_t)tagged_addr, sizeof(c));
2020 	T_EXPECT_EQ_INT(result, 0, "copyout %c to 0x%llx", c, tagged_addr);
2021 	c = 'c';
2022 	result = copyout((void *)&c, (user_addr_t)(canonical_addr + MTE_GRANULE_SIZE), sizeof(c));
2023 	T_EXPECT_EQ_INT(result, 0, "copyout %c to 0x%llx", c, (canonical_addr + MTE_GRANULE_SIZE));
2024 
2025 	/* There should be a physical page now */
2026 	phys_page = pmap_find_phys(map->pmap, canonical_addr);
2027 	T_EXPECT_NE_UINT(phys_page, 0,
2028 	    "there's a PTE for 0x%llx after writing to it, phys_page: 0x%x",
2029 	    canonical_addr, phys_page);
2030 	*out = 1;
2031 #else /* !HAS_MTE */
2032 	/* Test is not supported */
2033 	*out = ENOTSUP;
2034 #endif /* HAS_MTE */
2035 	return 0;
2036 }
2037 SYSCTL_TEST_REGISTER(vm_page_relocate, vm_page_relocate_test);
2038 
2039 #define PAGE_SHIFT_4K 12
2040 #define PAGE_SHIFT_16K 14
2041 static int
2042 vm_map_copy_entry_subrange_test(__unused int64_t in, int64_t *out)
2043 {
2044 	mach_vm_size_t size_4kb, size_16kb;
2045 	vm_map_t map_4k, map_16k;
2046 	mach_vm_address_t alloced_addr, mapped_addr;
2047 	mach_vm_size_t entry_size;
2048 	mach_port_t entry_handle;
2049 	mach_vm_size_t mapped_size;
2050 	vm_region_basic_info_data_64_t region_info;
2051 	mach_msg_type_number_t region_info_count;
2052 
2053 	kern_return_t kr;
2054 
2055 	size_4kb = 4 * 1024;
2056 	size_16kb = 16 * 1024;
2057 
2058 	map_4k = create_map(0, vm_compute_max_offset(true));
2059 	kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
	assert(kr == KERN_SUCCESS);
2060 	map_16k = create_map(0, vm_compute_max_offset(true));
2061 	kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);
	assert(kr == KERN_SUCCESS);
2062 
2063 	/*
2064 	 * Test mapping a portion of a copy entry from a 4k map to a 16k one.
2065 	 * The result size should be aligned to the destination's page size (16k).
2066 	 */
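	/*
	 * Rounding sketch: a 4k request against a 16k-page destination rounds
	 * up to one full destination page, i.e.
	 *
	 *	VM_MAP_ROUND_PAGE(size_4kb, SIXTEENK_PAGE_MASK) == size_16kb
	 *
	 * which is why the region check below expects the whole 16k entry.
	 */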
2067 	// Get a copy entry to map into the system
2068 	kr = mach_vm_allocate(map_4k, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
2069 	assert(kr == KERN_SUCCESS);
2070 
2071 	entry_size = size_16kb;
2072 	kr = mach_make_memory_entry_64(map_4k, &entry_size, alloced_addr,
2073 	    MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
2074 	    &entry_handle, MACH_PORT_NULL);
2075 	assert(kr == KERN_SUCCESS);
2076 	assert(entry_size == size_16kb);
2077 
2078 	// Attempt to map a portion of the entry into the 16k map
2079 	kr = mach_vm_map(map_16k, &mapped_addr, size_4kb, 0, VM_FLAGS_ANYWHERE,
2080 	    entry_handle, 0, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
2081 	    VM_INHERIT_DEFAULT);
2082 	assert(kr == KERN_SUCCESS);
2083 
2084 	// Ensure the entry is actually mapped in its entirety
2085 	region_info_count = VM_REGION_BASIC_INFO_COUNT_64;
2086 	kr = mach_vm_region(map_16k, &mapped_addr, &mapped_size, VM_REGION_BASIC_INFO_64,
2087 	    (vm_region_info_t) &region_info, &region_info_count, NULL);
2088 	assert(kr == KERN_SUCCESS);
2089 	assert(mapped_size == entry_size);
2090 
2091 	// Cleanup
2092 	mach_memory_entry_port_release(entry_handle);
2093 	kr = mach_vm_deallocate(map_16k, mapped_addr, size_16kb);
2094 	assert(kr == KERN_SUCCESS);
2095 	kr = mach_vm_deallocate(map_4k, alloced_addr, size_16kb);
2096 	assert(kr == KERN_SUCCESS);
2097 	cleanup_map(&map_4k);
2098 	cleanup_map(&map_16k);
2099 
2100 	*out = 1;
2101 	return 0;
2102 }
2103 SYSCTL_TEST_REGISTER(vm_map_copy_entry_subrange, vm_map_copy_entry_subrange_test);
2104 
2105 
2106 static int
2107 vm_memory_entry_map_size_null_test(__unused int64_t in, int64_t *out)
2108 {
2109 	mach_vm_size_t size_16kb, map_size;
2110 	vm_map_t map;
2111 
2112 	kern_return_t kr;
2113 
2114 	map = create_map(0, vm_compute_max_offset(true));
2115 	size_16kb = 16 * 1024;
2116 
2117 	map_size = 0xdeadbeef;
2118 	kr = mach_memory_entry_map_size(MACH_PORT_NULL, map, 0, size_16kb, &map_size);
2119 	assert(kr == KERN_INVALID_ARGUMENT);
2120 	assert(map_size == 0);
2121 
2122 	cleanup_map(&map);
2123 
2124 	*out = 1;
2125 	return 0;
2126 }
2127 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_null, vm_memory_entry_map_size_null_test);
2128 
2129 static int
2130 vm_memory_entry_map_size_overflow_tests(__unused int64_t in, int64_t *out)
2131 {
2132 	mach_vm_size_t size_16kb, entry_size, map_size;
2133 	vm_map_t map;
2134 	mach_port_t parent_handle, entry_handle;
2135 	mach_vm_address_t alloced_addr;
2136 	vm_map_offset_t entry_offset;
2137 	memory_object_offset_t maximum_offset;
2138 
2139 	kern_return_t kr;
2140 
2141 	size_16kb = 16 * 1024;
2142 	map = create_map(0, vm_compute_max_offset(true));
2143 	/*
2144 	 * (1) Attempt to overflow offset + mem_entry->offset
2145 	 */
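	/*
	 * The check being exercised is a wraparound test on the offset sum; a
	 * minimal sketch, assuming the usual os_add_overflow() helper from
	 * <os/overflow.h>:
	 *
	 *	memory_object_offset_t sum;
	 *	if (os_add_overflow(offset, mem_entry->offset, &sum)) {
	 *		return KERN_INVALID_ARGUMENT;
	 *	}
	 */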
2146 	// Setup - create an entry with nonzero offset
2147 	kr = mach_memory_object_memory_entry_64((host_t) 1, 1,
2148 	    size_16kb * 2, VM_PROT_DEFAULT, 0, &parent_handle);
2149 	assert(kr == KERN_SUCCESS);
2150 
2151 	entry_size = size_16kb;
2152 	kr = mach_make_memory_entry_64(map, &entry_size, size_16kb,
2153 	    VM_PROT_DEFAULT, &entry_handle, parent_handle);
2154 	assert(kr == KERN_SUCCESS);
2155 
2156 	// Pass in maximum offset to attempt overflow
2157 	maximum_offset = (memory_object_offset_t) -1;
2158 	kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
2159 	    &map_size);
2160 	assert(kr == KERN_INVALID_ARGUMENT);
2161 
2162 	// Cleanup
2163 	mach_memory_entry_port_release(parent_handle);
2164 	mach_memory_entry_port_release(entry_handle);
2165 
2166 	/*
2167 	 * (2) Attempt to overflow offset + mem_entry->data_offset
2168 	 */
2169 	// Setup - create an entry with nonzero data_offset
2170 	kr = mach_vm_allocate(map, &alloced_addr, 2 * size_16kb, VM_FLAGS_ANYWHERE);
2171 	assert(kr == KERN_SUCCESS);
2172 
2173 	entry_size = size_16kb;
2174 	entry_offset = alloced_addr + (size_16kb / 2);
2175 	kr = mach_make_memory_entry_64(map, &entry_size, entry_offset,
2176 	    MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
2177 	    &entry_handle, MACH_PORT_NULL);
2178 	assert(kr == KERN_SUCCESS);
2179 
2180 	// Pass in maximum offset to attempt overflow
2181 	kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
2182 	    &map_size);
2183 	assert(kr == KERN_INVALID_ARGUMENT);
2184 
2185 	// Cleanup
2186 	mach_memory_entry_port_release(entry_handle);
2187 	kr = mach_vm_deallocate(map, alloced_addr, 2 * size_16kb);
2188 	assert(kr == KERN_SUCCESS);
2189 	cleanup_map(&map);
2190 
2191 	*out = 1;
2192 	return 0;
2193 }
2194 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_overflow, vm_memory_entry_map_size_overflow_tests);
2195 
2196 static int
2197 vm_memory_entry_map_size_copy_tests(__unused int64_t in, int64_t *out)
2198 {
2199 	mach_vm_size_t size_2kb, size_4kb, size_16kb;
2200 	mach_vm_size_t entry_size_4k, entry_size_16k;
2201 	mach_vm_size_t map_size;
2202 	vm_map_t map_4k, map_16k;
2203 	mach_port_t entry_4k, entry_16k;
2204 	mach_vm_address_t alloced_addr_4k, alloced_addr_16k;
2205 
2206 	kern_return_t kr;
2207 
2208 	size_2kb = 2 * 1024;
2209 	size_4kb = 4 * 1024;
2210 	size_16kb = 16 * 1024;
2211 
2212 	/*
2213 	 * Setup - initialize maps and create copy entries for each
2214 	 */
2215 	// 4k map and entry
2216 	map_4k = create_map(0, vm_compute_max_offset(true));
2217 	kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
2218 	assert(kr == KERN_SUCCESS);
2219 
2220 	kr = mach_vm_allocate(map_4k, &alloced_addr_4k, size_16kb, VM_FLAGS_ANYWHERE);
2221 	assert(kr == KERN_SUCCESS);
2222 
2223 	entry_size_4k = size_16kb;
2224 	kr = mach_make_memory_entry_64(map_4k, &entry_size_4k, alloced_addr_4k,
2225 	    MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_4k, MACH_PORT_NULL);
2226 	assert(kr == KERN_SUCCESS);
2227 	assert(entry_size_4k == size_16kb);
2228 
2229 	// 16k map and entry
2230 	map_16k = create_map(0, vm_compute_max_offset(true));
2231 	kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);
2232 	assert(kr == KERN_SUCCESS);
2233 
2234 	kr = mach_vm_allocate(map_16k, &alloced_addr_16k, size_16kb, VM_FLAGS_ANYWHERE);
2235 	assert(kr == KERN_SUCCESS);
2236 
2237 	entry_size_16k = size_16kb;
2238 	kr = mach_make_memory_entry_64(map_16k, &entry_size_16k, alloced_addr_16k,
2239 	    MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_16k, MACH_PORT_NULL);
2240 	assert(kr == KERN_SUCCESS);
2241 	assert(entry_size_16k == size_16kb);
2242 
2243 	/*
2244 	 * (1) Test 4k map with 4k entry and 16k map with 16k entry. Page-aligned
2245 	 * ranges should have no size adjustment.
2246 	 */
2247 	for (mach_vm_size_t i = 1; i <= 4; i++) {
2248 		kr = mach_memory_entry_map_size(entry_4k, map_4k, 0, i * size_4kb, &map_size);
2249 		assert(kr == KERN_SUCCESS);
2250 		assert(map_size == (i * size_4kb));
2251 	}
2252 	kr = mach_memory_entry_map_size(entry_16k, map_16k, 0, size_16kb, &map_size);
2253 	assert(kr == KERN_SUCCESS);
2254 	assert(map_size == size_16kb);
2255 
2256 	/*
2257 	 * (2) Test 4k map with 16k entry. Since we have a 4k map, we should be able
2258 	 * to map a 4k range of the entry, but to map a 2k range we will need to map
2259 	 * a full 4k page.
2260 	 */
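	/*
	 * Worked example for the 2k case below: the smallest unit a 4k map can
	 * map is one 4k page, so a 2k request is reported as a full page,
	 * i.e. round_up(2048, 4096) == 4096.
	 */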
2261 	kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_16kb, &map_size);
2262 	assert(kr == KERN_SUCCESS);
2263 	assert(map_size == size_16kb);
2264 	kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_4kb, &map_size);
2265 	assert(kr == KERN_SUCCESS);
2266 	assert(map_size == size_4kb);
2267 	kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_2kb, &map_size);
2268 	assert(kr == KERN_SUCCESS);
2269 	assert(map_size == size_4kb);
2270 
2271 	/*
2272 	 * (3) Test 16k map with 4k entry. Since we have a 16k map, we will need to
2273 	 * map the whole 16kb memory entry even if a smaller range is requested.
2274 	 */
2275 	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_16kb, &map_size);
2276 	assert(kr == KERN_SUCCESS);
2277 	assert(map_size == size_16kb);
2278 	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_4kb, &map_size);
2279 	assert(kr == KERN_SUCCESS);
2280 	assert(map_size == size_16kb);
2281 	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_2kb, &map_size);
2282 	assert(kr == KERN_SUCCESS);
2283 	assert(map_size == size_16kb);
2284 
2285 	/*
2286 	 * (4) Detect error in the case where the size requested is too large.
2287 	 */
2288 	map_size = 0xdeadbeef;
2289 	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, 2 * size_16kb, &map_size);
2290 	assert(kr == KERN_INVALID_ARGUMENT);
2291 	assert(map_size == 0);
2292 
2293 	/*
2294 	 * Clean up memory entries, allocations, and maps
2295 	 */
2296 	mach_memory_entry_port_release(entry_4k);
2297 	mach_memory_entry_port_release(entry_16k);
2298 	kr = mach_vm_deallocate(map_4k, alloced_addr_4k, size_16kb);
2299 	assert(kr == KERN_SUCCESS);
2300 	kr = mach_vm_deallocate(map_16k, alloced_addr_16k, size_16kb);
2301 	assert(kr == KERN_SUCCESS);
2302 	cleanup_map(&map_4k);
2303 	cleanup_map(&map_16k);
2304 
2305 	*out = 1;
2306 	return 0;
2307 }
2308 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_copy, vm_memory_entry_map_size_copy_tests);
2309 
2310 static int
2311 vm_memory_entry_parent_submap_tests(__unused int64_t in, int64_t *out)
2312 {
2313 	vm_shared_region_t shared_region;
2314 	mach_port_t parent_handle, entry_handle;
2315 	vm_named_entry_t parent_entry;
2316 	mach_vm_size_t entry_size;
2317 	vm_prot_t vmflags;
2318 
2319 	kern_return_t kr;
2320 
2321 	/*
2322 	 * Use shared region to get a named_entry which refers to a submap
2323 	 */
2324 	shared_region = vm_shared_region_get(current_task());
2325 	parent_handle = shared_region->sr_mem_entry;
2326 	assert(parent_handle != NULL);
2327 	parent_entry = mach_memory_entry_from_port(parent_handle);
2328 	assert(parent_entry->is_sub_map);
2329 
2330 	/*
2331 	 * We should be able to create an entry using the submap entry as the parent
2332 	 */
2333 	entry_size = parent_entry->size;
2334 	vmflags = VM_PROT_DEFAULT;
2335 	kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
2336 	    &entry_handle, parent_handle);
2337 	assert(kr == KERN_SUCCESS);
2338 	mach_memory_entry_port_release(entry_handle);
2339 
2340 	/*
2341 	 * Should fail if using mach_make_memory_entry_mem_only since the parent
2342 	 * entry is not an object
2343 	 */
2344 	vmflags |= MAP_MEM_ONLY;
2345 	kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
2346 	    &entry_handle, parent_handle);
2347 	assert(kr == KERN_INVALID_ARGUMENT);
2348 
2349 	/*
2350 	 * Cleanup
2351 	 */
2352 	vm_shared_region_deallocate(shared_region);
2353 
2354 	*out = 1;
2355 	return 0;
2356 }
2357 SYSCTL_TEST_REGISTER(vm_memory_entry_parent_submap, vm_memory_entry_parent_submap_tests);
2358 
2359 static int
2360 vm_cpu_map_pageout_test(int64_t in, int64_t *out)
2361 {
2362 #if HAS_MTE
2363 	/*
2364 	 * Since we now allow untagged kernel mappings of tagged user data, we want
2365 	 * to be sure that the underlying physical page is always handled correctly.
2366 	 *
2367 	 * The following sequence may be of particular concern:
2368 	 * 1. We create & populate an MTE userspace mapping
2369 	 * 2. We create an untagged kernel mapping of the user tagged memory
2370 	 * 3. The page is paged out
2371 	 * 4. We fault in the kernel mapping (without first faulting on the user mapping)
2372 	 *
2373 	 * We want to be certain that in this case, we set the correct (MTE-enabled)
2374 	 * cache attributes on the underlying physical page when we fault on our
2375 	 * (non-MTE-enabled) kernel mapping.
2376 	 */
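	/*
	 * Note: the MTE-enabled cacheability lives with the physical page
	 * (tracked through the object's wimg_bits, VM_WIMG_MTE -- see
	 * create_mte_vm_object() later in this file), not with any individual
	 * mapping, which is why step 4 must restore it even though the kernel
	 * mapping itself is untagged.
	 */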
2377 
2378 	/* Get the tagged userspace mapping from the userspace side of the test */
2379 	struct {
2380 		mach_vm_size_t size;
2381 		char *ptr;
2382 		char value;
2383 	} args;
2384 	kern_return_t kr = copyin((user_addr_t)in, &args, sizeof(args));
2385 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "copyin arguments from userspace");
2386 
2387 	/*
2388 	 * Create an untagged kernel mapping of the user tagged memory.
2389 	 */
2390 	memory_object_size_ut size = vm_sanitize_wrap_size(args.size);
2391 	memory_object_offset_t offset = (memory_object_offset_t)vm_memtag_canonicalize_user((vm_map_address_t)args.ptr);
2392 	/*
2393 	 * This path is specifically intended for IOMD::map(), so we pretend to be
2394 	 * an IOKit caller to get the correct security policies.
2395 	 */
2396 	vm_named_entry_kernel_flags_t vmne_kflags = { .vmnekf_is_iokit = true };
2397 	ipc_port_t memory_entry;
2398 	kr = mach_make_memory_entry_internal(current_map(), &size, offset,
2399 	    MAP_MEM_VM_SHARE | VM_PROT_DEFAULT, vmne_kflags, &memory_entry,
2400 	    /* parent = */ MACH_PORT_NULL);
2401 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "make memory entry from tagged user memory");
2402 
2403 	mach_vm_offset_t kernel_address = 0;
2404 	kr = mach_vm_map_kernel(kernel_map, (mach_vm_offset_ut*)&kernel_address,
2405 	    args.size, /* mask = */ 0,
2406 	    /* IOMD mappings of user memory are bucketed into KMEM_RANGE_ID_DATA */
2407 	    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmkf_range_id = KMEM_RANGE_ID_DATA, .vmf_mte = true), memory_entry,
2408 	    /* offset = */ 0, /* copy = */ false, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
2409 	    VM_INHERIT_DEFAULT);
2410 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "remap userspace memory into kernel");
2411 
2412 	/* Validate that the mapping is correct and untagged: */
2413 	assert(extract_mte_tag((void*)kernel_address) == 0xF);
2414 	assert(extract_mte_tag(args.ptr) != 0xF);
2415 	char *kernel_ptr = (char*)kernel_address;
2416 	assert(kernel_ptr[0] == args.value);
2417 	kernel_ptr[0]++;
2418 
2419 	/* Force pageout */
2420 	kr = vm_map_behavior_set(kernel_map, kernel_address,
2421 	    kernel_address + args.size, VM_BEHAVIOR_PAGEOUT);
2422 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "force pageout of tagged mapping");
2423 
2424 	/*
2425 	 * Page in kernel mapping and validate cache attributes. Wire the memory
2426 	 * first to ensure the mapping doesn't go away while we're doing checks on
2427 	 * it.
2428 	 */
2429 	kr = vm_map_wire_kernel(kernel_map, kernel_address, kernel_address + args.size,
2430 	    VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_DIAG, false);
2431 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "wire user-tagged memory");
2432 	kernel_ptr[0]++;
2433 	ppnum_t pn = vm_map_get_phys_page(kernel_map, kernel_address);
2434 	assert(pn);
2435 	T_ASSERT(pmap_is_tagged_page(pn), "page has MTE cache attribute");
2436 	T_ASSERT(!pmap_is_tagged_mapping(vm_map_pmap(kernel_map), kernel_address), "kernel mapping is untagged");
2437 
2438 	/* Now, fault in the user mapping... */
2439 	kr = vm_fault(current_map(), offset, VM_PROT_READ | VM_PROT_WRITE,
2440 	    /* change_wiring */ FALSE, VM_KERN_MEMORY_NONE, THREAD_ABORTSAFE,
2441 	    /* caller_pmap */ NULL, /* caller_pmap_addr */ 0);
2442 
2443 	/* ... and make sure everything still looks good */
2444 	T_ASSERT(pmap_is_tagged_page(pn), "page retains MTE cache attribute");
2445 	T_ASSERT(!pmap_is_tagged_mapping(vm_map_pmap(kernel_map), kernel_address), "kernel mapping remains untagged");
2446 	T_ASSERT(pmap_is_tagged_mapping(vm_map_pmap(current_map()), offset), "user mapping is tagged");
2447 
2448 	/* Cleanup */
2449 	kr = vm_map_unwire(kernel_map, kernel_address, kernel_address + args.size, false);
2450 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "unwire user-tagged memory");
2451 	kr = mach_vm_deallocate(kernel_map, kernel_address, args.size);
2452 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "remove kernel mapping");
2453 	*out = 1;
2454 #else /* HAS_MTE */
2455 	/* Test is not supported */
2456 	(void)in;
2457 	*out = ENOTSUP;
2458 #endif /* HAS_MTE */
2459 	return 0;
2460 }
2461 SYSCTL_TEST_REGISTER(vm_cpu_map_pageout, vm_cpu_map_pageout_test);
2462 
2463 static int
2464 vm_get_wimg_mode(int64_t in, int64_t *out)
2465 {
2466 	mach_vm_offset_t addr = (mach_vm_offset_t)in;
2467 	vm_map_entry_t entry;
2468 	vm_map_t map = current_map();
2469 	vm_map_lock_read(map);
2470 	bool map_contains_addr = vm_map_lookup_entry(map, addr, &entry);
2471 	if (!map_contains_addr) {
2472 		vm_map_unlock_read(map);
2473 		return EINVAL;
2474 	}
2475 
2476 	if (entry->is_sub_map) {
2477 		vm_map_unlock_read(map);
2478 		return ENOTSUP;
2479 	}
2480 
2481 	*out = 0;
2482 	vm_object_t obj = VME_OBJECT(entry);
2483 	if (obj != VM_OBJECT_NULL) {
2484 		*out = obj->wimg_bits;
2485 	}
2486 
2487 	vm_map_unlock_read(map);
2488 	return 0;
2489 }
2490 SYSCTL_TEST_REGISTER(vm_get_wimg_mode, vm_get_wimg_mode);
2491 
2492 /*
2493  * Make sure copies from 4k->16k maps don't lead to address space holes
2494  */
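/*
 * Size sketch: alloc_size below is 36k, i.e. nine 4k pages, so once copied
 * into a 16k-page map it must round up to three 16k pages:
 *
 *	VM_MAP_ROUND_PAGE(36 * 1024, SIXTEENK_PAGE_MASK) == 48 * 1024
 *
 * The test asserts that the copy lands as a single entry of exactly that
 * size, with no hole left at the tail.
 */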
2495 static int
2496 vm_map_4k_16k_test(int64_t in, int64_t *out)
2497 {
2498 #if PMAP_CREATE_FORCE_4K_PAGES
2499 	const mach_vm_size_t alloc_size = (36 * 1024);
2500 	assert((alloc_size % FOURK_PAGE_SIZE) == 0);
2501 	assert((alloc_size % SIXTEENK_PAGE_SIZE) != 0);
2502 	assert(alloc_size > msg_ool_size_small); // avoid kernel buffer copy optimization
2503 
2504 	/* initialize maps */
2505 	pmap_t pmap_4k, pmap_16k;
2506 	vm_map_t map_4k, map_16k;
2507 	pmap_4k = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
2508 	assert(pmap_4k);
2509 	map_4k = vm_map_create_options(pmap_4k, MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS, VM_MAP_CREATE_PAGEABLE);
2510 	assert(map_4k != VM_MAP_NULL);
2511 	vm_map_set_page_shift(map_4k, FOURK_PAGE_SHIFT);
2512 
2513 	pmap_16k = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
2514 	assert(pmap_16k);
2515 	map_16k = vm_map_create_options(pmap_16k, MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS, VM_MAP_CREATE_PAGEABLE);
2516 	assert(map_16k != VM_MAP_NULL);
2517 	assert(VM_MAP_PAGE_SHIFT(map_16k) == SIXTEENK_PAGE_SHIFT);
2518 
2519 	/* create mappings in 4k map */
2520 	/* allocate space */
2521 	vm_address_t address_4k;
2522 	kern_return_t kr = vm_allocate_external(map_4k, &address_4k, alloc_size, VM_FLAGS_ANYWHERE);
2523 	assert3u(kr, ==, KERN_SUCCESS); /* reserve space for 4k entries in 4k map */
2524 
2525 	/* overwrite with a bunch of 4k entries */
2526 	for (mach_vm_address_t addr = address_4k; addr < (address_4k + alloc_size); addr += FOURK_PAGE_SIZE) {
2527 		/* give each 4k mapping its own 128MB object so adjacent entries can't be coalesced or simplified away */
2528 		vm_object_t object = vm_object_allocate(ANON_CHUNK_SIZE, map_4k->serial_id);
2529 		kr = vm_map_enter(map_4k, &addr, FOURK_PAGE_SIZE, /* mask */ 0,
2530 		    VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = TRUE), object, /* offset */ 0,
2531 		    /* copy */ false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
2532 		assert3u(kr, ==, KERN_SUCCESS); /* overwrite the 4k chunk at addr with its own entry */
2533 	}
2534 
2535 	/* set up vm_map_copy_t */
2536 	vm_map_copy_t copy;
2537 	kr = vm_map_copyin(map_4k, address_4k, alloc_size, true, &copy);
2538 	assert3u(kr, ==, KERN_SUCCESS); /* copyin from 4k map succeeds */
2539 
2540 	/* write out the vm_map_copy_t to the 16k map */
2541 	vm_address_t address_16k;
2542 	if (in == 0) {
2543 		/* vm_map_copyout */
2544 		vm_map_address_t tmp_address;
2545 		kr = vm_map_copyout(map_16k, &tmp_address, copy);
2546 		assert3u(kr, ==, KERN_SUCCESS); /* copyout into 16k map succeeds */
2547 		address_16k = (vm_address_t)tmp_address;
2548 	} else if (in == 1) {
2549 		/* vm_map_copy_overwrite */
2550 		/* reserve space */
2551 		kr = vm_allocate_external(map_16k, &address_16k, alloc_size, VM_FLAGS_ANYWHERE);
2552 		assert3u(kr, ==, KERN_SUCCESS); /* reserve space in 16k map succeeds */
2553 
2554 		/* do the overwrite */
2555 		kr = vm_map_copy_overwrite(map_16k, address_16k, copy, alloc_size,
2556 #if HAS_MTE
2557 		    false,
2558 #endif
2559 		    true);
2560 		assert3u(kr, ==, KERN_SUCCESS); /* copy_overwrite into 16k map succeds */
2561 		assert3u(kr, ==, KERN_SUCCESS); /* copy_overwrite into 16k map succeeds */
2562 		panic("invalid vm_map_4k_16k_test variant: %lld", in);
2563 	}
2564 
2565 	/* validate that everything is combined into one large 16k-aligned entry */
2566 	mach_vm_size_t expected_size = VM_MAP_ROUND_PAGE(alloc_size, SIXTEENK_PAGE_MASK);
2567 	vm_map_lock_read(map_16k);
2568 	vm_map_entry_t entry;
2569 	bool address_in_map = vm_map_lookup_entry(map_16k, address_16k, &entry);
2570 	assert(address_in_map); /* address_16k found in map_16k */
2571 	assert3u((entry->vme_end - entry->vme_start), ==, expected_size); /* 4k entries combined into a single 16k entry */
2572 	vm_map_unlock_read(map_16k);
2573 #else /* !PMAP_CREATE_FORCE_4K_PAGES */
2574 	(void)in;
2575 #endif /* !PMAP_CREATE_FORCE_4K_PAGES */
2576 	*out = 1;
2577 	return 0;
2578 }
2579 SYSCTL_TEST_REGISTER(vm_map_4k_16k, vm_map_4k_16k_test);
2580 
2581 static int
2582 vm_vector_upl_test(int64_t in, int64_t *out)
2583 {
2584 	extern upl_t vector_upl_create(vm_offset_t, uint32_t);
2585 	extern boolean_t vector_upl_set_subupl(upl_t, upl_t, uint32_t);
2586 
2587 	upl_t vector_upl = NULL;
2588 	vm_address_t kva = 0;
2589 
2590 	*out = 0;
2591 
2592 	struct {
2593 		uint64_t iov;
2594 		uint16_t iovcnt;
2595 	} args;
2596 
2597 	struct {
2598 		uint64_t base;
2599 		uint32_t len;
2600 	} *iov;
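	/*
	 * args/iov mirror a userspace scatter/gather list: `in` carries a user
	 * pointer to { iov, iovcnt }, and each iov element is a (base, len)
	 * pair describing one user buffer to wrap in a sub-UPL.
	 */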
2601 
2602 	size_t iovsize = 0;
2603 	iov = NULL;
2604 
2605 	int error = copyin((user_addr_t)in, &args, sizeof(args));
2606 	if ((error != 0) || (args.iovcnt == 0)) {
2607 		goto vector_upl_test_done;
2608 	}
2609 
2610 	iovsize = sizeof(*iov) * args.iovcnt;
2611 
2612 	iov = kalloc_data(iovsize, Z_WAITOK_ZERO);
2613 	if (iov == NULL) {
2614 		error = ENOMEM;
2615 		goto vector_upl_test_done;
2616 	}
2617 
2618 	error = copyin((user_addr_t)args.iov, iov, iovsize);
2619 	if (error != 0) {
2620 		goto vector_upl_test_done;
2621 	}
2622 
2623 	vector_upl = vector_upl_create(iov->base & PAGE_MASK, args.iovcnt);
2624 	upl_size_t vector_upl_size = 0;
2625 
2626 	/* Create each sub-UPL and append it to the top-level vector UPL. */
2627 	for (uint16_t i = 0; i < args.iovcnt; i++) {
2628 		upl_t subupl;
2629 		upl_size_t upl_size = iov[i].len;
2630 		unsigned int upl_count = 0;
2631 		upl_control_flags_t upl_flags = UPL_SET_IO_WIRE | UPL_SET_LITE | UPL_WILL_MODIFY | UPL_SET_INTERNAL;
2632 		kern_return_t kr = vm_map_create_upl(current_map(),
2633 		    (vm_map_offset_t)iov[i].base,
2634 		    &upl_size,
2635 		    &subupl,
2636 		    NULL,
2637 		    &upl_count,
2638 		    &upl_flags,
2639 		    VM_KERN_MEMORY_DIAG);
2640 		if (kr != KERN_SUCCESS) {
2641 			printf("vm_map_create_upl[%d](%p, 0x%lx) returned 0x%x\n",
2642 			    (int)i, (void*)iov[i].base, (unsigned long)iov[i].len, kr);
2643 			error = EIO;
2644 			goto vector_upl_test_done;
2645 		}
2646 		/* This effectively transfers our reference to subupl over to vector_upl. */
2647 		vector_upl_set_subupl(vector_upl, subupl, upl_size);
2648 		vector_upl_set_iostate(vector_upl, subupl, vector_upl_size, upl_size);
2649 		vector_upl_size += upl_size;
2650 	}
2651 
2652 	/* Map the vector UPL as a single KVA region and modify the page contents by adding 1 to each char. */
2653 	kern_return_t kr = vm_upl_map(kernel_map, vector_upl, &kva);
2654 	if (kr != KERN_SUCCESS) {
2655 		error = ENOMEM;
2656 		goto vector_upl_test_done;
2657 	}
2658 
2659 	char *buf = (char*)kva;
2660 	for (upl_size_t i = 0; i < vector_upl_size; i++) {
2661 		buf[i] = buf[i] + 1;
2662 	}
2663 	*out = (int64_t)vector_upl_size;
2664 
2665 vector_upl_test_done:
2666 
2667 	if (kva != 0) {
2668 		vm_upl_unmap(kernel_map, vector_upl);
2669 	}
2670 
2671 	if (vector_upl != NULL) {
2672 		/* Committing the vector UPL will release and deallocate each of its sub-UPLs. */
2673 		upl_commit(vector_upl, NULL, 0);
2674 		upl_deallocate(vector_upl);
2675 	}
2676 
2677 	if (iov != NULL) {
2678 		kfree_data(iov, iovsize);
2679 	}
2680 
2681 	return error;
2682 }
2683 SYSCTL_TEST_REGISTER(vm_vector_upl, vm_vector_upl_test);
2684 
2685 /*
2686  * Test that wiring copy-delay memory pushes pages to its copy object
2687  */
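/*
 * Setup sketch: A is fresh anonymous memory, B shares A (copy == FALSE), and
 * C is a delayed copy of B (copy == TRUE). Wiring B must resolve that
 * copy-on-write relationship eagerly, pushing B's resident pages into C's
 * backing object so that C keeps seeing the pre-wire contents; the test then
 * checks that C's object really did receive a page.
 */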
2688 static int
2689 vm_map_wire_copy_delay_memory_test(__unused int64_t in, int64_t *out)
2690 {
2691 	kern_return_t kr;
2692 	vm_map_t map;
2693 	mach_vm_address_t address_a, address_b, address_c;
2694 	vm_prot_t cur_prot, max_prot;
2695 	vm_map_entry_t entry;
2696 	vm_object_t object;
2697 	vm_page_t m;
2698 	bool result;
2699 
2700 	T_BEGIN("vm_map_wire_copy_delay_memory_test");
2701 	map = create_map(0x100000000ULL, 0x200000000ULL);
2702 
2703 	address_a = 0;
2704 	kr = mach_vm_allocate(
2705 		map,
2706 		&address_a,
2707 		/* size */ PAGE_SIZE,
2708 		VM_FLAGS_ANYWHERE);
2709 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "mach_vm_allocate A");
2710 
2711 	address_b = 0;
2712 	kr = mach_vm_remap(
2713 		map,
2714 		&address_b,
2715 		/* size */ PAGE_SIZE,
2716 		/* mask */ 0,
2717 		VM_FLAGS_ANYWHERE,
2718 		map,
2719 		address_a,
2720 		/* copy */ FALSE,
2721 		&cur_prot,
2722 		&max_prot,
2723 		VM_INHERIT_NONE);
2724 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "mach_vm_remap A->B");
2725 
2726 	address_c = 0;
2727 	kr = mach_vm_remap(
2728 		map,
2729 		&address_c,
2730 		/* size */ PAGE_SIZE,
2731 		/* mask */ 0,
2732 		VM_FLAGS_ANYWHERE,
2733 		map,
2734 		address_b,
2735 		/* copy */ TRUE,
2736 		&cur_prot,
2737 		&max_prot,
2738 		VM_INHERIT_NONE);
2739 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "mach_vm_remap B->C");
2740 
2741 	kr = mach_vm_protect(
2742 		map,
2743 		address_c,
2744 		/* size */ PAGE_SIZE,
2745 		/* set_max */ FALSE,
2746 		VM_PROT_READ);
2747 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "mach_vm_protect C");
2748 
2749 	kr = vm_map_wire_kernel(
2750 		map,
2751 		/* begin */ address_b,
2752 		/* end */ address_b + PAGE_SIZE,
2753 		VM_PROT_NONE,
2754 		VM_KERN_MEMORY_OSFMK,
2755 		false);
2756 	T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "vm_map_wire_kernel B");
2757 
2758 	vm_map_lock(map);
2759 	result = vm_map_lookup_entry(map, address_c, &entry);
2760 	T_ASSERT_EQ_INT(result, true, "vm_map_lookup_entry");
2761 
2762 	object = VME_OBJECT(entry);
2763 	T_ASSERT_NOTNULL(object, "C's object should not be null");
2764 	vm_object_lock(object);
2765 
2766 	m = vm_page_lookup(object, /* offset */ 0);
2767 	T_ASSERT_NOTNULL(m, "C should have a page pushed to it");
2768 
2769 	/* cleanup */
2770 	vm_object_unlock(object);
2771 	vm_map_unlock(map);
2772 	cleanup_map(&map);
2773 
2774 	T_END;
2775 	*out = 1;
2776 	return 0;
2777 }
2778 SYSCTL_TEST_REGISTER(vm_map_wire_copy_delay_memory, vm_map_wire_copy_delay_memory_test);
2779 
2780 #if HAS_MTE
2781 
2782 static void
2783 create_two_mte_maps(vm_map_t* out_mte_map1, vm_map_t* out_mte_map2)
2784 {
2785 	*out_mte_map1 = create_map(MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS);
2786 	vm_map_set_sec_enabled(*out_mte_map1);
2787 	*out_mte_map2 = create_map(MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS);
2788 	vm_map_set_sec_enabled(*out_mte_map2);
2789 
2790 	/* And the second map receives a new ID */
2791 	if ((*out_mte_map1)->serial_id == (*out_mte_map2)->serial_id) {
2792 		panic("Expected each map to receive a new ID");
2793 	}
2794 }
2795 
2796 static void
2797 create_mte_and_non_mte_map(vm_map_t* out_mte_map, vm_map_t* out_non_mte_map)
2798 {
2799 	*out_mte_map = create_map(MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS);
2800 	vm_map_set_sec_enabled(*out_mte_map);
2801 	*out_non_mte_map = create_map(MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS);
2802 
2803 	/* And the second map receives a new ID */
2804 	if ((*out_mte_map)->serial_id == (*out_non_mte_map)->serial_id) {
2805 		panic("Expected each map to receive a new ID");
2806 	}
2807 }
2808 
2809 static vm_object_t
2810 create_mte_vm_object(vm_object_size_t size, vm_map_serial_t provenance)
2811 {
2812 	vm_object_t mte_object = vm_object_allocate(size, provenance);
2813 	assert(mte_object != VM_OBJECT_NULL);
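	/* Tag the object's cache attribute bits as MTE so mappings of it are created tagged */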
2814 	mte_object->wimg_bits = VM_WIMG_MTE;
2815 	/* Specify the expected copy strategy for MTE objects */
2816 	mte_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
2817 	return mte_object;
2818 }
2819 
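/*
 * Look up the VM object backing 'address' in 'map'. Test-only helper: the
 * lookup is done without holding the map lock, which is tolerable here
 * because these tests use private maps that no other thread mutates.
 */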
2820 static vm_object_t
2821 vm_object_for_address(vm_map_t map, vm_map_offset_t address)
2822 {
2823 	vm_map_entry_t map_entry;
2824 	bool result = vm_map_lookup_entry(map, address, &map_entry);
2825 	vm_object_t object = VME_OBJECT(map_entry);
2826 	assert(result && object);
2827 	return object;
2828 }
2829 
2830 static vm_map_offset_t
2831 map_object_and_expect_mte(vm_map_t map, vm_object_t obj)
2832 {
2833 	vm_map_offset_t mapped_address;
2834 	kern_return_t kr = vm_map_enter(map, &mapped_address, obj->vo_size, 0,
2835 	    /* We want the object to be mapped as MTE-enabled */
2836 	    VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmf_mte = true),
2837 	    obj, 0, false,
2838 	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
2839 	assert(kr == KERN_SUCCESS);
2840 
2841 	/* And the entry is MTE-enabled as expected */
2842 	assert(vm_object_is_mte_mappable(vm_object_for_address(map, mapped_address)));
2843 
2844 	return mapped_address;
2845 }
2846 
2847 static vm_object_t
2848 expect_object_at_address_to_be_mapped_with_mte_state(
2849 	vm_map_t map,
2850 	vm_map_offset_t address,
2851 	bool expect_mapping_to_be_mte_enabled
2852 	)
2853 {
2854 	vm_object_t alias_object = vm_object_for_address(map, address);
2855 	ppnum_t pn = vm_map_get_phys_page(map, address);
2856 	T_ASSERT(pn != 0, "Expected a non-zero page number");
2857 	/* The page itself should be tagged */
2858 	assert(vm_object_is_mte_mappable(alias_object));
2859 	T_ASSERT(pmap_is_tagged_page(pn), "Expected backing page to be tagged");
2860 
2861 	if (expect_mapping_to_be_mte_enabled) {
2862 		/* And our specific mapping should be tagged */
2863 		T_ASSERT(pmap_is_tagged_mapping(vm_map_pmap(map), address), "Expected alias mapping to be tagged");
2864 	} else {
2865 		/* But our mapping should not be tagged */
2866 		T_ASSERT(!pmap_is_tagged_mapping(vm_map_pmap(map), address), "Expected alias mapping to be untagged");
2867 	}
2868 
2869 	return alias_object;
2870 }
2871 
2872 static vm_object_t
2873 expect_object_at_address_to_be_mte_mapped(
2874 	vm_map_t map,
2875 	vm_map_offset_t address)
2876 {
2877 	return expect_object_at_address_to_be_mapped_with_mte_state(
2878 		map,
2879 		address,
2880 		true);
2881 }
2882 
2883 static void
2884 share_object_and_expect_mapped_with_mte_state(
2885 	vm_map_t src_map,
2886 	vm_map_t dst_map,
2887 	vm_map_offset_t src_address,
2888 	vm_object_t obj,
2889 	vm_map_offset_t* out_dst_address,
2890 	vm_object_t* out_dst_object,
2891 	bool expect_mapping_to_be_mte_enabled)
2892 {
2893 	assert(out_dst_address && out_dst_object);
2894 
2895 	/* Share the object into the target map */
2896 	vm_map_offset_t alias_mapped_address = 0;
2897 	vm_prot_t cur_prot = VM_PROT_DEFAULT;
2898 	vm_prot_t max_prot = VM_PROT_DEFAULT;
2899 	kern_return_t kr = vm_map_remap(
2900 		dst_map,
2901 		vm_sanitize_wrap_addr_ref(&alias_mapped_address),
2902 		obj->vo_size,
2903 		0,
2904 		/* The object may be MTE enabled */
2905 		VM_MAP_KERNEL_FLAGS_ANYWHERE(.vmf_mte = true),
2906 		src_map,
2907 		src_address,
2908 		false,
2909 		vm_sanitize_wrap_prot_ref(&cur_prot),
2910 		vm_sanitize_wrap_prot_ref(&max_prot),
2911 		VM_INHERIT_DEFAULT
2912 		);
2913 	assert(kr == KERN_SUCCESS);
2914 
2915 	/* Fault in the object so we can inspect the pmap state */
2916 	kr = vm_fault(
2917 		dst_map,
2918 		alias_mapped_address,
2919 		VM_PROT_READ,
2920 		false,
2921 		VM_KERN_MEMORY_NONE,
2922 		THREAD_UNINT,
2923 		NULL, 0);
2924 	assert(kr == KERN_SUCCESS);
2925 
2926 	vm_object_t alias_object = expect_object_at_address_to_be_mapped_with_mte_state(
2927 		dst_map,
2928 		alias_mapped_address,
2929 		expect_mapping_to_be_mte_enabled);
2930 
2931 	*out_dst_address = alias_mapped_address;
2932 	*out_dst_object = alias_object;
2933 }
2934 
2935 static void
2936 share_object_and_expect_non_mte(
2937 	vm_map_t src_map,
2938 	vm_map_t dst_map,
2939 	vm_map_offset_t src_address,
2940 	vm_object_t obj,
2941 	vm_map_offset_t* out_dst_address,
2942 	vm_object_t* out_dst_object
2943 	)
2944 {
2945 	share_object_and_expect_mapped_with_mte_state(
2946 		src_map,
2947 		dst_map,
2948 		src_address,
2949 		obj,
2950 		out_dst_address,
2951 		out_dst_object,
2952 		false);
2953 }
2954 
2955 static void
2956 share_object_and_expect_mte(
2957 	vm_map_t src_map,
2958 	vm_map_t dst_map,
2959 	vm_map_offset_t src_address,
2960 	vm_object_t obj,
2961 	vm_map_offset_t* out_dst_address,
2962 	vm_object_t* out_dst_object
2963 	)
2964 {
2965 	share_object_and_expect_mapped_with_mte_state(
2966 		src_map,
2967 		dst_map,
2968 		src_address,
2969 		obj,
2970 		out_dst_address,
2971 		out_dst_object,
2972 		true);
2973 }
2974 
2975 static int
2976 vm_map_id_fork_test(__unused int64_t in, int64_t *out)
2977 {
2978 	/* Given a map */
2979 	vm_map_t parent_map = create_map(MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS);
2980 
2981 	/* When we fork it into a new map */
2982 	ledger_t map_ledger = parent_map->pmap->ledger;
2983 	vm_map_t child_map = vm_map_fork(map_ledger, parent_map, 0);
2984 
2985 	/* Then the forked map shares the same ID as its parent map */
2986 	if (parent_map->serial_id != child_map->serial_id) {
2987 		panic("Expected a forked map to share its parent's ID");
2988 	}
2989 
2990 	/* Cleanup */
2991 	cleanup_map(&child_map);
2992 	cleanup_map(&parent_map);
2993 
2994 	*out = 1;
2995 	return 0;
2996 }
2997 SYSCTL_TEST_REGISTER(vm_map_id_fork, vm_map_id_fork_test);
2998 
2999 static int
3000 vm_map_alias_mte_mapping_in_other_non_mte_map_test(__unused int64_t in, int64_t *out)
3001 {
3002 	/* Given an MTE map and a non-MTE map */
3003 	vm_map_t mte_map, non_mte_map;
3004 	create_mte_and_non_mte_map(&mte_map, &non_mte_map);
3005 
3006 	/* And an MTE-enabled object in the MTE map */
3007 	vm_object_t mte_object = create_mte_vm_object(PAGE_SIZE, mte_map->serial_id);
3008 	vm_map_offset_t mte_map_mapped_address = map_object_and_expect_mte(mte_map, mte_object);
3009 
3010 	/* When the mapping is entered into a non-MTE map */
3011 	/* Then the object has been entered as non-MTE in the non-MTE map */
3012 	vm_map_offset_t alias_address;
3013 	vm_object_t alias_object;
3014 	share_object_and_expect_non_mte(mte_map, non_mte_map, mte_map_mapped_address, mte_object, &alias_address, &alias_object);
3015 
3016 	/* And when we remap again back into the original map */
3017 	/* Then the object has been entered as MTE, because it went back to its original map */
3018 	vm_map_offset_t remapped_address;
3019 	vm_object_t remapped_object;
3020 	share_object_and_expect_mte(non_mte_map, mte_map, alias_address, alias_object, &remapped_address, &remapped_object);
3021 
3022 	/* Cleanup */
3023 	cleanup_map(&non_mte_map);
3024 	cleanup_map(&mte_map);
3025 
3026 	*out = 1;
3027 	return 0;
3028 }
3029 SYSCTL_TEST_REGISTER(vm_map_alias_mte_mapping_in_other_non_mte_map, vm_map_alias_mte_mapping_in_other_non_mte_map_test);
3030 
3031 static int
3032 vm_map_alias_mte_mapping_in_other_mte_map_test(__unused int64_t in, int64_t *out)
3033 {
3034 	/* Given two MTE maps */
3035 	vm_map_t mte_map1, mte_map2;
3036 	create_two_mte_maps(&mte_map1, &mte_map2);
3037 
3038 	/* And an MTE-enabled object in the first map */
3039 	vm_object_t mte_object = create_mte_vm_object(PAGE_SIZE, mte_map1->serial_id);
3040 	vm_map_offset_t mte_map1_mapped_address = map_object_and_expect_mte(mte_map1, mte_object);
3041 
3042 	/* When the mapping is entered into the second MTE map */
3043 	/* Then the object has been entered as non-MTE (because the MTE state of the destination map doesn't matter) */
3044 	vm_map_offset_t alias_address;
3045 	vm_object_t alias_object;
3046 	share_object_and_expect_non_mte(
3047 		mte_map1,
3048 		mte_map2,
3049 		mte_map1_mapped_address,
3050 		mte_object,
3051 		&alias_address,
3052 		&alias_object);
3053 
3054 	/* And when we remap again back into the original map */
3055 	/* Then the object has been entered as MTE, because it went back to its original map */
3056 	vm_map_offset_t remapped_address;
3057 	vm_object_t remapped_object;
3058 	share_object_and_expect_mte(
3059 		mte_map2,
3060 		mte_map1,
3061 		alias_address,
3062 		alias_object,
3063 		&remapped_address,
3064 		&remapped_object);
3065 
3066 	/* Cleanup */
3067 	cleanup_map(&mte_map2);
3068 	cleanup_map(&mte_map1);
3069 
3070 	*out = 1;
3071 	return 0;
3072 }
3073 SYSCTL_TEST_REGISTER(vm_map_alias_mte_mapping_in_other_mte_map, vm_map_alias_mte_mapping_in_other_mte_map_test);
3074 
3075 static int
3076 vm_map_alias_mte_mapping_in_fork_map_test(__unused int64_t in, int64_t *out)
3077 {
3078 	/* Given an MTE parent map */
3079 	vm_map_t parent_map = create_map(MACH_VM_MIN_ADDRESS, MACH_VM_MAX_ADDRESS);
3080 	vm_map_set_sec_enabled(parent_map);
3081 
3082 	/* And an MTE mapping that was created prior to forking */
3083 	vm_object_t mte_object_from_before_fork = create_mte_vm_object(PAGE_SIZE, parent_map->serial_id);
3084 	vm_map_offset_t mapped_address_of_mte_object_from_before_fork = map_object_and_expect_mte(parent_map, mte_object_from_before_fork);
3085 
3086 	/* And a forked child map */
3087 	vm_map_t child_map = vm_map_fork(parent_map->pmap->ledger, parent_map, 0);
3088 
3089 	/*
3090 	 * Then the MTE object that was mapped in the parent before the fork is also
3091 	 * MTE in the child (which is expected via our forking strategy).
3092 	 * (We also need to fault in the mapping in the child first.)
3093 	 */
3094 	kern_return_t kr = vm_fault(
3095 		child_map,
3096 		mapped_address_of_mte_object_from_before_fork,
3097 		VM_PROT_READ,
3098 		false,
3099 		VM_KERN_MEMORY_NONE,
3100 		THREAD_UNINT,
3101 		NULL, 0);
3102 	assert(kr == KERN_SUCCESS);
3103 
3104 	expect_object_at_address_to_be_mte_mapped(
3105 		child_map,
3106 		mapped_address_of_mte_object_from_before_fork
3107 		);
3108 
3109 	/* And when we enter a new MTE object into the parent after the fork() */
3110 	vm_object_t mte_object_after_fork = create_mte_vm_object(PAGE_SIZE, parent_map->serial_id);
3111 	vm_map_offset_t mapped_address_of_mte_object_after_fork = map_object_and_expect_mte(parent_map, mte_object_after_fork);
3112 
3113 	/* And we share this object into the child */
3114 	/* Then the child's alias to this object is MTE-enabled (because forked maps share their parent's serial ID) */
3115 	vm_map_offset_t aliased_address;
3116 	vm_object_t aliased_object;
3117 	share_object_and_expect_mte(
3118 		parent_map,
3119 		child_map,
3120 		mapped_address_of_mte_object_after_fork,
3121 		mte_object_after_fork,
3122 		&aliased_address,
3123 		&aliased_object);
3124 
3125 	/* And when we remap again back into the original parent map */
3126 	/* Then the object has been entered as MTE, because it went back to its original map */
3127 	vm_map_offset_t remapped_address;
3128 	vm_object_t remapped_object;
3129 	share_object_and_expect_mte(
3130 		child_map,
3131 		parent_map,
3132 		aliased_address,
3133 		aliased_object,
3134 		&remapped_address,
3135 		&remapped_object);
3136 
3137 	/* Cleanup */
3138 	cleanup_map(&child_map);
3139 	cleanup_map(&parent_map);
3140 
3141 	*out = 1;
3142 	return 0;
3143 }
3144 SYSCTL_TEST_REGISTER(vm_map_alias_mte_mapping_in_fork_map, vm_map_alias_mte_mapping_in_fork_map_test);
3145 
3146 static int
3147 vm_object_transpose_provenance_test(__unused int64_t in, int64_t *out)
3148 {
3149 	/* Given two MTE maps */
3150 	vm_map_t mte_map1, mte_map2;
3151 	create_two_mte_maps(&mte_map1, &mte_map2);
3152 
3153 	/* And an object from each map */
3154 	vm_object_t obj1 = create_mte_vm_object(PAGE_SIZE, mte_map1->serial_id);
3155 	vm_object_t obj2 = create_mte_vm_object(PAGE_SIZE, mte_map2->serial_id);
3156 
3157 	/* When we transpose the objects */
3158 	vm_map_serial_t original_obj1_prov = obj1->vmo_provenance;
3159 	vm_map_serial_t original_obj2_prov = obj2->vmo_provenance;
3160 
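	/*
	 * vm_object_transpose() expects both objects to be quiesced: mark paging
	 * activity in progress and block access on each before swapping them.
	 */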
3161 	vm_object_lock(obj1);
3162 	vm_object_activity_begin(obj1);
3163 	obj1->blocked_access = TRUE;
3164 	vm_object_unlock(obj1);
3165 	vm_object_lock(obj2);
3166 	vm_object_activity_begin(obj2);
3167 	obj2->blocked_access = TRUE;
3168 	vm_object_unlock(obj2);
3169 
3170 	vm_object_transpose(obj2, obj1, PAGE_SIZE);
3171 
3172 	vm_object_lock(obj1);
3173 	vm_object_activity_end(obj1);
3174 	obj1->blocked_access = FALSE;
3175 	vm_object_unlock(obj1);
3176 	vm_object_lock(obj2);
3177 	vm_object_activity_end(obj2);
3178 	obj2->blocked_access = FALSE;
3179 	vm_object_unlock(obj2);
3180 
3181 	/* Then the IDs have been transposed */
3182 	assert(obj1->vmo_provenance == original_obj2_prov);
3183 	assert(obj2->vmo_provenance == original_obj1_prov);
3184 
3185 	/* Cleanup */
3186 	cleanup_map(&mte_map2);
3187 	cleanup_map(&mte_map1);
3188 
3189 	*out = 1;
3190 	return 0;
3191 }
3193 SYSCTL_TEST_REGISTER(vm_object_transpose_provenance, vm_object_transpose_provenance_test);
3194 
3196 #endif /* HAS_MTE */
3197 
3198 /*
3199  * Compare the contents of an original userspace buffer with the kernel mapping of a UPL created
3200  * against that userspace buffer.  Also validate that the physical pages in the UPL's page list
3201  * match the physical pages backing the kernel mapping at the pmap layer.  Furthermore, if UPL creation
3202  * was expected to copy the original buffer, validate that the backing pages for the userspace buffer
3203  * don't match the kernel/UPL pages, otherwise validate that they do match.
3204  */
3205 static int
3206 upl_buf_compare(user_addr_t src, upl_t upl, const void *upl_buf, upl_size_t size, bool copy_expected)
3207 {
3208 	int error = 0;
3209 	void *temp = kalloc_data(PAGE_SIZE, Z_WAITOK);
3210 
3211 	upl_size_t i = 0;
3212 	while (i < size) {
3213 		size_t bytes = MIN(size - i, PAGE_SIZE);
3214 		error = copyin(src + i, temp, bytes);
3215 		if (!error && (memcmp(temp, (const void*)((uintptr_t)upl_buf + i), bytes) != 0)) {
3216 			printf("%s: memcmp(%p, %p, %zu) failed, src[0] = 0x%llx, buf[0] = 0x%llx\n",
3217 			    __func__, (void*)(src + i), (const void*)((uintptr_t)upl_buf + i), bytes, *((unsigned long long*)temp), *((unsigned long long*)((uintptr_t)upl_buf + i)));
3218 			error = EINVAL;
3219 		}
3220 		if (!error) {
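			/*
			 * Cross-check physical addresses. The UPL's page_list is
			 * indexed by page, so the byte offset 'i' is shifted by
			 * PAGE_SHIFT; a wired UPL must always have a valid PA.
			 */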
3221 			ppnum_t user_pa = pmap_find_phys(current_map()->pmap, (addr64_t)src + i);
3222 			ppnum_t upl_pa = pmap_find_phys(kernel_pmap, (addr64_t)upl_buf + i);
3223 			if ((upl_pa == 0) || /* UPL is wired, PA should always be valid */
3224 			    (!copy_expected && (upl_pa != user_pa)) ||
3225 			    (copy_expected && (upl_pa == user_pa)) ||
3226 			    (upl_pa != (upl->page_list[i >> PAGE_SHIFT].phys_addr))) {
3227 				printf("%s: PA verification[%u] failed: copy=%u, upl_pa = 0x%lx, user_pa = 0x%lx, page list PA = 0x%lx\n",
3228 				    __func__, (unsigned)i, (unsigned)copy_expected, (unsigned long)upl_pa, (unsigned long)user_pa,
3229 				    (unsigned long)upl->page_list[i >> PAGE_SHIFT].phys_addr);
3230 				error = EFAULT;
3231 			}
3232 		}
3233 		if (error) {
3234 			break;
3235 		}
3236 		i += bytes;
3237 	}
3238 
3239 	kfree_data(temp, PAGE_SIZE);
3240 
3241 	return error;
3242 }
3243 
3244 static int
3245 vm_upl_test(int64_t in, int64_t *out __unused)
3246 {
3247 	upl_t upl = NULL;
3248 	vm_address_t kva = 0;
3249 
3250 	struct {
3251 		uint64_t ptr; /* Base address of buffer in userspace */
3252 		uint32_t size; /* Size of userspace buffer (in bytes) */
3253 		char test_pattern; /* Starting char of test pattern we should write (if applicable) */
3254 		bool copy_expected; /* Is UPL creation expected to create a copy of the original buffer? */
3255 		bool should_fail; /* Is UPL creation expected to fail due to permissions checking? */
3256 		bool upl_rw; /* Should the UPL be created RW (!UPL_COPYOUT_FROM) instead of RO? */
3257 	} args;
3258 	int error = copyin((user_addr_t)in, &args, sizeof(args));
3259 	if ((error != 0) || (args.size == 0)) {
3260 		goto upl_test_done;
3261 	}
3262 
3263 	upl_size_t upl_size = args.size;
3264 	unsigned int upl_count = 0;
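	/* Request a wired, "lite" UPL with an internal page list; RO (UPL_COPYOUT_FROM) unless the caller asked for RW */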
3265 	upl_control_flags_t upl_flags = UPL_SET_IO_WIRE | UPL_SET_LITE | UPL_SET_INTERNAL;
3266 	if (!args.upl_rw) {
3267 		upl_flags |= UPL_COPYOUT_FROM;
3268 	} else {
3269 		upl_flags |= UPL_WILL_MODIFY;
3270 	}
3271 	kern_return_t kr = vm_map_create_upl(current_map(),
3272 	    (vm_map_offset_t)args.ptr,
3273 	    &upl_size,
3274 	    &upl,
3275 	    NULL,
3276 	    &upl_count,
3277 	    &upl_flags,
3278 	    VM_KERN_MEMORY_DIAG);
3279 	if (args.should_fail && (kr == KERN_PROTECTION_FAILURE)) {
3280 		goto upl_test_done;
3281 	} else if (args.should_fail && (kr == KERN_SUCCESS)) {
3282 		printf("%s: vm_map_create_upl(%p, 0x%lx) did not fail as expected\n",
3283 		    __func__, (void*)args.ptr, (unsigned long)args.size);
3284 		error = EIO;
3285 		goto upl_test_done;
3286 	} else if (kr != KERN_SUCCESS) {
3287 		printf("%s: vm_map_create_upl(%p, 0x%lx) returned 0x%x\n",
3288 		    __func__, (void*)args.ptr, (unsigned long)args.size, kr);
3289 		error = kr;
3290 		goto upl_test_done;
3291 	}
3292 
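	/* Map the UPL into the kernel's address space so its contents can be compared below */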
3293 	kr = vm_upl_map(kernel_map, upl, &kva);
3294 	if (kr != KERN_SUCCESS) {
3295 		error = kr;
3296 		printf("%s: vm_upl_map() returned 0x%x\n", __func__, kr);
3297 		goto upl_test_done;
3298 	}
3299 
3300 	/* Ensure the mapped UPL contents match the original user buffer contents */
3301 	error = upl_buf_compare((user_addr_t)args.ptr, upl, (void*)kva, upl_size, args.copy_expected);
3302 
3303 	if (error) {
3304 		printf("%s: upl_buf_compare(%p, %p, %zu) failed\n",
3305 		    __func__, (void*)args.ptr, (void*)kva, (size_t)upl_size);
3306 	}
3307 
3308 	if (!error && args.upl_rw) {
3309 		/*
3310 		 * If the UPL is writable, update the contents so that userspace can
3311 		 * validate that it sees the updates.
3312 		 */
3313 		for (unsigned int i = 0; i < (upl_size / sizeof(unsigned int)); i++) {
3314 			((unsigned int*)kva)[i] = (unsigned int)args.test_pattern + i;
3315 		}
3316 	}
3317 
3318 upl_test_done:
3319 
3320 	if (kva != 0) {
3321 		vm_upl_unmap(kernel_map, upl);
3322 	}
3323 
3324 	if (upl != NULL) {
3325 		upl_commit(upl, NULL, 0);
3326 		upl_deallocate(upl);
3327 	}
3328 
3329 	return error;
3330 }
3331 SYSCTL_TEST_REGISTER(vm_upl, vm_upl_test);
3332 
3333 static int
3334 vm_upl_submap_test(int64_t in, int64_t *out __unused)
3335 {
3336 	vm_map_address_t start = 0x180000000ULL;
3337 	vm_map_address_t end = start + 0x180000000ULL;
3338 
3339 	upl_t upl = NULL;
3340 	vm_address_t kva = 0;
3341 	int error = 0;
3342 
3343 	/*
3344 	 * Create temporary pmap and VM map for nesting our submap.
3345 	 * We can't directly nest our submap into the current user map, because it will
3346 	 * have already nested the shared region, and our security model doesn't allow
3347 	 * multiple nested pmaps.
3348 	 */
3349 	pmap_t temp_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT);
3350 
3351 	vm_map_t temp_map = VM_MAP_NULL;
3352 	if (temp_pmap != PMAP_NULL) {
3353 		temp_map = vm_map_create_options(temp_pmap, 0, 0xfffffffffffff, 0);
3354 	}
3355 
3356 	/* Now create the pmap and VM map that will back the submap entry in 'temp_map'. */
3357 	pmap_t nested_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_NESTED);
3358 
3359 	vm_map_t nested_map = VM_MAP_NULL;
3360 	if (nested_pmap != PMAP_NULL) {
3361 #if defined(__arm64__)
3362 		pmap_set_nested(nested_pmap);
3363 #endif /* defined(__arm64__) */
3364 #if CODE_SIGNING_MONITOR
3365 		csm_setup_nested_address_space(nested_pmap, start, end - start);
3366 #endif
3367 		nested_map = vm_map_create_options(nested_pmap, 0, end - start, 0);
3368 	}
3369 
3370 	if (temp_map == VM_MAP_NULL || nested_map == VM_MAP_NULL) {
3371 		error = ENOMEM;
3372 		printf("%s: failed to create VM maps\n", __func__);
3373 		goto upl_submap_test_done;
3374 	}
3375 
3376 	nested_map->is_nested_map = TRUE;
3377 	nested_map->vmmap_sealed = VM_MAP_WILL_BE_SEALED;
3378 
3379 	struct {
3380 		uint64_t ptr; /* Base address of original buffer in userspace */
3381 		uint64_t upl_base; /* Base address in 'temp_map' against which UPL should be created */
3382 		uint32_t size; /* Size of userspace buffer in bytes */
3383 		uint32_t upl_size; /* Size of UPL to create in bytes */
3384 		bool upl_rw; /* Should the UPL be created RW (!UPL_COPYOUT_FROM) instead of RO? */
3385 	} args;
3386 	error = copyin((user_addr_t)in, &args, sizeof(args));
3387 	if ((error != 0) || (args.size == 0) || (args.upl_size == 0)) {
3388 		goto upl_submap_test_done;
3389 	}
3390 
3391 	/*
3392 	 * Remap the original userspace buffer into the nested map, with CoW protection.
3393 	 * This will not actually instantiate new mappings in 'nested_pmap', but will instead create
3394 	 * new shadow object of the original object for the userspace buffer in the nested map.
3395 	 * Mappings would only be created in 'nested_pmap' upon a later non-CoW fault of the nested region,
3396 	 * which we aren't doing here.  That's fine, as we're not testing pmap functionality here; we
3397 	 * only care that UPL creation produces the expected results at the VM map/entry level.
3398 	 */
3399 	mach_vm_offset_t submap_start = 0;
3400 
3401 	vm_prot_ut remap_cur_prot = vm_sanitize_wrap_prot(VM_PROT_READ);
3402 	vm_prot_ut remap_max_prot = vm_sanitize_wrap_prot(VM_PROT_READ);
3403 
3404 	kern_return_t kr = mach_vm_remap_new_kernel(nested_map, (mach_vm_offset_ut*)&submap_start, args.size, 0,
3405 	    VM_MAP_KERNEL_FLAGS_FIXED(.vm_tag = VM_KERN_MEMORY_OSFMK), current_map(), args.ptr, TRUE,
3406 	    &remap_cur_prot, &remap_max_prot, VM_INHERIT_NONE);
3407 	if (kr != KERN_SUCCESS) {
3408 		printf("%s: failed to remap source buffer to nested map: 0x%x\n", __func__, kr);
3409 		error = kr;
3410 		goto upl_submap_test_done;
3411 	}
3412 
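	/*
	 * Seal the nested map (submaps are sealed before nesting) and associate
	 * the nested pmap with the temporary pmap, as is done when nesting the
	 * shared region.
	 */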
3413 	vm_map_seal(nested_map, true);
3414 	pmap_set_shared_region(temp_pmap, nested_pmap, start, end - start);
3415 
3416 	/* Do the actual nesting. */
3417 	vm_map_reference(nested_map);
3418 	kr = vm_map_enter(temp_map, &start, end - start, 0,
3419 	    VM_MAP_KERNEL_FLAGS_FIXED(.vmkf_submap = TRUE, .vmkf_nested_pmap = TRUE), (vm_object_t)(uintptr_t)nested_map, 0,
3420 	    true, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
3421 
3422 	if (kr != KERN_SUCCESS) {
3423 		error = kr;
3424 		printf("%s: failed to enter nested map in test map: 0x%x\n", __func__, kr);
3425 		vm_map_deallocate(nested_map);
3426 		goto upl_submap_test_done;
3427 	}
3428 
3429 	/* Validate that the nesting operation produced the expected submap entry in 'temp_map'. */
3430 	vm_map_entry_t submap_entry;
3431 	if (!vm_map_lookup_entry(temp_map, args.upl_base, &submap_entry) || !submap_entry->is_sub_map) {
3432 		error = ENOENT;
3433 		printf("%s: did not find submap entry at beginning of UPL region\n", __func__);
3434 		goto upl_submap_test_done;
3435 	}
3436 
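	/*
	 * Create the UPL against the nested region. This should force part of the
	 * submap entry to be unnested and the CoW pages to be copied; both effects
	 * are validated below.
	 */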
3437 	upl_size_t upl_size = args.upl_size;
3438 	unsigned int upl_count = 0;
3439 	upl_control_flags_t upl_flags = UPL_SET_IO_WIRE | UPL_SET_LITE | UPL_SET_INTERNAL;
3440 	if (!args.upl_rw) {
3441 		upl_flags |= UPL_COPYOUT_FROM;
3442 	}
3443 	kr = vm_map_create_upl(temp_map,
3444 	    (vm_map_offset_t)args.upl_base,
3445 	    &upl_size,
3446 	    &upl,
3447 	    NULL,
3448 	    &upl_count,
3449 	    &upl_flags,
3450 	    VM_KERN_MEMORY_DIAG);
3451 
3452 	if (kr != KERN_SUCCESS) {
3453 		error = kr;
3454 		printf("%s: failed to create UPL for submap: 0x%x\n", __func__, kr);
3455 		goto upl_submap_test_done;
3456 	}
3457 
3458 	/* Validate that UPL creation unnested a portion of the submap entry. */
3459 	if (!vm_map_lookup_entry(temp_map, args.upl_base, &submap_entry) || submap_entry->is_sub_map) {
3460 		error = ENOENT;
3461 		printf("%s: did not find non-submap entry at beginning of UPL region\n", __func__);
3462 		goto upl_submap_test_done;
3463 	}
3464 
3465 	kr = vm_upl_map(kernel_map, upl, &kva);
3466 	if (kr != KERN_SUCCESS) {
3467 		error = kr;
3468 		goto upl_submap_test_done;
3469 	}
3470 
3471 	/*
3472 	 * Compare the original userspace buffer to the ultimate kernel mapping of the UPL.
3473 	 * The unnesting and CoW faulting performed as part of UPL creation should have copied the original buffer
3474 	 * pages, so we expect the two buffers to be backed by different pages.
3475 	 */
3476 	error = upl_buf_compare((user_addr_t)args.ptr + (args.upl_base - start), upl, (void*)kva, upl_size, true);
3477 
3478 	if (!error) {
3479 		/*
3480 		 * Now validate that the nested region in 'temp_map' matches the original buffer.
3481 		 * The unnesting and CoW faulting performed as part of UPL creation should have acted directly
3482 		 * upon 'temp_map', so the backing pages should be the same here.
3483 		 */
3484 		vm_map_switch_context_t switch_ctx = vm_map_switch_to(temp_map);
3485 		error = upl_buf_compare((user_addr_t)args.upl_base, upl, (void*)kva, upl_size, false);
3486 		vm_map_switch_back(switch_ctx);
3487 	}
3488 
3489 upl_submap_test_done:
3490 
3491 	if (kva != 0) {
3492 		vm_upl_unmap(kernel_map, upl);
3493 	}
3494 
3495 	if (upl != NULL) {
3496 		upl_commit(upl, NULL, 0);
3497 		upl_deallocate(upl);
3498 	}
3499 
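	/*
	 * Deallocating a map also destroys its pmap, so clear the pmap pointers
	 * here to avoid a double pmap_destroy() below.
	 */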
3500 	if (temp_map != VM_MAP_NULL) {
3501 		vm_map_deallocate(temp_map);
3502 		temp_pmap = PMAP_NULL;
3503 	}
3504 	if (nested_map != VM_MAP_NULL) {
3505 		vm_map_deallocate(nested_map);
3506 		nested_pmap = PMAP_NULL;
3507 	}
3508 
3509 	if (temp_pmap != PMAP_NULL) {
3510 		pmap_destroy(temp_pmap);
3511 	}
3512 	if (nested_pmap != PMAP_NULL) {
3513 		pmap_destroy(nested_pmap);
3514 	}
3515 
3516 	return error;
3517 }
3518 SYSCTL_TEST_REGISTER(vm_upl_submap, vm_upl_submap_test);
3519 
3520 #if CONFIG_SPTM
3521 
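/*
 * Thread-call handler that ends a simulated page-clean operation: clear
 * vmp_cleaning and wake up any thread (such as a blocked fault handler)
 * waiting on the page.
 */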
3522 static void
3523 page_clean_timeout(thread_call_param_t param0, __unused thread_call_param_t param1)
3524 {
3525 	vm_page_t m = (vm_page_t)param0;
3526 	vm_object_t object = VM_PAGE_OBJECT(m);
3527 	vm_object_lock(object);
3528 	m->vmp_cleaning = false;
3529 	vm_page_wakeup(object, m);
3530 	vm_object_unlock(object);
3531 }
3532 
3533 /**
3534  * This sysctl is meant to exercise very specific functionality that can't be exercised through
3535  * the normal vm_map_create_upl() path.  It operates directly against the vm_object backing
3536  * the specified address range, and does not take any locks against the VM map to guarantee
3537  * stability of the specified address range.  It is therefore meant to be used against
3538  * VM regions directly allocated by the userspace caller and guaranteed to not be altered by
3539  * other threads.  The regular vm_upl/vm_upl_submap sysctls should be preferred over this
3540  * if at all possible.
3541  */
3542 static int
3543 vm_upl_object_test(int64_t in, int64_t *out __unused)
3544 {
3545 	upl_t upl = NULL;
3546 
3547 	struct {
3548 		uint64_t ptr; /* Base address of buffer in userspace */
3549 		uint32_t size; /* Size of userspace buffer (in bytes) */
3550 		bool upl_rw; /* Should the UPL be created RW (!UPL_COPYOUT_FROM) instead of RO? */
3551 		bool should_fail; /* Is UPL creation expected to fail due to permissions checking? */
3552 		uint8_t fault_prot; /* Protection with which to simulate a vm_fault() after UPL creation (VM_PROT_NONE to skip) */
3553 	} args;
3554 	int error = copyin((user_addr_t)in, &args, sizeof(args));
3555 	if ((error != 0) || (args.size == 0)) {
3556 		goto upl_object_test_done;
3557 	}
3558 
3559 	upl_size_t upl_size = args.size;
3560 	unsigned int upl_count = 0;
3561 	upl_control_flags_t upl_flags = UPL_SET_IO_WIRE | UPL_SET_LITE | UPL_SET_INTERNAL;
3562 	if (!args.upl_rw) {
3563 		upl_flags |= UPL_COPYOUT_FROM;
3564 	} else {
3565 		upl_flags |= UPL_WILL_MODIFY;
3566 	}
3567 
3568 	vm_map_entry_t entry;
3569 	vm_object_t object = VM_OBJECT_NULL; /* initialized for the early-exit cleanup path */
3570 	vm_page_t m __unused = VM_PAGE_NULL;
3571 
3572 	if (!vm_map_lookup_entry(current_map(), args.ptr, &entry) || entry->is_sub_map) {
3573 		error = ENOENT;
3574 		printf("%s: did not find entry at beginning of UPL region\n", __func__);
3575 		goto upl_object_test_done;
3576 	}
3577 
3578 	object = VME_OBJECT(entry);
3579 	if (object == VM_OBJECT_NULL) {
3580 		error = ENOENT;
3581 		printf("%s: No VM object associated with entry at beginning of UPL region\n", __func__);
3582 		goto upl_object_test_done;
3583 	}
3584 
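	/* Take an extra reference on the object; it is dropped explicitly in the exit path below */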
3585 	vm_object_reference(object);
3586 
3587 	kern_return_t kr = vm_object_iopl_request(object,
3588 	    (vm_object_offset_t)(args.ptr - entry->vme_start + VME_OFFSET(entry)),
3589 	    upl_size,
3590 	    &upl,
3591 	    NULL,
3592 	    &upl_count,
3593 	    upl_flags,
3594 	    VM_KERN_MEMORY_DIAG);
3595 
3596 	if (args.fault_prot != VM_PROT_NONE) {
3597 		/*
3598 		 * The page may have already been retyped to its "final" executable type by a prior fault,
3599 		 * so simulate a page recycle operation in order to ensure that our simulated exec fault below
3600 		 * will attempt to retype it.
3601 		 */
3602 		vm_object_lock(object);
3603 		m = vm_page_lookup(object, (VME_OFFSET(entry) + ((vm_map_address_t)args.ptr - entry->vme_start)));
3604 		assert(m != VM_PAGE_NULL);
3605 		assert(m->vmp_iopl_wired);
3606 		ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(m);
3607 		pmap_disconnect(pn);
3608 		pmap_lock_phys_page(pn);
3609 		pmap_recycle_page(pn);
3610 		pmap_unlock_phys_page(pn);
3611 		assertf(pmap_will_retype(current_map()->pmap, (vm_map_address_t)args.ptr, VM_PAGE_GET_PHYS_PAGE(m), args.fault_prot,
3612 		    (entry->vme_xnu_user_debug ? PMAP_OPTIONS_XNU_USER_DEBUG : 0), PMAP_MAPPING_TYPE_INFER),
3613 		    "pmap will not retype for vm_page_t %p, prot 0x%x", m, (unsigned int)args.fault_prot);
3614 		vm_object_unlock(object);
3615 	}
3616 
3617 	if (args.should_fail && (kr == KERN_PROTECTION_FAILURE)) {
3618 		goto upl_object_test_done;
3619 	} else if (args.should_fail && (kr == KERN_SUCCESS)) {
3620 		printf("%s: vm_object_iopl_request(%p, 0x%lx) did not fail as expected\n",
3621 		    __func__, (void*)args.ptr, (unsigned long)args.size);
3622 		error = EIO;
3623 		goto upl_object_test_done;
3624 	} else if (kr != KERN_SUCCESS) {
3625 		printf("%s: vm_object_iopl_request(%p, 0x%lx) returned 0x%x\n",
3626 		    __func__, (void*)args.ptr, (unsigned long)args.size, kr);
3627 		error = kr;
3628 		goto upl_object_test_done;
3629 	}
3630 
3631 	if (args.fault_prot != VM_PROT_NONE) {
3632 		kr = vm_fault(current_map(),
3633 		    (vm_map_address_t)args.ptr,
3634 		    args.fault_prot,
3635 		    FALSE,
3636 		    VM_KERN_MEMORY_NONE,
3637 		    THREAD_UNINT,
3638 		    NULL,
3639 		    0);
3640 		/* Page retype attempt with in-flight IOPL should be forbidden. */
3641 		if (kr != KERN_PROTECTION_FAILURE) {
3642 			printf("%s: vm_fault(%p) did not fail as expected\n", __func__, (void*)args.ptr);
3643 			error = ((kr == KERN_SUCCESS) ? EIO : kr);
3644 			goto upl_object_test_done;
3645 		}
3646 		assertf(pmap_will_retype(current_map()->pmap, (vm_map_address_t)args.ptr, VM_PAGE_GET_PHYS_PAGE(m), args.fault_prot,
3647 		    (entry->vme_xnu_user_debug ? PMAP_OPTIONS_XNU_USER_DEBUG : 0), PMAP_MAPPING_TYPE_INFER),
3648 		    "pmap will not retype for vm_page_t %p, prot 0x%x", m, (unsigned int)args.fault_prot);
3649 	}
3650 
3651 upl_object_test_done:
3652 
3653 	if (upl != NULL) {
3654 		upl_commit(upl, NULL, 0);
3655 		upl_deallocate(upl);
3656 	}
3657 
3658 	if ((error == 0) && (args.fault_prot != VM_PROT_NONE)) {
3659 		/*
3660 		 * Exec page retype attempt without in-flight IOPL should ultimately succeed, but should
3661 		 * block if the page is being cleaned.  Simulate that scenario with a thread call to "finish"
3662 		 * the clean operation and wake up the waiting fault handler after 1s.
3663 		 */
3664 		vm_object_lock(object);
3665 		assert(!m->vmp_iopl_wired);
3666 		m->vmp_cleaning = true;
3667 		vm_object_unlock(object);
3668 		thread_call_t page_clean_timer_call = thread_call_allocate(page_clean_timeout, m);
3669 		uint64_t deadline;
3670 		clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
3671 		thread_call_enter_delayed(page_clean_timer_call, deadline);
3672 		kr = vm_fault(current_map(),
3673 		    (vm_map_address_t)args.ptr,
3674 		    args.fault_prot,
3675 		    FALSE,
3676 		    VM_KERN_MEMORY_NONE,
3677 		    THREAD_UNINT,
3678 		    NULL,
3679 		    0);
3680 		/*
3681 		 * Thread call should no longer be active, as its expiry should have been the thing that
3682 		 * unblocked the fault above.
3683 		 */
3684 		assert(!thread_call_isactive(page_clean_timer_call));
3685 		thread_call_free(page_clean_timer_call);
3686 		if (kr != KERN_SUCCESS) {
3687 			printf("%s: vm_fault(%p) did not succeed as expected\n", __func__, (void*)args.ptr);
3688 			error = kr;
3689 		}
3690 	}
3691 
3692 	if (object != VM_OBJECT_NULL) {
3693 		vm_object_deallocate(object);
3694 	}
3695 
3696 	return error;
3697 }
3698 SYSCTL_TEST_REGISTER(vm_upl_object, vm_upl_object_test);
3699 
3700 #endif /* CONFIG_SPTM */
3701