/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/vm_map.h>

#include <kern/ledger.h>

#include <device/device_port.h>
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>

#include <sys/errno.h> /* for the sysctl tests */

extern ledger_template_t        task_ledger_template;

extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t           copy_map,
	vm_map_offset_t         offset,
	vm_map_size_t           size,
	vm_map_t                target_map,
	boolean_t               copy,
	vm_map_copy_t           *target_copy_map_p,
	vm_map_offset_t         *overmap_start_p,
	vm_map_offset_t         *overmap_end_p,
	vm_map_offset_t         *trimmed_start_p);

#define VM_TEST_COLLAPSE_COMPRESSOR             0
#define VM_TEST_WIRE_AND_EXTRACT                0
#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC        0
#if __arm64__
#define VM_TEST_KERNEL_OBJECT_FAULT             0
#endif /* __arm64__ */
#define VM_TEST_DEVICE_PAGER_TRANSPOSE          (DEVELOPMENT || DEBUG)

#if VM_TEST_COLLAPSE_COMPRESSOR
extern boolean_t vm_object_collapse_compressor_allowed;
#include <IOKit/IOLib.h>
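/*
 * Exercises vm_object_collapse() when both the top and backing objects
 * hold a mix of resident, compressed and absent pages, with a non-zero
 * shadow offset on the top object and a non-zero paging offset on the
 * backing object.  After the collapse, the mapping is checked
 * byte-by-byte against the expected provenance of each page (see the
 * expect[] table below).
 */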
static void
vm_test_collapse_compressor(void)
{
	vm_object_size_t        backing_size, top_size;
	vm_object_t             backing_object, top_object;
	vm_map_offset_t         backing_offset, top_offset;
	unsigned char           *backing_address, *top_address;
	kern_return_t           kr;

	printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");

	/* create backing object */
	backing_size = 15 * PAGE_SIZE;
	backing_object = vm_object_allocate(backing_size);
	assert(backing_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
	    backing_object);
	/* map backing object */
	backing_offset = 0;
	kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
	    backing_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	backing_address = (unsigned char *) backing_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped backing object %p at 0x%llx\n",
	    backing_object, (uint64_t) backing_offset);
	/* populate with pages to be compressed in backing object */
	backing_address[0x1 * PAGE_SIZE] = 0xB1;
	backing_address[0x4 * PAGE_SIZE] = 0xB4;
	backing_address[0x7 * PAGE_SIZE] = 0xB7;
	backing_address[0xa * PAGE_SIZE] = 0xBA;
	backing_address[0xd * PAGE_SIZE] = 0xBD;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "backing_object %p\n", backing_object);
	/* compress backing object */
	vm_object_pageout(backing_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
	    backing_object);
	/* wait for all the pages to be gone */
	while (*(volatile int *)&backing_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
	    backing_object);
	/* populate with pages to be resident in backing object */
	backing_address[0x0 * PAGE_SIZE] = 0xB0;
	backing_address[0x3 * PAGE_SIZE] = 0xB3;
	backing_address[0x6 * PAGE_SIZE] = 0xB6;
	backing_address[0x9 * PAGE_SIZE] = 0xB9;
	backing_address[0xc * PAGE_SIZE] = 0xBC;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "backing_object %p\n", backing_object);
	/* leave the other pages absent */
	/* mess with the paging_offset of the backing_object */
	assert(backing_object->paging_offset == 0);
	backing_object->paging_offset = 3 * PAGE_SIZE;

	/* create top object */
	top_size = 9 * PAGE_SIZE;
	top_object = vm_object_allocate(top_size);
	assert(top_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
	    top_object);
	/* map top object */
	top_offset = 0;
	kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
	    top_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	top_address = (unsigned char *) top_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped top object %p at 0x%llx\n",
	    top_object, (uint64_t) top_offset);
	/* populate with pages to be compressed in top object */
	top_address[0x3 * PAGE_SIZE] = 0xA3;
	top_address[0x4 * PAGE_SIZE] = 0xA4;
	top_address[0x5 * PAGE_SIZE] = 0xA5;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "top_object %p\n", top_object);
	/* compress top object */
	vm_object_pageout(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
	    top_object);
	/* wait for all the pages to be gone */
	while (top_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
	    top_object);
	/* populate with pages to be resident in top object */
	top_address[0x0 * PAGE_SIZE] = 0xA0;
	top_address[0x1 * PAGE_SIZE] = 0xA1;
	top_address[0x2 * PAGE_SIZE] = 0xA2;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "top_object %p\n", top_object);
	/* leave the other pages absent */

	/* link the 2 objects */
	vm_object_reference(backing_object);
	top_object->shadow = backing_object;
	top_object->vo_shadow_offset = 3 * PAGE_SIZE;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
	    top_object, backing_object);

	/* unmap backing object */
	vm_map_remove(kernel_map,
	    backing_offset,
	    backing_offset + backing_size,
	    VM_MAP_REMOVE_NO_FLAGS);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "unmapped backing_object %p [0x%llx:0x%llx]\n",
	    backing_object,
	    (uint64_t) backing_offset,
	    (uint64_t) (backing_offset + backing_size));

	/* collapse */
	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
	vm_object_lock(top_object);
	vm_object_collapse(top_object, 0, FALSE);
	vm_object_unlock(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);

	/* did it work? */
	if (top_object->shadow != VM_OBJECT_NULL) {
		printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
		printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		if (vm_object_collapse_compressor_allowed) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
		}
	} else {
		/* check the contents of the mapping */
		unsigned char expect[9] =
		{ 0xA0, 0xA1, 0xA2,             /* resident in top */
		  0xA3, 0xA4, 0xA5,             /* compressed in top */
		  0xB9,         /* resident in backing + shadow_offset */
		  0xBD,         /* compressed in backing + shadow_offset + paging_offset */
		  0x00 };                       /* absent in both */
		unsigned char actual[9];
		unsigned int i, errors;

		errors = 0;
		for (i = 0; i < sizeof(actual); i++) {
			actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
			if (actual[i] != expect[i]) {
				errors++;
			}
		}
		printf("VM_TEST_COLLAPSE_COMPRESSOR: "
		    "actual [%x %x %x %x %x %x %x %x %x] "
		    "expect [%x %x %x %x %x %x %x %x %x] "
		    "%d errors\n",
		    actual[0], actual[1], actual[2], actual[3],
		    actual[4], actual[5], actual[6], actual[7],
		    actual[8],
		    expect[0], expect[1], expect[2], expect[3],
		    expect[4], expect[5], expect[6], expect[7],
		    expect[8],
		    errors);
		if (errors) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
		} else {
			printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
		}
	}
}
#else /* VM_TEST_COLLAPSE_COMPRESSOR */
#define vm_test_collapse_compressor()
#endif /* VM_TEST_COLLAPSE_COMPRESSOR */

#if VM_TEST_WIRE_AND_EXTRACT
extern ppnum_t vm_map_get_phys_page(vm_map_t map,
    vm_offset_t offset);
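/*
 * Wires every page of a mapping that was remapped (shared) from a
 * fabricated user map into a separate "wire" map, extracts the physical
 * page number of each wired page via vm_map_wire_and_extract(), and
 * checks that it matches what vm_map_get_phys_page() reports for the
 * original user mapping.  The last page is then wired a second time to
 * make sure re-wiring yields the same physical page.
 */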
static void
vm_test_wire_and_extract(void)
{
	ledger_t                ledger;
	vm_map_t                user_map, wire_map;
	mach_vm_address_t       user_addr, wire_addr;
	mach_vm_size_t          user_size, wire_size;
	mach_vm_offset_t        cur_offset;
	vm_prot_t               cur_prot, max_prot;
	ppnum_t                 user_ppnum, wire_ppnum;
	kern_return_t           kr;

	ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);
	pmap_t user_pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
	assert(user_pmap);
	user_map = vm_map_create_options(user_pmap,
	    0x100000000ULL,
	    0x200000000ULL,
	    VM_MAP_CREATE_PAGEABLE);
	wire_map = vm_map_create_options(NULL,
	    0x100000000ULL,
	    0x200000000ULL,
	    VM_MAP_CREATE_PAGEABLE);
	user_addr = 0;
	user_size = 0x10000;
	kr = mach_vm_allocate(user_map,
	    &user_addr,
	    user_size,
	    VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);
	wire_addr = 0;
	wire_size = user_size;
	kr = mach_vm_remap(wire_map,
	    &wire_addr,
	    wire_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    user_map,
	    user_addr,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_NONE);
	assert(kr == KERN_SUCCESS);
	for (cur_offset = 0;
	    cur_offset < wire_size;
	    cur_offset += PAGE_SIZE) {
		kr = vm_map_wire_and_extract(wire_map,
		    wire_addr + cur_offset,
		    VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
		    TRUE,
		    &wire_ppnum);
		assert(kr == KERN_SUCCESS);
		user_ppnum = vm_map_get_phys_page(user_map,
		    user_addr + cur_offset);
		printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
		    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
		    kr,
		    user_map, user_addr + cur_offset, user_ppnum,
		    wire_map, wire_addr + cur_offset, wire_ppnum);
		if (kr != KERN_SUCCESS ||
		    wire_ppnum == 0 ||
		    wire_ppnum != user_ppnum) {
			panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
		}
	}
	cur_offset -= PAGE_SIZE;
	kr = vm_map_wire_and_extract(wire_map,
	    wire_addr + cur_offset,
	    VM_PROT_DEFAULT,
	    TRUE,
	    &wire_ppnum);
	assert(kr == KERN_SUCCESS);
	printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
	    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
	    kr,
	    user_map, user_addr + cur_offset, user_ppnum,
	    wire_map, wire_addr + cur_offset, wire_ppnum);
	if (kr != KERN_SUCCESS ||
	    wire_ppnum == 0 ||
	    wire_ppnum != user_ppnum) {
		panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
	}

	printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
#else /* VM_TEST_WIRE_AND_EXTRACT */
#define vm_test_wire_and_extract()
#endif /* VM_TEST_WIRE_AND_EXTRACT */

#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
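/*
 * Repeatedly wires a single page until its wire_count wraps around to
 * 0; the VM is expected to detect the overflow and panic before the
 * final panic() in this function is ever reached.
 */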
static void
vm_test_page_wire_overflow_panic(void)
{
	vm_object_t object;
	vm_page_t page;

	printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");

	object = vm_object_allocate(PAGE_SIZE);
	vm_object_lock(object);
	page = vm_page_alloc(object, 0x0);
	vm_page_lock_queues();
	do {
		vm_page_wire(page, 1, FALSE);
	} while (page->wire_count != 0);
	vm_page_unlock_queues();
	vm_object_unlock(object);
	panic("FBDP(%p,%p): wire_count overflow not detected",
	    object, page);
}
#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
#define vm_test_page_wire_overflow_panic()
#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */

#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
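/*
 * Allocates a guarded kernel stack and then points copyinframe() at
 * that kernel-object address; the copy is expected to fail (non-zero
 * return) rather than fault on the kernel object.
 */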
static void
vm_test_kernel_object_fault(void)
{
	vm_offset_t stack;
	uintptr_t frameb[2];
	int ret;

	kmem_alloc(kernel_map, &stack,
	    kernel_stack_size + ptoa(2),
	    KMA_NOFAIL | KMA_KSTACK | KMA_KOBJECT |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST,
	    VM_KERN_MEMORY_STACK);

	ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
	if (ret != 0) {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
	} else {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
	}

	kmem_free(kernel_map, stack, kernel_stack_size + ptoa(2));
	stack = 0;
}
#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
#define vm_test_kernel_object_fault()
#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */

#if VM_TEST_DEVICE_PAGER_TRANSPOSE
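/*
 * Swaps an anonymous VM object with a device-pager-backed object via
 * vm_object_transpose() and verifies that the "named" bit and the
 * reference counts follow the objects across the swap.  Access to both
 * objects is blocked for the duration of the transpose.
 */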
static void
vm_test_device_pager_transpose(void)
{
	memory_object_t device_pager;
	vm_object_t     anon_object, device_object;
	vm_size_t       size;
	vm_map_offset_t device_mapping;
	kern_return_t   kr;

	size = 3 * PAGE_SIZE;
	anon_object = vm_object_allocate(size);
	assert(anon_object != VM_OBJECT_NULL);
	device_pager = device_pager_setup(NULL, 0, size, 0);
	assert(device_pager != NULL);
	device_object = memory_object_to_vm_object(device_pager);
	assert(device_object != VM_OBJECT_NULL);
#if 0
	/*
	 * Can't actually map this, since another thread might do a
	 * vm_map_enter() that gets coalesced into this object, which
	 * would cause the test to fail.
	 */
	vm_map_offset_t anon_mapping = 0;
	kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
#endif
	device_mapping = 0;
	kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
	    (void *)device_pager, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	memory_object_deallocate(device_pager);

	vm_object_lock(anon_object);
	vm_object_activity_begin(anon_object);
	anon_object->blocked_access = TRUE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_begin(device_object);
	device_object->blocked_access = TRUE;
	vm_object_unlock(device_object);

	assert(anon_object->ref_count == 1);
	assert(!anon_object->named);
	assert(device_object->ref_count == 2);
	assert(device_object->named);

	kr = vm_object_transpose(device_object, anon_object, size);
	assert(kr == KERN_SUCCESS);

	vm_object_lock(anon_object);
	vm_object_activity_end(anon_object);
	anon_object->blocked_access = FALSE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_end(device_object);
	device_object->blocked_access = FALSE;
	vm_object_unlock(device_object);

	assert(anon_object->ref_count == 2);
	assert(anon_object->named);
#if 0
	kr = vm_deallocate(kernel_map, anon_mapping, size);
	assert(kr == KERN_SUCCESS);
#endif
	assert(device_object->ref_count == 1);
	assert(!device_object->named);
	kr = vm_deallocate(kernel_map, device_mapping, size);
	assert(kr == KERN_SUCCESS);

	printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
#define vm_test_device_pager_transpose()
#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */

#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
extern kern_return_t vm_allocate_external(vm_map_t        map,
    vm_offset_t     *addr,
    vm_size_t       size,
    int             flags);
extern kern_return_t vm_remap_external(vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_t                src_map,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance);
extern int debug4k_panic_on_misaligned_sharing;
void vm_test_4k(void);
void
vm_test_4k(void)
{
	pmap_t test_pmap;
	vm_map_t test_map;
	kern_return_t kr;
	vm_address_t expected_addr;
	vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr;
	vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr;
	vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size;
	vm_size_t alloc5_size, remap_src_size;
	vm_address_t fault_addr;
	vm_prot_t cur_prot, max_prot;
	int saved_debug4k_panic_on_misaligned_sharing;

	printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__);
	test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
	assert(test_pmap != NULL);
	test_map = vm_map_create_options(test_pmap,
	    MACH_VM_MIN_ADDRESS,
	    MACH_VM_MAX_ADDRESS,
	    VM_MAP_CREATE_PAGEABLE);
	assert(test_map != VM_MAP_NULL);
	vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT);
	printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map));

	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_deallocate(test_map, alloc1_addr, alloc1_size);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);

	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	alloc2_addr = 0;
	alloc2_size = 3 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size);
	kr = vm_allocate_external(test_map,
	    &alloc2_addr,
	    alloc2_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr);
	expected_addr += alloc2_size;

	alloc3_addr = 0;
	alloc3_size = 18 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size);
	kr = vm_allocate_external(test_map,
	    &alloc3_addr,
	    alloc3_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
	expected_addr += alloc3_size;

	alloc4_addr = 0;
	alloc4_size = 1 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size);
	kr = vm_allocate_external(test_map,
	    &alloc4_addr,
	    alloc4_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc4_addr);
	expected_addr += alloc4_size;

	printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE));
	kr = vm_protect(test_map,
	    alloc2_addr,
	    (1UL * FOURK_PAGE_SIZE),
	    FALSE,
	    VM_PROT_READ);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);

	for (fault_addr = alloc1_addr;
	    fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE);
	    fault_addr += FOURK_PAGE_SIZE) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		if (fault_addr == alloc2_addr) {
			assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_PROTECTION_FAILURE);
			printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr);
			kr = vm_fault(test_map,
			    fault_addr,
			    VM_PROT_READ,
			    FALSE,
			    VM_KERN_MEMORY_NONE,
			    THREAD_UNINT,
			    NULL,
			    0);
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
			printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		} else if (fault_addr >= alloc4_addr + alloc4_size) {
			assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS);
		} else {
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
		}
	}

	alloc5_addr = 0;
	alloc5_size = 7 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size);
	kr = vm_allocate_external(test_map,
	    &alloc5_addr,
	    alloc5_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr);
	expected_addr += alloc5_size;

	dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_SHIFT);
	dealloc_addr += FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE);
	kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);

	remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_SHIFT);
	remap_src_addr += FOURK_PAGE_SIZE;
	remap_src_size = 2 * FOURK_PAGE_SIZE;
	remap_dst_addr = 0;
	printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr);
	kr = vm_remap_external(test_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE,
	    test_map,
	    remap_src_addr,
	    FALSE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr);
	expected_addr += remap_src_size;

	for (fault_addr = remap_dst_addr;
	    fault_addr < remap_dst_addr + remap_src_size;
	    fault_addr += 4096) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
	}

	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	printf("VM_TEST_4K:\n");
	saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing;
	debug4k_panic_on_misaligned_sharing = 0;
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
	debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing;

	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,                    /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,                    /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
#if 000
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#else /* 000 */
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#endif /* 000 */


#if 00
	printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset);
	vm_map_remove(test_map, test_map->min_offset, test_map->max_offset);
#endif

	printf("VM_TEST_4K: PASS\n\n\n\n");
}
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */

#if MACH_ASSERT
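/*
 * Checks that vm_map_copy_adjust_to_target() correctly adjusts a copy
 * map built with one page size (16K) for insertion into a map with a
 * different page size (4K): an offset/size pair that spans 3 (4K)
 * pages must yield a 3-page target copy with nothing overmapped or
 * trimmed.  The numbered comments below list further cases that are
 * not implemented yet.
 */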
static void
vm_test_map_copy_adjust_to_target_one(
	vm_map_copy_t copy_map,
	vm_map_t target_map)
{
	kern_return_t kr;
	vm_map_copy_t target_copy;
	vm_map_offset_t overmap_start, overmap_end, trimmed_start;

	target_copy = VM_MAP_COPY_NULL;
	/* size is 2 (4k) pages but range covers 3 pages */
	kr = vm_map_copy_adjust_to_target(copy_map,
	    0x0 + 0xfff,
	    0x1002,
	    target_map,
	    FALSE,
	    &target_copy,
	    &overmap_start,
	    &overmap_end,
	    &trimmed_start);
	assert(kr == KERN_SUCCESS);
	assert(overmap_start == 0);
	assert(overmap_end == 0);
	assert(trimmed_start == 0);
	assertf(target_copy->size == 0x3000,
	    "target_copy %p size 0x%llx\n",
	    target_copy, (uint64_t)target_copy->size);
	vm_map_copy_discard(target_copy);

	/* 1. adjust_to_target() for bad offset -> error */
	/* 2. adjust_to_target() for bad size -> error */
	/* 3. adjust_to_target() for the whole thing -> unchanged */
	/* 4. adjust_to_target() to trim start by less than 1 page */
	/* 5. adjust_to_target() to trim end by less than 1 page */
	/* 6. adjust_to_target() to trim start and end by less than 1 page */
	/* 7. adjust_to_target() to trim start by more than 1 page */
	/* 8. adjust_to_target() to trim end by more than 1 page */
	/* 9. adjust_to_target() to trim start and end by more than 1 page */
	/* 10. adjust_to_target() to trim start by more than 1 entry */
	/* 11. adjust_to_target() to trim end by more than 1 entry */
	/* 12. adjust_to_target() to trim start and end by more than 1 entry */
	/* 13. adjust_to_target() to trim start and end down to 1 entry */
}

static void
vm_test_map_copy_adjust_to_target(void)
{
	kern_return_t kr;
	vm_map_t map4k, map16k;
	vm_object_t obj1, obj2, obj3, obj4;
	vm_map_offset_t addr4k, addr16k;
	vm_map_size_t size4k, size16k;
	vm_map_copy_t copy4k, copy16k;
	vm_prot_t curprot, maxprot;

	/* create a 4k map */
	map4k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
	    VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(map4k, 12);

	/* create a 16k map */
	map16k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
	    VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(map16k, 14);

	/* create 4 VM objects */
	obj1 = vm_object_allocate(0x100000);
	obj2 = vm_object_allocate(0x100000);
	obj3 = vm_object_allocate(0x100000);
	obj4 = vm_object_allocate(0x100000);

	/* map objects in 4k map */
	vm_object_reference(obj1);
	addr4k = 0x1000;
	size4k = 0x3000;
	kr = vm_map_enter(map4k, &addr4k, size4k, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	assert(addr4k == 0x1000);

	/* map objects in 16k map */
	vm_object_reference(obj1);
	addr16k = 0x4000;
	size16k = 0x8000;
	kr = vm_map_enter(map16k, &addr16k, size16k, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	assert(addr16k == 0x4000);

	/* test for <rdar://60959809> */
	ipc_port_t mem_entry;
	memory_object_size_t mem_entry_size;
	mach_vm_size_t map_size;
	mem_entry_size = 0x1002;
	mem_entry = IPC_PORT_NULL;
	kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff,
	    MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ,
	    &mem_entry, IPC_PORT_NULL);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size);
	map_size = 0;
	kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size);
	mach_memory_entry_port_release(mem_entry);

	/* create 4k copy map */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map4k, addr4k, 0x3000,
	    FALSE, &copy4k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE);
	assert(kr == KERN_SUCCESS);
	assert(copy4k->size == 0x3000);

	/* create 16k copy map */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map16k, addr16k, 0x4000,
	    FALSE, &copy16k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE);
	assert(kr == KERN_SUCCESS);
	assert(copy16k->size == 0x4000);

	/* test each combination */
//	vm_test_map_copy_adjust_to_target_one(copy4k, map4k);
//	vm_test_map_copy_adjust_to_target_one(copy16k, map16k);
//	vm_test_map_copy_adjust_to_target_one(copy4k, map16k);
	vm_test_map_copy_adjust_to_target_one(copy16k, map4k);

	/* assert 1 ref on 4k map */
	assert(os_ref_get_count_raw(&map4k->map_refcnt) == 1);
	/* release 4k map */
	vm_map_deallocate(map4k);
	/* assert 1 ref on 16k map */
	assert(os_ref_get_count_raw(&map16k->map_refcnt) == 1);
	/* release 16k map */
	vm_map_deallocate(map16k);
	/* deallocate copy maps */
	vm_map_copy_discard(copy4k);
	vm_map_copy_discard(copy16k);
	/* assert 1 ref on all VM objects */
	assert(obj1->ref_count == 1);
	assert(obj2->ref_count == 1);
	assert(obj3->ref_count == 1);
	assert(obj4->ref_count == 1);
	/* release all VM objects */
	vm_object_deallocate(obj1);
	vm_object_deallocate(obj2);
	vm_object_deallocate(obj3);
	vm_object_deallocate(obj4);
}
#endif /* MACH_ASSERT */

#if __arm64__ && !KASAN
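/*
 * Verifies per-mapping "internal" ledger accounting: a page exposed
 * through a private "device" object (populated with a physical page
 * borrowed from a user mapping via a UPL) must not be charged to the
 * task's internal ledger, while a regular anonymous page faulted in
 * through a remapping is charged for exactly one page, and the charge
 * disappears when the remapping is deallocated.
 */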
__attribute__((noinline))
static void
vm_test_per_mapping_internal_accounting(void)
{
	ledger_t ledger;
	pmap_t user_pmap;
	vm_map_t user_map;
	kern_return_t kr;
	ledger_amount_t balance;
	mach_vm_address_t user_addr, user_remap;
	vm_map_offset_t device_addr;
	mach_vm_size_t user_size;
	vm_prot_t cur_prot, max_prot;
	upl_size_t upl_size;
	upl_t upl;
	unsigned int upl_count;
	upl_control_flags_t upl_flags;
	upl_page_info_t *pl;
	ppnum_t ppnum;
	vm_object_t device_object;
	vm_map_offset_t map_start, map_end;
	int pmap_flags;

	pmap_flags = 0;
	if (sizeof(vm_map_offset_t) == 4) {
		map_start = 0x100000000ULL;
		map_end = 0x200000000ULL;
		pmap_flags |= PMAP_CREATE_64BIT;
	} else {
		map_start = 0x10000000;
		map_end = 0x20000000;
	}
	/* create a user address space */
	ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);
	assert(ledger);
	user_pmap = pmap_create_options(ledger, 0, pmap_flags);
	assert(user_pmap);
	user_map = vm_map_create(user_pmap,
	    map_start,
	    map_end,
	    TRUE);
	assert(user_map);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* allocate 1 page in that address space */
	user_addr = 0;
	user_size = PAGE_SIZE;
	kr = mach_vm_allocate(user_map,
	    &user_addr,
	    user_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* remap the original mapping */
	user_remap = 0;
	kr = mach_vm_remap(user_map,
	    &user_remap,
	    PAGE_SIZE,
	    0,
	    VM_FLAGS_ANYWHERE,
	    user_map,
	    user_addr,
	    FALSE,                /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* create a UPL from the original mapping */
	upl_size = PAGE_SIZE;
	upl = NULL;
	upl_count = 0;
	upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
	kr = vm_map_create_upl(user_map,
	    (vm_map_offset_t)user_addr,
	    &upl_size,
	    &upl,
	    NULL,
	    &upl_count,
	    &upl_flags,
	    VM_KERN_MEMORY_DIAG);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	assert(upl_page_present(pl, 0));
	ppnum = upl_phys_page(pl, 0);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	device_object = vm_object_allocate(PAGE_SIZE);
	assert(device_object);
	device_object->private = TRUE;
	device_object->phys_contiguous = TRUE;
	kr = vm_object_populate_with_private(device_object, 0,
	    ppnum, PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);

	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* deallocate the original mapping */
	kr = mach_vm_deallocate(user_map, user_addr, PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* map the device_object in the kernel */
	device_addr = 0;
	vm_object_reference(device_object);
	kr = vm_map_enter(kernel_map,
	    &device_addr,
	    PAGE_SIZE,
	    0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
	    device_object,
	    0,
	    FALSE,               /* copy */
	    VM_PROT_DEFAULT,
	    VM_PROT_DEFAULT,
	    VM_INHERIT_NONE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* access the device pager mapping */
	*(char *)device_addr = 'x';
	printf("%s:%d 0x%llx: 0x%x\n", __FUNCTION__, __LINE__, (uint64_t)device_addr, *(uint32_t *)device_addr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* fault in the remap addr */
	kr = vm_fault(user_map, (vm_map_offset_t)user_remap, VM_PROT_READ,
	    FALSE, 0, TRUE, NULL, 0);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == PAGE_SIZE, "balance=0x%llx", balance);
	/* deallocate remapping */
	kr = mach_vm_deallocate(user_map, user_remap, PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	/* check ledger */
	kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
	assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
	assertf(balance == 0, "balance=0x%llx", balance);
	/* TODO: cleanup... */
	printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
}
#endif /* __arm64__ && !KASAN */

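/*
 * Checks vm_tag_get_size() accounting across a kmem allocation's whole
 * lifecycle: the tag's accounted size must grow by one page only while
 * the allocation actually has resident/wired pages, for every
 * combination of being born, flipped, and freed with or without the
 * given flag bit (KMA_VAONLY or KMA_PAGEABLE).
 */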
static void
vm_test_kernel_tag_accounting_kma(kma_flags_t base, kma_flags_t bit)
{
	vm_tag_t tag = VM_KERN_MEMORY_REASON; /* unused during POST */
	uint64_t init_size = vm_tag_get_size(tag);
	uint64_t final_size = init_size + PAGE_SIZE;
	vm_address_t  address;
	kern_return_t kr;

	/*
	 * Test the matrix of:
	 *  - born with or without bit
	 *  - bit flipped or not
	 *  - dies with or without bit
	 */
	for (uint32_t i = 0; i < 4; i++) {
		kma_flags_t flags1 = base | ((i & 1) ? bit : KMA_NONE);
		kma_flags_t flags2 = base | ((i & 2) ? bit : KMA_NONE);

		kr = kmem_alloc(kernel_map, &address, PAGE_SIZE, flags1, tag);
		assert3u(kr, ==, KERN_SUCCESS);

		if (flags1 & (KMA_VAONLY | KMA_PAGEABLE)) {
			assert3u(init_size, ==, vm_tag_get_size(tag));
		} else {
			assert3u(final_size, ==, vm_tag_get_size(tag));
		}

		if ((flags1 ^ flags2) == KMA_VAONLY) {
			if (flags1 & KMA_VAONLY) {
				kernel_memory_populate(address, PAGE_SIZE,
				    KMA_KOBJECT | KMA_NOFAIL, tag);
			} else {
				kernel_memory_depopulate(address, PAGE_SIZE,
				    KMA_KOBJECT, tag);
			}
		}

		if ((flags1 ^ flags2) == KMA_PAGEABLE) {
			if (flags1 & KMA_PAGEABLE) {
				kr = vm_map_wire_kernel(kernel_map,
				    address, address + PAGE_SIZE,
				    VM_PROT_DEFAULT, tag, false);
				assert3u(kr, ==, KERN_SUCCESS);
			} else {
				kr = vm_map_unwire(kernel_map,
				    address, address + PAGE_SIZE, false);
				assert3u(kr, ==, KERN_SUCCESS);
			}
		}

		if (flags2 & (KMA_VAONLY | KMA_PAGEABLE)) {
			assert3u(init_size, ==, vm_tag_get_size(tag));
		} else {
			assert3u(final_size, ==, vm_tag_get_size(tag));
		}

		kmem_free(kernel_map, address, PAGE_SIZE);
		assert3u(init_size, ==, vm_tag_get_size(tag));
	}
}

__attribute__((noinline))
static void
vm_test_kernel_tag_accounting(void)
{
	printf("%s: test running\n", __func__);

	printf("%s: account (KMA_KOBJECT + populate)...\n", __func__);
	vm_test_kernel_tag_accounting_kma(KMA_KOBJECT, KMA_VAONLY);
	printf("%s:     PASS\n", __func__);

	printf("%s: account (regular object + wiring)...\n", __func__);
	vm_test_kernel_tag_accounting_kma(KMA_NONE, KMA_PAGEABLE);
	printf("%s:     PASS\n", __func__);

	printf("%s: test passed\n", __func__);

}

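/*
 * Regression test for an integer truncation in vm_object_collapse():
 * with an object size whose page count overflows an int
 * ((int)(0x400000000000 >> PAGE_SHIFT) truncates to 0), a bypass must
 * not skip the resident-page check and lose the backing object's page.
 * The bypass is expected only after the page is explicitly removed.
 */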
__attribute__((noinline))
static void
vm_test_collapse_overflow(void)
{
	vm_object_t object, backing_object;
	vm_object_size_t size;
	vm_page_t m;

	/* create an object for which (int)(size>>PAGE_SHIFT) = 0 */
	size = 0x400000000000ULL;
	assert((int)(size >> PAGE_SHIFT) == 0);
	backing_object = vm_object_allocate(size + PAGE_SIZE);
	assert(backing_object);
	vm_object_reference(backing_object);
	/* insert a page */
	m = VM_PAGE_NULL;
	while (m == VM_PAGE_NULL) {
		m = vm_page_grab();
		if (m == VM_PAGE_NULL) {
			VM_PAGE_WAIT();
		}
	}
	assert(m);
	vm_object_lock(backing_object);
	vm_page_insert(m, backing_object, 0);
	vm_object_unlock(backing_object);
	/* make it back another object */
	object = vm_object_allocate(size);
	assert(object);
	vm_object_reference(object);
	object->shadow = backing_object;
	vm_object_reference(backing_object);
	/* trigger a bypass */
	vm_object_lock(object);
	vm_object_collapse(object, 0, TRUE);
	/* check that it did not bypass the backing object */
	if (object->shadow != backing_object) {
		panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
	}
	vm_object_unlock(object);

	/* remove the page from the backing object */
	vm_object_lock(backing_object);
	vm_page_remove(m, TRUE);
	vm_object_unlock(backing_object);
	/* trigger a bypass */
	vm_object_lock(object);
	vm_object_collapse(object, 0, TRUE);
	/* check that it did bypass the backing object */
	if (object->shadow == backing_object) {
		panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
	}
	vm_page_insert(m, object, 0);
	vm_object_unlock(object);

	/* cleanup */
	vm_object_deallocate(object);
	/* "backing_object" already lost its reference during the bypass */
//	vm_object_deallocate(backing_object);

	printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
}

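/*
 * Feeds vm_map_range_physical_size() a zero size and several
 * start/size pairs that wrap around the 64-bit address space (before
 * or after page rounding) and expects KERN_INVALID_ARGUMENT with
 * phys_size == 0 for all the wraparound cases.
 */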
__attribute__((noinline))
static void
vm_test_physical_size_overflow(void)
{
	vm_map_address_t start;
	mach_vm_size_t size;
	kern_return_t kr;
	mach_vm_size_t phys_size;
	bool fail;
	int failures = 0;

	/* size == 0 */
	start = 0x100000;
	size = 0x0;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_SUCCESS || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	/* plain wraparound */
	start = 0x100000;
	size = 0xffffffffffffffff - 0x10000;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	/* wraparound after rounding */
	start = 0xffffffffffffff00;
	size = 0xf0;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	/* wraparound to start after rounding */
	start = 0x100000;
	size = 0xffffffffffffffff;
	kr = vm_map_range_physical_size(kernel_map,
	    start,
	    size,
	    &phys_size);
	fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
	printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
	    __FUNCTION__, __LINE__,
	    (fail ? "FAIL" : "PASS"),
	    (uint64_t)start, size, kr, phys_size);
	failures += fail;

	if (failures) {
		panic("%s: FAIL (failures=%d)", __FUNCTION__, failures);
	}
	printf("%s: PASS\n", __FUNCTION__);
}

boolean_t vm_tests_in_progress = FALSE;

kern_return_t
vm_tests(void)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_tests_in_progress = TRUE;

	vm_test_collapse_compressor();
	vm_test_wire_and_extract();
	vm_test_page_wire_overflow_panic();
	vm_test_kernel_object_fault();
	vm_test_device_pager_transpose();
#if MACH_ASSERT
	vm_test_map_copy_adjust_to_target();
#endif /* MACH_ASSERT */
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
	vm_test_4k();
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
#if __arm64__ && !KASAN
	vm_test_per_mapping_internal_accounting();
#endif /* __arm64__ && !KASAN */
	vm_test_kernel_tag_accounting();
	vm_test_collapse_overflow();
	vm_test_physical_size_overflow();

	vm_tests_in_progress = FALSE;

	return kr;
}

/*
 * Checks that vm_map_delete() can deal with map unaligned entries.
 * rdar://88969652
 */
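/*
 * Unlike the POST tests above, this one runs on demand through the
 * sysctl test machinery (see the SYSCTL_TEST_REGISTER call below)
 * rather than from vm_tests().  It only does its work when the map's
 * page size is larger than the kernel page size; otherwise it just
 * reports success.
 */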
static int
vm_map_non_aligned_test(__unused int64_t in, int64_t *out)
{
	vm_map_t map = current_map();
	mach_vm_size_t size = 2 * VM_MAP_PAGE_SIZE(map);
	mach_vm_address_t addr;
	vm_map_entry_t entry;
	kern_return_t kr;

	if (VM_MAP_PAGE_SHIFT(map) > PAGE_SHIFT) {
		kr = mach_vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS) {
			return ENOMEM;
		}

		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, addr, &entry)) {
			panic("couldn't find the entry we just made: "
			    "map:%p addr:0x%0llx", map, addr);
		}

		/*
		 * Now break the entry into:
		 *  2 * 4k
		 *  2 * 4k
		 *  1 * 16k
		 */
		vm_map_clip_end(map, entry, addr + VM_MAP_PAGE_SIZE(map));
		entry->map_aligned = FALSE;
		vm_map_clip_end(map, entry, addr + PAGE_SIZE * 2);
		vm_map_unlock(map);

		kr = mach_vm_deallocate(map, addr, size);
		assert(kr == KERN_SUCCESS);
	}

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_map_non_aligned, vm_map_non_aligned_test);
1390