1 /*
2 * Copyright (c) 2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_vm.h>
33 #include <mach/memory_object.h>
34 #include <mach/vm_map.h>
35 #include <mach/vm_statistics.h>
36 #include <mach/vm32_map_server.h>
37 #include <mach/mach_host.h>
38 #include <mach/host_priv.h>
39
40 #include <kern/ledger.h>
41 #include <kern/host.h>
42
43 #include <device/device_port.h>
44 #include <vm/memory_object_internal.h>
45 #include <vm/vm_fault.h>
46 #include <vm/vm_fault_internal.h>
47 #include <vm/vm_map_internal.h>
48 #include <vm/vm_object_internal.h>
49 #include <vm/vm_pageout_xnu.h>
50 #include <vm/vm_protos.h>
51 #include <vm/vm_memtag.h>
52 #include <vm/vm_memory_entry_xnu.h>
53 #include <vm/vm_kern_xnu.h>
54 #include <vm/vm_iokit.h>
55 #include <vm/vm_page_internal.h>
56 #include <vm/vm_shared_region_xnu.h>
57 #include <vm/vm_far.h>
58
59 #include <kern/zalloc.h>
60 #include <kern/zalloc_internal.h>
61
62 #include <sys/errno.h> /* for the sysctl tests */
63
64 #include <tests/xnupost.h> /* for testing-related functions and macros */
65
66
67 extern ledger_template_t task_ledger_template;
68
69 extern kern_return_t
70 vm_map_copy_adjust_to_target(
71 vm_map_copy_t copy_map,
72 vm_map_offset_t offset,
73 vm_map_size_t size,
74 vm_map_t target_map,
75 boolean_t copy,
76 vm_map_copy_t *target_copy_map_p,
77 vm_map_offset_t *overmap_start_p,
78 vm_map_offset_t *overmap_end_p,
79 vm_map_offset_t *trimmed_start_p);
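/*
 * Note: vm_map_copy_adjust_to_target() is exercised by
 * vm_test_map_copy_adjust_to_target_one() below. Broadly (judging from that
 * test), it adjusts "copy_map" so the requested [offset, offset + size)
 * range can be mapped in "target_map" (which may use a different VM page
 * size), reporting how much the result over-maps at the start/end and how
 * much was trimmed from the start.
 */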
80
81 #define VM_TEST_COLLAPSE_COMPRESSOR 0
82 #define VM_TEST_WIRE_AND_EXTRACT 0
83 #define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
84 #if __arm64__
85 #define VM_TEST_KERNEL_OBJECT_FAULT 0
86 #endif /* __arm64__ */
87 #define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
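/*
 * Compile-time switches for the optional tests below. When a switch is 0,
 * the corresponding vm_test_*() call compiles away to the empty macro
 * defined in the matching #else branch, so vm_tests() builds either way.
 */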
88
89 #if VM_TEST_COLLAPSE_COMPRESSOR
90 extern boolean_t vm_object_collapse_compressor_allowed;
91 #include <IOKit/IOLib.h>
92 static void
93 vm_test_collapse_compressor(void)
94 {
95 vm_object_size_t backing_size, top_size;
96 vm_object_t backing_object, top_object;
97 vm_map_offset_t backing_offset, top_offset;
98 unsigned char *backing_address, *top_address;
99 kern_return_t kr;
100
101 printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");
102
103 /* create backing object */
104 backing_size = 15 * PAGE_SIZE;
105 backing_object = vm_object_allocate(backing_size, kernel_map->serial_id);
106 assert(backing_object != VM_OBJECT_NULL);
107 printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
108 backing_object);
109 /* map backing object */
110 backing_offset = 0;
111 kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
112 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
113 backing_object, 0, FALSE,
114 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
115 assert(kr == KERN_SUCCESS);
116 backing_address = (unsigned char *) backing_offset;
117 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
118 "mapped backing object %p at 0x%llx\n",
119 backing_object, (uint64_t) backing_offset);
120 /* populate with pages to be compressed in backing object */
121 backing_address[0x1 * PAGE_SIZE] = 0xB1;
122 backing_address[0x4 * PAGE_SIZE] = 0xB4;
123 backing_address[0x7 * PAGE_SIZE] = 0xB7;
124 backing_address[0xa * PAGE_SIZE] = 0xBA;
125 backing_address[0xd * PAGE_SIZE] = 0xBD;
126 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
127 "populated pages to be compressed in "
128 "backing_object %p\n", backing_object);
129 /* compress backing object */
130 vm_object_pageout(backing_object);
131 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
132 backing_object);
133 /* wait for all the pages to be gone */
134 while (*(volatile int *)&backing_object->resident_page_count != 0) {
135 IODelay(10);
136 }
137 printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
138 backing_object);
139 /* populate with pages to be resident in backing object */
140 backing_address[0x0 * PAGE_SIZE] = 0xB0;
141 backing_address[0x3 * PAGE_SIZE] = 0xB3;
142 backing_address[0x6 * PAGE_SIZE] = 0xB6;
143 backing_address[0x9 * PAGE_SIZE] = 0xB9;
144 backing_address[0xc * PAGE_SIZE] = 0xBC;
145 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
146 "populated pages to be resident in "
147 "backing_object %p\n", backing_object);
148 /* leave the other pages absent */
149 /* mess with the paging_offset of the backing_object */
150 assert(backing_object->paging_offset == 0);
151 backing_object->paging_offset = 3 * PAGE_SIZE;
152
153 /* create top object */
154 top_size = 9 * PAGE_SIZE;
155 top_object = vm_object_allocate(top_size, backing_object->vmo_provenance);
156 assert(top_object != VM_OBJECT_NULL);
157 printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
158 top_object);
159 /* map top object */
160 top_offset = 0;
161 kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
162 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
163 top_object, 0, FALSE,
164 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
165 assert(kr == KERN_SUCCESS);
166 top_address = (unsigned char *) top_offset;
167 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
168 "mapped top object %p at 0x%llx\n",
169 top_object, (uint64_t) top_offset);
170 /* populate with pages to be compressed in top object */
171 top_address[0x3 * PAGE_SIZE] = 0xA3;
172 top_address[0x4 * PAGE_SIZE] = 0xA4;
173 top_address[0x5 * PAGE_SIZE] = 0xA5;
174 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
175 "populated pages to be compressed in "
176 "top_object %p\n", top_object);
177 /* compress top object */
178 vm_object_pageout(top_object);
179 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
180 top_object);
181 /* wait for all the pages to be gone */
182 while (top_object->resident_page_count != 0) {
183 IODelay(10);
184 }
185 printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
186 top_object);
187 /* populate with pages to be resident in top object */
188 top_address[0x0 * PAGE_SIZE] = 0xA0;
189 top_address[0x1 * PAGE_SIZE] = 0xA1;
190 top_address[0x2 * PAGE_SIZE] = 0xA2;
191 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
192 "populated pages to be resident in "
193 "top_object %p\n", top_object);
194 /* leave the other pages absent */
195
196 /* link the 2 objects */
197 vm_object_reference(backing_object);
198 top_object->shadow = backing_object;
199 top_object->vo_shadow_offset = 3 * PAGE_SIZE;
200 printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
201 top_object, backing_object);
202
203 /* unmap backing object */
204 vm_map_remove(kernel_map,
205 backing_offset,
206 backing_offset + backing_size,
207 VM_MAP_REMOVE_NO_FLAGS);
208 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
209 "unmapped backing_object %p [0x%llx:0x%llx]\n",
210 backing_object,
211 (uint64_t) backing_offset,
212 (uint64_t) (backing_offset + backing_size));
213
214 /* collapse */
215 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
216 vm_object_lock(top_object);
217 vm_object_collapse(top_object, 0, FALSE);
218 vm_object_unlock(top_object);
219 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
220
221 /* did it work? */
222 if (top_object->shadow != VM_OBJECT_NULL) {
223 printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
224 printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
225 if (vm_object_collapse_compressor_allowed) {
226 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
227 }
228 } else {
229 /* check the contents of the mapping */
230 unsigned char expect[9] =
231 { 0xA0, 0xA1, 0xA2, /* resident in top */
232 0xA3, 0xA4, 0xA5, /* compressed in top */
233 0xB9, /* resident in backing + shadow_offset */
234 0xBD, /* compressed in backing + shadow_offset + paging_offset */
235 0x00 }; /* absent in both */
236 unsigned char actual[9];
237 unsigned int i, errors;
238
239 errors = 0;
240 for (i = 0; i < sizeof(actual); i++) {
241 actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
242 if (actual[i] != expect[i]) {
243 errors++;
244 }
245 }
246 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
247 "actual [%x %x %x %x %x %x %x %x %x] "
248 "expect [%x %x %x %x %x %x %x %x %x] "
249 "%d errors\n",
250 actual[0], actual[1], actual[2], actual[3],
251 actual[4], actual[5], actual[6], actual[7],
252 actual[8],
253 expect[0], expect[1], expect[2], expect[3],
254 expect[4], expect[5], expect[6], expect[7],
255 expect[8],
256 errors);
257 if (errors) {
258 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
259 } else {
260 printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
261 }
262 }
263 }
264 #else /* VM_TEST_COLLAPSE_COMPRESSOR */
265 #define vm_test_collapse_compressor()
266 #endif /* VM_TEST_COLLAPSE_COMPRESSOR */
267
268 #if VM_TEST_WIRE_AND_EXTRACT
269 extern ppnum_t vm_map_get_phys_page(vm_map_t map,
270 vm_offset_t offset);
271 static void
272 vm_test_wire_and_extract(void)
273 {
274 ledger_t ledger;
275 vm_map_t user_map, wire_map;
276 mach_vm_address_t user_addr, wire_addr;
277 mach_vm_size_t user_size, wire_size;
278 mach_vm_offset_t cur_offset;
279 vm_prot_t cur_prot, max_prot;
280 ppnum_t user_ppnum, wire_ppnum;
281 kern_return_t kr;
282
283 ledger = ledger_instantiate(task_ledger_template,
284 LEDGER_CREATE_ACTIVE_ENTRIES);
285 pmap_t user_pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
286 assert(user_pmap);
287 user_map = vm_map_create_options(user_pmap,
288 0x100000000ULL,
289 0x200000000ULL,
290 VM_MAP_CREATE_PAGEABLE);
291 wire_map = vm_map_create_options(NULL,
292 0x100000000ULL,
293 0x200000000ULL,
294 VM_MAP_CREATE_PAGEABLE);
295 user_addr = 0;
296 user_size = 0x10000;
297 kr = mach_vm_allocate(user_map,
298 &user_addr,
299 user_size,
300 VM_FLAGS_ANYWHERE);
301 assert(kr == KERN_SUCCESS);
302 wire_addr = 0;
303 wire_size = user_size;
304 kr = mach_vm_remap(wire_map,
305 &wire_addr,
306 wire_size,
307 0,
308 VM_FLAGS_ANYWHERE,
309 user_map,
310 user_addr,
311 FALSE,
312 &cur_prot,
313 &max_prot,
314 VM_INHERIT_NONE);
315 assert(kr == KERN_SUCCESS);
316 for (cur_offset = 0;
317 cur_offset < wire_size;
318 cur_offset += PAGE_SIZE) {
319 kr = vm_map_wire_and_extract(wire_map,
320 wire_addr + cur_offset,
321 VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
322 TRUE,
323 &wire_ppnum);
324 assert(kr == KERN_SUCCESS);
325 user_ppnum = vm_map_get_phys_page(user_map,
326 user_addr + cur_offset);
327 printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
328 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
329 kr,
330 user_map, user_addr + cur_offset, user_ppnum,
331 wire_map, wire_addr + cur_offset, wire_ppnum);
332 if (kr != KERN_SUCCESS ||
333 wire_ppnum == 0 ||
334 wire_ppnum != user_ppnum) {
335 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
336 }
337 }
338 cur_offset -= PAGE_SIZE;
339 kr = vm_map_wire_and_extract(wire_map,
340 wire_addr + cur_offset,
341 VM_PROT_DEFAULT,
342 TRUE,
343 &wire_ppnum);
344 assert(kr == KERN_SUCCESS);
345 printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
346 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
347 kr,
348 user_map, user_addr + cur_offset, user_ppnum,
349 wire_map, wire_addr + cur_offset, wire_ppnum);
350 if (kr != KERN_SUCCESS ||
351 wire_ppnum == 0 ||
352 wire_ppnum != user_ppnum) {
353 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
354 }
355
356 printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
357 }
358 #else /* VM_TEST_WIRE_AND_EXTRACT */
359 #define vm_test_wire_and_extract()
360 #endif /* VM_TEST_WIRE_AND_EXTRACT */
361
362 #if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
363 static void
364 vm_test_page_wire_overflow_panic(void)
365 {
366 vm_object_t object;
367 vm_page_t page;
368
369 printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");
370
371 object = vm_object_allocate(PAGE_SIZE, VM_MAP_SERIAL_NONE);
372 while ((page = vm_page_grab()) == VM_PAGE_NULL) {
373 VM_PAGE_WAIT();
374 }
375 vm_object_lock(object);
376 vm_page_insert(page, object, 0);
377 vm_page_lock_queues();
378 do {
379 vm_page_wire(page, 1, FALSE);
380 } while (page->wire_count != 0);
381 vm_page_unlock_queues();
382 vm_object_unlock(object);
383 panic("FBDP(%p,%p): wire_count overflow not detected",
384 object, page);
385 }
386 #else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
387 #define vm_test_page_wire_overflow_panic()
388 #endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
389
390 #if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
391 extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
392 static void
393 vm_test_kernel_object_fault(void)
394 {
395 vm_offset_t stack;
396 uintptr_t frameb[2];
397 int ret;
398
399 kmem_alloc(kernel_map, &stack,
400 kernel_stack_size + ptoa(2),
401 KMA_NOFAIL | KMA_KSTACK | KMA_KOBJECT |
402 KMA_GUARD_FIRST | KMA_GUARD_LAST,
403 VM_KERN_MEMORY_STACK);
404
405 ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
406 if (ret != 0) {
407 printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
408 } else {
409 printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
410 }
411
412 kmem_free_guard(kernel_map, stack, kernel_stack_size + ptoa(2),
413 KMF_GUARD_FIRST | KMF_GUARD_LAST, KMEM_GUARD_NONE);
414 stack = 0;
415 }
416 #else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
417 #define vm_test_kernel_object_fault()
418 #endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
419
420 #if VM_TEST_DEVICE_PAGER_TRANSPOSE
421 static void
422 vm_test_device_pager_transpose(void)
423 {
424 memory_object_t device_pager;
425 vm_object_t anon_object, device_object;
426 vm_size_t size;
427 vm_map_offset_t device_mapping;
428 kern_return_t kr;
429
430 size = 3 * PAGE_SIZE;
431 anon_object = vm_object_allocate(size, kernel_map->serial_id);
432 assert(anon_object != VM_OBJECT_NULL);
433 device_pager = device_pager_setup(NULL, 0, size, 0);
434 assert(device_pager != NULL);
435 device_object = memory_object_to_vm_object(device_pager);
436 assert(device_object != VM_OBJECT_NULL);
437 #if 0
438 /*
439 * Can't actually map this, since another thread might do a
440 * vm_map_enter() that gets coalesced into this object, which
441 * would cause the test to fail.
442 */
443 vm_map_offset_t anon_mapping = 0;
444 kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
445 VM_MAP_KERNEL_FLAGS_ANYWHERE(),
446 anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
447 VM_INHERIT_DEFAULT);
448 assert(kr == KERN_SUCCESS);
449 #endif
450 device_mapping = 0;
451 kr = mach_vm_map_kernel(kernel_map,
452 vm_sanitize_wrap_addr_ref(&device_mapping),
453 size,
454 0,
455 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
456 (void *)device_pager,
457 0,
458 FALSE,
459 VM_PROT_DEFAULT,
460 VM_PROT_ALL,
461 VM_INHERIT_DEFAULT);
462 assert(kr == KERN_SUCCESS);
463 memory_object_deallocate(device_pager);
464
465 vm_object_lock(anon_object);
466 vm_object_activity_begin(anon_object);
467 anon_object->blocked_access = TRUE;
468 vm_object_unlock(anon_object);
469 vm_object_lock(device_object);
470 vm_object_activity_begin(device_object);
471 device_object->blocked_access = TRUE;
472 vm_object_unlock(device_object);
473
474 assert(os_ref_get_count_raw(&anon_object->ref_count) == 1);
475 assert(!anon_object->named);
476 assert(os_ref_get_count_raw(&device_object->ref_count) == 2);
477 assert(device_object->named);
478
479 kr = vm_object_transpose(device_object, anon_object, size);
480 assert(kr == KERN_SUCCESS);
481
482 vm_object_lock(anon_object);
483 vm_object_activity_end(anon_object);
484 anon_object->blocked_access = FALSE;
485 vm_object_unlock(anon_object);
486 vm_object_lock(device_object);
487 vm_object_activity_end(device_object);
488 device_object->blocked_access = FALSE;
489 vm_object_unlock(device_object);
490
491 assert(os_ref_get_count_raw(&anon_object->ref_count) == 2);
492 assert(anon_object->named);
493 #if 0
494 kr = vm_deallocate(kernel_map, anon_mapping, size);
495 assert(kr == KERN_SUCCESS);
496 #endif
497 assert(os_ref_get_count_raw(&device_object->ref_count) == 1);
498 assert(!device_object->named);
499 kr = vm_deallocate(kernel_map, device_mapping, size);
500 assert(kr == KERN_SUCCESS);
501
502 printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
503 }
504 #else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
505 #define vm_test_device_pager_transpose()
506 #endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
507
508 extern kern_return_t vm_allocate_external(vm_map_t map,
509 vm_offset_t *addr,
510 vm_size_t size,
511 int flags);
512 extern kern_return_t vm_remap_external(vm_map_t target_map,
513 vm_offset_t *address,
514 vm_size_t size,
515 vm_offset_t mask,
516 int flags,
517 vm_map_t src_map,
518 vm_offset_t memory_address,
519 boolean_t copy,
520 vm_prot_t *cur_protection,
521 vm_prot_t *max_protection,
522 vm_inherit_t inheritance);
523 #if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
524 extern int debug4k_panic_on_misaligned_sharing;
525 void vm_test_4k(void);
526 void
527 vm_test_4k(void)
528 {
529 pmap_t test_pmap;
530 vm_map_t test_map;
531 kern_return_t kr;
532 vm_address_t expected_addr;
533 vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr;
534 vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr;
535 vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size;
536 vm_size_t alloc5_size, remap_src_size;
537 vm_address_t fault_addr;
538 vm_prot_t cur_prot, max_prot;
539 int saved_debug4k_panic_on_misaligned_sharing;
540
541 printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__);
542 test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
543 assert(test_pmap != NULL);
544 test_map = vm_map_create_options(test_pmap,
545 MACH_VM_MIN_ADDRESS,
546 MACH_VM_MAX_ADDRESS,
547 VM_MAP_CREATE_PAGEABLE);
548 assert(test_map != VM_MAP_NULL);
549 vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT);
550 printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map));
551
552 alloc1_addr = 0;
553 alloc1_size = 1 * FOURK_PAGE_SIZE;
554 expected_addr = 0x1000;
555 printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
556 kr = vm_allocate_external(test_map,
557 &alloc1_addr,
558 alloc1_size,
559 VM_FLAGS_ANYWHERE);
560 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
561 assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
562 printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
563 expected_addr += alloc1_size;
564
565 printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
566 kr = vm_deallocate(test_map, alloc1_addr, alloc1_size);
567 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
568 printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
569
570 alloc1_addr = 0;
571 alloc1_size = 1 * FOURK_PAGE_SIZE;
572 expected_addr = 0x1000;
573 printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
574 kr = vm_allocate_external(test_map,
575 &alloc1_addr,
576 alloc1_size,
577 VM_FLAGS_ANYWHERE);
578 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
579 assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
580 printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
581 expected_addr += alloc1_size;
582
583 alloc2_addr = 0;
584 alloc2_size = 3 * FOURK_PAGE_SIZE;
585 printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size);
586 kr = vm_allocate_external(test_map,
587 &alloc2_addr,
588 alloc2_size,
589 VM_FLAGS_ANYWHERE);
590 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
591 assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr);
592 printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr);
593 expected_addr += alloc2_size;
594
595 alloc3_addr = 0;
596 alloc3_size = 18 * FOURK_PAGE_SIZE;
597 printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size);
598 kr = vm_allocate_external(test_map,
599 &alloc3_addr,
600 alloc3_size,
601 VM_FLAGS_ANYWHERE);
602 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
603 assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr);
604 printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
605 expected_addr += alloc3_size;
606
607 alloc4_addr = 0;
608 alloc4_size = 1 * FOURK_PAGE_SIZE;
609 printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size);
610 kr = vm_allocate_external(test_map,
611 &alloc4_addr,
612 alloc4_size,
613 VM_FLAGS_ANYWHERE);
614 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
615 assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr);
616 	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc4_addr);
617 expected_addr += alloc4_size;
618
619 printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE));
620 kr = vm_protect(test_map,
621 alloc2_addr,
622 (1UL * FOURK_PAGE_SIZE),
623 FALSE,
624 VM_PROT_READ);
625 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
626
627 for (fault_addr = alloc1_addr;
628 fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE);
629 fault_addr += FOURK_PAGE_SIZE) {
630 printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
631 kr = vm_fault(test_map,
632 fault_addr,
633 VM_PROT_WRITE,
634 FALSE,
635 VM_KERN_MEMORY_NONE,
636 THREAD_UNINT,
637 NULL,
638 0);
639 printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
640 if (fault_addr == alloc2_addr) {
641 assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_PROTECTION_FAILURE);
642 printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr);
643 kr = vm_fault(test_map,
644 fault_addr,
645 VM_PROT_READ,
646 FALSE,
647 VM_KERN_MEMORY_NONE,
648 THREAD_UNINT,
649 NULL,
650 0);
651 assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
652 printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
653 } else if (fault_addr >= alloc4_addr + alloc4_size) {
654 assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS);
655 } else {
656 assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
657 }
658 }
659
660 alloc5_addr = 0;
661 alloc5_size = 7 * FOURK_PAGE_SIZE;
662 printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size);
663 kr = vm_allocate_external(test_map,
664 &alloc5_addr,
665 alloc5_size,
666 VM_FLAGS_ANYWHERE);
667 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
668 assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr);
669 printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr);
670 expected_addr += alloc5_size;
671
672 dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_SHIFT);
673 dealloc_addr += FOURK_PAGE_SIZE;
674 printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE);
675 kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE);
676 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
677 printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
678
679 remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_SHIFT);
680 remap_src_addr += FOURK_PAGE_SIZE;
681 remap_src_size = 2 * FOURK_PAGE_SIZE;
682 remap_dst_addr = 0;
683 printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr);
684 kr = vm_remap_external(test_map,
685 &remap_dst_addr,
686 remap_src_size,
687 0, /* mask */
688 VM_FLAGS_ANYWHERE,
689 test_map,
690 remap_src_addr,
691 FALSE, /* copy */
692 &cur_prot,
693 &max_prot,
694 VM_INHERIT_DEFAULT);
695 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
696 assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr);
697 printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr);
698 expected_addr += remap_src_size;
699
700 for (fault_addr = remap_dst_addr;
701 fault_addr < remap_dst_addr + remap_src_size;
702 fault_addr += 4096) {
703 printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
704 kr = vm_fault(test_map,
705 fault_addr,
706 VM_PROT_WRITE,
707 FALSE,
708 VM_KERN_MEMORY_NONE,
709 THREAD_UNINT,
710 NULL,
711 0);
712 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
713 printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
714 }
715
716 printf("VM_TEST_4K:\n");
717 remap_dst_addr = 0;
718 remap_src_addr = alloc3_addr + 0xc000;
719 remap_src_size = 0x5000;
720 printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
721 kr = vm_remap_external(kernel_map,
722 &remap_dst_addr,
723 remap_src_size,
724 0, /* mask */
725 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
726 test_map,
727 remap_src_addr,
728 FALSE, /* copy */
729 &cur_prot,
730 &max_prot,
731 VM_INHERIT_DEFAULT);
732 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
733 printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);
734
735 printf("VM_TEST_4K:\n");
736 remap_dst_addr = 0;
737 remap_src_addr = alloc3_addr + 0xc000;
738 remap_src_size = 0x5000;
739 printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
740 kr = vm_remap_external(kernel_map,
741 &remap_dst_addr,
742 remap_src_size,
743 0, /* mask */
744 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
745 test_map,
746 remap_src_addr,
747 TRUE, /* copy */
748 &cur_prot,
749 &max_prot,
750 VM_INHERIT_DEFAULT);
751 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
752 printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);
753
754 printf("VM_TEST_4K:\n");
755 saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing;
756 debug4k_panic_on_misaligned_sharing = 0;
757 remap_dst_addr = 0;
758 remap_src_addr = alloc1_addr;
759 remap_src_size = alloc1_size + alloc2_size;
760 printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
761 kr = vm_remap_external(kernel_map,
762 &remap_dst_addr,
763 remap_src_size,
764 0, /* mask */
765 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
766 test_map,
767 remap_src_addr,
768 FALSE, /* copy */
769 &cur_prot,
770 &max_prot,
771 VM_INHERIT_DEFAULT);
772 assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
773 printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
774 debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing;
775
776 printf("VM_TEST_4K:\n");
777 remap_dst_addr = 0;
778 remap_src_addr = alloc1_addr;
779 remap_src_size = alloc1_size + alloc2_size;
780 printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
781 kr = vm_remap_external(kernel_map,
782 &remap_dst_addr,
783 remap_src_size,
784 0, /* mask */
785 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
786 test_map,
787 remap_src_addr,
788 TRUE, /* copy */
789 &cur_prot,
790 &max_prot,
791 VM_INHERIT_DEFAULT);
792 #if 000
793 assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
794 printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
795 #else /* 000 */
796 assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
797 printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
798 #endif /* 000 */
799
800
801 #if 00
802 printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset);
803 vm_map_remove(test_map, test_map->min_offset, test_map->max_offset);
804 #endif
805
806 printf("VM_TEST_4K: PASS\n\n\n\n");
807 }
808 #endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
809
810 #if MACH_ASSERT
811 static void
812 vm_test_map_copy_adjust_to_target_one(
813 vm_map_copy_t copy_map,
814 vm_map_t target_map)
815 {
816 kern_return_t kr;
817 vm_map_copy_t target_copy;
818 vm_map_offset_t overmap_start, overmap_end, trimmed_start;
819
820 target_copy = VM_MAP_COPY_NULL;
821 /* size is 2 (4k) pages but range covers 3 pages */
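	/* offset 0xfff + size 0x1002 covers bytes 0xfff..0x2000, which touches
	 * 4k pages 0, 1 and 2, hence the 0x3000 adjusted size checked below */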
822 kr = vm_map_copy_adjust_to_target(copy_map,
823 0x0 + 0xfff,
824 0x1002,
825 target_map,
826 FALSE,
827 &target_copy,
828 &overmap_start,
829 &overmap_end,
830 &trimmed_start);
831 assert(kr == KERN_SUCCESS);
832 assert(overmap_start == 0);
833 assert(overmap_end == 0);
834 assert(trimmed_start == 0);
835 assertf(target_copy->size == 0x3000,
836 "target_copy %p size 0x%llx\n",
837 target_copy, (uint64_t)target_copy->size);
838 vm_map_copy_discard(target_copy);
839
840 /* 1. adjust_to_target() for bad offset -> error */
841 /* 2. adjust_to_target() for bad size -> error */
842 /* 3. adjust_to_target() for the whole thing -> unchanged */
843 /* 4. adjust_to_target() to trim start by less than 1 page */
844 /* 5. adjust_to_target() to trim end by less than 1 page */
845 /* 6. adjust_to_target() to trim start and end by less than 1 page */
846 /* 7. adjust_to_target() to trim start by more than 1 page */
847 /* 8. adjust_to_target() to trim end by more than 1 page */
848 /* 9. adjust_to_target() to trim start and end by more than 1 page */
849 /* 10. adjust_to_target() to trim start by more than 1 entry */
850 	/* 11. adjust_to_target() to trim end by more than 1 entry */
851 /* 12. adjust_to_target() to trim start and end by more than 1 entry */
852 /* 13. adjust_to_target() to trim start and end down to 1 entry */
853 }
854
855 static void
856 vm_test_map_copy_adjust_to_target(void)
857 {
858 kern_return_t kr;
859 vm_map_t map4k, map16k;
860 vm_object_t obj1, obj2, obj3, obj4;
861 vm_map_offset_t addr4k, addr16k;
862 vm_map_size_t size4k, size16k;
863 vm_map_copy_t copy4k, copy16k;
864 vm_prot_t curprot, maxprot;
865 vm_map_kernel_flags_t vmk_flags;
866
867 /* create a 4k map */
868 map4k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
869 VM_MAP_CREATE_PAGEABLE);
870 vm_map_set_page_shift(map4k, 12);
871
872 /* create a 16k map */
873 map16k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
874 VM_MAP_CREATE_PAGEABLE);
875 vm_map_set_page_shift(map16k, 14);
876
877 /* create 4 VM objects */
878 obj1 = vm_object_allocate(0x100000, map4k->serial_id);
879 obj2 = vm_object_allocate(0x100000, map4k->serial_id);
880 obj3 = vm_object_allocate(0x100000, map4k->serial_id);
881 obj4 = vm_object_allocate(0x100000, map4k->serial_id);
882
883 /* map objects in 4k map */
884 vm_object_reference(obj1);
885 addr4k = 0x1000;
886 size4k = 0x3000;
887 kr = vm_map_enter(map4k, &addr4k, size4k, 0,
888 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
889 FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
890 VM_INHERIT_DEFAULT);
891 assert(kr == KERN_SUCCESS);
892 assert(addr4k == 0x1000);
893
894 /* map objects in 16k map */
895 vm_object_reference(obj1);
896 addr16k = 0x4000;
897 size16k = 0x8000;
898 kr = vm_map_enter(map16k, &addr16k, size16k, 0,
899 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
900 FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
901 VM_INHERIT_DEFAULT);
902 assert(kr == KERN_SUCCESS);
903 assert(addr16k == 0x4000);
904
905 /* test for <rdar://60959809> */
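	/*
	 * Roughly: make a VM-share memory entry at an unaligned data address in
	 * the 16k map (MAP_MEM_USE_DATA_ADDR reports the size from that address
	 * to the end of the last 16k page, hence 0x5001), then check that
	 * mapping the requested 0x1002 bytes into a 4k map only needs three
	 * 4k pages (0x3000).
	 */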
906 ipc_port_t mem_entry;
907 memory_object_size_t mem_entry_size;
908 mach_vm_size_t map_size;
909 mem_entry_size = 0x1002;
910 mem_entry = IPC_PORT_NULL;
911 kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff,
912 MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ,
913 &mem_entry, IPC_PORT_NULL);
914 assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
915 assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size);
916 map_size = 0;
917 kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size);
918 assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
919 assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size);
920 mach_memory_entry_port_release(mem_entry);
921
922 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
923 vmk_flags.vmkf_remap_legacy_mode = true;
924
925 /* create 4k copy map */
926 curprot = VM_PROT_NONE;
927 maxprot = VM_PROT_NONE;
928 kr = vm_map_copy_extract(map4k, addr4k, 0x3000,
929 	    FALSE, &copy4k, &curprot, &maxprot,
930 VM_INHERIT_DEFAULT, vmk_flags);
931 assert(kr == KERN_SUCCESS);
932 assert(copy4k->size == 0x3000);
933
934 /* create 16k copy map */
935 curprot = VM_PROT_NONE;
936 maxprot = VM_PROT_NONE;
937 kr = vm_map_copy_extract(map16k, addr16k, 0x4000,
938 	    FALSE, &copy16k, &curprot, &maxprot,
939 VM_INHERIT_DEFAULT, vmk_flags);
940 assert(kr == KERN_SUCCESS);
941 assert(copy16k->size == 0x4000);
942
943 /* test each combination */
944 // vm_test_map_copy_adjust_to_target_one(copy4k, map4k);
945 // vm_test_map_copy_adjust_to_target_one(copy16k, map16k);
946 // vm_test_map_copy_adjust_to_target_one(copy4k, map16k);
947 vm_test_map_copy_adjust_to_target_one(copy16k, map4k);
948
949 /* assert 1 ref on 4k map */
950 assert(os_ref_get_count_raw(&map4k->map_refcnt) == 1);
951 /* release 4k map */
952 vm_map_deallocate(map4k);
953 /* assert 1 ref on 16k map */
954 assert(os_ref_get_count_raw(&map16k->map_refcnt) == 1);
955 /* release 16k map */
956 vm_map_deallocate(map16k);
957 /* deallocate copy maps */
958 vm_map_copy_discard(copy4k);
959 vm_map_copy_discard(copy16k);
960 /* assert 1 ref on all VM objects */
961 assert(os_ref_get_count_raw(&obj1->ref_count) == 1);
962 assert(os_ref_get_count_raw(&obj2->ref_count) == 1);
963 assert(os_ref_get_count_raw(&obj3->ref_count) == 1);
964 assert(os_ref_get_count_raw(&obj4->ref_count) == 1);
965 /* release all VM objects */
966 vm_object_deallocate(obj1);
967 vm_object_deallocate(obj2);
968 vm_object_deallocate(obj3);
969 vm_object_deallocate(obj4);
970 }
971 #endif /* MACH_ASSERT */
972
973 #if __arm64__ && !KASAN
974 __attribute__((noinline))
975 static void
976 vm_test_per_mapping_internal_accounting(void)
977 {
978 ledger_t ledger;
979 pmap_t user_pmap;
980 vm_map_t user_map;
981 kern_return_t kr;
982 ledger_amount_t balance;
983 mach_vm_address_t user_addr, user_remap;
984 vm_map_offset_t device_addr;
985 mach_vm_size_t user_size;
986 vm_prot_t cur_prot, max_prot;
987 upl_size_t upl_size;
988 upl_t upl;
989 unsigned int upl_count;
990 upl_control_flags_t upl_flags;
991 upl_page_info_t *pl;
992 ppnum_t ppnum;
993 vm_object_t device_object;
994 vm_map_offset_t map_start, map_end;
995 int pmap_flags;
996
997 pmap_flags = 0;
998 	if (sizeof(vm_map_offset_t) == 4) {
999 		map_start = 0x10000000;
1000 		map_end = 0x20000000;
1001 	} else {
1002 		map_start = 0x100000000ULL;
1003 		map_end = 0x200000000ULL;
1004 		pmap_flags |= PMAP_CREATE_64BIT;
1005 	}
1006 /* create a user address space */
1007 ledger = ledger_instantiate(task_ledger_template,
1008 LEDGER_CREATE_ACTIVE_ENTRIES);
1009 assert(ledger);
1010 user_pmap = pmap_create_options(ledger, 0, pmap_flags);
1011 assert(user_pmap);
1012 user_map = vm_map_create(user_pmap,
1013 map_start,
1014 map_end,
1015 TRUE);
1016 assert(user_map);
1017 /* check ledger */
1018 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1019 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1020 assertf(balance == 0, "balance=0x%llx", balance);
1021 /* allocate 1 page in that address space */
1022 user_addr = 0;
1023 user_size = PAGE_SIZE;
1024 kr = mach_vm_allocate(user_map,
1025 &user_addr,
1026 user_size,
1027 VM_FLAGS_ANYWHERE);
1028 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1029 /* check ledger */
1030 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1031 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1032 assertf(balance == 0, "balance=0x%llx", balance);
1033 /* remap the original mapping */
1034 user_remap = 0;
1035 kr = mach_vm_remap(user_map,
1036 &user_remap,
1037 PAGE_SIZE,
1038 0,
1039 VM_FLAGS_ANYWHERE,
1040 user_map,
1041 user_addr,
1042 FALSE, /* copy */
1043 &cur_prot,
1044 &max_prot,
1045 VM_INHERIT_DEFAULT);
1046 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1047 /* check ledger */
1048 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1049 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1050 assertf(balance == 0, "balance=0x%llx", balance);
1051 /* create a UPL from the original mapping */
1052 upl_size = PAGE_SIZE;
1053 upl = NULL;
1054 upl_count = 0;
1055 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
1056 kr = vm_map_create_upl(user_map,
1057 (vm_map_offset_t)user_addr,
1058 &upl_size,
1059 &upl,
1060 NULL,
1061 &upl_count,
1062 &upl_flags,
1063 VM_KERN_MEMORY_DIAG);
1064 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1065 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
1066 assert(upl_page_present(pl, 0));
1067 ppnum = upl_phys_page(pl, 0);
1068 /* check ledger */
1069 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1070 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1071 assertf(balance == 0, "balance=0x%llx", balance);
1072 device_object = vm_object_allocate(PAGE_SIZE, kernel_map->serial_id);
1073 assert(device_object);
1074 vm_object_lock(device_object);
1075 VM_OBJECT_SET_PRIVATE(device_object, TRUE);
1076 VM_OBJECT_SET_PHYS_CONTIGUOUS(device_object, TRUE);
1077 device_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1078 vm_object_unlock(device_object);
1079 kr = vm_object_populate_with_private(device_object, 0,
1080 ppnum, PAGE_SIZE);
1081 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1082
1083 /* check ledger */
1084 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1085 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1086 assertf(balance == 0, "balance=0x%llx", balance);
1087 /* deallocate the original mapping */
1088 kr = mach_vm_deallocate(user_map, user_addr, PAGE_SIZE);
1089 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1090 /* map the device_object in the kernel */
1091 device_addr = 0;
1092 vm_object_reference(device_object);
1093 kr = vm_map_enter(kernel_map,
1094 &device_addr,
1095 PAGE_SIZE,
1096 0,
1097 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
1098 device_object,
1099 0,
1100 FALSE, /* copy */
1101 VM_PROT_DEFAULT,
1102 VM_PROT_DEFAULT,
1103 VM_INHERIT_NONE);
1104 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1105 /* access the device pager mapping */
1106 *(char *)device_addr = 'x';
1107 printf("%s:%d 0x%llx: 0x%x\n", __FUNCTION__, __LINE__, (uint64_t)device_addr, *(uint32_t *)device_addr);
1108 /* check ledger */
1109 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1110 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1111 assertf(balance == 0, "balance=0x%llx", balance);
1112 /* fault in the remap addr */
1113 kr = vm_fault(user_map, (vm_map_offset_t)user_remap, VM_PROT_READ,
1114 FALSE, 0, TRUE, NULL, 0);
1115 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1116 /* check ledger */
1117 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1118 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1119 assertf(balance == PAGE_SIZE, "balance=0x%llx", balance);
1120 /* deallocate remapping */
1121 kr = mach_vm_deallocate(user_map, user_remap, PAGE_SIZE);
1122 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1123 /* check ledger */
1124 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1125 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1126 assertf(balance == 0, "balance=0x%llx", balance);
1127 /* TODO: cleanup... */
1128 printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
1129 }
1130 #endif /* __arm64__ && !KASAN */
1131
1132 static void
1133 vm_test_kernel_tag_accounting_kma(kma_flags_t base, kma_flags_t bit)
1134 {
1135 vm_tag_t tag = VM_KERN_MEMORY_REASON; /* unused during POST */
1136 uint64_t init_size = vm_tag_get_size(tag);
1137 __assert_only uint64_t final_size = init_size + PAGE_SIZE;
1138 vm_address_t address;
1139 kern_return_t kr;
1140
1141 /*
1142 * Test the matrix of:
1143 * - born with or without bit
1144 * - bit flipped or not
1145 * - dies with or without bit
1146 */
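	/*
	 * Bit 0 of "i" selects whether the allocation is born with "bit" set
	 * (flags1), bit 1 whether it should end up with "bit" set before it is
	 * freed (flags2); when the two differ, the state is flipped
	 * mid-lifetime below via populate/depopulate or wire/unwire.
	 */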
1147 for (uint32_t i = 0; i < 4; i++) {
1148 kma_flags_t flags1 = base | ((i & 1) ? bit : KMA_NONE);
1149 kma_flags_t flags2 = base | ((i & 2) ? bit : KMA_NONE);
1150
1151 kr = kmem_alloc(kernel_map, &address, PAGE_SIZE, flags1, tag);
1152 assert3u(kr, ==, KERN_SUCCESS);
1153
1154 if (flags1 & (KMA_VAONLY | KMA_PAGEABLE)) {
1155 assert3u(init_size, ==, vm_tag_get_size(tag));
1156 } else {
1157 assert3u(final_size, ==, vm_tag_get_size(tag));
1158 }
1159
1160 if ((flags1 ^ flags2) == KMA_VAONLY) {
1161 if (flags1 & KMA_VAONLY) {
1162 kernel_memory_populate(address, PAGE_SIZE,
1163 KMA_KOBJECT | KMA_NOFAIL, tag);
1164 } else {
1165 kernel_memory_depopulate(address, PAGE_SIZE,
1166 KMA_KOBJECT, tag);
1167 }
1168 }
1169
1170 if ((flags1 ^ flags2) == KMA_PAGEABLE) {
1171 if (flags1 & KMA_PAGEABLE) {
1172 kr = vm_map_wire_kernel(kernel_map,
1173 address, address + PAGE_SIZE,
1174 VM_PROT_DEFAULT, tag, false);
1175 assert3u(kr, ==, KERN_SUCCESS);
1176 } else {
1177 kr = vm_map_unwire(kernel_map,
1178 address, address + PAGE_SIZE, false);
1179 assert3u(kr, ==, KERN_SUCCESS);
1180 }
1181 }
1182
1183 if (flags2 & (KMA_VAONLY | KMA_PAGEABLE)) {
1184 assert3u(init_size, ==, vm_tag_get_size(tag));
1185 } else {
1186 assert3u(final_size, ==, vm_tag_get_size(tag));
1187 }
1188
1189 kmem_free(kernel_map, address, PAGE_SIZE);
1190 assert3u(init_size, ==, vm_tag_get_size(tag));
1191 }
1192 }
1193
1194 __attribute__((noinline))
1195 static void
1196 vm_test_kernel_tag_accounting(void)
1197 {
1198 printf("%s: test running\n", __func__);
1199
1200 printf("%s: account (KMA_KOBJECT + populate)...\n", __func__);
1201 vm_test_kernel_tag_accounting_kma(KMA_KOBJECT, KMA_VAONLY);
1202 printf("%s: PASS\n", __func__);
1203
1204 printf("%s: account (regular object + wiring)...\n", __func__);
1205 vm_test_kernel_tag_accounting_kma(KMA_NONE, KMA_PAGEABLE);
1206 printf("%s: PASS\n", __func__);
1207
1208 printf("%s: test passed\n", __func__);
1209
1210 #undef if_bit
1211 }
1212
1213 __attribute__((noinline))
1214 static void
1215 vm_test_collapse_overflow(void)
1216 {
1217 vm_object_t object, backing_object;
1218 vm_object_size_t size;
1219 vm_page_t m;
1220
1221 /* create an object for which (int)(size>>PAGE_SHIFT) = 0 */
1222 size = 0x400000000000ULL;
1223 assert((int)(size >> PAGE_SHIFT) == 0);
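	/*
	 * 0x400000000000 >> PAGE_SHIFT is a multiple of 2^32, so narrowing it
	 * to "int" yields 0; an overflow-unaware collapse/bypass check would
	 * then behave as if the backing object had no pages worth checking.
	 */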
1224 backing_object = vm_object_allocate(size + PAGE_SIZE, VM_MAP_SERIAL_NONE);
1225 assert(backing_object);
1226 vm_object_reference(backing_object);
1227 /* insert a page */
1228 m = VM_PAGE_NULL;
1229 while (m == VM_PAGE_NULL) {
1230 m = vm_page_grab();
1231 if (m == VM_PAGE_NULL) {
1232 VM_PAGE_WAIT();
1233 }
1234 }
1235 assert(m);
1236 vm_object_lock(backing_object);
1237 vm_page_insert(m, backing_object, 0);
1238 vm_object_unlock(backing_object);
1239 /* make it back another object */
1240 object = vm_object_allocate(size, VM_MAP_SERIAL_NONE);
1241 assert(object);
1242 vm_object_reference(object);
1243 object->shadow = backing_object;
1244 vm_object_reference(backing_object);
1245 /* trigger a bypass */
1246 vm_object_lock(object);
1247 vm_object_collapse(object, 0, TRUE);
1248 /* check that it did not bypass the backing object */
1249 if (object->shadow != backing_object) {
1250 panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
1251 }
1252 vm_object_unlock(object);
1253
1254 /* remove the page from the backing object */
1255 vm_object_lock(backing_object);
1256 vm_page_remove(m, TRUE);
1257 vm_object_unlock(backing_object);
1258 /* trigger a bypass */
1259 vm_object_lock(object);
1260 vm_object_collapse(object, 0, TRUE);
1261 /* check that it did bypass the backing object */
1262 if (object->shadow == backing_object) {
1263 panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
1264 }
1265 vm_page_insert(m, object, 0);
1266 vm_object_unlock(object);
1267
1268 /* cleanup */
1269 vm_object_deallocate(object);
1270 /* "backing_object" already lost its reference during the bypass */
1271 // vm_object_deallocate(backing_object);
1272
1273 printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
1274 }
1275
1276 __attribute__((noinline))
1277 static void
1278 vm_test_physical_size_overflow(void)
1279 {
1280 vm_map_address_t start;
1281 mach_vm_size_t size;
1282 kern_return_t kr;
1283 mach_vm_size_t phys_size;
1284 bool fail;
1285 int failures = 0;
1286
1287 /* size == 0 */
1288 start = 0x100000;
1289 size = 0x0;
1290 kr = vm_map_range_physical_size(kernel_map,
1291 start,
1292 size,
1293 &phys_size);
1294 fail = (kr != KERN_SUCCESS || phys_size != 0);
1295 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1296 __FUNCTION__, __LINE__,
1297 (fail ? "FAIL" : "PASS"),
1298 (uint64_t)start, size, kr, phys_size);
1299 failures += fail;
1300
1301 /* plain wraparound */
1302 start = 0x100000;
1303 size = 0xffffffffffffffff - 0x10000;
1304 kr = vm_map_range_physical_size(kernel_map,
1305 start,
1306 size,
1307 &phys_size);
1308 fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
1309 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1310 __FUNCTION__, __LINE__,
1311 (fail ? "FAIL" : "PASS"),
1312 (uint64_t)start, size, kr, phys_size);
1313 failures += fail;
1314
1315 /* wraparound after rounding */
1316 start = 0xffffffffffffff00;
1317 size = 0xf0;
1318 kr = vm_map_range_physical_size(kernel_map,
1319 start,
1320 size,
1321 &phys_size);
1322 fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
1323 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1324 __FUNCTION__, __LINE__,
1325 (fail ? "FAIL" : "PASS"),
1326 (uint64_t)start, size, kr, phys_size);
1327 failures += fail;
1328
1329 /* wraparound to start after rounding */
1330 start = 0x100000;
1331 size = 0xffffffffffffffff;
1332 kr = vm_map_range_physical_size(kernel_map,
1333 start,
1334 size,
1335 &phys_size);
1336 fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
1337 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1338 __FUNCTION__, __LINE__,
1339 (fail ? "FAIL" : "PASS"),
1340 (uint64_t)start, size, kr, phys_size);
1341 failures += fail;
1342
1343 if (failures) {
1344 panic("%s: FAIL (failures=%d)", __FUNCTION__, failures);
1345 }
1346 printf("%s: PASS\n", __FUNCTION__);
1347 }
1348
1349 #define PTR_UPPER_SHIFT 60
1350 #define PTR_TAG_SHIFT 56
1351 #define PTR_BITS_MASK (((1ULL << PTR_TAG_SHIFT) - 1) | (0xfULL << PTR_UPPER_SHIFT))
1352
1353
1354 __attribute__((noinline))
1355 static void
1356 vm_test_address_canonicalization(void)
1357 {
1358 T_SKIP("System not designed to support this test, skipping...");
1359 }
1360
1361
1362 kern_return_t
1363 vm_tests(void)
1364 {
1365 kern_return_t kr = KERN_SUCCESS;
1366
1367 /* Avoid VM panics because some of our test vm_maps don't have a pmap. */
1368 thread_test_context_t ctx CLEANUP_THREAD_TEST_CONTEXT = {
1369 .test_option_vm_map_allow_null_pmap = true,
1370 };
1371 thread_set_test_context(&ctx);
1372
1373 vm_test_collapse_compressor();
1374 vm_test_wire_and_extract();
1375 vm_test_page_wire_overflow_panic();
1376 vm_test_kernel_object_fault();
1377 vm_test_device_pager_transpose();
1378 #if MACH_ASSERT
1379 vm_test_map_copy_adjust_to_target();
1380 #endif /* MACH_ASSERT */
1381 #if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
1382 vm_test_4k();
1383 #endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
1384 #if __arm64__ && !KASAN
1385 vm_test_per_mapping_internal_accounting();
1386 #endif /* __arm64__ && !KASAN */
1387 vm_test_kernel_tag_accounting();
1388 vm_test_collapse_overflow();
1389 vm_test_physical_size_overflow();
1390 vm_test_address_canonicalization();
1391
1392 return kr;
1393 }
1394
1395 /*
1396 * Checks that vm_map_delete() can deal with map unaligned entries.
1397 * rdar://88969652
1398 */
1399 static int
1400 vm_map_non_aligned_test(__unused int64_t in, int64_t *out)
1401 {
1402 vm_map_t map = current_map();
1403 mach_vm_size_t size = 2 * VM_MAP_PAGE_SIZE(map);
1404 mach_vm_address_t addr;
1405 vm_map_entry_t entry;
1406 kern_return_t kr;
1407
1408 if (VM_MAP_PAGE_SHIFT(map) > PAGE_SHIFT) {
1409 kr = mach_vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
1410 if (kr != KERN_SUCCESS) {
1411 return ENOMEM;
1412 }
1413
1414 vm_map_lock(map);
1415 if (!vm_map_lookup_entry(map, addr, &entry)) {
1416 panic("couldn't find the entry we just made: "
1417 "map:%p addr:0x%0llx", map, addr);
1418 }
1419
1420 /*
1421 * Now break the entry into:
1422 * 2 * 4k
1423 * 2 * 4k
1424 * 1 * 16k
1425 */
1426 vm_map_clip_end(map, entry, addr + VM_MAP_PAGE_SIZE(map));
1427 entry->map_aligned = FALSE;
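		/*
		 * With map_aligned cleared, the next clip can land on
		 * addr + 2 * PAGE_SIZE, which is not aligned to this map's
		 * larger page size, producing the map-unaligned entry that
		 * vm_map_delete() must handle (rdar://88969652).
		 */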
1428 vm_map_clip_end(map, entry, addr + PAGE_SIZE * 2);
1429 vm_map_unlock(map);
1430
1431 kr = mach_vm_deallocate(map, addr, size);
1432 assert(kr == KERN_SUCCESS);
1433 }
1434
1435 *out = 1;
1436 return 0;
1437 }
1438 SYSCTL_TEST_REGISTER(vm_map_non_aligned, vm_map_non_aligned_test);
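/*
 * SYSCTL_TEST_REGISTER() hooks the callback into the xnupost sysctl test
 * machinery; on DEVELOPMENT/DEBUG kernels such tests are typically triggered
 * from user space through the corresponding debug.test sysctl node (the
 * exact path depends on the xnupost configuration).
 */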
1439
1440 static inline vm_map_t
1441 create_map(mach_vm_address_t map_start, mach_vm_address_t map_end)
1442 {
1443 ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1444 pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
1445 assert(pmap);
1446 ledger_dereference(ledger); // now retained by pmap
1447 	vm_map_t map = vm_map_create_options(pmap, map_start, map_end, VM_MAP_CREATE_PAGEABLE); /* map_end is typically vm_compute_max_offset() */
1448 assert(map);
1449
1450 #if CONFIG_SPTM
1451 /* Ensure the map serial looks fine */
1452 if (map->serial_id != pmap->associated_vm_map_serial_id) {
1453 panic("Expected a map and its pmap to have exactly the same serial");
1454 }
1455 #endif /* CONFIG_SPTM */
1456
1457 return map;
1458 }
1459
1460 static inline void
1461 cleanup_map(vm_map_t *map)
1462 {
1463 assert(*map);
1464 kern_return_t kr = vm_map_terminate(*map);
1465 assert(kr == 0);
1466 vm_map_deallocate(*map); // also destroys pmap
1467 }
1468
1469 kern_return_t
1470 mach_vm_remap_new_external(
1471 vm_map_t target_map,
1472 mach_vm_offset_ut *address,
1473 mach_vm_size_ut size,
1474 mach_vm_offset_ut mask,
1475 int flags,
1476 mach_port_t src_tport,
1477 mach_vm_offset_ut memory_address,
1478 boolean_t copy,
1479 vm_prot_ut *cur_protection_u,
1480 vm_prot_ut *max_protection_u,
1481 vm_inherit_ut inheritance);
1482 kern_return_t
1483 vm_remap_new_external(
1484 vm_map_t target_map,
1485 vm_offset_ut *address,
1486 vm_size_ut size,
1487 vm_offset_ut mask,
1488 int flags,
1489 mach_port_t src_tport,
1490 vm_offset_ut memory_address,
1491 boolean_t copy,
1492 vm_prot_ut *cur_protection,
1493 vm_prot_ut *max_protection,
1494 vm_inherit_ut inheritance);
1495 kern_return_t
1496 mach_vm_remap_external(
1497 vm_map_t target_map,
1498 mach_vm_offset_ut *address,
1499 mach_vm_size_ut size,
1500 mach_vm_offset_ut mask,
1501 int flags,
1502 vm_map_t src_map,
1503 mach_vm_offset_ut memory_address,
1504 boolean_t copy,
1505 vm_prot_ut *cur_protection,
1506 vm_prot_ut *max_protection,
1507 vm_inherit_ut inheritance);
1508 kern_return_t
1509 mach_vm_map_external(
1510 vm_map_t target_map,
1511 mach_vm_offset_ut *address,
1512 mach_vm_size_ut initial_size,
1513 mach_vm_offset_ut mask,
1514 int flags,
1515 ipc_port_t port,
1516 memory_object_offset_ut offset,
1517 boolean_t copy,
1518 vm_prot_ut cur_protection,
1519 vm_prot_ut max_protection,
1520 vm_inherit_ut inheritance);
1521 kern_return_t
1522 mach_vm_wire_external(
1523 host_priv_t host_priv,
1524 vm_map_t map,
1525 mach_vm_address_ut start,
1526 mach_vm_size_ut size,
1527 vm_prot_ut access);
1528 kern_return_t
1529 mach_vm_purgable_control_external(
1530 mach_port_t target_tport,
1531 mach_vm_offset_ut address_u,
1532 vm_purgable_t control,
1533 int *state);
1534 kern_return_t
1535 vm_purgable_control_external(
1536 mach_port_t target_tport,
1537 vm_offset_ut address,
1538 vm_purgable_t control,
1539 int *state);
1540
1541 static int
1542 vm_map_null_tests(__unused int64_t in, int64_t *out)
1543 {
1544 kern_return_t kr;
1545
1546 mach_vm_address_t alloced_addr, throwaway_addr;
1547 mach_vm_address_ut throwaway_addr_ut;
1548 vm_address_t vm_throwaway_addr;
1549 vm_address_ut vm_throwaway_addr_ut;
1550 vm32_address_ut alloced_addr32, throwaway_addr32_u;
1551 mach_vm_size_t throwaway_size, size_16kb, read_overwrite_data_size;
1552 vm_size_t vm_size, vm_read_overwrite_data_size, vm_throwaway_size;
1553 vm_size_ut throwaway_size_ut;
1554 vm32_size_t data_size32, size32_16kb;
1555 vm32_size_ut data_size32_u, throwaway_size32_u;
1556 mach_msg_type_number_t read_data_size;
1557 mach_port_t mem_entry_result;
1558 pointer_t read_data;
1559 pointer_ut read_data_u;
1560 vm_prot_t prot_default;
1561 vm_prot_ut prot_allexec_u, prot_default_ut;
1562 vm_map_t map64, map32;
1563 vm_machine_attribute_val_t vm_throwaway_attr_val;
1564 vm_region_extended_info_data_t vm_throwaway_region_extended_info;
1565 vm_region_recurse_info_t vm_throwaway_region_recurse_info;
1566 vm_region_recurse_info_64_t vm_throwaway_region_recurse_info_64;
1567 int throwaway_state;
1568 uint32_t throwaway_depth;
1569 vm_page_info_t page_info;
1570
1571 page_info = 0;
1572 throwaway_state = VM_PURGABLE_STATE_MAX;
1573 vm_throwaway_region_recurse_info_64 = 0;
1574 vm_throwaway_region_recurse_info = 0;
1575 vm_throwaway_attr_val = MATTR_VAL_OFF;
1576
1577 map64 = create_map(0, vm_compute_max_offset(true));
1578 map32 = create_map(0, vm_compute_max_offset(false));
1579
1580 prot_allexec_u = vm_sanitize_wrap_prot(VM_PROT_ALLEXEC);
1581 prot_default_ut = vm_sanitize_wrap_prot(VM_PROT_DEFAULT);
1582 prot_default = VM_PROT_DEFAULT;
1583
1584 size_16kb = 16 * 1024;
1585 size32_16kb = (vm32_size_t) size_16kb;
1586
1587 /*
1588 * Allocate some address in the map, just so we can pass a valid looking address to functions so they don't
1589 * return before checking VM_MAP_NULL
1590 */
1591 kr = mach_vm_allocate(map64, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
1592 assert(kr == KERN_SUCCESS);
1593 kr = vm32_vm_allocate(map32, &alloced_addr32, size32_16kb, VM_FLAGS_ANYWHERE);
1594 assert(kr == KERN_SUCCESS);
1595
1596 /*
1597 * Call a bunch of MIG entrypoints with VM_MAP_NULL. The goal is to verify they check map != VM_MAP_NULL.
1598 	 * These calls must fail cleanly: the exact return code is unspecified, so only assert that they do not return KERN_SUCCESS and, above all, that no crash occurs.
1599 */
1600 throwaway_size = size_16kb;
1601 kr = _mach_make_memory_entry(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1602 assert(kr != KERN_SUCCESS);
1603 throwaway_size32_u = vm32_sanitize_wrap_size(size32_16kb);
1604 kr = vm32_mach_make_memory_entry(VM_MAP_NULL, &throwaway_size32_u, alloced_addr32, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1605 assert(kr != KERN_SUCCESS);
1606 throwaway_size_ut = vm_sanitize_wrap_size(size_16kb);
1607 kr = vm32_mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size_ut, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1608 assert(kr != KERN_SUCCESS);
1609 throwaway_size = size_16kb;
1610 kr = mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1611 assert(kr != KERN_SUCCESS);
1612 vm_size = size_16kb;
1613 kr = mach_make_memory_entry(VM_MAP_NULL, &vm_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1614 assert(kr != KERN_SUCCESS);
1615
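	/*
	 * These two entry points take a host argument rather than a map, so HOST_NULL
	 * exercises the equivalent null-argument check.
	 */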
1616 kr = mach_memory_object_memory_entry(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
1617 assert(kr != KERN_SUCCESS);
1618 kr = mach_memory_object_memory_entry_64(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
1619 assert(kr != KERN_SUCCESS);
1620
1621 throwaway_addr = alloced_addr;
1622 kr = mach_vm_allocate(VM_MAP_NULL, &throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
1623 assert(kr != KERN_SUCCESS);
1624 throwaway_addr32_u = alloced_addr32;
1625 kr = vm32_vm_allocate(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, VM_FLAGS_ANYWHERE);
1626 assert(kr != KERN_SUCCESS);
1627 kr = vm_allocate_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
1628 assert(kr != KERN_SUCCESS);
1629
1630 kr = mach_vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
1631 assert(kr != KERN_SUCCESS);
1632 kr = vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
1633 assert(kr != KERN_SUCCESS);
1634 kr = vm32_vm_deallocate(VM_MAP_NULL, throwaway_addr32_u, size32_16kb);
1635 assert(kr != KERN_SUCCESS);
1636
1637 kr = mach_vm_map(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1638 assert(kr != KERN_SUCCESS);
1639 kr = mach_vm_map_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1640 assert(kr != KERN_SUCCESS);
1641
1642 vm_throwaway_addr = alloced_addr;
1643 kr = vm_map(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1644 assert(kr != KERN_SUCCESS);
1645 kr = vm32_vm_map(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1646 assert(kr != KERN_SUCCESS);
1647 kr = vm32_vm_map_64(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1648 assert(kr != KERN_SUCCESS);
1649
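	/*
	 * The remap entry points take both a target map and a source map; exercise
	 * VM_MAP_NULL in each position.
	 */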
1650 kr = mach_vm_remap(map64, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1651 assert(kr != KERN_SUCCESS);
1652 kr = mach_vm_remap(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1653 assert(kr != KERN_SUCCESS);
1654 kr = mach_vm_remap_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1655 assert(kr != KERN_SUCCESS);
1656 kr = mach_vm_remap_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1657 assert(kr != KERN_SUCCESS);
1658 kr = vm_remap_external(map64, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1659 assert(kr != KERN_SUCCESS);
1660 kr = vm_remap_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1661 assert(kr != KERN_SUCCESS);
1662 kr = vm32_vm_remap(map32, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1663 assert(kr != KERN_SUCCESS);
1664 kr = vm32_vm_remap(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, map32, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1665 assert(kr != KERN_SUCCESS);
1666
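	/*
	 * The remap_new variants take a source port rather than a source map; pass a
	 * NULL target map and a valid map with MACH_PORT_NULL, using both default and
	 * ALLEXEC protections.
	 */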
1667 kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1668 assert(kr != KERN_SUCCESS);
1669 kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1670 assert(kr != KERN_SUCCESS);
1671
1672 kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
1673 assert(kr != KERN_SUCCESS);
1674 kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
1675 assert(kr != KERN_SUCCESS);
1676
1677 kr = vm_remap_new_external(VM_MAP_NULL, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1678 assert(kr != KERN_SUCCESS);
1679 kr = vm_remap_new_external(map64, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1680 assert(kr != KERN_SUCCESS);
1681
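	/*
	 * Wiring takes a host_priv argument as well as a map; test a NULL map with a
	 * valid host and a NULL host with a valid map.
	 */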
1682 kr = mach_vm_wire_external(host_priv_self(), VM_MAP_NULL, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
1683 assert(kr != KERN_SUCCESS);
1684 kr = mach_vm_wire_external(HOST_PRIV_NULL, map64, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
1685 assert(kr != KERN_SUCCESS);
1686
1687 kr = vm_wire(host_priv_self(), VM_MAP_NULL, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
1688 assert(kr != KERN_SUCCESS);
1689 kr = vm_wire(HOST_PRIV_NULL, map64, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
1690 assert(kr != KERN_SUCCESS);
1691
1692 kr = task_wire(VM_MAP_NULL, false);
1693 assert(kr != KERN_SUCCESS);
1694 kr = vm32_task_wire(VM_MAP_NULL, false);
1695 assert(kr != KERN_SUCCESS);
1696
1697 kr = mach_vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
1698 assert(kr != KERN_SUCCESS);
1699 kr = vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
1700 assert(kr != KERN_SUCCESS);
1701 kr = vm32_vm_read(VM_MAP_NULL, alloced_addr32, size32_16kb, &read_data_u, &data_size32);
1702 assert(kr != KERN_SUCCESS);
1703
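	/* The read_list variants take a fixed-size array of read entries; build single-entry arrays on the heap. */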
1704 mach_vm_read_entry_t * mach_re = kalloc_type(mach_vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1705 (*mach_re)[0].address = alloced_addr;
1706 (*mach_re)[0].size = size_16kb;
1707
1708 vm_read_entry_t * re = kalloc_type(vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1709 (*re)[0].address = alloced_addr;
1710 (*re)[0].size = (vm_size_t) size_16kb;
1711
1712 vm32_read_entry_t * re_32 = kalloc_type(vm32_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1713 (*re_32)[0].address = (vm32_address_t) alloced_addr;
1714 (*re_32)[0].size = (vm32_size_t) size_16kb;
1715
1716 kr = mach_vm_read_list(VM_MAP_NULL, *mach_re, 1);
1717 assert(kr != KERN_SUCCESS);
1718 kr = vm_read_list(VM_MAP_NULL, *re, 1);
1719 assert(kr != KERN_SUCCESS);
1720 kr = vm32_vm_read_list(VM_MAP_NULL, *re_32, 1);
1721 assert(kr != KERN_SUCCESS);
1722
1723 kfree_type(mach_vm_read_entry_t, mach_re);
1724 kfree_type(vm_read_entry_t, re);
1725 kfree_type(vm32_read_entry_t, re_32);
1726
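	/* Overwrite, copy, and write variants across the mach_vm, vm, and vm32 interfaces. */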
1727 kr = mach_vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &read_overwrite_data_size);
1728 assert(kr != KERN_SUCCESS);
1729 kr = vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &vm_read_overwrite_data_size);
1730 assert(kr != KERN_SUCCESS);
1731 kr = vm32_vm_read_overwrite(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32, &data_size32_u);
1732 assert(kr != KERN_SUCCESS);
1733
1734 kr = mach_vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
1735 assert(kr != KERN_SUCCESS);
1736 kr = vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
1737 assert(kr != KERN_SUCCESS);
1738 kr = vm32_vm_copy(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32);
1739 assert(kr != KERN_SUCCESS);
1740
1741 kr = mach_vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
1742 assert(kr != KERN_SUCCESS);
1743 kr = vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
1744 assert(kr != KERN_SUCCESS);
1745 kr = vm32_vm_write(VM_MAP_NULL, alloced_addr32, alloced_addr, (mach_msg_type_number_t) size_16kb);
1746 assert(kr != KERN_SUCCESS);
1747
1748 kr = mach_vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
1749 assert(kr != KERN_SUCCESS);
1750 kr = vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
1751 assert(kr != KERN_SUCCESS);
1752 	kr = vm32_vm_inherit(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
1753 
1754 kr = mach_vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
1755 assert(kr != KERN_SUCCESS);
1756 kr = vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
1757 assert(kr != KERN_SUCCESS);
1758 kr = vm32_vm_protect(VM_MAP_NULL, alloced_addr32, size32_16kb, FALSE, VM_PROT_DEFAULT);
1759 assert(kr != KERN_SUCCESS);
1760
1761 kr = mach_vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
1762 assert(kr != KERN_SUCCESS);
1763 kr = vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
1764 assert(kr != KERN_SUCCESS);
1765 kr = vm32_vm_behavior_set(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_BEHAVIOR_DEFAULT);
1766 assert(kr != KERN_SUCCESS);
1767
1768 kr = mach_vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
1769 assert(kr != KERN_SUCCESS);
1770 kr = vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
1771 assert(kr != KERN_SUCCESS);
1772 kr = vm32_vm_msync(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_SYNC_ASYNCHRONOUS);
1773 assert(kr != KERN_SUCCESS);
1774
1775 kr = mach_vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1776 assert(kr != KERN_SUCCESS);
1777 kr = vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1778 assert(kr != KERN_SUCCESS);
1779 kr = vm32_vm_machine_attribute(VM_MAP_NULL, alloced_addr32, size32_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1780 assert(kr != KERN_SUCCESS);
1781
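	/*
	 * The purgable-control *_external entry points take a task port rather than a
	 * map, so MACH_PORT_NULL exercises the same null-map path; the vm32 variant
	 * takes the map directly.
	 */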
1782 kr = mach_vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1783 assert(kr != KERN_SUCCESS);
1784 kr = vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1785 assert(kr != KERN_SUCCESS);
1786 kr = vm32_vm_purgable_control(VM_MAP_NULL, alloced_addr32, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1787 assert(kr != KERN_SUCCESS);
1788
1789 kr = mach_vm_region(VM_MAP_NULL, &throwaway_addr, &throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1790 assert(kr != KERN_SUCCESS);
1791 kr = vm_region(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1792 assert(kr != KERN_SUCCESS);
1793 kr = vm_region_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1794 assert(kr != KERN_SUCCESS);
1795 kr = vm32_vm_region(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1796 assert(kr != KERN_SUCCESS);
1797 kr = vm32_vm_region_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1798 assert(kr != KERN_SUCCESS);
1799
1800 kr = mach_vm_region_recurse(VM_MAP_NULL, &throwaway_addr, &throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1801 assert(kr != KERN_SUCCESS);
1802 kr = vm_region_recurse(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1803 assert(kr != KERN_SUCCESS);
1804 kr = vm_region_recurse_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
1805 assert(kr != KERN_SUCCESS);
1806 kr = vm32_vm_region_recurse(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1807 assert(kr != KERN_SUCCESS);
1808 kr = vm32_vm_region_recurse_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
1809 assert(kr != KERN_SUCCESS);
1810
1811 kr = mach_vm_page_info(VM_MAP_NULL, alloced_addr, VM_PAGE_INFO_BASIC, page_info, &read_data_size);
1812 assert(kr != KERN_SUCCESS);
1813 kr = mach_vm_page_query(VM_MAP_NULL, alloced_addr, &throwaway_state, &throwaway_state);
1814 assert(kr != KERN_SUCCESS);
1815 kr = vm_map_page_query(VM_MAP_NULL, vm_throwaway_addr, &throwaway_state, &throwaway_state);
1816 assert(kr != KERN_SUCCESS);
1817 kr = vm32_vm_map_page_query(VM_MAP_NULL, throwaway_addr32_u, &throwaway_state, &throwaway_state);
1818 assert(kr != KERN_SUCCESS);
1819
1820 	/*
1821 	 * Clean up our allocations and maps
1822 	 */
1823 kr = mach_vm_deallocate(map64, alloced_addr, size_16kb);
1824 assert(kr == KERN_SUCCESS);
1825 kr = vm32_vm_deallocate(map32, alloced_addr32, size32_16kb);
1826 assert(kr == KERN_SUCCESS);
1827
1828 cleanup_map(&map64);
1829 cleanup_map(&map32);
1830
1831 	/*
1832 	 * If we made it this far without crashing, the test passes.
1833 	 */
1834
1835 *out = 1;
1836 return 0;
1837 }
1838 SYSCTL_TEST_REGISTER(vm_map_null, vm_map_null_tests);
1839
1840 #if CONFIG_PROB_GZALLOC
1841 extern vm_offset_t pgz_protect_for_testing_only(zone_t zone, vm_offset_t addr, void *fp);
1842
1843 static int
1844 vm_memory_entry_pgz_test(__unused int64_t in, int64_t *out)
1845 {
1846 kern_return_t kr;
1847 ipc_port_t mem_entry_ptr;
1848 mach_vm_address_t allocation_addr = 0;
1849 vm_size_t size = PAGE_SIZE;
1850
1851 allocation_addr = (mach_vm_address_t) kalloc_data(size, Z_WAITOK);
1852 if (!allocation_addr) {
1853 *out = -1;
1854 return 0;
1855 }
1856
1857 	/*
1858 	 * Make sure we get a PGZ-protected address.
1859 	 * If the allocation isn't already protected, try to protect it.
1860 	 */
1861 if (!pgz_owned(allocation_addr)) {
1862 zone_id_t zid = zone_id_for_element((void *) allocation_addr, size);
1863 zone_t zone = &zone_array[zid];
1864 allocation_addr = pgz_protect_for_testing_only(zone, allocation_addr, __builtin_frame_address(0));
1865 }
1866 /*
1867 * If we still aren't protected, tell userspace to skip the test
1868 */
1869 if (!pgz_owned(allocation_addr)) {
1870 *out = 2;
1871 return 0;
1872 }
1873
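	/*
	 * Creating a memory entry backed by the PGZ-protected address should still
	 * succeed.
	 */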
1874 kr = mach_make_memory_entry(kernel_map, &size, (mach_vm_offset_t) allocation_addr, VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_VM_COPY, &mem_entry_ptr, IPC_PORT_NULL);
1875 assert(kr == KERN_SUCCESS);
1876
1877 ipc_port_release(mem_entry_ptr);
1878 kfree_data(allocation_addr, size);
1879
1880 *out = 1;
1881 return 0;
1882 }
1883 #else /* CONFIG_PROB_GZALLOC */
1884 static int
1885 vm_memory_entry_pgz_test(__unused int64_t in, int64_t *out)
1886 {
1887 *out = 1;
1888 return 0;
1889 }
1890 #endif /* CONFIG_PROB_GZALLOC */
1891
1892 SYSCTL_TEST_REGISTER(vm_memory_entry_pgz, vm_memory_entry_pgz_test);
1893
1894
1895 static int
1896 vm_map_copyio_test(__unused int64_t in, int64_t *out)
1897 {
1898 /* Test is not supported */
1899 *out = ENOTSUP;
1900 return 0;
1901 }
1902 SYSCTL_TEST_REGISTER(vm_map_copyio, vm_map_copyio_test);
1903
1904 static int
1905 vm_page_relocate_test(__unused int64_t in, int64_t *out)
1906 {
1907 /* Test is not supported */
1908 *out = ENOTSUP;
1909 return 0;
1910 }
1911 SYSCTL_TEST_REGISTER(vm_page_relocate, vm_page_relocate_test);
1912
1913 #define PAGE_SHIFT_4K 12
1914 #define PAGE_SHIFT_16K 14
1915 static int
1916 vm_map_copy_entry_subrange_test(__unused int64_t in, int64_t *out)
1917 {
1918 mach_vm_size_t size_4kb, size_16kb;
1919 vm_map_t map_4k, map_16k;
1920 mach_vm_address_t alloced_addr, mapped_addr;
1921 mach_vm_size_t entry_size;
1922 mach_port_t entry_handle;
1923 mach_vm_size_t mapped_size;
1924 vm_region_basic_info_data_64_t region_info;
1925 mach_msg_type_number_t region_info_count;
1926
1927 kern_return_t kr;
1928
1929 size_4kb = 4 * 1024;
1930 size_16kb = 16 * 1024;
1931
1932 map_4k = create_map(0, vm_compute_max_offset(true));
1933 kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
1934 map_16k = create_map(0, vm_compute_max_offset(true));
1935 kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);
1936
1937 /*
1938 * Test mapping a portion of a copy entry from a 4k map to a 16k one.
1939 * The result size should be aligned to the destination's page size (16k).
1940 */
1941 	// Get a copy entry that we can map into the other map
1942 kr = mach_vm_allocate(map_4k, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
1943 assert(kr == KERN_SUCCESS);
1944
1945 entry_size = size_16kb;
1946 kr = mach_make_memory_entry_64(map_4k, &entry_size, alloced_addr,
1947 MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
1948 &entry_handle, MACH_PORT_NULL);
1949 assert(kr == KERN_SUCCESS);
1950 assert(entry_size == size_16kb);
1951
1952 // Attempt to map a portion of the entry into the 16k map
1953 kr = mach_vm_map(map_16k, &mapped_addr, size_4kb, 0, VM_FLAGS_ANYWHERE,
1954 entry_handle, 0, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
1955 VM_INHERIT_DEFAULT);
1956 assert(kr == KERN_SUCCESS);
1957
1958 // Ensure the entry is actually mapped whole
1959 region_info_count = VM_REGION_BASIC_INFO_COUNT_64;
1960 kr = mach_vm_region(map_16k, &mapped_addr, &mapped_size, VM_REGION_BASIC_INFO_64,
1961 	    (vm_region_info_t) &region_info, &region_info_count, NULL);
1962 assert(kr == KERN_SUCCESS);
1963 assert(mapped_size == entry_size);
1964
1965 // Cleanup
1966 mach_memory_entry_port_release(entry_handle);
1967 kr = mach_vm_deallocate(map_16k, mapped_addr, size_16kb);
1968 assert(kr == KERN_SUCCESS);
1969 kr = mach_vm_deallocate(map_4k, alloced_addr, size_16kb);
1970 assert(kr == KERN_SUCCESS);
1971 cleanup_map(&map_4k);
1972 cleanup_map(&map_16k);
1973
1974 *out = 1;
1975 return 0;
1976 }
1977 SYSCTL_TEST_REGISTER(vm_map_copy_entry_subrange, vm_map_copy_entry_subrange_test);
1978
1979
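/*
 * Passing a null memory-entry port to mach_memory_entry_map_size() should fail
 * with KERN_INVALID_ARGUMENT and report a map size of 0.
 */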
1980 static int
1981 vm_memory_entry_map_size_null_test(__unused int64_t in, int64_t *out)
1982 {
1983 mach_vm_size_t size_16kb, map_size;
1984 vm_map_t map;
1985
1986 kern_return_t kr;
1987
1988 map = create_map(0, vm_compute_max_offset(true));
1989 size_16kb = 16 * 1024;
1990
1991 map_size = 0xdeadbeef;
1992 kr = mach_memory_entry_map_size(MACH_PORT_NULL, map, 0, size_16kb, &map_size);
1993 assert(kr == KERN_INVALID_ARGUMENT);
1994 assert(map_size == 0);
1995
1996 cleanup_map(&map);
1997
1998 *out = 1;
1999 return 0;
2000 }
2001 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_null, vm_memory_entry_map_size_null_test);
2002
2003 static int
2004 vm_memory_entry_map_size_overflow_tests(__unused int64_t in, int64_t *out)
2005 {
2006 mach_vm_size_t size_16kb, entry_size, map_size;
2007 vm_map_t map;
2008 mach_port_t parent_handle, entry_handle;
2009 mach_vm_address_t alloced_addr;
2010 vm_map_offset_t entry_offset;
2011 memory_object_offset_t maximum_offset;
2012
2013 kern_return_t kr;
2014
2015 size_16kb = 16 * 1024;
2016 map = create_map(0, vm_compute_max_offset(true));
2017 /*
2018 * (1) Attempt to overflow offset + mem_entry->offset
2019 */
2020 // Setup - create an entry with nonzero offset
2021 kr = mach_memory_object_memory_entry_64((host_t) 1, 1,
2022 size_16kb * 2, VM_PROT_DEFAULT, 0, &parent_handle);
2023 assert(kr == KERN_SUCCESS);
2024
2025 entry_size = size_16kb;
2026 kr = mach_make_memory_entry_64(map, &entry_size, size_16kb,
2027 VM_PROT_DEFAULT, &entry_handle, parent_handle);
2028 assert(kr == KERN_SUCCESS);
2029
2030 // Pass in maximum offset to attempt overflow
2031 maximum_offset = (memory_object_offset_t) -1;
2032 kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
2033 &map_size);
2034 assert(kr == KERN_INVALID_ARGUMENT);
2035
2036 // Cleanup
2037 mach_memory_entry_port_release(parent_handle);
2038 mach_memory_entry_port_release(entry_handle);
2039
2040 /*
2041 * (2) Attempt to overflow offset + mem_entry->data_offset
2042 */
2043 // Setup - create an entry with nonzero data_offset
2044 kr = mach_vm_allocate(map, &alloced_addr, 2 * size_16kb, VM_FLAGS_ANYWHERE);
2045 assert(kr == KERN_SUCCESS);
2046
2047 entry_size = size_16kb;
2048 entry_offset = alloced_addr + (size_16kb / 2);
2049 kr = mach_make_memory_entry_64(map, &entry_size, entry_offset,
2050 MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
2051 &entry_handle, MACH_PORT_NULL);
2052 assert(kr == KERN_SUCCESS);
2053
2054 // Pass in maximum offset to attempt overflow
2055 kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
2056 &map_size);
2057 assert(kr == KERN_INVALID_ARGUMENT);
2058
2059 // Cleanup
2060 mach_memory_entry_port_release(entry_handle);
2061 kr = mach_vm_deallocate(map, alloced_addr, 2 * size_16kb);
2062 assert(kr == KERN_SUCCESS);
2063 cleanup_map(&map);
2064
2065 *out = 1;
2066 return 0;
2067 }
2068 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_overflow, vm_memory_entry_map_size_overflow_tests);
2069
2070 static int
2071 vm_memory_entry_map_size_copy_tests(__unused int64_t in, int64_t *out)
2072 {
2073 mach_vm_size_t size_2kb, size_4kb, size_16kb;
2074 mach_vm_size_t entry_size_4k, entry_size_16k;
2075 mach_vm_size_t map_size;
2076 vm_map_t map_4k, map_16k;
2077 mach_port_t entry_4k, entry_16k;
2078 mach_vm_address_t alloced_addr_4k, alloced_addr_16k;
2079
2080 kern_return_t kr;
2081
2082 size_2kb = 2 * 1024;
2083 size_4kb = 4 * 1024;
2084 size_16kb = 16 * 1024;
2085
2086 /*
2087 * Setup - initialize maps and create copy entries for each
2088 */
2089 // 4k map and entry
2090 map_4k = create_map(0, vm_compute_max_offset(true));
2091 kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
2092 assert(kr == KERN_SUCCESS);
2093
2094 kr = mach_vm_allocate(map_4k, &alloced_addr_4k, size_16kb, VM_FLAGS_ANYWHERE);
2095 assert(kr == KERN_SUCCESS);
2096
2097 entry_size_4k = size_16kb;
2098 kr = mach_make_memory_entry_64(map_4k, &entry_size_4k, alloced_addr_4k,
2099 MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_4k, MACH_PORT_NULL);
2100 assert(kr == KERN_SUCCESS);
2101 assert(entry_size_4k == size_16kb);
2102
2103 // 16k map and entry
2104 map_16k = create_map(0, vm_compute_max_offset(true));
2105 kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);
2106 assert(kr == KERN_SUCCESS);
2107
2108 kr = mach_vm_allocate(map_16k, &alloced_addr_16k, size_16kb, VM_FLAGS_ANYWHERE);
2109 assert(kr == KERN_SUCCESS);
2110
2111 entry_size_16k = size_16kb;
2112 kr = mach_make_memory_entry_64(map_16k, &entry_size_16k, alloced_addr_16k,
2113 MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_16k, MACH_PORT_NULL);
2114 assert(kr == KERN_SUCCESS);
2115 assert(entry_size_16k == size_16kb);
2116
2117 /*
2118 * (1) Test 4k map with 4k entry and 16k map with 16k entry. Page-aligned
2119 * ranges should have no size adjustment.
2120 */
2121 for (mach_vm_size_t i = 1; i <= 4; i++) {
2122 kr = mach_memory_entry_map_size(entry_4k, map_4k, 0, i * size_4kb, &map_size);
2123 assert(kr == KERN_SUCCESS);
2124 assert(map_size == (i * size_4kb));
2125 }
2126 kr = mach_memory_entry_map_size(entry_16k, map_16k, 0, size_16kb, &map_size);
2127 assert(kr == KERN_SUCCESS);
2128 assert(map_size == size_16kb);
2129
2130 /*
2131 * (2) Test 4k map with 16k entry. Since we have a 4k map, we should be able
2132 * to map a 4k range of the entry, but to map a 2k range we will need to map
2133 * a full 4k page.
2134 */
2135 kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_16kb, &map_size);
2136 assert(kr == KERN_SUCCESS);
2137 assert(map_size == size_16kb);
2138 kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_4kb, &map_size);
2139 assert(kr == KERN_SUCCESS);
2140 assert(map_size == size_4kb);
2141 kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_2kb, &map_size);
2142 assert(kr == KERN_SUCCESS);
2143 assert(map_size == size_4kb);
2144
2145 /*
2146 * (3) Test 16k map with 4k entry. Since we have a 16k map, we will need to
2147 * map the whole 16kb memory entry even if a smaller range is requested.
2148 */
2149 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_16kb, &map_size);
2150 assert(kr == KERN_SUCCESS);
2151 assert(map_size == size_16kb);
2152 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_4kb, &map_size);
2153 assert(kr == KERN_SUCCESS);
2154 assert(map_size == size_16kb);
2155 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_2kb, &map_size);
2156 assert(kr == KERN_SUCCESS);
2157 assert(map_size == size_16kb);
2158
2159 	/*
2160 	 * (4) Detect an error when the requested size is larger than the memory entry.
2161 	 */
2162 map_size = 0xdeadbeef;
2163 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, 2 * size_16kb, &map_size);
2164 assert(kr == KERN_INVALID_ARGUMENT);
2165 assert(map_size == 0);
2166
2167 /*
2168 * Clean up memory entries, allocations, and maps
2169 */
2170 mach_memory_entry_port_release(entry_4k);
2171 mach_memory_entry_port_release(entry_16k);
2172 kr = mach_vm_deallocate(map_4k, alloced_addr_4k, size_16kb);
2173 assert(kr == KERN_SUCCESS);
2174 kr = mach_vm_deallocate(map_16k, alloced_addr_16k, size_16kb);
2175 assert(kr == KERN_SUCCESS);
2176 cleanup_map(&map_4k);
2177 cleanup_map(&map_16k);
2178
2179 *out = 1;
2180 return 0;
2181 }
2182 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_copy, vm_memory_entry_map_size_copy_tests);
2183
2184 static int
2185 vm_memory_entry_parent_submap_tests(__unused int64_t in, int64_t *out)
2186 {
2187 vm_shared_region_t shared_region;
2188 mach_port_t parent_handle, entry_handle;
2189 vm_named_entry_t parent_entry;
2190 mach_vm_size_t entry_size;
2191 vm_prot_t vmflags;
2192
2193 kern_return_t kr;
2194
2195 	/*
2196 	 * Use the shared region to get a named_entry that refers to a submap
2197 	 */
2198 shared_region = vm_shared_region_get(current_task());
2199 parent_handle = shared_region->sr_mem_entry;
2200 assert(parent_handle != NULL);
2201 parent_entry = mach_memory_entry_from_port(parent_handle);
2202 assert(parent_entry->is_sub_map);
2203
2204 /*
2205 * We should be able to create an entry using the submap entry as the parent
2206 */
2207 entry_size = parent_entry->size;
2208 vmflags = VM_PROT_DEFAULT;
2209 kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
2210 &entry_handle, parent_handle);
2211 assert(kr == KERN_SUCCESS);
2212 mach_memory_entry_port_release(entry_handle);
2213
2214 	/*
2215 	 * Should fail via mach_make_memory_entry_mem_only (MAP_MEM_ONLY), since the
2216 	 * parent entry is not an object
2217 	 */
2218 vmflags |= MAP_MEM_ONLY;
2219 kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
2220 &entry_handle, parent_handle);
2221 assert(kr == KERN_INVALID_ARGUMENT);
2222
2223 /*
2224 * Cleanup
2225 */
2226 vm_shared_region_deallocate(shared_region);
2227
2228 *out = 1;
2229 return 0;
2230 }
2231 SYSCTL_TEST_REGISTER(vm_memory_entry_parent_submap, vm_memory_entry_parent_submap_tests);
2232
2233 static int
2234 vm_cpu_map_pageout_test(int64_t in, int64_t *out)
2235 {
2236 /* Test is not supported */
2237 (void)in;
2238 *out = ENOTSUP;
2239 return 0;
2240 }
2241 SYSCTL_TEST_REGISTER(vm_cpu_map_pageout, vm_cpu_map_pageout_test);
2242
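/*
 * Look up the map entry containing the given address in the current map and
 * report the WIMG (cacheability) bits of its backing VM object.
 */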
2243 static int
2244 vm_get_wimg_mode(int64_t in, int64_t *out)
2245 {
2246 mach_vm_offset_t addr = (mach_vm_offset_t)in;
2247 vm_map_entry_t entry;
2248 vm_map_t map = current_map();
2249 vm_map_lock_read(map);
2250 bool map_contains_addr = vm_map_lookup_entry(map, addr, &entry);
2251 if (!map_contains_addr) {
2252 vm_map_unlock_read(map);
2253 return EINVAL;
2254 }
2255
2256 if (entry->is_sub_map) {
2257 vm_map_unlock_read(map);
2258 return ENOTSUP;
2259 }
2260
2261 	vm_object_t obj = VME_OBJECT(entry);
	if (obj == VM_OBJECT_NULL) {
		vm_map_unlock_read(map);
		return ENOTSUP;
	}
2262 	*out = obj->wimg_bits;
2263
2264 vm_map_unlock_read(map);
2265 return 0;
2266 }
2267 SYSCTL_TEST_REGISTER(vm_get_wimg_mode, vm_get_wimg_mode);
2268
2269