/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>

#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/memory_object.h>
#include <mach/vm_map.h>
#include <mach/vm_statistics.h>
#include <mach/vm32_map_server.h>
#include <mach/mach_host.h>
#include <mach/host_priv.h>

#include <kern/ledger.h>
#include <kern/host.h>

#include <device/device_port.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_fault.h>
#include <vm/vm_fault_internal.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos.h>
#include <vm/vm_memtag.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_iokit.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_shared_region_xnu.h>
#include <vm/vm_far.h>

#include <kern/zalloc.h>
#include <kern/zalloc_internal.h>

#include <sys/errno.h> /* for the sysctl tests */

#include <tests/xnupost.h> /* for testing-related functions and macros */

extern ledger_template_t task_ledger_template;

extern kern_return_t
vm_map_copy_adjust_to_target(
    vm_map_copy_t copy_map,
    vm_map_offset_t offset,
    vm_map_size_t size,
    vm_map_t target_map,
    boolean_t copy,
    vm_map_copy_t *target_copy_map_p,
    vm_map_offset_t *overmap_start_p,
    vm_map_offset_t *overmap_end_p,
    vm_map_offset_t *trimmed_start_p);

#define VM_TEST_COLLAPSE_COMPRESSOR 0
#define VM_TEST_WIRE_AND_EXTRACT 0
#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
#if __arm64__
#define VM_TEST_KERNEL_OBJECT_FAULT 0
#endif /* __arm64__ */
#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)

#if VM_TEST_COLLAPSE_COMPRESSOR
extern boolean_t vm_object_collapse_compressor_allowed;
#include <IOKit/IOLib.h>
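/*
 * Build a backing object and a top object with a mix of resident and
 * compressor-backed pages, shadow the backing object through the top
 * object, then vm_object_collapse() the pair and verify the merged
 * contents page by page against the expected byte pattern.
 */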
static void
vm_test_collapse_compressor(void)
{
    vm_object_size_t backing_size, top_size;
    vm_object_t backing_object, top_object;
    vm_map_offset_t backing_offset, top_offset;
    unsigned char *backing_address, *top_address;
    kern_return_t kr;

    printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");

    /* create backing object */
    backing_size = 15 * PAGE_SIZE;
    backing_object = vm_object_allocate(backing_size, kernel_map->serial_id);
    assert(backing_object != VM_OBJECT_NULL);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
        backing_object);
    /* map backing object */
    backing_offset = 0;
    kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
        VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
        backing_object, 0, FALSE,
        VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    backing_address = (unsigned char *) backing_offset;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "mapped backing object %p at 0x%llx\n",
        backing_object, (uint64_t) backing_offset);
    /* populate with pages to be compressed in backing object */
    backing_address[0x1 * PAGE_SIZE] = 0xB1;
    backing_address[0x4 * PAGE_SIZE] = 0xB4;
    backing_address[0x7 * PAGE_SIZE] = 0xB7;
    backing_address[0xa * PAGE_SIZE] = 0xBA;
    backing_address[0xd * PAGE_SIZE] = 0xBD;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be compressed in "
        "backing_object %p\n", backing_object);
    /* compress backing object */
    vm_object_pageout(backing_object);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
        backing_object);
    /* wait for all the pages to be gone */
    while (*(volatile int *)&backing_object->resident_page_count != 0) {
        IODelay(10);
    }
    printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
        backing_object);
    /* populate with pages to be resident in backing object */
    backing_address[0x0 * PAGE_SIZE] = 0xB0;
    backing_address[0x3 * PAGE_SIZE] = 0xB3;
    backing_address[0x6 * PAGE_SIZE] = 0xB6;
    backing_address[0x9 * PAGE_SIZE] = 0xB9;
    backing_address[0xc * PAGE_SIZE] = 0xBC;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be resident in "
        "backing_object %p\n", backing_object);
    /* leave the other pages absent */
    /* mess with the paging_offset of the backing_object */
    assert(backing_object->paging_offset == 0);
    backing_object->paging_offset = 3 * PAGE_SIZE;

    /* create top object */
    top_size = 9 * PAGE_SIZE;
    top_object = vm_object_allocate(top_size, backing_object->vmo_provenance);
    assert(top_object != VM_OBJECT_NULL);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
        top_object);
    /* map top object */
    top_offset = 0;
    kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
        VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
        top_object, 0, FALSE,
        VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    top_address = (unsigned char *) top_offset;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "mapped top object %p at 0x%llx\n",
        top_object, (uint64_t) top_offset);
    /* populate with pages to be compressed in top object */
    top_address[0x3 * PAGE_SIZE] = 0xA3;
    top_address[0x4 * PAGE_SIZE] = 0xA4;
    top_address[0x5 * PAGE_SIZE] = 0xA5;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be compressed in "
        "top_object %p\n", top_object);
    /* compress top object */
    vm_object_pageout(top_object);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
        top_object);
    /* wait for all the pages to be gone */
    while (top_object->resident_page_count != 0) {
        IODelay(10);
    }
    printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
        top_object);
    /* populate with pages to be resident in top object */
    top_address[0x0 * PAGE_SIZE] = 0xA0;
    top_address[0x1 * PAGE_SIZE] = 0xA1;
    top_address[0x2 * PAGE_SIZE] = 0xA2;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be resident in "
        "top_object %p\n", top_object);
    /* leave the other pages absent */

    /* link the 2 objects */
    vm_object_reference(backing_object);
    top_object->shadow = backing_object;
    top_object->vo_shadow_offset = 3 * PAGE_SIZE;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
        top_object, backing_object);

    /* unmap backing object */
    vm_map_remove(kernel_map,
        backing_offset,
        backing_offset + backing_size,
        VM_MAP_REMOVE_NO_FLAGS);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "unmapped backing_object %p [0x%llx:0x%llx]\n",
        backing_object,
        (uint64_t) backing_offset,
        (uint64_t) (backing_offset + backing_size));

    /* collapse */
    printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
    vm_object_lock(top_object);
    vm_object_collapse(top_object, 0, FALSE);
    vm_object_unlock(top_object);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);

    /* did it work? */
    if (top_object->shadow != VM_OBJECT_NULL) {
        printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
        printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
        if (vm_object_collapse_compressor_allowed) {
            panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
        }
    } else {
        /* check the contents of the mapping */
        unsigned char expect[9] =
            { 0xA0, 0xA1, 0xA2, /* resident in top */
              0xA3, 0xA4, 0xA5, /* compressed in top */
              0xB9,   /* resident in backing + shadow_offset */
              0xBD,   /* compressed in backing + shadow_offset + paging_offset */
              0x00 }; /* absent in both */
        unsigned char actual[9];
        unsigned int i, errors;

        errors = 0;
        for (i = 0; i < sizeof(actual); i++) {
            actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
            if (actual[i] != expect[i]) {
                errors++;
            }
        }
        printf("VM_TEST_COLLAPSE_COMPRESSOR: "
            "actual [%x %x %x %x %x %x %x %x %x] "
            "expect [%x %x %x %x %x %x %x %x %x] "
            "%d errors\n",
            actual[0], actual[1], actual[2], actual[3],
            actual[4], actual[5], actual[6], actual[7],
            actual[8],
            expect[0], expect[1], expect[2], expect[3],
            expect[4], expect[5], expect[6], expect[7],
            expect[8],
            errors);
        if (errors) {
            panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
        } else {
            printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
        }
    }
}
#else /* VM_TEST_COLLAPSE_COMPRESSOR */
#define vm_test_collapse_compressor()
#endif /* VM_TEST_COLLAPSE_COMPRESSOR */

#if VM_TEST_WIRE_AND_EXTRACT
extern ppnum_t vm_map_get_phys_page(vm_map_t map,
    vm_offset_t offset);
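/*
 * Remap a user range into a second map, wire every page there with
 * vm_map_wire_and_extract(), and check that the physical page returned
 * matches vm_map_get_phys_page() on the original mapping; the last page
 * is wired a second time to cover the already-wired path.
 */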
static void
vm_test_wire_and_extract(void)
{
    ledger_t ledger;
    vm_map_t user_map, wire_map;
    mach_vm_address_t user_addr, wire_addr;
    mach_vm_size_t user_size, wire_size;
    mach_vm_offset_t cur_offset;
    vm_prot_t cur_prot, max_prot;
    ppnum_t user_ppnum, wire_ppnum;
    kern_return_t kr;

    ledger = ledger_instantiate(task_ledger_template,
        LEDGER_CREATE_ACTIVE_ENTRIES);
    pmap_t user_pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
    assert(user_pmap);
    user_map = vm_map_create_options(user_pmap,
        0x100000000ULL,
        0x200000000ULL,
        VM_MAP_CREATE_PAGEABLE);
    wire_map = vm_map_create_options(NULL,
        0x100000000ULL,
        0x200000000ULL,
        VM_MAP_CREATE_PAGEABLE);
    user_addr = 0;
    user_size = 0x10000;
    kr = mach_vm_allocate(user_map,
        &user_addr,
        user_size,
        VM_FLAGS_ANYWHERE);
    assert(kr == KERN_SUCCESS);
    wire_addr = 0;
    wire_size = user_size;
    kr = mach_vm_remap(wire_map,
        &wire_addr,
        wire_size,
        0,
        VM_FLAGS_ANYWHERE,
        user_map,
        user_addr,
        FALSE,
        &cur_prot,
        &max_prot,
        VM_INHERIT_NONE);
    assert(kr == KERN_SUCCESS);
    for (cur_offset = 0;
        cur_offset < wire_size;
        cur_offset += PAGE_SIZE) {
        kr = vm_map_wire_and_extract(wire_map,
            wire_addr + cur_offset,
            VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
            TRUE,
            &wire_ppnum);
        assert(kr == KERN_SUCCESS);
        user_ppnum = vm_map_get_phys_page(user_map,
            user_addr + cur_offset);
        printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
            "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
            kr,
            user_map, user_addr + cur_offset, user_ppnum,
            wire_map, wire_addr + cur_offset, wire_ppnum);
        if (kr != KERN_SUCCESS ||
            wire_ppnum == 0 ||
            wire_ppnum != user_ppnum) {
            panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
        }
    }
    cur_offset -= PAGE_SIZE;
    kr = vm_map_wire_and_extract(wire_map,
        wire_addr + cur_offset,
        VM_PROT_DEFAULT,
        TRUE,
        &wire_ppnum);
    assert(kr == KERN_SUCCESS);
    printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
        "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
        kr,
        user_map, user_addr + cur_offset, user_ppnum,
        wire_map, wire_addr + cur_offset, wire_ppnum);
    if (kr != KERN_SUCCESS ||
        wire_ppnum == 0 ||
        wire_ppnum != user_ppnum) {
        panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
    }

    printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
#else /* VM_TEST_WIRE_AND_EXTRACT */
#define vm_test_wire_and_extract()
#endif /* VM_TEST_WIRE_AND_EXTRACT */

#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
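/*
 * Wire a single page in a loop until its wire_count wraps around;
 * vm_page_wire() is expected to panic on the overflow, so falling
 * through to the panic() below means the overflow went undetected.
 */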
static void
vm_test_page_wire_overflow_panic(void)
{
    vm_object_t object;
    vm_page_t page;

    printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");

    object = vm_object_allocate(PAGE_SIZE, VM_MAP_SERIAL_NONE);
    vm_object_lock(object);
    page = vm_page_alloc(object, 0x0);
    vm_page_lock_queues();
    do {
        vm_page_wire(page, 1, FALSE);
    } while (page->wire_count != 0);
    vm_page_unlock_queues();
    vm_object_unlock(object);
    panic("FBDP(%p,%p): wire_count overflow not detected",
        object, page);
}
#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
#define vm_test_page_wire_overflow_panic()
#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */

#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
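/*
 * Allocate a guarded kernel stack and copyinframe() from its base, which
 * lands on a guard page of a kernel-object mapping; the copy is expected
 * to fail (non-zero return) instead of faulting the page in.
 */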
static void
vm_test_kernel_object_fault(void)
{
    vm_offset_t stack;
    uintptr_t frameb[2];
    int ret;

    kmem_alloc(kernel_map, &stack,
        kernel_stack_size + ptoa(2),
        KMA_NOFAIL | KMA_KSTACK | KMA_KOBJECT |
        KMA_GUARD_FIRST | KMA_GUARD_LAST,
        VM_KERN_MEMORY_STACK);

    ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
    if (ret != 0) {
        printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
    } else {
        printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
    }

    kmem_free_guard(kernel_map, stack, kernel_stack_size + ptoa(2),
        KMF_GUARD_FIRST | KMF_GUARD_LAST, KMEM_GUARD_NONE);
    stack = 0;
}
#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
#define vm_test_kernel_object_fault()
#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */

#if VM_TEST_DEVICE_PAGER_TRANSPOSE
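/*
 * Set up an anonymous object and a device-pager object of the same size,
 * block access to both, swap their contents with vm_object_transpose(),
 * and verify that the "named" flag and the reference counts follow the
 * transposed objects.
 */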
static void
vm_test_device_pager_transpose(void)
{
    memory_object_t device_pager;
    vm_object_t anon_object, device_object;
    vm_size_t size;
    vm_map_offset_t device_mapping;
    kern_return_t kr;

    size = 3 * PAGE_SIZE;
    anon_object = vm_object_allocate(size, kernel_map->serial_id);
    assert(anon_object != VM_OBJECT_NULL);
    device_pager = device_pager_setup(NULL, 0, size, 0);
    assert(device_pager != NULL);
    device_object = memory_object_to_vm_object(device_pager);
    assert(device_object != VM_OBJECT_NULL);
#if 0
    /*
     * Can't actually map this, since another thread might do a
     * vm_map_enter() that gets coalesced into this object, which
     * would cause the test to fail.
     */
    vm_map_offset_t anon_mapping = 0;
    kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
        VM_MAP_KERNEL_FLAGS_ANYWHERE(),
        anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
#endif
    device_mapping = 0;
    kr = mach_vm_map_kernel(kernel_map,
        vm_sanitize_wrap_addr_ref(&device_mapping),
        size,
        0,
        VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
        (void *)device_pager,
        0,
        FALSE,
        VM_PROT_DEFAULT,
        VM_PROT_ALL,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    memory_object_deallocate(device_pager);

    vm_object_lock(anon_object);
    vm_object_activity_begin(anon_object);
    anon_object->blocked_access = TRUE;
    vm_object_unlock(anon_object);
    vm_object_lock(device_object);
    vm_object_activity_begin(device_object);
    device_object->blocked_access = TRUE;
    vm_object_unlock(device_object);

    assert(os_ref_get_count_raw(&anon_object->ref_count) == 1);
    assert(!anon_object->named);
    assert(os_ref_get_count_raw(&device_object->ref_count) == 2);
    assert(device_object->named);

    kr = vm_object_transpose(device_object, anon_object, size);
    assert(kr == KERN_SUCCESS);

    vm_object_lock(anon_object);
    vm_object_activity_end(anon_object);
    anon_object->blocked_access = FALSE;
    vm_object_unlock(anon_object);
    vm_object_lock(device_object);
    vm_object_activity_end(device_object);
    device_object->blocked_access = FALSE;
    vm_object_unlock(device_object);

    assert(os_ref_get_count_raw(&anon_object->ref_count) == 2);
    assert(anon_object->named);
#if 0
    kr = vm_deallocate(kernel_map, anon_mapping, size);
    assert(kr == KERN_SUCCESS);
#endif
    assert(os_ref_get_count_raw(&device_object->ref_count) == 1);
    assert(!device_object->named);
    kr = vm_deallocate(kernel_map, device_mapping, size);
    assert(kr == KERN_SUCCESS);

    printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
#define vm_test_device_pager_transpose()
#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */

extern kern_return_t vm_allocate_external(vm_map_t map,
    vm_offset_t *addr,
    vm_size_t size,
    int flags);
extern kern_return_t vm_remap_external(vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    vm_map_t src_map,
    vm_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
extern int debug4k_panic_on_misaligned_sharing;
void vm_test_4k(void);
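/*
 * Exercise a forced-4K address space on a 16K kernel: back-to-back
 * allocations should pack at 4K granularity, protections and faults
 * should honor 4K boundaries, and remapping 4K regions into the 16K
 * kernel_map should succeed or fail depending on alignment and on
 * whether a copy is requested.
 */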
void
vm_test_4k(void)
{
    pmap_t test_pmap;
    vm_map_t test_map;
    kern_return_t kr;
    vm_address_t expected_addr;
    vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr;
    vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr;
    vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size;
    vm_size_t alloc5_size, remap_src_size;
    vm_address_t fault_addr;
    vm_prot_t cur_prot, max_prot;
    int saved_debug4k_panic_on_misaligned_sharing;

    printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__);
    test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
    assert(test_pmap != NULL);
    test_map = vm_map_create_options(test_pmap,
        MACH_VM_MIN_ADDRESS,
        MACH_VM_MAX_ADDRESS,
        VM_MAP_CREATE_PAGEABLE);
    assert(test_map != VM_MAP_NULL);
    vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT);
    printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map));

    alloc1_addr = 0;
    alloc1_size = 1 * FOURK_PAGE_SIZE;
    expected_addr = 0x1000;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
    kr = vm_allocate_external(test_map,
        &alloc1_addr,
        alloc1_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
    expected_addr += alloc1_size;

    printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
    kr = vm_deallocate(test_map, alloc1_addr, alloc1_size);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);

    alloc1_addr = 0;
    alloc1_size = 1 * FOURK_PAGE_SIZE;
    expected_addr = 0x1000;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
    kr = vm_allocate_external(test_map,
        &alloc1_addr,
        alloc1_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
    expected_addr += alloc1_size;

    alloc2_addr = 0;
    alloc2_size = 3 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size);
    kr = vm_allocate_external(test_map,
        &alloc2_addr,
        alloc2_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr);
    expected_addr += alloc2_size;

    alloc3_addr = 0;
    alloc3_size = 18 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size);
    kr = vm_allocate_external(test_map,
        &alloc3_addr,
        alloc3_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
    expected_addr += alloc3_size;

    alloc4_addr = 0;
    alloc4_size = 1 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size);
    kr = vm_allocate_external(test_map,
        &alloc4_addr,
        alloc4_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc4_addr);
    expected_addr += alloc4_size;

    printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE));
    kr = vm_protect(test_map,
        alloc2_addr,
        (1UL * FOURK_PAGE_SIZE),
        FALSE,
        VM_PROT_READ);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);

    for (fault_addr = alloc1_addr;
        fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE);
        fault_addr += FOURK_PAGE_SIZE) {
        printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
        kr = vm_fault(test_map,
            fault_addr,
            VM_PROT_WRITE,
            FALSE,
            VM_KERN_MEMORY_NONE,
            THREAD_UNINT,
            NULL,
            0);
        printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
        if (fault_addr == alloc2_addr) {
            assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_PROTECTION_FAILURE);
            printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr);
            kr = vm_fault(test_map,
                fault_addr,
                VM_PROT_READ,
                FALSE,
                VM_KERN_MEMORY_NONE,
                THREAD_UNINT,
                NULL,
                0);
            assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
            printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
        } else if (fault_addr >= alloc4_addr + alloc4_size) {
            assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS);
        } else {
            assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
        }
    }

    alloc5_addr = 0;
    alloc5_size = 7 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size);
    kr = vm_allocate_external(test_map,
        &alloc5_addr,
        alloc5_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr);
    expected_addr += alloc5_size;

    dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_SHIFT);
    dealloc_addr += FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE);
    kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);

    remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_SHIFT);
    remap_src_addr += FOURK_PAGE_SIZE;
    remap_src_size = 2 * FOURK_PAGE_SIZE;
    remap_dst_addr = 0;
    printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr);
    kr = vm_remap_external(test_map,
        &remap_dst_addr,
        remap_src_size,
        0, /* mask */
        VM_FLAGS_ANYWHERE,
        test_map,
        remap_src_addr,
        FALSE, /* copy */
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr);
    expected_addr += remap_src_size;

    for (fault_addr = remap_dst_addr;
        fault_addr < remap_dst_addr + remap_src_size;
        fault_addr += 4096) {
        printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
        kr = vm_fault(test_map,
            fault_addr,
            VM_PROT_WRITE,
            FALSE,
            VM_KERN_MEMORY_NONE,
            THREAD_UNINT,
            NULL,
            0);
        assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
        printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
    }

    printf("VM_TEST_4K:\n");
    remap_dst_addr = 0;
    remap_src_addr = alloc3_addr + 0xc000;
    remap_src_size = 0x5000;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0, /* mask */
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        FALSE, /* copy */
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

    printf("VM_TEST_4K:\n");
    remap_dst_addr = 0;
    remap_src_addr = alloc3_addr + 0xc000;
    remap_src_size = 0x5000;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0, /* mask */
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        TRUE, /* copy */
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

    printf("VM_TEST_4K:\n");
    saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing;
    debug4k_panic_on_misaligned_sharing = 0;
    remap_dst_addr = 0;
    remap_src_addr = alloc1_addr;
    remap_src_size = alloc1_size + alloc2_size;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0, /* mask */
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        FALSE, /* copy */
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
    debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing;

    printf("VM_TEST_4K:\n");
    remap_dst_addr = 0;
    remap_src_addr = alloc1_addr;
    remap_src_size = alloc1_size + alloc2_size;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0, /* mask */
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        TRUE, /* copy */
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
#if 000
    assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#else /* 000 */
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#endif /* 000 */


#if 00
    printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset);
    vm_map_remove(test_map, test_map->min_offset, test_map->max_offset);
#endif

    printf("VM_TEST_4K: PASS\n\n\n\n");
}
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */

#if MACH_ASSERT
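/*
 * Exercise vm_map_copy_adjust_to_target() with copy maps taken from maps
 * of different page sizes; only the 16K-copy-into-4K-map combination is
 * currently run, the others are left commented out below.
 */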
static void
vm_test_map_copy_adjust_to_target_one(
    vm_map_copy_t copy_map,
    vm_map_t target_map)
{
    kern_return_t kr;
    vm_map_copy_t target_copy;
    vm_map_offset_t overmap_start, overmap_end, trimmed_start;

    target_copy = VM_MAP_COPY_NULL;
    /* size is 2 (4k) pages but range covers 3 pages */
    kr = vm_map_copy_adjust_to_target(copy_map,
        0x0 + 0xfff,
        0x1002,
        target_map,
        FALSE,
        &target_copy,
        &overmap_start,
        &overmap_end,
        &trimmed_start);
    assert(kr == KERN_SUCCESS);
    assert(overmap_start == 0);
    assert(overmap_end == 0);
    assert(trimmed_start == 0);
    assertf(target_copy->size == 0x3000,
        "target_copy %p size 0x%llx\n",
        target_copy, (uint64_t)target_copy->size);
    vm_map_copy_discard(target_copy);

    /* 1. adjust_to_target() for bad offset -> error */
    /* 2. adjust_to_target() for bad size -> error */
    /* 3. adjust_to_target() for the whole thing -> unchanged */
    /* 4. adjust_to_target() to trim start by less than 1 page */
    /* 5. adjust_to_target() to trim end by less than 1 page */
    /* 6. adjust_to_target() to trim start and end by less than 1 page */
    /* 7. adjust_to_target() to trim start by more than 1 page */
    /* 8. adjust_to_target() to trim end by more than 1 page */
    /* 9. adjust_to_target() to trim start and end by more than 1 page */
    /* 10. adjust_to_target() to trim start by more than 1 entry */
    /* 11. adjust_to_target() to trim end by more than 1 entry */
    /* 12. adjust_to_target() to trim start and end by more than 1 entry */
    /* 13. adjust_to_target() to trim start and end down to 1 entry */
}

static void
vm_test_map_copy_adjust_to_target(void)
{
    kern_return_t kr;
    vm_map_t map4k, map16k;
    vm_object_t obj1, obj2, obj3, obj4;
    vm_map_offset_t addr4k, addr16k;
    vm_map_size_t size4k, size16k;
    vm_map_copy_t copy4k, copy16k;
    vm_prot_t curprot, maxprot;
    vm_map_kernel_flags_t vmk_flags;

    /* create a 4k map */
    map4k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
        VM_MAP_CREATE_PAGEABLE);
    vm_map_set_page_shift(map4k, 12);

    /* create a 16k map */
    map16k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
        VM_MAP_CREATE_PAGEABLE);
    vm_map_set_page_shift(map16k, 14);

    /* create 4 VM objects */
    obj1 = vm_object_allocate(0x100000, map4k->serial_id);
    obj2 = vm_object_allocate(0x100000, map4k->serial_id);
    obj3 = vm_object_allocate(0x100000, map4k->serial_id);
    obj4 = vm_object_allocate(0x100000, map4k->serial_id);

    /* map objects in 4k map */
    vm_object_reference(obj1);
    addr4k = 0x1000;
    size4k = 0x3000;
    kr = vm_map_enter(map4k, &addr4k, size4k, 0,
        VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
        FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    assert(addr4k == 0x1000);

    /* map objects in 16k map */
    vm_object_reference(obj1);
    addr16k = 0x4000;
    size16k = 0x8000;
    kr = vm_map_enter(map16k, &addr16k, size16k, 0,
        VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
        FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    assert(addr16k == 0x4000);

    /* test for <rdar://60959809> */
    ipc_port_t mem_entry;
    memory_object_size_t mem_entry_size;
    mach_vm_size_t map_size;
    mem_entry_size = 0x1002;
    mem_entry = IPC_PORT_NULL;
    kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff,
        MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ,
        &mem_entry, IPC_PORT_NULL);
    assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
    assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size);
    map_size = 0;
    kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size);
    assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
    assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size);
    mach_memory_entry_port_release(mem_entry);

    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_remap_legacy_mode = true;

    /* create 4k copy map */
    curprot = VM_PROT_NONE;
    maxprot = VM_PROT_NONE;
    kr = vm_map_copy_extract(map4k, addr4k, 0x3000,
        FALSE, &copy4k, &curprot, &maxprot,
        VM_INHERIT_DEFAULT, vmk_flags);
    assert(kr == KERN_SUCCESS);
    assert(copy4k->size == 0x3000);

    /* create 16k copy map */
    curprot = VM_PROT_NONE;
    maxprot = VM_PROT_NONE;
    kr = vm_map_copy_extract(map16k, addr16k, 0x4000,
        FALSE, &copy16k, &curprot, &maxprot,
        VM_INHERIT_DEFAULT, vmk_flags);
    assert(kr == KERN_SUCCESS);
    assert(copy16k->size == 0x4000);

    /* test each combination */
    // vm_test_map_copy_adjust_to_target_one(copy4k, map4k);
    // vm_test_map_copy_adjust_to_target_one(copy16k, map16k);
    // vm_test_map_copy_adjust_to_target_one(copy4k, map16k);
    vm_test_map_copy_adjust_to_target_one(copy16k, map4k);

    /* assert 1 ref on 4k map */
    assert(os_ref_get_count_raw(&map4k->map_refcnt) == 1);
    /* release 4k map */
    vm_map_deallocate(map4k);
    /* assert 1 ref on 16k map */
    assert(os_ref_get_count_raw(&map16k->map_refcnt) == 1);
    /* release 16k map */
    vm_map_deallocate(map16k);
    /* deallocate copy maps */
    vm_map_copy_discard(copy4k);
    vm_map_copy_discard(copy16k);
    /* assert 1 ref on all VM objects */
    assert(os_ref_get_count_raw(&obj1->ref_count) == 1);
    assert(os_ref_get_count_raw(&obj2->ref_count) == 1);
    assert(os_ref_get_count_raw(&obj3->ref_count) == 1);
    assert(os_ref_get_count_raw(&obj4->ref_count) == 1);
    /* release all VM objects */
    vm_object_deallocate(obj1);
    vm_object_deallocate(obj2);
    vm_object_deallocate(obj3);
    vm_object_deallocate(obj4);
}
#endif /* MACH_ASSERT */

#if __arm64__ && !KASAN
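/*
 * Check that the task "internal" ledger is charged per mapping rather
 * than per page: a page kept alive through a UPL and republished via a
 * device object should only hit the ledger once it is faulted through
 * the remaining user mapping, and the charge should drop again when
 * that mapping is deallocated.
 */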
__attribute__((noinline))
static void
vm_test_per_mapping_internal_accounting(void)
{
    ledger_t ledger;
    pmap_t user_pmap;
    vm_map_t user_map;
    kern_return_t kr;
    ledger_amount_t balance;
    mach_vm_address_t user_addr, user_remap;
    vm_map_offset_t device_addr;
    mach_vm_size_t user_size;
    vm_prot_t cur_prot, max_prot;
    upl_size_t upl_size;
    upl_t upl;
    unsigned int upl_count;
    upl_control_flags_t upl_flags;
    upl_page_info_t *pl;
    ppnum_t ppnum;
    vm_object_t device_object;
    vm_map_offset_t map_start, map_end;
    int pmap_flags;

    pmap_flags = 0;
    if (sizeof(vm_map_offset_t) == 4) {
        /* 32-bit map offsets: keep the test range below 4GB */
        map_start = 0x10000000;
        map_end = 0x20000000;
    } else {
        map_start = 0x100000000ULL;
        map_end = 0x200000000ULL;
        pmap_flags |= PMAP_CREATE_64BIT;
    }
    /* create a user address space */
    ledger = ledger_instantiate(task_ledger_template,
        LEDGER_CREATE_ACTIVE_ENTRIES);
    assert(ledger);
    user_pmap = pmap_create_options(ledger, 0, pmap_flags);
    assert(user_pmap);
    user_map = vm_map_create(user_pmap,
        map_start,
        map_end,
        TRUE);
    assert(user_map);
    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == 0, "balance=0x%llx", balance);
    /* allocate 1 page in that address space */
    user_addr = 0;
    user_size = PAGE_SIZE;
    kr = mach_vm_allocate(user_map,
        &user_addr,
        user_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == 0, "balance=0x%llx", balance);
    /* remap the original mapping */
    user_remap = 0;
    kr = mach_vm_remap(user_map,
        &user_remap,
        PAGE_SIZE,
        0,
        VM_FLAGS_ANYWHERE,
        user_map,
        user_addr,
        FALSE, /* copy */
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == 0, "balance=0x%llx", balance);
    /* create a UPL from the original mapping */
    upl_size = PAGE_SIZE;
    upl = NULL;
    upl_count = 0;
    upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
    kr = vm_map_create_upl(user_map,
        (vm_map_offset_t)user_addr,
        &upl_size,
        &upl,
        NULL,
        &upl_count,
        &upl_flags,
        VM_KERN_MEMORY_DIAG);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
    assert(upl_page_present(pl, 0));
    ppnum = upl_phys_page(pl, 0);
    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == 0, "balance=0x%llx", balance);
    device_object = vm_object_allocate(PAGE_SIZE, kernel_map->serial_id);
    assert(device_object);
    vm_object_lock(device_object);
    VM_OBJECT_SET_PRIVATE(device_object, TRUE);
    VM_OBJECT_SET_PHYS_CONTIGUOUS(device_object, TRUE);
    device_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
    vm_object_unlock(device_object);
    kr = vm_object_populate_with_private(device_object, 0,
        ppnum, PAGE_SIZE);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);

    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == 0, "balance=0x%llx", balance);
    /* deallocate the original mapping */
    kr = mach_vm_deallocate(user_map, user_addr, PAGE_SIZE);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    /* map the device_object in the kernel */
    device_addr = 0;
    vm_object_reference(device_object);
    kr = vm_map_enter(kernel_map,
        &device_addr,
        PAGE_SIZE,
        0,
        VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
        device_object,
        0,
        FALSE, /* copy */
        VM_PROT_DEFAULT,
        VM_PROT_DEFAULT,
        VM_INHERIT_NONE);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    /* access the device pager mapping */
    *(char *)device_addr = 'x';
    printf("%s:%d 0x%llx: 0x%x\n", __FUNCTION__, __LINE__, (uint64_t)device_addr, *(uint32_t *)device_addr);
    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == 0, "balance=0x%llx", balance);
    /* fault in the remap addr */
    kr = vm_fault(user_map, (vm_map_offset_t)user_remap, VM_PROT_READ,
        FALSE, 0, TRUE, NULL, 0);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == PAGE_SIZE, "balance=0x%llx", balance);
    /* deallocate remapping */
    kr = mach_vm_deallocate(user_map, user_remap, PAGE_SIZE);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    /* check ledger */
    kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
    assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
    assertf(balance == 0, "balance=0x%llx", balance);
    /* TODO: cleanup... */
    printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
}
#endif /* __arm64__ && !KASAN */

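/*
 * Verify per-tag wired-memory accounting (vm_tag_get_size()) across
 * kmem_alloc()/kmem_free() and across the populate/depopulate and
 * wire/unwire transitions driven by the flag matrix described below.
 */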
static void
vm_test_kernel_tag_accounting_kma(kma_flags_t base, kma_flags_t bit)
{
    vm_tag_t tag = VM_KERN_MEMORY_REASON; /* unused during POST */
    uint64_t init_size = vm_tag_get_size(tag);
    __assert_only uint64_t final_size = init_size + PAGE_SIZE;
    vm_address_t address;
    kern_return_t kr;

    /*
     * Test the matrix of:
     * - born with or without bit
     * - bit flipped or not
     * - dies with or without bit
     */
    for (uint32_t i = 0; i < 4; i++) {
        kma_flags_t flags1 = base | ((i & 1) ? bit : KMA_NONE);
        kma_flags_t flags2 = base | ((i & 2) ? bit : KMA_NONE);

        kr = kmem_alloc(kernel_map, &address, PAGE_SIZE, flags1, tag);
        assert3u(kr, ==, KERN_SUCCESS);

        if (flags1 & (KMA_VAONLY | KMA_PAGEABLE)) {
            assert3u(init_size, ==, vm_tag_get_size(tag));
        } else {
            assert3u(final_size, ==, vm_tag_get_size(tag));
        }

        if ((flags1 ^ flags2) == KMA_VAONLY) {
            if (flags1 & KMA_VAONLY) {
                kernel_memory_populate(address, PAGE_SIZE,
                    KMA_KOBJECT | KMA_NOFAIL, tag);
            } else {
                kernel_memory_depopulate(address, PAGE_SIZE,
                    KMA_KOBJECT, tag);
            }
        }

        if ((flags1 ^ flags2) == KMA_PAGEABLE) {
            if (flags1 & KMA_PAGEABLE) {
                kr = vm_map_wire_kernel(kernel_map,
                    address, address + PAGE_SIZE,
                    VM_PROT_DEFAULT, tag, false);
                assert3u(kr, ==, KERN_SUCCESS);
            } else {
                kr = vm_map_unwire(kernel_map,
                    address, address + PAGE_SIZE, false);
                assert3u(kr, ==, KERN_SUCCESS);
            }
        }

        if (flags2 & (KMA_VAONLY | KMA_PAGEABLE)) {
            assert3u(init_size, ==, vm_tag_get_size(tag));
        } else {
            assert3u(final_size, ==, vm_tag_get_size(tag));
        }

        kmem_free(kernel_map, address, PAGE_SIZE);
        assert3u(init_size, ==, vm_tag_get_size(tag));
    }
}

__attribute__((noinline))
static void
vm_test_kernel_tag_accounting(void)
{
    printf("%s: test running\n", __func__);

    printf("%s: account (KMA_KOBJECT + populate)...\n", __func__);
    vm_test_kernel_tag_accounting_kma(KMA_KOBJECT, KMA_VAONLY);
    printf("%s: PASS\n", __func__);

    printf("%s: account (regular object + wiring)...\n", __func__);
    vm_test_kernel_tag_accounting_kma(KMA_NONE, KMA_PAGEABLE);
    printf("%s: PASS\n", __func__);

    printf("%s: test passed\n", __func__);

#undef if_bit
}

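/*
 * Regression test for a 32-bit truncation of (size >> PAGE_SHIFT) in the
 * vm_object_collapse() bypass path: with a backing object large enough
 * that the truncated page count is zero, a resident page must still
 * block the shadow bypass, and removing that page must allow it.
 */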
__attribute__((noinline))
static void
vm_test_collapse_overflow(void)
{
    vm_object_t object, backing_object;
    vm_object_size_t size;
    vm_page_t m;

    /* create an object for which (int)(size >> PAGE_SHIFT) == 0 */
    size = 0x400000000000ULL;
    assert((int)(size >> PAGE_SHIFT) == 0);
    backing_object = vm_object_allocate(size + PAGE_SIZE, VM_MAP_SERIAL_NONE);
    assert(backing_object);
    vm_object_reference(backing_object);
    /* insert a page */
    m = VM_PAGE_NULL;
    while (m == VM_PAGE_NULL) {
        m = vm_page_grab();
        if (m == VM_PAGE_NULL) {
            VM_PAGE_WAIT();
        }
    }
    assert(m);
    vm_object_lock(backing_object);
    vm_page_insert(m, backing_object, 0);
    vm_object_unlock(backing_object);
    /* make it back another object */
    object = vm_object_allocate(size, VM_MAP_SERIAL_NONE);
    assert(object);
    vm_object_reference(object);
    object->shadow = backing_object;
    vm_object_reference(backing_object);
    /* trigger a bypass */
    vm_object_lock(object);
    vm_object_collapse(object, 0, TRUE);
    /* check that it did not bypass the backing object */
    if (object->shadow != backing_object) {
        panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
    }
    vm_object_unlock(object);

    /* remove the page from the backing object */
    vm_object_lock(backing_object);
    vm_page_remove(m, TRUE);
    vm_object_unlock(backing_object);
    /* trigger a bypass */
    vm_object_lock(object);
    vm_object_collapse(object, 0, TRUE);
    /* check that it did bypass the backing object */
    if (object->shadow == backing_object) {
        panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
    }
    vm_page_insert(m, object, 0);
    vm_object_unlock(object);

    /* cleanup */
    vm_object_deallocate(object);
    /* "backing_object" already lost its reference during the bypass */
    // vm_object_deallocate(backing_object);

    printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
}

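/*
 * Probe vm_map_range_physical_size() with a zero-size range (expected to
 * succeed with a zero phys_size) and with ranges that wrap the address
 * space before or after page rounding (all expected to be rejected with
 * KERN_INVALID_ARGUMENT and a zero phys_size).
 */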
__attribute__((noinline))
static void
vm_test_physical_size_overflow(void)
{
    vm_map_address_t start;
    mach_vm_size_t size;
    kern_return_t kr;
    mach_vm_size_t phys_size;
    bool fail;
    int failures = 0;

    /* size == 0 */
    start = 0x100000;
    size = 0x0;
    kr = vm_map_range_physical_size(kernel_map,
        start,
        size,
        &phys_size);
    fail = (kr != KERN_SUCCESS || phys_size != 0);
    printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
        __FUNCTION__, __LINE__,
        (fail ? "FAIL" : "PASS"),
        (uint64_t)start, size, kr, phys_size);
    failures += fail;

    /* plain wraparound */
    start = 0x100000;
    size = 0xffffffffffffffff - 0x10000;
    kr = vm_map_range_physical_size(kernel_map,
        start,
        size,
        &phys_size);
    fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
    printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
        __FUNCTION__, __LINE__,
        (fail ? "FAIL" : "PASS"),
        (uint64_t)start, size, kr, phys_size);
    failures += fail;

    /* wraparound after rounding */
    start = 0xffffffffffffff00;
    size = 0xf0;
    kr = vm_map_range_physical_size(kernel_map,
        start,
        size,
        &phys_size);
    fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
    printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
        __FUNCTION__, __LINE__,
        (fail ? "FAIL" : "PASS"),
        (uint64_t)start, size, kr, phys_size);
    failures += fail;

    /* wraparound to start after rounding */
    start = 0x100000;
    size = 0xffffffffffffffff;
    kr = vm_map_range_physical_size(kernel_map,
        start,
        size,
        &phys_size);
    fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
    printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
        __FUNCTION__, __LINE__,
        (fail ? "FAIL" : "PASS"),
        (uint64_t)start, size, kr, phys_size);
    failures += fail;

    if (failures) {
        panic("%s: FAIL (failures=%d)", __FUNCTION__, failures);
    }
    printf("%s: PASS\n", __FUNCTION__);
}

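/*
 * Tagged-pointer layout constants (upper nibble and tag byte of a 64-bit
 * pointer), apparently kept for the address canonicalization test below,
 * which is currently skipped.
 */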
#define PTR_UPPER_SHIFT 60
#define PTR_TAG_SHIFT 56
#define PTR_BITS_MASK (((1ULL << PTR_TAG_SHIFT) - 1) | (0xfULL << PTR_UPPER_SHIFT))


__attribute__((noinline))
static void
vm_test_address_canonicalization(void)
{
    T_SKIP("System not designed to support this test, skipping...");
}


kern_return_t
vm_tests(void)
{
    kern_return_t kr = KERN_SUCCESS;

    /* Avoid VM panics because some of our test vm_maps don't have a pmap. */
    thread_test_context_t ctx CLEANUP_THREAD_TEST_CONTEXT = {
        .test_option_vm_map_allow_null_pmap = true,
    };
    thread_set_test_context(&ctx);

    vm_test_collapse_compressor();
    vm_test_wire_and_extract();
    vm_test_page_wire_overflow_panic();
    vm_test_kernel_object_fault();
    vm_test_device_pager_transpose();
#if MACH_ASSERT
    vm_test_map_copy_adjust_to_target();
#endif /* MACH_ASSERT */
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
    vm_test_4k();
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
#if __arm64__ && !KASAN
    vm_test_per_mapping_internal_accounting();
#endif /* __arm64__ && !KASAN */
    vm_test_kernel_tag_accounting();
    vm_test_collapse_overflow();
    vm_test_physical_size_overflow();
    vm_test_address_canonicalization();

    return kr;
}

/*
 * Checks that vm_map_delete() can deal with map unaligned entries.
 * rdar://88969652
 */
static int
vm_map_non_aligned_test(__unused int64_t in, int64_t *out)
{
    vm_map_t map = current_map();
    mach_vm_size_t size = 2 * VM_MAP_PAGE_SIZE(map);
    mach_vm_address_t addr;
    vm_map_entry_t entry;
    kern_return_t kr;

    if (VM_MAP_PAGE_SHIFT(map) > PAGE_SHIFT) {
        kr = mach_vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
        if (kr != KERN_SUCCESS) {
            return ENOMEM;
        }

        vm_map_lock(map);
        if (!vm_map_lookup_entry(map, addr, &entry)) {
            panic("couldn't find the entry we just made: "
                "map:%p addr:0x%0llx", map, addr);
        }

        /*
         * Now break the entry into:
         *   2 * 4k
         *   2 * 4k
         *   1 * 16k
         */
        vm_map_clip_end(map, entry, addr + VM_MAP_PAGE_SIZE(map));
        entry->map_aligned = FALSE;
        vm_map_clip_end(map, entry, addr + PAGE_SIZE * 2);
        vm_map_unlock(map);

        kr = mach_vm_deallocate(map, addr, size);
        assert(kr == KERN_SUCCESS);
    }

    *out = 1;
    return 0;
}
SYSCTL_TEST_REGISTER(vm_map_non_aligned, vm_map_non_aligned_test);

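/*
 * SYSCTL_TEST_REGISTER() above hooks the test into the sysctl-driven test
 * machinery on DEVELOPMENT/DEBUG kernels; presumably it is triggered from
 * userspace through the debug.test namespace, e.g. something like
 * "sysctl debug.test.vm_map_non_aligned=1" (node name assumed from the
 * macro's first argument).
 */

/*
 * Helpers for the tests below: create_map() builds a pageable 64-bit map
 * backed by a fresh pmap and ledger, cleanup_map() terminates the map and
 * drops the last reference (taking the pmap with it).
 */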
static inline vm_map_t
create_map(mach_vm_address_t map_start, mach_vm_address_t map_end)
{
    ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
    pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
    assert(pmap);
    ledger_dereference(ledger); // now retained by pmap
    vm_map_t map = vm_map_create_options(pmap, map_start, map_end, VM_MAP_CREATE_PAGEABLE); // vm_compute_max_offset
    assert(map);

#if CONFIG_SPTM
    /* Ensure the map serial looks fine */
    if (map->serial_id != pmap->associated_vm_map_serial_id) {
        panic("Expected a map and its pmap to have exactly the same serial");
    }
#endif /* CONFIG_SPTM */

    return map;
}

static inline void
cleanup_map(vm_map_t *map)
{
    assert(*map);
    kern_return_t kr = vm_map_terminate(*map);
    assert(kr == 0);
    vm_map_deallocate(*map); // also destroys pmap
}

kern_return_t
mach_vm_remap_new_external(
    vm_map_t target_map,
    mach_vm_offset_ut *address,
    mach_vm_size_ut size,
    mach_vm_offset_ut mask,
    int flags,
    mach_port_t src_tport,
    mach_vm_offset_ut memory_address,
    boolean_t copy,
    vm_prot_ut *cur_protection_u,
    vm_prot_ut *max_protection_u,
    vm_inherit_ut inheritance);
kern_return_t
vm_remap_new_external(
    vm_map_t target_map,
    vm_offset_ut *address,
    vm_size_ut size,
    vm_offset_ut mask,
    int flags,
    mach_port_t src_tport,
    vm_offset_ut memory_address,
    boolean_t copy,
    vm_prot_ut *cur_protection,
    vm_prot_ut *max_protection,
    vm_inherit_ut inheritance);
kern_return_t
mach_vm_remap_external(
    vm_map_t target_map,
    mach_vm_offset_ut *address,
    mach_vm_size_ut size,
    mach_vm_offset_ut mask,
    int flags,
    vm_map_t src_map,
    mach_vm_offset_ut memory_address,
    boolean_t copy,
    vm_prot_ut *cur_protection,
    vm_prot_ut *max_protection,
    vm_inherit_ut inheritance);
kern_return_t
mach_vm_map_external(
    vm_map_t target_map,
    mach_vm_offset_ut *address,
    mach_vm_size_ut initial_size,
    mach_vm_offset_ut mask,
    int flags,
    ipc_port_t port,
    memory_object_offset_ut offset,
    boolean_t copy,
    vm_prot_ut cur_protection,
    vm_prot_ut max_protection,
    vm_inherit_ut inheritance);
kern_return_t
mach_vm_wire_external(
    host_priv_t host_priv,
    vm_map_t map,
    mach_vm_address_ut start,
    mach_vm_size_ut size,
    vm_prot_ut access);
kern_return_t
mach_vm_purgable_control_external(
    mach_port_t target_tport,
    mach_vm_offset_ut address_u,
    vm_purgable_t control,
    int *state);
kern_return_t
vm_purgable_control_external(
    mach_port_t target_tport,
    vm_offset_ut address,
    vm_purgable_t control,
    int *state);

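/*
 * Call a broad set of MIG entry points with VM_MAP_NULL (or HOST_NULL)
 * arguments; each call must fail gracefully instead of dereferencing the
 * null map.
 */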
1538 static int
vm_map_null_tests(__unused int64_t in,int64_t * out)1539 vm_map_null_tests(__unused int64_t in, int64_t *out)
1540 {
1541 kern_return_t kr;
1542
1543 mach_vm_address_t alloced_addr, throwaway_addr;
1544 mach_vm_address_ut throwaway_addr_ut;
1545 vm_address_t vm_throwaway_addr;
1546 vm_address_ut vm_throwaway_addr_ut;
1547 vm32_address_ut alloced_addr32, throwaway_addr32_u;
1548 mach_vm_size_t throwaway_size, size_16kb, read_overwrite_data_size;
1549 vm_size_t vm_size, vm_read_overwrite_data_size, vm_throwaway_size;
1550 vm_size_ut throwaway_size_ut;
1551 vm32_size_t data_size32, size32_16kb;
1552 vm32_size_ut data_size32_u, throwaway_size32_u;
1553 mach_msg_type_number_t read_data_size;
1554 mach_port_t mem_entry_result;
1555 pointer_t read_data;
1556 pointer_ut read_data_u;
1557 vm_prot_t prot_default;
1558 vm_prot_ut prot_allexec_u, prot_default_ut;
1559 vm_map_t map64, map32;
1560 vm_machine_attribute_val_t vm_throwaway_attr_val;
1561 vm_region_extended_info_data_t vm_throwaway_region_extended_info;
1562 vm_region_recurse_info_t vm_throwaway_region_recurse_info;
1563 vm_region_recurse_info_64_t vm_throwaway_region_recurse_info_64;
1564 int throwaway_state;
1565 uint32_t throwaway_depth;
1566 vm_page_info_t page_info;
1567
1568 page_info = 0;
1569 throwaway_state = VM_PURGABLE_STATE_MAX;
1570 vm_throwaway_region_recurse_info_64 = 0;
1571 vm_throwaway_region_recurse_info = 0;
1572 vm_throwaway_attr_val = MATTR_VAL_OFF;
1573
1574 map64 = create_map(0, vm_compute_max_offset(true));
1575 map32 = create_map(0, vm_compute_max_offset(false));
1576
1577 prot_allexec_u = vm_sanitize_wrap_prot(VM_PROT_ALLEXEC);
1578 prot_default_ut = vm_sanitize_wrap_prot(VM_PROT_DEFAULT);
1579 prot_default = VM_PROT_DEFAULT;

	size_16kb = 16 * 1024;
	size32_16kb = (vm32_size_t) size_16kb;

	/*
	 * Allocate an address in each map so we can pass a valid-looking
	 * address below; otherwise some entry points would bail out before
	 * ever reaching their VM_MAP_NULL check.
	 */
	kr = mach_vm_allocate(map64, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);
	kr = vm32_vm_allocate(map32, &alloced_addr32, size32_16kb, VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);

	/*
	 * Call a bunch of MIG entry points with VM_MAP_NULL to verify that
	 * each one checks map != VM_MAP_NULL. No specific error code is
	 * required, so we only assert that the calls fail; the real goal is
	 * to verify that none of them crashes.
	 */
	throwaway_size = size_16kb;
	kr = _mach_make_memory_entry(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
	assert(kr != KERN_SUCCESS);
	throwaway_size32_u = vm32_sanitize_wrap_size(size32_16kb);
	kr = vm32_mach_make_memory_entry(VM_MAP_NULL, &throwaway_size32_u, alloced_addr32, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
	assert(kr != KERN_SUCCESS);
	throwaway_size_ut = vm_sanitize_wrap_size(size_16kb);
	kr = vm32_mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size_ut, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
	assert(kr != KERN_SUCCESS);
	throwaway_size = size_16kb;
	kr = mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
	assert(kr != KERN_SUCCESS);
	vm_size = size_16kb;
	kr = mach_make_memory_entry(VM_MAP_NULL, &vm_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
	assert(kr != KERN_SUCCESS);

	kr = mach_memory_object_memory_entry(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
	assert(kr != KERN_SUCCESS);
	kr = mach_memory_object_memory_entry_64(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
	assert(kr != KERN_SUCCESS);

	throwaway_addr = alloced_addr;
	kr = mach_vm_allocate(VM_MAP_NULL, &throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr != KERN_SUCCESS);
	throwaway_addr32_u = alloced_addr32;
	kr = vm32_vm_allocate(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, VM_FLAGS_ANYWHERE);
	assert(kr != KERN_SUCCESS);
	kr = vm_allocate_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
	assert(kr != KERN_SUCCESS);
	kr = vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_deallocate(VM_MAP_NULL, throwaway_addr32_u, size32_16kb);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_map(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_map_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	vm_throwaway_addr = alloced_addr;
	kr = vm_map(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_map(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_map_64(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_remap(map64, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_remap(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_remap_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_remap_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm_remap_external(map64, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm_remap_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_remap(map32, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_remap(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, map32, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = vm_remap_new_external(VM_MAP_NULL, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm_remap_new_external(map64, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_wire_external(host_priv_self(), VM_MAP_NULL, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_wire_external(HOST_PRIV_NULL, map64, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = vm_wire(host_priv_self(), VM_MAP_NULL, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm_wire(HOST_PRIV_NULL, map64, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = task_wire(VM_MAP_NULL, false);
	assert(kr != KERN_SUCCESS);
	kr = vm32_task_wire(VM_MAP_NULL, false);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_read(VM_MAP_NULL, alloced_addr32, size32_16kb, &read_data_u, &data_size32);
	assert(kr != KERN_SUCCESS);

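	/*
	 * The read-entry arrays below are fixed-size tables of address/size
	 * pairs (a few KiB each), so heap-allocate them with kalloc_type()
	 * rather than risking a kernel-stack overflow.
	 */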
	mach_vm_read_entry_t *mach_re = kalloc_type(mach_vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	(*mach_re)[0].address = alloced_addr;
	(*mach_re)[0].size = size_16kb;

	vm_read_entry_t *re = kalloc_type(vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	(*re)[0].address = alloced_addr;
	(*re)[0].size = (vm_size_t) size_16kb;

	vm32_read_entry_t *re_32 = kalloc_type(vm32_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	(*re_32)[0].address = (vm32_address_t) alloced_addr;
	(*re_32)[0].size = (vm32_size_t) size_16kb;

	kr = mach_vm_read_list(VM_MAP_NULL, *mach_re, 1);
	assert(kr != KERN_SUCCESS);
	kr = vm_read_list(VM_MAP_NULL, *re, 1);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_read_list(VM_MAP_NULL, *re_32, 1);
	assert(kr != KERN_SUCCESS);

	kfree_type(mach_vm_read_entry_t, mach_re);
	kfree_type(vm_read_entry_t, re);
	kfree_type(vm32_read_entry_t, re_32);

	kr = mach_vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &read_overwrite_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &vm_read_overwrite_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_read_overwrite(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32, &data_size32_u);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
	assert(kr != KERN_SUCCESS);
	kr = vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_copy(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
	assert(kr != KERN_SUCCESS);
	kr = vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_write(VM_MAP_NULL, alloced_addr32, alloced_addr, (mach_msg_type_number_t) size_16kb);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_inherit(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_INHERIT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_protect(VM_MAP_NULL, alloced_addr32, size32_16kb, FALSE, VM_PROT_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_behavior_set(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_BEHAVIOR_DEFAULT);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
	assert(kr != KERN_SUCCESS);
	kr = vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_msync(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_SYNC_ASYNCHRONOUS);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
	assert(kr != KERN_SUCCESS);
	kr = vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_machine_attribute(VM_MAP_NULL, alloced_addr32, size32_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
	assert(kr != KERN_SUCCESS);
	kr = vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_purgable_control(VM_MAP_NULL, alloced_addr32, VM_PURGABLE_PURGE_ALL, &throwaway_state);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_region(VM_MAP_NULL, &throwaway_addr, &throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
	assert(kr != KERN_SUCCESS);
	kr = vm_region(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
	assert(kr != KERN_SUCCESS);
	kr = vm_region_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_region(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_region_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_region_recurse(VM_MAP_NULL, &throwaway_addr, &throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm_region_recurse(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm_region_recurse_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_region_recurse(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_region_recurse_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
	assert(kr != KERN_SUCCESS);

	kr = mach_vm_page_info(VM_MAP_NULL, alloced_addr, VM_PAGE_INFO_BASIC, page_info, &read_data_size);
	assert(kr != KERN_SUCCESS);
	kr = mach_vm_page_query(VM_MAP_NULL, alloced_addr, &throwaway_state, &throwaway_state);
	assert(kr != KERN_SUCCESS);
	kr = vm_map_page_query(VM_MAP_NULL, vm_throwaway_addr, &throwaway_state, &throwaway_state);
	assert(kr != KERN_SUCCESS);
	kr = vm32_vm_map_page_query(VM_MAP_NULL, throwaway_addr32_u, &throwaway_state, &throwaway_state);
	assert(kr != KERN_SUCCESS);

	/*
	 * Clean up our allocations and maps
	 */
	kr = mach_vm_deallocate(map64, alloced_addr, size_16kb);
	assert(kr == KERN_SUCCESS);
	kr = vm32_vm_deallocate(map32, alloced_addr32, size32_16kb);
	assert(kr == KERN_SUCCESS);

	cleanup_map(&map64);
	cleanup_map(&map32);

	/*
	 * If we made it this far without crashing, the test passes.
	 */

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_map_null, vm_map_null_tests);
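/*
 * Like the other tests in this file, this is driven from userspace through
 * the xnupost sysctl harness; on DEVELOPMENT/DEBUG kernels it is expected to
 * appear as a debug.test.vm_map_null sysctl (writing triggers the run and
 * reading returns the value stored in *out).
 */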

#if CONFIG_PROB_GZALLOC
extern vm_offset_t pgz_protect_for_testing_only(zone_t zone, vm_offset_t addr, void *fp);

static int
vm_memory_entry_pgz_test(__unused int64_t in, int64_t *out)
{
	kern_return_t kr;
	ipc_port_t mem_entry_ptr;
	mach_vm_address_t allocation_addr = 0;
	vm_size_t size = PAGE_SIZE;

	allocation_addr = (mach_vm_address_t) kalloc_data(size, Z_WAITOK);
	if (!allocation_addr) {
		*out = -1;
		return 0;
	}

	/*
	 * Make sure we end up with a PGZ-protected address: if the allocation
	 * isn't already protected, try to protect it.
	 */
	if (!pgz_owned(allocation_addr)) {
		zone_id_t zid = zone_id_for_element((void *) allocation_addr, size);
		zone_t zone = &zone_array[zid];
		allocation_addr = pgz_protect_for_testing_only(zone, allocation_addr, __builtin_frame_address(0));
	}
	/*
	 * If the address still isn't protected, tell userspace to skip the test.
	 */
	if (!pgz_owned(allocation_addr)) {
		*out = 2;
		return 0;
	}

	kr = mach_make_memory_entry(kernel_map, &size, (mach_vm_offset_t) allocation_addr, VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_VM_COPY, &mem_entry_ptr, IPC_PORT_NULL);
	assert(kr == KERN_SUCCESS);

	ipc_port_release(mem_entry_ptr);
	kfree_data(allocation_addr, size);

	*out = 1;
	return 0;
}
#else /* CONFIG_PROB_GZALLOC */
static int
vm_memory_entry_pgz_test(__unused int64_t in, int64_t *out)
{
	*out = 1;
	return 0;
}
#endif /* CONFIG_PROB_GZALLOC */

SYSCTL_TEST_REGISTER(vm_memory_entry_pgz, vm_memory_entry_pgz_test);


static int
vm_map_copyio_test(__unused int64_t in, int64_t *out)
{
	/* Test is not supported */
	*out = ENOTSUP;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_map_copyio, vm_map_copyio_test);

static int
vm_page_relocate_test(__unused int64_t in, int64_t *out)
{
	/* Test is not supported */
	*out = ENOTSUP;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_page_relocate, vm_page_relocate_test);

#define PAGE_SHIFT_4K 12
#define PAGE_SHIFT_16K 14
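/* Page sizes expressed as shifts: 1 << 12 = 4 KiB, 1 << 14 = 16 KiB. */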
static int
vm_map_copy_entry_subrange_test(__unused int64_t in, int64_t *out)
{
	mach_vm_size_t size_4kb, size_16kb;
	vm_map_t map_4k, map_16k;
	mach_vm_address_t alloced_addr, mapped_addr;
	mach_vm_size_t entry_size;
	mach_port_t entry_handle;
	mach_vm_size_t mapped_size;
	vm_region_basic_info_data_64_t region_info;
	mach_msg_type_number_t region_info_count;

	kern_return_t kr;

	size_4kb = 4 * 1024;
	size_16kb = 16 * 1024;

	map_4k = create_map(0, vm_compute_max_offset(true));
	kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
	map_16k = create_map(0, vm_compute_max_offset(true));
	kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);

	/*
	 * Test mapping a portion of a copy entry from a 4k map into a 16k one.
	 * The resulting size should be aligned to the destination's page size (16k).
	 */
	// Get a copy entry to map into the system
	kr = mach_vm_allocate(map_4k, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);

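	/*
	 * MAP_MEM_USE_DATA_ADDR makes the entry track the exact (possibly
	 * unaligned) starting address of the data instead of truncating the
	 * offset to a page boundary.
	 */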
	entry_size = size_16kb;
	kr = mach_make_memory_entry_64(map_4k, &entry_size, alloced_addr,
	    MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
	    &entry_handle, MACH_PORT_NULL);
	assert(kr == KERN_SUCCESS);
	assert(entry_size == size_16kb);

	// Attempt to map a portion of the entry into the 16k map
	kr = mach_vm_map(map_16k, &mapped_addr, size_4kb, 0, VM_FLAGS_ANYWHERE,
	    entry_handle, 0, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);

	// Ensure the entry is actually mapped whole
	region_info_count = VM_REGION_BASIC_INFO_COUNT_64;
	kr = mach_vm_region(map_16k, &mapped_addr, &mapped_size, VM_REGION_BASIC_INFO_64,
	    (vm_region_info_t) &region_info, &region_info_count, NULL);
	assert(kr == KERN_SUCCESS);
	assert(mapped_size == entry_size);

	// Cleanup
	mach_memory_entry_port_release(entry_handle);
	kr = mach_vm_deallocate(map_16k, mapped_addr, size_16kb);
	assert(kr == KERN_SUCCESS);
	kr = mach_vm_deallocate(map_4k, alloced_addr, size_16kb);
	assert(kr == KERN_SUCCESS);
	cleanup_map(&map_4k);
	cleanup_map(&map_16k);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_map_copy_entry_subrange, vm_map_copy_entry_subrange_test);


static int
vm_memory_entry_map_size_null_test(__unused int64_t in, int64_t *out)
{
	mach_vm_size_t size_16kb, map_size;
	vm_map_t map;

	kern_return_t kr;

	map = create_map(0, vm_compute_max_offset(true));
	size_16kb = 16 * 1024;

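	/* A null memory-entry port must fail cleanly and report a size of 0. */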
	map_size = 0xdeadbeef;
	kr = mach_memory_entry_map_size(MACH_PORT_NULL, map, 0, size_16kb, &map_size);
	assert(kr == KERN_INVALID_ARGUMENT);
	assert(map_size == 0);

	cleanup_map(&map);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_null, vm_memory_entry_map_size_null_test);

static int
vm_memory_entry_map_size_overflow_tests(__unused int64_t in, int64_t *out)
{
	mach_vm_size_t size_16kb, entry_size, map_size;
	vm_map_t map;
	mach_port_t parent_handle, entry_handle;
	mach_vm_address_t alloced_addr;
	vm_map_offset_t entry_offset;
	memory_object_offset_t maximum_offset;

	kern_return_t kr;

	size_16kb = 16 * 1024;
	map = create_map(0, vm_compute_max_offset(true));
	/*
	 * (1) Attempt to overflow offset + mem_entry->offset
	 */
	// Setup - create an entry with nonzero offset
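	// A fake host port is enough here: mach_memory_object_memory_entry_64
	// only rejects HOST_NULL, so (host_t) 1 gets us past that check.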
	kr = mach_memory_object_memory_entry_64((host_t) 1, 1,
	    size_16kb * 2, VM_PROT_DEFAULT, 0, &parent_handle);
	assert(kr == KERN_SUCCESS);

	entry_size = size_16kb;
	kr = mach_make_memory_entry_64(map, &entry_size, size_16kb,
	    VM_PROT_DEFAULT, &entry_handle, parent_handle);
	assert(kr == KERN_SUCCESS);

	// Pass in maximum offset to attempt overflow
	maximum_offset = (memory_object_offset_t) -1;
	kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
	    &map_size);
	assert(kr == KERN_INVALID_ARGUMENT);

	// Cleanup
	mach_memory_entry_port_release(parent_handle);
	mach_memory_entry_port_release(entry_handle);

	/*
	 * (2) Attempt to overflow offset + mem_entry->data_offset
	 */
	// Setup - create an entry with nonzero data_offset
	kr = mach_vm_allocate(map, &alloced_addr, 2 * size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);

	entry_size = size_16kb;
	entry_offset = alloced_addr + (size_16kb / 2);
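	// The half-page misalignment above is what presumably ends up in the
	// entry's data_offset once MAP_MEM_USE_DATA_ADDR is applied below.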
	kr = mach_make_memory_entry_64(map, &entry_size, entry_offset,
	    MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
	    &entry_handle, MACH_PORT_NULL);
	assert(kr == KERN_SUCCESS);

	// Pass in maximum offset to attempt overflow
	kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
	    &map_size);
	assert(kr == KERN_INVALID_ARGUMENT);

	// Cleanup
	mach_memory_entry_port_release(entry_handle);
	kr = mach_vm_deallocate(map, alloced_addr, 2 * size_16kb);
	assert(kr == KERN_SUCCESS);
	cleanup_map(&map);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_overflow, vm_memory_entry_map_size_overflow_tests);

static int
vm_memory_entry_map_size_copy_tests(__unused int64_t in, int64_t *out)
{
	mach_vm_size_t size_2kb, size_4kb, size_16kb;
	mach_vm_size_t entry_size_4k, entry_size_16k;
	mach_vm_size_t map_size;
	vm_map_t map_4k, map_16k;
	mach_port_t entry_4k, entry_16k;
	mach_vm_address_t alloced_addr_4k, alloced_addr_16k;

	kern_return_t kr;

	size_2kb = 2 * 1024;
	size_4kb = 4 * 1024;
	size_16kb = 16 * 1024;

	/*
	 * Setup - initialize maps and create copy entries for each
	 */
	// 4k map and entry
	map_4k = create_map(0, vm_compute_max_offset(true));
	kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
	assert(kr == KERN_SUCCESS);

	kr = mach_vm_allocate(map_4k, &alloced_addr_4k, size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);

	entry_size_4k = size_16kb;
	kr = mach_make_memory_entry_64(map_4k, &entry_size_4k, alloced_addr_4k,
	    MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_4k, MACH_PORT_NULL);
	assert(kr == KERN_SUCCESS);
	assert(entry_size_4k == size_16kb);

	// 16k map and entry
	map_16k = create_map(0, vm_compute_max_offset(true));
	kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);
	assert(kr == KERN_SUCCESS);

	kr = mach_vm_allocate(map_16k, &alloced_addr_16k, size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);

	entry_size_16k = size_16kb;
	kr = mach_make_memory_entry_64(map_16k, &entry_size_16k, alloced_addr_16k,
	    MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_16k, MACH_PORT_NULL);
	assert(kr == KERN_SUCCESS);
	assert(entry_size_16k == size_16kb);

	/*
	 * (1) Test 4k map with 4k entry and 16k map with 16k entry. Page-aligned
	 * ranges should have no size adjustment.
	 */
	for (mach_vm_size_t i = 1; i <= 4; i++) {
		kr = mach_memory_entry_map_size(entry_4k, map_4k, 0, i * size_4kb, &map_size);
		assert(kr == KERN_SUCCESS);
		assert(map_size == (i * size_4kb));
	}
	kr = mach_memory_entry_map_size(entry_16k, map_16k, 0, size_16kb, &map_size);
	assert(kr == KERN_SUCCESS);
	assert(map_size == size_16kb);

	/*
	 * (2) Test 4k map with 16k entry. Since we have a 4k map, we should be able
	 * to map a 4k range of the entry, but to map a 2k range we will need to map
	 * a full 4k page.
	 */
	kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_16kb, &map_size);
	assert(kr == KERN_SUCCESS);
	assert(map_size == size_16kb);
	kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_4kb, &map_size);
	assert(kr == KERN_SUCCESS);
	assert(map_size == size_4kb);
	kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_2kb, &map_size);
	assert(kr == KERN_SUCCESS);
	assert(map_size == size_4kb);

	/*
	 * (3) Test 16k map with 4k entry. Since we have a 16k map, we will need to
	 * map the whole 16kb memory entry even if a smaller range is requested.
	 */
	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_16kb, &map_size);
	assert(kr == KERN_SUCCESS);
	assert(map_size == size_16kb);
	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_4kb, &map_size);
	assert(kr == KERN_SUCCESS);
	assert(map_size == size_16kb);
	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_2kb, &map_size);
	assert(kr == KERN_SUCCESS);
	assert(map_size == size_16kb);

	/*
	 * (4) Expect an error when the requested size is larger than the entry.
	 */
	map_size = 0xdeadbeef;
	kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, 2 * size_16kb, &map_size);
	assert(kr == KERN_INVALID_ARGUMENT);
	assert(map_size == 0);

	/*
	 * Clean up memory entries, allocations, and maps
	 */
	mach_memory_entry_port_release(entry_4k);
	mach_memory_entry_port_release(entry_16k);
	kr = mach_vm_deallocate(map_4k, alloced_addr_4k, size_16kb);
	assert(kr == KERN_SUCCESS);
	kr = mach_vm_deallocate(map_16k, alloced_addr_16k, size_16kb);
	assert(kr == KERN_SUCCESS);
	cleanup_map(&map_4k);
	cleanup_map(&map_16k);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_copy, vm_memory_entry_map_size_copy_tests);

static int
vm_memory_entry_parent_submap_tests(__unused int64_t in, int64_t *out)
{
	vm_shared_region_t shared_region;
	mach_port_t parent_handle, entry_handle;
	vm_named_entry_t parent_entry;
	mach_vm_size_t entry_size;
	vm_prot_t vmflags;

	kern_return_t kr;

	/*
	 * Use the shared region to get a named entry that refers to a submap
	 */
	shared_region = vm_shared_region_get(current_task());
	parent_handle = shared_region->sr_mem_entry;
	assert(parent_handle != NULL);
	parent_entry = mach_memory_entry_from_port(parent_handle);
	assert(parent_entry->is_sub_map);

	/*
	 * We should be able to create an entry using the submap entry as the parent
	 */
	entry_size = parent_entry->size;
	vmflags = VM_PROT_DEFAULT;
	kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
	    &entry_handle, parent_handle);
	assert(kr == KERN_SUCCESS);
	mach_memory_entry_port_release(entry_handle);

	/*
	 * This should fail in mach_make_memory_entry_mem_only() since the
	 * parent entry is a submap, not a VM object.
	 */
	vmflags |= MAP_MEM_ONLY;
	kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
	    &entry_handle, parent_handle);
	assert(kr == KERN_INVALID_ARGUMENT);

	/*
	 * Cleanup
	 */
	vm_shared_region_deallocate(shared_region);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_memory_entry_parent_submap, vm_memory_entry_parent_submap_tests);

static int
vm_cpu_map_pageout_test(int64_t in, int64_t *out)
{
	/* Test is not supported */
	(void)in;
	*out = ENOTSUP;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_cpu_map_pageout, vm_cpu_map_pageout_test);

static int
vm_get_wimg_mode(int64_t in, int64_t *out)
{
	mach_vm_offset_t addr = (mach_vm_offset_t)in;
	vm_map_entry_t entry;
	vm_map_t map = current_map();
	vm_map_lock_read(map);
	bool map_contains_addr = vm_map_lookup_entry(map, addr, &entry);
	if (!map_contains_addr) {
		vm_map_unlock_read(map);
		return EINVAL;
	}

	if (entry->is_sub_map) {
		vm_map_unlock_read(map);
		return ENOTSUP;
	}

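	/*
	 * wimg_bits hold the object's cache-attribute mode (the WIMG bits:
	 * write-through/write-back, cache-inhibited, memory-coherent, guarded).
	 */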
	vm_object_t obj = VME_OBJECT(entry);
	*out = obj->wimg_bits;

	vm_map_unlock_read(map);
	return 0;
}
SYSCTL_TEST_REGISTER(vm_get_wimg_mode, vm_get_wimg_mode);
