1 /*
2 * Copyright (c) 2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_assert.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/memory_object.h>
33 #include <mach/vm_map.h>
34 #include <mach/vm32_map_server.h>
35 #include <mach/mach_host.h>
36 #include <mach/host_priv.h>
37
38 #include <kern/ledger.h>
39 #include <kern/host.h>
40
41 #include <device/device_port.h>
42 #include <vm/memory_object_internal.h>
43 #include <vm/vm_fault.h>
44 #include <vm/vm_map_internal.h>
45 #include <vm/vm_object_internal.h>
46 #include <vm/vm_pageout_xnu.h>
47 #include <vm/vm_protos.h>
48 #include <vm/vm_memtag.h>
49 #include <vm/vm_memory_entry_xnu.h>
50 #include <vm/vm_kern_xnu.h>
51 #include <vm/vm_iokit.h>
52 #include <vm/vm_page_internal.h>
53 #include <vm/vm_shared_region_xnu.h>
54
55 #include <kern/zalloc.h>
56 #include <kern/zalloc_internal.h>
57
58 #include <mach/mach_vm.h>
59
60 #include <sys/errno.h> /* for the sysctl tests */
61
62 #include <tests/xnupost.h> /* for testing-related functions and macros */
63
64 extern ledger_template_t task_ledger_template;
65
66 extern kern_return_t
67 vm_map_copy_adjust_to_target(
68 vm_map_copy_t copy_map,
69 vm_map_offset_t offset,
70 vm_map_size_t size,
71 vm_map_t target_map,
72 boolean_t copy,
73 vm_map_copy_t *target_copy_map_p,
74 vm_map_offset_t *overmap_start_p,
75 vm_map_offset_t *overmap_end_p,
76 vm_map_offset_t *trimmed_start_p);
77
78 #define VM_TEST_COLLAPSE_COMPRESSOR 0
79 #define VM_TEST_WIRE_AND_EXTRACT 0
80 #define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
81 #if __arm64__
82 #define VM_TEST_KERNEL_OBJECT_FAULT 0
83 #endif /* __arm64__ */
84 #define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
85
86 #if VM_TEST_COLLAPSE_COMPRESSOR
87 extern boolean_t vm_object_collapse_compressor_allowed;
88 #include <IOKit/IOLib.h>
89 static void
vm_test_collapse_compressor(void)90 vm_test_collapse_compressor(void)
91 {
92 vm_object_size_t backing_size, top_size;
93 vm_object_t backing_object, top_object;
94 vm_map_offset_t backing_offset, top_offset;
95 unsigned char *backing_address, *top_address;
96 kern_return_t kr;
97
98 printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");
99
100 /* create backing object */
101 backing_size = 15 * PAGE_SIZE;
102 backing_object = vm_object_allocate(backing_size);
103 assert(backing_object != VM_OBJECT_NULL);
104 printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
105 backing_object);
106 /* map backing object */
107 backing_offset = 0;
108 kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
109 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
110 backing_object, 0, FALSE,
111 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
112 assert(kr == KERN_SUCCESS);
113 backing_address = (unsigned char *) backing_offset;
114 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
115 "mapped backing object %p at 0x%llx\n",
116 backing_object, (uint64_t) backing_offset);
117 /* populate with pages to be compressed in backing object */
118 backing_address[0x1 * PAGE_SIZE] = 0xB1;
119 backing_address[0x4 * PAGE_SIZE] = 0xB4;
120 backing_address[0x7 * PAGE_SIZE] = 0xB7;
121 backing_address[0xa * PAGE_SIZE] = 0xBA;
122 backing_address[0xd * PAGE_SIZE] = 0xBD;
123 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
124 "populated pages to be compressed in "
125 "backing_object %p\n", backing_object);
126 /* compress backing object */
127 vm_object_pageout(backing_object);
128 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
129 backing_object);
130 /* wait for all the pages to be gone */
131 while (*(volatile int *)&backing_object->resident_page_count != 0) {
132 IODelay(10);
133 }
134 printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
135 backing_object);
136 /* populate with pages to be resident in backing object */
137 backing_address[0x0 * PAGE_SIZE] = 0xB0;
138 backing_address[0x3 * PAGE_SIZE] = 0xB3;
139 backing_address[0x6 * PAGE_SIZE] = 0xB6;
140 backing_address[0x9 * PAGE_SIZE] = 0xB9;
141 backing_address[0xc * PAGE_SIZE] = 0xBC;
142 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
143 "populated pages to be resident in "
144 "backing_object %p\n", backing_object);
145 /* leave the other pages absent */
146 /* mess with the paging_offset of the backing_object */
147 assert(backing_object->paging_offset == 0);
148 backing_object->paging_offset = 3 * PAGE_SIZE;
149
150 /* create top object */
151 top_size = 9 * PAGE_SIZE;
152 top_object = vm_object_allocate(top_size);
153 assert(top_object != VM_OBJECT_NULL);
154 printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
155 top_object);
156 /* map top object */
157 top_offset = 0;
158 kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
159 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
160 top_object, 0, FALSE,
161 VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
162 assert(kr == KERN_SUCCESS);
163 top_address = (unsigned char *) top_offset;
164 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
165 "mapped top object %p at 0x%llx\n",
166 top_object, (uint64_t) top_offset);
167 /* populate with pages to be compressed in top object */
168 top_address[0x3 * PAGE_SIZE] = 0xA3;
169 top_address[0x4 * PAGE_SIZE] = 0xA4;
170 top_address[0x5 * PAGE_SIZE] = 0xA5;
171 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
172 "populated pages to be compressed in "
173 "top_object %p\n", top_object);
174 /* compress top object */
175 vm_object_pageout(top_object);
176 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
177 top_object);
178 /* wait for all the pages to be gone */
179 while (top_object->resident_page_count != 0) {
180 IODelay(10);
181 }
182 printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
183 top_object);
184 /* populate with pages to be resident in top object */
185 top_address[0x0 * PAGE_SIZE] = 0xA0;
186 top_address[0x1 * PAGE_SIZE] = 0xA1;
187 top_address[0x2 * PAGE_SIZE] = 0xA2;
188 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
189 "populated pages to be resident in "
190 "top_object %p\n", top_object);
191 /* leave the other pages absent */
192
193 /* link the 2 objects */
194 vm_object_reference(backing_object);
195 top_object->shadow = backing_object;
196 top_object->vo_shadow_offset = 3 * PAGE_SIZE;
197 printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
198 top_object, backing_object);
199
200 /* unmap backing object */
201 vm_map_remove(kernel_map,
202 backing_offset,
203 backing_offset + backing_size,
204 VM_MAP_REMOVE_NO_FLAGS);
205 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
206 "unmapped backing_object %p [0x%llx:0x%llx]\n",
207 backing_object,
208 (uint64_t) backing_offset,
209 (uint64_t) (backing_offset + backing_size));
210
211 /* collapse */
212 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
213 vm_object_lock(top_object);
214 vm_object_collapse(top_object, 0, FALSE);
215 vm_object_unlock(top_object);
216 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);
217
218 /* did it work? */
219 if (top_object->shadow != VM_OBJECT_NULL) {
220 printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
221 printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
222 if (vm_object_collapse_compressor_allowed) {
223 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
224 }
225 } else {
226 /* check the contents of the mapping */
227 unsigned char expect[9] =
228 { 0xA0, 0xA1, 0xA2, /* resident in top */
229 0xA3, 0xA4, 0xA5, /* compressed in top */
230 0xB9, /* resident in backing + shadow_offset */
231 0xBD, /* compressed in backing + shadow_offset + paging_offset */
232 0x00 }; /* absent in both */
233 unsigned char actual[9];
234 unsigned int i, errors;
235
236 errors = 0;
237 for (i = 0; i < sizeof(actual); i++) {
238 actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
239 if (actual[i] != expect[i]) {
240 errors++;
241 }
242 }
243 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
244 "actual [%x %x %x %x %x %x %x %x %x] "
245 "expect [%x %x %x %x %x %x %x %x %x] "
246 "%d errors\n",
247 actual[0], actual[1], actual[2], actual[3],
248 actual[4], actual[5], actual[6], actual[7],
249 actual[8],
250 expect[0], expect[1], expect[2], expect[3],
251 expect[4], expect[5], expect[6], expect[7],
252 expect[8],
253 errors);
254 if (errors) {
255 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL");
256 } else {
257 printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
258 }
259 }
260 }
261 #else /* VM_TEST_COLLAPSE_COMPRESSOR */
262 #define vm_test_collapse_compressor()
263 #endif /* VM_TEST_COLLAPSE_COMPRESSOR */
264
265 #if VM_TEST_WIRE_AND_EXTRACT
266 extern ppnum_t vm_map_get_phys_page(vm_map_t map,
267 vm_offset_t offset);
/*
 * VM_TEST_WIRE_AND_EXTRACT:
 * Create a 64-bit "user" map (with its own pmap) and a pmap-less "wire"
 * map, allocate a 0x10000-byte region in the user map, share it into the
 * wire map via mach_vm_remap(), then wire every page through the wire
 * map and check that the physical page returned by
 * vm_map_wire_and_extract() matches what vm_map_get_phys_page() reports
 * for the corresponding user address.  Re-wires the last page to check
 * that wiring an already-wired page behaves the same.
 * Panics on any mismatch; prints PASS on success.
 *
 * Note: the ledger, pmap and both maps are never torn down — this is a
 * one-shot boot-time test.
 */
static void
vm_test_wire_and_extract(void)
{
	ledger_t        ledger;
	vm_map_t        user_map, wire_map;
	mach_vm_address_t       user_addr, wire_addr;
	mach_vm_size_t  user_size, wire_size;
	mach_vm_offset_t        cur_offset;
	vm_prot_t       cur_prot, max_prot;
	ppnum_t         user_ppnum, wire_ppnum;
	kern_return_t   kr;

	/* the user map gets a real 64-bit pmap so pages can be entered */
	ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);
	pmap_t user_pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
	assert(user_pmap);
	user_map = vm_map_create_options(user_pmap,
	    0x100000000ULL,
	    0x200000000ULL,
	    VM_MAP_CREATE_PAGEABLE);
	/* the wire map has no pmap: wiring goes through the VM object only */
	wire_map = vm_map_create_options(NULL,
	    0x100000000ULL,
	    0x200000000ULL,
	    VM_MAP_CREATE_PAGEABLE);
	/* allocate a region in the user map */
	user_addr = 0;
	user_size = 0x10000;
	kr = mach_vm_allocate(user_map,
	    &user_addr,
	    user_size,
	    VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);
	/* share (copy=FALSE) the user region into the wire map */
	wire_addr = 0;
	wire_size = user_size;
	kr = mach_vm_remap(wire_map,
	    &wire_addr,
	    wire_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    user_map,
	    user_addr,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_NONE);
	assert(kr == KERN_SUCCESS);
	/*
	 * wire each page via the wire map and verify the extracted physical
	 * page matches the user map's view of the same offset
	 */
	for (cur_offset = 0;
	    cur_offset < wire_size;
	    cur_offset += PAGE_SIZE) {
		kr = vm_map_wire_and_extract(wire_map,
		    wire_addr + cur_offset,
		    VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
		    TRUE,
		    &wire_ppnum);
		assert(kr == KERN_SUCCESS);
		user_ppnum = vm_map_get_phys_page(user_map,
		    user_addr + cur_offset);
		printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
		    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
		    kr,
		    user_map, user_addr + cur_offset, user_ppnum,
		    wire_map, wire_addr + cur_offset, wire_ppnum);
		if (kr != KERN_SUCCESS ||
		    wire_ppnum == 0 ||
		    wire_ppnum != user_ppnum) {
			panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
		}
	}
	/* re-wire the last page: must still succeed and return the same page */
	cur_offset -= PAGE_SIZE;
	kr = vm_map_wire_and_extract(wire_map,
	    wire_addr + cur_offset,
	    VM_PROT_DEFAULT,
	    TRUE,
	    &wire_ppnum);
	assert(kr == KERN_SUCCESS);
	printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
	    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
	    kr,
	    user_map, user_addr + cur_offset, user_ppnum,
	    wire_map, wire_addr + cur_offset, wire_ppnum);
	if (kr != KERN_SUCCESS ||
	    wire_ppnum == 0 ||
	    wire_ppnum != user_ppnum) {
		panic("VM_TEST_WIRE_AND_EXTRACT: FAIL");
	}

	printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
355 #else /* VM_TEST_WIRE_AND_EXTRACT */
356 #define vm_test_wire_and_extract()
357 #endif /* VM_TEST_WIRE_AND_EXTRACT */
358
359 #if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
360 static void
vm_test_page_wire_overflow_panic(void)361 vm_test_page_wire_overflow_panic(void)
362 {
363 vm_object_t object;
364 vm_page_t page;
365
366 printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");
367
368 object = vm_object_allocate(PAGE_SIZE);
369 vm_object_lock(object);
370 page = vm_page_alloc(object, 0x0);
371 vm_page_lock_queues();
372 do {
373 vm_page_wire(page, 1, FALSE);
374 } while (page->wire_count != 0);
375 vm_page_unlock_queues();
376 vm_object_unlock(object);
377 panic("FBDP(%p,%p): wire_count overflow not detected",
378 object, page);
379 }
380 #else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
381 #define vm_test_page_wire_overflow_panic()
382 #endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
383
384 #if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
385 extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
/*
 * VM_TEST_KERNEL_OBJECT_FAULT:
 * Allocate a guarded kernel-object stack region and call copyinframe()
 * at its base.  With KMA_GUARD_FIRST the first page of the allocation
 * is a guard page, so the copy is expected to fault and return non-zero
 * (PASS); a zero return means the fault was wrongly satisfied (FAIL).
 */
static void
vm_test_kernel_object_fault(void)
{
	vm_offset_t stack;
	uintptr_t frameb[2];
	int ret;

	/* stack-style allocation with guard pages at both ends */
	kmem_alloc(kernel_map, &stack,
	    kernel_stack_size + ptoa(2),
	    KMA_NOFAIL | KMA_KSTACK | KMA_KOBJECT |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST,
	    VM_KERN_MEMORY_STACK);

	/* `stack` points at the leading guard page; this should fail */
	ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
	if (ret != 0) {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
	} else {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
	}

	kmem_free(kernel_map, stack, kernel_stack_size + ptoa(2));
	stack = 0;
}
409 #else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
410 #define vm_test_kernel_object_fault()
411 #endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
412
413 #if VM_TEST_DEVICE_PAGER_TRANSPOSE
/*
 * VM_TEST_DEVICE_PAGER_TRANSPOSE:
 * Create an anonymous VM object and a device-pager-backed VM object of
 * the same size, block activity on both, then vm_object_transpose()
 * them and verify that the "named" flag and the reference counts have
 * swapped sides as expected.  Asserts throughout; prints PASS at the
 * end.  Only built on DEVELOPMENT || DEBUG kernels.
 */
static void
vm_test_device_pager_transpose(void)
{
	memory_object_t device_pager;
	vm_object_t     anon_object, device_object;
	vm_size_t       size;
	vm_map_offset_t device_mapping;
	kern_return_t   kr;

	size = 3 * PAGE_SIZE;
	anon_object = vm_object_allocate(size);
	assert(anon_object != VM_OBJECT_NULL);
	device_pager = device_pager_setup(NULL, 0, size, 0);
	assert(device_pager != NULL);
	device_object = memory_object_to_vm_object(device_pager);
	assert(device_object != VM_OBJECT_NULL);
#if 0
	/*
	 * Can't actually map this, since another thread might do a
	 * vm_map_enter() that gets coalesced into this object, which
	 * would cause the test to fail.
	 */
	vm_map_offset_t anon_mapping = 0;
	kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
#endif
	/* map the device object into the kernel map */
	device_mapping = 0;
	kr = mach_vm_map_kernel(kernel_map,
	    vm_sanitize_wrap_addr_ref(&device_mapping),
	    size,
	    0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
	    (void *)device_pager,
	    0,
	    FALSE,
	    VM_PROT_DEFAULT,
	    VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	/* the mapping now holds its own reference on the pager */
	memory_object_deallocate(device_pager);

	/*
	 * block activity on both objects so nothing else interferes
	 * while they are being transposed
	 */
	vm_object_lock(anon_object);
	vm_object_activity_begin(anon_object);
	anon_object->blocked_access = TRUE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_begin(device_object);
	device_object->blocked_access = TRUE;
	vm_object_unlock(device_object);

	/* pre-transpose state: only the device object is named/mapped */
	assert(os_ref_get_count_raw(&anon_object->ref_count) == 1);
	assert(!anon_object->named);
	assert(os_ref_get_count_raw(&device_object->ref_count) == 2);
	assert(device_object->named);

	kr = vm_object_transpose(device_object, anon_object, size);
	assert(kr == KERN_SUCCESS);

	/* unblock both objects */
	vm_object_lock(anon_object);
	vm_object_activity_end(anon_object);
	anon_object->blocked_access = FALSE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_end(device_object);
	device_object->blocked_access = FALSE;
	vm_object_unlock(device_object);

	/* post-transpose: the named-ness and extra ref moved to anon_object */
	assert(os_ref_get_count_raw(&anon_object->ref_count) == 2);
	assert(anon_object->named);
#if 0
	kr = vm_deallocate(kernel_map, anon_mapping, size);
	assert(kr == KERN_SUCCESS);
#endif
	assert(os_ref_get_count_raw(&device_object->ref_count) == 1);
	assert(!device_object->named);
	kr = vm_deallocate(kernel_map, device_mapping, size);
	assert(kr == KERN_SUCCESS);

	printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
497 #else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
498 #define vm_test_device_pager_transpose()
499 #endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
500
501 extern kern_return_t vm_allocate_external(vm_map_t map,
502 vm_offset_t *addr,
503 vm_size_t size,
504 int flags);
505 extern kern_return_t vm_remap_external(vm_map_t target_map,
506 vm_offset_t *address,
507 vm_size_t size,
508 vm_offset_t mask,
509 int flags,
510 vm_map_t src_map,
511 vm_offset_t memory_address,
512 boolean_t copy,
513 vm_prot_t *cur_protection,
514 vm_prot_t *max_protection,
515 vm_inherit_t inheritance);
516 #if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
517 extern int debug4k_panic_on_misaligned_sharing;
518 void vm_test_4k(void);
/*
 * VM_TEST_4K:
 * Exercise a forced-4K-page VM map (PMAP_CREATE_FORCE_4K_PAGES) on a
 * 16K-page kernel: sequential allocations land at the expected 4K
 * addresses, vm_protect/vm_fault honor 4K granularity, 4K-aligned
 * deallocation works, and vm_remap() between the 4K map and the 16K
 * kernel map behaves correctly in both shared and copy modes —
 * including the expected failure for misaligned sharing.
 * Uses assertf() throughout; prints PASS at the end.
 *
 * NOTE(review): the test relies on the exact address layout produced by
 * successive ANYWHERE allocations starting at 0x1000; it never tears
 * the map down (see the #if 00 block at the end), so it is a one-shot
 * boot-time test.
 */
void
vm_test_4k(void)
{
	pmap_t test_pmap;
	vm_map_t test_map;
	kern_return_t kr;
	vm_address_t expected_addr;
	vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr;
	vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr;
	vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size;
	vm_size_t alloc5_size, remap_src_size;
	vm_address_t fault_addr;
	vm_prot_t cur_prot, max_prot;
	int saved_debug4k_panic_on_misaligned_sharing;

	/* create a map whose pmap is forced to 4K pages */
	printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__);
	test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
	assert(test_pmap != NULL);
	test_map = vm_map_create_options(test_pmap,
	    MACH_VM_MIN_ADDRESS,
	    MACH_VM_MAX_ADDRESS,
	    VM_MAP_CREATE_PAGEABLE);
	assert(test_map != VM_MAP_NULL);
	vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT);
	printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map));

	/* alloc1: first ANYWHERE allocation is expected at 0x1000 */
	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	/* deallocate it, then re-allocate: must come back at the same spot */
	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_deallocate(test_map, alloc1_addr, alloc1_size);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);

	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	/* alloc2..alloc4: consecutive allocations at consecutive addresses */
	alloc2_addr = 0;
	alloc2_size = 3 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size);
	kr = vm_allocate_external(test_map,
	    &alloc2_addr,
	    alloc2_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr);
	expected_addr += alloc2_size;

	alloc3_addr = 0;
	alloc3_size = 18 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size);
	kr = vm_allocate_external(test_map,
	    &alloc3_addr,
	    alloc3_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
	expected_addr += alloc3_size;

	alloc4_addr = 0;
	alloc4_size = 1 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size);
	kr = vm_allocate_external(test_map,
	    &alloc4_addr,
	    alloc4_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
	expected_addr += alloc4_size;

	/* make the first 4K page of alloc2 read-only */
	printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE));
	kr = vm_protect(test_map,
	    alloc2_addr,
	    (1UL * FOURK_PAGE_SIZE),
	    FALSE,
	    VM_PROT_READ);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);

	/*
	 * fault on every 4K page from alloc1 through past the end of
	 * alloc4: writes succeed everywhere except the read-only page
	 * (protection failure, but a read fault succeeds) and the pages
	 * beyond alloc4 (invalid address)
	 */
	for (fault_addr = alloc1_addr;
	    fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE);
	    fault_addr += FOURK_PAGE_SIZE) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		if (fault_addr == alloc2_addr) {
			assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_PROTECTION_FAILURE);
			printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr);
			kr = vm_fault(test_map,
			    fault_addr,
			    VM_PROT_READ,
			    FALSE,
			    VM_KERN_MEMORY_NONE,
			    THREAD_UNINT,
			    NULL,
			    0);
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
			printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		} else if (fault_addr >= alloc4_addr + alloc4_size) {
			assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS);
		} else {
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
		}
	}

	/* alloc5, then punch a 4K hole in it at a 16K+4K-aligned address */
	alloc5_addr = 0;
	alloc5_size = 7 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size);
	kr = vm_allocate_external(test_map,
	    &alloc5_addr,
	    alloc5_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr);
	expected_addr += alloc5_size;

	dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_SHIFT);
	dealloc_addr += FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE);
	kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);

	/* remap (shared) within the 4K map and write-fault the result */
	remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_SHIFT);
	remap_src_addr += FOURK_PAGE_SIZE;
	remap_src_size = 2 * FOURK_PAGE_SIZE;
	remap_dst_addr = 0;
	printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr);
	kr = vm_remap_external(test_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE,
	    test_map,
	    remap_src_addr,
	    FALSE,         /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr);
	expected_addr += remap_src_size;

	for (fault_addr = remap_dst_addr;
	    fault_addr < remap_dst_addr + remap_src_size;
	    fault_addr += 4096) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
	}

	/* remap (shared) from the 4K map into the 16K kernel map */
	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,         /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	/* same remap, but with copy=TRUE (COW) */
	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,         /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	/*
	 * misaligned (non-16K) shared remap into the 16K kernel map must
	 * fail; temporarily disable the debug panic so we get the error
	 * code instead of panicking
	 */
	printf("VM_TEST_4K:\n");
	saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing;
	debug4k_panic_on_misaligned_sharing = 0;
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,         /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
	debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing;

	/* the same misaligned remap with copy=TRUE is allowed */
	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,         /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
#if 000
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#else /* 000 */
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#endif /* 000 */


#if 00
	printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset);
	vm_map_remove(test_map, test_map->min_offset, test_map->max_offset);
#endif

	printf("VM_TEST_4K: PASS\n\n\n\n");
}
801 #endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
802
803 #if MACH_ASSERT
/*
 * vm_test_map_copy_adjust_to_target_one:
 * Run vm_map_copy_adjust_to_target() on one (copy_map, target_map)
 * combination.  The request starts 0xfff bytes into the copy and asks
 * for 0x1002 bytes, so although the size is just over one 4K page, the
 * range straddles three 4K pages — the adjusted copy must come back
 * page-aligned with size 0x3000 and no overmap/trim.
 * The numbered comments below are planned cases not yet implemented.
 */
static void
vm_test_map_copy_adjust_to_target_one(
	vm_map_copy_t copy_map,
	vm_map_t target_map)
{
	kern_return_t kr;
	vm_map_copy_t target_copy;
	vm_map_offset_t overmap_start, overmap_end, trimmed_start;

	target_copy = VM_MAP_COPY_NULL;
	/* size is 2 (4k) pages but range covers 3 pages */
	kr = vm_map_copy_adjust_to_target(copy_map,
	    0x0 + 0xfff,
	    0x1002,
	    target_map,
	    FALSE,
	    &target_copy,
	    &overmap_start,
	    &overmap_end,
	    &trimmed_start);
	assert(kr == KERN_SUCCESS);
	assert(overmap_start == 0);
	assert(overmap_end == 0);
	assert(trimmed_start == 0);
	assertf(target_copy->size == 0x3000,
	    "target_copy %p size 0x%llx\n",
	    target_copy, (uint64_t)target_copy->size);
	vm_map_copy_discard(target_copy);

	/* 1. adjust_to_target() for bad offset -> error */
	/* 2. adjust_to_target() for bad size -> error */
	/* 3. adjust_to_target() for the whole thing -> unchanged */
	/* 4. adjust_to_target() to trim start by less than 1 page */
	/* 5. adjust_to_target() to trim end by less than 1 page */
	/* 6. adjust_to_target() to trim start and end by less than 1 page */
	/* 7. adjust_to_target() to trim start by more than 1 page */
	/* 8. adjust_to_target() to trim end by more than 1 page */
	/* 9. adjust_to_target() to trim start and end by more than 1 page */
	/* 10. adjust_to_target() to trim start by more than 1 entry */
	/* 11. adjust_to_target() to trim start by more than 1 entry */
	/* 12. adjust_to_target() to trim start and end by more than 1 entry */
	/* 13. adjust_to_target() to trim start and end down to 1 entry */
}
847
/*
 * Exercise vm_map_copy_adjust_to_target() with copy maps and target maps
 * of differing page sizes (4k vs 16k), plus a regression test for
 * <rdar://60959809> (memory entry sizing with MAP_MEM_USE_DATA_ADDR).
 * Runs only under MACH_ASSERT; verifies refcounts on teardown.
 */
static void
vm_test_map_copy_adjust_to_target(void)
{
	kern_return_t kr;
	vm_map_t map4k, map16k;
	vm_object_t obj1, obj2, obj3, obj4;
	vm_map_offset_t addr4k, addr16k;
	vm_map_size_t size4k, size16k;
	vm_map_copy_t copy4k, copy16k;
	vm_prot_t curprot, maxprot;
	vm_map_kernel_flags_t vmk_flags;

	/* create a 4k map */
	map4k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
	    VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(map4k, 12);	/* 1 << 12 == 4k pages */

	/* create a 16k map */
	map16k = vm_map_create_options(PMAP_NULL, 0, (uint32_t)-1,
	    VM_MAP_CREATE_PAGEABLE);
	vm_map_set_page_shift(map16k, 14);	/* 1 << 14 == 16k pages */

	/* create 4 VM objects */
	obj1 = vm_object_allocate(0x100000);
	obj2 = vm_object_allocate(0x100000);
	obj3 = vm_object_allocate(0x100000);
	obj4 = vm_object_allocate(0x100000);

	/* map objects in 4k map */
	vm_object_reference(obj1);
	addr4k = 0x1000;
	size4k = 0x3000;
	kr = vm_map_enter(map4k, &addr4k, size4k, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	/* ANYWHERE should still honor the requested hint here */
	assert(addr4k == 0x1000);

	/* map objects in 16k map */
	vm_object_reference(obj1);
	addr16k = 0x4000;
	size16k = 0x8000;
	kr = vm_map_enter(map16k, &addr16k, size16k, 0,
	    VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(), obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	assert(addr16k == 0x4000);

	/* test for <rdar://60959809> */
	ipc_port_t mem_entry;
	memory_object_size_t mem_entry_size;
	mach_vm_size_t map_size;
	/*
	 * Make a memory entry at a page-unaligned address in the 16k map.
	 * NOTE(review): the 0x5001 / 0x3000 expectations below come from
	 * the original assertions; presumably MAP_MEM_USE_DATA_ADDR makes
	 * the entry account for the sub-page start offset — confirm against
	 * the mach_make_memory_entry_64() rounding rules before changing.
	 */
	mem_entry_size = 0x1002;
	mem_entry = IPC_PORT_NULL;
	kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff,
	    MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ,
	    &mem_entry, IPC_PORT_NULL);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size);
	/* mapping 0x1002 bytes of that entry in a 4k map needs 3 4k pages */
	map_size = 0;
	kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size);
	mach_memory_entry_port_release(mem_entry);

	/* use legacy remap semantics for the extractions below */
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_remap_legacy_mode = true;

	/* create 4k copy map */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map4k, addr4k, 0x3000,
	    FALSE, &copy4k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, vmk_flags);
	assert(kr == KERN_SUCCESS);
	assert(copy4k->size == 0x3000);

	/* create 16k copy map */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map16k, addr16k, 0x4000,
	    FALSE, &copy16k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, vmk_flags);
	assert(kr == KERN_SUCCESS);
	assert(copy16k->size == 0x4000);

	/* test each combination */
	/* (only the 16k-copy-into-4k-map combination is currently enabled) */
	// vm_test_map_copy_adjust_to_target_one(copy4k, map4k);
	// vm_test_map_copy_adjust_to_target_one(copy16k, map16k);
	// vm_test_map_copy_adjust_to_target_one(copy4k, map16k);
	vm_test_map_copy_adjust_to_target_one(copy16k, map4k);

	/* assert 1 ref on 4k map */
	assert(os_ref_get_count_raw(&map4k->map_refcnt) == 1);
	/* release 4k map */
	vm_map_deallocate(map4k);
	/* assert 1 ref on 16k map */
	assert(os_ref_get_count_raw(&map16k->map_refcnt) == 1);
	/* release 16k map */
	vm_map_deallocate(map16k);
	/* deallocate copy maps */
	vm_map_copy_discard(copy4k);
	vm_map_copy_discard(copy16k);
	/* assert 1 ref on all VM objects (no refs leaked by the test) */
	assert(os_ref_get_count_raw(&obj1->ref_count) == 1);
	assert(os_ref_get_count_raw(&obj2->ref_count) == 1);
	assert(os_ref_get_count_raw(&obj3->ref_count) == 1);
	assert(os_ref_get_count_raw(&obj4->ref_count) == 1);
	/* release all VM objects */
	vm_object_deallocate(obj1);
	vm_object_deallocate(obj2);
	vm_object_deallocate(obj3);
	vm_object_deallocate(obj4);
}
964 #endif /* MACH_ASSERT */
965
966 #if __arm64__ && !KASAN
967 __attribute__((noinline))
968 static void
vm_test_per_mapping_internal_accounting(void)969 vm_test_per_mapping_internal_accounting(void)
970 {
971 ledger_t ledger;
972 pmap_t user_pmap;
973 vm_map_t user_map;
974 kern_return_t kr;
975 ledger_amount_t balance;
976 mach_vm_address_t user_addr, user_remap;
977 vm_map_offset_t device_addr;
978 mach_vm_size_t user_size;
979 vm_prot_t cur_prot, max_prot;
980 upl_size_t upl_size;
981 upl_t upl;
982 unsigned int upl_count;
983 upl_control_flags_t upl_flags;
984 upl_page_info_t *pl;
985 ppnum_t ppnum;
986 vm_object_t device_object;
987 vm_map_offset_t map_start, map_end;
988 int pmap_flags;
989
990 pmap_flags = 0;
991 if (sizeof(vm_map_offset_t) == 4) {
992 map_start = 0x100000000ULL;
993 map_end = 0x200000000ULL;
994 pmap_flags |= PMAP_CREATE_64BIT;
995 } else {
996 map_start = 0x10000000;
997 map_end = 0x20000000;
998 }
999 /* create a user address space */
1000 ledger = ledger_instantiate(task_ledger_template,
1001 LEDGER_CREATE_ACTIVE_ENTRIES);
1002 assert(ledger);
1003 user_pmap = pmap_create_options(ledger, 0, pmap_flags);
1004 assert(user_pmap);
1005 user_map = vm_map_create(user_pmap,
1006 map_start,
1007 map_end,
1008 TRUE);
1009 assert(user_map);
1010 /* check ledger */
1011 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1012 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1013 assertf(balance == 0, "balance=0x%llx", balance);
1014 /* allocate 1 page in that address space */
1015 user_addr = 0;
1016 user_size = PAGE_SIZE;
1017 kr = mach_vm_allocate(user_map,
1018 &user_addr,
1019 user_size,
1020 VM_FLAGS_ANYWHERE);
1021 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1022 /* check ledger */
1023 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1024 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1025 assertf(balance == 0, "balance=0x%llx", balance);
1026 /* remap the original mapping */
1027 user_remap = 0;
1028 kr = mach_vm_remap(user_map,
1029 &user_remap,
1030 PAGE_SIZE,
1031 0,
1032 VM_FLAGS_ANYWHERE,
1033 user_map,
1034 user_addr,
1035 FALSE, /* copy */
1036 &cur_prot,
1037 &max_prot,
1038 VM_INHERIT_DEFAULT);
1039 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1040 /* check ledger */
1041 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1042 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1043 assertf(balance == 0, "balance=0x%llx", balance);
1044 /* create a UPL from the original mapping */
1045 upl_size = PAGE_SIZE;
1046 upl = NULL;
1047 upl_count = 0;
1048 upl_flags = UPL_FILE_IO | UPL_NO_SYNC | UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE;
1049 kr = vm_map_create_upl(user_map,
1050 (vm_map_offset_t)user_addr,
1051 &upl_size,
1052 &upl,
1053 NULL,
1054 &upl_count,
1055 &upl_flags,
1056 VM_KERN_MEMORY_DIAG);
1057 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1058 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
1059 assert(upl_page_present(pl, 0));
1060 ppnum = upl_phys_page(pl, 0);
1061 /* check ledger */
1062 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1063 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1064 assertf(balance == 0, "balance=0x%llx", balance);
1065 device_object = vm_object_allocate(PAGE_SIZE);
1066 assert(device_object);
1067 vm_object_lock(device_object);
1068 VM_OBJECT_SET_PRIVATE(device_object, TRUE);
1069 VM_OBJECT_SET_PHYS_CONTIGUOUS(device_object, TRUE);
1070 device_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
1071 vm_object_unlock(device_object);
1072 kr = vm_object_populate_with_private(device_object, 0,
1073 ppnum, PAGE_SIZE);
1074 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1075
1076 /* check ledger */
1077 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1078 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1079 assertf(balance == 0, "balance=0x%llx", balance);
1080 /* deallocate the original mapping */
1081 kr = mach_vm_deallocate(user_map, user_addr, PAGE_SIZE);
1082 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1083 /* map the device_object in the kernel */
1084 device_addr = 0;
1085 vm_object_reference(device_object);
1086 kr = vm_map_enter(kernel_map,
1087 &device_addr,
1088 PAGE_SIZE,
1089 0,
1090 VM_MAP_KERNEL_FLAGS_DATA_ANYWHERE(),
1091 device_object,
1092 0,
1093 FALSE, /* copy */
1094 VM_PROT_DEFAULT,
1095 VM_PROT_DEFAULT,
1096 VM_INHERIT_NONE);
1097 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1098 /* access the device pager mapping */
1099 *(char *)device_addr = 'x';
1100 printf("%s:%d 0x%llx: 0x%x\n", __FUNCTION__, __LINE__, (uint64_t)device_addr, *(uint32_t *)device_addr);
1101 /* check ledger */
1102 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1103 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1104 assertf(balance == 0, "balance=0x%llx", balance);
1105 /* fault in the remap addr */
1106 kr = vm_fault(user_map, (vm_map_offset_t)user_remap, VM_PROT_READ,
1107 FALSE, 0, TRUE, NULL, 0);
1108 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1109 /* check ledger */
1110 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1111 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1112 assertf(balance == PAGE_SIZE, "balance=0x%llx", balance);
1113 /* deallocate remapping */
1114 kr = mach_vm_deallocate(user_map, user_remap, PAGE_SIZE);
1115 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1116 /* check ledger */
1117 kr = ledger_get_balance(ledger, task_ledgers.internal, &balance);
1118 assertf(kr == KERN_SUCCESS, "kr=0x%x", kr);
1119 assertf(balance == 0, "balance=0x%llx", balance);
1120 /* TODO: cleanup... */
1121 printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
1122 }
1123 #endif /* __arm64__ && !KASAN */
1124
1125 static void
vm_test_kernel_tag_accounting_kma(kma_flags_t base,kma_flags_t bit)1126 vm_test_kernel_tag_accounting_kma(kma_flags_t base, kma_flags_t bit)
1127 {
1128 vm_tag_t tag = VM_KERN_MEMORY_REASON; /* unused during POST */
1129 uint64_t init_size = vm_tag_get_size(tag);
1130 __assert_only uint64_t final_size = init_size + PAGE_SIZE;
1131 vm_address_t address;
1132 kern_return_t kr;
1133
1134 /*
1135 * Test the matrix of:
1136 * - born with or without bit
1137 * - bit flipped or not
1138 * - dies with or without bit
1139 */
1140 for (uint32_t i = 0; i < 4; i++) {
1141 kma_flags_t flags1 = base | ((i & 1) ? bit : KMA_NONE);
1142 kma_flags_t flags2 = base | ((i & 2) ? bit : KMA_NONE);
1143
1144 kr = kmem_alloc(kernel_map, &address, PAGE_SIZE, flags1, tag);
1145 assert3u(kr, ==, KERN_SUCCESS);
1146
1147 if (flags1 & (KMA_VAONLY | KMA_PAGEABLE)) {
1148 assert3u(init_size, ==, vm_tag_get_size(tag));
1149 } else {
1150 assert3u(final_size, ==, vm_tag_get_size(tag));
1151 }
1152
1153 if ((flags1 ^ flags2) == KMA_VAONLY) {
1154 if (flags1 & KMA_VAONLY) {
1155 kernel_memory_populate(address, PAGE_SIZE,
1156 KMA_KOBJECT | KMA_NOFAIL, tag);
1157 } else {
1158 kernel_memory_depopulate(address, PAGE_SIZE,
1159 KMA_KOBJECT, tag);
1160 }
1161 }
1162
1163 if ((flags1 ^ flags2) == KMA_PAGEABLE) {
1164 if (flags1 & KMA_PAGEABLE) {
1165 kr = vm_map_wire_kernel(kernel_map,
1166 address, address + PAGE_SIZE,
1167 VM_PROT_DEFAULT, tag, false);
1168 assert3u(kr, ==, KERN_SUCCESS);
1169 } else {
1170 kr = vm_map_unwire(kernel_map,
1171 address, address + PAGE_SIZE, false);
1172 assert3u(kr, ==, KERN_SUCCESS);
1173 }
1174 }
1175
1176 if (flags2 & (KMA_VAONLY | KMA_PAGEABLE)) {
1177 assert3u(init_size, ==, vm_tag_get_size(tag));
1178 } else {
1179 assert3u(final_size, ==, vm_tag_get_size(tag));
1180 }
1181
1182 kmem_free(kernel_map, address, PAGE_SIZE);
1183 assert3u(init_size, ==, vm_tag_get_size(tag));
1184 }
1185 }
1186
1187 __attribute__((noinline))
1188 static void
vm_test_kernel_tag_accounting(void)1189 vm_test_kernel_tag_accounting(void)
1190 {
1191 printf("%s: test running\n", __func__);
1192
1193 printf("%s: account (KMA_KOBJECT + populate)...\n", __func__);
1194 vm_test_kernel_tag_accounting_kma(KMA_KOBJECT, KMA_VAONLY);
1195 printf("%s: PASS\n", __func__);
1196
1197 printf("%s: account (regular object + wiring)...\n", __func__);
1198 vm_test_kernel_tag_accounting_kma(KMA_NONE, KMA_PAGEABLE);
1199 printf("%s: PASS\n", __func__);
1200
1201 printf("%s: test passed\n", __func__);
1202
1203 #undef if_bit
1204 }
1205
__attribute__((noinline))
static void
vm_test_collapse_overflow(void)
{
	/*
	 * Regression test: vm_object_collapse() must not bypass a backing
	 * object whose size truncates to 0 when narrowed to (int) pages,
	 * while that backing object still holds a resident page.
	 */
	vm_object_t object, backing_object;
	vm_object_size_t size;
	vm_page_t m;

	/* create an object for which (int)(size>>PAGE_SHIFT) = 0 */
	size = 0x400000000000ULL;
	assert((int)(size >> PAGE_SHIFT) == 0);
	backing_object = vm_object_allocate(size + PAGE_SIZE);
	assert(backing_object);
	vm_object_reference(backing_object);
	/* insert a page */
	m = VM_PAGE_NULL;
	while (m == VM_PAGE_NULL) {
		m = vm_page_grab();
		if (m == VM_PAGE_NULL) {
			/* no free page available: block until one frees up */
			VM_PAGE_WAIT();
		}
	}
	assert(m);
	vm_object_lock(backing_object);
	vm_page_insert(m, backing_object, 0);
	vm_object_unlock(backing_object);
	/* make it back another object */
	object = vm_object_allocate(size);
	assert(object);
	vm_object_reference(object);
	object->shadow = backing_object;
	vm_object_reference(backing_object);
	/* trigger a bypass */
	vm_object_lock(object);
	vm_object_collapse(object, 0, TRUE);
	/* check that it did not bypass the backing object */
	if (object->shadow != backing_object) {
		panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
	}
	vm_object_unlock(object);

	/* remove the page from the backing object */
	vm_object_lock(backing_object);
	vm_page_remove(m, TRUE);
	vm_object_unlock(backing_object);
	/* trigger a bypass */
	vm_object_lock(object);
	vm_object_collapse(object, 0, TRUE);
	/* check that it did bypass the backing object (now page-free) */
	if (object->shadow == backing_object) {
		panic("%s:%d FAIL\n", __FUNCTION__, __LINE__);
	}
	/* give the grabbed page to "object" so teardown reclaims it */
	vm_page_insert(m, object, 0);
	vm_object_unlock(object);

	/* cleanup */
	vm_object_deallocate(object);
	/* "backing_object" already lost its reference during the bypass */
	// vm_object_deallocate(backing_object);

	printf("%s:%d PASS\n", __FUNCTION__, __LINE__);
}
1268
1269 __attribute__((noinline))
1270 static void
vm_test_physical_size_overflow(void)1271 vm_test_physical_size_overflow(void)
1272 {
1273 vm_map_address_t start;
1274 mach_vm_size_t size;
1275 kern_return_t kr;
1276 mach_vm_size_t phys_size;
1277 bool fail;
1278 int failures = 0;
1279
1280 /* size == 0 */
1281 start = 0x100000;
1282 size = 0x0;
1283 kr = vm_map_range_physical_size(kernel_map,
1284 start,
1285 size,
1286 &phys_size);
1287 fail = (kr != KERN_SUCCESS || phys_size != 0);
1288 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1289 __FUNCTION__, __LINE__,
1290 (fail ? "FAIL" : "PASS"),
1291 (uint64_t)start, size, kr, phys_size);
1292 failures += fail;
1293
1294 /* plain wraparound */
1295 start = 0x100000;
1296 size = 0xffffffffffffffff - 0x10000;
1297 kr = vm_map_range_physical_size(kernel_map,
1298 start,
1299 size,
1300 &phys_size);
1301 fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
1302 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1303 __FUNCTION__, __LINE__,
1304 (fail ? "FAIL" : "PASS"),
1305 (uint64_t)start, size, kr, phys_size);
1306 failures += fail;
1307
1308 /* wraparound after rounding */
1309 start = 0xffffffffffffff00;
1310 size = 0xf0;
1311 kr = vm_map_range_physical_size(kernel_map,
1312 start,
1313 size,
1314 &phys_size);
1315 fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
1316 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1317 __FUNCTION__, __LINE__,
1318 (fail ? "FAIL" : "PASS"),
1319 (uint64_t)start, size, kr, phys_size);
1320 failures += fail;
1321
1322 /* wraparound to start after rounding */
1323 start = 0x100000;
1324 size = 0xffffffffffffffff;
1325 kr = vm_map_range_physical_size(kernel_map,
1326 start,
1327 size,
1328 &phys_size);
1329 fail = (kr != KERN_INVALID_ARGUMENT || phys_size != 0);
1330 printf("%s:%d %s start=0x%llx size=0x%llx -> kr=%d phys_size=0x%llx\n",
1331 __FUNCTION__, __LINE__,
1332 (fail ? "FAIL" : "PASS"),
1333 (uint64_t)start, size, kr, phys_size);
1334 failures += fail;
1335
1336 if (failures) {
1337 panic("%s: FAIL (failures=%d)", __FUNCTION__, failures);
1338 }
1339 printf("%s: PASS\n", __FUNCTION__);
1340 }
1341
1342 #define PTR_UPPER_SHIFT 60
1343 #define PTR_TAG_SHIFT 56
1344 #define PTR_BITS_MASK (((1ULL << PTR_TAG_SHIFT) - 1) | (0xfULL << PTR_UPPER_SHIFT))
1345
1346 static inline vm_map_t
1347 create_map(mach_vm_address_t map_start, mach_vm_address_t map_end);
1348 static inline void
1349 cleanup_map(vm_map_t *map);
1350
__attribute__((noinline))
static void
vm_test_address_canonicalization(void)
{
	/*
	 * NOTE(review): compiled out on this configuration; presumably the
	 * test needs pointer-tagging support (see PTR_TAG_SHIFT above) —
	 * confirm before wiring it up.
	 */
	T_SKIP("System not designed to support this test, skipping...");
}
1357
/*
 * VM subsystem power-on self tests entry point.  Runs each test in
 * sequence; individual tests assert/panic on failure, so reaching the
 * end means success (always returns KERN_SUCCESS).
 */
kern_return_t
vm_tests(void)
{
	kern_return_t kr = KERN_SUCCESS;

	/* Avoid VM panics because some of our test vm_maps don't have a pmap. */
	thread_test_context_t ctx CLEANUP_THREAD_TEST_CONTEXT = {
		.test_option_vm_map_allow_null_pmap = true,
	};
	thread_set_test_context(&ctx);

	vm_test_collapse_compressor();
	vm_test_wire_and_extract();
	vm_test_page_wire_overflow_panic();
	vm_test_kernel_object_fault();
	vm_test_device_pager_transpose();
#if MACH_ASSERT
	vm_test_map_copy_adjust_to_target();
#endif /* MACH_ASSERT */
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
	vm_test_4k();
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
#if __arm64__ && !KASAN
	vm_test_per_mapping_internal_accounting();
#endif /* __arm64__ && !KASAN */
	vm_test_kernel_tag_accounting();
	vm_test_collapse_overflow();
	vm_test_physical_size_overflow();
	vm_test_address_canonicalization();

	return kr;
}
1390
1391 /*
1392 * Checks that vm_map_delete() can deal with map unaligned entries.
1393 * rdar://88969652
1394 */
static int
vm_map_non_aligned_test(__unused int64_t in, int64_t *out)
{
	vm_map_t map = current_map();
	mach_vm_size_t size = 2 * VM_MAP_PAGE_SIZE(map);
	mach_vm_address_t addr;
	vm_map_entry_t entry;
	kern_return_t kr;

	/* only meaningful when the map's page size exceeds the HW page size */
	if (VM_MAP_PAGE_SHIFT(map) > PAGE_SHIFT) {
		kr = mach_vm_allocate(map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS) {
			return ENOMEM;
		}

		vm_map_lock(map);
		if (!vm_map_lookup_entry(map, addr, &entry)) {
			panic("couldn't find the entry we just made: "
			    "map:%p addr:0x%0llx", map, addr);
		}

		/*
		 * Now break the entry into:
		 * 2 * 4k
		 * 2 * 4k
		 * 1 * 16k
		 */
		/* clip the first map page off, then mark it map-unaligned */
		vm_map_clip_end(map, entry, addr + VM_MAP_PAGE_SIZE(map));
		entry->map_aligned = FALSE;
		/* clip again at a HW-page boundary inside the unaligned entry */
		vm_map_clip_end(map, entry, addr + PAGE_SIZE * 2);
		vm_map_unlock(map);

		/* vm_map_delete() must cope with the unaligned entries */
		kr = mach_vm_deallocate(map, addr, size);
		assert(kr == KERN_SUCCESS);
	}

	/* report success to the sysctl test harness */
	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_map_non_aligned, vm_map_non_aligned_test);
1435
1436 static inline vm_map_t
create_map(mach_vm_address_t map_start,mach_vm_address_t map_end)1437 create_map(mach_vm_address_t map_start, mach_vm_address_t map_end)
1438 {
1439 ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
1440 pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
1441 assert(pmap);
1442 ledger_dereference(ledger); // now retained by pmap
1443 vm_map_t map = vm_map_create_options(pmap, map_start, map_end, VM_MAP_CREATE_PAGEABLE);//vm_compute_max_offset
1444 assert(map);
1445
1446 return map;
1447 }
1448
1449 static inline void
cleanup_map(vm_map_t * map)1450 cleanup_map(vm_map_t *map)
1451 {
1452 assert(*map);
1453 kern_return_t kr = vm_map_terminate(*map);
1454 assert(kr == 0);
1455 vm_map_deallocate(*map); // also destroys pmap
1456 }
1457
1458 kern_return_t
1459 mach_vm_remap_new_external(
1460 vm_map_t target_map,
1461 mach_vm_offset_ut *address,
1462 mach_vm_size_ut size,
1463 mach_vm_offset_ut mask,
1464 int flags,
1465 mach_port_t src_tport,
1466 mach_vm_offset_ut memory_address,
1467 boolean_t copy,
1468 vm_prot_ut *cur_protection_u,
1469 vm_prot_ut *max_protection_u,
1470 vm_inherit_ut inheritance);
1471 kern_return_t
1472 vm_remap_new_external(
1473 vm_map_t target_map,
1474 vm_offset_ut *address,
1475 vm_size_ut size,
1476 vm_offset_ut mask,
1477 int flags,
1478 mach_port_t src_tport,
1479 vm_offset_ut memory_address,
1480 boolean_t copy,
1481 vm_prot_ut *cur_protection,
1482 vm_prot_ut *max_protection,
1483 vm_inherit_ut inheritance);
1484 kern_return_t
1485 mach_vm_remap_external(
1486 vm_map_t target_map,
1487 mach_vm_offset_ut *address,
1488 mach_vm_size_ut size,
1489 mach_vm_offset_ut mask,
1490 int flags,
1491 vm_map_t src_map,
1492 mach_vm_offset_ut memory_address,
1493 boolean_t copy,
1494 vm_prot_ut *cur_protection,
1495 vm_prot_ut *max_protection,
1496 vm_inherit_ut inheritance);
1497 kern_return_t
1498 mach_vm_map_external(
1499 vm_map_t target_map,
1500 mach_vm_offset_ut *address,
1501 mach_vm_size_ut initial_size,
1502 mach_vm_offset_ut mask,
1503 int flags,
1504 ipc_port_t port,
1505 memory_object_offset_ut offset,
1506 boolean_t copy,
1507 vm_prot_ut cur_protection,
1508 vm_prot_ut max_protection,
1509 vm_inherit_ut inheritance);
1510 kern_return_t
1511 mach_vm_wire_external(
1512 host_priv_t host_priv,
1513 vm_map_t map,
1514 mach_vm_address_ut start,
1515 mach_vm_size_ut size,
1516 vm_prot_ut access);
1517 kern_return_t
1518 mach_vm_purgable_control_external(
1519 mach_port_t target_tport,
1520 mach_vm_offset_ut address_u,
1521 vm_purgable_t control,
1522 int *state);
1523 kern_return_t
1524 vm_purgable_control_external(
1525 mach_port_t target_tport,
1526 vm_offset_ut address,
1527 vm_purgable_t control,
1528 int *state);
1529
1530 static int
vm_map_null_tests(__unused int64_t in,int64_t * out)1531 vm_map_null_tests(__unused int64_t in, int64_t *out)
1532 {
1533 kern_return_t kr;
1534
1535 mach_vm_address_t alloced_addr, throwaway_addr;
1536 mach_vm_address_ut throwaway_addr_ut;
1537 vm_address_t vm_throwaway_addr;
1538 vm_address_ut vm_throwaway_addr_ut;
1539 vm32_address_ut alloced_addr32, throwaway_addr32_u;
1540 mach_vm_size_t throwaway_size, size_16kb, read_overwrite_data_size;
1541 vm_size_t vm_size, vm_read_overwrite_data_size, vm_throwaway_size;
1542 vm_size_ut throwaway_size_ut;
1543 vm32_size_t data_size32, size32_16kb;
1544 vm32_size_ut data_size32_u, throwaway_size32_u;
1545 mach_msg_type_number_t read_data_size;
1546 mach_port_t mem_entry_result;
1547 pointer_t read_data;
1548 pointer_ut read_data_u;
1549 vm_prot_t prot_default;
1550 vm_prot_ut prot_allexec_u, prot_default_ut;
1551 vm_map_t map64, map32;
1552 vm_machine_attribute_val_t vm_throwaway_attr_val;
1553 vm_region_extended_info_data_t vm_throwaway_region_extended_info;
1554 vm_region_recurse_info_t vm_throwaway_region_recurse_info;
1555 vm_region_recurse_info_64_t vm_throwaway_region_recurse_info_64;
1556 int throwaway_state;
1557 uint32_t throwaway_depth;
1558 vm_page_info_t page_info;
1559
1560 page_info = 0;
1561 throwaway_state = VM_PURGABLE_STATE_MAX;
1562 vm_throwaway_region_recurse_info_64 = 0;
1563 vm_throwaway_region_recurse_info = 0;
1564 vm_throwaway_attr_val = MATTR_VAL_OFF;
1565
1566 map64 = create_map(0, vm_compute_max_offset(true));
1567 map32 = create_map(0, vm_compute_max_offset(false));
1568
1569 prot_allexec_u = vm_sanitize_wrap_prot(VM_PROT_ALLEXEC);
1570 prot_default_ut = vm_sanitize_wrap_prot(VM_PROT_DEFAULT);
1571 prot_default = VM_PROT_DEFAULT;
1572
1573 size_16kb = 16 * 1024;
1574 size32_16kb = (vm32_size_t) size_16kb;
1575
1576 /*
1577 * Allocate some address in the map, just so we can pass a valid looking address to functions so they don't
1578 * return before checking VM_MAP_NULL
1579 */
1580 kr = mach_vm_allocate(map64, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
1581 assert(kr == KERN_SUCCESS);
1582 kr = vm32_vm_allocate(map32, &alloced_addr32, size32_16kb, VM_FLAGS_ANYWHERE);
1583 assert(kr == KERN_SUCCESS);
1584
1585 /*
1586 * Call a bunch of MIG entrypoints with VM_MAP_NULL. The goal is to verify they check map != VM_MAP_NULL.
1587 * There are no requirements put on the return, so don't assert kr. Just verify no crash occurs.
1588 */
1589 throwaway_size = size_16kb;
1590 kr = _mach_make_memory_entry(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1591 assert(kr != KERN_SUCCESS);
1592 throwaway_size32_u = vm32_sanitize_wrap_size(size32_16kb);
1593 kr = vm32_mach_make_memory_entry(VM_MAP_NULL, &throwaway_size32_u, alloced_addr32, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1594 assert(kr != KERN_SUCCESS);
1595 throwaway_size_ut = vm_sanitize_wrap_size(size_16kb);
1596 kr = vm32_mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size_ut, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1597 assert(kr != KERN_SUCCESS);
1598 throwaway_size = size_16kb;
1599 kr = mach_make_memory_entry_64(VM_MAP_NULL, &throwaway_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1600 assert(kr != KERN_SUCCESS);
1601 vm_size = size_16kb;
1602 kr = mach_make_memory_entry(VM_MAP_NULL, &vm_size, alloced_addr, VM_PROT_DEFAULT, &mem_entry_result, IPC_PORT_NULL);
1603 assert(kr != KERN_SUCCESS);
1604
1605 kr = mach_memory_object_memory_entry(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
1606 assert(kr != KERN_SUCCESS);
1607 kr = mach_memory_object_memory_entry_64(HOST_NULL, true, size_16kb, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &mem_entry_result);
1608 assert(kr != KERN_SUCCESS);
1609
1610 throwaway_addr = alloced_addr;
1611 kr = mach_vm_allocate(VM_MAP_NULL, &throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
1612 assert(kr != KERN_SUCCESS);
1613 throwaway_addr32_u = alloced_addr32;
1614 kr = vm32_vm_allocate(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, VM_FLAGS_ANYWHERE);
1615 assert(kr != KERN_SUCCESS);
1616 kr = vm_allocate_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, VM_FLAGS_ANYWHERE);
1617 assert(kr != KERN_SUCCESS);
1618
1619 kr = mach_vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
1620 assert(kr != KERN_SUCCESS);
1621 kr = vm_deallocate(VM_MAP_NULL, alloced_addr, size_16kb);
1622 assert(kr != KERN_SUCCESS);
1623 kr = vm32_vm_deallocate(VM_MAP_NULL, throwaway_addr32_u, size32_16kb);
1624 assert(kr != KERN_SUCCESS);
1625
1626 kr = mach_vm_map(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1627 assert(kr != KERN_SUCCESS);
1628 kr = mach_vm_map_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1629 assert(kr != KERN_SUCCESS);
1630
1631 vm_throwaway_addr = alloced_addr;
1632 kr = vm_map(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1633 assert(kr != KERN_SUCCESS);
1634 kr = vm32_vm_map(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1635 assert(kr != KERN_SUCCESS);
1636 kr = vm32_vm_map_64(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, IPC_PORT_NULL, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1637 assert(kr != KERN_SUCCESS);
1638
1639 kr = mach_vm_remap(map64, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1640 assert(kr != KERN_SUCCESS);
1641 kr = mach_vm_remap(VM_MAP_NULL, &throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1642 assert(kr != KERN_SUCCESS);
1643 kr = mach_vm_remap_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1644 assert(kr != KERN_SUCCESS);
1645 kr = mach_vm_remap_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1646 assert(kr != KERN_SUCCESS);
1647 kr = vm_remap_external(map64, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1648 assert(kr != KERN_SUCCESS);
1649 kr = vm_remap_external(VM_MAP_NULL, &vm_throwaway_addr, size_16kb, 0, VM_FLAGS_ANYWHERE, map64, 0, false, &prot_default, &prot_default, VM_INHERIT_DEFAULT);
1650 assert(kr != KERN_SUCCESS);
1651 kr = vm32_vm_remap(map32, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, VM_MAP_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1652 assert(kr != KERN_SUCCESS);
1653 kr = vm32_vm_remap(VM_MAP_NULL, &throwaway_addr32_u, size32_16kb, 0, VM_FLAGS_ANYWHERE, map32, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1654 assert(kr != KERN_SUCCESS);
1655
1656 kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1657 assert(kr != KERN_SUCCESS);
1658 kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1659 assert(kr != KERN_SUCCESS);
1660
1661 kr = mach_vm_remap_new_external(VM_MAP_NULL, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
1662 assert(kr != KERN_SUCCESS);
1663 kr = mach_vm_remap_new_external(map64, &throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_allexec_u, &prot_allexec_u, VM_INHERIT_DEFAULT);
1664 assert(kr != KERN_SUCCESS);
1665
1666 kr = vm_remap_new_external(VM_MAP_NULL, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1667 assert(kr != KERN_SUCCESS);
1668 kr = vm_remap_new_external(map64, &vm_throwaway_addr_ut, size_16kb, 0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL, 0, false, &prot_default_ut, &prot_default_ut, VM_INHERIT_DEFAULT);
1669 assert(kr != KERN_SUCCESS);
1670
1671 kr = mach_vm_wire_external(host_priv_self(), VM_MAP_NULL, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
1672 assert(kr != KERN_SUCCESS);
1673 kr = mach_vm_wire_external(HOST_PRIV_NULL, map64, throwaway_addr_ut, size_16kb, VM_PROT_DEFAULT);
1674 assert(kr != KERN_SUCCESS);
1675
1676 kr = vm_wire(host_priv_self(), VM_MAP_NULL, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
1677 assert(kr != KERN_SUCCESS);
1678 kr = vm_wire(HOST_PRIV_NULL, map64, throwaway_addr, size_16kb, VM_PROT_DEFAULT);
1679 assert(kr != KERN_SUCCESS);
1680
1681 kr = task_wire(VM_MAP_NULL, false);
1682 assert(kr != KERN_SUCCESS);
1683 kr = vm32_task_wire(VM_MAP_NULL, false);
1684 assert(kr != KERN_SUCCESS);
1685
1686 kr = mach_vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
1687 assert(kr != KERN_SUCCESS);
1688 kr = vm_read(VM_MAP_NULL, alloced_addr, size_16kb, &read_data, &read_data_size);
1689 assert(kr != KERN_SUCCESS);
1690 kr = vm32_vm_read(VM_MAP_NULL, alloced_addr32, size32_16kb, &read_data_u, &data_size32);
1691 assert(kr != KERN_SUCCESS);
1692
1693 mach_vm_read_entry_t * mach_re = kalloc_type(mach_vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1694 (*mach_re)[0].address = alloced_addr;
1695 (*mach_re)[0].size = size_16kb;
1696
1697 vm_read_entry_t * re = kalloc_type(vm_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1698 (*re)[0].address = alloced_addr;
1699 (*re)[0].size = (vm_size_t) size_16kb;
1700
1701 vm32_read_entry_t * re_32 = kalloc_type(vm32_read_entry_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1702 (*re_32)[0].address = (vm32_address_t) alloced_addr;
1703 (*re_32)[0].size = (vm32_size_t) size_16kb;
1704
1705 kr = mach_vm_read_list(VM_MAP_NULL, *mach_re, 1);
1706 assert(kr != KERN_SUCCESS);
1707 kr = vm_read_list(VM_MAP_NULL, *re, 1);
1708 assert(kr != KERN_SUCCESS);
1709 kr = vm32_vm_read_list(VM_MAP_NULL, *re_32, 1);
1710 assert(kr != KERN_SUCCESS);
1711
1712 kfree_type(mach_vm_read_entry_t, mach_re);
1713 kfree_type(vm_read_entry_t, re);
1714 kfree_type(vm32_read_entry_t, re_32);
1715
1716 kr = mach_vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &read_overwrite_data_size);
1717 assert(kr != KERN_SUCCESS);
1718 kr = vm_read_overwrite(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr, &vm_read_overwrite_data_size);
1719 assert(kr != KERN_SUCCESS);
1720 kr = vm32_vm_read_overwrite(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32, &data_size32_u);
1721 assert(kr != KERN_SUCCESS);
1722
1723 kr = mach_vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
1724 assert(kr != KERN_SUCCESS);
1725 kr = vm_copy(VM_MAP_NULL, alloced_addr, size_16kb, alloced_addr);
1726 assert(kr != KERN_SUCCESS);
1727 kr = vm32_vm_copy(VM_MAP_NULL, alloced_addr32, size32_16kb, alloced_addr32);
1728 assert(kr != KERN_SUCCESS);
1729
1730 kr = mach_vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
1731 assert(kr != KERN_SUCCESS);
1732 kr = vm_write(VM_MAP_NULL, alloced_addr, alloced_addr, (mach_msg_type_number_t) size_16kb);
1733 assert(kr != KERN_SUCCESS);
1734 kr = vm32_vm_write(VM_MAP_NULL, alloced_addr32, alloced_addr, (mach_msg_type_number_t) size_16kb);
1735 assert(kr != KERN_SUCCESS);
1736
1737 kr = mach_vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
1738 assert(kr != KERN_SUCCESS);
1739 kr = vm_inherit(VM_MAP_NULL, alloced_addr, size_16kb, VM_INHERIT_DEFAULT);
1740 assert(kr != KERN_SUCCESS);
1741 kr = vm32_vm_inherit(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_INHERIT_DEFAULT);
1742
1743 kr = mach_vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
1744 assert(kr != KERN_SUCCESS);
1745 kr = vm_protect(VM_MAP_NULL, alloced_addr, size_16kb, FALSE, VM_PROT_DEFAULT);
1746 assert(kr != KERN_SUCCESS);
1747 kr = vm32_vm_protect(VM_MAP_NULL, alloced_addr32, size32_16kb, FALSE, VM_PROT_DEFAULT);
1748 assert(kr != KERN_SUCCESS);
1749
1750 kr = mach_vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
1751 assert(kr != KERN_SUCCESS);
1752 kr = vm_behavior_set(VM_MAP_NULL, alloced_addr, size_16kb, VM_BEHAVIOR_DEFAULT);
1753 assert(kr != KERN_SUCCESS);
1754 kr = vm32_vm_behavior_set(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_BEHAVIOR_DEFAULT);
1755 assert(kr != KERN_SUCCESS);
1756
1757 kr = mach_vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
1758 assert(kr != KERN_SUCCESS);
1759 kr = vm_msync(VM_MAP_NULL, alloced_addr, size_16kb, VM_SYNC_ASYNCHRONOUS);
1760 assert(kr != KERN_SUCCESS);
1761 kr = vm32_vm_msync(VM_MAP_NULL, alloced_addr32, size32_16kb, VM_SYNC_ASYNCHRONOUS);
1762 assert(kr != KERN_SUCCESS);
1763
1764 kr = mach_vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1765 assert(kr != KERN_SUCCESS);
1766 kr = vm_machine_attribute(VM_MAP_NULL, alloced_addr, size_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1767 assert(kr != KERN_SUCCESS);
1768 kr = vm32_vm_machine_attribute(VM_MAP_NULL, alloced_addr32, size32_16kb, MATTR_CACHE, &vm_throwaway_attr_val);
1769 assert(kr != KERN_SUCCESS);
1770
1771 kr = mach_vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1772 assert(kr != KERN_SUCCESS);
1773 kr = vm_purgable_control_external(MACH_PORT_NULL, throwaway_addr_ut, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1774 assert(kr != KERN_SUCCESS);
1775 kr = vm32_vm_purgable_control(VM_MAP_NULL, alloced_addr32, VM_PURGABLE_PURGE_ALL, &throwaway_state);
1776 assert(kr != KERN_SUCCESS);
1777
1778 kr = mach_vm_region(VM_MAP_NULL, &throwaway_addr, &throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1779 assert(kr != KERN_SUCCESS);
1780 kr = vm_region(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1781 assert(kr != KERN_SUCCESS);
1782 kr = vm_region_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1783 assert(kr != KERN_SUCCESS);
1784 kr = vm32_vm_region(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1785 assert(kr != KERN_SUCCESS);
1786 kr = vm32_vm_region_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&vm_throwaway_region_extended_info, &read_data_size, &mem_entry_result);
1787 assert(kr != KERN_SUCCESS);
1788
1789 kr = mach_vm_region_recurse(VM_MAP_NULL, &throwaway_addr, &throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1790 assert(kr != KERN_SUCCESS);
1791 kr = vm_region_recurse(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1792 assert(kr != KERN_SUCCESS);
1793 kr = vm_region_recurse_64(VM_MAP_NULL, &vm_throwaway_addr, &vm_throwaway_size, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
1794 assert(kr != KERN_SUCCESS);
1795 kr = vm32_vm_region_recurse(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info, &read_data_size);
1796 assert(kr != KERN_SUCCESS);
1797 kr = vm32_vm_region_recurse_64(VM_MAP_NULL, &throwaway_addr32_u, &throwaway_size32_u, &throwaway_depth, vm_throwaway_region_recurse_info_64, &read_data_size);
1798 assert(kr != KERN_SUCCESS);
1799
1800 kr = mach_vm_page_info(VM_MAP_NULL, alloced_addr, VM_PAGE_INFO_BASIC, page_info, &read_data_size);
1801 assert(kr != KERN_SUCCESS);
1802 kr = mach_vm_page_query(VM_MAP_NULL, alloced_addr, &throwaway_state, &throwaway_state);
1803 assert(kr != KERN_SUCCESS);
1804 kr = vm_map_page_query(VM_MAP_NULL, vm_throwaway_addr, &throwaway_state, &throwaway_state);
1805 assert(kr != KERN_SUCCESS);
1806 kr = vm32_vm_map_page_query(VM_MAP_NULL, throwaway_addr32_u, &throwaway_state, &throwaway_state);
1807 assert(kr != KERN_SUCCESS);
1808
1809 /*
1810 * Cleanup our allocations and maps
1811 */
1812 kr = mach_vm_deallocate(map64, alloced_addr, size_16kb);
1813 assert(kr == KERN_SUCCESS);
1814 kr = vm32_vm_deallocate(map32, alloced_addr32, size32_16kb);
1815 assert(kr == KERN_SUCCESS);
1816
1817 cleanup_map(&map64);
1818 cleanup_map(&map32);
1819
1820 /*
1821 * If we made it far without crashing, the test works.
1822 */
1823
1824 *out = 1;
1825 return 0;
1826 }
1827 SYSCTL_TEST_REGISTER(vm_map_null, vm_map_null_tests);
1828
1829 #if CONFIG_PROB_GZALLOC
1830 extern vm_offset_t pgz_protect_for_testing_only(zone_t zone, vm_offset_t addr, void *fp);
1831
1832 static int
vm_memory_entry_pgz_test(__unused int64_t in,int64_t * out)1833 vm_memory_entry_pgz_test(__unused int64_t in, int64_t *out)
1834 {
1835 kern_return_t kr;
1836 ipc_port_t mem_entry_ptr;
1837 mach_vm_address_t allocation_addr = 0;
1838 vm_size_t size = PAGE_SIZE;
1839
1840 allocation_addr = (mach_vm_address_t) kalloc_data(size, Z_WAITOK);
1841 if (!allocation_addr) {
1842 *out = -1;
1843 return 0;
1844 }
1845
1846 /*
1847 * Make sure we get a pgz protected address
1848 * If we aren't already protected, try to protect it
1849 */
1850 if (!pgz_owned(allocation_addr)) {
1851 zone_id_t zid = zone_id_for_element((void *) allocation_addr, size);
1852 zone_t zone = &zone_array[zid];
1853 allocation_addr = pgz_protect_for_testing_only(zone, allocation_addr, __builtin_frame_address(0));
1854 }
1855 /*
1856 * If we still aren't protected, tell userspace to skip the test
1857 */
1858 if (!pgz_owned(allocation_addr)) {
1859 *out = 2;
1860 return 0;
1861 }
1862
1863 kr = mach_make_memory_entry(kernel_map, &size, (mach_vm_offset_t) allocation_addr, VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_VM_COPY, &mem_entry_ptr, IPC_PORT_NULL);
1864 assert(kr == KERN_SUCCESS);
1865
1866 ipc_port_release(mem_entry_ptr);
1867 kfree_data(allocation_addr, size);
1868
1869 *out = 1;
1870 return 0;
1871 }
1872 #else /* CONFIG_PROB_GZALLOC */
1873 static int
vm_memory_entry_pgz_test(__unused int64_t in,int64_t * out)1874 vm_memory_entry_pgz_test(__unused int64_t in, int64_t *out)
1875 {
1876 *out = 1;
1877 return 0;
1878 }
1879 #endif /* CONFIG_PROB_GZALLOC */
1880
1881 SYSCTL_TEST_REGISTER(vm_memory_entry_pgz, vm_memory_entry_pgz_test);
1882
1883 #define PAGE_SHIFT_4K 12
1884 #define PAGE_SHIFT_16K 14
1885 static int
vm_map_copy_entry_subrange_test(__unused int64_t in,int64_t * out)1886 vm_map_copy_entry_subrange_test(__unused int64_t in, int64_t *out)
1887 {
1888 mach_vm_size_t size_4kb, size_16kb;
1889 mach_vm_size_t mapped_size;
1890 vm_map_t map_4k, map_16k;
1891 mach_vm_address_t alloced_addr, mapped_addr;
1892 mach_vm_size_t entry_size;
1893 mach_port_t entry_handle;
1894 vm_region_basic_info_data_64_t region_info;
1895 mach_msg_type_number_t region_info_count;
1896
1897 kern_return_t kr;
1898
1899 size_4kb = 4 * 1024;
1900 size_16kb = 16 * 1024;
1901
1902 map_4k = create_map(0, vm_compute_max_offset(true));
1903 kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
1904 map_16k = create_map(0, vm_compute_max_offset(true));
1905 kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);
1906
1907 /*
1908 * Test mapping a portion of a copy entry from a 4k map to a 16k one.
1909 * The result size should be aligned to the destination's page size (16k).
1910 */
1911 // Get a copy entry to map into the system
1912 kr = mach_vm_allocate(map_4k, &alloced_addr, size_16kb, VM_FLAGS_ANYWHERE);
1913 assert(kr == KERN_SUCCESS);
1914
1915 entry_size = size_16kb;
1916 kr = mach_make_memory_entry_64(map_4k, &entry_size, alloced_addr,
1917 MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
1918 &entry_handle, MACH_PORT_NULL);
1919 assert(kr == KERN_SUCCESS);
1920 assert(entry_size == size_16kb);
1921
1922 // Attempt to map a portion of the entry into the 16k map
1923 kr = mach_vm_map(map_16k, &mapped_addr, size_4kb, 0, VM_FLAGS_ANYWHERE,
1924 entry_handle, 0, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
1925 VM_INHERIT_DEFAULT);
1926 assert(kr == KERN_SUCCESS);
1927
1928 // Ensure the entry is actually mapped whole
1929 region_info_count = VM_REGION_BASIC_INFO_COUNT_64;
1930 kr = mach_vm_region(map_16k, &mapped_addr, &mapped_size, VM_REGION_BASIC_INFO_64,
1931 (vm_region_info_t) ®ion_info, ®ion_info_count, NULL);
1932 assert(kr == KERN_SUCCESS);
1933 assert(mapped_size == entry_size);
1934
1935 // Cleanup
1936 mach_memory_entry_port_release(entry_handle);
1937 kr = mach_vm_deallocate(map_16k, mapped_addr, size_16kb);
1938 assert(kr == KERN_SUCCESS);
1939 kr = mach_vm_deallocate(map_4k, alloced_addr, size_16kb);
1940 assert(kr == KERN_SUCCESS);
1941 cleanup_map(&map_4k);
1942 cleanup_map(&map_16k);
1943
1944 *out = 1;
1945 return 0;
1946 }
1947 SYSCTL_TEST_REGISTER(vm_map_copy_entry_subrange, vm_map_copy_entry_subrange_test);
1948
1949
1950 static int
vm_memory_entry_map_size_null_test(__unused int64_t in,int64_t * out)1951 vm_memory_entry_map_size_null_test(__unused int64_t in, int64_t *out)
1952 {
1953 mach_vm_size_t size_16kb, map_size;
1954 vm_map_t map;
1955
1956 kern_return_t kr;
1957
1958 map = create_map(0, vm_compute_max_offset(true));
1959 size_16kb = 16 * 1024;
1960
1961 map_size = 0xdeadbeef;
1962 kr = mach_memory_entry_map_size(MACH_PORT_NULL, map, 0, size_16kb, &map_size);
1963 assert(kr == KERN_INVALID_ARGUMENT);
1964 assert(map_size == 0);
1965
1966 cleanup_map(&map);
1967
1968 *out = 1;
1969 return 0;
1970 }
1971 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_null, vm_memory_entry_map_size_null_test);
1972
/*
 * Verify that mach_memory_entry_map_size() rejects offsets whose addition
 * to the entry's internal offsets would overflow, instead of wrapping.
 * Two scenarios are probed: an entry with a nonzero parent offset, and an
 * entry with a nonzero data_offset (via MAP_MEM_USE_DATA_ADDR).
 * Returns 1 via *out on success; failures panic via assert().
 */
static int
vm_memory_entry_map_size_overflow_tests(__unused int64_t in, int64_t *out)
{
	mach_vm_size_t size_16kb, entry_size, map_size;
	vm_map_t map;
	mach_port_t parent_handle, entry_handle;
	mach_vm_address_t alloced_addr;
	vm_map_offset_t entry_offset;
	memory_object_offset_t maximum_offset;

	kern_return_t kr;

	size_16kb = 16 * 1024;
	map = create_map(0, vm_compute_max_offset(true));
	/*
	 * (1) Attempt to overflow offset + mem_entry->offset
	 */
	// Setup - create an entry with nonzero offset
	/*
	 * NOTE(review): (host_t) 1 appears to be a dummy non-null host and
	 * the second argument 1 an "internal" flag — TODO confirm against
	 * mach_memory_object_memory_entry_64()'s signature.
	 */
	kr = mach_memory_object_memory_entry_64((host_t) 1, 1,
	    size_16kb * 2, VM_PROT_DEFAULT, 0, &parent_handle);
	assert(kr == KERN_SUCCESS);

	/* Child entry at offset size_16kb into the parent: gives it a nonzero offset. */
	entry_size = size_16kb;
	kr = mach_make_memory_entry_64(map, &entry_size, size_16kb,
	    VM_PROT_DEFAULT, &entry_handle, parent_handle);
	assert(kr == KERN_SUCCESS);

	// Pass in maximum offset to attempt overflow
	maximum_offset = (memory_object_offset_t) -1;
	kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
	    &map_size);
	assert(kr == KERN_INVALID_ARGUMENT);

	// Cleanup
	mach_memory_entry_port_release(parent_handle);
	mach_memory_entry_port_release(entry_handle);

	/*
	 * (2) Attempt to overflow offset + mem_entry->data_offset
	 */
	// Setup - create an entry with nonzero data_offset
	kr = mach_vm_allocate(map, &alloced_addr, 2 * size_16kb, VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);

	/*
	 * Starting the entry mid-page (size_16kb / 2 in) with
	 * MAP_MEM_USE_DATA_ADDR forces a nonzero data_offset.
	 */
	entry_size = size_16kb;
	entry_offset = alloced_addr + (size_16kb / 2);
	kr = mach_make_memory_entry_64(map, &entry_size, entry_offset,
	    MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR | VM_PROT_DEFAULT,
	    &entry_handle, MACH_PORT_NULL);
	assert(kr == KERN_SUCCESS);

	// Pass in maximum offset to attempt overflow
	kr = mach_memory_entry_map_size(entry_handle, map, maximum_offset, size_16kb,
	    &map_size);
	assert(kr == KERN_INVALID_ARGUMENT);

	// Cleanup
	mach_memory_entry_port_release(entry_handle);
	kr = mach_vm_deallocate(map, alloced_addr, 2 * size_16kb);
	assert(kr == KERN_SUCCESS);
	cleanup_map(&map);

	*out = 1;
	return 0;
}
SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_overflow, vm_memory_entry_map_size_overflow_tests);
2039
2040 static int
vm_memory_entry_map_size_copy_tests(__unused int64_t in,int64_t * out)2041 vm_memory_entry_map_size_copy_tests(__unused int64_t in, int64_t *out)
2042 {
2043 mach_vm_size_t size_2kb, size_4kb, size_16kb;
2044 mach_vm_size_t entry_size_4k, entry_size_16k;
2045 mach_vm_size_t map_size;
2046 vm_map_t map_4k, map_16k;
2047 mach_port_t entry_4k, entry_16k;
2048 mach_vm_address_t alloced_addr_4k, alloced_addr_16k;
2049
2050 kern_return_t kr;
2051
2052 size_2kb = 2 * 1024;
2053 size_4kb = 4 * 1024;
2054 size_16kb = 16 * 1024;
2055
2056 /*
2057 * Setup - initialize maps and create copy entries for each
2058 */
2059 // 4k map and entry
2060 map_4k = create_map(0, vm_compute_max_offset(true));
2061 kr = vm_map_set_page_shift(map_4k, PAGE_SHIFT_4K);
2062 assert(kr == KERN_SUCCESS);
2063
2064 kr = mach_vm_allocate(map_4k, &alloced_addr_4k, size_16kb, VM_FLAGS_ANYWHERE);
2065 assert(kr == KERN_SUCCESS);
2066
2067 entry_size_4k = size_16kb;
2068 kr = mach_make_memory_entry_64(map_4k, &entry_size_4k, alloced_addr_4k,
2069 MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_4k, MACH_PORT_NULL);
2070 assert(kr == KERN_SUCCESS);
2071 assert(entry_size_4k == size_16kb);
2072
2073 // 16k map and entry
2074 map_16k = create_map(0, vm_compute_max_offset(true));
2075 kr = vm_map_set_page_shift(map_16k, PAGE_SHIFT_16K);
2076 assert(kr == KERN_SUCCESS);
2077
2078 kr = mach_vm_allocate(map_16k, &alloced_addr_16k, size_16kb, VM_FLAGS_ANYWHERE);
2079 assert(kr == KERN_SUCCESS);
2080
2081 entry_size_16k = size_16kb;
2082 kr = mach_make_memory_entry_64(map_16k, &entry_size_16k, alloced_addr_16k,
2083 MAP_MEM_VM_COPY | VM_PROT_DEFAULT, &entry_16k, MACH_PORT_NULL);
2084 assert(kr == KERN_SUCCESS);
2085 assert(entry_size_16k == size_16kb);
2086
2087 /*
2088 * (1) Test 4k map with 4k entry and 16k map with 16k entry. Page-aligned
2089 * ranges should have no size adjustment.
2090 */
2091 for (mach_vm_size_t i = 1; i <= 4; i++) {
2092 kr = mach_memory_entry_map_size(entry_4k, map_4k, 0, i * size_4kb, &map_size);
2093 assert(kr == KERN_SUCCESS);
2094 assert(map_size == (i * size_4kb));
2095 }
2096 kr = mach_memory_entry_map_size(entry_16k, map_16k, 0, size_16kb, &map_size);
2097 assert(kr == KERN_SUCCESS);
2098 assert(map_size == size_16kb);
2099
2100 /*
2101 * (2) Test 4k map with 16k entry. Since we have a 4k map, we should be able
2102 * to map a 4k range of the entry, but to map a 2k range we will need to map
2103 * a full 4k page.
2104 */
2105 kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_16kb, &map_size);
2106 assert(kr == KERN_SUCCESS);
2107 assert(map_size == size_16kb);
2108 kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_4kb, &map_size);
2109 assert(kr == KERN_SUCCESS);
2110 assert(map_size == size_4kb);
2111 kr = mach_memory_entry_map_size(entry_16k, map_4k, 0, size_2kb, &map_size);
2112 assert(kr == KERN_SUCCESS);
2113 assert(map_size == size_4kb);
2114
2115 /*
2116 * (3) Test 16k map with 4k entry. Since we have a 16k map, we will need to
2117 * map the whole 16kb memory entry even if a smaller range is requested.
2118 */
2119 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_16kb, &map_size);
2120 assert(kr == KERN_SUCCESS);
2121 assert(map_size == size_16kb);
2122 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_4kb, &map_size);
2123 assert(kr == KERN_SUCCESS);
2124 assert(map_size == size_16kb);
2125 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, size_2kb, &map_size);
2126 assert(kr == KERN_SUCCESS);
2127 assert(map_size == size_16kb);
2128
2129 /*
2130 * (4) Detect error in the case where the size requested is too large.
2131 */
2132 map_size = 0xdeadbeef;
2133 kr = mach_memory_entry_map_size(entry_4k, map_16k, 0, 2 * size_16kb, &map_size);
2134 assert(kr == KERN_INVALID_ARGUMENT);
2135 assert(map_size == 0);
2136
2137 /*
2138 * Clean up memory entries, allocations, and maps
2139 */
2140 mach_memory_entry_port_release(entry_4k);
2141 mach_memory_entry_port_release(entry_16k);
2142 kr = mach_vm_deallocate(map_4k, alloced_addr_4k, size_16kb);
2143 assert(kr == KERN_SUCCESS);
2144 kr = mach_vm_deallocate(map_16k, alloced_addr_16k, size_16kb);
2145 assert(kr == KERN_SUCCESS);
2146 cleanup_map(&map_4k);
2147 cleanup_map(&map_16k);
2148
2149 *out = 1;
2150 return 0;
2151 }
2152 SYSCTL_TEST_REGISTER(vm_memory_entry_map_size_copy, vm_memory_entry_map_size_copy_tests);
2153
2154 static int
vm_memory_entry_parent_submap_tests(__unused int64_t in,int64_t * out)2155 vm_memory_entry_parent_submap_tests(__unused int64_t in, int64_t *out)
2156 {
2157 vm_shared_region_t shared_region;
2158 mach_port_t parent_handle, entry_handle;
2159 vm_named_entry_t parent_entry;
2160 mach_vm_size_t entry_size;
2161 vm_prot_t vmflags;
2162
2163 kern_return_t kr;
2164
2165 /*
2166 * Use shared region to get a named_entry which refers to a submap
2167 */
2168 shared_region = vm_shared_region_get(current_task());
2169 parent_handle = shared_region->sr_mem_entry;
2170 assert(parent_handle != NULL);
2171 parent_entry = mach_memory_entry_from_port(parent_handle);
2172 assert(parent_entry->is_sub_map);
2173
2174 /*
2175 * We should be able to create an entry using the submap entry as the parent
2176 */
2177 entry_size = parent_entry->size;
2178 vmflags = VM_PROT_DEFAULT;
2179 kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
2180 &entry_handle, parent_handle);
2181 assert(kr == KERN_SUCCESS);
2182 mach_memory_entry_port_release(entry_handle);
2183
2184 /*
2185 * Should fail if using mach_make_memory_entry_mem_only since the parent
2186 * entry is not an object
2187 */
2188 vmflags |= MAP_MEM_ONLY;
2189 kr = mach_make_memory_entry_64(VM_MAP_NULL, &entry_size, 0, vmflags,
2190 &entry_handle, parent_handle);
2191 assert(kr == KERN_INVALID_ARGUMENT);
2192
2193 /*
2194 * Cleanup
2195 */
2196 vm_shared_region_deallocate(shared_region);
2197
2198 *out = 1;
2199 return 0;
2200 }
2201 SYSCTL_TEST_REGISTER(vm_memory_entry_parent_submap, vm_memory_entry_parent_submap_tests);
2202