1 /* Mach vm map miscellaneous unit tests
2 *
3 * This test program serves to be a regression test suite for legacy
4 * vm issues, ideally each test will be linked to a radar number and
5 * perform a set of certain validations.
6 *
7 */
8 #include <darwintest.h>
9
10 #include <dlfcn.h>
11 #include <errno.h>
12 #include <ptrauth.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <time.h>
17
18 #include <sys/mman.h>
19
20 #include <mach/mach_error.h>
21 #include <mach/mach_init.h>
22 #include <mach/mach_port.h>
23 #include <mach/mach_vm.h>
24 #include <mach/vm_map.h>
25 #include <mach/task.h>
26 #include <mach/task_info.h>
27 #include <mach/shared_region.h>
28 #include <machine/cpu_capabilities.h>
29
30 T_GLOBAL_META(
31 T_META_NAMESPACE("xnu.vm"),
32 T_META_RADAR_COMPONENT_NAME("xnu"),
33 T_META_RADAR_COMPONENT_VERSION("VM"),
34 T_META_RUN_CONCURRENTLY(true));
35
/*
 * rdar://23334087 — VM memory tags should be propagated through memory
 * entries.
 *
 * Allocates a 3-chunk region where each 16KB chunk carries a distinct
 * VM_MAKE_TAG value (100 + chunk index), wraps the whole region in a
 * memory entry and maps that entry again.  Two passes are made (in a
 * random order): one with MAP_MEM_VM_COPY and one with MAP_MEM_VM_SHARE.
 * For each pass the test verifies:
 *   - copy vs. share semantics of the new mapping, and
 *   - the user_tag reported for each chunk of the new mapping: the
 *     override tag (200) when override_tag is set, otherwise the original
 *     per-chunk tags (100 + i).
 *
 * override_tag: when non-zero, the new mapping is created with
 * VM_MAKE_TAG(200), which is expected to override the propagated tags.
 */
static void
test_memory_entry_tagging(int override_tag)
{
	int pass;
	int do_copy;
	kern_return_t kr;
	mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t *vmaddr_ptr;
	mach_vm_size_t *vmsize_ptr;
	mach_vm_address_t vmaddr_chunk;
	mach_vm_size_t vmsize_chunk;
	mach_vm_offset_t vmoff;
	mach_port_t mem_entry_copied, mem_entry_shared;
	mach_port_t *mem_entry_ptr;
	int i;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t ri_count;
	unsigned int depth;
	int vm_flags;
	int expected_tag;

	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_copied = 0;
	vmsize_shared = 0;
	vmaddr_chunk = 0;
	vmsize_chunk = 16 * 1024;
	vmaddr_orig = 0;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;
	pass = 0;

	/* reserve the whole original region in one shot */
	vmaddr_orig = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * Re-allocate each chunk in place (FIXED | OVERWRITE) with its own
	 * tag (100, 101, 102) so each chunk becomes a separately tagged
	 * mapping within the region.
	 */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* touch every page so the memory entry has resident backing */
	for (vmoff = 0;
	    vmoff < vmsize_orig;
	    vmoff += PAGE_SIZE) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

	/* randomize which of the two passes (copy/share) runs first */
	do_copy = time(NULL) & 1;
again:
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	/* wrap the whole original region in a memory entry */
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig, /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the memory entry, optionally overriding the tag with 200 */
	*vmaddr_ptr = 0;
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0, /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * Writing through the original mapping must be visible through the
	 * new mapping iff the entry was made with MAP_MEM_VM_SHARE.
	 */
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* verify the tag reported for each chunk of the new mapping */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t vmaddr_info;
		mach_vm_size_t vmsize_info;

		vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
		vmsize_info = 0;
		depth = 1;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		/* each chunk should still be its own region at the expected spot */
		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* second pass exercises the other of copy/share */
	if (++pass < 2) {
		do_copy = !do_copy;
		goto again;
	}

done:
	/* release whatever was set up before a failure (or full success) */
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
		vmsize_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
		vmsize_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
		vmsize_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}

	return;
}
272
/*
 * rdar://22611816 — mach_make_memory_entry(MAP_MEM_VM_COPY) should never
 * use a KERNEL_BUFFER copy.
 *
 * Creates a MAP_MEM_VM_COPY memory entry over a 1-byte allocation, maps
 * it, and verifies copy semantics: the mapping initially reads the same
 * byte as the original, and writing through the mapping does not modify
 * the original.
 */
static void
test_map_memory_entry(void)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	mach_port_t mem_entry;
	unsigned char *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 0;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;

	/* source allocation: a single byte (page-rounded by the kernel) */
	vmsize1 = 1;
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	/* make a copy-type memory entry covering that byte */
	vmsize2 = 1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1, /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	/* returned size may be page-rounded, so >= rather than == */
	T_QUIET;
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map a copy of the entry */
	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    TRUE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the copy must initially carry the original's contents... */
	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* ... and writing the copy must not change the original */
	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
		vmsize1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
		vmsize2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}

	return;
}
381
382 T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
383 VM memory tags should be propagated through memory entries",
384 T_META_ALL_VALID_ARCHS(true))
385 {
386 test_memory_entry_tagging(0);
387 test_memory_entry_tagging(1);
388 }
389
/* rdar://22611816 — see test_map_memory_entry() for the actual checks. */
T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
copy", T_META_ALL_VALID_ARCHS(true))
{
	test_map_memory_entry();
}
396
397 static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };
398
/*
 * Return this task's current physical footprint, as reported by
 * task_info(TASK_VM_INFO).phys_footprint.  Asserts (test failure) if the
 * task_info() call fails.
 */
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__)
	/* on arm64, the kernel is expected to fill the full structure */
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) */
	return ti.phys_footprint;
}
420
/*
 * Walks a purgeable 1MB allocation through a sequence of wire/unwire and
 * purgeable state transitions, sampling the task's physical footprint at
 * each step and comparing it against the expected accounting.  Footprint
 * mismatches only T_LOG a warning (footprint can legitimately drift under
 * memory pressure); the state-transition return values are hard asserts.
 */
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
emptying, volatilizing purgeable vm")
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	vm_purgable_t state;
	uint64_t footprint[8];

	vm_addr = 0;
	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* footprint0: baseline, nothing resident yet */
	footprint[0] = task_footprint();
	T_LOG("  footprint[0] = %llu", footprint[0]);

	/* touch one byte per page to make the whole allocation resident */
	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG("  footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}

	/* wire the first half so it can't be purged */
	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* footprint2 == footprint1: wiring doesn't change the footprint */
	footprint[2] = task_footprint();
	T_LOG("  footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	/* SET_STATE returns the previous state in "state" */
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2): only the unwired half leaves */
	footprint[3] = task_footprint();
	T_LOG("  footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}

	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	/* previous state may already be EMPTY if the pager purged it */
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3: wired half still counted */
	footprint[4] = task_footprint();
	T_LOG("  footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}

	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG("  footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG("  footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}

	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG("  footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}
559
560 kern_return_t
get_reusable_size(uint64_t * reusable)561 get_reusable_size(uint64_t *reusable)
562 {
563 task_vm_info_data_t ti;
564 mach_msg_type_number_t ti_count = TASK_VM_INFO_COUNT;
565 kern_return_t kr;
566
567 kr = task_info(mach_task_self(),
568 TASK_VM_INFO,
569 (task_info_t) &ti,
570 &ti_count);
571 T_QUIET;
572 T_EXPECT_MACH_SUCCESS(kr, "task_info()");
573 T_QUIET;
574 *reusable = ti.reusable;
575 return kr;
576 }
577
/*
 * rdar://2295713 — madvise(MADV_FREE_REUSABLE) accounting across a shared
 * remapping.
 *
 * Allocates 96KB (64KB to madvise + 32KB to mlock), shares it into a
 * second mapping with vm_remap(copy=FALSE), then marks the first 64KB
 * reusable and checks that both footprint and the task's "reusable"
 * counter move by 2 * 64KB (the region is mapped twice, so it is counted
 * twice).  Skipped under Rosetta translation where reusable accounting
 * differs.
 */
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
rethink needs madvise(MADV_FREE_HARDER)",
    T_META_RUN_CONCURRENTLY(false),
    T_META_ALL_VALID_ARCHS(true))
{
	vm_address_t vmaddr = 0, vmaddr2 = 0;
	vm_size_t vmsize, vmsize1, vmsize2;
	kern_return_t kr;
	char *cp;
	vm_prot_t curprot, maxprot;
	int ret;
	int vmflags;
	uint64_t footprint_before, footprint_after;
	uint64_t reusable_before, reusable_after, reusable_expected;


	vmsize1 = 64 * 1024; /* 64KB to madvise() */
	vmsize2 = 32 * 1024; /* 32KB to mlock() */
	vmsize = vmsize1 + vmsize2;
	vmflags = VM_FLAGS_ANYWHERE;
	VM_SET_FLAGS_ALIAS(vmflags, VM_MEMORY_MALLOC);

	/* baseline reusable count, to diff against at the end */
	kr = get_reusable_size(&reusable_before);
	if (kr) {
		goto done;
	}

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    vmflags);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* fault in every byte of the allocation */
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	/* create a second, shared mapping of the same memory (copy=FALSE) */
	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the remapping must read back exactly what was written */
	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp,
		    (unsigned char)*cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	/* and a write through one mapping must be visible in the other */
	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */

	/* wire the trailing 32KB via the second mapping */
	ret = mlock((char *)(uintptr_t)(vmaddr2 + vmsize1),
	    vmsize2);
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "mlock()");

	footprint_before = task_footprint();

	/* mark the first 64KB reusable through the first mapping */
	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize1,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* region is mapped twice, so the footprint drops by 2 * vmsize1 */
	footprint_after = task_footprint();
	T_ASSERT_EQ(footprint_after, footprint_before - 2 * vmsize1, NULL);

	kr = get_reusable_size(&reusable_after);
	if (kr) {
		goto done;
	}
	/* ... and the reusable counter grows by 2 * vmsize1 */
	reusable_expected = 2ULL * vmsize1 + reusable_before;
	T_EXPECT_EQ(reusable_after, reusable_expected, "actual=%lld expected %lld",
	    reusable_after, reusable_expected);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}
706
707 T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
708 rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
709 [ purgeable_malloc became eligible for reuse ]",
710 T_META_ALL_VALID_ARCHS(true))
711 {
712 #if defined(__x86_64__) || defined(__i386__)
713 if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
714 T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.)");
715 }
716 #endif /* defined(__x86_64__) || defined(__i386__) */
717 vm_address_t vmaddr = 0;
718 vm_size_t vmsize;
719 kern_return_t kr;
720 char *cp;
721 int ret;
722
723 vmsize = 10 * 1024 * 1024; /* 10MB */
724 kr = vm_allocate(mach_task_self(),
725 &vmaddr,
726 vmsize,
727 (VM_FLAGS_ANYWHERE |
728 VM_FLAGS_PURGABLE |
729 VM_MAKE_TAG(VM_MEMORY_MALLOC)));
730 T_QUIET;
731 T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
732 if (T_RESULT == T_RESULT_FAIL) {
733 goto done;
734 }
735
736 for (cp = (char *)(uintptr_t)vmaddr;
737 cp < (char *)(uintptr_t)(vmaddr + vmsize);
738 cp++) {
739 *cp = 'x';
740 }
741
742 ret = madvise((char *)(uintptr_t)vmaddr,
743 vmsize,
744 MADV_CAN_REUSE);
745 T_QUIET;
746 T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be adviced to reuse");
747 if (T_RESULT == T_RESULT_FAIL) {
748 goto done;
749 }
750
751 done:
752 if (vmaddr != 0) {
753 vm_deallocate(mach_task_self(), vmaddr, vmsize);
754 vmaddr = 0;
755 }
756 }
757
758 static bool
validate_memory_is_zero(vm_address_t start,vm_size_t vmsize,vm_address_t * non_zero_addr)759 validate_memory_is_zero(
760 vm_address_t start,
761 vm_size_t vmsize,
762 vm_address_t *non_zero_addr)
763 {
764 for (vm_size_t sz = 0; sz < vmsize; sz += sizeof(uint64_t)) {
765 vm_address_t addr = start + sz;
766
767 if (*(uint64_t *)(addr) != 0) {
768 *non_zero_addr = addr;
769 return false;
770 }
771 }
772 return true;
773 }
774
775 T_DECL(madvise_zero, "test madvise zero")
776 {
777 vm_address_t vmaddr = 0;
778 vm_size_t vmsize = PAGE_SIZE * 3;
779 vm_address_t non_zero_addr = 0;
780 kern_return_t kr;
781 int ret;
782 unsigned char vec;
783
784 kr = vm_allocate(mach_task_self(),
785 &vmaddr,
786 vmsize,
787 (VM_FLAGS_ANYWHERE |
788 VM_MAKE_TAG(VM_MEMORY_MALLOC)));
789 T_QUIET;
790 T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
791 if (T_RESULT == T_RESULT_FAIL) {
792 goto done;
793 }
794
795 memset((void *)vmaddr, 'A', vmsize);
796 ret = madvise(vmaddr, vmsize, MADV_FREE_REUSABLE);
797 T_QUIET;
798 T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_FREE_REUSABLE)");
799 if (T_RESULT == T_RESULT_FAIL) {
800 goto done;
801 }
802
803 memset((void *)vmaddr, 'B', PAGE_SIZE);
804 ret = madvise(vmaddr, vmsize, MADV_ZERO);
805 T_QUIET;
806 T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
807 if (T_RESULT == T_RESULT_FAIL) {
808 goto done;
809 }
810
811 T_QUIET;
812 T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
813 "madvise(%p, %llu, MADV_ZERO) returned non zero mem at %p",
814 (void *)vmaddr, vmsize, (void *)non_zero_addr);
815 if (T_RESULT == T_RESULT_FAIL) {
816 goto done;
817 }
818
819 memset((void *)vmaddr, 'C', PAGE_SIZE);
820 ret = madvise(vmaddr, vmsize, MADV_PAGEOUT);
821 T_QUIET;
822 T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_PAGEOUT)");
823 if (T_RESULT == T_RESULT_FAIL) {
824 goto done;
825 }
826
827 /* wait for the pages to be (asynchronously) compressed */
828 T_QUIET; T_LOG("waiting for first page to be paged out");
829 do {
830 ret = mincore(vmaddr, 1, &vec);
831 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(1st)");
832 } while (vec & MINCORE_INCORE);
833 T_QUIET; T_LOG("waiting for last page to be paged out");
834 do {
835 ret = mincore((vmaddr + vmsize - 1), 1, (char *)&vec);
836 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(last)");
837 } while (vec & MINCORE_INCORE);
838
839 ret = madvise(vmaddr, vmsize, MADV_ZERO);
840 T_QUIET;
841 T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
842 if (T_RESULT == T_RESULT_FAIL) {
843 goto done;
844 }
845 T_QUIET;
846 T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
847 "madvise(%p, %llu, MADV_ZERO) returned non zero mem at %p",
848 (void *)vmaddr, vmsize, (void *)non_zero_addr);
849 if (T_RESULT == T_RESULT_FAIL) {
850 goto done;
851 }
852
853 done:
854 if (vmaddr != 0) {
855 vm_deallocate(mach_task_self(), vmaddr, vmsize);
856 vmaddr = 0;
857 }
858 }
859
/* fill pattern for the destination buffer */
#define DEST_PATTERN 0xFEDCBA98

/*
 * rdar://31075370 — mach_vm_read_overwrite() into the current map.
 *
 * Copies vmsize1 - 2*sizeof(int) bytes from vmaddr1+4 over vmaddr2+4 and
 * checks that only the interior ints were overwritten: the first and last
 * int of the destination must still hold DEST_PATTERN.
 */
T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
rdar://31075370",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	int *ip;
	int i;

	/* source: 4 pages filled with their own index */
	vmaddr1 = 0;
	vmsize1 = 4 * 4096;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (int *)(uintptr_t)vmaddr1;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = i;
	}

	/* destination: same size, filled with DEST_PATTERN */
	vmaddr2 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr2,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (int *)(uintptr_t)vmaddr2;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = DEST_PATTERN;
	}

	/* overwrite all but the first and last int of the destination */
	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
	kr = mach_vm_read_overwrite(mach_task_self(),
	    vmaddr1 + sizeof(*ip),
	    vmsize2,
	    vmaddr2 + sizeof(*ip),
	    &vmsize2);
	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");

	/* index 0 was outside the copied range: still DEST_PATTERN */
	ip = (int *)(uintptr_t)vmaddr2;
	for (i = 0; i < 1; i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
	/*
	 * NOTE(review): the bound (vmsize1 - 2) / sizeof(*ip) evaluates to
	 * 4095, which is exactly 1 + vmsize2/sizeof(*ip) — the first index
	 * past the copied ints (1..4094) — but only by coincidence of the
	 * integer division; (vmsize1 - 2 * sizeof(*ip)) would be clearer.
	 */
	for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], i);
	}
	/* the last int was outside the copied range: still DEST_PATTERN */
	for (; i < vmsize1 / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
}
922
/*
 * rdar://35610377 — copy-on-write remapping of COPY_NONE VM objects.
 *
 * Purgeable allocations are the trigger here: the 32MB purgeable region
 * is remapped twice with copy=TRUE (a COW copy of a COW copy); both
 * remaps must succeed.
 */
T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
objects - rdar://35610377",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2, vmaddr3;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	vmsize = 32 * 1024 * 1024;

	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* make every page resident before copying */
	memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);

	/* first COW copy of the purgeable region */
	vmaddr2 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");

	/* second COW copy, of the first copy */
	vmaddr3 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr3,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr2,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
}
971
972 T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
973 non-purgeable - rdar://31990033",
974 T_META_ALL_VALID_ARCHS(true))
975 {
976 kern_return_t kr;
977 vm_address_t vmaddr;
978 vm_purgable_t state;
979
980 vmaddr = 0;
981 kr = vm_allocate(mach_task_self(), &vmaddr, 1,
982 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
983 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
984
985 state = VM_PURGABLE_DENY;
986 kr = vm_purgable_control(mach_task_self(), vmaddr,
987 VM_PURGABLE_SET_STATE, &state);
988 T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
989 "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
990 kr, mach_error_string(kr));
991
992 kr = vm_deallocate(mach_task_self(), vmaddr, 1);
993 T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
994 }
995
996 #define VMSIZE 0x10000
997
998 T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
999 T_META_ALL_VALID_ARCHS(true))
1000 {
1001 kern_return_t kr;
1002 mach_vm_address_t vmaddr1, vmaddr2;
1003 mach_vm_size_t vmsize;
1004 vm_prot_t curprot, maxprot;
1005
1006 vmaddr1 = 0;
1007 vmsize = VMSIZE;
1008 kr = mach_vm_allocate(mach_task_self(),
1009 &vmaddr1,
1010 vmsize,
1011 VM_FLAGS_ANYWHERE);
1012 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
1013
1014 vmaddr2 = 0;
1015 vmsize = 0;
1016 kr = mach_vm_remap(mach_task_self(),
1017 &vmaddr2,
1018 vmsize,
1019 0,
1020 VM_FLAGS_ANYWHERE,
1021 mach_task_self(),
1022 vmaddr1,
1023 FALSE,
1024 &curprot,
1025 &maxprot,
1026 VM_INHERIT_DEFAULT);
1027 T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
1028 vmsize, kr, mach_error_string(kr));
1029
1030 vmaddr2 = 0;
1031 vmsize = (mach_vm_size_t)-2;
1032 kr = mach_vm_remap(mach_task_self(),
1033 &vmaddr2,
1034 vmsize,
1035 0,
1036 VM_FLAGS_ANYWHERE,
1037 mach_task_self(),
1038 vmaddr1,
1039 FALSE,
1040 &curprot,
1041 &maxprot,
1042 VM_INHERIT_DEFAULT);
1043 T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
1044 vmsize, kr, mach_error_string(kr));
1045 }
1046
/* returns 0 and the shared region base address on success, -1/errno otherwise */
extern int __shared_region_check_np(uint64_t *);

/*
 * rdar://41481703 — nested pmap setup should only be triggered from the
 * kernel.  Makes a MAP_MEM_VM_SHARE memory entry over the start of the
 * shared region and maps it; both operations must succeed without
 * tripping the nested-pmap path from userspace.
 */
T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
- rdar://problem/41481703",
    T_META_ALL_VALID_ARCHS(true))
{
	int ret;
	kern_return_t kr;
	mach_vm_address_t sr_start;
	mach_vm_size_t vmsize;
	mach_vm_address_t vmaddr;
	mach_port_t mem_entry;

	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		int saved_errno;
		saved_errno = errno;

		/* no shared region: only ENOMEM is an acceptable reason */
		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}

	/* share-type memory entry over the first page of the shared region */
	vmsize = PAGE_SIZE;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize,
	    sr_start,
	    MAP_MEM_VM_SHARE | VM_PROT_READ,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);

	vmaddr = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,
	    FALSE,
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
}
1093
/* names for vm_prot_t bit combinations (indexed by the r/w/x bitmask) */
static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
/* names for vm_region share modes (indexed by SM_* value) */
static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };
1096
1097 T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region shoudl not give write access to shared region - rdar://problem/74469953",
1098 T_META_ALL_VALID_ARCHS(true))
1099 {
1100 int ret;
1101 uint64_t sr_start;
1102 kern_return_t kr;
1103 mach_vm_address_t address, tmp_address, remap_address;
1104 mach_vm_size_t size, tmp_size, remap_size;
1105 uint32_t depth;
1106 mach_msg_type_number_t count;
1107 vm_region_submap_info_data_64_t info;
1108 vm_prot_t cur_prot, max_prot;
1109 uint32_t before, after, remap;
1110 mach_port_t mem_entry;
1111
1112 ret = __shared_region_check_np(&sr_start);
1113 if (ret != 0) {
1114 int saved_errno;
1115 saved_errno = errno;
1116
1117 T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
1118 saved_errno, strerror(saved_errno));
1119 T_END;
1120 }
1121 T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
1122 T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
1123 T_LOG("shared region starts at 0x%llx", sr_start);
1124 T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
1125 "shared region starts below BASE");
1126 T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
1127 "shared region starts above BASE+SIZE");
1128
1129 /*
1130 * Step 1 - check that one can not get write access to a read-only
1131 * mapping in the shared region.
1132 */
1133 size = 0;
1134 for (address = SHARED_REGION_BASE;
1135 address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1136 address += size) {
1137 size = 0;
1138 depth = 99;
1139 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1140 kr = mach_vm_region_recurse(mach_task_self(),
1141 &address,
1142 &size,
1143 &depth,
1144 (vm_region_recurse_info_t)&info,
1145 &count);
1146 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1147 if (kr == KERN_INVALID_ADDRESS) {
1148 T_SKIP("could not find read-only nested mapping");
1149 T_END;
1150 }
1151 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1152 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1153 address, address + size, depth,
1154 prot_str[info.protection],
1155 prot_str[info.max_protection],
1156 share_mode_str[info.share_mode],
1157 info.object_id);
1158 if (depth > 0 &&
1159 (info.protection == VM_PROT_READ) &&
1160 (info.max_protection == VM_PROT_READ)) {
1161 /* nested and read-only: bingo! */
1162 break;
1163 }
1164 }
1165 if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1166 T_SKIP("could not find read-only nested mapping");
1167 T_END;
1168 }
1169
1170 /* test vm_remap() of RO */
1171 before = *(uint32_t *)(uintptr_t)address;
1172 remap_address = 0;
1173 remap_size = size;
1174 kr = mach_vm_remap(mach_task_self(),
1175 &remap_address,
1176 remap_size,
1177 0,
1178 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1179 mach_task_self(),
1180 address,
1181 FALSE,
1182 &cur_prot,
1183 &max_prot,
1184 VM_INHERIT_DEFAULT);
1185 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1186 // T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
1187 // T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
1188 /* check that region is still nested */
1189 tmp_address = address;
1190 tmp_size = 0;
1191 depth = 99;
1192 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1193 kr = mach_vm_region_recurse(mach_task_self(),
1194 &tmp_address,
1195 &tmp_size,
1196 &depth,
1197 (vm_region_recurse_info_t)&info,
1198 &count);
1199 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1200 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1201 tmp_address, tmp_address + tmp_size, depth,
1202 prot_str[info.protection],
1203 prot_str[info.max_protection],
1204 share_mode_str[info.share_mode],
1205 info.object_id);
1206 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1207 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1208 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1209 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1210 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1211 /* check that new mapping is read-only */
1212 tmp_address = remap_address;
1213 tmp_size = 0;
1214 depth = 99;
1215 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1216 kr = mach_vm_region_recurse(mach_task_self(),
1217 &tmp_address,
1218 &tmp_size,
1219 &depth,
1220 (vm_region_recurse_info_t)&info,
1221 &count);
1222 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1223 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1224 tmp_address, tmp_address + tmp_size, depth,
1225 prot_str[info.protection],
1226 prot_str[info.max_protection],
1227 share_mode_str[info.share_mode],
1228 info.object_id);
1229 T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1230 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1231 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1232 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1233 remap = *(uint32_t *)(uintptr_t)remap_address;
1234 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1235 // this would crash if actually read-only:
1236 // *(uint32_t *)(uintptr_t)remap_address = before + 1;
1237 after = *(uint32_t *)(uintptr_t)address;
1238 T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1239 // *(uint32_t *)(uintptr_t)remap_address = before;
1240 if (before != after) {
1241 T_FAIL("vm_remap() bypassed copy-on-write");
1242 } else {
1243 T_PASS("vm_remap() did not bypass copy-on-write");
1244 }
1245 /* cleanup */
1246 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1247 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1248 T_PASS("vm_remap() read-only");
1249
1250 #if defined(VM_MEMORY_ROSETTA)
1251 if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1252 T_PASS("vm_remap_new() is not present");
1253 goto skip_vm_remap_new_ro;
1254 }
1255 /* test vm_remap_new() of RO */
1256 before = *(uint32_t *)(uintptr_t)address;
1257 remap_address = 0;
1258 remap_size = size;
1259 cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1260 max_prot = VM_PROT_READ | VM_PROT_WRITE;
1261 kr = mach_vm_remap_new(mach_task_self(),
1262 &remap_address,
1263 remap_size,
1264 0,
1265 VM_FLAGS_ANYWHERE,
1266 mach_task_self(),
1267 address,
1268 FALSE,
1269 &cur_prot,
1270 &max_prot,
1271 VM_INHERIT_DEFAULT);
1272 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1273 if (kr == KERN_PROTECTION_FAILURE) {
1274 /* wrong but not a security issue... */
1275 goto skip_vm_remap_new_ro;
1276 }
1277 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1278 remap = *(uint32_t *)(uintptr_t)remap_address;
1279 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1280 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1281 after = *(uint32_t *)(uintptr_t)address;
1282 T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1283 *(uint32_t *)(uintptr_t)remap_address = before;
1284 if (before != after) {
1285 T_FAIL("vm_remap_new() bypassed copy-on-write");
1286 } else {
1287 T_PASS("vm_remap_new() did not bypass copy-on-write");
1288 }
1289 /* check that region is still nested */
1290 tmp_address = address;
1291 tmp_size = 0;
1292 depth = 99;
1293 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1294 kr = mach_vm_region_recurse(mach_task_self(),
1295 &tmp_address,
1296 &tmp_size,
1297 &depth,
1298 (vm_region_recurse_info_t)&info,
1299 &count);
1300 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1301 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1302 tmp_address, tmp_address + tmp_size, depth,
1303 prot_str[info.protection],
1304 prot_str[info.max_protection],
1305 share_mode_str[info.share_mode],
1306 info.object_id);
1307 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1308 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1309 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1310 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1311 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1312 T_PASS("vm_remap_new() read-only");
1313 skip_vm_remap_new_ro:
1314 #else /* defined(VM_MEMORY_ROSETTA) */
1315 /* pre-BigSur SDK: no vm_remap_new() */
1316 T_LOG("No vm_remap_new() to test");
1317 #endif /* defined(VM_MEMORY_ROSETTA) */
1318
1319 /* test mach_make_memory_entry_64(VM_SHARE) of RO */
1320 before = *(uint32_t *)(uintptr_t)address;
1321 remap_size = size;
1322 mem_entry = MACH_PORT_NULL;
1323 kr = mach_make_memory_entry_64(mach_task_self(),
1324 &remap_size,
1325 address,
1326 MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1327 &mem_entry,
1328 MACH_PORT_NULL);
1329 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1330 if (kr == KERN_PROTECTION_FAILURE) {
1331 /* wrong but not a security issue... */
1332 goto skip_mem_entry_vm_share_ro;
1333 }
1334 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1335 remap_address = 0;
1336 kr = mach_vm_map(mach_task_self(),
1337 &remap_address,
1338 remap_size,
1339 0, /* mask */
1340 VM_FLAGS_ANYWHERE,
1341 mem_entry,
1342 0, /* offset */
1343 FALSE, /* copy */
1344 VM_PROT_READ | VM_PROT_WRITE,
1345 VM_PROT_READ | VM_PROT_WRITE,
1346 VM_INHERIT_DEFAULT);
1347 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1348 remap = *(uint32_t *)(uintptr_t)remap_address;
1349 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1350 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1351 after = *(uint32_t *)(uintptr_t)address;
1352 T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1353 *(uint32_t *)(uintptr_t)remap_address = before;
1354 if (before != after) {
1355 T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1356 } else {
1357 T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1358 }
1359 /* check that region is still nested */
1360 tmp_address = address;
1361 tmp_size = 0;
1362 depth = 99;
1363 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1364 kr = mach_vm_region_recurse(mach_task_self(),
1365 &tmp_address,
1366 &tmp_size,
1367 &depth,
1368 (vm_region_recurse_info_t)&info,
1369 &count);
1370 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1371 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1372 tmp_address, tmp_address + tmp_size, depth,
1373 prot_str[info.protection],
1374 prot_str[info.max_protection],
1375 share_mode_str[info.share_mode],
1376 info.object_id);
1377 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1378 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1379 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1380 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1381 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1382 /* check that new mapping is a copy */
1383 tmp_address = remap_address;
1384 tmp_size = 0;
1385 depth = 99;
1386 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1387 kr = mach_vm_region_recurse(mach_task_self(),
1388 &tmp_address,
1389 &tmp_size,
1390 &depth,
1391 (vm_region_recurse_info_t)&info,
1392 &count);
1393 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1394 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1395 tmp_address, tmp_address + tmp_size, depth,
1396 prot_str[info.protection],
1397 prot_str[info.max_protection],
1398 share_mode_str[info.share_mode],
1399 info.object_id);
1400 T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1401 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1402 T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
1403 // T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1404 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1405 /* cleanup */
1406 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1407 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1408 T_PASS("mem_entry(VM_SHARE) read-only");
1409 skip_mem_entry_vm_share_ro:
1410
1411 /* test mach_make_memory_entry_64() of RO */
1412 before = *(uint32_t *)(uintptr_t)address;
1413 remap_size = size;
1414 mem_entry = MACH_PORT_NULL;
1415 kr = mach_make_memory_entry_64(mach_task_self(),
1416 &remap_size,
1417 address,
1418 VM_PROT_READ | VM_PROT_WRITE,
1419 &mem_entry,
1420 MACH_PORT_NULL);
1421 T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
1422 /* check that region is still nested */
1423 tmp_address = address;
1424 tmp_size = 0;
1425 depth = 99;
1426 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1427 kr = mach_vm_region_recurse(mach_task_self(),
1428 &tmp_address,
1429 &tmp_size,
1430 &depth,
1431 (vm_region_recurse_info_t)&info,
1432 &count);
1433 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1434 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1435 tmp_address, tmp_address + tmp_size, depth,
1436 prot_str[info.protection],
1437 prot_str[info.max_protection],
1438 share_mode_str[info.share_mode],
1439 info.object_id);
1440 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1441 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1442 // T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1443 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1444 if (depth > 0) {
1445 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1446 }
1447 T_PASS("mem_entry() read-only");
1448
1449
1450 /*
1451 * Step 2 - check that one can not share write access with a writable
1452 * mapping in the shared region.
1453 */
1454 size = 0;
1455 for (address = SHARED_REGION_BASE;
1456 address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1457 address += size) {
1458 size = 0;
1459 depth = 99;
1460 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1461 kr = mach_vm_region_recurse(mach_task_self(),
1462 &address,
1463 &size,
1464 &depth,
1465 (vm_region_recurse_info_t)&info,
1466 &count);
1467 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1468 if (kr == KERN_INVALID_ADDRESS) {
1469 T_SKIP("could not find writable nested mapping");
1470 T_END;
1471 }
1472 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1473 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1474 address, address + size, depth,
1475 prot_str[info.protection],
1476 prot_str[info.max_protection],
1477 share_mode_str[info.share_mode],
1478 info.object_id);
1479 if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
1480 /* nested and writable: bingo! */
1481 break;
1482 }
1483 }
1484 if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1485 T_SKIP("could not find writable nested mapping");
1486 T_END;
1487 }
1488
1489 /* test vm_remap() of RW */
1490 before = *(uint32_t *)(uintptr_t)address;
1491 remap_address = 0;
1492 remap_size = size;
1493 kr = mach_vm_remap(mach_task_self(),
1494 &remap_address,
1495 remap_size,
1496 0,
1497 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1498 mach_task_self(),
1499 address,
1500 FALSE,
1501 &cur_prot,
1502 &max_prot,
1503 VM_INHERIT_DEFAULT);
1504 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1505 if (!(cur_prot & VM_PROT_WRITE)) {
1506 T_LOG("vm_remap(): 0x%llx not writable %s/%s",
1507 remap_address, prot_str[cur_prot], prot_str[max_prot]);
1508 T_ASSERT_FAIL("vm_remap() remapping not writable");
1509 }
1510 remap = *(uint32_t *)(uintptr_t)remap_address;
1511 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1512 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1513 after = *(uint32_t *)(uintptr_t)address;
1514 T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1515 *(uint32_t *)(uintptr_t)remap_address = before;
1516 if (before != after) {
1517 T_FAIL("vm_remap() bypassed copy-on-write");
1518 } else {
1519 T_PASS("vm_remap() did not bypass copy-on-write");
1520 }
1521 /* check that region is still nested */
1522 tmp_address = address;
1523 tmp_size = 0;
1524 depth = 99;
1525 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1526 kr = mach_vm_region_recurse(mach_task_self(),
1527 &tmp_address,
1528 &tmp_size,
1529 &depth,
1530 (vm_region_recurse_info_t)&info,
1531 &count);
1532 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1533 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1534 tmp_address, tmp_address + tmp_size, depth,
1535 prot_str[info.protection],
1536 prot_str[info.max_protection],
1537 share_mode_str[info.share_mode],
1538 info.object_id);
1539 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1540 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1541 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1542 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1543 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1544 /* cleanup */
1545 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1546 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1547
1548 #if defined(VM_MEMORY_ROSETTA)
1549 if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1550 T_PASS("vm_remap_new() is not present");
1551 goto skip_vm_remap_new_rw;
1552 }
1553 /* test vm_remap_new() of RW */
1554 before = *(uint32_t *)(uintptr_t)address;
1555 remap_address = 0;
1556 remap_size = size;
1557 cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1558 max_prot = VM_PROT_READ | VM_PROT_WRITE;
1559 kr = mach_vm_remap_new(mach_task_self(),
1560 &remap_address,
1561 remap_size,
1562 0,
1563 VM_FLAGS_ANYWHERE,
1564 mach_task_self(),
1565 address,
1566 FALSE,
1567 &cur_prot,
1568 &max_prot,
1569 VM_INHERIT_DEFAULT);
1570 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1571 if (kr == KERN_PROTECTION_FAILURE) {
1572 /* wrong but not a security issue... */
1573 goto skip_vm_remap_new_rw;
1574 }
1575 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1576 if (!(cur_prot & VM_PROT_WRITE)) {
1577 T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
1578 remap_address, prot_str[cur_prot], prot_str[max_prot]);
1579 T_ASSERT_FAIL("vm_remap_new() remapping not writable");
1580 }
1581 remap = *(uint32_t *)(uintptr_t)remap_address;
1582 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1583 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1584 after = *(uint32_t *)(uintptr_t)address;
1585 T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1586 *(uint32_t *)(uintptr_t)remap_address = before;
1587 if (before != after) {
1588 T_FAIL("vm_remap_new() bypassed copy-on-write");
1589 } else {
1590 T_PASS("vm_remap_new() did not bypass copy-on-write");
1591 }
1592 /* check that region is still nested */
1593 tmp_address = address;
1594 tmp_size = 0;
1595 depth = 99;
1596 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1597 kr = mach_vm_region_recurse(mach_task_self(),
1598 &tmp_address,
1599 &tmp_size,
1600 &depth,
1601 (vm_region_recurse_info_t)&info,
1602 &count);
1603 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1604 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1605 tmp_address, tmp_address + tmp_size, depth,
1606 prot_str[info.protection],
1607 prot_str[info.max_protection],
1608 share_mode_str[info.share_mode],
1609 info.object_id);
1610 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1611 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1612 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1613 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1614 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1615 /* cleanup */
1616 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1617 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1618 skip_vm_remap_new_rw:
1619 #else /* defined(VM_MEMORY_ROSETTA) */
1620 /* pre-BigSur SDK: no vm_remap_new() */
1621 T_LOG("No vm_remap_new() to test");
1622 #endif /* defined(VM_MEMORY_ROSETTA) */
1623
1624 /* test mach_make_memory_entry_64(VM_SHARE) of RW */
1625 before = *(uint32_t *)(uintptr_t)address;
1626 remap_size = size;
1627 mem_entry = MACH_PORT_NULL;
1628 kr = mach_make_memory_entry_64(mach_task_self(),
1629 &remap_size,
1630 address,
1631 MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1632 &mem_entry,
1633 MACH_PORT_NULL);
1634 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1635 if (kr == KERN_PROTECTION_FAILURE) {
1636 /* wrong but not a security issue... */
1637 goto skip_mem_entry_vm_share_rw;
1638 }
1639 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1640 T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping");
1641 // T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar();
1642 remap_address = 0;
1643 kr = mach_vm_map(mach_task_self(),
1644 &remap_address,
1645 remap_size,
1646 0, /* mask */
1647 VM_FLAGS_ANYWHERE,
1648 mem_entry,
1649 0, /* offset */
1650 FALSE, /* copy */
1651 VM_PROT_READ | VM_PROT_WRITE,
1652 VM_PROT_READ | VM_PROT_WRITE,
1653 VM_INHERIT_DEFAULT);
1654 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1655 remap = *(uint32_t *)(uintptr_t)remap_address;
1656 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1657 // T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1658 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1659 // T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1660 after = *(uint32_t *)(uintptr_t)address;
1661 T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1662 *(uint32_t *)(uintptr_t)remap_address = before;
1663 if (before != after) {
1664 T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1665 } else {
1666 T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1667 }
1668 /* check that region is still nested */
1669 tmp_address = address;
1670 tmp_size = 0;
1671 depth = 99;
1672 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1673 kr = mach_vm_region_recurse(mach_task_self(),
1674 &tmp_address,
1675 &tmp_size,
1676 &depth,
1677 (vm_region_recurse_info_t)&info,
1678 &count);
1679 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1680 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1681 tmp_address, tmp_address + tmp_size, depth,
1682 prot_str[info.protection],
1683 prot_str[info.max_protection],
1684 share_mode_str[info.share_mode],
1685 info.object_id);
1686 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1687 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1688 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1689 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1690 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1691 /* cleanup */
1692 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1693 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1694 mach_port_deallocate(mach_task_self(), mem_entry);
1695 skip_mem_entry_vm_share_rw:
1696
1697 /* test mach_make_memory_entry_64() of RW */
1698 before = *(uint32_t *)(uintptr_t)address;
1699 remap_size = size;
1700 mem_entry = MACH_PORT_NULL;
1701 kr = mach_make_memory_entry_64(mach_task_self(),
1702 &remap_size,
1703 address,
1704 VM_PROT_READ | VM_PROT_WRITE,
1705 &mem_entry,
1706 MACH_PORT_NULL);
1707 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
1708 remap_address = 0;
1709 kr = mach_vm_map(mach_task_self(),
1710 &remap_address,
1711 remap_size,
1712 0, /* mask */
1713 VM_FLAGS_ANYWHERE,
1714 mem_entry,
1715 0, /* offset */
1716 FALSE, /* copy */
1717 VM_PROT_READ | VM_PROT_WRITE,
1718 VM_PROT_READ | VM_PROT_WRITE,
1719 VM_INHERIT_DEFAULT);
1720 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1721 remap = *(uint32_t *)(uintptr_t)remap_address;
1722 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1723 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1724 after = *(uint32_t *)(uintptr_t)address;
1725 T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
1726 *(uint32_t *)(uintptr_t)remap_address = before;
1727 /* check that region is no longer nested */
1728 tmp_address = address;
1729 tmp_size = 0;
1730 depth = 99;
1731 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1732 kr = mach_vm_region_recurse(mach_task_self(),
1733 &tmp_address,
1734 &tmp_size,
1735 &depth,
1736 (vm_region_recurse_info_t)&info,
1737 &count);
1738 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1739 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1740 tmp_address, tmp_address + tmp_size, depth,
1741 prot_str[info.protection],
1742 prot_str[info.max_protection],
1743 share_mode_str[info.share_mode],
1744 info.object_id);
1745 if (before != after) {
1746 if (depth == 0) {
1747 T_PASS("mem_entry() honored copy-on-write");
1748 } else {
1749 T_FAIL("mem_entry() did not trigger copy-on_write");
1750 }
1751 } else {
1752 T_FAIL("mem_entry() did not honor copy-on-write");
1753 }
1754 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1755 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1756 T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
1757 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1758 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1759 /* cleanup */
1760 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1761 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1762 mach_port_deallocate(mach_task_self(), mem_entry);
1763 }
1764
1765 T_DECL(copyoverwrite_submap_protection, "test copywrite vm region submap \
1766 protection", T_META_ALL_VALID_ARCHS(true))
1767 {
1768 kern_return_t kr;
1769 mach_vm_address_t vmaddr;
1770 mach_vm_size_t vmsize;
1771 natural_t depth;
1772 vm_region_submap_short_info_data_64_t region_info;
1773 mach_msg_type_number_t region_info_count;
1774
1775 for (vmaddr = SHARED_REGION_BASE;
1776 vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1777 vmaddr += vmsize) {
1778 depth = 99;
1779 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1780 kr = mach_vm_region_recurse(mach_task_self(),
1781 &vmaddr,
1782 &vmsize,
1783 &depth,
1784 (vm_region_info_t) ®ion_info,
1785 ®ion_info_count);
1786 if (kr == KERN_INVALID_ADDRESS) {
1787 break;
1788 }
1789 T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
1790 T_ASSERT_EQ(region_info_count,
1791 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1792 "vm_region_recurse(0x%llx) count = %d expected %d",
1793 vmaddr, region_info_count,
1794 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1795
1796 T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
1797 vmaddr, depth, region_info.protection,
1798 region_info.max_protection);
1799 if (depth == 0) {
1800 /* not a submap mapping: next mapping */
1801 continue;
1802 }
1803 if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1804 break;
1805 }
1806 kr = mach_vm_copy(mach_task_self(),
1807 vmaddr,
1808 vmsize,
1809 vmaddr);
1810 if (kr == KERN_PROTECTION_FAILURE ||
1811 kr == KERN_INVALID_ADDRESS) {
1812 T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
1813 vmaddr, vmsize, kr, mach_error_string(kr));
1814 continue;
1815 }
1816 T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
1817 vmaddr, vmsize, region_info.protection);
1818 depth = 0;
1819 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1820 kr = mach_vm_region_recurse(mach_task_self(),
1821 &vmaddr,
1822 &vmsize,
1823 &depth,
1824 (vm_region_info_t) ®ion_info,
1825 ®ion_info_count);
1826 T_ASSERT_MACH_SUCCESS(kr, "m_region_recurse(0x%llx)", vmaddr);
1827 T_ASSERT_EQ(region_info_count,
1828 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1829 "vm_region_recurse() count = %d expected %d",
1830 region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1831
1832 T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
1833 vmaddr, depth);
1834 T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
1835 0, "vm_region_recurse(0x%llx): prot 0x%x",
1836 vmaddr, region_info.protection);
1837 }
1838 }
1839
T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
    the shared region triggers code-signing violations",
    T_META_ALL_VALID_ARCHS(true))
{
	uint32_t *addr, before, after;
	int retval;
	int saved_errno;
	kern_return_t kr;
	vm_address_t map_addr, remap_addr;
	vm_prot_t curprot, maxprot;

	/* case 1: text in the shared cache (printf lives in libsystem) */
	addr = (uint32_t *)&printf;
#if __has_feature(ptrauth_calls)
	/* strip the PAC signature so the pointer is a plain VA */
	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
	map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
	/* remap the text page first to force it into a shareable state */
	remap_addr = 0;
	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(), map_addr,
	    FALSE, /* copy */
	    &curprot, &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
	    kr, mach_error_string(kr));
	/* wiring shared text must either fail with EACCES or leave it intact */
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EACCES);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	/* case 2: another shared-cache text page, without the prior remap */
	addr = (uint32_t *) &fprintf;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EACCES);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire text");
	}

	/* case 3: text in the main binary (this test's own entry point,
	 * presumably the darwintest-generated symbol for T_DECL(wire_text) --
	 * NOTE(review): declared elsewhere in this file; confirm) */
	addr = (uint32_t *) &testmain_wire_text;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire text error return error %d (%s)",
		    saved_errno, strerror(saved_errno));
	} else if (after != before) {
		T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire text");
	}
}
1908
1909 T_DECL(remap_comm_page, "test remapping of the commpage - rdar://93177124",
1910 T_META_ALL_VALID_ARCHS(true))
1911 {
1912 kern_return_t kr;
1913 mach_vm_address_t commpage_addr, remap_addr;
1914 mach_vm_size_t vmsize;
1915 vm_prot_t curprot, maxprot;
1916
1917 #if __arm__
1918 commpage_addr = 0xFFFF4000ULL;
1919 #elif __arm64__
1920 commpage_addr = 0x0000000FFFFFC000ULL;
1921 #elif __x86_64__
1922 commpage_addr = 0x00007FFFFFE00000ULL;
1923 #else
1924 T_FAIL("unknown commpage address for this architecture");
1925 #endif
1926
1927 T_LOG("Remapping commpage from 0x%llx", commpage_addr);
1928 vmsize = vm_kernel_page_size;
1929 remap_addr = 0;
1930 kr = mach_vm_remap(mach_task_self(),
1931 &remap_addr,
1932 vmsize,
1933 0, /* mask */
1934 VM_FLAGS_ANYWHERE,
1935 mach_task_self(),
1936 commpage_addr,
1937 TRUE, /* copy */
1938 &curprot,
1939 &maxprot,
1940 VM_INHERIT_DEFAULT);
1941 if (kr == KERN_INVALID_ADDRESS) {
1942 T_SKIP("No mapping found at 0x%llx\n", commpage_addr);
1943 return;
1944 }
1945 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() of commpage from 0x%llx", commpage_addr);
1946 }
1947