1 /* Mach vm map miscellaneous unit tests
2 *
3 * This test program serves to be a regression test suite for legacy
4 * vm issues, ideally each test will be linked to a radar number and
5 * perform a set of certain validations.
6 *
7 */
8 #include <darwintest.h>
9
10 #include <dlfcn.h>
11 #include <errno.h>
12 #include <ptrauth.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <time.h>
17
18 #include <sys/mman.h>
19
20 #include <mach/mach_error.h>
21 #include <mach/mach_init.h>
22 #include <mach/mach_port.h>
23 #include <mach/mach_vm.h>
24 #include <mach/vm_map.h>
25 #include <mach/task.h>
26 #include <mach/task_info.h>
27 #include <mach/shared_region.h>
28 #include <machine/cpu_capabilities.h>
29
/* Common metadata: all tests live under xnu.vm and may run concurrently
 * unless an individual T_DECL overrides it (e.g. madvise_shared). */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_RUN_CONCURRENTLY(true));
35
/*
 * rdar://23334087 - VM memory tags should be propagated through memory
 * entries.
 *
 * Allocates a region of 3 chunks, re-allocates each 16KB chunk in place
 * with a distinct VM tag (100 + i), wraps the whole region in a memory
 * entry, maps that entry back into the task and verifies with
 * mach_vm_region_recurse() that each chunk of the new mapping reports
 * the expected tag.  Runs twice (see the `again` label), once with
 * MAP_MEM_VM_COPY and once with MAP_MEM_VM_SHARE, in a random order.
 *
 * override_tag: when non-zero, pass VM_MAKE_TAG(200) to mach_vm_map()
 * and expect tag 200 on every chunk instead of the per-chunk tags.
 */
static void
test_memory_entry_tagging(int override_tag)
{
	int pass;                      /* counts the copy/share iterations */
	int do_copy;                   /* 1: MAP_MEM_VM_COPY, 0: MAP_MEM_VM_SHARE */
	kern_return_t kr;
	mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t *vmaddr_ptr; /* points at the per-flavor addr above */
	mach_vm_size_t *vmsize_ptr;
	mach_vm_address_t vmaddr_chunk;
	mach_vm_size_t vmsize_chunk;
	mach_vm_offset_t vmoff;
	mach_port_t mem_entry_copied, mem_entry_shared;
	mach_port_t *mem_entry_ptr;
	int i;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t ri_count;
	unsigned int depth;
	int vm_flags;
	int expected_tag;

	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_copied = 0;
	vmsize_shared = 0;
	vmaddr_chunk = 0;
	vmsize_chunk = 16 * 1024;
	vmaddr_orig = 0;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;
	pass = 0;

	/* reserve the full 3-chunk range in one allocation */
	vmaddr_orig = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * Re-allocate each chunk on top of the reservation (FIXED|OVERWRITE)
	 * so that chunk i carries VM tag 100 + i.
	 */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* touch every page so the region is fully resident/dirtied */
	for (vmoff = 0;
	    vmoff < vmsize_orig;
	    vmoff += PAGE_SIZE) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

	/* randomize which flavor (copy vs share) goes first */
	do_copy = time(NULL) & 1;
again:
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	/* make a memory entry covering the whole tagged region */
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig, /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the memory entry back into this task, possibly with tag 200 */
	*vmaddr_ptr = 0;
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0, /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * Sanity check the copy/share semantics: write to the original and
	 * see whether the new mapping observes the change.
	 */
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* verify each chunk of the new mapping reports the expected VM tag */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t vmaddr_info;
		mach_vm_size_t vmsize_info;

		vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
		vmsize_info = 0;
		depth = 1;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* second pass exercises the other memory-entry flavor */
	if (++pass < 2) {
		do_copy = !do_copy;
		goto again;
	}

done:
	/* unconditional cleanup; all paths land here */
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
		vmsize_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
		vmsize_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
		vmsize_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}

	return;
}
272
/*
 * rdar://22611816 - mach_make_memory_entry(MAP_MEM_VM_COPY) should never
 * use a KERNEL_BUFFER copy.
 *
 * Makes a MAP_MEM_VM_COPY memory entry of a 1-byte allocation, maps it
 * back into the task (with copy=TRUE), and verifies that the mapping
 * initially sees the original's contents but that a write through the
 * new mapping does NOT show through to the original - i.e. true copy
 * semantics.
 */
static void
test_map_memory_entry(void)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	mach_port_t mem_entry;
	unsigned char *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 0;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;

	/* source: a 1-byte allocation (rounded up to a page by the kernel) */
	vmsize1 = 1;
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	/* a copying memory entry covering the source */
	vmsize2 = 1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1, /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	/* returned size may be page-rounded, so only >= is required */
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the entry, asking for yet another copy */
	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    TRUE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the copy must initially match the original... */
	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* ...and a write to the copy must not affect the original */
	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	/* unconditional cleanup; all paths land here */
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
		vmsize1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
		vmsize2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}

	return;
}
381
382 T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
383 VM memory tags should be propagated through memory entries",
384 T_META_ALL_VALID_ARCHS(true))
385 {
386 test_memory_entry_tagging(0);
387 test_memory_entry_tagging(1);
388 }
389
/* Entry point for the MAP_MEM_VM_COPY copy-semantics regression above. */
T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
    mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
    copy", T_META_ALL_VALID_ARCHS(true))
{
	test_map_memory_entry();
}
396
397 static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };
398
/*
 * Returns the task's current physical footprint (phys_footprint from
 * TASK_VM_INFO), asserting that task_info() succeeds.  On arm64 it also
 * asserts the kernel filled in the full TASK_VM_INFO_COUNT structure,
 * since phys_footprint sits past the older, shorter info layout.
 */
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__)
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) */
	return ti.phys_footprint;
}
420
/*
 * Tracks phys_footprint across purgeable state transitions of a 1MB
 * purgeable allocation, half of which is mlock()ed:
 * allocate -> touch -> mlock half -> VOLATILE -> EMPTY -> munlock ->
 * VOLATILE -> NONVOLATILE.  Footprint deltas that depend on external
 * conditions (e.g. memory pressure) are only logged as WARN rather
 * than asserted.
 */
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
    emptying, volatilizing purgeable vm")
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	vm_purgable_t state;
	uint64_t footprint[8];      /* snapshot after each step */

	vm_addr = 0;
	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* footprint0 */
	footprint[0] = task_footprint();
	T_LOG(" footprint[0] = %llu", footprint[0]);

	/* touch one byte per page to make the whole allocation resident */
	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG(" footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}

	/* wire the first half so it cannot be purged */
	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* footprint2 == footprint1 */
	footprint[2] = task_footprint();
	T_LOG(" footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	/* SET_STATE returns the previous state in *state */
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2): only the unwired half
	 * leaves the footprint */
	footprint[3] = task_footprint();
	T_LOG(" footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}

	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	/* previous state may already be EMPTY if the pager purged it */
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3 */
	footprint[4] = task_footprint();
	T_LOG(" footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}

	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG(" footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG(" footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}

	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG(" footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}
559
/*
 * rdar://2295713 - madvise(MADV_FREE_REUSABLE) on memory shared via
 * vm_remap() should account BOTH mappings as reusable.
 *
 * Allocates vmsize1+vmsize2, shares it with vm_remap(copy=FALSE),
 * mlock()s the tail through the remap, then madvises the head of the
 * original reusable and checks footprint drops by 2*vmsize1 and that
 * task_info reports 2*vmsize1 reusable bytes.
 * Not run concurrently: it compares absolute footprint values.
 */
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
    rethink needs madvise(MADV_FREE_HARDER)",
    T_META_RUN_CONCURRENTLY(false),
    T_META_ALL_VALID_ARCHS(true))
{
	vm_address_t vmaddr = 0, vmaddr2 = 0;
	vm_size_t vmsize, vmsize1, vmsize2;
	kern_return_t kr;
	char *cp;
	vm_prot_t curprot, maxprot;
	int ret;
	task_vm_info_data_t ti;
	mach_msg_type_number_t ti_count;
	int vmflags;
	uint64_t footprint_before, footprint_after;

	vmsize1 = 64 * 1024; /* 64KB to madvise() */
	vmsize2 = 32 * 1024; /* 32KB to mlock() */
	vmsize = vmsize1 + vmsize2;
	vmflags = VM_FLAGS_ANYWHERE;
	/* tag as MALLOC - presumably needed for the reusable accounting
	 * path exercised here; confirm against vm_map_reuse semantics */
	VM_SET_FLAGS_ALIAS(vmflags, VM_MEMORY_MALLOC);
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    vmflags);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* dirty every byte */
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	/* share (copy=FALSE) the whole range at a second address */
	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the remap must observe the original's contents */
	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp,
		    (unsigned char)*cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	/* and a write through one mapping must show through the other */
	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */

	/* wire the tail through the remap; only the head will be madvised */
	ret = mlock((char *)(uintptr_t)(vmaddr2 + vmsize1),
	    vmsize2);
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "mlock()");

	footprint_before = task_footprint();

	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize1,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* both mappings of the head go reusable: 2 * vmsize1 off footprint */
	footprint_after = task_footprint();
	T_ASSERT_EQ(footprint_after, footprint_before - 2 * vmsize1, NULL);

	ti_count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &ti_count);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "task_info()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_EQ(ti.reusable, 2ULL * vmsize1, "ti.reusable=%lld expected %lld",
	    ti.reusable, (uint64_t)(2 * vmsize1));
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	/* unconditional cleanup; all paths land here */
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}
689
690 T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
691 rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
692 [ purgeable_malloc became eligible for reuse ]",
693 T_META_ALL_VALID_ARCHS(true))
694 {
695 #if defined(__x86_64__) || defined(__i386__)
696 if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
697 T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.)");
698 }
699 #endif /* defined(__x86_64__) || defined(__i386__) */
700 vm_address_t vmaddr = 0;
701 vm_size_t vmsize;
702 kern_return_t kr;
703 char *cp;
704 int ret;
705
706 vmsize = 10 * 1024 * 1024; /* 10MB */
707 kr = vm_allocate(mach_task_self(),
708 &vmaddr,
709 vmsize,
710 (VM_FLAGS_ANYWHERE |
711 VM_FLAGS_PURGABLE |
712 VM_MAKE_TAG(VM_MEMORY_MALLOC)));
713 T_QUIET;
714 T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
715 if (T_RESULT == T_RESULT_FAIL) {
716 goto done;
717 }
718
719 for (cp = (char *)(uintptr_t)vmaddr;
720 cp < (char *)(uintptr_t)(vmaddr + vmsize);
721 cp++) {
722 *cp = 'x';
723 }
724
725 ret = madvise((char *)(uintptr_t)vmaddr,
726 vmsize,
727 MADV_CAN_REUSE);
728 T_QUIET;
729 T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be adviced to reuse");
730 if (T_RESULT == T_RESULT_FAIL) {
731 goto done;
732 }
733
734 done:
735 if (vmaddr != 0) {
736 vm_deallocate(mach_task_self(), vmaddr, vmsize);
737 vmaddr = 0;
738 }
739 }
740
741 #define DEST_PATTERN 0xFEDCBA98
742
/*
 * rdar://31075370 - mach_vm_read_overwrite() into an existing mapping.
 *
 * Fills a source buffer with its element indices and a destination with
 * DEST_PATTERN, then overwrites the destination's interior (offset by
 * one int, length vmsize1 - 2 ints) from the source's interior.  The
 * destination must end up: [0]=pattern, [1..N-2]=index, [N-1]=pattern.
 */
T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
    rdar://31075370",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	int *ip;
	int i;

	vmaddr1 = 0;
	vmsize1 = 4 * 4096;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* source: each int holds its own index */
	ip = (int *)(uintptr_t)vmaddr1;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = i;
	}

	vmaddr2 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr2,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* destination: a sentinel pattern everywhere */
	ip = (int *)(uintptr_t)vmaddr2;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = DEST_PATTERN;
	}

	/* copy everything except the first and last int */
	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
	kr = mach_vm_read_overwrite(mach_task_self(),
	    vmaddr1 + sizeof(*ip),
	    vmsize2,
	    vmaddr2 + sizeof(*ip),
	    &vmsize2);
	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");

	ip = (int *)(uintptr_t)vmaddr2;
	/* first element untouched */
	for (i = 0; i < 1; i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
	/* interior overwritten with source indices; note the bound mixes
	 * bytes and elements ((vmsize1 - 2) / sizeof) but integer division
	 * yields the correct last overwritten index for these sizes */
	for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], i);
	}
	/* last element untouched */
	for (; i < vmsize1 / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
}
803
804 T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
805 objects - rdar://35610377",
806 T_META_ALL_VALID_ARCHS(true))
807 {
808 kern_return_t kr;
809 mach_vm_address_t vmaddr1, vmaddr2, vmaddr3;
810 mach_vm_size_t vmsize;
811 vm_prot_t curprot, maxprot;
812
813 vmsize = 32 * 1024 * 1024;
814
815 vmaddr1 = 0;
816 kr = mach_vm_allocate(mach_task_self(),
817 &vmaddr1,
818 vmsize,
819 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
820 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
821
822 memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);
823
824 vmaddr2 = 0;
825 kr = mach_vm_remap(mach_task_self(),
826 &vmaddr2,
827 vmsize,
828 0, /* mask */
829 VM_FLAGS_ANYWHERE,
830 mach_task_self(),
831 vmaddr1,
832 TRUE, /* copy */
833 &curprot,
834 &maxprot,
835 VM_INHERIT_DEFAULT);
836 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");
837
838 vmaddr3 = 0;
839 kr = mach_vm_remap(mach_task_self(),
840 &vmaddr3,
841 vmsize,
842 0, /* mask */
843 VM_FLAGS_ANYWHERE,
844 mach_task_self(),
845 vmaddr2,
846 TRUE, /* copy */
847 &curprot,
848 &maxprot,
849 VM_INHERIT_DEFAULT);
850 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
851 }
852
853 T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
854 non-purgeable - rdar://31990033",
855 T_META_ALL_VALID_ARCHS(true))
856 {
857 kern_return_t kr;
858 vm_address_t vmaddr;
859 vm_purgable_t state;
860
861 vmaddr = 0;
862 kr = vm_allocate(mach_task_self(), &vmaddr, 1,
863 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
864 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
865
866 state = VM_PURGABLE_DENY;
867 kr = vm_purgable_control(mach_task_self(), vmaddr,
868 VM_PURGABLE_SET_STATE, &state);
869 T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
870 "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
871 kr, mach_error_string(kr));
872
873 kr = vm_deallocate(mach_task_self(), vmaddr, 1);
874 T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
875 }
876
#define VMSIZE 0x10000

/*
 * rdar://33114981 - mach_vm_remap() must reject degenerate sizes:
 * both size 0 and a near-SIZE_MAX size ((mach_vm_size_t)-2, which
 * would overflow when rounded) must fail with KERN_INVALID_ARGUMENT.
 */
T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	/* a valid source region to remap from */
	vmaddr1 = 0;
	vmsize = VMSIZE;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* size 0 must be rejected */
	vmaddr2 = 0;
	vmsize = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));

	/* absurdly large size must be rejected too */
	vmaddr2 = 0;
	vmsize = (mach_vm_size_t)-2;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));
}
927
928 extern int __shared_region_check_np(uint64_t *);
929
/*
 * rdar://41481703 - nested pmap should only be triggered from kernel.
 *
 * Locates the shared region via __shared_region_check_np(), makes a
 * MAP_MEM_VM_SHARE memory entry for its first page and maps it
 * read-only; both operations must succeed without tripping a nested
 * pmap from userspace.  Skips (via assert on ENOMEM + T_END) when no
 * shared region is mapped in this process.
 */
T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
    - rdar://problem/41481703",
    T_META_ALL_VALID_ARCHS(true))
{
	int ret;
	kern_return_t kr;
	mach_vm_address_t sr_start;
	mach_vm_size_t vmsize;
	mach_vm_address_t vmaddr;
	mach_port_t mem_entry;

	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		/* save errno before any other call can clobber it */
		int saved_errno;
		saved_errno = errno;

		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}

	/* share one page of the shared region through a memory entry */
	vmsize = PAGE_SIZE;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize,
	    sr_start,
	    MAP_MEM_VM_SHARE | VM_PROT_READ,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);

	vmaddr = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,
	    FALSE,
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
}
974
/* Protection string indexed by a vm_prot_t r/w/x bit mask (0..7). */
static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
/* Share-mode names indexed by the share_mode value from vm_region_recurse(). */
static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };
977
978 T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region shoudl not give write access to shared region - rdar://problem/74469953",
979 T_META_ALL_VALID_ARCHS(true))
980 {
981 int ret;
982 uint64_t sr_start;
983 kern_return_t kr;
984 mach_vm_address_t address, tmp_address, remap_address;
985 mach_vm_size_t size, tmp_size, remap_size;
986 uint32_t depth;
987 mach_msg_type_number_t count;
988 vm_region_submap_info_data_64_t info;
989 vm_prot_t cur_prot, max_prot;
990 uint32_t before, after, remap;
991 mach_port_t mem_entry;
992
993 ret = __shared_region_check_np(&sr_start);
994 if (ret != 0) {
995 int saved_errno;
996 saved_errno = errno;
997
998 T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
999 saved_errno, strerror(saved_errno));
1000 T_END;
1001 }
1002 T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
1003 T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
1004 T_LOG("shared region starts at 0x%llx", sr_start);
1005 T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
1006 "shared region starts below BASE");
1007 T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
1008 "shared region starts above BASE+SIZE");
1009
1010 /*
1011 * Step 1 - check that one can not get write access to a read-only
1012 * mapping in the shared region.
1013 */
1014 size = 0;
1015 for (address = SHARED_REGION_BASE;
1016 address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1017 address += size) {
1018 size = 0;
1019 depth = 99;
1020 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1021 kr = mach_vm_region_recurse(mach_task_self(),
1022 &address,
1023 &size,
1024 &depth,
1025 (vm_region_recurse_info_t)&info,
1026 &count);
1027 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1028 if (kr == KERN_INVALID_ADDRESS) {
1029 T_SKIP("could not find read-only nested mapping");
1030 T_END;
1031 }
1032 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1033 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1034 address, address + size, depth,
1035 prot_str[info.protection],
1036 prot_str[info.max_protection],
1037 share_mode_str[info.share_mode],
1038 info.object_id);
1039 if (depth > 0 &&
1040 (info.protection == VM_PROT_READ) &&
1041 (info.max_protection == VM_PROT_READ)) {
1042 /* nested and read-only: bingo! */
1043 break;
1044 }
1045 }
1046 if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1047 T_SKIP("could not find read-only nested mapping");
1048 T_END;
1049 }
1050
1051 /* test vm_remap() of RO */
1052 before = *(uint32_t *)(uintptr_t)address;
1053 remap_address = 0;
1054 remap_size = size;
1055 kr = mach_vm_remap(mach_task_self(),
1056 &remap_address,
1057 remap_size,
1058 0,
1059 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1060 mach_task_self(),
1061 address,
1062 FALSE,
1063 &cur_prot,
1064 &max_prot,
1065 VM_INHERIT_DEFAULT);
1066 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1067 // T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
1068 // T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
1069 /* check that region is still nested */
1070 tmp_address = address;
1071 tmp_size = 0;
1072 depth = 99;
1073 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1074 kr = mach_vm_region_recurse(mach_task_self(),
1075 &tmp_address,
1076 &tmp_size,
1077 &depth,
1078 (vm_region_recurse_info_t)&info,
1079 &count);
1080 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1081 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1082 tmp_address, tmp_address + tmp_size, depth,
1083 prot_str[info.protection],
1084 prot_str[info.max_protection],
1085 share_mode_str[info.share_mode],
1086 info.object_id);
1087 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1088 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1089 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1090 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1091 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1092 /* check that new mapping is read-only */
1093 tmp_address = remap_address;
1094 tmp_size = 0;
1095 depth = 99;
1096 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1097 kr = mach_vm_region_recurse(mach_task_self(),
1098 &tmp_address,
1099 &tmp_size,
1100 &depth,
1101 (vm_region_recurse_info_t)&info,
1102 &count);
1103 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1104 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1105 tmp_address, tmp_address + tmp_size, depth,
1106 prot_str[info.protection],
1107 prot_str[info.max_protection],
1108 share_mode_str[info.share_mode],
1109 info.object_id);
1110 T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1111 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1112 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1113 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1114 remap = *(uint32_t *)(uintptr_t)remap_address;
1115 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1116 // this would crash if actually read-only:
1117 // *(uint32_t *)(uintptr_t)remap_address = before + 1;
1118 after = *(uint32_t *)(uintptr_t)address;
1119 T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1120 // *(uint32_t *)(uintptr_t)remap_address = before;
1121 if (before != after) {
1122 T_FAIL("vm_remap() bypassed copy-on-write");
1123 } else {
1124 T_PASS("vm_remap() did not bypass copy-on-write");
1125 }
1126 /* cleanup */
1127 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1128 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1129 T_PASS("vm_remap() read-only");
1130
1131 #if defined(VM_MEMORY_ROSETTA)
1132 if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1133 T_PASS("vm_remap_new() is not present");
1134 goto skip_vm_remap_new_ro;
1135 }
1136 /* test vm_remap_new() of RO */
1137 before = *(uint32_t *)(uintptr_t)address;
1138 remap_address = 0;
1139 remap_size = size;
1140 cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1141 max_prot = VM_PROT_READ | VM_PROT_WRITE;
1142 kr = mach_vm_remap_new(mach_task_self(),
1143 &remap_address,
1144 remap_size,
1145 0,
1146 VM_FLAGS_ANYWHERE,
1147 mach_task_self(),
1148 address,
1149 FALSE,
1150 &cur_prot,
1151 &max_prot,
1152 VM_INHERIT_DEFAULT);
1153 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1154 if (kr == KERN_PROTECTION_FAILURE) {
1155 /* wrong but not a security issue... */
1156 goto skip_vm_remap_new_ro;
1157 }
1158 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1159 remap = *(uint32_t *)(uintptr_t)remap_address;
1160 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1161 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1162 after = *(uint32_t *)(uintptr_t)address;
1163 T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1164 *(uint32_t *)(uintptr_t)remap_address = before;
1165 if (before != after) {
1166 T_FAIL("vm_remap_new() bypassed copy-on-write");
1167 } else {
1168 T_PASS("vm_remap_new() did not bypass copy-on-write");
1169 }
1170 /* check that region is still nested */
1171 tmp_address = address;
1172 tmp_size = 0;
1173 depth = 99;
1174 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1175 kr = mach_vm_region_recurse(mach_task_self(),
1176 &tmp_address,
1177 &tmp_size,
1178 &depth,
1179 (vm_region_recurse_info_t)&info,
1180 &count);
1181 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1182 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1183 tmp_address, tmp_address + tmp_size, depth,
1184 prot_str[info.protection],
1185 prot_str[info.max_protection],
1186 share_mode_str[info.share_mode],
1187 info.object_id);
1188 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1189 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1190 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1191 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1192 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1193 T_PASS("vm_remap_new() read-only");
1194 skip_vm_remap_new_ro:
1195 #else /* defined(VM_MEMORY_ROSETTA) */
1196 /* pre-BigSur SDK: no vm_remap_new() */
1197 T_LOG("No vm_remap_new() to test");
1198 #endif /* defined(VM_MEMORY_ROSETTA) */
1199
1200 /* test mach_make_memory_entry_64(VM_SHARE) of RO */
1201 before = *(uint32_t *)(uintptr_t)address;
1202 remap_size = size;
1203 mem_entry = MACH_PORT_NULL;
1204 kr = mach_make_memory_entry_64(mach_task_self(),
1205 &remap_size,
1206 address,
1207 MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1208 &mem_entry,
1209 MACH_PORT_NULL);
1210 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1211 if (kr == KERN_PROTECTION_FAILURE) {
1212 /* wrong but not a security issue... */
1213 goto skip_mem_entry_vm_share_ro;
1214 }
1215 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1216 remap_address = 0;
1217 kr = mach_vm_map(mach_task_self(),
1218 &remap_address,
1219 remap_size,
1220 0, /* mask */
1221 VM_FLAGS_ANYWHERE,
1222 mem_entry,
1223 0, /* offset */
1224 FALSE, /* copy */
1225 VM_PROT_READ | VM_PROT_WRITE,
1226 VM_PROT_READ | VM_PROT_WRITE,
1227 VM_INHERIT_DEFAULT);
1228 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1229 remap = *(uint32_t *)(uintptr_t)remap_address;
1230 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1231 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1232 after = *(uint32_t *)(uintptr_t)address;
1233 T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1234 *(uint32_t *)(uintptr_t)remap_address = before;
1235 if (before != after) {
1236 T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1237 } else {
1238 T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1239 }
1240 /* check that region is still nested */
1241 tmp_address = address;
1242 tmp_size = 0;
1243 depth = 99;
1244 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1245 kr = mach_vm_region_recurse(mach_task_self(),
1246 &tmp_address,
1247 &tmp_size,
1248 &depth,
1249 (vm_region_recurse_info_t)&info,
1250 &count);
1251 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1252 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1253 tmp_address, tmp_address + tmp_size, depth,
1254 prot_str[info.protection],
1255 prot_str[info.max_protection],
1256 share_mode_str[info.share_mode],
1257 info.object_id);
1258 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1259 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1260 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1261 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1262 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1263 /* check that new mapping is a copy */
1264 tmp_address = remap_address;
1265 tmp_size = 0;
1266 depth = 99;
1267 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1268 kr = mach_vm_region_recurse(mach_task_self(),
1269 &tmp_address,
1270 &tmp_size,
1271 &depth,
1272 (vm_region_recurse_info_t)&info,
1273 &count);
1274 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1275 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1276 tmp_address, tmp_address + tmp_size, depth,
1277 prot_str[info.protection],
1278 prot_str[info.max_protection],
1279 share_mode_str[info.share_mode],
1280 info.object_id);
1281 T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1282 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1283 T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
1284 // T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1285 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1286 /* cleanup */
1287 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1288 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1289 T_PASS("mem_entry(VM_SHARE) read-only");
1290 skip_mem_entry_vm_share_ro:
1291
1292 /* test mach_make_memory_entry_64() of RO */
1293 before = *(uint32_t *)(uintptr_t)address;
1294 remap_size = size;
1295 mem_entry = MACH_PORT_NULL;
1296 kr = mach_make_memory_entry_64(mach_task_self(),
1297 &remap_size,
1298 address,
1299 VM_PROT_READ | VM_PROT_WRITE,
1300 &mem_entry,
1301 MACH_PORT_NULL);
1302 T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
1303 /* check that region is still nested */
1304 tmp_address = address;
1305 tmp_size = 0;
1306 depth = 99;
1307 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1308 kr = mach_vm_region_recurse(mach_task_self(),
1309 &tmp_address,
1310 &tmp_size,
1311 &depth,
1312 (vm_region_recurse_info_t)&info,
1313 &count);
1314 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1315 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1316 tmp_address, tmp_address + tmp_size, depth,
1317 prot_str[info.protection],
1318 prot_str[info.max_protection],
1319 share_mode_str[info.share_mode],
1320 info.object_id);
1321 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1322 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1323 // T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1324 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1325 if (depth > 0) {
1326 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1327 }
1328 T_PASS("mem_entry() read-only");
1329
1330
1331 /*
1332 * Step 2 - check that one can not share write access with a writable
1333 * mapping in the shared region.
1334 */
1335 size = 0;
1336 for (address = SHARED_REGION_BASE;
1337 address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1338 address += size) {
1339 size = 0;
1340 depth = 99;
1341 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1342 kr = mach_vm_region_recurse(mach_task_self(),
1343 &address,
1344 &size,
1345 &depth,
1346 (vm_region_recurse_info_t)&info,
1347 &count);
1348 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1349 if (kr == KERN_INVALID_ADDRESS) {
1350 T_SKIP("could not find writable nested mapping");
1351 T_END;
1352 }
1353 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1354 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1355 address, address + size, depth,
1356 prot_str[info.protection],
1357 prot_str[info.max_protection],
1358 share_mode_str[info.share_mode],
1359 info.object_id);
1360 if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
1361 /* nested and writable: bingo! */
1362 break;
1363 }
1364 }
1365 if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1366 T_SKIP("could not find writable nested mapping");
1367 T_END;
1368 }
1369
1370 /* test vm_remap() of RW */
1371 before = *(uint32_t *)(uintptr_t)address;
1372 remap_address = 0;
1373 remap_size = size;
1374 kr = mach_vm_remap(mach_task_self(),
1375 &remap_address,
1376 remap_size,
1377 0,
1378 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1379 mach_task_self(),
1380 address,
1381 FALSE,
1382 &cur_prot,
1383 &max_prot,
1384 VM_INHERIT_DEFAULT);
1385 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1386 if (!(cur_prot & VM_PROT_WRITE)) {
1387 T_LOG("vm_remap(): 0x%llx not writable %s/%s",
1388 remap_address, prot_str[cur_prot], prot_str[max_prot]);
1389 T_ASSERT_FAIL("vm_remap() remapping not writable");
1390 }
1391 remap = *(uint32_t *)(uintptr_t)remap_address;
1392 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1393 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1394 after = *(uint32_t *)(uintptr_t)address;
1395 T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1396 *(uint32_t *)(uintptr_t)remap_address = before;
1397 if (before != after) {
1398 T_FAIL("vm_remap() bypassed copy-on-write");
1399 } else {
1400 T_PASS("vm_remap() did not bypass copy-on-write");
1401 }
1402 /* check that region is still nested */
1403 tmp_address = address;
1404 tmp_size = 0;
1405 depth = 99;
1406 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1407 kr = mach_vm_region_recurse(mach_task_self(),
1408 &tmp_address,
1409 &tmp_size,
1410 &depth,
1411 (vm_region_recurse_info_t)&info,
1412 &count);
1413 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1414 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1415 tmp_address, tmp_address + tmp_size, depth,
1416 prot_str[info.protection],
1417 prot_str[info.max_protection],
1418 share_mode_str[info.share_mode],
1419 info.object_id);
1420 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1421 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1422 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1423 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1424 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1425 /* cleanup */
1426 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1427 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1428
1429 #if defined(VM_MEMORY_ROSETTA)
1430 if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1431 T_PASS("vm_remap_new() is not present");
1432 goto skip_vm_remap_new_rw;
1433 }
1434 /* test vm_remap_new() of RW */
1435 before = *(uint32_t *)(uintptr_t)address;
1436 remap_address = 0;
1437 remap_size = size;
1438 cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1439 max_prot = VM_PROT_READ | VM_PROT_WRITE;
1440 kr = mach_vm_remap_new(mach_task_self(),
1441 &remap_address,
1442 remap_size,
1443 0,
1444 VM_FLAGS_ANYWHERE,
1445 mach_task_self(),
1446 address,
1447 FALSE,
1448 &cur_prot,
1449 &max_prot,
1450 VM_INHERIT_DEFAULT);
1451 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1452 if (kr == KERN_PROTECTION_FAILURE) {
1453 /* wrong but not a security issue... */
1454 goto skip_vm_remap_new_rw;
1455 }
1456 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1457 if (!(cur_prot & VM_PROT_WRITE)) {
1458 T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
1459 remap_address, prot_str[cur_prot], prot_str[max_prot]);
1460 T_ASSERT_FAIL("vm_remap_new() remapping not writable");
1461 }
1462 remap = *(uint32_t *)(uintptr_t)remap_address;
1463 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1464 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1465 after = *(uint32_t *)(uintptr_t)address;
1466 T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1467 *(uint32_t *)(uintptr_t)remap_address = before;
1468 if (before != after) {
1469 T_FAIL("vm_remap_new() bypassed copy-on-write");
1470 } else {
1471 T_PASS("vm_remap_new() did not bypass copy-on-write");
1472 }
1473 /* check that region is still nested */
1474 tmp_address = address;
1475 tmp_size = 0;
1476 depth = 99;
1477 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1478 kr = mach_vm_region_recurse(mach_task_self(),
1479 &tmp_address,
1480 &tmp_size,
1481 &depth,
1482 (vm_region_recurse_info_t)&info,
1483 &count);
1484 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1485 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1486 tmp_address, tmp_address + tmp_size, depth,
1487 prot_str[info.protection],
1488 prot_str[info.max_protection],
1489 share_mode_str[info.share_mode],
1490 info.object_id);
1491 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1492 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1493 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1494 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1495 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1496 /* cleanup */
1497 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1498 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1499 skip_vm_remap_new_rw:
1500 #else /* defined(VM_MEMORY_ROSETTA) */
1501 /* pre-BigSur SDK: no vm_remap_new() */
1502 T_LOG("No vm_remap_new() to test");
1503 #endif /* defined(VM_MEMORY_ROSETTA) */
1504
1505 /* test mach_make_memory_entry_64(VM_SHARE) of RW */
1506 before = *(uint32_t *)(uintptr_t)address;
1507 remap_size = size;
1508 mem_entry = MACH_PORT_NULL;
1509 kr = mach_make_memory_entry_64(mach_task_self(),
1510 &remap_size,
1511 address,
1512 MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1513 &mem_entry,
1514 MACH_PORT_NULL);
1515 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1516 if (kr == KERN_PROTECTION_FAILURE) {
1517 /* wrong but not a security issue... */
1518 goto skip_mem_entry_vm_share_rw;
1519 }
1520 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1521 T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping");
1522 // T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar();
1523 remap_address = 0;
1524 kr = mach_vm_map(mach_task_self(),
1525 &remap_address,
1526 remap_size,
1527 0, /* mask */
1528 VM_FLAGS_ANYWHERE,
1529 mem_entry,
1530 0, /* offset */
1531 FALSE, /* copy */
1532 VM_PROT_READ | VM_PROT_WRITE,
1533 VM_PROT_READ | VM_PROT_WRITE,
1534 VM_INHERIT_DEFAULT);
1535 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1536 remap = *(uint32_t *)(uintptr_t)remap_address;
1537 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1538 // T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1539 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1540 // T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1541 after = *(uint32_t *)(uintptr_t)address;
1542 T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1543 *(uint32_t *)(uintptr_t)remap_address = before;
1544 if (before != after) {
1545 T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1546 } else {
1547 T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1548 }
1549 /* check that region is still nested */
1550 tmp_address = address;
1551 tmp_size = 0;
1552 depth = 99;
1553 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1554 kr = mach_vm_region_recurse(mach_task_self(),
1555 &tmp_address,
1556 &tmp_size,
1557 &depth,
1558 (vm_region_recurse_info_t)&info,
1559 &count);
1560 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1561 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1562 tmp_address, tmp_address + tmp_size, depth,
1563 prot_str[info.protection],
1564 prot_str[info.max_protection],
1565 share_mode_str[info.share_mode],
1566 info.object_id);
1567 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1568 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1569 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1570 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1571 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1572 /* cleanup */
1573 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1574 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1575 mach_port_deallocate(mach_task_self(), mem_entry);
1576 skip_mem_entry_vm_share_rw:
1577
1578 /* test mach_make_memory_entry_64() of RW */
1579 before = *(uint32_t *)(uintptr_t)address;
1580 remap_size = size;
1581 mem_entry = MACH_PORT_NULL;
1582 kr = mach_make_memory_entry_64(mach_task_self(),
1583 &remap_size,
1584 address,
1585 VM_PROT_READ | VM_PROT_WRITE,
1586 &mem_entry,
1587 MACH_PORT_NULL);
1588 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
1589 remap_address = 0;
1590 kr = mach_vm_map(mach_task_self(),
1591 &remap_address,
1592 remap_size,
1593 0, /* mask */
1594 VM_FLAGS_ANYWHERE,
1595 mem_entry,
1596 0, /* offset */
1597 FALSE, /* copy */
1598 VM_PROT_READ | VM_PROT_WRITE,
1599 VM_PROT_READ | VM_PROT_WRITE,
1600 VM_INHERIT_DEFAULT);
1601 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1602 remap = *(uint32_t *)(uintptr_t)remap_address;
1603 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1604 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1605 after = *(uint32_t *)(uintptr_t)address;
1606 T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
1607 *(uint32_t *)(uintptr_t)remap_address = before;
1608 /* check that region is no longer nested */
1609 tmp_address = address;
1610 tmp_size = 0;
1611 depth = 99;
1612 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1613 kr = mach_vm_region_recurse(mach_task_self(),
1614 &tmp_address,
1615 &tmp_size,
1616 &depth,
1617 (vm_region_recurse_info_t)&info,
1618 &count);
1619 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1620 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1621 tmp_address, tmp_address + tmp_size, depth,
1622 prot_str[info.protection],
1623 prot_str[info.max_protection],
1624 share_mode_str[info.share_mode],
1625 info.object_id);
1626 if (before != after) {
1627 if (depth == 0) {
1628 T_PASS("mem_entry() honored copy-on-write");
1629 } else {
1630 T_FAIL("mem_entry() did not trigger copy-on_write");
1631 }
1632 } else {
1633 T_FAIL("mem_entry() did not honor copy-on-write");
1634 }
1635 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1636 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1637 T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
1638 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1639 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1640 /* cleanup */
1641 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1642 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1643 mach_port_deallocate(mach_task_self(), mem_entry);
1644 }
1645
1646 T_DECL(copyoverwrite_submap_protection, "test copywrite vm region submap \
1647 protection", T_META_ALL_VALID_ARCHS(true))
1648 {
1649 kern_return_t kr;
1650 mach_vm_address_t vmaddr;
1651 mach_vm_size_t vmsize;
1652 natural_t depth;
1653 vm_region_submap_short_info_data_64_t region_info;
1654 mach_msg_type_number_t region_info_count;
1655
1656 for (vmaddr = SHARED_REGION_BASE;
1657 vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1658 vmaddr += vmsize) {
1659 depth = 99;
1660 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1661 kr = mach_vm_region_recurse(mach_task_self(),
1662 &vmaddr,
1663 &vmsize,
1664 &depth,
1665 (vm_region_info_t) ®ion_info,
1666 ®ion_info_count);
1667 if (kr == KERN_INVALID_ADDRESS) {
1668 break;
1669 }
1670 T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
1671 T_ASSERT_EQ(region_info_count,
1672 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1673 "vm_region_recurse(0x%llx) count = %d expected %d",
1674 vmaddr, region_info_count,
1675 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1676
1677 T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
1678 vmaddr, depth, region_info.protection,
1679 region_info.max_protection);
1680 if (depth == 0) {
1681 /* not a submap mapping: next mapping */
1682 continue;
1683 }
1684 if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1685 break;
1686 }
1687 kr = mach_vm_copy(mach_task_self(),
1688 vmaddr,
1689 vmsize,
1690 vmaddr);
1691 if (kr == KERN_PROTECTION_FAILURE ||
1692 kr == KERN_INVALID_ADDRESS) {
1693 T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
1694 vmaddr, vmsize, kr, mach_error_string(kr));
1695 continue;
1696 }
1697 T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
1698 vmaddr, vmsize, region_info.protection);
1699 depth = 0;
1700 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1701 kr = mach_vm_region_recurse(mach_task_self(),
1702 &vmaddr,
1703 &vmsize,
1704 &depth,
1705 (vm_region_info_t) ®ion_info,
1706 ®ion_info_count);
1707 T_ASSERT_MACH_SUCCESS(kr, "m_region_recurse(0x%llx)", vmaddr);
1708 T_ASSERT_EQ(region_info_count,
1709 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1710 "vm_region_recurse() count = %d expected %d",
1711 region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1712
1713 T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
1714 vmaddr, depth);
1715 T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
1716 0, "vm_region_recurse(0x%llx): prot 0x%x",
1717 vmaddr, region_info.protection);
1718 }
1719 }
1720
1721 T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
1722 the shared region triggers code-signing violations",
1723 T_META_ALL_VALID_ARCHS(true))
1724 {
1725 uint32_t *addr, before, after;
1726 int retval;
1727 int saved_errno;
1728 kern_return_t kr;
1729 vm_address_t map_addr, remap_addr;
1730 vm_prot_t curprot, maxprot;
1731
1732 addr = (uint32_t *)&printf;
1733 #if __has_feature(ptrauth_calls)
1734 map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
1735 #else /* __has_feature(ptrauth_calls) */
1736 map_addr = (vm_address_t)(uintptr_t)addr;
1737 #endif /* __has_feature(ptrauth_calls) */
1738 remap_addr = 0;
1739 kr = vm_remap(mach_task_self(), &remap_addr, 4096,
1740 0, /* mask */
1741 VM_FLAGS_ANYWHERE,
1742 mach_task_self(), map_addr,
1743 FALSE, /* copy */
1744 &curprot, &maxprot,
1745 VM_INHERIT_DEFAULT);
1746 T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
1747 kr, mach_error_string(kr));
1748 before = *addr;
1749 retval = mlock(addr, 4096);
1750 after = *addr;
1751 if (retval != 0) {
1752 saved_errno = errno;
1753 T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
1754 saved_errno, strerror(saved_errno), EACCES);
1755 } else if (after != before) {
1756 T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
1757 } else {
1758 T_PASS("wire shared text");
1759 }
1760
1761 addr = (uint32_t *) &fprintf;
1762 before = *addr;
1763 retval = mlock(addr, 4096);
1764 after = *addr;
1765 if (retval != 0) {
1766 saved_errno = errno;
1767 T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
1768 saved_errno, strerror(saved_errno), EACCES);
1769 } else if (after != before) {
1770 T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
1771 } else {
1772 T_PASS("wire shared text");
1773 }
1774
1775 addr = (uint32_t *) &testmain_wire_text;
1776 before = *addr;
1777 retval = mlock(addr, 4096);
1778 after = *addr;
1779 if (retval != 0) {
1780 saved_errno = errno;
1781 T_ASSERT_EQ(saved_errno, EACCES, "wire text error return error %d (%s)",
1782 saved_errno, strerror(saved_errno));
1783 } else if (after != before) {
1784 T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
1785 } else {
1786 T_PASS("wire text");
1787 }
1788 }
1789
1790 T_DECL(remap_comm_page, "test remapping of the commpage - rdar://93177124",
1791 T_META_ALL_VALID_ARCHS(true))
1792 {
1793 kern_return_t kr;
1794 mach_vm_address_t commpage_addr, remap_addr;
1795 mach_vm_size_t vmsize;
1796 vm_prot_t curprot, maxprot;
1797
1798 #if __arm__
1799 commpage_addr = 0xFFFF4000ULL;
1800 #elif __arm64__
1801 commpage_addr = 0x0000000FFFFFC000ULL;
1802 #elif __x86_64__
1803 commpage_addr = 0x00007FFFFFE00000ULL;
1804 #else
1805 T_FAIL("unknown commpage address for this architecture");
1806 #endif
1807
1808 T_LOG("Remapping commpage from 0x%llx", commpage_addr);
1809 vmsize = vm_kernel_page_size;
1810 remap_addr = 0;
1811 kr = mach_vm_remap(mach_task_self(),
1812 &remap_addr,
1813 vmsize,
1814 0, /* mask */
1815 VM_FLAGS_ANYWHERE,
1816 mach_task_self(),
1817 commpage_addr,
1818 TRUE, /* copy */
1819 &curprot,
1820 &maxprot,
1821 VM_INHERIT_DEFAULT);
1822 if (kr == KERN_INVALID_ADDRESS) {
1823 T_SKIP("No mapping found at 0x%llx\n", commpage_addr);
1824 return;
1825 }
1826 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() of commpage from 0x%llx", commpage_addr);
1827 }
1828