1 /* Mach vm map miscellaneous unit tests
2 *
3 * This test program serves to be a regression test suite for legacy
4 * vm issues, ideally each test will be linked to a radar number and
5 * perform a set of certain validations.
6 *
7 */
8 #include <darwintest.h>
9
10 #include <dlfcn.h>
11 #include <errno.h>
12 #include <ptrauth.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <time.h>
17
18 #include <sys/mman.h>
19
20 #include <mach/mach_error.h>
21 #include <mach/mach_init.h>
22 #include <mach/mach_port.h>
23 #include <mach/mach_vm.h>
24 #include <mach/vm_map.h>
25 #include <mach/task.h>
26 #include <mach/task_info.h>
27 #include <mach/shared_region.h>
28 #include <machine/cpu_capabilities.h>
29
30 T_GLOBAL_META(
31 T_META_NAMESPACE("xnu.vm"),
32 T_META_RADAR_COMPONENT_NAME("xnu"),
33 T_META_RADAR_COMPONENT_VERSION("VM"),
34 T_META_RUN_CONCURRENTLY(true));
35
/*
 * Regression test for rdar://23334087: VM memory tags should be propagated
 * through memory entries.
 *
 * Allocates a 3-chunk region where each 16KB chunk carries its own VM tag
 * (100 + chunk index), wraps the whole region in a memory entry (once with
 * MAP_MEM_VM_COPY and once with MAP_MEM_VM_SHARE -- the starting mode is
 * picked pseudo-randomly from the clock, then the other mode is run on a
 * second pass), maps the entry back into the task, and verifies:
 *   - the new mapping is shared vs. copied as requested, and
 *   - each chunk of the new mapping reports the expected tag: the original
 *     per-chunk tag, or 200 when override_tag requests an explicit
 *     VM_MAKE_TAG(200) at mach_vm_map() time.
 *
 * override_tag: when non-zero, mach_vm_map() supplies its own tag (200),
 * which is expected to override the per-chunk tags.
 */
static void
test_memory_entry_tagging(int override_tag)
{
	int pass;
	int do_copy;
	kern_return_t kr;
	mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t *vmaddr_ptr;
	mach_vm_size_t *vmsize_ptr;
	mach_vm_address_t vmaddr_chunk;
	mach_vm_size_t vmsize_chunk;
	mach_vm_offset_t vmoff;
	mach_port_t mem_entry_copied, mem_entry_shared;
	mach_port_t *mem_entry_ptr;
	int i;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t ri_count;
	unsigned int depth;
	int vm_flags;
	int expected_tag;

	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_copied = 0;
	vmsize_shared = 0;
	vmaddr_chunk = 0;
	vmsize_chunk = 16 * 1024;
	vmaddr_orig = 0;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;
	pass = 0;

	/* reserve a contiguous range, to be re-populated chunk by chunk */
	vmaddr_orig = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* overwrite each chunk in place so it carries its own tag (100 + i) */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* touch every page so there is resident memory to copy/share */
	for (vmoff = 0;
	    vmoff < vmsize_orig;
	    vmoff += PAGE_SIZE) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

	/* pick the first mode pseudo-randomly; pass 2 runs the other mode */
	do_copy = time(NULL) & 1;
again:
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig, /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	/* the memory entry must cover the entire original range */
	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the entry back in, optionally forcing a new tag (200) */
	*vmaddr_ptr = 0;
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0, /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * Write through the original mapping: the new mapping sees the
	 * update only if the memory is shared, not if it was copied.
	 */
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* verify each chunk of the new mapping reports the expected tag */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t vmaddr_info;
		mach_vm_size_t vmsize_info;

		vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
		vmsize_info = 0;
		depth = 1;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		/* each chunk should still be its own region at its own address */
		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		/* ... carrying either the overriding tag or the original one */
		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* run the second pass with the other copy/share mode */
	if (++pass < 2) {
		do_copy = !do_copy;
		goto again;
	}

done:
	/* best-effort cleanup of everything that was created */
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
		vmsize_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
		vmsize_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
		vmsize_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}

	return;
}
272
/*
 * Regression test for rdar://22611816: mach_make_memory_entry(MAP_MEM_VM_COPY)
 * should never use a KERNEL_BUFFER copy.
 *
 * Creates a 1-byte allocation, wraps it in a MAP_MEM_VM_COPY memory entry,
 * maps that entry at a second address, and checks copy semantics: the new
 * mapping initially shows the original contents, and a write through the
 * new mapping is not visible through the original one.
 */
static void
test_map_memory_entry(void)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	mach_port_t mem_entry;
	unsigned char *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 0;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;

	/* 1-byte source allocation */
	vmsize1 = 1;
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	/* wrap the byte in a copy-type memory entry */
	vmsize2 = 1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1, /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	/* the entry may be rounded up, but must cover at least 1 byte */
	T_QUIET;
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the entry (with copy semantics) at a second address */
	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    TRUE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the new mapping must start out with the original contents... */
	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* ... and writing it must NOT modify the original (copy, not share) */
	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	/* best-effort cleanup */
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
		vmsize1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
		vmsize2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}

	return;
}
381
/*
 * rdar://23334087: VM memory tags should be propagated through memory
 * entries.  Run the tag-propagation test both without and with an explicit
 * tag override at map time.
 */
T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
    VM memory tags should be propagated through memory entries",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_memory_entry_tagging(0);
	test_memory_entry_tagging(1);
}
389
/*
 * rdar://22611816: mach_make_memory_entry(MAP_MEM_VM_COPY) should never use
 * a KERNEL_BUFFER copy; verify proper copy (not share) semantics.
 */
T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
    mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
    copy", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_map_memory_entry();
}
396
/*
 * Human-readable names for the VM_PURGABLE_* states, indexed by state value.
 * Const-qualified: the strings are only ever read, for log/assert messages.
 */
static const char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };
398
/*
 * Return this task's current physical footprint, in bytes, as reported by
 * task_info(TASK_VM_INFO).  Asserts (fatally) on task_info() failure.
 */
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__)
	/* on arm64, expect the kernel to fill in the full current struct */
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) */
	return ti.phys_footprint;
}
420
/*
 * Walk a 1MB purgeable allocation through the purgeability state machine
 * (NONVOLATILE -> VOLATILE -> EMPTY -> VOLATILE -> NONVOLATILE), with its
 * first half mlock()ed for part of the sequence, and track the task's
 * physical footprint at each step.  Footprint mismatches are logged as
 * warnings rather than failures, since system memory pressure can
 * legitimately perturb the numbers.
 */
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
    emptying, volatilizing purgeable vm", T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	vm_purgable_t state;
	uint64_t footprint[8];

	vm_addr = 0;
	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* footprint0 */
	footprint[0] = task_footprint();
	T_LOG(" footprint[0] = %llu", footprint[0]);

	/* fault in every page */
	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG(" footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}

	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* wiring alone should not change the footprint */
	/* footprint2 == footprint1 */
	footprint[2] = task_footprint();
	T_LOG(" footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}

	/* volatilize: only the unwired half can leave the footprint */
	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	/* "state" returns the previous state on the way out */
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2) */
	footprint[3] = task_footprint();
	T_LOG(" footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}

	/* empty: previous state may read back as VOLATILE or already EMPTY */
	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3 */
	footprint[4] = task_footprint();
	T_LOG(" footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}

	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* unwiring lets the previously wired half be purged too */
	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG(" footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}

	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG(" footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}

	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG(" footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}
559
560 kern_return_t
get_reusable_size(uint64_t * reusable)561 get_reusable_size(uint64_t *reusable)
562 {
563 task_vm_info_data_t ti;
564 mach_msg_type_number_t ti_count = TASK_VM_INFO_COUNT;
565 kern_return_t kr;
566
567 kr = task_info(mach_task_self(),
568 TASK_VM_INFO,
569 (task_info_t) &ti,
570 &ti_count);
571 T_QUIET;
572 T_EXPECT_MACH_SUCCESS(kr, "task_info()");
573 T_QUIET;
574 *reusable = ti.reusable;
575 return kr;
576 }
577
/*
 * Regression test for rdar://2295713: madvise(MADV_FREE_REUSABLE) on memory
 * that is mapped twice (once via vm_remap()) should remove BOTH mappings'
 * pages from the footprint and account them as "reusable", even while an
 * unrelated tail portion of the second mapping is mlock()ed.
 */
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
    rethink needs madvise(MADV_FREE_HARDER)",
    T_META_RUN_CONCURRENTLY(false),
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	vm_address_t vmaddr = 0, vmaddr2 = 0;
	vm_size_t vmsize, vmsize1, vmsize2;
	kern_return_t kr;
	char *cp;
	vm_prot_t curprot, maxprot;
	int ret;
	int vmflags;
	uint64_t footprint_before, footprint_after;
	uint64_t reusable_before, reusable_after, reusable_expected;


	vmsize1 = 64 * 1024; /* 64KB to madvise() */
	vmsize2 = 32 * 1024; /* 32KB to mlock() */
	vmsize = vmsize1 + vmsize2;
	vmflags = VM_FLAGS_ANYWHERE;
	VM_SET_FLAGS_ALIAS(vmflags, VM_MEMORY_MALLOC);

	/* baseline "reusable" accounting before we touch anything */
	kr = get_reusable_size(&reusable_before);
	if (kr) {
		goto done;
	}

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    vmflags);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* fault every byte in */
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	/* create a second, shared mapping of the same memory */
	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* both mappings must show the same contents... */
	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp,
		    (unsigned char)*cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	/* ... and a write through one must be visible through the other */
	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

#if defined(__x86_64__) || defined(__i386__)
	/* reusable accounting differs under Rosetta translation */
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */

	/* wire the tail of the second mapping; it should not be affected */
	ret = mlock((char *)(uintptr_t)(vmaddr2 + vmsize1),
	    vmsize2);
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "mlock()");

	footprint_before = task_footprint();

	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize1,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the doubly-mapped pages count twice: expect a 2 * vmsize1 drop */
	footprint_after = task_footprint();
	T_ASSERT_EQ(footprint_after, footprint_before - 2 * vmsize1, NULL);

	kr = get_reusable_size(&reusable_after);
	if (kr) {
		goto done;
	}
	reusable_expected = 2ULL * vmsize1 + reusable_before;
	T_EXPECT_EQ(reusable_after, reusable_expected, "actual=%lld expected %lld",
	    reusable_after, reusable_expected);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	/* best-effort cleanup of both mappings */
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}
707
/*
 * Regression test for rdar://37476183: purgeable memory must not become
 * eligible for reuse; madvise(MADV_CAN_REUSE) on a purgeable allocation is
 * expected to fail with EINVAL.
 */
T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
    rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
    [ purgeable_malloc became eligible for reuse ]",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
#if defined(__x86_64__) || defined(__i386__)
	/* madvise-reuse semantics are not supported under Rosetta */
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.)");
	}
#endif /* defined(__x86_64__) || defined(__i386__) */
	vm_address_t vmaddr = 0;
	vm_size_t vmsize;
	kern_return_t kr;
	char *cp;
	int ret;

	vmsize = 10 * 1024 * 1024; /* 10MB */
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_FLAGS_PURGABLE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* fault everything in */
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	/* purgeable memory must reject MADV_CAN_REUSE with EINVAL */
	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize,
	    MADV_CAN_REUSE);
	T_QUIET;
	T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be adviced to reuse");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}
759
760 static bool
validate_memory_is_zero(vm_address_t start,vm_size_t vmsize,vm_address_t * non_zero_addr)761 validate_memory_is_zero(
762 vm_address_t start,
763 vm_size_t vmsize,
764 vm_address_t *non_zero_addr)
765 {
766 for (vm_size_t sz = 0; sz < vmsize; sz += sizeof(uint64_t)) {
767 vm_address_t addr = start + sz;
768
769 if (*(uint64_t *)(addr) != 0) {
770 *non_zero_addr = addr;
771 return false;
772 }
773 }
774 return true;
775 }
776
/*
 * Exercise madvise(MADV_ZERO): after MADV_FREE_REUSABLE, a partially
 * re-dirtied range must read back as all zeroes once MADV_ZERO is applied --
 * both while the pages are resident and after they have been pushed out
 * with MADV_PAGEOUT.
 */
T_DECL(madvise_zero, "test madvise zero", T_META_TAG_VM_PREFERRED)
{
	vm_address_t vmaddr = 0;
	vm_size_t vmsize = PAGE_SIZE * 3;
	vm_address_t non_zero_addr = 0;
	kern_return_t kr;
	int ret;
	unsigned char vec;

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* dirty all pages, then mark the range reusable */
	memset((void *)vmaddr, 'A', vmsize);
	ret = madvise((void*)vmaddr, vmsize, MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_FREE_REUSABLE)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* re-dirty only the first page, then zero the whole range */
	memset((void *)vmaddr, 'B', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %llu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* dirty the first page again and push the range out of residency */
	memset((void *)vmaddr, 'C', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_PAGEOUT);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_PAGEOUT)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* wait for the pages to be (asynchronously) compressed */
	T_QUIET; T_LOG("waiting for first page to be paged out");
	do {
		ret = mincore((void*)vmaddr, 1, &vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(1st)");
	} while (vec & MINCORE_INCORE);
	T_QUIET; T_LOG("waiting for last page to be paged out");
	do {
		ret = mincore((void*)(vmaddr + vmsize - 1), 1, (char *)&vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(last)");
	} while (vec & MINCORE_INCORE);

	/* MADV_ZERO must also work on non-resident pages */
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %llu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}
861
862 #define DEST_PATTERN 0xFEDCBA98
863
864 T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
865 rdar://31075370",
866 T_META_ALL_VALID_ARCHS(true),
867 T_META_TAG_VM_PREFERRED)
868 {
869 kern_return_t kr;
870 mach_vm_address_t vmaddr1, vmaddr2;
871 mach_vm_size_t vmsize1, vmsize2;
872 int *ip;
873 int i;
874
875 vmaddr1 = 0;
876 vmsize1 = 4 * 4096;
877 kr = mach_vm_allocate(mach_task_self(),
878 &vmaddr1,
879 vmsize1,
880 VM_FLAGS_ANYWHERE);
881 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
882
883 ip = (int *)(uintptr_t)vmaddr1;
884 for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
885 ip[i] = i;
886 }
887
888 vmaddr2 = 0;
889 kr = mach_vm_allocate(mach_task_self(),
890 &vmaddr2,
891 vmsize1,
892 VM_FLAGS_ANYWHERE);
893 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
894
895 ip = (int *)(uintptr_t)vmaddr2;
896 for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
897 ip[i] = DEST_PATTERN;
898 }
899
900 vmsize2 = vmsize1 - 2 * (sizeof(*ip));
901 kr = mach_vm_read_overwrite(mach_task_self(),
902 vmaddr1 + sizeof(*ip),
903 vmsize2,
904 vmaddr2 + sizeof(*ip),
905 &vmsize2);
906 T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");
907
908 ip = (int *)(uintptr_t)vmaddr2;
909 for (i = 0; i < 1; i++) {
910 T_QUIET;
911 T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
912 i, ip[i], DEST_PATTERN);
913 }
914 for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
915 T_QUIET;
916 T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
917 i, ip[i], i);
918 }
919 for (; i < vmsize1 / sizeof(*ip); i++) {
920 T_QUIET;
921 T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
922 i, ip[i], DEST_PATTERN);
923 }
924 }
925
/*
 * Regression test for rdar://35610377: copy-on-write remapping of COPY_NONE
 * VM objects (purgeable memory).  Remap with copy semantics twice in a row
 * -- original -> copy #1 -> copy #2 -- and expect both remaps to succeed.
 */
T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
    objects - rdar://35610377",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2, vmaddr3;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	vmsize = 32 * 1024 * 1024;

	/* purgeable allocation (the COPY_NONE case from the radar) */
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);

	/* first copy-on-write remap */
	vmaddr2 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");

	/* second copy-on-write remap, of the first copy */
	vmaddr3 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr3,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr2,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
}
975
/*
 * Regression test for rdar://31990033: purgeable memory must not be
 * convertible to non-purgeable; setting VM_PURGABLE_DENY must be rejected
 * with KERN_INVALID_ARGUMENT.
 */
T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
    non-purgeable - rdar://31990033",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	vm_address_t vmaddr;
	vm_purgable_t state;

	vmaddr = 0;
	kr = vm_allocate(mach_task_self(), &vmaddr, 1,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* DENY is not a valid target state for an existing purgeable region */
	state = VM_PURGABLE_DENY;
	kr = vm_purgable_control(mach_task_self(), vmaddr,
	    VM_PURGABLE_SET_STATE, &state);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
	    "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
	    kr, mach_error_string(kr));

	kr = vm_deallocate(mach_task_self(), vmaddr, 1);
	T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
}
1000
/* size of the backing allocation for the vm_remap_zero test */
#define VMSIZE 0x10000

/*
 * Regression test for rdar://33114981: mach_vm_remap() must reject a zero
 * size, and a huge size ((mach_vm_size_t)-2) whose page-rounding would
 * overflow, with KERN_INVALID_ARGUMENT in both cases.
 */
T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	/* a valid source region to remap from */
	vmaddr1 = 0;
	vmsize = VMSIZE;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* size == 0 must be rejected */
	vmaddr2 = 0;
	vmsize = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));

	/* near-SIZE_MAX size (would overflow when rounded) must be rejected */
	vmaddr2 = 0;
	vmsize = (mach_vm_size_t)-2;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE,
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));
}
1051
1052 extern int __shared_region_check_np(uint64_t *);
1053
/*
 * Regression test for rdar://41481703: nested pmaps should only be
 * triggered from the kernel.  Make a MAP_MEM_VM_SHARE memory entry over the
 * first page of the shared region and map it back into the task; both
 * operations are expected to succeed.
 */
T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
    - rdar://problem/41481703",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	int ret;
	kern_return_t kr;
	mach_vm_address_t sr_start;
	mach_vm_size_t vmsize;
	mach_vm_address_t vmaddr;
	mach_port_t mem_entry;

	/* locate the shared region; ENOMEM means none is mapped -> end early */
	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		int saved_errno;
		saved_errno = errno;

		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}

	/* share one page of the shared region through a memory entry */
	vmsize = PAGE_SIZE;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize,
	    sr_start,
	    MAP_MEM_VM_SHARE | VM_PROT_READ,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);

	/* mapping that entry back in must succeed */
	vmaddr = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,
	    FALSE,
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
}
1099
/* printable names for vm_prot_t values (indexed by the protection bits) */
static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
/* printable names for vm_region share modes (indexed by SM_* value) */
static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };
1102
1103 T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region shoudl not give write access to shared region - rdar://problem/74469953",
1104 T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
1105 {
1106 int ret;
1107 uint64_t sr_start;
1108 kern_return_t kr;
1109 mach_vm_address_t address, tmp_address, remap_address;
1110 mach_vm_size_t size, tmp_size, remap_size;
1111 uint32_t depth;
1112 mach_msg_type_number_t count;
1113 vm_region_submap_info_data_64_t info;
1114 vm_prot_t cur_prot, max_prot;
1115 uint32_t before, after, remap;
1116 mach_port_t mem_entry;
1117
1118 ret = __shared_region_check_np(&sr_start);
1119 if (ret != 0) {
1120 int saved_errno;
1121 saved_errno = errno;
1122
1123 T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
1124 saved_errno, strerror(saved_errno));
1125 T_END;
1126 }
1127 T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
1128 T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
1129 T_LOG("shared region starts at 0x%llx", sr_start);
1130 T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
1131 "shared region starts below BASE");
1132 T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
1133 "shared region starts above BASE+SIZE");
1134
1135 /*
1136 * Step 1 - check that one can not get write access to a read-only
1137 * mapping in the shared region.
1138 */
1139 size = 0;
1140 for (address = SHARED_REGION_BASE;
1141 address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1142 address += size) {
1143 size = 0;
1144 depth = 99;
1145 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1146 kr = mach_vm_region_recurse(mach_task_self(),
1147 &address,
1148 &size,
1149 &depth,
1150 (vm_region_recurse_info_t)&info,
1151 &count);
1152 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1153 if (kr == KERN_INVALID_ADDRESS) {
1154 T_SKIP("could not find read-only nested mapping");
1155 T_END;
1156 }
1157 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1158 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1159 address, address + size, depth,
1160 prot_str[info.protection],
1161 prot_str[info.max_protection],
1162 share_mode_str[info.share_mode],
1163 info.object_id);
1164 if (depth > 0 &&
1165 (info.protection == VM_PROT_READ) &&
1166 (info.max_protection == VM_PROT_READ)) {
1167 /* nested and read-only: bingo! */
1168 break;
1169 }
1170 }
1171 if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1172 T_SKIP("could not find read-only nested mapping");
1173 T_END;
1174 }
1175
1176 /* test vm_remap() of RO */
1177 before = *(uint32_t *)(uintptr_t)address;
1178 remap_address = 0;
1179 remap_size = size;
1180 kr = mach_vm_remap(mach_task_self(),
1181 &remap_address,
1182 remap_size,
1183 0,
1184 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1185 mach_task_self(),
1186 address,
1187 FALSE,
1188 &cur_prot,
1189 &max_prot,
1190 VM_INHERIT_DEFAULT);
1191 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1192 // T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
1193 // T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
1194 /* check that region is still nested */
1195 tmp_address = address;
1196 tmp_size = 0;
1197 depth = 99;
1198 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1199 kr = mach_vm_region_recurse(mach_task_self(),
1200 &tmp_address,
1201 &tmp_size,
1202 &depth,
1203 (vm_region_recurse_info_t)&info,
1204 &count);
1205 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1206 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1207 tmp_address, tmp_address + tmp_size, depth,
1208 prot_str[info.protection],
1209 prot_str[info.max_protection],
1210 share_mode_str[info.share_mode],
1211 info.object_id);
1212 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1213 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1214 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1215 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1216 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1217 /* check that new mapping is read-only */
1218 tmp_address = remap_address;
1219 tmp_size = 0;
1220 depth = 99;
1221 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1222 kr = mach_vm_region_recurse(mach_task_self(),
1223 &tmp_address,
1224 &tmp_size,
1225 &depth,
1226 (vm_region_recurse_info_t)&info,
1227 &count);
1228 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1229 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1230 tmp_address, tmp_address + tmp_size, depth,
1231 prot_str[info.protection],
1232 prot_str[info.max_protection],
1233 share_mode_str[info.share_mode],
1234 info.object_id);
1235 T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1236 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1237 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1238 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1239 remap = *(uint32_t *)(uintptr_t)remap_address;
1240 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1241 // this would crash if actually read-only:
1242 // *(uint32_t *)(uintptr_t)remap_address = before + 1;
1243 after = *(uint32_t *)(uintptr_t)address;
1244 T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1245 // *(uint32_t *)(uintptr_t)remap_address = before;
1246 if (before != after) {
1247 T_FAIL("vm_remap() bypassed copy-on-write");
1248 } else {
1249 T_PASS("vm_remap() did not bypass copy-on-write");
1250 }
1251 /* cleanup */
1252 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1253 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1254 T_PASS("vm_remap() read-only");
1255
1256 #if defined(VM_MEMORY_ROSETTA)
1257 if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1258 T_PASS("vm_remap_new() is not present");
1259 goto skip_vm_remap_new_ro;
1260 }
1261 /* test vm_remap_new() of RO */
1262 before = *(uint32_t *)(uintptr_t)address;
1263 remap_address = 0;
1264 remap_size = size;
1265 cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1266 max_prot = VM_PROT_READ | VM_PROT_WRITE;
1267 kr = mach_vm_remap_new(mach_task_self(),
1268 &remap_address,
1269 remap_size,
1270 0,
1271 VM_FLAGS_ANYWHERE,
1272 mach_task_self(),
1273 address,
1274 FALSE,
1275 &cur_prot,
1276 &max_prot,
1277 VM_INHERIT_DEFAULT);
1278 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1279 if (kr == KERN_PROTECTION_FAILURE) {
1280 /* wrong but not a security issue... */
1281 goto skip_vm_remap_new_ro;
1282 }
1283 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1284 remap = *(uint32_t *)(uintptr_t)remap_address;
1285 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1286 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1287 after = *(uint32_t *)(uintptr_t)address;
1288 T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1289 *(uint32_t *)(uintptr_t)remap_address = before;
1290 if (before != after) {
1291 T_FAIL("vm_remap_new() bypassed copy-on-write");
1292 } else {
1293 T_PASS("vm_remap_new() did not bypass copy-on-write");
1294 }
1295 /* check that region is still nested */
1296 tmp_address = address;
1297 tmp_size = 0;
1298 depth = 99;
1299 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1300 kr = mach_vm_region_recurse(mach_task_self(),
1301 &tmp_address,
1302 &tmp_size,
1303 &depth,
1304 (vm_region_recurse_info_t)&info,
1305 &count);
1306 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1307 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1308 tmp_address, tmp_address + tmp_size, depth,
1309 prot_str[info.protection],
1310 prot_str[info.max_protection],
1311 share_mode_str[info.share_mode],
1312 info.object_id);
1313 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1314 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1315 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1316 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1317 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1318 T_PASS("vm_remap_new() read-only");
1319 skip_vm_remap_new_ro:
1320 #else /* defined(VM_MEMORY_ROSETTA) */
1321 /* pre-BigSur SDK: no vm_remap_new() */
1322 T_LOG("No vm_remap_new() to test");
1323 #endif /* defined(VM_MEMORY_ROSETTA) */
1324
1325 /* test mach_make_memory_entry_64(VM_SHARE) of RO */
1326 before = *(uint32_t *)(uintptr_t)address;
1327 remap_size = size;
1328 mem_entry = MACH_PORT_NULL;
1329 kr = mach_make_memory_entry_64(mach_task_self(),
1330 &remap_size,
1331 address,
1332 MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1333 &mem_entry,
1334 MACH_PORT_NULL);
1335 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1336 if (kr == KERN_PROTECTION_FAILURE) {
1337 /* wrong but not a security issue... */
1338 goto skip_mem_entry_vm_share_ro;
1339 }
1340 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1341 remap_address = 0;
1342 kr = mach_vm_map(mach_task_self(),
1343 &remap_address,
1344 remap_size,
1345 0, /* mask */
1346 VM_FLAGS_ANYWHERE,
1347 mem_entry,
1348 0, /* offset */
1349 FALSE, /* copy */
1350 VM_PROT_READ | VM_PROT_WRITE,
1351 VM_PROT_READ | VM_PROT_WRITE,
1352 VM_INHERIT_DEFAULT);
1353 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1354 remap = *(uint32_t *)(uintptr_t)remap_address;
1355 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1356 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1357 after = *(uint32_t *)(uintptr_t)address;
1358 T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1359 *(uint32_t *)(uintptr_t)remap_address = before;
1360 if (before != after) {
1361 T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1362 } else {
1363 T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1364 }
1365 /* check that region is still nested */
1366 tmp_address = address;
1367 tmp_size = 0;
1368 depth = 99;
1369 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1370 kr = mach_vm_region_recurse(mach_task_self(),
1371 &tmp_address,
1372 &tmp_size,
1373 &depth,
1374 (vm_region_recurse_info_t)&info,
1375 &count);
1376 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1377 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1378 tmp_address, tmp_address + tmp_size, depth,
1379 prot_str[info.protection],
1380 prot_str[info.max_protection],
1381 share_mode_str[info.share_mode],
1382 info.object_id);
1383 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1384 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1385 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1386 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1387 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1388 /* check that new mapping is a copy */
1389 tmp_address = remap_address;
1390 tmp_size = 0;
1391 depth = 99;
1392 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1393 kr = mach_vm_region_recurse(mach_task_self(),
1394 &tmp_address,
1395 &tmp_size,
1396 &depth,
1397 (vm_region_recurse_info_t)&info,
1398 &count);
1399 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1400 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1401 tmp_address, tmp_address + tmp_size, depth,
1402 prot_str[info.protection],
1403 prot_str[info.max_protection],
1404 share_mode_str[info.share_mode],
1405 info.object_id);
1406 T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1407 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1408 T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
1409 // T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1410 // T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1411 /* cleanup */
1412 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1413 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1414 T_PASS("mem_entry(VM_SHARE) read-only");
1415 skip_mem_entry_vm_share_ro:
1416
1417 /* test mach_make_memory_entry_64() of RO */
1418 before = *(uint32_t *)(uintptr_t)address;
1419 remap_size = size;
1420 mem_entry = MACH_PORT_NULL;
1421 kr = mach_make_memory_entry_64(mach_task_self(),
1422 &remap_size,
1423 address,
1424 VM_PROT_READ | VM_PROT_WRITE,
1425 &mem_entry,
1426 MACH_PORT_NULL);
1427 T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
1428 /* check that region is still nested */
1429 tmp_address = address;
1430 tmp_size = 0;
1431 depth = 99;
1432 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1433 kr = mach_vm_region_recurse(mach_task_self(),
1434 &tmp_address,
1435 &tmp_size,
1436 &depth,
1437 (vm_region_recurse_info_t)&info,
1438 &count);
1439 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1440 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1441 tmp_address, tmp_address + tmp_size, depth,
1442 prot_str[info.protection],
1443 prot_str[info.max_protection],
1444 share_mode_str[info.share_mode],
1445 info.object_id);
1446 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1447 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1448 // T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1449 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1450 if (depth > 0) {
1451 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1452 }
1453 T_PASS("mem_entry() read-only");
1454
1455 /* test mach_make_memory_entry_64(READ | WRITE | VM_PROT_IS_MASK) of RO */
1456 before = *(uint32_t *)(uintptr_t)address;
1457 remap_size = size;
1458 mem_entry = MACH_PORT_NULL;
1459 kr = mach_make_memory_entry_64(mach_task_self(),
1460 &remap_size,
1461 address,
1462 VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK,
1463 &mem_entry,
1464 MACH_PORT_NULL);
1465 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(READ | WRITE | IS_MASK)");
1466 remap_address = 0;
1467 kr = mach_vm_map(mach_task_self(),
1468 &remap_address,
1469 remap_size,
1470 0, /* mask */
1471 VM_FLAGS_ANYWHERE,
1472 mem_entry,
1473 0, /* offset */
1474 FALSE, /* copy */
1475 VM_PROT_READ | VM_PROT_WRITE,
1476 VM_PROT_READ | VM_PROT_WRITE,
1477 VM_INHERIT_DEFAULT);
1478 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_RIGHT, "vm_map(read/write)");
1479 remap_address = 0;
1480 kr = mach_vm_map(mach_task_self(),
1481 &remap_address,
1482 remap_size,
1483 0, /* mask */
1484 VM_FLAGS_ANYWHERE,
1485 mem_entry,
1486 0, /* offset */
1487 FALSE, /* copy */
1488 VM_PROT_READ,
1489 VM_PROT_READ,
1490 VM_INHERIT_DEFAULT);
1491 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map(read only)");
1492 remap = *(uint32_t *)(uintptr_t)remap_address;
1493 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1494 /* check that region is still nested */
1495 tmp_address = address;
1496 tmp_size = 0;
1497 depth = 99;
1498 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1499 kr = mach_vm_region_recurse(mach_task_self(),
1500 &tmp_address,
1501 &tmp_size,
1502 &depth,
1503 (vm_region_recurse_info_t)&info,
1504 &count);
1505 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1506 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1507 tmp_address, tmp_address + tmp_size, depth,
1508 prot_str[info.protection],
1509 prot_str[info.max_protection],
1510 share_mode_str[info.share_mode],
1511 info.object_id);
1512 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1513 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1514 // T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1515 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1516 if (depth > 0) {
1517 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1518 }
1519 /* check that new mapping is a copy */
1520 tmp_address = remap_address;
1521 tmp_size = 0;
1522 depth = 99;
1523 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1524 kr = mach_vm_region_recurse(mach_task_self(),
1525 &tmp_address,
1526 &tmp_size,
1527 &depth,
1528 (vm_region_recurse_info_t)&info,
1529 &count);
1530 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1531 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1532 tmp_address, tmp_address + tmp_size, depth,
1533 prot_str[info.protection],
1534 prot_str[info.max_protection],
1535 share_mode_str[info.share_mode],
1536 info.object_id);
1537 T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1538 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1539 T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
1540 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1541 T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1542 /* cleanup */
1543 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1544 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1545 T_PASS("mem_entry(READ | WRITE | IS_MASK) read-only");
1546
1547
1548 /*
1549 * Step 2 - check that one can not share write access with a writable
1550 * mapping in the shared region.
1551 */
1552 size = 0;
1553 for (address = SHARED_REGION_BASE;
1554 address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1555 address += size) {
1556 size = 0;
1557 depth = 99;
1558 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1559 kr = mach_vm_region_recurse(mach_task_self(),
1560 &address,
1561 &size,
1562 &depth,
1563 (vm_region_recurse_info_t)&info,
1564 &count);
1565 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1566 if (kr == KERN_INVALID_ADDRESS) {
1567 T_SKIP("could not find writable nested mapping");
1568 T_END;
1569 }
1570 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1571 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1572 address, address + size, depth,
1573 prot_str[info.protection],
1574 prot_str[info.max_protection],
1575 share_mode_str[info.share_mode],
1576 info.object_id);
1577 if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
1578 /* nested and writable: bingo! */
1579 break;
1580 }
1581 }
1582 if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1583 T_SKIP("could not find writable nested mapping");
1584 T_END;
1585 }
1586
1587 /* test vm_remap() of RW */
1588 before = *(uint32_t *)(uintptr_t)address;
1589 remap_address = 0;
1590 remap_size = size;
1591 kr = mach_vm_remap(mach_task_self(),
1592 &remap_address,
1593 remap_size,
1594 0,
1595 VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1596 mach_task_self(),
1597 address,
1598 FALSE,
1599 &cur_prot,
1600 &max_prot,
1601 VM_INHERIT_DEFAULT);
1602 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1603 if (!(cur_prot & VM_PROT_WRITE)) {
1604 T_LOG("vm_remap(): 0x%llx not writable %s/%s",
1605 remap_address, prot_str[cur_prot], prot_str[max_prot]);
1606 T_ASSERT_FAIL("vm_remap() remapping not writable");
1607 }
1608 remap = *(uint32_t *)(uintptr_t)remap_address;
1609 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1610 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1611 after = *(uint32_t *)(uintptr_t)address;
1612 T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1613 *(uint32_t *)(uintptr_t)remap_address = before;
1614 if (before != after) {
1615 T_FAIL("vm_remap() bypassed copy-on-write");
1616 } else {
1617 T_PASS("vm_remap() did not bypass copy-on-write");
1618 }
1619 /* check that region is still nested */
1620 tmp_address = address;
1621 tmp_size = 0;
1622 depth = 99;
1623 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1624 kr = mach_vm_region_recurse(mach_task_self(),
1625 &tmp_address,
1626 &tmp_size,
1627 &depth,
1628 (vm_region_recurse_info_t)&info,
1629 &count);
1630 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1631 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1632 tmp_address, tmp_address + tmp_size, depth,
1633 prot_str[info.protection],
1634 prot_str[info.max_protection],
1635 share_mode_str[info.share_mode],
1636 info.object_id);
1637 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1638 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1639 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1640 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1641 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1642 /* cleanup */
1643 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1644 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1645
1646 #if defined(VM_MEMORY_ROSETTA)
1647 if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1648 T_PASS("vm_remap_new() is not present");
1649 goto skip_vm_remap_new_rw;
1650 }
1651 /* test vm_remap_new() of RW */
1652 before = *(uint32_t *)(uintptr_t)address;
1653 remap_address = 0;
1654 remap_size = size;
1655 cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1656 max_prot = VM_PROT_READ | VM_PROT_WRITE;
1657 kr = mach_vm_remap_new(mach_task_self(),
1658 &remap_address,
1659 remap_size,
1660 0,
1661 VM_FLAGS_ANYWHERE,
1662 mach_task_self(),
1663 address,
1664 FALSE,
1665 &cur_prot,
1666 &max_prot,
1667 VM_INHERIT_DEFAULT);
1668 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1669 if (kr == KERN_PROTECTION_FAILURE) {
1670 /* wrong but not a security issue... */
1671 goto skip_vm_remap_new_rw;
1672 }
1673 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1674 if (!(cur_prot & VM_PROT_WRITE)) {
1675 T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
1676 remap_address, prot_str[cur_prot], prot_str[max_prot]);
1677 T_ASSERT_FAIL("vm_remap_new() remapping not writable");
1678 }
1679 remap = *(uint32_t *)(uintptr_t)remap_address;
1680 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1681 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1682 after = *(uint32_t *)(uintptr_t)address;
1683 T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1684 *(uint32_t *)(uintptr_t)remap_address = before;
1685 if (before != after) {
1686 T_FAIL("vm_remap_new() bypassed copy-on-write");
1687 } else {
1688 T_PASS("vm_remap_new() did not bypass copy-on-write");
1689 }
1690 /* check that region is still nested */
1691 tmp_address = address;
1692 tmp_size = 0;
1693 depth = 99;
1694 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1695 kr = mach_vm_region_recurse(mach_task_self(),
1696 &tmp_address,
1697 &tmp_size,
1698 &depth,
1699 (vm_region_recurse_info_t)&info,
1700 &count);
1701 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1702 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1703 tmp_address, tmp_address + tmp_size, depth,
1704 prot_str[info.protection],
1705 prot_str[info.max_protection],
1706 share_mode_str[info.share_mode],
1707 info.object_id);
1708 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1709 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1710 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1711 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1712 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1713 /* cleanup */
1714 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1715 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1716 skip_vm_remap_new_rw:
1717 #else /* defined(VM_MEMORY_ROSETTA) */
1718 /* pre-BigSur SDK: no vm_remap_new() */
1719 T_LOG("No vm_remap_new() to test");
1720 #endif /* defined(VM_MEMORY_ROSETTA) */
1721
1722 /* test mach_make_memory_entry_64(VM_SHARE) of RW */
1723 before = *(uint32_t *)(uintptr_t)address;
1724 remap_size = size;
1725 mem_entry = MACH_PORT_NULL;
1726 kr = mach_make_memory_entry_64(mach_task_self(),
1727 &remap_size,
1728 address,
1729 MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1730 &mem_entry,
1731 MACH_PORT_NULL);
1732 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1733 if (kr == KERN_PROTECTION_FAILURE) {
1734 /* wrong but not a security issue... */
1735 goto skip_mem_entry_vm_share_rw;
1736 }
1737 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1738 T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping");
1739 // T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar();
1740 remap_address = 0;
1741 kr = mach_vm_map(mach_task_self(),
1742 &remap_address,
1743 remap_size,
1744 0, /* mask */
1745 VM_FLAGS_ANYWHERE,
1746 mem_entry,
1747 0, /* offset */
1748 FALSE, /* copy */
1749 VM_PROT_READ | VM_PROT_WRITE,
1750 VM_PROT_READ | VM_PROT_WRITE,
1751 VM_INHERIT_DEFAULT);
1752 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1753 remap = *(uint32_t *)(uintptr_t)remap_address;
1754 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1755 // T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1756 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1757 // T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1758 after = *(uint32_t *)(uintptr_t)address;
1759 T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1760 *(uint32_t *)(uintptr_t)remap_address = before;
1761 if (before != after) {
1762 T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1763 } else {
1764 T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1765 }
1766 /* check that region is still nested */
1767 tmp_address = address;
1768 tmp_size = 0;
1769 depth = 99;
1770 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1771 kr = mach_vm_region_recurse(mach_task_self(),
1772 &tmp_address,
1773 &tmp_size,
1774 &depth,
1775 (vm_region_recurse_info_t)&info,
1776 &count);
1777 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1778 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1779 tmp_address, tmp_address + tmp_size, depth,
1780 prot_str[info.protection],
1781 prot_str[info.max_protection],
1782 share_mode_str[info.share_mode],
1783 info.object_id);
1784 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1785 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1786 T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1787 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1788 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1789 /* cleanup */
1790 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1791 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1792 mach_port_deallocate(mach_task_self(), mem_entry);
1793 skip_mem_entry_vm_share_rw:
1794
1795 /* test mach_make_memory_entry_64() of RW */
1796 before = *(uint32_t *)(uintptr_t)address;
1797 remap_size = size;
1798 mem_entry = MACH_PORT_NULL;
1799 kr = mach_make_memory_entry_64(mach_task_self(),
1800 &remap_size,
1801 address,
1802 VM_PROT_READ | VM_PROT_WRITE,
1803 &mem_entry,
1804 MACH_PORT_NULL);
1805 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
1806 remap_address = 0;
1807 kr = mach_vm_map(mach_task_self(),
1808 &remap_address,
1809 remap_size,
1810 0, /* mask */
1811 VM_FLAGS_ANYWHERE,
1812 mem_entry,
1813 0, /* offset */
1814 FALSE, /* copy */
1815 VM_PROT_READ | VM_PROT_WRITE,
1816 VM_PROT_READ | VM_PROT_WRITE,
1817 VM_INHERIT_DEFAULT);
1818 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1819 remap = *(uint32_t *)(uintptr_t)remap_address;
1820 T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1821 *(uint32_t *)(uintptr_t)remap_address = before + 1;
1822 after = *(uint32_t *)(uintptr_t)address;
1823 T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
1824 *(uint32_t *)(uintptr_t)remap_address = before;
1825 /* check that region is no longer nested */
1826 tmp_address = address;
1827 tmp_size = 0;
1828 depth = 99;
1829 count = VM_REGION_SUBMAP_INFO_COUNT_64;
1830 kr = mach_vm_region_recurse(mach_task_self(),
1831 &tmp_address,
1832 &tmp_size,
1833 &depth,
1834 (vm_region_recurse_info_t)&info,
1835 &count);
1836 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1837 T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1838 tmp_address, tmp_address + tmp_size, depth,
1839 prot_str[info.protection],
1840 prot_str[info.max_protection],
1841 share_mode_str[info.share_mode],
1842 info.object_id);
1843 if (before != after) {
1844 if (depth == 0) {
1845 T_PASS("mem_entry() honored copy-on-write");
1846 } else {
1847 T_FAIL("mem_entry() did not trigger copy-on_write");
1848 }
1849 } else {
1850 T_FAIL("mem_entry() did not honor copy-on-write");
1851 }
1852 T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1853 // T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1854 T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
1855 T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1856 T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1857 /* cleanup */
1858 kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1859 T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1860 mach_port_deallocate(mach_task_self(), mem_entry);
1861 }
1862
1863 T_DECL(copyoverwrite_submap_protection, "test copywrite vm region submap \
1864 protection", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
1865 {
1866 kern_return_t kr;
1867 mach_vm_address_t vmaddr;
1868 mach_vm_size_t vmsize;
1869 natural_t depth;
1870 vm_region_submap_short_info_data_64_t region_info;
1871 mach_msg_type_number_t region_info_count;
1872
1873 for (vmaddr = SHARED_REGION_BASE;
1874 vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1875 vmaddr += vmsize) {
1876 depth = 99;
1877 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1878 kr = mach_vm_region_recurse(mach_task_self(),
1879 &vmaddr,
1880 &vmsize,
1881 &depth,
1882 (vm_region_info_t) ®ion_info,
1883 ®ion_info_count);
1884 if (kr == KERN_INVALID_ADDRESS) {
1885 break;
1886 }
1887 T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
1888 T_ASSERT_EQ(region_info_count,
1889 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1890 "vm_region_recurse(0x%llx) count = %d expected %d",
1891 vmaddr, region_info_count,
1892 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1893
1894 T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
1895 vmaddr, depth, region_info.protection,
1896 region_info.max_protection);
1897 if (depth == 0) {
1898 /* not a submap mapping: next mapping */
1899 continue;
1900 }
1901 if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1902 break;
1903 }
1904 kr = mach_vm_copy(mach_task_self(),
1905 vmaddr,
1906 vmsize,
1907 vmaddr);
1908 if (kr == KERN_PROTECTION_FAILURE ||
1909 kr == KERN_INVALID_ADDRESS) {
1910 T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
1911 vmaddr, vmsize, kr, mach_error_string(kr));
1912 continue;
1913 }
1914 T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
1915 vmaddr, vmsize, region_info.protection);
1916 depth = 0;
1917 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1918 kr = mach_vm_region_recurse(mach_task_self(),
1919 &vmaddr,
1920 &vmsize,
1921 &depth,
1922 (vm_region_info_t) ®ion_info,
1923 ®ion_info_count);
1924 T_ASSERT_MACH_SUCCESS(kr, "m_region_recurse(0x%llx)", vmaddr);
1925 T_ASSERT_EQ(region_info_count,
1926 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1927 "vm_region_recurse() count = %d expected %d",
1928 region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1929
1930 T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
1931 vmaddr, depth);
1932 T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
1933 0, "vm_region_recurse(0x%llx): prot 0x%x",
1934 vmaddr, region_info.protection);
1935 }
1936 }
1937
/*
 * Regression test for rdar://16783546: mlock() of executable text must
 * either fail with EPERM or succeed without altering the wired code.
 * Three flavors of text are probed: two shared-region/library functions
 * (printf, fprintf) and this test binary's own testmain_wire_text.
 */
T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
    the shared region triggers code-signing violations",
	T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	uint32_t *addr, before, after;
	int retval;
	int saved_errno;
	kern_return_t kr;
	vm_address_t map_addr, remap_addr;
	vm_prot_t curprot, maxprot;

	/* take the address of library text (printf) ... */
	addr = (uint32_t *)&printf;
#if __has_feature(ptrauth_calls)
	/* ... stripping the PAC signature so it is a plain VA */
	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
	map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
	remap_addr = 0;
	/* NOTE(review): 4096 is a hard-coded page size here and below — presumably fine
	 * for a one-page probe even on 16K-page systems; confirm intent. */
	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(), map_addr,
	    FALSE, /* copy */
	    &curprot, &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
	    kr, mach_error_string(kr));
	/* wiring shared text must fail with EPERM or leave the code unchanged */
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	/* same probe on another library function */
	addr = (uint32_t *) &fprintf;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	/* same probe on text in the test binary itself (declared elsewhere) */
	addr = (uint32_t *) &testmain_wire_text;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire text error return error %d (%s)",
		    saved_errno, strerror(saved_errno));
	} else if (after != before) {
		T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
	} else {
		T_PASS("wire text");
	}
}
2006
2007 T_DECL(remap_comm_page, "test remapping of the commpage - rdar://93177124",
2008 T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
2009 {
2010 kern_return_t kr;
2011 mach_vm_address_t commpage_addr, remap_addr;
2012 mach_vm_size_t vmsize;
2013 vm_prot_t curprot, maxprot;
2014
2015 #if __arm__
2016 commpage_addr = 0xFFFF4000ULL;
2017 #elif __arm64__
2018 commpage_addr = 0x0000000FFFFFC000ULL;
2019 #elif __x86_64__
2020 commpage_addr = 0x00007FFFFFE00000ULL;
2021 #else
2022 T_FAIL("unknown commpage address for this architecture");
2023 #endif
2024
2025 T_LOG("Remapping commpage from 0x%llx", commpage_addr);
2026 vmsize = vm_kernel_page_size;
2027 remap_addr = 0;
2028 kr = mach_vm_remap(mach_task_self(),
2029 &remap_addr,
2030 vmsize,
2031 0, /* mask */
2032 VM_FLAGS_ANYWHERE,
2033 mach_task_self(),
2034 commpage_addr,
2035 TRUE, /* copy */
2036 &curprot,
2037 &maxprot,
2038 VM_INHERIT_DEFAULT);
2039 if (kr == KERN_INVALID_ADDRESS) {
2040 T_SKIP("No mapping found at 0x%llx\n", commpage_addr);
2041 return;
2042 }
2043 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() of commpage from 0x%llx", commpage_addr);
2044 }
2045