1 /* Mach vm map miscellaneous unit tests
2 *
3 * This test program serves to be a regression test suite for legacy
4 * vm issues, ideally each test will be linked to a radar number and
5 * perform a set of certain validations.
6 *
7 */
8 #include <darwintest.h>
9 #include <darwintest_utils.h>
10
11 #include <dlfcn.h>
12 #include <fcntl.h>
13 #include <errno.h>
14 #include <ptrauth.h>
15 #include <signal.h>
16 #include <stdio.h>
17 #include <stdlib.h>
18 #include <string.h>
19 #include <time.h>
20
21 #include <sys/mman.h>
22 #include <sys/proc.h>
23
24 #include <mach/mach_error.h>
25 #include <mach/mach_init.h>
26 #include <mach/mach_port.h>
27 #include <mach/mach_vm.h>
28 #include <mach/vm_map.h>
29 #include <mach/vm_param.h>
30 #include <mach/task.h>
31 #include <mach/task_info.h>
32 #include <mach/shared_region.h>
33 #include <machine/cpu_capabilities.h>
34
35 #include <sys/mman.h>
36 #include <sys/syslimits.h>
37
/* Common darwintest metadata applied to every test in this file. */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_RUN_CONCURRENTLY(true));
43
/*
 * rdar://23334087: VM memory tags should be propagated through memory
 * entries.
 *
 * Tags three 16KB chunks of one allocation with distinct VM tags
 * (100 + chunk index), wraps the whole range in a memory entry and maps
 * the entry back into the task — once with MAP_MEM_VM_COPY and once
 * with MAP_MEM_VM_SHARE (order randomized by the clock).  Each chunk of
 * the new mapping must report the original chunk's tag via
 * mach_vm_region_recurse(), unless override_tag is set, in which case
 * mach_vm_map() passes VM_MAKE_TAG(200) and every chunk must report
 * tag 200 instead.
 */
static void
test_memory_entry_tagging(int override_tag)
{
	int pass;
	int do_copy;
	kern_return_t kr;
	mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t *vmaddr_ptr;
	mach_vm_size_t *vmsize_ptr;
	mach_vm_address_t vmaddr_chunk;
	mach_vm_size_t vmsize_chunk;
	mach_vm_offset_t vmoff;
	mach_port_t mem_entry_copied, mem_entry_shared;
	mach_port_t *mem_entry_ptr;
	unsigned int i;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t ri_count;
	unsigned int depth;
	int vm_flags;
	unsigned int expected_tag;

	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_copied = 0;
	vmsize_shared = 0;
	vmaddr_chunk = 0;
	vmsize_chunk = 16 * 1024;
	vmaddr_orig = 0;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;
	pass = 0;

	/* reserve a 3-chunk region */
	vmaddr_orig = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * Re-allocate each chunk in place (FIXED|OVERWRITE) so that chunk i
	 * carries its own distinct user tag, 100 + i.
	 */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + ((mach_vm_size_t)i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + (int)i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* touch every page so the memory entry has resident pages to cover */
	for (vmoff = 0;
	    vmoff < vmsize_orig;
	    vmoff += PAGE_SIZE) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

	/* randomize which flavor (copy vs. share) runs first */
	do_copy = time(NULL) & 1;
again:
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	/* wrap the whole original range in a memory entry */
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig, /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the memory entry back in, optionally overriding the tag */
	*vmaddr_ptr = 0;
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0, /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * Sanity-check copy vs. share semantics: a write through the
	 * original mapping is visible in the new mapping only when the
	 * memory entry was made with MAP_MEM_VM_SHARE.
	 */
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* verify the user tag reported for each chunk of the new mapping */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t vmaddr_info;
		mach_vm_size_t vmsize_info;

		vmaddr_info = *vmaddr_ptr + ((mach_vm_size_t)i * vmsize_chunk);
		vmsize_info = 0;
		depth = 1;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		/* each chunk should still be its own map entry */
		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, (mach_vm_size_t)i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, (mach_vm_size_t)i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%u tag=%u expected %u",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* second pass exercises the other flavor (copy vs. share) */
	if (++pass < 2) {
		do_copy = !do_copy;
		goto again;
	}

done:
	/* release everything that was successfully created */
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
		vmsize_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
		vmsize_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
		vmsize_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}

	return;
}
280
/*
 * rdar://22611816: mach_make_memory_entry(MAP_MEM_VM_COPY) should never
 * use a KERNEL_BUFFER copy.
 *
 * Creates a MAP_MEM_VM_COPY memory entry for a one-byte allocation,
 * maps it back with copy=TRUE, and verifies copy semantics: both
 * mappings initially read the original value, and a write through the
 * new mapping must not be visible through the original one.
 */
static void
test_map_memory_entry(void)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	mach_port_t mem_entry;
	unsigned char *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 0;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;

	/* allocate one byte (rounded to a page) and initialize it */
	vmsize1 = 1;
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	/* wrap it in a MAP_MEM_VM_COPY memory entry */
	vmsize2 = 1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1, /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	/* the entry may be rounded up, but must cover the requested size */
	T_QUIET;
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the entry back into this task as a copy */
	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    TRUE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* both mappings should initially read the original value */
	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* a write through the copy must not affect the original */
	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	/* release everything that was successfully created */
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
		vmsize1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
		vmsize2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}

	return;
}
389
/* Run the tag-propagation test without and then with a tag override. */
T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
VM memory tags should be propagated through memory entries",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_memory_entry_tagging(0);
	test_memory_entry_tagging(1);
}
397
/* Entry point for the MAP_MEM_VM_COPY copy-semantics check above. */
T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
copy", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_map_memory_entry();
}
404
405 static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };
406
/*
 * Return this task's current physical footprint in bytes, as reported
 * by TASK_VM_INFO.  Asserts that task_info() succeeds; on arm64 it also
 * asserts that the kernel returned the full TASK_VM_INFO_COUNT.
 */
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__)
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) */
	return ti.phys_footprint;
}
428
/*
 * Walk a 1MB purgeable allocation through NONVOLATILE -> VOLATILE ->
 * EMPTY -> VOLATILE -> NONVOLATILE, with half of it mlock()ed during
 * part of the sequence, sampling the task's physical footprint at each
 * step.  State-transition results are asserted; footprint deltas are
 * only logged as warnings, since memory pressure can legitimately
 * perturb them.
 */
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
emptying, volatilizing purgeable vm", T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	vm_purgable_t state;
	uint64_t footprint[8];

	vm_addr = 0;
	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* footprint0 */
	footprint[0] = task_footprint();
	T_LOG(" footprint[0] = %llu", footprint[0]);

	/* touch one byte per page to make the whole range resident */
	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG(" footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}

	/* wire the first half so it cannot be purged */
	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* footprint2 == footprint1 */
	footprint[2] = task_footprint();
	T_LOG(" footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}

	/* volatilize: only the unwired half should leave the footprint */
	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2) */
	footprint[3] = task_footprint();
	T_LOG(" footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}

	/*
	 * Empty the object.  The previous state may be reported as
	 * VOLATILE, or as EMPTY if it was already purged in the meantime.
	 */
	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3 */
	footprint[4] = task_footprint();
	T_LOG(" footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}

	/* unwire: the wired half can now leave the footprint too */
	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG(" footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}

	/* EMPTY -> VOLATILE: no pages left, footprint unchanged */
	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG(" footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}

	/* back to NONVOLATILE: previous state should still read EMPTY */
	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG(" footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}
567
568 static kern_return_t
get_reusable_size(uint64_t * reusable)569 get_reusable_size(uint64_t *reusable)
570 {
571 task_vm_info_data_t ti;
572 mach_msg_type_number_t ti_count = TASK_VM_INFO_COUNT;
573 kern_return_t kr;
574
575 kr = task_info(mach_task_self(),
576 TASK_VM_INFO,
577 (task_info_t) &ti,
578 &ti_count);
579 T_QUIET;
580 T_EXPECT_MACH_SUCCESS(kr, "task_info()");
581 T_QUIET;
582 *reusable = ti.reusable;
583 return kr;
584 }
585
/*
 * rdar://2295713: MADV_FREE_REUSABLE accounting on shared memory.
 *
 * Shares a 96KB region between two mappings via vm_remap(), mlock()s
 * the 32KB tail of the alias, then marks the 64KB head of the original
 * mapping reusable.  The physical footprint must drop by twice the
 * madvised size (both mappings) and the task's "reusable" counter must
 * grow by the same amount.  The reusable checks are skipped under
 * Rosetta translation on x86_64.
 */
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
rethink needs madvise(MADV_FREE_HARDER)",
    T_META_RUN_CONCURRENTLY(false),
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	vm_address_t vmaddr = 0, vmaddr2 = 0;
	vm_size_t vmsize, vmsize1, vmsize2;
	kern_return_t kr;
	char *cp;
	vm_prot_t curprot, maxprot;
	int ret;
	int vmflags;
	uint64_t footprint_before, footprint_after;
	uint64_t reusable_before, reusable_after, reusable_expected;


	vmsize1 = 64 * 1024; /* 64KB to madvise() */
	vmsize2 = 32 * 1024; /* 32KB to mlock() */
	vmsize = vmsize1 + vmsize2;
	vmflags = VM_FLAGS_ANYWHERE;
	VM_SET_FLAGS_ALIAS(vmflags, VM_MEMORY_MALLOC);

	/* baseline "reusable" count before we allocate anything */
	kr = get_reusable_size(&reusable_before);
	if (kr) {
		goto done;
	}

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    vmflags);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* fault in and dirty every byte */
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	/* create a second, shared mapping of the same memory */
	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the alias must read back exactly what was written */
	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp,
		    (unsigned char)*cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	/* a write through one mapping must be visible through the other */
	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */

	/* wire the tail of the alias; only the head gets madvised below */
	ret = mlock((char *)(uintptr_t)(vmaddr2 + vmsize1),
	    vmsize2);
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "mlock()");

	footprint_before = task_footprint();

	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize1,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* both mappings stop charging the madvised range to the footprint */
	footprint_after = task_footprint();
	T_ASSERT_EQ(footprint_after, footprint_before - 2 * vmsize1, NULL);

	kr = get_reusable_size(&reusable_after);
	if (kr) {
		goto done;
	}
	/* both mappings count toward the "reusable" total */
	reusable_expected = 2ULL * vmsize1 + reusable_before;
	T_EXPECT_EQ(reusable_after, reusable_expected, "actual=%lld expected %lld",
	    reusable_after, reusable_expected);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}
715
/*
 * rdar://37476183: purgeable memory must not become eligible for reuse.
 *
 * madvise(MADV_CAN_REUSE) on a purgeable allocation must fail with
 * EINVAL.  Skipped under Rosetta translation on x86_64.
 */
T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
[ purgeable_malloc became eligible for reuse ]",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.)");
	}
#endif /* defined(__x86_64__) || defined(__i386__) */
	vm_address_t vmaddr = 0;
	vm_size_t vmsize;
	kern_return_t kr;
	char *cp;
	int ret;

	vmsize = 10 * 1024 * 1024; /* 10MB */
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_FLAGS_PURGABLE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* fault in and dirty every byte */
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	/* MADV_CAN_REUSE must be rejected for purgeable memory */
	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize,
	    MADV_CAN_REUSE);
	T_QUIET;
	T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be adviced to reuse");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}
767
/*
 * Scan [start, start + vmsize) in 64-bit strides and report whether
 * every word is zero.  On the first non-zero word, record its address
 * in *non_zero_addr and return false; otherwise return true and leave
 * *non_zero_addr untouched.  Callers pass page-multiple sizes, so
 * vmsize is assumed to be a multiple of sizeof(uint64_t).
 */
static bool
validate_memory_is_zero(
	vm_address_t start,
	vm_size_t vmsize,
	vm_address_t *non_zero_addr)
{
	const uint64_t *word = (const uint64_t *)start;
	const uint64_t *end = (const uint64_t *)(start + vmsize);

	while (word < end) {
		if (*word != 0) {
			*non_zero_addr = (vm_address_t)word;
			return false;
		}
		word++;
	}
	return true;
}
784
/*
 * MADV_ZERO must leave the whole range reading as zeroes, both when the
 * pages are in a reusable state and when they have been compressed
 * (paged out via MADV_PAGEOUT).
 */
T_DECL(madvise_zero, "test madvise zero", T_META_TAG_VM_PREFERRED)
{
	vm_address_t vmaddr = 0;
	vm_size_t vmsize = PAGE_SIZE * 3;
	vm_address_t non_zero_addr = 0;
	kern_return_t kr;
	int ret;
	unsigned char vec;

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* case 1: MADV_ZERO on a range that was marked reusable */
	memset((void *)vmaddr, 'A', vmsize);
	ret = madvise((void*)vmaddr, vmsize, MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_FREE_REUSABLE)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* re-dirty the first page, then zero the whole range */
	memset((void *)vmaddr, 'B', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %lu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* case 2: MADV_ZERO on a range that was compressed/paged out */
	memset((void *)vmaddr, 'C', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_PAGEOUT);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_PAGEOUT)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* wait for the pages to be (asynchronously) compressed */
	T_QUIET; T_LOG("waiting for first page to be paged out");
	do {
		ret = mincore((void*)vmaddr, 1, (char *)&vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(1st)");
	} while (vec & MINCORE_INCORE);
	T_QUIET; T_LOG("waiting for last page to be paged out");
	do {
		ret = mincore((void*)(vmaddr + vmsize - 1), 1, (char *)&vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(last)");
	} while (vec & MINCORE_INCORE);

	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %lu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}
869
/*
 * MADV_ZERO_WIRED_PAGES, three scenarios.  A vm_remap() alias of the
 * same pages is used to observe their contents after the madvised
 * mapping itself is unmapped:
 *  1. wired + writable mapping: pages are zero-filled on unmap;
 *  2. non-writable mapping: the madvise itself fails with EPERM;
 *  3. mapping made read-only after the madvise: pages are NOT
 *     zero-filled when unwired/unmapped.
 */
T_DECL(madvise_zero_wired, "test madvise(MADV_ZERO_WIRED_PAGES)", T_META_TAG_VM_PREFERRED)
{
	vm_address_t vmaddr;
	vm_address_t vmaddr_remap;
	vm_size_t vmsize = PAGE_SIZE * 3;
	vm_prot_t cur_prot, max_prot;
	vm_address_t non_zero_addr = 0;
	kern_return_t kr;
	int ret;

	/*
	 * madvise(MADV_ZERO_WIRED_PAGES) should cause wired pages to get zero-filled
	 * when they get deallocated.
	 */
	vmaddr = 0;
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	memset((void *)vmaddr, 'A', vmsize);
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	/* alias the pages so we can inspect them after munmap(vmaddr) */
	vmaddr_remap = 0;
	kr = vm_remap(mach_task_self(), &vmaddr_remap, vmsize, 0, VM_FLAGS_ANYWHERE,
	    mach_task_self(), vmaddr, FALSE, &cur_prot, &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO_WIRED_PAGES);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO_WIRED_PAGES)");
	ret = mlock((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mlock()");
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	ret = munmap((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");
	T_ASSERT_EQ(*(char *)vmaddr_remap, 0, "wired pages are zero-filled on unmap");
	T_QUIET; T_ASSERT_EQ(validate_memory_is_zero(vmaddr_remap, vmsize, &non_zero_addr),
	    true, "madvise(%p, %lu, MADV_ZERO_WIRED) did not zero-fill mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	ret = munmap((void *)vmaddr_remap, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");

	/*
	 * madvise(MADV_ZERO_WIRED_PAGES) should fail with EPERM if the
	 * mapping is not writable.
	 */
	vmaddr = 0;
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	memset((void *)vmaddr, 'A', vmsize);
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	ret = mprotect((void*)vmaddr, vmsize, PROT_READ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mprotect(PROT_READ)");
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO_WIRED_PAGES);
	//T_LOG("madv() ret %d errno %d\n", ret, errno);
	T_ASSERT_POSIX_FAILURE(ret, EPERM,
	    "madvise(MADV_ZERO_WIRED_PAGES) returns EPERM on non-writable mapping ret %d errno %d", ret, errno);
	ret = munmap((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");

	/*
	 * madvise(MADV_ZERO_WIRED_PAGES) should not zero-fill the pages
	 * if the mapping is no longer writable when it gets unwired.
	 */
	vmaddr = 0;
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	memset((void *)vmaddr, 'A', vmsize);
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	/* alias again, then drop write permission before wiring */
	vmaddr_remap = 0;
	kr = vm_remap(mach_task_self(), &vmaddr_remap, vmsize, 0, VM_FLAGS_ANYWHERE,
	    mach_task_self(), vmaddr, FALSE, &cur_prot, &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO_WIRED_PAGES);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO_WIRED_PAGES)");
	ret = mprotect((void*)vmaddr, vmsize, PROT_READ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mprotect(PROT_READ)");
	ret = mlock((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mlock()");
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	ret = munmap((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");
	T_ASSERT_EQ(*(char *)vmaddr_remap, 'A', "RO wired pages NOT zero-filled on unmap");
	ret = munmap((void *)vmaddr_remap, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");
}
965
/* marker value for destination words that must NOT be overwritten */
#define DEST_PATTERN 0xFEDCBA98

/*
 * rdar://31075370: mach_vm_read_overwrite() into an existing mapping.
 *
 * Fills the source with word index values and the destination with
 * DEST_PATTERN, then overwrites all but the first and last word of the
 * destination from the same offsets of the source.  Afterwards the
 * first and last destination words must still hold DEST_PATTERN, and
 * every word in between must equal its index (copied from the source).
 */
T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
rdar://31075370",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	uint32_t *ip;
	uint32_t i;

	vmaddr1 = 0;
	vmsize1 = 4 * 4096;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* source: word i holds the value i */
	ip = (uint32_t *)(uintptr_t)vmaddr1;
	for (i = 0; (mach_vm_size_t)i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = i;
	}

	vmaddr2 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr2,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* destination: every word holds DEST_PATTERN */
	ip = (uint32_t *)(uintptr_t)vmaddr2;
	for (i = 0; (mach_vm_size_t)i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = DEST_PATTERN;
	}

	/* copy everything except the first and last word */
	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
	kr = mach_vm_read_overwrite(mach_task_self(),
	    vmaddr1 + sizeof(*ip),
	    vmsize2,
	    vmaddr2 + sizeof(*ip),
	    &vmsize2);
	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");

	/* word 0 was outside the copy: still DEST_PATTERN */
	ip = (uint32_t *)(uintptr_t)vmaddr2;
	for (i = 0; i < 1; i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
	/*
	 * Middle words got the source values.  (vmsize1 - 2) / sizeof(*ip)
	 * floors to (vmsize1 / sizeof(*ip)) - 1, i.e. up to but excluding
	 * the last word.
	 */
	for (; (mach_vm_size_t)i < (vmsize1 - 2) / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], i);
	}
	/* the last word was outside the copy: still DEST_PATTERN */
	for (; (mach_vm_size_t)i < vmsize1 / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
}
1029
1030 T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
1031 objects - rdar://35610377",
1032 T_META_ALL_VALID_ARCHS(true),
1033 T_META_TAG_VM_PREFERRED)
1034 {
1035 kern_return_t kr;
1036 mach_vm_address_t vmaddr1, vmaddr2, vmaddr3;
1037 mach_vm_size_t vmsize;
1038 vm_prot_t curprot, maxprot;
1039
1040 vmsize = 32 * 1024 * 1024;
1041
1042 vmaddr1 = 0;
1043 kr = mach_vm_allocate(mach_task_self(),
1044 &vmaddr1,
1045 vmsize,
1046 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
1047 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
1048
1049 memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);
1050
1051 vmaddr2 = 0;
1052 kr = mach_vm_remap(mach_task_self(),
1053 &vmaddr2,
1054 vmsize,
1055 0, /* mask */
1056 VM_FLAGS_ANYWHERE,
1057 mach_task_self(),
1058 vmaddr1,
1059 TRUE, /* copy */
1060 &curprot,
1061 &maxprot,
1062 VM_INHERIT_DEFAULT);
1063 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");
1064
1065 vmaddr3 = 0;
1066 kr = mach_vm_remap(mach_task_self(),
1067 &vmaddr3,
1068 vmsize,
1069 0, /* mask */
1070 VM_FLAGS_ANYWHERE,
1071 mach_task_self(),
1072 vmaddr2,
1073 TRUE, /* copy */
1074 &curprot,
1075 &maxprot,
1076 VM_INHERIT_DEFAULT);
1077 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
1078 }
1079
1080 T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
1081 non-purgeable - rdar://31990033",
1082 T_META_ALL_VALID_ARCHS(true),
1083 T_META_TAG_VM_PREFERRED)
1084 {
1085 kern_return_t kr;
1086 vm_address_t vmaddr;
1087 vm_purgable_t state;
1088
1089 vmaddr = 0;
1090 kr = vm_allocate(mach_task_self(), &vmaddr, 1,
1091 VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
1092 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
1093
1094 state = VM_PURGABLE_DENY;
1095 kr = vm_purgable_control(mach_task_self(), vmaddr,
1096 VM_PURGABLE_SET_STATE, &state);
1097 T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1098 "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
1099 kr, mach_error_string(kr));
1100
1101 kr = vm_deallocate(mach_task_self(), vmaddr, 1);
1102 T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
1103 }
1104
/* size (64KB) of the source allocation used by the vm_remap_zero test below */
#define VMSIZE 0x10000
1106
1107 T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
1108 T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
1109 {
1110 kern_return_t kr;
1111 mach_vm_address_t vmaddr1, vmaddr2;
1112 mach_vm_size_t vmsize;
1113 vm_prot_t curprot, maxprot;
1114
1115 vmaddr1 = 0;
1116 vmsize = VMSIZE;
1117 kr = mach_vm_allocate(mach_task_self(),
1118 &vmaddr1,
1119 vmsize,
1120 VM_FLAGS_ANYWHERE);
1121 T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
1122
1123 vmaddr2 = 0;
1124 vmsize = 0;
1125 kr = mach_vm_remap(mach_task_self(),
1126 &vmaddr2,
1127 vmsize,
1128 0,
1129 VM_FLAGS_ANYWHERE,
1130 mach_task_self(),
1131 vmaddr1,
1132 FALSE,
1133 &curprot,
1134 &maxprot,
1135 VM_INHERIT_DEFAULT);
1136 T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
1137 vmsize, kr, mach_error_string(kr));
1138
1139 vmaddr2 = 0;
1140 vmsize = (mach_vm_size_t)-2;
1141 kr = mach_vm_remap(mach_task_self(),
1142 &vmaddr2,
1143 vmsize,
1144 0,
1145 VM_FLAGS_ANYWHERE,
1146 mach_task_self(),
1147 vmaddr1,
1148 FALSE,
1149 &curprot,
1150 &maxprot,
1151 VM_INHERIT_DEFAULT);
1152 T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
1153 vmsize, kr, mach_error_string(kr));
1154 }
1155
/*
 * Syscall stub: on success returns 0 and stores the shared region start
 * address; returns non-zero with errno set (ENOMEM expected by the tests
 * below) when the process has no shared region.
 */
extern int __shared_region_check_np(uint64_t *);
1157
1158 T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
1159 - rdar://problem/41481703",
1160 T_META_ALL_VALID_ARCHS(true),
1161 T_META_TAG_VM_PREFERRED)
1162 {
1163 int ret;
1164 kern_return_t kr;
1165 mach_vm_address_t sr_start;
1166 mach_vm_size_t vmsize;
1167 mach_vm_address_t vmaddr;
1168 mach_port_t mem_entry;
1169
1170 ret = __shared_region_check_np(&sr_start);
1171 if (ret != 0) {
1172 int saved_errno;
1173 saved_errno = errno;
1174
1175 T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
1176 saved_errno, strerror(saved_errno));
1177 T_END;
1178 }
1179
1180 vmsize = PAGE_SIZE;
1181 kr = mach_make_memory_entry_64(mach_task_self(),
1182 &vmsize,
1183 sr_start,
1184 MAP_MEM_VM_SHARE | VM_PROT_READ,
1185 &mem_entry,
1186 MACH_PORT_NULL);
1187 T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);
1188
1189 vmaddr = 0;
1190 kr = mach_vm_map(mach_task_self(),
1191 &vmaddr,
1192 vmsize,
1193 0,
1194 VM_FLAGS_ANYWHERE,
1195 mem_entry,
1196 0,
1197 FALSE,
1198 VM_PROT_READ,
1199 VM_PROT_READ,
1200 VM_INHERIT_DEFAULT);
1201 T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1202 }
1203
/* logging helpers: index prot_str[] by vm_prot_t bits (R=1, W=2, X=4) */
static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
/* index share_mode_str[] by the SM_* value from vm_region_recurse() info */
static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };
1206
/*
 * rdar://problem/74469953
 * Exhaustive check that no remapping/memory-entry mechanism can be abused
 * to gain write access to the shared region:
 *   Step 1: find a read-only nested mapping and verify vm_remap(),
 *           vm_remap_new(), mach_make_memory_entry_64(VM_SHARE),
 *           mach_make_memory_entry_64(), and the VM_PROT_IS_MASK variant
 *           all either fail or hand back copy-on-write/read-only memory.
 *   Step 2: same battery against a writable nested mapping; writes
 *           through the new mapping must not be visible at the original
 *           address (copy-on-write), and the original must stay nested
 *           except where unnesting is the documented outcome.
 *
 * NOTE(review): "shoudl" typo below is in the runtime description string
 * and is deliberately left untouched here.
 */
T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region shoudl not give write access to shared region - rdar://problem/74469953",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	int ret;
	uint64_t sr_start;
	kern_return_t kr;
	mach_vm_address_t address, tmp_address, remap_address;
	mach_vm_size_t size, tmp_size, remap_size;
	uint32_t depth;
	mach_msg_type_number_t count;
	vm_region_submap_info_data_64_t info;
	vm_prot_t cur_prot, max_prot;
	uint32_t before, after, remap;
	mach_port_t mem_entry;

	/* locate the shared region; skip gracefully if there is none */
	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		int saved_errno;
		saved_errno = errno;

		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}
	T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
	T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
	T_LOG("shared region starts at 0x%llx", sr_start);
	T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
	    "shared region starts below BASE");
	T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
	    "shared region starts above BASE+SIZE");

	/*
	 * Step 1 - check that one can not get write access to a read-only
	 * mapping in the shared region.
	 */
	/* scan the shared region for a nested (depth > 0) read-only mapping */
	size = 0;
	for (address = SHARED_REGION_BASE;
	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    address += size) {
		size = 0;
		depth = 99;
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &address,
		    &size,
		    &depth,
		    (vm_region_recurse_info_t)&info,
		    &count);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
		if (kr == KERN_INVALID_ADDRESS) {
			T_SKIP("could not find read-only nested mapping");
			T_END;
		}
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
		    address, address + size, depth,
		    prot_str[info.protection],
		    prot_str[info.max_protection],
		    share_mode_str[info.share_mode],
		    info.object_id);
		if (depth > 0 &&
		    (info.protection == VM_PROT_READ) &&
		    (info.max_protection == VM_PROT_READ)) {
			/* nested and read-only: bingo! */
			break;
		}
	}
	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
		T_SKIP("could not find read-only nested mapping");
		T_END;
	}

	/* test vm_remap() of RO */
	/* snapshot the first word so we can detect any write-through later */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	kr = mach_vm_remap(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
//	T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
//	T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	/* depth > 0 means the mapping still lives in the shared submap */
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	/* check that new mapping is read-only */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	// this would crash if actually read-only:
//	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
//	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap() did not bypass copy-on-write");
	}
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("vm_remap() read-only");

#if defined(VM_MEMORY_ROSETTA)
	/* mach_vm_remap_new() only exists on BigSur-and-later SDKs */
	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
		T_PASS("vm_remap_new() is not present");
		goto skip_vm_remap_new_ro;
	}
	/* test vm_remap_new() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	/* ask for read-write; the kernel may downgrade or refuse */
	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
	max_prot = VM_PROT_READ | VM_PROT_WRITE;
	kr = mach_vm_remap_new(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_vm_remap_new_ro;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	/* write through the new mapping, then verify the original is intact */
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap_new() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap_new() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	T_PASS("vm_remap_new() read-only");
skip_vm_remap_new_ro:
#else /* defined(VM_MEMORY_ROSETTA) */
	/* pre-BigSur SDK: no vm_remap_new() */
	T_LOG("No vm_remap_new() to test");
#endif /* defined(VM_MEMORY_ROSETTA) */

	/* test mach_make_memory_entry_64(VM_SHARE) of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_mem_entry_vm_share_ro;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	/* map the entry read-write and probe for copy-on-write behavior */
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
	} else {
		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	/* check that new mapping is a copy */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
//	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
//	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("mem_entry(VM_SHARE) read-only");
skip_mem_entry_vm_share_ro:

	/* test mach_make_memory_entry_64() of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	/* asking for write on a read-only mapping must fail outright */
	T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
//	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	if (depth > 0) {
		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	}
	T_PASS("mem_entry() read-only");

	/* test mach_make_memory_entry_64(READ | WRITE | VM_PROT_IS_MASK) of RO */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	/* IS_MASK: request is a ceiling, so creation succeeds on RO memory */
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(READ | WRITE | IS_MASK)");
	/* ...but mapping it read-write must still be refused */
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_RIGHT, "vm_map(read/write)");
	/* mapping it read-only is fine */
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map(read only)");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
//	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
	if (depth > 0) {
		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
	}
	/* check that new mapping is a copy */
	tmp_address = remap_address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	T_PASS("mem_entry(READ | WRITE | IS_MASK) read-only");


	/*
	 * Step 2 - check that one can not share write access with a writable
	 * mapping in the shared region.
	 */
	/* scan again, this time for a nested mapping that is writable */
	size = 0;
	for (address = SHARED_REGION_BASE;
	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    address += size) {
		size = 0;
		depth = 99;
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &address,
		    &size,
		    &depth,
		    (vm_region_recurse_info_t)&info,
		    &count);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
		if (kr == KERN_INVALID_ADDRESS) {
			T_SKIP("could not find writable nested mapping");
			T_END;
		}
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
		    address, address + size, depth,
		    prot_str[info.protection],
		    prot_str[info.max_protection],
		    share_mode_str[info.share_mode],
		    info.object_id);
		if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
			/* nested and writable: bingo! */
			break;
		}
	}
	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
		T_SKIP("could not find writable nested mapping");
		T_END;
	}

	/* test vm_remap() of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	kr = mach_vm_remap(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
	if (!(cur_prot & VM_PROT_WRITE)) {
		T_LOG("vm_remap(): 0x%llx not writable %s/%s",
		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
		T_ASSERT_FAIL("vm_remap() remapping not writable");
	}
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	/* write through the remap; original must be unchanged (COW) */
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");

#if defined(VM_MEMORY_ROSETTA)
	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
		T_PASS("vm_remap_new() is not present");
		goto skip_vm_remap_new_rw;
	}
	/* test vm_remap_new() of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_address = 0;
	remap_size = size;
	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
	max_prot = VM_PROT_READ | VM_PROT_WRITE;
	kr = mach_vm_remap_new(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    address,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_vm_remap_new_rw;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
	if (!(cur_prot & VM_PROT_WRITE)) {
		T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
		T_ASSERT_FAIL("vm_remap_new() remapping not writable");
	}
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	/* write through the remap; original must be unchanged (COW) */
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("vm_remap_new() bypassed copy-on-write");
	} else {
		T_PASS("vm_remap_new() did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
skip_vm_remap_new_rw:
#else /* defined(VM_MEMORY_ROSETTA) */
	/* pre-BigSur SDK: no vm_remap_new() */
	T_LOG("No vm_remap_new() to test");
#endif /* defined(VM_MEMORY_ROSETTA) */

	/* test mach_make_memory_entry_64(VM_SHARE) of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	if (kr == KERN_PROTECTION_FAILURE) {
		/* wrong but not a security issue... */
		goto skip_mem_entry_vm_share_rw;
	}
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
	T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping");
//	T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar();
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
//	T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
	/* write through the shared-entry mapping; original must not change */
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
//	T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	if (before != after) {
		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
	} else {
		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
	}
	/* check that region is still nested */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	mach_port_deallocate(mach_task_self(), mem_entry);
skip_mem_entry_vm_share_rw:

	/* test mach_make_memory_entry_64() of RW */
	before = *(uint32_t *)(uintptr_t)address;
	remap_size = size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &remap_size,
	    address,
	    VM_PROT_READ | VM_PROT_WRITE,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
	remap_address = 0;
	kr = mach_vm_map(mach_task_self(),
	    &remap_address,
	    remap_size,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
	remap = *(uint32_t *)(uintptr_t)remap_address;
	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
	*(uint32_t *)(uintptr_t)remap_address = before + 1;
	after = *(uint32_t *)(uintptr_t)address;
	T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
	*(uint32_t *)(uintptr_t)remap_address = before;
	/* check that region is no longer nested */
	/* a true writable memory entry is expected to unnest the mapping */
	tmp_address = address;
	tmp_size = 0;
	depth = 99;
	count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &tmp_address,
	    &tmp_size,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
	    tmp_address, tmp_address + tmp_size, depth,
	    prot_str[info.protection],
	    prot_str[info.max_protection],
	    share_mode_str[info.share_mode],
	    info.object_id);
	if (before != after) {
		if (depth == 0) {
			T_PASS("mem_entry() honored copy-on-write");
		} else {
			T_FAIL("mem_entry() did not trigger copy-on_write");
		}
	} else {
		T_FAIL("mem_entry() did not honor copy-on-write");
	}
	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
//	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
	T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
	/* cleanup */
	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
	mach_port_deallocate(mach_task_self(), mem_entry);
}
1966
1967 T_DECL(copyoverwrite_submap_protection, "test copywrite vm region submap \
1968 protection", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
1969 {
1970 kern_return_t kr;
1971 mach_vm_address_t vmaddr;
1972 mach_vm_size_t vmsize;
1973 natural_t depth;
1974 vm_region_submap_short_info_data_64_t region_info;
1975 mach_msg_type_number_t region_info_count;
1976
1977 for (vmaddr = SHARED_REGION_BASE;
1978 vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1979 vmaddr += vmsize) {
1980 depth = 99;
1981 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1982 kr = mach_vm_region_recurse(mach_task_self(),
1983 &vmaddr,
1984 &vmsize,
1985 &depth,
1986 (vm_region_info_t) ®ion_info,
1987 ®ion_info_count);
1988 if (kr == KERN_INVALID_ADDRESS) {
1989 break;
1990 }
1991 T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
1992 T_ASSERT_EQ(region_info_count,
1993 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1994 "vm_region_recurse(0x%llx) count = %d expected %d",
1995 vmaddr, region_info_count,
1996 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1997
1998 T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
1999 vmaddr, depth, region_info.protection,
2000 region_info.max_protection);
2001 if (depth == 0) {
2002 /* not a submap mapping: next mapping */
2003 continue;
2004 }
2005 if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
2006 break;
2007 }
2008 kr = mach_vm_copy(mach_task_self(),
2009 vmaddr,
2010 vmsize,
2011 vmaddr);
2012 if (kr == KERN_PROTECTION_FAILURE ||
2013 kr == KERN_INVALID_ADDRESS) {
2014 T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
2015 vmaddr, vmsize, kr, mach_error_string(kr));
2016 continue;
2017 }
2018 T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
2019 vmaddr, vmsize, region_info.protection);
2020 depth = 0;
2021 region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
2022 kr = mach_vm_region_recurse(mach_task_self(),
2023 &vmaddr,
2024 &vmsize,
2025 &depth,
2026 (vm_region_info_t) ®ion_info,
2027 ®ion_info_count);
2028 T_ASSERT_MACH_SUCCESS(kr, "m_region_recurse(0x%llx)", vmaddr);
2029 T_ASSERT_EQ(region_info_count,
2030 VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
2031 "vm_region_recurse() count = %d expected %d",
2032 region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
2033
2034 T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
2035 vmaddr, depth);
2036 T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
2037 0, "vm_region_recurse(0x%llx): prot 0x%x",
2038 vmaddr, region_info.protection);
2039 }
2040 }
2041
/*
 * rdar://problem/16783546: wiring (mlock) executable text — both shared-cache
 * text (printf/fprintf) and this binary's own text — must not trigger
 * code-signing violations and must not change the wired instructions.
 * Acceptable outcomes per section: mlock() fails with EPERM, or it succeeds
 * and the first word of the wired page is unchanged.
 */
T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
    the shared region triggers code-signing violations",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	uint32_t *addr, before, after;
	int retval;
	int saved_errno;
	kern_return_t kr;
	vm_address_t map_addr, remap_addr;
	vm_prot_t curprot, maxprot;

	/* section 1: shared-cache text (printf) — remapped first, then wired */
	addr = (uint32_t *)&printf;
#if __has_feature(ptrauth_calls)
	/* strip the pointer-auth signature to get the raw code address */
	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
	map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
	/*
	 * Remap a page of shared text into this task (no copy) so the page
	 * has an extra, task-local mapping while it gets wired below.
	 * NOTE(review): remap_addr is intentionally never deallocated here.
	 */
	remap_addr = 0;
	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(), map_addr,
	    FALSE, /* copy */
	    &curprot, &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
	    kr, mach_error_string(kr));
	/*
	 * NOTE(review): 'addr' (not the stripped 'map_addr') is dereferenced
	 * and passed to mlock(); presumably the cast from a function pointer
	 * yields a usable data address on ptrauth targets — confirm on arm64e.
	 */
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		/* wiring refused: EPERM is the only acceptable error */
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		/* wiring must never alter the text it wires */
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	/* section 2: another shared-cache text address (fprintf), no remap */
	addr = (uint32_t *) &fprintf;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	/* section 3: this test binary's own __TEXT (not in the shared cache) */
	addr = (uint32_t *) &testmain_wire_text;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire text error return error %d (%s)",
		    saved_errno, strerror(saved_errno));
	} else if (after != before) {
		T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire text");
	}
}
2110
2111 T_DECL(remap_comm_page, "test remapping of the commpage - rdar://93177124",
2112 T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
2113 {
2114 kern_return_t kr;
2115 mach_vm_address_t commpage_addr, remap_addr;
2116 mach_vm_size_t vmsize;
2117 vm_prot_t curprot, maxprot;
2118
2119 #if __arm__
2120 commpage_addr = 0xFFFF4000ULL;
2121 #elif __arm64__
2122 commpage_addr = 0x0000000FFFFFC000ULL;
2123 #elif __x86_64__
2124 commpage_addr = 0x00007FFFFFE00000ULL;
2125 #else
2126 T_FAIL("unknown commpage address for this architecture");
2127 #endif
2128
2129 T_LOG("Remapping commpage from 0x%llx", commpage_addr);
2130 vmsize = vm_kernel_page_size;
2131 remap_addr = 0;
2132 kr = mach_vm_remap(mach_task_self(),
2133 &remap_addr,
2134 vmsize,
2135 0, /* mask */
2136 VM_FLAGS_ANYWHERE,
2137 mach_task_self(),
2138 commpage_addr,
2139 TRUE, /* copy */
2140 &curprot,
2141 &maxprot,
2142 VM_INHERIT_DEFAULT);
2143 if (kr == KERN_INVALID_ADDRESS) {
2144 T_SKIP("No mapping found at 0x%llx\n", commpage_addr);
2145 return;
2146 }
2147 T_ASSERT_MACH_SUCCESS(kr, "vm_remap() of commpage from 0x%llx", commpage_addr);
2148 }
2149
2150 /* rdar://132439059 */
2151 T_DECL(mach_vm_remap_new_task_read_port,
2152 "Ensure shared, writable mappings cannot be created with a process's task read port using mach_vm_remap_new",
2153 T_META_TAG_VM_PREFERRED,
2154 T_META_RUN_CONCURRENTLY(true))
2155 {
2156 mach_vm_address_t private_data = 0;
2157 pid_t pid = -1;
2158 int fds[2];
2159 uint32_t depth = 9999;
2160 mach_vm_size_t size = PAGE_SIZE;
2161 mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
2162 vm_region_submap_info_data_64_t info;
2163 kern_return_t kr = KERN_FAILURE;
2164 int ret = -1;
2165
2166 kr = mach_vm_allocate(mach_task_self(), &private_data, size, VM_FLAGS_ANYWHERE);
2167 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_allocate");
2168
2169 ret = pipe(fds);
2170 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pipe");
2171
2172 pid = fork();
2173 T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "fork");
2174
2175 if (pid == 0) {
2176 char data[2];
2177 ssize_t nbytes_read = -1;
2178
2179 /* Close write end of the pipe */
2180 ret = close(fds[1]);
2181 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "child: close write end");
2182
2183 /* Check that the permissions are VM_PROT_DEFAULT/VM_PROT_ALL */
2184 kr = mach_vm_region_recurse(mach_task_self(),
2185 &private_data,
2186 &size,
2187 &depth,
2188 (vm_region_recurse_info_t)&info,
2189 &count);
2190 T_ASSERT_MACH_SUCCESS(kr, "child: mach_vm_region_recurse");
2191 T_EXPECT_EQ_INT(info.protection, VM_PROT_DEFAULT, "child: current protection is VM_PROT_DEFAULT");
2192 T_EXPECT_EQ_INT(info.max_protection, VM_PROT_ALL, "child: maximum protextion is VM_PROT_ALL");
2193
2194 /* The child tries to read data from the pipe (that never comes) */
2195 nbytes_read = read(fds[0], data, 2);
2196 T_QUIET; T_EXPECT_EQ_LONG(nbytes_read, 0L, "child: read 0 bytes");
2197
2198 exit(0);
2199 } else {
2200 mach_port_t read_port = MACH_PORT_NULL;
2201 mach_vm_address_t remap_addr = 0;
2202 int status;
2203
2204 /* Close read end of the pipe */
2205 ret = close(fds[0]);
2206 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "close read end");
2207
2208 /* Get a read port */
2209 ret = task_read_for_pid(mach_task_self(), pid, &read_port);
2210 T_ASSERT_POSIX_SUCCESS(ret, "parent: task_read_for_pid");
2211
2212 /* Make a shared mapping with the child's data */
2213 vm_prot_t cur_prot = VM_PROT_NONE;
2214 vm_prot_t max_prot = VM_PROT_NONE;
2215 kr = mach_vm_remap_new(
2216 mach_task_self(),
2217 &remap_addr,
2218 size,
2219 0, /* mask */
2220 VM_FLAGS_ANYWHERE,
2221 read_port,
2222 private_data,
2223 FALSE, /* copy */
2224 &cur_prot,
2225 &max_prot,
2226 VM_INHERIT_DEFAULT);
2227 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "parent: mach_vm_remap_new");
2228
2229 /* Check that permissions of the remapped region are VM_PROT_NONE */
2230 kr = mach_vm_region_recurse(mach_task_self(),
2231 &remap_addr,
2232 &size,
2233 &depth,
2234 (vm_region_recurse_info_t)&info,
2235 &count);
2236 T_ASSERT_MACH_SUCCESS(kr, "parent: mach_vm_region_recurse");
2237 T_EXPECT_EQ_INT(info.protection, VM_PROT_NONE, "parent: current protection is VM_PROT_NONE");
2238 T_EXPECT_EQ_INT(info.max_protection, VM_PROT_NONE, "parent: maximum protextion is VM_PROT_NONE");
2239
2240 /* Tell the child it is done and can exit. */
2241 ret = close(fds[1]);
2242 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "parent: close write end");
2243
2244 /* Clean up the child */
2245 ret = waitpid(pid, &status, 0);
2246 T_EXPECT_EQ_INT(ret, pid, "waitpid: child was stopped or terminated");
2247 }
2248 }
2249