1 /* Mach vm map miscellaneous unit tests
2  *
3  * This test program serves as a regression test suite for legacy
4  * VM issues; ideally, each test is linked to a radar number and
5  * performs a specific set of validations.
6  *
7  */
8 #include <darwintest.h>
9 
10 #include <dlfcn.h>
11 #include <errno.h>
12 #include <ptrauth.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <time.h>
17 
18 #include <sys/mman.h>
19 
20 #include <mach/mach_error.h>
21 #include <mach/mach_init.h>
22 #include <mach/mach_port.h>
23 #include <mach/mach_vm.h>
24 #include <mach/vm_map.h>
25 #include <mach/task.h>
26 #include <mach/task_info.h>
27 #include <mach/shared_region.h>
28 #include <machine/cpu_capabilities.h>
29 
30 T_GLOBAL_META(
31 	T_META_NAMESPACE("xnu.vm"),
32 	T_META_RADAR_COMPONENT_NAME("xnu"),
33 	T_META_RADAR_COMPONENT_VERSION("VM"),
34 	T_META_RUN_CONCURRENTLY(true));
35 
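/*
 * Helper for the memory_entry_tagging tests (rdar://23334087).
 * Allocates a region made of chunks carrying distinct VM tags (100 + i),
 * wraps it in a memory entry (MAP_MEM_VM_COPY on one pass, MAP_MEM_VM_SHARE
 * on the other), maps that entry, and checks that each chunk of the new
 * mapping reports the expected tag: the per-chunk tag when no override is
 * requested, or tag 200 when the mapping is created with VM_MAKE_TAG(200).
 */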
36 static void
37 test_memory_entry_tagging(int override_tag)
38 {
39 	int                     pass;
40 	int                     do_copy;
41 	kern_return_t           kr;
42 	mach_vm_address_t       vmaddr_orig, vmaddr_shared, vmaddr_copied;
43 	mach_vm_size_t          vmsize_orig, vmsize_shared, vmsize_copied;
44 	mach_vm_address_t       *vmaddr_ptr;
45 	mach_vm_size_t          *vmsize_ptr;
46 	mach_vm_address_t       vmaddr_chunk;
47 	mach_vm_size_t          vmsize_chunk;
48 	mach_vm_offset_t        vmoff;
49 	mach_port_t             mem_entry_copied, mem_entry_shared;
50 	mach_port_t             *mem_entry_ptr;
51 	int                     i;
52 	vm_region_submap_short_info_data_64_t ri;
53 	mach_msg_type_number_t  ri_count;
54 	unsigned int            depth;
55 	int                     vm_flags;
56 	int                     expected_tag;
57 
58 	vmaddr_copied = 0;
59 	vmaddr_shared = 0;
60 	vmsize_copied = 0;
61 	vmsize_shared = 0;
62 	vmaddr_chunk = 0;
63 	vmsize_chunk = 16 * 1024;
64 	vmaddr_orig = 0;
65 	vmsize_orig = 3 * vmsize_chunk;
66 	mem_entry_copied = MACH_PORT_NULL;
67 	mem_entry_shared = MACH_PORT_NULL;
68 	pass = 0;
69 
70 	vmaddr_orig = 0;
71 	kr = mach_vm_allocate(mach_task_self(),
72 	    &vmaddr_orig,
73 	    vmsize_orig,
74 	    VM_FLAGS_ANYWHERE);
75 	T_QUIET;
76 	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
77 	    override_tag, vmsize_orig);
78 	if (T_RESULT == T_RESULT_FAIL) {
79 		goto done;
80 	}
81 
82 	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
83 		vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
84 		kr = mach_vm_allocate(mach_task_self(),
85 		    &vmaddr_chunk,
86 		    vmsize_chunk,
87 		    (VM_FLAGS_FIXED |
88 		    VM_FLAGS_OVERWRITE |
89 		    VM_MAKE_TAG(100 + i)));
90 		T_QUIET;
91 		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
92 		    override_tag, vmsize_chunk);
93 		if (T_RESULT == T_RESULT_FAIL) {
94 			goto done;
95 		}
96 	}
97 
98 	for (vmoff = 0;
99 	    vmoff < vmsize_orig;
100 	    vmoff += PAGE_SIZE) {
101 		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
102 	}
103 
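	/*
	 * Run the sequence below twice: once with a copied memory entry
	 * (MAP_MEM_VM_COPY) and once with a shared one (MAP_MEM_VM_SHARE).
	 * The starting mode is picked pseudo-randomly from the clock and the
	 * second pass flips it (see the "++pass < 2" check at the bottom).
	 */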
104 	do_copy = time(NULL) & 1;
105 again:
106 	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
107 	if (do_copy) {
108 		mem_entry_ptr = &mem_entry_copied;
109 		vmsize_copied = vmsize_orig;
110 		vmsize_ptr = &vmsize_copied;
111 		vmaddr_copied = 0;
112 		vmaddr_ptr = &vmaddr_copied;
113 		vm_flags = MAP_MEM_VM_COPY;
114 	} else {
115 		mem_entry_ptr = &mem_entry_shared;
116 		vmsize_shared = vmsize_orig;
117 		vmsize_ptr = &vmsize_shared;
118 		vmaddr_shared = 0;
119 		vmaddr_ptr = &vmaddr_shared;
120 		vm_flags = MAP_MEM_VM_SHARE;
121 	}
122 	kr = mach_make_memory_entry_64(mach_task_self(),
123 	    vmsize_ptr,
124 	    vmaddr_orig,                            /* offset */
125 	    (vm_flags |
126 	    VM_PROT_READ | VM_PROT_WRITE),
127 	    mem_entry_ptr,
128 	    MACH_PORT_NULL);
129 	T_QUIET;
130 	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
131 	    override_tag, do_copy);
132 	if (T_RESULT == T_RESULT_FAIL) {
133 		goto done;
134 	}
135 	T_QUIET;
136 	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
137 	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
138 	if (T_RESULT == T_RESULT_FAIL) {
139 		goto done;
140 	}
141 	T_QUIET;
142 	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
143 	    override_tag, do_copy, *mem_entry_ptr);
144 	if (T_RESULT == T_RESULT_FAIL) {
145 		goto done;
146 	}
147 
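	/*
	 * Map the memory entry into the task, optionally forcing a new
	 * VM tag (200) on the mapping instead of inheriting the tags of
	 * the underlying chunks.
	 */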
148 	*vmaddr_ptr = 0;
149 	if (override_tag) {
150 		vm_flags = VM_MAKE_TAG(200);
151 	} else {
152 		vm_flags = 0;
153 	}
154 	kr = mach_vm_map(mach_task_self(),
155 	    vmaddr_ptr,
156 	    vmsize_orig,
157 	    0,              /* mask */
158 	    vm_flags | VM_FLAGS_ANYWHERE,
159 	    *mem_entry_ptr,
160 	    0,              /* offset */
161 	    FALSE,              /* copy */
162 	    VM_PROT_READ | VM_PROT_WRITE,
163 	    VM_PROT_READ | VM_PROT_WRITE,
164 	    VM_INHERIT_DEFAULT);
165 	T_QUIET;
166 	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
167 	    override_tag, do_copy);
168 	if (T_RESULT == T_RESULT_FAIL) {
169 		goto done;
170 	}
171 
172 	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
173 	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
174 		T_QUIET;
175 		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
176 		    override_tag, do_copy);
177 		if (T_RESULT == T_RESULT_FAIL) {
178 			goto done;
179 		}
180 	} else {
181 		T_QUIET;
182 		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
183 		    override_tag, do_copy);
184 		if (T_RESULT == T_RESULT_FAIL) {
185 			goto done;
186 		}
187 	}
188 
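	/*
	 * Verify the tag reported for each chunk-sized piece of the new
	 * mapping: 200 if the tag was overridden at map time, otherwise the
	 * original per-chunk tag (100 + i) propagated through the memory entry.
	 */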
189 	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
190 		mach_vm_address_t       vmaddr_info;
191 		mach_vm_size_t          vmsize_info;
192 
193 		vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
194 		vmsize_info = 0;
195 		depth = 1;
196 		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
197 		kr = mach_vm_region_recurse(mach_task_self(),
198 		    &vmaddr_info,
199 		    &vmsize_info,
200 		    &depth,
201 		    (vm_region_recurse_info_t) &ri,
202 		    &ri_count);
203 		T_QUIET;
204 		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
205 		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
206 		if (T_RESULT == T_RESULT_FAIL) {
207 			goto done;
208 		}
209 		T_QUIET;
210 		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
211 		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
212 		if (T_RESULT == T_RESULT_FAIL) {
213 			goto done;
214 		}
215 		T_QUIET;
216 		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
217 		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
218 		if (T_RESULT == T_RESULT_FAIL) {
219 			goto done;
220 		}
221 		if (override_tag) {
222 			expected_tag = 200;
223 		} else {
224 			expected_tag = 100 + i;
225 		}
226 		T_QUIET;
227 		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d",
228 		    override_tag, do_copy, i, ri.user_tag, expected_tag);
229 		if (T_RESULT == T_RESULT_FAIL) {
230 			goto done;
231 		}
232 	}
233 
234 	if (++pass < 2) {
235 		do_copy = !do_copy;
236 		goto again;
237 	}
238 
239 done:
240 	if (vmaddr_orig != 0) {
241 		mach_vm_deallocate(mach_task_self(),
242 		    vmaddr_orig,
243 		    vmsize_orig);
244 		vmaddr_orig = 0;
245 		vmsize_orig = 0;
246 	}
247 	if (vmaddr_copied != 0) {
248 		mach_vm_deallocate(mach_task_self(),
249 		    vmaddr_copied,
250 		    vmsize_copied);
251 		vmaddr_copied = 0;
252 		vmsize_copied = 0;
253 	}
254 	if (vmaddr_shared != 0) {
255 		mach_vm_deallocate(mach_task_self(),
256 		    vmaddr_shared,
257 		    vmsize_shared);
258 		vmaddr_shared = 0;
259 		vmsize_shared = 0;
260 	}
261 	if (mem_entry_copied != MACH_PORT_NULL) {
262 		mach_port_deallocate(mach_task_self(), mem_entry_copied);
263 		mem_entry_copied = MACH_PORT_NULL;
264 	}
265 	if (mem_entry_shared != MACH_PORT_NULL) {
266 		mach_port_deallocate(mach_task_self(), mem_entry_shared);
267 		mem_entry_shared = MACH_PORT_NULL;
268 	}
269 
270 	return;
271 }
272 
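/*
 * Helper for the map_memory_entry test (rdar://22611816).
 * Creates a MAP_MEM_VM_COPY memory entry for a 1-byte allocation, maps it
 * with copy=TRUE, and checks that the two mappings start out identical but
 * stay independent: writing through the new mapping must not modify the
 * original byte.
 */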
273 static void
274 test_map_memory_entry(void)
275 {
276 	kern_return_t           kr;
277 	mach_vm_address_t       vmaddr1, vmaddr2;
278 	mach_vm_size_t          vmsize1, vmsize2;
279 	mach_port_t             mem_entry;
280 	unsigned char           *cp1, *cp2;
281 
282 	vmaddr1 = 0;
283 	vmsize1 = 0;
284 	vmaddr2 = 0;
285 	vmsize2 = 0;
286 	mem_entry = MACH_PORT_NULL;
287 
288 	vmsize1 = 1;
289 	vmaddr1 = 0;
290 	kr = mach_vm_allocate(mach_task_self(),
291 	    &vmaddr1,
292 	    vmsize1,
293 	    VM_FLAGS_ANYWHERE);
294 	T_QUIET;
295 	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
296 	if (T_RESULT == T_RESULT_FAIL) {
297 		goto done;
298 	}
299 
300 	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
301 	*cp1 = '1';
302 
303 	vmsize2 = 1;
304 	mem_entry = MACH_PORT_NULL;
305 	kr = mach_make_memory_entry_64(mach_task_self(),
306 	    &vmsize2,
307 	    vmaddr1,                            /* offset */
308 	    (MAP_MEM_VM_COPY |
309 	    VM_PROT_READ | VM_PROT_WRITE),
310 	    &mem_entry,
311 	    MACH_PORT_NULL);
312 	T_QUIET;
313 	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
314 	if (T_RESULT == T_RESULT_FAIL) {
315 		goto done;
316 	}
317 	T_QUIET;
318 	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
319 	    (uint64_t) vmsize2, (uint64_t) vmsize1);
320 	if (T_RESULT == T_RESULT_FAIL) {
321 		goto done;
322 	}
323 	T_QUIET;
324 	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
325 	if (T_RESULT == T_RESULT_FAIL) {
326 		goto done;
327 	}
328 
329 	vmaddr2 = 0;
330 	kr = mach_vm_map(mach_task_self(),
331 	    &vmaddr2,
332 	    vmsize2,
333 	    0,              /* mask */
334 	    VM_FLAGS_ANYWHERE,
335 	    mem_entry,
336 	    0,              /* offset */
337 	    TRUE,              /* copy */
338 	    VM_PROT_READ | VM_PROT_WRITE,
339 	    VM_PROT_READ | VM_PROT_WRITE,
340 	    VM_INHERIT_DEFAULT);
341 	T_QUIET;
342 	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
343 	if (T_RESULT == T_RESULT_FAIL) {
344 		goto done;
345 	}
346 
347 	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
348 	T_QUIET;
349 	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
350 	    *cp1, *cp2, '1', '1');
351 	if (T_RESULT == T_RESULT_FAIL) {
352 		goto done;
353 	}
354 
355 	*cp2 = '2';
356 	T_QUIET;
357 	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
358 	    *cp1, *cp2, '1', '2');
359 	if (T_RESULT == T_RESULT_FAIL) {
360 		goto done;
361 	}
362 
363 done:
364 	if (vmaddr1 != 0) {
365 		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
366 		vmaddr1 = 0;
367 		vmsize1 = 0;
368 	}
369 	if (vmaddr2 != 0) {
370 		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
371 		vmaddr2 = 0;
372 		vmsize2 = 0;
373 	}
374 	if (mem_entry != MACH_PORT_NULL) {
375 		mach_port_deallocate(mach_task_self(), mem_entry);
376 		mem_entry = MACH_PORT_NULL;
377 	}
378 
379 	return;
380 }
381 
382 T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
383     VM memory tags should be propagated through memory entries",
384     T_META_ALL_VALID_ARCHS(true))
385 {
386 	test_memory_entry_tagging(0);
387 	test_memory_entry_tagging(1);
388 }
389 
390 T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
391     mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
392     copy", T_META_ALL_VALID_ARCHS(true))
393 {
394 	test_map_memory_entry();
395 }
396 
397 static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };
398 
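/*
 * Returns the task's current physical footprint (phys_footprint from
 * TASK_VM_INFO), used below to track how purgeable state transitions and
 * wiring affect footprint accounting.
 */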
399 static uint64_t
400 task_footprint(void)
401 {
402 	task_vm_info_data_t ti;
403 	kern_return_t kr;
404 	mach_msg_type_number_t count;
405 
406 	count = TASK_VM_INFO_COUNT;
407 	kr = task_info(mach_task_self(),
408 	    TASK_VM_INFO,
409 	    (task_info_t) &ti,
410 	    &count);
411 	T_QUIET;
412 	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
413 #if defined(__arm64__)
414 	T_QUIET;
415 	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
416 	    count, TASK_VM_INFO_COUNT);
417 #endif /* defined(__arm64__) */
418 	return ti.phys_footprint;
419 }
420 
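/*
 * Walks a purgeable allocation through VOLATILE, EMPTY and back to
 * NONVOLATILE, wiring and unwiring half of it along the way, and samples
 * the physical footprint at each step.  Footprint deviations are only
 * logged as warnings, since memory pressure can legitimately change the
 * numbers.
 */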
421 T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
422     emptying, volatilizing purgeable vm")
423 {
424 	kern_return_t kr;
425 	mach_vm_address_t vm_addr;
426 	mach_vm_size_t vm_size;
427 	char *cp;
428 	int ret;
429 	vm_purgable_t state;
430 	uint64_t footprint[8];
431 
432 	vm_addr = 0;
433 	vm_size = 1 * 1024 * 1024;
434 	T_LOG("--> allocate %llu bytes", vm_size);
435 	kr = mach_vm_allocate(mach_task_self(),
436 	    &vm_addr,
437 	    vm_size,
438 	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
439 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
440 
441 	/* footprint0 */
442 	footprint[0] = task_footprint();
443 	T_LOG("    footprint[0] = %llu", footprint[0]);
444 
445 	T_LOG("--> access %llu bytes", vm_size);
446 	for (cp = (char *) vm_addr;
447 	    cp < (char *) (vm_addr + vm_size);
448 	    cp += vm_kernel_page_size) {
449 		*cp = 'x';
450 	}
451 	/* footprint1 == footprint0 + vm_size */
452 	footprint[1] = task_footprint();
453 	T_LOG("    footprint[1] = %llu", footprint[1]);
454 	if (footprint[1] != footprint[0] + vm_size) {
455 		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
456 	}
457 
458 	T_LOG("--> wire %llu bytes", vm_size / 2);
459 	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
460 	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");
461 
462 	/* footprint2 == footprint1 */
463 	footprint[2] = task_footprint();
464 	T_LOG("    footprint[2] = %llu", footprint[2]);
465 	if (footprint[2] != footprint[1]) {
466 		T_LOG("WARN: footprint[2] != footprint[1]");
467 	}
468 
469 	T_LOG("--> VOLATILE");
470 	state = VM_PURGABLE_VOLATILE;
471 	kr = mach_vm_purgable_control(mach_task_self(),
472 	    vm_addr,
473 	    VM_PURGABLE_SET_STATE,
474 	    &state);
475 	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
476 	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
477 	    vm_purgable_state[state]);
478 	/* footprint3 == footprint2 - (vm_size / 2) */
479 	footprint[3] = task_footprint();
480 	T_LOG("    footprint[3] = %llu", footprint[3]);
481 	if (footprint[3] != footprint[2] - (vm_size / 2)) {
482 		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
483 	}
484 
485 	T_LOG("--> EMPTY");
486 	state = VM_PURGABLE_EMPTY;
487 	kr = mach_vm_purgable_control(mach_task_self(),
488 	    vm_addr,
489 	    VM_PURGABLE_SET_STATE,
490 	    &state);
491 	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
492 	if (state != VM_PURGABLE_VOLATILE &&
493 	    state != VM_PURGABLE_EMPTY) {
494 		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
495 		    vm_purgable_state[state]);
496 	}
497 	/* footprint4 == footprint3 */
498 	footprint[4] = task_footprint();
499 	T_LOG("    footprint[4] = %llu", footprint[4]);
500 	if (footprint[4] != footprint[3]) {
501 		T_LOG("WARN: footprint[4] != footprint[3]");
502 	}
503 
504 	T_LOG("--> unwire %llu bytes", vm_size / 2);
505 	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
506 	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");
507 
508 	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
509 	/* footprint5 == footprint0 */
510 	footprint[5] = task_footprint();
511 	T_LOG("    footprint[5] = %llu", footprint[5]);
512 	if (footprint[5] != footprint[4] - (vm_size / 2)) {
513 		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
514 	}
515 	if (footprint[5] != footprint[0]) {
516 		T_LOG("WARN: footprint[5] != footprint[0]");
517 	}
518 
519 	T_LOG("--> VOLATILE");
520 	state = VM_PURGABLE_VOLATILE;
521 	kr = mach_vm_purgable_control(mach_task_self(),
522 	    vm_addr,
523 	    VM_PURGABLE_SET_STATE,
524 	    &state);
525 	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
526 	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
527 	    vm_purgable_state[state]);
528 	/* footprint6 == footprint5 */
529 	/* footprint6 == footprint0 */
530 	footprint[6] = task_footprint();
531 	T_LOG("    footprint[6] = %llu", footprint[6]);
532 	if (footprint[6] != footprint[5]) {
533 		T_LOG("WARN: footprint[6] != footprint[5]");
534 	}
535 	if (footprint[6] != footprint[0]) {
536 		T_LOG("WARN: footprint[6] != footprint[0]");
537 	}
538 
539 	T_LOG("--> NONVOLATILE");
540 	state = VM_PURGABLE_NONVOLATILE;
541 	kr = mach_vm_purgable_control(mach_task_self(),
542 	    vm_addr,
543 	    VM_PURGABLE_SET_STATE,
544 	    &state);
545 	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
546 	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
547 	    vm_purgable_state[state]);
548 	/* footprint7 == footprint6 */
549 	/* footprint7 == footprint0 */
550 	footprint[7] = task_footprint();
551 	T_LOG("    footprint[7] = %llu", footprint[7]);
552 	if (footprint[7] != footprint[6]) {
553 		T_LOG("WARN: footprint[7] != footprint[6]");
554 	}
555 	if (footprint[7] != footprint[0]) {
556 		T_LOG("WARN: footprint[7] != footprint[0]");
557 	}
558 }
559 
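/*
 * Remaps a 10MB region so it is shared with itself, verifies both mappings
 * really share the same pages, then marks the original MADV_FREE_REUSABLE
 * and expects TASK_VM_INFO to report twice the region size as reusable,
 * since both mappings of the shared object are accounted.
 */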
560 T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
561     rethink needs madvise(MADV_FREE_HARDER)",
562     T_META_RUN_CONCURRENTLY(false),
563     T_META_ALL_VALID_ARCHS(true))
564 {
565 	vm_address_t            vmaddr = 0, vmaddr2 = 0;
566 	vm_size_t               vmsize;
567 	kern_return_t           kr;
568 	char                    *cp;
569 	vm_prot_t               curprot, maxprot;
570 	int                     ret;
571 	task_vm_info_data_t     ti;
572 	mach_msg_type_number_t  ti_count;
573 
574 	vmsize = 10 * 1024 * 1024; /* 10MB */
575 	kr = vm_allocate(mach_task_self(),
576 	    &vmaddr,
577 	    vmsize,
578 	    VM_FLAGS_ANYWHERE);
579 	T_QUIET;
580 	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
581 	if (T_RESULT == T_RESULT_FAIL) {
582 		goto done;
583 	}
584 
585 	for (cp = (char *)(uintptr_t)vmaddr;
586 	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
587 	    cp++) {
588 		*cp = 'x';
589 	}
590 
591 	kr = vm_remap(mach_task_self(),
592 	    &vmaddr2,
593 	    vmsize,
594 	    0,           /* mask */
595 	    VM_FLAGS_ANYWHERE,
596 	    mach_task_self(),
597 	    vmaddr,
598 	    FALSE,           /* copy */
599 	    &curprot,
600 	    &maxprot,
601 	    VM_INHERIT_DEFAULT);
602 	T_QUIET;
603 	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
604 	if (T_RESULT == T_RESULT_FAIL) {
605 		goto done;
606 	}
607 
608 	for (cp = (char *)(uintptr_t)vmaddr2;
609 	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
610 	    cp++) {
611 		T_QUIET;
612 		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
613 		    (void *)(uintptr_t)vmaddr,
614 		    (void *)(uintptr_t)vmaddr2,
615 		    (void *)cp,
616 		    (unsigned char)*cp);
617 		if (T_RESULT == T_RESULT_FAIL) {
618 			goto done;
619 		}
620 	}
621 	cp = (char *)(uintptr_t)vmaddr;
622 	*cp = 'X';
623 	cp = (char *)(uintptr_t)vmaddr2;
624 	T_QUIET;
625 	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
626 	if (T_RESULT == T_RESULT_FAIL) {
627 		goto done;
628 	}
629 
630 #if defined(__x86_64__) || defined(__i386__)
631 	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
632 		T_LOG("Skipping madvise reusable tests because we're running under translation.");
633 		goto done;
634 	}
635 #endif /* defined(__x86_64__) || defined(__i386__) */
636 	ret = madvise((char *)(uintptr_t)vmaddr,
637 	    vmsize,
638 	    MADV_FREE_REUSABLE);
639 	T_QUIET;
640 	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
641 	if (T_RESULT == T_RESULT_FAIL) {
642 		goto done;
643 	}
644 
645 	ti_count = TASK_VM_INFO_COUNT;
646 	kr = task_info(mach_task_self(),
647 	    TASK_VM_INFO,
648 	    (task_info_t) &ti,
649 	    &ti_count);
650 	T_QUIET;
651 	T_EXPECT_MACH_SUCCESS(kr, "task_info()");
652 	if (T_RESULT == T_RESULT_FAIL) {
653 		goto done;
654 	}
655 
656 	T_QUIET;
657 	T_EXPECT_EQ(ti.reusable, 2ULL * vmsize, "ti.reusable=%lld expected %lld",
658 	    ti.reusable, (uint64_t)(2 * vmsize));
659 	if (T_RESULT == T_RESULT_FAIL) {
660 		goto done;
661 	}
662 
663 done:
664 	if (vmaddr != 0) {
665 		vm_deallocate(mach_task_self(), vmaddr, vmsize);
666 		vmaddr = 0;
667 	}
668 	if (vmaddr2 != 0) {
669 		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
670 		vmaddr2 = 0;
671 	}
672 }
673 
674 T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
675     rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
676     [ purgeable_malloc became eligible for reuse ]",
677     T_META_ALL_VALID_ARCHS(true))
678 {
679 #if defined(__x86_64__) || defined(__i386__)
680 	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
681 		T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.");
682 	}
683 #endif /* defined(__x86_64__) || defined(__i386__) */
684 	vm_address_t            vmaddr = 0;
685 	vm_size_t               vmsize;
686 	kern_return_t           kr;
687 	char                    *cp;
688 	int                     ret;
689 
690 	vmsize = 10 * 1024 * 1024; /* 10MB */
691 	kr = vm_allocate(mach_task_self(),
692 	    &vmaddr,
693 	    vmsize,
694 	    (VM_FLAGS_ANYWHERE |
695 	    VM_FLAGS_PURGABLE |
696 	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
697 	T_QUIET;
698 	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
699 	if (T_RESULT == T_RESULT_FAIL) {
700 		goto done;
701 	}
702 
703 	for (cp = (char *)(uintptr_t)vmaddr;
704 	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
705 	    cp++) {
706 		*cp = 'x';
707 	}
708 
709 	ret = madvise((char *)(uintptr_t)vmaddr,
710 	    vmsize,
711 	    MADV_CAN_REUSE);
712 	T_QUIET;
713 	T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be advised to reuse");
714 	if (T_RESULT == T_RESULT_FAIL) {
715 		goto done;
716 	}
717 
718 done:
719 	if (vmaddr != 0) {
720 		vm_deallocate(mach_task_self(), vmaddr, vmsize);
721 		vmaddr = 0;
722 	}
723 }
724 
725 #define DEST_PATTERN 0xFEDCBA98
726 
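/*
 * Fills one buffer with an index pattern and another with DEST_PATTERN,
 * then uses mach_vm_read_overwrite() to copy the middle of the first buffer
 * over the middle of the second.  The first and last ints of the destination
 * must keep DEST_PATTERN; everything in between must match the source.
 */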
727 T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
728     rdar://31075370",
729     T_META_ALL_VALID_ARCHS(true))
730 {
731 	kern_return_t           kr;
732 	mach_vm_address_t       vmaddr1, vmaddr2;
733 	mach_vm_size_t          vmsize1, vmsize2;
734 	int                     *ip;
735 	int                     i;
736 
737 	vmaddr1 = 0;
738 	vmsize1 = 4 * 4096;
739 	kr = mach_vm_allocate(mach_task_self(),
740 	    &vmaddr1,
741 	    vmsize1,
742 	    VM_FLAGS_ANYWHERE);
743 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
744 
745 	ip = (int *)(uintptr_t)vmaddr1;
746 	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
747 		ip[i] = i;
748 	}
749 
750 	vmaddr2 = 0;
751 	kr = mach_vm_allocate(mach_task_self(),
752 	    &vmaddr2,
753 	    vmsize1,
754 	    VM_FLAGS_ANYWHERE);
755 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
756 
757 	ip = (int *)(uintptr_t)vmaddr2;
758 	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
759 		ip[i] = DEST_PATTERN;
760 	}
761 
762 	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
763 	kr = mach_vm_read_overwrite(mach_task_self(),
764 	    vmaddr1 + sizeof(*ip),
765 	    vmsize2,
766 	    vmaddr2 + sizeof(*ip),
767 	    &vmsize2);
768 	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");
769 
770 	ip = (int *)(uintptr_t)vmaddr2;
771 	for (i = 0; i < 1; i++) {
772 		T_QUIET;
773 		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
774 		    i, ip[i], DEST_PATTERN);
775 	}
776 	for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
777 		T_QUIET;
778 		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
779 		    i, ip[i], i);
780 	}
781 	for (; i < vmsize1 / sizeof(*ip); i++) {
782 		T_QUIET;
783 		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
784 		    i, ip[i], DEST_PATTERN);
785 	}
786 }
787 
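/*
 * Purgeable allocations use the MEMORY_OBJECT_COPY_NONE copy strategy.
 * This test chains two copy-on-write remaps (copy=TRUE) of such an
 * allocation and expects both to succeed (rdar://35610377).
 */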
788 T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
789     objects - rdar://35610377",
790     T_META_ALL_VALID_ARCHS(true))
791 {
792 	kern_return_t           kr;
793 	mach_vm_address_t       vmaddr1, vmaddr2, vmaddr3;
794 	mach_vm_size_t          vmsize;
795 	vm_prot_t               curprot, maxprot;
796 
797 	vmsize = 32 * 1024 * 1024;
798 
799 	vmaddr1 = 0;
800 	kr = mach_vm_allocate(mach_task_self(),
801 	    &vmaddr1,
802 	    vmsize,
803 	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
804 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
805 
806 	memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);
807 
808 	vmaddr2 = 0;
809 	kr = mach_vm_remap(mach_task_self(),
810 	    &vmaddr2,
811 	    vmsize,
812 	    0,                /* mask */
813 	    VM_FLAGS_ANYWHERE,
814 	    mach_task_self(),
815 	    vmaddr1,
816 	    TRUE,                /* copy */
817 	    &curprot,
818 	    &maxprot,
819 	    VM_INHERIT_DEFAULT);
820 	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");
821 
822 	vmaddr3 = 0;
823 	kr = mach_vm_remap(mach_task_self(),
824 	    &vmaddr3,
825 	    vmsize,
826 	    0,                /* mask */
827 	    VM_FLAGS_ANYWHERE,
828 	    mach_task_self(),
829 	    vmaddr2,
830 	    TRUE,                /* copy */
831 	    &curprot,
832 	    &maxprot,
833 	    VM_INHERIT_DEFAULT);
834 	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
835 }
836 
837 T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
838     non-purgeable - rdar://31990033",
839     T_META_ALL_VALID_ARCHS(true))
840 {
841 	kern_return_t   kr;
842 	vm_address_t    vmaddr;
843 	vm_purgable_t   state;
844 
845 	vmaddr = 0;
846 	kr = vm_allocate(mach_task_self(), &vmaddr, 1,
847 	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
848 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
849 
850 	state = VM_PURGABLE_DENY;
851 	kr = vm_purgable_control(mach_task_self(), vmaddr,
852 	    VM_PURGABLE_SET_STATE, &state);
853 	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
854 	    "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
855 	    kr, mach_error_string(kr));
856 
857 	kr = vm_deallocate(mach_task_self(), vmaddr, 1);
858 	T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
859 }
860 
861 #define VMSIZE 0x10000
862 
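/*
 * vm_remap() with a size of 0, or with a size ((mach_vm_size_t)-2) that
 * cannot be page-rounded without overflowing, must fail cleanly with
 * KERN_INVALID_ARGUMENT (rdar://33114981).
 */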
863 T_DECL(vm_remap_zero, "test vm remap of zero size - rdar://33114981",
864     T_META_ALL_VALID_ARCHS(true))
865 {
866 	kern_return_t           kr;
867 	mach_vm_address_t       vmaddr1, vmaddr2;
868 	mach_vm_size_t          vmsize;
869 	vm_prot_t               curprot, maxprot;
870 
871 	vmaddr1 = 0;
872 	vmsize = VMSIZE;
873 	kr = mach_vm_allocate(mach_task_self(),
874 	    &vmaddr1,
875 	    vmsize,
876 	    VM_FLAGS_ANYWHERE);
877 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
878 
879 	vmaddr2 = 0;
880 	vmsize = 0;
881 	kr = mach_vm_remap(mach_task_self(),
882 	    &vmaddr2,
883 	    vmsize,
884 	    0,
885 	    VM_FLAGS_ANYWHERE,
886 	    mach_task_self(),
887 	    vmaddr1,
888 	    FALSE,
889 	    &curprot,
890 	    &maxprot,
891 	    VM_INHERIT_DEFAULT);
892 	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
893 	    vmsize, kr, mach_error_string(kr));
894 
895 	vmaddr2 = 0;
896 	vmsize = (mach_vm_size_t)-2;
897 	kr = mach_vm_remap(mach_task_self(),
898 	    &vmaddr2,
899 	    vmsize,
900 	    0,
901 	    VM_FLAGS_ANYWHERE,
902 	    mach_task_self(),
903 	    vmaddr1,
904 	    FALSE,
905 	    &curprot,
906 	    &maxprot,
907 	    VM_INHERIT_DEFAULT);
908 	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
909 	    vmsize, kr, mach_error_string(kr));
910 }
911 
912 extern int __shared_region_check_np(uint64_t *);
913 
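/*
 * Makes a MAP_MEM_VM_SHARE memory entry for the first page of the shared
 * region and maps it read-only.  Per rdar://41481703, the nested pmap
 * should only ever be set up by the kernel, so this user-initiated mapping
 * must succeed without triggering it.
 */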
914 T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
915     - rdar://problem/41481703",
916     T_META_ALL_VALID_ARCHS(true))
917 {
918 	int                     ret;
919 	kern_return_t           kr;
920 	mach_vm_address_t       sr_start;
921 	mach_vm_size_t          vmsize;
922 	mach_vm_address_t       vmaddr;
923 	mach_port_t             mem_entry;
924 
925 	ret = __shared_region_check_np(&sr_start);
926 	if (ret != 0) {
927 		int saved_errno;
928 		saved_errno = errno;
929 
930 		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
931 		    saved_errno, strerror(saved_errno));
932 		T_END;
933 	}
934 
935 	vmsize = PAGE_SIZE;
936 	kr = mach_make_memory_entry_64(mach_task_self(),
937 	    &vmsize,
938 	    sr_start,
939 	    MAP_MEM_VM_SHARE | VM_PROT_READ,
940 	    &mem_entry,
941 	    MACH_PORT_NULL);
942 	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);
943 
944 	vmaddr = 0;
945 	kr = mach_vm_map(mach_task_self(),
946 	    &vmaddr,
947 	    vmsize,
948 	    0,
949 	    VM_FLAGS_ANYWHERE,
950 	    mem_entry,
951 	    0,
952 	    FALSE,
953 	    VM_PROT_READ,
954 	    VM_PROT_READ,
955 	    VM_INHERIT_DEFAULT);
956 	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
957 }
958 
959 static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
960 static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };
961 
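/*
 * rdar://74469953: scans the shared region for a nested read-only mapping
 * and a nested writable mapping, then checks that vm_remap(), vm_remap_new()
 * (when available), and memory entries cannot be used to write through to
 * the shared region: copy-on-write must be preserved and the submap must
 * stay nested (except where the mem_entry() path legitimately unnests it).
 */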
962 T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region should not give write access to the shared region - rdar://problem/74469953",
963     T_META_ALL_VALID_ARCHS(true))
964 {
965 	int ret;
966 	uint64_t sr_start;
967 	kern_return_t kr;
968 	mach_vm_address_t address, tmp_address, remap_address;
969 	mach_vm_size_t size, tmp_size, remap_size;
970 	uint32_t depth;
971 	mach_msg_type_number_t count;
972 	vm_region_submap_info_data_64_t info;
973 	vm_prot_t cur_prot, max_prot;
974 	uint32_t before, after, remap;
975 	mach_port_t mem_entry;
976 
977 	ret = __shared_region_check_np(&sr_start);
978 	if (ret != 0) {
979 		int saved_errno;
980 		saved_errno = errno;
981 
982 		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
983 		    saved_errno, strerror(saved_errno));
984 		T_END;
985 	}
986 	T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
987 	T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
988 	T_LOG("shared region starts at 0x%llx", sr_start);
989 	T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
990 	    "shared region starts below BASE");
991 	T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
992 	    "shared region starts above BASE+SIZE");
993 
994 	/*
995 	 * Step 1 - check that one can not get write access to a read-only
996 	 * mapping in the shared region.
997 	 */
998 	size = 0;
999 	for (address = SHARED_REGION_BASE;
1000 	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1001 	    address += size) {
1002 		size = 0;
1003 		depth = 99;
1004 		count = VM_REGION_SUBMAP_INFO_COUNT_64;
1005 		kr = mach_vm_region_recurse(mach_task_self(),
1006 		    &address,
1007 		    &size,
1008 		    &depth,
1009 		    (vm_region_recurse_info_t)&info,
1010 		    &count);
1011 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1012 		if (kr == KERN_INVALID_ADDRESS) {
1013 			T_SKIP("could not find read-only nested mapping");
1014 			T_END;
1015 		}
1016 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1017 		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1018 		    address, address + size, depth,
1019 		    prot_str[info.protection],
1020 		    prot_str[info.max_protection],
1021 		    share_mode_str[info.share_mode],
1022 		    info.object_id);
1023 		if (depth > 0 &&
1024 		    (info.protection == VM_PROT_READ) &&
1025 		    (info.max_protection == VM_PROT_READ)) {
1026 			/* nested and read-only: bingo! */
1027 			break;
1028 		}
1029 	}
1030 	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1031 		T_SKIP("could not find read-only nested mapping");
1032 		T_END;
1033 	}
1034 
1035 	/* test vm_remap() of RO */
1036 	before = *(uint32_t *)(uintptr_t)address;
1037 	remap_address = 0;
1038 	remap_size = size;
1039 	kr = mach_vm_remap(mach_task_self(),
1040 	    &remap_address,
1041 	    remap_size,
1042 	    0,
1043 	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1044 	    mach_task_self(),
1045 	    address,
1046 	    FALSE,
1047 	    &cur_prot,
1048 	    &max_prot,
1049 	    VM_INHERIT_DEFAULT);
1050 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1051 //	T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
1052 //	T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
1053 	/* check that region is still nested */
1054 	tmp_address = address;
1055 	tmp_size = 0;
1056 	depth = 99;
1057 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1058 	kr = mach_vm_region_recurse(mach_task_self(),
1059 	    &tmp_address,
1060 	    &tmp_size,
1061 	    &depth,
1062 	    (vm_region_recurse_info_t)&info,
1063 	    &count);
1064 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1065 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1066 	    tmp_address, tmp_address + tmp_size, depth,
1067 	    prot_str[info.protection],
1068 	    prot_str[info.max_protection],
1069 	    share_mode_str[info.share_mode],
1070 	    info.object_id);
1071 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1072 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1073 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1074 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1075 //	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1076 	/* check that new mapping is read-only */
1077 	tmp_address = remap_address;
1078 	tmp_size = 0;
1079 	depth = 99;
1080 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1081 	kr = mach_vm_region_recurse(mach_task_self(),
1082 	    &tmp_address,
1083 	    &tmp_size,
1084 	    &depth,
1085 	    (vm_region_recurse_info_t)&info,
1086 	    &count);
1087 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1088 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1089 	    tmp_address, tmp_address + tmp_size, depth,
1090 	    prot_str[info.protection],
1091 	    prot_str[info.max_protection],
1092 	    share_mode_str[info.share_mode],
1093 	    info.object_id);
1094 	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1095 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1096 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1097 //	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1098 	remap = *(uint32_t *)(uintptr_t)remap_address;
1099 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1100 // this would crash if actually read-only:
1101 //	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1102 	after = *(uint32_t *)(uintptr_t)address;
1103 	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1104 //	*(uint32_t *)(uintptr_t)remap_address = before;
1105 	if (before != after) {
1106 		T_FAIL("vm_remap() bypassed copy-on-write");
1107 	} else {
1108 		T_PASS("vm_remap() did not bypass copy-on-write");
1109 	}
1110 	/* cleanup */
1111 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1112 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1113 	T_PASS("vm_remap() read-only");
1114 
1115 #if defined(VM_MEMORY_ROSETTA)
1116 	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1117 		T_PASS("vm_remap_new() is not present");
1118 		goto skip_vm_remap_new_ro;
1119 	}
1120 	/* test vm_remap_new() of RO */
1121 	before = *(uint32_t *)(uintptr_t)address;
1122 	remap_address = 0;
1123 	remap_size = size;
1124 	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1125 	max_prot = VM_PROT_READ | VM_PROT_WRITE;
1126 	kr = mach_vm_remap_new(mach_task_self(),
1127 	    &remap_address,
1128 	    remap_size,
1129 	    0,
1130 	    VM_FLAGS_ANYWHERE,
1131 	    mach_task_self(),
1132 	    address,
1133 	    FALSE,
1134 	    &cur_prot,
1135 	    &max_prot,
1136 	    VM_INHERIT_DEFAULT);
1137 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1138 	if (kr == KERN_PROTECTION_FAILURE) {
1139 		/* wrong but not a security issue... */
1140 		goto skip_vm_remap_new_ro;
1141 	}
1142 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1143 	remap = *(uint32_t *)(uintptr_t)remap_address;
1144 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1145 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1146 	after = *(uint32_t *)(uintptr_t)address;
1147 	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1148 	*(uint32_t *)(uintptr_t)remap_address = before;
1149 	if (before != after) {
1150 		T_FAIL("vm_remap_new() bypassed copy-on-write");
1151 	} else {
1152 		T_PASS("vm_remap_new() did not bypass copy-on-write");
1153 	}
1154 	/* check that region is still nested */
1155 	tmp_address = address;
1156 	tmp_size = 0;
1157 	depth = 99;
1158 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1159 	kr = mach_vm_region_recurse(mach_task_self(),
1160 	    &tmp_address,
1161 	    &tmp_size,
1162 	    &depth,
1163 	    (vm_region_recurse_info_t)&info,
1164 	    &count);
1165 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1166 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1167 	    tmp_address, tmp_address + tmp_size, depth,
1168 	    prot_str[info.protection],
1169 	    prot_str[info.max_protection],
1170 	    share_mode_str[info.share_mode],
1171 	    info.object_id);
1172 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1173 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1174 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1175 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1176 	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1177 	T_PASS("vm_remap_new() read-only");
1178 skip_vm_remap_new_ro:
1179 #else /* defined(VM_MEMORY_ROSETTA) */
1180 	/* pre-BigSur SDK: no vm_remap_new() */
1181 	T_LOG("No vm_remap_new() to test");
1182 #endif /* defined(VM_MEMORY_ROSETTA) */
1183 
1184 	/* test mach_make_memory_entry_64(VM_SHARE) of RO */
1185 	before = *(uint32_t *)(uintptr_t)address;
1186 	remap_size = size;
1187 	mem_entry = MACH_PORT_NULL;
1188 	kr = mach_make_memory_entry_64(mach_task_self(),
1189 	    &remap_size,
1190 	    address,
1191 	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1192 	    &mem_entry,
1193 	    MACH_PORT_NULL);
1194 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1195 	if (kr == KERN_PROTECTION_FAILURE) {
1196 		/* wrong but not a security issue... */
1197 		goto skip_mem_entry_vm_share_ro;
1198 	}
1199 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1200 	remap_address = 0;
1201 	kr = mach_vm_map(mach_task_self(),
1202 	    &remap_address,
1203 	    remap_size,
1204 	    0,              /* mask */
1205 	    VM_FLAGS_ANYWHERE,
1206 	    mem_entry,
1207 	    0,              /* offset */
1208 	    FALSE,              /* copy */
1209 	    VM_PROT_READ | VM_PROT_WRITE,
1210 	    VM_PROT_READ | VM_PROT_WRITE,
1211 	    VM_INHERIT_DEFAULT);
1212 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1213 	remap = *(uint32_t *)(uintptr_t)remap_address;
1214 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1215 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1216 	after = *(uint32_t *)(uintptr_t)address;
1217 	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1218 	*(uint32_t *)(uintptr_t)remap_address = before;
1219 	if (before != after) {
1220 		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1221 	} else {
1222 		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1223 	}
1224 	/* check that region is still nested */
1225 	tmp_address = address;
1226 	tmp_size = 0;
1227 	depth = 99;
1228 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1229 	kr = mach_vm_region_recurse(mach_task_self(),
1230 	    &tmp_address,
1231 	    &tmp_size,
1232 	    &depth,
1233 	    (vm_region_recurse_info_t)&info,
1234 	    &count);
1235 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1236 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1237 	    tmp_address, tmp_address + tmp_size, depth,
1238 	    prot_str[info.protection],
1239 	    prot_str[info.max_protection],
1240 	    share_mode_str[info.share_mode],
1241 	    info.object_id);
1242 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1243 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1244 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1245 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1246 	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1247 	/* check that new mapping is a copy */
1248 	tmp_address = remap_address;
1249 	tmp_size = 0;
1250 	depth = 99;
1251 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1252 	kr = mach_vm_region_recurse(mach_task_self(),
1253 	    &tmp_address,
1254 	    &tmp_size,
1255 	    &depth,
1256 	    (vm_region_recurse_info_t)&info,
1257 	    &count);
1258 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1259 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1260 	    tmp_address, tmp_address + tmp_size, depth,
1261 	    prot_str[info.protection],
1262 	    prot_str[info.max_protection],
1263 	    share_mode_str[info.share_mode],
1264 	    info.object_id);
1265 	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1266 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1267 	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
1268 //	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1269 //	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1270 	/* cleanup */
1271 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1272 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1273 	T_PASS("mem_entry(VM_SHARE) read-only");
1274 skip_mem_entry_vm_share_ro:
1275 
1276 	/* test mach_make_memory_entry_64() of RO */
1277 	before = *(uint32_t *)(uintptr_t)address;
1278 	remap_size = size;
1279 	mem_entry = MACH_PORT_NULL;
1280 	kr = mach_make_memory_entry_64(mach_task_self(),
1281 	    &remap_size,
1282 	    address,
1283 	    VM_PROT_READ | VM_PROT_WRITE,
1284 	    &mem_entry,
1285 	    MACH_PORT_NULL);
1286 	T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
1287 	/* check that region is still nested */
1288 	tmp_address = address;
1289 	tmp_size = 0;
1290 	depth = 99;
1291 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1292 	kr = mach_vm_region_recurse(mach_task_self(),
1293 	    &tmp_address,
1294 	    &tmp_size,
1295 	    &depth,
1296 	    (vm_region_recurse_info_t)&info,
1297 	    &count);
1298 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1299 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1300 	    tmp_address, tmp_address + tmp_size, depth,
1301 	    prot_str[info.protection],
1302 	    prot_str[info.max_protection],
1303 	    share_mode_str[info.share_mode],
1304 	    info.object_id);
1305 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1306 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1307 //	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1308 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1309 	if (depth > 0) {
1310 		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1311 	}
1312 	T_PASS("mem_entry() read-only");
1313 
1314 
1315 	/*
1316 	 * Step 2 - check that one can not share write access with a writable
1317 	 * mapping in the shared region.
1318 	 */
1319 	size = 0;
1320 	for (address = SHARED_REGION_BASE;
1321 	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1322 	    address += size) {
1323 		size = 0;
1324 		depth = 99;
1325 		count = VM_REGION_SUBMAP_INFO_COUNT_64;
1326 		kr = mach_vm_region_recurse(mach_task_self(),
1327 		    &address,
1328 		    &size,
1329 		    &depth,
1330 		    (vm_region_recurse_info_t)&info,
1331 		    &count);
1332 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1333 		if (kr == KERN_INVALID_ADDRESS) {
1334 			T_SKIP("could not find writable nested mapping");
1335 			T_END;
1336 		}
1337 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1338 		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1339 		    address, address + size, depth,
1340 		    prot_str[info.protection],
1341 		    prot_str[info.max_protection],
1342 		    share_mode_str[info.share_mode],
1343 		    info.object_id);
1344 		if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
1345 			/* nested and writable: bingo! */
1346 			break;
1347 		}
1348 	}
1349 	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1350 		T_SKIP("could not find writable nested mapping");
1351 		T_END;
1352 	}
1353 
1354 	/* test vm_remap() of RW */
1355 	before = *(uint32_t *)(uintptr_t)address;
1356 	remap_address = 0;
1357 	remap_size = size;
1358 	kr = mach_vm_remap(mach_task_self(),
1359 	    &remap_address,
1360 	    remap_size,
1361 	    0,
1362 	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1363 	    mach_task_self(),
1364 	    address,
1365 	    FALSE,
1366 	    &cur_prot,
1367 	    &max_prot,
1368 	    VM_INHERIT_DEFAULT);
1369 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1370 	if (!(cur_prot & VM_PROT_WRITE)) {
1371 		T_LOG("vm_remap(): 0x%llx not writable %s/%s",
1372 		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
1373 		T_ASSERT_FAIL("vm_remap() remapping not writable");
1374 	}
1375 	remap = *(uint32_t *)(uintptr_t)remap_address;
1376 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1377 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1378 	after = *(uint32_t *)(uintptr_t)address;
1379 	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1380 	*(uint32_t *)(uintptr_t)remap_address = before;
1381 	if (before != after) {
1382 		T_FAIL("vm_remap() bypassed copy-on-write");
1383 	} else {
1384 		T_PASS("vm_remap() did not bypass copy-on-write");
1385 	}
1386 	/* check that region is still nested */
1387 	tmp_address = address;
1388 	tmp_size = 0;
1389 	depth = 99;
1390 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1391 	kr = mach_vm_region_recurse(mach_task_self(),
1392 	    &tmp_address,
1393 	    &tmp_size,
1394 	    &depth,
1395 	    (vm_region_recurse_info_t)&info,
1396 	    &count);
1397 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1398 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1399 	    tmp_address, tmp_address + tmp_size, depth,
1400 	    prot_str[info.protection],
1401 	    prot_str[info.max_protection],
1402 	    share_mode_str[info.share_mode],
1403 	    info.object_id);
1404 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1405 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1406 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1407 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1408 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1409 	/* cleanup */
1410 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1411 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1412 
1413 #if defined(VM_MEMORY_ROSETTA)
1414 	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1415 		T_PASS("vm_remap_new() is not present");
1416 		goto skip_vm_remap_new_rw;
1417 	}
1418 	/* test vm_remap_new() of RW */
1419 	before = *(uint32_t *)(uintptr_t)address;
1420 	remap_address = 0;
1421 	remap_size = size;
1422 	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1423 	max_prot = VM_PROT_READ | VM_PROT_WRITE;
1424 	kr = mach_vm_remap_new(mach_task_self(),
1425 	    &remap_address,
1426 	    remap_size,
1427 	    0,
1428 	    VM_FLAGS_ANYWHERE,
1429 	    mach_task_self(),
1430 	    address,
1431 	    FALSE,
1432 	    &cur_prot,
1433 	    &max_prot,
1434 	    VM_INHERIT_DEFAULT);
1435 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1436 	if (kr == KERN_PROTECTION_FAILURE) {
1437 		/* wrong but not a security issue... */
1438 		goto skip_vm_remap_new_rw;
1439 	}
1440 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1441 	if (!(cur_prot & VM_PROT_WRITE)) {
1442 		T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
1443 		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
1444 		T_ASSERT_FAIL("vm_remap_new() remapping not writable");
1445 	}
1446 	remap = *(uint32_t *)(uintptr_t)remap_address;
1447 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1448 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1449 	after = *(uint32_t *)(uintptr_t)address;
1450 	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1451 	*(uint32_t *)(uintptr_t)remap_address = before;
1452 	if (before != after) {
1453 		T_FAIL("vm_remap_new() bypassed copy-on-write");
1454 	} else {
1455 		T_PASS("vm_remap_new() did not bypass copy-on-write");
1456 	}
1457 	/* check that region is still nested */
1458 	tmp_address = address;
1459 	tmp_size = 0;
1460 	depth = 99;
1461 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1462 	kr = mach_vm_region_recurse(mach_task_self(),
1463 	    &tmp_address,
1464 	    &tmp_size,
1465 	    &depth,
1466 	    (vm_region_recurse_info_t)&info,
1467 	    &count);
1468 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1469 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1470 	    tmp_address, tmp_address + tmp_size, depth,
1471 	    prot_str[info.protection],
1472 	    prot_str[info.max_protection],
1473 	    share_mode_str[info.share_mode],
1474 	    info.object_id);
1475 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1476 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1477 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1478 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1479 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1480 	/* cleanup */
1481 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1482 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1483 skip_vm_remap_new_rw:
1484 #else /* defined(VM_MEMORY_ROSETTA) */
1485 	/* pre-BigSur SDK: no vm_remap_new() */
1486 	T_LOG("No vm_remap_new() to test");
1487 #endif /* defined(VM_MEMORY_ROSETTA) */
1488 
1489 	/* test mach_make_memory_entry_64(VM_SHARE) of RW */
1490 	before = *(uint32_t *)(uintptr_t)address;
1491 	remap_size = size;
1492 	mem_entry = MACH_PORT_NULL;
1493 	kr = mach_make_memory_entry_64(mach_task_self(),
1494 	    &remap_size,
1495 	    address,
1496 	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1497 	    &mem_entry,
1498 	    MACH_PORT_NULL);
1499 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1500 	if (kr == KERN_PROTECTION_FAILURE) {
1501 		/* wrong but not a security issue... */
1502 		goto skip_mem_entry_vm_share_rw;
1503 	}
1504 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1505 	T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping");
1506 //	T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar();
1507 	remap_address = 0;
1508 	kr = mach_vm_map(mach_task_self(),
1509 	    &remap_address,
1510 	    remap_size,
1511 	    0,              /* mask */
1512 	    VM_FLAGS_ANYWHERE,
1513 	    mem_entry,
1514 	    0,              /* offset */
1515 	    FALSE,              /* copy */
1516 	    VM_PROT_READ | VM_PROT_WRITE,
1517 	    VM_PROT_READ | VM_PROT_WRITE,
1518 	    VM_INHERIT_DEFAULT);
1519 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1520 	remap = *(uint32_t *)(uintptr_t)remap_address;
1521 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1522 //	T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1523 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1524 //	T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1525 	after = *(uint32_t *)(uintptr_t)address;
1526 	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1527 	*(uint32_t *)(uintptr_t)remap_address = before;
1528 	if (before != after) {
1529 		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1530 	} else {
1531 		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1532 	}
1533 	/* check that region is still nested */
1534 	tmp_address = address;
1535 	tmp_size = 0;
1536 	depth = 99;
1537 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1538 	kr = mach_vm_region_recurse(mach_task_self(),
1539 	    &tmp_address,
1540 	    &tmp_size,
1541 	    &depth,
1542 	    (vm_region_recurse_info_t)&info,
1543 	    &count);
1544 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1545 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1546 	    tmp_address, tmp_address + tmp_size, depth,
1547 	    prot_str[info.protection],
1548 	    prot_str[info.max_protection],
1549 	    share_mode_str[info.share_mode],
1550 	    info.object_id);
1551 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1552 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1553 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1554 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1555 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1556 	/* cleanup */
1557 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1558 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1559 	mach_port_deallocate(mach_task_self(), mem_entry);
1560 skip_mem_entry_vm_share_rw:
1561 
1562 	/* test mach_make_memory_entry_64() of RW */
1563 	before = *(uint32_t *)(uintptr_t)address;
1564 	remap_size = size;
1565 	mem_entry = MACH_PORT_NULL;
1566 	kr = mach_make_memory_entry_64(mach_task_self(),
1567 	    &remap_size,
1568 	    address,
1569 	    VM_PROT_READ | VM_PROT_WRITE,
1570 	    &mem_entry,
1571 	    MACH_PORT_NULL);
1572 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
1573 	remap_address = 0;
1574 	kr = mach_vm_map(mach_task_self(),
1575 	    &remap_address,
1576 	    remap_size,
1577 	    0,              /* mask */
1578 	    VM_FLAGS_ANYWHERE,
1579 	    mem_entry,
1580 	    0,              /* offset */
1581 	    FALSE,              /* copy */
1582 	    VM_PROT_READ | VM_PROT_WRITE,
1583 	    VM_PROT_READ | VM_PROT_WRITE,
1584 	    VM_INHERIT_DEFAULT);
1585 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1586 	remap = *(uint32_t *)(uintptr_t)remap_address;
1587 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1588 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1589 	after = *(uint32_t *)(uintptr_t)address;
1590 	T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
1591 	*(uint32_t *)(uintptr_t)remap_address = before;
1592 	/* check that region is no longer nested */
1593 	tmp_address = address;
1594 	tmp_size = 0;
1595 	depth = 99;
1596 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1597 	kr = mach_vm_region_recurse(mach_task_self(),
1598 	    &tmp_address,
1599 	    &tmp_size,
1600 	    &depth,
1601 	    (vm_region_recurse_info_t)&info,
1602 	    &count);
1603 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1604 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1605 	    tmp_address, tmp_address + tmp_size, depth,
1606 	    prot_str[info.protection],
1607 	    prot_str[info.max_protection],
1608 	    share_mode_str[info.share_mode],
1609 	    info.object_id);
1610 	if (before != after) {
1611 		if (depth == 0) {
1612 			T_PASS("mem_entry() honored copy-on-write");
1613 		} else {
1614 			T_FAIL("mem_entry() did not trigger copy-on-write");
1615 		}
1616 	} else {
1617 		T_FAIL("mem_entry() did not honor copy-on-write");
1618 	}
1619 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1620 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1621 	T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
1622 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1623 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1624 	/* cleanup */
1625 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1626 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1627 	mach_port_deallocate(mach_task_self(), mem_entry);
1628 }
1629 
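/*
 * Walks every mapping of the shared region and attempts mach_vm_copy() onto
 * it in place.  The copy must either fail with a protection/address error
 * or, if it succeeds, leave the target unnested and non-executable.
 */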
1630 T_DECL(copyoverwrite_submap_protection, "test copy-overwrite vm region submap \
1631     protection", T_META_ALL_VALID_ARCHS(true))
1632 {
1633 	kern_return_t           kr;
1634 	mach_vm_address_t       vmaddr;
1635 	mach_vm_size_t          vmsize;
1636 	natural_t               depth;
1637 	vm_region_submap_short_info_data_64_t region_info;
1638 	mach_msg_type_number_t  region_info_count;
1639 
1640 	for (vmaddr = SHARED_REGION_BASE;
1641 	    vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1642 	    vmaddr += vmsize) {
1643 		depth = 99;
1644 		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1645 		kr = mach_vm_region_recurse(mach_task_self(),
1646 		    &vmaddr,
1647 		    &vmsize,
1648 		    &depth,
1649 		    (vm_region_info_t) &region_info,
1650 		    &region_info_count);
1651 		if (kr == KERN_INVALID_ADDRESS) {
1652 			break;
1653 		}
1654 		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
1655 		T_ASSERT_EQ(region_info_count,
1656 		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1657 		    "vm_region_recurse(0x%llx) count = %d expected %d",
1658 		    vmaddr, region_info_count,
1659 		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1660 
1661 		T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
1662 		    vmaddr, depth, region_info.protection,
1663 		    region_info.max_protection);
1664 		if (depth == 0) {
1665 			/* not a submap mapping: next mapping */
1666 			continue;
1667 		}
1668 		if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1669 			break;
1670 		}
1671 		kr = mach_vm_copy(mach_task_self(),
1672 		    vmaddr,
1673 		    vmsize,
1674 		    vmaddr);
1675 		if (kr == KERN_PROTECTION_FAILURE ||
1676 		    kr == KERN_INVALID_ADDRESS) {
1677 			T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
1678 			    vmaddr, vmsize, kr, mach_error_string(kr));
1679 			continue;
1680 		}
1681 		T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
1682 		    vmaddr, vmsize, region_info.protection);
1683 		depth = 0;
1684 		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1685 		kr = mach_vm_region_recurse(mach_task_self(),
1686 		    &vmaddr,
1687 		    &vmsize,
1688 		    &depth,
1689 		    (vm_region_info_t) &region_info,
1690 		    &region_info_count);
1691 		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
1692 		T_ASSERT_EQ(region_info_count,
1693 		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
1694 		    "vm_region_recurse() count = %d expected %d",
1695 		    region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
1696 
1697 		T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
1698 		    vmaddr, depth);
1699 		T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
1700 		    0, "vm_region_recurse(0x%llx): prot 0x%x",
1701 		    vmaddr, region_info.protection);
1702 	}
1703 }
1704 
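/*
 * rdar://16783546: wiring code in the shared region must not trigger
 * code-signing violations.  mlock() on shared text (printf, fprintf) must
 * either fail with EACCES or leave the instructions unchanged; the same is
 * checked for text in the test binary itself.
 */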
1705 T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
1706     the shared region triggers code-signing violations",
1707     T_META_ALL_VALID_ARCHS(true))
1708 {
1709 	uint32_t *addr, before, after;
1710 	int retval;
1711 	int saved_errno;
1712 	kern_return_t kr;
1713 	vm_address_t map_addr, remap_addr;
1714 	vm_prot_t curprot, maxprot;
1715 
1716 	addr = (uint32_t *)&printf;
1717 #if __has_feature(ptrauth_calls)
1718 	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
1719 #else /* __has_feature(ptrauth_calls) */
1720 	map_addr = (vm_address_t)(uintptr_t)addr;
1721 #endif /* __has_feature(ptrauth_calls) */
1722 	remap_addr = 0;
1723 	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
1724 	    0,           /* mask */
1725 	    VM_FLAGS_ANYWHERE,
1726 	    mach_task_self(), map_addr,
1727 	    FALSE,           /* copy */
1728 	    &curprot, &maxprot,
1729 	    VM_INHERIT_DEFAULT);
1730 	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
1731 	    kr, mach_error_string(kr));
1732 	before = *addr;
1733 	retval = mlock(addr, 4096);
1734 	after = *addr;
1735 	if (retval != 0) {
1736 		saved_errno = errno;
1737 		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
1738 		    saved_errno, strerror(saved_errno), EACCES);
1739 	} else if (after != before) {
1740 		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
1741 	} else {
1742 		T_PASS("wire shared text");
1743 	}
1744 
1745 	addr = (uint32_t *) &fprintf;
1746 	before = *addr;
1747 	retval = mlock(addr, 4096);
1748 	after = *addr;
1749 	if (retval != 0) {
1750 		saved_errno = errno;
1751 		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
1752 		    saved_errno, strerror(saved_errno), EACCES);
1753 	} else if (after != before) {
1754 		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
1755 	} else {
1756 		T_PASS("wire shared text");
1757 	}
1758 
1759 	addr = (uint32_t *) &testmain_wire_text;
1760 	before = *addr;
1761 	retval = mlock(addr, 4096);
1762 	after = *addr;
1763 	if (retval != 0) {
1764 		saved_errno = errno;
1765 		T_ASSERT_EQ(saved_errno, EACCES, "wire text returned error %d (%s)",
1766 		    saved_errno, strerror(saved_errno));
1767 	} else if (after != before) {
1768 		T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", addr, before, after);
1769 	} else {
1770 		T_PASS("wire text");
1771 	}
1772 }
1773 
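/*
 * rdar://93177124: remapping the commpage (at its fixed, per-architecture
 * address) with copy=TRUE must succeed; the test skips if nothing is mapped
 * at that address.
 */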
1774 T_DECL(remap_comm_page, "test remapping of the commpage - rdar://93177124",
1775     T_META_ALL_VALID_ARCHS(true))
1776 {
1777 	kern_return_t           kr;
1778 	mach_vm_address_t       commpage_addr, remap_addr;
1779 	mach_vm_size_t          vmsize;
1780 	vm_prot_t               curprot, maxprot;
1781 
1782 #if __arm__
1783 	commpage_addr = 0xFFFF4000ULL;
1784 #elif __arm64__
1785 	commpage_addr = 0x0000000FFFFFC000ULL;
1786 #elif __x86_64__
1787 	commpage_addr = 0x00007FFFFFE00000ULL;
1788 #else
1789 	T_FAIL("unknown commpage address for this architecture");
1790 #endif
1791 
1792 	T_LOG("Remapping commpage from 0x%llx", commpage_addr);
1793 	vmsize = vm_kernel_page_size;
1794 	remap_addr = 0;
1795 	kr = mach_vm_remap(mach_task_self(),
1796 	    &remap_addr,
1797 	    vmsize,
1798 	    0, /* mask */
1799 	    VM_FLAGS_ANYWHERE,
1800 	    mach_task_self(),
1801 	    commpage_addr,
1802 	    TRUE, /* copy */
1803 	    &curprot,
1804 	    &maxprot,
1805 	    VM_INHERIT_DEFAULT);
1806 	if (kr == KERN_INVALID_ADDRESS) {
1807 		T_SKIP("No mapping found at 0x%llx\n", commpage_addr);
1808 		return;
1809 	}
1810 	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() of commpage from 0x%llx", commpage_addr);
1811 }
1812