xref: /xnu-12377.1.9/tests/vm_test_mach_map.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /* Mach vm map miscellaneous unit tests
2  *
3  * This test program serves to be a regression test suite for legacy
4  * vm issues, ideally each test will be linked to a radar number and
5  * perform a set of certain validations.
6  *
7  */
8 #include <darwintest.h>
9 #include <darwintest_utils.h>
10 #include "try_read_write.h"
11 
12 #include <dlfcn.h>
13 #include <fcntl.h>
14 #include <errno.h>
15 #include <ptrauth.h>
16 #include <signal.h>
17 #include <stdio.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <time.h>
21 
22 #include <sys/mman.h>
23 #include <sys/proc.h>
24 
25 #include <mach/mach_error.h>
26 #include <mach/mach_init.h>
27 #include <mach/mach_port.h>
28 #include <mach/mach_vm.h>
29 #include <mach/vm_map.h>
30 #include <mach/vm_param.h>
31 #include <mach/task.h>
32 #include <mach/task_info.h>
33 #include <mach/shared_region.h>
34 #include <machine/cpu_capabilities.h>
35 
36 #include <sys/mman.h>
37 #include <sys/syslimits.h>
38 
39 #include <mach-o/dyld.h>
40 
41 #if __has_include(<os/security_config_private.h>)
42 #import <os/security_config_private.h>     // for os_security_config_get()
43 #endif /* __has_include(<os/security_config_private.h>) */
44 
45 #include "test_utils.h"
46 
47 T_GLOBAL_META(
48 	T_META_NAMESPACE("xnu.vm"),
49 	T_META_RADAR_COMPONENT_NAME("xnu"),
50 	T_META_RADAR_COMPONENT_VERSION("VM"),
51 	T_META_RUN_CONCURRENTLY(true));
52 
/*
 * Regression test for rdar://23334087: VM memory tags should be
 * propagated through memory entries.
 *
 * Sets up a 3-chunk anonymous region where each 16KB chunk carries its
 * own VM tag (100 + chunk index), wraps the whole region in a memory
 * entry, maps that entry back into the task, and checks the user_tag
 * reported by mach_vm_region_recurse() for each chunk of the new
 * mapping.  The test runs two passes (the "again:" loop): one with a
 * MAP_MEM_VM_COPY entry and one with a MAP_MEM_VM_SHARE entry, in an
 * order randomized by the clock.
 *
 * override_tag: when non-zero, the mach_vm_map() call supplies its own
 * tag (200) via VM_MAKE_TAG(), which is then expected to replace the
 * per-chunk tags in the new mapping; when zero, the original per-chunk
 * tags are expected to show through the memory entry.
 */
static void
test_memory_entry_tagging(int override_tag)
{
	int                     pass;       /* 0 = first mode, 1 = second mode */
	int                     do_copy;    /* non-zero: MAP_MEM_VM_COPY, else MAP_MEM_VM_SHARE */
	kern_return_t           kr;
	mach_vm_address_t       vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t          vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t       *vmaddr_ptr;    /* aliases the copied/shared variant for this pass */
	mach_vm_size_t          *vmsize_ptr;
	mach_vm_address_t       vmaddr_chunk;
	mach_vm_size_t          vmsize_chunk;
	mach_vm_offset_t        vmoff;
	mach_port_t             mem_entry_copied, mem_entry_shared;
	mach_port_t             *mem_entry_ptr;
	unsigned int            i;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t  ri_count;
	unsigned int            depth;
	int                     vm_flags;
	unsigned int            expected_tag;

	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_copied = 0;
	vmsize_shared = 0;
	vmaddr_chunk = 0;
	vmsize_chunk = 16 * 1024;
	vmaddr_orig = 0;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;
	pass = 0;

	/* reserve the address range covering all three chunks */
	vmaddr_orig = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* re-allocate each chunk in place, giving it its own tag (100, 101, 102) */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + ((mach_vm_size_t)i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + (int)i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* touch every page so the region has resident pages to cover */
	for (vmoff = 0;
	    vmoff < vmsize_orig;
	    vmoff += PAGE_SIZE) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

	/* randomize which of the two entry modes (copy vs. share) runs first */
	do_copy = time(NULL) & 1;
again:
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	/* wrap the whole region in a memory entry for this pass's mode */
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig,                            /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the memory entry back into the task, optionally overriding the tag */
	*vmaddr_ptr = 0;
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0,              /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0,              /* offset */
	    FALSE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/*
	 * A write through the original mapping must be visible in the new
	 * mapping iff the entry was made with MAP_MEM_VM_SHARE.
	 */
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* verify the per-chunk layout and user_tag of the new mapping */
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t       vmaddr_info;
		mach_vm_size_t          vmsize_info;

		vmaddr_info = *vmaddr_ptr + ((mach_vm_size_t)i * vmsize_chunk);
		vmsize_info = 0;
		depth = 1;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		/* each chunk must still be its own map entry at the right place */
		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, (mach_vm_size_t)i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, (mach_vm_size_t)i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
		/* override replaces all tags with 200; otherwise original tags persist */
		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%u tag=%u expected %u",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* second pass: repeat everything with the other memory-entry mode */
	if (++pass < 2) {
		do_copy = !do_copy;
		goto again;
	}

done:
	/* best-effort cleanup of every mapping and port that was created */
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
		vmsize_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
		vmsize_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
		vmsize_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}

	return;
}
289 
/*
 * Regression test for rdar://22611816: mach_make_memory_entry(MAP_MEM_VM_COPY)
 * should never use a KERNEL_BUFFER copy.
 *
 * Makes a copy-mode memory entry of a 1-byte allocation, maps it back
 * in with copy=TRUE, and checks that the two mappings hold independent
 * copies: the original byte is visible through both, and a write
 * through the new mapping does not show through the original.
 */
static void
test_map_memory_entry(void)
{
	kern_return_t           kr;
	mach_vm_address_t       vmaddr1, vmaddr2;
	mach_vm_size_t          vmsize1, vmsize2;
	mach_port_t             mem_entry;
	unsigned char           *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 0;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;

	/* allocate 1 byte (rounded up to a page by the kernel) */
	vmsize1 = 1;
	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	/* snapshot the allocation into a copy-mode memory entry */
	vmsize2 = 1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1,                            /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	/* the entry may be rounded up to cover at least the requested byte */
	T_QUIET;
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* map the entry back in, requesting another copy */
	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0,              /* offset */
	    TRUE,              /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* both mappings must initially show the byte written before the copy */
	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* a write through the copy must not be visible through the original */
	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	/* best-effort cleanup of both mappings and the memory entry port */
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
		vmsize1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
		vmsize2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}

	return;
}
398 
/* rdar://23334087: run the tagging test without and with a tag override. */
T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
    VM memory tags should be propagated through memory entries",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_memory_entry_tagging(0);
	test_memory_entry_tagging(1);
}
406 
/* rdar://22611816: copy-mode memory entries must produce true copies. */
T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
    mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
    copy", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	test_map_memory_entry();
}
413 
414 static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };
415 
/*
 * Return the task's current physical footprint (phys_footprint from
 * TASK_VM_INFO).  Asserts that task_info() succeeds; on arm64 it also
 * asserts that the kernel filled in the full, current-size structure.
 */
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__)
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) */
	return ti.phys_footprint;
}
437 
/*
 * Walk a purgeable 1MB allocation through NONVOLATILE -> VOLATILE ->
 * EMPTY -> VOLATILE -> NONVOLATILE while half of it is mlock()ed, and
 * track phys_footprint at each step.  Footprint expectations are logged
 * as warnings rather than asserted, since background memory pressure
 * can legitimately perturb them.
 */
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
    emptying, volatilizing purgeable vm", T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	vm_purgable_t state;
	uint64_t footprint[8];          /* footprint snapshot after each step */

	vm_addr = 0;
	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	/* footprint0 */
	footprint[0] = task_footprint();
	T_LOG("    footprint[0] = %llu", footprint[0]);

	/* touch one byte per page so all pages become resident and dirty */
	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG("    footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}

	/* wire the first half; wired pages cannot be purged */
	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* footprint2 == footprint1 (wiring alone changes nothing) */
	footprint[2] = task_footprint();
	T_LOG("    footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}

	/* volatilize: only the unwired half leaves the footprint */
	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	/* vm_purgable_control() reports the previous state back in "state" */
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2) */
	footprint[3] = task_footprint();
	T_LOG("    footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}

	/* empty: previous state may be VOLATILE, or EMPTY if already purged */
	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3 (wired half is still pinned in) */
	footprint[4] = task_footprint();
	T_LOG("    footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}

	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG("    footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}

	/* EMPTY -> VOLATILE: no pages left, so footprint stays at baseline */
	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG("    footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}

	/* back to NONVOLATILE; still empty, so footprint stays at baseline */
	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG("    footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}
576 
577 static kern_return_t
get_reusable_size(uint64_t * reusable)578 get_reusable_size(uint64_t *reusable)
579 {
580 	task_vm_info_data_t     ti;
581 	mach_msg_type_number_t  ti_count = TASK_VM_INFO_COUNT;
582 	kern_return_t kr;
583 
584 	kr = task_info(mach_task_self(),
585 	    TASK_VM_INFO,
586 	    (task_info_t) &ti,
587 	    &ti_count);
588 	T_QUIET;
589 	T_EXPECT_MACH_SUCCESS(kr, "task_info()");
590 	T_QUIET;
591 	*reusable = ti.reusable;
592 	return kr;
593 }
594 
/*
 * rdar://2295713: MADV_FREE_REUSABLE on memory that is mapped twice
 * (via vm_remap with copy=FALSE) must reclaim the pages from BOTH
 * mappings' accounting, and mlock()ed pages must be exempt.
 *
 * Layout: one allocation of vmsize1 + vmsize2 bytes, shared into a
 * second mapping.  The last vmsize2 bytes of the second mapping are
 * wired; the first vmsize1 bytes of the original are then advised
 * reusable, and footprint / reusable counters are checked.
 */
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
    rethink needs madvise(MADV_FREE_HARDER)",
    T_META_RUN_CONCURRENTLY(false),
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	vm_address_t            vmaddr = 0, vmaddr2 = 0;
	vm_size_t               vmsize, vmsize1, vmsize2;
	kern_return_t           kr;
	char                    *cp;
	vm_prot_t               curprot, maxprot;
	int                     ret;
	int                     vmflags;
	uint64_t                footprint_before, footprint_after;
	uint64_t                reusable_before, reusable_after, reusable_expected;


	vmsize1 = 64 * 1024; /* 64KB to madvise() */
	vmsize2 = 32 * 1024; /* 32KB to mlock() */
	vmsize = vmsize1 + vmsize2;
	vmflags = VM_FLAGS_ANYWHERE;
	VM_SET_FLAGS_ALIAS(vmflags, VM_MEMORY_MALLOC);

	/* baseline reusable count, to compare against after the madvise */
	kr = get_reusable_size(&reusable_before);
	if (kr) {
		goto done;
	}

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    vmflags);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* dirty every byte so all pages are resident */
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	/* share (copy=FALSE) the allocation into a second mapping */
	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0,           /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE,           /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the second mapping must observe the data written via the first */
	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp,
		    (unsigned char)*cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	/* and a fresh write through the first must show through the second */
	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

#if defined(__x86_64__) || defined(__i386__)
	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */

	/* wire the tail of the second mapping; it must stay untouched below */
	ret = mlock((char *)(uintptr_t)(vmaddr2 + vmsize1),
	    vmsize2);
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "mlock()");

	footprint_before = task_footprint();

	/* advise the first vmsize1 bytes reusable through the first mapping */
	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize1,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* the pages are mapped twice, so the footprint drops by 2 * vmsize1 */
	footprint_after = task_footprint();
	T_ASSERT_EQ(footprint_after, footprint_before - 2 * vmsize1, NULL);

	/* same double-counting applies to the task's reusable byte count */
	kr = get_reusable_size(&reusable_after);
	if (kr) {
		goto done;
	}
	reusable_expected = 2ULL * vmsize1 + reusable_before;
	T_EXPECT_EQ(reusable_after, reusable_expected, "actual=%lld expected %lld",
	    reusable_after, reusable_expected);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}
724 
725 T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
726     rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
727     [ purgeable_malloc became eligible for reuse ]",
728     T_META_ALL_VALID_ARCHS(true),
729     T_META_TAG_VM_PREFERRED)
730 {
731 #if defined(__x86_64__) || defined(__i386__)
732 	if (COMM_PAGE_READ(uint64_t, CPU_CAPABILITIES64) & kIsTranslated) {
733 		T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.)");
734 	}
735 #endif /* defined(__x86_64__) || defined(__i386__) */
736 	vm_address_t            vmaddr = 0;
737 	vm_size_t               vmsize;
738 	kern_return_t           kr;
739 	char                    *cp;
740 	int                     ret;
741 
742 	vmsize = 10 * 1024 * 1024; /* 10MB */
743 	kr = vm_allocate(mach_task_self(),
744 	    &vmaddr,
745 	    vmsize,
746 	    (VM_FLAGS_ANYWHERE |
747 	    VM_FLAGS_PURGABLE |
748 	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
749 	T_QUIET;
750 	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
751 	if (T_RESULT == T_RESULT_FAIL) {
752 		goto done;
753 	}
754 
755 	for (cp = (char *)(uintptr_t)vmaddr;
756 	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
757 	    cp++) {
758 		*cp = 'x';
759 	}
760 
761 	ret = madvise((char *)(uintptr_t)vmaddr,
762 	    vmsize,
763 	    MADV_CAN_REUSE);
764 	T_QUIET;
765 	T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be adviced to reuse");
766 	if (T_RESULT == T_RESULT_FAIL) {
767 		goto done;
768 	}
769 
770 done:
771 	if (vmaddr != 0) {
772 		vm_deallocate(mach_task_self(), vmaddr, vmsize);
773 		vmaddr = 0;
774 	}
775 }
776 
777 static bool
validate_memory_is_zero(vm_address_t start,vm_size_t vmsize,vm_address_t * non_zero_addr)778 validate_memory_is_zero(
779 	vm_address_t            start,
780 	vm_size_t               vmsize,
781 	vm_address_t           *non_zero_addr)
782 {
783 	for (vm_size_t sz = 0; sz < vmsize; sz += sizeof(uint64_t)) {
784 		vm_address_t addr = start + sz;
785 
786 		if (*(uint64_t *)(addr) != 0) {
787 			*non_zero_addr = addr;
788 			return false;
789 		}
790 	}
791 	return true;
792 }
793 
/*
 * Test madvise(MADV_ZERO): after marking a range MADV_FREE_REUSABLE and
 * re-dirtying part of it, MADV_ZERO must leave the whole range reading
 * as zeroes.  The second half of the test repeats the check after the
 * pages have been pushed out of core with MADV_PAGEOUT, to exercise the
 * compressed/paged-out path as well.
 */
T_DECL(madvise_zero, "test madvise zero", T_META_TAG_VM_PREFERRED)
{
	vm_address_t            vmaddr = 0;
	vm_size_t               vmsize = PAGE_SIZE * 3;
	vm_address_t            non_zero_addr = 0;
	kern_return_t           kr;
	int                     ret;
	unsigned char           vec;    /* mincore() residency bits for one page */

	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* dirty all pages, then mark the whole range reusable */
	memset((void *)vmaddr, 'A', vmsize);
	ret = madvise((void*)vmaddr, vmsize, MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_FREE_REUSABLE)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* re-dirty just the first page, then zero the whole range */
	memset((void *)vmaddr, 'B', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* every byte — dirtied or reusable — must now read as zero */
	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %lu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* now exercise MADV_ZERO against paged-out (compressed) pages */
	memset((void *)vmaddr, 'C', PAGE_SIZE);
	ret = madvise((void*)vmaddr, vmsize, MADV_PAGEOUT);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_PAGEOUT)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* wait for the pages to be (asynchronously) compressed */
	T_QUIET; T_LOG("waiting for first page to be paged out");
	do {
		ret = mincore((void*)vmaddr, 1, (char *)&vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(1st)");
	} while (vec & MINCORE_INCORE);
	T_QUIET; T_LOG("waiting for last page to be paged out");
	do {
		ret = mincore((void*)(vmaddr + vmsize - 1), 1, (char *)&vec);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mincore(last)");
	} while (vec & MINCORE_INCORE);

	/* zeroing paged-out pages must also leave the range all-zero */
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO)");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_EQ(validate_memory_is_zero(vmaddr, vmsize, &non_zero_addr), true,
	    "madvise(%p, %lu, MADV_ZERO) returned non zero mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}
878 
/*
 * Test madvise(MADV_ZERO_WIRED_PAGES) in three scenarios:
 *  1. wired pages of a writable mapping are zero-filled on unmap
 *     (observed through a vm_remap()ed alias of the same pages);
 *  2. the advice is rejected with EPERM on a non-writable mapping;
 *  3. pages are NOT zero-filled if the mapping became read-only before
 *     being wired, even though the advice was set while still writable.
 */
T_DECL(madvise_zero_wired, "test madvise(MADV_ZERO_WIRED_PAGES)", T_META_TAG_VM_PREFERRED)
{
	vm_address_t            vmaddr;
	vm_address_t            vmaddr_remap;   /* alias mapping used to observe the pages after munmap */
	vm_size_t               vmsize = PAGE_SIZE * 3;
	vm_prot_t               cur_prot, max_prot;
	vm_address_t            non_zero_addr = 0;
	kern_return_t           kr;
	int                     ret;

	/*
	 * madvise(MADV_ZERO_WIRED_PAGES) should cause wired pages to get zero-filled
	 * when they get deallocated.
	 */
	vmaddr = 0;
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	memset((void *)vmaddr, 'A', vmsize);
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	/* shared alias: keeps the pages reachable after vmaddr is unmapped */
	vmaddr_remap = 0;
	kr = vm_remap(mach_task_self(), &vmaddr_remap, vmsize, 0, VM_FLAGS_ANYWHERE,
	    mach_task_self(), vmaddr, FALSE, &cur_prot, &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO_WIRED_PAGES);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO_WIRED_PAGES)");
	ret = mlock((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mlock()");
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	ret = munmap((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");
	T_ASSERT_EQ(*(char *)vmaddr_remap, 0, "wired pages are zero-filled on unmap");
	T_QUIET; T_ASSERT_EQ(validate_memory_is_zero(vmaddr_remap, vmsize, &non_zero_addr),
	    true, "madvise(%p, %lu, MADV_ZERO_WIRED) did not zero-fill mem at %p",
	    (void *)vmaddr, vmsize, (void *)non_zero_addr);
	ret = munmap((void *)vmaddr_remap, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");

	/*
	 * madvise(MADV_ZERO_WIRED_PAGES) should fail with EPERM if the
	 * mapping is not writable.
	 */
	vmaddr = 0;
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	memset((void *)vmaddr, 'A', vmsize);
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	ret = mprotect((void*)vmaddr, vmsize, PROT_READ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mprotect(PROT_READ)");
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO_WIRED_PAGES);
	T_ASSERT_POSIX_FAILURE(ret, EPERM,
	    "madvise(MADV_ZERO_WIRED_PAGES) returns EPERM on non-writable mapping ret %d errno %d", ret, errno);
	ret = munmap((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");

	/*
	 * madvise(MADV_ZERO_WIRED_PAGES) should not zero-fill the pages
	 * if the mapping is no longer writable when it gets unwired.
	 */
	vmaddr = 0;
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	memset((void *)vmaddr, 'A', vmsize);
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	vmaddr_remap = 0;
	kr = vm_remap(mach_task_self(), &vmaddr_remap, vmsize, 0, VM_FLAGS_ANYWHERE,
	    mach_task_self(), vmaddr, FALSE, &cur_prot, &max_prot,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
	/* set the advice while still writable, then drop write permission */
	ret = madvise((void*)vmaddr, vmsize, MADV_ZERO_WIRED_PAGES);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "madvise(MADV_ZERO_WIRED_PAGES)");
	ret = mprotect((void*)vmaddr, vmsize, PROT_READ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mprotect(PROT_READ)");
	ret = mlock((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mlock()");
	T_QUIET; T_ASSERT_EQ(*(char *)vmaddr, 'A', " ");
	ret = munmap((void*)vmaddr, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");
	T_ASSERT_EQ(*(char *)vmaddr_remap, 'A', "RO wired pages NOT zero-filled on unmap");
	ret = munmap((void *)vmaddr_remap, vmsize);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "munmap()");
}
974 
975 #define DEST_PATTERN 0xFEDCBA98
976 
/*
 * rdar://31075370: mach_vm_read_overwrite() into an existing mapping.
 *
 * Fills a source buffer with its own indices and a destination buffer
 * with DEST_PATTERN, then copies everything except the first and last
 * word (source offset +1 word into destination offset +1 word).  The
 * destination must end up with DEST_PATTERN in word 0, the copied
 * indices in words 1..N-2, and DEST_PATTERN again in the final word.
 */
T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
    rdar://31075370",
    T_META_ALL_VALID_ARCHS(true),
    T_META_TAG_VM_PREFERRED)
{
	kern_return_t           kr;
	mach_vm_address_t       vmaddr1, vmaddr2;
	mach_vm_size_t          vmsize1, vmsize2;
	uint32_t                *ip;
	uint32_t                i;

	/* source: each 32-bit word holds its own index */
	vmaddr1 = 0;
	vmsize1 = 4 * 4096;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (uint32_t *)(uintptr_t)vmaddr1;
	for (i = 0; (mach_vm_size_t)i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = i;
	}

	/* destination: every word starts as DEST_PATTERN */
	vmaddr2 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr2,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (uint32_t *)(uintptr_t)vmaddr2;
	for (i = 0; (mach_vm_size_t)i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = DEST_PATTERN;
	}

	/* copy all but the first and last word, offset by one word each side */
	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
	kr = mach_vm_read_overwrite(mach_task_self(),
	    vmaddr1 + sizeof(*ip),
	    vmsize2,
	    vmaddr2 + sizeof(*ip),
	    &vmsize2);
	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");

	/* word 0 was outside the copy and must keep DEST_PATTERN */
	ip = (uint32_t *)(uintptr_t)vmaddr2;
	for (i = 0; i < 1; i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
	/*
	 * NOTE(review): the bound below is written as (vmsize1 - 2) rather
	 * than (vmsize1 - sizeof(*ip)); with vmsize1 = 16KB the integer
	 * division makes both evaluate to 4095, so it covers exactly the
	 * copied words 1..4094 — but it only works by accident of the
	 * current sizes.  Confirm intent before changing vmsize1.
	 */
	for (; (mach_vm_size_t)i < (vmsize1 - 2) / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], i);
	}
	/* the final word was also outside the copy and must keep DEST_PATTERN */
	for (; (mach_vm_size_t)i < vmsize1 / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
}
1038 
1039 T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
1040     objects - rdar://35610377",
1041     T_META_ALL_VALID_ARCHS(true),
1042     T_META_TAG_VM_PREFERRED)
1043 {
1044 	kern_return_t           kr;
1045 	mach_vm_address_t       vmaddr1, vmaddr2, vmaddr3;
1046 	mach_vm_size_t          vmsize;
1047 	vm_prot_t               curprot, maxprot;
1048 
1049 	vmsize = 32 * 1024 * 1024;
1050 
1051 	vmaddr1 = 0;
1052 	kr = mach_vm_allocate(mach_task_self(),
1053 	    &vmaddr1,
1054 	    vmsize,
1055 	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
1056 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
1057 
1058 	memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);
1059 
1060 	vmaddr2 = 0;
1061 	kr = mach_vm_remap(mach_task_self(),
1062 	    &vmaddr2,
1063 	    vmsize,
1064 	    0,                /* mask */
1065 	    VM_FLAGS_ANYWHERE,
1066 	    mach_task_self(),
1067 	    vmaddr1,
1068 	    TRUE,                /* copy */
1069 	    &curprot,
1070 	    &maxprot,
1071 	    VM_INHERIT_DEFAULT);
1072 	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");
1073 
1074 	vmaddr3 = 0;
1075 	kr = mach_vm_remap(mach_task_self(),
1076 	    &vmaddr3,
1077 	    vmsize,
1078 	    0,                /* mask */
1079 	    VM_FLAGS_ANYWHERE,
1080 	    mach_task_self(),
1081 	    vmaddr2,
1082 	    TRUE,                /* copy */
1083 	    &curprot,
1084 	    &maxprot,
1085 	    VM_INHERIT_DEFAULT);
1086 	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
1087 }
1088 
1089 T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
1090     non-purgeable - rdar://31990033",
1091     T_META_ALL_VALID_ARCHS(true),
1092     T_META_TAG_VM_PREFERRED)
1093 {
1094 	kern_return_t   kr;
1095 	vm_address_t    vmaddr;
1096 	vm_purgable_t   state;
1097 
1098 	vmaddr = 0;
1099 	kr = vm_allocate(mach_task_self(), &vmaddr, 1,
1100 	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
1101 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
1102 
1103 	state = VM_PURGABLE_DENY;
1104 	kr = vm_purgable_control(mach_task_self(), vmaddr,
1105 	    VM_PURGABLE_SET_STATE, &state);
1106 	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1107 	    "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
1108 	    kr, mach_error_string(kr));
1109 
1110 	kr = vm_deallocate(mach_task_self(), vmaddr, 1);
1111 	T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
1112 }
1113 
1114 #define VMSIZE 0x10000
1115 
1116 T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
1117     T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
1118 {
1119 	kern_return_t           kr;
1120 	mach_vm_address_t       vmaddr1, vmaddr2;
1121 	mach_vm_size_t          vmsize;
1122 	vm_prot_t               curprot, maxprot;
1123 
1124 	vmaddr1 = 0;
1125 	vmsize = VMSIZE;
1126 	kr = mach_vm_allocate(mach_task_self(),
1127 	    &vmaddr1,
1128 	    vmsize,
1129 	    VM_FLAGS_ANYWHERE);
1130 	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
1131 
1132 	vmaddr2 = 0;
1133 	vmsize = 0;
1134 	kr = mach_vm_remap(mach_task_self(),
1135 	    &vmaddr2,
1136 	    vmsize,
1137 	    0,
1138 	    VM_FLAGS_ANYWHERE,
1139 	    mach_task_self(),
1140 	    vmaddr1,
1141 	    FALSE,
1142 	    &curprot,
1143 	    &maxprot,
1144 	    VM_INHERIT_DEFAULT);
1145 	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
1146 	    vmsize, kr, mach_error_string(kr));
1147 
1148 	vmaddr2 = 0;
1149 	vmsize = (mach_vm_size_t)-2;
1150 	kr = mach_vm_remap(mach_task_self(),
1151 	    &vmaddr2,
1152 	    vmsize,
1153 	    0,
1154 	    VM_FLAGS_ANYWHERE,
1155 	    mach_task_self(),
1156 	    vmaddr1,
1157 	    FALSE,
1158 	    &curprot,
1159 	    &maxprot,
1160 	    VM_INHERIT_DEFAULT);
1161 	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
1162 	    vmsize, kr, mach_error_string(kr));
1163 }
1164 
1165 extern int __shared_region_check_np(uint64_t *);
1166 
1167 T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
1168     - rdar://problem/41481703",
1169     T_META_ALL_VALID_ARCHS(true),
1170     T_META_TAG_VM_PREFERRED)
1171 {
1172 	int                     ret;
1173 	kern_return_t           kr;
1174 	mach_vm_address_t       sr_start;
1175 	mach_vm_size_t          vmsize;
1176 	mach_vm_address_t       vmaddr;
1177 	mach_port_t             mem_entry;
1178 
1179 	ret = __shared_region_check_np(&sr_start);
1180 	if (ret != 0) {
1181 		int saved_errno;
1182 		saved_errno = errno;
1183 
1184 		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
1185 		    saved_errno, strerror(saved_errno));
1186 		T_END;
1187 	}
1188 
1189 	vmsize = PAGE_SIZE;
1190 	kr = mach_make_memory_entry_64(mach_task_self(),
1191 	    &vmsize,
1192 	    sr_start,
1193 	    MAP_MEM_VM_SHARE | VM_PROT_READ,
1194 	    &mem_entry,
1195 	    MACH_PORT_NULL);
1196 	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);
1197 
1198 	vmaddr = 0;
1199 	kr = mach_vm_map(mach_task_self(),
1200 	    &vmaddr,
1201 	    vmsize,
1202 	    0,
1203 	    VM_FLAGS_ANYWHERE,
1204 	    mem_entry,
1205 	    0,
1206 	    FALSE,
1207 	    VM_PROT_READ,
1208 	    VM_PROT_READ,
1209 	    VM_INHERIT_DEFAULT);
1210 	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1211 }
1212 
/* Indexed by a vm_prot_t value: bit 0 = read, bit 1 = write, bit 2 = execute. */
static const char *prot_str[] = { "---", "r--", "-w-", "rw-", "--x", "r-x", "-wx", "rwx" };
/* Indexed by vm_region_submap_info_data_64_t.share_mode (presumably the SM_*
 * constants from <mach/vm_region.h>, starting at SM_COW == 1; index 0 unused). */
static const char *share_mode_str[] = { "---", "COW", "PRIVATE", "EMPTY", "SHARED", "TRUESHARED", "PRIVATE_ALIASED", "SHARED_ALIASED", "LARGE_PAGE" };
1215 
1216 T_DECL(shared_region_share_writable, "sharing a writable mapping of the shared region shoudl not give write access to shared region - rdar://problem/74469953",
1217     T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
1218 {
1219 	int ret;
1220 	uint64_t sr_start;
1221 	kern_return_t kr;
1222 	mach_vm_address_t address, tmp_address, remap_address;
1223 	mach_vm_size_t size, tmp_size, remap_size;
1224 	uint32_t depth;
1225 	mach_msg_type_number_t count;
1226 	vm_region_submap_info_data_64_t info;
1227 	vm_prot_t cur_prot, max_prot;
1228 	uint32_t before, after, remap;
1229 	mach_port_t mem_entry;
1230 
1231 	ret = __shared_region_check_np(&sr_start);
1232 	if (ret != 0) {
1233 		int saved_errno;
1234 		saved_errno = errno;
1235 
1236 		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
1237 		    saved_errno, strerror(saved_errno));
1238 		T_END;
1239 	}
1240 	T_LOG("SHARED_REGION_BASE 0x%llx", SHARED_REGION_BASE);
1241 	T_LOG("SHARED_REGION_SIZE 0x%llx", SHARED_REGION_SIZE);
1242 	T_LOG("shared region starts at 0x%llx", sr_start);
1243 	T_QUIET; T_ASSERT_GE(sr_start, SHARED_REGION_BASE,
1244 	    "shared region starts below BASE");
1245 	T_QUIET; T_ASSERT_LT(sr_start, SHARED_REGION_BASE + SHARED_REGION_SIZE,
1246 	    "shared region starts above BASE+SIZE");
1247 
1248 	/*
1249 	 * Step 1 - check that one can not get write access to a read-only
1250 	 * mapping in the shared region.
1251 	 */
1252 	size = 0;
1253 	for (address = SHARED_REGION_BASE;
1254 	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1255 	    address += size) {
1256 		size = 0;
1257 		depth = 99;
1258 		count = VM_REGION_SUBMAP_INFO_COUNT_64;
1259 		kr = mach_vm_region_recurse(mach_task_self(),
1260 		    &address,
1261 		    &size,
1262 		    &depth,
1263 		    (vm_region_recurse_info_t)&info,
1264 		    &count);
1265 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1266 		if (kr == KERN_INVALID_ADDRESS) {
1267 			T_SKIP("could not find read-only nested mapping");
1268 			T_END;
1269 		}
1270 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1271 		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1272 		    address, address + size, depth,
1273 		    prot_str[info.protection],
1274 		    prot_str[info.max_protection],
1275 		    share_mode_str[info.share_mode],
1276 		    info.object_id);
1277 		if (depth > 0 &&
1278 		    (info.protection == VM_PROT_READ) &&
1279 		    (info.max_protection == VM_PROT_READ)) {
1280 			/* nested and read-only: bingo! */
1281 			break;
1282 		}
1283 	}
1284 	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1285 		T_SKIP("could not find read-only nested mapping");
1286 		T_END;
1287 	}
1288 
1289 	/* test vm_remap() of RO */
1290 	before = *(uint32_t *)(uintptr_t)address;
1291 	remap_address = 0;
1292 	remap_size = size;
1293 	kr = mach_vm_remap(mach_task_self(),
1294 	    &remap_address,
1295 	    remap_size,
1296 	    0,
1297 	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1298 	    mach_task_self(),
1299 	    address,
1300 	    FALSE,
1301 	    &cur_prot,
1302 	    &max_prot,
1303 	    VM_INHERIT_DEFAULT);
1304 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1305 //	T_QUIET; T_ASSERT_EQ(cur_prot, VM_PROT_READ, "cur_prot is read-only");
1306 //	T_QUIET; T_ASSERT_EQ(max_prot, VM_PROT_READ, "max_prot is read-only");
1307 	/* check that region is still nested */
1308 	tmp_address = address;
1309 	tmp_size = 0;
1310 	depth = 99;
1311 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1312 	kr = mach_vm_region_recurse(mach_task_self(),
1313 	    &tmp_address,
1314 	    &tmp_size,
1315 	    &depth,
1316 	    (vm_region_recurse_info_t)&info,
1317 	    &count);
1318 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1319 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1320 	    tmp_address, tmp_address + tmp_size, depth,
1321 	    prot_str[info.protection],
1322 	    prot_str[info.max_protection],
1323 	    share_mode_str[info.share_mode],
1324 	    info.object_id);
1325 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1326 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1327 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1328 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1329 //	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1330 	/* check that new mapping is read-only */
1331 	tmp_address = remap_address;
1332 	tmp_size = 0;
1333 	depth = 99;
1334 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1335 	kr = mach_vm_region_recurse(mach_task_self(),
1336 	    &tmp_address,
1337 	    &tmp_size,
1338 	    &depth,
1339 	    (vm_region_recurse_info_t)&info,
1340 	    &count);
1341 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1342 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1343 	    tmp_address, tmp_address + tmp_size, depth,
1344 	    prot_str[info.protection],
1345 	    prot_str[info.max_protection],
1346 	    share_mode_str[info.share_mode],
1347 	    info.object_id);
1348 	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1349 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1350 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1351 //	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1352 	remap = *(uint32_t *)(uintptr_t)remap_address;
1353 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1354 // this would crash if actually read-only:
1355 //	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1356 	after = *(uint32_t *)(uintptr_t)address;
1357 	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1358 //	*(uint32_t *)(uintptr_t)remap_address = before;
1359 	if (before != after) {
1360 		T_FAIL("vm_remap() bypassed copy-on-write");
1361 	} else {
1362 		T_PASS("vm_remap() did not bypass copy-on-write");
1363 	}
1364 	/* cleanup */
1365 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1366 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1367 	T_PASS("vm_remap() read-only");
1368 
1369 #if defined(VM_MEMORY_ROSETTA)
1370 	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1371 		T_PASS("vm_remap_new() is not present");
1372 		goto skip_vm_remap_new_ro;
1373 	}
1374 	/* test vm_remap_new() of RO */
1375 	before = *(uint32_t *)(uintptr_t)address;
1376 	remap_address = 0;
1377 	remap_size = size;
1378 	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1379 	max_prot = VM_PROT_READ | VM_PROT_WRITE;
1380 	kr = mach_vm_remap_new(mach_task_self(),
1381 	    &remap_address,
1382 	    remap_size,
1383 	    0,
1384 	    VM_FLAGS_ANYWHERE,
1385 	    mach_task_self(),
1386 	    address,
1387 	    FALSE,
1388 	    &cur_prot,
1389 	    &max_prot,
1390 	    VM_INHERIT_DEFAULT);
1391 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1392 	if (kr == KERN_PROTECTION_FAILURE) {
1393 		/* wrong but not a security issue... */
1394 		goto skip_vm_remap_new_ro;
1395 	}
1396 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1397 	remap = *(uint32_t *)(uintptr_t)remap_address;
1398 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1399 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1400 	after = *(uint32_t *)(uintptr_t)address;
1401 	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1402 	*(uint32_t *)(uintptr_t)remap_address = before;
1403 	if (before != after) {
1404 		T_FAIL("vm_remap_new() bypassed copy-on-write");
1405 	} else {
1406 		T_PASS("vm_remap_new() did not bypass copy-on-write");
1407 	}
1408 	/* check that region is still nested */
1409 	tmp_address = address;
1410 	tmp_size = 0;
1411 	depth = 99;
1412 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1413 	kr = mach_vm_region_recurse(mach_task_self(),
1414 	    &tmp_address,
1415 	    &tmp_size,
1416 	    &depth,
1417 	    (vm_region_recurse_info_t)&info,
1418 	    &count);
1419 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1420 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1421 	    tmp_address, tmp_address + tmp_size, depth,
1422 	    prot_str[info.protection],
1423 	    prot_str[info.max_protection],
1424 	    share_mode_str[info.share_mode],
1425 	    info.object_id);
1426 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1427 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1428 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1429 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1430 	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1431 	T_PASS("vm_remap_new() read-only");
1432 skip_vm_remap_new_ro:
1433 #else /* defined(VM_MEMORY_ROSETTA) */
1434 	/* pre-BigSur SDK: no vm_remap_new() */
1435 	T_LOG("No vm_remap_new() to test");
1436 #endif /* defined(VM_MEMORY_ROSETTA) */
1437 
1438 	/* test mach_make_memory_entry_64(VM_SHARE) of RO */
1439 	before = *(uint32_t *)(uintptr_t)address;
1440 	remap_size = size;
1441 	mem_entry = MACH_PORT_NULL;
1442 	kr = mach_make_memory_entry_64(mach_task_self(),
1443 	    &remap_size,
1444 	    address,
1445 	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1446 	    &mem_entry,
1447 	    MACH_PORT_NULL);
1448 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1449 	if (kr == KERN_PROTECTION_FAILURE) {
1450 		/* wrong but not a security issue... */
1451 		goto skip_mem_entry_vm_share_ro;
1452 	}
1453 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1454 	remap_address = 0;
1455 	kr = mach_vm_map(mach_task_self(),
1456 	    &remap_address,
1457 	    remap_size,
1458 	    0,              /* mask */
1459 	    VM_FLAGS_ANYWHERE,
1460 	    mem_entry,
1461 	    0,              /* offset */
1462 	    FALSE,              /* copy */
1463 	    VM_PROT_READ | VM_PROT_WRITE,
1464 	    VM_PROT_READ | VM_PROT_WRITE,
1465 	    VM_INHERIT_DEFAULT);
1466 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1467 	remap = *(uint32_t *)(uintptr_t)remap_address;
1468 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1469 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1470 	after = *(uint32_t *)(uintptr_t)address;
1471 	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1472 	*(uint32_t *)(uintptr_t)remap_address = before;
1473 	if (before != after) {
1474 		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1475 	} else {
1476 		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1477 	}
1478 	/* check that region is still nested */
1479 	tmp_address = address;
1480 	tmp_size = 0;
1481 	depth = 99;
1482 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1483 	kr = mach_vm_region_recurse(mach_task_self(),
1484 	    &tmp_address,
1485 	    &tmp_size,
1486 	    &depth,
1487 	    (vm_region_recurse_info_t)&info,
1488 	    &count);
1489 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1490 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1491 	    tmp_address, tmp_address + tmp_size, depth,
1492 	    prot_str[info.protection],
1493 	    prot_str[info.max_protection],
1494 	    share_mode_str[info.share_mode],
1495 	    info.object_id);
1496 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1497 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1498 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1499 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1500 	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1501 	/* check that new mapping is a copy */
1502 	tmp_address = remap_address;
1503 	tmp_size = 0;
1504 	depth = 99;
1505 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1506 	kr = mach_vm_region_recurse(mach_task_self(),
1507 	    &tmp_address,
1508 	    &tmp_size,
1509 	    &depth,
1510 	    (vm_region_recurse_info_t)&info,
1511 	    &count);
1512 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1513 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1514 	    tmp_address, tmp_address + tmp_size, depth,
1515 	    prot_str[info.protection],
1516 	    prot_str[info.max_protection],
1517 	    share_mode_str[info.share_mode],
1518 	    info.object_id);
1519 	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1520 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1521 	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
1522 //	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1523 //	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1524 	/* cleanup */
1525 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1526 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1527 	T_PASS("mem_entry(VM_SHARE) read-only");
1528 skip_mem_entry_vm_share_ro:
1529 
1530 	/* test mach_make_memory_entry_64() of RO */
1531 	before = *(uint32_t *)(uintptr_t)address;
1532 	remap_size = size;
1533 	mem_entry = MACH_PORT_NULL;
1534 	kr = mach_make_memory_entry_64(mach_task_self(),
1535 	    &remap_size,
1536 	    address,
1537 	    VM_PROT_READ | VM_PROT_WRITE,
1538 	    &mem_entry,
1539 	    MACH_PORT_NULL);
1540 	T_QUIET; T_ASSERT_EQ(kr, KERN_PROTECTION_FAILURE, "mach_make_memory_entry_64()");
1541 	/* check that region is still nested */
1542 	tmp_address = address;
1543 	tmp_size = 0;
1544 	depth = 99;
1545 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1546 	kr = mach_vm_region_recurse(mach_task_self(),
1547 	    &tmp_address,
1548 	    &tmp_size,
1549 	    &depth,
1550 	    (vm_region_recurse_info_t)&info,
1551 	    &count);
1552 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1553 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1554 	    tmp_address, tmp_address + tmp_size, depth,
1555 	    prot_str[info.protection],
1556 	    prot_str[info.max_protection],
1557 	    share_mode_str[info.share_mode],
1558 	    info.object_id);
1559 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1560 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1561 //	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1562 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1563 	if (depth > 0) {
1564 		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1565 	}
1566 	T_PASS("mem_entry() read-only");
1567 
1568 	/* test mach_make_memory_entry_64(READ | WRITE | VM_PROT_IS_MASK) of RO */
1569 	before = *(uint32_t *)(uintptr_t)address;
1570 	remap_size = size;
1571 	mem_entry = MACH_PORT_NULL;
1572 	kr = mach_make_memory_entry_64(mach_task_self(),
1573 	    &remap_size,
1574 	    address,
1575 	    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_IS_MASK,
1576 	    &mem_entry,
1577 	    MACH_PORT_NULL);
1578 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(READ | WRITE | IS_MASK)");
1579 	remap_address = 0;
1580 	kr = mach_vm_map(mach_task_self(),
1581 	    &remap_address,
1582 	    remap_size,
1583 	    0,              /* mask */
1584 	    VM_FLAGS_ANYWHERE,
1585 	    mem_entry,
1586 	    0,              /* offset */
1587 	    FALSE,              /* copy */
1588 	    VM_PROT_READ | VM_PROT_WRITE,
1589 	    VM_PROT_READ | VM_PROT_WRITE,
1590 	    VM_INHERIT_DEFAULT);
1591 	T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_RIGHT, "vm_map(read/write)");
1592 	remap_address = 0;
1593 	kr = mach_vm_map(mach_task_self(),
1594 	    &remap_address,
1595 	    remap_size,
1596 	    0,              /* mask */
1597 	    VM_FLAGS_ANYWHERE,
1598 	    mem_entry,
1599 	    0,              /* offset */
1600 	    FALSE,              /* copy */
1601 	    VM_PROT_READ,
1602 	    VM_PROT_READ,
1603 	    VM_INHERIT_DEFAULT);
1604 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map(read only)");
1605 	remap = *(uint32_t *)(uintptr_t)remap_address;
1606 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1607 	/* check that region is still nested */
1608 	tmp_address = address;
1609 	tmp_size = 0;
1610 	depth = 99;
1611 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1612 	kr = mach_vm_region_recurse(mach_task_self(),
1613 	    &tmp_address,
1614 	    &tmp_size,
1615 	    &depth,
1616 	    (vm_region_recurse_info_t)&info,
1617 	    &count);
1618 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1619 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1620 	    tmp_address, tmp_address + tmp_size, depth,
1621 	    prot_str[info.protection],
1622 	    prot_str[info.max_protection],
1623 	    share_mode_str[info.share_mode],
1624 	    info.object_id);
1625 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1626 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1627 //	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1628 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "cur_prot still read-only");
1629 	if (depth > 0) {
1630 		T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "max_prot still read-only");
1631 	}
1632 	/* check that new mapping is a copy */
1633 	tmp_address = remap_address;
1634 	tmp_size = 0;
1635 	depth = 99;
1636 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1637 	kr = mach_vm_region_recurse(mach_task_self(),
1638 	    &tmp_address,
1639 	    &tmp_size,
1640 	    &depth,
1641 	    (vm_region_recurse_info_t)&info,
1642 	    &count);
1643 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1644 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1645 	    tmp_address, tmp_address + tmp_size, depth,
1646 	    prot_str[info.protection],
1647 	    prot_str[info.max_protection],
1648 	    share_mode_str[info.share_mode],
1649 	    info.object_id);
1650 	T_QUIET; T_ASSERT_EQ(tmp_address, remap_address, "address hasn't changed");
1651 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1652 	T_QUIET; T_ASSERT_EQ(depth, 0, "new mapping is unnested");
1653 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_READ, "new cur_prot read-only");
1654 	T_QUIET; T_ASSERT_EQ(info.max_protection, VM_PROT_READ, "new max_prot read-only");
1655 	/* cleanup */
1656 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1657 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1658 	T_PASS("mem_entry(READ | WRITE | IS_MASK) read-only");
1659 
1660 
1661 	/*
1662 	 * Step 2 - check that one can not share write access with a writable
1663 	 * mapping in the shared region.
1664 	 */
1665 	size = 0;
1666 	for (address = SHARED_REGION_BASE;
1667 	    address < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1668 	    address += size) {
1669 		size = 0;
1670 		depth = 99;
1671 		count = VM_REGION_SUBMAP_INFO_COUNT_64;
1672 		kr = mach_vm_region_recurse(mach_task_self(),
1673 		    &address,
1674 		    &size,
1675 		    &depth,
1676 		    (vm_region_recurse_info_t)&info,
1677 		    &count);
1678 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_region_recurse()");
1679 		if (kr == KERN_INVALID_ADDRESS) {
1680 			T_SKIP("could not find writable nested mapping");
1681 			T_END;
1682 		}
1683 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1684 		T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1685 		    address, address + size, depth,
1686 		    prot_str[info.protection],
1687 		    prot_str[info.max_protection],
1688 		    share_mode_str[info.share_mode],
1689 		    info.object_id);
1690 		if (depth > 0 && (info.protection & VM_PROT_WRITE)) {
1691 			/* nested and writable: bingo! */
1692 			break;
1693 		}
1694 	}
1695 	if (address >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
1696 		T_SKIP("could not find writable nested mapping");
1697 		T_END;
1698 	}
1699 
1700 	/* test vm_remap() of RW */
1701 	before = *(uint32_t *)(uintptr_t)address;
1702 	remap_address = 0;
1703 	remap_size = size;
1704 	kr = mach_vm_remap(mach_task_self(),
1705 	    &remap_address,
1706 	    remap_size,
1707 	    0,
1708 	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
1709 	    mach_task_self(),
1710 	    address,
1711 	    FALSE,
1712 	    &cur_prot,
1713 	    &max_prot,
1714 	    VM_INHERIT_DEFAULT);
1715 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap()");
1716 	if (!(cur_prot & VM_PROT_WRITE)) {
1717 		T_LOG("vm_remap(): 0x%llx not writable %s/%s",
1718 		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
1719 		T_ASSERT_FAIL("vm_remap() remapping not writable");
1720 	}
1721 	remap = *(uint32_t *)(uintptr_t)remap_address;
1722 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1723 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1724 	after = *(uint32_t *)(uintptr_t)address;
1725 	T_LOG("vm_remap(): 0x%llx 0x%x -> 0x%x", address, before, after);
1726 	*(uint32_t *)(uintptr_t)remap_address = before;
1727 	if (before != after) {
1728 		T_FAIL("vm_remap() bypassed copy-on-write");
1729 	} else {
1730 		T_PASS("vm_remap() did not bypass copy-on-write");
1731 	}
1732 	/* check that region is still nested */
1733 	tmp_address = address;
1734 	tmp_size = 0;
1735 	depth = 99;
1736 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1737 	kr = mach_vm_region_recurse(mach_task_self(),
1738 	    &tmp_address,
1739 	    &tmp_size,
1740 	    &depth,
1741 	    (vm_region_recurse_info_t)&info,
1742 	    &count);
1743 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1744 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1745 	    tmp_address, tmp_address + tmp_size, depth,
1746 	    prot_str[info.protection],
1747 	    prot_str[info.max_protection],
1748 	    share_mode_str[info.share_mode],
1749 	    info.object_id);
1750 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1751 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1752 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1753 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1754 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1755 	/* cleanup */
1756 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1757 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1758 
1759 #if defined(VM_MEMORY_ROSETTA)
1760 	if (dlsym(RTLD_DEFAULT, "mach_vm_remap_new") == NULL) {
1761 		T_PASS("vm_remap_new() is not present");
1762 		goto skip_vm_remap_new_rw;
1763 	}
1764 	/* test vm_remap_new() of RW */
1765 	before = *(uint32_t *)(uintptr_t)address;
1766 	remap_address = 0;
1767 	remap_size = size;
1768 	cur_prot = VM_PROT_READ | VM_PROT_WRITE;
1769 	max_prot = VM_PROT_READ | VM_PROT_WRITE;
1770 	kr = mach_vm_remap_new(mach_task_self(),
1771 	    &remap_address,
1772 	    remap_size,
1773 	    0,
1774 	    VM_FLAGS_ANYWHERE,
1775 	    mach_task_self(),
1776 	    address,
1777 	    FALSE,
1778 	    &cur_prot,
1779 	    &max_prot,
1780 	    VM_INHERIT_DEFAULT);
1781 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_remap_new()");
1782 	if (kr == KERN_PROTECTION_FAILURE) {
1783 		/* wrong but not a security issue... */
1784 		goto skip_vm_remap_new_rw;
1785 	}
1786 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_remap_new()");
1787 	if (!(cur_prot & VM_PROT_WRITE)) {
1788 		T_LOG("vm_remap_new(): 0x%llx not writable %s/%s",
1789 		    remap_address, prot_str[cur_prot], prot_str[max_prot]);
1790 		T_ASSERT_FAIL("vm_remap_new() remapping not writable");
1791 	}
1792 	remap = *(uint32_t *)(uintptr_t)remap_address;
1793 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1794 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1795 	after = *(uint32_t *)(uintptr_t)address;
1796 	T_LOG("vm_remap_new(): 0x%llx 0x%x -> 0x%x", address, before, after);
1797 	*(uint32_t *)(uintptr_t)remap_address = before;
1798 	if (before != after) {
1799 		T_FAIL("vm_remap_new() bypassed copy-on-write");
1800 	} else {
1801 		T_PASS("vm_remap_new() did not bypass copy-on-write");
1802 	}
1803 	/* check that region is still nested */
1804 	tmp_address = address;
1805 	tmp_size = 0;
1806 	depth = 99;
1807 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1808 	kr = mach_vm_region_recurse(mach_task_self(),
1809 	    &tmp_address,
1810 	    &tmp_size,
1811 	    &depth,
1812 	    (vm_region_recurse_info_t)&info,
1813 	    &count);
1814 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1815 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1816 	    tmp_address, tmp_address + tmp_size, depth,
1817 	    prot_str[info.protection],
1818 	    prot_str[info.max_protection],
1819 	    share_mode_str[info.share_mode],
1820 	    info.object_id);
1821 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1822 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1823 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1824 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1825 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1826 	/* cleanup */
1827 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1828 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1829 skip_vm_remap_new_rw:
1830 #else /* defined(VM_MEMORY_ROSETTA) */
1831 	/* pre-BigSur SDK: no vm_remap_new() */
1832 	T_LOG("No vm_remap_new() to test");
1833 #endif /* defined(VM_MEMORY_ROSETTA) */
1834 
1835 	/* test mach_make_memory_entry_64(VM_SHARE) of RW */
1836 	before = *(uint32_t *)(uintptr_t)address;
1837 	remap_size = size;
1838 	mem_entry = MACH_PORT_NULL;
1839 	kr = mach_make_memory_entry_64(mach_task_self(),
1840 	    &remap_size,
1841 	    address,
1842 	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
1843 	    &mem_entry,
1844 	    MACH_PORT_NULL);
1845 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1846 	if (kr == KERN_PROTECTION_FAILURE) {
1847 		/* wrong but not a security issue... */
1848 		goto skip_mem_entry_vm_share_rw;
1849 	}
1850 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(VM_SHARE)");
1851 	T_QUIET; T_ASSERT_EQ(remap_size, size, "mem_entry(VM_SHARE) should cover whole mapping");
1852 //	T_LOG("AFTER MAKE_MEM_ENTRY(VM_SHARE) 0x%llx...", address); fflush(stdout); fflush(stderr); getchar();
1853 	remap_address = 0;
1854 	kr = mach_vm_map(mach_task_self(),
1855 	    &remap_address,
1856 	    remap_size,
1857 	    0,              /* mask */
1858 	    VM_FLAGS_ANYWHERE,
1859 	    mem_entry,
1860 	    0,              /* offset */
1861 	    FALSE,              /* copy */
1862 	    VM_PROT_READ | VM_PROT_WRITE,
1863 	    VM_PROT_READ | VM_PROT_WRITE,
1864 	    VM_INHERIT_DEFAULT);
1865 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1866 	remap = *(uint32_t *)(uintptr_t)remap_address;
1867 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1868 //	T_LOG("AFTER VM_MAP 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1869 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1870 //	T_LOG("AFTER WRITE 0x%llx...", remap_address); fflush(stdout); fflush(stderr); getchar();
1871 	after = *(uint32_t *)(uintptr_t)address;
1872 	T_LOG("mem_entry(VM_SHARE): 0x%llx 0x%x -> 0x%x", address, before, after);
1873 	*(uint32_t *)(uintptr_t)remap_address = before;
1874 	if (before != after) {
1875 		T_FAIL("mem_entry(VM_SHARE) bypassed copy-on-write");
1876 	} else {
1877 		T_PASS("mem_entry(VM_SHARE) did not bypass copy-on-write");
1878 	}
1879 	/* check that region is still nested */
1880 	tmp_address = address;
1881 	tmp_size = 0;
1882 	depth = 99;
1883 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1884 	kr = mach_vm_region_recurse(mach_task_self(),
1885 	    &tmp_address,
1886 	    &tmp_size,
1887 	    &depth,
1888 	    (vm_region_recurse_info_t)&info,
1889 	    &count);
1890 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1891 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1892 	    tmp_address, tmp_address + tmp_size, depth,
1893 	    prot_str[info.protection],
1894 	    prot_str[info.max_protection],
1895 	    share_mode_str[info.share_mode],
1896 	    info.object_id);
1897 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1898 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1899 	T_QUIET; T_ASSERT_GT(depth, 0, "still nested");
1900 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1901 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1902 	/* cleanup */
1903 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1904 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1905 	mach_port_deallocate(mach_task_self(), mem_entry);
1906 skip_mem_entry_vm_share_rw:
1907 
1908 	/* test mach_make_memory_entry_64() of RW */
1909 	before = *(uint32_t *)(uintptr_t)address;
1910 	remap_size = size;
1911 	mem_entry = MACH_PORT_NULL;
1912 	kr = mach_make_memory_entry_64(mach_task_self(),
1913 	    &remap_size,
1914 	    address,
1915 	    VM_PROT_READ | VM_PROT_WRITE,
1916 	    &mem_entry,
1917 	    MACH_PORT_NULL);
1918 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64()");
1919 	remap_address = 0;
1920 	kr = mach_vm_map(mach_task_self(),
1921 	    &remap_address,
1922 	    remap_size,
1923 	    0,              /* mask */
1924 	    VM_FLAGS_ANYWHERE,
1925 	    mem_entry,
1926 	    0,              /* offset */
1927 	    FALSE,              /* copy */
1928 	    VM_PROT_READ | VM_PROT_WRITE,
1929 	    VM_PROT_READ | VM_PROT_WRITE,
1930 	    VM_INHERIT_DEFAULT);
1931 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
1932 	remap = *(uint32_t *)(uintptr_t)remap_address;
1933 	T_QUIET; T_ASSERT_EQ(remap, before, "remap matches original");
1934 	*(uint32_t *)(uintptr_t)remap_address = before + 1;
1935 	after = *(uint32_t *)(uintptr_t)address;
1936 	T_LOG("mem_entry(): 0x%llx 0x%x -> 0x%x", address, before, after);
1937 	*(uint32_t *)(uintptr_t)remap_address = before;
1938 	/* check that region is no longer nested */
1939 	tmp_address = address;
1940 	tmp_size = 0;
1941 	depth = 99;
1942 	count = VM_REGION_SUBMAP_INFO_COUNT_64;
1943 	kr = mach_vm_region_recurse(mach_task_self(),
1944 	    &tmp_address,
1945 	    &tmp_size,
1946 	    &depth,
1947 	    (vm_region_recurse_info_t)&info,
1948 	    &count);
1949 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse()");
1950 	T_LOG("0x%llx - 0x%llx depth:%d %s/%s %s 0x%x",
1951 	    tmp_address, tmp_address + tmp_size, depth,
1952 	    prot_str[info.protection],
1953 	    prot_str[info.max_protection],
1954 	    share_mode_str[info.share_mode],
1955 	    info.object_id);
1956 	if (before != after) {
1957 		if (depth == 0) {
1958 			T_PASS("mem_entry() honored copy-on-write");
1959 		} else {
1960 			T_FAIL("mem_entry() did not trigger copy-on_write");
1961 		}
1962 	} else {
1963 		T_FAIL("mem_entry() did not honor copy-on-write");
1964 	}
1965 	T_QUIET; T_ASSERT_EQ(tmp_address, address, "address hasn't changed");
1966 //	T_QUIET; T_ASSERT_EQ(tmp_size, size, "size hasn't changed");
1967 	T_QUIET; T_ASSERT_EQ(depth, 0, "no longer nested");
1968 	T_QUIET; T_ASSERT_EQ(info.protection, VM_PROT_DEFAULT, "cur_prot still writable");
1969 	T_QUIET; T_ASSERT_EQ((info.max_protection & VM_PROT_WRITE), VM_PROT_WRITE, "max_prot still writable");
1970 	/* cleanup */
1971 	kr = mach_vm_deallocate(mach_task_self(), remap_address, remap_size);
1972 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "vm_deallocate()");
1973 	mach_port_deallocate(mach_task_self(), mem_entry);
1974 }
1975 
1976 T_DECL(copyoverwrite_submap_protection, "test copywrite vm region submap \
1977     protection", T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
1978 {
1979 	kern_return_t           kr;
1980 	mach_vm_address_t       vmaddr;
1981 	mach_vm_size_t          vmsize;
1982 	natural_t               depth;
1983 	vm_region_submap_short_info_data_64_t region_info;
1984 	mach_msg_type_number_t  region_info_count;
1985 
1986 	for (vmaddr = SHARED_REGION_BASE;
1987 	    vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
1988 	    vmaddr += vmsize) {
1989 		depth = 99;
1990 		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1991 		kr = mach_vm_region_recurse(mach_task_self(),
1992 		    &vmaddr,
1993 		    &vmsize,
1994 		    &depth,
1995 		    (vm_region_info_t) &region_info,
1996 		    &region_info_count);
1997 		if (kr == KERN_INVALID_ADDRESS) {
1998 			break;
1999 		}
2000 		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
2001 		T_ASSERT_EQ(region_info_count,
2002 		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
2003 		    "vm_region_recurse(0x%llx) count = %d expected %d",
2004 		    vmaddr, region_info_count,
2005 		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
2006 
2007 		T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
2008 		    vmaddr, depth, region_info.protection,
2009 		    region_info.max_protection);
2010 		if (depth == 0) {
2011 			/* not a submap mapping: next mapping */
2012 			continue;
2013 		}
2014 		if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
2015 			break;
2016 		}
2017 		kr = mach_vm_copy(mach_task_self(),
2018 		    vmaddr,
2019 		    vmsize,
2020 		    vmaddr);
2021 		if (kr == KERN_PROTECTION_FAILURE ||
2022 		    kr == KERN_INVALID_ADDRESS) {
2023 			T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
2024 			    vmaddr, vmsize, kr, mach_error_string(kr));
2025 			continue;
2026 		}
2027 		T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
2028 		    vmaddr, vmsize, region_info.protection);
2029 		depth = 0;
2030 		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
2031 		kr = mach_vm_region_recurse(mach_task_self(),
2032 		    &vmaddr,
2033 		    &vmsize,
2034 		    &depth,
2035 		    (vm_region_info_t) &region_info,
2036 		    &region_info_count);
2037 		T_ASSERT_MACH_SUCCESS(kr, "m_region_recurse(0x%llx)", vmaddr);
2038 		T_ASSERT_EQ(region_info_count,
2039 		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
2040 		    "vm_region_recurse() count = %d expected %d",
2041 		    region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
2042 
2043 		T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
2044 		    vmaddr, depth);
2045 		T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
2046 		    0, "vm_region_recurse(0x%llx): prot 0x%x",
2047 		    vmaddr, region_info.protection);
2048 	}
2049 }
2050 
T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
    the shared region triggers code-signing violations",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	uint32_t *addr, before, after;
	int retval;
	int saved_errno;
	kern_return_t kr;
	vm_address_t map_addr, remap_addr;
	vm_prot_t curprot, maxprot;

	/*
	 * Try to wire (mlock) executable text — twice in the shared region
	 * (printf, fprintf) and once in the test binary itself.  Wiring must
	 * either fail with EPERM or succeed without altering the instruction
	 * words; any change to the text would indicate a copy-on-write /
	 * code-signing violation.
	 */
	addr = (uint32_t *)&printf;
#if __has_feature(ptrauth_calls)
	/* strip the pointer-authentication signature to get the raw address */
	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
	map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
	remap_addr = 0;
	/*
	 * NOTE(review): the remapping is done with copy=FALSE before wiring —
	 * presumably to ensure the text page is shared at wiring time; confirm
	 * against the original rdar://16783546 scenario.
	 */
	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
	    0,           /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(), map_addr,
	    FALSE,           /* copy */
	    &curprot, &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
	    kr, mach_error_string(kr));
	/* snapshot an instruction word before and after wiring */
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		/* save errno immediately: later calls may clobber it */
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	/* same check on a second shared-region text address */
	addr = (uint32_t *) &fprintf;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EPERM);
	} else if (after != before) {
		T_ASSERT_FAIL("shared text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire shared text");
	}

	/*
	 * Finally, wire text belonging to this test binary itself.
	 * NOTE(review): testmain_wire_text is defined elsewhere in the test
	 * bundle — presumably a function in the main executable's __TEXT.
	 */
	addr = (uint32_t *) &testmain_wire_text;
	before = *addr;
	retval = mlock(addr, 4096);
	after = *addr;
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EPERM, "wire text error return error %d (%s)",
		    saved_errno, strerror(saved_errno));
	} else if (after != before) {
		T_ASSERT_FAIL("text changed by wiring at %p 0x%x -> 0x%x", (void *)addr, before, after);
	} else {
		T_PASS("wire text");
	}
}
2119 
2120 T_DECL(remap_comm_page, "test remapping of the commpage - rdar://93177124",
2121     T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
2122 {
2123 	kern_return_t           kr;
2124 	mach_vm_address_t       commpage_addr, remap_addr;
2125 	mach_vm_size_t          vmsize;
2126 	vm_prot_t               curprot, maxprot;
2127 
2128 #if __arm__
2129 	commpage_addr = 0xFFFF4000ULL;
2130 #elif __arm64__
2131 	commpage_addr = 0x0000000FFFFFC000ULL;
2132 #elif __x86_64__
2133 	commpage_addr = 0x00007FFFFFE00000ULL;
2134 #else
2135 	T_FAIL("unknown commpage address for this architecture");
2136 #endif
2137 
2138 	T_LOG("Remapping commpage from 0x%llx", commpage_addr);
2139 	vmsize = vm_kernel_page_size;
2140 	remap_addr = 0;
2141 	kr = mach_vm_remap(mach_task_self(),
2142 	    &remap_addr,
2143 	    vmsize,
2144 	    0, /* mask */
2145 	    VM_FLAGS_ANYWHERE,
2146 	    mach_task_self(),
2147 	    commpage_addr,
2148 	    TRUE, /* copy */
2149 	    &curprot,
2150 	    &maxprot,
2151 	    VM_INHERIT_DEFAULT);
2152 	if (kr == KERN_INVALID_ADDRESS) {
2153 		T_SKIP("No mapping found at 0x%llx\n", commpage_addr);
2154 		return;
2155 	}
2156 	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() of commpage from 0x%llx", commpage_addr);
2157 }
2158 
2159 /* rdar://132439059 */
2160 T_DECL(mach_vm_remap_new_task_read_port,
2161     "Ensure shared, writable mappings cannot be created with a process's task read port using mach_vm_remap_new",
2162     T_META_TAG_VM_PREFERRED,
2163     T_META_RUN_CONCURRENTLY(true))
2164 {
2165 	mach_vm_address_t private_data = 0;
2166 	pid_t pid = -1;
2167 	int fds[2];
2168 	uint32_t depth = 9999;
2169 	mach_vm_size_t size = PAGE_SIZE;
2170 	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
2171 	vm_region_submap_info_data_64_t info;
2172 	kern_return_t kr = KERN_FAILURE;
2173 	int ret = -1;
2174 
2175 	kr = mach_vm_allocate(mach_task_self(), &private_data, size, VM_FLAGS_ANYWHERE);
2176 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_allocate");
2177 
2178 	ret = pipe(fds);
2179 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pipe");
2180 
2181 	pid = fork();
2182 	T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "fork");
2183 
2184 	if (pid == 0) {
2185 		char data[2];
2186 		ssize_t nbytes_read = -1;
2187 
2188 		/* Close write end of the pipe */
2189 		ret = close(fds[1]);
2190 		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "child: close write end");
2191 
2192 		/* Check that the permissions are VM_PROT_DEFAULT/VM_PROT_ALL */
2193 		kr = mach_vm_region_recurse(mach_task_self(),
2194 		    &private_data,
2195 		    &size,
2196 		    &depth,
2197 		    (vm_region_recurse_info_t)&info,
2198 		    &count);
2199 		T_ASSERT_MACH_SUCCESS(kr, "child: mach_vm_region_recurse");
2200 		T_EXPECT_EQ_INT(info.protection, VM_PROT_DEFAULT, "child: current protection is VM_PROT_DEFAULT");
2201 		T_EXPECT_EQ_INT(info.max_protection, VM_PROT_ALL, "child: maximum protextion is VM_PROT_ALL");
2202 
2203 		/* The child tries to read data from the pipe (that never comes) */
2204 		nbytes_read = read(fds[0], data, 2);
2205 		T_QUIET; T_EXPECT_EQ_LONG(nbytes_read, 0L, "child: read 0 bytes");
2206 
2207 		exit(0);
2208 	} else {
2209 		mach_port_t read_port = MACH_PORT_NULL;
2210 		mach_vm_address_t remap_addr = 0;
2211 		int status;
2212 
2213 		/* Close read end of the pipe */
2214 		ret = close(fds[0]);
2215 		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "close read end");
2216 
2217 		/* Get a read port */
2218 		ret = task_read_for_pid(mach_task_self(), pid, &read_port);
2219 		T_ASSERT_POSIX_SUCCESS(ret, "parent: task_read_for_pid");
2220 
2221 		/* Make a shared mapping with the child's data */
2222 		vm_prot_t cur_prot = VM_PROT_NONE;
2223 		vm_prot_t max_prot = VM_PROT_NONE;
2224 		kr = mach_vm_remap_new(
2225 			mach_task_self(),
2226 			&remap_addr,
2227 			size,
2228 			0, /* mask */
2229 			VM_FLAGS_ANYWHERE,
2230 			read_port,
2231 			private_data,
2232 			FALSE, /* copy */
2233 			&cur_prot,
2234 			&max_prot,
2235 			VM_INHERIT_DEFAULT);
2236 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "parent: mach_vm_remap_new");
2237 
2238 		/* Check that permissions of the remapped region are VM_PROT_NONE */
2239 		kr = mach_vm_region_recurse(mach_task_self(),
2240 		    &remap_addr,
2241 		    &size,
2242 		    &depth,
2243 		    (vm_region_recurse_info_t)&info,
2244 		    &count);
2245 		T_ASSERT_MACH_SUCCESS(kr, "parent: mach_vm_region_recurse");
2246 		T_EXPECT_EQ_INT(info.protection, VM_PROT_NONE, "parent: current protection is VM_PROT_NONE");
2247 		T_EXPECT_EQ_INT(info.max_protection, VM_PROT_NONE, "parent: maximum protextion is VM_PROT_NONE");
2248 
2249 		/* Tell the child it is done and can exit. */
2250 		ret = close(fds[1]);
2251 		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "parent: close write end");
2252 
2253 		/* Clean up the child */
2254 		ret = waitpid(pid, &status, 0);
2255 		T_EXPECT_EQ_INT(ret, pid, "waitpid: child was stopped or terminated");
2256 	}
2257 }
2258 
2259 
2260 static void
zsm_vm_map(size_t size,int flags,mem_entry_name_port_t port,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance,mach_vm_address_t * out_addr,size_t * out_size)2261 zsm_vm_map(size_t size,
2262     int flags,
2263     mem_entry_name_port_t port,
2264     memory_object_offset_t offset,
2265     boolean_t copy,
2266     vm_prot_t cur_protection,
2267     vm_prot_t max_protection,
2268     vm_inherit_t inheritance,
2269     mach_vm_address_t *out_addr,
2270     size_t *out_size
2271     )
2272 {
2273 	mach_vm_address_t addr_info = 0;
2274 	if (!(flags & VM_FLAGS_ANYWHERE)) {
2275 		flags |= VM_FLAGS_ANYWHERE;
2276 	}
2277 
2278 	cur_protection &= max_protection;
2279 	kern_return_t kr = mach_vm_map(mach_task_self(), &addr_info, size, 0, flags, port, offset,
2280 	    copy, cur_protection, max_protection, inheritance);
2281 	T_ASSERT_MACH_SUCCESS(kr, "mach_vm_map");
2282 	T_LOG("mapped memory at %llx", addr_info);
2283 
2284 	*out_addr = addr_info;
2285 	*out_size = size;
2286 }
2287 
2288 static mem_entry_name_port_t
zsm_vm_mach_make_memory_entry(mach_vm_address_t addr,size_t size,int flags,mem_entry_name_port_t parent,kern_return_t expected_kr,bool discard)2289 zsm_vm_mach_make_memory_entry(mach_vm_address_t addr, size_t size, int flags,
2290     mem_entry_name_port_t parent,
2291     kern_return_t expected_kr, bool discard)
2292 {
2293 	T_LOG("making memory entry for addr=%llx  size=%zx", addr, size);
2294 	mem_entry_name_port_t port = 0;
2295 	kern_return_t kr = mach_make_memory_entry(mach_task_self(), &size, addr, flags, &port, parent);
2296 	T_ASSERT_EQ(kr, expected_kr, "mach_make_memory_entry expected return %d", kr);
2297 	if (kr == KERN_SUCCESS) {
2298 		T_ASSERT_NE(port, 0, "got non zero port");
2299 		if (discard) {
2300 			mach_port_deallocate(mach_task_self(), port);
2301 			port = 0;
2302 		}
2303 	}
2304 	return port;
2305 }
2306 
2307 T_DECL(memory_entry_zero_sized,
2308     "Test that creating a zero-sized memory-entry with parent fails correctly")
2309 {
2310 	mach_vm_address_t addr = 0;
2311 	size_t size = 0;
2312 	kern_return_t kr;
2313 	zsm_vm_map(0xa7c000,
2314 	    0,        /* flags */
2315 	    0,        /* port */
2316 	    0,        /* offset */
2317 	    0,        /* copy */
2318 	    VM_PROT_EXECUTE, VM_PROT_EXECUTE,
2319 	    0x1,         /* inheritance */
2320 	    &addr, &size);
2321 	mem_entry_name_port_t parent_entry = zsm_vm_mach_make_memory_entry(addr, size, 0, 0, KERN_SUCCESS, false);
2322 
2323 	zsm_vm_mach_make_memory_entry(0, 0, 0, parent_entry, KERN_INVALID_ARGUMENT, true);
2324 	zsm_vm_mach_make_memory_entry(0, 1, 0, parent_entry, KERN_SUCCESS, true);
2325 	zsm_vm_mach_make_memory_entry(1, 0, 0, parent_entry, KERN_SUCCESS, true);
2326 
2327 	zsm_vm_mach_make_memory_entry(PAGE_SIZE, 0, 0, parent_entry, KERN_INVALID_ARGUMENT, true);
2328 	zsm_vm_mach_make_memory_entry(PAGE_SIZE, 1, 0, parent_entry, KERN_SUCCESS, true);
2329 	zsm_vm_mach_make_memory_entry(PAGE_SIZE + 1, 0, 0, parent_entry, KERN_SUCCESS, true);
2330 
2331 	kr = mach_port_deallocate(mach_task_self(), parent_entry);
2332 	T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate");
2333 	kr = mach_vm_deallocate(mach_task_self(), addr, size);
2334 	T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate");
2335 }
2336 
T_DECL(memory_entry_null_obj,
    "Test creating a memory-entry with null vm_object")
{
	mach_vm_address_t addr = 0;
	size_t size = 0;
	kern_return_t kr = 0;
	uint8_t value = 0;

	// create an allocation with vm_object == NULL
	// (PROT_NONE/PROT_NONE, never touched, so no pages are ever faulted in)
	zsm_vm_map(0x604000, /* size */
	    0,           /* flags */
	    0,           /* port */
	    0,           /* offset */
	    TRUE,         /* copy */
	    VM_PROT_NONE, VM_PROT_NONE,
	    0x0,         /* inheritance */
	    &addr, &size);

	// verify it's NONE
	bool read_success = try_read_byte(addr, &value, &kr);
	T_ASSERT_FALSE(read_success, "can't read from NONE address");
	bool write_succeded = try_write_byte(addr, 42, &kr);
	T_ASSERT_FALSE(write_succeded, "can't write to NONE address");

	// size 0 entry of the allocated memory - should fail
	zsm_vm_mach_make_memory_entry(addr, /*size=*/ 0, /*flags=*/ 0x0, /*parent=*/ 0x0, KERN_INVALID_ARGUMENT, true);

	// trying to get a 'copy' entry of a PROT_NONE entry
	zsm_vm_mach_make_memory_entry(addr, size, /*flags=*/ 0x0, /*parent=*/ 0x0, KERN_PROTECTION_FAILURE, true);

	// get a 'share' entry of a PROT_NONE entry and remap it
	mem_entry_name_port_t np = zsm_vm_mach_make_memory_entry(addr, size, MAP_MEM_VM_SHARE, 0x0, KERN_SUCCESS, false);

	mach_vm_address_t m_addr = 0;
	size_t m_size = 0;
	zsm_vm_map(size,
	    0,            /* flags */
	    np,
	    0,            /* offset */
	    FALSE,        /* copy */
	    VM_PROT_NONE, VM_PROT_NONE,
	    0x0,                         /* inheritance */
	    &m_addr, &m_size);

	// try to access remapped area: must be just as inaccessible
	read_success = try_read_byte(m_addr, &value, &kr);
	T_ASSERT_FALSE(read_success, "can't read from NONE address");
	write_succeded = try_write_byte(m_addr, 42, &kr);
	T_ASSERT_FALSE(write_succeded, "can't write to NONE address");

	// cleanup: entry port, original allocation, and the remapping
	kr = mach_port_deallocate(mach_task_self(), np);
	T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate");
	kr = mach_vm_deallocate(mach_task_self(), addr, size);
	T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate");
	kr = mach_vm_deallocate(mach_task_self(), m_addr, m_size);
	T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate mapped");
}
2394 
2395 #if __arm64e__
2396 #define TARGET_CPU_ARM64E true
2397 #else
2398 #define TARGET_CPU_ARM64E false
2399 #endif
2400 
T_DECL(vm_region_recurse_tpro_info,
    "Ensure metadata returned by vm_region_recurse correct reflects TPRO status",
    T_META_ENABLED(TARGET_CPU_ARM64E),
    XNU_T_META_SOC_SPECIFIC,
    T_META_ASROOT(true))
{
	T_SETUPBEGIN;

	/* First things first, do nothing unless we're TPRO enabled */
	/* NOTE(review): os_security_config_get() comes from the conditionally
	 * included <os/security_config_private.h> at the top of this file —
	 * this test presumably only builds where that header exists. */
	if (!(os_security_config_get() & OS_SECURITY_CONFIG_TPRO)) {
		T_SKIP("Skipping because we're not running under TPRO");
		return;
	}

	/* Given an allocation from dyld's heap */
	/* (assumes dyld's image-name string lives in a TPRO mapping — TODO confirm) */
	const char* tpro_allocation = _dyld_get_image_name(0);

	/* And an allocation from our own heap (which is not TPRO) */
	mach_vm_address_t non_tpro_allocation;
	mach_vm_size_t alloc_size = PAGE_SIZE;
	kern_return_t kr = mach_vm_allocate(
		mach_task_self(),
		&non_tpro_allocation,
		alloc_size,
		VM_FLAGS_ANYWHERE );
	T_ASSERT_MACH_SUCCESS(kr, "Allocated non-TPRO region");
	/* (And write to it to be sure we populate a VM object) */
	memset((uint8_t*)non_tpro_allocation, 0, alloc_size);

	T_SETUPEND;

	/* When we query the attributes of the region covering the TPRO-enabled buffer */
	mach_vm_address_t addr = (mach_vm_address_t)tpro_allocation;
	mach_vm_size_t addr_size = 16;
	uint32_t nesting_depth = UINT_MAX;
	/* note: V2 count requests the flags field used below */
	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_V2_COUNT_64;
	vm_region_submap_info_data_64_t region_info;
	kr = vm_region_recurse_64(mach_task_self(), (vm_address_t*)&addr, (vm_size_t*)&addr_size, &nesting_depth, (vm_region_recurse_info_t)&region_info, &count);

	/* Then our metadata confirms that the region contains a TPRO entry */
	T_ASSERT_MACH_SUCCESS(kr, "Query TPRO-enabled region");
	T_ASSERT_TRUE(region_info.flags & VM_REGION_FLAG_TPRO_ENABLED, "Expected metadata to reflect a TPRO entry");

	/* And when we query the same thing via the 'short' info */
	addr = (mach_vm_address_t)tpro_allocation;
	addr_size = alloc_size;
	nesting_depth = UINT_MAX;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	vm_region_submap_short_info_data_64_t short_info;
	kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&short_info, &count);

	/* Then the short metadata also confirms that the region contains a TPRO entry */
	T_ASSERT_MACH_SUCCESS(kr, "Query TPRO-enabled region");
	T_ASSERT_TRUE(short_info.flags & VM_REGION_FLAG_TPRO_ENABLED, "Expected metadata to reflect a TPRO entry");

	/* And when we query the attributes of the region covering the non-TPRO allocation */
	addr = non_tpro_allocation;
	addr_size = alloc_size;
	nesting_depth = UINT_MAX;
	count = VM_REGION_SUBMAP_INFO_V2_COUNT_64;
	/* zero the struct so a stale flag can't produce a false positive */
	memset(&region_info, 0, sizeof(region_info));
	kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&region_info, &count);

	/* Then our metadata confirm that the region does not contain a TPRO entry */
	T_ASSERT_MACH_SUCCESS(kr, "Query non-TPRO region");
	T_ASSERT_FALSE(region_info.flags & VM_REGION_FLAG_TPRO_ENABLED, "Expected metadata to reflect no TPRO entry");

	/* And when we query the same thing via the 'short' info */
	addr = non_tpro_allocation;
	addr_size = alloc_size;
	nesting_depth = UINT_MAX;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	memset(&short_info, 0, sizeof(short_info));
	kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&short_info, &count);

	/* Then the short metadata also confirms that the region does not contain a TPRO entry */
	T_ASSERT_MACH_SUCCESS(kr, "Query non-TPRO region");
	T_ASSERT_FALSE(short_info.flags & VM_REGION_FLAG_TPRO_ENABLED, "Expected metadata to reflect no TPRO entry");

	/* Cleanup */
	kr = mach_vm_deallocate(mach_task_self(), non_tpro_allocation, alloc_size);
	T_ASSERT_MACH_SUCCESS(kr, "deallocate memory");
}
2484 
2485 T_DECL(vm_region_recurse_jit_info,
2486     "Ensure metadata returned by vm_region_recurse correct reflects JIT status",
2487     XNU_T_META_SOC_SPECIFIC,
2488     /* Only attempt to run on macOS so we don't need to worry about JIT policy */
2489     T_META_ENABLED(TARGET_OS_OSX),
2490     T_META_ASROOT(true))
2491 {
2492 	T_SETUPBEGIN;
2493 
2494 	/* Given a JIT region */
2495 	mach_vm_size_t alloc_size = PAGE_SIZE * 4;
2496 	void* jit_region = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0);
2497 	T_ASSERT_NE_PTR(jit_region, MAP_FAILED, "MAP_JIT");
2498 
2499 	/* And a non-JIT region */
2500 	mach_vm_address_t non_jit_allocation;
2501 	kern_return_t kr = mach_vm_allocate(
2502 		mach_task_self(),
2503 		&non_jit_allocation,
2504 		alloc_size,
2505 		VM_FLAGS_ANYWHERE);
2506 	T_ASSERT_MACH_SUCCESS(kr, "Allocated non-JIT region");
2507 	/* (And write to it to be sure we populate a VM object) */
2508 	memset((uint8_t*)non_jit_allocation, 0, alloc_size);
2509 
2510 	T_SETUPEND;
2511 
2512 	/* When we query the attributes of the region covering the JIT-enabled buffer */
2513 	mach_vm_address_t addr = (mach_vm_address_t)jit_region;
2514 	mach_vm_size_t addr_size = alloc_size;
2515 	uint32_t nesting_depth = UINT_MAX;
2516 	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_V2_COUNT_64;
2517 	vm_region_submap_info_data_64_t region_info;
2518 	kr = vm_region_recurse_64(mach_task_self(), (vm_address_t*)&addr, (vm_size_t*)&addr_size, &nesting_depth, (vm_region_recurse_info_t)&region_info, &count);
2519 
2520 	/* Then our metadata confirms that the region contains a JIT entry */
2521 	T_ASSERT_MACH_SUCCESS(kr, "Query JIT-enabled region");
2522 	T_ASSERT_TRUE(region_info.flags & VM_REGION_FLAG_JIT_ENABLED, "Expected metadata to reflect a JIT entry");
2523 
2524 	/* And when we query the same thing via the 'short' info */
2525 	addr = (mach_vm_address_t)jit_region;
2526 	addr_size = alloc_size;
2527 	nesting_depth = UINT_MAX;
2528 	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
2529 	vm_region_submap_short_info_data_64_t short_info;
2530 	kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&short_info, &count);
2531 
2532 	/* Then the short metadata also confirms that the region contains a JIT entry */
2533 	T_ASSERT_MACH_SUCCESS(kr, "Query JIT-enabled region");
2534 	T_ASSERT_TRUE(short_info.flags & VM_REGION_FLAG_JIT_ENABLED, "Expected metadata to reflect a JIT entry");
2535 
2536 	/* And when we query the attributes of the region covering the non-JIT allocation */
2537 	addr = non_jit_allocation;
2538 	addr_size = alloc_size;
2539 	nesting_depth = UINT_MAX;
2540 	count = VM_REGION_SUBMAP_INFO_V2_COUNT_64;
2541 	memset(&region_info, 0, sizeof(region_info));
2542 	kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&region_info, &count);
2543 
2544 	/* Then our metadata confirm that the region does not contain a JIT entry */
2545 	T_ASSERT_MACH_SUCCESS(kr, "Query non-JIT region");
2546 	T_ASSERT_FALSE(region_info.flags & VM_REGION_FLAG_JIT_ENABLED, "Expected metadata to reflect no JIT entry");
2547 
2548 	/* And when we query the same thing via the 'short' info */
2549 	addr = non_jit_allocation;
2550 	addr_size = alloc_size;
2551 	nesting_depth = UINT_MAX;
2552 	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
2553 	memset(&short_info, 0, sizeof(short_info));
2554 	kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&short_info, &count);
2555 
2556 	/* Then the short metadata also confirms that the region does not contain a JIT entry */
2557 	T_ASSERT_MACH_SUCCESS(kr, "Query non-JIT region");
2558 	T_ASSERT_FALSE(short_info.flags & VM_REGION_FLAG_JIT_ENABLED, "Expected metadata to reflect no JIT entry");
2559 
2560 	/* Cleanup */
2561 	kr = mach_vm_deallocate(mach_task_self(), non_jit_allocation, alloc_size);
2562 	T_ASSERT_MACH_SUCCESS(kr, "deallocate memory");
2563 }
2564