xref: /xnu-11417.101.15/tests/vm/vm_parameter_validation.c (revision e3723e1f17661b24996789d8afc084c0c3303b26)
1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3 #include <test_utils.h>
4 
5 #include <sys/types.h>
6 #include <sys/sysctl.h>
7 #include <mach/mach.h>
8 #include <mach/mach_vm.h>
9 #include <mach/memory_entry.h>
10 #include <mach/shared_region.h>
11 #include <mach/vm_reclaim.h>
12 #include <mach/vm_types.h>
13 #include <sys/mman.h>
14 #include <unistd.h>
15 #include <TargetConditionals.h>
16 #include <mach-o/dyld.h>
17 #include <libgen.h>
18 
19 #include <os/bsd.h> // For os_parse_boot_arg_int
20 
21 // workarounds for buggy MIG declarations
22 // see tests/vm/vm_parameter_validation_replacement_*.defs
23 // and tests/Makefile for details
24 #include "vm_parameter_validation_replacement_mach_host.h"
25 #include "vm_parameter_validation_replacement_host_priv.h"
26 
27 // code shared with kernel/kext tests
28 #include "../../osfmk/tests/vm_parameter_validation.h"
29 
30 #define GOLDEN_FILES_VERSION "vm_parameter_validation_golden_images_edeef315.tar.xz"
31 #define GOLDEN_FILES_ASSET_FILE_POINTER GOLDEN_FILES_VERSION
32 
33 T_GLOBAL_META(
34 	T_META_NAMESPACE("xnu.vm"),
35 	T_META_RADAR_COMPONENT_NAME("xnu"),
36 	T_META_RADAR_COMPONENT_VERSION("VM"),
37 	T_META_S3_ASSET(GOLDEN_FILES_ASSET_FILE_POINTER),
38 	T_META_ASROOT(true),  /* required for vm_wire tests on macOS */
39 	T_META_RUN_CONCURRENTLY(false), /* vm_parameter_validation_kern uses kernel globals */
40 	T_META_ALL_VALID_ARCHS(true),
41 	XNU_T_META_REQUIRES_DEVELOPMENT_KERNEL
42 	);
43 
44 /*
45  * vm_parameter_validation.c
46  * Test parameter validation of vm's userspace API
47  *
48  * The test compares the return values against a 'golden' list, which is a text
49  * file previously generated and compressed in .xz files, per platform.
50  * When vm_parameter_validation runs, it calls assets/vm_parameter_validation/decompress.sh,
51  * which detects the platform and decompresses the corresponding user and kern
52  * golden files.
53  *
54  * Any return code mismatch is reported as a failure, printing test name and iteration.
55  * New tests not present in the 'golden' list will run but they are also reported as a failure.
56  *
 * There are two environment variable flags that make development work easier and
58  * can temporarily disable golden list testing.
59  *
60  * SKIP_TESTS
61  * When running with SKIP_TESTS set, the test will not compare the results
62  * against the golden files.
63  *
64  * DUMP_RESULTS
65  * When running with DUMP_RESULTS set, the test will print all the returned values
66  * (as opposed to only the failing ones). To pretty-print this output use the python script:
67  * DUMP_RESULTS=1 vm_parameter_validation | tools/format_vm_parameter_validation.py
68  */
69 
70 
71 
72 /*
73  * xnu/libsyscall/mach/mach_vm.c intercepts some VM calls from userspace,
74  * sometimes doing something other than the expected MIG call.
75  * This test generates its own MIG userspace call sites to call the kernel
76  * entrypoints directly, bypassing libsyscall's interference.
77  *
78  * The custom MIG call sites are generated into:
79  * vm_parameter_validation_vm_map_user.c
80  * vm_parameter_validation_mach_vm_user.c
81  */
82 
83 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
84 #pragma clang diagnostic ignored "-Wmissing-prototypes"
85 #pragma clang diagnostic ignored "-Wpedantic"
86 
87 /*
88  * Our wire tests often try to wire the whole address space.
89  * In that case the error code is determined by the first range of addresses
90  * that cannot be wired.
91  * In most cases that is a protection failure on a malloc guard page. But
92  * sometimes, circumstances outside of our control change the address map of
93  * our test process and add holes, which means we get a bad address error
94  * instead, and the test fails because the return code doesn't match what's
95  * recorded in the golden files.
96  * To avoid this, we want to keep a guard page inside our data section.
97  * Because that data section is one of the first things in our address space,
98  * the behavior of wire is (more) predictable.
99  */
100 static _Alignas(KB16) char guard_page[KB16];
101 
102 static void
set_up_guard_page(void)103 set_up_guard_page(void)
104 {
105 	/*
106 	 * Ensure that _Alignas worked as expected.
107 	 */
108 	assert(0 == (((mach_vm_address_t)guard_page) & PAGE_MASK));
109 	/*
110 	 * Remove all permissions on guard_page such that it is a guard page.
111 	 */
112 	assert(0 == mprotect(guard_page, sizeof(guard_page), 0));
113 }
114 
115 // Return a file descriptor that tests can read and write.
116 // A single temporary file is shared among all tests.
// Return a file descriptor that tests can read and write.
// A single temporary file is shared among all tests; it is created lazily
// on first call and cached for the lifetime of the process.
static int
get_fd()
{
	static int fd = -1;
	if (fd > 0) {
		return fd;
	}

	char filename[] = "/tmp/vm_parameter_validation_XXXXXX";
	fd = mkstemp(filename);
	assert(fd > 2);  // not stdin/stdout/stderr
	// Unlink the name immediately: the descriptor keeps the file alive
	// for the tests, and the filesystem entry is cleaned up automatically
	// when the process exits instead of accumulating in /tmp.
	(void)unlink(filename);
	return fd;
}
130 
131 static int rosetta_dyld_fd = -1;
132 // Return a file descriptor that Rosetta dyld will accept
static int
get_dyld_fd()
{
	// Cached result from a previous call (-1 means "not yet computed";
	// 0 means "no dyld fd needed").
	if (rosetta_dyld_fd >= 0) {
		return rosetta_dyld_fd;
	}

	// Not running under Rosetta translation: no dyld fd is needed.
	if (!isRosetta()) {
		rosetta_dyld_fd = 0;
		return rosetta_dyld_fd;
	}

	// NOTE(review): in this build the Rosetta path is identical to the
	// non-Rosetta path and only ever stores 0. Presumably other
	// configurations open an actual Rosetta dyld file here (see
	// close_dyld_fd(), which expects fd > 2 under Rosetta) -- confirm
	// against the full source.
	rosetta_dyld_fd = 0;
	return rosetta_dyld_fd;
}
148 
149 // Close the Rosetta dyld fd (only one test calls this)
static void
close_dyld_fd()
{
	if (isRosetta()) {
		// Expect a real descriptor (not stdin/stdout/stderr).
		// NOTE(review): the visible get_dyld_fd() only ever stores 0,
		// which would trip this assert under Rosetta; the descriptor
		// is presumably opened in code not shown here -- verify
		// against the full source.
		assert(rosetta_dyld_fd > 2);
		if (close(rosetta_dyld_fd) != 0) {
			assert(0);
		}
		// Mark as closed so a later get_dyld_fd() recomputes it.
		rosetta_dyld_fd = -1;
	}
}
161 
162 static int
munmap_helper(void * ptr,size_t size)163 munmap_helper(void *ptr, size_t size)
164 {
165 	mach_vm_address_t start, end;
166 	if (0 != size) { // munmap rejects size == 0 even though mmap accepts it
167 		/*
168 		 * munmap expects aligned inputs, even though mmap sometimes
169 		 * returns unaligned values
170 		 */
171 		start = ((mach_vm_address_t)ptr) & ~PAGE_MASK;
172 		end = (((mach_vm_address_t)ptr) + size + PAGE_MASK) & ~PAGE_MASK;
173 		return munmap((void*)start, end - start);
174 	}
175 	return 0;
176 }
177 
178 // Some tests provoke EXC_GUARD exceptions.
179 // We disable EXC_GUARD if possible. If we can't, we disable those tests instead.
180 static bool EXC_GUARD_ENABLED = true;
181 
182 static int
call_munlock(void * start,size_t size)183 call_munlock(void *start, size_t size)
184 {
185 	int err = munlock(start, size);
186 	return err ? errno : 0;
187 }
188 
189 static int
call_mlock(void * start,size_t size)190 call_mlock(void *start, size_t size)
191 {
192 	int err = mlock(start, size);
193 	return err ? errno : 0;
194 }
195 
196 extern int __munmap(void *, size_t);
197 
198 static kern_return_t
call_munmap(MAP_T map __unused,mach_vm_address_t start,mach_vm_size_t size)199 call_munmap(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size)
200 {
201 	int err = __munmap((void*)start, (size_t)size);
202 	return err ? errno : 0;
203 }
204 
// Exercise mremap_encrypted() on [start, start+size).
// NOTE(review): CRYPTID_NO_ENCRYPTION with cputype/cpusubtype 0 is assumed
// to mean "no decryption, current CPU" based on the constant names --
// confirm against the mremap_encrypted documentation.
// Returns 0 on success or the errno from a failed call.
static int
call_mremap_encrypted(void *start, size_t size)
{
	int err = mremap_encrypted(start, size, CRYPTID_NO_ENCRYPTION, /*cputype=*/ 0, /*cpusubtype=*/ 0);
	return err ? errno : 0;
}
211 
212 /////////////////////////////////////////////////////
213 // Mach tests
214 
215 static mach_port_t
make_a_mem_object(mach_vm_size_t size)216 make_a_mem_object(mach_vm_size_t size)
217 {
218 	mach_port_t out_handle;
219 	kern_return_t kr = mach_memory_object_memory_entry_64(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
220 	assert(kr == 0);
221 	return out_handle;
222 }
223 
224 static mach_port_t
make_a_mem_entry(vm_size_t size)225 make_a_mem_entry(vm_size_t size)
226 {
227 	mach_port_t port;
228 	memory_object_size_t s = (memory_object_size_t)size;
229 	kern_return_t kr = mach_make_memory_entry_64(mach_host_self(), &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
230 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "allocate memory entry");
231 	return port;
232 }
233 
234 static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr,mach_port_t out_handle,mach_port_t saved_handle)235 check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_port_t out_handle, mach_port_t saved_handle)
236 {
237 	if (*kr != KERN_SUCCESS) {
238 		if (out_handle != (mach_port_t) saved_handle) {
239 			*kr = OUT_PARAM_BAD;
240 		}
241 	}
242 }
// mach_make_memory_entry is really several functions wearing a trenchcoat.
// Run a separate test for each variation.

// mach_make_memory_entry also has a confusing number of entrypoints:
// U64: mach_make_memory_entry_64(64) (mach_make_memory_entry is the same MIG message)
// U32: mach_make_memory_entry(32), mach_make_memory_entry_64(64), _mach_make_memory_entry(64) (each is a unique MIG message)
//
// Each generated call_FN__* wrapper below seeds the out-handle with an
// unlikely sentinel, performs the call with one specific MAP_MEM_* flavor,
// releases any port rights acquired on success, and finally verifies that a
// failed call did not scribble on the out parameter.
#define IMPL(FN, T)                                                               \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size)                      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	/* MAP_MEM_ONLY doesn't use the size. It should not change it. */         \
	                if(io_size != size) {                                     \
	                        kr = OUT_PARAM_BAD;                               \
	                }                                                         \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size)                  \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size)                         \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size)                         \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size)                   \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot)      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              prot, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}

IMPL(mach_make_memory_entry_64, mach_vm_address_t)
#if TEST_OLD_STYLE_MACH
IMPL(mach_make_memory_entry, vm_address_t)
IMPL(_mach_make_memory_entry, mach_vm_address_t)
#endif
#undef IMPL
362 
363 static inline void
364 check_mach_memory_object_memory_entry_outparam_changes(kern_return_t * kr, mach_port_t out_handle,
365     mach_port_t saved_out_handle)
366 {
367 	if (*kr != KERN_SUCCESS) {
368 		if (out_handle != saved_out_handle) {
369 			*kr = OUT_PARAM_BAD;
370 		}
371 	}
372 }
373 
// Generated wrappers for FN(host, internal=1, size, prot, pager=0, &entry):
// __size varies the size with fixed read/write protection; __vm_prot varies
// the protection. Both seed the out-entry with an unlikely sentinel and
// verify a failed call left it untouched.
#define IMPL(FN) \
	static kern_return_t                                            \
	call_ ## FN ## __size(MAP_T map __unused, mach_vm_size_t size)  \
	{                                                               \
	        kern_return_t kr;                                       \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;  \
	        mach_port_t out_entry = invalid_value;                  \
	        kr = FN(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_entry); \
	        if (kr == 0) {                                          \
	                (void)mach_port_deallocate(mach_task_self(), out_entry); \
	        }                                                       \
	        check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
	        return kr;                                              \
	}                                                               \
	static kern_return_t                                            \
	call_ ## FN ## __vm_prot(MAP_T map __unused, mach_vm_size_t size, vm_prot_t prot) \
	{                                                               \
	        kern_return_t kr;                                       \
	        mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT;  \
	        mach_port_t out_entry = invalid_value;                  \
	        kr = FN(mach_host_self(), 1, size, prot, 0, &out_entry); \
	        if (kr == 0) {                                          \
	                (void)mach_port_deallocate(mach_task_self(), out_entry); \
	        }                                                       \
	        check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
	        return kr;                                              \
	}

// The declaration of mach_memory_object_memory_entry is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://117927965
IMPL(replacement_mach_memory_object_memory_entry)
IMPL(mach_memory_object_memory_entry_64)
#undef IMPL
408 
409 static inline void
410 check_vm_read_outparam_changes(kern_return_t * kr, mach_vm_size_t size, mach_vm_size_t requested_size,
411     mach_vm_address_t addr)
412 {
413 	if (*kr == KERN_SUCCESS) {
414 		if (size != requested_size) {
415 			*kr = OUT_PARAM_BAD;
416 		}
417 		if (size == 0) {
418 			if (addr != 0) {
419 				*kr = OUT_PARAM_BAD;
420 			}
421 		}
422 	}
423 }
424 
425 
426 static kern_return_t
call_mach_vm_read(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)427 call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
428 {
429 	vm_offset_t out_addr = UNLIKELY_INITIAL_ADDRESS;
430 	mach_msg_type_number_t out_size = UNLIKELY_INITIAL_SIZE;
431 	kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
432 	if (kr == 0) {
433 		(void)mach_vm_deallocate(mach_task_self(), out_addr, out_size);
434 	}
435 	check_vm_read_outparam_changes(&kr, out_size, size, out_addr);
436 	return kr;
437 }
438 #if TEST_OLD_STYLE_MACH
// Old-style vm_read() wrapper: read [start, start+size) from map into a
// kernel-allocated buffer, release the buffer on success, and validate
// the out-parameters.
static kern_return_t
call_vm_read(MAP_T map, vm_address_t start, vm_size_t size)
{
	// Seed the out-params with unlikely sentinels to detect scribbles.
	vm_offset_t out_addr = UNLIKELY_INITIAL_ADDRESS;
	mach_msg_type_number_t out_size = UNLIKELY_INITIAL_SIZE;
	kern_return_t kr = vm_read(map, start, size, &out_addr, &out_size);
	if (kr == 0) {
		(void)mach_vm_deallocate(mach_task_self(), out_addr, out_size);
	}
	check_vm_read_outparam_changes(&kr, out_size, size, out_addr);
	return kr;
}
451 #endif
452 
453 static kern_return_t
call_mach_vm_read_list(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)454 call_mach_vm_read_list(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
455 {
456 	mach_vm_read_entry_t re = {{.address = start, .size = size}};
457 	kern_return_t kr = mach_vm_read_list(map, re, 1);
458 	if (kr == 0) {
459 		(void)mach_vm_deallocate(mach_task_self(), re[0].address, re[0].size);
460 	}
461 	return kr;
462 }
463 #if TEST_OLD_STYLE_MACH
// Old-style vm_read_list() wrapper: submit a single-entry list and, on
// success, deallocate the buffer the kernel mapped into our address space.
static kern_return_t
call_vm_read_list(MAP_T map, vm_address_t start, vm_size_t size)
{
	vm_read_entry_t re = {{.address = start, .size = size}};
	kern_return_t kr = vm_read_list(map, re, 1);
	if (kr == 0) {
		(void)mach_vm_deallocate(mach_task_self(), re[0].address, re[0].size);
	}
	return kr;
}
474 #endif
475 
476 static inline void
check_vm_read_overwrite_outparam_changes(kern_return_t * kr,mach_vm_size_t size,mach_vm_size_t requested_size)477 check_vm_read_overwrite_outparam_changes(kern_return_t * kr, mach_vm_size_t size, mach_vm_size_t requested_size)
478 {
479 	if (*kr == KERN_SUCCESS) {
480 		if (size != requested_size) {
481 			*kr = OUT_PARAM_BAD;
482 		}
483 	}
484 }
485 
// mach_vm_read_overwrite() wrappers.
// __ssz: caller controls source, destination, and size.
static kern_return_t __unused
call_mach_vm_read_overwrite__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	mach_vm_size_t out_size;
	kern_return_t kr = mach_vm_read_overwrite(map, start, size, start_2, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

// __src: caller controls the source; destination is a valid scratch allocation.
static kern_return_t
call_mach_vm_read_overwrite__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	mach_vm_size_t out_size;
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_read_overwrite(map, src, size, dst.addr, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

// __dst: caller controls the destination; source is a valid scratch allocation.
static kern_return_t
call_mach_vm_read_overwrite__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	mach_vm_size_t out_size;
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_read_overwrite(map, src.addr, size, dst, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}
514 
515 #if TEST_OLD_STYLE_MACH
// Old-style vm_read_overwrite() wrappers (32-bit types on U32).
// __ssz: caller controls source, destination, and size.
static kern_return_t __unused
call_vm_read_overwrite__ssz(MAP_T map, vm_address_t start, vm_address_t start_2, vm_size_t size)
{
	vm_size_t out_size;
	kern_return_t kr = vm_read_overwrite(map, start, size, start_2, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

// __src: caller controls the source; destination is a valid scratch allocation.
static kern_return_t
call_vm_read_overwrite__src(MAP_T map, vm_address_t src, vm_size_t size)
{
	vm_size_t out_size;
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_read_overwrite(map, src, size, (vm_address_t) dst.addr, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}

// __dst: caller controls the destination; source is a valid scratch allocation.
static kern_return_t
call_vm_read_overwrite__dst(MAP_T map, vm_address_t dst, vm_size_t size)
{
	vm_size_t out_size;
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_read_overwrite(map, (vm_address_t) src.addr, size, dst, &out_size);
	check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
	return kr;
}
544 #endif
545 
546 
547 
// mach_vm_copy() wrappers.
// __ssz: caller controls source, destination, and size.
static kern_return_t __unused
call_mach_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = mach_vm_copy(map, start, size, start_2);
	return kr;
}

// __src: caller controls the source; destination is a valid scratch allocation.
static kern_return_t
call_mach_vm_copy__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_copy(map, src, size, dst.addr);
	return kr;
}

// __dst: caller controls the destination; source is a valid scratch allocation.
static kern_return_t
call_mach_vm_copy__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_copy(map, src.addr, size, dst);
	return kr;
}
570 
571 #if TEST_OLD_STYLE_MACH
// Old-style vm_copy() wrappers (note the narrowing casts to 32-bit types on U32).
// __ssz: caller controls source, destination, and size.
static kern_return_t __unused
call_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = vm_copy(map, (vm_address_t) start, (vm_size_t) size, (vm_address_t) start_2);
	return kr;
}

// __src: caller controls the source; destination is a valid scratch allocation.
static kern_return_t
call_vm_copy__src(MAP_T map, vm_address_t src, vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_copy(map, src, size, (vm_address_t) dst.addr);
	return kr;
}

// __dst: caller controls the destination; source is a valid scratch allocation.
static kern_return_t
call_vm_copy__dst(MAP_T map, vm_address_t dst, vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_copy(map, (vm_address_t) src.addr, size, dst);
	return kr;
}
594 #endif
595 
// mach_vm_write() wrappers. The MIG interface takes the source buffer as
// (vm_offset_t, mach_msg_type_number_t), hence the casts.
// __ssz: caller controls destination, source, and size.
static kern_return_t __unused
call_mach_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = mach_vm_write(map, start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
	return kr;
}

// __src: caller controls the source buffer; destination is a scratch allocation.
static kern_return_t
call_mach_vm_write__src(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_write(map, dst.addr, (vm_offset_t) start, (mach_msg_type_number_t) size);
	return kr;
}

// __dst: caller controls the destination; source is a scratch allocation.
static kern_return_t
call_mach_vm_write__dst(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
	return kr;
}
618 
619 #if TEST_OLD_STYLE_MACH
// Old-style vm_write() wrappers.
// __ssz: caller controls destination, source, and size.
static kern_return_t __unused
call_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	kern_return_t kr = vm_write(map, (vm_address_t) start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
	return kr;
}

// __src: caller controls the source buffer; destination is a scratch allocation.
static kern_return_t
call_vm_write__src(MAP_T map, vm_address_t start, vm_size_t size)
{
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_write(map, (vm_address_t) dst.addr, start, (mach_msg_type_number_t) size);
	return kr;
}

// __dst: caller controls the destination; source is a scratch allocation.
static kern_return_t
call_vm_write__dst(MAP_T map, vm_address_t start, vm_size_t size)
{
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
	return kr;
}
642 #endif
643 
// mach_vm_wire, vm_wire (start/size)
// "wire" and "unwire" paths diverge internally; test both
// (the wire flavor passes VM_PROT_READ; the unwire flavor passes
// VM_PROT_NONE, which requests unwiring).
#define IMPL(FN, T, FLAVOR, PROT)                                       \
	static kern_return_t                                            \
	call_ ## FN ## __ ## FLAVOR(MAP_T map, T start, T size)         \
	{                                                               \
	        mach_port_t host_priv = HOST_PRIV_NULL;                 \
	        kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
	        assert(kr == 0);  /* host priv port on macOS requires entitlements or root */ \
	        kr = FN(host_priv, map, start, size, PROT);             \
	        return kr;                                              \
	}
IMPL(mach_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(mach_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(replacement_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
#undef IMPL
664 
665 // mach_vm_wire, vm_wire (vm_prot_t)
/*
 * Generates call_<FN>__vm_prot() wrappers: like the wire/unwire wrappers
 * above, but the vm_prot_t argument itself is the value under test.
 */
#define IMPL(FN, T)                                                     \
	static kern_return_t                                            \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{                                                               \
	        mach_port_t host_priv = HOST_PRIV_NULL;                 \
	        kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
	        assert(kr == 0);  /* host priv port on macOS requires entitlements or root */ \
	        kr = FN(host_priv, map, start, size, prot);             \
	        return kr;                                              \
	}
IMPL(mach_vm_wire, mach_vm_address_t)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t)
#undef IMPL
682 
683 
684 // mach_vm_map/vm32_map/vm32_map_64 infra
685 
// Common signature shared by mach_vm_map and the retyped vm32 map wrappers
// below, so one set of call_map_fn__* helpers can drive all of them.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,      // in/out: requested / chosen address
    mach_vm_size_t size,
    mach_vm_offset_t mask,           // alignment mask
    int flags,                       // VM_FLAGS_*
    mem_entry_name_port_t object,    // memory entry port, or 0 for anonymous
    memory_object_offset_t offset,   // offset within `object`
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
697 
static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Anonymous mapping at exactly `start`, overwriting whatever is there.
	mach_vm_address_t addr = start;
	kern_return_t result = fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    0, 0, 0, 0, 0, VM_INHERIT_NONE);
	// fixed-overwrite maps on top of a pre-existing allocation; nothing to free
	return result;
}
707 
static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Same as allocate_fixed, but with copy semantics requested.
	mach_vm_address_t addr = start;
	kern_return_t result = fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    0, 0, true, 0, 0, VM_INHERIT_NONE);
	// fixed-overwrite maps on top of a pre-existing allocation; nothing to free
	return result;
}
717 
static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
{
	// Anonymous ANYWHERE mapping; `start_hint` only seeds the search.
	mach_vm_address_t where = start_hint;
	kern_return_t result = fn(map, &where, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
	if (result == KERN_SUCCESS) {
		(void)mach_vm_deallocate(map, where, size);
	}
	return result;
}
728 
static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Map a named memory entry at exactly `start` (no copy semantics).
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = start;
	kern_return_t result = fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    entry, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	(void)mach_port_deallocate(mach_task_self(), entry);
	// fixed-overwrite maps on top of a pre-existing allocation; nothing to free
	return result;
}
740 
static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Map a named memory entry at exactly `start` with copy semantics.
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = start;
	kern_return_t result = fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    entry, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	(void)mach_port_deallocate(mach_task_self(), entry);
	// fixed-overwrite maps on top of a pre-existing allocation; nothing to free
	return result;
}
752 
static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
{
	// ANYWHERE mapping of a named memory entry; hint only seeds the search.
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t where = start_hint;
	kern_return_t result = fn(map, &where, size, 0, VM_FLAGS_ANYWHERE, entry,
	    KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	if (result == KERN_SUCCESS) {
		(void)mach_vm_deallocate(map, where, size);
	}
	(void)mach_port_deallocate(mach_task_self(), entry);
	return result;
}
766 
static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	// Shared driver for the start/size/offset/object test axes: the memory
	// entry is created with `obj_size` bytes and mapped at `offset` into it.
	mach_port_t entry = make_a_mem_object(obj_size);
	mach_vm_address_t addr = start;
	kern_return_t result = fn(map, &addr, size, 0, flags, entry,
	    offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(result, map, addr, size, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return result;
}
778 
// Fixed-overwrite mapping of a memory entry (copy = false).
static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
}

// Fixed-overwrite mapping of a memory entry (copy = true).
static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
}

// ANYWHERE mapping of a memory entry (copy = false).
static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
}
796 
static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	// Anonymous mapping (object == 0) exercising the inheritance argument.
	mach_vm_address_t addr = start;
	kern_return_t result = fn(map, &addr, size, 0, flags,
	    0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(result, map, addr, size, flags);
	return result;
}
806 
// Fixed-overwrite anonymous mapping, copy = false.
static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}

// Fixed-overwrite anonymous mapping, copy = true.
static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}

// ANYWHERE anonymous mapping, copy = false.
static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}
824 
static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	// Memory-entry-backed mapping exercising the inheritance argument.
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = start;
	kern_return_t result = fn(map, &addr, size, 0, flags,
	    entry, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(result, map, addr, size, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return result;
}
836 
// Fixed-overwrite memory-entry mapping, copy = false.
static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}

// Fixed-overwrite memory-entry mapping, copy = true.
static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}

// ANYWHERE memory-entry mapping, copy = false.
static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}
854 
static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	// Anonymous mapping driven by caller-supplied VM_FLAGS_*; `start` is
	// in/out so the flags tests can observe the chosen address.
	kern_return_t result = fn(map, start, size, 0, flags,
	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(result, map, *start, size, flags);
	return result;
}
863 
static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	// Anonymous mapping with copy semantics, driven by caller-supplied flags.
	// BUG FIX: this variant previously passed copy=false, making it byte-for-byte
	// identical to call_map_fn__allocate__flags and leaving the copy=true path
	// untested (compare call_map_fn__memobject_copy__flags, which passes true).
	// Golden files generated before this fix may need regeneration.
	kern_return_t kr = fn(map, start, size, 0, flags,
	    0, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	return kr;
}
872 
static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	// Memory-entry mapping driven by caller-supplied VM_FLAGS_* (no copy).
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	kern_return_t result = fn(map, start, size, 0, flags,
	    entry, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(result, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return result;
}
883 
static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	// Memory-entry mapping with copy semantics, driven by caller flags.
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	kern_return_t result = fn(map, start, size, 0, flags,
	    entry, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(result, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return result;
}
894 
static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	// Fixed-size (KB16) anonymous mapping exercising cur/max protection pairs.
	mach_vm_address_t addr = 0;
	kern_return_t result = fn(map, &addr, KB16, 0, flags,
	    0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(result, map, addr, KB16, flags);
	return result;
}
904 
// Fixed-overwrite anonymous mapping, copy = false.
static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}

// Fixed-overwrite anonymous mapping, copy = true.
static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}

// ANYWHERE anonymous mapping, copy = false.
static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}
922 
static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	// Fixed-size (KB16) memory-entry mapping exercising cur/max protections.
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = 0;
	kern_return_t result = fn(map, &addr, KB16, 0, flags,
	    entry, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(result, map, addr, KB16, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return result;
}
934 
// Fixed-overwrite memory-entry mapping, copy = false.
static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}

// Fixed-overwrite memory-entry mapping, copy = true.
static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}

// ANYWHERE memory-entry mapping, copy = false.
static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}
952 
953 // implementations
954 
// Each generator below stamps out one call_<map_fn>__<instance>[__axis]()
// wrapper that forwards to the corresponding call_map_fn__* helper with
// `map_fn` bound.  One generator per test-parameter axis.

// axis: (start, size) with a fixed start address
#define IMPL_MAP_FN_START_SIZE(map_fn, instance)                                                \
    static kern_return_t                                                                        \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
    {                                                                                           \
	return call_map_fn__ ## instance(map_fn, map, start, size);                             \
    }

// axis: (start, size) where start is only a placement hint (ANYWHERE)
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance)                                                      \
    static kern_return_t                                                                             \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
    {                                                                                                \
	return call_map_fn__ ## instance(map_fn, map, start_hint, size);                             \
    }

// axis: (start, size, offset, object size)
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance)                                                                                                                   \
    static kern_return_t                                                                                                                                                         \
    call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
    {                                                                                                                                                                            \
	return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size);                                                              \
    }

// axis: vm_inherit_t
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance)                                                                          \
    static kern_return_t                                                                                                          \
    call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
    {                                                                                                                             \
	return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit);                                         \
    }

// axis: VM_FLAGS_*
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance)                                                                 \
    static kern_return_t                                                                                               \
    call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
    {                                                                                                                  \
	return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags);                                  \
    }

// axis: (cur_protection, max_protection) pairs
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance)                                               \
    static kern_return_t                                                                       \
    call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
    {                                                                                          \
	return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max);               \
    }
996 
// Stamps out the full set of per-axis wrappers for one map function.
#define IMPL(map_fn)                                                       \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere)                   \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere)                  \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed)      \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere)   \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed)             \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy)        \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere)          \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed)            \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy)       \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere)         \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate)                     \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy)                \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject)                    \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere)                  \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere)                 \

static kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	// Call through a local so the caller's original *address is still
	// available for the out-parameter check afterwards.
	mach_vm_address_t out = *address;
	kern_return_t kr = mach_vm_map(target_task, &out, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, out, *address, flags, target_task);
	*address = out;
	return kr;
}
IMPL(mach_vm_map_wrapped)
1044 
1045 #if TEST_OLD_STYLE_MACH
// vm_map_64 adapted to the common map_fn_t signature.  The wide test inputs
// are cast down to the U32 vm_* types before the call, matching how an
// old-style caller would present them.
static kern_return_t
vm_map_64_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map_64(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
IMPL(vm_map_64_retyped)
1066 
// vm_map adapted to the common map_fn_t signature; same down-casting of the
// wide test inputs as vm_map_64_retyped above.
static kern_return_t
vm_map_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
IMPL(vm_map_retyped)
1087 #endif
1088 
// Tear down the map-function generator macros now that all wrappers exist.
#undef IMPL_MAP_FN_START_SIZE
// BUG FIX: this previously read "#undef IMPL_MAP_FN_SIZE", a macro that was
// never defined, silently leaking IMPL_MAP_FN_HINT_SIZE past this point.
#undef IMPL_MAP_FN_HINT_SIZE
#undef IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT
#undef IMPL_MAP_FN_START_SIZE_INHERIT
#undef IMPL_MAP_FN_START_SIZE_FLAGS
#undef IMPL_MAP_FN_PROT_PAIRS
#undef IMPL
1096 
1097 
1098 // mmap
1099 // Directly calling this symbol lets us hit the syscall directly instead of the libsyscall wrapper.
1100 void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off);
1101 
1102 // We invert MAP_UNIX03 in the flags. This is because by default libsyscall intercepts calls to mmap and adds MAP_UNIX03.
1103 // That means MAP_UNIX03 should be the default for most of our tests, and we should only test without MAP_UNIX03 when we explicitly want to.
1104 void *
mmap_wrapper(void * addr,size_t len,int prot,int flags,int fildes,off_t off)1105 mmap_wrapper(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
1106 {
1107 	flags ^= MAP_UNIX03;
1108 	return __mmap(addr, len, prot, flags, fildes, off);
1109 }
1110 
1111 // Rename the UNIX03 flag for the code below since we're inverting its meaning.
// From here on MAP_UNIX03 must not be used directly: mmap_wrapper inverts the
// bit, so the code below passes MAP_NOT_UNIX03 to *disable* UNIX03 behavior.
#define MAP_NOT_UNIX03 0x40000
static_assert(MAP_NOT_UNIX03 == MAP_UNIX03, "MAP_UNIX03 value changed");
#undef MAP_UNIX03
#define MAP_UNIX03 dont_use_MAP_UNIX03  // poison direct uses at compile time
1116 
1117 // helpers
1118 
1119 // Return true if security policy disallows unsigned code.
1120 // Some test results are expected to change with this set.
// Return true if security policy disallows unsigned code.
// Some test results are expected to change with this set.
static bool
unsigned_code_is_disallowed(void)
{
	if (isRosetta()) {
		return false;
	}

	int policy = 0;
	size_t len = sizeof(policy);
	if (sysctlbyname("security.mac.amfi.unsigned_code_policy",
	    &policy, &len, NULL, 0) != 0) {
		// sysctl not present, assume unsigned code is okay
		return false;
	}
	return policy != 0;
}
1138 
1139 static int
maybe_hide_mmap_failure(int ret,int prot,int fd)1140 maybe_hide_mmap_failure(int ret, int prot, int fd)
1141 {
1142 	// Special case for mmap(PROT_EXEC, fd).
1143 	// When SIP is enabled these get EPERM from mac_file_check_mmap().
1144 	// The golden files record the SIP-disabled values.
1145 	// This special case also allows the test to succeed when SIP
1146 	// is enabled even though the return value isn't the golden one.
1147 	if (ret == EPERM && fd != -1 && (prot & PROT_EXEC) &&
1148 	    unsigned_code_is_disallowed()) {
1149 		return ACCEPTABLE;
1150 	}
1151 	return ret;
1152 }
1153 
1154 static kern_return_t
help_call_mmap__vm_prot(MAP_T map __unused,int flags,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)1155 help_call_mmap__vm_prot(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
1156 {
1157 	int fd = -1;
1158 	if (!(flags & MAP_ANON)) {
1159 		fd = get_fd();
1160 	}
1161 	void *rv = mmap_wrapper((void *)start, (size_t) size, prot, flags, fd, 0);
1162 	if (rv == MAP_FAILED) {
1163 		return maybe_hide_mmap_failure(errno, prot, fd);
1164 	} else {
1165 		assert(0 == munmap_helper(rv, size));
1166 		return 0;
1167 	}
1168 }
1169 
1170 static kern_return_t
help_call_mmap__kernel_flags(MAP_T map __unused,int mmap_flags,mach_vm_address_t start,mach_vm_size_t size,int kernel_flags)1171 help_call_mmap__kernel_flags(MAP_T map __unused, int mmap_flags, mach_vm_address_t start, mach_vm_size_t size, int kernel_flags)
1172 {
1173 	void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, mmap_flags, kernel_flags, 0);
1174 	if (rv == MAP_FAILED) {
1175 		return errno;
1176 	} else {
1177 		assert(0 == munmap_helper(rv, size));
1178 		return 0;
1179 	}
1180 }
1181 
1182 static kern_return_t
help_call_mmap__dst_size_fileoff(MAP_T map __unused,int flags,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff)1183 help_call_mmap__dst_size_fileoff(MAP_T map __unused, int flags, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff)
1184 {
1185 	int fd = -1;
1186 	if (!(flags & MAP_ANON)) {
1187 		fd = get_fd();
1188 	}
1189 	void *rv = mmap_wrapper((void *)dst, (size_t) size, VM_PROT_DEFAULT, flags, fd, (off_t)fileoff);
1190 	if (rv == MAP_FAILED) {
1191 		return errno;
1192 	} else {
1193 		assert(0 == munmap_helper(rv, size));
1194 		return 0;
1195 	}
1196 }
1197 
1198 static kern_return_t
help_call_mmap__start_size(MAP_T map __unused,int flags,mach_vm_address_t start,mach_vm_size_t size)1199 help_call_mmap__start_size(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size)
1200 {
1201 	int fd = -1;
1202 	if (!(flags & MAP_ANON)) {
1203 		fd = get_fd();
1204 	}
1205 	void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, flags, fd, 0);
1206 	if (rv == MAP_FAILED) {
1207 		return errno;
1208 	} else {
1209 		assert(0 == munmap_helper(rv, size));
1210 		return 0;
1211 	}
1212 }
1213 
1214 static kern_return_t
help_call_mmap__offset_size(MAP_T map __unused,int flags,mach_vm_address_t offset,mach_vm_size_t size)1215 help_call_mmap__offset_size(MAP_T map __unused, int flags, mach_vm_address_t offset, mach_vm_size_t size)
1216 {
1217 	int fd = -1;
1218 	if (!(flags & MAP_ANON)) {
1219 		fd = get_fd();
1220 	}
1221 	void *rv = mmap_wrapper((void *)0, (size_t) size, VM_PROT_DEFAULT, flags, fd, (off_t)offset);
1222 	if (rv == MAP_FAILED) {
1223 		return errno;
1224 	} else {
1225 		assert(0 == munmap_helper(rv, size));
1226 		return 0;
1227 	}
1228 }
1229 
// Generates one call_mmap__<variant>__<type>() wrapper that forwards to
// help_call_mmap__<type>() with the variant's mmap flags baked in.
#define IMPL_ONE_FROM_HELPER(type, variant, flags, ...)                                                                                 \
	static kern_return_t                                                                                                            \
	__attribute__((used))                                                                                                           \
	call_mmap ## __ ## variant ## __ ## type(MAP_T map, mach_vm_address_t start, mach_vm_size_t size DROP_COMMAS(__VA_ARGS__)) {    \
	        return help_call_mmap__ ## type(map, flags, start, size DROP_TYPES(__VA_ARGS__));                                       \
	}

// call functions

// Stamps out wrappers for every interesting flag combination of one helper.
#define IMPL_FROM_HELPER(type, ...) \
	IMPL_ONE_FROM_HELPER(type, file_private,          MAP_FILE | MAP_PRIVATE,                          ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, anon_private,          MAP_ANON | MAP_PRIVATE,                          ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, file_shared,           MAP_FILE | MAP_SHARED,                           ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, anon_shared,           MAP_ANON | MAP_SHARED,                           ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, file_private_codesign, MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_CODESIGN, ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, file_private_media,    MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA,    ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, nounix03_private,      MAP_FILE | MAP_PRIVATE | MAP_NOT_UNIX03,         ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, fixed_private,         MAP_FILE | MAP_PRIVATE | MAP_FIXED,              ##__VA_ARGS__)  \

// Expand the wrapper family for each trial-argument flavor.
IMPL_FROM_HELPER(vm_prot, vm_prot_t, prot)
IMPL_FROM_HELPER(dst_size_fileoff, mach_vm_address_t, fileoff)
IMPL_FROM_HELPER(start_size)
IMPL_FROM_HELPER(offset_size)

// kernel_flags trials are only generated for anonymous mappings.
IMPL_ONE_FROM_HELPER(kernel_flags, anon_private, MAP_ANON | MAP_PRIVATE, int, kernel_flags)
IMPL_ONE_FROM_HELPER(kernel_flags, anon_shared, MAP_ANON | MAP_SHARED, int, kernel_flags)
1256 
1257 static kern_return_t
1258 call_mmap__mmap_flags(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size, int mmap_flags)
1259 {
1260 	int fd = -1;
1261 	if (!(mmap_flags & MAP_ANON)) {
1262 		fd = get_fd();
1263 	}
1264 	void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, mmap_flags, fd, 0);
1265 	if (rv == MAP_FAILED) {
1266 		return errno;
1267 	} else {
1268 		assert(0 == munmap(rv, (size_t) size));
1269 		return 0;
1270 	}
1271 }
1272 
1273 // Mach memory entry ownership
1274 
1275 static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused,int ledger_tag)1276 call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
1277 {
1278 	mach_port_t mementry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
1279 	kern_return_t kr = mach_memory_entry_ownership(mementry, mach_task_self(), ledger_tag, 0);
1280 	(void)mach_port_deallocate(mach_task_self(), mementry);
1281 	return kr;
1282 }
1283 
1284 static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused,int ledger_flag)1285 call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
1286 {
1287 	mach_port_t mementry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
1288 	kern_return_t kr = mach_memory_entry_ownership(mementry, mach_task_self(), VM_LEDGER_TAG_DEFAULT, ledger_flag);
1289 	(void)mach_port_deallocate(mach_task_self(), mementry);
1290 	return kr;
1291 }
1292 
1293 
// For deallocators like munmap and vm_deallocate.
// Return a non-zero error code if we should avoid performing this trial.
kern_return_t
short_circuit_deallocator(MAP_T map, start_size_trial_t trial)
{
	// mach_vm_deallocate(size == 0) is safe
	if (trial.size == 0) {
		return 0;
	}

	// Allow deallocation attempts based on a valid allocation
	// (assumes the test loop will slide this trial to a valid allocation)
	if (!trial.start_is_absolute && trial.size_is_absolute) {
		return 0;
	}

	// Avoid overwriting random live memory.
	// If the sanitizer says the range does NOT overflow, it denotes a real
	// address range that could unmap live allocations; skip the trial.
	if (!vm_sanitize_range_overflows_strict_zero(trial.start, trial.size, VM_MAP_PAGE_MASK(map))) {
		return IGNORED;
	}

	// Avoid EXC_GUARD if it is still enabled.
	// A non-overflowing range whose non-zero end rounds up to exactly 0
	// (i.e. wraps the top of the address space after page rounding) is the
	// shape known to provoke EXC_GUARD.
	mach_vm_address_t sum;
	if (!__builtin_add_overflow(trial.start, trial.size, &sum) &&
	    trial.start + trial.size != 0 &&
	    round_up_page(trial.start + trial.size, PAGE_SIZE) == 0) {
		// this case provokes EXC_GUARD
		if (EXC_GUARD_ENABLED) {
			return GUARD;
		}
	}

	// Allow.
	return 0;
}
1329 
1330 static kern_return_t
call_mach_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1331 call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1332 {
1333 	kern_return_t kr = mach_vm_deallocate(map, start, size);
1334 	return kr;
1335 }
1336 
#if TEST_OLD_STYLE_MACH
// Old-style variant: narrows the 64-bit trial values to vm_address_t/vm_size_t.
static kern_return_t
call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	return vm_deallocate(map, (vm_address_t)start, (vm_size_t)size);
}
#endif
1345 
1346 static kern_return_t
call_mach_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1347 call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1348 {
1349 	mach_vm_address_t saved_start = *start;
1350 	kern_return_t kr = mach_vm_allocate(map, start, size, flags);
1351 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
1352 	return kr;
1353 }
1354 
1355 
1356 static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)1357 call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
1358 {
1359 	mach_vm_address_t saved_start = *start;
1360 	kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_FIXED);
1361 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
1362 	return kr;
1363 }
1364 
1365 static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)1366 call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
1367 {
1368 	mach_vm_address_t saved_start = *start;
1369 	kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_ANYWHERE);
1370 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
1371 	return kr;
1372 }
1373 
1374 static kern_return_t
call_mach_vm_inherit(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1375 call_mach_vm_inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1376 {
1377 	kern_return_t kr = mach_vm_inherit(map, start, size, VM_INHERIT_NONE);
1378 	return kr;
1379 }
#if TEST_OLD_STYLE_MACH
// Old-style vm_inherit() with the inheritance fixed at VM_INHERIT_NONE.
static kern_return_t
call_vm_inherit(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_inherit(map, start, size, VM_INHERIT_NONE);
}
#endif
1388 
1389 static int
call_minherit(void * start,size_t size)1390 call_minherit(void *start, size_t size)
1391 {
1392 	int err = minherit(start, size, VM_INHERIT_SHARE);
1393 	return err ? errno : 0;
1394 }
1395 
1396 static kern_return_t
call_mach_vm_inherit__inherit(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t value)1397 call_mach_vm_inherit__inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t value)
1398 {
1399 	kern_return_t kr = mach_vm_inherit(map, start, size, value);
1400 	return kr;
1401 }
1402 
// Exercise minherit() with a caller-controlled inheritance value;
// returns errno on failure, 0 on success.
static int
call_minherit__inherit(void * start, size_t size, int value)
{
	return minherit(start, size, value) ? errno : 0;
}
1409 
1410 static kern_return_t
call_mach_vm_protect__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1411 call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1412 {
1413 	kern_return_t kr = mach_vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
1414 	return kr;
1415 }
1416 static kern_return_t
call_mach_vm_protect__vm_prot(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)1417 call_mach_vm_protect__vm_prot(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
1418 {
1419 	kern_return_t kr = mach_vm_protect(map, start, size, 0, prot);
1420 	return kr;
1421 }
#if TEST_OLD_STYLE_MACH
// Old-style vm_protect() over a caller-controlled range, fixed RW protection.
static kern_return_t
call_vm_protect__start_size(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
}

// Old-style vm_protect() with a caller-controlled protection value.
static kern_return_t
call_vm_protect__vm_prot(MAP_T map, vm_address_t start, vm_size_t size, vm_prot_t prot)
{
	return vm_protect(map, start, size, 0, prot);
}
#endif
1436 
1437 extern int __mprotect(void *, size_t, int);
1438 
// Exercise the raw __mprotect syscall over a caller-controlled range with a
// fixed RW protection; returns errno on failure.
static int
call_mprotect__start_size(void *start, size_t size)
{
	return __mprotect(start, size, PROT_READ | PROT_WRITE) ? errno : 0;
}
1445 
// Exercise the raw __mprotect syscall with a caller-controlled protection;
// returns errno on failure.
static int
call_mprotect__vm_prot(void *start, size_t size, int prot)
{
	return __mprotect(start, size, prot) ? errno : 0;
}
1452 
#if TEST_OLD_STYLE_MACH
// vm_behavior_set() over a caller-controlled range, fixed VM_BEHAVIOR_DEFAULT.
static kern_return_t
call_vm_behavior_set__start_size__default(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_behavior_set(map, start, size, VM_BEHAVIOR_DEFAULT);
}

// vm_behavior_set() over a caller-controlled range, fixed VM_BEHAVIOR_CAN_REUSE.
static kern_return_t
call_vm_behavior_set__start_size__can_reuse(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_behavior_set(map, start, size, VM_BEHAVIOR_CAN_REUSE);
}

// vm_behavior_set() with a caller-controlled behavior value.
static kern_return_t
call_vm_behavior_set__vm_behavior(MAP_T map, vm_address_t start, vm_size_t size, vm_behavior_t behavior)
{
	return vm_behavior_set(map, start, size, behavior);
}
#endif /* TEST_OLD_STYLE_MACH */
1475 
1476 extern int __shared_region_map_and_slide_2_np(uint32_t files_count,
1477     const struct shared_file_np *files,
1478     uint32_t mappings_count,
1479     const struct shared_file_mapping_slide_np *mappings);
1480 
1481 static int
maybe_hide_shared_region_map_failure(int ret,uint32_t files_count,const struct shared_file_np * files,uint32_t mappings_count)1482 maybe_hide_shared_region_map_failure(int ret,
1483     uint32_t files_count, const struct shared_file_np *files,
1484     uint32_t mappings_count)
1485 {
1486 	// Special case for __shared_region_map_and_slide_2_np().
1487 	// When SIP is enabled this case gets EPERM instead of EINVAL due to
1488 	// vm_shared_region_map_file returning KERN_PROTECTION_FAILURE instead of
1489 	// KERN_INVALID_ARGUMENT.
1490 	if (ret == EPERM && files_count == 1 && mappings_count == 1 &&
1491 	    files->sf_fd == get_fd() && files->sf_mappings_count == 1 &&
1492 	    unsigned_code_is_disallowed()) {
1493 		return ACCEPTABLE;
1494 	}
1495 	return ret;
1496 }
1497 
// Invoke the raw __shared_region_map_and_slide_2_np syscall; on failure,
// return errno filtered through maybe_hide_shared_region_map_failure().
static int
call_shared_region_map_and_slide_2_np_child(uint32_t files_count, const struct shared_file_np *files,
    uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings)
{
	if (0 == __shared_region_map_and_slide_2_np(files_count, files, mappings_count, mappings)) {
		return 0;
	}
	return maybe_hide_shared_region_map_failure(errno, files_count, files, mappings_count);
}
1505 
// Argument bundle for thread_func(): carries the parameters of
// __shared_region_map_and_slide_2_np() across the pthread boundary.
typedef struct {
	uint32_t files_count;
	const struct shared_file_np *files;
	uint32_t mappings_count;
	const struct shared_file_mapping_slide_np *mappings;
} map_n_slice_thread_args;
1512 
1513 void*
thread_func(void * args)1514 thread_func(void* args)
1515 {
1516 	map_n_slice_thread_args *thread_args = (map_n_slice_thread_args *)args;
1517 	uint32_t files_count = thread_args->files_count;
1518 	const struct shared_file_np *files = thread_args->files;
1519 	uint32_t mappings_count = thread_args->mappings_count;
1520 	const struct shared_file_mapping_slide_np *mappings = thread_args->mappings;
1521 
1522 	int err = call_shared_region_map_and_slide_2_np_child(files_count, files, mappings_count, mappings);
1523 
1524 	int *result = malloc(sizeof(int));
1525 	assert(result != NULL);
1526 	*result = err;
1527 	return result;
1528 }
1529 
1530 static int
call_shared_region_map_and_slide_2_np_in_thread(uint32_t files_count,const struct shared_file_np * files,uint32_t mappings_count,const struct shared_file_mapping_slide_np * mappings)1531 call_shared_region_map_and_slide_2_np_in_thread(uint32_t files_count, const struct shared_file_np *files,
1532     uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings)
1533 {
1534 	// From vm/vm_shared_region.c: After a chroot(), the calling process keeps using its original shared region [...]
1535 	// But its children will use a different shared region [...]
1536 	if (chroot(".") < 0) {
1537 		return BUSTED;
1538 	}
1539 
1540 	map_n_slice_thread_args args = {files_count, files, mappings_count, mappings};
1541 	pthread_t thread;
1542 	if (pthread_create(&thread, NULL, thread_func, (void *)&args) < 0) {
1543 		return -91;
1544 	}
1545 
1546 	int *err;
1547 	if (pthread_join(thread, (void**)&err) < 0) {
1548 		return BUSTED;
1549 	}
1550 
1551 	if (chroot("/") < 0) {
1552 		return BUSTED;
1553 	}
1554 
1555 	return *err;
1556 }
1557 
// Exercise madvise() over a caller-controlled range with fixed MADV_NORMAL;
// returns errno on failure, 0 on success.
static int
call_madvise__start_size(void *start, size_t size)
{
	return madvise(start, size, MADV_NORMAL) ? errno : 0;
}
1564 
// Exercise madvise() with a caller-controlled advice value;
// returns errno on failure, 0 on success.
static int
call_madvise__vm_advise(void *start, size_t size, int advise)
{
	return madvise(start, size, advise) ? errno : 0;
}
1571 
1572 static int
call_mach_vm_msync__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1573 call_mach_vm_msync__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1574 {
1575 	kern_return_t kr = mach_vm_msync(map, start, size, VM_SYNC_ASYNCHRONOUS);
1576 	return kr;
1577 }
1578 
1579 static int
call_mach_vm_msync__vm_sync(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_sync_t sync)1580 call_mach_vm_msync__vm_sync(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_sync_t sync)
1581 {
1582 	kern_return_t kr = mach_vm_msync(map, start, size, sync);
1583 	return kr;
1584 }
1585 
#if TEST_OLD_STYLE_MACH
// Old-style vm_msync() over a caller-controlled range, fixed VM_SYNC_ASYNCHRONOUS.
static int
call_vm_msync__start_size(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_msync(map, start, size, VM_SYNC_ASYNCHRONOUS);
}

// Old-style vm_msync() with a caller-controlled sync flags value.
static int
call_vm_msync__vm_sync(MAP_T map, vm_address_t start, vm_size_t size, vm_sync_t sync)
{
	return vm_msync(map, start, size, sync);
}
#endif /* TEST_OLD_STYLE_MACH */
1601 
1602 // msync has a libsyscall wrapper that does alignment. We want the raw syscall.
1603 int __msync(void *, size_t, int);
1604 
// Exercise the raw __msync syscall (bypassing the aligning libsyscall
// wrapper) with fixed MS_SYNC; returns errno on failure.
static int
call_msync__start_size(void *start, size_t size)
{
	return __msync(start, size, MS_SYNC) ? errno : 0;
}
1611 
// Exercise the raw __msync syscall with a caller-controlled flags value;
// returns errno on failure.
static int
call_msync__vm_msync(void *start, size_t size, int msync_value)
{
	return __msync(start, size, msync_value) ? errno : 0;
}
1618 
1619 // msync nocancel isn't declared, but we want to directly hit the syscall
1620 int __msync_nocancel(void *, size_t, int);
1621 
// Exercise the raw __msync_nocancel syscall with fixed MS_SYNC;
// returns errno on failure.
static int
call_msync_nocancel__start_size(void *start, size_t size)
{
	return __msync_nocancel(start, size, MS_SYNC) ? errno : 0;
}
1628 
// Exercise the raw __msync_nocancel syscall with a caller-controlled flags
// value; returns errno on failure.
static int
call_msync_nocancel__vm_msync(void *start, size_t size, int msync_value)
{
	return __msync_nocancel(start, size, msync_value) ? errno : 0;
}
1635 
1636 static void
check_mach_vm_machine_attribute_outparam_changes(kern_return_t * kr,vm_machine_attribute_val_t value,vm_machine_attribute_val_t saved_value)1637 check_mach_vm_machine_attribute_outparam_changes(kern_return_t * kr, vm_machine_attribute_val_t value, vm_machine_attribute_val_t saved_value)
1638 {
1639 	if (value != saved_value) {
1640 		*kr = OUT_PARAM_BAD;
1641 	}
1642 }
1643 
1644 static int
call_mach_vm_machine_attribute__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1645 call_mach_vm_machine_attribute__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1646 {
1647 	vm_machine_attribute_val_t value = MATTR_VAL_GET;
1648 	vm_machine_attribute_val_t initial_value = value;
1649 	kern_return_t kr = mach_vm_machine_attribute(map, start, size, MATTR_CACHE, &value);
1650 	check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1651 	return kr;
1652 }
1653 
1654 
1655 static int
call_mach_vm_machine_attribute__machine_attribute(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_machine_attribute_t attr)1656 call_mach_vm_machine_attribute__machine_attribute(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_machine_attribute_t attr)
1657 {
1658 	vm_machine_attribute_val_t value = MATTR_VAL_GET;
1659 	vm_machine_attribute_val_t initial_value = value;
1660 	kern_return_t kr = mach_vm_machine_attribute(map, start, size, attr, &value);
1661 	check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1662 	return kr;
1663 }
1664 
#if TEST_OLD_STYLE_MACH
// Old-style vm_machine_attribute(MATTR_CACHE, MATTR_VAL_GET) over a range.
static int
call_vm_machine_attribute__start_size(MAP_T map, vm_address_t start, vm_size_t size)
{
	vm_machine_attribute_val_t attr_val = MATTR_VAL_GET;
	vm_machine_attribute_val_t attr_val_before = attr_val;
	kern_return_t kr = vm_machine_attribute(map, start, size, MATTR_CACHE, &attr_val);
	check_mach_vm_machine_attribute_outparam_changes(&kr, attr_val, attr_val_before);
	return kr;
}

// Old-style vm_machine_attribute() with a caller-controlled selector.
static int
call_vm_machine_attribute__machine_attribute(MAP_T map, vm_address_t start, vm_size_t size, vm_machine_attribute_t attr)
{
	vm_machine_attribute_val_t attr_val = MATTR_VAL_GET;
	vm_machine_attribute_val_t attr_val_before = attr_val;
	kern_return_t kr = vm_machine_attribute(map, start, size, attr, &attr_val);
	check_mach_vm_machine_attribute_outparam_changes(&kr, attr_val, attr_val_before);
	return kr;
}
#endif /* TEST_OLD_STYLE_MACH */
1686 
1687 static int
call_mach_vm_purgable_control__address__get(MAP_T map,mach_vm_address_t addr)1688 call_mach_vm_purgable_control__address__get(MAP_T map, mach_vm_address_t addr)
1689 {
1690 	int state = INVALID_PURGABLE_STATE;
1691 	int initial_state = state;
1692 	kern_return_t kr = mach_vm_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
1693 	check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
1694 	return kr;
1695 }
1696 
1697 
1698 static int
call_mach_vm_purgable_control__address__purge_all(MAP_T map,mach_vm_address_t addr)1699 call_mach_vm_purgable_control__address__purge_all(MAP_T map, mach_vm_address_t addr)
1700 {
1701 	int state = INVALID_PURGABLE_STATE;
1702 	int initial_state = state;
1703 	kern_return_t kr = mach_vm_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
1704 	check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
1705 	return kr;
1706 }
1707 
1708 static int
call_mach_vm_purgable_control__purgeable_state(MAP_T map,mach_vm_address_t addr,vm_purgable_t control,int state)1709 call_mach_vm_purgable_control__purgeable_state(MAP_T map, mach_vm_address_t addr, vm_purgable_t control, int state)
1710 {
1711 	int initial_state = state;
1712 	kern_return_t kr = mach_vm_purgable_control(map, addr, control, &state);
1713 	check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, control);
1714 	return kr;
1715 }
1716 
#if TEST_OLD_STYLE_MACH
// Old-style vm_purgable_control(VM_PURGABLE_GET_STATE) at an address.
static int
call_vm_purgable_control__address__get(MAP_T map, vm_address_t addr)
{
	int purgable_state = INVALID_PURGABLE_STATE;
	int state_before = purgable_state;
	kern_return_t kr = vm_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &purgable_state);
	check_mach_vm_purgable_control_outparam_changes(&kr, purgable_state, state_before, VM_PURGABLE_GET_STATE);
	return kr;
}

// Old-style vm_purgable_control(VM_PURGABLE_PURGE_ALL) at an address.
static int
call_vm_purgable_control__address__purge_all(MAP_T map, vm_address_t addr)
{
	int purgable_state = INVALID_PURGABLE_STATE;
	int state_before = purgable_state;
	kern_return_t kr = vm_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &purgable_state);
	check_mach_vm_purgable_control_outparam_changes(&kr, purgable_state, state_before, VM_PURGABLE_PURGE_ALL);
	return kr;
}

// Old-style vm_purgable_control() with caller-controlled control and state.
static int
call_vm_purgable_control__purgeable_state(MAP_T map, vm_address_t addr, vm_purgable_t control, int state)
{
	int state_before = state;
	kern_return_t kr = vm_purgable_control(map, addr, control, &state);
	check_mach_vm_purgable_control_outparam_changes(&kr, state, state_before, control);
	return kr;
}
#endif /* TEST_OLD_STYLE_MACH */
1747 
1748 static void
check_mach_vm_region_recurse_outparam_changes(kern_return_t * kr,void * info,void * saved_info,size_t info_size,natural_t depth,natural_t saved_depth,mach_vm_address_t addr,mach_vm_address_t saved_addr,mach_vm_size_t size,mach_vm_size_t saved_size)1749 check_mach_vm_region_recurse_outparam_changes(kern_return_t * kr, void * info, void * saved_info, size_t info_size,
1750     natural_t depth, natural_t saved_depth, mach_vm_address_t addr, mach_vm_address_t saved_addr,
1751     mach_vm_size_t size, mach_vm_size_t saved_size)
1752 {
1753 	if (*kr == KERN_SUCCESS) {
1754 		if (depth == saved_depth) {
1755 			*kr = OUT_PARAM_BAD;
1756 		}
1757 		if (size == saved_size) {
1758 			*kr = OUT_PARAM_BAD;
1759 		}
1760 		if (memcmp(info, saved_info, info_size) == 0) {
1761 			*kr = OUT_PARAM_BAD;
1762 		}
1763 	} else {
1764 		if (depth != saved_depth || addr != saved_addr || size != saved_size || memcmp(info, saved_info, info_size) != 0) {
1765 			*kr = OUT_PARAM_BAD;
1766 		}
1767 	}
1768 }
1769 
1770 static kern_return_t
call_mach_vm_region_recurse(MAP_T map,mach_vm_address_t addr)1771 call_mach_vm_region_recurse(MAP_T map, mach_vm_address_t addr)
1772 {
1773 	vm_region_submap_info_data_64_t info;
1774 	info.inheritance = INVALID_INHERIT;
1775 	vm_region_submap_info_data_64_t saved_info = info;
1776 	mach_vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
1777 	mach_vm_size_t saved_size = size_out;
1778 	natural_t depth = 10;
1779 	natural_t saved_depth = depth;
1780 	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
1781 	mach_vm_address_t addr_cpy = addr;
1782 
1783 	kern_return_t kr = mach_vm_region_recurse(map,
1784 	    &addr_cpy,
1785 	    &size_out,
1786 	    &depth,
1787 	    (vm_region_recurse_info_t)&info,
1788 	    &count);
1789 	check_mach_vm_region_recurse_outparam_changes(&kr, &info, &saved_info, sizeof(info), depth, saved_depth,
1790 	    addr, addr_cpy, size_out, saved_size);
1791 
1792 	return kr;
1793 }
1794 
#if TEST_OLD_STYLE_MACH
// Old-style vm_region_recurse() with the 32-bit submap info flavor.
static kern_return_t
call_vm_region_recurse(MAP_T map, vm_address_t addr)
{
	vm_region_submap_info_data_t region_info;
	region_info.inheritance = INVALID_INHERIT;
	vm_region_submap_info_data_t region_info_before = region_info;

	vm_size_t out_size = UNLIKELY_INITIAL_SIZE;
	vm_size_t out_size_before = out_size;

	natural_t nesting = 10;
	natural_t nesting_before = nesting;

	mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT;
	vm_address_t out_addr = addr;

	kern_return_t kr = vm_region_recurse(map,
	    &out_addr,
	    &out_size,
	    &nesting,
	    (vm_region_recurse_info_t)&region_info,
	    &info_count);

	check_mach_vm_region_recurse_outparam_changes(&kr, &region_info, &region_info_before, sizeof(region_info),
	    nesting, nesting_before, out_addr, addr, out_size, out_size_before);

	return kr;
}

// Old-style vm_region_recurse_64() with the 64-bit submap info flavor.
static kern_return_t
call_vm_region_recurse_64(MAP_T map, vm_address_t addr)
{
	vm_region_submap_info_data_64_t region_info;
	region_info.inheritance = INVALID_INHERIT;
	vm_region_submap_info_data_64_t region_info_before = region_info;

	vm_size_t out_size = UNLIKELY_INITIAL_SIZE;
	vm_size_t out_size_before = out_size;

	natural_t nesting = 10;
	natural_t nesting_before = nesting;

	mach_msg_type_number_t info_count = VM_REGION_SUBMAP_INFO_COUNT_64;
	vm_address_t out_addr = addr;

	kern_return_t kr = vm_region_recurse_64(map,
	    &out_addr,
	    &out_size,
	    &nesting,
	    (vm_region_recurse_info_t)&region_info,
	    &info_count);

	check_mach_vm_region_recurse_outparam_changes(&kr, &region_info, &region_info_before, sizeof(region_info),
	    nesting, nesting_before, out_addr, addr, out_size, out_size_before);

	return kr;
}
#endif /* TEST_OLD_STYLE_MACH */
1854 
1855 static kern_return_t
call_mach_vm_page_info(MAP_T map,mach_vm_address_t addr)1856 call_mach_vm_page_info(MAP_T map, mach_vm_address_t addr)
1857 {
1858 	vm_page_info_flavor_t flavor = VM_PAGE_INFO_BASIC;
1859 	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
1860 	mach_msg_type_number_t saved_count = count;
1861 	vm_page_info_basic_data_t info = {0};
1862 	info.depth = -1;
1863 	vm_page_info_basic_data_t saved_info = info;
1864 
1865 	kern_return_t kr = mach_vm_page_info(map, addr, flavor, (vm_page_info_t)&info, &count);
1866 	check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);
1867 	return kr;
1868 }
1869 
1870 static void
check_mach_vm_page_query_outparam_changes(kern_return_t * kr,int disposition,int saved_disposition,int ref_count)1871 check_mach_vm_page_query_outparam_changes(kern_return_t * kr, int disposition, int saved_disposition, int ref_count)
1872 {
1873 	if (*kr == KERN_SUCCESS) {
1874 		/*
1875 		 * There should be no outside references to the memory created for this test
1876 		 */
1877 		if (ref_count != 0) {
1878 			*kr = OUT_PARAM_BAD;
1879 		}
1880 		if (disposition == saved_disposition) {
1881 			*kr = OUT_PARAM_BAD;
1882 		}
1883 	}
1884 }
1885 
1886 static kern_return_t
call_mach_vm_page_query(MAP_T map,mach_vm_address_t addr)1887 call_mach_vm_page_query(MAP_T map, mach_vm_address_t addr)
1888 {
1889 	int disp = INVALID_DISPOSITION_VALUE, ref = 0;
1890 	int saved_disposition = disp;
1891 	kern_return_t kr = mach_vm_page_query(map, addr, &disp, &ref);
1892 	check_mach_vm_page_query_outparam_changes(&kr, disp, saved_disposition, ref);
1893 	return kr;
1894 }
1895 
#if TEST_OLD_STYLE_MACH
// Old-style vm_map_page_query() at a caller-controlled address.
static kern_return_t
call_vm_map_page_query(MAP_T map, vm_address_t addr)
{
	int disposition = INVALID_DISPOSITION_VALUE;
	int disposition_before = disposition;
	int ref_count = 0;
	kern_return_t kr = vm_map_page_query(map, addr, &disposition, &ref_count);
	check_mach_vm_page_query_outparam_changes(&kr, disposition, disposition_before, ref_count);
	return kr;
}
#endif /* TEST_OLD_STYLE_MACH */
1907 
1908 static void
check_mach_vm_page_range_query_outparam_changes(kern_return_t * kr,mach_vm_size_t out_count,mach_vm_size_t in_count)1909 check_mach_vm_page_range_query_outparam_changes(kern_return_t * kr, mach_vm_size_t out_count, mach_vm_size_t in_count)
1910 {
1911 	if (out_count != in_count) {
1912 		*kr = OUT_PARAM_BAD;
1913 	}
1914 }
1915 
// Exercise mach_vm_page_range_query() over a caller-controlled range.
static kern_return_t
call_mach_vm_page_range_query(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// mach_vm_page_range_query writes one int per page output
	// and can accept any address range as input
	// We can't provide that much storage for very large lengths.
	// Instead we provide a limited output buffer,
	// write-protect the page after it, and "succeed" if the kernel
	// fills the buffer and then returns EFAULT.

	// enough space for MAX_PAGE_RANGE_QUERY with 4KB pages, twice
	mach_vm_size_t prq_buf_size = 2 * 262144 * sizeof(int);
	mach_vm_address_t prq_buf = 0;
	kern_return_t kr = mach_vm_allocate(map, &prq_buf,
	    prq_buf_size + KB16, VM_FLAGS_ANYWHERE);
	assert(kr == 0);

	// protect the guard page
	mach_vm_address_t prq_guard = prq_buf + prq_buf_size;
	kr = mach_vm_protect(map, prq_guard, KB16, 0, VM_PROT_NONE);
	assert(kr == 0);

	// pre-fill the output buffer with an invalid value
	memset((char *)prq_buf, 0xff, prq_buf_size);

	// one output slot per 16KB page, rounding up for a partial page
	mach_vm_size_t in_count = size / KB16 + (size % KB16 ? 1 : 0);
	mach_vm_size_t out_count = in_count;
	kr = mach_vm_page_range_query(map, start, size, prq_buf, &out_count);

	// yes, EFAULT as a kern_return_t because mach_vm_page_range_query returns copyio's error
	if (kr == EFAULT) {
		bool bad = false;
		for (unsigned i = 0; i < prq_buf_size / sizeof(uint32_t); i++) {
			if (((uint32_t *)prq_buf)[i] == 0xffffffff) {
				// kernel didn't fill the entire writeable buffer, that's bad
				bad = true;
				break;
			}
		}
		if (!bad) {
			// kernel filled our buffer and then hit our fault page
			// we'll allow it
			kr = 0;
		}
	}

	check_mach_vm_page_range_query_outparam_changes(&kr, out_count, in_count);
	(void)mach_vm_deallocate(map, prq_buf, prq_buf_size + KB16);

	return kr;
}
1967 
1968 static int
call_mincore(void * start,size_t size)1969 call_mincore(void *start, size_t size)
1970 {
1971 	// mincore writes one byte per page output
1972 	// and can accept any address range as input
1973 	// We can't provide that much storage for very large lengths.
1974 	// Instead we provide a limited output buffer,
1975 	// write-protect the page after it, and "succeed" if the kernel
1976 	// fills the buffer and then returns EFAULT.
1977 
1978 	// enough space for MAX_PAGE_RANGE_QUERY with 4KB pages, twice
1979 	size_t mincore_buf_size = 2 * 262144;
1980 	char *mincore_buf = 0;
1981 	mincore_buf = mmap(NULL, mincore_buf_size + KB16, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
1982 	assert(mincore_buf != MAP_FAILED);
1983 
1984 	// protect the guard page
1985 	char *mincore_guard = mincore_buf + mincore_buf_size;
1986 	int err = mprotect(mincore_guard, KB16, PROT_NONE);
1987 	assert(err == 0);
1988 
1989 	// pre-fill the output buffer with an invalid value
1990 	memset(mincore_buf, 0xff, mincore_buf_size);
1991 
1992 	int ret;
1993 	err = mincore(start, size, mincore_buf);
1994 	if (err == 0) {
1995 		ret = 0;
1996 	} else if (errno != EFAULT) {
1997 		ret = errno;
1998 	} else {
1999 		// EFAULT - check if kernel hit our guard page
2000 		bool bad = false;
2001 		for (unsigned i = 0; i < mincore_buf_size; i++) {
2002 			if (mincore_buf[i] == (char)0xff) {
2003 				// kernel didn't fill the entire writeable buffer, that's bad
2004 				bad = true;
2005 				break;
2006 			}
2007 		}
2008 		if (!bad) {
2009 			// kernel filled our buffer and then hit our guard page
2010 			// we'll allow it
2011 			ret = 0;
2012 		} else {
2013 			ret = errno;
2014 		}
2015 	}
2016 
2017 	(void)munmap(mincore_buf, mincore_buf_size + PAGE_SIZE);
2018 
2019 	return ret;
2020 }
2021 
2022 // TODO: re-enable deferred reclaim tests (rdar://136157720)
2023 #if 0
2024 typedef kern_return_t (*fn_mach_vm_deferred_reclamation_buffer_init)(task_t task, mach_vm_address_t address, mach_vm_size_t size);
2025 
/*
 * Exercise a mach_vm_deferred_reclamation_buffer_init-style entry point
 * against the reclamation-buffer trial list.
 *
 * Temporarily forces vm.reclaim_max_threshold to a non-zero value so the
 * kernel accepts buffer-init calls, runs all trials but the last, then sets
 * the threshold back to 0 for the final trial to exercise the
 * KERN_NOT_SUPPORTED path. The original sysctl value is restored on exit.
 *
 * Currently compiled out via #if 0 (rdar://136157720).
 */
static results_t *
test_mach_vm_deferred_reclamation_buffer_init(fn_mach_vm_deferred_reclamation_buffer_init func,
    const char * testname)
{
	int ret = 0;
	// Set vm.reclaim_max_threshold to non-zero
	int orig_reclaim_max_threshold = 0;
	int new_reclaim_max_threshold = 1;
	size_t size = sizeof(orig_reclaim_max_threshold);
	int sysctl_res = sysctlbyname("vm.reclaim_max_threshold", &orig_reclaim_max_threshold, &size, NULL, 0);
	assert(sysctl_res == 0);
	sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &new_reclaim_max_threshold, size);
	assert(sysctl_res == 0);

	reclamation_buffer_init_trials_t *trials SMART_RECLAMATION_BUFFER_INIT_TRIALS();
	results_t *results = alloc_results(testname, eSMART_RECLAMATION_BUFFER_INIT_TRIALS, trials->count);

	// reserve last trial to run without modified sysctl
	for (unsigned i = 0; i < trials->count - 1; i++) {
		reclamation_buffer_init_trial_t trial = trials->list[i];
		ret = func(trial.task, trial.address, trial.size);
		append_result(results, ret, trial.name);
	}

	// run with vm.reclaim_max_threshold = 0 and exercise KERN_NOT_SUPPORTED path
	new_reclaim_max_threshold = 0;
	reclamation_buffer_init_trial_t last_trial = trials->list[trials->count - 1];

	sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &new_reclaim_max_threshold, size);
	assert(sysctl_res == 0);

	ret = func(last_trial.task, last_trial.address, last_trial.size);
	if (__improbable(ret == KERN_INVALID_ARGUMENT)) {
		// Unlikely case when args are rejected before sysctl check.
		// When this happens during test run, return acceptable, but if this happens
		// during golden file generation, record the expected value.
		ret = generate_golden ? KERN_NOT_SUPPORTED : ACCEPTABLE;
	}
	append_result(results, ret, last_trial.name);

	// Revert vm.reclaim_max_threshold to how we found it
	sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &orig_reclaim_max_threshold, size);
	assert(sysctl_res == 0);

	return results;
}
2072 #endif // 0
2073 
2074 static vm_map_kernel_flags_trials_t *
generate_mmap_kernel_flags_trials()2075 generate_mmap_kernel_flags_trials()
2076 {
2077 	// mmap rejects both ANYWHERE and FIXED | OVERWRITE
2078 	// so don't set any prefix flags.
2079 	return generate_prefixed_vm_map_kernel_flags_trials(0, "");
2080 }
2081 
2082 
2083 #define SMART_MMAP_KERNEL_FLAGS_TRIALS()                                \
2084 	__attribute__((cleanup(cleanup_vm_map_kernel_flags_trials)))    \
2085 	= generate_mmap_kernel_flags_trials()
2086 
2087 static results_t *
test_mmap_with_allocated_vm_map_kernel_flags_t(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,int flags),const char * testname)2088 test_mmap_with_allocated_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, int flags), const char * testname)
2089 {
2090 	MAP_T map SMART_MAP;
2091 
2092 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2093 	vm_map_kernel_flags_trials_t * trials SMART_MMAP_KERNEL_FLAGS_TRIALS();
2094 	results_t *results = alloc_results(testname, eSMART_MMAP_KERNEL_FLAGS_TRIALS, trials->count);
2095 
2096 	for (unsigned i = 0; i < trials->count; i++) {
2097 		kern_return_t ret = func(map, base.addr, base.size, trials->list[i].flags);
2098 		append_result(results, ret, trials->list[i].name);
2099 	}
2100 	return results;
2101 }
2102 
2103 // Test a Unix function.
2104 // Run each trial with an allocated vm region and a vm_inherit_t
2105 typedef int (*unix_with_inherit_fn)(void *start, size_t size, int inherit);
2106 
2107 static results_t *
test_unix_with_allocated_vm_inherit_t(unix_with_inherit_fn fn,const char * testname)2108 test_unix_with_allocated_vm_inherit_t(unix_with_inherit_fn fn, const char * testname)
2109 {
2110 	MAP_T map SMART_MAP;
2111 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2112 	vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
2113 	results_t *results = alloc_results(testname, eSMART_VM_INHERIT_TRIALS, trials->count);
2114 
2115 	for (unsigned i = 0; i < trials->count; i++) {
2116 		vm_inherit_trial_t trial = trials->list[i];
2117 		int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2118 		append_result(results, ret, trial.name);
2119 	}
2120 	return results;
2121 }
2122 
2123 // Test a Unix function.
2124 // Run each trial with an allocated vm region and a vm_msync_t
2125 typedef int (*unix_with_msync_fn)(void *start, size_t size, int msync_value);
2126 
2127 static results_t *
test_unix_with_allocated_vm_msync_t(unix_with_msync_fn fn,const char * testname)2128 test_unix_with_allocated_vm_msync_t(unix_with_msync_fn fn, const char * testname)
2129 {
2130 	MAP_T map SMART_MAP;
2131 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2132 	vm_msync_trials_t *trials SMART_VM_MSYNC_TRIALS();
2133 	results_t *results = alloc_results(testname, eSMART_VM_MSYNC_TRIALS, trials->count);
2134 
2135 	for (unsigned i = 0; i < trials->count; i++) {
2136 		vm_msync_trial_t trial = trials->list[i];
2137 		int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2138 		append_result(results, ret, trial.name);
2139 	}
2140 	return results;
2141 }
2142 
2143 // Test a Unix function.
2144 // Run each trial with an allocated vm region and an advise
2145 typedef int (*unix_with_advise_fn)(void *start, size_t size, int advise);
2146 
2147 static results_t *
test_unix_with_allocated_aligned_vm_advise_t(unix_with_advise_fn fn,mach_vm_size_t align_mask,const char * testname)2148 test_unix_with_allocated_aligned_vm_advise_t(unix_with_advise_fn fn, mach_vm_size_t align_mask, const char * testname)
2149 {
2150 	MAP_T map SMART_MAP;
2151 	allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT);
2152 	vm_advise_trials_t *trials SMART_VM_ADVISE_TRIALS();
2153 	results_t *results = alloc_results(testname, eSMART_VM_ADVISE_TRIALS, trials->count);
2154 
2155 	for (unsigned i = 0; i < trials->count; i++) {
2156 		vm_advise_trial_t trial = trials->list[i];
2157 		int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2158 		append_result(results, ret, trial.name);
2159 	}
2160 	return results;
2161 }
2162 
2163 // Rosetta userspace intercepts shared_region_map_and_slide_2_np calls and this Rosetta wrapper
2164 // function doesn't have the necessary checks to support invalid input arguments. Skip these trials
2165 // intead of crashing the test.
2166 static bool
shared_region_map_and_slide_would_crash(shared_region_map_and_slide_2_trial_t * trial)2167 shared_region_map_and_slide_would_crash(shared_region_map_and_slide_2_trial_t *trial)
2168 {
2169 	uint32_t files_count = trial->files_count;
2170 	struct shared_file_np *files = trial->files;
2171 	uint32_t mappings_count = trial->mappings_count;
2172 	struct shared_file_mapping_slide_np *mappings = trial->mappings;
2173 
2174 	if (files_count == 0 || files_count == 1 || files_count > _SR_FILE_MAPPINGS_MAX_FILES) {
2175 		return true;
2176 	}
2177 	if (mappings_count == 0 || mappings_count > SFM_MAX) {
2178 		return true;
2179 	}
2180 	if (!files) {
2181 		return true;
2182 	}
2183 	if (!mappings) {
2184 		return true;
2185 	}
2186 	if (mappings_count != (((files_count - 1) * kNumSharedCacheMappings) + 1) &&
2187 	    mappings_count != (files_count * kNumSharedCacheMappings)) {
2188 		return true;
2189 	}
2190 	if (files_count >= kMaxSubcaches) {
2191 		return true;
2192 	}
2193 	return false;
2194 }
2195 
2196 typedef int (*unix_shared_region_map_and_slide_2_np)(uint32_t files_coun, const struct shared_file_np *files, uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings);
2197 
2198 static results_t *
test_unix_shared_region_map_and_slide_2_np(unix_shared_region_map_and_slide_2_np func,const char * testname)2199 test_unix_shared_region_map_and_slide_2_np(unix_shared_region_map_and_slide_2_np func, const char *testname)
2200 {
2201 	uint64_t dyld_fp = (uint64_t)get_dyld_fd();
2202 	shared_region_map_and_slide_2_trials_t *trials SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(dyld_fp);
2203 	results_t *results = alloc_results(testname, eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS, dyld_fp, trials->count);
2204 
2205 	for (unsigned i = 0; i < trials->count; i++) {
2206 		int ret;
2207 		shared_region_map_and_slide_2_trial_t trial = trials->list[i];
2208 		if (isRosetta() && shared_region_map_and_slide_would_crash(&trial)) {
2209 			ret = IGNORED;
2210 		} else {
2211 			ret = func(trial.files_count, trial.files, trial.mappings_count, trial.mappings);
2212 		}
2213 		append_result(results, ret, trial.name);
2214 	}
2215 
2216 	close_dyld_fd();
2217 	return results;
2218 }
2219 
2220 static results_t *
test_dst_size_fileoff(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff),const char * testname)2221 test_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
2222 {
2223 	MAP_T map SMART_MAP;
2224 	src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
2225 	results_t *results = alloc_results(testname, eSMART_FILEOFF_DST_SIZE_TRIALS, trials->count);
2226 
2227 	for (unsigned i = 0; i < trials->count; i++) {
2228 		src_dst_size_trial_t trial = trials->list[i];
2229 		unallocation_t dst_base SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
2230 		// src a.k.a. mmap fileoff doesn't slide
2231 		trial = slide_trial_dst(trial, dst_base.addr);
2232 		int ret = func(map, trial.dst, trial.size, trial.src);
2233 		append_result(results, ret, trial.name);
2234 	}
2235 	return results;
2236 }
2237 
2238 // Try to allocate a destination for mmap(MAP_FIXED) to overwrite.
2239 // On exit:
2240 // *out_dst *out_size are the allocation, or 0
2241 // *out_panic is true if the trial should stop and record PANIC
2242 // (because the trial specifies an absolute address that is already occupied)
2243 // *out_slide is true if the trial should slide by *out_dst
2244 static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map,mach_vm_address_t trial_dst,mach_vm_size_t trial_size,bool trial_dst_is_absolute,bool trial_size_is_absolute,mach_vm_address_t * out_dst,mach_vm_size_t * out_size,bool * out_panic,bool * out_slide)2245 allocate_for_mmap_fixed(MAP_T map, mach_vm_address_t trial_dst, mach_vm_size_t trial_size, bool trial_dst_is_absolute, bool trial_size_is_absolute, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
2246 {
2247 	*out_panic = false;
2248 	*out_slide = false;
2249 
2250 	if (trial_dst_is_absolute && trial_size_is_absolute) {
2251 		// known dst addr, known size
2252 		*out_dst = trial_dst;
2253 		*out_size = trial_size;
2254 		kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_FIXED);
2255 		if (kr == KERN_NO_SPACE) {
2256 			// this space is in use, we can't allow mmap to try to overwrite it
2257 			*out_panic = true;
2258 			*out_dst = 0;
2259 			*out_size = 0;
2260 		} else if (kr != 0) {
2261 			// some other error, assume mmap will also fail
2262 			*out_dst = 0;
2263 			*out_size = 0;
2264 		}
2265 		// no slide, trial and allocation are already at the same place
2266 		*out_slide = false;
2267 	} else {
2268 		// other cases either fit in a small allocation or fail
2269 		*out_dst = 0;
2270 		*out_size = TEST_ALLOC_SIZE;
2271 		kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_ANYWHERE);
2272 		if (kr != 0) {
2273 			// allocation error, assume mmap will also fail
2274 			*out_dst = 0;
2275 			*out_size = 0;
2276 		}
2277 		*out_slide = true;
2278 	}
2279 }
2280 
// Overload for start/size trials: forwards the trial's fields onto the
// generic allocate_for_mmap_fixed() above.
static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map, start_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
{
	allocate_for_mmap_fixed(map, trial.start, trial.size, trial.start_is_absolute, trial.size_is_absolute,
	    out_dst, out_size, out_panic, out_slide);
}
// Overload for src/dst/size trials: forwards the trial's dst fields onto the
// generic allocate_for_mmap_fixed() above. A size that is dst-relative is,
// by definition, not absolute — hence the negation.
static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map, src_dst_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
{
	allocate_for_mmap_fixed(map, trial.dst, trial.size, trial.dst_is_absolute, !trial.size_is_dst_relative,
	    out_dst, out_size, out_panic, out_slide);
}
2293 
2294 // Like test_dst_size_fileoff, but specialized for mmap(MAP_FIXED).
2295 // mmap(MAP_FIXED) is destructive, forcibly unmapping anything
2296 // already at that address.
2297 // We must ensure that each trial is either obviously invalid and caught
2298 // by the sanitizers, or is valid and overwrites an allocation we control.
2299 static results_t *
test_fixed_dst_size_fileoff(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff),const char * testname)2300 test_fixed_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
2301 {
2302 	MAP_T map SMART_MAP;
2303 	src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
2304 	results_t *results = alloc_results(testname, eSMART_FILEOFF_DST_SIZE_TRIALS, trials->count);
2305 	for (unsigned i = 0; i < trials->count; i++) {
2306 		src_dst_size_trial_t trial = trials->list[i];
2307 		// Try to create an allocation for mmap to overwrite.
2308 		mach_vm_address_t dst_alloc;
2309 		mach_vm_size_t dst_size;
2310 		bool should_panic;
2311 		bool should_slide_trial;
2312 		allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
2313 		if (should_panic) {
2314 			append_result(results, PANIC, trial.name);
2315 			continue;
2316 		}
2317 		if (should_slide_trial) {
2318 			// src a.k.a. mmap fileoff doesn't slide
2319 			trial = slide_trial_dst(trial, dst_alloc);
2320 		}
2321 
2322 		kern_return_t ret = func(map, trial.dst, trial.size, trial.src);
2323 
2324 		if (dst_alloc != 0) {
2325 			(void)mach_vm_deallocate(map, dst_alloc, dst_size);
2326 		}
2327 		append_result(results, ret, trial.name);
2328 	}
2329 	return results;
2330 }
2331 
2332 // Like test_mach_with_allocated_start_size, but specialized for mmap(MAP_FIXED).
2333 // See test_fixed_dst_size_fileoff for more.
2334 static results_t *
test_fixed_dst_size(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size),const char * testname)2335 test_fixed_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size), const char *testname)
2336 {
2337 	MAP_T map SMART_MAP;
2338 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);  // no base addr
2339 	results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, 0, trials->count);
2340 	for (unsigned i = 0; i < trials->count; i++) {
2341 		start_size_trial_t trial = trials->list[i];
2342 		// Try to create an allocation for mmap to overwrite.
2343 		mach_vm_address_t dst_alloc;
2344 		mach_vm_size_t dst_size;
2345 		bool should_panic;
2346 		bool should_slide_trial;
2347 		allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
2348 		if (should_panic) {
2349 			append_result(results, PANIC, trial.name);
2350 			continue;
2351 		}
2352 		if (should_slide_trial) {
2353 			trial = slide_trial(trial, dst_alloc);
2354 		}
2355 
2356 		kern_return_t ret = func(map, trial.start, trial.size);
2357 
2358 		if (dst_alloc != 0) {
2359 			(void)mach_vm_deallocate(map, dst_alloc, dst_size);
2360 		}
2361 		append_result(results, ret, trial.name);
2362 	}
2363 	return results;
2364 }
2365 
2366 static results_t *
test_allocated_src_allocated_dst_size(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,mach_vm_address_t dst),const char * testname)2367 test_allocated_src_allocated_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst), const char * testname)
2368 {
2369 	/*
2370 	 * Require src < dst. Some tests may get different error codes if src > dst.
2371 	 *
2372 	 * (No actual examples are known today, but see the comment in
2373 	 * test_allocated_src_unallocated_dst_size for an example in that
2374 	 * function. Here we are being conservatively careful.)
2375 	 *
2376 	 * TODO: test both src < dst and src > dst.
2377 	 */
2378 	MAP_T map SMART_MAP;
2379 	allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2380 	allocation_t dst_base SMART_ALLOCATE_VM_AFTER(map, src_base.addr, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2381 	assert(src_base.addr < dst_base.addr);
2382 	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
2383 	results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
2384 
2385 	for (unsigned i = 0; i < trials->count; i++) {
2386 		src_dst_size_trial_t trial = trials->list[i];
2387 		trial = slide_trial_src(trial, src_base.addr);
2388 		trial = slide_trial_dst(trial, dst_base.addr);
2389 		int ret = func(map, trial.src, trial.size, trial.dst);
2390 		// func should be fixed-overwrite, nothing new to deallocate
2391 		append_result(results, ret, trial.name);
2392 	}
2393 	return results;
2394 }
2395 
2396 static task_exc_guard_behavior_t saved_exc_guard_behavior;
2397 
2398 static void
disable_exc_guard()2399 disable_exc_guard()
2400 {
2401 	T_SETUPBEGIN;
2402 
2403 	// Disable EXC_GUARD for the duration of the test.
2404 	// We restore it at the end.
2405 	kern_return_t kr = task_get_exc_guard_behavior(mach_task_self(), &saved_exc_guard_behavior);
2406 	assert(kr == 0);
2407 
2408 	kr = task_set_exc_guard_behavior(mach_task_self(), TASK_EXC_GUARD_NONE);
2409 	if (kr) {
2410 		T_LOG("warning, couldn't disable EXC_GUARD; some tests are disabled");
2411 		EXC_GUARD_ENABLED = true;
2412 	} else {
2413 		EXC_GUARD_ENABLED = false;
2414 	}
2415 
2416 	T_SETUPEND;
2417 }
2418 
// Put back the EXC_GUARD behavior saved by disable_exc_guard().
// Best-effort: the result is deliberately ignored during teardown.
static void
restore_exc_guard()
{
	// restore process's EXC_GUARD handling
	(void)task_set_exc_guard_behavior(mach_task_self(), saved_exc_guard_behavior);
}
2425 
static int
set_disable_vm_sanitize_telemetry_via_sysctl(uint32_t val)
{
	// Write val to the debug.disable_vm_sanitize_telemetry sysctl;
	// returns the sysctl result (non-zero on failure, errno logged).
	int rc = sysctlbyname("debug.disable_vm_sanitize_telemetry", NULL, NULL, &val, sizeof(val));
	if (rc == 0) {
		return 0;
	}
	printf("sysctl failed with errno %d.\n", errno);
	return rc;
}
2435 
// Suppress VM sanitizer telemetry while the test intentionally feeds the
// kernel invalid parameters. Returns the sysctl result (0 on success).
static int
disable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(1);
}
2441 
// Re-enable VM sanitizer telemetry after the test run.
// Returns the sysctl result (0 on success).
static int
reenable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(0);
}
2447 
2448 #define MAX_LINE_LENGTH 100
2449 #define MAX_NUM_TESTS 350
2450 #define TMP_DIR "/tmp/"
2451 #define ASSETS_DIR "../assets/vm_parameter_validation/"
2452 #define DECOMPRESS ASSETS_DIR "decompress.sh"
2453 #define GOLDEN_FILE TMP_DIR "user_golden_image.log"
2454 
2455 #define KERN_GOLDEN_FILE TMP_DIR "kern_golden_image.log"
2456 
2457 results_t *golden_list[MAX_NUM_TESTS];
2458 results_t *kern_list[MAX_NUM_TESTS];
2459 
/*
 * Copy trial names from a freshly generated trials list into a results
 * struct parsed from a golden file (the file stores only return codes).
 * Names are kstrdup'd here and freed in dealloc_results().
 * Unlike FILL_TRIALS_NAMES below, this variant does NOT break out of the
 * enclosing switch, so the caller may continue with special-case fixups.
 */
#define FILL_TRIALS_NAMES_AND_CONTINUE(results, trials, t_count) { \
	for (unsigned i = 0; i < t_count; i++) { \
	/* trials names are free'd in dealloc_results() */ \
	        (results)->list[i].name = kstrdup((trials)->list[i].name); \
	} \
}

/*
 * Copy as many trial names as both lists have in common, warn on a count
 * mismatch, then `break` out of the enclosing switch statement.
 * NOTE: the trailing `break` means this macro may only be used directly
 * inside a switch case (as in fill_golden_trials below).
 */
#define FILL_TRIALS_NAMES(results, trials) { \
	unsigned t_count = ((trials)->count < (results)->count) ? (trials)->count : (results)->count; \
	if ((trials)->count != (results)->count) { \
	        T_LOG("%s:%d Trials count mismatch, expected %u, golden file %u\n", \
	                __func__, __LINE__, (trials)->count, (results)->count); \
	}\
	FILL_TRIALS_NAMES_AND_CONTINUE((results), (trials), (t_count)) \
	break; \
}
2476 
/*
 * Regenerate trial names for a results_t parsed from a golden file by
 * re-running the trial-generation formula recorded with the results
 * (plus its saved arguments).
 *
 * NOTE: the case labels below look like they fall through, but
 * FILL_TRIALS_NAMES() expands to a `break` out of this switch, so every
 * case that ends with it terminates there.
 */
static void
fill_golden_trials(uint64_t trialsargs[static TRIALSARGUMENTS_SIZE],
    results_t *results)
{
	trialsformula_t formula = results->trialsformula;
	uint64_t trialsargs0 = trialsargs[0];
	uint64_t trialsargs1 = trialsargs[1];
	switch (formula) {
	case eUNKNOWN_TRIALS:
		// Leave them empty
		T_FAIL("Golden file with unknown trials, testname: %s\n", results->testname);
		break;
	case eSMART_VM_MAP_KERNEL_FLAGS_TRIALS: {
		vm_map_kernel_flags_trials_t * trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_INHERIT_TRIALS: {
		vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_MMAP_KERNEL_FLAGS_TRIALS: {
		vm_map_kernel_flags_trials_t * trials SMART_MMAP_KERNEL_FLAGS_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_MMAP_FLAGS_TRIALS: {
		mmap_flags_trials_t *trials SMART_MMAP_FLAGS_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_GENERIC_FLAG_TRIALS: {
		generic_flag_trials_t *trials SMART_GENERIC_FLAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_TAG_TRIALS: {
		// special case, trails (vm_tag_trials_values) depend on data only available on KERNEL
		vm_tag_trials_t *trials SMART_VM_TAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_PROT_TRIALS: {
		vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_PROT_PAIR_TRIALS: {
		vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_LEDGER_TAG_TRIALS: {
		ledger_tag_trials_t *trials SMART_LEDGER_TAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_LEDGER_FLAG_TRIALS: {
		ledger_flag_trials_t *trials SMART_LEDGER_FLAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_ADDR_TRIALS: {
		addr_trials_t *trials SMART_ADDR_TRIALS(trialsargs0);
		if (trialsargs1) {
			// Special case with an additional trial such that obj_size + addr == 0
			// Fill the regular names first (no break), then synthesize
			// the final trial's name from the saved argument.
			FILL_TRIALS_NAMES_AND_CONTINUE(results, trials, trials->count);
			assert(trials->count + 1 == results->count);
			char *trial_desc;
			kasprintf(&trial_desc, "addr: -0x%llx", trialsargs1);
			results->list[results->count - 1].name = kstrdup(trial_desc);
			kfree_str(trial_desc);
			break;
		} else {
			FILL_TRIALS_NAMES(results, trials);
		}
	}
	case eSMART_SIZE_TRIALS: {
		size_trials_t *trials SMART_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_TRIALS: {
		// NB: base.addr is not constant between runs but doesn't affect trial name
		start_size_trials_t *trials SMART_START_SIZE_TRIALS(trialsargs0);
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_OFFSET_OBJECT_TRIALS: {
		start_size_offset_object_trials_t *trials SMART_START_SIZE_OFFSET_OBJECT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_OFFSET_TRIALS: {
		start_size_offset_trials_t *trials SMART_START_SIZE_OFFSET_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_SIZE_SIZE_TRIALS: {
		T_FAIL("SIZE_SIZE_TRIALS not used\n");
		break;
	}
	case eSMART_SRC_DST_SIZE_TRIALS: {
		src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_FILEOFF_DST_SIZE_TRIALS: {
		src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_BEHAVIOR_TRIALS: {
		vm_behavior_trials_t *trials SMART_VM_BEHAVIOR_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_ADVISE_TRIALS: {
		vm_advise_trials_t *trials SMART_VM_ADVISE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_SYNC_TRIALS: {
		vm_sync_trials_t *trials SMART_VM_SYNC_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_MSYNC_TRIALS: {
		vm_msync_trials_t *trials SMART_VM_MSYNC_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_MACHINE_ATTRIBUTE_TRIALS: {
		vm_machine_attribute_trials_t *trials SMART_VM_MACHINE_ATTRIBUTE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_PURGEABLE_AND_STATE_TRIALS: {
		vm_purgeable_and_state_trials_t *trials SMART_VM_PURGEABLE_AND_STATE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_START_SIZE_TRIALS: {
		start_size_start_size_trials_t *trials SMART_START_SIZE_START_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS: {
		shared_region_map_and_slide_2_trials_t *trials SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(trialsargs0);
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_RECLAMATION_BUFFER_INIT_TRIALS: {
		// deferred reclaim tests are currently disabled (rdar://136157720)
#if 0
		reclamation_buffer_init_trials_t * trials SMART_RECLAMATION_BUFFER_INIT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
#else
		break;
#endif
	}
	default:
		T_FAIL("New formula %u, args %llu %llu, update fill_golden_trials, testname: %s\n",
		    formula, trialsargs[0], trialsargs[1], results->testname);
	}
}
2619 
2620 // Number of test trials with ret == OUT_PARAM_BAD
2621 int out_param_bad_count = 0;
2622 
2623 static results_t *
test_name_to_golden_results(const char * testname)2624 test_name_to_golden_results(const char* testname)
2625 {
2626 	results_t *golden_results = NULL;
2627 	results_t *golden_results_found = NULL;
2628 
2629 	for (uint32_t x = 0; x < num_tests; x++) {
2630 		golden_results = golden_list[x];
2631 		if (strncmp(golden_results->testname, testname, strlen(testname)) == 0) {
2632 			golden_results->tested_count += 1;
2633 			golden_results_found = golden_results;
2634 			break;
2635 		}
2636 	}
2637 
2638 	return golden_results_found;
2639 }
2640 
2641 static void
dump_results_list(results_t * res_list[],uint32_t res_num_tests)2642 dump_results_list(results_t *res_list[], uint32_t res_num_tests)
2643 {
2644 	for (uint32_t x = 0; x < res_num_tests; x++) {
2645 		results_t *results = res_list[x];
2646 		testprintf("\t[%u] %s (%u)\n", x, results->testname, results->count);
2647 	}
2648 }
2649 
// Debug helper: print the parsed golden-file test list between delimiters.
static void
dump_golden_list()
{
	testprintf("======\n");
	testprintf("golden_list %p, num_tests %u\n", golden_list, num_tests);
	dump_results_list(golden_list, num_tests);
	testprintf("======\n");
}
2658 
// Debug helper: print the kernel-side results list between delimiters.
static void
dump_kernel_results_list()
{
	testprintf("======\n");
	testprintf("kernel_results_list %p, num_tests %u\n", kern_list, num_kern_tests);
	dump_results_list(kern_list, num_kern_tests);
	testprintf("======\n");
}
2667 
2668 // Read results written by dump_golden_results().
2669 static int
populate_golden_results(const char * filename)2670 populate_golden_results(const char *filename)
2671 {
2672 	FILE *file;
2673 	char line[MAX_LINE_LENGTH];
2674 	char trial_formula[20];
2675 	results_t *results = NULL;
2676 	trialsformula_t formula = eUNKNOWN_TRIALS;
2677 	uint64_t trial_args[TRIALSARGUMENTS_SIZE] = {0, 0};
2678 	uint32_t num_results = 0;
2679 	uint32_t result_number = 0;
2680 	int result_ret = 0;
2681 	char *test_name = NULL;
2682 	char *sub_line = NULL;
2683 	char *s_num_results = NULL;
2684 	bool in_test = FALSE;
2685 	out_param_bad_count = 0;
2686 	kern_trialname_generation = strnstr(filename, "kern_golden_image", strlen(filename)) != NULL;
2687 
2688 	// cd to the directory containing this executable
2689 	// Test files are located relative to there.
2690 	uint32_t exesize = 0;
2691 	_NSGetExecutablePath(NULL, &exesize);
2692 	char *exe = malloc(exesize);
2693 	assert(exe != NULL);
2694 	_NSGetExecutablePath(exe, &exesize);
2695 	char *dir = dirname(exe);
2696 	chdir(dir);
2697 	free(exe);
2698 
2699 	file = fopen(filename, "r");
2700 	if (file == NULL) {
2701 		T_FAIL("Could not open file %s\n", filename);
2702 		return 1;
2703 	}
2704 
2705 	// Read file line by line
2706 	while (fgets(line, MAX_LINE_LENGTH, file) != NULL) {
2707 		// Check if the line starts with "TESTNAME" or "RESULT COUNT"
2708 		if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
2709 			// remove the newline char
2710 			line[strcspn(line, "\n")] = 0;
2711 			sub_line = line + strlen(TESTNAME_DELIMITER);
2712 			test_name = strdup(sub_line);
2713 			formula = eUNKNOWN_TRIALS;
2714 			trial_args[0] = TRIALSARGUMENTS_NONE;
2715 			trial_args[1] = TRIALSARGUMENTS_NONE;
2716 			// T_LOG("TESTNAME %u : %s", num_tests, test_name);
2717 			in_test = TRUE;
2718 		} else if (in_test && strncmp(line, TRIALSFORMULA_DELIMITER, strlen(TRIALSFORMULA_DELIMITER)) == 0) {
2719 			sscanf(line, "%*s %s %*s %llu,%llu,%llu", trial_formula, &trial_args[0], &trial_args[1], &trial_page_size);
2720 			formula = trialsformula_from_string(trial_formula);
2721 		} else if (in_test && strncmp(line, RESULTCOUNT_DELIMITER, strlen(RESULTCOUNT_DELIMITER)) == 0) {
2722 			assert(num_tests < MAX_NUM_TESTS);
2723 			s_num_results = line + strlen(RESULTCOUNT_DELIMITER);
2724 			num_results = (uint32_t)strtoul(s_num_results, NULL, 10);
2725 			results = alloc_results(test_name, formula, trial_args, TRIALSARGUMENTS_SIZE, num_results);
2726 			assert(results);
2727 			results->count = num_results;
2728 			fill_golden_trials(trial_args, results);
2729 			golden_list[num_tests++] = results;
2730 			// T_LOG("num_tests %u, testname %s, count: %u", num_tests, results->testname, results->count);
2731 		} else if (in_test && strncmp(line, TESTRESULT_DELIMITER, strlen(TESTRESULT_DELIMITER)) == 0) {
2732 			sscanf(line, "%d: %d", &result_number, &result_ret);
2733 			assert(result_number < num_results);
2734 			// T_LOG("\tresult #%u: %d\n", result_number, result_ret);
2735 			results->list[result_number].ret = result_ret;
2736 			if (result_ret == OUT_PARAM_BAD) {
2737 				out_param_bad_count += 1;
2738 				T_FAIL("Out parameter violation in test %s - %s\n", results->testname, results->list[result_number].name);
2739 			}
2740 		} else {
2741 			// T_LOG("Unknown line: %s\n", line);
2742 			in_test = FALSE;
2743 		}
2744 	}
2745 
2746 	fclose(file);
2747 
2748 	if (!out_param_bad_count) {
2749 		dump_golden_list();
2750 	}
2751 	kern_trialname_generation = FALSE;
2752 
2753 	return out_param_bad_count;
2754 }
2755 
2756 static void
clean_golden_results()2757 clean_golden_results()
2758 {
2759 	for (uint32_t x = 0; x < num_tests; ++x) {
2760 		if (golden_list[x]->tested_count == 0) {
2761 			T_LOG("WARN: Test %s found in golden file but no test with that name was run\n",
2762 			    golden_list[x]->testname);
2763 		}
2764 		if (golden_list[x]->tested_count > 1) {
2765 			T_LOG("WARN: Test %s found in golden file with %d runs\n",
2766 			    golden_list[x]->testname, golden_list[x]->tested_count);
2767 		}
2768 		dealloc_results(golden_list[x]);
2769 		golden_list[x] = NULL;
2770 	}
2771 }
2772 
2773 static void
clean_kernel_results()2774 clean_kernel_results()
2775 {
2776 	for (uint32_t x = 0; x < num_kern_tests; ++x) {
2777 		dealloc_results(kern_list[x]);
2778 		kern_list[x] = NULL;
2779 	}
2780 }
2781 
// buffer to output userspace golden file results (using same size as the kern buffer)
static const int64_t GOLDEN_OUTPUT_BUFFER_SIZE = SYSCTL_OUTPUT_BUFFER_SIZE;
// Base of the lazily-allocated output buffer (allocated on first goldenprintf call).
static char* GOLDEN_OUTPUT_START;
// Current write cursor; advanced past each formatted string by goldenprintf.
static char* GOLDEN_OUTPUT_BUF;
// One past the last usable byte (START + GOLDEN_OUTPUT_BUFFER_SIZE).
static char* GOLDEN_OUTPUT_END;
2787 
2788 void
goldenprintf(const char * format,...)2789 goldenprintf(const char *format, ...)
2790 {
2791 	if (!GOLDEN_OUTPUT_START) {
2792 		GOLDEN_OUTPUT_START = calloc(GOLDEN_OUTPUT_BUFFER_SIZE, 1);
2793 		GOLDEN_OUTPUT_BUF = GOLDEN_OUTPUT_START;
2794 		GOLDEN_OUTPUT_END = GOLDEN_OUTPUT_BUF + GOLDEN_OUTPUT_BUFFER_SIZE;
2795 	}
2796 
2797 	int printed;
2798 	ssize_t s_buffer_size = GOLDEN_OUTPUT_END - GOLDEN_OUTPUT_BUF;
2799 	assert(s_buffer_size > 0 && s_buffer_size <= GOLDEN_OUTPUT_BUFFER_SIZE);
2800 	size_t buffer_size = (size_t)s_buffer_size;
2801 	va_list args;
2802 	va_start(args, format);
2803 	printed = vsnprintf(GOLDEN_OUTPUT_BUF, buffer_size, format, args);
2804 	va_end(args);
2805 	assert(printed >= 0);
2806 	assert((unsigned)printed < buffer_size - 1);
2807 	assert(GOLDEN_OUTPUT_BUF + printed + 1 < GOLDEN_OUTPUT_END);
2808 	GOLDEN_OUTPUT_BUF += printed;
2809 }
2810 
// Verbose per-trial output in dump_results; enabled by the DUMP_RESULTS env var.
bool dump = FALSE;
// Emit a new golden test result file instead of comparing; enabled by GENERATE_GOLDEN_IMAGE.
bool generate_golden = FALSE;
// Read existing golden file and print its contents in verbose format (like dump_results). Controlled by DUMP_GOLDEN_IMAGE.
bool dump_golden = FALSE;
// Run tests as tests (i.e. emit TS_{PASS/FAIL}); enabled unless golden image generation is true.
bool should_test_results =  TRUE;
2819 
2820 T_DECL(vm_parameter_validation_user,
2821     "parameter validation for userspace calls",
2822     T_META_SPAWN_TOOL(DECOMPRESS),
2823     T_META_SPAWN_TOOL_ARG("user"),
2824     T_META_SPAWN_TOOL_ARG(TMP_DIR),
2825     T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION)
2826     )
2827 {
2828 	if (disable_vm_sanitize_telemetry() != 0) {
2829 		T_FAIL("Could not disable VM API telemetry. Bailing out early.");
2830 		return;
2831 	}
2832 
2833 	read_env();
2834 
2835 	T_LOG("dump %d, golden %d, dump_golden %d, test %d\n", dump, generate_golden, dump_golden, should_test_results);
2836 
2837 	if (generate_golden && unsigned_code_is_disallowed()) {
2838 		// Some test results change when SIP is enabled.
2839 		// Golden files must record the SIP-disabled values.
2840 		T_FAIL("Can't generate golden files with SIP enabled. Disable SIP and try again.\n");
2841 		return;
2842 	}
2843 
2844 	if ((dump_golden || should_test_results) && populate_golden_results(GOLDEN_FILE)) {
2845 		// bail out early, problem loading golden test results
2846 		T_FAIL("Could not load golden file '%s'\n", GOLDEN_FILE);
2847 		return;
2848 	}
2849 
2850 	set_up_guard_page();
2851 
2852 	disable_exc_guard();
2853 
2854 	if (dump_golden) {
2855 		// just print the parsed golden file
2856 		for (uint32_t x = 0; x < num_tests; ++x) {
2857 			__dump_results(golden_list[x]);
2858 		}
2859 		goto out;
2860 	}
2861 
2862 	/*
2863 	 * -- memory entry functions --
2864 	 * The memory entry test functions use macros to generate each flavor of memory entry function.
2865 	 * This is partially becauseof many entrypoints (mach_make_memory_entry/mach_make_memory_entry_64/mach_make_memory_entry)
2866 	 * and partially because many flavors of each function are called (copy/memonly/share/...).
2867 	 */
2868 
2869 	// Mach start/size with both old-style and new-style types
2870 	// (co-located so old and new can be compared more easily)
2871 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2872 #if TEST_OLD_STYLE_MACH
2873 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
2874 #define RUN_OLD64(fn, name) RUN_NEW(fn, name)
2875 #else
2876 #define RUN_OLD(fn, name) do {} while (0)
2877 #define RUN_OLD64(fn, name) do {} while (0)
2878 #endif
2879 	// mach_make_memory_entry has up to three entry points on U32, unlike other functions that have two
2880 	RUN_NEW(call_mach_make_memory_entry_64__start_size__copy, "mach_make_memory_entry_64 (copy)");
2881 	RUN_OLD(call_mach_make_memory_entry__start_size__copy, "mach_make_memory_entry (copy)");
2882 	RUN_OLD64(call__mach_make_memory_entry__start_size__copy, "_mach_make_memory_entry (copy)");
2883 	RUN_NEW(call_mach_make_memory_entry_64__start_size__memonly, "mach_make_memory_entry_64 (mem_only)");
2884 	RUN_OLD(call_mach_make_memory_entry__start_size__memonly, "mach_make_memory_entry (mem_only)");
2885 	RUN_OLD64(call__mach_make_memory_entry__start_size__memonly, "_mach_make_memory_entry (mem_only)");
2886 	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedcreate, "mach_make_memory_entry_64 (named_create)");
2887 	RUN_OLD(call_mach_make_memory_entry__start_size__namedcreate, "mach_make_memory_entry (named_create)");
2888 	RUN_OLD64(call__mach_make_memory_entry__start_size__namedcreate, "_mach_make_memory_entry (named_create)");
2889 	RUN_NEW(call_mach_make_memory_entry_64__start_size__share, "mach_make_memory_entry_64 (share)");
2890 	RUN_OLD(call_mach_make_memory_entry__start_size__share, "mach_make_memory_entry (share)");
2891 	RUN_OLD64(call__mach_make_memory_entry__start_size__share, "_mach_make_memory_entry (share)");
2892 	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedreuse, "mach_make_memory_entry_64 (named_reuse)");
2893 	RUN_OLD(call_mach_make_memory_entry__start_size__namedreuse, "mach_make_memory_entry (named_reuse)");
2894 	RUN_OLD64(call__mach_make_memory_entry__start_size__namedreuse, "_mach_make_memory_entry (named_reuse)");
2895 #undef RUN_NEW
2896 #undef RUN_OLD
2897 #undef RUN_OLD64
2898 
2899 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_size(fn, name " (size)")))
2900 	RUN(call_mach_memory_object_memory_entry_64__size, "mach_memory_object_memory_entry_64");
2901 	RUN(call_replacement_mach_memory_object_memory_entry__size, "mach_memory_object_memory_entry");
2902 #undef RUN
2903 
2904 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2905 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2906 #define RUN_OLD64(fn, name) RUN_NEW(fn, name)
2907 
2908 	RUN_NEW(call_mach_make_memory_entry_64__vm_prot, "mach_make_memory_entry_64");
2909 #if TEST_OLD_STYLE_MACH
2910 	RUN_OLD(call_mach_make_memory_entry__vm_prot, "mach_make_memory_entry");
2911 	RUN_OLD64(call__mach_make_memory_entry__vm_prot, "_mach_make_memory_entry");
2912 #endif
2913 
2914 #undef RUN_NEW
2915 #undef RUN_OLD
2916 #undef RUN_OLD64
2917 
2918 #define RUN(fn, name) dealloc_results(process_results(test_mach_vm_prot(fn, name " (vm_prot_t)")))
2919 	RUN(call_mach_memory_object_memory_entry_64__vm_prot, "mach_memory_object_memory_entry_64");
2920 	RUN(call_replacement_mach_memory_object_memory_entry__vm_prot, "mach_memory_object_memory_entry");
2921 #undef RUN
2922 
2923 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
2924 	RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
2925 #undef RUN
2926 
2927 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
2928 	RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
2929 #undef RUN
2930 
2931 	/*
2932 	 * -- allocate/deallocate functions --
2933 	 */
2934 
2935 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
2936 	RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
2937 	RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
2938 #undef RUN
2939 
2940 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
2941 	RUN(call_mach_vm_allocate__flags, "mach_vm_allocate");
2942 #undef RUN
2943 
2944 	dealloc_results(process_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
2945 #if TEST_OLD_STYLE_MACH
2946 	dealloc_results(process_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
2947 #endif
2948 
2949 #define RUN(fn, name) dealloc_results(process_results(test_deallocator(fn, name " (start/size)")))
2950 	RUN(call_munmap, "munmap");
2951 #undef RUN
2952 
2953 	/*
2954 	 * -- map/unmap functions --
2955 	 * The map/unmap functions use multiple layers of macros.
2956 	 * The macros are used both for function generation (see IMPL_ONE_FROM_HELPER) and to call all of those.
2957 	 * This was written this way to further avoid lots of code duplication, as the map/remap functions
2958 	 * have many different parameter combinations we want to test.
2959 	 */
2960 
2961 	// map tests
2962 
2963 #define RUN_START_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
2964 #define RUN_HINT_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
2965 #define RUN_PROT_PAIR(fn, name) dealloc_results(process_results(test_mach_vm_prot_pair(fn, name " (prot_pairs)")))
2966 #define RUN_INHERIT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
2967 #define RUN_FLAGS(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
2968 #define RUN_SSOO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset_object(fn, name " (start/size/offset/object)")))
2969 
2970 #define RUN_ALL(fn, name)     \
2971 	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)");   \
2972 	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)");  \
2973 	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)");  \
2974 	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
2975 	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)");  \
2976 	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)");  \
2977 	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)");  \
2978 	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)");  \
2979 	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)");  \
2980 	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)");  \
2981 	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)");  \
2982 	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)");  \
2983 	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)");  \
2984 	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)");  \
2985 	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)");  \
2986 	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)");  \
2987 	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)");  \
2988 	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)");  \
2989 	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)");  \
2990 	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)");  \
2991 	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)");  \
2992 	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)");  \
2993 	RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)");  \
2994 	RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)");  \
2995 	RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)");  \
2996 
2997 	RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
2998 #if TEST_OLD_STYLE_MACH
2999 	RUN_ALL(vm_map_64_retyped, vm_map_64);
3000 	RUN_ALL(vm_map_retyped, vm_map);
3001 #endif
3002 
3003 #undef RUN_ALL
3004 #undef RUN_START_SIZE
3005 #undef RUN_HINT_SIZE
3006 #undef RUN_PROT_PAIR
3007 #undef RUN_INHERIT
3008 #undef RUN_FLAGS
3009 #undef RUN_SSOO
3010 
3011 	// remap tests
3012 
3013 #define FN_NAME(fn, variant, type) call_ ## fn ## __  ## variant ## __ ## type
3014 #define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(process_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
3015 #define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
3016 #define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
3017 #define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
3018 #define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
3019 #define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
3020 #define RUN_SRC_DST_SIZE(fn, dst, variant, type_name, name) RUN_HELPER(test_allocated_src_##dst##_dst_size, fn, variant, src_dst_size, type_name, name)
3021 
3022 #define RUN_ALL(fn, realigned, name)                                    \
3023 	RUN_SRC_SIZE(fn, copy, realigned "src/size", name);             \
3024 	RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name);           \
3025 	RUN_DST_SIZE(fn, fixed, "realigned dst/size", name);            \
3026 	RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name);       \
3027 	RUN_DST_SIZE(fn, anywhere, "hint/size", name);                  \
3028 	RUN_INHERIT(fn, fixed, name);                                   \
3029 	RUN_INHERIT(fn, fixed_copy, name);                              \
3030 	RUN_INHERIT(fn, anywhere, name);                                \
3031 	RUN_FLAGS(fn, nocopy, name);                                    \
3032 	RUN_FLAGS(fn, copy, name);                                      \
3033 	RUN_PROT_PAIRS(fn, fixed, name);                                \
3034 	RUN_PROT_PAIRS(fn, fixed_copy, name);                           \
3035 	RUN_PROT_PAIRS(fn, anywhere, name);                             \
3036 	RUN_SRC_DST_SIZE(fn, allocated, fixed, "src/dst/size", name);   \
3037 	RUN_SRC_DST_SIZE(fn, allocated, fixed_copy, "src/dst/size", name); \
3038 	RUN_SRC_DST_SIZE(fn, unallocated, anywhere, "src/dst/size", name); \
3039 
3040 	RUN_ALL(mach_vm_remap_user, "realigned ", mach_vm_remap);
3041 	RUN_ALL(mach_vm_remap_new_user, , mach_vm_remap_new);
3042 
3043 #if TEST_OLD_STYLE_MACH
3044 	RUN_ALL(vm_remap_retyped, "realigned ", vm_remap);
3045 #endif
3046 
3047 #undef RUN_ALL
3048 #undef RUN_HELPER
3049 #undef RUN_SRC_SIZE
3050 #undef RUN_DST_SIZE
3051 #undef RUN_PROT_PAIRS
3052 #undef RUN_INHERIT
3053 #undef RUN_FLAGS
3054 #undef RUN_SRC_DST_SIZE
3055 
3056 	// mmap tests
3057 
3058 #define RUN(fn, name) dealloc_results(process_results(test_mmap_with_allocated_vm_map_kernel_flags_t(fn, name " (kernel flags)")))
3059 	RUN(call_mmap__anon_private__kernel_flags, "mmap (anon private)");
3060 	RUN(call_mmap__anon_shared__kernel_flags, "mmap (anon shared)");
3061 #undef RUN
3062 
3063 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_mmap_flags(fn, name " (mmap flags)")))
3064 	RUN(call_mmap__mmap_flags, "mmap");
3065 #undef RUN
3066 
3067 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
3068 	RUN(call_mmap__file_private__start_size, "mmap (file private)");
3069 	RUN(call_mmap__anon_private__start_size, "mmap (anon private)");
3070 	RUN(call_mmap__file_shared__start_size, "mmap (file shared)");
3071 	RUN(call_mmap__anon_shared__start_size, "mmap (anon shared)");
3072 	RUN(call_mmap__file_private_codesign__start_size, "mmap (file private codesign)");
3073 	RUN(call_mmap__file_private_media__start_size, "mmap (file private media)");
3074 	RUN(call_mmap__nounix03_private__start_size, "mmap (no unix03)");
3075 #undef RUN
3076 
3077 #define RUN(fn, name) dealloc_results(process_results(test_fixed_dst_size(fn, name " (dst/size)")))
3078 	RUN(call_mmap__fixed_private__start_size, "mmap (fixed)");
3079 #undef RUN
3080 
3081 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (offset/size)")))
3082 	RUN(call_mmap__file_private__offset_size, "mmap (file private)");
3083 	RUN(call_mmap__anon_private__offset_size, "mmap (anon private)");
3084 	RUN(call_mmap__file_shared__offset_size, "mmap (file shared)");
3085 	RUN(call_mmap__anon_shared__offset_size, "mmap (anon shared)");
3086 	RUN(call_mmap__file_private_codesign__offset_size, "mmap (file private codesign)");
3087 	RUN(call_mmap__file_private_media__offset_size, "mmap (file private media)");
3088 	RUN(call_mmap__nounix03_private__offset_size, "mmap (no unix03)");
3089 #undef RUN
3090 
3091 #define RUN(fn, name) dealloc_results(process_results(test_dst_size_fileoff(fn, name " (hint/size/fileoff)")))
3092 	RUN(call_mmap__file_private__dst_size_fileoff, "mmap (file private)");
3093 	RUN(call_mmap__anon_private__dst_size_fileoff, "mmap (anon private)");
3094 	RUN(call_mmap__file_shared__dst_size_fileoff, "mmap (file shared)");
3095 	RUN(call_mmap__anon_shared__dst_size_fileoff, "mmap (anon shared)");
3096 	RUN(call_mmap__file_private_codesign__dst_size_fileoff, "mmap (file private codesign)");
3097 	RUN(call_mmap__file_private_media__dst_size_fileoff, "mmap (file private media)");
3098 	RUN(call_mmap__nounix03_private__dst_size_fileoff, "mmap (no unix03)");
3099 #undef RUN
3100 
3101 #define RUN(fn, name) dealloc_results(process_results(test_fixed_dst_size_fileoff(fn, name " (dst/size/fileoff)")))
3102 	RUN(call_mmap__fixed_private__dst_size_fileoff, "mmap (fixed)");
3103 #undef RUN
3104 
3105 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
3106 	RUN(call_mmap__file_private__vm_prot, "mmap (file private)");
3107 	RUN(call_mmap__anon_private__vm_prot, "mmap (anon private)");
3108 	RUN(call_mmap__file_shared__vm_prot, "mmap (file shared)");
3109 	RUN(call_mmap__anon_shared__vm_prot, "mmap (anon shared)");
3110 	RUN(call_mmap__file_private_codesign__vm_prot, "mmap (file private codesign)");
3111 	RUN(call_mmap__file_private_media__vm_prot, "mmap (file private media)");
3112 	RUN(call_mmap__nounix03_private__vm_prot, "mmap (no unix03)");
3113 	RUN(call_mmap__fixed_private__vm_prot, "mmap (fixed)");
3114 #undef RUN
3115 
3116 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
3117 	RUN(call_mremap_encrypted, "mremap_encrypted");
3118 #undef RUN
3119 
3120 	/*
3121 	 * -- wire/unwire functions --
3122 	 */
3123 
3124 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
3125 	RUN(call_mlock, "mlock");
3126 	RUN(call_munlock, "munlock");
3127 #undef RUN
3128 
3129 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3130 	RUN(call_mach_vm_wire__wire, "mach_vm_wire (wire)");
3131 	RUN(call_replacement_vm_wire__wire, "vm_wire (wire)");
3132 	RUN(call_mach_vm_wire__unwire, "mach_vm_wire (unwire)");
3133 	RUN(call_replacement_vm_wire__unwire, "vm_wire (unwire)");
3134 #undef RUN
3135 
3136 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
3137 	RUN(call_mach_vm_wire__vm_prot, "mach_vm_wire");
3138 	RUN(call_replacement_vm_wire__vm_prot, "vm_wire");
3139 #undef RUN
3140 
3141 	/*
3142 	 * -- copyin/copyout functions --
3143 	 */
3144 
3145 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3146 #if TEST_OLD_STYLE_MACH
3147 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
3148 #else
3149 #define RUN_OLD(fn, name) do {} while (0)
3150 #endif
3151 	RUN_NEW(call_mach_vm_read, "mach_vm_read");
3152 	RUN_OLD(call_vm_read, "vm_read");
3153 	RUN_NEW(call_mach_vm_read_list, "mach_vm_read_list");
3154 	RUN_OLD(call_vm_read_list, "vm_read_list");
3155 
3156 	RUN_NEW(call_mach_vm_read_overwrite__src, "mach_vm_read_overwrite (src)");
3157 	RUN_NEW(call_mach_vm_read_overwrite__dst, "mach_vm_read_overwrite (dst)");
3158 	RUN_OLD(call_vm_read_overwrite__src, "vm_read_overwrite (src)");
3159 	RUN_OLD(call_vm_read_overwrite__dst, "vm_read_overwrite (dst)");
3160 
3161 	RUN_NEW(call_mach_vm_write__src, "mach_vm_write (src)");
3162 	RUN_NEW(call_mach_vm_write__dst, "mach_vm_write (dst)");
3163 	RUN_OLD(call_vm_write__src, "vm_write (src)");
3164 	RUN_OLD(call_vm_write__dst, "vm_write (dst)");
3165 
3166 	RUN_NEW(call_mach_vm_copy__src, "mach_vm_copy (src)");
3167 	RUN_NEW(call_mach_vm_copy__dst, "mach_vm_copy (dst)");
3168 	RUN_OLD(call_vm_copy__src, "vm_copy (src)");
3169 	RUN_OLD(call_vm_copy__dst, "vm_copy (dst)");
3170 #undef RUN_NEW
3171 #undef RUN_OLD
3172 
3173 	/*
3174 	 * -- inherit functions --
3175 	 */
3176 
3177 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3178 #if TEST_OLD_STYLE_MACH
3179 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
3180 #else
3181 #define RUN_OLD(fn, name) do {} while (0)
3182 #endif
3183 	RUN_NEW(call_mach_vm_inherit, "mach_vm_inherit");
3184 	RUN_OLD(call_vm_inherit, "vm_inherit");
3185 #undef RUN_OLD
3186 #undef RUN_NEW
3187 
3188 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
3189 	RUN(call_minherit, "minherit");
3190 #undef RUN
3191 
3192 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
3193 	RUN(call_mach_vm_inherit__inherit, "mach_vm_inherit");
3194 #undef RUN
3195 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
3196 	RUN(call_minherit__inherit, "minherit");
3197 #undef RUN
3198 
3199 	/*
3200 	 * -- protection functions --
3201 	 */
3202 
3203 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3204 #if TEST_OLD_STYLE_MACH
3205 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
3206 #else
3207 #define RUN_OLD(fn, name) do {} while (0)
3208 #endif
3209 	RUN_NEW(call_mach_vm_protect__start_size, "mach_vm_protect");
3210 	RUN_OLD(call_vm_protect__start_size, "vm_protect");
3211 #undef RUN_NEW
3212 #undef RUN_OLD
3213 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
3214 #if TEST_OLD_STYLE_MACH
3215 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
3216 #else
3217 #define RUN_OLD(fn, name) do {} while (0)
3218 #endif
3219 	RUN_NEW(call_mach_vm_protect__vm_prot, "mach_vm_protect");
3220 	RUN_OLD(call_vm_protect__vm_prot, "vm_protect");
3221 #undef RUN_NEW
3222 #undef RUN_OLD
3223 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
3224 	RUN(call_mprotect__start_size, "mprotect");
3225 #undef RUN
3226 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
3227 	RUN(call_mprotect__vm_prot, "mprotect");
3228 #undef RUN
3229 
3230 	/*
3231 	 * -- madvise/behavior functions --
3232 	 */
3233 
3234 	unsigned alignment_for_can_reuse;
3235 	if (isRosetta()) {
3236 		/*
3237 		 * VM_BEHAVIOR_CAN_REUSE and MADV_CAN_REUSE get different errors
3238 		 * on Rosetta when the allocation happens to be 4K vs 16K aligned.
3239 		 * Force 16K alignment for consistent results.
3240 		 */
3241 		alignment_for_can_reuse = KB16 - 1;
3242 	} else {
3243 		/* Use default alignment everywhere else. */
3244 		alignment_for_can_reuse = 0;
3245 	}
3246 
3247 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3248 #if TEST_OLD_STYLE_MACH
3249 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
3250 #else
3251 #define RUN_OLD(fn, name) do {} while (0)
3252 #endif
3253 	RUN_NEW(call_mach_vm_behavior_set__start_size__default, "mach_vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
3254 	RUN_OLD(call_vm_behavior_set__start_size__default, "vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
3255 #undef RUN_NEW
3256 #undef RUN_OLD
3257 
3258 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_aligned_start_size(fn, alignment_for_can_reuse, name " (start/size)")))
3259 #if TEST_OLD_STYLE_MACH
3260 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_aligned_start_size(fn, alignment_for_can_reuse, name " (start/size)")))
3261 #else
3262 #define RUN_OLD(fn, name) do {} while (0)
3263 #endif
3264 	RUN_NEW(call_mach_vm_behavior_set__start_size__can_reuse, "mach_vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
3265 	RUN_OLD(call_vm_behavior_set__start_size__can_reuse, "vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
3266 #undef RUN_NEW
3267 #undef RUN_OLD
3268 
3269 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_aligned_vm_behavior_t(fn, alignment_for_can_reuse, name " (vm_behavior_t)")))
3270 #if TEST_OLD_STYLE_MACH
3271 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_aligned_vm_behavior_t(fn, alignment_for_can_reuse, name " (vm_behavior_t)")))
3272 #else
3273 #define RUN_OLD(fn, name) do {} while (0)
3274 #endif
3275 	RUN_NEW(call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
3276 	RUN_OLD(call_vm_behavior_set__vm_behavior, "vm_behavior_set");
3277 #undef RUN_NEW
3278 #undef RUN_OLD
3279 
3280 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
3281 	RUN(call_madvise__start_size, "madvise");
3282 #undef RUN
3283 
3284 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_aligned_vm_advise_t(fn, alignment_for_can_reuse, name " (vm_advise_t)")))
3285 	RUN(call_madvise__vm_advise, "madvise");
3286 #undef RUN
3287 
3288 	/*
3289 	 * -- msync functions --
3290 	 */
3291 
3292 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3293 #if TEST_OLD_STYLE_MACH
3294 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
3295 #else
3296 #define RUN_OLD(fn, name) do {} while (0)
3297 #endif
3298 	RUN_NEW(call_mach_vm_msync__start_size, "mach_vm_msync");
3299 	RUN_OLD(call_vm_msync__start_size, "vm_msync");
3300 #undef RUN_NEW
3301 #undef RUN_OLD
3302 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_sync_t(fn, name " (vm_sync_t)")))
3303 #if TEST_OLD_STYLE_MACH
3304 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_sync_t(fn, name " (vm_sync_t)")))
3305 #else
3306 #define RUN_OLD(fn, name) do {} while (0)
3307 #endif
3308 	RUN_NEW(call_mach_vm_msync__vm_sync, "mach_vm_msync");
3309 	RUN_OLD(call_vm_msync__vm_sync, "vm_msync");
3310 #undef RUN_NEW
3311 #undef RUN_OLD
3312 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
3313 	RUN(call_msync__start_size, "msync");
3314 	RUN(call_msync_nocancel__start_size, "msync_nocancel");
3315 #undef RUN
3316 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_msync_t(fn, name " (msync flags)")))
3317 	RUN(call_msync__vm_msync, "msync");
3318 	RUN(call_msync_nocancel__vm_msync, "msync_nocancel");
3319 #undef RUN
3320 
3321 	/*
3322 	 * -- machine attribute functions --
3323 	 */
3324 
3325 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3326 #if TEST_OLD_STYLE_MACH
3327 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
3328 #else
3329 #define RUN_OLD(fn, name) do {} while (0)
3330 #endif
3331 	RUN_NEW(call_mach_vm_machine_attribute__start_size, "mach_vm_machine_attribute");
3332 	RUN_OLD(call_vm_machine_attribute__start_size, "vm_machine_attribute");
3333 #undef RUN_NEW
3334 #undef RUN_OLD
3335 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_machine_attribute_t(fn, name " (machine_attribute_t)")))
3336 #if TEST_OLD_STYLE_MACH
3337 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_machine_attribute_t(fn, name " (machine_attribute_t)")))
3338 #else
3339 #define RUN_OLD(fn, name) do {} while (0)
3340 #endif
3341 	RUN_NEW(call_mach_vm_machine_attribute__machine_attribute, "mach_vm_machine_attribute");
3342 	RUN_OLD(call_vm_machine_attribute__machine_attribute, "vm_machine_attribute");
3343 #undef RUN_NEW
3344 #undef RUN_OLD
3345 
3346 	/*
3347 	 * -- purgability/purgeability functions --
3348 	 */
3349 
3350 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_addr(fn, name " (addr)")))
3351 #if TEST_OLD_STYLE_MACH
3352 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_purgeable_addr(fn, name " (addr)")))
3353 #else
3354 #define RUN_OLD(fn, name) do {} while (0)
3355 #endif
3356 	RUN_NEW(call_mach_vm_purgable_control__address__get, "mach_vm_purgable_control (get)");
3357 	RUN_OLD(call_vm_purgable_control__address__get, "vm_purgable_control (get)");
3358 
3359 	RUN_NEW(call_mach_vm_purgable_control__address__purge_all, "mach_vm_purgable_control (purge all)");
3360 	RUN_OLD(call_vm_purgable_control__address__purge_all, "vm_purgable_control (purge all)");
3361 #undef RUN_NEW
3362 #undef RUN_OLD
3363 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
3364 #if TEST_OLD_STYLE_MACH
3365 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
3366 #else
3367 #define RUN_OLD(fn, name) do {} while (0)
3368 #endif
3369 	RUN_NEW(call_mach_vm_purgable_control__purgeable_state, "mach_vm_purgable_control");
3370 	RUN_OLD(call_vm_purgable_control__purgeable_state, "vm_purgable_control");
3371 #undef RUN_NEW
3372 #undef RUN_OLD
3373 
3374 	/*
3375 	 * -- region info functions --
3376 	 */
3377 
3378 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
3379 #if TEST_OLD_STYLE_MACH
3380 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_addr(fn, name " (addr)")))
3381 #else
3382 #define RUN_OLD(fn, name) do {} while (0)
3383 #endif
3384 	RUN_NEW(call_mach_vm_region, "mach_vm_region");
3385 	RUN_OLD(call_vm_region, "vm_region");
3386 	RUN_NEW(call_mach_vm_region_recurse, "mach_vm_region_recurse");
3387 	RUN_OLD(call_vm_region_recurse, "vm_region_recurse");
3388 	RUN_OLD(call_vm_region_recurse_64, "vm_region_recurse_64");
3389 #undef RUN_NEW
3390 #undef RUN_OLD
3391 
3392 	/*
3393 	 * -- page info functions --
3394 	 */
3395 
3396 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
3397 #if TEST_OLD_STYLE_MACH
3398 #define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_addr(fn, name " (addr)")))
3399 #else
3400 #define RUN_OLD(fn, name) do {} while (0)
3401 #endif
3402 	RUN_NEW(call_mach_vm_page_info, "mach_vm_page_info");
3403 	RUN_NEW(call_mach_vm_page_query, "mach_vm_page_query");
3404 	RUN_OLD(call_vm_map_page_query, "vm_map_page_query");
3405 #undef RUN_NEW
3406 #undef RUN_OLD
3407 
3408 #define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
3409 	RUN_NEW(call_mach_vm_page_range_query, "mach_vm_page_range_query");
3410 #undef RUN_NEW
3411 
3412 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
3413 	RUN(call_mincore, "mincore");
3414 #undef RUN
3415 
3416 	/*
3417 	 * -- miscellaneous functions --
3418 	 */
3419 
3420 #define RUN(fn, name) dealloc_results(process_results(test_unix_shared_region_map_and_slide_2_np(fn, name " (files/mappings)")))
3421 	RUN(call_shared_region_map_and_slide_2_np_child, "shared_region_map_and_slide_2_np");
3422 	RUN(call_shared_region_map_and_slide_2_np_in_thread, "different thread shared_region_map_and_slide_2_np");
3423 #undef RUN
3424 
3425 #if 0
3426 #define RUN(fn, name) dealloc_results(process_results(test_mach_vm_deferred_reclamation_buffer_init(fn, name)))
3427 	RUN(call_mach_vm_deferred_reclamation_buffer_init, "mach_vm_deferred_reclamation_buffer_init");
3428 #undef RUN
3429 #endif
3430 
3431 out:
3432 	restore_exc_guard();
3433 
3434 	if (generate_golden) {
3435 		if (!out_param_bad_count || (dump && !should_test_results)) {
3436 			// Print after verified there is not OUT_PARAM_BAD results before printing,
3437 			// or user explicitly set DUMP_RESULTS=1 GENERATE_GOLDEN_IMAGE=1
3438 			printf("%s", GOLDEN_OUTPUT_START);
3439 		}
3440 	}
3441 	free(GOLDEN_OUTPUT_START);
3442 
3443 	if (dump_golden || should_test_results) {
3444 		clean_golden_results();
3445 	}
3446 
3447 	if (reenable_vm_sanitize_telemetry() != 0) {
3448 		T_FAIL("Failed to reenable VM API telemetry.");
3449 		return;
3450 	}
3451 
3452 	T_PASS("vm parameter validation userspace");
3453 }
3454 
3455 
3456 /////////////////////////////////////////////////////
3457 // Kernel test invocation.
3458 // The actual test code is in:
3459 // osfmk/tests/vm_parameter_validation_kern.c
3460 
3461 #define KERN_RESULT_DELIMITER "\n"
3462 
3463 #ifndef STRINGIFY
3464 #define __STR(x)        #x
3465 #define STRINGIFY(x)    __STR(x)
3466 #endif
3467 
3468 // Verify golden list being generated doesn't contain OUT_BAD_PARAM
3469 static int
out_bad_param_in_kern_golden_results(char * kern_buffer)3470 out_bad_param_in_kern_golden_results(char *kern_buffer)
3471 {
3472 	const char *out_param_bad_str = STRINGIFY(OUT_PARAM_BAD);
3473 	char *out_param_bad_match = strstr(kern_buffer, out_param_bad_str);
3474 	if (out_param_bad_match) {
3475 		T_FAIL("Out parameter violation return code (%s) found in results, aborting.\n", out_param_bad_str);
3476 		return 1;
3477 	}
3478 	return 0;
3479 }
3480 
3481 
// Read results written by the kernel side's __dump_results().
//
// Destructively tokenizes kern_buffer (strtok inserts NULs) into lines and
// rebuilds results_t records, appending them to the globals kern_list /
// num_kern_tests. Expected per-test line sequence:
//   TESTNAME_DELIMITER <test name>
//   RESULTCOUNT_DELIMITER <count>
//   TESTCONFIG_DELIMITER <config string>
//   KERN_TESTRESULT_DELIMITER <ret>, <trial name>   (repeated <count> times)
// Unrecognized lines are silently ignored. Always returns 0.
//
// NOTE(review): the RESULTCOUNT/TESTCONFIG branches dereference kern_results
// from the most recent RESULTCOUNT line — this relies on the kernel emitting
// well-formed, in-order output; out-of-order delimiters would use a stale or
// NULL pointer. Confirm against __dump_results() if the format changes.
static int
populate_kernel_results(char *kern_buffer)
{
	char *line = NULL;
	char *sub_line = NULL;
	char *test_name = NULL;
	char *result_name = NULL;
	char *token = NULL;
	char *s_num_kern_results = NULL;
	results_t *kern_results = NULL;
	uint32_t num_kern_results = 0;
	uint32_t result_number = 0;
	int result_ret = 0;
	bool in_test = FALSE;

	line = strtok(kern_buffer, KERN_RESULT_DELIMITER);
	while (line != NULL) {
		if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
			// Start of a new test: remember its name; results follow.
			sub_line = line + strlen(TESTNAME_DELIMITER);
			test_name = strdup(sub_line);
			result_number = 0;
			in_test = TRUE;
		} else if (in_test && strncmp(line, RESULTCOUNT_DELIMITER, strlen(RESULTCOUNT_DELIMITER)) == 0) {
			// Result count: allocate the results_t that subsequent
			// TESTCONFIG/TESTRESULT lines fill in, and register it globally.
			s_num_kern_results = line + strlen(RESULTCOUNT_DELIMITER);
			num_kern_results = (uint32_t)strtoul(s_num_kern_results, NULL, 10);
			kern_results = alloc_results(test_name, eUNKNOWN_TRIALS, num_kern_results);
			kern_results->count = num_kern_results;
			kern_list[num_kern_tests++] = kern_results;
		} else if (in_test && strncmp(line, TESTCONFIG_DELIMITER, strlen(TESTCONFIG_DELIMITER)) == 0) {
			sub_line = line + strlen(TESTCONFIG_DELIMITER);
			kern_results->testconfig = strdup(sub_line);
		} else if (in_test && strstr(line, KERN_TESTRESULT_DELIMITER)) {
			// should have found TESTCONFIG already
			assert(kern_results->testconfig != NULL);
			sscanf(line, KERN_TESTRESULT_DELIMITER "%d", &result_ret);
			// get result name (comes after the first ,)
			token = strchr(line, ',');
			if (token) {
				token = token + 2; // skip the , and the extra space
				result_name = strdup(token);
				if (result_number >= num_kern_results) {
					// More results than the declared count: log and drop
					// the excess trial rather than overflowing the list.
					T_LOG("\tKERN Invalid output in test %s, seeing more results (%u) than expected (%u), ignoring trial RESULT %d, %s\n",
					    test_name, result_number, num_kern_results, result_ret, result_name);
					free(result_name);
				} else {
					// result_name ownership transfers to the list entry.
					kern_results->list[result_number++] = (result_t){.ret = result_ret, .name = result_name};
				}
			}
		} else {
			// T_LOG("Unknown kernel result line: %s\n", line);
			//in_test = FALSE;
		}

		line = strtok(NULL, KERN_RESULT_DELIMITER);
	}

	dump_kernel_results_list();

	return 0;
}
3543 
3544 static int64_t
run_sysctl_test(const char * t,int64_t value)3545 run_sysctl_test(const char *t, int64_t value)
3546 {
3547 	char name[1024];
3548 	int64_t result = 0;
3549 	size_t s = sizeof(value);
3550 	int rc;
3551 
3552 	snprintf(name, sizeof(name), "debug.test.%s", t);
3553 	rc = sysctlbyname(name, &result, &s, &value, s);
3554 	T_QUIET; T_ASSERT_POSIX_SUCCESS(rc, "sysctlbyname(%s)", t);
3555 	return result;
3556 }
3557 
// Kernel/kext parameter validation test. The actual test code runs in the
// kernel (osfmk/tests/vm_parameter_validation_kern.c); this harness triggers
// it through a debug.test sysctl, hands it a user buffer to write results
// into, then either prints the raw output (golden-image generation) or
// parses it and compares against the golden file.
T_DECL(vm_parameter_validation_kern,
    "parameter validation for kext/xnu calls",
    T_META_SPAWN_TOOL(DECOMPRESS),
    T_META_SPAWN_TOOL_ARG("kern"),
    T_META_SPAWN_TOOL_ARG(TMP_DIR),
    T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION)
    )
{
	// The deliberately-invalid VM calls below would flood telemetry;
	// disable it for the duration of the test (re-enabled at the end).
	if (disable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Could not disable VM API telemetry. Bailing out early.");
		return;
	}

	// Populates the dump / generate_golden / dump_golden /
	// should_test_results mode flags (presumably from environment
	// variables such as DUMP_RESULTS and GENERATE_GOLDEN_IMAGE —
	// see read_env() for the exact names).
	read_env();

	T_LOG("dump %d, golden %d, dump_golden %d, test %d\n", dump, generate_golden, dump_golden, should_test_results);

	disable_exc_guard();

	// Dump-golden mode: parse and print the existing golden file, run nothing.
	if (dump_golden) {
		if (populate_golden_results(KERN_GOLDEN_FILE)) {
			// couldn't load golden test results
			T_FAIL("Could not load golden file '%s'\n", KERN_GOLDEN_FILE);
			goto out;
		}

		// just print the parsed golden file
		for (uint32_t x = 0; x < num_tests; ++x) {
			__dump_results(golden_list[x]);
		}
		clean_golden_results();
		goto out;
	}

	T_LOG("Running kernel tests\n");

	// We allocate a large buffer. The kernel-side code writes output to it.
	// Then we print that output. This is faster than making the kernel-side
	// code print directly to the serial console, which takes many minutes
	// to transfer our test output at 14.4 KB/s.
	// We align this buffer to KB16 to allow the lower bits to be used for a fd.
	void *output;
	int alloc_failed = posix_memalign(&output, KB16, SYSCTL_OUTPUT_BUFFER_SIZE);
	assert(alloc_failed == 0);

	memset(output, 0, SYSCTL_OUTPUT_BUFFER_SIZE);

	// Because the buffer is KB16-aligned, its low log2(KB16) bits are zero,
	// so the fd (and one flag bit) can be packed into the single int64_t
	// argument passed through the sysctl.
	int fd = get_fd();
	assert((fd & ((int)KB16 - 1)) == fd);
	if (generate_golden) {
		// pass flag on the msb of the fd
		assert((fd & ((int)(KB16 >> 1) - 1)) == fd);
		fd |=  KB16 >> 1;
	}
	int64_t result = run_sysctl_test("vm_parameter_validation_kern", (int64_t)output + fd);

	// The in-kernel test is expected to report 1.
	T_QUIET; T_EXPECT_EQ(1ull, result, "vm_parameter_validation_kern");

	if (generate_golden) {
		// Golden-image generation: emit the kernel's raw output so it can
		// be captured into a new golden file.
		if (!out_bad_param_in_kern_golden_results(output) || (dump && !should_test_results)) {
			// Print only after verifying there are no OUT_PARAM_BAD results,
			// or when the user explicitly set DUMP_RESULTS=1 GENERATE_GOLDEN_IMAGE=1
			printf("%s", output);
		}
		free(output);
		output = NULL;
	} else {
		// recreate a results_t to compare against the golden file results
		if (populate_kernel_results(output)) {
			T_FAIL("Error while parsing results\n");
		}
		free(output);
		output = NULL;

		if (should_test_results && populate_golden_results(KERN_GOLDEN_FILE)) {
			// couldn't load golden test results
			T_FAIL("Could not load golden file '%s'\n", KERN_GOLDEN_FILE);
			clean_kernel_results();
			goto out;
		}

		// compare results against values from golden list
		for (uint32_t x = 0; x < num_kern_tests; ++x) {
			process_results(kern_list[x]);
			dealloc_results(kern_list[x]);
			kern_list[x] = NULL;
		}
		clean_golden_results();
	}

out:
	restore_exc_guard();

	if (reenable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Failed to reenable VM API telemetry.");
		return;
	}

	T_PASS("vm parameter validation kern");
}
3658