xref: /xnu-11215.41.3/tests/vm/vm_parameter_validation.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3 #include <test_utils.h>
4 
5 #include <sys/types.h>
6 #include <sys/sysctl.h>
7 #include <mach/mach.h>
8 #include <mach/mach_vm.h>
9 #include <mach/memory_entry.h>
10 #include <mach/vm_types.h>
11 #include <sys/mman.h>
12 #include <unistd.h>
13 #include <TargetConditionals.h>
14 #include <mach-o/dyld.h>
15 #include <libgen.h>
16 
17 #include <os/bsd.h> // For os_parse_boot_arg_int
18 
19 // workarounds for buggy MIG declarations
20 // see tests/vm/vm_parameter_validation_replacement_*.defs
21 // and tests/Makefile for details
22 #include "vm_parameter_validation_replacement_mach_host.h"
23 #include "vm_parameter_validation_replacement_host_priv.h"
24 
25 // code shared with kernel/kext tests
26 #include "../../osfmk/tests/vm_parameter_validation.h"
27 
28 T_GLOBAL_META(
29 	T_META_NAMESPACE("xnu.vm"),
30 	T_META_RADAR_COMPONENT_NAME("xnu"),
31 	T_META_RADAR_COMPONENT_VERSION("VM"),
32 	T_META_ASROOT(true),  /* required for vm_wire tests on macOS */
33 	T_META_RUN_CONCURRENTLY(false), /* vm_parameter_validation_kern uses kernel globals */
34 	T_META_ALL_VALID_ARCHS(true),
35 	XNU_T_META_REQUIRES_DEVELOPMENT_KERNEL
36 	);
37 
38 /*
39  * vm_parameter_validation.c
40  * Test parameter validation of vm's userspace API
41  *
42  * The test compares the return values against a 'golden' list, which is a text
43  * file previously generated and compressed in .xz files, per platform.
44  * When vm_parameter_validation runs, it calls assets/vm_parameter_validation/decompress.sh,
45  * which detects the platform and decompresses the corresponding user and kern
46  * golden files.
47  *
48  * Any return code mismatch is reported as a failure, printing test name and iteration.
49  * New tests not present in the 'golden' list will run but they are also reported as a failure.
50  *
51  * There are two environment variable flags that makes development work easier and
52  * can temporarily disable golden list testing.
53  *
54  * SKIP_TESTS
55  * When running with SKIP_TESTS set, the test will not compare the results
56  * against the golden files.
57  *
58  * DUMP_RESULTS
59  * When running with DUMP_RESULTS set, the test will print all the returned values
60  * (as opposed to only the failing ones). To pretty-print this output use the python script:
61  * DUMP_RESULTS=1 vm_parameter_validation | tools/format_vm_parameter_validation.py
62  */
63 
64 
65 
66 /*
67  * xnu/libsyscall/mach/mach_vm.c intercepts some VM calls from userspace,
68  * sometimes doing something other than the expected MIG call.
69  * This test generates its own MIG userspace call sites to call the kernel
70  * entrypoints directly, bypassing libsyscall's interference.
71  *
72  * The custom MIG call sites are generated into:
73  * vm_parameter_validation_vm_map_user.c
74  * vm_parameter_validation_mach_vm_user.c
75  */
76 
77 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
78 #pragma clang diagnostic ignored "-Wmissing-prototypes"
79 #pragma clang diagnostic ignored "-Wpedantic"
80 
81 /*
82  * Our wire tests often try to wire the whole address space.
83  * In that case the error code is determined by the first range of addresses
84  * that cannot be wired.
85  * In most cases that is a protection failure on a malloc guard page. But
86  * sometimes, circumstances outside of our control change the address map of
87  * our test process and add holes, which means we get a bad address error
88  * instead, and the test fails because the return code doesn't match what's
89  * recorded in the golden files.
90  * To avoid this, we want to keep a guard page inside our data section.
91  * Because that data section is one of the first things in our address space,
92  * the behavior of wire is (more) predictable.
93  */
_Alignas(KB16) char guard_page[KB16];

static void
set_up_guard_page(void)
{
	/* _Alignas above must have produced a page-aligned address. */
	assert(((mach_vm_address_t)guard_page & PAGE_MASK) == 0);
	/*
	 * Strip every permission from guard_page so it behaves as a guard
	 * page. (Kept inside assert() to match the original build behavior.)
	 */
	assert(mprotect(guard_page, sizeof(guard_page), 0) == 0);
}
108 
// Return a file descriptor that tests can read and write.
// A single temporary file is shared among all tests. The file is unlinked
// as soon as it is created: the descriptor stays usable, but no stale file
// is left behind in /tmp when the test process exits (the original code
// leaked one temp file per run).
static int
get_fd(void)
{
	static int fd = -1;
	if (fd > 0) {
		return fd;
	}

	char filename[] = "/tmp/vm_parameter_validation_XXXXXX";
	fd = mkstemp(filename);
	assert(fd > 2);  // not stdin/stdout/stderr
	// Removing the directory entry does not invalidate the open fd.
	(void)unlink(filename);
	return fd;
}
124 
static int
munmap_helper(void *ptr, size_t size)
{
	// munmap rejects size == 0 even though mmap accepts it; treat it
	// as a successful no-op.
	if (size == 0) {
		return 0;
	}
	/*
	 * munmap expects aligned inputs, even though mmap sometimes returns
	 * unaligned values. Round the range outward to page boundaries.
	 */
	mach_vm_address_t first_page = (mach_vm_address_t)ptr & ~PAGE_MASK;
	mach_vm_address_t end_page = ((mach_vm_address_t)ptr + size + PAGE_MASK) & ~PAGE_MASK;
	return munmap((void *)first_page, end_page - first_page);
}
140 
141 // Some tests provoke EXC_GUARD exceptions.
142 // We disable EXC_GUARD if possible. If we can't, we disable those tests instead.
143 static bool EXC_GUARD_ENABLED = true;
144 
static int
call_munlock(void *start, size_t size)
{
	// Normalize the BSD convention (-1 plus errno) into a plain error code.
	if (munlock(start, size) != 0) {
		return errno;
	}
	return 0;
}
151 
static int
call_mlock(void *start, size_t size)
{
	// Normalize the BSD convention (-1 plus errno) into a plain error code.
	if (mlock(start, size) != 0) {
		return errno;
	}
	return 0;
}
158 
static kern_return_t
call_munmap(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size)
{
	// BSD munmap always operates on the current task; the map parameter
	// exists only to match the shared test-driver signature.
	if (munmap((void*)start, (size_t)size) != 0) {
		return errno;
	}
	return 0;
}
165 
166 static int
call_mremap_encrypted(void * start,size_t size)167 call_mremap_encrypted(void *start, size_t size)
168 {
169 	int err = mremap_encrypted(start, size, CRYPTID_NO_ENCRYPTION, /*cputype=*/ 0, /*cpusubtype=*/ 0);
170 	return err ? errno : 0;
171 }
172 
173 /////////////////////////////////////////////////////
174 // Mach tests
175 
static mach_port_t
make_a_mem_object(vm_size_t size)
{
	// Create a read/write memory-object entry of `size` bytes and return
	// the port that names it. Aborts on failure.
	mach_port_t handle;
	kern_return_t kr = mach_memory_object_memory_entry_64(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &handle);
	assert(kr == 0);
	return handle;
}
184 
static mach_port_t
make_a_mem_entry(vm_size_t size)
{
	// Create a ledger-tagged named memory entry of the requested size.
	memory_object_size_t entry_size = (memory_object_size_t)size;
	mach_port_t port = MACH_PORT_NULL;
	kern_return_t kr = mach_make_memory_entry_64(mach_host_self(), &entry_size, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "allocate memory entry");
	return port;
}
194 
static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_port_t out_handle, mach_port_t saved_handle)
{
	// A failing call must leave the out parameter untouched; report any
	// unexpected write as an out-parameter violation.
	if (*kr != KERN_SUCCESS && out_handle != saved_handle) {
		*kr = OUT_PARAM_BAD;
	}
}
204 // mach_make_memory_entry is really several functions wearing a trenchcoat.
205 // Run a separate test for each variation.
206 
207 // mach_make_memory_entry also has a confusing number of entrypoints:
208 // U64: mach_make_memory_entry_64(64) (mach_make_memory_entry is the same MIG message)
209 // U32: mach_make_memory_entry(32), mach_make_memory_entry_64(64), _mach_make_memory_entry(64) (each is a unique MIG message)
/*
 * Generate one wrapper per MAP_MEM_* flavor (memonly, namedcreate, copy,
 * share, namedreuse) plus a vm_prot_t wrapper for entry point FN, whose
 * address/size parameters have type T. Each wrapper maps/queries against a
 * freshly made memory object, deallocates any handle returned on success,
 * and verifies the out parameter was untouched on failure.
 */
#define IMPL(FN, T)                                                               \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size)                      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	/* MAP_MEM_ONLY doesn't use the size. It should not change it. */         \
	                assert(io_size == size);                                  \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size)                  \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size)                         \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size)                         \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size)                   \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot)      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;            \
	        mach_port_t out_handle = invalid_value;                           \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              prot, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                (void)mach_port_deallocate(mach_task_self(), out_handle); \
	        }                                                                 \
	        (void)mach_port_deallocate(mach_task_self(), memobject);          \
	        check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
	        return kr;                                                        \
	}

/* U64 entry point; mach_make_memory_entry is the same MIG message there. */
IMPL(mach_make_memory_entry_64, mach_vm_address_t)
#if TEST_OLD_STYLE_MACH
/* U32-only entry points; each is a distinct MIG message. */
IMPL(mach_make_memory_entry, vm_address_t)
IMPL(_mach_make_memory_entry, mach_vm_address_t)
#endif
#undef IMPL
321 
static inline void
check_mach_memory_object_memory_entry_outparam_changes(kern_return_t * kr, mach_port_t out_handle,
    mach_port_t saved_out_handle)
{
	// A failing call must leave the out handle untouched; report any
	// unexpected write as an out-parameter violation.
	if (*kr != KERN_SUCCESS && out_handle != saved_out_handle) {
		*kr = OUT_PARAM_BAD;
	}
}
332 
/*
 * Generate call_<FN>__size and call_<FN>__vm_prot wrappers for one
 * mach_memory_object_memory_entry-style entry point. Each wrapper
 * deallocates the entry on success and verifies the out parameter was
 * untouched on failure.
 */
#define IMPL(FN) \
	static kern_return_t                                            \
	call_ ## FN ## __size(MAP_T map __unused, mach_vm_size_t size)  \
	{                                                               \
	        kern_return_t kr;                                       \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;  \
	        mach_port_t out_entry = invalid_value;                  \
	        kr = FN(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_entry); \
	        if (kr == 0) {                                          \
	                (void)mach_port_deallocate(mach_task_self(), out_entry); \
	        }                                                       \
	        check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
	        return kr;                                              \
	}                                                               \
	static kern_return_t                                            \
	call_ ## FN ## __vm_prot(MAP_T map __unused, mach_vm_size_t size, vm_prot_t prot) \
	{                                                               \
	        kern_return_t kr;                                       \
	        mach_port_t invalid_value = INVALID_INITIAL_MACH_PORT;  \
	        mach_port_t out_entry = invalid_value;                  \
	        kr = FN(mach_host_self(), 1, size, prot, 0, &out_entry); \
	        if (kr == 0) {                                          \
	                (void)mach_port_deallocate(mach_task_self(), out_entry); \
	        }                                                       \
	        check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
	        return kr;                                              \
	}

// The declaration of mach_memory_object_memory_entry is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://117927965
IMPL(replacement_mach_memory_object_memory_entry)
IMPL(mach_memory_object_memory_entry_64)
#undef IMPL
367 
static inline void
check_vm_read_outparam_changes(kern_return_t * kr, mach_vm_size_t size, mach_vm_size_t requested_size,
    mach_vm_address_t addr)
{
	// Only successful reads constrain the out parameters.
	if (*kr != KERN_SUCCESS) {
		return;
	}
	// The reported size must match what was asked for.
	if (size != requested_size) {
		*kr = OUT_PARAM_BAD;
	}
	// A zero-length read must not hand back a buffer address.
	if (size == 0 && addr != 0) {
		*kr = OUT_PARAM_BAD;
	}
}
383 
384 
static kern_return_t
call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Seed the out parameters with sentinels so unexpected writes on
	// failure can be detected.
	vm_offset_t read_addr = INVALID_INITIAL_ADDRESS;
	mach_msg_type_number_t read_size = INVALID_INITIAL_SIZE;
	kern_return_t kr = mach_vm_read(map, start, size, &read_addr, &read_size);
	if (kr == KERN_SUCCESS) {
		(void)mach_vm_deallocate(mach_task_self(), read_addr, read_size);
	}
	check_vm_read_outparam_changes(&kr, read_size, size, read_addr);
	return kr;
}
#if TEST_OLD_STYLE_MACH
static kern_return_t
call_vm_read(MAP_T map, vm_address_t start, vm_size_t size)
{
	// Old-style variant; same sentinel-seeded out-parameter checking.
	vm_offset_t read_addr = INVALID_INITIAL_ADDRESS;
	mach_msg_type_number_t read_size = INVALID_INITIAL_SIZE;
	kern_return_t kr = vm_read(map, start, size, &read_addr, &read_size);
	if (kr == KERN_SUCCESS) {
		(void)mach_vm_deallocate(mach_task_self(), read_addr, read_size);
	}
	check_vm_read_outparam_changes(&kr, read_size, size, read_addr);
	return kr;
}
#endif
411 
static kern_return_t
call_mach_vm_read_list(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Exercise mach_vm_read_list with a single-entry list.
	mach_vm_read_entry_t entries = {{.address = start, .size = size}};
	kern_return_t kr = mach_vm_read_list(map, entries, 1);
	if (kr == KERN_SUCCESS) {
		(void)mach_vm_deallocate(mach_task_self(), entries[0].address, entries[0].size);
	}
	return kr;
}
#if TEST_OLD_STYLE_MACH
static kern_return_t
call_vm_read_list(MAP_T map, vm_address_t start, vm_size_t size)
{
	// Old-style variant: single-entry read list.
	vm_read_entry_t entries = {{.address = start, .size = size}};
	kern_return_t kr = vm_read_list(map, entries, 1);
	if (kr == KERN_SUCCESS) {
		(void)mach_vm_deallocate(mach_task_self(), entries[0].address, entries[0].size);
	}
	return kr;
}
#endif
434 
static inline void
check_vm_read_overwrite_outparam_changes(kern_return_t * kr, mach_vm_size_t size, mach_vm_size_t requested_size)
{
	// A successful read-overwrite must report exactly the requested size.
	if (*kr == KERN_SUCCESS && size != requested_size) {
		*kr = OUT_PARAM_BAD;
	}
}
444 
static kern_return_t __unused
call_mach_vm_read_overwrite__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	// Both source and destination come straight from the test parameters.
	mach_vm_size_t copied_size;
	kern_return_t kr = mach_vm_read_overwrite(map, start, size, start_2, &copied_size);
	check_vm_read_overwrite_outparam_changes(&kr, copied_size, size);
	return kr;
}
453 
static kern_return_t
call_mach_vm_read_overwrite__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	// Valid destination allocation; the source range is caller-controlled.
	mach_vm_size_t copied_size;
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_read_overwrite(map, src, size, dst.addr, &copied_size);
	check_vm_read_overwrite_outparam_changes(&kr, copied_size, size);
	return kr;
}
463 
static kern_return_t
call_mach_vm_read_overwrite__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	// Valid source allocation; the destination range is caller-controlled.
	mach_vm_size_t copied_size;
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = mach_vm_read_overwrite(map, src.addr, size, dst, &copied_size);
	check_vm_read_overwrite_outparam_changes(&kr, copied_size, size);
	return kr;
}
473 
#if TEST_OLD_STYLE_MACH
static kern_return_t __unused
call_vm_read_overwrite__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	// Both source and destination come straight from the test parameters.
	vm_size_t copied_size;
	kern_return_t kr = vm_read_overwrite(map, (vm_address_t) start, (vm_size_t) size, (vm_address_t) start_2, &copied_size);
	check_vm_read_overwrite_outparam_changes(&kr, copied_size, size);
	return kr;
}

static kern_return_t
call_vm_read_overwrite__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	// Valid destination allocation; the source range is caller-controlled.
	vm_size_t copied_size;
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_read_overwrite(map, (vm_address_t) src, (vm_size_t) size, (vm_address_t) dst.addr, &copied_size);
	check_vm_read_overwrite_outparam_changes(&kr, copied_size, size);
	return kr;
}

static kern_return_t
call_vm_read_overwrite__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	// Valid source allocation; the destination range is caller-controlled.
	vm_size_t copied_size;
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	kern_return_t kr = vm_read_overwrite(map, (vm_address_t) src.addr, (vm_size_t) size, (vm_address_t) dst, &copied_size);
	check_vm_read_overwrite_outparam_changes(&kr, copied_size, size);
	return kr;
}
#endif
504 
505 
506 
static kern_return_t __unused
call_mach_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	// Both source and destination come straight from the test parameters.
	return mach_vm_copy(map, start, size, start_2);
}

static kern_return_t
call_mach_vm_copy__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	// Valid destination allocation; the source range is caller-controlled.
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return mach_vm_copy(map, src, size, dst.addr);
}

static kern_return_t
call_mach_vm_copy__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	// Valid source allocation; the destination range is caller-controlled.
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return mach_vm_copy(map, src.addr, size, dst);
}
529 
#if TEST_OLD_STYLE_MACH
static kern_return_t __unused
call_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	// Both source and destination come straight from the test parameters.
	return vm_copy(map, (vm_address_t) start, (vm_size_t) size, (vm_address_t) start_2);
}

static kern_return_t
call_vm_copy__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
{
	// Valid destination allocation; the source range is caller-controlled.
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return vm_copy(map, (vm_address_t) src, (vm_size_t) size, (vm_address_t) dst.addr);
}

static kern_return_t
call_vm_copy__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
{
	// Valid source allocation; the destination range is caller-controlled.
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return vm_copy(map, (vm_address_t) src.addr, (vm_size_t) size, (vm_address_t) dst);
}
#endif
554 
static kern_return_t __unused
call_mach_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	// Both destination and source buffer come from the test parameters.
	return mach_vm_write(map, start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
}

static kern_return_t
call_mach_vm_write__src(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Valid destination allocation; the source buffer is caller-controlled.
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return mach_vm_write(map, dst.addr, (vm_offset_t) start, (mach_msg_type_number_t) size);
}

static kern_return_t
call_mach_vm_write__dst(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Valid source allocation; the destination address is caller-controlled.
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return mach_vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
}
577 
#if TEST_OLD_STYLE_MACH
static kern_return_t __unused
call_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
{
	// Both destination and source buffer come from the test parameters.
	return vm_write(map, (vm_address_t) start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
}

static kern_return_t
call_vm_write__src(MAP_T map, vm_address_t start, vm_size_t size)
{
	// Valid destination allocation; the source buffer is caller-controlled.
	allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return vm_write(map, (vm_address_t) dst.addr, start, (mach_msg_type_number_t) size);
}

static kern_return_t
call_vm_write__dst(MAP_T map, vm_address_t start, vm_size_t size)
{
	// Valid source allocation; the destination address is caller-controlled.
	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	return vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
}
#endif
602 
603 // mach_vm_wire, vm_wire (start/size)
604 // "wire" and "unwire" paths diverge internally; test both
/*
 * Generate call_<FN>__<FLAVOR> wrappers. FLAVOR names the generated
 * function; PROT selects the path under test (VM_PROT_READ wires,
 * VM_PROT_NONE unwires).
 */
#define IMPL(FN, T, FLAVOR, PROT)                                       \
	static kern_return_t                                            \
	call_ ## FN ## __ ## FLAVOR(MAP_T map, T start, T size)         \
	{                                                               \
	        mach_port_t host_priv = HOST_PRIV_NULL;                 \
	        kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
	        assert(kr == 0);  /* host priv port on macOS requires entitlements or root */ \
	        kr = FN(host_priv, map, start, size, PROT);             \
	        return kr;                                              \
	}
IMPL(mach_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(mach_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(replacement_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
#undef IMPL
623 
624 // mach_vm_wire, vm_wire (vm_prot_t)
/*
 * Generate call_<FN>__vm_prot wrappers that pass an arbitrary vm_prot_t
 * through to the wire entry point.
 */
#define IMPL(FN, T)                                                     \
	static kern_return_t                                            \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{                                                               \
	        mach_port_t host_priv = HOST_PRIV_NULL;                 \
	        kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
	        assert(kr == 0);  /* host priv port on macOS requires entitlements or root */ \
	        kr = FN(host_priv, map, start, size, prot);             \
	        return kr;                                              \
	}
IMPL(mach_vm_wire, mach_vm_address_t)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t)
#undef IMPL
641 
642 
// mach_vm_map/vm32_map/vm32_map_64 infra

// Common function-pointer shape used to drive mach_vm_map and the
// old-style vm_map/vm_map_64 variants through one set of test helpers;
// the *_retyped wrappers below adapt the narrower APIs to this signature.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
656 
// Map anonymous memory (no backing object) fixed-overwrite at `start`.
static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_vm_address_t addr = start;
	// fixed-overwrite lands on top of a pre-existing allocation,
	// so there is nothing to deallocate here on success
	return fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    0, 0, 0, 0, 0, VM_INHERIT_NONE);
}
666 
// Map anonymous memory fixed-overwrite at `start` with copy=TRUE.
static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_vm_address_t addr = start;
	// fixed-overwrite lands on top of a pre-existing allocation,
	// so there is nothing to deallocate here on success
	return fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    0, 0, true, 0, 0, VM_INHERIT_NONE);
}
676 
// Map anonymous memory anywhere; `start_hint` only seeds the search.
// Cleans up the mapping on success.
static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
{
	mach_vm_address_t where = start_hint;
	kern_return_t kr = fn(map, &where, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
	if (kr == KERN_SUCCESS) {
		(void)mach_vm_deallocate(map, where, size);
	}
	return kr;
}
687 
// Map a freshly-made memory object fixed-overwrite at `start` (copy=FALSE).
static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_port_t obj = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = start;
	kern_return_t kr = fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    obj, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	(void)mach_port_deallocate(mach_task_self(), obj);
	// fixed-overwrite lands on a pre-existing allocation; don't deallocate
	return kr;
}
699 
// Map a freshly-made memory object fixed-overwrite at `start` (copy=TRUE).
static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_port_t obj = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = start;
	kern_return_t kr = fn(map, &addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    obj, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	(void)mach_port_deallocate(mach_task_self(), obj);
	// fixed-overwrite lands on a pre-existing allocation; don't deallocate
	return kr;
}
711 
// Map a freshly-made memory object anywhere; cleans up mapping and port.
static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
{
	mach_port_t obj = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t where = start_hint;
	kern_return_t kr = fn(map, &where, size, 0, VM_FLAGS_ANYWHERE, obj,
	    KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	if (kr == KERN_SUCCESS) {
		(void)mach_vm_deallocate(map, where, size);
	}
	(void)mach_port_deallocate(mach_task_self(), obj);
	return kr;
}
725 
// Shared driver for the start/size/offset/object-size trials: maps a memory
// object of `obj_size` at `offset`, then cleans up mapping and port.
static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	mach_port_t obj = make_a_mem_object(obj_size);
	mach_vm_address_t addr = start;
	kern_return_t kr = fn(map, &addr, size, 0, flags, obj,
	    offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, addr, size, flags);
	(void)mach_port_deallocate(mach_task_self(), obj);
	return kr;
}
737 
// Fixed-overwrite, copy=FALSE variant of the ssoo trial.
static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
}

// Fixed-overwrite, copy=TRUE variant of the ssoo trial.
static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
}

// VM_FLAGS_ANYWHERE, copy=FALSE variant of the ssoo trial.
static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
}
755 
// Shared driver for the inherit trials on anonymous (object-less) mappings.
// Note the KB16 offset is passed even though the object port is null.
static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	mach_vm_address_t addr = start;
	kern_return_t kr = fn(map, &addr, size, 0, flags,
	    0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(kr, map, addr, size, flags);
	return kr;
}
765 
// Fixed-overwrite, copy=FALSE inherit trial on anonymous memory.
static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}

// Fixed-overwrite, copy=TRUE inherit trial on anonymous memory.
static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}

// VM_FLAGS_ANYWHERE, copy=FALSE inherit trial on anonymous memory.
static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}
783 
// Shared driver for the inherit trials on memory-object mappings.
// Cleans up the mapping (unless fixed-overwrite) and the object port.
static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	mach_port_t obj = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = start;
	kern_return_t kr = fn(map, &addr, size, 0, flags,
	    obj, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(kr, map, addr, size, flags);
	(void)mach_port_deallocate(mach_task_self(), obj);
	return kr;
}
795 
// Fixed-overwrite, copy=FALSE inherit trial on a memory object.
static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}

// Fixed-overwrite, copy=TRUE inherit trial on a memory object.
static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}

// VM_FLAGS_ANYWHERE, copy=FALSE inherit trial on a memory object.
static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}
813 
// Flags trial: anonymous mapping with caller-supplied VM flags (copy=FALSE).
// `start` is in/out so the trial can observe where the mapping landed.
static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	kern_return_t kr = fn(map, start, size, 0, flags,
	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	return kr;
}
822 
// Flags trial: anonymous mapping with caller-supplied VM flags, copy=TRUE.
// Fix: this "copy" variant previously passed copy=false, making it
// byte-identical to call_map_fn__allocate__flags and leaving the copy=TRUE
// path untested (cf. call_map_fn__memobject_copy__flags, which passes true).
// NOTE(review): golden files may need regeneration for this trial.
static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	kern_return_t kr = fn(map, start, size, 0, flags,
	    0, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	return kr;
}
831 
// Flags trial: memory-object mapping with caller-supplied VM flags (copy=FALSE).
static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_port_t obj = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = fn(map, start, size, 0, flags,
	    obj, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), obj);
	return kr;
}
842 
// Flags trial: memory-object mapping with caller-supplied VM flags (copy=TRUE).
static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_port_t obj = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = fn(map, start, size, 0, flags,
	    obj, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), obj);
	return kr;
}
853 
// Shared driver for cur/max protection-pair trials on anonymous memory.
// Maps KB16 at an address of the kernel's choosing (or fixed-overwrite at 0).
static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	mach_vm_address_t addr = 0;
	kern_return_t kr = fn(map, &addr, KB16, 0, flags,
	    0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, addr, KB16, flags);
	return kr;
}
863 
// Fixed-overwrite, copy=FALSE prot-pair trial on anonymous memory.
static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}

// Fixed-overwrite, copy=TRUE prot-pair trial on anonymous memory.
static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}

// VM_FLAGS_ANYWHERE, copy=FALSE prot-pair trial on anonymous memory.
static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}
881 
// Shared driver for cur/max protection-pair trials on a memory object.
// Maps KB16 of a fresh memory entry with the given protections.
static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t out_addr = 0;
	kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
	    memobject, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
	// Fix: release the memory-entry send right. Every other memobject
	// helper in this file deallocates its port; this one leaked one
	// send right per trial.
	(void)mach_port_deallocate(mach_task_self(), memobject);
	return kr;
}
892 
// Fixed-overwrite, copy=FALSE prot-pair trial on a memory object.
static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}

// Fixed-overwrite, copy=TRUE prot-pair trial on a memory object.
static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}

// VM_FLAGS_ANYWHERE, copy=FALSE prot-pair trial on a memory object.
static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}
910 
911 // implementations
912 
// Generates call_<map_fn>__<instance>(map, start, size) forwarding to the
// matching call_map_fn__<instance> driver above.
#define IMPL_MAP_FN_START_SIZE(map_fn, instance)                                                \
    static kern_return_t                                                                        \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
    {                                                                                           \
	return call_map_fn__ ## instance(map_fn, map, start, size);                             \
    }

// Same shape as above, but the address is only a placement hint (ANYWHERE).
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance)                                                      \
    static kern_return_t                                                                             \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
    {                                                                                                \
	return call_map_fn__ ## instance(map_fn, map, start_hint, size);                             \
    }

// Generates the start/size/offset/object-size trial entry point.
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance)                                                                                                                   \
    static kern_return_t                                                                                                                                                         \
    call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
    {                                                                                                                                                                            \
	return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size);                                                              \
    }

// Generates the vm_inherit_t trial entry point.
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance)                                                                          \
    static kern_return_t                                                                                                          \
    call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
    {                                                                                                                             \
	return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit);                                         \
    }

// Generates the VM-flags trial entry point (address is in/out).
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance)                                                                 \
    static kern_return_t                                                                                               \
    call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
    {                                                                                                                  \
	return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags);                                  \
    }

// Generates the cur/max protection-pair trial entry point.
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance)                                               \
    static kern_return_t                                                                       \
    call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
    {                                                                                          \
	return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max);               \
    }

// Stamp out the full set of trial entry points for one map function.
#define IMPL(map_fn)                                                       \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere)                   \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere)                  \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed)      \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere)   \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed)             \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy)        \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere)          \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed)            \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy)       \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere)         \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate)                     \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy)                \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject)                    \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere)                  \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere)                 \

981 
// mach_vm_map with out-parameter validation layered on top; the
// check_* helper inspects kr and the before/after addresses.
static kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	mach_vm_address_t mapped_at = *address;
	kern_return_t kr = mach_vm_map(target_task, &mapped_at, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, mapped_at, *address, flags, target_task);
	*address = mapped_at;
	return kr;
}
IMPL(mach_vm_map_wrapped)
1002 
#if TEST_OLD_STYLE_MACH
// Adapts old-style vm_map_64 to the map_fn_t signature. The narrowing
// casts to vm_address_t/vm_size_t/vm_offset_t are deliberate: they mirror
// what a caller of the old API could express (values may truncate on U32).
static kern_return_t
vm_map_64_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map_64(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
IMPL(vm_map_64_retyped)

// Adapts old-style vm_map to the map_fn_t signature; same deliberate
// narrowing casts as vm_map_64_retyped above.
static kern_return_t
vm_map_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
IMPL(vm_map_retyped)
#endif
1046 
#undef IMPL_MAP_FN_START_SIZE
// Fix: this previously read "#undef IMPL_MAP_FN_SIZE" — a macro that was
// never defined — leaving IMPL_MAP_FN_HINT_SIZE visible for the rest of
// the file. Undef the macro that actually exists.
#undef IMPL_MAP_FN_HINT_SIZE
#undef IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT
#undef IMPL_MAP_FN_START_SIZE_INHERIT
#undef IMPL_MAP_FN_START_SIZE_FLAGS
#undef IMPL_MAP_FN_PROT_PAIRS
#undef IMPL
1054 
1055 
1056 // mmap
1057 // Directly calling this symbol lets us hit the syscall directly instead of the libsyscall wrapper.
1058 void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off);
1059 
1060 // We invert MAP_UNIX03 in the flags. This is because by default libsyscall intercepts calls to mmap and adds MAP_UNIX03.
1061 // That means MAP_UNIX03 should be the default for most of our tests, and we should only test without MAP_UNIX03 when we explicitly want to.
1062 void *
mmap_wrapper(void * addr,size_t len,int prot,int flags,int fildes,off_t off)1063 mmap_wrapper(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
1064 {
1065 	flags ^= MAP_UNIX03;
1066 	return __mmap(addr, len, prot, flags, fildes, off);
1067 }
1068 
// Rename the UNIX03 flag for the code below since we're inverting its meaning.
// (mmap_wrapper XORs MAP_UNIX03 into flags, so a caller passing
// MAP_NOT_UNIX03 actually *clears* UNIX03 behavior in the real syscall.)
#define MAP_NOT_UNIX03 0x40000
static_assert(MAP_NOT_UNIX03 == MAP_UNIX03, "MAP_UNIX03 value changed");
#undef MAP_UNIX03
// Poison the old name so nothing below uses it accidentally.
#define MAP_UNIX03 dont_use_MAP_UNIX03
1074 
1075 // helpers
1076 
1077 // Return true if security policy disallows unsigned code.
1078 // Some test results are expected to change with this set.
// Return true if security policy disallows unsigned code.
// Some test results are expected to change with this set.
static bool
unsigned_code_is_disallowed(void)
{
	if (isRosetta()) {
		return false;
	}

	int policy = 0;
	size_t policy_size = sizeof(policy);
	int rc = sysctlbyname("security.mac.amfi.unsigned_code_policy",
	    &policy, &policy_size, NULL, 0);
	if (rc != 0) {
		// sysctl not present, assume unsigned code is okay
		return false;
	}
	return policy;
}
1096 
1097 static int
maybe_hide_mmap_failure(int ret,int prot,int fd)1098 maybe_hide_mmap_failure(int ret, int prot, int fd)
1099 {
1100 	// Special case for mmap(PROT_EXEC, fd).
1101 	// When SIP is enabled these get EPERM from mac_file_check_mmap().
1102 	// The golden files record the SIP-disabled values.
1103 	// This special case also allows the test to succeed when SIP
1104 	// is enabled even though the return value isn't the golden one.
1105 	if (ret == EPERM && fd != -1 && (prot & PROT_EXEC) &&
1106 	    unsigned_code_is_disallowed()) {
1107 		return ACCEPTABLE;
1108 	}
1109 	return ret;
1110 }
1111 
// mmap trial varying prot; returns 0 on success, (laundered) errno on failure.
static kern_return_t
help_call_mmap__vm_prot(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
{
	int fd = (flags & MAP_ANON) ? -1 : get_fd();
	void *mapped = mmap_wrapper((void *)start, size, prot, flags, fd, 0);
	if (mapped == MAP_FAILED) {
		return maybe_hide_mmap_failure(errno, prot, fd);
	}
	assert(0 == munmap_helper(mapped, size));
	return 0;
}
1127 
// mmap trial varying "kernel flags", which ride in the fd argument
// (mmap uses fd to carry VM_MAKE_TAG-style values for MAP_ANON mappings —
// see the mmap man page).
static kern_return_t
help_call_mmap__kernel_flags(MAP_T map __unused, int mmap_flags, mach_vm_address_t start, mach_vm_size_t size, int kernel_flags)
{
	void *mapped = mmap_wrapper((void *)start, size, VM_PROT_DEFAULT, mmap_flags, kernel_flags, 0);
	if (mapped == MAP_FAILED) {
		return errno;
	}
	assert(0 == munmap_helper(mapped, size));
	return 0;
}
1139 
// mmap trial varying destination address, size, and file offset.
static kern_return_t
help_call_mmap__dst_size_fileoff(MAP_T map __unused, int flags, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff)
{
	int fd = (flags & MAP_ANON) ? -1 : get_fd();
	void *mapped = mmap_wrapper((void *)dst, size, VM_PROT_DEFAULT, flags, fd, (off_t)fileoff);
	if (mapped == MAP_FAILED) {
		return errno;
	}
	assert(0 == munmap_helper(mapped, size));
	return 0;
}
1155 
// mmap trial varying start address and size (file offset fixed at 0).
static kern_return_t
help_call_mmap__start_size(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size)
{
	int fd = (flags & MAP_ANON) ? -1 : get_fd();
	void *mapped = mmap_wrapper((void *)start, size, VM_PROT_DEFAULT, flags, fd, 0);
	if (mapped == MAP_FAILED) {
		return errno;
	}
	assert(0 == munmap_helper(mapped, size));
	return 0;
}
1171 
// mmap trial varying file offset and size (kernel chooses the address).
static kern_return_t
help_call_mmap__offset_size(MAP_T map __unused, int flags, mach_vm_address_t offset, mach_vm_size_t size)
{
	int fd = (flags & MAP_ANON) ? -1 : get_fd();
	void *mapped = mmap_wrapper((void *)0, size, VM_PROT_DEFAULT, flags, fd, (off_t)offset);
	if (mapped == MAP_FAILED) {
		return errno;
	}
	assert(0 == munmap_helper(mapped, size));
	return 0;
}
1187 
// Generates call_mmap__<variant>__<type>() forwarding to
// help_call_mmap__<type>() with a fixed mmap-flags combination.
// Extra trailing trial parameters are forwarded via __VA_ARGS__
// (DROP_COMMAS/DROP_TYPES split the "type, name" pairs).
#define IMPL_ONE_FROM_HELPER(type, variant, flags, ...)                                                                                 \
	static kern_return_t                                                                                                            \
	call_mmap ## __ ## variant ## __ ## type(MAP_T map, mach_vm_address_t start, mach_vm_size_t size DROP_COMMAS(__VA_ARGS__)) {    \
	        return help_call_mmap__ ## type(map, flags, start, size DROP_TYPES(__VA_ARGS__));                                       \
	}

// call functions

// Stamp out one call function per interesting flags combination for a helper.
// MAP_NOT_UNIX03 opts out of UNIX03 semantics (see mmap_wrapper's XOR).
#define IMPL_FROM_HELPER(type, ...) \
	IMPL_ONE_FROM_HELPER(type, file_private,          MAP_FILE | MAP_PRIVATE,                          ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, anon_private,          MAP_ANON | MAP_PRIVATE,                          ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, file_shared,           MAP_FILE | MAP_SHARED,                           ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, anon_shared,           MAP_ANON | MAP_SHARED,                           ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, file_private_codesign, MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_CODESIGN, ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, file_private_media,    MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA,    ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, nounix03_private,      MAP_FILE | MAP_PRIVATE | MAP_NOT_UNIX03,         ##__VA_ARGS__)  \
	IMPL_ONE_FROM_HELPER(type, fixed_private,         MAP_FILE | MAP_PRIVATE | MAP_FIXED,              ##__VA_ARGS__)  \

IMPL_FROM_HELPER(vm_prot, vm_prot_t, prot)
IMPL_FROM_HELPER(dst_size_fileoff, mach_vm_address_t, fileoff)
IMPL_FROM_HELPER(start_size)
IMPL_FROM_HELPER(offset_size)

IMPL_ONE_FROM_HELPER(kernel_flags, anon_private, MAP_ANON | MAP_PRIVATE, int, kernel_flags)
IMPL_ONE_FROM_HELPER(kernel_flags, anon_shared, MAP_ANON | MAP_SHARED, int, kernel_flags)
1213 
// mmap trial varying the mmap flags themselves.
static kern_return_t
call_mmap__mmap_flags(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size, int mmap_flags)
{
	int fd = -1;
	if (!(mmap_flags & MAP_ANON)) {
		fd = get_fd();
	}
	void *rv = mmap_wrapper((void *)start, size, VM_PROT_DEFAULT, mmap_flags, fd, 0);
	if (rv == MAP_FAILED) {
		return errno;
	}
	// Fix: use munmap_helper like every other mmap helper in this file,
	// so unmap-size handling stays consistent across trials (this was
	// the only caller of raw munmap).
	assert(0 == munmap_helper(rv, size));
	return 0;
}
1229 
1230 // Mach memory entry ownership
1231 
// mach_memory_entry_ownership() with an arbitrary ledger tag
// (flags fixed at 0). Returns the kernel's result code.
static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
{
	mach_port_t mementry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = mach_memory_entry_ownership(mementry, mach_task_self(), ledger_tag, 0);
	(void)mach_port_deallocate(mach_task_self(), mementry);
	return kr;
}
1240 
// mach_memory_entry_ownership() with an arbitrary ledger flag
// (tag fixed at VM_LEDGER_TAG_DEFAULT). Returns the kernel's result code.
static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
{
	mach_port_t mementry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = mach_memory_entry_ownership(mementry, mach_task_self(), VM_LEDGER_TAG_DEFAULT, ledger_flag);
	(void)mach_port_deallocate(mach_task_self(), mementry);
	return kr;
}
1249 
1250 
// For deallocators like munmap and vm_deallocate.
// Return a non-zero error code if we should avoid performing this trial.
kern_return_t
short_circuit_deallocator(MAP_T map, start_size_trial_t trial)
{
	// mach_vm_deallocate(size == 0) is safe
	if (trial.size == 0) {
		return 0;
	}

	// Allow deallocation attempts based on a valid allocation
	// (assumes the test loop will slide this trial to a valid allocation)
	if (!trial.start_is_absolute && trial.size_is_absolute) {
		return 0;
	}

	// Avoid overwriting random live memory.
	if (!range_overflows_strict_zero(trial.start, trial.size, VM_MAP_PAGE_MASK(map))) {
		return IGNORED;
	}

	// Avoid EXC_GUARD if it is still enabled.
	mach_vm_address_t sum;
	if (!__builtin_add_overflow(trial.start, trial.size, &sum) &&
	    sum != 0 &&
	    round_up_page(sum, PAGE_SIZE) == 0) {
		// start+size is non-zero but rounds up to zero:
		// this case provokes EXC_GUARD
		if (EXC_GUARD_ENABLED) {
			return GUARD;
		}
	}

	// Allow.
	return 0;
}
1286 
// Thin wrapper: mach_vm_deallocate() with the trial's start/size.
static kern_return_t
call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	return mach_vm_deallocate(map, start, size);
}
1293 
// Thin wrapper: old-style vm_deallocate(), truncating the 64-bit
// trial values to the old-style vm_address_t/vm_size_t widths.
static kern_return_t
call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	return vm_deallocate(map, (vm_address_t)start, (vm_size_t)size);
}
1300 
1301 
// mach_vm_allocate() with an arbitrary flags value.
// Also verifies the out-parameter behavior against the saved hint.
static kern_return_t
call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size, int flags)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate(map, start, size, flags);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
	return kr;
}
1310 
1311 
// mach_vm_allocate(VM_FLAGS_FIXED) with the trial's start/size.
// Also verifies the out-parameter behavior against the saved start.
static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_FIXED);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
	return kr;
}
1320 
// mach_vm_allocate(VM_FLAGS_ANYWHERE): *start is only a placement hint.
// Also verifies the out-parameter behavior against the saved hint.
static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_ANYWHERE);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
	return kr;
}
1329 
// Run func once per vm_inherit_t trial value against a valid allocation
// in a temporary map, collecting each return code into a results_t.
// Caller owns the returned results (free with dealloc_results()).
static results_t *
test_mach_allocated_with_vm_inherit_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t flags), const char *testname)
{
	MAP_T map SMART_MAP;
	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
	results_t *results = alloc_results(testname, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		int ret = func(map, base.addr, base.size, trials->list[i].value);
		append_result(results, ret, trials->list[i].name);
	}
	return results;
}
1344 
1345 
// Same as test_mach_allocated_with_vm_inherit_t, but for Unix-style
// entry points that take no map argument (they implicitly act on the
// current task). Caller owns the returned results.
static results_t *
test_unix_allocated_with_vm_inherit_t(kern_return_t (*func)(mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t flags), const char *testname)
{
	MAP_T map SMART_MAP;
	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
	results_t *results = alloc_results(testname, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		int ret = func(base.addr, base.size, trials->list[i].value);
		append_result(results, ret, trials->list[i].name);
	}
	return results;
}
1360 
1361 static task_exc_guard_behavior_t saved_exc_guard_behavior;
1362 
// Disable EXC_GUARD for the duration of the test, remembering the
// previous behavior for restore_exc_guard(). Sets EXC_GUARD_ENABLED
// so trials that would provoke EXC_GUARD can be skipped if needed.
static void
disable_exc_guard(void)
{
	T_SETUPBEGIN;

	// Disable EXC_GUARD for the duration of the test.
	// We restore it at the end.
	kern_return_t kr = task_get_exc_guard_behavior(mach_task_self(), &saved_exc_guard_behavior);
	assert(kr == 0);

	kr = task_set_exc_guard_behavior(mach_task_self(), TASK_EXC_GUARD_NONE);
	if (kr) {
		T_LOG("warning, couldn't disable EXC_GUARD; some tests are disabled");
		EXC_GUARD_ENABLED = true;
	} else {
		EXC_GUARD_ENABLED = false;
	}

	T_SETUPEND;
}
1383 
// Restore the process's EXC_GUARD handling saved by disable_exc_guard().
// Best-effort: the result is intentionally ignored during teardown.
static void
restore_exc_guard(void)
{
	(void)task_set_exc_guard_behavior(mach_task_self(), saved_exc_guard_behavior);
}
1390 
// Write val to the debug.disable_vm_sanitize_telemetry sysctl.
// Returns 0 on success, or the sysctl's non-zero result on failure.
static int
set_disable_vm_sanitize_telemetry_via_sysctl(uint32_t val)
{
	int ret = sysctlbyname("debug.disable_vm_sanitize_telemetry", NULL, NULL, &val, sizeof(uint32_t));
	if (ret != 0) {
		// Log via darwintest like the rest of this file (not printf).
		T_LOG("sysctl failed with errno %d.", errno);
	}
	return ret;
}
1400 
// Turn off VM sanitizer telemetry while the test spams invalid calls.
static int
disable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(1);
}
1406 
// Re-enable VM sanitizer telemetry after the test completes.
static int
reenable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(0);
}
1412 
// Golden-file parsing limits and locations.
#define MAX_LINE_LENGTH 100
#define MAX_NUM_TESTS 350
// Versioned archive that decompress.sh expands into TMP_DIR.
#define GOLDEN_FILES_VERSION "vm_parameter_validation_golden_images_168d625.tar.xz"
#define TMP_DIR "/tmp/"
#define ASSETS_DIR "../assets/vm_parameter_validation/"
#define DECOMPRESS ASSETS_DIR "decompress.sh"
#define GOLDEN_FILE TMP_DIR "user_golden_image.log"

#define KERN_GOLDEN_FILE TMP_DIR "kern_golden_image.log"
// Result capacity used for kernel tests not present in the golden list.
#define KERN_MAX_UNKNOWN_TEST_RESULTS    64

// Expected results parsed from the user and kernel golden files.
results_t *golden_list[MAX_NUM_TESTS];
results_t *kern_list[MAX_NUM_TESTS];
1426 
// Read results written by dump_golden_results().
// Parses the decompressed golden file into golden_list[] (bumping the
// shared num_tests counter). Returns 0 on success, 1 if the file could
// not be opened.
static int
populate_golden_results(const char *filename)
{
	FILE *file;
	char line[MAX_LINE_LENGTH];
	results_t *results = NULL;
	uint32_t num_results = 0;
	uint32_t result_number = 0;
	int result_ret = 0;
	char *test_name = NULL;
	char *sub_line = NULL;
	char *s_num_results = NULL;
	bool in_test = FALSE;

	// cd to the directory containing this executable.
	// Test files are located relative to there.
	uint32_t exesize = 0;
	_NSGetExecutablePath(NULL, &exesize);
	char *exe = malloc(exesize);
	if (exe != NULL) {
		_NSGetExecutablePath(exe, &exesize);
		// Best-effort; a failed chdir surfaces as fopen failure below.
		(void)chdir(dirname(exe));
		free(exe);
	}

	file = fopen(filename, "r");
	if (file == NULL) {
		T_LOG("Could not open file %s\n", filename);
		return 1;
	}

	// Read file line by line
	while (fgets(line, MAX_LINE_LENGTH, file) != NULL) {
		// Check if the line starts with "TESTNAME" or "RESULT COUNT"
		if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
			// remove the line terminator (handles \r, \n, and \r\n)
			line[strcspn(line, "\r\n")] = 0;
			sub_line = line + strlen(TESTNAME_DELIMITER);
			test_name = strdup(sub_line);
			in_test = TRUE;
		} else if (in_test && strncmp(line, RESULTCOUNT_DELIMITER, strlen(RESULTCOUNT_DELIMITER)) == 0) {
			assert(num_tests < MAX_NUM_TESTS);
			s_num_results = line + strlen(RESULTCOUNT_DELIMITER);
			num_results = (uint32_t)strtoul(s_num_results, NULL, 10);
			results = alloc_results(test_name, num_results);
			results->count = num_results;
			golden_list[num_tests++] = results;
		} else if (in_test && strncmp(line, TESTRESULT_DELIMITER, strlen(TESTRESULT_DELIMITER)) == 0) {
			// "%u" matches result_number's uint32_t type; skip
			// malformed lines instead of using stale values.
			if (sscanf(line, "%u: %d", &result_number, &result_ret) == 2) {
				assert(result_number < num_results);
				results->list[result_number] = (result_t){.ret = result_ret};
			}
		} else {
			// any other line ends the current test's block
			in_test = FALSE;
		}
	}

	fclose(file);

	dump_golden_list();

	return 0;
}
1494 
1495 static void
clean_golden_results()1496 clean_golden_results()
1497 {
1498 	for (uint32_t x = 0; x < num_tests; ++x) {
1499 		dealloc_results(golden_list[x]);
1500 		golden_list[x] = NULL;
1501 	}
1502 }
1503 
1504 static void
clean_kernel_results()1505 clean_kernel_results()
1506 {
1507 	for (uint32_t x = 0; x < num_kern_tests; ++x) {
1508 		dealloc_results(kern_list[x]);
1509 		kern_list[x] = NULL;
1510 	}
1511 }
1512 
// Run-mode flags, populated from environment variables by read_env().
// Verbose output in dump_results, controlled by DUMP_RESULTS env.
bool dump = FALSE;
// Output to create a golden test result, controlled by GENERATE_GOLDEN_IMAGE.
bool generate_golden = FALSE;
// Run tests as tests (i.e. emit TS_{PASS/FAIL}), enabled unless golden image generation is true.
bool test_results =  TRUE;
1519 
// Main userspace test: runs every trial group and compares each call's
// return codes against the golden file. The RUN* ordering below must
// stay stable because results are matched by test name.
T_DECL(vm_parameter_validation_user,
    "parameter validation for userspace calls",
    T_META_SPAWN_TOOL(DECOMPRESS),
    T_META_SPAWN_TOOL_ARG("user"),
    T_META_SPAWN_TOOL_ARG(TMP_DIR),
    T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION)
    )
{
	if (disable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Could not disable VM API telemetry. Bailing out early.");
		return;
	}

	read_env();

	T_LOG("dump %d, golden %d, test %d\n", dump, generate_golden, test_results);

	if (generate_golden && unsigned_code_is_disallowed()) {
		// Some test results change when SIP is enabled.
		// Golden files must record the SIP-disabled values.
		T_FAIL("Can't generate golden files with SIP enabled. Disable SIP and try again.\n");
		return;
	}

	if (test_results && populate_golden_results(GOLDEN_FILE)) {
		// bail out early, couldn't load golden test results
		T_FAIL("Could not open golden file '%s'\n", GOLDEN_FILE);
		return;
	}

	set_up_guard_page();

	disable_exc_guard();

	/*
	 * Group 1: memory entry
	 */

	// Mach start/size with both old-style and new-style types
	// (co-located so old and new can be compared more easily)
#define RUN_NEW(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(dump_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#define RUN_OLD64(fn, name) RUN_NEW(fn, name)
#else
#define RUN_OLD(fn, name) do {} while (0)
#define RUN_OLD64(fn, name) do {} while (0)
#endif
	// mach_make_memory_entry has up to three entry points on U32, unlike other functions that have two
	RUN_NEW(call_mach_make_memory_entry_64__start_size__copy, "mach_make_memory_entry_64 (copy)");
	RUN_OLD(call_mach_make_memory_entry__start_size__copy, "mach_make_memory_entry (copy)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__copy, "_mach_make_memory_entry (copy)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__memonly, "mach_make_memory_entry_64 (mem_only)");
	RUN_OLD(call_mach_make_memory_entry__start_size__memonly, "mach_make_memory_entry (mem_only)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__memonly, "_mach_make_memory_entry (mem_only)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedcreate, "mach_make_memory_entry_64 (named_create)");
	RUN_OLD(call_mach_make_memory_entry__start_size__namedcreate, "mach_make_memory_entry (named_create)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__namedcreate, "_mach_make_memory_entry (named_create)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__share, "mach_make_memory_entry_64 (share)");
	RUN_OLD(call_mach_make_memory_entry__start_size__share, "mach_make_memory_entry (share)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__share, "_mach_make_memory_entry (share)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedreuse, "mach_make_memory_entry_64 (named_reuse)");
	RUN_OLD(call_mach_make_memory_entry__start_size__namedreuse, "mach_make_memory_entry (named_reuse)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__namedreuse, "_mach_make_memory_entry (named_reuse)");
#undef RUN_NEW
#undef RUN_OLD
#undef RUN_OLD64

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_size(fn, name " (size)")))
	RUN(call_mach_memory_object_memory_entry_64__size, "mach_memory_object_memory_entry_64");
	RUN(call_replacement_mach_memory_object_memory_entry__size, "mach_memory_object_memory_entry");
#undef RUN

#define RUN_NEW(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#define RUN_OLD(fn, name) dealloc_results(dump_results(test_oldmach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#define RUN_OLD64(fn, name) RUN_NEW(fn, name)

	RUN_NEW(call_mach_make_memory_entry_64__vm_prot, "mach_make_memory_entry_64");
#if TEST_OLD_STYLE_MACH
	RUN_OLD(call_mach_make_memory_entry__vm_prot, "mach_make_memory_entry");
	RUN_OLD64(call__mach_make_memory_entry__vm_prot, "_mach_make_memory_entry");
#endif

#undef RUN_NEW
#undef RUN_OLD
#undef RUN_OLD64

#define RUN(fn, name) dealloc_results(dump_results(test_mach_vm_prot(fn, name " (vm_prot_t)")))
	RUN(call_mach_memory_object_memory_entry_64__vm_prot, "mach_memory_object_memory_entry_64");
	RUN(call_replacement_mach_memory_object_memory_entry__vm_prot, "mach_memory_object_memory_entry");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
	RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
	RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
#undef RUN

	/*
	 * Group 2: allocate/deallocate
	 */

#define RUN(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_start_size(fn, name)))
	RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
	RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
	RUN(call_mach_vm_allocate__flags, "mach_vm_allocate");
#undef RUN

	dealloc_results(dump_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
#if TEST_OLD_STYLE_MACH
	dealloc_results(dump_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
#endif

#define RUN(fn, name) dealloc_results(dump_results(test_deallocator(fn, name " (start/size)")))
	RUN(call_munmap, "munmap");
#undef RUN

	/*
	 * Group 3: map/unmap
	 */

	// map tests

#define RUN_START_SIZE(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
#define RUN_HINT_SIZE(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
#define RUN_PROT_PAIR(fn, name) dealloc_results(dump_results(test_mach_vm_prot_pair(fn, name " (prot_pairs)")))
#define RUN_INHERIT(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
#define RUN_FLAGS(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
#define RUN_SSOO(fn, name) dealloc_results(dump_results(test_mach_with_start_size_offset_object(fn, name " (start/size/offset/object)")))

#define RUN_ALL(fn, name)     \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)");   \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)");  \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)");  \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)");  \
	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)");  \
	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)");  \
	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)");  \
	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)");  \
	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)");  \
	RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)");  \
	RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)");  \
	RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)");  \

	RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
#if TEST_OLD_STYLE_MACH
	RUN_ALL(vm_map_64_retyped, vm_map_64);
	RUN_ALL(vm_map_retyped, vm_map);
#endif

#undef RUN_ALL
#undef RUN_START_SIZE
#undef RUN_HINT_SIZE
#undef RUN_PROT_PAIR
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SSOO

	// remap tests

#define FN_NAME(fn, variant, type) call_ ## fn ## __  ## variant ## __ ## type
#define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(dump_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
#define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
#define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
#define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
#define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
#define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
#define RUN_SRC_DST_SIZE(fn, dst, variant, type_name, name) RUN_HELPER(test_allocated_src_##dst##_dst_size, fn, variant, src_dst_size, type_name, name)

#define RUN_ALL(fn, realigned, name)                                    \
	RUN_SRC_SIZE(fn, copy, realigned "src/size", name);             \
	RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name);           \
	RUN_DST_SIZE(fn, fixed, "realigned dst/size", name);            \
	RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name);       \
	RUN_DST_SIZE(fn, anywhere, "hint/size", name);                  \
	RUN_INHERIT(fn, fixed, name);                                   \
	RUN_INHERIT(fn, fixed_copy, name);                              \
	RUN_INHERIT(fn, anywhere, name);                                \
	RUN_FLAGS(fn, nocopy, name);                                    \
	RUN_FLAGS(fn, copy, name);                                      \
	RUN_PROT_PAIRS(fn, fixed, name);                                \
	RUN_PROT_PAIRS(fn, fixed_copy, name);                           \
	RUN_PROT_PAIRS(fn, anywhere, name);                             \
	RUN_SRC_DST_SIZE(fn, allocated, fixed, "src/dst/size", name);   \
	RUN_SRC_DST_SIZE(fn, allocated, fixed_copy, "src/dst/size", name); \
	RUN_SRC_DST_SIZE(fn, unallocated, anywhere, "src/dst/size", name); \

	RUN_ALL(mach_vm_remap_user, "realigned ", mach_vm_remap);
	RUN_ALL(mach_vm_remap_new_user, , mach_vm_remap_new);

#if TEST_OLD_STYLE_MACH
	RUN_ALL(vm_remap_retyped, "realigned ", vm_remap);
#endif

#undef RUN_ALL
#undef RUN_HELPER
#undef RUN_SRC_SIZE
#undef RUN_DST_SIZE
#undef RUN_PROT_PAIRS
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SRC_DST_SIZE

	// mmap tests

#define RUN(fn, name) dealloc_results(dump_results(test_mmap_with_allocated_vm_map_kernel_flags_t(fn, name " (kernel flags)")))
	RUN(call_mmap__anon_private__kernel_flags, "mmap (anon private)");
	RUN(call_mmap__anon_shared__kernel_flags, "mmap (anon shared)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_mmap_flags(fn, name " (mmap flags)")))
	RUN(call_mmap__mmap_flags, "mmap");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
	RUN(call_mmap__file_private__start_size, "mmap (file private)");
	RUN(call_mmap__anon_private__start_size, "mmap (anon private)");
	RUN(call_mmap__file_shared__start_size, "mmap (file shared)");
	RUN(call_mmap__anon_shared__start_size, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__start_size, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__start_size, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__start_size, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_fixed_dst_size(fn, name " (dst/size)")))
	RUN(call_mmap__fixed_private__start_size, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (offset/size)")))
	RUN(call_mmap__file_private__offset_size, "mmap (file private)");
	RUN(call_mmap__anon_private__offset_size, "mmap (anon private)");
	RUN(call_mmap__file_shared__offset_size, "mmap (file shared)");
	RUN(call_mmap__anon_shared__offset_size, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__offset_size, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__offset_size, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__offset_size, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_dst_size_fileoff(fn, name " (hint/size/fileoff)")))
	RUN(call_mmap__file_private__dst_size_fileoff, "mmap (file private)");
	RUN(call_mmap__anon_private__dst_size_fileoff, "mmap (anon private)");
	RUN(call_mmap__file_shared__dst_size_fileoff, "mmap (file shared)");
	RUN(call_mmap__anon_shared__dst_size_fileoff, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__dst_size_fileoff, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__dst_size_fileoff, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__dst_size_fileoff, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_fixed_dst_size_fileoff(fn, name " (dst/size/fileoff)")))
	RUN(call_mmap__fixed_private__dst_size_fileoff, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mmap__file_private__vm_prot, "mmap (file private)");
	RUN(call_mmap__anon_private__vm_prot, "mmap (anon private)");
	RUN(call_mmap__file_shared__vm_prot, "mmap (file shared)");
	RUN(call_mmap__anon_shared__vm_prot, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__vm_prot, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__vm_prot, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__vm_prot, "mmap (no unix03)");
	RUN(call_mmap__fixed_private__vm_prot, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mremap_encrypted, "mremap_encrypted");
#undef RUN

	/*
	 * Group 4: wire/unwire
	 */

#define RUN(fn, name) dealloc_results(dump_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mlock, "mlock");
	RUN(call_munlock, "munlock");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mach_vm_wire__wire, "mach_vm_wire (wire)");
	RUN(call_replacement_vm_wire__wire, "vm_wire (wire)");
	RUN(call_mach_vm_wire__unwire, "mach_vm_wire (unwire)");
	RUN(call_replacement_vm_wire__unwire, "vm_wire (unwire)");
#undef RUN

#define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mach_vm_wire__vm_prot, "mach_vm_wire");
	RUN(call_replacement_vm_wire__vm_prot, "vm_wire");
#undef RUN

	/*
	 * Group 5: copyin/copyout
	 */

#define RUN_NEW(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(dump_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_read, "mach_vm_read");
	RUN_OLD(call_vm_read, "vm_read");
	RUN_NEW(call_mach_vm_read_list, "mach_vm_read_list");
	RUN_OLD(call_vm_read_list, "vm_read_list");

	RUN_NEW(call_mach_vm_read_overwrite__src, "mach_vm_read_overwrite (src)");
	RUN_NEW(call_mach_vm_read_overwrite__dst, "mach_vm_read_overwrite (dst)");
	RUN_OLD(call_vm_read_overwrite__src, "vm_read_overwrite (src)");
	RUN_OLD(call_vm_read_overwrite__dst, "vm_read_overwrite (dst)");

	RUN_NEW(call_mach_vm_write__src, "mach_vm_write (src)");
	RUN_NEW(call_mach_vm_write__dst, "mach_vm_write (dst)");
	RUN_OLD(call_vm_write__src, "vm_write (src)");
	RUN_OLD(call_vm_write__dst, "vm_write (dst)");

	RUN_NEW(call_mach_vm_copy__src, "mach_vm_copy (src)");
	RUN_NEW(call_mach_vm_copy__dst, "mach_vm_copy (dst)");
	RUN_OLD(call_vm_copy__src, "vm_copy (src)");
	RUN_OLD(call_vm_copy__dst, "vm_copy (dst)");
#undef RUN_NEW
#undef RUN_OLD

	// Teardown: undo the EXC_GUARD and telemetry changes made above.
	restore_exc_guard();

	if (test_results) {
		clean_golden_results();
	}

	if (reenable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Failed to reenable VM API telemetry.");
		return;
	}

	T_PASS("vm parameter validation userspace");
}
1871 
1872 
1873 /////////////////////////////////////////////////////
1874 // Kernel test invocation.
1875 // The actual test code is in:
1876 // osfmk/tests/vm_parameter_validation_kern.c
1877 
1878 #define KERN_RESULT_DELIMITER "\n"
1879 
1880 // Read results written by __dump_results()
/*
 * Parse the newline-delimited kernel test output in kern_buffer (written by
 * __dump_results() on the kernel side) into results_t entries appended to the
 * global kern_list[] / num_kern_tests.
 *
 * Expected per-test line structure, in order:
 *   TESTNAME_DELIMITER<name>                        -- starts a new test
 *   TESTCONFIG_DELIMITER<config>                    -- exactly one per test
 *   ...KERN_TESTRESULT_DELIMITER<ret>, <trial name> -- one line per trial
 *
 * NOTE(review): strtok() mutates kern_buffer in place and keeps hidden global
 * state, so this must not run concurrently with any other strtok() user.
 *
 * Returns 0 unconditionally; parse anomalies are logged, not treated as fatal.
 */
static int
populate_kernel_results(char *kern_buffer)
{
	char *line = NULL;
	char *sub_line = NULL;
	char *test_name = NULL;
	char *result_name = NULL;
	char *token = NULL;
	results_t *kern_results = NULL;
	uint32_t num_kern_results = 0;
	uint32_t result_number = 0;
	int result_ret = 0;
	bool in_test = FALSE;   // true once a TESTNAME_DELIMITER line has been seen

	line = strtok(kern_buffer, KERN_RESULT_DELIMITER);
	while (line != NULL) {
		if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
			// New test section: the remainder of the line is the test name.
			sub_line = line + strlen(TESTNAME_DELIMITER);
			test_name = strdup(sub_line);
			// Some test trials are up to 614656 combinations, use count from golden list if possible.
			// Otherwise just get a small number of them (full results can be printed with DUMP=1)
			num_kern_results = KERN_MAX_UNKNOWN_TEST_RESULTS;
			results_t *golden_result = test_name_to_golden_results(test_name);
			if (golden_result) {
				num_kern_results = golden_result->count;
			} else {
				T_LOG("kern %s not found in golden list\n", test_name);
			}
			// NOTE(review): test_name is never freed here -- presumably
			// alloc_results() takes ownership of it; confirm in the shared
			// header if changing this.
			kern_results = alloc_results(test_name, NULL, num_kern_results);
			kern_results->count = num_kern_results;
			kern_list[num_kern_tests++] = kern_results;
			result_number = 0;
			in_test = TRUE;
		} else if (in_test && strncmp(line, TESTCONFIG_DELIMITER, strlen(TESTCONFIG_DELIMITER)) == 0) {
			// should have found TESTNAME already; config line is unique per test
			assert(kern_results->testconfig == NULL);
			sub_line = line + strlen(TESTCONFIG_DELIMITER);
			kern_results->testconfig = strdup(sub_line);
		} else if (in_test && strstr(line, KERN_TESTRESULT_DELIMITER)) {
			// should have found TESTCONFIG already
			assert(kern_results->testconfig != NULL);
			// Extract the numeric return code that follows the delimiter.
			sscanf(line, KERN_TESTRESULT_DELIMITER "%d", &result_ret);
			// get result name (comes after the first ,)
			token = strchr(line, ',');
			if (token) {
				token = token + 2; // skip the , and the extra space
				result_name = strdup(token);
				if (result_number >= num_kern_results) {
					// Kernel emitted more trials than the golden list
					// expects; drop the overflow and hint at regeneration.
					T_LOG("\tKERN Recreate Golden List? skipping result %d - %s from test %s\n", result_ret, result_name, test_name);
					free(result_name);
				} else {
					// result_name ownership moves into the list entry.
					kern_results->list[result_number++] = (result_t){.ret = result_ret, .name = result_name};
				}
			}
		} else {
			// Unrecognized line: ignored on purpose (kernel output may
			// interleave unrelated logging).
			// T_LOG("Unknown kernel result line: %s\n", line);
			//in_test = FALSE;
		}

		line = strtok(NULL, KERN_RESULT_DELIMITER);
	}

	dump_kernel_results_list();

	return 0;
}
1946 
/*
 * Invoke the kernel-side test hook named `t` through the debug.test.*
 * sysctl interface, passing `value` as the new value and returning the
 * 64-bit result the kernel wrote back. Quietly asserts that the
 * sysctlbyname() call itself succeeded.
 */
static int64_t
run_sysctl_test(const char *t, int64_t value)
{
	char sysctl_name[1024];
	int64_t out = 0;
	size_t io_len = sizeof(value);

	snprintf(sysctl_name, sizeof(sysctl_name), "debug.test.%s", t);
	int err = sysctlbyname(sysctl_name, &out, &io_len, &value, io_len);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(%s)", t);

	return out;
}
1960 
/*
 * Kernel-side parameter validation test driver.
 *
 * Decompresses the platform's kernel golden file (via the spawn tool),
 * triggers the in-kernel test code through the
 * debug.test.vm_parameter_validation_kern sysctl, then either prints the
 * kernel's output verbatim (dump / golden-generation modes) or parses it and
 * compares it against the golden results.
 */
T_DECL(vm_parameter_validation_kern,
    "parameter validation for kext/xnu calls",
    T_META_SPAWN_TOOL(DECOMPRESS),
    T_META_SPAWN_TOOL_ARG("kern"),
    T_META_SPAWN_TOOL_ARG(TMP_DIR),
    T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION)
    )
{
	// Telemetry must stay off for the duration of the run; it is re-enabled
	// at the end, so any early return before that point leaves it disabled.
	if (disable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Could not disable VM API telemetry. Bailing out early.");
		return;
	}

	read_env();

	// Check if kernel will return using golden list format.
	int64_t kern_golden_arg = 0;
	if (os_parse_boot_arg_int("vm_parameter_validation_kern_golden", &kern_golden_arg)) {
		T_LOG("vm_parameter_validation_kern_golden=%lld found in boot args\n", kern_golden_arg);
		generate_golden |= (kern_golden_arg == 1);
	}

	T_LOG("dump %d, golden %d, test %d\n", dump, generate_golden, test_results);
	if (test_results && populate_golden_results(KERN_GOLDEN_FILE)) {
		// couldn't load golden test results
		T_FAIL("Could not open golden file '%s'\n", KERN_GOLDEN_FILE);
		return;
	}

	// Some trials intentionally provoke EXC_GUARD; suppress it while testing.
	disable_exc_guard();

	T_LOG("Continue to test part\n");

	// We allocate a large buffer. The kernel-side code writes output to it.
	// Then we print that output. This is faster than making the kernel-side
	// code print directly to the serial console, which takes many minutes
	// to transfer our test output at 14.4 KB/s.
	// We align this buffer to KB16 to allow the lower bits to be used for a fd.
	void *output;
	int alloc_failed = posix_memalign(&output, KB16, SYSCTL_OUTPUT_BUFFER_SIZE);
	assert(alloc_failed == 0);

	memset(output, 0, SYSCTL_OUTPUT_BUFFER_SIZE);

	// Pack the fd into the low (alignment) bits of the buffer address;
	// the kernel side unpacks both from the single sysctl value.
	int fd = get_fd();
	assert((fd & ((int)KB16 - 1)) == fd);
	if (generate_golden) {
		// pass flag on the msb of the fd
		assert((fd & ((int)(KB16 >> 1) - 1)) == fd);
		fd |=  KB16 >> 1;
	}
	int64_t result = run_sysctl_test("vm_parameter_validation_kern", (int64_t)output + fd);

	// Kernel side returns 1 on success.
	T_QUIET; T_EXPECT_EQ(1ull, result, "vm_parameter_validation_kern");

	if (generate_golden || !test_results) {
		// just print the reduced list result
		printf("%s", output);
	} else {
		// recreate a results_t to compare against the golden file results
		if (populate_kernel_results(output)) {
			T_FAIL("Error while parsing results\n");
		}

		// compare results against values from golden list
		for (uint32_t x = 0; x < num_kern_tests; ++x) {
			dump_results(kern_list[x]);
		}
	}

	free(output);

	if (!generate_golden) {
		clean_kernel_results();
		clean_golden_results();
	}

	// Undo disable_exc_guard() above.
	restore_exc_guard();

	if (reenable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Failed to reenable VM API telemetry.");
		return;
	}

	T_PASS("vm parameter validation kern");
}
2047