1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3 #include <test_utils.h>
4
5 #include <sys/types.h>
6 #include <sys/sysctl.h>
7 #include <mach/mach.h>
8 #include <mach/mach_vm.h>
9 #include <mach/memory_entry.h>
10 #include <mach/shared_region.h>
11 #include <mach/vm_reclaim.h>
12 #include <mach/vm_types.h>
13 #include <sys/mman.h>
14 #include <unistd.h>
15 #include <TargetConditionals.h>
16 #include <mach-o/dyld.h>
17 #include <libgen.h>
18
19 #include <os/bsd.h> // For os_parse_boot_arg_int
20
21 // workarounds for buggy MIG declarations
22 // see tests/vm/vm_parameter_validation_replacement_*.defs
23 // and tests/Makefile for details
24 #include "vm_parameter_validation_replacement_mach_host.h"
25 #include "vm_parameter_validation_replacement_host_priv.h"
26
27 // code shared with kernel/kext tests
28 #include "../../osfmk/tests/vm_parameter_validation.h"
29
30 #define GOLDEN_FILES_VERSION "vm_parameter_validation_golden_images_a2474e92.tar.xz"
31 #define GOLDEN_FILES_ASSET_FILE_POINTER GOLDEN_FILES_VERSION
32
33 /*
34 * Architecture to pass to the golden file decompressor.
35 * watchOS passes 'arm64' or 'arm64_32'.
36 * Decompressor ignores this parameter on other platforms.
37 */
38 #if TARGET_OS_WATCH
39 # if TARGET_CPU_ARM64
40 # if TARGET_RT_64_BIT
41 # define GOLDEN_FILES_ARCH "arm64"
42 # else
43 # define GOLDEN_FILES_ARCH "arm64_32"
44 # endif
45 # else
46 # error unknown watchOS architecture
47 # endif
48 #else
49 # define GOLDEN_FILES_ARCH "unspecified"
50 #endif
51
52 T_GLOBAL_META(
53 T_META_NAMESPACE("xnu.vm"),
54 T_META_RADAR_COMPONENT_NAME("xnu"),
55 T_META_RADAR_COMPONENT_VERSION("VM"),
56 T_META_S3_ASSET(GOLDEN_FILES_ASSET_FILE_POINTER),
57 T_META_ASROOT(true), /* required for vm_wire tests on macOS */
58 T_META_RUN_CONCURRENTLY(false), /* tests should be concurrency-safe now, but keep this in case concurrent tests would provoke timeouts */
59 T_META_ALL_VALID_ARCHS(true),
60 XNU_T_META_REQUIRES_DEVELOPMENT_KERNEL
61 );
62
63 /*
64 * vm_parameter_validation.c
65 * Test parameter validation of vm's userspace API
66 *
67 * The test compares the return values against a 'golden' list, which is a text
68 * file previously generated and compressed in .xz files, per platform.
69 * When vm_parameter_validation runs, it calls assets/vm_parameter_validation/decompress.sh,
70 * which detects the platform and decompresses the corresponding user and kern
71 * golden files.
72 *
73 * Any return code mismatch is reported as a failure, printing test name and iteration.
74 * New tests not present in the 'golden' list will run but they are also reported as a failure.
75 *
76 * There are two environment variable flags that makes development work easier and
77 * can temporarily disable golden list testing.
78 *
79 * SKIP_TESTS
80 * When running with SKIP_TESTS set, the test will not compare the results
81 * against the golden files.
82 *
83 * DUMP_RESULTS
84 * When running with DUMP_RESULTS set, the test will print all the returned values
85 * (as opposed to only the failing ones). To pretty-print this output use the python script:
86 * DUMP_RESULTS=1 vm_parameter_validation | tools/format_vm_parameter_validation.py
87 */
88
89
90
91 /*
92 * xnu/libsyscall/mach/mach_vm.c intercepts some VM calls from userspace,
93 * sometimes doing something other than the expected MIG call.
94 * This test generates its own MIG userspace call sites to call the kernel
95 * entrypoints directly, bypassing libsyscall's interference.
96 *
97 * The custom MIG call sites are generated into:
98 * vm_parameter_validation_vm_map_user.c
99 * vm_parameter_validation_mach_vm_user.c
100 */
101
102 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
103 #pragma clang diagnostic ignored "-Wmissing-prototypes"
104 #pragma clang diagnostic ignored "-Wpedantic"
105
106 /*
107 * Our wire tests often try to wire the whole address space.
108 * In that case the error code is determined by the first range of addresses
109 * that cannot be wired.
110 * In most cases that is a protection failure on a malloc guard page. But
111 * sometimes, circumstances outside of our control change the address map of
112 * our test process and add holes, which means we get a bad address error
113 * instead, and the test fails because the return code doesn't match what's
114 * recorded in the golden files.
115 * To avoid this, we want to keep a guard page inside our data section.
116 * Because that data section is one of the first things in our address space,
117 * the behavior of wire is (more) predictable.
118 */
119 static _Alignas(KB16) char guard_page[KB16];
120
121 static void
set_up_guard_page(void)122 set_up_guard_page(void)
123 {
124 /*
125 * Ensure that _Alignas worked as expected.
126 */
127 assert(0 == (((mach_vm_address_t)guard_page) & PAGE_MASK));
128 /*
129 * Remove all permissions on guard_page such that it is a guard page.
130 */
131 assert(0 == mprotect(guard_page, sizeof(guard_page), 0));
132 }
133
134 // Return a file descriptor that tests can read and write.
135 // A single temporary file is shared among all tests.
// Return a file descriptor that tests can read and write.
// A single temporary file is shared among all tests; it is created lazily
// on first use and kept open for the life of the process.
static int
get_fd()
{
	static int fd = -1;
	if (fd > 0) {
		return fd;
	}

	char filename[] = "/tmp/vm_parameter_validation_XXXXXX";
	fd = mkstemp(filename);
	assert(fd > 2); // not stdin/stdout/stderr, and not mkstemp failure (-1)
	// Unlink the name right away: the open fd keeps the file alive for the
	// tests, and we no longer leave a stale temp file behind in /tmp.
	(void)unlink(filename);
	return fd;
}
149
// Cached fd for get_dyld_fd(); -1 means "not initialized yet".
static int rosetta_dyld_fd = -1;
// Return a file descriptor that Rosetta dyld will accept
static int
get_dyld_fd()
{
	// Already initialized (0 or a real fd): reuse the cached value.
	if (rosetta_dyld_fd >= 0) {
		return rosetta_dyld_fd;
	}

	// Not running under Rosetta: no dyld fd is needed; 0 is the sentinel.
	if (!isRosetta()) {
		rosetta_dyld_fd = 0;
		return rosetta_dyld_fd;
	}

	// NOTE(review): this Rosetta path also stores 0, yet close_dyld_fd()
	// asserts the cached fd is > 2 before closing it. Presumably this path
	// is meant to open a real file for Rosetta dyld — confirm against the
	// original/complete implementation.
	rosetta_dyld_fd = 0;
	return rosetta_dyld_fd;
}
167
// Close the Rosetta dyld fd (only one test calls this)
static void
close_dyld_fd()
{
	if (isRosetta()) {
		// NOTE(review): get_dyld_fd() as visible here never stores an
		// fd > 2, so this assert would fire under Rosetta — verify the
		// pairing with get_dyld_fd()'s real implementation.
		assert(rosetta_dyld_fd > 2);
		if (close(rosetta_dyld_fd) != 0) {
			assert(0);
		}
		// Reset so a later get_dyld_fd() re-initializes.
		rosetta_dyld_fd = -1;
	}
}
180
181 static int
munmap_helper(void * ptr,size_t size)182 munmap_helper(void *ptr, size_t size)
183 {
184 mach_vm_address_t start, end;
185 if (0 != size) { // munmap rejects size == 0 even though mmap accepts it
186 /*
187 * munmap expects aligned inputs, even though mmap sometimes
188 * returns unaligned values
189 */
190 start = ((mach_vm_address_t)ptr) & ~PAGE_MASK;
191 end = (((mach_vm_address_t)ptr) + size + PAGE_MASK) & ~PAGE_MASK;
192 return munmap((void*)start, end - start);
193 }
194 return 0;
195 }
196
197 // Some tests provoke EXC_GUARD exceptions.
198 // We disable EXC_GUARD if possible. If we can't, we disable those tests instead.
199 static bool EXC_GUARD_ENABLED = true;
200
// Wrap munlock() so failures surface as an errno value (0 on success).
static int
call_munlock(void *start, size_t size)
{
	if (munlock(start, size) != 0) {
		return errno;
	}
	return 0;
}
207
// Wrap mlock() so failures surface as an errno value (0 on success).
static int
call_mlock(void *start, size_t size)
{
	if (mlock(start, size) != 0) {
		return errno;
	}
	return 0;
}
214
215 extern int __munmap(void *, size_t);
216
217 static kern_return_t
call_munmap(MAP_T map __unused,mach_vm_address_t start,mach_vm_size_t size)218 call_munmap(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size)
219 {
220 int err = __munmap((void*)start, (size_t)size);
221 return err ? errno : 0;
222 }
223
224 static int
call_mremap_encrypted(void * start,size_t size)225 call_mremap_encrypted(void *start, size_t size)
226 {
227 int err = mremap_encrypted(start, size, CRYPTID_NO_ENCRYPTION, /*cputype=*/ 0, /*cpusubtype=*/ 0);
228 return err ? errno : 0;
229 }
230
231 /////////////////////////////////////////////////////
232 // Mach tests
233
234 static mach_port_t
make_a_mem_object(mach_vm_size_t size)235 make_a_mem_object(mach_vm_size_t size)
236 {
237 mach_port_t out_handle;
238 kern_return_t kr = mach_memory_object_memory_entry_64(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
239 assert(kr == 0);
240 return out_handle;
241 }
242
243 static mach_port_t
make_a_mem_entry(vm_size_t size)244 make_a_mem_entry(vm_size_t size)
245 {
246 mach_port_t port;
247 memory_object_size_t s = (memory_object_size_t)size;
248 kern_return_t kr = mach_make_memory_entry_64(mach_host_self(), &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
249 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "allocate memory entry");
250 return port;
251 }
252
253 static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr,mach_port_t out_handle,mach_port_t saved_handle)254 check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_port_t out_handle, mach_port_t saved_handle)
255 {
256 if (*kr != KERN_SUCCESS) {
257 if (out_handle != (mach_port_t) saved_handle) {
258 *kr = OUT_PARAM_BAD;
259 }
260 }
261 }
// mach_make_memory_entry is really several functions wearing a trenchcoat.
// Run a separate test for each variation.

// mach_make_memory_entry also has a confusing number of entrypoints:
// U64: mach_make_memory_entry_64(64) (mach_make_memory_entry is the same MIG message)
// U32: mach_make_memory_entry(32), mach_make_memory_entry_64(64), _mach_make_memory_entry(64) (each is a unique MIG message)
//
// IMPL(FN, T) generates one call_<FN>__... wrapper per "personality"
// (MAP_MEM_ONLY / NAMED_CREATE / VM_COPY / VM_SHARE / NAMED_REUSE, plus a
// caller-supplied vm_prot_t variant). Each wrapper:
//  - creates a scratch memory object to pass as the parent entry,
//  - seeds the out handle with UNLIKELY_INITIAL_MACH_PORT so that
//    check_mach_memory_entry_outparam_changes() can detect a kernel that
//    writes the out parameter on failure,
//  - deallocates any port a successful call returned.
#define IMPL(FN, T) \
	static kern_return_t \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
	    VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
			/* MAP_MEM_ONLY doesn't use the size. It should not change it. */ \
			if(io_size != size) { \
				kr = OUT_PARAM_BAD; \
			} \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
	    VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
	    VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
	    VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
	    VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	} \
	\
	static kern_return_t \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{ \
		mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
		T io_size = size; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_handle = invalid_value; \
		kern_return_t kr = FN(map, &io_size, start, \
	    prot, &out_handle, memobject); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_handle); \
		} \
		(void)mach_port_deallocate(mach_task_self(), memobject); \
		check_mach_memory_entry_outparam_changes(&kr, out_handle, invalid_value); \
		return kr; \
	}

IMPL(mach_make_memory_entry_64, mach_vm_address_t)
#if TEST_OLD_STYLE_MACH
IMPL(mach_make_memory_entry, vm_address_t)
IMPL(_mach_make_memory_entry, mach_vm_address_t)
#endif
#undef IMPL
381
382 static inline void
383 check_mach_memory_object_memory_entry_outparam_changes(kern_return_t * kr, mach_port_t out_handle,
384 mach_port_t saved_out_handle)
385 {
386 if (*kr != KERN_SUCCESS) {
387 if (out_handle != saved_out_handle) {
388 *kr = OUT_PARAM_BAD;
389 }
390 }
391 }
392
// IMPL(FN) generates two wrappers for a mach_memory_object_memory_entry-style
// entry point: one varying the size (fixed RW protection) and one varying the
// protection. Both seed the out entry with UNLIKELY_INITIAL_MACH_PORT so
// check_mach_memory_object_memory_entry_outparam_changes() can detect a
// kernel that writes the out parameter on failure.
#define IMPL(FN) \
	static kern_return_t \
	call_ ## FN ## __size(MAP_T map __unused, mach_vm_size_t size) \
	{ \
		kern_return_t kr; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_entry = invalid_value; \
		kr = FN(mach_host_self(), 1, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_entry); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_entry); \
		} \
		check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
		return kr; \
	} \
	static kern_return_t \
	call_ ## FN ## __vm_prot(MAP_T map __unused, mach_vm_size_t size, vm_prot_t prot) \
	{ \
		kern_return_t kr; \
		mach_port_t invalid_value = UNLIKELY_INITIAL_MACH_PORT; \
		mach_port_t out_entry = invalid_value; \
		kr = FN(mach_host_self(), 1, size, prot, 0, &out_entry); \
		if (kr == 0) { \
			(void)mach_port_deallocate(mach_task_self(), out_entry); \
		} \
		check_mach_memory_object_memory_entry_outparam_changes(&kr, out_entry, invalid_value); \
		return kr; \
	}

// The declaration of mach_memory_object_memory_entry is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://117927965
IMPL(replacement_mach_memory_object_memory_entry)
IMPL(mach_memory_object_memory_entry_64)
#undef IMPL
427
428 static inline void
429 check_vm_read_outparam_changes(kern_return_t * kr, mach_vm_size_t size, mach_vm_size_t requested_size,
430 mach_vm_address_t addr)
431 {
432 if (*kr == KERN_SUCCESS) {
433 if (size != requested_size) {
434 *kr = OUT_PARAM_BAD;
435 }
436 if (size == 0) {
437 if (addr != 0) {
438 *kr = OUT_PARAM_BAD;
439 }
440 }
441 }
442 }
443
444
445 static kern_return_t
call_mach_vm_read(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)446 call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
447 {
448 vm_offset_t out_addr = UNLIKELY_INITIAL_ADDRESS;
449 mach_msg_type_number_t out_size = UNLIKELY_INITIAL_SIZE;
450 kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
451 if (kr == 0) {
452 (void)mach_vm_deallocate(mach_task_self(), out_addr, out_size);
453 }
454 check_vm_read_outparam_changes(&kr, out_size, size, out_addr);
455 return kr;
456 }
457 #if TEST_OLD_STYLE_MACH
458 static kern_return_t
call_vm_read(MAP_T map,vm_address_t start,vm_size_t size)459 call_vm_read(MAP_T map, vm_address_t start, vm_size_t size)
460 {
461 vm_offset_t out_addr = UNLIKELY_INITIAL_ADDRESS;
462 mach_msg_type_number_t out_size = UNLIKELY_INITIAL_SIZE;
463 kern_return_t kr = vm_read(map, start, size, &out_addr, &out_size);
464 if (kr == 0) {
465 (void)mach_vm_deallocate(mach_task_self(), out_addr, out_size);
466 }
467 check_vm_read_outparam_changes(&kr, out_size, size, out_addr);
468 return kr;
469 }
470 #endif
471
472 static kern_return_t
call_mach_vm_read_list(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)473 call_mach_vm_read_list(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
474 {
475 mach_vm_read_entry_t re = {{.address = start, .size = size}};
476 kern_return_t kr = mach_vm_read_list(map, re, 1);
477 if (kr == 0) {
478 (void)mach_vm_deallocate(mach_task_self(), re[0].address, re[0].size);
479 }
480 return kr;
481 }
482 #if TEST_OLD_STYLE_MACH
483 static kern_return_t
call_vm_read_list(MAP_T map,vm_address_t start,vm_size_t size)484 call_vm_read_list(MAP_T map, vm_address_t start, vm_size_t size)
485 {
486 vm_read_entry_t re = {{.address = start, .size = size}};
487 kern_return_t kr = vm_read_list(map, re, 1);
488 if (kr == 0) {
489 (void)mach_vm_deallocate(mach_task_self(), re[0].address, re[0].size);
490 }
491 return kr;
492 }
493 #endif
494
495 static inline void
check_vm_read_overwrite_outparam_changes(kern_return_t * kr,mach_vm_size_t size,mach_vm_size_t requested_size)496 check_vm_read_overwrite_outparam_changes(kern_return_t * kr, mach_vm_size_t size, mach_vm_size_t requested_size)
497 {
498 if (*kr == KERN_SUCCESS) {
499 if (size != requested_size) {
500 *kr = OUT_PARAM_BAD;
501 }
502 }
503 }
504
505 static kern_return_t __unused
call_mach_vm_read_overwrite__ssz(MAP_T map,mach_vm_address_t start,mach_vm_address_t start_2,mach_vm_size_t size)506 call_mach_vm_read_overwrite__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
507 {
508 mach_vm_size_t out_size;
509 kern_return_t kr = mach_vm_read_overwrite(map, start, size, start_2, &out_size);
510 check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
511 return kr;
512 }
513
514 static kern_return_t
call_mach_vm_read_overwrite__src(MAP_T map,mach_vm_address_t src,mach_vm_size_t size)515 call_mach_vm_read_overwrite__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
516 {
517 mach_vm_size_t out_size;
518 allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
519 kern_return_t kr = mach_vm_read_overwrite(map, src, size, dst.addr, &out_size);
520 check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
521 return kr;
522 }
523
524 static kern_return_t
call_mach_vm_read_overwrite__dst(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size)525 call_mach_vm_read_overwrite__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
526 {
527 mach_vm_size_t out_size;
528 allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
529 kern_return_t kr = mach_vm_read_overwrite(map, src.addr, size, dst, &out_size);
530 check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
531 return kr;
532 }
533
534 #if TEST_OLD_STYLE_MACH
535 static kern_return_t __unused
call_vm_read_overwrite__ssz(MAP_T map,vm_address_t start,vm_address_t start_2,vm_size_t size)536 call_vm_read_overwrite__ssz(MAP_T map, vm_address_t start, vm_address_t start_2, vm_size_t size)
537 {
538 vm_size_t out_size;
539 kern_return_t kr = vm_read_overwrite(map, start, size, start_2, &out_size);
540 check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
541 return kr;
542 }
543
544 static kern_return_t
call_vm_read_overwrite__src(MAP_T map,vm_address_t src,vm_size_t size)545 call_vm_read_overwrite__src(MAP_T map, vm_address_t src, vm_size_t size)
546 {
547 vm_size_t out_size;
548 allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
549 kern_return_t kr = vm_read_overwrite(map, src, size, (vm_address_t) dst.addr, &out_size);
550 check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
551 return kr;
552 }
553
554 static kern_return_t
call_vm_read_overwrite__dst(MAP_T map,vm_address_t dst,vm_size_t size)555 call_vm_read_overwrite__dst(MAP_T map, vm_address_t dst, vm_size_t size)
556 {
557 vm_size_t out_size;
558 allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
559 kern_return_t kr = vm_read_overwrite(map, (vm_address_t) src.addr, size, dst, &out_size);
560 check_vm_read_overwrite_outparam_changes(&kr, out_size, size);
561 return kr;
562 }
563 #endif
564
565
566
567 static kern_return_t __unused
call_mach_vm_copy__ssz(MAP_T map,mach_vm_address_t start,mach_vm_address_t start_2,mach_vm_size_t size)568 call_mach_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
569 {
570 kern_return_t kr = mach_vm_copy(map, start, size, start_2);
571 return kr;
572 }
573
574 static kern_return_t
call_mach_vm_copy__src(MAP_T map,mach_vm_address_t src,mach_vm_size_t size)575 call_mach_vm_copy__src(MAP_T map, mach_vm_address_t src, mach_vm_size_t size)
576 {
577 allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
578 kern_return_t kr = mach_vm_copy(map, src, size, dst.addr);
579 return kr;
580 }
581
582 static kern_return_t
call_mach_vm_copy__dst(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size)583 call_mach_vm_copy__dst(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size)
584 {
585 allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
586 kern_return_t kr = mach_vm_copy(map, src.addr, size, dst);
587 return kr;
588 }
589
590 #if TEST_OLD_STYLE_MACH
591 static kern_return_t __unused
call_vm_copy__ssz(MAP_T map,mach_vm_address_t start,mach_vm_address_t start_2,mach_vm_size_t size)592 call_vm_copy__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
593 {
594 kern_return_t kr = vm_copy(map, (vm_address_t) start, (vm_size_t) size, (vm_address_t) start_2);
595 return kr;
596 }
597
598 static kern_return_t
call_vm_copy__src(MAP_T map,vm_address_t src,vm_size_t size)599 call_vm_copy__src(MAP_T map, vm_address_t src, vm_size_t size)
600 {
601 allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
602 kern_return_t kr = vm_copy(map, src, size, (vm_address_t) dst.addr);
603 return kr;
604 }
605
606 static kern_return_t
call_vm_copy__dst(MAP_T map,vm_address_t dst,vm_size_t size)607 call_vm_copy__dst(MAP_T map, vm_address_t dst, vm_size_t size)
608 {
609 allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
610 kern_return_t kr = vm_copy(map, (vm_address_t) src.addr, size, dst);
611 return kr;
612 }
613 #endif
614
615 static kern_return_t __unused
call_mach_vm_write__ssz(MAP_T map,mach_vm_address_t start,mach_vm_address_t start_2,mach_vm_size_t size)616 call_mach_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
617 {
618 kern_return_t kr = mach_vm_write(map, start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
619 return kr;
620 }
621
622 static kern_return_t
call_mach_vm_write__src(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)623 call_mach_vm_write__src(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
624 {
625 allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
626 kern_return_t kr = mach_vm_write(map, dst.addr, (vm_offset_t) start, (mach_msg_type_number_t) size);
627 return kr;
628 }
629
630 static kern_return_t
call_mach_vm_write__dst(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)631 call_mach_vm_write__dst(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
632 {
633 allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
634 kern_return_t kr = mach_vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
635 return kr;
636 }
637
638 #if TEST_OLD_STYLE_MACH
639 static kern_return_t __unused
call_vm_write__ssz(MAP_T map,mach_vm_address_t start,mach_vm_address_t start_2,mach_vm_size_t size)640 call_vm_write__ssz(MAP_T map, mach_vm_address_t start, mach_vm_address_t start_2, mach_vm_size_t size)
641 {
642 kern_return_t kr = vm_write(map, (vm_address_t) start, (vm_offset_t) start_2, (mach_msg_type_number_t) size);
643 return kr;
644 }
645
646 static kern_return_t
call_vm_write__src(MAP_T map,vm_address_t start,vm_size_t size)647 call_vm_write__src(MAP_T map, vm_address_t start, vm_size_t size)
648 {
649 allocation_t dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
650 kern_return_t kr = vm_write(map, (vm_address_t) dst.addr, start, (mach_msg_type_number_t) size);
651 return kr;
652 }
653
654 static kern_return_t
call_vm_write__dst(MAP_T map,vm_address_t start,vm_size_t size)655 call_vm_write__dst(MAP_T map, vm_address_t start, vm_size_t size)
656 {
657 allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
658 kern_return_t kr = vm_write(map, start, (vm_offset_t) src.addr, (mach_msg_type_number_t) size);
659 return kr;
660 }
661 #endif
662
// mach_vm_wire, vm_wire (start/size)
// "wire" and "unwire" paths diverge internally; test both
//
// IMPL(FN, T, FLAVOR, PROT) generates call_<FN>__<FLAVOR>, which wires
// (PROT = VM_PROT_READ) or unwires (PROT = VM_PROT_NONE) [start, start+size)
// through the host-priv port.
// NOTE(review): the host-priv port returned by host_get_host_priv_port() is
// never deallocated, so each call leaks one port reference — presumably
// acceptable in a short-lived test process; confirm if this harness grows.
#define IMPL(FN, T, FLAVOR, PROT) \
	static kern_return_t \
	call_ ## FN ## __ ## FLAVOR(MAP_T map, T start, T size) \
	{ \
		mach_port_t host_priv = HOST_PRIV_NULL; \
		kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
		assert(kr == 0); /* host priv port on macOS requires entitlements or root */ \
		kr = FN(host_priv, map, start, size, PROT); \
		return kr; \
	}
IMPL(mach_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(mach_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t, wire, VM_PROT_READ)
IMPL(replacement_vm_wire, mach_vm_address_t, unwire, VM_PROT_NONE)
#undef IMPL
683
// mach_vm_wire, vm_wire (vm_prot_t)
//
// IMPL(FN, T) generates call_<FN>__vm_prot, which wires [start, start+size)
// with a caller-supplied protection through the host-priv port.
// NOTE(review): as with the start/size flavor above, the host-priv port is
// never deallocated (one port reference leaked per call).
#define IMPL(FN, T) \
	static kern_return_t \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{ \
		mach_port_t host_priv = HOST_PRIV_NULL; \
		kern_return_t kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
		assert(kr == 0); /* host priv port on macOS requires entitlements or root */ \
		kr = FN(host_priv, map, start, size, prot); \
		return kr; \
	}
IMPL(mach_vm_wire, mach_vm_address_t)
// The declaration of vm_wire is buggy on U32.
// We compile in our own MIG user stub for it with a "replacement_" prefix.
// rdar://118258929
IMPL(replacement_vm_wire, mach_vm_address_t)
#undef IMPL
701
702
// mach_vm_map/vm32_map/vm32_map_64 infra

// Signature shared by the mach_vm_map-family entry points, so the helpers
// below can exercise each of them through one function pointer.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,          // in/out: fixed address or hint; result
    mach_vm_size_t size,
    mach_vm_offset_t mask,               // alignment mask
    int flags,                           // VM_FLAGS_*
    mem_entry_name_port_t object,        // memory entry port, or 0 for anonymous
    memory_object_offset_t offset,       // offset within the memory object
    boolean_t copy,                      // copy vs. share the object's pages
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
716
717 static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)718 call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
719 {
720 mach_vm_address_t out_addr = start;
721 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
722 0, 0, 0, 0, 0, VM_INHERIT_NONE);
723 // fixed-overwrite with pre-existing allocation, don't deallocate
724 return kr;
725 }
726
727 static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)728 call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
729 {
730 mach_vm_address_t out_addr = start;
731 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
732 0, 0, true, 0, 0, VM_INHERIT_NONE);
733 // fixed-overwrite with pre-existing allocation, don't deallocate
734 return kr;
735 }
736
737 static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)738 call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
739 {
740 mach_vm_address_t out_addr = start_hint;
741 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
742 if (kr == 0) {
743 (void)mach_vm_deallocate(map, out_addr, size);
744 }
745 return kr;
746 }
747
748 static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)749 call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
750 {
751 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
752 mach_vm_address_t out_addr = start;
753 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
754 memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
755 (void)mach_port_deallocate(mach_task_self(), memobject);
756 // fixed-overwrite with pre-existing allocation, don't deallocate
757 return kr;
758 }
759
760 static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)761 call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
762 {
763 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
764 mach_vm_address_t out_addr = start;
765 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
766 memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
767 (void)mach_port_deallocate(mach_task_self(), memobject);
768 // fixed-overwrite with pre-existing allocation, don't deallocate
769 return kr;
770 }
771
772 static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)773 call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
774 {
775 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
776 mach_vm_address_t out_addr = start_hint;
777 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, memobject,
778 KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
779 if (kr == 0) {
780 (void)mach_vm_deallocate(map, out_addr, size);
781 }
782 (void)mach_port_deallocate(mach_task_self(), memobject);
783 return kr;
784 }
785
786 static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)787 helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
788 {
789 mach_port_t memobject = make_a_mem_object(obj_size);
790 mach_vm_address_t out_addr = start;
791 kern_return_t kr = fn(map, &out_addr, size, 0, flags, memobject,
792 offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
793 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
794 (void)mach_port_deallocate(mach_task_self(), memobject);
795 return kr;
796 }
797
// Fixed-overwrite memobject mapping, copy=false.
static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
}
803
// Fixed-overwrite memobject mapping, copy=true.
static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
}
809
// Anywhere memobject mapping, copy=false.
static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
{
	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
}
815
/*
 * Anonymous (object == 0) mapping with a caller-chosen vm_inherit_t.
 * The KB16 object offset matches the other map-fn helpers.
 */
static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	mach_vm_address_t addr = start;
	kern_return_t kr = fn(map, &addr, size, 0, flags, 0, KB16, copy,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(kr, map, addr, size, flags);
	return kr;
}
825
// Fixed-overwrite anonymous mapping with inherit, copy=false.
static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}
831
// Fixed-overwrite anonymous mapping with inherit, copy=true.
static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}
837
// Anywhere anonymous mapping with inherit, copy=false.
static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}
843
/*
 * Memory-object mapping with a caller-chosen vm_inherit_t.  The entry
 * port is released in all cases; the mapping is cleaned up unless it
 * was a fixed-overwrite of pre-existing memory.
 */
static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = start;

	kern_return_t kr = fn(map, &addr, size, 0, flags, entry, KB16, copy,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
	deallocate_if_not_fixed_overwrite(kr, map, addr, size, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
855
// Fixed-overwrite memobject mapping with inherit, copy=false.
static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
}
861
// Fixed-overwrite memobject mapping with inherit, copy=true.
static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
}
867
// Anywhere memobject mapping with inherit, copy=false.
static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
{
	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
}
873
/*
 * Anonymous mapping with caller-supplied VM flags.  *start is both the
 * requested address (in) and the kernel-chosen address (out).
 */
static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	kern_return_t kr = fn(map, start, size, 0, flags, 0, KB16, false,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	return kr;
}
882
/*
 * Anonymous mapping with caller-supplied VM flags, copy=true.
 *
 * BUG FIX: this "_copy" variant previously passed copy=false, making it
 * byte-identical to call_map_fn__allocate__flags above.  Every other
 * *_copy helper in this file (allocate_fixed_copy, memobject_copy,
 * prot_pairs copy variants) passes true; do the same here.
 */
static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	kern_return_t kr = fn(map, start, size, 0, flags,
	    0, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	return kr;
}
891
/*
 * Memobject mapping with caller-supplied VM flags, copy=false.
 * Entry port is released in all cases.
 */
static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);

	kern_return_t kr = fn(map, start, size, 0, flags, entry, KB16, false,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
902
/*
 * Memobject mapping with caller-supplied VM flags, copy=true.
 * Entry port is released in all cases.
 */
static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);

	kern_return_t kr = fn(map, start, size, 0, flags, entry, KB16, true,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
913
/*
 * Anonymous KB16 mapping exercising a (cur, max) protection pair.
 * Address is always kernel-chosen from 0.
 */
static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	mach_vm_address_t addr = 0;
	kern_return_t kr = fn(map, &addr, KB16, 0, flags, 0, KB16, copy,
	    cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, addr, KB16, flags);
	return kr;
}
923
// Fixed-overwrite anonymous prot-pair trial, copy=false.
static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}
929
// Fixed-overwrite anonymous prot-pair trial, copy=true.
static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}
935
// Anywhere anonymous prot-pair trial, copy=false.
static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}
941
/*
 * Memobject KB16 mapping exercising a (cur, max) protection pair.
 * Entry port is released in all cases.
 */
static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
{
	mach_port_t entry = make_a_mem_object(TEST_ALLOC_SIZE + 1);
	mach_vm_address_t addr = 0;

	kern_return_t kr = fn(map, &addr, KB16, 0, flags, entry, KB16, copy,
	    cur, max, VM_INHERIT_DEFAULT);
	deallocate_if_not_fixed_overwrite(kr, map, addr, KB16, flags);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
953
// Fixed-overwrite memobject prot-pair trial, copy=false.
static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
}
959
// Fixed-overwrite memobject prot-pair trial, copy=true.
static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
}
965
// Anywhere memobject prot-pair trial, copy=false.
static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
{
	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
}
971
972 // implementations
973
/* Generates call_<map_fn>__<instance>(map, start, size) forwarding to
 * the matching call_map_fn__<instance> helper above. */
#define IMPL_MAP_FN_START_SIZE(map_fn, instance)                        \
	static kern_return_t                                            \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
	{                                                               \
	        return call_map_fn__ ## instance(map_fn, map, start, size); \
	}
980
/* Like IMPL_MAP_FN_START_SIZE but the address is only a placement hint
 * (ANYWHERE instances). */
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance)                         \
	static kern_return_t                                            \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
	{                                                               \
	        return call_map_fn__ ## instance(map_fn, map, start_hint, size); \
	}
987
/* Generates the start/size/offset/object-size trial wrapper. */
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance)          \
	static kern_return_t                                            \
	call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
	{                                                               \
	        return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size); \
	}
994
/* Generates the vm_inherit_t trial wrapper. */
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance)                \
	static kern_return_t                                            \
	call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
	{                                                               \
	        return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit); \
	}
1001
/* Generates the VM-flags trial wrapper (address is in/out). */
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance)                  \
	static kern_return_t                                            \
	call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
	{                                                               \
	        return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags); \
	}
1008
/* Generates the (cur, max) protection-pair trial wrapper. */
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance)                        \
	static kern_return_t                                            \
	call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
	{                                                               \
	        return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max); \
	}
1015
/* Instantiates the complete family of trial wrappers for one mach_vm_map
 * flavor. */
#define IMPL(map_fn)                                                    \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed)                  \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy)             \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed)                 \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy)            \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere)                \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere)               \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed)   \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed)          \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy)     \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere)       \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed)         \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy)    \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere)      \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate)                  \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy)             \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject)                 \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy)            \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed)                  \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy)             \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed)                 \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy)            \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere)              \
1042
/*
 * mach_vm_map() adapter matching the map_fn_t signature.  Calls through
 * a local copy of *address so the original value can be compared with the
 * kernel's result by check_mach_vm_map_outparam_changes().
 */
static kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	mach_vm_address_t addr = *address;
	kern_return_t kr = mach_vm_map(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, *address, flags, target_task);
	*address = addr;
	return kr;
}
/* Generate the full call_mach_vm_map_wrapped__* trial family. */
IMPL(mach_vm_map_wrapped)
1063
1064 #if TEST_OLD_STYLE_MACH
/*
 * Old-style vm_map_64() adapter retyped to the map_fn_t signature.
 * Arguments are narrowed to vm_address_t/vm_size_t/vm_offset_t, as the
 * legacy API requires; on ILP32 this truncates 64-bit trial values.
 */
static kern_return_t
vm_map_64_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map_64(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
/* Generate the full call_vm_map_64_retyped__* trial family. */
IMPL(vm_map_64_retyped)
1085
/*
 * Old-style vm_map() adapter retyped to the map_fn_t signature; same
 * narrowing caveats as vm_map_64_retyped.
 */
static kern_return_t
vm_map_retyped(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, object, (vm_offset_t)offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
/* Generate the full call_vm_map_retyped__* trial family. */
IMPL(vm_map_retyped)
1106 #endif
1107
#undef IMPL_MAP_FN_START_SIZE
/* BUG FIX: was `#undef IMPL_MAP_FN_SIZE`, a macro that is never defined;
 * the macro actually defined above is IMPL_MAP_FN_HINT_SIZE. */
#undef IMPL_MAP_FN_HINT_SIZE
#undef IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT
#undef IMPL_MAP_FN_START_SIZE_INHERIT
#undef IMPL_MAP_FN_START_SIZE_FLAGS
#undef IMPL_MAP_FN_PROT_PAIRS
#undef IMPL
1115
1116
1117 // mmap
1118 // Directly calling this symbol lets us hit the syscall directly instead of the libsyscall wrapper.
1119 void *__mmap(void *addr, size_t len, int prot, int flags, int fildes, off_t off);
1120
1121 // We invert MAP_UNIX03 in the flags. This is because by default libsyscall intercepts calls to mmap and adds MAP_UNIX03.
1122 // That means MAP_UNIX03 should be the default for most of our tests, and we should only test without MAP_UNIX03 when we explicitly want to.
/*
 * Calls the raw __mmap syscall with the MAP_UNIX03 bit flipped:
 * libsyscall's mmap wrapper normally ORs in MAP_UNIX03, so inverting it
 * here makes "UNIX03 set" the default for trials and lets a trial opt
 * out by passing the bit explicitly.
 */
void *
mmap_wrapper(void *addr, size_t len, int prot, int flags, int fildes, off_t off)
{
	flags ^= MAP_UNIX03;
	return __mmap(addr, len, prot, flags, fildes, off);
}
1129
1130 // Rename the UNIX03 flag for the code below since we're inverting its meaning.
1131 #define MAP_NOT_UNIX03 0x40000
1132 static_assert(MAP_NOT_UNIX03 == MAP_UNIX03, "MAP_UNIX03 value changed");
1133 #undef MAP_UNIX03
1134 #define MAP_UNIX03 dont_use_MAP_UNIX03
1135
1136 // helpers
1137
1138 // Return true if security policy disallows unsigned code.
1139 // Some test results are expected to change with this set.
/*
 * Returns true when the AMFI unsigned-code policy sysctl reports that
 * unsigned code is disallowed.  Rosetta processes and systems without
 * the sysctl report false.
 */
static bool
unsigned_code_is_disallowed(void)
{
	if (isRosetta()) {
		return false;
	}

	int policy = 0;
	size_t policy_size = sizeof(policy);
	int rc = sysctlbyname("security.mac.amfi.unsigned_code_policy",
	    &policy, &policy_size, NULL, 0);
	if (rc != 0) {
		// sysctl not present, assume unsigned code is okay
		return false;
	}
	return policy != 0;
}
1157
// Map an expected-but-environment-dependent mmap failure to ACCEPTABLE.
// @param ret   errno from a failed mmap call
// @param prot  the protection the trial requested
// @param fd    the file descriptor passed to mmap (-1 for anonymous)
// @return ACCEPTABLE for the SIP special case, otherwise ret unchanged
static int
maybe_hide_mmap_failure(int ret, int prot, int fd)
{
	// Special case for mmap(PROT_EXEC, fd).
	// When SIP is enabled these get EPERM from mac_file_check_mmap().
	// The golden files record the SIP-disabled values.
	// This special case also allows the test to succeed when SIP
	// is enabled even though the return value isn't the golden one.
	if (ret == EPERM && fd != -1 && (prot & PROT_EXEC) &&
	    unsigned_code_is_disallowed()) {
		return ACCEPTABLE;
	}
	return ret;
}
1172
1173 static kern_return_t
help_call_mmap__vm_prot(MAP_T map __unused,int flags,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)1174 help_call_mmap__vm_prot(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
1175 {
1176 int fd = -1;
1177 if (!(flags & MAP_ANON)) {
1178 fd = get_fd();
1179 }
1180 void *rv = mmap_wrapper((void *)start, (size_t) size, prot, flags, fd, 0);
1181 if (rv == MAP_FAILED) {
1182 return maybe_hide_mmap_failure(errno, prot, fd);
1183 } else {
1184 assert(0 == munmap_helper(rv, size));
1185 return 0;
1186 }
1187 }
1188
1189 static kern_return_t
help_call_mmap__kernel_flags(MAP_T map __unused,int mmap_flags,mach_vm_address_t start,mach_vm_size_t size,int kernel_flags)1190 help_call_mmap__kernel_flags(MAP_T map __unused, int mmap_flags, mach_vm_address_t start, mach_vm_size_t size, int kernel_flags)
1191 {
1192 void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, mmap_flags, kernel_flags, 0);
1193 if (rv == MAP_FAILED) {
1194 return errno;
1195 } else {
1196 assert(0 == munmap_helper(rv, size));
1197 return 0;
1198 }
1199 }
1200
1201 static kern_return_t
help_call_mmap__dst_size_fileoff(MAP_T map __unused,int flags,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff)1202 help_call_mmap__dst_size_fileoff(MAP_T map __unused, int flags, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff)
1203 {
1204 int fd = -1;
1205 if (!(flags & MAP_ANON)) {
1206 fd = get_fd();
1207 }
1208 void *rv = mmap_wrapper((void *)dst, (size_t) size, VM_PROT_DEFAULT, flags, fd, (off_t)fileoff);
1209 if (rv == MAP_FAILED) {
1210 return errno;
1211 } else {
1212 assert(0 == munmap_helper(rv, size));
1213 return 0;
1214 }
1215 }
1216
1217 static kern_return_t
help_call_mmap__start_size(MAP_T map __unused,int flags,mach_vm_address_t start,mach_vm_size_t size)1218 help_call_mmap__start_size(MAP_T map __unused, int flags, mach_vm_address_t start, mach_vm_size_t size)
1219 {
1220 int fd = -1;
1221 if (!(flags & MAP_ANON)) {
1222 fd = get_fd();
1223 }
1224 void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, flags, fd, 0);
1225 if (rv == MAP_FAILED) {
1226 return errno;
1227 } else {
1228 assert(0 == munmap_helper(rv, size));
1229 return 0;
1230 }
1231 }
1232
1233 static kern_return_t
help_call_mmap__offset_size(MAP_T map __unused,int flags,mach_vm_address_t offset,mach_vm_size_t size)1234 help_call_mmap__offset_size(MAP_T map __unused, int flags, mach_vm_address_t offset, mach_vm_size_t size)
1235 {
1236 int fd = -1;
1237 if (!(flags & MAP_ANON)) {
1238 fd = get_fd();
1239 }
1240 void *rv = mmap_wrapper((void *)0, (size_t) size, VM_PROT_DEFAULT, flags, fd, (off_t)offset);
1241 if (rv == MAP_FAILED) {
1242 return errno;
1243 } else {
1244 assert(0 == munmap_helper(rv, size));
1245 return 0;
1246 }
1247 }
1248
/* Generates call_mmap__<variant>__<type>() forwarding to
 * help_call_mmap__<type>() with the given mmap flags; extra trial
 * parameters are threaded via DROP_COMMAS/DROP_TYPES. */
#define IMPL_ONE_FROM_HELPER(type, variant, flags, ...)                 \
	static kern_return_t                                            \
	__attribute__((used))                                           \
	call_mmap ## __ ## variant ## __ ## type(MAP_T map, mach_vm_address_t start, mach_vm_size_t size DROP_COMMAS(__VA_ARGS__)) { \
	        return help_call_mmap__ ## type(map, flags, start, size DROP_TYPES(__VA_ARGS__)); \
	}
1255
1256 // call functions
1257
/* Instantiates one trial function per mmap flag combination for a
 * given helper signature. */
#define IMPL_FROM_HELPER(type, ...)                                     \
	IMPL_ONE_FROM_HELPER(type, file_private, MAP_FILE | MAP_PRIVATE, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, anon_private, MAP_ANON | MAP_PRIVATE, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, file_shared, MAP_FILE | MAP_SHARED, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, anon_shared, MAP_ANON | MAP_SHARED, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, file_private_codesign, MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_CODESIGN, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, file_private_media, MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, nounix03_private, MAP_FILE | MAP_PRIVATE | MAP_NOT_UNIX03, ##__VA_ARGS__) \
	IMPL_ONE_FROM_HELPER(type, fixed_private, MAP_FILE | MAP_PRIVATE | MAP_FIXED, ##__VA_ARGS__) \
1267
// Instantiate the mmap trial functions for each helper signature.
IMPL_FROM_HELPER(vm_prot, vm_prot_t, prot)
IMPL_FROM_HELPER(dst_size_fileoff, mach_vm_address_t, fileoff)
IMPL_FROM_HELPER(start_size)
IMPL_FROM_HELPER(offset_size)

// kernel_flags trials only apply to anonymous mappings (the fd slot carries the flags).
IMPL_ONE_FROM_HELPER(kernel_flags, anon_private, MAP_ANON | MAP_PRIVATE, int, kernel_flags)
IMPL_ONE_FROM_HELPER(kernel_flags, anon_shared, MAP_ANON | MAP_SHARED, int, kernel_flags)
1275
1276 static kern_return_t
1277 call_mmap__mmap_flags(MAP_T map __unused, mach_vm_address_t start, mach_vm_size_t size, int mmap_flags)
1278 {
1279 int fd = -1;
1280 if (!(mmap_flags & MAP_ANON)) {
1281 fd = get_fd();
1282 }
1283 void *rv = mmap_wrapper((void *)start, (size_t) size, VM_PROT_DEFAULT, mmap_flags, fd, 0);
1284 if (rv == MAP_FAILED) {
1285 return errno;
1286 } else {
1287 assert(0 == munmap(rv, (size_t) size));
1288 return 0;
1289 }
1290 }
1291
1292 // Mach memory entry ownership
1293
/*
 * mach_memory_entry_ownership trial varying the ledger tag
 * (ledger flags fixed at 0).
 */
static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
{
	mach_port_t entry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = mach_memory_entry_ownership(entry, mach_task_self(),
	    ledger_tag, 0);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
1302
/*
 * mach_memory_entry_ownership trial varying the ledger flags
 * (tag fixed at VM_LEDGER_TAG_DEFAULT).
 */
static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
{
	mach_port_t entry = make_a_mem_entry(TEST_ALLOC_SIZE + 1);
	kern_return_t kr = mach_memory_entry_ownership(entry, mach_task_self(),
	    VM_LEDGER_TAG_DEFAULT, ledger_flag);
	(void)mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
1311
1312
// For deallocators like munmap and vm_deallocate.
// Return a non-zero error code if we should avoid performing this trial.
// The checks are order-dependent: cheap allow-cases first, then the
// live-memory guard, then the EXC_GUARD wrap-around case.
kern_return_t
short_circuit_deallocator(MAP_T map, start_size_trial_t trial)
{
	// mach_vm_deallocate(size == 0) is safe
	if (trial.size == 0) {
		return 0;
	}

	// Allow deallocation attempts based on a valid allocation
	// (assumes the test loop will slide this trial to a valid allocation)
	if (!trial.start_is_absolute && trial.size_is_absolute) {
		return 0;
	}

	// Avoid overwriting random live memory.
	// (An absolute range that does NOT overflow points at arbitrary
	// addresses, so deallocating it would be destructive.)
	if (!vm_sanitize_range_overflows_strict_zero(trial.start, trial.size, VM_MAP_PAGE_MASK(map))) {
		return IGNORED;
	}

	// Avoid EXC_GUARD if it is still enabled.
	// A non-overflowing sum whose page-rounded end wraps to 0 is the
	// specific shape that trips the guard.
	mach_vm_address_t sum;
	if (!__builtin_add_overflow(trial.start, trial.size, &sum) &&
	    trial.start + trial.size != 0 &&
	    round_up_page(trial.start + trial.size, PAGE_SIZE) == 0) {
		// this case provokes EXC_GUARD
		if (EXC_GUARD_ENABLED) {
			return GUARD;
		}
	}

	// Allow.
	return 0;
}
1348
// Plain passthrough to mach_vm_deallocate; no out-parameter checks needed.
static kern_return_t
call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	return mach_vm_deallocate(map, start, size);
}
1355
1356 #if TEST_OLD_STYLE_MACH
// Old-style vm_deallocate passthrough; narrows start/size to the legacy types.
static kern_return_t
call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	return vm_deallocate(map, (vm_address_t) start, (vm_size_t) size);
}
1363 #endif
1364
// mach_vm_allocate trial with caller-supplied flags; verifies out-param behavior.
static kern_return_t
call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_vm_address_t requested = *start;
	kern_return_t kr = mach_vm_allocate(map, start, size, flags);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, requested, flags, map);
	return kr;
}
1373
1374
// mach_vm_allocate trial pinned to VM_FLAGS_FIXED.
static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	mach_vm_address_t requested = *start;
	kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_FIXED);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, requested, VM_FLAGS_FIXED, map);
	return kr;
}
1383
// mach_vm_allocate trial pinned to VM_FLAGS_ANYWHERE.
static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	mach_vm_address_t requested = *start;
	kern_return_t kr = mach_vm_allocate(map, start, size, VM_FLAGS_ANYWHERE);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, requested, VM_FLAGS_ANYWHERE, map);
	return kr;
}
1392
// mach_vm_inherit trial with a fixed VM_INHERIT_NONE value.
static kern_return_t
call_mach_vm_inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	return mach_vm_inherit(map, start, size, VM_INHERIT_NONE);
}
1399 #if TEST_OLD_STYLE_MACH
// Old-style vm_inherit trial with a fixed VM_INHERIT_NONE value.
static kern_return_t
call_vm_inherit(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_inherit(map, start, size, VM_INHERIT_NONE);
}
1406 #endif
1407
// minherit trial with a fixed VM_INHERIT_SHARE value; returns errno on failure.
static int
call_minherit(void *start, size_t size)
{
	if (minherit(start, size, VM_INHERIT_SHARE) != 0) {
		return errno;
	}
	return 0;
}
1414
// mach_vm_inherit trial varying the vm_inherit_t value.
static kern_return_t
call_mach_vm_inherit__inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t value)
{
	return mach_vm_inherit(map, start, size, value);
}
1421
// minherit trial varying the inherit value; returns errno on failure.
static int
call_minherit__inherit(void * start, size_t size, int value)
{
	if (minherit(start, size, value) != 0) {
		return errno;
	}
	return 0;
}
1428
// mach_vm_protect trial with fixed RW protection (set_maximum=0).
static kern_return_t
call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	return mach_vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
}
// mach_vm_protect trial varying the protection value (set_maximum=0).
static kern_return_t
call_mach_vm_protect__vm_prot(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
{
	return mach_vm_protect(map, start, size, 0, prot);
}
1441 #if TEST_OLD_STYLE_MACH
// Old-style vm_protect trial with fixed RW protection (set_maximum=0).
static kern_return_t
call_vm_protect__start_size(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
}
// Old-style vm_protect trial varying the protection value (set_maximum=0).
static kern_return_t
call_vm_protect__vm_prot(MAP_T map, vm_address_t start, vm_size_t size, vm_prot_t prot)
{
	return vm_protect(map, start, size, 0, prot);
}
1454 #endif
1455
1456 extern int __mprotect(void *, size_t, int);
1457
// Raw __mprotect trial with fixed RW protection; returns errno on failure.
static int
call_mprotect__start_size(void *start, size_t size)
{
	if (__mprotect(start, size, PROT_READ | PROT_WRITE) != 0) {
		return errno;
	}
	return 0;
}
1464
// Raw __mprotect trial varying the protection value; returns errno on failure.
static int
call_mprotect__vm_prot(void *start, size_t size, int prot)
{
	if (__mprotect(start, size, prot) != 0) {
		return errno;
	}
	return 0;
}
1471
1472 #if TEST_OLD_STYLE_MACH
// vm_behavior_set trial pinned to VM_BEHAVIOR_DEFAULT.
static kern_return_t
call_vm_behavior_set__start_size__default(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_behavior_set(map, start, size, VM_BEHAVIOR_DEFAULT);
}
1479
// vm_behavior_set trial pinned to VM_BEHAVIOR_CAN_REUSE.
static kern_return_t
call_vm_behavior_set__start_size__can_reuse(MAP_T map, vm_address_t start, vm_size_t size)
{
	return vm_behavior_set(map, start, size, VM_BEHAVIOR_CAN_REUSE);
}
1486
1487 static kern_return_t
call_vm_behavior_set__vm_behavior(MAP_T map,vm_address_t start,vm_size_t size,vm_behavior_t behavior)1488 call_vm_behavior_set__vm_behavior(MAP_T map, vm_address_t start, vm_size_t size, vm_behavior_t behavior)
1489 {
1490 kern_return_t kr = vm_behavior_set(map, start, size, behavior);
1491 return kr;
1492 }
1493 #endif /* TEST_OLD_STYLE_MACH */
1494
1495 extern int __shared_region_map_and_slide_2_np(uint32_t files_count,
1496 const struct shared_file_np *files,
1497 uint32_t mappings_count,
1498 const struct shared_file_mapping_slide_np *mappings);
1499
1500 static int
maybe_hide_shared_region_map_failure(int ret,uint32_t files_count,const struct shared_file_np * files,uint32_t mappings_count)1501 maybe_hide_shared_region_map_failure(int ret,
1502 uint32_t files_count, const struct shared_file_np *files,
1503 uint32_t mappings_count)
1504 {
1505 // Special case for __shared_region_map_and_slide_2_np().
1506 // When SIP is enabled this case gets EPERM instead of EINVAL due to
1507 // vm_shared_region_map_file returning KERN_PROTECTION_FAILURE instead of
1508 // KERN_INVALID_ARGUMENT.
1509 if (ret == EPERM && files_count == 1 && mappings_count == 1 &&
1510 files->sf_fd == get_fd() && files->sf_mappings_count == 1 &&
1511 unsigned_code_is_disallowed()) {
1512 return ACCEPTABLE;
1513 }
1514 return ret;
1515 }
1516
1517 static int
call_shared_region_map_and_slide_2_np_child(uint32_t files_count,const struct shared_file_np * files,uint32_t mappings_count,const struct shared_file_mapping_slide_np * mappings)1518 call_shared_region_map_and_slide_2_np_child(uint32_t files_count, const struct shared_file_np *files,
1519 uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings)
1520 {
1521 int err = __shared_region_map_and_slide_2_np(files_count, files, mappings_count, mappings);
1522 return err ? maybe_hide_shared_region_map_failure(errno, files_count, files, mappings_count) : 0;
1523 }
1524
// Bundle of __shared_region_map_and_slide_2_np() arguments, passed to
// thread_func() through pthread_create's single void * parameter.
typedef struct {
	uint32_t files_count;
	const struct shared_file_np *files;
	uint32_t mappings_count;
	const struct shared_file_mapping_slide_np *mappings;
} map_n_slice_thread_args;
1531
1532 void*
thread_func(void * args)1533 thread_func(void* args)
1534 {
1535 map_n_slice_thread_args *thread_args = (map_n_slice_thread_args *)args;
1536 uint32_t files_count = thread_args->files_count;
1537 const struct shared_file_np *files = thread_args->files;
1538 uint32_t mappings_count = thread_args->mappings_count;
1539 const struct shared_file_mapping_slide_np *mappings = thread_args->mappings;
1540
1541 int err = call_shared_region_map_and_slide_2_np_child(files_count, files, mappings_count, mappings);
1542
1543 int *result = malloc(sizeof(int));
1544 assert(result != NULL);
1545 *result = err;
1546 return result;
1547 }
1548
1549 static int
call_shared_region_map_and_slide_2_np_in_thread(uint32_t files_count,const struct shared_file_np * files,uint32_t mappings_count,const struct shared_file_mapping_slide_np * mappings)1550 call_shared_region_map_and_slide_2_np_in_thread(uint32_t files_count, const struct shared_file_np *files,
1551 uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings)
1552 {
1553 // From vm/vm_shared_region.c: After a chroot(), the calling process keeps using its original shared region [...]
1554 // But its children will use a different shared region [...]
1555 if (chroot(".") < 0) {
1556 return BUSTED;
1557 }
1558
1559 map_n_slice_thread_args args = {files_count, files, mappings_count, mappings};
1560 pthread_t thread;
1561 if (pthread_create(&thread, NULL, thread_func, (void *)&args) < 0) {
1562 return -91;
1563 }
1564
1565 int *err;
1566 if (pthread_join(thread, (void**)&err) < 0) {
1567 return BUSTED;
1568 }
1569
1570 if (chroot("/") < 0) {
1571 return BUSTED;
1572 }
1573
1574 return *err;
1575 }
1576
// madvise() with fixed MADV_NORMAL advice on a trial range.
// Returns 0 on success, otherwise the errno value.
static int
call_madvise__start_size(void *start, size_t size)
{
	return madvise(start, size, MADV_NORMAL) ? errno : 0;
}
1583
// madvise() with a trial-supplied advice value.
// Returns 0 on success, otherwise the errno value.
static int
call_madvise__vm_advise(void *start, size_t size, int advise)
{
	return madvise(start, size, advise) ? errno : 0;
}
1590
1591 static int
call_mach_vm_msync__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1592 call_mach_vm_msync__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1593 {
1594 kern_return_t kr = mach_vm_msync(map, start, size, VM_SYNC_ASYNCHRONOUS);
1595 return kr;
1596 }
1597
1598 static int
call_mach_vm_msync__vm_sync(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_sync_t sync)1599 call_mach_vm_msync__vm_sync(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_sync_t sync)
1600 {
1601 kern_return_t kr = mach_vm_msync(map, start, size, sync);
1602 return kr;
1603 }
1604
1605 #if TEST_OLD_STYLE_MACH
1606 static int
call_vm_msync__start_size(MAP_T map,vm_address_t start,vm_size_t size)1607 call_vm_msync__start_size(MAP_T map, vm_address_t start, vm_size_t size)
1608 {
1609 kern_return_t kr = vm_msync(map, start, size, VM_SYNC_ASYNCHRONOUS);
1610 return kr;
1611 }
1612
1613 static int
call_vm_msync__vm_sync(MAP_T map,vm_address_t start,vm_size_t size,vm_sync_t sync)1614 call_vm_msync__vm_sync(MAP_T map, vm_address_t start, vm_size_t size, vm_sync_t sync)
1615 {
1616 kern_return_t kr = vm_msync(map, start, size, sync);
1617 return kr;
1618 }
1619 #endif /* TEST_OLD_STYLE_MACH */
1620
1621 // msync has a libsyscall wrapper that does alignment. We want the raw syscall.
1622 int __msync(void *, size_t, int);
1623
1624 static int
call_msync__start_size(void * start,size_t size)1625 call_msync__start_size(void *start, size_t size)
1626 {
1627 int err = __msync(start, size, MS_SYNC);
1628 return err ? errno : 0;
1629 }
1630
1631 static int
call_msync__vm_msync(void * start,size_t size,int msync_value)1632 call_msync__vm_msync(void *start, size_t size, int msync_value)
1633 {
1634 int err = __msync(start, size, msync_value);
1635 return err ? errno : 0;
1636 }
1637
1638 // msync nocancel isn't declared, but we want to directly hit the syscall
1639 int __msync_nocancel(void *, size_t, int);
1640
1641 static int
call_msync_nocancel__start_size(void * start,size_t size)1642 call_msync_nocancel__start_size(void *start, size_t size)
1643 {
1644 int err = __msync_nocancel(start, size, MS_SYNC);
1645 return err ? errno : 0;
1646 }
1647
1648 static int
call_msync_nocancel__vm_msync(void * start,size_t size,int msync_value)1649 call_msync_nocancel__vm_msync(void *start, size_t size, int msync_value)
1650 {
1651 int err = __msync_nocancel(start, size, msync_value);
1652 return err ? errno : 0;
1653 }
1654
1655 static void
check_mach_vm_machine_attribute_outparam_changes(kern_return_t * kr,vm_machine_attribute_val_t value,vm_machine_attribute_val_t saved_value)1656 check_mach_vm_machine_attribute_outparam_changes(kern_return_t * kr, vm_machine_attribute_val_t value, vm_machine_attribute_val_t saved_value)
1657 {
1658 if (value != saved_value) {
1659 *kr = OUT_PARAM_BAD;
1660 }
1661 }
1662
1663 static int
call_mach_vm_machine_attribute__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1664 call_mach_vm_machine_attribute__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1665 {
1666 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1667 vm_machine_attribute_val_t initial_value = value;
1668 kern_return_t kr = mach_vm_machine_attribute(map, start, size, MATTR_CACHE, &value);
1669 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1670 return kr;
1671 }
1672
1673
1674 static int
call_mach_vm_machine_attribute__machine_attribute(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_machine_attribute_t attr)1675 call_mach_vm_machine_attribute__machine_attribute(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_machine_attribute_t attr)
1676 {
1677 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1678 vm_machine_attribute_val_t initial_value = value;
1679 kern_return_t kr = mach_vm_machine_attribute(map, start, size, attr, &value);
1680 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1681 return kr;
1682 }
1683
1684 #if TEST_OLD_STYLE_MACH
1685 static int
call_vm_machine_attribute__start_size(MAP_T map,vm_address_t start,vm_size_t size)1686 call_vm_machine_attribute__start_size(MAP_T map, vm_address_t start, vm_size_t size)
1687 {
1688 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1689 vm_machine_attribute_val_t initial_value = value;
1690 kern_return_t kr = vm_machine_attribute(map, start, size, MATTR_CACHE, &value);
1691 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1692 return kr;
1693 }
1694
1695 static int
call_vm_machine_attribute__machine_attribute(MAP_T map,vm_address_t start,vm_size_t size,vm_machine_attribute_t attr)1696 call_vm_machine_attribute__machine_attribute(MAP_T map, vm_address_t start, vm_size_t size, vm_machine_attribute_t attr)
1697 {
1698 vm_machine_attribute_val_t value = MATTR_VAL_GET;
1699 vm_machine_attribute_val_t initial_value = value;
1700 kern_return_t kr = vm_machine_attribute(map, start, size, attr, &value);
1701 check_mach_vm_machine_attribute_outparam_changes(&kr, value, initial_value);
1702 return kr;
1703 }
1704 #endif /* TEST_OLD_STYLE_MACH */
1705
1706 static int
call_mach_vm_purgable_control__address__get(MAP_T map,mach_vm_address_t addr)1707 call_mach_vm_purgable_control__address__get(MAP_T map, mach_vm_address_t addr)
1708 {
1709 int state = INVALID_PURGABLE_STATE;
1710 int initial_state = state;
1711 kern_return_t kr = mach_vm_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
1712 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
1713 return kr;
1714 }
1715
1716
1717 static int
call_mach_vm_purgable_control__address__purge_all(MAP_T map,mach_vm_address_t addr)1718 call_mach_vm_purgable_control__address__purge_all(MAP_T map, mach_vm_address_t addr)
1719 {
1720 int state = INVALID_PURGABLE_STATE;
1721 int initial_state = state;
1722 kern_return_t kr = mach_vm_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
1723 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
1724 return kr;
1725 }
1726
1727 static int
call_mach_vm_purgable_control__purgeable_state(MAP_T map,mach_vm_address_t addr,vm_purgable_t control,int state)1728 call_mach_vm_purgable_control__purgeable_state(MAP_T map, mach_vm_address_t addr, vm_purgable_t control, int state)
1729 {
1730 int initial_state = state;
1731 kern_return_t kr = mach_vm_purgable_control(map, addr, control, &state);
1732 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, control);
1733 return kr;
1734 }
1735
1736 #if TEST_OLD_STYLE_MACH
1737 static int
call_vm_purgable_control__address__get(MAP_T map,vm_address_t addr)1738 call_vm_purgable_control__address__get(MAP_T map, vm_address_t addr)
1739 {
1740 int state = INVALID_PURGABLE_STATE;
1741 int initial_state = state;
1742 kern_return_t kr = vm_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
1743 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
1744 return kr;
1745 }
1746
1747 static int
call_vm_purgable_control__address__purge_all(MAP_T map,vm_address_t addr)1748 call_vm_purgable_control__address__purge_all(MAP_T map, vm_address_t addr)
1749 {
1750 int state = INVALID_PURGABLE_STATE;
1751 int initial_state = state;
1752 kern_return_t kr = vm_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
1753 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
1754 return kr;
1755 }
1756
1757 static int
call_vm_purgable_control__purgeable_state(MAP_T map,vm_address_t addr,vm_purgable_t control,int state)1758 call_vm_purgable_control__purgeable_state(MAP_T map, vm_address_t addr, vm_purgable_t control, int state)
1759 {
1760 int initial_state = state;
1761 kern_return_t kr = vm_purgable_control(map, addr, control, &state);
1762 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, control);
1763 return kr;
1764 }
1765 #endif /* TEST_OLD_STYLE_MACH */
1766
/*
 * Verify region_recurse-style out-parameters.
 * On KERN_SUCCESS the call must have overwritten the sentinel depth, size and
 * info values (addr is NOT checked on success).
 * On failure every out-parameter must be untouched.
 * Note: addr/saved_addr are only compared for equality, so callers may pass
 * them in either order.
 */
static void
check_mach_vm_region_recurse_outparam_changes(kern_return_t * kr, void * info, void * saved_info, size_t info_size,
    natural_t depth, natural_t saved_depth, mach_vm_address_t addr, mach_vm_address_t saved_addr,
    mach_vm_size_t size, mach_vm_size_t saved_size)
{
	if (*kr == KERN_SUCCESS) {
		// success must have replaced the unlikely initial values
		if (depth == saved_depth) {
			*kr = OUT_PARAM_BAD;
		}
		if (size == saved_size) {
			*kr = OUT_PARAM_BAD;
		}
		if (memcmp(info, saved_info, info_size) == 0) {
			*kr = OUT_PARAM_BAD;
		}
	} else {
		// failure must leave all out-parameters unmodified
		if (depth != saved_depth || addr != saved_addr || size != saved_size || memcmp(info, saved_info, info_size) != 0) {
			*kr = OUT_PARAM_BAD;
		}
	}
}
1788
// Exercise mach_vm_region_recurse() starting at a trial address and verify
// its out-parameters were written on success / untouched on failure.
static kern_return_t
call_mach_vm_region_recurse(MAP_T map, mach_vm_address_t addr)
{
	vm_region_submap_info_data_64_t info;
	info.inheritance = INVALID_INHERIT;  // unlikely sentinel value
	vm_region_submap_info_data_64_t saved_info = info;
	mach_vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
	mach_vm_size_t saved_size = size_out;
	natural_t depth = 10;
	natural_t saved_depth = depth;
	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
	mach_vm_address_t addr_cpy = addr;

	kern_return_t kr = mach_vm_region_recurse(map,
	    &addr_cpy,
	    &size_out,
	    &depth,
	    (vm_region_recurse_info_t)&info,
	    &count);
	// NOTE(review): (addr, addr_cpy) is the reverse of the order the
	// old-style wrappers below pass; harmless because the checker only
	// compares the two addresses for equality.
	check_mach_vm_region_recurse_outparam_changes(&kr, &info, &saved_info, sizeof(info), depth, saved_depth,
	    addr, addr_cpy, size_out, saved_size);

	return kr;
}
1813
1814 #if TEST_OLD_STYLE_MACH
1815 static kern_return_t
call_vm_region_recurse(MAP_T map,vm_address_t addr)1816 call_vm_region_recurse(MAP_T map, vm_address_t addr)
1817 {
1818 vm_region_submap_info_data_t info;
1819 info.inheritance = INVALID_INHERIT;
1820 vm_region_submap_info_data_t saved_info = info;
1821
1822 vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
1823 vm_size_t saved_size = size_out;
1824
1825 natural_t depth = 10;
1826 natural_t saved_depth = depth;
1827
1828 mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT;
1829 vm_address_t addr_cpy = addr;
1830
1831 kern_return_t kr = vm_region_recurse(map,
1832 &addr_cpy,
1833 &size_out,
1834 &depth,
1835 (vm_region_recurse_info_t)&info,
1836 &count);
1837
1838 check_mach_vm_region_recurse_outparam_changes(&kr, &info, &saved_info, sizeof(info), depth, saved_depth,
1839 addr_cpy, addr, size_out, saved_size);
1840
1841 return kr;
1842 }
1843
1844 static kern_return_t
call_vm_region_recurse_64(MAP_T map,vm_address_t addr)1845 call_vm_region_recurse_64(MAP_T map, vm_address_t addr)
1846 {
1847 vm_region_submap_info_data_64_t info;
1848 info.inheritance = INVALID_INHERIT;
1849 vm_region_submap_info_data_64_t saved_info = info;
1850
1851 vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
1852 vm_size_t saved_size = size_out;
1853
1854 natural_t depth = 10;
1855 natural_t saved_depth = depth;
1856
1857 mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
1858 vm_address_t addr_cpy = addr;
1859
1860 kern_return_t kr = vm_region_recurse_64(map,
1861 &addr_cpy,
1862 &size_out,
1863 &depth,
1864 (vm_region_recurse_info_t)&info,
1865 &count);
1866
1867 check_mach_vm_region_recurse_outparam_changes(&kr, &info, &saved_info, sizeof(info), depth, saved_depth,
1868 addr_cpy, addr, size_out, saved_size);
1869
1870 return kr;
1871 }
1872 #endif /* TEST_OLD_STYLE_MACH */
1873
1874 static kern_return_t
call_mach_vm_page_info(MAP_T map,mach_vm_address_t addr)1875 call_mach_vm_page_info(MAP_T map, mach_vm_address_t addr)
1876 {
1877 vm_page_info_flavor_t flavor = VM_PAGE_INFO_BASIC;
1878 mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
1879 mach_msg_type_number_t saved_count = count;
1880 vm_page_info_basic_data_t info = {0};
1881 info.depth = -1;
1882 vm_page_info_basic_data_t saved_info = info;
1883
1884 kern_return_t kr = mach_vm_page_info(map, addr, flavor, (vm_page_info_t)&info, &count);
1885 check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);
1886 return kr;
1887 }
1888
1889 static void
check_mach_vm_page_query_outparam_changes(kern_return_t * kr,int disposition,int saved_disposition,int ref_count)1890 check_mach_vm_page_query_outparam_changes(kern_return_t * kr, int disposition, int saved_disposition, int ref_count)
1891 {
1892 if (*kr == KERN_SUCCESS) {
1893 /*
1894 * There should be no outside references to the memory created for this test
1895 */
1896 if (ref_count != 0) {
1897 *kr = OUT_PARAM_BAD;
1898 }
1899 if (disposition == saved_disposition) {
1900 *kr = OUT_PARAM_BAD;
1901 }
1902 }
1903 }
1904
1905 static kern_return_t
call_mach_vm_page_query(MAP_T map,mach_vm_address_t addr)1906 call_mach_vm_page_query(MAP_T map, mach_vm_address_t addr)
1907 {
1908 int disp = INVALID_DISPOSITION_VALUE, ref = 0;
1909 int saved_disposition = disp;
1910 kern_return_t kr = mach_vm_page_query(map, addr, &disp, &ref);
1911 check_mach_vm_page_query_outparam_changes(&kr, disp, saved_disposition, ref);
1912 return kr;
1913 }
1914
1915 #if TEST_OLD_STYLE_MACH
1916 static kern_return_t
call_vm_map_page_query(MAP_T map,vm_address_t addr)1917 call_vm_map_page_query(MAP_T map, vm_address_t addr)
1918 {
1919 int disp = INVALID_DISPOSITION_VALUE, ref = 0;
1920 int saved_disposition = disp;
1921 kern_return_t kr = vm_map_page_query(map, addr, &disp, &ref);
1922 check_mach_vm_page_query_outparam_changes(&kr, disp, saved_disposition, ref);
1923 return kr;
1924 }
1925 #endif /* TEST_OLD_STYLE_MACH */
1926
1927 static void
check_mach_vm_page_range_query_outparam_changes(kern_return_t * kr,mach_vm_size_t out_count,mach_vm_size_t in_count)1928 check_mach_vm_page_range_query_outparam_changes(kern_return_t * kr, mach_vm_size_t out_count, mach_vm_size_t in_count)
1929 {
1930 if (out_count != in_count) {
1931 *kr = OUT_PARAM_BAD;
1932 }
1933 }
1934
// Exercise mach_vm_page_range_query() over [start, start+size).
static kern_return_t
call_mach_vm_page_range_query(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// mach_vm_page_range_query writes one int per page output
	// and can accept any address range as input
	// We can't provide that much storage for very large lengths.
	// Instead we provide a limited output buffer,
	// write-protect the page after it, and "succeed" if the kernel
	// fills the buffer and then returns EFAULT.

	// enough space for MAX_PAGE_RANGE_QUERY with 4KB pages, twice
	mach_vm_size_t prq_buf_size = 2 * 262144 * sizeof(int);
	mach_vm_address_t prq_buf = 0;
	kern_return_t kr = mach_vm_allocate(map, &prq_buf,
	    prq_buf_size + KB16, VM_FLAGS_ANYWHERE);
	assert(kr == 0);

	// protect the guard page
	mach_vm_address_t prq_guard = prq_buf + prq_buf_size;
	kr = mach_vm_protect(map, prq_guard, KB16, 0, VM_PROT_NONE);
	assert(kr == 0);

	// pre-fill the output buffer with an invalid value
	// (disposition words are never 0xffffffff, so survivors are detectable)
	memset((char *)prq_buf, 0xff, prq_buf_size);

	// in_count: one disposition slot per KB16 page, rounding up
	mach_vm_size_t in_count = size / KB16 + (size % KB16 ? 1 : 0);
	mach_vm_size_t out_count = in_count;
	kr = mach_vm_page_range_query(map, start, size, prq_buf, &out_count);

	// yes, EFAULT as a kern_return_t because mach_vm_page_range_query returns copyio's error
	if (kr == EFAULT) {
		bool bad = false;
		for (unsigned i = 0; i < prq_buf_size / sizeof(uint32_t); i++) {
			if (((uint32_t *)prq_buf)[i] == 0xffffffff) {
				// kernel didn't fill the entire writeable buffer, that's bad
				bad = true;
				break;
			}
		}
		if (!bad) {
			// kernel filled our buffer and then hit our fault page
			// we'll allow it
			kr = 0;
		}
	}

	check_mach_vm_page_range_query_outparam_changes(&kr, out_count, in_count);
	(void)mach_vm_deallocate(map, prq_buf, prq_buf_size + KB16);

	return kr;
}
1986
1987 static int
call_mincore(void * start,size_t size)1988 call_mincore(void *start, size_t size)
1989 {
1990 // mincore writes one byte per page output
1991 // and can accept any address range as input
1992 // We can't provide that much storage for very large lengths.
1993 // Instead we provide a limited output buffer,
1994 // write-protect the page after it, and "succeed" if the kernel
1995 // fills the buffer and then returns EFAULT.
1996
1997 // enough space for MAX_PAGE_RANGE_QUERY with 4KB pages, twice
1998 size_t mincore_buf_size = 2 * 262144;
1999 char *mincore_buf = 0;
2000 mincore_buf = mmap(NULL, mincore_buf_size + KB16, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
2001 assert(mincore_buf != MAP_FAILED);
2002
2003 // protect the guard page
2004 char *mincore_guard = mincore_buf + mincore_buf_size;
2005 int err = mprotect(mincore_guard, KB16, PROT_NONE);
2006 assert(err == 0);
2007
2008 // pre-fill the output buffer with an invalid value
2009 memset(mincore_buf, 0xff, mincore_buf_size);
2010
2011 int ret;
2012 err = mincore(start, size, mincore_buf);
2013 if (err == 0) {
2014 ret = 0;
2015 } else if (errno != EFAULT) {
2016 ret = errno;
2017 } else {
2018 // EFAULT - check if kernel hit our guard page
2019 bool bad = false;
2020 for (unsigned i = 0; i < mincore_buf_size; i++) {
2021 if (mincore_buf[i] == (char)0xff) {
2022 // kernel didn't fill the entire writeable buffer, that's bad
2023 bad = true;
2024 break;
2025 }
2026 }
2027 if (!bad) {
2028 // kernel filled our buffer and then hit our guard page
2029 // we'll allow it
2030 ret = 0;
2031 } else {
2032 ret = errno;
2033 }
2034 }
2035
2036 (void)munmap(mincore_buf, mincore_buf_size + PAGE_SIZE);
2037
2038 return ret;
2039 }
2040
2041 // TODO: re-enable deferred reclaim tests (rdar://136157720)
2042 #if 0
// Function-pointer type for the deferred-reclamation buffer init call under test.
typedef kern_return_t (*fn_mach_vm_deferred_reclamation_buffer_init)(task_t task, mach_vm_address_t address, mach_vm_size_t size);

// Run the reclamation-buffer-init trial list against 'func'.
// vm.reclaim_max_threshold is temporarily forced non-zero so initialization is
// permitted; the last trial runs with the threshold back at 0 to exercise the
// KERN_NOT_SUPPORTED path. The original sysctl value is restored before return.
static results_t *
test_mach_vm_deferred_reclamation_buffer_init(fn_mach_vm_deferred_reclamation_buffer_init func,
    const char * testname)
{
	int ret = 0;
	// Set vm.reclaim_max_threshold to non-zero
	int orig_reclaim_max_threshold = 0;
	int new_reclaim_max_threshold = 1;
	size_t size = sizeof(orig_reclaim_max_threshold);
	int sysctl_res = sysctlbyname("vm.reclaim_max_threshold", &orig_reclaim_max_threshold, &size, NULL, 0);
	assert(sysctl_res == 0);
	sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &new_reclaim_max_threshold, size);
	assert(sysctl_res == 0);

	reclamation_buffer_init_trials_t *trials SMART_RECLAMATION_BUFFER_INIT_TRIALS();
	results_t *results = alloc_results(testname, eSMART_RECLAMATION_BUFFER_INIT_TRIALS, trials->count);

	// reserve last trial to run without modified sysctl
	for (unsigned i = 0; i < trials->count - 1; i++) {
		reclamation_buffer_init_trial_t trial = trials->list[i];
		ret = func(trial.task, trial.address, trial.size);
		append_result(results, ret, trial.name);
	}

	// run with vm.reclaim_max_threshold = 0 and exercise KERN_NOT_SUPPORTED path
	new_reclaim_max_threshold = 0;
	reclamation_buffer_init_trial_t last_trial = trials->list[trials->count - 1];

	sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &new_reclaim_max_threshold, size);
	assert(sysctl_res == 0);

	ret = func(last_trial.task, last_trial.address, last_trial.size);
	if (__improbable(ret == KERN_INVALID_ARGUMENT)) {
		// Unlikely case when args are rejected before sysctl check.
		// When this happens during test run, return acceptable, but if this happens
		// during golden file generation, record the expected value.
		ret = generate_golden ? KERN_NOT_SUPPORTED : ACCEPTABLE;
	}
	append_result(results, ret, last_trial.name);

	// Revert vm.reclaim_max_threshold to how we found it
	sysctl_res = sysctlbyname("vm.reclaim_max_threshold", NULL, 0, &orig_reclaim_max_threshold, size);
	assert(sysctl_res == 0);

	return results;
}
2091 #endif // 0
2092
2093 static vm_map_kernel_flags_trials_t *
generate_mmap_kernel_flags_trials()2094 generate_mmap_kernel_flags_trials()
2095 {
2096 // mmap rejects both ANYWHERE and FIXED | OVERWRITE
2097 // so don't set any prefix flags.
2098 return generate_prefixed_vm_map_kernel_flags_trials(0, "");
2099 }
2100
2101
// Initializer suffix for a trial-list variable: generates the mmap
// kernel-flags trials and auto-frees them when the variable leaves scope.
#define SMART_MMAP_KERNEL_FLAGS_TRIALS() \
	__attribute__((cleanup(cleanup_vm_map_kernel_flags_trials))) \
	= generate_mmap_kernel_flags_trials()
2105
2106 static results_t *
test_mmap_with_allocated_vm_map_kernel_flags_t(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,int flags),const char * testname)2107 test_mmap_with_allocated_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, int flags), const char * testname)
2108 {
2109 MAP_T map SMART_MAP;
2110
2111 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2112 vm_map_kernel_flags_trials_t * trials SMART_MMAP_KERNEL_FLAGS_TRIALS();
2113 results_t *results = alloc_results(testname, eSMART_MMAP_KERNEL_FLAGS_TRIALS, trials->count);
2114
2115 for (unsigned i = 0; i < trials->count; i++) {
2116 kern_return_t ret = func(map, base.addr, base.size, trials->list[i].flags);
2117 append_result(results, ret, trials->list[i].name);
2118 }
2119 return results;
2120 }
2121
2122 // Test a Unix function.
2123 // Run each trial with an allocated vm region and a vm_inherit_t
2124 typedef int (*unix_with_inherit_fn)(void *start, size_t size, int inherit);
2125
2126 static results_t *
test_unix_with_allocated_vm_inherit_t(unix_with_inherit_fn fn,const char * testname)2127 test_unix_with_allocated_vm_inherit_t(unix_with_inherit_fn fn, const char * testname)
2128 {
2129 MAP_T map SMART_MAP;
2130 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2131 vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
2132 results_t *results = alloc_results(testname, eSMART_VM_INHERIT_TRIALS, trials->count);
2133
2134 for (unsigned i = 0; i < trials->count; i++) {
2135 vm_inherit_trial_t trial = trials->list[i];
2136 int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2137 append_result(results, ret, trial.name);
2138 }
2139 return results;
2140 }
2141
2142 // Test a Unix function.
2143 // Run each trial with an allocated vm region and a vm_msync_t
2144 typedef int (*unix_with_msync_fn)(void *start, size_t size, int msync_value);
2145
2146 static results_t *
test_unix_with_allocated_vm_msync_t(unix_with_msync_fn fn,const char * testname)2147 test_unix_with_allocated_vm_msync_t(unix_with_msync_fn fn, const char * testname)
2148 {
2149 MAP_T map SMART_MAP;
2150 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2151 vm_msync_trials_t *trials SMART_VM_MSYNC_TRIALS();
2152 results_t *results = alloc_results(testname, eSMART_VM_MSYNC_TRIALS, trials->count);
2153
2154 for (unsigned i = 0; i < trials->count; i++) {
2155 vm_msync_trial_t trial = trials->list[i];
2156 int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2157 append_result(results, ret, trial.name);
2158 }
2159 return results;
2160 }
2161
2162 // Test a Unix function.
2163 // Run each trial with an allocated vm region and an advise
2164 typedef int (*unix_with_advise_fn)(void *start, size_t size, int advise);
2165
2166 static results_t *
test_unix_with_allocated_aligned_vm_advise_t(unix_with_advise_fn fn,mach_vm_size_t align_mask,const char * testname)2167 test_unix_with_allocated_aligned_vm_advise_t(unix_with_advise_fn fn, mach_vm_size_t align_mask, const char * testname)
2168 {
2169 MAP_T map SMART_MAP;
2170 allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT);
2171 vm_advise_trials_t *trials SMART_VM_ADVISE_TRIALS();
2172 results_t *results = alloc_results(testname, eSMART_VM_ADVISE_TRIALS, trials->count);
2173
2174 for (unsigned i = 0; i < trials->count; i++) {
2175 vm_advise_trial_t trial = trials->list[i];
2176 int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
2177 append_result(results, ret, trial.name);
2178 }
2179 return results;
2180 }
2181
2182 // Rosetta userspace intercepts shared_region_map_and_slide_2_np calls and this Rosetta wrapper
2183 // function doesn't have the necessary checks to support invalid input arguments. Skip these trials
2184 // intead of crashing the test.
2185 static bool
shared_region_map_and_slide_would_crash(shared_region_map_and_slide_2_trial_t * trial)2186 shared_region_map_and_slide_would_crash(shared_region_map_and_slide_2_trial_t *trial)
2187 {
2188 uint32_t files_count = trial->files_count;
2189 struct shared_file_np *files = trial->files;
2190 uint32_t mappings_count = trial->mappings_count;
2191 struct shared_file_mapping_slide_np *mappings = trial->mappings;
2192
2193 if (files_count == 0 || files_count == 1 || files_count > _SR_FILE_MAPPINGS_MAX_FILES) {
2194 return true;
2195 }
2196 if (mappings_count == 0 || mappings_count > SFM_MAX) {
2197 return true;
2198 }
2199 if (!files) {
2200 return true;
2201 }
2202 if (!mappings) {
2203 return true;
2204 }
2205 if (mappings_count != (((files_count - 1) * kNumSharedCacheMappings) + 1) &&
2206 mappings_count != (files_count * kNumSharedCacheMappings)) {
2207 return true;
2208 }
2209 if (files_count >= kMaxSubcaches) {
2210 return true;
2211 }
2212 return false;
2213 }
2214
2215 typedef int (*unix_shared_region_map_and_slide_2_np)(uint32_t files_coun, const struct shared_file_np *files, uint32_t mappings_count, const struct shared_file_mapping_slide_np *mappings);
2216
// Run the shared_region_map_and_slide_2_np trial list against 'func' using a
// descriptor for the dyld shared cache file. Under Rosetta, trials whose
// arguments would crash Rosetta's syscall intercept are recorded as IGNORED.
// Closes the dyld fd before returning.
static results_t *
test_unix_shared_region_map_and_slide_2_np(unix_shared_region_map_and_slide_2_np func, const char *testname)
{
	uint64_t dyld_fp = (uint64_t)get_dyld_fd();
	shared_region_map_and_slide_2_trials_t *trials SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(dyld_fp);
	// NOTE(review): this alloc_results call passes dyld_fp where the other
	// harnesses pass only (testname, enum, count) — presumably a
	// trial-parameter recorded with the results; confirm against
	// alloc_results' definition.
	results_t *results = alloc_results(testname, eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS, dyld_fp, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		int ret;
		shared_region_map_and_slide_2_trial_t trial = trials->list[i];
		if (isRosetta() && shared_region_map_and_slide_would_crash(&trial)) {
			// skip instead of crashing inside Rosetta's wrapper
			ret = IGNORED;
		} else {
			ret = func(trial.files_count, trial.files, trial.mappings_count, trial.mappings);
		}
		append_result(results, ret, trial.name);
	}

	close_dyld_fd();
	return results;
}
2238
2239 static results_t *
test_dst_size_fileoff(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff),const char * testname)2240 test_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
2241 {
2242 MAP_T map SMART_MAP;
2243 src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
2244 results_t *results = alloc_results(testname, eSMART_FILEOFF_DST_SIZE_TRIALS, trials->count);
2245
2246 for (unsigned i = 0; i < trials->count; i++) {
2247 src_dst_size_trial_t trial = trials->list[i];
2248 unallocation_t dst_base SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
2249 // src a.k.a. mmap fileoff doesn't slide
2250 trial = slide_trial_dst(trial, dst_base.addr);
2251 int ret = func(map, trial.dst, trial.size, trial.src);
2252 append_result(results, ret, trial.name);
2253 }
2254 return results;
2255 }
2256
2257 // Try to allocate a destination for mmap(MAP_FIXED) to overwrite.
2258 // On exit:
2259 // *out_dst *out_size are the allocation, or 0
2260 // *out_panic is true if the trial should stop and record PANIC
2261 // (because the trial specifies an absolute address that is already occupied)
2262 // *out_slide is true if the trial should slide by *out_dst
2263 static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map,mach_vm_address_t trial_dst,mach_vm_size_t trial_size,bool trial_dst_is_absolute,bool trial_size_is_absolute,mach_vm_address_t * out_dst,mach_vm_size_t * out_size,bool * out_panic,bool * out_slide)2264 allocate_for_mmap_fixed(MAP_T map, mach_vm_address_t trial_dst, mach_vm_size_t trial_size, bool trial_dst_is_absolute, bool trial_size_is_absolute, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
2265 {
2266 *out_panic = false;
2267 *out_slide = false;
2268
2269 if (trial_dst_is_absolute && trial_size_is_absolute) {
2270 // known dst addr, known size
2271 *out_dst = trial_dst;
2272 *out_size = trial_size;
2273 kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_FIXED);
2274 if (kr == KERN_NO_SPACE) {
2275 // this space is in use, we can't allow mmap to try to overwrite it
2276 *out_panic = true;
2277 *out_dst = 0;
2278 *out_size = 0;
2279 } else if (kr != 0) {
2280 // some other error, assume mmap will also fail
2281 *out_dst = 0;
2282 *out_size = 0;
2283 }
2284 // no slide, trial and allocation are already at the same place
2285 *out_slide = false;
2286 } else {
2287 // other cases either fit in a small allocation or fail
2288 *out_dst = 0;
2289 *out_size = TEST_ALLOC_SIZE;
2290 kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_ANYWHERE);
2291 if (kr != 0) {
2292 // allocation error, assume mmap will also fail
2293 *out_dst = 0;
2294 *out_size = 0;
2295 }
2296 *out_slide = true;
2297 }
2298 }
2299
// start_size_trial_t adapter: the trial's start/size become the
// mmap(MAP_FIXED) destination address and size directly.
static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map, start_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
{
	allocate_for_mmap_fixed(map, trial.start, trial.size, trial.start_is_absolute, trial.size_is_absolute,
	    out_dst, out_size, out_panic, out_slide);
}
// src_dst_size_trial_t adapter: the trial's dst/size become the
// mmap(MAP_FIXED) destination. Note the negation: a dst-relative size
// is NOT absolute for the purposes of the core overload above.
static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map, src_dst_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
{
	allocate_for_mmap_fixed(map, trial.dst, trial.size, trial.dst_is_absolute, !trial.size_is_dst_relative,
	    out_dst, out_size, out_panic, out_slide);
}
2312
2313 // Like test_dst_size_fileoff, but specialized for mmap(MAP_FIXED).
2314 // mmap(MAP_FIXED) is destructive, forcibly unmapping anything
2315 // already at that address.
2316 // We must ensure that each trial is either obviously invalid and caught
2317 // by the sanitizers, or is valid and overwrites an allocation we control.
2318 static results_t *
test_fixed_dst_size_fileoff(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff),const char * testname)2319 test_fixed_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
2320 {
2321 MAP_T map SMART_MAP;
2322 src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
2323 results_t *results = alloc_results(testname, eSMART_FILEOFF_DST_SIZE_TRIALS, trials->count);
2324 for (unsigned i = 0; i < trials->count; i++) {
2325 src_dst_size_trial_t trial = trials->list[i];
2326 // Try to create an allocation for mmap to overwrite.
2327 mach_vm_address_t dst_alloc;
2328 mach_vm_size_t dst_size;
2329 bool should_panic;
2330 bool should_slide_trial;
2331 allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
2332 if (should_panic) {
2333 append_result(results, PANIC, trial.name);
2334 continue;
2335 }
2336 if (should_slide_trial) {
2337 // src a.k.a. mmap fileoff doesn't slide
2338 trial = slide_trial_dst(trial, dst_alloc);
2339 }
2340
2341 kern_return_t ret = func(map, trial.dst, trial.size, trial.src);
2342
2343 if (dst_alloc != 0) {
2344 (void)mach_vm_deallocate(map, dst_alloc, dst_size);
2345 }
2346 append_result(results, ret, trial.name);
2347 }
2348 return results;
2349 }
2350
2351 // Like test_mach_with_allocated_start_size, but specialized for mmap(MAP_FIXED).
2352 // See test_fixed_dst_size_fileoff for more.
2353 static results_t *
test_fixed_dst_size(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size),const char * testname)2354 test_fixed_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size), const char *testname)
2355 {
2356 MAP_T map SMART_MAP;
2357 start_size_trials_t *trials SMART_START_SIZE_TRIALS(0); // no base addr
2358 results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, 0, trials->count);
2359 for (unsigned i = 0; i < trials->count; i++) {
2360 start_size_trial_t trial = trials->list[i];
2361 // Try to create an allocation for mmap to overwrite.
2362 mach_vm_address_t dst_alloc;
2363 mach_vm_size_t dst_size;
2364 bool should_panic;
2365 bool should_slide_trial;
2366 allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
2367 if (should_panic) {
2368 append_result(results, PANIC, trial.name);
2369 continue;
2370 }
2371 if (should_slide_trial) {
2372 trial = slide_trial(trial, dst_alloc);
2373 }
2374
2375 kern_return_t ret = func(map, trial.start, trial.size);
2376
2377 if (dst_alloc != 0) {
2378 (void)mach_vm_deallocate(map, dst_alloc, dst_size);
2379 }
2380 append_result(results, ret, trial.name);
2381 }
2382 return results;
2383 }
2384
2385 static results_t *
test_allocated_src_allocated_dst_size(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,mach_vm_address_t dst),const char * testname)2386 test_allocated_src_allocated_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst), const char * testname)
2387 {
2388 /*
2389 * Require src < dst. Some tests may get different error codes if src > dst.
2390 *
2391 * (No actual examples are known today, but see the comment in
2392 * test_allocated_src_unallocated_dst_size for an example in that
2393 * function. Here we are being conservatively careful.)
2394 *
2395 * TODO: test both src < dst and src > dst.
2396 */
2397 MAP_T map SMART_MAP;
2398 allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2399 allocation_t dst_base SMART_ALLOCATE_VM_AFTER(map, src_base.addr, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2400 assert(src_base.addr < dst_base.addr);
2401 src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
2402 results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
2403
2404 for (unsigned i = 0; i < trials->count; i++) {
2405 src_dst_size_trial_t trial = trials->list[i];
2406 trial = slide_trial_src(trial, src_base.addr);
2407 trial = slide_trial_dst(trial, dst_base.addr);
2408 int ret = func(map, trial.src, trial.size, trial.dst);
2409 // func should be fixed-overwrite, nothing new to deallocate
2410 append_result(results, ret, trial.name);
2411 }
2412 return results;
2413 }
2414
2415 static task_exc_guard_behavior_t saved_exc_guard_behavior;
2416
2417 static void
disable_exc_guard()2418 disable_exc_guard()
2419 {
2420 T_SETUPBEGIN;
2421
2422 // Disable EXC_GUARD for the duration of the test.
2423 // We restore it at the end.
2424 kern_return_t kr = task_get_exc_guard_behavior(mach_task_self(), &saved_exc_guard_behavior);
2425 assert(kr == 0);
2426
2427 kr = task_set_exc_guard_behavior(mach_task_self(), TASK_EXC_GUARD_NONE);
2428 if (kr) {
2429 T_LOG("warning, couldn't disable EXC_GUARD; some tests are disabled");
2430 EXC_GUARD_ENABLED = true;
2431 } else {
2432 EXC_GUARD_ENABLED = false;
2433 }
2434
2435 T_SETUPEND;
2436 }
2437
2438 static void
restore_exc_guard()2439 restore_exc_guard()
2440 {
2441 // restore process's EXC_GUARD handling
2442 (void)task_set_exc_guard_behavior(mach_task_self(), saved_exc_guard_behavior);
2443 }
2444
/*
 * Write `val` to the debug.disable_vm_sanitize_telemetry sysctl.
 * Returns sysctlbyname()'s result (0 on success); logs errno on failure.
 */
static int
set_disable_vm_sanitize_telemetry_via_sysctl(uint32_t val)
{
	const char *name = "debug.disable_vm_sanitize_telemetry";
	int rc = sysctlbyname(name, NULL, NULL, &val, sizeof(val));
	if (rc) {
		printf("sysctl failed with errno %d.\n", errno);
	}
	return rc;
}
2454
// Turn VM sanitizer telemetry off so the deliberately-bogus parameters
// these tests pass don't pollute telemetry. Returns 0 on success.
static int
disable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(1);
}
2460
// Re-enable VM sanitizer telemetry after the test run. Returns 0 on success.
static int
reenable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(0);
}
2466
2467 #define MAX_LINE_LENGTH 100
2468 #define MAX_NUM_TESTS 350
2469 #define TMP_DIR "/tmp/"
2470 #define ASSETS_DIR "../assets/vm_parameter_validation/"
2471 #define DECOMPRESS ASSETS_DIR "decompress.sh"
2472 #define GOLDEN_FILE TMP_DIR "user_golden_image.log"
2473
2474 #define KERN_GOLDEN_FILE TMP_DIR "kern_golden_image.log"
2475
2476 static results_t *golden_list[MAX_NUM_TESTS];
2477 static results_t *kern_list[MAX_NUM_TESTS];
2478 static uint32_t num_tests = 0; // num of tests in golden_list
2479 static uint32_t num_kern_tests = 0; // num of tests in kern_list
2480
/*
 * Copy the first t_count regenerated trial names into a golden results
 * list. Execution continues after the expansion (unlike FILL_TRIALS_NAMES
 * below, which breaks out of the enclosing switch case).
 */
#define FILL_TRIALS_NAMES_AND_CONTINUE(results, trials, t_count) { \
	for (unsigned i = 0; i < t_count; i++) { \
	        /* trials names are free'd in dealloc_results() */ \
	        (results)->list[i].name = kstrdup((trials)->list[i].name); \
	} \
}

/*
 * Copy regenerated trial names into a golden results list, tolerating
 * (but logging) a count mismatch between the regenerated trials and the
 * golden file. NB: expands a trailing `break` — this macro is only valid
 * directly inside a switch case (see fill_golden_trials()), where it
 * terminates the case without an explicit break at the call site.
 */
#define FILL_TRIALS_NAMES(results, trials) { \
	unsigned t_count = ((trials)->count < (results)->count) ? (trials)->count : (results)->count; \
	if ((trials)->count != (results)->count) { \
	        T_LOG("%s:%d Trials count mismatch, expected %u, golden file %u\n", \
	            __func__, __LINE__, (trials)->count, (results)->count); \
	}\
	FILL_TRIALS_NAMES_AND_CONTINUE((results), (trials), (t_count)) \
	break; \
}
2497
/*
 * Reconstruct trial names for a results_t parsed from a golden file.
 * Golden files store only return codes; the names are regenerated here by
 * re-running the same trial-generation formula (and stored arguments)
 * that produced the original results.
 *
 * NB: every FILL_TRIALS_NAMES() expansion ends in `break`, so the cases
 * below do NOT fall through even though no break is visible at the
 * call sites.
 */
static void
fill_golden_trials(uint64_t trialsargs[static TRIALSARGUMENTS_SIZE],
    results_t *results)
{
	trialsformula_t formula = results->trialsformula;
	uint64_t trialsargs0 = trialsargs[0];
	uint64_t trialsargs1 = trialsargs[1];
	switch (formula) {
	case eUNKNOWN_TRIALS:
		// Leave them empty
		T_FAIL("Golden file with unknown trials, testname: %s\n", results->testname);
		break;
	case eSMART_VM_MAP_KERNEL_FLAGS_TRIALS: {
		vm_map_kernel_flags_trials_t * trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_INHERIT_TRIALS: {
		vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_MMAP_KERNEL_FLAGS_TRIALS: {
		vm_map_kernel_flags_trials_t * trials SMART_MMAP_KERNEL_FLAGS_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_MMAP_FLAGS_TRIALS: {
		mmap_flags_trials_t *trials SMART_MMAP_FLAGS_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_GENERIC_FLAG_TRIALS: {
		generic_flag_trials_t *trials SMART_GENERIC_FLAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_TAG_TRIALS: {
		// special case, trails (vm_tag_trials_values) depend on data only available on KERNEL
		vm_tag_trials_t *trials SMART_VM_TAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_PROT_TRIALS: {
		vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_PROT_PAIR_TRIALS: {
		vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_LEDGER_TAG_TRIALS: {
		ledger_tag_trials_t *trials SMART_LEDGER_TAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_LEDGER_FLAG_TRIALS: {
		ledger_flag_trials_t *trials SMART_LEDGER_FLAG_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_ADDR_TRIALS: {
		addr_trials_t *trials SMART_ADDR_TRIALS(trialsargs0);
		if (trialsargs1) {
			// Special case with an additional trial such that obj_size + addr == 0
			FILL_TRIALS_NAMES_AND_CONTINUE(results, trials, trials->count);
			assert(trials->count + 1 == results->count);
			char *trial_desc;
			kasprintf(&trial_desc, "addr: -0x%llx", trialsargs1);
			results->list[results->count - 1].name = kstrdup(trial_desc);
			kfree_str(trial_desc);
			break;
		} else {
			FILL_TRIALS_NAMES(results, trials);
		}
	}
	case eSMART_SIZE_TRIALS: {
		size_trials_t *trials SMART_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_TRIALS: {
		// NB: base.addr is not constant between runs but doesn't affect trial name
		start_size_trials_t *trials SMART_START_SIZE_TRIALS(trialsargs0);
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_OFFSET_OBJECT_TRIALS: {
		start_size_offset_object_trials_t *trials SMART_START_SIZE_OFFSET_OBJECT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_OFFSET_TRIALS: {
		start_size_offset_trials_t *trials SMART_START_SIZE_OFFSET_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_SIZE_SIZE_TRIALS: {
		T_FAIL("SIZE_SIZE_TRIALS not used\n");
		break;
	}
	case eSMART_SRC_DST_SIZE_TRIALS: {
		src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_FILEOFF_DST_SIZE_TRIALS: {
		src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_BEHAVIOR_TRIALS: {
		vm_behavior_trials_t *trials SMART_VM_BEHAVIOR_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_ADVISE_TRIALS: {
		vm_advise_trials_t *trials SMART_VM_ADVISE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_SYNC_TRIALS: {
		vm_sync_trials_t *trials SMART_VM_SYNC_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_MSYNC_TRIALS: {
		vm_msync_trials_t *trials SMART_VM_MSYNC_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_MACHINE_ATTRIBUTE_TRIALS: {
		vm_machine_attribute_trials_t *trials SMART_VM_MACHINE_ATTRIBUTE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_VM_PURGEABLE_AND_STATE_TRIALS: {
		vm_purgeable_and_state_trials_t *trials SMART_VM_PURGEABLE_AND_STATE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_START_SIZE_START_SIZE_TRIALS: {
		start_size_start_size_trials_t *trials SMART_START_SIZE_START_SIZE_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS: {
		shared_region_map_and_slide_2_trials_t *trials SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(trialsargs0);
		FILL_TRIALS_NAMES(results, trials);
	}
	case eSMART_RECLAMATION_BUFFER_INIT_TRIALS: {
#if 0
		reclamation_buffer_init_trials_t * trials SMART_RECLAMATION_BUFFER_INIT_TRIALS();
		FILL_TRIALS_NAMES(results, trials);
#else
		break;
#endif
	}
	default:
		T_FAIL("New formula %u, args %llu %llu, update fill_golden_trials, testname: %s\n",
		    formula, trialsargs[0], trialsargs[1], results->testname);
	}
}
2640
// Number of test trials with ret == OUT_PARAM_BAD
int out_param_bad_count = 0;

/*
 * Find the parsed golden results entry for `testname` and bump its
 * tested_count; returns NULL if no entry matches.
 * NOTE(review): strncmp with strlen(testname) is a prefix match — a
 * stored golden name that merely *starts with* testname would also
 * match. Confirm exact names are guaranteed unique-by-prefix here.
 */
static results_t *
test_name_to_golden_results(const char* testname)
{
	results_t *golden_results = NULL;
	results_t *golden_results_found = NULL;

	for (uint32_t x = 0; x < num_tests; x++) {
		golden_results = golden_list[x];
		if (strncmp(golden_results->testname, testname, strlen(testname)) == 0) {
			golden_results->tested_count += 1;
			golden_results_found = golden_results;
			break;
		}
	}

	return golden_results_found;
}
2661
2662 static void
dump_results_list(results_t * res_list[],uint32_t res_num_tests)2663 dump_results_list(results_t *res_list[], uint32_t res_num_tests)
2664 {
2665 for (uint32_t x = 0; x < res_num_tests; x++) {
2666 results_t *results = res_list[x];
2667 testprintf("\t[%u] %s (%u)\n", x, results->testname, results->count);
2668 }
2669 }
2670
2671 static void
dump_golden_list()2672 dump_golden_list()
2673 {
2674 testprintf("======\n");
2675 testprintf("golden_list %p, num_tests %u\n", golden_list, num_tests);
2676 dump_results_list(golden_list, num_tests);
2677 testprintf("======\n");
2678 }
2679
2680 static void
dump_kernel_results_list()2681 dump_kernel_results_list()
2682 {
2683 testprintf("======\n");
2684 testprintf("kernel_results_list %p, num_tests %u\n", kern_list, num_kern_tests);
2685 dump_results_list(kern_list, num_kern_tests);
2686 testprintf("======\n");
2687 }
2688
2689 // Read results written by dump_golden_results().
2690 static int
populate_golden_results(const char * filename)2691 populate_golden_results(const char *filename)
2692 {
2693 FILE *file;
2694 char line[MAX_LINE_LENGTH];
2695 char trial_formula[20];
2696 results_t *results = NULL;
2697 trialsformula_t formula = eUNKNOWN_TRIALS;
2698 uint64_t trial_args[TRIALSARGUMENTS_SIZE] = {0, 0};
2699 uint32_t num_results = 0;
2700 uint32_t result_number = 0;
2701 int result_ret = 0;
2702 char *test_name = NULL;
2703 char *sub_line = NULL;
2704 char *s_num_results = NULL;
2705 bool in_test = FALSE;
2706 out_param_bad_count = 0;
2707 kern_trialname_generation = strnstr(filename, "kern_golden_image", strlen(filename)) != NULL;
2708
2709 // cd to the directory containing this executable
2710 // Test files are located relative to there.
2711 uint32_t exesize = 0;
2712 _NSGetExecutablePath(NULL, &exesize);
2713 char *exe = malloc(exesize);
2714 assert(exe != NULL);
2715 _NSGetExecutablePath(exe, &exesize);
2716 char *dir = dirname(exe);
2717 chdir(dir);
2718 free(exe);
2719
2720 file = fopen(filename, "r");
2721 if (file == NULL) {
2722 T_FAIL("Could not open file %s\n", filename);
2723 return 1;
2724 }
2725
2726 // Read file line by line
2727 while (fgets(line, MAX_LINE_LENGTH, file) != NULL) {
2728 // Check if the line starts with "TESTNAME" or "RESULT COUNT"
2729 if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
2730 // remove the newline char
2731 line[strcspn(line, "\n")] = 0;
2732 sub_line = line + strlen(TESTNAME_DELIMITER);
2733 test_name = strdup(sub_line);
2734 formula = eUNKNOWN_TRIALS;
2735 trial_args[0] = TRIALSARGUMENTS_NONE;
2736 trial_args[1] = TRIALSARGUMENTS_NONE;
2737 // T_LOG("TESTNAME %u : %s", num_tests, test_name);
2738 in_test = TRUE;
2739 } else if (in_test && strncmp(line, TRIALSFORMULA_DELIMITER, strlen(TRIALSFORMULA_DELIMITER)) == 0) {
2740 sscanf(line, "%*s %s %*s %llu,%llu,%llu", trial_formula, &trial_args[0], &trial_args[1], &trial_page_size);
2741 formula = trialsformula_from_string(trial_formula);
2742 } else if (in_test && strncmp(line, RESULTCOUNT_DELIMITER, strlen(RESULTCOUNT_DELIMITER)) == 0) {
2743 assert(num_tests < MAX_NUM_TESTS);
2744 s_num_results = line + strlen(RESULTCOUNT_DELIMITER);
2745 num_results = (uint32_t)strtoul(s_num_results, NULL, 10);
2746 results = alloc_results(test_name, formula, trial_args, TRIALSARGUMENTS_SIZE, num_results);
2747 assert(results);
2748 results->count = num_results;
2749 fill_golden_trials(trial_args, results);
2750 golden_list[num_tests++] = results;
2751 // T_LOG("num_tests %u, testname %s, count: %u", num_tests, results->testname, results->count);
2752 } else if (in_test && strncmp(line, TESTRESULT_DELIMITER, strlen(TESTRESULT_DELIMITER)) == 0) {
2753 sscanf(line, "%d: %d", &result_number, &result_ret);
2754 assert(result_number < num_results);
2755 // T_LOG("\tresult #%u: %d\n", result_number, result_ret);
2756 results->list[result_number].ret = result_ret;
2757 if (result_ret == OUT_PARAM_BAD) {
2758 out_param_bad_count += 1;
2759 T_FAIL("Out parameter violation in test %s - %s\n", results->testname, results->list[result_number].name);
2760 }
2761 } else {
2762 // T_LOG("Unknown line: %s\n", line);
2763 in_test = FALSE;
2764 }
2765 }
2766
2767 fclose(file);
2768
2769 if (!out_param_bad_count) {
2770 dump_golden_list();
2771 }
2772 kern_trialname_generation = FALSE;
2773
2774 return out_param_bad_count;
2775 }
2776
2777 static void
clean_golden_results()2778 clean_golden_results()
2779 {
2780 for (uint32_t x = 0; x < num_tests; ++x) {
2781 if (golden_list[x]->tested_count == 0) {
2782 T_LOG("WARN: Test %s found in golden file but no test with that name was run\n",
2783 golden_list[x]->testname);
2784 }
2785 if (golden_list[x]->tested_count > 1) {
2786 T_LOG("WARN: Test %s found in golden file with %d runs\n",
2787 golden_list[x]->testname, golden_list[x]->tested_count);
2788 }
2789 dealloc_results(golden_list[x]);
2790 golden_list[x] = NULL;
2791 }
2792 }
2793
2794 static void
clean_kernel_results()2795 clean_kernel_results()
2796 {
2797 for (uint32_t x = 0; x < num_kern_tests; ++x) {
2798 dealloc_results(kern_list[x]);
2799 kern_list[x] = NULL;
2800 }
2801 }
2802
2803 // buffer to output userspace golden file results (using same size as the kern buffer)
2804 static const int64_t GOLDEN_OUTPUT_BUFFER_SIZE = SYSCTL_OUTPUT_BUFFER_SIZE;
2805 static char* GOLDEN_OUTPUT_START;
2806 static char* GOLDEN_OUTPUT_BUF;
2807 static char* GOLDEN_OUTPUT_END;
2808
2809 void
goldenprintf(const char * format,...)2810 goldenprintf(const char *format, ...)
2811 {
2812 if (!GOLDEN_OUTPUT_START) {
2813 GOLDEN_OUTPUT_START = calloc(GOLDEN_OUTPUT_BUFFER_SIZE, 1);
2814 GOLDEN_OUTPUT_BUF = GOLDEN_OUTPUT_START;
2815 GOLDEN_OUTPUT_END = GOLDEN_OUTPUT_BUF + GOLDEN_OUTPUT_BUFFER_SIZE;
2816 }
2817
2818 int printed;
2819 ssize_t s_buffer_size = GOLDEN_OUTPUT_END - GOLDEN_OUTPUT_BUF;
2820 assert(s_buffer_size > 0 && s_buffer_size <= GOLDEN_OUTPUT_BUFFER_SIZE);
2821 size_t buffer_size = (size_t)s_buffer_size;
2822 va_list args;
2823 va_start(args, format);
2824 printed = vsnprintf(GOLDEN_OUTPUT_BUF, buffer_size, format, args);
2825 va_end(args);
2826 assert(printed >= 0);
2827 assert((unsigned)printed < buffer_size - 1);
2828 assert(GOLDEN_OUTPUT_BUF + printed + 1 < GOLDEN_OUTPUT_END);
2829 GOLDEN_OUTPUT_BUF += printed;
2830 }
2831
2832 // Knobs controlled by environment variables
2833
2834 // Verbose output in dump_results, controlled by DUMP_RESULTS env.
2835 static bool dump = FALSE;
2836 // Output to create a golden test result, controlled by GENERATE_GOLDEN_IMAGE.
2837 static bool generate_golden = FALSE;
2838 // Read existing golden file and print its contents in verbose format (like dump_results). Controlled by DUMP_GOLDEN_IMAGE.
2839 static bool dump_golden = FALSE;
2840 // Run tests as tests (i.e. emit TS_{PASS/FAIL}), enabled unless golden image generation is true.
2841 static bool should_test_results = TRUE;
2842
2843 static void
read_env()2844 read_env()
2845 {
2846 dump = (getenv("DUMP_RESULTS") != NULL);
2847 dump_golden = (getenv("DUMP_GOLDEN_IMAGE") != NULL);
2848 // Shouldn't do both
2849 generate_golden = (getenv("GENERATE_GOLDEN_IMAGE") != NULL) && !dump_golden;
2850 // Only test when no other golden image flag is set
2851 should_test_results = (getenv("SKIP_TESTS") == NULL) && !dump_golden && !generate_golden;
2852 }
2853
2854 // Comparator function for sorting result_t list by name
2855 static int
compare_names(const void * a,const void * b)2856 compare_names(const void *a, const void *b)
2857 {
2858 assert(((const result_t *)a)->name);
2859 assert(((const result_t *)b)->name);
2860 return strcmp(((const result_t *)a)->name, ((const result_t *)b)->name);
2861 }
2862
2863 static unsigned
binary_search(result_t * list,unsigned count,const result_t * trial)2864 binary_search(result_t *list, unsigned count, const result_t *trial)
2865 {
2866 const char *name = trial->name;
2867 unsigned left = 0, right = count;
2868 while (left < right) {
2869 // Range [left, right) is to be searched.
2870 unsigned mid = left + (right - left) / 2;
2871 int cmp = strcmp(list[mid].name, name);
2872 if (cmp == 0) {
2873 return mid;
2874 } else if (cmp < 0) {
2875 // Narrow search to [mid + 1, right).
2876 left = mid + 1;
2877 } else {
2878 // Narrow search to [left, mid).
2879 right = mid;
2880 }
2881 }
2882 return UINT_MAX; // Not found
2883 }
2884
2885 static inline bool
trial_name_equals(const result_t * a,const result_t * b)2886 trial_name_equals(const result_t *a, const result_t *b)
2887 {
2888 // NB: strlen match need to handle cases where a shorter 'bname' would match a longer 'aname'.
2889 if (strlen(a->name) == strlen(b->name) && compare_names(a, b) == 0) {
2890 return true;
2891 }
2892 return false;
2893 }
2894
/*
 * Locate the golden result whose trial name matches `trial`.
 * Returns NULL when the golden data carries no names (eUNKNOWN_TRIALS)
 * or the name is not present at all.
 */
static const result_t *
get_golden_result(results_t *golden_results, const result_t *trial, unsigned trial_idx)
{
	if (golden_results->trialsformula == eUNKNOWN_TRIALS) {
		// golden results don't contain trials names
		T_LOG("%s: update test's alloc_results to have a valid trialsformula_t\n", golden_results->testname);
		return NULL;
	}

	if (trial_idx < golden_results->count &&
	    golden_results->list[trial_idx].name &&
	    trial_name_equals(&golden_results->list[trial_idx], trial)) {
		// "fast search" path taken when golden file is in sync to test:
		// the same index holds the same trial.
		return &golden_results->list[trial_idx];
	}

	// "slow search" path taken when tests idxs are not aligned. Sort the array
	// by name and do binary search.
	// NOTE(review): this re-sorts on *every* slow-path lookup and, after the
	// first sort, permanently destroys the index alignment the fast path
	// relies on for this results_t — confirm the O(n log n)-per-miss cost
	// is acceptable here.
	qsort(golden_results->list, golden_results->count, sizeof(result_t), compare_names);
	unsigned g_idx = binary_search(golden_results->list, golden_results->count, trial);
	if (g_idx < golden_results->count) {
		return &golden_results->list[g_idx];
	}

	return NULL;
}
2921
/*
 * Compare one test's freshly-collected results against its golden
 * results, emitting darwintest T_PASS/T_FAIL/T_LOG events.
 *
 * Per-trial rules:
 *  - ACCEPTABLE: the trial declared itself correct regardless of the
 *    golden value (logged, capped at acceptable_max lines, never fails)
 *  - ret mismatch against the golden value: T_FAIL
 *  - trial missing from golden data: T_FAIL with a hint (either the
 *    kernel output buffer filled, truncating the last trial, or the
 *    golden files need regenerating)
 */
static void
test_results(results_t *golden_results, results_t *results)
{
	bool passed = TRUE;
	unsigned result_count = results->count;
	unsigned acceptable_count = 0;
	const unsigned acceptable_max = 16; // log up to this many ACCEPTABLE results
	const result_t *golden_result = NULL;
	// Count mismatch is fatal only when caused by the kernel's output
	// buffer filling up; otherwise it is logged and per-trial matching
	// (get_golden_result's slow path) copes with the misalignment.
	if (golden_results->count != results->count) {
		if (results->kernel_buffer_full) {
			T_FAIL("%s: number of iterations mismatch (wanted %u, got %u) "
			    "(kernel output buffer full)",
			    results->testname, golden_results->count, results->count);
			passed = FALSE;
		} else {
			T_LOG("%s: number of iterations mismatch (wanted %u, got %u)",
			    results->testname, golden_results->count, results->count);
		}
	}
	for (unsigned i = 0; i < result_count; i++) {
		golden_result = get_golden_result(golden_results, &results->list[i], i);
		if (golden_result) {
			if (results->list[i].ret == ACCEPTABLE) {
				// trial has declared itself to be correct
				// no matter what the golden result is
				acceptable_count++;
				if (acceptable_count <= acceptable_max) {
					T_LOG("%s RESULT ACCEPTABLE (expected %d), %s\n",
					    results->testname,
					    golden_result->ret, results->list[i].name);
				}
			} else if (results->list[i].ret != golden_result->ret) {
				T_FAIL("%s RESULT %d (expected %d), %s\n",
				    results->testname, results->list[i].ret,
				    golden_result->ret, results->list[i].name);
				passed = FALSE;
			}
		} else {
			/*
			 * This trial is not present in the golden results.
			 *
			 * This may be caused by new tests that require
			 * updates to the golden results.
			 * Or this may be caused by the last trial name being
			 * truncated when the kernel's output buffer is full.
			 * (Or both at once, in which case we only complain
			 * about one of them.)
			 */
			const char *suggestion;
			if (results->kernel_buffer_full && i == results->count - 1) {
				suggestion = "kernel test output buffer is full";
			} else {
				suggestion = "regenerate golden files to fix this";
			}
			T_FAIL("%s NEW RESULT %d, %s -- %s\n",
			    results->testname, results->list[i].ret,
			    results->list[i].name, suggestion);
			passed = FALSE;
		}
	}

	if (acceptable_count > acceptable_max) {
		T_LOG("%s %u more RESULT ACCEPTABLE trials not logged\n",
		    results->testname, acceptable_count - acceptable_max);
	}
	if (passed) {
		T_PASS("%s passed\n", results->testname);
	}
}
2991
2992 static results_t *
process_results(results_t * results)2993 process_results(results_t *results)
2994 {
2995 results_t *golden_results = NULL;
2996
2997 if (dump && !generate_golden) {
2998 __dump_results(results);
2999 }
3000
3001 if (generate_golden) {
3002 dump_golden_results(results);
3003 }
3004
3005 if (should_test_results) {
3006 golden_results = test_name_to_golden_results(results->testname);
3007
3008 if (golden_results) {
3009 test_results(golden_results, results);
3010 } else {
3011 T_FAIL("New test %s found, update golden list to allow return code testing", results->testname);
3012 // Dump results if not done previously
3013 if (!dump) {
3014 __dump_results(results);
3015 }
3016 }
3017 }
3018
3019 return results;
3020 }
3021
/*
 * Userspace entry point for VM parameter validation.
 *
 * Drives every user-callable VM API (memory entries, allocate/deallocate,
 * map/remap, mmap, wire, copyin/copyout, inherit, protect, madvise, msync,
 * machine attributes, purgeability, region/page info, shared region) through
 * its trial harness. Depending on the flags parsed by read_env() it either
 * dumps raw results, generates a golden image, dumps a parsed golden image,
 * or compares current results against the golden image.
 *
 * The RUN*() macros below are defined and #undef'd per section; each expands
 * to dealloc_results(process_results(<harness>(fn, name))) so every test's
 * results are processed and freed immediately.
 */
T_DECL(vm_parameter_validation_user,
    "parameter validation for userspace calls",
    T_META_SPAWN_TOOL(DECOMPRESS),
    T_META_SPAWN_TOOL_ARG("user"),
    T_META_SPAWN_TOOL_ARG(TMP_DIR),
    T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION),
    T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_ARCH)
    )
{
	// NOTE(review): presumably silences telemetry for the deliberately
	// invalid VM calls made below — confirm against
	// disable_vm_sanitize_telemetry()'s definition.
	if (disable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Could not disable VM API telemetry. Bailing out early.");
		return;
	}

	// Populates dump / generate_golden / dump_golden / should_test_results.
	read_env();

	T_LOG("dump %d, golden %d, dump_golden %d, test %d\n", dump, generate_golden, dump_golden, should_test_results);

	if (generate_golden && unsigned_code_is_disallowed()) {
		// Some test results change when SIP is enabled.
		// Golden files must record the SIP-disabled values.
		T_FAIL("Can't generate golden files with SIP enabled. Disable SIP and try again.\n");
		return;
	}

	if ((dump_golden || should_test_results) && populate_golden_results(GOLDEN_FILE)) {
		// bail out early, problem loading golden test results
		T_FAIL("Could not load golden file '%s'\n", GOLDEN_FILE);
		return;
	}

	set_up_guard_page();

	disable_exc_guard();

	if (dump_golden) {
		// just print the parsed golden file
		for (uint32_t x = 0; x < num_tests; ++x) {
			__dump_results(golden_list[x]);
		}
		goto out;
	}

	/*
	 * -- memory entry functions --
	 * The memory entry test functions use macros to generate each flavor of memory entry function.
	 * This is partially becauseof many entrypoints (mach_make_memory_entry/mach_make_memory_entry_64/mach_make_memory_entry)
	 * and partially because many flavors of each function are called (copy/memonly/share/...).
	 */

	// Mach start/size with both old-style and new-style types
	// (co-located so old and new can be compared more easily)
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#define RUN_OLD64(fn, name) RUN_NEW(fn, name)
#else
#define RUN_OLD(fn, name) do {} while (0)
#define RUN_OLD64(fn, name) do {} while (0)
#endif
	// mach_make_memory_entry has up to three entry points on U32, unlike other functions that have two
	RUN_NEW(call_mach_make_memory_entry_64__start_size__copy, "mach_make_memory_entry_64 (copy)");
	RUN_OLD(call_mach_make_memory_entry__start_size__copy, "mach_make_memory_entry (copy)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__copy, "_mach_make_memory_entry (copy)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__memonly, "mach_make_memory_entry_64 (mem_only)");
	RUN_OLD(call_mach_make_memory_entry__start_size__memonly, "mach_make_memory_entry (mem_only)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__memonly, "_mach_make_memory_entry (mem_only)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedcreate, "mach_make_memory_entry_64 (named_create)");
	RUN_OLD(call_mach_make_memory_entry__start_size__namedcreate, "mach_make_memory_entry (named_create)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__namedcreate, "_mach_make_memory_entry (named_create)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__share, "mach_make_memory_entry_64 (share)");
	RUN_OLD(call_mach_make_memory_entry__start_size__share, "mach_make_memory_entry (share)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__share, "_mach_make_memory_entry (share)");
	RUN_NEW(call_mach_make_memory_entry_64__start_size__namedreuse, "mach_make_memory_entry_64 (named_reuse)");
	RUN_OLD(call_mach_make_memory_entry__start_size__namedreuse, "mach_make_memory_entry (named_reuse)");
	RUN_OLD64(call__mach_make_memory_entry__start_size__namedreuse, "_mach_make_memory_entry (named_reuse)");
#undef RUN_NEW
#undef RUN_OLD
#undef RUN_OLD64

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_size(fn, name " (size)")))
	RUN(call_mach_memory_object_memory_entry_64__size, "mach_memory_object_memory_entry_64");
	RUN(call_replacement_mach_memory_object_memory_entry__size, "mach_memory_object_memory_entry");
#undef RUN

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#define RUN_OLD64(fn, name) RUN_NEW(fn, name)

	RUN_NEW(call_mach_make_memory_entry_64__vm_prot, "mach_make_memory_entry_64");
#if TEST_OLD_STYLE_MACH
	RUN_OLD(call_mach_make_memory_entry__vm_prot, "mach_make_memory_entry");
	RUN_OLD64(call__mach_make_memory_entry__vm_prot, "_mach_make_memory_entry");
#endif

#undef RUN_NEW
#undef RUN_OLD
#undef RUN_OLD64

#define RUN(fn, name) dealloc_results(process_results(test_mach_vm_prot(fn, name " (vm_prot_t)")))
	RUN(call_mach_memory_object_memory_entry_64__vm_prot, "mach_memory_object_memory_entry_64");
	RUN(call_replacement_mach_memory_object_memory_entry__vm_prot, "mach_memory_object_memory_entry");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
	RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
	RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
#undef RUN

	/*
	 * -- allocate/deallocate functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
	RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
	RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
	RUN(call_mach_vm_allocate__flags, "mach_vm_allocate");
#undef RUN

	dealloc_results(process_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
#if TEST_OLD_STYLE_MACH
	dealloc_results(process_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
#endif

#define RUN(fn, name) dealloc_results(process_results(test_deallocator(fn, name " (start/size)")))
	RUN(call_munmap, "munmap");
#undef RUN

	/*
	 * -- map/unmap functions --
	 * The map/unmap functions use multiple layers of macros.
	 * The macros are used both for function generation (see IMPL_ONE_FROM_HELPER) and to call all of those.
	 * This was written this way to further avoid lots of code duplication, as the map/remap functions
	 * have many different parameter combinations we want to test.
	 */

	// map tests

#define RUN_START_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
#define RUN_HINT_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
#define RUN_PROT_PAIR(fn, name) dealloc_results(process_results(test_mach_vm_prot_pair(fn, name " (prot_pairs)")))
#define RUN_INHERIT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
#define RUN_FLAGS(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
#define RUN_SSOO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset_object(fn, name " (start/size/offset/object)")))

	// Expands to every tested combination of allocation mode (allocate vs
	// memobject, fixed vs anywhere, copy vs not) for one map entry point.
#define RUN_ALL(fn, name) \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
	RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
	RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
	RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \

	RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
#if TEST_OLD_STYLE_MACH
	RUN_ALL(vm_map_64_retyped, vm_map_64);
	RUN_ALL(vm_map_retyped, vm_map);
#endif

#undef RUN_ALL
#undef RUN_START_SIZE
#undef RUN_HINT_SIZE
#undef RUN_PROT_PAIR
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SSOO

	// remap tests

#define FN_NAME(fn, variant, type) call_ ## fn ## __ ## variant ## __ ## type
#define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(process_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
#define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
#define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
#define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
#define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
#define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
#define RUN_SRC_DST_SIZE(fn, dst, variant, type_name, name) RUN_HELPER(test_allocated_src_##dst##_dst_size, fn, variant, src_dst_size, type_name, name)

	// 'realigned' is spliced into the type name for entry points whose src
	// address gets realigned (mach_vm_remap/vm_remap) and left empty for
	// mach_vm_remap_new.
#define RUN_ALL(fn, realigned, name) \
	RUN_SRC_SIZE(fn, copy, realigned "src/size", name); \
	RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name); \
	RUN_DST_SIZE(fn, fixed, "realigned dst/size", name); \
	RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name); \
	RUN_DST_SIZE(fn, anywhere, "hint/size", name); \
	RUN_INHERIT(fn, fixed, name); \
	RUN_INHERIT(fn, fixed_copy, name); \
	RUN_INHERIT(fn, anywhere, name); \
	RUN_FLAGS(fn, nocopy, name); \
	RUN_FLAGS(fn, copy, name); \
	RUN_PROT_PAIRS(fn, fixed, name); \
	RUN_PROT_PAIRS(fn, fixed_copy, name); \
	RUN_PROT_PAIRS(fn, anywhere, name); \
	RUN_SRC_DST_SIZE(fn, allocated, fixed, "src/dst/size", name); \
	RUN_SRC_DST_SIZE(fn, allocated, fixed_copy, "src/dst/size", name); \
	RUN_SRC_DST_SIZE(fn, unallocated, anywhere, "src/dst/size", name); \

	RUN_ALL(mach_vm_remap_user, "realigned ", mach_vm_remap);
	RUN_ALL(mach_vm_remap_new_user, , mach_vm_remap_new);

#if TEST_OLD_STYLE_MACH
	RUN_ALL(vm_remap_retyped, "realigned ", vm_remap);
#endif

#undef RUN_ALL
#undef RUN_HELPER
#undef RUN_SRC_SIZE
#undef RUN_DST_SIZE
#undef RUN_PROT_PAIRS
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SRC_DST_SIZE

	// mmap tests

#define RUN(fn, name) dealloc_results(process_results(test_mmap_with_allocated_vm_map_kernel_flags_t(fn, name " (kernel flags)")))
	RUN(call_mmap__anon_private__kernel_flags, "mmap (anon private)");
	RUN(call_mmap__anon_shared__kernel_flags, "mmap (anon shared)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_mmap_flags(fn, name " (mmap flags)")))
	RUN(call_mmap__mmap_flags, "mmap");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
	RUN(call_mmap__file_private__start_size, "mmap (file private)");
	RUN(call_mmap__anon_private__start_size, "mmap (anon private)");
	RUN(call_mmap__file_shared__start_size, "mmap (file shared)");
	RUN(call_mmap__anon_shared__start_size, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__start_size, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__start_size, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__start_size, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_fixed_dst_size(fn, name " (dst/size)")))
	RUN(call_mmap__fixed_private__start_size, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (offset/size)")))
	RUN(call_mmap__file_private__offset_size, "mmap (file private)");
	RUN(call_mmap__anon_private__offset_size, "mmap (anon private)");
	RUN(call_mmap__file_shared__offset_size, "mmap (file shared)");
	RUN(call_mmap__anon_shared__offset_size, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__offset_size, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__offset_size, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__offset_size, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_dst_size_fileoff(fn, name " (hint/size/fileoff)")))
	RUN(call_mmap__file_private__dst_size_fileoff, "mmap (file private)");
	RUN(call_mmap__anon_private__dst_size_fileoff, "mmap (anon private)");
	RUN(call_mmap__file_shared__dst_size_fileoff, "mmap (file shared)");
	RUN(call_mmap__anon_shared__dst_size_fileoff, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__dst_size_fileoff, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__dst_size_fileoff, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__dst_size_fileoff, "mmap (no unix03)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_fixed_dst_size_fileoff(fn, name " (dst/size/fileoff)")))
	RUN(call_mmap__fixed_private__dst_size_fileoff, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mmap__file_private__vm_prot, "mmap (file private)");
	RUN(call_mmap__anon_private__vm_prot, "mmap (anon private)");
	RUN(call_mmap__file_shared__vm_prot, "mmap (file shared)");
	RUN(call_mmap__anon_shared__vm_prot, "mmap (anon shared)");
	RUN(call_mmap__file_private_codesign__vm_prot, "mmap (file private codesign)");
	RUN(call_mmap__file_private_media__vm_prot, "mmap (file private media)");
	RUN(call_mmap__nounix03_private__vm_prot, "mmap (no unix03)");
	RUN(call_mmap__fixed_private__vm_prot, "mmap (fixed)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mremap_encrypted, "mremap_encrypted");
#undef RUN

	/*
	 * -- wire/unwire functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mlock, "mlock");
	RUN(call_munlock, "munlock");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mach_vm_wire__wire, "mach_vm_wire (wire)");
	RUN(call_replacement_vm_wire__wire, "vm_wire (wire)");
	RUN(call_mach_vm_wire__unwire, "mach_vm_wire (unwire)");
	RUN(call_replacement_vm_wire__unwire, "vm_wire (unwire)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mach_vm_wire__vm_prot, "mach_vm_wire");
	RUN(call_replacement_vm_wire__vm_prot, "vm_wire");
#undef RUN

	/*
	 * -- copyin/copyout functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_read, "mach_vm_read");
	RUN_OLD(call_vm_read, "vm_read");
	RUN_NEW(call_mach_vm_read_list, "mach_vm_read_list");
	RUN_OLD(call_vm_read_list, "vm_read_list");

	RUN_NEW(call_mach_vm_read_overwrite__src, "mach_vm_read_overwrite (src)");
	RUN_NEW(call_mach_vm_read_overwrite__dst, "mach_vm_read_overwrite (dst)");
	RUN_OLD(call_vm_read_overwrite__src, "vm_read_overwrite (src)");
	RUN_OLD(call_vm_read_overwrite__dst, "vm_read_overwrite (dst)");

	RUN_NEW(call_mach_vm_write__src, "mach_vm_write (src)");
	RUN_NEW(call_mach_vm_write__dst, "mach_vm_write (dst)");
	RUN_OLD(call_vm_write__src, "vm_write (src)");
	RUN_OLD(call_vm_write__dst, "vm_write (dst)");

	RUN_NEW(call_mach_vm_copy__src, "mach_vm_copy (src)");
	RUN_NEW(call_mach_vm_copy__dst, "mach_vm_copy (dst)");
	RUN_OLD(call_vm_copy__src, "vm_copy (src)");
	RUN_OLD(call_vm_copy__dst, "vm_copy (dst)");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- inherit functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_inherit, "mach_vm_inherit");
	RUN_OLD(call_vm_inherit, "vm_inherit");
#undef RUN_OLD
#undef RUN_NEW

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_minherit, "minherit");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
	RUN(call_mach_vm_inherit__inherit, "mach_vm_inherit");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
	RUN(call_minherit__inherit, "minherit");
#undef RUN

	/*
	 * -- protection functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_protect__start_size, "mach_vm_protect");
	RUN_OLD(call_vm_protect__start_size, "vm_protect");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_protect__vm_prot, "mach_vm_protect");
	RUN_OLD(call_vm_protect__vm_prot, "vm_protect");
#undef RUN_NEW
#undef RUN_OLD
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mprotect__start_size, "mprotect");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mprotect__vm_prot, "mprotect");
#undef RUN

	/*
	 * -- madvise/behavior functions --
	 */

	unsigned alignment_for_can_reuse;
	if (isRosetta()) {
		/*
		 * VM_BEHAVIOR_CAN_REUSE and MADV_CAN_REUSE get different errors
		 * on Rosetta when the allocation happens to be 4K vs 16K aligned.
		 * Force 16K alignment for consistent results.
		 */
		alignment_for_can_reuse = KB16 - 1;
	} else {
		/* Use default alignment everywhere else. */
		alignment_for_can_reuse = 0;
	}

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_behavior_set__start_size__default, "mach_vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
	RUN_OLD(call_vm_behavior_set__start_size__default, "vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
#undef RUN_NEW
#undef RUN_OLD

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_aligned_start_size(fn, alignment_for_can_reuse, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_aligned_start_size(fn, alignment_for_can_reuse, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_behavior_set__start_size__can_reuse, "mach_vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
	RUN_OLD(call_vm_behavior_set__start_size__can_reuse, "vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
#undef RUN_NEW
#undef RUN_OLD

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_aligned_vm_behavior_t(fn, alignment_for_can_reuse, name " (vm_behavior_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_aligned_vm_behavior_t(fn, alignment_for_can_reuse, name " (vm_behavior_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
	RUN_OLD(call_vm_behavior_set__vm_behavior, "vm_behavior_set");
#undef RUN_NEW
#undef RUN_OLD

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_madvise__start_size, "madvise");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_aligned_vm_advise_t(fn, alignment_for_can_reuse, name " (vm_advise_t)")))
	RUN(call_madvise__vm_advise, "madvise");
#undef RUN

	/*
	 * -- msync functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_msync__start_size, "mach_vm_msync");
	RUN_OLD(call_vm_msync__start_size, "vm_msync");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_sync_t(fn, name " (vm_sync_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_sync_t(fn, name " (vm_sync_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_msync__vm_sync, "mach_vm_msync");
	RUN_OLD(call_vm_msync__vm_sync, "vm_msync");
#undef RUN_NEW
#undef RUN_OLD
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_msync__start_size, "msync");
	RUN(call_msync_nocancel__start_size, "msync_nocancel");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_msync_t(fn, name " (msync flags)")))
	RUN(call_msync__vm_msync, "msync");
	RUN(call_msync_nocancel__vm_msync, "msync_nocancel");
#undef RUN

	/*
	 * -- machine attribute functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_start_size(fn, name " (start/size)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_machine_attribute__start_size, "mach_vm_machine_attribute");
	RUN_OLD(call_vm_machine_attribute__start_size, "vm_machine_attribute");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_machine_attribute_t(fn, name " (machine_attribute_t)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_vm_machine_attribute_t(fn, name " (machine_attribute_t)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_machine_attribute__machine_attribute, "mach_vm_machine_attribute");
	RUN_OLD(call_vm_machine_attribute__machine_attribute, "vm_machine_attribute");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- purgability/purgeability functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_addr(fn, name " (addr)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_purgeable_addr(fn, name " (addr)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_purgable_control__address__get, "mach_vm_purgable_control (get)");
	RUN_OLD(call_vm_purgable_control__address__get, "vm_purgable_control (get)");

	RUN_NEW(call_mach_vm_purgable_control__address__purge_all, "mach_vm_purgable_control (purge all)");
	RUN_OLD(call_vm_purgable_control__address__purge_all, "vm_purgable_control (purge all)");
#undef RUN_NEW
#undef RUN_OLD
#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_purgable_control__purgeable_state, "mach_vm_purgable_control");
	RUN_OLD(call_vm_purgable_control__purgeable_state, "vm_purgable_control");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- region info functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_addr(fn, name " (addr)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_region, "mach_vm_region");
	RUN_OLD(call_vm_region, "vm_region");
	RUN_NEW(call_mach_vm_region_recurse, "mach_vm_region_recurse");
	RUN_OLD(call_vm_region_recurse, "vm_region_recurse");
	RUN_OLD(call_vm_region_recurse_64, "vm_region_recurse_64");
#undef RUN_NEW
#undef RUN_OLD

	/*
	 * -- page info functions --
	 */

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
#if TEST_OLD_STYLE_MACH
#define RUN_OLD(fn, name) dealloc_results(process_results(test_oldmach_with_allocated_addr(fn, name " (addr)")))
#else
#define RUN_OLD(fn, name) do {} while (0)
#endif
	RUN_NEW(call_mach_vm_page_info, "mach_vm_page_info");
	RUN_NEW(call_mach_vm_page_query, "mach_vm_page_query");
	RUN_OLD(call_vm_map_page_query, "vm_map_page_query");
#undef RUN_NEW
#undef RUN_OLD

#define RUN_NEW(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN_NEW(call_mach_vm_page_range_query, "mach_vm_page_range_query");
#undef RUN_NEW

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mincore, "mincore");
#undef RUN

	/*
	 * -- miscellaneous functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_unix_shared_region_map_and_slide_2_np(fn, name " (files/mappings)")))
	RUN(call_shared_region_map_and_slide_2_np_child, "shared_region_map_and_slide_2_np");
	RUN(call_shared_region_map_and_slide_2_np_in_thread, "different thread shared_region_map_and_slide_2_np");
#undef RUN

	// NOTE(review): reclamation-buffer test is compiled out — confirm
	// whether it should be re-enabled or removed.
#if 0
#define RUN(fn, name) dealloc_results(process_results(test_mach_vm_deferred_reclamation_buffer_init(fn, name)))
	RUN(call_mach_vm_deferred_reclamation_buffer_init, "mach_vm_deferred_reclamation_buffer_init");
#undef RUN
#endif

	// Common exit path: also taken early by the dump_golden-only branch above.
out:
	restore_exc_guard();

	if (generate_golden) {
		if (!out_param_bad_count || (dump && !should_test_results)) {
			// Print after verified there is not OUT_PARAM_BAD results before printing,
			// or user explicitly set DUMP_RESULTS=1 GENERATE_GOLDEN_IMAGE=1
			printf("%s", GOLDEN_OUTPUT_START);
		}
	}
	// NOTE(review): freed unconditionally — assumes GOLDEN_OUTPUT_START is
	// always heap-allocated by this point; confirm against its producer.
	free(GOLDEN_OUTPUT_START);

	if (dump_golden || should_test_results) {
		clean_golden_results();
	}

	if (reenable_vm_sanitize_telemetry() != 0) {
		T_FAIL("Failed to reenable VM API telemetry.");
		return;
	}

	T_PASS("vm parameter validation userspace");
}
3657
3658
3659 /////////////////////////////////////////////////////
3660 // Kernel test invocation.
3661 // The actual test code is in:
3662 // osfmk/tests/vm_parameter_validation_kern.c
3663
// Two-level stringification: STRINGIFY(FOO) expands FOO first, then applies
// the # operator, so macro arguments are stringified by value, not by name.
// (Guarded because a shared header may already provide it.)
#ifndef STRINGIFY
#define __STR(x) #x
#define STRINGIFY(x) __STR(x)
#endif
3668
3669 // Verify golden list being generated doesn't contain OUT_BAD_PARAM
3670 static int
out_bad_param_in_kern_golden_results(char * kern_buffer)3671 out_bad_param_in_kern_golden_results(char *kern_buffer)
3672 {
3673 const char *out_param_bad_str = STRINGIFY(OUT_PARAM_BAD);
3674 char *out_param_bad_match = strstr(kern_buffer, out_param_bad_str);
3675 if (out_param_bad_match) {
3676 T_FAIL("Out parameter violation return code (%s) found in results, aborting.\n", out_param_bad_str);
3677 return 1;
3678 }
3679 return 0;
3680 }
3681
3682
3683 // Read results written by __dump_results()
3684 static int
populate_kernel_results(char * kern_buffer)3685 populate_kernel_results(char *kern_buffer)
3686 {
3687 char *line = NULL;
3688 char *test_name = NULL;
3689 results_t *kern_results = NULL;
3690 bool in_test = FALSE;
3691
3692 line = strtok(kern_buffer, KERN_RESULT_DELIMITER);
3693 while (line != NULL) {
3694 if (strncmp(line, TESTNAME_DELIMITER, strlen(TESTNAME_DELIMITER)) == 0) {
3695 char *sub_line = line + strlen(TESTNAME_DELIMITER);
3696 test_name = strdup(sub_line);
3697 in_test = TRUE;
3698 } else if (in_test && strncmp(line, RESULTCOUNT_DELIMITER, strlen(RESULTCOUNT_DELIMITER)) == 0) {
3699 char *s_num_kern_results = line + strlen(RESULTCOUNT_DELIMITER);
3700 uint32_t num_kern_results = (uint32_t)strtoul(s_num_kern_results, NULL, 10);
3701 kern_results = alloc_results(test_name, eUNKNOWN_TRIALS, num_kern_results);
3702 kern_list[num_kern_tests++] = kern_results;
3703 } else if (in_test && strncmp(line, TESTCONFIG_DELIMITER, strlen(TESTCONFIG_DELIMITER)) == 0) {
3704 char *sub_line = line + strlen(TESTCONFIG_DELIMITER);
3705 kern_results->testconfig = strdup(sub_line);
3706 } else if (in_test && strstr(line, KERN_TESTRESULT_DELIMITER)) {
3707 // should have found TESTCONFIG already
3708 assert(kern_results->testconfig != NULL);
3709 int result_ret = 0;
3710 char *token;
3711 sscanf(line, KERN_TESTRESULT_DELIMITER "%d", &result_ret);
3712 // get result name (comes after the first ,)
3713 token = strchr(line, ',');
3714 if (token && strlen(token) > 2) {
3715 token = token + 2; // skip the , and the extra space
3716 char *result_name = strdup(token);
3717 if (kern_results->count >= kern_results->capacity) {
3718 T_LOG("\tKERN Invalid output in test %s, "
3719 "too many results (expected %u), "
3720 "ignoring trial RESULT %d, %s\n",
3721 test_name, kern_results->capacity, result_ret, result_name);
3722 free(result_name);
3723 } else {
3724 kern_results->list[kern_results->count++] =
3725 (result_t){.ret = result_ret, .name = result_name};
3726 }
3727 }
3728 } else if (strncmp(line, KERN_FAILURE_DELIMITER, strlen(KERN_FAILURE_DELIMITER)) == 0) {
3729 /*
3730 * A fatal error message interrupted the output.
3731 * (for example, the kernel test's output buffer is full)
3732 * Clean up the last test because it may be
3733 * invalid due to truncated output.
3734 */
3735 T_FAIL("%s", line);
3736 if (kern_results != NULL) {
3737 if (kern_results->testconfig == NULL) {
3738 // We didn't get any results for this test.
3739 // Just drop it.
3740 dealloc_results(kern_results);
3741 kern_results = NULL;
3742 kern_list[--num_kern_tests] = NULL;
3743 } else {
3744 kern_results->kernel_buffer_full = true;
3745 }
3746 }
3747
3748 // Stop reading results now.
3749 break;
3750 } else {
3751 /*
3752 * Unrecognized output text.
3753 * One possible cause is that the kernel test's output
3754 * buffer is full so this line was truncated beyond
3755 * recognition. In that case we'll hit the
3756 * KERN_FAILURE_DELIMITER line next.
3757 */
3758
3759 // T_LOG("Unknown kernel result line: %s\n", line);
3760 }
3761
3762 line = strtok(NULL, KERN_RESULT_DELIMITER);
3763 }
3764
3765 dump_kernel_results_list();
3766
3767 return 0;
3768 }
3769
/*
 * Invoke a "debug.test.<t>" kernel test sysctl, passing `value` in and
 * returning the 64-bit result the kernel wrote back.
 *
 * If the sysctl name does not exist (ENOENT), the kernel is assumed to be
 * too old to implement the current test protocol: record a failure and
 * exit. Any other sysctl error is asserted fatal.
 */
static int64_t
run_sysctl_test(const char *t, int64_t value)
{
	char sysctl_name[1024];
	int64_t out = 0;
	size_t len = sizeof(value);

	snprintf(sysctl_name, sizeof(sysctl_name), "debug.test.%s", t);

	int rc = sysctlbyname(sysctl_name, &out, &len, &value, len);
	if (rc == -1 && errno == ENOENT) {
		/*
		 * sysctl name not found. Probably an older kernel with the
		 * previous version of this test.
		 */
		T_FAIL("sysctl %s not found; may be running on an older kernel "
		    "that does not implement the current version of this test",
		    sysctl_name);
		exit(1);
	}
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rc, "sysctlbyname(%s)", t);
	return out;
}
3793
3794 T_DECL(vm_parameter_validation_kern,
3795 "parameter validation for kext/xnu calls",
3796 T_META_SPAWN_TOOL(DECOMPRESS),
3797 T_META_SPAWN_TOOL_ARG("kern"),
3798 T_META_SPAWN_TOOL_ARG(TMP_DIR),
3799 T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_VERSION),
3800 T_META_SPAWN_TOOL_ARG(GOLDEN_FILES_ARCH)
3801 )
3802 {
3803 if (disable_vm_sanitize_telemetry() != 0) {
3804 T_FAIL("Could not disable VM API telemetry. Bailing out early.");
3805 return;
3806 }
3807
3808 read_env();
3809
3810 T_LOG("dump %d, golden %d, dump_golden %d, test %d\n", dump, generate_golden, dump_golden, should_test_results);
3811
3812 disable_exc_guard();
3813
3814 if (dump_golden) {
3815 if (populate_golden_results(KERN_GOLDEN_FILE)) {
3816 // couldn't load golden test results
3817 T_FAIL("Could not load golden file '%s'\n", KERN_GOLDEN_FILE);
3818 goto out;
3819 }
3820
3821 // just print the parsed golden file
3822 for (uint32_t x = 0; x < num_tests; ++x) {
3823 __dump_results(golden_list[x]);
3824 }
3825 clean_golden_results();
3826 goto out;
3827 }
3828
3829 T_LOG("Running kernel tests\n");
3830
3831 // We allocate a large buffer. The kernel-side code writes output to it.
3832 // Then we print that output. This is faster than making the kernel-side
3833 // code print directly to the serial console, which takes many minutes
3834 // to transfer our test output at 14.4 KB/s.
3835 // We align this buffer to KB16 to allow the lower bits to be used for a fd.
3836 char *output = calloc(SYSCTL_OUTPUT_BUFFER_SIZE, 1);
3837
3838 vm_parameter_validation_kern_args_t args = {
3839 .sizeof_args = sizeof(args),
3840 .output_buffer_address = (uint64_t)output,
3841 .output_buffer_size = SYSCTL_OUTPUT_BUFFER_SIZE,
3842 .file_descriptor = get_fd(),
3843 .generate_golden = generate_golden
3844 };
3845 int64_t result = run_sysctl_test("vm_parameter_validation_kern_v2", (int64_t)&args);
3846
3847 switch (result) {
3848 case KERN_TEST_SUCCESS:
3849 break;
3850 case KERN_TEST_BAD_ARGS:
3851 T_FAIL("version mismatch between test and kernel: "
3852 "sizeof(vm_parameter_validation_kern_args_t) did not match");
3853 goto out;
3854 case KERN_TEST_FAILED:
3855 if (output[0] == 0) {
3856 // no output from the kernel test; print a generic error
3857 T_FAIL("kernel test failed for unknown reasons");
3858 } else {
3859 // kernel provided a message: print it
3860 T_FAIL("kernel test failed: %s", output);
3861 }
3862 goto out;
3863 default:
3864 T_FAIL("kernel test failed with unknown error %llu", result);
3865 goto out;
3866 }
3867
3868 if (generate_golden) {
3869 if (!out_bad_param_in_kern_golden_results(output) || (dump && !should_test_results)) {
3870 // Print after verified there is not OUT_PARAM_BAD results before printing,
3871 // or user explicitly set DUMP_RESULTS=1 GENERATE_GOLDEN_IMAGE=1
3872 printf("%s", output);
3873 }
3874 free(output);
3875 output = NULL;
3876 } else {
3877 // recreate a results_t to compare against the golden file results
3878 if (populate_kernel_results(output)) {
3879 T_FAIL("Error while parsing results\n");
3880 }
3881 free(output);
3882 output = NULL;
3883
3884 if (should_test_results && populate_golden_results(KERN_GOLDEN_FILE)) {
3885 // couldn't load golden test results
3886 T_FAIL("Could not load golden file '%s'\n", KERN_GOLDEN_FILE);
3887 clean_kernel_results();
3888 goto out;
3889 }
3890
3891 // compare results against values from golden list
3892 for (uint32_t x = 0; x < num_kern_tests; ++x) {
3893 process_results(kern_list[x]);
3894 dealloc_results(kern_list[x]);
3895 kern_list[x] = NULL;
3896 }
3897 clean_golden_results();
3898 }
3899
3900 out:
3901 restore_exc_guard();
3902
3903 if (reenable_vm_sanitize_telemetry() != 0) {
3904 T_FAIL("Failed to reenable VM API telemetry.");
3905 return;
3906 }
3907
3908 T_PASS("vm parameter validation kern");
3909 }
3910