xref: /xnu-11215.1.10/osfmk/tests/vm_parameter_validation.h (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 #ifndef VM_PARAMETER_VALIDATION_H
2 #define VM_PARAMETER_VALIDATION_H
3 
4 
5 /*
6  * Common Naming Conventions:
7  * call_* functions are harnesses used to call a single function under test.
8  * They take all arguments needed to call the function and avoid calling functions with PANICing values.
9  * test_* functions are used to call the call_ functions. They iterate through possibilities of interesting parameters
10  * and provide those as arguments to the call_ functions.
11  *
12  * Common Abbreviations:
13  * ssz: Start + Start + Size
14  * ssoo: Start + Size + Offset + Object
15  * sso: Start + Start + Offset
16  */
17 
18 #if KERNEL
19 
20 #include <mach/vm_map.h>
21 #include <mach/mach_vm.h>
22 #include <mach/mach_types.h>
23 #include <mach/mach_host.h>
24 #include <mach/memory_object.h>
25 #include <mach/memory_entry.h>
26 #include <mach/mach_vm_server.h>
27 
28 #include <device/device_port.h>
29 #include <sys/mman.h>
30 #include <sys/errno.h>
31 #include <vm/memory_object.h>
32 #include <vm/vm_fault.h>
33 #include <vm/vm_map_internal.h>
34 #include <vm/vm_kern_internal.h>
35 #include <vm/vm_pageout.h>
36 #include <vm/vm_protos.h>
37 #include <vm/vm_memtag.h>
38 #include <vm/vm_memory_entry.h>
39 #include <vm/vm_memory_entry_xnu.h>
40 #include <vm/vm_object_internal.h>
41 #include <vm/vm_iokit.h>
42 #include <kern/ledger.h>
43 extern ledger_template_t        task_ledger_template;
44 
45 // Temporary bridging of vm header rearrangement.
46 // Remove this after integration is complete.
47 #if 0  /* old style */
48 
49 #define FLAGS_AND_TAG(f, t) f, t
50 #define vm_map_wire_and_extract vm_map_wire_and_extract_external
51 
52 #else /* new style */
53 
54 #define FLAGS_AND_TAG(f, t) ({                             \
55 	vm_map_kernel_flags_t vmk_flags;                   \
56 	vm_map_kernel_flags_set_vmflags(&vmk_flags, f, t); \
57 	vmk_flags;                                         \
58 })
59 
60 #endif
61 
62 #else  // KERNEL
63 
64 #include <TargetConditionals.h>
65 
66 #endif // KERNEL
67 
68 // fixme re-enable -Wunused-function when we're done writing new tests
69 #pragma clang diagnostic ignored "-Wunused-function"
70 
71 // ignore some warnings inside this file
72 #pragma clang diagnostic push
73 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
74 #pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
75 #pragma clang diagnostic ignored "-Wmissing-prototypes"
76 #pragma clang diagnostic ignored "-Wpedantic"
77 #pragma clang diagnostic ignored "-Wgcc-compat"
78 
79 
80 #define INVALID_INITIAL_ADDRESS 0xabababab
81 /*
82  * It's important for us to never have a test with a size like
83  * INVALID_INITIAL_SIZE, and for this to stay non page aligned.
84  * See comment in call_mach_memory_entry_map_size__start_size for more info
85  */
86 #define INVALID_INITIAL_SIZE 0xabababab
87 #define INVALID_INITIAL_PPNUM 0xabababab
88 #define INVALID_INITIAL_MACH_PORT (mach_port_t) 0xbabababa
89 // This cannot possibly be a valid vm_map_copy_t as they are pointers
90 #define INVALID_INITIAL_COPY (vm_map_copy_t) (void *) -1
91 
92 // output buffer size for kext/xnu sysctl tests
93 // note: 1 GB is too big for watchOS
94 static const int64_t SYSCTL_OUTPUT_BUFFER_SIZE = 512 * 1024 * 1024;  // 512 MB
95 
96 // caller name (kernel/kext/userspace), used to label the output
97 #if KERNEL
98 #       define CALLER_NAME "kernel"
99 #else
100 #       define CALLER_NAME "userspace"
101 #endif
102 
103 // os name, used to label the output
104 #if KERNEL
105 #       if XNU_TARGET_OS_OSX
106 #               define OS_NAME "macos"
107 #       elif XNU_TARGET_OS_IOS
108 #              define OS_NAME "ios"
109 #       elif XNU_TARGET_OS_TV
110 #               define OS_NAME "tvos"
111 #       elif XNU_TARGET_OS_WATCH
112 #               define OS_NAME "watchos"
113 #       elif XNU_TARGET_OS_BRIDGE
114 #               define OS_NAME "bridgeos"
115 #       else
116 #               define OS_NAME "unknown-os"
117 #       endif
118 #else
119 #       if TARGET_OS_OSX
120 #               define OS_NAME "macos"
121 #       elif TARGET_OS_MACCATALYST
122 #               define OS_NAME "catalyst"
123 #       elif TARGET_OS_IOS
124 #              define OS_NAME "ios"
125 #       elif TARGET_OS_TV
126 #               define OS_NAME "tvos"
127 #       elif TARGET_OS_WATCH
128 #               define OS_NAME "watchos"
129 #       elif TARGET_OS_BRIDGE
130 #               define OS_NAME "bridgeos"
131 #       else
132 #               define OS_NAME "unknown-os"
133 #       endif
134 #endif
135 
136 // architecture name, used to label the output
137 #if KERNEL
138 #       if __i386__
139 #               define ARCH_NAME "i386"
140 #       elif __x86_64__
141 #               define ARCH_NAME "x86_64"
142 #       elif __arm64__ && __LP64__
143 #               define ARCH_NAME "arm64"
144 #       elif __arm64__ && !__LP64__
145 #               define ARCH_NAME "arm64_32"
146 #       elif __arm__
147 #               define ARCH_NAME "arm"
148 #       else
149 #               define ARCH_NAME "unknown-arch"
150 #       endif
151 #else
152 #       if TARGET_CPU_X86
153 #               define ARCH_NAME "i386"
154 #       elif TARGET_CPU_X86_64
155 #               define ARCH_NAME "x86_64"
156 #       elif TARGET_CPU_ARM64 && __LP64__
157 #               define ARCH_NAME "arm64"
158 #       elif TARGET_CPU_ARM64 && !__LP64__
159 #               define ARCH_NAME "arm64_32"
160 #       elif TARGET_CPU_ARM
161 #               define ARCH_NAME "arm"
162 #       else
163 #               define ARCH_NAME "unknown-arch"
164 #       endif
165 #endif
166 
167 #if KERNEL
168 #       define MAP_T vm_map_t
169 #else
170 #       define MAP_T mach_port_t
171 #endif
172 
173 // Mach has new-style functions with 64-bit address and size
174 // and old-style functions with pointer-size address and size.
175 // On U64 platforms both names send the same MIG message
176 // and run the same kernel code so we need not test both.
177 // On U32 platforms they are different inside the kernel.
178 // fixme for kext/kernel, verify that vm32 entrypoints are not used and not exported
179 #if KERNEL || __LP64__
180 #       define TEST_OLD_STYLE_MACH 0
181 #else
182 #       define TEST_OLD_STYLE_MACH 1
183 #endif
184 
185 // always 64-bit: addr_t, mach_vm_address/size_t, memory_object_size/offset_t
186 // always 32-bit: mach_msg_type_number_t, natural_t
187 // pointer-size:  void*, vm_address_t, vm_size_t
188 typedef uint64_t addr_t;
189 
190 // We often use 4KB or 16KB instead of PAGE_SIZE
191 // (for example using 16KB instead of PAGE_SIZE to avoid Rosetta complications)
192 #define KB4 ((addr_t)4*1024)
193 #define KB16 ((addr_t)16*1024)
194 
195 // Allocation size commonly used in tests.
196 // This size is big enough that our trials of small
197 // address offsets and sizes will still fit inside it.
198 #define TEST_ALLOC_SIZE (4 * KB16)
199 
// Magic return codes used for in-band signalling.
// These must avoid kern_return_t and errno values.
#define BUSTED        -99  // trial is broken
#define IGNORED       -98  // trial not performed for acceptable reasons
#define ZEROSIZE      -97  // trial succeeded because size==0 (FAKE tests only)
#define PANIC         -96  // trial not performed because it would provoke a panic
#define GUARD         -95  // trial not performed because it would provoke EXC_GUARD
#define ACCEPTABLE    -94  // trial should be considered successful no matter what the golden result is
#define OUT_PARAM_BAD -93  // trial has incorrect setting of out parameter values

// True if `err` is one of the in-band signalling codes above,
// i.e. the trial did not produce a real return value.
// ACCEPTABLE is deliberately excluded: it stands in for a genuine success.
static inline bool
is_fake_error(int err)
{
	switch (err) {
	case BUSTED:
	case IGNORED:
	case ZEROSIZE:
	case PANIC:
	case GUARD:
	case OUT_PARAM_BAD:
		return true;
	default:
		return false;
	}
}
216 
217 // Return the count of a (non-decayed!) array.
218 #define countof(array) (sizeof(array) / sizeof((array)[0]))
219 
220 #if !KERNEL
221 static inline uint64_t
VM_MAP_PAGE_SIZE(MAP_T map __unused)222 VM_MAP_PAGE_SIZE(MAP_T map __unused)
223 {
224 	// fixme wrong for out-of-process maps
225 	// on platforms that support processes with two different page sizes
226 	return PAGE_SIZE;
227 }
228 
229 static inline uint64_t
VM_MAP_PAGE_MASK(MAP_T map __unused)230 VM_MAP_PAGE_MASK(MAP_T map __unused)
231 {
232 	// fixme wrong for out-of-process maps
233 	// on platforms that support processes with two different page sizes
234 	return PAGE_MASK;
235 }
236 #endif
237 
238 
239 #define IMPL(T)                                                         \
240 	/* Round up to the given page mask. */                          \
241 	__attribute__((overloadable, used))                             \
242 	static inline T                                                 \
243 	round_up_mask(T addr, uint64_t pagemask) {                      \
244 	        return (addr + (T)pagemask) & ~((T)pagemask);           \
245 	}                                                               \
246                                                                         \
247 	/* Round up to the given page size. */                          \
248 	__attribute__((overloadable, used))                             \
249 	static inline T                                                 \
250 	round_up_page(T addr, uint64_t pagesize) {                      \
251 	        return round_up_mask(addr, pagesize - 1);               \
252 	}                                                               \
253                                                                         \
254 	/* Round up to the given map's page size. */                    \
255 	__attribute__((overloadable, used))                             \
256 	static inline T                                                 \
257 	round_up_map(MAP_T map, T addr) {                               \
258 	        return round_up_mask(addr, VM_MAP_PAGE_MASK(map));      \
259 	}                                                               \
260                                                                         \
261 	/* Truncate to the given page mask. */                          \
262 	__attribute__((overloadable, used))                             \
263 	static inline T                                                 \
264 	trunc_down_mask(T addr, uint64_t pagemask)                      \
265 	{                                                               \
266 	        return addr & ~((T)pagemask);                           \
267 	}                                                               \
268                                                                         \
269 	/* Truncate to the given page size. */                          \
270 	__attribute__((overloadable, used))                             \
271 	static inline T                                                 \
272 	trunc_down_page(T addr, uint64_t pagesize)                      \
273 	{                                                               \
274 	        return trunc_down_mask(addr, pagesize - 1);             \
275 	}                                                               \
276                                                                         \
277 	/* Truncate to the given map's page size. */                    \
278 	__attribute__((overloadable, used))                             \
279 	static inline T                                                 \
280 	trunc_down_map(MAP_T map, T addr)                               \
281 	{                                                               \
282 	        return trunc_down_mask(addr, VM_MAP_PAGE_MASK(map));    \
283 	}                                                               \
284                                                                         \
285 	__attribute__((overloadable, unavailable("use round_up_page instead"))) \
286 	extern T                                                        \
287 	round_up(T addr, uint64_t pagesize);                            \
288 	__attribute__((overloadable, unavailable("use trunc_down_page instead"))) \
289 	extern T                                                        \
290 	trunc_down(T addr, uint64_t pagesize);
291 
292 IMPL(uint64_t)
IMPL(uint32_t)293 IMPL(uint32_t)
294 #undef IMPL
295 
296 
297 // duplicate the logic of VM's vm_map_range_overflows()
298 // false == good start+size combo, true == bad combo
299 #define IMPL(T)                                                         \
300 	__attribute__((overloadable, used))                             \
301 	static bool                                                     \
302 	range_overflows_allow_zero(T start, T size, T pgmask)           \
303 	{                                                               \
304 	        if (size == 0) {                                        \
305 	                return false;                                   \
306 	        }                                                       \
307                                                                         \
308 	        T sum;                                                  \
309 	        if (__builtin_add_overflow(start, size, &sum)) {        \
310 	                return true;                                    \
311 	        }                                                       \
312                                                                         \
313 	        T aligned_start = trunc_down_mask(start, pgmask);       \
314 	        T aligned_end = round_up_mask(start + size, pgmask);    \
315 	        if (aligned_end <= aligned_start) {                     \
316 	                return true;                                    \
317 	        }                                                       \
318                                                                         \
319 	        return false;                                           \
320 	}                                                               \
321                                                                         \
322 	/* like range_overflows_allow_zero(), but without the */        \
323 	/* unconditional approval of size==0 */                         \
324 	__attribute__((overloadable, used))                             \
325 	static bool                                                     \
326 	range_overflows_strict_zero(T start, T size, T pgmask)                      \
327 	{                                                               \
328 	        T sum;                                                  \
329 	        if (__builtin_add_overflow(start, size, &sum)) {        \
330 	                return true;                                    \
331 	        }                                                       \
332                                                                         \
333 	        T aligned_start = trunc_down_mask(start, pgmask);       \
334 	        T aligned_end = round_up_mask(start + size, pgmask);    \
335 	        if (aligned_end <= aligned_start) {                     \
336 	                return true;                                    \
337 	        }                                                       \
338                                                                         \
339 	        return false;                                           \
340 	}                                                               \
341 
342 IMPL(uint64_t)
343 IMPL(uint32_t)
344 #undef IMPL
345 
346 
// return true if the process is running under Rosetta translation
// https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment#Determine-Whether-Your-App-Is-Running-as-a-Translated-Binary
static bool
isRosetta(void)
{
#if KERNEL
	// Kernel code itself is never translated.
	return false;
#else
	int out_value = 0;
	size_t io_size = sizeof(out_value);
	if (sysctlbyname("sysctl.proc_translated", &out_value, &io_size, NULL, 0) == 0) {
		assert(io_size >= sizeof(out_value));
		// Make the int -> bool conversion explicit.
		return out_value != 0;
	}
	// sysctl absent: not running translated.
	return false;
#endif
}
364 
365 #if KERNEL
// Knobs controlled by boot arguments
extern bool kernel_generate_golden;

// Initialize kernel_generate_golden from the
// vm_parameter_validation_kern_golden=1 boot-arg. Defaults to false.
static void
init_kernel_generate_golden(void)
{
	kernel_generate_golden = false;  /* was FALSE; keep bool idiom consistent */
	uint32_t kern_golden_arg;
	if (PE_parse_boot_argn("vm_parameter_validation_kern_golden", &kern_golden_arg, sizeof(kern_golden_arg))) {
		kernel_generate_golden = (kern_golden_arg == 1);
	}
}
377 #else
// Knobs controlled by environment variables
extern bool dump;
extern bool generate_golden;
extern bool test_results;

// Read the test-control environment variables into the knobs above.
static void
read_env(void)
{
	dump = (getenv("DUMP_RESULTS") != NULL);
	generate_golden = (getenv("GENERATE_GOLDEN_IMAGE") != NULL);
	// Generating the golden image and testing against it are mutually
	// exclusive; golden generation wins. (Must run after generate_golden
	// is set.)
	test_results = (getenv("SKIP_TESTS") == NULL) && !generate_golden; // Shouldn't do both
}
389 #endif
390 
391 
392 /////////////////////////////////////////////////////
393 // String functions that work in both kernel and userspace.
394 
395 // Test output function.
396 // This prints either to stdout (userspace tests) or to a userspace buffer (kernel sysctl tests)
397 #if KERNEL
398 extern void testprintf(const char *, ...) __printflike(1, 2);
399 #else
400 #define testprintf printf
401 #endif
402 
403 // kstrdup() is like strdup() but in the kernel it uses kalloc_data()
404 static inline char *
kstrdup(const char * str)405 kstrdup(const char *str)
406 {
407 #if KERNEL
408 	size_t size = strlen(str) + 1;
409 	char *copy = kalloc_data(size, Z_WAITOK | Z_ZERO);
410 	memcpy(copy, str, size);
411 	return copy;
412 #else
413 	return strdup(str);
414 #endif
415 }
416 
417 // kfree_str() is like free() but in the kernel it uses kfree_data_addr()
418 static inline void
kfree_str(char * str)419 kfree_str(char *str)
420 {
421 #if KERNEL
422 	kfree_data_addr(str);
423 #else
424 	free(str);
425 #endif
426 }
427 
// kasprintf() is like asprintf() but in the kernel it uses kalloc_data()
// Returns the formatted length, or a negative value with *out_str == NULL
// on formatting failure.

#if !KERNEL
#       define kasprintf asprintf
#else
extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
static inline int
kasprintf(char ** __restrict out_str, const char * __restrict format, ...) __printflike(2, 3)
{
	va_list args1, args2;

	// compute length (vsnprintf with a 1-byte buffer returns the
	// length the full output would have had)
	char c;
	va_start(args1, format);
	va_copy(args2, args1);
	int len1 = vsnprintf(&c, sizeof(c), format, args1);
	va_end(args1);
	if (len1 < 0) {
		va_end(args2);  // bug fix: args2 was leaked on this path
		*out_str = NULL;
		return len1;
	}

	// allocate and print
	char *str = kalloc_data(len1 + 1, Z_NOFAIL);
	int len2 = vsnprintf(str, len1 + 1, format, args2);
	va_end(args2);
	if (len2 < 0) {
		kfree_data_addr(str);
		*out_str = NULL;
		// bug fix: was `return len1`, which reported success
		// (a positive length) while leaving *out_str NULL
		return len2;
	}
	assert(len1 == len2);

	*out_str = str;
	return len1;
}
// KERNEL
#endif
466 
467 
468 /////////////////////////////////////////////////////
469 // Record trials and return values from tested functions (BSD int or Mach kern_return_t)
470 
471 // ret: return value of this trial
472 // name: name of this trial, including the input values passed in
473 typedef struct {
474 	int ret;
475 	char *name;
476 } result_t;
477 
478 typedef struct {
479 	const char *testname;
480 	char *testconfig;
481 	unsigned capacity;
482 	unsigned count;
483 	result_t list[];
484 } results_t;
485 
486 extern results_t *golden_list[];
487 extern results_t *kern_list[];
488 static uint32_t num_tests = 0; // num of tests in golden list
489 static uint32_t num_kern_tests = 0; // num of tests in kernel results list
490 
491 static __attribute__((overloadable))
492 results_t *
alloc_results(const char * testname,char * testconfig,unsigned capacity)493 alloc_results(const char *testname, char *testconfig, unsigned capacity)
494 {
495 	results_t *results;
496 #if KERNEL
497 	results = kalloc_type(results_t, result_t, capacity, Z_WAITOK | Z_ZERO);
498 #else
499 	results = calloc(sizeof(results_t) + capacity * sizeof(result_t), 1);
500 #endif
501 	assert(results != NULL);
502 	results->testname = testname;
503 	results->testconfig = testconfig;
504 	results->capacity = capacity;
505 	results->count = 0;
506 	return results;
507 }
508 
509 static char *
alloc_default_testconfig(void)510 alloc_default_testconfig(void)
511 {
512 	char *result;
513 	kasprintf(&result, "%s %s %s%s",
514 	    OS_NAME, ARCH_NAME, CALLER_NAME, isRosetta() ? " rosetta" : "");
515 	return result;
516 }
517 
518 static __attribute__((overloadable))
519 results_t *
alloc_results(const char * testname,unsigned capacity)520 alloc_results(const char *testname, unsigned capacity)
521 {
522 	return alloc_results(testname, alloc_default_testconfig(), capacity);
523 }
524 
525 static void __unused
dealloc_results(results_t * results)526 dealloc_results(results_t *results)
527 {
528 	for (unsigned int i = 0; i < results->count; i++) {
529 		kfree_str(results->list[i].name);
530 	}
531 	kfree_str(results->testconfig);
532 #if KERNEL
533 	kfree_type(results_t, result_t, results->capacity, results);
534 #else
535 	free(results);
536 #endif
537 }
538 
539 static void __attribute__((overloadable, unused))
append_result(results_t * results,int ret,const char * name)540 append_result(results_t *results, int ret, const char *name)
541 {
542 	// halt if the results list is already full
543 	// fixme reallocate instead if we can't always choose the size in advance
544 	assert(results->count < results->capacity);
545 
546 	// name may be freed before we make use of it
547 	char * name_cpy = kstrdup(name);
548 	assert(name_cpy);
549 	results->list[results->count++] =
550 	    (result_t){.ret = ret, .name = name_cpy};
551 }
552 
553 static results_t *
test_name_to_golden_results(const char * testname)554 test_name_to_golden_results(const char* testname)
555 {
556 	results_t *golden_results = NULL;
557 	results_t *golden_results_found = NULL;
558 
559 	for (uint32_t x = 0; x < num_tests; x++) {
560 		golden_results = golden_list[x];
561 		if (strncmp(golden_results->testname, testname, strlen(testname)) == 0) {
562 			golden_results_found = golden_results;
563 			break;
564 		}
565 	}
566 
567 	return golden_results_found;
568 }
569 
570 static void
dump_results_list(results_t * res_list[],uint32_t res_num_tests)571 dump_results_list(results_t *res_list[], uint32_t res_num_tests)
572 {
573 	for (uint32_t x = 0; x < res_num_tests; x++) {
574 		results_t *results = res_list[x];
575 		testprintf("\t[%u] %s (%u)\n", x, results->testname, results->count);
576 	}
577 }
578 
579 static void
dump_golden_list()580 dump_golden_list()
581 {
582 	testprintf("======\n");
583 	testprintf("golden_list %p, num_tests %u\n", golden_list, num_tests);
584 	dump_results_list(golden_list, num_tests);
585 	testprintf("======\n");
586 }
587 
588 static void
dump_kernel_results_list()589 dump_kernel_results_list()
590 {
591 	testprintf("======\n");
592 	testprintf("kernel_results_list %p, num_tests %u\n", kern_list, num_kern_tests);
593 	dump_results_list(kern_list, num_kern_tests);
594 	testprintf("======\n");
595 }
596 
597 #define TESTNAME_DELIMITER        "TESTNAME "
598 #define RESULTCOUNT_DELIMITER     "RESULT COUNT "
599 #define TESTRESULT_DELIMITER      " "
600 #define TESTCONFIG_DELIMITER      "  TESTCONFIG "
601 #define KERN_TESTRESULT_DELIMITER "  RESULT "
602 
603 // print results, unformatted
604 // This output is read by populate_kernel_results()
605 // and by tools/format_vm_parameter_validation.py
606 static results_t *
__dump_results(results_t * results)607 __dump_results(results_t *results)
608 {
609 	testprintf(TESTNAME_DELIMITER "%s\n", results->testname);
610 	testprintf(TESTCONFIG_DELIMITER "%s\n", results->testconfig);
611 
612 	for (unsigned i = 0; i < results->count; i++) {
613 		testprintf(KERN_TESTRESULT_DELIMITER "%d, %s\n", results->list[i].ret, results->list[i].name);
614 	}
615 
616 	return results;
617 }
618 
619 // This output is read by populate_golden_results()
620 static results_t *
dump_golden_results(results_t * results)621 dump_golden_results(results_t *results)
622 {
623 	testprintf(TESTNAME_DELIMITER "%s\n", results->testname);
624 	testprintf(RESULTCOUNT_DELIMITER "%d\n", results->count);
625 
626 	for (unsigned i = 0; i < results->count; i++) {
627 		testprintf(TESTRESULT_DELIMITER "%d: %d\n", i, results->list[i].ret);
628 	}
629 
630 	return results;
631 }
632 
633 #if !KERNEL
634 static void
do_tests(results_t * golden_results,results_t * results)635 do_tests(results_t *golden_results, results_t *results)
636 {
637 	bool passed = TRUE;
638 	unsigned result_count = golden_results->count;
639 	if (golden_results->count != results->count) {
640 		T_LOG("%s: number of iterations mismatch (%u vs %u)",
641 		    results->testname, golden_results->count, results->count);
642 		result_count = golden_results->count < results->count ? golden_results->count : results->count;
643 	}
644 	for (unsigned i = 0; i < result_count; i++) {
645 		if (results->list[i].ret == ACCEPTABLE) {
646 			// trial has declared itself to be correct
647 			// no matter what the golden result is
648 			T_LOG("%s RESULT ACCEPTABLE (expected %d), %s\n",
649 			    results->testname,
650 			    golden_results->list[i].ret, results->list[i].name);
651 		} else if (results->list[i].ret != golden_results->list[i].ret) {
652 			T_FAIL("%s RESULT %d (expected %d), %s\n",
653 			    results->testname, results->list[i].ret,
654 			    golden_results->list[i].ret, results->list[i].name);
655 			passed = FALSE;
656 		}
657 	}
658 
659 	if (passed) {
660 		T_PASS("%s passed\n", results->testname);
661 	}
662 }
663 #endif
664 
665 static results_t *
dump_results(results_t * results)666 dump_results(results_t *results)
667 {
668 #if KERNEL
669 	if (kernel_generate_golden) {
670 		return dump_golden_results(results);
671 	} else {
672 		return __dump_results(results);
673 	}
674 #else
675 	results_t *golden_results = NULL;
676 
677 	if (dump && !generate_golden) {
678 		__dump_results(results);
679 	}
680 
681 	if (generate_golden) {
682 		dump_golden_results(results);
683 	}
684 
685 	if (test_results) {
686 		golden_results = test_name_to_golden_results(results->testname);
687 
688 		if (golden_results) {
689 			do_tests(golden_results, results);
690 		} else {
691 			T_FAIL("New test %s found, update golden list to allow return code testing", results->testname);
692 			// Dump results if not done previously
693 			if (!dump) {
694 				__dump_results(results);
695 			}
696 		}
697 	}
698 
699 	return results;
700 #endif
701 }
702 
703 static inline mach_vm_address_t
truncate_vm_map_addr_with_flags(MAP_T map,mach_vm_address_t addr,int flags)704 truncate_vm_map_addr_with_flags(MAP_T map, mach_vm_address_t addr, int flags)
705 {
706 	mach_vm_address_t truncated_addr = addr;
707 	if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) {
708 		// VM_FLAGS_RETURN_4K_DATA_ADDR means return a 4k aligned address rather than the
709 		// base of the page. Truncate to 4k.
710 		truncated_addr = trunc_down_page(addr, KB4);
711 	} else if (flags & VM_FLAGS_RETURN_DATA_ADDR) {
712 		// On VM_FLAGS_RETURN_DATA_ADDR, we expect to get back the unaligned address.
713 		// Don't truncate.
714 	} else {
715 		// Otherwise we truncate to the map page size
716 		truncated_addr = trunc_down_map(map, addr);
717 	}
718 	return truncated_addr;
719 }
720 
721 
722 static inline mach_vm_address_t
get_expected_remap_misalignment(MAP_T map,mach_vm_address_t addr,int flags)723 get_expected_remap_misalignment(MAP_T map, mach_vm_address_t addr, int flags)
724 {
725 	mach_vm_address_t misalignment;
726 	if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) {
727 		// VM_FLAGS_RETURN_4K_DATA_ADDR means return a 4k aligned address rather than the
728 		// base of the page. The misalignment is relative to the first 4k page
729 		misalignment = addr - trunc_down_page(addr, KB4);
730 	} else if (flags & VM_FLAGS_RETURN_DATA_ADDR) {
731 		// On VM_FLAGS_RETURN_DATA_ADDR, we expect to get back the unaligned address.
732 		// The misalignment is therefore the low bits
733 		misalignment = addr - trunc_down_map(map, addr);
734 	} else {
735 		// Otherwise we expect it to be aligned
736 		misalignment = 0;
737 	}
738 	return misalignment;
739 }
740 
741 // absolute and relative offsets are used to specify a trial's values
742 
743 typedef struct {
744 	bool is_absolute;
745 	addr_t offset;
746 } absolute_or_relative_offset_t;
747 
748 typedef struct {
749 	unsigned count;
750 	unsigned capacity;
751 	absolute_or_relative_offset_t list[];
752 } offset_list_t;
753 
754 static offset_list_t *
allocate_offsets(unsigned capacity)755 allocate_offsets(unsigned capacity)
756 {
757 	offset_list_t *offsets;
758 #if KERNEL
759 	offsets = kalloc_type(offset_list_t, absolute_or_relative_offset_t, capacity, Z_WAITOK | Z_ZERO);
760 #else
761 	offsets = calloc(sizeof(offset_list_t) + capacity * sizeof(absolute_or_relative_offset_t), 1);
762 #endif
763 	offsets->count = 0;
764 	offsets->capacity = capacity;
765 	return offsets;
766 }
767 
768 static void
append_offset(offset_list_t * offsets,bool is_absolute,addr_t offset)769 append_offset(offset_list_t *offsets, bool is_absolute, addr_t offset)
770 {
771 	assert(offsets->count < offsets->capacity);
772 	offsets->list[offsets->count].is_absolute = is_absolute;
773 	offsets->list[offsets->count].offset = offset;
774 	offsets->count++;
775 }
776 
777 static void
free_offsets(offset_list_t * offsets)778 free_offsets(offset_list_t *offsets)
779 {
780 #if KERNEL
781 	kfree_type(offset_list_t, absolute_or_relative_offset_t, offsets->capacity, offsets);
782 #else
783 	free(offsets);
784 #endif
785 }
786 
787 
788 /////////////////////////////////////////////////////
789 // Generation of trials and their parameter values
790 // A "trial" is a single execution of a function to be tested
791 
792 #if KERNEL
793 #define ALLOC_TRIALS(NAME, new_capacity)                                \
794 	(NAME ## _trials_t *)kalloc_type(NAME ## _trials_t, NAME ## _trial_t, \
795 	                                 new_capacity, Z_WAITOK | Z_ZERO)
796 #define FREE_TRIALS(NAME, trials)                                       \
797 	kfree_type(NAME ## _trials_t, NAME ## _trial_t, trials->capacity, trials)
798 #else
799 #define ALLOC_TRIALS(NAME, new_capacity)                                \
800 	(NAME ## _trials_t *)calloc(sizeof(NAME ## _trials_t) + (new_capacity) * sizeof(NAME ## _trial_t), 1)
801 #define FREE_TRIALS(NAME, trials)               \
802 	free(trials)
803 #endif
804 
// Generate the allocate/free/append helpers for a NAME_trials_t type.
// NAME_trials_t must carry `count`, `capacity`, and a flexible
// `list[]` of NAME_trial_t; allocation goes through ALLOC/FREE_TRIALS.
#define TRIALS_IMPL(NAME)                                               \
	static NAME ## _trials_t *                                      \
	allocate_ ## NAME ## _trials(unsigned capacity)                 \
	{                                                               \
	        NAME ## _trials_t *trials = ALLOC_TRIALS(NAME, capacity); \
	        assert(trials);                                         \
	        trials->count = 0;                                      \
	        trials->capacity = capacity;                            \
	        return trials;                                          \
	}                                                               \
                                                                        \
	static void __attribute__((overloadable, used))                 \
	free_trials(NAME ## _trials_t *trials)                          \
	{                                                               \
	        FREE_TRIALS(NAME, trials);                              \
	}                                                               \
                                                                        \
	static void __attribute__((overloadable, used))                 \
	append_trial(NAME ## _trials_t *trials, NAME ## _trial_t new_trial) \
	{                                                               \
	        assert(trials->count < trials->capacity);               \
	        trials->list[trials->count++] = new_trial;              \
	}                                                               \
                                                                        \
	static void __attribute__((overloadable, used))                 \
	append_trials(NAME ## _trials_t *trials, NAME ## _trial_t *new_trials, unsigned new_count) \
	{                                                               \
	        for (unsigned i = 0; i < new_count; i++) {              \
	                append_trial(trials, new_trials[i]);            \
	        }                                                       \
	}
836 
// allocate vm_inherit_t trials, and deallocate it at end of scope
// NOTE: expands into the tail of a declaration and expects the declared
// variable to be named `trials` (append_trials references it by name).
#define SMART_VM_INHERIT_TRIALS()                                               \
	__attribute__((cleanup(cleanup_vm_inherit_trials)))             \
	= allocate_vm_inherit_trials(countof(vm_inherit_trials_values));        \
	append_trials(trials, vm_inherit_trials_values, countof(vm_inherit_trials_values))

// generate vm_inherit_t trials

typedef struct {
	vm_inherit_t value;
	const char * name;   // string literal; never freed
} vm_inherit_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_inherit_trial_t list[];
} vm_inherit_trials_t;


#define VM_INHERIT_TRIAL(new_value) \
	(vm_inherit_trial_t) {.value = (vm_inherit_t)new_value, .name ="vm_inherit " #new_value}
859 
// vm_inherit_t values under test: the three valid values, then invalid ones.
static vm_inherit_trial_t vm_inherit_trials_values[] = {
	VM_INHERIT_TRIAL(VM_INHERIT_SHARE),
	VM_INHERIT_TRIAL(VM_INHERIT_COPY),
	VM_INHERIT_TRIAL(VM_INHERIT_NONE),
	// end valid ones
	VM_INHERIT_TRIAL(VM_INHERIT_DONATE_COPY), // yes this is invalid
	VM_INHERIT_TRIAL(0x12345),
	VM_INHERIT_TRIAL(0xffffffff),
};
869 
TRIALS_IMPL(vm_inherit)

// Cleanup handler for SMART_VM_INHERIT_TRIALS.
// Trial names are string literals, so only the container is freed.
static void
cleanup_vm_inherit_trials(vm_inherit_trials_t **trials)
{
	free_trials(*trials);
}
877 
// allocate vm_map_kernel_flags trials, and deallocate it at end of scope
#define SMART_VM_MAP_KERNEL_FLAGS_TRIALS()                              \
	__attribute__((cleanup(cleanup_vm_map_kernel_flags_trials)))    \
	= generate_vm_map_kernel_flags_trials()

// same, but with the mmap-compatible flag set (no prefix flags)
#define SMART_MMAP_KERNEL_FLAGS_TRIALS()                                \
	__attribute__((cleanup(cleanup_vm_map_kernel_flags_trials)))    \
	= generate_mmap_kernel_flags_trials()

// generate vm_map_kernel_flags_t trials

typedef struct {
	int flags;
	char * name;   // kasprintf-allocated; freed by cleanup_vm_map_kernel_flags_trials
} vm_map_kernel_flags_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_map_kernel_flags_trial_t list[];
} vm_map_kernel_flags_trials_t;

#define VM_MAP_KERNEL_FLAGS_TRIAL(new_flags) \
	(vm_map_kernel_flags_trial_t) {.flags = (int)(new_flags), .name ="vm_map_kernel_flags " #new_flags}

TRIALS_IMPL(vm_map_kernel_flags)
904 
905 static vm_map_kernel_flags_trials_t *
906 generate_prefixed_vm_map_kernel_flags_trials(int prefix_flags, const char *prefix_name)
907 {
908 	vm_map_kernel_flags_trials_t *trials;
909 	trials = allocate_vm_map_kernel_flags_trials(32);
910 
911 	char *str;
912 #define APPEND(flag)                                                    \
913 	({                                                              \
914 	        kasprintf(&str, "vm_map_kernel_flags %s%s%s", \
915 	            prefix_name, prefix_flags == 0 ? "" : " | ", #flag); \
916 	        append_trial(trials, (vm_map_kernel_flags_trial_t){ prefix_flags | (int)flag, str }); \
917 	})
918 
919 	// First trial is just the prefix flags set, if any.
920 	// (either ANYWHERE or FIXED | OVERWRITE)
921 	if (prefix_flags != 0) {
922 		kasprintf(&str, "vm_map_kernel_flags %s", prefix_name);
923 		append_trial(trials, (vm_map_kernel_flags_trial_t){ prefix_flags, str });
924 	}
925 
926 	// Try each other flag with the prefix flags.
927 	// Skip FIXED and ANYWHERE and OVERWRITE because they cause
928 	// memory management changes that the caller may not be prepared for.
929 	// skip 0x00000000 VM_FLAGS_FIXED
930 	// skip 0x00000001 VM_FLAGS_ANYWHERE
931 	APPEND(VM_FLAGS_PURGABLE);
932 	APPEND(VM_FLAGS_4GB_CHUNK);
933 	APPEND(VM_FLAGS_RANDOM_ADDR);
934 	APPEND(VM_FLAGS_NO_CACHE);
935 	APPEND(VM_FLAGS_RESILIENT_CODESIGN);
936 	APPEND(VM_FLAGS_RESILIENT_MEDIA);
937 	APPEND(VM_FLAGS_PERMANENT);
938 	// skip 0x00001000 VM_FLAGS_TPRO; it only works on some hardware.
939 	APPEND(0x00002000);
940 	// skip 0x00004000 VM_FLAGS_OVERWRITE
941 	APPEND(0x00008000);
942 	APPEND(VM_FLAGS_SUPERPAGE_MASK); // 0x10000, 0x20000, 0x40000
943 	APPEND(0x00080000);
944 	APPEND(VM_FLAGS_RETURN_DATA_ADDR);
945 	APPEND(VM_FLAGS_RETURN_4K_DATA_ADDR);
946 	APPEND(VM_FLAGS_ALIAS_MASK);
947 
948 	return trials;
949 }
950 
951 static vm_map_kernel_flags_trials_t *
generate_vm_map_kernel_flags_trials()952 generate_vm_map_kernel_flags_trials()
953 {
954 	vm_map_kernel_flags_trials_t *fixed =
955 	    generate_prefixed_vm_map_kernel_flags_trials(
956 		VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, "VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE");
957 	vm_map_kernel_flags_trials_t *anywhere =
958 	    generate_prefixed_vm_map_kernel_flags_trials(
959 		VM_FLAGS_ANYWHERE, "VM_FLAGS_ANYWHERE");
960 	vm_map_kernel_flags_trials_t *trials =
961 	    allocate_vm_map_kernel_flags_trials(fixed->count + anywhere->count);
962 	append_trials(trials, fixed->list, fixed->count);
963 	append_trials(trials, anywhere->list, anywhere->count);
964 
965 	// free not cleanup, trials has stolen their strings
966 	free_trials(fixed);
967 	free_trials(anywhere);
968 
969 	return trials;
970 }
971 
972 static vm_map_kernel_flags_trials_t *
generate_mmap_kernel_flags_trials()973 generate_mmap_kernel_flags_trials()
974 {
975 	// mmap rejects both ANYWHERE and FIXED | OVERWRITE
976 	// so don't set any prefix flags.
977 	return generate_prefixed_vm_map_kernel_flags_trials(0, "");
978 }
979 
980 static void
cleanup_vm_map_kernel_flags_trials(vm_map_kernel_flags_trials_t ** trials)981 cleanup_vm_map_kernel_flags_trials(vm_map_kernel_flags_trials_t **trials)
982 {
983 	for (size_t i = 0; i < (*trials)->count; i++) {
984 		kfree_str((*trials)->list[i].name);
985 	}
986 	free_trials(*trials);
987 }
988 
989 
// generate mmap flags trials

typedef struct {
	int flags;
	const char *name;   // string literal; never freed
} mmap_flags_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	mmap_flags_trial_t list[];
} mmap_flags_trials_t;

#define MMAP_FLAGS_TRIAL(new_flags)                                             \
	(mmap_flags_trial_t){ .flags = (int)(new_flags), .name = "mmap flags "#new_flags }
1005 
// mmap flag combinations under test: the MAP_FILE/MAP_ANON x
// MAP_SHARED/MAP_PRIVATE pairings first, then each individual flag bit
// OR'd into a MAP_FILE | MAP_PRIVATE base.
static mmap_flags_trial_t mmap_flags_trials_values[] = {
	MMAP_FLAGS_TRIAL(MAP_FILE),
	MMAP_FLAGS_TRIAL(MAP_ANON),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_SHARED),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE),
	MMAP_FLAGS_TRIAL(MAP_ANON | MAP_SHARED),
	MMAP_FLAGS_TRIAL(MAP_ANON | MAP_PRIVATE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_SHARED | MAP_PRIVATE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_FIXED),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RENAME),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NORESERVE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESERVED0080),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NOEXTEND),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_HASSEMAPHORE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NOCACHE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_JIT),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_CODESIGN),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_TRANSLATED_ALLOW_EXECUTE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_UNIX03),
	// skip MAP_TPRO; it only works on some hardware
	// below: every raw bit, named or not, except MAP_TPRO (1<<19)
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 3),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 4),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 5),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 6),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 7),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 8),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 9),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 10),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 11),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 12),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 13),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 14),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 15),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 16),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 17),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 18),
	// skip MAP_TPRO (1<<19); it only works on some hardware
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 20),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 21),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 22),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 23),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 24),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 25),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 26),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 27),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 28),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 29),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 30),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 31),
};
1057 
TRIALS_IMPL(mmap_flags)

// Cleanup handler for SMART_MMAP_FLAGS_TRIALS.
// Trial names are string literals, so only the container is freed.
static void
cleanup_mmap_flags_trials(mmap_flags_trials_t **trials)
{
	free_trials(*trials);
}

// allocate mmap_flag trials, and deallocate it at end of scope
// NOTE: expects the declared variable to be named `trials`.
#define SMART_MMAP_FLAGS_TRIALS()                                               \
	__attribute__((cleanup(cleanup_mmap_flags_trials)))             \
	= allocate_mmap_flags_trials(countof(mmap_flags_trials_values));        \
	append_trials(trials, mmap_flags_trials_values, countof(mmap_flags_trials_values))
1071 
// generate generic flag trials
// (for functions whose flag argument has no dedicated trial set)

typedef struct {
	int flag;
	const char *name;   // string literal; never freed
} generic_flag_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	generic_flag_trial_t list[];
} generic_flag_trials_t;

#define GENERIC_FLAG_TRIAL(new_flag)                                            \
	(generic_flag_trial_t){ .flag = (int)(new_flag), .name = "generic flag "#new_flag }
1087 
// Generic flag values under test: small integers 0-7, then each single bit.
static generic_flag_trial_t generic_flag_trials_values[] = {
	GENERIC_FLAG_TRIAL(0),
	GENERIC_FLAG_TRIAL(1),
	GENERIC_FLAG_TRIAL(2),
	GENERIC_FLAG_TRIAL(3),
	GENERIC_FLAG_TRIAL(4),
	GENERIC_FLAG_TRIAL(5),
	GENERIC_FLAG_TRIAL(6),
	GENERIC_FLAG_TRIAL(7),
	GENERIC_FLAG_TRIAL(1u << 3),
	GENERIC_FLAG_TRIAL(1u << 4),
	GENERIC_FLAG_TRIAL(1u << 5),
	GENERIC_FLAG_TRIAL(1u << 6),
	GENERIC_FLAG_TRIAL(1u << 7),
	GENERIC_FLAG_TRIAL(1u << 8),
	GENERIC_FLAG_TRIAL(1u << 9),
	GENERIC_FLAG_TRIAL(1u << 10),
	GENERIC_FLAG_TRIAL(1u << 11),
	GENERIC_FLAG_TRIAL(1u << 12),
	GENERIC_FLAG_TRIAL(1u << 13),
	GENERIC_FLAG_TRIAL(1u << 14),
	GENERIC_FLAG_TRIAL(1u << 15),
	GENERIC_FLAG_TRIAL(1u << 16),
	GENERIC_FLAG_TRIAL(1u << 17),
	GENERIC_FLAG_TRIAL(1u << 18),
	GENERIC_FLAG_TRIAL(1u << 19),
	GENERIC_FLAG_TRIAL(1u << 20),
	GENERIC_FLAG_TRIAL(1u << 21),
	GENERIC_FLAG_TRIAL(1u << 22),
	GENERIC_FLAG_TRIAL(1u << 23),
	GENERIC_FLAG_TRIAL(1u << 24),
	GENERIC_FLAG_TRIAL(1u << 25),
	GENERIC_FLAG_TRIAL(1u << 26),
	GENERIC_FLAG_TRIAL(1u << 27),
	GENERIC_FLAG_TRIAL(1u << 28),
	GENERIC_FLAG_TRIAL(1u << 29),
	GENERIC_FLAG_TRIAL(1u << 30),
	GENERIC_FLAG_TRIAL(1u << 31),
};
1127 
TRIALS_IMPL(generic_flag)

// Cleanup handler for SMART_GENERIC_FLAG_TRIALS.
// Trial names are string literals, so only the container is freed.
static void
cleanup_generic_flag_trials(generic_flag_trials_t **trials)
{
	free_trials(*trials);
}

// allocate generic flag trials, and deallocate it at end of scope
// NOTE: expects the declared variable to be named `trials`.
#define SMART_GENERIC_FLAG_TRIALS()                                             \
	__attribute__((cleanup(cleanup_generic_flag_trials)))           \
	= allocate_generic_flag_trials(countof(generic_flag_trials_values));    \
	append_trials(trials, generic_flag_trials_values, countof(generic_flag_trials_values))
1141 
1142 
// generate vm_tag_t trials

#ifndef KERNEL
// vm_tag_t is a kernel-only type; stub it out for userspace builds.
typedef int vm_tag_t;
#endif /* KERNEL */

typedef struct {
	vm_tag_t tag;
	const char *name;   // string literal; never freed
} vm_tag_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_tag_trial_t list[];
} vm_tag_trials_t;

#define VM_TAG_TRIAL(new_tag)                                           \
	(vm_tag_trial_t){ .tag = (vm_tag_t)(new_tag), .name = "vm_tag "#new_tag }
1162 
// vm_tag_t values under test (kernel builds only; empty in userspace).
static vm_tag_trial_t vm_tag_trials_values[] = {
	#ifdef KERNEL
	VM_TAG_TRIAL(VM_KERN_MEMORY_NONE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_OSFMK),
	VM_TAG_TRIAL(VM_KERN_MEMORY_BSD),
	VM_TAG_TRIAL(VM_KERN_MEMORY_IOKIT),
	VM_TAG_TRIAL(VM_KERN_MEMORY_LIBKERN),
	VM_TAG_TRIAL(VM_KERN_MEMORY_OSKEXT),
	VM_TAG_TRIAL(VM_KERN_MEMORY_KEXT),
	VM_TAG_TRIAL(VM_KERN_MEMORY_IPC),
	VM_TAG_TRIAL(VM_KERN_MEMORY_STACK),
	VM_TAG_TRIAL(VM_KERN_MEMORY_CPU),
	VM_TAG_TRIAL(VM_KERN_MEMORY_PMAP),
	VM_TAG_TRIAL(VM_KERN_MEMORY_PTE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_ZONE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC),
	VM_TAG_TRIAL(VM_KERN_MEMORY_COMPRESSOR),
	VM_TAG_TRIAL(VM_KERN_MEMORY_COMPRESSED_DATA),
	VM_TAG_TRIAL(VM_KERN_MEMORY_PHANTOM_CACHE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_WAITQ),
	VM_TAG_TRIAL(VM_KERN_MEMORY_DIAG),
	VM_TAG_TRIAL(VM_KERN_MEMORY_LOG),
	VM_TAG_TRIAL(VM_KERN_MEMORY_FILE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_MBUF),
	VM_TAG_TRIAL(VM_KERN_MEMORY_UBC),
	VM_TAG_TRIAL(VM_KERN_MEMORY_SECURITY),
	VM_TAG_TRIAL(VM_KERN_MEMORY_MLOCK),
	VM_TAG_TRIAL(VM_KERN_MEMORY_REASON),
	VM_TAG_TRIAL(VM_KERN_MEMORY_SKYWALK),
	VM_TAG_TRIAL(VM_KERN_MEMORY_LTABLE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_HV),
	VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC_DATA),
	VM_TAG_TRIAL(VM_KERN_MEMORY_RETIRED),
	VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC_TYPE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_TRIAGE),
	VM_TAG_TRIAL(VM_KERN_MEMORY_RECOUNT),
	#endif /* KERNEL */
};
1201 
TRIALS_IMPL(vm_tag)

// Cleanup handler for SMART_VM_TAG_TRIALS.
// Trial names are string literals, so only the container is freed.
static void
cleanup_vm_tag_trials(vm_tag_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_tag trials, and deallocate it at end of scope
// NOTE: expects the declared variable to be named `trials`.
#define SMART_VM_TAG_TRIALS()                                           \
	__attribute__((cleanup(cleanup_vm_tag_trials)))         \
	= allocate_vm_tag_trials(countof(vm_tag_trials_values));        \
	append_trials(trials, vm_tag_trials_values, countof(vm_tag_trials_values))

//END vm_tag_t
1216 
// generate vm_prot_t trials

typedef struct {
	vm_prot_t prot;
	const char *name;   // string literal; never freed
} vm_prot_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_prot_trial_t list[];
} vm_prot_trials_t;

#define VM_PROT_TRIAL(new_prot)                                         \
	(vm_prot_trial_t){ .prot = (vm_prot_t)(new_prot), .name = "vm_prot "#new_prot }
1232 
// vm_prot_t values under test: the common protections, then each high bit
// OR'd with READ (and with WRITE/EXECUTE for a few interesting bits).
static vm_prot_trial_t vm_prot_trials_values[] = {
	// none
	VM_PROT_TRIAL(VM_PROT_NONE),
	// ordinary r-- / rw- / r-x
	VM_PROT_TRIAL(VM_PROT_READ),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE),
	// rwx (w+x often disallowed)
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE),
	// VM_PROT_READ | VM_PROT_x for each other VM_PROT_x bit
	// plus write and execute for some interesting cases
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 3),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 4),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 5),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 6),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 7),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 7),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 7),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 8),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 8),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 8),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 9),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 10),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 11),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 12),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 13),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 14),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 15),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 16),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 16),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 16),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 17),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 18),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 19),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 20),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 21),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 22),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 23),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 24),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 25),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 25),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 25),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 26),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 27),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 28),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 29),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 30),
	VM_PROT_TRIAL(VM_PROT_READ | 1u << 31),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 31),
	VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 31),
};
1284 
TRIALS_IMPL(vm_prot)

// Cleanup handler for SMART_VM_PROT_TRIALS.
// Trial names are string literals, so only the container is freed.
static void
cleanup_vm_prot_trials(vm_prot_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_prot trials, and deallocate it at end of scope
// NOTE: expects the declared variable to be named `trials`.
#define SMART_VM_PROT_TRIALS()                                          \
	__attribute__((cleanup(cleanup_vm_prot_trials)))                \
	= allocate_vm_prot_trials(countof(vm_prot_trials_values));      \
	append_trials(trials, vm_prot_trials_values, countof(vm_prot_trials_values))
1298 
// Trials for pairs of vm_prot_t
// (current and maximum protection passed together)

typedef struct {
	vm_prot_t cur;
	vm_prot_t max;
	char * name;   // kasprintf-allocated; freed by cleanup_vm_prot_pair_trials
} vm_prot_pair_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_prot_pair_trial_t list[];
} vm_prot_pair_trials_t;

TRIALS_IMPL(vm_prot_pair)

#define VM_PROT_PAIR_TRIAL(new_cur, new_max, new_name) \
(vm_prot_pair_trial_t){ .cur = (vm_prot_t)(new_cur), \
	        .max = (vm_prot_t)(new_max), \
	        .name = new_name,}
1319 
1320 vm_prot_pair_trials_t *
1321 generate_vm_prot_pair_trials()
1322 {
1323 	const unsigned D = countof(vm_prot_trials_values);
1324 	unsigned num_trials = D * D;
1325 
1326 	vm_prot_pair_trials_t * trials = allocate_vm_prot_pair_trials(num_trials);
1327 	for (size_t i = 0; i < D; i++) {
1328 		for (size_t j = 0; j < D; j++) {
1329 			vm_prot_t cur = vm_prot_trials_values[i].prot;
1330 			vm_prot_t max = vm_prot_trials_values[j].prot;
1331 			char *str;
1332 			kasprintf(&str, "cur: 0x%x, max: 0x%x", cur, max);
1333 			append_trial(trials, VM_PROT_PAIR_TRIAL(cur, max, str));
1334 		}
1335 	}
1336 	return trials;
1337 }
1338 
// generate vm_prot pair trials, and deallocate them at end of scope
#define SMART_VM_PROT_PAIR_TRIALS()                                             \
	__attribute__((cleanup(cleanup_vm_prot_pair_trials)))           \
	= generate_vm_prot_pair_trials();

// Cleanup handler for SMART_VM_PROT_PAIR_TRIALS.
// Trial names were kasprintf'd by the generator, so free each one
// before freeing the container.
static void
cleanup_vm_prot_pair_trials(vm_prot_pair_trials_t **trials)
{
	for (size_t i = 0; i < (*trials)->count; i++) {
		kfree_str((*trials)->list[i].name);
	}
	free_trials(*trials);
}
1351 
// generate ledger tag trials

typedef struct {
	int tag;
	const char *name;   // string literal; never freed
} ledger_tag_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	ledger_tag_trial_t list[];
} ledger_tag_trials_t;

#define LEDGER_TAG_TRIAL(new_tag)                            \
	(ledger_tag_trial_t){ .tag = (int)(new_tag), .name = "ledger tag "#new_tag }
1367 
// Ledger tag values under test: the named VM_LEDGER_TAG_* values,
// then high single bits, then the UNCHANGED sentinel.
static ledger_tag_trial_t ledger_tag_trials_values[] = {
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NONE),
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_DEFAULT),
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NETWORK),
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_MEDIA),
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_GRAPHICS),
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NEURAL),
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_MAX),
	LEDGER_TAG_TRIAL(1u << 16),
	LEDGER_TAG_TRIAL(1u << 17),
	LEDGER_TAG_TRIAL(1u << 18),
	LEDGER_TAG_TRIAL(1u << 19),
	LEDGER_TAG_TRIAL(1u << 20),
	LEDGER_TAG_TRIAL(1u << 21),
	LEDGER_TAG_TRIAL(1u << 22),
	LEDGER_TAG_TRIAL(1u << 23),
	LEDGER_TAG_TRIAL(1u << 24),
	LEDGER_TAG_TRIAL(1u << 25),
	LEDGER_TAG_TRIAL(1u << 26),
	LEDGER_TAG_TRIAL(1u << 27),
	LEDGER_TAG_TRIAL(1u << 28),
	LEDGER_TAG_TRIAL(1u << 29),
	LEDGER_TAG_TRIAL(1u << 30),
	LEDGER_TAG_TRIAL(1u << 31),
	LEDGER_TAG_TRIAL(VM_LEDGER_TAG_UNCHANGED),
};
1394 
TRIALS_IMPL(ledger_tag)

// Cleanup handler for SMART_LEDGER_TAG_TRIALS.
// Trial names are string literals, so only the container is freed.
static void
cleanup_ledger_tag_trials(ledger_tag_trials_t **trials)
{
	free_trials(*trials);
}

// allocate ledger tag trials, and deallocate it at end of scope
// NOTE: expects the declared variable to be named `trials`.
#define SMART_LEDGER_TAG_TRIALS()                                               \
	__attribute__((cleanup(cleanup_ledger_tag_trials)))             \
	= allocate_ledger_tag_trials(countof(ledger_tag_trials_values));        \
	append_trials(trials, ledger_tag_trials_values, countof(ledger_tag_trials_values))
1408 
1409 
// generate ledger flag trials

typedef struct {
	int flag;
	const char *name;   // string literal; never freed
} ledger_flag_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	ledger_flag_trial_t list[];
} ledger_flag_trials_t;

#define LEDGER_FLAG_TRIAL(new_flag)                            \
	(ledger_flag_trial_t){ .flag = (int)(new_flag), .name = "ledger flag "#new_flag }
1425 
// Ledger flag values under test: the named VM_LEDGER_FLAG* values and
// masks, then each remaining single bit.
static ledger_flag_trial_t ledger_flag_trials_values[] = {
	LEDGER_FLAG_TRIAL(0),
	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_NO_FOOTPRINT),
	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG),
	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAGS_USER),
	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_FROM_KERNEL),
	LEDGER_FLAG_TRIAL(VM_LEDGER_FLAGS_ALL),
	LEDGER_FLAG_TRIAL(1u << 3),
	LEDGER_FLAG_TRIAL(1u << 4),
	LEDGER_FLAG_TRIAL(1u << 5),
	LEDGER_FLAG_TRIAL(1u << 6),
	LEDGER_FLAG_TRIAL(1u << 7),
	LEDGER_FLAG_TRIAL(1u << 8),
	LEDGER_FLAG_TRIAL(1u << 9),
	LEDGER_FLAG_TRIAL(1u << 10),
	LEDGER_FLAG_TRIAL(1u << 11),
	LEDGER_FLAG_TRIAL(1u << 12),
	LEDGER_FLAG_TRIAL(1u << 13),
	LEDGER_FLAG_TRIAL(1u << 14),
	LEDGER_FLAG_TRIAL(1u << 15),
	LEDGER_FLAG_TRIAL(1u << 16),
	LEDGER_FLAG_TRIAL(1u << 17),
	LEDGER_FLAG_TRIAL(1u << 18),
	LEDGER_FLAG_TRIAL(1u << 19),
	LEDGER_FLAG_TRIAL(1u << 20),
	LEDGER_FLAG_TRIAL(1u << 21),
	LEDGER_FLAG_TRIAL(1u << 22),
	LEDGER_FLAG_TRIAL(1u << 23),
	LEDGER_FLAG_TRIAL(1u << 24),
	LEDGER_FLAG_TRIAL(1u << 25),
	LEDGER_FLAG_TRIAL(1u << 26),
	LEDGER_FLAG_TRIAL(1u << 27),
	LEDGER_FLAG_TRIAL(1u << 28),
	LEDGER_FLAG_TRIAL(1u << 29),
	LEDGER_FLAG_TRIAL(1u << 30),
	LEDGER_FLAG_TRIAL(1u << 31),
};
1463 
TRIALS_IMPL(ledger_flag)

// Cleanup handler for SMART_LEDGER_FLAG_TRIALS.
// Trial names are string literals, so only the container is freed.
static void
cleanup_ledger_flag_trials(ledger_flag_trials_t **trials)
{
	free_trials(*trials);
}

// allocate ledger flag trials, and deallocate it at end of scope
// NOTE: expects the declared variable to be named `trials`.
#define SMART_LEDGER_FLAG_TRIALS()                                              \
	__attribute__((cleanup(cleanup_ledger_flag_trials)))            \
	= allocate_ledger_flag_trials(countof(ledger_flag_trials_values));      \
	append_trials(trials, ledger_flag_trials_values, countof(ledger_flag_trials_values))
1477 
// generate address-parameter trials
// where the address has no associated size
// and the callee's arithmetic includes `round_page(addr)`

typedef struct {
	addr_t addr;
	// true: addr is a raw address; false: addr includes an allocation's base
	// and must be slid when the allocation moves (see slide_trial)
	bool addr_is_absolute;
	char *name;   // kasprintf-allocated; freed by cleanup_addr_trials
} addr_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	addr_trial_t list[];
} addr_trials_t;

#define ADDR_TRIAL(new_addr, new_absolute, new_name)                    \
	(addr_trial_t){ .addr = (addr_t)(new_addr), .addr_is_absolute = new_absolute, .name = new_name }
1496 
1497 static addr_trial_t __attribute__((overloadable, used))
slide_trial(addr_trial_t trial,mach_vm_address_t slide)1498 slide_trial(addr_trial_t trial, mach_vm_address_t slide)
1499 {
1500 	addr_trial_t result = trial;
1501 	if (!trial.addr_is_absolute) {
1502 		result.addr += slide;
1503 	}
1504 	return result;
1505 }
1506 
// Interesting address offsets, built lazily once and cached.
// Absolute entries (is_absolute = true) cover values near 0, near
// PAGE_SIZE, and near the top of the address space; relative entries
// (is_absolute = false) are later added to an allocation's base address.
static const offset_list_t *
get_addr_trial_offsets(void)
{
	static offset_list_t *offsets;
	if (!offsets) {
		offsets = allocate_offsets(20);
		append_offset(offsets, true, 0);
		append_offset(offsets, true, 1);
		append_offset(offsets, true, 2);
		append_offset(offsets, true, PAGE_SIZE - 2);
		append_offset(offsets, true, PAGE_SIZE - 1);
		append_offset(offsets, true, PAGE_SIZE);
		append_offset(offsets, true, PAGE_SIZE + 1);
		append_offset(offsets, true, PAGE_SIZE + 2);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE - 2);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE - 1);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE + 1);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE + 2);
		append_offset(offsets, true, -(mach_vm_address_t)2);
		append_offset(offsets, true, -(mach_vm_address_t)1);

		append_offset(offsets, false, 0);
		append_offset(offsets, false, 1);
		append_offset(offsets, false, 2);
		append_offset(offsets, false, PAGE_SIZE - 2);
		append_offset(offsets, false, PAGE_SIZE - 1);
	}
	return offsets;
}

TRIALS_IMPL(addr)
1539 
1540 addr_trials_t *
1541 generate_addr_trials(addr_t base)
1542 {
1543 	const offset_list_t *offsets = get_addr_trial_offsets();
1544 	const unsigned ADDRS = offsets->count;
1545 	addr_trials_t *trials = allocate_addr_trials(ADDRS);
1546 
1547 	for (unsigned a = 0; a < ADDRS; a++) {
1548 		mach_vm_address_t addr_offset = offsets->list[a].offset;
1549 		mach_vm_address_t addr = addr_offset;
1550 		bool addr_is_absolute = offsets->list[a].is_absolute;
1551 		if (!addr_is_absolute) {
1552 			addr += base;
1553 		}
1554 
1555 		char *str;
1556 		kasprintf(&str, "addr: %s0x%llx",
1557 		    addr_is_absolute ? "" : "base+", addr_offset);
1558 		append_trial(trials, ADDR_TRIAL(addr, addr_is_absolute, str));
1559 	}
1560 	return trials;
1561 }
1562 
// Cleanup handler for SMART_ADDR_TRIALS.
// Trial names were kasprintf'd by the generator, so free each one
// before freeing the container.
static void
cleanup_addr_trials(addr_trials_t **trials)
{
	for (size_t i = 0; i < (*trials)->count; i++) {
		kfree_str((*trials)->list[i].name);
	}
	free_trials(*trials);
}

// allocate address trials around a base address
// and deallocate it at end of scope
#define SMART_ADDR_TRIALS(base)                                         \
	__attribute__((cleanup(cleanup_addr_trials)))                   \
	    = generate_addr_trials(base)
1577 
1578 
/////////////////////////////////////////////////////
// generate size-parameter trials
// where the size is not associated with any base address
// and the callee's arithmetic includes `round_page(size)`

typedef struct {
	addr_t size;
	char *name;   // kasprintf-allocated; freed by cleanup_size_trials
} size_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	size_trial_t list[];
} size_trials_t;

#define SIZE_TRIAL(new_size, new_name)                                          \
	(size_trial_t){ .size = (addr_t)(new_size), .name = new_name }
1597 
// Interesting size values, built lazily once and cached: values near 0,
// near PAGE_SIZE, and near the top of the range (which overflow when
// rounded or added to an address). All entries are absolute.
static const offset_list_t *
get_size_trial_offsets(void)
{
	static offset_list_t *offsets;
	if (!offsets) {
		offsets = allocate_offsets(15);
		append_offset(offsets, true, 0);
		append_offset(offsets, true, 1);
		append_offset(offsets, true, 2);
		append_offset(offsets, true, PAGE_SIZE - 2);
		append_offset(offsets, true, PAGE_SIZE - 1);
		append_offset(offsets, true, PAGE_SIZE);
		append_offset(offsets, true, PAGE_SIZE + 1);
		append_offset(offsets, true, PAGE_SIZE + 2);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE - 2);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE - 1);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE + 1);
		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE + 2);
		append_offset(offsets, true, -(mach_vm_address_t)2);
		append_offset(offsets, true, -(mach_vm_address_t)1);
	}
	return offsets;
}

TRIALS_IMPL(size)
1624 
1625 size_trials_t *
1626 generate_size_trials(void)
1627 {
1628 	const offset_list_t *size_offsets = get_size_trial_offsets();
1629 	const unsigned SIZES = size_offsets->count;
1630 	size_trials_t *trials = allocate_size_trials(SIZES);
1631 
1632 	for (unsigned s = 0; s < SIZES; s++) {
1633 		mach_vm_size_t size = size_offsets->list[s].offset;
1634 
1635 		char *str;
1636 		kasprintf(&str, "size: 0x%llx", size);
1637 		append_trial(trials, SIZE_TRIAL(size, str));
1638 	}
1639 	return trials;
1640 }
1641 
// Cleanup handler for SMART_SIZE_TRIALS.
// Trial names were kasprintf'd by the generator, so free each one
// before freeing the container.
static void
cleanup_size_trials(size_trials_t **trials)
{
	for (size_t i = 0; i < (*trials)->count; i++) {
		kfree_str((*trials)->list[i].name);
	}
	free_trials(*trials);
}

// allocate size trials, and deallocate it at end of scope
#define SMART_SIZE_TRIALS()                                             \
	__attribute__((cleanup(cleanup_size_trials)))                   \
	= generate_size_trials()
1655 
/////////////////////////////////////////////////////
// generate start/size trials
// using absolute addresses or addresses around a given address
// where `size` is the size of the thing at `start`
// and the callee's arithmetic performs `start+size`

typedef struct {
	addr_t start;
	addr_t size;
	char *name;   // kasprintf-allocated; caller's cleanup frees it
	bool start_is_absolute;  // start computation does not include any allocation's base address
	bool size_is_absolute;   // size computation does not include start
} start_size_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	start_size_trial_t list[];
} start_size_trials_t;


#define START_SIZE_TRIAL(new_start, start_absolute, new_size, size_absolute, new_name) \
	(start_size_trial_t){ .start = (addr_t)(new_start), .size = (addr_t)(new_size), \
	                .name = new_name,                                       \
	                .start_is_absolute = start_absolute, .size_is_absolute = size_absolute }
1681 
1682 static const offset_list_t *
get_start_size_trial_start_offsets(void)1683 get_start_size_trial_start_offsets(void)
1684 {
1685 	return get_addr_trial_offsets();
1686 }
1687 
1688 static const offset_list_t *
get_start_size_trial_size_offsets(void)1689 get_start_size_trial_size_offsets(void)
1690 {
1691 	static offset_list_t *offsets;
1692 	if (!offsets) {
1693 		// use each size offset twice: once absolute and once relative
1694 		const offset_list_t *old_offsets = get_size_trial_offsets();
1695 		offsets = allocate_offsets(2 * old_offsets->count);
1696 		for (unsigned i = 0; i < old_offsets->count; i++) {
1697 			append_offset(offsets, true, old_offsets->list[i].offset);
1698 		}
1699 		for (unsigned i = 0; i < old_offsets->count; i++) {
1700 			append_offset(offsets, false, old_offsets->list[i].offset);
1701 		}
1702 	}
1703 	return offsets;
1704 }
1705 
// Instantiate the trial-list helpers for start/size trials
// (allocate_start_size_trials / append_trial / free_trials).
TRIALS_IMPL(start_size)
1707 
1708 // Return a new start/size trial which is offset by `slide` bytes
1709 // Only "relative" start and size values get slid.
1710 // "absolute" values don't change.
1711 static start_size_trial_t __attribute__((overloadable, used))
1712 slide_trial(start_size_trial_t trial, mach_vm_address_t slide)
1713 {
1714 	start_size_trial_t result = trial;
1715 	if (!result.start_is_absolute) {
1716 		result.start += slide;
1717 		if (!result.size_is_absolute) {
1718 			result.size -= slide;
1719 		}
1720 	}
1721 	return result;
1722 }
1723 
// Build the cross product of start offsets × size offsets around `base`
// (the address of the allocation under test).  Relative starts are
// rebased onto `base`; relative sizes are computed as (-start + offset)
// so that the callee's start+size arithmetic lands exactly on the raw
// size offset value (unsigned wraparound is intended here).
// Caller frees the result with cleanup_start_size_trials().
start_size_trials_t *
generate_start_size_trials(addr_t base)
{
	const offset_list_t *start_offsets = get_start_size_trial_start_offsets();
	const offset_list_t *size_offsets = get_start_size_trial_size_offsets();

	const unsigned ADDRS = start_offsets->count;
	const unsigned SIZES = size_offsets->count;

	start_size_trials_t *trials = allocate_start_size_trials(ADDRS * SIZES);

	for (unsigned a = 0; a < ADDRS; a++) {
		for (unsigned s = 0; s < SIZES; s++) {
			mach_vm_address_t start_offset = start_offsets->list[a].offset;
			mach_vm_address_t start = start_offset;
			bool start_is_absolute = start_offsets->list[a].is_absolute;
			if (!start_is_absolute) {
				// relative start: rebase onto the allocation
				start += base;
			}

			mach_vm_size_t size_offset = size_offsets->list[s].offset;
			mach_vm_size_t size = size_offset;
			bool size_is_absolute = size_offsets->list[s].is_absolute;
			if (!size_is_absolute) {
				// relative size: makes start + size == size_offset
				size = -start + size;
			}

			// the name records the raw, un-rebased offsets
			char *str;
			kasprintf(&str, "start: %s0x%llx, size: %s0x%llx",
			    start_is_absolute ? "" : "base+", start_offset,
			    size_is_absolute ? "" :"-start+", size_offset);
			append_trial(trials, START_SIZE_TRIAL(start, start_is_absolute, size, size_is_absolute, str));
		}
	}
	return trials;
}
1760 
1761 static void
cleanup_start_size_trials(start_size_trials_t ** trials)1762 cleanup_start_size_trials(start_size_trials_t **trials)
1763 {
1764 	for (size_t i = 0; i < (*trials)->count; i++) {
1765 		kfree_str((*trials)->list[i].name);
1766 	}
1767 	free_trials(*trials);
1768 }
1769 
// allocate start/size trials around a base address
// and deallocate it at end of scope
// (cleanup_start_size_trials frees the name strings too)
#define SMART_START_SIZE_TRIALS(base)                                   \
	__attribute__((cleanup(cleanup_start_size_trials)))             \
	= generate_start_size_trials(base)
1775 
// Trials for start/size/offset/object tuples

// One start/size/offset/object trial: start/size follow the same
// absolute/relative rebasing rules as start_size_trial_t; offset and
// obj_size are always used as-is (obj_size feeds make_a_mem_object).
typedef struct {
	mach_vm_address_t start;
	mach_vm_size_t size;
	vm_object_offset_t offset;
	mach_vm_size_t obj_size;
	bool start_is_absolute;
	bool size_is_absolute;
	char * name;
} start_size_offset_object_trial_t;

// Growable list of start/size/offset/object trials.
typedef struct {
	unsigned count;
	unsigned capacity;
	start_size_offset_object_trial_t list[];
} start_size_offset_object_trials_t;

// Instantiate the trial-list helpers for these trials.
TRIALS_IMPL(start_size_offset_object)
1795 
// Build a start_size_offset_object_trial_t value. The name string is
// stored and later freed by cleanup_start_size_offset_object_trials().
#define START_SIZE_OFFSET_OBJECT_TRIAL(new_start, new_size, new_offset, new_obj_size, new_start_is_absolute, new_size_is_absolute, new_name) \
(start_size_offset_object_trial_t){ .start = (mach_vm_address_t)(new_start), \
	        .size = (mach_vm_size_t)(new_size), \
	        .offset = (vm_object_offset_t)(new_offset), \
	        .obj_size = (mach_vm_size_t)(new_obj_size), \
	        .start_is_absolute = (bool)(new_start_is_absolute), \
	        .size_is_absolute = (bool)(new_size_is_absolute), \
	        .name = new_name,}
1804 
1805 bool
1806 obj_size_is_ok(mach_vm_size_t obj_size)
1807 {
1808 	if (round_up_page(obj_size, PAGE_SIZE) == 0) {
1809 		return false;
1810 	}
1811 	/* in rosetta, PAGE_SIZE is 4K but rounding to 16K also panics */ \
1812 	if (isRosetta() && round_up_page(obj_size, KB16) == 0) {
1813 		return false;
1814 	}
1815 	return true;
1816 }
1817 
1818 static start_size_offset_object_trial_t __attribute__((overloadable, used))
slide_trial(start_size_offset_object_trial_t trial,mach_vm_address_t slide)1819 slide_trial(start_size_offset_object_trial_t trial, mach_vm_address_t slide)
1820 {
1821 	start_size_offset_object_trial_t result = trial;
1822 
1823 	if (!trial.start_is_absolute) {
1824 		result.start += slide;
1825 		if (!trial.size_is_absolute) {
1826 			result.size -= slide;
1827 		}
1828 	}
1829 	return result;
1830 }
1831 
1832 static offset_list_t *
get_ssoo_absolute_offsets()1833 get_ssoo_absolute_offsets()
1834 {
1835 	static offset_list_t *offsets;
1836 	if (!offsets) {
1837 		offsets = allocate_offsets(20);
1838 		append_offset(offsets, true, 0);
1839 		append_offset(offsets, true, 1);
1840 		append_offset(offsets, true, 2);
1841 		append_offset(offsets, true, PAGE_SIZE - 2);
1842 		append_offset(offsets, true, PAGE_SIZE - 1);
1843 		append_offset(offsets, true, PAGE_SIZE);
1844 		append_offset(offsets, true, PAGE_SIZE + 1);
1845 		append_offset(offsets, true, PAGE_SIZE + 2);
1846 		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE - 2);
1847 		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE - 1);
1848 		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE);
1849 		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE + 1);
1850 		append_offset(offsets, true, -(mach_vm_address_t)PAGE_SIZE + 2);
1851 		append_offset(offsets, true, -(mach_vm_address_t)2);
1852 		append_offset(offsets, true, -(mach_vm_address_t)1);
1853 	}
1854 	return offsets;
1855 }
1856 
1857 static offset_list_t *
get_ssoo_absolute_and_relative_offsets()1858 get_ssoo_absolute_and_relative_offsets()
1859 {
1860 	static offset_list_t *offsets;
1861 	if (!offsets) {
1862 		const offset_list_t *old_offsets = get_ssoo_absolute_offsets();
1863 		offsets = allocate_offsets(old_offsets->count + 5);
1864 		// absolute offsets
1865 		for (unsigned i = 0; i < old_offsets->count; i++) {
1866 			append_offset(offsets, true, old_offsets->list[i].offset);
1867 		}
1868 		// relative offsets
1869 		append_offset(offsets, false, 0);
1870 		append_offset(offsets, false, 1);
1871 		append_offset(offsets, false, 2);
1872 		append_offset(offsets, false, PAGE_SIZE - 2);
1873 		append_offset(offsets, false, PAGE_SIZE - 1);
1874 	}
1875 	return offsets;
1876 }
1877 
// Build the cross product of start × size × offset × object-size
// trials.  Object sizes rejected by obj_size_is_ok() are skipped
// (make_a_mem_object would fail on them); the pre-pass below counts
// the surviving object sizes so the list is allocated exactly.
// Caller frees with cleanup_start_size_offset_object_trials().
start_size_offset_object_trials_t *
generate_start_size_offset_object_trials()
{
	const offset_list_t *start_offsets = get_ssoo_absolute_and_relative_offsets();
	const offset_list_t *size_offsets  = get_ssoo_absolute_and_relative_offsets();
	const offset_list_t *offset_values = get_ssoo_absolute_offsets();
	const offset_list_t *object_sizes  = get_ssoo_absolute_offsets();

	// count usable object sizes first to size the allocation
	unsigned num_trials = 0;
	for (size_t d = 0; d < object_sizes->count; d++) {
		mach_vm_size_t obj_size = object_sizes->list[d].offset;
		if (!obj_size_is_ok(obj_size)) { // make_a_mem_object would fail
			continue;
		}
		num_trials++;
	}
	num_trials *= start_offsets->count * size_offsets->count * offset_values->count;

	start_size_offset_object_trials_t * trials = allocate_start_size_offset_object_trials(num_trials);
	for (size_t a = 0; a < start_offsets->count; a++) {
		for (size_t b = 0; b < size_offsets->count; b++) {
			for (size_t c = 0; c < offset_values->count; c++) {
				for (size_t d = 0; d < object_sizes->count; d++) {
					bool start_is_absolute = start_offsets->list[a].is_absolute;
					bool size_is_absolute = size_offsets->list[b].is_absolute;
					mach_vm_address_t start = start_offsets->list[a].offset;
					mach_vm_size_t size = size_offsets->list[b].offset;
					vm_object_offset_t offset = offset_values->list[c].offset;
					mach_vm_size_t obj_size = object_sizes->list[d].offset;
					if (!obj_size_is_ok(obj_size)) { // make_a_mem_object would fail
						continue;
					}
					char *str;
					kasprintf(&str, "start: %s0x%llx, size: %s0x%llx, offset: 0x%llx, obj_size: 0x%llx",
					    start_is_absolute ? "" : "base+", start,
					    size_is_absolute ? "" :"-start+", size,
					    offset,
					    obj_size);
					append_trial(trials, START_SIZE_OFFSET_OBJECT_TRIAL(start, size, offset, obj_size, start_is_absolute, size_is_absolute, str));
				}
			}
		}
	}
	return trials;
}
1923 
// allocate start/size/offset/object trials
// and deallocate them (names included) at end of scope
#define SMART_START_SIZE_OFFSET_OBJECT_TRIALS()                                         \
	__attribute__((cleanup(cleanup_start_size_offset_object_trials)))               \
	= generate_start_size_offset_object_trials();
1927 
1928 static void
cleanup_start_size_offset_object_trials(start_size_offset_object_trials_t ** trials)1929 cleanup_start_size_offset_object_trials(start_size_offset_object_trials_t **trials)
1930 {
1931 	for (size_t i = 0; i < (*trials)->count; i++) {
1932 		kfree_str((*trials)->list[i].name);
1933 	}
1934 	free_trials(*trials);
1935 }
1936 
1937 
// start/size/offset: test start+size and a second independent address
// consider src/dst/size instead if the size may be added to both addresses

// One start/size/offset trial: start/size follow the same
// absolute/relative rebasing rules as start_size_trial_t;
// offset is always used as-is.
typedef struct {
	mach_vm_address_t start;
	mach_vm_size_t size;
	vm_object_offset_t offset;
	bool start_is_absolute;
	bool size_is_absolute;
	char * name;
} start_size_offset_trial_t;

// Growable list of start/size/offset trials.
typedef struct {
	unsigned count;
	unsigned capacity;
	start_size_offset_trial_t list[];
} start_size_offset_trials_t;

// Instantiate the trial-list helpers for these trials.
TRIALS_IMPL(start_size_offset)

// Build a start_size_offset_trial_t value. The name string is stored
// and later freed by cleanup_start_size_offset_trials().
#define START_SIZE_OFFSET_TRIAL(new_start, new_size, new_offset, new_start_is_absolute, new_size_is_absolute, new_name) \
(start_size_offset_trial_t){ .start = (mach_vm_address_t)(new_start), \
	        .size = (mach_vm_size_t)(new_size), \
	        .offset = (vm_object_offset_t)(new_offset), \
	        .start_is_absolute = (bool)(new_start_is_absolute), \
	        .size_is_absolute = (bool)(new_size_is_absolute), \
	        .name = new_name,}
1965 
1966 
1967 static start_size_offset_trial_t __attribute__((overloadable, used))
1968 slide_trial(start_size_offset_trial_t trial, mach_vm_address_t slide)
1969 {
1970 	start_size_offset_trial_t result = trial;
1971 
1972 	if (!trial.start_is_absolute) {
1973 		result.start += slide;
1974 		if (!trial.size_is_absolute) {
1975 			result.size -= slide;
1976 		}
1977 	}
1978 	return result;
1979 }
1980 
// Build the cross product of start × offset × size trials.
// Caller frees with cleanup_start_size_offset_trials().
start_size_offset_trials_t *
generate_start_size_offset_trials()
{
	const offset_list_t *start_offsets = get_ssoo_absolute_and_relative_offsets();
	const offset_list_t *offset_values = get_ssoo_absolute_offsets();
	const offset_list_t *size_offsets  = get_ssoo_absolute_and_relative_offsets();

	// output is actually ordered start - offset - size
	// because it pretty-prints better than start - size - offset
	unsigned num_trials = start_offsets->count * offset_values->count * size_offsets->count;
	start_size_offset_trials_t * trials = allocate_start_size_offset_trials(num_trials);
	for (size_t a = 0; a < start_offsets->count; a++) {
		for (size_t b = 0; b < offset_values->count; b++) {
			for (size_t c = 0; c < size_offsets->count; c++) {
				bool start_is_absolute = start_offsets->list[a].is_absolute;
				bool size_is_absolute = size_offsets->list[c].is_absolute;
				mach_vm_address_t start = start_offsets->list[a].offset;
				vm_object_offset_t offset = offset_values->list[b].offset;
				mach_vm_size_t size = size_offsets->list[c].offset;

				// the name mirrors the enumeration order (start, offset, size)
				char *str;
				kasprintf(&str, "start: %s0x%llx, offset: 0x%llx, size: %s0x%llx",
				    start_is_absolute ? "" : "base+", start,
				    offset,
				    size_is_absolute ? "" :"-start+", size);
				append_trial(trials, START_SIZE_OFFSET_TRIAL(start, size, offset, start_is_absolute, size_is_absolute, str));
			}
		}
	}
	return trials;
}
2012 
// allocate start/size/offset trials
// and deallocate them (names included) at end of scope
#define SMART_START_SIZE_OFFSET_TRIALS()                                        \
	__attribute__((cleanup(cleanup_start_size_offset_trials)))              \
	= generate_start_size_offset_trials();
2016 
2017 static void
cleanup_start_size_offset_trials(start_size_offset_trials_t ** trials)2018 cleanup_start_size_offset_trials(start_size_offset_trials_t **trials)
2019 {
2020 	for (size_t i = 0; i < (*trials)->count; i++) {
2021 		kfree_str((*trials)->list[i].name);
2022 	}
2023 	free_trials(*trials);
2024 }
2025 
2026 
// size/size: test two independent sizes

// One size/size trial: two unrelated size arguments.
// name is heap-allocated by generate_size_size_trials().
typedef struct {
	addr_t size;
	addr_t size_2;
	const char *name;
} size_size_trial_t;

// Growable list of size/size trials.
typedef struct {
	unsigned count;
	unsigned capacity;
	size_size_trial_t list[];
} size_size_trials_t;

// Instantiate the trial-list helpers for these trials.
TRIALS_IMPL(size_size)

// Build a size_size_trial_t value; stores `new_name` for later cleanup.
#define SIZE_SIZE_TRIAL(new_size, new_size_2, new_name) \
(size_size_trial_t){ .size = (addr_t)(new_size), \
	        .size_2 = (addr_t) (new_size_2), \
	        .name = new_name }
2047 
2048 size_size_trials_t *
2049 generate_size_size_trials()
2050 {
2051 	const offset_list_t *size_offsets = get_size_trial_offsets();
2052 	unsigned SIZES = size_offsets->count;
2053 	size_size_trials_t * trials = allocate_size_size_trials(SIZES * SIZES);
2054 
2055 	for (size_t i = 0; i < SIZES; i++) {
2056 		for (size_t j = 0; j < SIZES; j++) {
2057 			addr_t size = size_offsets->list[i].offset;
2058 			addr_t size_2 = size_offsets->list[j].offset;
2059 
2060 			char *buf;
2061 			kasprintf(&buf, "size:%lli, size2:%lli", (int64_t) size, size_2);
2062 			append_trial(trials, SIZE_SIZE_TRIAL(size, size_2, buf));
2063 		}
2064 	}
2065 	return trials;
2066 }
2067 
// allocate size/size trials
// and deallocate them at end of scope
#define SMART_SIZE_SIZE_TRIALS()                                                \
	__attribute__((cleanup(cleanup_size_size_trials)))              \
	= generate_size_size_trials();
2071 
2072 static void
cleanup_size_size_trials(size_size_trials_t ** trials)2073 cleanup_size_size_trials(size_size_trials_t **trials)
2074 {
2075 	// TODO free strings in trials
2076 	free_trials(*trials);
2077 }
2078 
2079 
// src/dst/size: test a source address, a dest address,
// and a common size that may be added to both addresses

// One src/dst/size trial plus the rebasing bookkeeping used by
// slide_trial_src()/slide_trial_dst().
typedef struct {
	addr_t src;
	addr_t dst;
	addr_t size;
	char *name;
	bool src_is_absolute;  // src computation does not include any allocation's base address
	bool dst_is_absolute;  // dst computation does not include any allocation's base address
	bool size_is_src_relative;   // size computation includes src
	bool size_is_dst_relative;   // size computation includes dst
} src_dst_size_trial_t;

// Growable list of src/dst/size trials.
typedef struct {
	unsigned count;
	unsigned capacity;
	src_dst_size_trial_t list[];
} src_dst_size_trials_t;

// Instantiate the trial-list helpers for these trials.
TRIALS_IMPL(src_dst_size)

// Build a src_dst_size_trial_t value. The name string is stored and
// later freed by cleanup_src_dst_size_trials().
#define SRC_DST_SIZE_TRIAL(new_src, new_dst, new_size, new_name, src_absolute, dst_absolute, size_src_rel, size_dst_rel) \
	(src_dst_size_trial_t){                                         \
	        .src = (addr_t)(new_src),                               \
	        .dst = (addr_t)(new_dst),                               \
	        .size = (addr_t)(new_size),                             \
	        .name = new_name,                                       \
	        .src_is_absolute = src_absolute,                        \
	        .dst_is_absolute = dst_absolute,                        \
	        .size_is_src_relative = size_src_rel,                   \
	        .size_is_dst_relative = size_dst_rel,                   \
	}
2113 
// Build the cross product of src × dst × size trials.
// `srcname`/`dstname` are used only in the trial description strings
// (e.g. "src"/"dst" or "fileoff"/"dst").
// Each size offset appears in three flavors: absolute, src-relative
// (size = -src + offset, so src+size lands on the raw offset), and
// dst-relative (likewise for dst).
// Caller frees with cleanup_src_dst_size_trials().
src_dst_size_trials_t * __attribute__((overloadable))
generate_src_dst_size_trials(const char *srcname, const char *dstname)
{
	const offset_list_t *addr_offsets = get_addr_trial_offsets();
	const offset_list_t *size_offsets = get_size_trial_offsets();
	unsigned src_count = addr_offsets->count;
	unsigned dst_count = src_count;
	unsigned size_count = 3 * size_offsets->count;
	unsigned num_trials = src_count * dst_count * size_count;
	src_dst_size_trials_t * trials = allocate_src_dst_size_trials(num_trials);

	// each size is used three times:
	// once src-relative, once dst-relative, and once absolute
	unsigned size_part = size_count / 3;

	for (size_t i = 0; i < src_count; i++) {
		bool rebase_src = !addr_offsets->list[i].is_absolute;
		addr_t src_offset = addr_offsets->list[i].offset;

		for (size_t j = 0; j < dst_count; j++) {
			bool rebase_dst = !addr_offsets->list[j].is_absolute;
			addr_t dst_offset = addr_offsets->list[j].offset;

			for (size_t k = 0; k < size_count; k++) {
				// map k into one of the three size buckets:
				// [0, size_part) absolute,
				// [size_part, 2*size_part) src-relative,
				// [2*size_part, size_count) dst-relative
				bool rebase_size_from_src = false;
				bool rebase_size_from_dst = false;
				addr_t size_offset;
				if (k < size_part) {
					size_offset = size_offsets->list[k].offset;
				} else if (k < 2 * size_part) {
					size_offset = size_offsets->list[k - size_part].offset;
					rebase_size_from_src = true;
					rebase_size_from_dst = false;
				} else {
					size_offset = size_offsets->list[k - 2 * size_part].offset;
					rebase_size_from_src = false;
					rebase_size_from_dst = true;
				}

				addr_t size;
				char *desc;
				if (rebase_size_from_src) {
					// unsigned wraparound intended: src + size == size_offset
					size = -src_offset + size_offset;
					kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: -%s%+lli",
					    srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
					    dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
					    srcname, (int64_t)size_offset);
				} else if (rebase_size_from_dst) {
					// unsigned wraparound intended: dst + size == size_offset
					size = -dst_offset + size_offset;
					kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: -%s%+lli",
					    srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
					    dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
					    dstname, (int64_t)size_offset);
				} else {
					size = size_offset;
					kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: %lli",
					    srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
					    dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
					    (int64_t)size_offset);
				}
				assert(desc);
				append_trial(trials, SRC_DST_SIZE_TRIAL(src_offset, dst_offset, size, desc,
				    !rebase_src, !rebase_dst, rebase_size_from_src, rebase_size_from_dst));
			}
		}
	}
	return trials;
}
2182 
2183 src_dst_size_trials_t * __attribute__((overloadable))
generate_src_dst_size_trials(void)2184 generate_src_dst_size_trials(void)
2185 {
2186 	return generate_src_dst_size_trials("src", "dst");
2187 }
// allocate src/dst/size trials (default labels)
// and deallocate them at end of scope
#define SMART_SRC_DST_SIZE_TRIALS()                                     \
	__attribute__((cleanup(cleanup_src_dst_size_trials)))           \
	= generate_src_dst_size_trials();

// same, but the "src" value is described as a file offset
#define SMART_FILEOFF_DST_SIZE_TRIALS()                                 \
	__attribute__((cleanup(cleanup_src_dst_size_trials)))           \
	= generate_src_dst_size_trials("fileoff", "dst");
2195 
2196 static void
cleanup_src_dst_size_trials(src_dst_size_trials_t ** trials)2197 cleanup_src_dst_size_trials(src_dst_size_trials_t **trials)
2198 {
2199 	for (size_t i = 0; i < (*trials)->count; i++) {
2200 		kfree_str((*trials)->list[i].name);
2201 	}
2202 	free_trials(*trials);
2203 }
2204 
2205 static src_dst_size_trial_t __attribute__((overloadable, used))
slide_trial_src(src_dst_size_trial_t trial,mach_vm_address_t slide)2206 slide_trial_src(src_dst_size_trial_t trial, mach_vm_address_t slide)
2207 {
2208 	src_dst_size_trial_t result = trial;
2209 
2210 	if (!trial.src_is_absolute) {
2211 		result.src += slide;
2212 		if (trial.size_is_src_relative) {
2213 			result.size -= slide;
2214 		}
2215 	}
2216 	return result;
2217 }
2218 
2219 static src_dst_size_trial_t __attribute__((overloadable, used))
slide_trial_dst(src_dst_size_trial_t trial,mach_vm_address_t slide)2220 slide_trial_dst(src_dst_size_trial_t trial, mach_vm_address_t slide)
2221 {
2222 	src_dst_size_trial_t result = trial;
2223 
2224 	if (!trial.dst_is_absolute) {
2225 		result.dst += slide;
2226 		if (trial.size_is_dst_relative) {
2227 			result.size -= slide;
2228 		}
2229 	}
2230 	return result;
2231 }
2232 
2233 
2234 /////////////////////////////////////////////////////
2235 // utility code
2236 
2237 // Return true if flags has VM_FLAGS_FIXED
2238 // This is non-trivial because VM_FLAGS_FIXED is zero;
2239 // the real value is the absence of VM_FLAGS_ANYWHERE.
2240 static inline bool
is_fixed(int flags)2241 is_fixed(int flags)
2242 {
2243 	static_assert(VM_FLAGS_FIXED == 0, "this test requies VM_FLAGS_FIXED be zero");
2244 	static_assert(VM_FLAGS_ANYWHERE != 0, "this test requires VM_FLAGS_ANYWHERE be nonzero");
2245 	return !(flags & VM_FLAGS_ANYWHERE);
2246 }
2247 
2248 // Return true if flags has VM_FLAGS_FIXED and VM_FLAGS_OVERWRITE set.
2249 static inline bool
is_fixed_overwrite(int flags)2250 is_fixed_overwrite(int flags)
2251 {
2252 	return is_fixed(flags) && (flags & VM_FLAGS_OVERWRITE);
2253 }
2254 
2255 
2256 // Return true if flags has VM_FLAGS_ANYWHERE and VM_FLAGS_RANDOM_ADDR set.
2257 static inline bool
is_random_anywhere(int flags)2258 is_random_anywhere(int flags)
2259 {
2260 	static_assert(VM_FLAGS_ANYWHERE != 0, "this test requires VM_FLAGS_ANYWHERE be nonzero");
2261 	return (flags & VM_FLAGS_RANDOM_ADDR) && (flags & VM_FLAGS_ANYWHERE);
2262 }
2263 
2264 // Deallocate [start, start+size).
2265 // Don't deallocate if the allocator failed (allocator_kr)
2266 // Don't deallocate if flags include FIXED | OVERWRITE (in which case
2267 //   the memory is a pre-existing allocation and should be left alone)
2268 static void
deallocate_if_not_fixed_overwrite(kern_return_t allocator_kr,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,int flags)2269 deallocate_if_not_fixed_overwrite(kern_return_t allocator_kr, MAP_T map,
2270     mach_vm_address_t start, mach_vm_size_t size, int flags)
2271 {
2272 	if (is_fixed_overwrite(flags)) {
2273 		// fixed-overwrite with pre-existing allocation, don't deallocate
2274 	} else if (allocator_kr != 0) {
2275 		// allocator failed, don't deallocate
2276 	} else {
2277 		(void)mach_vm_deallocate(map, start, size);
2278 	}
2279 }
2280 
2281 #if !KERNEL
2282 
2283 // userspace: use the test task's own vm_map
2284 #define SMART_MAP = mach_task_self()
2285 
2286 #else
2287 
// Create a fresh pageable 64-bit vm_map spanning [map_start, map_end)
// backed by its own pmap and ledger.
// The ledger reference is dropped here because the pmap retains it;
// the pmap itself is destroyed when the map is deallocated
// (see cleanup_map()).
static inline vm_map_t
create_map(mach_vm_address_t map_start, mach_vm_address_t map_end)
{
	ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
	pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
	assert(pmap);
	ledger_dereference(ledger);  // now retained by pmap
	vm_map_t map = vm_map_create_options(pmap, map_start, map_end, VM_MAP_CREATE_PAGEABLE);
	assert(map);

	return map;
}
2300 
// Tear down a map created by create_map(): terminate it, then drop
// the reference (which also destroys the pmap).
// Used via the SMART_MAP cleanup attribute.
static inline void
cleanup_map(vm_map_t *map)
{
	assert(*map);
	kern_return_t kr = vm_map_terminate(*map);
	assert(kr == 0);
	vm_map_deallocate(*map);  // also destroys pmap
}
2309 
// kernel: create a new vm_map and deallocate it at end of scope
// fixme choose a user-like and a kernel-like address range
#define SMART_MAP                                                       \
	__attribute__((cleanup(cleanup_map))) = create_map(0, 0xffffffffffffffff)
2314 
2315 #endif
2316 
2317 // Allocate with an address hint.
2318 // Important for kernel tests' empty vm_maps
2319 // to avoid allocating near address 0 and ~0.
2320 static kern_return_t
allocate_away_from_zero(MAP_T map,mach_vm_address_t * address,mach_vm_size_t size)2321 allocate_away_from_zero(
2322 	MAP_T               map,
2323 	mach_vm_address_t  *address,
2324 	mach_vm_size_t      size)
2325 {
2326 	*address = 2ull * 1024 * 1024 * 1024; // 2 GB address hint
2327 	return mach_vm_map(map, address, size,
2328 	           0, VM_FLAGS_ANYWHERE, 0, 0, 0,
2329 	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
2330 }
2331 
// allocate a VM region with size and permissions
// and deallocate it at end of scope
// (PANICs via assert if the allocation fails)
#define SMART_ALLOCATE_VM(map, size, perm)                              \
    __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, perm, false)

// allocate a VM region with size and permissions
// and deallocate it at end of scope
// If no such region could be allocated, return {.addr = 0}
#define SMART_TRY_ALLOCATE_VM(map, size, perm)                              \
    __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, perm, true)
2342 
// a VM allocation with unallocated pages around it
// Layout in address order: guard_prefix, unallocated_prefix,
// [addr, addr+size), unallocated_suffix, guard_suffix.
// See create_allocation() for how the region is carved up.
typedef struct {
	MAP_T map;
	addr_t guard_size;
	addr_t guard_prefix;        // page-sized
	addr_t unallocated_prefix;  // page-sized
	addr_t addr;
	addr_t size;
	addr_t unallocated_suffix;  // page-sized
	addr_t guard_suffix;        // page-sized
} allocation_t;
2354 
// Allocate a VM region of (page-rounded) `new_size` bytes with `perm`
// protection, surrounded by one unallocated page and one PROT_NONE
// guard page on each side.
// If `allow_failure` is set, size-rounding overflow, arithmetic
// overflow, or allocation failure returns a zeroed allocation_t
// (addr == 0) instead of asserting.
static allocation_t
create_allocation(MAP_T new_map, mach_vm_address_t new_size, vm_prot_t perm, bool allow_failure)
{
	// allocations in address order:
	// 1 page guard_prefix (allocated, prot none)
	// 1 page unallocated_prefix (unallocated)
	// N pages addr..addr+size
	// 1 page unallocated_suffix (unallocated)
	// 1 page guard_suffix (allocated, prot none)

	// allocate new_size plus 4 pages
	// then carve it up into our regions

	allocation_t result;

	result.map = new_map;

	result.guard_size = KB16;
	result.size = round_up_page(new_size, KB16);
	if (result.size == 0 && allow_failure) {
		// rounding overflowed to zero
		return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
	}
	assert(result.size != 0);

	mach_vm_address_t allocated_base;
	mach_vm_size_t allocated_size = result.size;
	if (__builtin_add_overflow(result.size, result.guard_size * 4, &allocated_size)) {
		if (allow_failure) {
			return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
		} else {
			assert(false);
		}
	}

	kern_return_t kr;
	kr = allocate_away_from_zero(result.map, &allocated_base, allocated_size);
	if (kr != 0 && allow_failure) {
		return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
	}
	assert(kr == 0);

	// carve the single allocation into the five regions
	result.guard_prefix = (addr_t)allocated_base;
	result.unallocated_prefix = result.guard_prefix + result.guard_size;
	result.addr = result.unallocated_prefix + result.guard_size;
	result.unallocated_suffix = result.addr + result.size;
	result.guard_suffix = result.unallocated_suffix + result.guard_size;

	// set protections, then punch out the unallocated pages
	kr = mach_vm_protect(result.map, result.addr, result.size, false, perm);
	assert(kr == 0);
	kr = mach_vm_protect(result.map, result.guard_prefix, result.guard_size, true, VM_PROT_NONE);
	assert(kr == 0);
	kr = mach_vm_protect(result.map, result.guard_suffix, result.guard_size, true, VM_PROT_NONE);
	assert(kr == 0);
	kr = mach_vm_deallocate(result.map, result.unallocated_prefix, result.guard_size);
	assert(kr == 0);
	kr = mach_vm_deallocate(result.map, result.unallocated_suffix, result.guard_size);
	assert(kr == 0);

	return result;
}
2415 
2416 // Mark this allocation as deallocated by something else.
2417 // This means cleanup_allocation() won't deallocate it twice.
2418 // cleanup_allocation() will still free the guard pages.
2419 static void
set_already_deallocated(allocation_t * allocation)2420 set_already_deallocated(allocation_t *allocation)
2421 {
2422 	allocation->addr = 0;
2423 	allocation->size = 0;
2424 }
2425 
2426 static void
cleanup_allocation(allocation_t * allocation)2427 cleanup_allocation(allocation_t *allocation)
2428 {
2429 	// fixme verify allocations and unallocated spaces still exist where we expect
2430 	if (allocation->size) {
2431 		(void)mach_vm_deallocate(allocation->map, allocation->addr, allocation->size);
2432 	}
2433 	if (allocation->guard_size) {
2434 		(void)mach_vm_deallocate(allocation->map, allocation->guard_prefix, allocation->guard_size);
2435 		(void)mach_vm_deallocate(allocation->map, allocation->guard_suffix, allocation->guard_size);
2436 	}
2437 }
2438 
2439 
// unallocate a VM region with size
// and deallocate it at end of scope
// (PANICs via assert if setup fails)
#define SMART_UNALLOCATE_VM(map, size)                                  \
	__attribute__((cleanup(cleanup_unallocation))) = create_unallocation(map, size)

// unallocate a VM region with size
// and deallocate it at end of scope
// If no such region could be allocated, return {.addr = 0}
#define SMART_TRY_UNALLOCATE_VM(map, size)                                  \
	__attribute__((cleanup(cleanup_unallocation))) = create_unallocation(map, size, true)
2450 
// a VM space with allocated pages around it
// Layout in address order: guard_prefix, [addr, addr+size) which is
// deliberately unallocated, guard_suffix.
// See create_unallocation() for construction.
typedef struct {
	MAP_T map;
	addr_t guard_size;
	addr_t guard_prefix;        // page-sized
	addr_t addr;
	addr_t size;
	addr_t guard_suffix;        // page-sized
} unallocation_t;
2460 
// Create a (page-rounded) `new_size`-byte hole in `new_map`, flanked
// by one PROT_NONE guard page on each side so neighbors can't drift
// into it.
// If `allow_failure` is set, size-rounding overflow, arithmetic
// overflow, or allocation failure returns a zeroed unallocation_t
// (addr == 0) instead of asserting.
static unallocation_t __attribute__((overloadable))
create_unallocation(MAP_T new_map, mach_vm_address_t new_size, bool allow_failure)
{
	// allocations in address order:
	// 1 page guard_prefix (allocated, prot none)
	// N pages addr..addr+size (unallocated)
	// 1 page guard_suffix (allocated, prot none)

	// allocate new_size plus 2 pages
	// then carve it up into our regions

	unallocation_t result;

	result.map = new_map;

	result.guard_size = KB16;
	result.size = round_up_page(new_size, KB16);
	if (result.size == 0 && allow_failure) {
		// rounding overflowed to zero
		return (unallocation_t){new_map, 0, 0, 0, 0, 0};
	}
	assert(result.size != 0);

	mach_vm_address_t allocated_base;
	mach_vm_size_t allocated_size = result.size;
	if (__builtin_add_overflow(result.size, result.guard_size * 2, &allocated_size)) {
		if (allow_failure) {
			return (unallocation_t){new_map, 0, 0, 0, 0, 0};
		} else {
			assert(false);
		}
	}
	kern_return_t kr;
	kr = allocate_away_from_zero(result.map, &allocated_base, allocated_size);
	if (kr != 0 && allow_failure) {
		return (unallocation_t){new_map, 0, 0, 0, 0, 0};
	}
	assert(kr == 0);

	// carve the single allocation into the three regions
	result.guard_prefix = (addr_t)allocated_base;
	result.addr = result.guard_prefix + result.guard_size;
	result.guard_suffix = result.addr + result.size;

	// punch out the middle, then lock down the guards
	kr = mach_vm_deallocate(result.map, result.addr, result.size);
	assert(kr == 0);
	kr = mach_vm_protect(result.map, result.guard_prefix, result.guard_size, true, VM_PROT_NONE);
	assert(kr == 0);
	kr = mach_vm_protect(result.map, result.guard_suffix, result.guard_size, true, VM_PROT_NONE);
	assert(kr == 0);

	return result;
}
2512 
// Two-argument overload: reservation failure is fatal (asserts).
static unallocation_t __attribute__((overloadable))
create_unallocation(MAP_T new_map, mach_vm_address_t new_size)
{
	return create_unallocation(new_map, new_size, false /*allow_failure*/);
}
2518 
2519 static void
cleanup_unallocation(unallocation_t * unallocation)2520 cleanup_unallocation(unallocation_t *unallocation)
2521 {
2522 	// fixme verify allocations and unallocated spaces still exist where we expect
2523 	if (unallocation->guard_size) {
2524 		(void)mach_vm_deallocate(unallocation->map, unallocation->guard_prefix, unallocation->guard_size);
2525 		(void)mach_vm_deallocate(unallocation->map, unallocation->guard_suffix, unallocation->guard_size);
2526 	}
2527 }
2528 
2529 
// mach_vm_remap_external/vm_remap_external/vm32_remap/mach_vm_remap_new_external infra
// mach_vm_remap/mach_vm_remap_new_kernel infra

// Common signature shared by all remap-style functions under test, so
// one set of trial drivers can exercise every variant.
typedef kern_return_t (*remap_fn_t)(vm_map_t target_task,
    mach_vm_address_t *target_address,        // in/out: requested / actual address
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,                                // VM_FLAGS_*
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,                // in/out
    vm_prot_t *max_protection,                // in/out
    vm_inherit_t inheritance);
2544 
2545 // helpers that call a provided function with certain sets of params
2546 
// helpers that call a provided function with certain sets of params

// Core remap trial: invoke `fn` (a remap_fn_t) remapping [src, src+size)
// within `map` with the given placement flags, copy mode, protections,
// and inheritance, then clean up any mapping the call created.
// Returns PANIC (without calling fn) for combinations known to panic.
static kern_return_t
help_call_remap_fn__src_size_etc(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_prot_t cur, vm_prot_t max, vm_inherit_t inherit)
{
	kern_return_t kr;
#if KERNEL
	if (is_random_anywhere(flags)) {
		// RANDOM_ADDR is likely to fall outside pmap's range
		return PANIC;
	}
#endif
	if (is_fixed_overwrite(flags)) {
		// Try to allocate a dest for vm_remap to fixed-overwrite at.
		allocation_t dst_alloc SMART_TRY_ALLOCATE_VM(map, size, VM_PROT_DEFAULT);
		mach_vm_address_t out_addr = dst_alloc.addr;
		if (out_addr == 0) {
			// Failed to allocate. Clear VM_FLAGS_OVERWRITE
			// to prevent wild mappings.
			flags &= ~VM_FLAGS_OVERWRITE;
		}
		kr = fn(map, &out_addr, size, 0, flags,
		    map, src, copy, &cur, &max, inherit);
	} else {
		// vm_remap will allocate anywhere. Deallocate if it succeeds.
		mach_vm_address_t out_addr = 0;
		kr = fn(map, &out_addr, size, 0, flags,
		    map, src, copy, &cur, &max, inherit);
		if (kr == 0) {
			(void)mach_vm_deallocate(map, out_addr, size);
		}
	}
	return kr;
}
2579 
2580 static kern_return_t
help_call_remap_fn__src_size(remap_fn_t fn,MAP_T map,int unused_flags __unused,bool copy,mach_vm_address_t src,mach_vm_size_t size)2581 help_call_remap_fn__src_size(remap_fn_t fn, MAP_T map, int unused_flags __unused, bool copy, mach_vm_address_t src, mach_vm_size_t size)
2582 {
2583 	assert(unused_flags == 0);
2584 	return help_call_remap_fn__src_size_etc(fn, map, VM_FLAGS_ANYWHERE, copy, src, size, 0, 0, VM_INHERIT_NONE);
2585 }
2586 
2587 static kern_return_t
help_call_remap_fn__dst_size(remap_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t dst,mach_vm_size_t size)2588 help_call_remap_fn__dst_size(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t dst, mach_vm_size_t size)
2589 {
2590 	allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
2591 	mach_vm_address_t out_addr = dst;
2592 	vm_prot_t cur = 0;
2593 	vm_prot_t max = 0;
2594 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
2595 	    map, src.addr, copy, &cur, &max, VM_INHERIT_NONE);
2596 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
2597 	return kr;
2598 }
2599 
2600 static kern_return_t
help_call_remap_fn__inherit(remap_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t src,mach_vm_size_t size,vm_inherit_t inherit)2601 help_call_remap_fn__inherit(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_inherit_t inherit)
2602 {
2603 	return help_call_remap_fn__src_size_etc(fn, map, flags, copy, src, size, 0, 0, inherit);
2604 }
2605 
2606 static kern_return_t
help_call_remap_fn__flags(remap_fn_t fn,MAP_T map,int unused_flags __unused,bool copy,mach_vm_address_t src,mach_vm_size_t size,int trial_flags)2607 help_call_remap_fn__flags(remap_fn_t fn, MAP_T map, int unused_flags __unused, bool copy, mach_vm_address_t src, mach_vm_size_t size, int trial_flags)
2608 {
2609 	assert(unused_flags == 0);
2610 	return help_call_remap_fn__src_size_etc(fn, map, trial_flags, copy, src, size, 0, 0, VM_INHERIT_NONE);
2611 }
2612 
2613 static kern_return_t
help_call_remap_fn__prot_pairs(remap_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t src,mach_vm_size_t size,vm_prot_t cur,vm_prot_t max)2614 help_call_remap_fn__prot_pairs(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_prot_t cur, vm_prot_t max)
2615 {
2616 	return help_call_remap_fn__src_size_etc(fn, map, flags, copy, src, size, cur, max, VM_INHERIT_NONE);
2617 }
2618 
2619 static kern_return_t
help_call_remap_fn__src_dst_size(remap_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t src,mach_vm_size_t size,mach_vm_address_t dst)2620 help_call_remap_fn__src_dst_size(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst)
2621 {
2622 	mach_vm_address_t out_addr = dst;
2623 	vm_prot_t cur = 0;
2624 	vm_prot_t max = 0;
2625 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
2626 	    map, src, copy, &cur, &max, VM_INHERIT_NONE);
2627 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
2628 	return kr;
2629 }
2630 
// Select the NAME argument corresponding to the number of trailing
// variadic arguments (0..8) -- the usual argument-counting trick.
#define GET_INSTANCE(_0, _1, _2, _3, _4, _5, _6, _7, _8, NAME, ...) NAME

#define DROP_TYPES_8(a, b, ...) , b DROP_TYPES_6(__VA_ARGS__)
#define DROP_TYPES_6(a, b, ...) , b DROP_TYPES_4(__VA_ARGS__)
#define DROP_TYPES_4(a, b, ...) , b DROP_TYPES_2(__VA_ARGS__)
#define DROP_TYPES_2(a, b, ...) , b
#define DROP_TYPES_0()

// Parses lists of "type1, arg1, type2, arg2" into ", arg1, arg2"
// (leading comma so the result can directly extend an argument list).
#define DROP_TYPES(...) GET_INSTANCE(_0 __VA_OPT__(,) __VA_ARGS__, DROP_TYPES_8, DROP_TYPES_8, DROP_TYPES_6, DROP_TYPES_6, DROP_TYPES_4, DROP_TYPES_4, DROP_TYPES_2, DROP_TYPES_2, DROP_TYPES_0, DROP_TYPES_0)(__VA_ARGS__)

#define DROP_COMMAS_8(a, b, ...) , a b DROP_COMMAS_6(__VA_ARGS__)
#define DROP_COMMAS_6(a, b, ...) , a b DROP_COMMAS_4(__VA_ARGS__)
#define DROP_COMMAS_4(a, b, ...) , a b DROP_COMMAS_2(__VA_ARGS__)
#define DROP_COMMAS_2(a, b) , a b
#define DROP_COMMAS_0()

// Parses lists of "type1, arg1, type2, arg2" into ", type1 arg1, type2 arg2"
// (leading comma so the result can directly extend a parameter list).
#define DROP_COMMAS(...) GET_INSTANCE(_0 __VA_OPT__(,) __VA_ARGS__, DROP_COMMAS_8, DROP_COMMAS_8, DROP_COMMAS_6, DROP_COMMAS_6, DROP_COMMAS_4, DROP_COMMAS_4, DROP_COMMAS_2, DROP_COMMAS_2, DROP_COMMAS_0)(__VA_ARGS__)
2650 
// specialize helpers into implementations of call functions that are still agnostic to the remap function

// Defines call_remap_fn__<variant>__<type>: binds a (flags, copy)
// combination onto the matching help_call_remap_fn__<type> helper.
// __VA_ARGS__ is a "type1, arg1, type2, arg2, ..." list of extra trial
// parameters (expanded via DROP_COMMAS / DROP_TYPES above).
#define IMPL_ONE_FROM_HELPER(type, variant, flags, copy, ...)                                                                                           \
	static kern_return_t                                                                                                                            \
	call_remap_fn ## __ ## variant ## __ ## type(remap_fn_t fn, MAP_T map, mach_vm_address_t src, mach_vm_size_t size DROP_COMMAS(__VA_ARGS__)) {   \
	        return help_call_remap_fn__ ## type(fn, map, flags, copy, src, size DROP_TYPES(__VA_ARGS__));                                           \
	}

// Instantiates the three placement variants: fixed-overwrite without
// copy, fixed-overwrite with copy, and anywhere without copy.
#define IMPL_FROM_HELPER(type, ...) \
	IMPL_ONE_FROM_HELPER(type, fixed, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, ##__VA_ARGS__)         \
	IMPL_ONE_FROM_HELPER(type, fixed_copy, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, ##__VA_ARGS__)     \
	IMPL_ONE_FROM_HELPER(type, anywhere, VM_FLAGS_ANYWHERE, false, ##__VA_ARGS__)   \

IMPL_FROM_HELPER(dst_size);
IMPL_FROM_HELPER(inherit, vm_inherit_t, inherit);
IMPL_FROM_HELPER(prot_pairs, vm_prot_t, cur, vm_prot_t, max);
IMPL_FROM_HELPER(src_dst_size, mach_vm_address_t, dst);

// flags trials supply their own placement flags; src_size uses none
IMPL_ONE_FROM_HELPER(flags, nocopy, 0 /*ignored*/, false, int, flag)
IMPL_ONE_FROM_HELPER(flags, copy, 0 /*ignored*/, true, int, flag)

IMPL_ONE_FROM_HELPER(src_size, nocopy, 0 /*ignored*/, false)
IMPL_ONE_FROM_HELPER(src_size, copy, 0 /*ignored*/, true)

#undef IMPL_FROM_HELPER
#undef IMPL_ONE_FROM_HELPER
2677 
// define call functions that are specific to the remap function, and rely on implementations above under the hood

// Defines call_<remap_fn>__<instance>__<type>: a thin wrapper that
// forwards to the function-agnostic call_remap_fn__ implementation,
// passing the concrete remap function under test.
#define IMPL_REMAP_FN_HELPER(remap_fn, instance, type, ...)                                             \
    static kern_return_t                                                                                \
    call_ ## remap_fn ## __ ## instance ## __ ## type(MAP_T map DROP_COMMAS(__VA_ARGS__))               \
    {                                                                                                   \
	return call_remap_fn__ ## instance ## __ ## type(remap_fn, map DROP_TYPES(__VA_ARGS__));        \
    }

// One macro per trial-parameter shape.
#define IMPL_REMAP_FN_SRC_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, src_size, mach_vm_address_t, src, mach_vm_size_t, size)
#define IMPL_REMAP_FN_DST_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, dst_size, mach_vm_address_t, src, mach_vm_size_t, size)
#define IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, src_dst_size, mach_vm_address_t, src, mach_vm_size_t, size, mach_vm_address_t, dst)
#define IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, inherit, mach_vm_address_t, src, mach_vm_size_t, size, vm_inherit_t, inherit)
#define IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, flags, mach_vm_address_t, src, mach_vm_size_t, size, int, flags)
#define IMPL_REMAP_FN_PROT_PAIRS(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, prot_pairs, mach_vm_address_t, src, mach_vm_size_t, size, vm_prot_t, cur, vm_prot_t, max)

// Instantiates every (shape, placement/copy variant) combination for
// one concrete remap function.
#define IMPL(remap_fn)                                          \
	IMPL_REMAP_FN_SRC_SIZE(remap_fn, nocopy);               \
	IMPL_REMAP_FN_SRC_SIZE(remap_fn, copy);                 \
                                                                \
	IMPL_REMAP_FN_DST_SIZE(remap_fn, fixed);                \
	IMPL_REMAP_FN_DST_SIZE(remap_fn, fixed_copy);           \
	IMPL_REMAP_FN_DST_SIZE(remap_fn, anywhere);             \
                                                                \
	IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, fixed);        \
	IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, fixed_copy);   \
	IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, anywhere);     \
                                                                \
	IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, nocopy);         \
	IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, copy);           \
                                                                \
	IMPL_REMAP_FN_PROT_PAIRS(remap_fn, fixed);              \
	IMPL_REMAP_FN_PROT_PAIRS(remap_fn, fixed_copy);         \
	IMPL_REMAP_FN_PROT_PAIRS(remap_fn, anywhere);           \
                                                                \
	IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, fixed);            \
	IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, fixed_copy);       \
	IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, anywhere);         \
2716 
2717 static inline void
check_mach_vm_map_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_address_t saved_addr,int flags,MAP_T map)2718 check_mach_vm_map_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_address_t saved_addr,
2719     int flags, MAP_T map)
2720 {
2721 	if (*kr == KERN_SUCCESS) {
2722 		if (is_fixed(flags)) {
2723 			if (addr != truncate_vm_map_addr_with_flags(map, saved_addr, flags)) {
2724 				*kr = OUT_PARAM_BAD;
2725 			}
2726 		}
2727 	} else {
2728 		if (addr != saved_addr) {
2729 			*kr = OUT_PARAM_BAD;
2730 		}
2731 	}
2732 }
2733 
2734 static inline void
check_mach_vm_remap_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_address_t saved_addr,int flags,vm_prot_t cur_prot,vm_prot_t saved_cur_prot,vm_prot_t max_prot,vm_prot_t saved_max_prot,MAP_T map,mach_vm_address_t src_addr)2735 check_mach_vm_remap_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_address_t saved_addr,
2736     int flags, vm_prot_t cur_prot, vm_prot_t saved_cur_prot, vm_prot_t max_prot, vm_prot_t saved_max_prot, MAP_T map,
2737     mach_vm_address_t src_addr)
2738 {
2739 	if (*kr == KERN_SUCCESS) {
2740 		if (is_fixed(flags)) {
2741 			mach_vm_address_t expected_misalignment = get_expected_remap_misalignment(map, src_addr, flags);
2742 			if (addr != trunc_down_map(map, saved_addr) + expected_misalignment) {
2743 				*kr = OUT_PARAM_BAD;
2744 			}
2745 		}
2746 	} else {
2747 		if ((addr != saved_addr) || (cur_prot != saved_cur_prot) ||
2748 		    (max_prot != saved_max_prot)) {
2749 			*kr = OUT_PARAM_BAD;
2750 		}
2751 	}
2752 }
2753 
2754 #if KERNEL
2755 
// Forward declaration: true if deallocating (start, size) would panic.
static bool
dealloc_would_panic(mach_vm_address_t start, mach_vm_size_t size);
2758 
// Wrapper for the in-kernel mach_vm_remap: skips calls whose implicit
// deallocation would panic, then verifies the out-parameter rules via
// check_mach_vm_remap_outparam_changes.
static inline kern_return_t
mach_vm_remap_wrapped_kern(vm_map_t target_task,
    mach_vm_address_t *target_address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
	if (dealloc_would_panic(*target_address, size)) {
		return PANIC;
	}
	// snapshot in/out parameters so we can audit them after the call
	mach_vm_address_t saved_addr = *target_address;
	vm_prot_t saved_cur_prot = *cur_protection;
	vm_prot_t saved_max_prot = *max_protection;
	kern_return_t kr = mach_vm_remap(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags,
	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
	return kr;
}
IMPL(mach_vm_remap_wrapped_kern)
2784 
// Wrapper for mach_vm_remap_new_kernel: same panic guard and
// out-parameter audit as above, but tags the mapping via FLAGS_AND_TAG
// and accounts for remap_new implying VM_FLAGS_RETURN_DATA_ADDR.
static inline kern_return_t
mach_vm_remap_new_kernel_wrapped(vm_map_t target_task,
    mach_vm_address_t *target_address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
	if (dealloc_would_panic(*target_address, size)) {
		return PANIC;
	}
	// snapshot in/out parameters so we can audit them after the call
	mach_vm_address_t saved_addr = *target_address;
	vm_prot_t saved_cur_prot = *cur_protection;
	vm_prot_t saved_max_prot = *max_protection;
	kern_return_t kr = mach_vm_remap_new_kernel(target_task, target_address, size, mask, FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK), src_task, src_address, copy, cur_protection, max_protection, inheritance);
	// remap_new sets VM_FLAGS_RETURN_DATA_ADDR
	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags | VM_FLAGS_RETURN_DATA_ADDR,
	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
	return kr;
}
IMPL(mach_vm_remap_new_kernel_wrapped)
2811 
2812 #else /* !KERNEL */
2813 
// Userspace wrapper for mach_vm_remap: snapshots in/out parameters and
// audits them after the call. (No panic guard needed from userspace.)
static inline kern_return_t
mach_vm_remap_user(vm_map_t target_task,
    mach_vm_address_t *target_address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
	mach_vm_address_t saved_addr = *target_address;
	vm_prot_t saved_cur_prot = *cur_protection;
	vm_prot_t saved_max_prot = *max_protection;
	kern_return_t kr = mach_vm_remap(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags,
	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
	return kr;
}
IMPL(mach_vm_remap_user)
2836 
// Userspace wrapper for mach_vm_remap_new: like mach_vm_remap_user,
// but accounts for remap_new implying VM_FLAGS_RETURN_DATA_ADDR.
static inline kern_return_t
mach_vm_remap_new_user(vm_map_t target_task,
    mach_vm_address_t *target_address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
	mach_vm_address_t saved_addr = *target_address;
	vm_prot_t saved_cur_prot = *cur_protection;
	vm_prot_t saved_max_prot = *max_protection;
	kern_return_t kr = mach_vm_remap_new(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
	// remap_new sets VM_FLAGS_RETURN_DATA_ADDR
	check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags | VM_FLAGS_RETURN_DATA_ADDR,
	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
	return kr;
}
IMPL(mach_vm_remap_new_user)
2860 
2861 #if TEST_OLD_STYLE_MACH
// Adapter exposing old-style vm_remap (vm_address_t/vm_size_t) through
// the common 64-bit remap_fn_t signature. Arguments are truncated to
// the narrower types on the way in; the resulting address is widened
// back into *target_address on the way out.
static inline kern_return_t
vm_remap_retyped(vm_map_t target_task,
    mach_vm_address_t *target_address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    vm_map_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
	vm_address_t addr = (vm_address_t)*target_address;
	vm_prot_t saved_cur_prot = *cur_protection;
	vm_prot_t saved_max_prot = *max_protection;
	kern_return_t kr = vm_remap(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, src_task, (vm_address_t)src_address, copy, cur_protection, max_protection, inheritance);
	// *target_address still holds the pre-call value at this point, so
	// its truncated cast serves as the "saved" address for the audit
	check_mach_vm_remap_outparam_changes(&kr, addr, (vm_address_t) *target_address, flags,
	    *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
	*target_address = addr;
	return kr;
}

IMPL(vm_remap_retyped)
2886 
2887 #endif /* TEST_OLD_STYLE_MACH */
2888 #endif /* !KERNEL */
2889 
2890 #undef IMPL
2891 #undef IMPL_REMAP_FN_SRC_SIZE
2892 #undef IMPL_REMAP_FN_DST_SIZE
2893 #undef IMPL_REMAP_FN_SRC_DST_SIZE
2894 #undef IMPL_REMAP_FN_SRC_SIZE_INHERIT
2895 #undef IMPL_REMAP_FN_SRC_SIZE_FLAGS
2896 #undef IMPL_REMAP_FN_PROT_PAIRS
2897 #undef IMPL_REMAP_FN_HELPER
2898 
2899 
2900 /////////////////////////////////////////////////////
2901 // Test runners for functions with commonly-used parameter types and setup code.
2902 
2903 #define IMPL(NAME, T)                                                   \
2904 	/* Test a Mach function */                                      \
2905 	/* Run each trial with an allocated vm region and start/size parameters that reference it. */ \
2906 	typedef kern_return_t (*NAME ## mach_with_start_size_fn)(MAP_T map, T start, T size); \
2907                                                                         \
2908 	static results_t * __attribute__((used))                        \
2909 	     test_ ## NAME ## mach_with_allocated_start_size(NAME ## mach_with_start_size_fn fn, const char *testname) \
2910 	{                                                               \
2911 	        MAP_T map SMART_MAP;                                    \
2912 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
2913 	        start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr); \
2914 	        results_t *results = alloc_results(testname, trials->count); \
2915                                                                         \
2916 	        for (unsigned i = 0; i < trials->count; i++) {          \
2917 	                T start = (T)trials->list[i].start;             \
2918 	                T size = (T)trials->list[i].size;               \
2919 	                kern_return_t ret = fn(map, start, size);       \
2920 	                append_result(results, ret, trials->list[i].name); \
2921 	        }                                                       \
2922 	        return results;                                         \
2923 	}                                                               \
2924 	/* Test a Mach function. */                                     \
2925 	/* Run each trial with an allocated vm region and an addr parameter that reference it. */ \
2926 	typedef kern_return_t (*NAME ## mach_with_addr_fn)(MAP_T map, T addr); \
2927                                                                         \
2928 	static results_t * __attribute__((used))                        \
2929 	        test_ ## NAME ## mach_with_allocated_addr_of_size_n(NAME ## mach_with_addr_fn fn, size_t obj_size, const char *testname) \
2930 	{                                                               \
2931 	        MAP_T map SMART_MAP;                                    \
2932 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
2933 	        addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);     \
2934 	/* Do all the addr trials and an additional trial such that obj_size + addr == 0 */ \
2935 	        results_t *results = alloc_results(testname, trials->count+1); \
2936                                                                         \
2937 	        for (unsigned i = 0; i < trials->count; i++) {          \
2938 	                T addr = (T)trials->list[i].addr;               \
2939 	                kern_return_t ret = fn(map, addr);              \
2940 	                append_result(results, ret, trials->list[i].name); \
2941 	        }                                                       \
2942 	        kern_return_t ret = fn(map,  - ((T) obj_size));         \
2943 	        char *trial_desc;                                       \
2944 	        kasprintf(&trial_desc, "addr: -0x%lx", obj_size);       \
2945 	        append_result(results, ret, trial_desc);                \
2946 	        kfree_str(trial_desc);                                  \
2947 	        return results;                                         \
2948 	}                                                               \
2949                                                                         \
2950 	/* Test a Mach function. */                                     \
2951 	/* Run each trial with an allocated vm region and an addr parameter that reference it. */ \
2952 	typedef kern_return_t (*NAME ## mach_with_addr_fn)(MAP_T map, T addr); \
2953                                                                         \
2954 	static results_t * __attribute__((used))                        \
2955 	        test_ ## NAME ## mach_with_allocated_addr(NAME ## mach_with_addr_fn fn, const char *testname) \
2956 	{                                                               \
2957 	        MAP_T map SMART_MAP;                                    \
2958 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
2959 	        addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);     \
2960 	        results_t *results = alloc_results(testname, trials->count); \
2961                                                                         \
2962 	        for (unsigned i = 0; i < trials->count; i++) {          \
2963 	                T addr = (T)trials->list[i].addr;               \
2964 	                kern_return_t ret = fn(map, addr);              \
2965 	                append_result(results, ret, trials->list[i].name); \
2966 	        }                                                       \
2967 	        return results;                                         \
2968 	}                                                               \
2969                                                                         \
2970 	/* Test a Mach function. */                                     \
2971 	/* Run each trial with a size parameter. */                     \
2972 	typedef kern_return_t (*NAME ## mach_with_size_fn)(MAP_T map, T size); \
2973                                                                         \
2974 	static results_t * __attribute__((used))                        \
2975 	        test_ ## NAME ## mach_with_size(NAME ## mach_with_size_fn fn, const char *testname) \
2976 	{                                                               \
2977 	        MAP_T map SMART_MAP;                                    \
2978 	        size_trials_t *trials SMART_SIZE_TRIALS();              \
2979 	        results_t *results = alloc_results(testname, trials->count); \
2980                                                                         \
2981 	        for (unsigned i = 0; i < trials->count; i++) {          \
2982 	                T size = (T)trials->list[i].size;               \
2983 	                kern_return_t ret = fn(map, size);              \
2984 	                append_result(results, ret, trials->list[i].name); \
2985 	        }                                                       \
2986 	        return results;                                         \
2987 	}                                                               \
2988                                                                         \
2989 	/* Test a Mach function. */                                     \
2990 	/* Run each trial with a size parameter. */                     \
2991 	typedef kern_return_t (*NAME ## mach_with_start_size_offset_object_fn)(MAP_T map, T addr, T size, T offset, T obj_size); \
2992                                                                         \
2993 	static results_t * __attribute__((used))                        \
2994 	        test_ ## NAME ## mach_with_start_size_offset_object(NAME ## mach_with_start_size_offset_object_fn fn, const char *testname) \
2995 	{                                                               \
2996 	        MAP_T map SMART_MAP;                                    \
2997 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
2998 	        start_size_offset_object_trials_t *trials SMART_START_SIZE_OFFSET_OBJECT_TRIALS(); \
2999 	        results_t *results = alloc_results(testname, trials->count); \
3000                                                                         \
3001 	        for (unsigned i = 0; i < trials->count; i++) {          \
3002 	                start_size_offset_object_trial_t trial = slide_trial(trials->list[i], base.addr); \
3003 	                T start = (T)trial.start;                       \
3004 	                T size = (T)trial.size;                         \
3005 	                T offset = (T)trial.offset;                     \
3006 	                T obj_size = (T)trial.obj_size;                 \
3007 	                kern_return_t ret = fn(map, start, size, offset, obj_size); \
3008 	                append_result(results, ret, trials->list[i].name); \
3009 	        }                                                       \
3010 	        return results;                                         \
3011 	}                                                               \
3012 	/* Test a Mach function. */                                     \
3013 	/* Run each trial with a size parameter. */                     \
3014 	typedef kern_return_t (*NAME ## mach_with_start_size_offset_fn)(MAP_T map, T addr, T size, T offset, T obj_size); \
3015                                                                         \
3016 	static results_t * __attribute__((used))                        \
3017 	        test_ ## NAME ## mach_with_start_size_offset(NAME ## mach_with_start_size_offset_fn fn, const char *testname) \
3018 	{                                                               \
3019 	        MAP_T map SMART_MAP;                                    \
3020 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3021 	        start_size_offset_trials_t *trials SMART_START_SIZE_OFFSET_TRIALS(); \
3022 	        results_t *results = alloc_results(testname, trials->count); \
3023                                                                         \
3024 	        for (unsigned i = 0; i < trials->count; i++) {          \
3025 	                start_size_offset_trial_t trial = slide_trial(trials->list[i], base.addr); \
3026 	                T start = (T)trial.start;                       \
3027 	                T size = (T)trial.size;                         \
3028 	                T offset = (T)trial.offset;                     \
3029 	                kern_return_t ret = fn(map, start, size, offset, 1); \
3030 	                append_result(results, ret, trials->list[i].name); \
3031 	        }                                                       \
3032 	        return results;                                         \
3033 	}                                                               \
3034                                                                         \
3035 	/* Test a Mach function. */                                     \
3036 	/* Run each trial with an allocated vm region and a set of mmap flags. */ \
3037 	typedef kern_return_t (*NAME ## mach_with_allocated_mmap_flags_fn)(MAP_T map, T addr, T size, int flags); \
3038                                                                         \
3039 	static results_t * __attribute__((used))                        \
3040 	test_ ## NAME ## mach_with_allocated_mmap_flags(NAME ## mach_with_allocated_mmap_flags_fn fn, const char *testname) \
3041 	{                                                               \
3042 	        MAP_T map SMART_MAP;                                    \
3043 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3044 	        mmap_flags_trials_t *trials SMART_MMAP_FLAGS_TRIALS();  \
3045 	        results_t *results = alloc_results(testname, trials->count); \
3046                                                                         \
3047 	        for (unsigned i = 0; i < trials->count; i++) {          \
3048 	                int flags = trials->list[i].flags;              \
3049 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, flags); \
3050 	                append_result(results, ret, trials->list[i].name); \
3051 	        }                                                       \
3052 	        return results;                                         \
3053 	}                                                               \
3054                                                                         \
3055 	/* Test a Mach function. */                                     \
3056 	/* Run each trial with an allocated vm region and a generic 32 bit flag. */ \
3057 	typedef kern_return_t (*NAME ## mach_with_allocated_generic_flag)(MAP_T map, T addr, T size, int flag); \
3058                                                                         \
3059 	static results_t * __attribute__((used))                        \
3060 	test_ ## NAME ## mach_with_allocated_generic_flag(NAME ## mach_with_allocated_generic_flag fn, const char *testname) \
3061 	{                                                               \
3062 	        MAP_T map SMART_MAP;                                    \
3063 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3064 	        generic_flag_trials_t *trials SMART_GENERIC_FLAG_TRIALS();      \
3065 	        results_t *results = alloc_results(testname, trials->count); \
3066                                                                         \
3067 	        for (unsigned i = 0; i < trials->count; i++) {          \
3068 	                int flag = trials->list[i].flag;                \
3069 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, flag); \
3070 	                append_result(results, ret, trials->list[i].name); \
3071 	        }                                                       \
3072 	        return results;                                         \
3073 	}                                                               \
3074                                                                         \
3075 	/* Test a Mach function. */                                     \
3076 	/* Run each trial with a vm_prot_t. */                          \
3077 	typedef kern_return_t (*NAME ## mach_with_prot_fn)(MAP_T map, T size, vm_prot_t prot); \
3078                                                                         \
3079 	static results_t * __attribute__((used))                        \
3080 	test_ ## NAME ## mach_vm_prot(NAME ## mach_with_prot_fn fn, const char *testname) \
3081 	{                                                               \
3082 	        MAP_T map SMART_MAP;                                    \
3083 	        vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();        \
3084 	        results_t *results = alloc_results(testname, trials->count); \
3085                                                                         \
3086 	        for (unsigned i = 0; i < trials->count; i++) {          \
3087 	                kern_return_t ret = fn(map, TEST_ALLOC_SIZE, trials->list[i].prot); \
3088 	                append_result(results, ret, trials->list[i].name); \
3089 	        }                                                       \
3090 	        return results;                                         \
3091 	}                                                               \
3092                                                                         \
3093 	/* Test a Mach function. */                                     \
3094 	/* Run each trial with a pair of vm_prot_t's. */                \
3095 	typedef kern_return_t (*NAME ## mach_with_prot_pair_fn)(MAP_T map, vm_prot_t cur, vm_prot_t max); \
3096                                                                         \
3097 	static results_t * __attribute__((used))                        \
3098 	test_ ## NAME ## mach_vm_prot_pair(NAME ## mach_with_prot_pair_fn fn, const char *testname) \
3099 	{                                                               \
3100 	        MAP_T map SMART_MAP;                                    \
3101 	        vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS();      \
3102 	        results_t *results = alloc_results(testname, trials->count); \
3103                                                                         \
3104 	        for (unsigned i = 0; i < trials->count; i++) {          \
3105 	                kern_return_t ret = fn(map, trials->list[i].cur, trials->list[i].max); \
3106 	                append_result(results, ret, trials->list[i].name); \
3107 	        }                                                       \
3108 	        return results;                                         \
3109 	}                                                               \
3110                                                                         \
3111 	/* Test a Mach function. */                                     \
3112 	/* Run each trial with a pair of vm_prot_t's. */ \
3113 	typedef kern_return_t (*NAME ## mach_with_allocated_prot_pair_fn)(MAP_T map, T addr, T size, vm_prot_t cur, vm_prot_t max); \
3114                                                                         \
3115 	static results_t * __attribute__((used))                        \
3116 	test_ ## NAME ## mach_with_allocated_vm_prot_pair(NAME ## mach_with_allocated_prot_pair_fn fn, const char *testname) \
3117 	{                                                               \
3118 	        MAP_T map SMART_MAP;                                    \
3119 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3120 	        vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS(); \
3121 	        results_t *results = alloc_results(testname, trials->count); \
3122                                                                         \
3123 	        for (unsigned i = 0; i < trials->count; i++) {          \
3124 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, trials->list[i].cur, trials->list[i].max); \
3125 	                append_result(results, ret, trials->list[i].name); \
3126 	        }                                                       \
3127 	        return results;                                         \
3128 	}                                                               \
3129                                                                         \
3130 	/* Test a Mach function. */                                     \
3131 	/* Run each trial with an allocated vm region and a vm_prot_t. */ \
3132 	typedef kern_return_t (*NAME ## mach_with_allocated_prot_fn)(MAP_T map, T addr, T size, vm_prot_t prot); \
3133                                                                         \
3134 	static results_t * __attribute__((used))                        \
3135 	test_ ## NAME ## mach_with_allocated_vm_prot_t(NAME ## mach_with_allocated_prot_fn fn, const char *testname) \
3136 	{                                                               \
3137 	        MAP_T map SMART_MAP;                                    \
3138 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3139 	        vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();        \
3140 	        results_t *results = alloc_results(testname, trials->count); \
3141                                                                         \
3142 	        for (unsigned i = 0; i < trials->count; i++) {          \
3143 	                vm_prot_t prot = trials->list[i].prot;          \
3144 	                kern_return_t ret = fn(map, (T)base.addr, (T)base.size, prot); \
3145 	                append_result(results, ret, trials->list[i].name); \
3146 	        }                                                       \
3147 	        return results;                                         \
3148 	}                                                               \
3149                                                                         \
3150 	/* Test a Mach function. */                                     \
3151 	/* Run each trial with a ledger flag. */ \
3152 	typedef kern_return_t (*NAME ## mach_ledger_flag_fn)(MAP_T map, int ledger_flag); \
3153                                                                         \
3154 	static results_t * __attribute__((used))                        \
3155 	test_ ## NAME ## mach_with_ledger_flag(NAME ## mach_ledger_flag_fn fn, const char *testname) \
3156 	{                                                               \
3157 	        MAP_T map SMART_MAP;                                    \
3158 	        ledger_flag_trials_t *trials SMART_LEDGER_FLAG_TRIALS();        \
3159 	        results_t *results = alloc_results(testname, trials->count); \
3160                                                                         \
3161 	        for (unsigned i = 0; i < trials->count; i++) {          \
3162 	                kern_return_t ret = fn(map, trials->list[i].flag); \
3163 	                append_result(results, ret, trials->list[i].name); \
3164 	        }                                                       \
3165 	        return results;                                         \
3166 	}                                                               \
3167 	/* Test a Mach function. */                                     \
3168 	/* Run each trial with a ledger tag. */                         \
3169 	typedef kern_return_t (*NAME ## mach_ledger_tag_fn)(MAP_T map, int ledger_tag); \
3170                                                                         \
3171 	static results_t * __attribute__((used))                        \
3172 	test_ ## NAME ## mach_with_ledger_tag(NAME ## mach_ledger_tag_fn fn, const char *testname) \
3173 	{                                                               \
3174 	        MAP_T map SMART_MAP;                                    \
3175 	        ledger_tag_trials_t *trials SMART_LEDGER_TAG_TRIALS();  \
3176 	        results_t *results = alloc_results(testname, trials->count); \
3177                                                                         \
3178 	        for (unsigned i = 0; i < trials->count; i++) {          \
3179 	                kern_return_t ret = fn(map, trials->list[i].tag); \
3180 	                append_result(results, ret, trials->list[i].name); \
3181 	        }                                                       \
3182 	        return results;                                         \
3183 	}                                                               \
3184                                                                         \
3185 	/* Test a Mach function. */                                     \
3186 	/* Run each trial with an allocated region and a vm_inherit_t. */ \
3187 	typedef kern_return_t (*NAME ## mach_inherit_fn)(MAP_T map, T addr, T size, vm_inherit_t inherit); \
3188                                                                         \
3189 	static results_t * __attribute__((used))                        \
3190 	test_ ## NAME ## mach_with_allocated_vm_inherit_t(NAME ## mach_inherit_fn fn, const char * testname) { \
3191 	        MAP_T map SMART_MAP;                                    \
3192 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3193 	        vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();  \
3194 	        results_t *results = alloc_results(testname, trials->count); \
3195                                                                         \
3196 	        for (unsigned i = 0; i < trials->count; i++) {          \
3197 	                vm_inherit_trial_t trial = trials->list[i];     \
3198 	                int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
3199 	                append_result(results, ret, trial.name); \
3200 	        }                                                       \
3201 	        return results;                                         \
3202 	}                                                               \
3203 	/* Test a Mach function. */                                     \
3204 	/* Run each trial with an allocated vm region and a vm_prot_t. */ \
3205 	typedef kern_return_t (*NAME ## with_start_end_fn)(MAP_T map, T addr, T end); \
3206                                                                         \
3207 	static results_t * __attribute__((used))                        \
3208 	        test_ ## NAME ## with_start_end(NAME ## with_start_end_fn fn, const char *testname) \
3209 	{                                                               \
3210 	        MAP_T map SMART_MAP;                                    \
3211 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3212 	        start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr); \
3213 	        results_t *results = alloc_results(testname, trials->count); \
3214                                                                         \
3215 	        for (unsigned i = 0; i < trials->count; i++) {          \
3216 	                T start = (T)trials->list[i].start;             \
3217 	                T size = (T)trials->list[i].size;               \
3218 	                kern_return_t ret = fn(map, start, start + size);       \
3219 	                append_result(results, ret, trials->list[i].name); \
3220 	        }                                                       \
3221 	        return results;                                         \
3222 	}                                                               \
3223 	/* Test a Mach function. */                                     \
3224 	/* Run each trial with an allocated vm region and a vm_prot_t. */ \
3225 	typedef kern_return_t (*NAME ## with_tag_fn)(MAP_T map, T addr, T end, vm_tag_t tag); \
3226                                                                         \
3227 	static results_t * __attribute__((used))                        \
3228 	        test_ ## NAME ## with_tag(NAME ## with_tag_fn fn, const char *testname) \
3229 	{                                                               \
3230 	        MAP_T map SMART_MAP;                                    \
3231 	        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
3232 	        vm_tag_trials_t *trials SMART_VM_TAG_TRIALS();  \
3233 	        results_t *results = alloc_results(testname, trials->count); \
3234                                                                         \
3235 	        for (unsigned i = 0; i < trials->count; i++) {          \
3236 	                kern_return_t ret = fn(map, base.addr, base.addr + base.size, trials->list[i].tag); \
3237 	                append_result(results, ret, trials->list[i].name); \
3238 	        }                                                       \
3239 	        return results;                                         \
3240 	}
3241 
/* Instantiate the harness templates: unprefixed 64-bit variants always, */
/* plus "old"-prefixed 32-bit variants for the old-style Mach interfaces. */
IMPL(, uint64_t)
#if TEST_OLD_STYLE_MACH
IMPL(old, uint32_t)
#endif
#undef IMPL
3247 
3248 // Test a mach allocation function with a start/size
3249 static results_t *
3250 test_mach_allocation_func_with_start_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size), const char * testname)
3251 {
3252 	MAP_T map SMART_MAP;
3253 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);
3254 	results_t *results = alloc_results(testname, trials->count);
3255 
3256 	for (unsigned i = 0; i < trials->count; i++) {
3257 		unallocation_t dst SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
3258 		start_size_trial_t trial = slide_trial(trials->list[i], dst.addr);
3259 		mach_vm_address_t addr = trial.start;
3260 		kern_return_t ret = func(map, &addr, trial.size);
3261 		if (ret == 0) {
3262 			(void)mach_vm_deallocate(map, addr, trial.size);
3263 		}
3264 		append_result(results, ret, trial.name);
3265 	}
3266 	return results;
3267 }
3268 
// Test a mach allocation function with a vm_map_kernel_flags_t
// Runs every flags trial; fixed-overwrite trials are pointed at a
// pre-existing allocation so they have a legal target to overwrite.
static results_t *
test_mach_allocation_func_with_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags), const char * testname)
{
	MAP_T map SMART_MAP;
	vm_map_kernel_flags_trials_t * trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
	results_t *results = alloc_results(testname, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		// destination region reused by fixed-overwrite trials below
		allocation_t fixed_overwrite_dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
		vm_map_kernel_flags_trial_t trial = trials->list[i];
#if KERNEL
		if (is_random_anywhere(trial.flags)) {
			// RANDOM_ADDR is likely to fall outside pmap's range
			// so record the expected outcome without calling the function
			append_result(results, PANIC, trial.name);
			continue;
		}
#endif
		mach_vm_address_t addr = 0;
		if (is_fixed_overwrite(trial.flags)) {
			// use a pre-existing destination for fixed-overwrite
			addr = fixed_overwrite_dst.addr;
		}
		kern_return_t ret = func(map, &addr, TEST_ALLOC_SIZE, trial.flags);
		// only non-fixed-overwrite successes created a new allocation to free
		deallocate_if_not_fixed_overwrite(ret, map, addr, TEST_ALLOC_SIZE, trial.flags);
		append_result(results, ret, trial.name);
	}
	return results;
}
3298 
3299 static results_t *
test_mach_with_allocated_vm_map_kernel_flags_t(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,int flags),const char * testname)3300 test_mach_with_allocated_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, int flags), const char * testname)
3301 {
3302 	MAP_T map SMART_MAP;
3303 
3304 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3305 	vm_map_kernel_flags_trials_t * trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
3306 	results_t *results = alloc_results(testname, trials->count);
3307 
3308 	for (unsigned i = 0; i < trials->count; i++) {
3309 		kern_return_t ret = func(map, base.addr, base.size, trials->list[i].flags);
3310 		append_result(results, ret, trials->list[i].name);
3311 	}
3312 	return results;
3313 }
3314 
3315 static results_t *
test_mmap_with_allocated_vm_map_kernel_flags_t(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,int flags),const char * testname)3316 test_mmap_with_allocated_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, int flags), const char * testname)
3317 {
3318 	MAP_T map SMART_MAP;
3319 
3320 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3321 	vm_map_kernel_flags_trials_t * trials SMART_MMAP_KERNEL_FLAGS_TRIALS();
3322 	results_t *results = alloc_results(testname, trials->count);
3323 
3324 	for (unsigned i = 0; i < trials->count; i++) {
3325 		kern_return_t ret = func(map, base.addr, base.size, trials->list[i].flags);
3326 		append_result(results, ret, trials->list[i].name);
3327 	}
3328 	return results;
3329 }
3330 
3331 
3332 // Test a Unix function.
3333 // Run each trial with an allocated vm region and start/size parameters that reference it.
3334 typedef int (*unix_with_start_size_fn)(void *start, size_t size);
3335 
3336 static results_t * __unused
test_unix_with_allocated_start_size(unix_with_start_size_fn fn,const char * testname)3337 test_unix_with_allocated_start_size(unix_with_start_size_fn fn, const char *testname)
3338 {
3339 	MAP_T map SMART_MAP;
3340 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3341 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr);
3342 	results_t *results = alloc_results(testname, trials->count);
3343 
3344 	for (unsigned i = 0; i < trials->count; i++) {
3345 		addr_t start = trials->list[i].start;
3346 		addr_t size = trials->list[i].size;
3347 		int ret = fn((void*)(uintptr_t)start, (size_t)size);
3348 		append_result(results, ret, trials->list[i].name);
3349 	}
3350 	return results;
3351 }
3352 
3353 // Test a Unix function.
3354 // Run each trial with an allocated vm region and a vm_inherit_t
3355 typedef int (*unix_with_inherit_fn)(void *start, size_t size, int inherit);
3356 
3357 static results_t *
test_unix_with_allocated_vm_inherit_t(unix_with_inherit_fn fn,const char * testname)3358 test_unix_with_allocated_vm_inherit_t(unix_with_inherit_fn fn, const char * testname)
3359 {
3360 	MAP_T map SMART_MAP;
3361 	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3362 	vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
3363 	results_t *results = alloc_results(testname, trials->count);
3364 
3365 	for (unsigned i = 0; i < trials->count; i++) {
3366 		vm_inherit_trial_t trial = trials->list[i];
3367 		int ret = fn((void*)(uintptr_t)base.addr, (size_t)base.size, (int)trial.value);
3368 		append_result(results, ret, trial.name);
3369 	}
3370 	return results;
3371 }
3372 
3373 
3374 #ifdef KERNEL
3375 static results_t * __unused
test_kext_unix_with_allocated_start_size(unix_with_start_size_fn fn,const char * testname)3376 test_kext_unix_with_allocated_start_size(unix_with_start_size_fn fn, const char *testname)
3377 {
3378 	allocation_t base SMART_ALLOCATE_VM(current_map(), TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3379 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr);
3380 	results_t *results = alloc_results(testname, trials->count);
3381 
3382 	for (unsigned i = 0; i < trials->count; i++) {
3383 		addr_t start = trials->list[i].start;
3384 		addr_t size = trials->list[i].size;
3385 		int ret = fn((void*)(uintptr_t)start, (size_t)size);
3386 		append_result(results, ret, trials->list[i].name);
3387 	}
3388 	return results;
3389 }
3390 
3391 /* Test a Kext function requiring memory allocated with a specific tag. */
3392 /* Run each trial with an allocated vm region and an addr parameter that reference it. */
3393 
3394 static results_t * __attribute__((used))
test_kext_tagged_with_allocated_addr(kern_return_t (* func)(MAP_T map,mach_vm_address_t addr),const char * testname)3395 test_kext_tagged_with_allocated_addr(kern_return_t (*func)(MAP_T map, mach_vm_address_t addr), const char *testname)
3396 {
3397 	allocation_t base SMART_ALLOCATE_VM(current_map(), TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3398 	addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);
3399 	results_t *results = alloc_results(testname, trials->count);
3400 
3401 	for (unsigned i = 0; i < trials->count; i++) {
3402 		mach_vm_address_t addr = (mach_vm_address_t)trials->list[i].addr;
3403 		kern_return_t ret = func(current_map(), addr);
3404 		append_result(results, ret, trials->list[i].name);
3405 	}
3406 	return results;
3407 }
3408 #endif /* KERNEL */
3409 
3410 static results_t * __attribute__((used))
test_with_int64(kern_return_t (* func)(int64_t),const char * testname)3411 test_with_int64(kern_return_t (*func)(int64_t), const char *testname)
3412 {
3413 	size_trials_t *trials SMART_SIZE_TRIALS();
3414 	results_t *results = alloc_results(testname, trials->count);
3415 
3416 	for (unsigned i = 0; i < trials->count; i++) {
3417 		int64_t val = (int64_t)trials->list[i].size;
3418 		kern_return_t ret = func(val);
3419 		append_result(results, ret, trials->list[i].name);
3420 	}
3421 	return results;
3422 }
3423 
3424 
3425 #if !KERNEL
3426 
3427 // For deallocators like munmap and vm_deallocate.
3428 // Return a non-zero error code if we should avoid performing this trial.
3429 // Call this BEFORE sliding the trial to a non-zero base address.
3430 extern
3431 kern_return_t
3432 short_circuit_deallocator(MAP_T map, start_size_trial_t trial);
3433 
3434 // implemented in vm_parameter_validation.c
3435 
3436 #else /* KERNEL */
3437 
3438 static inline
3439 kern_return_t
short_circuit_deallocator(MAP_T map __unused,start_size_trial_t trial __unused)3440 short_circuit_deallocator(MAP_T map __unused, start_size_trial_t trial __unused)
3441 {
3442 	// Kernel tests run with an empty vm_map so we're free to deallocate whatever we want.
3443 	return 0;
3444 }
3445 
3446 #endif /* KERNEL */
3447 
3448 
3449 // Test mach_vm_deallocate or munmap.
3450 // Similar to test_mach_with_allocated_addr_size, but mach_vm_deallocate is destructive
3451 // so we can't test all values and we need to re-allocate the vm allocation each time.
static results_t *
test_deallocator(kern_return_t (*func)(MAP_T map, mach_vm_address_t start, mach_vm_size_t size), const char *testname)
{
	MAP_T map SMART_MAP;

	// allocate trials relative to address zero
	// later we slide them to each allocation's address
	start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);

	results_t *results = alloc_results(testname, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		start_size_trial_t trial = trials->list[i];
		// fresh allocation per trial: a successful trial destroys it
		allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);

		// Avoid trials that might deallocate wildly.
		// Check this BEFORE sliding the trial.
		kern_return_t ret = short_circuit_deallocator(map, trial);
		if (ret == 0) {
			// Adjust start and/or size, if that value includes the allocated address
			trial = slide_trial(trial, base.addr);

			ret = func(map, trial.start, trial.size);
			if (ret == 0) {
				// Deallocation succeeded. Don't deallocate again.
				set_already_deallocated(&base);
			}
		}
		append_result(results, ret, trial.name);
	}

	return results;
}
3485 
3486 static results_t *
test_allocated_src_unallocated_dst_size(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,mach_vm_address_t dst),const char * testname)3487 test_allocated_src_unallocated_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst), const char * testname)
3488 {
3489 	MAP_T map SMART_MAP;
3490 	allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3491 	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
3492 	results_t *results = alloc_results(testname, trials->count);
3493 
3494 	for (unsigned i = 0; i < trials->count; i++) {
3495 		src_dst_size_trial_t trial = trials->list[i];
3496 		unallocation_t dst_base SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
3497 		trial = slide_trial_src(trial, src_base.addr);
3498 		trial = slide_trial_dst(trial, dst_base.addr);
3499 		int ret = func(map, trial.src, trial.size, trial.dst);
3500 		// func deallocates its own allocation
3501 		append_result(results, ret, trial.name);
3502 	}
3503 	return results;
3504 }
3505 
3506 static results_t *
test_allocated_src_allocated_dst_size(kern_return_t (* func)(MAP_T map,mach_vm_address_t src,mach_vm_size_t size,mach_vm_address_t dst),const char * testname)3507 test_allocated_src_allocated_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst), const char * testname)
3508 {
3509 	MAP_T map SMART_MAP;
3510 	allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3511 	allocation_t dst_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3512 	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
3513 	results_t *results = alloc_results(testname, trials->count);
3514 
3515 	for (unsigned i = 0; i < trials->count; i++) {
3516 		src_dst_size_trial_t trial = trials->list[i];
3517 		trial = slide_trial_src(trial, src_base.addr);
3518 		trial = slide_trial_dst(trial, dst_base.addr);
3519 		int ret = func(map, trial.src, trial.size, trial.dst);
3520 		// func should be fixed-overwrite, nothing new to deallocate
3521 		append_result(results, ret, trial.name);
3522 	}
3523 	return results;
3524 }
3525 
3526 static results_t *
test_dst_size_fileoff(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff),const char * testname)3527 test_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
3528 {
3529 	MAP_T map SMART_MAP;
3530 	src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
3531 	results_t *results = alloc_results(testname, trials->count);
3532 
3533 	for (unsigned i = 0; i < trials->count; i++) {
3534 		src_dst_size_trial_t trial = trials->list[i];
3535 		unallocation_t dst_base SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
3536 		// src a.k.a. mmap fileoff doesn't slide
3537 		trial = slide_trial_dst(trial, dst_base.addr);
3538 		int ret = func(map, trial.dst, trial.size, trial.src);
3539 		append_result(results, ret, trial.name);
3540 	}
3541 	return results;
3542 }
3543 
3544 // Try to allocate a destination for mmap(MAP_FIXED) to overwrite.
3545 // On exit:
3546 // *out_dst *out_size are the allocation, or 0
3547 // *out_panic is true if the trial should stop and record PANIC
3548 // (because the trial specifies an absolute address that is already occupied)
3549 // *out_slide is true if the trial should slide by *out_dst
static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map, mach_vm_address_t trial_dst, mach_vm_size_t trial_size, bool trial_dst_is_absolute, bool trial_size_is_absolute, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
{
	*out_panic = false;
	*out_slide = false;

	if (trial_dst_is_absolute && trial_size_is_absolute) {
		// known dst addr, known size
		// try to claim exactly that range so MAP_FIXED only clobbers us
		*out_dst = trial_dst;
		*out_size = trial_size;
		kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_FIXED);
		if (kr == KERN_NO_SPACE) {
			// this space is in use, we can't allow mmap to try to overwrite it
			*out_panic = true;
			*out_dst = 0;
			*out_size = 0;
		} else if (kr != 0) {
			// some other error, assume mmap will also fail
			*out_dst = 0;
			*out_size = 0;
		}
		// no slide, trial and allocation are already at the same place
		*out_slide = false;
	} else {
		// other cases either fit in a small allocation or fail
		// let the kernel choose the address, then ask the caller to slide
		*out_dst = 0;
		*out_size = TEST_ALLOC_SIZE;
		kern_return_t kr = mach_vm_allocate(map, out_dst, *out_size, VM_FLAGS_ANYWHERE);
		if (kr != 0) {
			// allocation error, assume mmap will also fail
			*out_dst = 0;
			*out_size = 0;
		}
		*out_slide = true;
	}
}
3586 
3587 static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map,start_size_trial_t trial,mach_vm_address_t * out_dst,mach_vm_size_t * out_size,bool * out_panic,bool * out_slide)3588 allocate_for_mmap_fixed(MAP_T map, start_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
3589 {
3590 	allocate_for_mmap_fixed(map, trial.start, trial.size, trial.start_is_absolute, trial.size_is_absolute,
3591 	    out_dst, out_size, out_panic, out_slide);
3592 }
3593 static __attribute__((overloadable)) void
allocate_for_mmap_fixed(MAP_T map,src_dst_size_trial_t trial,mach_vm_address_t * out_dst,mach_vm_size_t * out_size,bool * out_panic,bool * out_slide)3594 allocate_for_mmap_fixed(MAP_T map, src_dst_size_trial_t trial, mach_vm_address_t *out_dst, mach_vm_size_t *out_size, bool *out_panic, bool *out_slide)
3595 {
3596 	allocate_for_mmap_fixed(map, trial.dst, trial.size, trial.dst_is_absolute, !trial.size_is_dst_relative,
3597 	    out_dst, out_size, out_panic, out_slide);
3598 }
3599 
3600 // Like test_dst_size_fileoff, but specialized for mmap(MAP_FIXED).
3601 // mmap(MAP_FIXED) is destructive, forcibly unmapping anything
3602 // already at that address.
3603 // We must ensure that each trial is either obviously invalid and caught
3604 // by the sanitizers, or is valid and overwrites an allocation we control.
3605 static results_t *
test_fixed_dst_size_fileoff(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size,mach_vm_address_t fileoff),const char * testname)3606 test_fixed_dst_size_fileoff(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size, mach_vm_address_t fileoff), const char * testname)
3607 {
3608 	MAP_T map SMART_MAP;
3609 	src_dst_size_trials_t * trials SMART_FILEOFF_DST_SIZE_TRIALS();
3610 	results_t *results = alloc_results(testname, trials->count);
3611 	for (unsigned i = 0; i < trials->count; i++) {
3612 		src_dst_size_trial_t trial = trials->list[i];
3613 		// Try to create an allocation for mmap to overwrite.
3614 		mach_vm_address_t dst_alloc;
3615 		mach_vm_size_t dst_size;
3616 		bool should_panic;
3617 		bool should_slide_trial;
3618 		allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
3619 		if (should_panic) {
3620 			append_result(results, PANIC, trial.name);
3621 			continue;
3622 		}
3623 		if (should_slide_trial) {
3624 			// src a.k.a. mmap fileoff doesn't slide
3625 			trial = slide_trial_dst(trial, dst_alloc);
3626 		}
3627 
3628 		kern_return_t ret = func(map, trial.dst, trial.size, trial.src);
3629 
3630 		if (dst_alloc != 0) {
3631 			(void)mach_vm_deallocate(map, dst_alloc, dst_size);
3632 		}
3633 		append_result(results, ret, trial.name);
3634 	}
3635 	return results;
3636 }
3637 
3638 // Like test_mach_with_allocated_start_size, but specialized for mmap(MAP_FIXED).
3639 // See test_fixed_dst_size_fileoff for more.
3640 static results_t *
test_fixed_dst_size(kern_return_t (* func)(MAP_T map,mach_vm_address_t dst,mach_vm_size_t size),const char * testname)3641 test_fixed_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t dst, mach_vm_size_t size), const char *testname)
3642 {
3643 	MAP_T map SMART_MAP;
3644 	start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);  // no base addr
3645 	results_t *results = alloc_results(testname, trials->count);
3646 	for (unsigned i = 0; i < trials->count; i++) {
3647 		start_size_trial_t trial = trials->list[i];
3648 		// Try to create an allocation for mmap to overwrite.
3649 		mach_vm_address_t dst_alloc;
3650 		mach_vm_size_t dst_size;
3651 		bool should_panic;
3652 		bool should_slide_trial;
3653 		allocate_for_mmap_fixed(map, trial, &dst_alloc, &dst_size, &should_panic, &should_slide_trial);
3654 		if (should_panic) {
3655 			append_result(results, PANIC, trial.name);
3656 			continue;
3657 		}
3658 		if (should_slide_trial) {
3659 			trial = slide_trial(trial, dst_alloc);
3660 		}
3661 
3662 		kern_return_t ret = func(map, trial.start, trial.size);
3663 
3664 		if (dst_alloc != 0) {
3665 			(void)mach_vm_deallocate(map, dst_alloc, dst_size);
3666 		}
3667 		append_result(results, ret, trial.name);
3668 	}
3669 	return results;
3670 }
3671 
3672 
3673 static bool
will_wire_function_panic_due_to_alignment(mach_vm_address_t start,mach_vm_address_t end)3674 will_wire_function_panic_due_to_alignment(mach_vm_address_t start, mach_vm_address_t end)
3675 {
3676 	// Start and end must be page aligned
3677 	if (start & PAGE_MASK) {
3678 		return true;
3679 	}
3680 	if (end & PAGE_MASK) {
3681 		return true;
3682 	}
3683 	return false;
3684 }
3685 
3686 /*
3687  * This function is basically trying to determine if this address is one vm_wire would find in the vm_map and attempt to wire.
3688  * This is because due to the environment in which our test runs, vm_tag_bt() returns VM_KERN_MEMORY_NONE.
3689  * Trying to wire with VM_KERN_MEMORY_NONE results in a panic due to asserts in VM_OBJECT_WIRED_PAGE_UPDATE_END.
3690  */
3691 static bool
will_wire_function_panic_due_to_vm_tag(mach_vm_address_t addr)3692 will_wire_function_panic_due_to_vm_tag(mach_vm_address_t addr)
3693 {
3694 	return (addr > (KB16 * 2)) && (addr < (-KB16 * 2));
3695 }
3696 
3697 static inline void
check_mach_vm_allocate_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_size_t size,mach_vm_address_t saved_start,int flags,MAP_T map)3698 check_mach_vm_allocate_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_size_t size,
3699     mach_vm_address_t saved_start, int flags, MAP_T map)
3700 {
3701 	if (*kr == KERN_SUCCESS) {
3702 		if (size == 0) {
3703 			if (addr != 0) {
3704 				*kr = OUT_PARAM_BAD;
3705 			}
3706 		} else {
3707 			if (is_fixed(flags)) {
3708 				if (addr != trunc_down_map(map, saved_start)) {
3709 					*kr = OUT_PARAM_BAD;
3710 				}
3711 			}
3712 		}
3713 	} else {
3714 		if (saved_start != addr) {
3715 			*kr = OUT_PARAM_BAD;
3716 		}
3717 	}
3718 }
3719 
3720 #pragma clang diagnostic pop
3721 
3722 // VM_PARAMETER_VALIDATION_H
3723 #endif
3724