#ifndef VM_PARAMETER_VALIDATION_H
#define VM_PARAMETER_VALIDATION_H


/*
 * Common Naming Conventions:
 * call_* functions are harnesses used to call a single function under test.
 * They take all arguments needed to call the function, and avoid calling the function with values that would PANIC.
 * test_* functions are used to call the call_ functions. They iterate through the interesting values of each parameter
 * and provide those as arguments to the call_ functions.
 *
 * test_* functions are named in the following way:
 * Arguments under test are put at the end of the name, e.g. test_mach_vm_prot tests a vm_prot_t.
 * test_mach_... functions test a function whose first argument is a MAP_T.
 * test_unix_... functions test a unix-y function, meaning it doesn't take a MAP_T.
 * In kernel context, it operates on current_map() instead of an arbitrary vm_map_t.
 * test_..._with_allocated_... means an allocation has already been created, and some parameters referring to that allocation are passed in.
 *
 * Common Abbreviations:
 * ssz: Start + Start + Size
 * ssoo: Start + Size + Offset + Object
 * sso: Start + Start + Offset
 */

#include <sys/mman.h>
#if KERNEL

#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_reclaim.h>
#include <mach/vm_reclaim_private.h>
#include <mach/mach_types.h>
#include <mach/mach_host.h>
#include <mach/memory_object.h>
#include <mach/memory_entry.h>
#include <mach/mach_vm_server.h>

#include <device/device_port.h>
#include <sys/mman.h>
#include <sys/errno.h>
#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_kern_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_memtag.h>
#include <vm/vm_memory_entry.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_iokit.h>
#include <kern/ledger.h>
extern ledger_template_t task_ledger_template;

#define FLAGS_AND_TAG(f, t) ({ \
	vm_map_kernel_flags_t vmk_flags; \
	vm_map_kernel_flags_set_vmflags(&vmk_flags, f, t); \
	vmk_flags; \
})
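
/*
 * Illustrative use (not part of the original header): FLAGS_AND_TAG combines
 * plain vm_flags and an allocation tag into a vm_map_kernel_flags_t for
 * kernel-side mapping calls, e.g.
 *   vm_map_kernel_flags_t vmk = FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK);
 */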

#else // KERNEL

#include <TargetConditionals.h>

#endif // KERNEL


// ignore some warnings inside this file
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeclaration-after-statement"
#pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#pragma clang diagnostic ignored "-Wpedantic"
#pragma clang diagnostic ignored "-Wgcc-compat"

/*
 * Invalid values for various types. These are used by the out-parameter tests.
 * UNLIKELY_* means the value is not 100% guaranteed to be invalid for that type;
 * it is merely a very unlikely value for it. Tests should not compare against
 * UNLIKELY_ values unless there is an explicit reason the value cannot occur.
 *
 * INVALID_* means the value is 100% guaranteed to be invalid and can safely be compared against.
 */

#define UNLIKELY_INITIAL_ADDRESS 0xabababab
/*
 * It's important for us to never have a test with a size like
 * UNLIKELY_INITIAL_SIZE, and for this to stay non page aligned.
 * See comment in call_mach_memory_entry_map_size__start_size for more info
 */
#define UNLIKELY_INITIAL_SIZE 0xabababab
#define UNLIKELY_INITIAL_PPNUM 0xabababab
#define UNLIKELY_INITIAL_MACH_PORT ((mach_port_t) 0xbabababa)
#define UNLIKELY_INITIAL_VID 0xbabababa
// This cannot possibly be a valid vnode pointer: vnodes are pointers, and (void *)-1 is never a valid pointer
#define INVALID_VNODE_PTR ((void *) -1)
// This cannot possibly be a valid vm_map_copy_t for the same reason
#define INVALID_VM_MAP_COPY ((vm_map_copy_t) (void *) -1)
// This cannot be a purgable state (see vm_purgable.h); it's way above the last valid state
#define INVALID_PURGABLE_STATE 0xababab
static_assert(INVALID_PURGABLE_STATE > VM_PURGABLE_STATE_MAX, "This test requires a purgable state above the max");
// Disposition values are generated by OR-ing VM_PAGE_QUERY_* values together.
// This cannot be a valid disposition because it's above the largest possible OR of those values.
#define INVALID_DISPOSITION_VALUE 0xffffff0
#define INVALID_INHERIT 0xbaba
static_assert(INVALID_INHERIT > VM_INHERIT_LAST_VALID, "This test requires an inheritance above the max");

#define INVALID_INITIAL_VID 0xbabababa
// output buffer size for kext/xnu sysctl tests
// note: 1 GB is too big for watchOS
static const int64_t SYSCTL_OUTPUT_BUFFER_SIZE = 512 * 1024 * 1024; // 512 MB

// caller name (kernel/kext/userspace), used to label the output
#if KERNEL
# define CALLER_NAME "kernel"
#else
# define CALLER_NAME "userspace"
#endif

// os name, used to label the output
#if KERNEL
# if XNU_TARGET_OS_OSX
#  define OS_NAME "macos"
# elif XNU_TARGET_OS_IOS
#  define OS_NAME "ios"
# elif XNU_TARGET_OS_TV
#  define OS_NAME "tvos"
# elif XNU_TARGET_OS_WATCH
#  define OS_NAME "watchos"
# elif XNU_TARGET_OS_BRIDGE
#  define OS_NAME "bridgeos"
# else
#  define OS_NAME "unknown-os"
# endif
#else
# if TARGET_OS_OSX
#  define OS_NAME "macos"
# elif TARGET_OS_MACCATALYST
#  define OS_NAME "catalyst"
# elif TARGET_OS_IOS
#  define OS_NAME "ios"
# elif TARGET_OS_TV
#  define OS_NAME "tvos"
# elif TARGET_OS_WATCH
#  define OS_NAME "watchos"
# elif TARGET_OS_BRIDGE
#  define OS_NAME "bridgeos"
# else
#  define OS_NAME "unknown-os"
# endif
#endif

// architecture name, used to label the output
#if KERNEL
# if __i386__
#  define ARCH_NAME "i386"
# elif __x86_64__
#  define ARCH_NAME "x86_64"
# elif __arm64__ && __LP64__
#  define ARCH_NAME "arm64"
# elif __arm64__ && !__LP64__
#  define ARCH_NAME "arm64_32"
# elif __arm__
#  define ARCH_NAME "arm"
# else
#  define ARCH_NAME "unknown-arch"
# endif
#else
# if TARGET_CPU_X86
#  define ARCH_NAME "i386"
# elif TARGET_CPU_X86_64
#  define ARCH_NAME "x86_64"
# elif TARGET_CPU_ARM64 && __LP64__
#  define ARCH_NAME "arm64"
# elif TARGET_CPU_ARM64 && !__LP64__
#  define ARCH_NAME "arm64_32"
# elif TARGET_CPU_ARM
#  define ARCH_NAME "arm"
# else
#  define ARCH_NAME "unknown-arch"
# endif
#endif

#if KERNEL
# define MAP_T vm_map_t
#else
# define MAP_T mach_port_t
#endif

// Mach has new-style functions with 64-bit address and size
// and old-style functions with pointer-size address and size.
// On U64 platforms both names send the same MIG message
// and run the same kernel code, so we need not test both.
// On U32 platforms they are different inside the kernel.
// fixme for kext/kernel, verify that vm32 entrypoints are not used and not exported
#if KERNEL || __LP64__
# define TEST_OLD_STYLE_MACH 0
#else
# define TEST_OLD_STYLE_MACH 1
#endif

// always 64-bit: addr_t, mach_vm_address/size_t, memory_object_size/offset_t
// always 32-bit: mach_msg_type_number_t, natural_t
// pointer-size: void*, vm_address_t, vm_size_t
typedef uint64_t addr_t;

// We often use 4KB or 16KB instead of PAGE_SIZE
// (for example using 16KB instead of PAGE_SIZE to avoid Rosetta complications)
#define KB4 ((addr_t)4*1024)
#define KB16 ((addr_t)16*1024)

// Allocation size commonly used in tests.
// This size is big enough that our trials of small
// address offsets and sizes will still fit inside it.
#define TEST_ALLOC_SIZE (4 * KB16)

// Magic return codes used for in-band signalling.
// These must avoid kern_return_t and errno values.
#define BUSTED        -99  // trial is broken
#define IGNORED       -98  // trial not performed for acceptable reasons
#define ZEROSIZE      -97  // trial succeeded because size==0 (FAKE tests only)
#define PANIC         -96  // trial not performed because it would provoke a panic
#define GUARD         -95  // trial not performed because it would provoke EXC_GUARD
#define ACCEPTABLE    -94  // trial should be considered successful no matter what the golden result is
#define OUT_PARAM_BAD -93  // trial has incorrect setting of out parameter values

static inline bool
is_fake_error(int err)
{
	return err == BUSTED || err == IGNORED || err == ZEROSIZE ||
	    err == PANIC || err == GUARD || err == OUT_PARAM_BAD;
}

// Return the count of a (non-decayed!) array.
#define countof(array) (sizeof(array) / sizeof((array)[0]))

#if !KERNEL
static inline uint64_t
VM_MAP_PAGE_SIZE(MAP_T map __unused)
{
	// fixme wrong for out-of-process maps
	// on platforms that support processes with two different page sizes
	return PAGE_SIZE;
}

static inline uint64_t
VM_MAP_PAGE_MASK(MAP_T map __unused)
{
	// fixme wrong for out-of-process maps
	// on platforms that support processes with two different page sizes
	return PAGE_MASK;
}
#endif


#define IMPL(T) \
	/* Round up to the given page mask. */ \
	__attribute__((overloadable, used)) \
	static inline T \
	vm_sanitize_map_round_page_mask(T addr, uint64_t pagemask) { \
		return (addr + (T)pagemask) & ~((T)pagemask); \
	} \
 \
	/* Round up to the given page size. */ \
	__attribute__((overloadable, used)) \
	static inline T \
	round_up_page(T addr, uint64_t pagesize) { \
		return vm_sanitize_map_round_page_mask(addr, pagesize - 1); \
	} \
 \
	/* Round up to the given map's page size. */ \
	__attribute__((overloadable, used)) \
	static inline T \
	round_up_map(MAP_T map, T addr) { \
		return vm_sanitize_map_round_page_mask(addr, VM_MAP_PAGE_MASK(map)); \
	} \
 \
	/* Truncate to the given page mask. */ \
	__attribute__((overloadable, used)) \
	static inline T \
	vm_sanitize_map_trunc_page_mask(T addr, uint64_t pagemask) \
	{ \
		return addr & ~((T)pagemask); \
	} \
 \
	/* Truncate to the given page size. */ \
	__attribute__((overloadable, used)) \
	static inline T \
	trunc_down_page(T addr, uint64_t pagesize) \
	{ \
		return vm_sanitize_map_trunc_page_mask(addr, pagesize - 1); \
	} \
 \
	/* Truncate to the given map's page size. */ \
	__attribute__((overloadable, used)) \
	static inline T \
	trunc_down_map(MAP_T map, T addr) \
	{ \
		return vm_sanitize_map_trunc_page_mask(addr, VM_MAP_PAGE_MASK(map)); \
	} \
 \
	__attribute__((overloadable, unavailable("use round_up_page instead"))) \
	extern T \
	round_up(T addr, uint64_t pagesize); \
	__attribute__((overloadable, unavailable("use trunc_down_page instead"))) \
	extern T \
	trunc_down(T addr, uint64_t pagesize);

IMPL(uint64_t)
IMPL(uint32_t)
#undef IMPL
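
/*
 * Illustrative values (assuming a 16 KB page, i.e. mask 0x3fff):
 *   round_up_page((uint64_t)0x4001, KB16)   == 0x8000
 *   trunc_down_page((uint64_t)0x4001, KB16) == 0x4000
 * The page size must be a power of two for the mask arithmetic to hold.
 */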


// duplicate the logic of VM's vm_map_range_overflows()
// false == good start+size combo, true == bad combo
#define IMPL(T) \
	__attribute__((overloadable, used)) \
	static bool \
	vm_sanitize_range_overflows_allow_zero(T start, T size, T pgmask) \
	{ \
		if (size == 0) { \
			return false; \
		} \
 \
		T sum; \
		if (__builtin_add_overflow(start, size, &sum)) { \
			return true; \
		} \
 \
		T aligned_start = vm_sanitize_map_trunc_page_mask(start, pgmask); \
		T aligned_end = vm_sanitize_map_round_page_mask(start + size, pgmask); \
		if (aligned_end <= aligned_start) { \
			return true; \
		} \
 \
		return false; \
	} \
 \
	/* like vm_sanitize_range_overflows_allow_zero(), but without the */ \
	/* unconditional approval of size==0 */ \
	__attribute__((overloadable, used)) \
	static bool \
	vm_sanitize_range_overflows_strict_zero(T start, T size, T pgmask) \
	{ \
		T sum; \
		if (__builtin_add_overflow(start, size, &sum)) { \
			return true; \
		} \
 \
		T aligned_start = vm_sanitize_map_trunc_page_mask(start, pgmask); \
		T aligned_end = vm_sanitize_map_round_page_mask(start + size, pgmask); \
		if (aligned_end <= aligned_start) { \
			return true; \
		} \
 \
		return false; \
	} \

IMPL(uint64_t)
IMPL(uint32_t)
#undef IMPL
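
/*
 * Illustrative values (pgmask == KB4 - 1):
 *   vm_sanitize_range_overflows_allow_zero((uint64_t)0x1000, (uint64_t)0, KB4 - 1)  -> false (size 0 always passes)
 *   vm_sanitize_range_overflows_strict_zero((uint64_t)0x1000, (uint64_t)0, KB4 - 1) -> true  (rounded end == start)
 *   vm_sanitize_range_overflows_allow_zero(~0ull, (uint64_t)2, KB4 - 1)             -> true  (start + size wraps)
 */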


// return true if the process is running under Rosetta translation
// https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment#Determine-Whether-Your-App-Is-Running-as-a-Translated-Binary
static bool
isRosetta()
{
#if KERNEL
	return false;
#else
	int out_value = 0;
	size_t io_size = sizeof(out_value);
	if (sysctlbyname("sysctl.proc_translated", &out_value, &io_size, NULL, 0) == 0) {
		assert(io_size >= sizeof(out_value));
		return out_value;
	}
	return false;
#endif
}

// Needed to distinguish Rosetta kernel runs from trial-name generation driven by kernel golden files.
#if KERNEL
#define kern_trialname_generation FALSE
#else
static bool kern_trialname_generation = FALSE;
#endif
static addr_t trial_page_size = 0;

static inline addr_t
adjust_page_size()
{
	addr_t test_page_size = PAGE_SIZE;
#if !KERNEL && __x86_64__
	// Handle kernel page size variation when recreating trial names for golden files in userspace.
	if (kern_trialname_generation && isRosetta()) {
		test_page_size = trial_page_size;
	}
#endif // !KERNEL && __x86_64__
	return test_page_size;
}

#if KERNEL
// Knobs controlled from userspace (and passed in MSB of the file_descriptor)
extern bool kernel_generate_golden;
#else
// Knobs controlled by environment variables
extern bool dump;
extern bool generate_golden;
extern bool dump_golden;
extern int out_param_bad_count;
extern bool should_test_results;
static void
read_env()
{
	dump = (getenv("DUMP_RESULTS") != NULL);
	dump_golden = (getenv("DUMP_GOLDEN_IMAGE") != NULL);
	// Shouldn't do both
	generate_golden = (getenv("GENERATE_GOLDEN_IMAGE") != NULL) && !dump_golden;
	// Only test when no other golden image flag is set
	should_test_results = (getenv("SKIP_TESTS") == NULL) && !dump_golden && !generate_golden;
}
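
/*
 * Example invocation (illustrative; the test binary name is hypothetical):
 *   DUMP_RESULTS=1 ./vm_parameter_validation          # dump every trial result
 *   GENERATE_GOLDEN_IMAGE=1 ./vm_parameter_validation # write a new golden file
 */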
#endif


/////////////////////////////////////////////////////
// String functions that work in both kernel and userspace.

// Test output function.
// This prints either to stdout (userspace tests) or to a userspace buffer (kernel sysctl tests).
// Golden test generation in userspace also writes to a buffer (GOLDEN_OUTPUT_BUF).
#if KERNEL
extern void testprintf(const char *, ...) __printflike(1, 2);
#define goldenprintf testprintf
#else
#define testprintf printf
extern void goldenprintf(const char *, ...) __printflike(1, 2);
#endif

// kstrdup() is like strdup() but in the kernel it uses kalloc_data()
static inline char *
kstrdup(const char *str)
{
#if KERNEL
	size_t size = strlen(str) + 1;
	char *copy = kalloc_data(size, Z_WAITOK | Z_ZERO);
	memcpy(copy, str, size);
	return copy;
#else
	return strdup(str);
#endif
}

// kfree_str() is like free() but in the kernel it uses kfree_data_addr()
static inline void
kfree_str(char *str)
{
#if KERNEL
	kfree_data_addr(str);
#else
	free(str);
#endif
}

// kasprintf() is like asprintf() but in the kernel it uses kalloc_data()

#if !KERNEL
# define kasprintf asprintf
#else
extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
static inline int
kasprintf(char ** __restrict out_str, const char * __restrict format, ...) __printflike(2, 3)
{
	va_list args1, args2;

	// compute length
	char c;
	va_start(args1, format);
	va_copy(args2, args1);
	int len1 = vsnprintf(&c, sizeof(c), format, args1);
	va_end(args1);
	if (len1 < 0) {
		*out_str = NULL;
		return len1;
	}

	// allocate and print
	char *str = kalloc_data(len1 + 1, Z_NOFAIL);
	int len2 = vsnprintf(str, len1 + 1, format, args2);
	va_end(args2);
	if (len2 < 0) {
		kfree_data_addr(str);
		*out_str = NULL;
		// report the failure, not the stale length from the first pass
		return len2;
	}
	assert(len1 == len2);

	*out_str = str;
	return len1;
}
#endif // KERNEL
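
/*
 * Illustrative use (works in both kernel and userspace builds; the format
 * arguments are hypothetical):
 *   char *name;
 *   kasprintf(&name, "start 0x%llx size 0x%llx", (uint64_t)start, (uint64_t)size);
 *   ...
 *   kfree_str(name);
 */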


/////////////////////////////////////////////////////
// Record trials and return values from tested functions (BSD int or Mach kern_return_t)

// Maintain a list of known "smart" trial generator functions (trial formulae), as
// these are included in the golden result list (the enum forces people to
// keep the list up to date when adding new functions).
#define TRIALSFORMULA_ENUM(VARIANT) \
	VARIANT(eUNKNOWN_TRIALS) \
	VARIANT(eSMART_VM_MAP_KERNEL_FLAGS_TRIALS) \
	VARIANT(eSMART_VM_INHERIT_TRIALS) \
	VARIANT(eSMART_MMAP_KERNEL_FLAGS_TRIALS) \
	VARIANT(eSMART_MMAP_FLAGS_TRIALS) \
	VARIANT(eSMART_GENERIC_FLAG_TRIALS) \
	VARIANT(eSMART_VM_TAG_TRIALS) \
	VARIANT(eSMART_VM_PROT_TRIALS) \
	VARIANT(eSMART_VM_PROT_PAIR_TRIALS) \
	VARIANT(eSMART_LEDGER_TAG_TRIALS) \
	VARIANT(eSMART_LEDGER_FLAG_TRIALS) \
	VARIANT(eSMART_ADDR_TRIALS) \
	VARIANT(eSMART_SIZE_TRIALS) \
	VARIANT(eSMART_START_SIZE_TRIALS) \
	VARIANT(eSMART_START_SIZE_OFFSET_OBJECT_TRIALS) \
	VARIANT(eSMART_START_SIZE_OFFSET_TRIALS) \
	VARIANT(eSMART_SIZE_SIZE_TRIALS) \
	VARIANT(eSMART_SRC_DST_SIZE_TRIALS) \
	VARIANT(eSMART_FILEOFF_DST_SIZE_TRIALS) \
	VARIANT(eSMART_VM_BEHAVIOR_TRIALS) \
	VARIANT(eSMART_VM_ADVISE_TRIALS) \
	VARIANT(eSMART_VM_SYNC_TRIALS) \
	VARIANT(eSMART_VM_MSYNC_TRIALS) \
	VARIANT(eSMART_VM_MACHINE_ATTRIBUTE_TRIALS) \
	VARIANT(eSMART_VM_PURGEABLE_AND_STATE_TRIALS) \
	VARIANT(eSMART_START_SIZE_START_SIZE_TRIALS) \
	VARIANT(eSMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS) \
	VARIANT(eSMART_RECLAMATION_BUFFER_INIT_TRIALS)

#define TRIALSFORMULA_ENUM_VARIANT(NAME) NAME,
typedef enum {
	TRIALSFORMULA_ENUM(TRIALSFORMULA_ENUM_VARIANT)
} trialsformula_t;

#define TRIALSARGUMENTS_NONE 0
#define TRIALSARGUMENTS_SIZE 2

// formula enum id to string
#define TRIALSFORMULA_ENUM_STRING(NAME) case NAME: return #NAME;
const char *
trialsformula_name(trialsformula_t formula)
{
	switch (formula) {
		TRIALSFORMULA_ENUM(TRIALSFORMULA_ENUM_STRING)
	default:
		testprintf("Unknown formula_t %d\n", formula);
		assert(false);
	}
}

#define TRIALSFORMULA_ENUM_FROM_STRING(NAME) \
	if (strncmp(string, #NAME, strlen(#NAME)) == 0) return NAME;

// formula name to enum id
trialsformula_t
trialsformula_from_string(const char *string)
{
	TRIALSFORMULA_ENUM(TRIALSFORMULA_ENUM_FROM_STRING)
	// else
	testprintf("Unknown formula %s\n", string);
	assert(false);
}

// ret: return value of this trial
// name: name of this trial, including the input values passed in
typedef struct {
	int ret;
	char *name;
} result_t;

typedef struct {
	const char *testname;
	char *testconfig;
	trialsformula_t trialsformula;
	uint64_t trialsargs[TRIALSARGUMENTS_SIZE];
	unsigned capacity;
	unsigned count;
	unsigned tested_count;
	result_t list[];
} results_t;

extern results_t *golden_list[];
extern results_t *kern_list[];
static uint32_t num_tests = 0;      // num of tests in golden list
static uint32_t num_kern_tests = 0; // num of tests in kernel results list

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname, char *testconfig,
    trialsformula_t trialsformula, uint64_t trialsargs[static TRIALSARGUMENTS_SIZE],
    unsigned capacity)
{
	results_t *results;
#if KERNEL
	results = kalloc_type(results_t, result_t, capacity, Z_WAITOK | Z_ZERO);
#else
	results = calloc(sizeof(results_t) + capacity * sizeof(result_t), 1);
#endif
	assert(results != NULL);
	results->testname = testname;
	results->testconfig = testconfig;
	results->trialsformula = trialsformula;
	for (unsigned i = 0; i < TRIALSARGUMENTS_SIZE; i++) {
		results->trialsargs[i] = trialsargs[i];
	}
	results->capacity = capacity;
	results->count = 0;
	results->tested_count = 0;
	return results;
}

static char *
alloc_default_testconfig(void)
{
	char *result;
	kasprintf(&result, "%s %s %s%s",
	    OS_NAME, ARCH_NAME,
	    kern_trialname_generation ? "kernel" : CALLER_NAME,
	    !kern_trialname_generation && isRosetta() ? " rosetta" : "");
	return result;
}

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname,
    trialsformula_t trialsformula, uint64_t *trialsargs, size_t trialsargs_count,
    unsigned capacity)
{
	assert(trialsargs_count == TRIALSARGUMENTS_SIZE);
	return alloc_results(testname, alloc_default_testconfig(), trialsformula, trialsargs, capacity);
}

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname, trialsformula_t trialsformula, uint64_t trialsarg0, unsigned capacity)
{
	uint64_t trialsargs[TRIALSARGUMENTS_SIZE] = {trialsarg0, TRIALSARGUMENTS_NONE};
	return alloc_results(testname, trialsformula, trialsargs, TRIALSARGUMENTS_SIZE, capacity);
}

static __attribute__((overloadable))
results_t *
alloc_results(const char *testname, trialsformula_t trialsformula, unsigned capacity)
{
	uint64_t trialsargs[TRIALSARGUMENTS_SIZE] = {TRIALSARGUMENTS_NONE, TRIALSARGUMENTS_NONE};
	return alloc_results(testname, trialsformula, trialsargs, TRIALSARGUMENTS_SIZE, capacity);
}

static void __unused
dealloc_results(results_t *results)
{
	for (unsigned int i = 0; i < results->count; i++) {
		kfree_str(results->list[i].name);
	}
	kfree_str(results->testconfig);
#if KERNEL
	kfree_type(results_t, result_t, results->capacity, results);
#else
	free(results);
#endif
}

static void __attribute__((overloadable, unused))
append_result(results_t *results, int ret, const char *name)
{
	// halt if the results list is already full
	// fixme reallocate instead if we can't always choose the size in advance
	assert(results->count < results->capacity);

	// name may be freed before we make use of it
	char *name_cpy = kstrdup(name);
	assert(name_cpy);
	results->list[results->count++] =
	    (result_t){.ret = ret, .name = name_cpy};
}
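
/*
 * Typical lifecycle of a results list (illustrative sketch; "test_foo" and
 * trial_name are hypothetical placeholders):
 *   results_t *results = alloc_results("test_foo", eSMART_SIZE_TRIALS, trials->count);
 *   append_result(results, ret, trial_name);   // once per trial
 *   process_results(results);                  // dump and/or golden-compare (defined below)
 *   dealloc_results(results);
 */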


#define TESTNAME_DELIMITER "TESTNAME "
#define RESULTCOUNT_DELIMITER "RESULT COUNT "
#define TESTRESULT_DELIMITER " "
#define TESTCONFIG_DELIMITER " TESTCONFIG "
#define TRIALSFORMULA_DELIMITER "TRIALSFORMULA "
#define TRIALSARGUMENTS_DELIMITER "TRIALSARGUMENTS"
#define KERN_TESTRESULT_DELIMITER " RESULT "

// print results, unformatted
// This output is read by populate_kernel_results()
// and by tools/format_vm_parameter_validation.py
static results_t *
__dump_results(results_t *results)
{
	testprintf(TESTNAME_DELIMITER "%s\n", results->testname);
	testprintf(RESULTCOUNT_DELIMITER "%d\n", results->count);
	testprintf(TESTCONFIG_DELIMITER "%s\n", results->testconfig);

	for (unsigned i = 0; i < results->count; i++) {
		testprintf(KERN_TESTRESULT_DELIMITER "%d, %s\n", results->list[i].ret, results->list[i].name);
	}

	results->tested_count += 1;
	return results;
}

// This output is read by populate_golden_results()
static results_t *
dump_golden_results(results_t *results)
{
	trial_page_size = PAGE_SIZE;
	goldenprintf(TESTNAME_DELIMITER "%s\n", results->testname);
	goldenprintf(TRIALSFORMULA_DELIMITER "%s %s %llu,%llu,%llu\n",
	    trialsformula_name(results->trialsformula), TRIALSARGUMENTS_DELIMITER,
	    results->trialsargs[0], results->trialsargs[1], trial_page_size);
	goldenprintf(RESULTCOUNT_DELIMITER "%d\n", results->count);

	for (unsigned i = 0; i < results->count; i++) {
		goldenprintf(TESTRESULT_DELIMITER "%d: %d\n", i, results->list[i].ret);
#if !KERNEL
		if (results->list[i].ret == OUT_PARAM_BAD) {
			out_param_bad_count += 1;
			T_FAIL("Out parameter violation in test %s - %s\n", results->testname, results->list[i].name);
		}
#endif
	}

	return results;
}

#if !KERNEL
// Comparator function for sorting a result_t list by name
static int
compare_names(const void *a, const void *b)
{
	assert(((const result_t *)a)->name);
	assert(((const result_t *)b)->name);
	return strcmp(((const result_t *)a)->name, ((const result_t *)b)->name);
}

static unsigned
binary_search(result_t *list, unsigned count, const result_t *trial)
{
	assert(count > 0);
	const char *name = trial->name;
	// half-open interval [left, right) avoids unsigned wraparound
	// when the probe sorts before every list entry
	unsigned left = 0, right = count;
	while (left < right) {
		unsigned mid = left + (right - left) / 2;
		int cmp = strcmp(list[mid].name, name);
		if (cmp == 0) {
			return mid;
		} else if (cmp < 0) {
			left = mid + 1;
		} else {
			right = mid;
		}
	}
	return UINT_MAX; // Not found
}

static inline bool
trial_name_equals(const result_t *a, const result_t *b)
{
	// NB: the strlen check guards against a shorter name matching a longer one.
	if (strlen(a->name) == strlen(b->name) && compare_names(a, b) == 0) {
		return true;
	}
	return false;
}

static const result_t *
get_golden_result(results_t *golden_results, const result_t *trial, unsigned trial_idx)
{
	if (golden_results->trialsformula == eUNKNOWN_TRIALS) {
		// golden results don't contain trial names
		T_LOG("%s: update test's alloc_results to have a valid trialsformula_t\n", golden_results->testname);
		return NULL;
	}

	if (trial_idx < golden_results->count &&
	    golden_results->list[trial_idx].name &&
	    trial_name_equals(&golden_results->list[trial_idx], trial)) {
		// "fast search" path taken when the golden file is in sync with the test.
		return &golden_results->list[trial_idx];
	}

	// "slow search" path taken when test indexes are not aligned. Sort the array
	// by name and do a binary search.
	qsort(golden_results->list, golden_results->count, sizeof(result_t), compare_names);
	unsigned g_idx = binary_search(golden_results->list, golden_results->count, trial);
	if (g_idx < golden_results->count) {
		return &golden_results->list[g_idx];
	}

	return NULL;
}

static void
test_results(results_t *golden_results, results_t *results)
{
	bool passed = TRUE;
	unsigned result_count = results->count;
	unsigned acceptable_count = 0;
	const unsigned acceptable_max = 16; // log up to this many ACCEPTABLE results
	const result_t *golden_result = NULL;
	if (golden_results->count != results->count) {
		T_LOG("%s: number of iterations mismatch (%u vs %u)",
		    results->testname, golden_results->count, results->count);
	}
	for (unsigned i = 0; i < result_count; i++) {
		golden_result = get_golden_result(golden_results, &results->list[i], i);
		if (golden_result) {
			if (results->list[i].ret == ACCEPTABLE) {
				// trial has declared itself to be correct
				// no matter what the golden result is
				acceptable_count++;
				if (acceptable_count <= acceptable_max) {
					T_LOG("%s RESULT ACCEPTABLE (expected %d), %s\n",
					    results->testname,
					    golden_result->ret, results->list[i].name);
				}
			} else if (results->list[i].ret != golden_result->ret) {
				T_FAIL("%s RESULT %d (expected %d), %s\n",
				    results->testname, results->list[i].ret,
				    golden_result->ret, results->list[i].name);
				passed = FALSE;
			}
		} else {
			// new trial not present in golden results
			T_FAIL("%s NEW RESULT %d, %s - (regenerate golden files to fix this)\n",
			    results->testname, results->list[i].ret, results->list[i].name);
			passed = FALSE;
		}
	}

	if (acceptable_count > acceptable_max) {
		T_LOG("%s %u more RESULT ACCEPTABLE trials not logged\n",
		    results->testname, acceptable_count - acceptable_max);
	}
	if (passed) {
		T_PASS("%s passed\n", results->testname);
	}
}
#endif

#if !KERNEL
static results_t *
test_name_to_golden_results(const char *testname);
#endif

static results_t *
process_results(results_t *results)
{
#if KERNEL
	if (kernel_generate_golden) {
		return dump_golden_results(results);
	} else {
		return __dump_results(results);
	}
#else
	results_t *golden_results = NULL;

	if (dump && !generate_golden) {
		__dump_results(results);
	}

	if (generate_golden) {
		dump_golden_results(results);
	}

	if (should_test_results) {
		golden_results = test_name_to_golden_results(results->testname);

		if (golden_results) {
			test_results(golden_results, results);
		} else {
			T_FAIL("New test %s found, update golden list to allow return code testing", results->testname);
			// Dump results if not done previously
			if (!dump) {
				__dump_results(results);
			}
		}
	}

	return results;
#endif
}

static inline mach_vm_address_t
truncate_vm_map_addr_with_flags(MAP_T map, mach_vm_address_t addr, int flags)
{
	mach_vm_address_t truncated_addr = addr;
	if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) {
		// VM_FLAGS_RETURN_4K_DATA_ADDR means return a 4k aligned address rather
		// than the base of the page. Truncate to 4k.
		truncated_addr = trunc_down_page(addr, KB4);
	} else if (flags & VM_FLAGS_RETURN_DATA_ADDR) {
		// On VM_FLAGS_RETURN_DATA_ADDR, we expect to get back the unaligned address.
		// Don't truncate.
	} else {
		// Otherwise we truncate to the map page size
		truncated_addr = trunc_down_map(map, addr);
	}
	return truncated_addr;
}


static inline mach_vm_address_t
get_expected_remap_misalignment(MAP_T map, mach_vm_address_t addr, int flags)
{
	mach_vm_address_t misalignment;
	if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) {
		// VM_FLAGS_RETURN_4K_DATA_ADDR means return a 4k aligned address rather
		// than the base of the page. The misalignment is relative to the first 4k page.
		misalignment = addr - trunc_down_page(addr, KB4);
	} else if (flags & VM_FLAGS_RETURN_DATA_ADDR) {
		// On VM_FLAGS_RETURN_DATA_ADDR, we expect to get back the unaligned address.
		// The misalignment is therefore the low bits.
		misalignment = addr - trunc_down_map(map, addr);
	} else {
		// Otherwise we expect it to be aligned
		misalignment = 0;
	}
	return misalignment;
}
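
/*
 * Illustrative values, assuming a 16 KB map page size:
 *   get_expected_remap_misalignment(map, 0x4321, 0)                         == 0     (result is page-aligned)
 *   get_expected_remap_misalignment(map, 0x4321, VM_FLAGS_RETURN_DATA_ADDR) == 0x321 (low bits are kept)
 */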

// absolute and relative offsets are used to specify a trial's values

typedef struct {
	bool is_absolute;
	addr_t offset;
} absolute_or_relative_offset_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	absolute_or_relative_offset_t list[];
} offset_list_t;

static offset_list_t *
allocate_offsets(unsigned capacity)
{
	offset_list_t *offsets;
#if KERNEL
	offsets = kalloc_type(offset_list_t, absolute_or_relative_offset_t, capacity, Z_WAITOK | Z_ZERO);
#else
	offsets = calloc(sizeof(offset_list_t) + capacity * sizeof(absolute_or_relative_offset_t), 1);
#endif
	offsets->count = 0;
	offsets->capacity = capacity;
	return offsets;
}

static void
append_offset(offset_list_t *offsets, bool is_absolute, addr_t offset)
{
	assert(offsets->count < offsets->capacity);
	offsets->list[offsets->count].is_absolute = is_absolute;
	offsets->list[offsets->count].offset = offset;
	offsets->count++;
}
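
/*
 * Illustrative use: record one absolute address and one allocation-relative offset.
 *   offset_list_t *offsets = allocate_offsets(2);
 *   append_offset(offsets, true, 0xabab0000);  // absolute address
 *   append_offset(offsets, false, KB4);        // offset relative to an allocation
 */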


/////////////////////////////////////////////////////
// Generation of trials and their parameter values
// A "trial" is a single execution of a function to be tested

#if KERNEL
#define ALLOC_TRIALS(NAME, new_capacity) \
	(NAME ## _trials_t *)kalloc_type(NAME ## _trials_t, NAME ## _trial_t, \
	    new_capacity, Z_WAITOK | Z_ZERO)
#define FREE_TRIALS(NAME, trials) \
	kfree_type(NAME ## _trials_t, NAME ## _trial_t, trials->capacity, trials)
#else
#define ALLOC_TRIALS(NAME, new_capacity) \
	(NAME ## _trials_t *)calloc(sizeof(NAME ## _trials_t) + (new_capacity) * sizeof(NAME ## _trial_t), 1)
#define FREE_TRIALS(NAME, trials) \
	free(trials)
#endif

#define TRIALS_IMPL(NAME) \
	static NAME ## _trials_t * \
	__attribute__((used)) \
	allocate_ ## NAME ## _trials(unsigned capacity) \
	{ \
		NAME ## _trials_t *trials = ALLOC_TRIALS(NAME, capacity); \
		assert(trials); \
		trials->count = 0; \
		trials->capacity = capacity; \
		return trials; \
	} \
 \
	static void __attribute__((overloadable, used)) \
	free_trials(NAME ## _trials_t *trials) \
	{ \
		FREE_TRIALS(NAME, trials); \
	} \
 \
	static void __attribute__((overloadable, used)) \
	append_trial(NAME ## _trials_t *trials, NAME ## _trial_t new_trial) \
	{ \
		assert(trials->count < trials->capacity); \
		trials->list[trials->count++] = new_trial; \
	} \
 \
	static void __attribute__((overloadable, used)) \
	append_trials(NAME ## _trials_t *trials, NAME ## _trial_t *new_trials, unsigned new_count) \
	{ \
		for (unsigned i = 0; i < new_count; i++) { \
			append_trial(trials, new_trials[i]); \
		} \
	}
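
/*
 * For example, TRIALS_IMPL(vm_inherit) (used below) defines
 * allocate_vm_inherit_trials(), plus free_trials(), append_trial(), and
 * append_trials() overloads that operate on vm_inherit_trials_t.
 */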

// allocate vm_inherit_t trials, and deallocate it at end of scope
#define SMART_VM_INHERIT_TRIALS() \
	__attribute__((cleanup(cleanup_vm_inherit_trials))) \
	= allocate_vm_inherit_trials(countof(vm_inherit_trials_values)); \
	append_trials(trials, vm_inherit_trials_values, countof(vm_inherit_trials_values))
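
/*
 * Typical use (illustrative): declares, fills, and auto-frees the trial list.
 * The variable must be named "trials" because the macro body references it:
 *   vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS();
 */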

// generate vm_inherit_t trials

typedef struct {
	vm_inherit_t value;
	const char *name;
} vm_inherit_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_inherit_trial_t list[];
} vm_inherit_trials_t;


#define VM_INHERIT_TRIAL(new_value) \
	(vm_inherit_trial_t) {.value = (vm_inherit_t)(new_value), .name = "vm_inherit " #new_value}

static_assert(VM_INHERIT_LAST_VALID == VM_INHERIT_NONE,
    "Update this test with new vm_inherit_t values");
static vm_inherit_trial_t vm_inherit_trials_values[] = {
	VM_INHERIT_TRIAL(VM_INHERIT_SHARE),
	VM_INHERIT_TRIAL(VM_INHERIT_COPY),
	VM_INHERIT_TRIAL(VM_INHERIT_NONE),
	// end valid ones
	// note: VM_INHERIT_DONATE_COPY is invalid and unimplemented;
	// VM_INHERIT_LAST_VALID correctly excludes it
	VM_INHERIT_TRIAL(VM_INHERIT_LAST_VALID + 1),
	VM_INHERIT_TRIAL(VM_INHERIT_LAST_VALID + 2),
	VM_INHERIT_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_inherit)

static void
cleanup_vm_inherit_trials(vm_inherit_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_behavior_t trials, and deallocate it at end of scope
#define SMART_VM_BEHAVIOR_TRIALS() \
	__attribute__((cleanup(cleanup_vm_behavior_trials))) \
	= allocate_vm_behavior_trials(countof(vm_behavior_trials_values)); \
	append_trials(trials, vm_behavior_trials_values, countof(vm_behavior_trials_values))

// generate vm_behavior_t trials

typedef struct {
	vm_behavior_t value;
	const char *name;
} vm_behavior_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_behavior_trial_t list[];
} vm_behavior_trials_t;


#define VM_BEHAVIOR_TRIAL(new_value) \
	(vm_behavior_trial_t) {.value = (vm_behavior_t)(new_value), .name = "vm_behavior " #new_value}

static vm_behavior_trial_t vm_behavior_trials_values[] = {
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_DEFAULT),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_RANDOM),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_SEQUENTIAL),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_RSEQNTL),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_WILLNEED),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_DONTNEED),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_FREE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_ZERO_WIRED_PAGES),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_REUSABLE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_REUSE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_CAN_REUSE),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_PAGEOUT),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_ZERO),
	// end valid ones
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_LAST_VALID + 1),
	VM_BEHAVIOR_TRIAL(VM_BEHAVIOR_LAST_VALID + 2),
	VM_BEHAVIOR_TRIAL(0x12345),
	VM_BEHAVIOR_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_behavior)

static void
cleanup_vm_behavior_trials(vm_behavior_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_sync_t trials, and deallocate it at end of scope
#define SMART_VM_SYNC_TRIALS() \
	__attribute__((cleanup(cleanup_vm_sync_trials))) \
	= allocate_vm_sync_trials(countof(vm_sync_trials_values)); \
	append_trials(trials, vm_sync_trials_values, countof(vm_sync_trials_values))

// generate vm_sync_t trials

typedef struct {
	vm_sync_t value;
	const char *name;
} vm_sync_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_sync_trial_t list[];
} vm_sync_trials_t;


#define VM_SYNC_TRIAL(new_value) \
	(vm_sync_trial_t) {.value = (vm_sync_t)(new_value), .name = "vm_sync_t " #new_value}

static vm_sync_trial_t vm_sync_trials_values[] = {
	VM_SYNC_TRIAL(0),
	// start valid values
	VM_SYNC_TRIAL(VM_SYNC_ASYNCHRONOUS),
	VM_SYNC_TRIAL(VM_SYNC_SYNCHRONOUS),
	VM_SYNC_TRIAL(VM_SYNC_INVALIDATE),
	VM_SYNC_TRIAL(VM_SYNC_KILLPAGES),
	VM_SYNC_TRIAL(VM_SYNC_DEACTIVATE),
	VM_SYNC_TRIAL(VM_SYNC_CONTIGUOUS),
	VM_SYNC_TRIAL(VM_SYNC_REUSABLEPAGES),
	// end valid values
	VM_SYNC_TRIAL(1u << 7),
	VM_SYNC_TRIAL(1u << 8),
	VM_SYNC_TRIAL(1u << 9),
	VM_SYNC_TRIAL(1u << 10),
	VM_SYNC_TRIAL(1u << 11),
	VM_SYNC_TRIAL(1u << 12),
	VM_SYNC_TRIAL(1u << 13),
	VM_SYNC_TRIAL(1u << 14),
	VM_SYNC_TRIAL(1u << 15),
	VM_SYNC_TRIAL(1u << 16),
	VM_SYNC_TRIAL(1u << 17),
	VM_SYNC_TRIAL(1u << 18),
	VM_SYNC_TRIAL(1u << 19),
	VM_SYNC_TRIAL(1u << 20),
	VM_SYNC_TRIAL(1u << 21),
	VM_SYNC_TRIAL(1u << 22),
	VM_SYNC_TRIAL(1u << 23),
	VM_SYNC_TRIAL(1u << 24),
	VM_SYNC_TRIAL(1u << 25),
	VM_SYNC_TRIAL(1u << 26),
	VM_SYNC_TRIAL(1u << 27),
	VM_SYNC_TRIAL(1u << 28),
	VM_SYNC_TRIAL(1u << 29),
	VM_SYNC_TRIAL(1u << 30),
	VM_SYNC_TRIAL(1u << 31),
	VM_SYNC_TRIAL(VM_SYNC_ASYNCHRONOUS | VM_SYNC_SYNCHRONOUS),
	VM_SYNC_TRIAL(VM_SYNC_ASYNCHRONOUS | (1u << 7)),
	VM_SYNC_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_sync)

static void
cleanup_vm_sync_trials(vm_sync_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_msync_t trials, and deallocate it at end of scope
#define SMART_VM_MSYNC_TRIALS() \
	__attribute__((cleanup(cleanup_vm_msync_trials))) \
	= allocate_vm_msync_trials(countof(vm_msync_trials_values)); \
	append_trials(trials, vm_msync_trials_values, countof(vm_msync_trials_values))

// generate vm_msync_t trials

typedef struct {
	int value;
	const char *name;
} vm_msync_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_msync_trial_t list[];
} vm_msync_trials_t;


#define VM_MSYNC_TRIAL(new_value) \
	(vm_msync_trial_t) {.value = (int)(new_value), .name = "vm_msync_t " #new_value}

static vm_msync_trial_t vm_msync_trials_values[] = {
	VM_MSYNC_TRIAL(0),
	// start valid values
	VM_MSYNC_TRIAL(MS_ASYNC),
	VM_MSYNC_TRIAL(MS_INVALIDATE),
	VM_MSYNC_TRIAL(MS_KILLPAGES),
	VM_MSYNC_TRIAL(MS_DEACTIVATE),
	VM_MSYNC_TRIAL(MS_SYNC),
	VM_MSYNC_TRIAL(MS_ASYNC | MS_INVALIDATE),
	// end valid values
	VM_MSYNC_TRIAL(1u << 5),
	VM_MSYNC_TRIAL(1u << 6),
	VM_MSYNC_TRIAL(1u << 7),
	VM_MSYNC_TRIAL(1u << 8),
	VM_MSYNC_TRIAL(1u << 9),
	VM_MSYNC_TRIAL(1u << 10),
	VM_MSYNC_TRIAL(1u << 11),
	VM_MSYNC_TRIAL(1u << 12),
	VM_MSYNC_TRIAL(1u << 13),
	VM_MSYNC_TRIAL(1u << 14),
	VM_MSYNC_TRIAL(1u << 15),
	VM_MSYNC_TRIAL(1u << 16),
	VM_MSYNC_TRIAL(1u << 17),
	VM_MSYNC_TRIAL(1u << 18),
	VM_MSYNC_TRIAL(1u << 19),
	VM_MSYNC_TRIAL(1u << 20),
	VM_MSYNC_TRIAL(1u << 21),
	VM_MSYNC_TRIAL(1u << 22),
	VM_MSYNC_TRIAL(1u << 23),
	VM_MSYNC_TRIAL(1u << 24),
	VM_MSYNC_TRIAL(1u << 25),
	VM_MSYNC_TRIAL(1u << 26),
	VM_MSYNC_TRIAL(1u << 27),
	VM_MSYNC_TRIAL(1u << 28),
	VM_MSYNC_TRIAL(1u << 29),
	VM_MSYNC_TRIAL(1u << 30),
	VM_MSYNC_TRIAL(1u << 31),
	VM_MSYNC_TRIAL(MS_ASYNC | MS_SYNC),
	VM_MSYNC_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_msync)

static void __attribute__((used))
cleanup_vm_msync_trials(vm_msync_trials_t **trials)
{
	free_trials(*trials);
}


// allocate advise_t trials, and deallocate it at end of scope
#define SMART_VM_ADVISE_TRIALS() \
	__attribute__((cleanup(cleanup_advise_trials))) \
	= allocate_vm_advise_trials(countof(vm_advise_trials_values)); \
	append_trials(trials, vm_advise_trials_values, countof(vm_advise_trials_values))

// generate advise_t trials

typedef struct {
	int value;
	const char *name;
} vm_advise_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_advise_trial_t list[];
} vm_advise_trials_t;


#define ADVISE_TRIAL(new_value) \
	(vm_advise_trial_t) {.value = (int)(new_value), .name = "advise " #new_value}

static vm_advise_trial_t vm_advise_trials_values[] = {
	ADVISE_TRIAL(MADV_NORMAL),
	ADVISE_TRIAL(MADV_RANDOM),
	ADVISE_TRIAL(MADV_SEQUENTIAL),
	ADVISE_TRIAL(MADV_WILLNEED),
	ADVISE_TRIAL(MADV_DONTNEED),
	ADVISE_TRIAL(MADV_FREE),
	ADVISE_TRIAL(MADV_ZERO_WIRED_PAGES),
	ADVISE_TRIAL(MADV_FREE_REUSABLE),
	ADVISE_TRIAL(MADV_FREE_REUSE),
	ADVISE_TRIAL(MADV_CAN_REUSE),
	ADVISE_TRIAL(MADV_PAGEOUT),
	ADVISE_TRIAL(MADV_ZERO),
	// end valid ones
	ADVISE_TRIAL(MADV_ZERO + 1),
	ADVISE_TRIAL(MADV_ZERO + 2),
	ADVISE_TRIAL(0xffffffff),
};

TRIALS_IMPL(vm_advise)

static void __attribute__((used))
cleanup_advise_trials(vm_advise_trials_t **trials)
{
	free_trials(*trials);
}

// allocate machine_attribute_t trials, and deallocate it at end of scope
#define SMART_VM_MACHINE_ATTRIBUTE_TRIALS() \
	__attribute__((cleanup(cleanup_vm_machine_attribute_trials))) \
	= allocate_vm_machine_attribute_trials(countof(vm_machine_attribute_trials_values)); \
	append_trials(trials, vm_machine_attribute_trials_values, countof(vm_machine_attribute_trials_values))

// generate vm_machine_attribute_t trials

typedef struct {
	vm_machine_attribute_t value;
	const char *name;
} vm_machine_attribute_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_machine_attribute_trial_t list[];
} vm_machine_attribute_trials_t;


#define VM_MACHINE_ATTRIBUTE_TRIAL(new_value) \
	(vm_machine_attribute_trial_t) {.value = (vm_machine_attribute_t)(new_value), .name = "vm_machine_attribute_t " #new_value}

static vm_machine_attribute_trial_t vm_machine_attribute_trials_values[] = {
	VM_MACHINE_ATTRIBUTE_TRIAL(0),
	// start valid ones
	VM_MACHINE_ATTRIBUTE_TRIAL(MATTR_CACHE),
	VM_MACHINE_ATTRIBUTE_TRIAL(MATTR_MIGRATE),
	VM_MACHINE_ATTRIBUTE_TRIAL(MATTR_REPLICATE),
	// end valid ones
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 3),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 4),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 5),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 6),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 7),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 8),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 9),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 10),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 11),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 12),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 13),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 14),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 15),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 16),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 17),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 18),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 19),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 20),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 21),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 22),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 23),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 24),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 25),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 26),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 27),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 28),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 29),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 30),
	VM_MACHINE_ATTRIBUTE_TRIAL(1u << 31),
};

TRIALS_IMPL(vm_machine_attribute)

static void
cleanup_vm_machine_attribute_trials(vm_machine_attribute_trials_t **trials)
{
	free_trials(*trials);
}

// allocate vm_map_kernel_flags trials, and deallocate it at end of scope
#define SMART_VM_MAP_KERNEL_FLAGS_TRIALS() \
	__attribute__((cleanup(cleanup_vm_map_kernel_flags_trials))) \
	= generate_vm_map_kernel_flags_trials()


// generate vm_map_kernel_flags_t trials

typedef struct {
	int flags;
	char *name;
} vm_map_kernel_flags_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	vm_map_kernel_flags_trial_t list[];
} vm_map_kernel_flags_trials_t;

#define VM_MAP_KERNEL_FLAGS_TRIAL(new_flags) \
	(vm_map_kernel_flags_trial_t) {.flags = (int)(new_flags), .name = "vm_map_kernel_flags " #new_flags}

TRIALS_IMPL(vm_map_kernel_flags)

static vm_map_kernel_flags_trials_t *
generate_prefixed_vm_map_kernel_flags_trials(int prefix_flags, const char *prefix_name)
{
	vm_map_kernel_flags_trials_t *trials;
	trials = allocate_vm_map_kernel_flags_trials(32);

	char *str;
#define APPEND(flag) \
	({ \
		kasprintf(&str, "vm_map_kernel_flags %s%s%s", \
		    prefix_name, prefix_flags == 0 ? "" : " | ", #flag); \
		append_trial(trials, (vm_map_kernel_flags_trial_t){ prefix_flags | (int)flag, str }); \
	})

	// First trial is just the prefix flags set, if any.
	// (either ANYWHERE or FIXED | OVERWRITE)
	if (prefix_flags != 0) {
		kasprintf(&str, "vm_map_kernel_flags %s", prefix_name);
		append_trial(trials, (vm_map_kernel_flags_trial_t){ prefix_flags, str });
	}

	// Try each other flag with the prefix flags.
	// Skip FIXED and ANYWHERE and OVERWRITE because they cause
	// memory management changes that the caller may not be prepared for.
	// skip 0x00000000 VM_FLAGS_FIXED
	// skip 0x00000001 VM_FLAGS_ANYWHERE
	APPEND(VM_FLAGS_PURGABLE);
	APPEND(VM_FLAGS_4GB_CHUNK);
	APPEND(VM_FLAGS_RANDOM_ADDR);
	APPEND(VM_FLAGS_NO_CACHE);
	APPEND(VM_FLAGS_RESILIENT_CODESIGN);
	APPEND(VM_FLAGS_RESILIENT_MEDIA);
	APPEND(VM_FLAGS_PERMANENT);
	// skip 0x00001000 VM_FLAGS_TPRO; it only works on some hardware.
	APPEND(0x00002000);
	// skip 0x00004000 VM_FLAGS_OVERWRITE
	APPEND(0x00008000);
	APPEND(VM_FLAGS_SUPERPAGE_MASK); // 0x10000, 0x20000, 0x40000
	APPEND(0x00080000);
	APPEND(VM_FLAGS_RETURN_DATA_ADDR);
	APPEND(VM_FLAGS_RETURN_4K_DATA_ADDR);
	APPEND(VM_FLAGS_ALIAS_MASK);

	return trials;
}

static vm_map_kernel_flags_trials_t *
generate_vm_map_kernel_flags_trials()
{
	vm_map_kernel_flags_trials_t *fixed =
	    generate_prefixed_vm_map_kernel_flags_trials(
		VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, "VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE");
	vm_map_kernel_flags_trials_t *anywhere =
	    generate_prefixed_vm_map_kernel_flags_trials(
		VM_FLAGS_ANYWHERE, "VM_FLAGS_ANYWHERE");
	vm_map_kernel_flags_trials_t *trials =
	    allocate_vm_map_kernel_flags_trials(fixed->count + anywhere->count);
	append_trials(trials, fixed->list, fixed->count);
	append_trials(trials, anywhere->list, anywhere->count);

	// free, not cleanup: trials has stolen their strings
	free_trials(fixed);
	free_trials(anywhere);

	return trials;
}

static void
cleanup_vm_map_kernel_flags_trials(vm_map_kernel_flags_trials_t **trials)
{
	for (size_t i = 0; i < (*trials)->count; i++) {
		kfree_str((*trials)->list[i].name);
	}
	free_trials(*trials);
}


// generate mmap flags trials

typedef struct {
	int flags;
	const char *name;
} mmap_flags_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	mmap_flags_trial_t list[];
} mmap_flags_trials_t;

#define MMAP_FLAGS_TRIAL(new_flags) \
	(mmap_flags_trial_t){ .flags = (int)(new_flags), .name = "mmap flags "#new_flags }

static mmap_flags_trial_t mmap_flags_trials_values[] = {
	MMAP_FLAGS_TRIAL(MAP_FILE),
	MMAP_FLAGS_TRIAL(MAP_ANON),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_SHARED),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE),
	MMAP_FLAGS_TRIAL(MAP_ANON | MAP_SHARED),
	MMAP_FLAGS_TRIAL(MAP_ANON | MAP_PRIVATE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_SHARED | MAP_PRIVATE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_FIXED),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RENAME),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NORESERVE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESERVED0080),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NOEXTEND),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_HASSEMAPHORE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_NOCACHE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_JIT),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_CODESIGN),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_RESILIENT_MEDIA),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_TRANSLATED_ALLOW_EXECUTE),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | MAP_UNIX03),
	// skip MAP_TPRO; it only works on some hardware
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 3),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 4),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 5),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 6),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 7),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 8),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 9),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 10),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 11),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 12),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 13),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 14),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 15),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 16),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 17),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 18),
	// skip MAP_TPRO (1<<19); it only works on some hardware
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 20),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 21),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 22),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 23),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 24),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 25),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 26),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 27),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 28),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 29),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 30),
	MMAP_FLAGS_TRIAL(MAP_FILE | MAP_PRIVATE | 1u << 31),
};

TRIALS_IMPL(mmap_flags)

static void
cleanup_mmap_flags_trials(mmap_flags_trials_t **trials)
{
	free_trials(*trials);
}

// allocate mmap_flags trials, and deallocate it at end of scope
#define SMART_MMAP_FLAGS_TRIALS() \
	__attribute__((cleanup(cleanup_mmap_flags_trials))) \
	= allocate_mmap_flags_trials(countof(mmap_flags_trials_values)); \
	append_trials(trials, mmap_flags_trials_values, countof(mmap_flags_trials_values))

// generate generic flag trials

typedef struct {
	int flag;
	const char *name;
} generic_flag_trial_t;

typedef struct {
	unsigned count;
	unsigned capacity;
	generic_flag_trial_t list[];
} generic_flag_trials_t;

#define GENERIC_FLAG_TRIAL(new_flag) \
	(generic_flag_trial_t){ .flag = (int)(new_flag), .name = "generic flag "#new_flag }

static generic_flag_trial_t generic_flag_trials_values[] = {
	GENERIC_FLAG_TRIAL(0),
	GENERIC_FLAG_TRIAL(1),
	GENERIC_FLAG_TRIAL(2),
	GENERIC_FLAG_TRIAL(3),
	GENERIC_FLAG_TRIAL(4),
	GENERIC_FLAG_TRIAL(5),
	GENERIC_FLAG_TRIAL(6),
	GENERIC_FLAG_TRIAL(7),
	GENERIC_FLAG_TRIAL(1u << 3),
	GENERIC_FLAG_TRIAL(1u << 4),
	GENERIC_FLAG_TRIAL(1u << 5),
	GENERIC_FLAG_TRIAL(1u << 6),
	GENERIC_FLAG_TRIAL(1u << 7),
	GENERIC_FLAG_TRIAL(1u << 8),
	GENERIC_FLAG_TRIAL(1u << 9),
	GENERIC_FLAG_TRIAL(1u << 10),
	GENERIC_FLAG_TRIAL(1u << 11),
	GENERIC_FLAG_TRIAL(1u << 12),
	GENERIC_FLAG_TRIAL(1u << 13),
	GENERIC_FLAG_TRIAL(1u << 14),
	GENERIC_FLAG_TRIAL(1u << 15),
	GENERIC_FLAG_TRIAL(1u << 16),
	GENERIC_FLAG_TRIAL(1u << 17),
	GENERIC_FLAG_TRIAL(1u << 18),
	GENERIC_FLAG_TRIAL(1u << 19),
	GENERIC_FLAG_TRIAL(1u << 20),
	GENERIC_FLAG_TRIAL(1u << 21),
	GENERIC_FLAG_TRIAL(1u << 22),
	GENERIC_FLAG_TRIAL(1u << 23),
	GENERIC_FLAG_TRIAL(1u << 24),
	GENERIC_FLAG_TRIAL(1u << 25),
	GENERIC_FLAG_TRIAL(1u << 26),
	GENERIC_FLAG_TRIAL(1u << 27),
	GENERIC_FLAG_TRIAL(1u << 28),
	GENERIC_FLAG_TRIAL(1u << 29),
	GENERIC_FLAG_TRIAL(1u << 30),
	GENERIC_FLAG_TRIAL(1u << 31),
};

TRIALS_IMPL(generic_flag)

static void
cleanup_generic_flag_trials(generic_flag_trials_t **trials)
{
	free_trials(*trials);
}

// allocate generic flag trials, and deallocate it at end of scope
#define SMART_GENERIC_FLAG_TRIALS() \
	__attribute__((cleanup(cleanup_generic_flag_trials))) \
	= allocate_generic_flag_trials(countof(generic_flag_trials_values)); \
	append_trials(trials, generic_flag_trials_values, countof(generic_flag_trials_values))
1635
1636
1637 // generate vm_tag_t trials
1638
1639 #ifndef KERNEL
1640 typedef int vm_tag_t;
1641 #endif /* !KERNEL */
1642
1643 typedef struct {
1644 vm_tag_t tag;
1645 const char *name;
1646 } vm_tag_trial_t;
1647
1648 typedef struct {
1649 unsigned count;
1650 unsigned capacity;
1651 vm_tag_trial_t list[];
1652 } vm_tag_trials_t;
1653
1654 #if KERNEL
1655 #define KERNEL_VM_TAG_TRIAL(new_tag) \
1656 (vm_tag_trial_t){ .tag = (vm_tag_t)(new_tag), .name = "vm_tag "#new_tag }
1657
1658 #define VM_TAG_TRIAL KERNEL_VM_TAG_TRIAL
1659 #else
1660 #define USER_VM_TAG_TRIAL(new_tag) \
1661 (vm_tag_trial_t){ .tag = (vm_tag_t)0, .name = "vm_tag "#new_tag }
1662
1663 #define VM_TAG_TRIAL USER_VM_TAG_TRIAL
1664 #endif
1665
1666 static vm_tag_trial_t vm_tag_trials_values[] = {
1667 VM_TAG_TRIAL(VM_KERN_MEMORY_NONE),
1668 VM_TAG_TRIAL(VM_KERN_MEMORY_OSFMK),
1669 VM_TAG_TRIAL(VM_KERN_MEMORY_BSD),
1670 VM_TAG_TRIAL(VM_KERN_MEMORY_IOKIT),
1671 VM_TAG_TRIAL(VM_KERN_MEMORY_LIBKERN),
1672 VM_TAG_TRIAL(VM_KERN_MEMORY_OSKEXT),
1673 VM_TAG_TRIAL(VM_KERN_MEMORY_KEXT),
1674 VM_TAG_TRIAL(VM_KERN_MEMORY_IPC),
1675 VM_TAG_TRIAL(VM_KERN_MEMORY_STACK),
1676 VM_TAG_TRIAL(VM_KERN_MEMORY_CPU),
1677 VM_TAG_TRIAL(VM_KERN_MEMORY_PMAP),
1678 VM_TAG_TRIAL(VM_KERN_MEMORY_PTE),
1679 VM_TAG_TRIAL(VM_KERN_MEMORY_ZONE),
1680 VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC),
1681 VM_TAG_TRIAL(VM_KERN_MEMORY_COMPRESSOR),
1682 VM_TAG_TRIAL(VM_KERN_MEMORY_COMPRESSED_DATA),
1683 VM_TAG_TRIAL(VM_KERN_MEMORY_PHANTOM_CACHE),
1684 VM_TAG_TRIAL(VM_KERN_MEMORY_WAITQ),
1685 VM_TAG_TRIAL(VM_KERN_MEMORY_DIAG),
1686 VM_TAG_TRIAL(VM_KERN_MEMORY_LOG),
1687 VM_TAG_TRIAL(VM_KERN_MEMORY_FILE),
1688 VM_TAG_TRIAL(VM_KERN_MEMORY_MBUF),
1689 VM_TAG_TRIAL(VM_KERN_MEMORY_UBC),
1690 VM_TAG_TRIAL(VM_KERN_MEMORY_SECURITY),
1691 VM_TAG_TRIAL(VM_KERN_MEMORY_MLOCK),
1692 VM_TAG_TRIAL(VM_KERN_MEMORY_REASON),
1693 VM_TAG_TRIAL(VM_KERN_MEMORY_SKYWALK),
1694 VM_TAG_TRIAL(VM_KERN_MEMORY_LTABLE),
1695 VM_TAG_TRIAL(VM_KERN_MEMORY_HV),
1696 VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC_DATA),
1697 VM_TAG_TRIAL(VM_KERN_MEMORY_RETIRED),
1698 VM_TAG_TRIAL(VM_KERN_MEMORY_KALLOC_TYPE),
1699 VM_TAG_TRIAL(VM_KERN_MEMORY_TRIAGE),
1700 VM_TAG_TRIAL(VM_KERN_MEMORY_RECOUNT),
1701 };
1702
1703 TRIALS_IMPL(vm_tag)
1704
1705 static void
1706 cleanup_vm_tag_trials(vm_tag_trials_t **trials)
1707 {
1708 free_trials(*trials);
1709 }
1710
1711 #define SMART_VM_TAG_TRIALS() \
1712 __attribute__((cleanup(cleanup_vm_tag_trials))) \
1713 = allocate_vm_tag_trials(countof(vm_tag_trials_values)); \
1714 append_trials(trials, vm_tag_trials_values, countof(vm_tag_trials_values))
1715
1716 // END vm_tag_t
1717
1718 // generate vm_prot_t trials
1719
1720 typedef struct {
1721 vm_prot_t prot;
1722 const char *name;
1723 } vm_prot_trial_t;
1724
1725 typedef struct {
1726 unsigned count;
1727 unsigned capacity;
1728 vm_prot_trial_t list[];
1729 } vm_prot_trials_t;
1730
1731 #define VM_PROT_TRIAL(new_prot) \
1732 (vm_prot_trial_t){ .prot = (vm_prot_t)(new_prot), .name = "vm_prot "#new_prot }
1733
1734 static vm_prot_trial_t vm_prot_trials_values[] = {
1735 // none
1736 VM_PROT_TRIAL(VM_PROT_NONE),
1737 // ordinary r-- / rw- / r-x
1738 VM_PROT_TRIAL(VM_PROT_READ),
1739 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE),
1740 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE),
1741 // rwx (w+x often disallowed)
1742 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE),
1743 // VM_PROT_READ | VM_PROT_x for each other VM_PROT_x bit
1744 // plus write and execute for some interesting cases
1745 VM_PROT_TRIAL(VM_PROT_READ | 1u << 3),
1746 VM_PROT_TRIAL(VM_PROT_READ | 1u << 4),
1747 VM_PROT_TRIAL(VM_PROT_READ | 1u << 5),
1748 VM_PROT_TRIAL(VM_PROT_READ | 1u << 6),
1749 VM_PROT_TRIAL(VM_PROT_READ | 1u << 7),
1750 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 7),
1751 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 7),
1752 VM_PROT_TRIAL(VM_PROT_READ | 1u << 8),
1753 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 8),
1754 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 8),
1755 VM_PROT_TRIAL(VM_PROT_READ | 1u << 9),
1756 VM_PROT_TRIAL(VM_PROT_READ | 1u << 10),
1757 VM_PROT_TRIAL(VM_PROT_READ | 1u << 11),
1758 VM_PROT_TRIAL(VM_PROT_READ | 1u << 12),
1759 VM_PROT_TRIAL(VM_PROT_READ | 1u << 13),
1760 VM_PROT_TRIAL(VM_PROT_READ | 1u << 14),
1761 VM_PROT_TRIAL(VM_PROT_READ | 1u << 15),
1762 VM_PROT_TRIAL(VM_PROT_READ | 1u << 16),
1763 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 16),
1764 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 16),
1765 VM_PROT_TRIAL(VM_PROT_READ | 1u << 17),
1766 VM_PROT_TRIAL(VM_PROT_READ | 1u << 18),
1767 VM_PROT_TRIAL(VM_PROT_READ | 1u << 19),
1768 VM_PROT_TRIAL(VM_PROT_READ | 1u << 20),
1769 VM_PROT_TRIAL(VM_PROT_READ | 1u << 21),
1770 VM_PROT_TRIAL(VM_PROT_READ | 1u << 22),
1771 VM_PROT_TRIAL(VM_PROT_READ | 1u << 23),
1772 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 23),
1773 VM_PROT_TRIAL(VM_PROT_READ | 1u << 24),
1774 VM_PROT_TRIAL(VM_PROT_READ | 1u << 25),
1775 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 25),
1776 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 25),
1777 VM_PROT_TRIAL(VM_PROT_READ | 1u << 26),
1778 VM_PROT_TRIAL(VM_PROT_READ | 1u << 27),
1779 VM_PROT_TRIAL(VM_PROT_READ | 1u << 28),
1780 VM_PROT_TRIAL(VM_PROT_READ | 1u << 29),
1781 VM_PROT_TRIAL(VM_PROT_READ | 1u << 30),
1782 VM_PROT_TRIAL(VM_PROT_READ | 1u << 31),
1783 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | 1u << 31),
1784 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_EXECUTE | 1u << 31),
1785
1786 // error case coverage in specific subfunctions
1787 VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_ONLY | MAP_MEM_USE_DATA_ADDR),
1788 VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_ONLY | MAP_MEM_4K_DATA_ADDR),
1789 VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_NAMED_CREATE | MAP_MEM_USE_DATA_ADDR),
1790 VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_NAMED_CREATE | MAP_MEM_4K_DATA_ADDR),
1791 VM_PROT_TRIAL(VM_PROT_READ | MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE),
1792 VM_PROT_TRIAL(VM_PROT_NONE | MAP_MEM_VM_SHARE | VM_PROT_IS_MASK),
1793
1794 // interesting non-error cases for additional test coverage
1795 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE),
1796 VM_PROT_TRIAL(VM_PROT_READ | VM_PROT_WRITE | MAP_MEM_NAMED_CREATE |
1797 MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY),
1798 };
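
// Note on the MAP_MEM_* trials above: mach_make_memory_entry() packs
// memory-entry control flags (MAP_MEM_ONLY, MAP_MEM_NAMED_CREATE,
// MAP_MEM_PURGABLE, ...) into the high bits of its vm_prot_t argument,
// alongside the ordinary protection bits, which is why they can be
// exercised through a vm_prot_t trial list.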
1799
1800 TRIALS_IMPL(vm_prot)
1801
1802 static void
1803 cleanup_vm_prot_trials(vm_prot_trials_t **trials)
1804 {
1805 free_trials(*trials);
1806 }
1807
1808 // allocate vm_prot trials, and deallocate it at end of scope
1809 #define SMART_VM_PROT_TRIALS() \
1810 __attribute__((cleanup(cleanup_vm_prot_trials))) \
1811 = allocate_vm_prot_trials(countof(vm_prot_trials_values)); \
1812 append_trials(trials, vm_prot_trials_values, countof(vm_prot_trials_values))
1813
1814 // Trials for pairs of vm_prot_t
1815
1816 typedef struct {
1817 vm_prot_t cur;
1818 vm_prot_t max;
1819 char * name;
1820 } vm_prot_pair_trial_t;
1821
1822 typedef struct {
1823 unsigned count;
1824 unsigned capacity;
1825 vm_prot_pair_trial_t list[];
1826 } vm_prot_pair_trials_t;
1827
1828 TRIALS_IMPL(vm_prot_pair)
1829
1830 #define VM_PROT_PAIR_TRIAL(new_cur, new_max, new_name) \
1831 (vm_prot_pair_trial_t){ .cur = (vm_prot_t)(new_cur), \
1832 .max = (vm_prot_t)(new_max), \
1833 .name = new_name,}
1834
1835 vm_prot_pair_trials_t *
1836 generate_vm_prot_pair_trials()
1837 {
1838 const unsigned D = countof(vm_prot_trials_values);
1839 unsigned num_trials = D * D;
1840
1841 vm_prot_pair_trials_t * trials = allocate_vm_prot_pair_trials(num_trials);
1842 for (size_t i = 0; i < D; i++) {
1843 for (size_t j = 0; j < D; j++) {
1844 vm_prot_t cur = vm_prot_trials_values[i].prot;
1845 vm_prot_t max = vm_prot_trials_values[j].prot;
1846 char *str;
1847 kasprintf(&str, "cur: 0x%x, max: 0x%x", cur, max);
1848 append_trial(trials, VM_PROT_PAIR_TRIAL(cur, max, str));
1849 }
1850 }
1851 return trials;
1852 }
1853
1854 #define SMART_VM_PROT_PAIR_TRIALS() \
1855 __attribute__((cleanup(cleanup_vm_prot_pair_trials))) \
1856 = generate_vm_prot_pair_trials();
1857
1858 static void
1859 cleanup_vm_prot_pair_trials(vm_prot_pair_trials_t **trials)
1860 {
1861 for (size_t i = 0; i < (*trials)->count; i++) {
1862 kfree_str((*trials)->list[i].name);
1863 }
1864 free_trials(*trials);
1865 }
1866
1867
1868 // vm_purgable_t trial contents.
1869 typedef struct {
1870 vm_purgable_t value;
1871 char * name;
1872 } vm_purgeable_trial_t;
1873
1874 #define VM_PURGEABLE_TRIAL(new_value) \
1875 (vm_purgeable_trial_t) {.value = (vm_purgable_t)(new_value), .name = "vm_purgeable_t " #new_value}
1876
1877 static vm_purgeable_trial_t vm_purgeable_trials_values[] = {
1878 VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE),
1879 VM_PURGEABLE_TRIAL(VM_PURGABLE_GET_STATE),
1880 VM_PURGEABLE_TRIAL(VM_PURGABLE_PURGE_ALL),
1881 VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE_FROM_KERNEL),
1882 // end valid values
1883 VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE_FROM_KERNEL + 1),
1884 VM_PURGEABLE_TRIAL(VM_PURGABLE_SET_STATE_FROM_KERNEL + 2),
1885 VM_PURGEABLE_TRIAL(0x12345),
1886 VM_PURGEABLE_TRIAL(0xffffffff),
1887 };
1888
1889 typedef struct {
1890 int value;
1891 char * name;
1892 } vm_purgeable_state_trial_t;
1893
1894 #define VM_PURGEABLE_STATE_TRIAL(new_value) \
1895 (vm_purgeable_state_trial_t) {.value = (int)(new_value), .name = "state " #new_value}
1896
1897 static vm_purgeable_state_trial_t vm_purgeable_state_trials_values[] = {
1898 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_NO_AGING),
1899 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_DEBUG_EMPTY),
1900 VM_PURGEABLE_STATE_TRIAL(VM_VOLATILE_GROUP_0),
1901 VM_PURGEABLE_STATE_TRIAL(VM_VOLATILE_GROUP_7),
1902 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_BEHAVIOR_FIFO),
1903 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_ORDERING_NORMAL),
1904 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_EMPTY),
1905 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_DENY),
1906 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_NONVOLATILE),
1907 VM_PURGEABLE_STATE_TRIAL(VM_PURGABLE_VOLATILE),
1908 VM_PURGEABLE_STATE_TRIAL(0x12345),
1909 VM_PURGEABLE_STATE_TRIAL(0xffffffff),
1910 };
1911
1912 // Trials for vm_purgeable_t and state
1913 typedef struct {
1914 vm_purgable_t control;
1915 int state;
1916 char * name;
1917 } vm_purgeable_and_state_trial_t;
1918
1919 typedef struct {
1920 unsigned count;
1921 unsigned capacity;
1922 vm_purgeable_and_state_trial_t list[];
1923 } vm_purgeable_and_state_trials_t;
1924
1925 TRIALS_IMPL(vm_purgeable_and_state)
1926
1927 #define VM_PURGEABLE_AND_STATE_TRIAL(new_control, new_state, new_name) \
1928 (vm_purgeable_and_state_trial_t){ .control = (vm_purgable_t)(new_control), \
1929 .state = (int)(new_state), \
1930 .name = new_name,}
1931
1932 vm_purgeable_and_state_trials_t *
1933 generate_vm_purgeable_t_and_state_trials()
1934 {
1935 const unsigned purgeable_trial_count = countof(vm_purgeable_trials_values);
1936 const unsigned state_trial_count = countof(vm_purgeable_state_trials_values);
1937 unsigned num_trials = purgeable_trial_count * state_trial_count;
1938
1939 vm_purgeable_and_state_trials_t * trials = allocate_vm_purgeable_and_state_trials(num_trials);
1940 for (size_t i = 0; i < purgeable_trial_count; i++) {
1941 for (size_t j = 0; j < state_trial_count; j++) {
1942 vm_purgeable_trial_t control_trial = vm_purgeable_trials_values[i];
1943 vm_purgeable_state_trial_t state_trial = vm_purgeable_state_trials_values[j];
1944 char *str;
1945 kasprintf(&str, "%s, %s", control_trial.name, state_trial.name);
1946 append_trial(trials, VM_PURGEABLE_AND_STATE_TRIAL(control_trial.value, state_trial.value, str));
1947 }
1948 }
1949 return trials;
1950 }
1951
1952 #define SMART_VM_PURGEABLE_AND_STATE_TRIALS() \
1953 __attribute__((cleanup(cleanup_vm_purgeable_t_and_state_trials))) \
1954 = generate_vm_purgeable_t_and_state_trials();
1955
1956 static void
1957 cleanup_vm_purgeable_t_and_state_trials(vm_purgeable_and_state_trials_t **trials)
1958 {
1959 for (size_t i = 0; i < (*trials)->count; i++) {
1960 kfree_str((*trials)->list[i].name);
1961 }
1962 free_trials(*trials);
1963 }
1964
1965 // generate ledger tag trials
1966
1967 typedef struct {
1968 int tag;
1969 const char *name;
1970 } ledger_tag_trial_t;
1971
1972 typedef struct {
1973 unsigned count;
1974 unsigned capacity;
1975 ledger_tag_trial_t list[];
1976 } ledger_tag_trials_t;
1977
1978 #define LEDGER_TAG_TRIAL(new_tag) \
1979 (ledger_tag_trial_t){ .tag = (int)(new_tag), .name = "ledger tag "#new_tag }
1980
1981 static ledger_tag_trial_t ledger_tag_trials_values[] = {
1982 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NONE),
1983 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_DEFAULT),
1984 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NETWORK),
1985 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_MEDIA),
1986 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_GRAPHICS),
1987 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_NEURAL),
1988 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_MAX),
1989 LEDGER_TAG_TRIAL(1u << 16),
1990 LEDGER_TAG_TRIAL(1u << 17),
1991 LEDGER_TAG_TRIAL(1u << 18),
1992 LEDGER_TAG_TRIAL(1u << 19),
1993 LEDGER_TAG_TRIAL(1u << 20),
1994 LEDGER_TAG_TRIAL(1u << 21),
1995 LEDGER_TAG_TRIAL(1u << 22),
1996 LEDGER_TAG_TRIAL(1u << 23),
1997 LEDGER_TAG_TRIAL(1u << 24),
1998 LEDGER_TAG_TRIAL(1u << 25),
1999 LEDGER_TAG_TRIAL(1u << 26),
2000 LEDGER_TAG_TRIAL(1u << 27),
2001 LEDGER_TAG_TRIAL(1u << 28),
2002 LEDGER_TAG_TRIAL(1u << 29),
2003 LEDGER_TAG_TRIAL(1u << 30),
2004 LEDGER_TAG_TRIAL(1u << 31),
2005 LEDGER_TAG_TRIAL(VM_LEDGER_TAG_UNCHANGED),
2006 };
2007
2008 TRIALS_IMPL(ledger_tag)
2009
2010 static void
2011 cleanup_ledger_tag_trials(ledger_tag_trials_t **trials)
2012 {
2013 free_trials(*trials);
2014 }
2015
2016 // allocate ledger tag trials, and deallocate it at end of scope
2017 #define SMART_LEDGER_TAG_TRIALS() \
2018 __attribute__((cleanup(cleanup_ledger_tag_trials))) \
2019 = allocate_ledger_tag_trials(countof(ledger_tag_trials_values)); \
2020 append_trials(trials, ledger_tag_trials_values, countof(ledger_tag_trials_values))
2021
2022
2023 // generate ledger flag trials
2024
2025 typedef struct {
2026 int flag;
2027 const char *name;
2028 } ledger_flag_trial_t;
2029
2030 typedef struct {
2031 unsigned count;
2032 unsigned capacity;
2033 ledger_flag_trial_t list[];
2034 } ledger_flag_trials_t;
2035
2036 #define LEDGER_FLAG_TRIAL(new_flag) \
2037 (ledger_flag_trial_t){ .flag = (int)(new_flag), .name = "ledger flag "#new_flag }
2038
2039 static ledger_flag_trial_t ledger_flag_trials_values[] = {
2040 LEDGER_FLAG_TRIAL(0),
2041 LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_NO_FOOTPRINT),
2042 LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_NO_FOOTPRINT_FOR_DEBUG),
2043 LEDGER_FLAG_TRIAL(VM_LEDGER_FLAGS_USER),
2044 LEDGER_FLAG_TRIAL(VM_LEDGER_FLAG_FROM_KERNEL),
2045 LEDGER_FLAG_TRIAL(VM_LEDGER_FLAGS_ALL),
2046 LEDGER_FLAG_TRIAL(1u << 3),
2047 LEDGER_FLAG_TRIAL(1u << 4),
2048 LEDGER_FLAG_TRIAL(1u << 5),
2049 LEDGER_FLAG_TRIAL(1u << 6),
2050 LEDGER_FLAG_TRIAL(1u << 7),
2051 LEDGER_FLAG_TRIAL(1u << 8),
2052 LEDGER_FLAG_TRIAL(1u << 9),
2053 LEDGER_FLAG_TRIAL(1u << 10),
2054 LEDGER_FLAG_TRIAL(1u << 11),
2055 LEDGER_FLAG_TRIAL(1u << 12),
2056 LEDGER_FLAG_TRIAL(1u << 13),
2057 LEDGER_FLAG_TRIAL(1u << 14),
2058 LEDGER_FLAG_TRIAL(1u << 15),
2059 LEDGER_FLAG_TRIAL(1u << 16),
2060 LEDGER_FLAG_TRIAL(1u << 17),
2061 LEDGER_FLAG_TRIAL(1u << 18),
2062 LEDGER_FLAG_TRIAL(1u << 19),
2063 LEDGER_FLAG_TRIAL(1u << 20),
2064 LEDGER_FLAG_TRIAL(1u << 21),
2065 LEDGER_FLAG_TRIAL(1u << 22),
2066 LEDGER_FLAG_TRIAL(1u << 23),
2067 LEDGER_FLAG_TRIAL(1u << 24),
2068 LEDGER_FLAG_TRIAL(1u << 25),
2069 LEDGER_FLAG_TRIAL(1u << 26),
2070 LEDGER_FLAG_TRIAL(1u << 27),
2071 LEDGER_FLAG_TRIAL(1u << 28),
2072 LEDGER_FLAG_TRIAL(1u << 29),
2073 LEDGER_FLAG_TRIAL(1u << 30),
2074 LEDGER_FLAG_TRIAL(1u << 31),
2075 };
2076
2077 TRIALS_IMPL(ledger_flag)
2078
2079 static void
2080 cleanup_ledger_flag_trials(ledger_flag_trials_t **trials)
2081 {
2082 free_trials(*trials);
2083 }
2084
2085 // allocate ledger flag trials, and deallocate it at end of scope
2086 #define SMART_LEDGER_FLAG_TRIALS() \
2087 __attribute__((cleanup(cleanup_ledger_flag_trials))) \
2088 = allocate_ledger_flag_trials(countof(ledger_flag_trials_values)); \
2089 append_trials(trials, ledger_flag_trials_values, countof(ledger_flag_trials_values))
2090
2091 // generate address-parameter trials
2092 // where the address has no associated size
2093 // and the callee's arithmetic includes `round_page(addr)`
2094
2095 typedef struct {
2096 addr_t addr;
2097 bool addr_is_absolute;
2098 char *name;
2099 } addr_trial_t;
2100
2101 typedef struct {
2102 unsigned count;
2103 unsigned capacity;
2104 addr_trial_t list[];
2105 } addr_trials_t;
2106
2107 #define ADDR_TRIAL(new_addr, new_absolute, new_name) \
2108 (addr_trial_t){ .addr = (addr_t)(new_addr), .addr_is_absolute = new_absolute, .name = new_name }
2109
2110 static addr_trial_t __attribute__((overloadable, used))
2111 slide_trial(addr_trial_t trial, mach_vm_address_t slide)
2112 {
2113 addr_trial_t result = trial;
2114 if (!trial.addr_is_absolute) {
2115 result.addr += slide;
2116 }
2117 return result;
2118 }
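
// For illustration: sliding a relative trial whose addr was generated as
// base+1 by `slide` moves it to base+1+slide, while absolute trials such
// as (addr_t)-1 are deliberately left untouched.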
2119
2120 static const offset_list_t *
2121 get_addr_trial_offsets(void)
2122 {
2123 static offset_list_t *offsets;
2124 addr_t test_page_size = adjust_page_size();
2125 if (!offsets) {
2126 offsets = allocate_offsets(20);
2127 append_offset(offsets, true, 0);
2128 append_offset(offsets, true, 1);
2129 append_offset(offsets, true, 2);
2130 append_offset(offsets, true, test_page_size - 2);
2131 append_offset(offsets, true, test_page_size - 1);
2132 append_offset(offsets, true, test_page_size);
2133 append_offset(offsets, true, test_page_size + 1);
2134 append_offset(offsets, true, test_page_size + 2);
2135 append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 2);
2136 append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 1);
2137 append_offset(offsets, true, -(mach_vm_address_t)test_page_size);
2138 append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 1);
2139 append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 2);
2140 append_offset(offsets, true, -(mach_vm_address_t)2);
2141 append_offset(offsets, true, -(mach_vm_address_t)1);
2142
2143 append_offset(offsets, false, 0);
2144 append_offset(offsets, false, 1);
2145 append_offset(offsets, false, 2);
2146 append_offset(offsets, false, test_page_size - 2);
2147 append_offset(offsets, false, test_page_size - 1);
2148 }
2149 return offsets;
2150 }
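
// Note: the "negative" absolute offsets above rely on unsigned wraparound.
// With a 16K test page size, -(mach_vm_address_t)test_page_size is
// 0xffffffffffffc000, the last page-aligned address below the top of the
// 64-bit address space, so these trials probe end-of-address-space
// arithmetic in the callee.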
2151
2152 TRIALS_IMPL(addr)
2153
2154 addr_trials_t *
2155 generate_addr_trials(addr_t base)
2156 {
2157 const offset_list_t *offsets = get_addr_trial_offsets();
2158 const unsigned ADDRS = offsets->count;
2159 addr_trials_t *trials = allocate_addr_trials(ADDRS);
2160
2161 for (unsigned a = 0; a < ADDRS; a++) {
2162 mach_vm_address_t addr_offset = offsets->list[a].offset;
2163 mach_vm_address_t addr = addr_offset;
2164 bool addr_is_absolute = offsets->list[a].is_absolute;
2165 if (!addr_is_absolute) {
2166 addr += base;
2167 }
2168
2169 char *str;
2170 kasprintf(&str, "addr: %s0x%llx",
2171 addr_is_absolute ? "" : "base+", addr_offset);
2172 append_trial(trials, ADDR_TRIAL(addr, addr_is_absolute, str));
2173 }
2174 return trials;
2175 }
2176
2177 static void
2178 cleanup_addr_trials(addr_trials_t **trials)
2179 {
2180 for (size_t i = 0; i < (*trials)->count; i++) {
2181 kfree_str((*trials)->list[i].name);
2182 }
2183 free_trials(*trials);
2184 }
2185
2186 // allocate address trials around a base address
2187 // and deallocate it at end of scope
2188 #define SMART_ADDR_TRIALS(base) \
2189 __attribute__((cleanup(cleanup_addr_trials))) \
2190 = generate_addr_trials(base)
2191
2192
2193 /////////////////////////////////////////////////////
2194 // generate size-parameter trials
2195 // where the size is not associated with any base address
2196 // and the callee's arithmetic includes `round_page(size)`
2197
2198 typedef struct {
2199 addr_t size;
2200 char *name;
2201 } size_trial_t;
2202
2203 typedef struct {
2204 unsigned count;
2205 unsigned capacity;
2206 size_trial_t list[];
2207 } size_trials_t;
2208
2209 #define SIZE_TRIAL(new_size, new_name) \
2210 (size_trial_t){ .size = (addr_t)(new_size), .name = new_name }
2211
2212 static const offset_list_t *
2213 get_size_trial_offsets(void)
2214 {
2215 static offset_list_t *offsets;
2216 addr_t test_page_size = adjust_page_size();
2217 if (!offsets) {
2218 offsets = allocate_offsets(15);
2219 append_offset(offsets, true, 0);
2220 append_offset(offsets, true, 1);
2221 append_offset(offsets, true, 2);
2222 append_offset(offsets, true, test_page_size - 2);
2223 append_offset(offsets, true, test_page_size - 1);
2224 append_offset(offsets, true, test_page_size);
2225 append_offset(offsets, true, test_page_size + 1);
2226 append_offset(offsets, true, test_page_size + 2);
2227 append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 2);
2228 append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 1);
2229 append_offset(offsets, true, -(mach_vm_address_t)test_page_size);
2230 append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 1);
2231 append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 2);
2232 append_offset(offsets, true, -(mach_vm_address_t)2);
2233 append_offset(offsets, true, -(mach_vm_address_t)1);
2234 }
2235 return offsets;
2236 }
2237
2238 TRIALS_IMPL(size)
2239
2240 size_trials_t *
2241 generate_size_trials(void)
2242 {
2243 const offset_list_t *size_offsets = get_size_trial_offsets();
2244 const unsigned SIZES = size_offsets->count;
2245 size_trials_t *trials = allocate_size_trials(SIZES);
2246
2247 for (unsigned s = 0; s < SIZES; s++) {
2248 mach_vm_size_t size = size_offsets->list[s].offset;
2249
2250 char *str;
2251 kasprintf(&str, "size: 0x%llx", size);
2252 append_trial(trials, SIZE_TRIAL(size, str));
2253 }
2254 return trials;
2255 }
2256
2257 static void
2258 cleanup_size_trials(size_trials_t **trials)
2259 {
2260 for (size_t i = 0; i < (*trials)->count; i++) {
2261 kfree_str((*trials)->list[i].name);
2262 }
2263 free_trials(*trials);
2264 }
2265
2266 // allocate size trials, and deallocate it at end of scope
2267 #define SMART_SIZE_TRIALS() \
2268 __attribute__((cleanup(cleanup_size_trials))) \
2269 = generate_size_trials()
2270
2271 /////////////////////////////////////////////////////
2272 // generate start/size trials
2273 // using absolute addresses or addresses around a given address
2274 // where `size` is the size of the thing at `start`
2275 // and the callee's arithmetic performs `start+size`
2276
2277 typedef struct {
2278 addr_t start;
2279 addr_t size;
2280 char *name;
2281 bool start_is_absolute; // start computation does not include any allocation's base address
2282 bool size_is_absolute; // size computation does not include start
2283 } start_size_trial_t;
2284
2285 typedef struct {
2286 unsigned count;
2287 unsigned capacity;
2288 start_size_trial_t list[];
2289 } start_size_trials_t;
2290
2291
2292 #define START_SIZE_TRIAL(new_start, start_absolute, new_size, size_absolute, new_name) \
2293 (start_size_trial_t){ .start = (addr_t)(new_start), .size = (addr_t)(new_size), \
2294 .name = new_name, \
2295 .start_is_absolute = start_absolute, .size_is_absolute = size_absolute }
2296
2297 static const offset_list_t *
2298 get_start_size_trial_start_offsets(void)
2299 {
2300 return get_addr_trial_offsets();
2301 }
2302
2303 static const offset_list_t *
2304 get_start_size_trial_size_offsets(void)
2305 {
2306 static offset_list_t *offsets;
2307 if (!offsets) {
2308 // use each size offset twice: once absolute and once relative
2309 const offset_list_t *old_offsets = get_size_trial_offsets();
2310 offsets = allocate_offsets(2 * old_offsets->count);
2311 for (unsigned i = 0; i < old_offsets->count; i++) {
2312 append_offset(offsets, true, old_offsets->list[i].offset);
2313 }
2314 for (unsigned i = 0; i < old_offsets->count; i++) {
2315 append_offset(offsets, false, old_offsets->list[i].offset);
2316 }
2317 }
2318 return offsets;
2319 }
2320
2321 TRIALS_IMPL(start_size)
2322
2323 // Return a new start/size trial which is offset by `slide` bytes
2324 // Only "relative" start and size values get slid.
2325 // "absolute" values don't change.
2326 static start_size_trial_t __attribute__((overloadable, used))
2327 slide_trial(start_size_trial_t trial, mach_vm_address_t slide)
2328 {
2329 start_size_trial_t result = trial;
2330 if (!result.start_is_absolute) {
2331 result.start += slide;
2332 if (!result.size_is_absolute) {
2333 result.size -= slide;
2334 }
2335 }
2336 return result;
2337 }
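
// Worked example of the invariant: a relative size is generated below as
// size = -start + K for some fixed offset K, so the callee's start + size
// always equals K. slide_trial preserves this: start' = start + slide and
// size' = size - slide, hence start' + size' == start + size == K.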
2338
2339 start_size_trials_t *
2340 generate_start_size_trials(addr_t base)
2341 {
2342 const offset_list_t *start_offsets = get_start_size_trial_start_offsets();
2343 const offset_list_t *size_offsets = get_start_size_trial_size_offsets();
2344
2345 const unsigned ADDRS = start_offsets->count;
2346 const unsigned SIZES = size_offsets->count;
2347
2348 start_size_trials_t *trials = allocate_start_size_trials(ADDRS * SIZES);
2349
2350 for (unsigned a = 0; a < ADDRS; a++) {
2351 for (unsigned s = 0; s < SIZES; s++) {
2352 mach_vm_address_t start_offset = start_offsets->list[a].offset;
2353 mach_vm_address_t start = start_offset;
2354 bool start_is_absolute = start_offsets->list[a].is_absolute;
2355 if (!start_is_absolute) {
2356 start += base;
2357 }
2358
2359 mach_vm_size_t size_offset = size_offsets->list[s].offset;
2360 mach_vm_size_t size = size_offset;
2361 bool size_is_absolute = size_offsets->list[s].is_absolute;
2362 if (!size_is_absolute) {
2363 size = -start + size;
2364 }
2365
2366 char *str;
2367 kasprintf(&str, "start: %s0x%llx, size: %s0x%llx",
2368 start_is_absolute ? "" : "base+", start_offset,
2369 size_is_absolute ? "" :"-start+", size_offset);
2370 append_trial(trials, START_SIZE_TRIAL(start, start_is_absolute, size, size_is_absolute, str));
2371 }
2372 }
2373 return trials;
2374 }
2375
2376 static void
2377 cleanup_start_size_trials(start_size_trials_t **trials)
2378 {
2379 for (size_t i = 0; i < (*trials)->count; i++) {
2380 kfree_str((*trials)->list[i].name);
2381 }
2382 free_trials(*trials);
2383 }
2384
2385 // allocate start/size trials around a base address
2386 // and deallocate it at end of scope
2387 #define SMART_START_SIZE_TRIALS(base) \
2388 __attribute__((cleanup(cleanup_start_size_trials))) \
2389 = generate_start_size_trials(base)
2390
2391 // Trials for start/size/offset/object tuples
2392
2393 typedef struct {
2394 mach_vm_address_t start;
2395 mach_vm_size_t size;
2396 vm_object_offset_t offset;
2397 mach_vm_size_t obj_size;
2398 bool start_is_absolute;
2399 bool size_is_absolute;
2400 char * name;
2401 } start_size_offset_object_trial_t;
2402
2403 typedef struct {
2404 unsigned count;
2405 unsigned capacity;
2406 start_size_offset_object_trial_t list[];
2407 } start_size_offset_object_trials_t;
2408
2409 TRIALS_IMPL(start_size_offset_object)
2410
2411 #define START_SIZE_OFFSET_OBJECT_TRIAL(new_start, new_size, new_offset, new_obj_size, new_start_is_absolute, new_size_is_absolute, new_name) \
2412 (start_size_offset_object_trial_t){ .start = (mach_vm_address_t)(new_start), \
2413 .size = (mach_vm_size_t)(new_size), \
2414 .offset = (vm_object_offset_t)(new_offset), \
2415 .obj_size = (mach_vm_size_t)(new_obj_size), \
2416 .start_is_absolute = (bool)(new_start_is_absolute), \
2417 .size_is_absolute = (bool)(new_size_is_absolute), \
2418 .name = new_name,}
2419
2420 bool
2421 obj_size_is_ok(mach_vm_size_t obj_size)
2422 {
2423 addr_t test_page_size = adjust_page_size();
2424 if (round_up_page(obj_size, test_page_size) == 0) {
2425 return false;
2426 }
2427 /* in rosetta, PAGE_SIZE is 4K but rounding to 16K also panics */
2428 if (!kern_trialname_generation && isRosetta() && round_up_page(obj_size, KB16) == 0) {
2429 return false;
2430 }
2431 return true;
2432 }
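
// A rounded size of 0 means obj_size lies within one page of the top of
// the address space: e.g. round_up_page(0xffffffffffffffff, KB16)
// overflows 64 bits to 0. make_a_mem_object would fail (or panic) on such
// sizes, so the generators below skip them.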
2433
2434 static start_size_offset_object_trial_t __attribute__((overloadable, used))
2435 slide_trial(start_size_offset_object_trial_t trial, mach_vm_address_t slide)
2436 {
2437 start_size_offset_object_trial_t result = trial;
2438
2439 if (!trial.start_is_absolute) {
2440 result.start += slide;
2441 if (!trial.size_is_absolute) {
2442 result.size -= slide;
2443 }
2444 }
2445 return result;
2446 }
2447
2448 static offset_list_t *
2449 get_ssoo_absolute_offsets()
2450 {
2451 static offset_list_t *offsets;
2452 addr_t test_page_size = adjust_page_size();
2453 if (!offsets) {
2454 offsets = allocate_offsets(20);
2455 append_offset(offsets, true, 0);
2456 append_offset(offsets, true, 1);
2457 append_offset(offsets, true, 2);
2458 append_offset(offsets, true, test_page_size - 2);
2459 append_offset(offsets, true, test_page_size - 1);
2460 append_offset(offsets, true, test_page_size);
2461 append_offset(offsets, true, test_page_size + 1);
2462 append_offset(offsets, true, test_page_size + 2);
2463 append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 2);
2464 append_offset(offsets, true, -(mach_vm_address_t)test_page_size - 1);
2465 append_offset(offsets, true, -(mach_vm_address_t)test_page_size);
2466 append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 1);
2467 append_offset(offsets, true, -(mach_vm_address_t)test_page_size + 2);
2468 append_offset(offsets, true, -(mach_vm_address_t)2);
2469 append_offset(offsets, true, -(mach_vm_address_t)1);
2470 }
2471 return offsets;
2472 }
2473
2474 static offset_list_t *
2475 get_ssoo_absolute_and_relative_offsets()
2476 {
2477 static offset_list_t *offsets;
2478 addr_t test_page_size = adjust_page_size();
2479 if (!offsets) {
2480 const offset_list_t *old_offsets = get_ssoo_absolute_offsets();
2481 offsets = allocate_offsets(old_offsets->count + 5);
2482 // absolute offsets
2483 for (unsigned i = 0; i < old_offsets->count; i++) {
2484 append_offset(offsets, true, old_offsets->list[i].offset);
2485 }
2486 // relative offsets
2487 append_offset(offsets, false, 0);
2488 append_offset(offsets, false, 1);
2489 append_offset(offsets, false, 2);
2490 append_offset(offsets, false, test_page_size - 2);
2491 append_offset(offsets, false, test_page_size - 1);
2492 }
2493 return offsets;
2494 }
2495
2496 start_size_offset_object_trials_t *
2497 generate_start_size_offset_object_trials()
2498 {
2499 const offset_list_t *start_offsets = get_ssoo_absolute_and_relative_offsets();
2500 const offset_list_t *size_offsets = get_ssoo_absolute_and_relative_offsets();
2501 const offset_list_t *offset_values = get_ssoo_absolute_offsets();
2502 const offset_list_t *object_sizes = get_ssoo_absolute_offsets();
2503
2504 unsigned num_trials = 0;
2505 for (size_t d = 0; d < object_sizes->count; d++) {
2506 mach_vm_size_t obj_size = object_sizes->list[d].offset;
2507 if (!obj_size_is_ok(obj_size)) { // make_a_mem_object would fail
2508 continue;
2509 }
2510 num_trials++;
2511 }
2512 num_trials *= start_offsets->count * size_offsets->count * offset_values->count;
2513
2514 start_size_offset_object_trials_t * trials = allocate_start_size_offset_object_trials(num_trials);
2515 for (size_t a = 0; a < start_offsets->count; a++) {
2516 for (size_t b = 0; b < size_offsets->count; b++) {
2517 for (size_t c = 0; c < offset_values->count; c++) {
2518 for (size_t d = 0; d < object_sizes->count; d++) {
2519 bool start_is_absolute = start_offsets->list[a].is_absolute;
2520 bool size_is_absolute = size_offsets->list[b].is_absolute;
2521 mach_vm_address_t start = start_offsets->list[a].offset;
2522 mach_vm_size_t size = size_offsets->list[b].offset;
2523 vm_object_offset_t offset = offset_values->list[c].offset;
2524 mach_vm_size_t obj_size = object_sizes->list[d].offset;
2525 if (!obj_size_is_ok(obj_size)) { // make_a_mem_object would fail
2526 continue;
2527 }
2528 char *str;
2529 kasprintf(&str, "start: %s0x%llx, size: %s0x%llx, offset: 0x%llx, obj_size: 0x%llx",
2530 start_is_absolute ? "" : "base+", start,
2531 size_is_absolute ? "" :"-start+", size,
2532 offset,
2533 obj_size);
2534 append_trial(trials, START_SIZE_OFFSET_OBJECT_TRIAL(start, size, offset, obj_size, start_is_absolute, size_is_absolute, str));
2535 }
2536 }
2537 }
2538 }
2539 return trials;
2540 }
2541
2542 #define SMART_START_SIZE_OFFSET_OBJECT_TRIALS() \
2543 __attribute__((cleanup(cleanup_start_size_offset_object_trials))) \
2544 = generate_start_size_offset_object_trials();
2545
2546 static void
2547 cleanup_start_size_offset_object_trials(start_size_offset_object_trials_t **trials)
2548 {
2549 for (size_t i = 0; i < (*trials)->count; i++) {
2550 kfree_str((*trials)->list[i].name);
2551 }
2552 free_trials(*trials);
2553 }
2554
2555
2556 // Trials for start/size/start/size tuples
2557
2558 typedef struct {
2559 mach_vm_address_t start;
2560 mach_vm_size_t size;
2561 mach_vm_address_t second_start;
2562 mach_vm_size_t second_size;
2563 bool start_is_absolute;
2564 bool size_is_absolute;
2565 bool second_start_is_absolute;
2566 bool second_size_is_absolute;
2567 char * name;
2568 } start_size_start_size_trial_t;
2569
2570 typedef struct {
2571 unsigned count;
2572 unsigned capacity;
2573 start_size_start_size_trial_t list[];
2574 } start_size_start_size_trials_t;
2575
2576 TRIALS_IMPL(start_size_start_size)
2577
2578 #define START_SIZE_START_SIZE_TRIAL(new_start, new_size, new_second_start, new_second_size, new_start_is_absolute, \
2579 new_size_is_absolute, new_second_start_is_absolute, new_second_size_is_absolute, new_name) \
2580 (start_size_start_size_trial_t){ .start = (mach_vm_address_t)(new_start), \
2581 .size = (mach_vm_size_t)(new_size), \
2582 .second_start = (mach_vm_address_t)(new_second_start), \
2583 .second_size = (mach_vm_size_t)(new_second_size), \
2584 .start_is_absolute = (bool)(new_start_is_absolute), \
2585 .size_is_absolute = (bool)(new_size_is_absolute), \
2586 .second_start_is_absolute = (bool)(new_second_start_is_absolute), \
2587 .second_size_is_absolute = (bool)(new_second_size_is_absolute),\
2588 .name = new_name,}
2589
2590 static start_size_start_size_trial_t __attribute__((overloadable, used))
2591 slide_trial(start_size_start_size_trial_t trial, mach_vm_address_t slide, mach_vm_address_t second_slide)
2592 {
2593 start_size_start_size_trial_t result = trial;
2594
2595 if (!trial.start_is_absolute) {
2596 result.start += slide;
2597 if (!trial.size_is_absolute) {
2598 result.size -= slide;
2599 }
2600 }
2601 if (!trial.second_start_is_absolute) {
2602 result.second_start += second_slide;
2603 if (!trial.second_size_is_absolute) {
2604 result.second_size -= second_slide;
2605 }
2606 }
2607 return result;
2608 }
2609
2610 start_size_start_size_trials_t *
2611 generate_start_size_start_size_trials()
2612 {
2613 /*
2614 * Reuse the starts/sizes from start/size/offset/object
2615 */
2616 const offset_list_t *start_offsets = get_ssoo_absolute_and_relative_offsets();
2617 const offset_list_t *size_offsets = get_ssoo_absolute_and_relative_offsets();
2618 const offset_list_t *second_start_offsets = get_ssoo_absolute_and_relative_offsets();
2619 const offset_list_t *second_size_offsets = get_ssoo_absolute_and_relative_offsets();
2620
2621 unsigned num_trials = start_offsets->count * size_offsets->count
2622 * second_start_offsets->count * second_size_offsets->count;
2623
2624 start_size_start_size_trials_t * trials = allocate_start_size_start_size_trials(num_trials);
2625 for (size_t a = 0; a < start_offsets->count; a++) {
2626 for (size_t b = 0; b < size_offsets->count; b++) {
2627 for (size_t c = 0; c < second_start_offsets->count; c++) {
2628 for (size_t d = 0; d < second_size_offsets->count; d++) {
2629 bool start_is_absolute = start_offsets->list[a].is_absolute;
2630 bool size_is_absolute = size_offsets->list[b].is_absolute;
2631 bool second_start_is_absolute = second_start_offsets->list[c].is_absolute;
2632 bool second_size_is_absolute = second_size_offsets->list[d].is_absolute;
2633 mach_vm_address_t start = start_offsets->list[a].offset;
2634 mach_vm_size_t size = size_offsets->list[b].offset;
2635 mach_vm_address_t second_start = second_start_offsets->list[c].offset;
2636 mach_vm_size_t second_size = second_size_offsets->list[d].offset;
2637
2638 char *str;
2639 kasprintf(&str, "start: %s0x%llx, size: %s0x%llx, second_start: %s0x%llx, second_size: %s0x%llx",
2640 start_is_absolute ? "" : "base+", start,
2641 size_is_absolute ? "" :"-start+", size,
2642 second_start_is_absolute ? "" : "base+", second_start,
2643 second_size_is_absolute ? "" : "-start+", second_size);
2644 append_trial(trials, START_SIZE_START_SIZE_TRIAL(start, size, second_start, second_size,
2645 start_is_absolute, size_is_absolute,
2646 second_start_is_absolute, second_size_is_absolute, str));
2647 }
2648 }
2649 }
2650 }
2651 return trials;
2652 }
2653
2654 #define SMART_START_SIZE_START_SIZE_TRIALS() \
2655 __attribute__((cleanup(cleanup_start_size_start_size_trials))) \
2656 = generate_start_size_start_size_trials();
2657
2658 static void __attribute__((used))
2659 cleanup_start_size_start_size_trials(start_size_start_size_trials_t **trials)
2660 {
2661 for (size_t i = 0; i < (*trials)->count; i++) {
2662 kfree_str((*trials)->list[i].name);
2663 }
2664 free_trials(*trials);
2665 }
2666
2667
2668 // start/size/offset: test start+size and a second independent address
2669 // consider src/dst/size instead if the size may be added to both addresses
2670
2671 typedef struct {
2672 mach_vm_address_t start;
2673 mach_vm_size_t size;
2674 vm_object_offset_t offset;
2675 bool start_is_absolute;
2676 bool size_is_absolute;
2677 char * name;
2678 } start_size_offset_trial_t;
2679
2680 typedef struct {
2681 unsigned count;
2682 unsigned capacity;
2683 start_size_offset_trial_t list[];
2684 } start_size_offset_trials_t;
2685
2686 TRIALS_IMPL(start_size_offset)
2687
2688 #define START_SIZE_OFFSET_TRIAL(new_start, new_size, new_offset, new_start_is_absolute, new_size_is_absolute, new_name) \
2689 (start_size_offset_trial_t){ .start = (mach_vm_address_t)(new_start), \
2690 .size = (mach_vm_size_t)(new_size), \
2691 .offset = (vm_object_offset_t)(new_offset), \
2692 .start_is_absolute = (bool)(new_start_is_absolute), \
2693 .size_is_absolute = (bool)(new_size_is_absolute), \
2694 .name = new_name,}
2695
2696
2697 static start_size_offset_trial_t __attribute__((overloadable, used))
2698 slide_trial(start_size_offset_trial_t trial, mach_vm_address_t slide)
2699 {
2700 start_size_offset_trial_t result = trial;
2701
2702 if (!trial.start_is_absolute) {
2703 result.start += slide;
2704 if (!trial.size_is_absolute) {
2705 result.size -= slide;
2706 }
2707 }
2708 return result;
2709 }
2710
2711 start_size_offset_trials_t *
2712 generate_start_size_offset_trials()
2713 {
2714 const offset_list_t *start_offsets = get_ssoo_absolute_and_relative_offsets();
2715 const offset_list_t *offset_values = get_ssoo_absolute_offsets();
2716 const offset_list_t *size_offsets = get_ssoo_absolute_and_relative_offsets();
2717
2718 // output is actually ordered start - offset - size
2719 // because it pretty-prints better than start - size - offset
2720 unsigned num_trials = start_offsets->count * offset_values->count * size_offsets->count;
2721 start_size_offset_trials_t * trials = allocate_start_size_offset_trials(num_trials);
2722 for (size_t a = 0; a < start_offsets->count; a++) {
2723 for (size_t b = 0; b < offset_values->count; b++) {
2724 for (size_t c = 0; c < size_offsets->count; c++) {
2725 bool start_is_absolute = start_offsets->list[a].is_absolute;
2726 bool size_is_absolute = size_offsets->list[c].is_absolute;
2727 mach_vm_address_t start = start_offsets->list[a].offset;
2728 vm_object_offset_t offset = offset_values->list[b].offset;
2729 mach_vm_size_t size = size_offsets->list[c].offset;
2730
2731 char *str;
2732 kasprintf(&str, "start: %s0x%llx, offset: 0x%llx, size: %s0x%llx",
2733 start_is_absolute ? "" : "base+", start,
2734 offset,
2735 size_is_absolute ? "" :"-start+", size);
2736 append_trial(trials, START_SIZE_OFFSET_TRIAL(start, size, offset, start_is_absolute, size_is_absolute, str));
2737 }
2738 }
2739 }
2740 return trials;
2741 }
2742
2743 #define SMART_START_SIZE_OFFSET_TRIALS() \
2744 __attribute__((cleanup(cleanup_start_size_offset_trials))) \
2745 = generate_start_size_offset_trials();
2746
2747 static void
2748 cleanup_start_size_offset_trials(start_size_offset_trials_t **trials)
2749 {
2750 for (size_t i = 0; i < (*trials)->count; i++) {
2751 kfree_str((*trials)->list[i].name);
2752 }
2753 free_trials(*trials);
2754 }
2755
2756 // src/dst/size: test a source address, a dest address,
2757 // and a common size that may be added to both addresses
2758
2759 typedef struct {
2760 addr_t src;
2761 addr_t dst;
2762 addr_t size;
2763 char *name;
2764 bool src_is_absolute; // src computation does not include any allocation's base address
2765 bool dst_is_absolute; // dst computation does not include any allocation's base address
2766 bool size_is_src_relative; // size computation includes src
2767 bool size_is_dst_relative; // size computation includes dst
2768 } src_dst_size_trial_t;
2769
2770 typedef struct {
2771 unsigned count;
2772 unsigned capacity;
2773 src_dst_size_trial_t list[];
2774 } src_dst_size_trials_t;
2775
2776 TRIALS_IMPL(src_dst_size)
2777
2778 #define SRC_DST_SIZE_TRIAL(new_src, new_dst, new_size, new_name, src_absolute, dst_absolute, size_src_rel, size_dst_rel) \
2779 (src_dst_size_trial_t){ \
2780 .src = (addr_t)(new_src), \
2781 .dst = (addr_t)(new_dst), \
2782 .size = (addr_t)(new_size), \
2783 .name = new_name, \
2784 .src_is_absolute = src_absolute, \
2785 .dst_is_absolute = dst_absolute, \
2786 .size_is_src_relative = size_src_rel, \
2787 .size_is_dst_relative = size_dst_rel, \
2788 }
2789
2790 src_dst_size_trials_t * __attribute__((overloadable))
2791 generate_src_dst_size_trials(const char *srcname, const char *dstname)
2792 {
2793 const offset_list_t *addr_offsets = get_addr_trial_offsets();
2794 const offset_list_t *size_offsets = get_size_trial_offsets();
2795 unsigned src_count = addr_offsets->count;
2796 unsigned dst_count = src_count;
2797 unsigned size_count = 3 * size_offsets->count;
2798 unsigned num_trials = src_count * dst_count * size_count;
2799 src_dst_size_trials_t * trials = allocate_src_dst_size_trials(num_trials);
2800
2801 // each size is used three times:
2802 // once src-relative, once dst-relative, and once absolute
2803 unsigned size_part = size_count / 3;
2804
2805 for (size_t i = 0; i < src_count; i++) {
2806 bool rebase_src = !addr_offsets->list[i].is_absolute;
2807 addr_t src_offset = addr_offsets->list[i].offset;
2808
2809 for (size_t j = 0; j < dst_count; j++) {
2810 bool rebase_dst = !addr_offsets->list[j].is_absolute;
2811 addr_t dst_offset = addr_offsets->list[j].offset;
2812
2813 for (size_t k = 0; k < size_count; k++) {
2814 bool rebase_size_from_src = false;
2815 bool rebase_size_from_dst = false;
2816 addr_t size_offset;
2817 if (k < size_part) {
2818 size_offset = size_offsets->list[k].offset;
2819 } else if (k < 2 * size_part) {
2820 size_offset = size_offsets->list[k - size_part].offset;
2821 rebase_size_from_src = true;
2822 rebase_size_from_dst = false;
2823 } else {
2824 size_offset = size_offsets->list[k - 2 * size_part].offset;
2825 rebase_size_from_src = false;
2826 rebase_size_from_dst = true;
2827 }
2828
2829 addr_t size;
2830 char *desc;
2831 if (rebase_size_from_src) {
2832 size = -src_offset + size_offset;
2833 kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: -%s%+lli",
2834 srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
2835 dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
2836 srcname, (int64_t)size_offset);
2837 } else if (rebase_size_from_dst) {
2838 size = -dst_offset + size_offset;
2839 kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: -%s%+lli",
2840 srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
2841 dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
2842 dstname, (int64_t)size_offset);
2843 } else {
2844 size = size_offset;
2845 kasprintf(&desc, "%s: %s%lli, %s: %s%lli, size: %lli",
2846 srcname, rebase_src ? "base+" : "", (int64_t)src_offset,
2847 dstname, rebase_dst ? "base+" : "", (int64_t)dst_offset,
2848 (int64_t)size_offset);
2849 }
2850 assert(desc);
2851 append_trial(trials, SRC_DST_SIZE_TRIAL(src_offset, dst_offset, size, desc,
2852 !rebase_src, !rebase_dst, rebase_size_from_src, rebase_size_from_dst));
2853 }
2854 }
2855 }
2856 return trials;
2857 }
2858
2859 src_dst_size_trials_t * __attribute__((overloadable))
2860 generate_src_dst_size_trials(void)
2861 {
2862 return generate_src_dst_size_trials("src", "dst");
2863 }
2864 #define SMART_SRC_DST_SIZE_TRIALS() \
2865 __attribute__((cleanup(cleanup_src_dst_size_trials))) \
2866 = generate_src_dst_size_trials();
2867
2868 #define SMART_FILEOFF_DST_SIZE_TRIALS() \
2869 __attribute__((cleanup(cleanup_src_dst_size_trials))) \
2870 = generate_src_dst_size_trials("fileoff", "dst");
2871
2872 static void
2873 cleanup_src_dst_size_trials(src_dst_size_trials_t **trials)
2874 {
2875 for (size_t i = 0; i < (*trials)->count; i++) {
2876 kfree_str((*trials)->list[i].name);
2877 }
2878 free_trials(*trials);
2879 }
2880
2881 static src_dst_size_trial_t __attribute__((overloadable, used))
2882 slide_trial_src(src_dst_size_trial_t trial, mach_vm_address_t slide)
2883 {
2884 src_dst_size_trial_t result = trial;
2885
2886 if (!trial.src_is_absolute) {
2887 result.src += slide;
2888 if (trial.size_is_src_relative) {
2889 result.size -= slide;
2890 }
2891 }
2892 return result;
2893 }
2894
2895 static src_dst_size_trial_t __attribute__((overloadable, used))
2896 slide_trial_dst(src_dst_size_trial_t trial, mach_vm_address_t slide)
2897 {
2898 src_dst_size_trial_t result = trial;
2899
2900 if (!trial.dst_is_absolute) {
2901 result.dst += slide;
2902 if (trial.size_is_dst_relative) {
2903 result.size -= slide;
2904 }
2905 }
2906 return result;
2907 }
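
// src and dst are slid independently (each typically by its own
// allocation's base address). A size generated src-relative is adjusted
// only by slide_trial_src, and a dst-relative size only by
// slide_trial_dst, so src + size (resp. dst + size) stays fixed across
// slides, mirroring the start/size invariant above.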
2908
2909 #if !KERNEL
2910 // shared_file_np / shared_file_mapping_slide_np tests
2911
2912 // copied from bsd/vm/vm_unix.c
2913 #define _SR_FILE_MAPPINGS_MAX_FILES 256
2914 #define SFM_MAX (_SR_FILE_MAPPINGS_MAX_FILES * 8)
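// i.e. 256 * 8 = 2048 mappings total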
2915
2916 // From Rosetta dyld
2917 #define kNumSharedCacheMappings 4
2918 #define kMaxSubcaches 16
2919
2920 typedef struct {
2921 uint32_t files_count;
2922 struct shared_file_np *files;
2923 char *name;
2924 } shared_file_np_trial_t;
2925
2926 typedef struct {
2927 unsigned count;
2928 unsigned capacity;
2929 shared_file_np_trial_t list[];
2930 } shared_file_np_trials_t;
2931
2932 TRIALS_IMPL(shared_file_np)
2933
2934 #define SHARED_FILE_NP_TRIAL(new_files_count, new_files, new_name) \
2935 (shared_file_np_trial_t){ .files_count = (uint32_t)(new_files_count), \
2936 .files = (struct shared_file_np *)(new_files), \
2937 .name = "files_count="#new_files_count new_name }
2938
2939 struct shared_file_np *
2940 alloc_shared_file_np(uint32_t files_count)
2941 {
2942 struct shared_file_np *files;
2943 #if KERNEL
2944 files = kalloc_type(struct shared_file_np, files_count, Z_WAITOK | Z_ZERO);
2945 #else
2946 files = calloc(files_count, sizeof(struct shared_file_np));
2947 #endif
2948 return files;
2949 }
2950
2951 void
2952 free_shared_file_np(shared_file_np_trial_t *trial)
2953 {
2954 #if KERNEL
2955 // some trials have files_count > 0 but null files.
2956 if (trial->files) {
2957 kfree_type(struct shared_file_np, trial->files_count, trial->files);
2958 }
2959 #else
2960 free(trial->files);
2961 #endif
2962 }
2963
2964 static int get_fd();
2965
2966 shared_file_np_trials_t *
2967 get_shared_file_np_trials(uint64_t dyld_fd)
2968 {
2969 struct shared_file_np * files = NULL;
2970 shared_file_np_trials_t *trials = allocate_shared_file_np_trials(11);
2971 append_trial(trials, SHARED_FILE_NP_TRIAL(0, NULL, " (NULL files)"));
2972 append_trial(trials, SHARED_FILE_NP_TRIAL(1, NULL, " (NULL files)"));
2973 append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES - 1, NULL, " (NULL files)"));
2974 append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES, NULL, " (NULL files)"));
2975 append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES + 1, NULL, " (NULL files)"));
2976 files = alloc_shared_file_np(1);
2977 append_trial(trials, SHARED_FILE_NP_TRIAL(1, files, ""));
2978 files = alloc_shared_file_np(_SR_FILE_MAPPINGS_MAX_FILES - 1);
2979 append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES - 1, files, ""));
2980 files = alloc_shared_file_np(_SR_FILE_MAPPINGS_MAX_FILES);
2981 append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES, files, ""));
2982 files = alloc_shared_file_np(_SR_FILE_MAPPINGS_MAX_FILES + 1);
2983 append_trial(trials, SHARED_FILE_NP_TRIAL(_SR_FILE_MAPPINGS_MAX_FILES + 1, files, ""));
2984 files = alloc_shared_file_np(1);
2985 files->sf_fd = get_fd();
2986 files->sf_slide = 4096;
2987 files->sf_mappings_count = 1;
2988 append_trial(trials, SHARED_FILE_NP_TRIAL(1, files, " non-zero shared_file_np"));
2989 files = alloc_shared_file_np(2);
2990 files[0].sf_fd = (int)dyld_fd;
2991 files[0].sf_mappings_count = 1;
2992 files[1].sf_fd = files[0].sf_fd;
2993 files[1].sf_mappings_count = 4;
2994 append_trial(trials, SHARED_FILE_NP_TRIAL(2, files, " checks shared_file_np"));
2995 return trials;
2996 }
2997
2998 static void
2999 cleanup_shared_file_np_trials(shared_file_np_trials_t **trials)
3000 {
3001 for (size_t i = 0; i < (*trials)->count; i++) {
3002 free_shared_file_np(&(*trials)->list[i]);
3003 }
3004 free_trials(*trials);
3005 }
3006
3007 typedef struct {
3008 uint32_t mappings_count;
3009 struct shared_file_mapping_slide_np *mappings;
3010 char *name;
3011 } shared_file_mapping_slide_np_trial_t;
3012
3013 typedef struct {
3014 unsigned count;
3015 unsigned capacity;
3016 shared_file_mapping_slide_np_trial_t list[];
3017 } shared_file_mapping_slide_np_trials_t;
3018
3019 TRIALS_IMPL(shared_file_mapping_slide_np)
3020
3021 #define SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(new_mappings_count, new_mappings, new_name) \
3022 (shared_file_mapping_slide_np_trial_t){ .mappings_count = (uint32_t)(new_mappings_count), \
3023 .mappings = (struct shared_file_mapping_slide_np *)(new_mappings), \
3024 .name = "mappings_count="#new_mappings_count new_name }
3025
3026 struct shared_file_mapping_slide_np *
3027 alloc_shared_file_mapping_slide_np(uint32_t mappings_count)
3028 {
3029 struct shared_file_mapping_slide_np *mappings;
3030 #if KERNEL
3031 mappings = kalloc_type(struct shared_file_mapping_slide_np, mappings_count, Z_WAITOK | Z_ZERO);
3032 #else
3033 mappings = calloc(mappings_count, sizeof(struct shared_file_mapping_slide_np));
3034 #endif
3035 return mappings;
3036 }
3037
3038 void
3039 free_shared_file_mapping_slide_np(shared_file_mapping_slide_np_trial_t *trial)
3040 {
3041 #if KERNEL
3042 // some trials have mappings_count > 0 but null mappings.
3043 if (trial->mappings) {
3044 kfree_type(struct shared_file_mapping_slide_np, trial->mappings_count, trial->mappings);
3045 }
3046 #else
3047 free(trial->mappings);
3048 #endif
3049 }
3050
3051 typedef enum { MP_NORMAL = 0, MP_ADDR_SIZE = 1, MP_OFFSET_SIZE, MP_PROTS } mapping_slide_np_test_style_t;
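
// What each style exercises in mappings[0] (see the switch below):
// MP_NORMAL is a well-formed mapping; MP_ADDR_SIZE makes
// sms_address + sms_size overflow (1 + UINT64_MAX); MP_OFFSET_SIZE pairs a
// huge sms_file_offset with a zero sms_size; MP_PROTS sets an invalid
// sms_init_prot (INT_MAX).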
3052
3053 static inline struct shared_file_mapping_slide_np *
3054 alloc_and_fill_shared_file_mappings(uint32_t num_mappings, mapping_slide_np_test_style_t style)
3055 {
3056 assert(num_mappings > 0);
3057 struct shared_file_mapping_slide_np *mappings = alloc_shared_file_mapping_slide_np(num_mappings);
3058
3059 // Checks happen in a for-loop, so it is desirable to differentiate the first mapping.
3060 switch (style) {
3061 case MP_NORMAL:
3062 mappings[0].sms_slide_size = KB4;
3063 mappings[0].sms_slide_start = KB4;
3064 mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3065 mappings[0].sms_init_prot = VM_PROT_DEFAULT;
3066 break;
3067 case MP_ADDR_SIZE:
3068 mappings[0].sms_address = 1;
3069 mappings[0].sms_size = UINT64_MAX;
3070 mappings[0].sms_file_offset = 0;
3071 mappings[0].sms_slide_size = KB4;
3072 mappings[0].sms_slide_start = KB4;
3073 mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3074 mappings[0].sms_init_prot = VM_PROT_DEFAULT;
3075 break;
3076 case MP_OFFSET_SIZE:
3077 mappings[0].sms_size = 0;
3078 mappings[0].sms_file_offset = UINT64_MAX;
3079 mappings[0].sms_slide_size = KB4;
3080 mappings[0].sms_slide_start = KB4;
3081 mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3082 mappings[0].sms_init_prot = VM_PROT_DEFAULT;
3083 break;
3084 case MP_PROTS:
3085 mappings[0].sms_slide_size = KB4;
3086 mappings[0].sms_slide_start = KB4;
3087 mappings[0].sms_max_prot = VM_PROT_DEFAULT;
3088 mappings[0].sms_init_prot = INT_MAX;
3089 break;
3090 default:
3091 assert(0);
3092 break;
3093 }
3094
3095 for (size_t idx = 1; idx < num_mappings; idx++) {
3096 size_t i = idx % 4;
3097 switch (i) {
3098 case 0:
3099 mappings[idx].sms_slide_size = KB4;
3100 mappings[idx].sms_slide_start = KB4;
3101 mappings[idx].sms_max_prot = VM_PROT_DEFAULT;
3102 mappings[idx].sms_init_prot = VM_PROT_DEFAULT;
3103 break;
3104 case 1:
3105 mappings[idx].sms_slide_size = KB4;
3106 mappings[idx].sms_slide_start = UINT64_MAX;
3107 mappings[idx].sms_max_prot = VM_PROT_DEFAULT;
3108 mappings[idx].sms_init_prot = VM_PROT_DEFAULT;
3109 break;
3110 case 2:
3111 mappings[idx].sms_slide_size = 0;
3112 mappings[idx].sms_slide_start = UINT64_MAX;
3113 mappings[idx].sms_max_prot = VM_PROT_DEFAULT;
3114 mappings[idx].sms_init_prot = INT_MAX;
3115 break;
3116 case 3:
3117 mappings[idx].sms_slide_size = KB4;
3118 mappings[idx].sms_slide_start = 0;
3119 mappings[idx].sms_max_prot = INT_MAX;
3120 mappings[idx].sms_init_prot = VM_PROT_DEFAULT;
3121 break;
3122 default:
3123 assert(0);
3124 break;
3125 }
3126 }
3127 return mappings;
3128 }
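
// Mappings after the first cycle through four patterns (one valid, three
// with bad sms_slide_start/sms_slide_size/prot combinations), so
// multi-mapping trials hit validation failures at varying loop indices.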
3129
3130 shared_file_mapping_slide_np_trials_t*
3131 get_shared_file_mapping_slide_np_trials(void)
3132 {
3133 struct shared_file_mapping_slide_np *mappings = NULL;
3134 shared_file_mapping_slide_np_trials_t *trials = allocate_shared_file_mapping_slide_np_trials(14);
3135 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(0, NULL, " (NULL mappings)"));
3136 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, NULL, " (NULL mappings)"));
3137 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX - 1, NULL, " (NULL mappings)"));
3138 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX, NULL, " (NULL mappings)"));
3139 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX + 1, NULL, " (NULL mappings)"));
3140 mappings = alloc_and_fill_shared_file_mappings(1, MP_NORMAL);
3141 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (normal)"));
3142 mappings = alloc_and_fill_shared_file_mappings(1, MP_ADDR_SIZE);
3143 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (sms_address+sms_size check)"));
3144 mappings = alloc_and_fill_shared_file_mappings(1, MP_OFFSET_SIZE);
3145 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (sms_file_offset+sms_size check)"));
3146 mappings = alloc_and_fill_shared_file_mappings(1, MP_PROTS);
3147 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(1, mappings, " (sms_init_prot check)"));
3148 mappings = alloc_and_fill_shared_file_mappings(SFM_MAX - 1, MP_NORMAL);
3149 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX - 1, mappings, ""));
3150 mappings = alloc_and_fill_shared_file_mappings(SFM_MAX, MP_NORMAL);
3151 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX, mappings, ""));
3152 mappings = alloc_and_fill_shared_file_mappings(SFM_MAX + 1, MP_NORMAL);
3153 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(SFM_MAX + 1, mappings, ""));
3154 mappings = alloc_and_fill_shared_file_mappings(kNumSharedCacheMappings, MP_NORMAL);
3155 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(kNumSharedCacheMappings, mappings, ""));
3156 mappings = alloc_and_fill_shared_file_mappings(2 * kNumSharedCacheMappings, MP_NORMAL);
3157 append_trial(trials, SHARED_FILE_MAPPING_SLIDE_NP_TRIAL(2 * kNumSharedCacheMappings, mappings, ""));
3158
3159 return trials;
3160 }
3161
3162 static void
3163 cleanup_shared_file_mapping_slide_np_trials(shared_file_mapping_slide_np_trials_t **trials)
3164 {
3165 for (size_t i = 0; i < (*trials)->count; i++) {
3166 free_shared_file_mapping_slide_np(&(*trials)->list[i]);
3167 }
3168 free_trials(*trials);
3169 }
3170
3171 typedef struct {
3172 uint32_t files_count;
3173 struct shared_file_np *files;
3174 uint32_t mappings_count;
3175 struct shared_file_mapping_slide_np *mappings;
3176 char *name;
3177 } shared_region_map_and_slide_2_trial_t;
3178
3179 typedef struct {
3180 unsigned count;
3181 unsigned capacity;
3182 shared_file_np_trials_t *shared_files_trials;
3183 shared_file_mapping_slide_np_trials_t *shared_mappings_trials;
3184 shared_region_map_and_slide_2_trial_t list[];
3185 } shared_region_map_and_slide_2_trials_t;
3186
3187 TRIALS_IMPL(shared_region_map_and_slide_2)
3188
3189 #define SHARED_REGION_MAP_AND_SLIDE_2_TRIAL(new_files_count, new_files, new_mappings_count, new_mappings, new_name) \
3190 (shared_region_map_and_slide_2_trial_t){ .files_count = (uint32_t)(new_files_count), \
3191 .files = (struct shared_file_np *)(new_files), \
3192 .mappings_count = (uint32_t)(new_mappings_count), \
3193 .mappings = (struct shared_file_mapping_slide_np *)(new_mappings), \
3194 .name = new_name }
3195
3196 shared_region_map_and_slide_2_trials_t *
3197 generate_shared_region_map_and_slide_2_trials(uint64_t dyld_fd)
3198 {
3199 shared_file_np_trials_t *shared_files = get_shared_file_np_trials(dyld_fd);
3200 shared_file_mapping_slide_np_trials_t *shared_mappings = get_shared_file_mapping_slide_np_trials();
3201 unsigned num_trials = shared_files->count * shared_mappings->count;
3202 shared_region_map_and_slide_2_trials_t *trials = allocate_shared_region_map_and_slide_2_trials(num_trials);
3203 trials->shared_files_trials = shared_files;
3204 trials->shared_mappings_trials = shared_mappings;
3205 for (size_t i = 0; i < shared_files->count; i++) {
3206 for (size_t j = 0; j < shared_mappings->count; j++) {
3207 char *buf;
3208 shared_file_np_trial_t shared_file = shared_files->list[i];
3209 shared_file_mapping_slide_np_trial_t shared_mapping = shared_mappings->list[j];
3210 kasprintf(&buf, "%s, %s", shared_file.name, shared_mapping.name);
3211 append_trial(trials, SHARED_REGION_MAP_AND_SLIDE_2_TRIAL(shared_file.files_count, shared_file.files, shared_mapping.mappings_count, shared_mapping.mappings, buf));
3212 }
3213 }
3214 return trials;
3215 }
3216
3217 #define SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(dyld_fd) \
3218 __attribute__((cleanup(cleanup_shared_region_map_and_slide_2_trials))) \
3219 = generate_shared_region_map_and_slide_2_trials(dyld_fd);
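/*
 * Usage sketch (hypothetical variable name; the cleanup function below
 * runs automatically at end of scope, and the macro supplies the trailing
 * semicolon):
 *
 *   shared_region_map_and_slide_2_trials_t *trials
 *       SMART_SHARED_REGION_MAP_AND_SLIDE_2_TRIALS(dyld_fd)
 */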
3220
3221 static void __attribute__((used))
3222 cleanup_shared_region_map_and_slide_2_trials(shared_region_map_and_slide_2_trials_t **trials)
3223 {
3224 for (size_t i = 0; i < (*trials)->count; i++) {
3225 kfree_str((*trials)->list[i].name);
3226 }
3227 cleanup_shared_file_np_trials(&(*trials)->shared_files_trials);
3228 cleanup_shared_file_mapping_slide_np_trials(&(*trials)->shared_mappings_trials);
3229 free_trials(*trials);
3230 }
3231 #endif // !KERNEL
3232
3233 /////////////////////////////////////////////////////
3234 // utility code
3235
3236 // Return true if flags has VM_FLAGS_FIXED
3237 // This is non-trivial because VM_FLAGS_FIXED is zero;
3238 // the real value is the absence of VM_FLAGS_ANYWHERE.
3239 static inline bool
3240 is_fixed(int flags)
3241 {
3242 static_assert(VM_FLAGS_FIXED == 0, "this test requires VM_FLAGS_FIXED be zero");
3243 static_assert(VM_FLAGS_ANYWHERE != 0, "this test requires VM_FLAGS_ANYWHERE be nonzero");
3244 return !(flags & VM_FLAGS_ANYWHERE);
3245 }
3246
3247 // Return true if flags has VM_FLAGS_FIXED and VM_FLAGS_OVERWRITE set.
3248 static inline bool
3249 is_fixed_overwrite(int flags)
3250 {
3251 return is_fixed(flags) && (flags & VM_FLAGS_OVERWRITE);
3252 }
3253
3254
3255 // Return true if flags has VM_FLAGS_ANYWHERE and VM_FLAGS_RANDOM_ADDR set.
3256 static inline bool
3257 is_random_anywhere(int flags)
3258 {
3259 static_assert(VM_FLAGS_ANYWHERE != 0, "this test requires VM_FLAGS_ANYWHERE be nonzero");
3260 return (flags & VM_FLAGS_RANDOM_ADDR) && (flags & VM_FLAGS_ANYWHERE);
3261 }
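/*
 * Examples (these follow mechanically from the predicates above):
 *   is_fixed(VM_FLAGS_FIXED)                                      -> true (FIXED is zero)
 *   is_fixed(VM_FLAGS_ANYWHERE)                                   -> false
 *   is_fixed_overwrite(VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE)       -> true
 *   is_random_anywhere(VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR)  -> true
 *   is_random_anywhere(VM_FLAGS_FIXED | VM_FLAGS_RANDOM_ADDR)     -> false
 */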
3262
3263 // Deallocate [start, start+size).
3264 // Don't deallocate if the allocator failed (allocator_kr)
3265 // Don't deallocate if flags include FIXED | OVERWRITE (in which case
3266 // the memory is a pre-existing allocation and should be left alone)
3267 static void
3268 deallocate_if_not_fixed_overwrite(kern_return_t allocator_kr, MAP_T map,
3269 mach_vm_address_t start, mach_vm_size_t size, int flags)
3270 {
3271 if (is_fixed_overwrite(flags)) {
3272 // fixed-overwrite with pre-existing allocation, don't deallocate
3273 } else if (allocator_kr != 0) {
3274 // allocator failed, don't deallocate
3275 } else {
3276 (void)mach_vm_deallocate(map, start, size);
3277 }
3278 }
3279
3280 // PPL is inefficient at deallocations of very large address ranges.
3281 // Skip those trials to avoid test timeouts.
3282 // We assume that tests on other devices will cover any testing gaps.
3283 static inline bool
3284 dealloc_would_time_out(
3285 mach_vm_address_t addr __unused,
3286 mach_vm_size_t size __unused,
3287 vm_map_t map __unused)
3288 {
3289 #if CONFIG_SPTM
3290 /* not PPL - okay */
3291 return false;
3292 #elif !(__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)
3293 /* PPL but small pmap address space - okay */
3294 return false;
3295 #else
3296 /*
3297 * PPL with large pmap address space - bad
3298 * Pre-empt trials of very large allocations.
3299 */
3300 return size > 0x8000000000;
3301 #endif
3302 }
3303
3304 #if !KERNEL
3305
3306 // SMART_MAP is mach_task_self() in userspace and a new empty map in kernel
3307 #define SMART_MAP = mach_task_self()
3308
3309 // CURRENT_MAP is mach_task_self() in userspace and current_map() in kernel
3310 #define CURRENT_MAP = mach_task_self()
3311
3312 #else
3313
3314 static inline vm_map_t
3315 create_map(mach_vm_address_t map_start, mach_vm_address_t map_end)
3316 {
3317 ledger_t ledger = ledger_instantiate(task_ledger_template, LEDGER_CREATE_ACTIVE_ENTRIES);
3318 pmap_t pmap = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
3319 assert(pmap);
3320 ledger_dereference(ledger); // now retained by pmap
3321 vm_map_t map = vm_map_create_options(pmap, map_start, map_end, VM_MAP_CREATE_PAGEABLE);
3322 assert(map);
3323
3324 /*
3325 * Normally, we would vm_map_setup a task's map, but since we're breaking the assumed
3326 * 1:1 correspondence between map and task here, we must manually set up the map's
3327 * back pointer, without repeating any one-time task setup (e.g. registering reclaim
3328 * buffers)
3329 */
3330 map->owning_task = current_task();
3331
3332 return map;
3333 }
3334
3335 static inline void
3336 cleanup_map(vm_map_t *map)
3337 {
3338 assert(*map);
3339 kern_return_t kr = vm_map_terminate(*map);
3340 assert(kr == 0);
3341 vm_map_deallocate(*map); // also destroys pmap
3342 }
3343
3344 // kernel: create a new vm_map and deallocate it at end of scope
3345 // fixme choose a user-like and a kernel-like address range
3346 #define SMART_MAP \
3347 __attribute__((cleanup(cleanup_map))) = create_map(0, 0xffffffffffffffff)
3348
3349 // This map has a map_offset that matches what a user would get. This allows
3350 // vm_map_user_ranges to work properly when tested from the kernel
3351 #define SMART_RANGE_MAP \
3352 __attribute__((cleanup(cleanup_map))) = create_map(0, vm_compute_max_offset(true))
3353
3354 #define CURRENT_MAP = current_map()
3355
3356 #endif
3357
3358 // Allocate with an address hint.
3359 static kern_return_t
3360 allocate_after(
3361 MAP_T map,
3362 mach_vm_address_t *address,
3363 mach_vm_size_t size,
3364 mach_vm_size_t align_mask,
3365 int additional_map_flags)
3366 {
3367 return mach_vm_map(map, address, size, align_mask,
3368 VM_FLAGS_ANYWHERE | additional_map_flags, 0, 0, 0,
3369 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
3370 }
3371
3372 static inline mach_vm_address_t
3373 default_allocation_address_hint(void)
3374 {
3375 /*
3376 * Try to allocate after address 2 GB. It is important in
3377 * in-kernel tests of empty maps to avoid addresses near 0 and ~0.
3378 */
3379 return 2ull * 1024 * 1024 * 1024;
3380 }
3381
3382 // allocate a purgeable VM region with size and permissions
3383 // and deallocate it at end of scope
3384 #define SMART_ALLOCATE_PURGEABLE_VM(map, size, perm) \
3385 __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, 0, perm, false, VM_FLAGS_PURGABLE)
3386
3387 // allocate a VM region with size and permissions
3388 // and deallocate it at end of scope
3389 #define SMART_ALLOCATE_VM(map, size, perm) \
3390 __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, 0, perm, false, 0)
3391
3392 // allocate a VM region with size and permissions
3393 // and an address hint to allocate after
3394 // and deallocate it at end of scope
3395 #define SMART_ALLOCATE_VM_AFTER(map, address_hint, size, perm) \
3396 __attribute__((cleanup(cleanup_allocation))) = create_allocation_after(map, address_hint, size, 0, perm, false, 0)
3397
3398 // allocate a VM region with size and permissions and alignment
3399 // and deallocate it at end of scope
3400 #define SMART_ALLOCATE_ALIGNED_VM(map, size, align_mask, perm) \
3401 __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, align_mask, perm, false, 0)
3402
3403 // allocate a VM region with size and permissions
3404 // and deallocate it at end of scope
3405 // If no such region could be allocated, return {.addr = 0}
3406 #define SMART_TRY_ALLOCATE_VM(map, size, perm) \
3407 __attribute__((cleanup(cleanup_allocation))) = create_allocation(map, size, 0, perm, true, 0)
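/*
 * Usage sketch of the scope-cleanup pattern (this is exactly how the
 * test runners later in this file use these macros):
 *
 *   MAP_T map SMART_MAP;
 *   allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
 *   // ... exercise the function under test on base.addr / base.size ...
 *   // cleanup_allocation() and cleanup_map() run automatically at end of scope
 */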
3408
3409 // a VM allocation with unallocated pages around it
3410 typedef struct {
3411 MAP_T map;
3412 addr_t guard_size;
3413 addr_t guard_prefix; // guard_size bytes
3414 addr_t unallocated_prefix; // guard_size bytes
3415 addr_t addr;
3416 addr_t size;
3417 addr_t unallocated_suffix; // guard_size bytes
3418 addr_t guard_suffix; // guard_size bytes
3419 } allocation_t;
3420
3421 static allocation_t
3422 create_allocation_after(MAP_T new_map, mach_vm_address_t address_hint, mach_vm_address_t new_size, mach_vm_size_t align_mask,
3423 vm_prot_t perm, bool allow_failure, int additional_map_flags)
3424 {
3425 // allocations in address order:
3426 // 16K guard_prefix (allocated, prot none)
3427 // 16K unallocated_prefix (unallocated)
3428 // N addr..addr+size
3429 // 16K unallocated_suffix (unallocated)
3430 // 16K guard_suffix (allocated, prot none)
3431
3432 // allocate new_size + 4 * 16K bytes
3433 // then carve it up into our regions
3434
3435 allocation_t result;
3436
3437 result.map = new_map;
3438
3439 // this implementation only works with some alignment values
3440 assert(align_mask == 0 || align_mask == KB4 - 1 || align_mask == KB16 - 1);
3441
3442 result.guard_size = KB16;
3443 result.size = round_up_page(new_size, KB16);
3444 if (result.size == 0 && allow_failure) {
3445 return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
3446 }
3447 assert(result.size != 0);
3448
3449 mach_vm_address_t allocated_base = address_hint;
3450 mach_vm_size_t allocated_size = result.size;
3451 if (__builtin_add_overflow(result.size, result.guard_size * 4, &allocated_size)) {
3452 if (allow_failure) {
3453 return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
3454 } else {
3455 assert(false);
3456 }
3457 }
3458
3459 kern_return_t kr;
3460 kr = allocate_after(result.map, &allocated_base, allocated_size,
3461 align_mask, additional_map_flags);
3462 if (kr != 0 && allow_failure) {
3463 return (allocation_t){new_map, 0, 0, 0, 0, 0, 0, 0};
3464 }
3465 assert(kr == 0);
3466
3467 result.guard_prefix = (addr_t)allocated_base;
3468 result.unallocated_prefix = result.guard_prefix + result.guard_size;
3469 result.addr = result.unallocated_prefix + result.guard_size;
3470 result.unallocated_suffix = result.addr + result.size;
3471 result.guard_suffix = result.unallocated_suffix + result.guard_size;
3472
3473 kr = mach_vm_protect(result.map, result.addr, result.size, false, perm);
3474 assert(kr == 0);
3475 kr = mach_vm_protect(result.map, result.guard_prefix, result.guard_size, true, VM_PROT_NONE);
3476 assert(kr == 0);
3477 kr = mach_vm_protect(result.map, result.guard_suffix, result.guard_size, true, VM_PROT_NONE);
3478 assert(kr == 0);
3479 kr = mach_vm_deallocate(result.map, result.unallocated_prefix, result.guard_size);
3480 assert(kr == 0);
3481 kr = mach_vm_deallocate(result.map, result.unallocated_suffix, result.guard_size);
3482 assert(kr == 0);
3483
3484 return result;
3485 }
3486
3487 static allocation_t
3488 create_allocation(MAP_T new_map, mach_vm_address_t new_size, mach_vm_size_t align_mask,
3489 vm_prot_t perm, bool allow_failure, int additional_map_flags)
3490 {
3491 mach_vm_address_t address_hint = default_allocation_address_hint();
3492 return create_allocation_after(new_map, address_hint, new_size, align_mask, perm, allow_failure, additional_map_flags);
3493 }
3494
3495 // Mark this allocation as deallocated by something else.
3496 // This means cleanup_allocation() won't deallocate it twice.
3497 // cleanup_allocation() will still free the guard pages.
3498 static void
3499 set_already_deallocated(allocation_t *allocation)
3500 {
3501 allocation->addr = 0;
3502 allocation->size = 0;
3503 }
3504
3505 static void
3506 cleanup_allocation(allocation_t *allocation)
3507 {
3508 // fixme verify allocations and unallocated spaces still exist where we expect
3509 if (allocation->size) {
3510 (void)mach_vm_deallocate(allocation->map, allocation->addr, allocation->size);
3511 }
3512 if (allocation->guard_size) {
3513 (void)mach_vm_deallocate(allocation->map, allocation->guard_prefix, allocation->guard_size);
3514 (void)mach_vm_deallocate(allocation->map, allocation->guard_suffix, allocation->guard_size);
3515 }
3516 }
3517
3518
3519 // unallocate a VM region with size
3520 // and deallocate it at end of scope
3521 #define SMART_UNALLOCATE_VM(map, size) \
3522 __attribute__((cleanup(cleanup_unallocation))) = create_unallocation(map, size)
3523
3524 // unallocate a VM region with size
3525 // and an address hint to allocate above
3526 // and deallocate it at end of scope
3527 #define SMART_UNALLOCATE_VM_AFTER(map, address_hint, size) \
3528 __attribute__((cleanup(cleanup_unallocation))) = create_unallocation_after(map, address_hint, size, false)
3529
3530 // unallocate a VM region with size
3531 // and deallocate it at end of scope
3532 // If no such region could be allocated, return {.addr = 0}
3533 #define SMART_TRY_UNALLOCATE_VM(map, size) \
3534 __attribute__((cleanup(cleanup_unallocation))) = create_unallocation(map, size, true)
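/*
 * Usage sketch, mirroring the SMART_ALLOCATE_VM pattern above
 * ("hole" is a hypothetical name):
 *
 *   unallocation_t hole SMART_UNALLOCATE_VM(map, KB16);
 *   // hole.addr..hole.addr+hole.size is unallocated, fenced by guard pages
 */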
3535
3536 // a VM space with allocated pages around it
3537 typedef struct {
3538 MAP_T map;
3539 addr_t guard_size;
3540 addr_t guard_prefix; // 16K
3541 addr_t addr;
3542 addr_t size;
3543 addr_t guard_suffix; // 16K
3544 } unallocation_t;
3545
3546 static unallocation_t __attribute__((overloadable))
3547 create_unallocation_after(MAP_T new_map, mach_vm_address_t address_hint, mach_vm_address_t new_size, bool allow_failure)
3548 {
3549 // allocations in address order:
3550 // 16K guard_prefix (allocated, prot none)
3551 // N addr..addr+size (unallocated)
3552 // 16K guard_suffix (allocated, prot none)
3553
3554 // allocate new_size + 2 * 16K bytes
3555 // then carve it up into our regions
3556
3557 unallocation_t result;
3558
3559 result.map = new_map;
3560
3561 result.guard_size = KB16;
3562 result.size = round_up_page(new_size, KB16);
3563 if (result.size == 0 && allow_failure) {
3564 return (unallocation_t){new_map, 0, 0, 0, 0, 0};
3565 }
3566 assert(result.size != 0);
3567
3568 mach_vm_address_t allocated_base = address_hint;
3569 mach_vm_size_t allocated_size = result.size;
3570 if (__builtin_add_overflow(result.size, result.guard_size * 2, &allocated_size)) {
3571 if (allow_failure) {
3572 return (unallocation_t){new_map, 0, 0, 0, 0, 0};
3573 } else {
3574 assert(false);
3575 }
3576 }
3577 kern_return_t kr;
3578 kr = allocate_after(result.map, &allocated_base, allocated_size, 0, 0);
3579 if (kr != 0 && allow_failure) {
3580 return (unallocation_t){new_map, 0, 0, 0, 0, 0};
3581 }
3582 assert(kr == 0);
3583
3584 result.guard_prefix = (addr_t)allocated_base;
3585 result.addr = result.guard_prefix + result.guard_size;
3586 result.guard_suffix = result.addr + result.size;
3587
3588 kr = mach_vm_deallocate(result.map, result.addr, result.size);
3589 assert(kr == 0);
3590 kr = mach_vm_protect(result.map, result.guard_prefix, result.guard_size, true, VM_PROT_NONE);
3591 assert(kr == 0);
3592 kr = mach_vm_protect(result.map, result.guard_suffix, result.guard_size, true, VM_PROT_NONE);
3593 assert(kr == 0);
3594
3595 return result;
3596 }
3597
3598 static unallocation_t __attribute__((overloadable))
3599 create_unallocation(MAP_T new_map, mach_vm_address_t new_size, bool allow_failure)
3600 {
3601 mach_vm_address_t address_hint = default_allocation_address_hint();
3602 return create_unallocation_after(new_map, address_hint, new_size, allow_failure);
3603 }
3604
3605 static unallocation_t __attribute__((overloadable))
3606 create_unallocation(MAP_T new_map, mach_vm_address_t new_size)
3607 {
3608 return create_unallocation(new_map, new_size, false /*allow_failure*/);
3609 }
3610
3611 static void
3612 cleanup_unallocation(unallocation_t *unallocation)
3613 {
3614 // fixme verify allocations and unallocated spaces still exist where we expect
3615 if (unallocation->guard_size) {
3616 (void)mach_vm_deallocate(unallocation->map, unallocation->guard_prefix, unallocation->guard_size);
3617 (void)mach_vm_deallocate(unallocation->map, unallocation->guard_suffix, unallocation->guard_size);
3618 }
3619 }
3620
3621 // TODO: re-enable deferred reclaim tests (rdar://136157720)
3622 #if 0
3623 // vm_deferred_reclamation_buffer_init_internal tests
3624 typedef struct {
3625 task_t task;
3626 mach_vm_address_t address;
3627 mach_vm_reclaim_count_t initial_capacity;
3628 mach_vm_reclaim_count_t max_capacity;
3629 char *name;
3630 } reclamation_buffer_init_trial_t;
3631
3632 typedef struct {
3633 unsigned count;
3634 unsigned capacity;
3635 reclamation_buffer_init_trial_t list[];
3636 } reclamation_buffer_init_trials_t;
3637
3638 TRIALS_IMPL(reclamation_buffer_init)
3639
3640 #define RECLAMATION_BUFFER_INIT_TRIAL(new_task, new_address, new_initial_capacity, new_max_capacity, new_name) \
3641 (reclamation_buffer_init_trial_t){ .task = (task_t)(new_task), \
3642 .address = (mach_vm_address_t)(new_address), \
3643 .initial_capacity= (mach_vm_reclaim_count_t)(new_initial_capacity), \
3644 .max_capacity= (mach_vm_reclaim_count_t)(new_max_capacity), \
3645 .name = new_name }
3646
3647 #define RECLAMATION_BUFFER_INIT_EXTRA_TRIALS 7
3648
3649 reclamation_buffer_init_trials_t *
3650 generate_reclamation_buffer_init_trials(void)
3651 {
3652 MAP_T map SMART_MAP;
3653 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3654 addr_trials_t *addr_trials SMART_ADDR_TRIALS(0);
3655 reclamation_buffer_init_trials_t *trials = allocate_reclamation_buffer_init_trials(addr_trials->count + RECLAMATION_BUFFER_INIT_EXTRA_TRIALS);
3656 for (size_t i = 0; i < addr_trials->count; i++) {
3657 char *buf;
3658 mach_vm_size_t size = i * 512;
3659 kasprintf(&buf, "%s, size: 0x%llx", addr_trials->list[i].name, size);
3660 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), addr_trials->list[i].addr, size, size, buf));
3661 }
3662
3663 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, 0, 0, "size: 0"));
3664 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, VM_RECLAIM_MAX_CAPACITY - 1, VM_RECLAIM_MAX_CAPACITY - 1, "size: MAX - 1"));
3665 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, VM_RECLAIM_MAX_CAPACITY, VM_RECLAIM_MAX_CAPACITY, "size: MAX"));
3666 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, UINT32_MAX, UINT32_MAX, "size: UINT32_MAX"));
3667 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, 2, 1, "size: max < initial"));
3668 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(NULL, NULL, 0, 0, "null task, null address, size: 0"));
3669 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), NULL, 0, 0, "null address, size: 0"));
3670 append_trial(trials, RECLAMATION_BUFFER_INIT_TRIAL(current_task(), base.addr, 1024, 1024, "valid arguments to test KERN_NOT_SUPPORTED"));
3671
3672 return trials;
3673 }
3674
3675 #define SMART_RECLAMATION_BUFFER_INIT_TRIALS() \
3676 __attribute__((cleanup(cleanup_reclamation_buffer_init_trials))) \
3677 = generate_reclamation_buffer_init_trials();
3678
3679 static void __attribute__((used))
3680 cleanup_reclamation_buffer_init_trials(reclamation_buffer_init_trials_t **trials)
3681 {
3682 for (size_t i = 0; i < (*trials)->count - RECLAMATION_BUFFER_INIT_EXTRA_TRIALS; i++) {
3683 kfree_str((*trials)->list[i].name);
3684 }
3685 free_trials(*trials);
3686 }
3687
3688 static kern_return_t
3689 call_mach_vm_deferred_reclamation_buffer_init(task_t task, mach_vm_address_t address, mach_vm_reclaim_count_t initial_capacity, mach_vm_reclaim_count_t max_capacity)
3690 {
3691 kern_return_t kr = 0;
3692 mach_vm_address_t saved_address = address;
3693 if (task && max_capacity > 0 && address == 0) {
3694 // prevent assert3u(*address, !=, 0)
3695 return PANIC;
3696 }
3697
3698 kr = mach_vm_deferred_reclamation_buffer_allocate(task, &address, initial_capacity, max_capacity);
3699
3700 // Out-param validation: failure shouldn't change the inout address.
3701 if (kr != KERN_SUCCESS && saved_address != address) {
3702 kr = OUT_PARAM_BAD;
3703 }
3704 if (kr == KERN_SUCCESS && saved_address == address) {
3705 kr = OUT_PARAM_BAD;
3706 }
3707
3708 return kr;
3709 }
3710 #endif // 0
3711
3712
3713 // mach_vm_remap_external/vm_remap_external/vm32_remap/mach_vm_remap_new_external infra
3714 // mach_vm_remap/mach_vm_remap_new_kernel infra
3715
3716 /*
3717 * This comment describes the testing approach that was fleshed out through
3718 * writing the tests for the map family of functions, and more fully realized
3719 * for the remap family of functions.
3720 *
3721 * This method attempts to radically minimize code duplication, at the expense of
3722 * decreased navigability (cmd+click is unlikely to work for you for this code)
3723 * and increased upfront costs for understanding this code. Maintainability
3724 * should be better in most cases: if a fix needs to happen, it can be
3725 * implemented in the right place once and doesn’t need to be copy-and-pasted
3726 * in multiple duplicated functions. There may however be cases where the
3727 * change you want to make doesn’t fit the spirit of this approach (for
3728 * instance changing the behavior of the test for only one function in the
3729 * family).
3730 *
3731 * The framework is built around the idea that there are three types of
3732 * parameters:
3733 * 1. Parameters that will be fixed for all calls to the function (e.g. some
3734 * uncommon type specific to the function that doesn’t impact the input
3735 * validation flow)
3736 * 2. Parameters that cause input validation to change significantly (typically
3737 * flags, e.g. fixed vs anywhere). For those we basically want to treat
3738 * different values of the flags as calling into different functions (for
3739 * the purpose of input validation).
3740 * 3. Parameters that can be tested. For every test this is further broken down
3741 * into 2 subtypes:
3742 * A. Parameters being iterated over during the test (e.g. start+size)
3743 * B. Parameters that should stay fixed during this test (e.g. pick a
3744 * sane value of prot and pass that same value for all values of
3745 * start/size)
3746 *
3747 * Often, many functions have very similar signatures (they are in the same
3748 * function family). We want to avoid copy/pasting tests for each function in
3749 * the family.
3750 *
3751 * Here is the flow used for the remap family of functions:
3752 * 1. Typedef a function type with shared parameters (see remap_fn_t)
3753 * 2. Define function wrappers that fit the above typedef for each function
3754 * in the family (see e.g. mach_vm_remap_new_kernel_wrapped). These might
3755 * set values for “type 1” params.
3756 * 3. Define “helper” functions that take in parameters of types 2 and 3.A.,
3757 * and call the wrapper, filling in type 3.B. params. See, e.g.,
3758 * help_call_remap_fn__src_size. For remap, all helpers can easily be
3759 * implemented as a single call to a core helper function
3760 * help_call_remap_fn__src_size_etc.
3761 * 4. Define generic “caller” functions that take in a wrapper and parameters
3762 * of type 3.A. and call the helper. Macros are used to mass implement these
3763 * for all values of type 2 parameters and for all functions in the family.
3764 * See, e.g., `IMPL_FROM_HELPER(dst_size);`.
3765 * 5. Specialize the above "caller" functions for each wrapper in the family,
3766 * again using macros. See `#define IMPL(remap_fn)` and its uses below.
3767 * This results in a number of specialized caller functions that is the
3768 * product of the number of functions in the family by the number of
3769 * variants induced by type 2 parameters.
3770 * 6. Use macros to call test harnesses on caller functions en masse at test
3771 * time for all functions. See the call sites in `vm_parameter_validation.c`
3772 * e.g. `RUN_ALL(mach_vm_remap_new_user, , mach_vm_remap_new);`.
3773 */
3774
3775 typedef kern_return_t (*remap_fn_t)(vm_map_t target_task,
3776 mach_vm_address_t *target_address,
3777 mach_vm_size_t size,
3778 mach_vm_offset_t mask,
3779 int flags,
3780 vm_map_t src_task,
3781 mach_vm_address_t src_address,
3782 boolean_t copy,
3783 vm_prot_t *cur_protection,
3784 vm_prot_t *max_protection,
3785 vm_inherit_t inheritance);
3786
3787 // helpers that call a provided function with certain sets of params
3788
3789 static kern_return_t
3790 help_call_remap_fn__src_size_etc(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_prot_t cur, vm_prot_t max, vm_inherit_t inherit)
3791 {
3792 kern_return_t kr;
3793 #if KERNEL
3794 if (is_random_anywhere(flags)) {
3795 // RANDOM_ADDR is likely to fall outside pmap's range
3796 return PANIC;
3797 }
3798 #endif
3799 if (is_fixed_overwrite(flags)) {
3800 // Try to allocate a dest for vm_remap to fixed-overwrite at.
3801 allocation_t dst_alloc SMART_TRY_ALLOCATE_VM(map, size, VM_PROT_DEFAULT);
3802 mach_vm_address_t out_addr = dst_alloc.addr;
3803 if (out_addr == 0) {
3804 // Failed to allocate. Clear VM_FLAGS_OVERWRITE
3805 // to prevent wild mappings.
3806 flags &= ~VM_FLAGS_OVERWRITE;
3807 }
3808 kr = fn(map, &out_addr, size, 0, flags,
3809 map, src, copy, &cur, &max, inherit);
3810 } else {
3811 // vm_remap will allocate anywhere. Deallocate if it succeeds.
3812 mach_vm_address_t out_addr = 0;
3813 kr = fn(map, &out_addr, size, 0, flags,
3814 map, src, copy, &cur, &max, inherit);
3815 if (kr == 0) {
3816 (void)mach_vm_deallocate(map, out_addr, size);
3817 }
3818 }
3819 return kr;
3820 }
3821
3822 static kern_return_t
3823 help_call_remap_fn__src_size(remap_fn_t fn, MAP_T map, int unused_flags __unused, bool copy, mach_vm_address_t src, mach_vm_size_t size)
3824 {
3825 assert(unused_flags == 0);
3826 return help_call_remap_fn__src_size_etc(fn, map, VM_FLAGS_ANYWHERE, copy, src, size, 0, 0, VM_INHERIT_NONE);
3827 }
3828
3829 static kern_return_t
3830 help_call_remap_fn__dst_size(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t dst, mach_vm_size_t size)
3831 {
3832 allocation_t src SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
3833 mach_vm_address_t out_addr = dst;
3834 vm_prot_t cur = 0;
3835 vm_prot_t max = 0;
3836 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
3837 map, src.addr, copy, &cur, &max, VM_INHERIT_NONE);
3838 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
3839 return kr;
3840 }
3841
3842 static kern_return_t
3843 help_call_remap_fn__inherit(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_inherit_t inherit)
3844 {
3845 return help_call_remap_fn__src_size_etc(fn, map, flags, copy, src, size, 0, 0, inherit);
3846 }
3847
3848 static kern_return_t
3849 help_call_remap_fn__flags(remap_fn_t fn, MAP_T map, int unused_flags __unused, bool copy, mach_vm_address_t src, mach_vm_size_t size, int trial_flags)
3850 {
3851 assert(unused_flags == 0);
3852 return help_call_remap_fn__src_size_etc(fn, map, trial_flags, copy, src, size, 0, 0, VM_INHERIT_NONE);
3853 }
3854
3855 static kern_return_t
3856 help_call_remap_fn__prot_pairs(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, vm_prot_t cur, vm_prot_t max)
3857 {
3858 return help_call_remap_fn__src_size_etc(fn, map, flags, copy, src, size, cur, max, VM_INHERIT_NONE);
3859 }
3860
3861 static kern_return_t
3862 help_call_remap_fn__src_dst_size(remap_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst)
3863 {
3864 mach_vm_address_t out_addr = dst;
3865 vm_prot_t cur = 0;
3866 vm_prot_t max = 0;
3867 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
3868 map, src, copy, &cur, &max, VM_INHERIT_NONE);
3869 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
3870 return kr;
3871 }
3872
3873 #define GET_INSTANCE(_0, _1, _2, _3, _4, _5, _6, _7, _8, NAME, ...) NAME
3874
3875 #define DROP_TYPES_8(a, b, ...) , b DROP_TYPES_6(__VA_ARGS__)
3876 #define DROP_TYPES_6(a, b, ...) , b DROP_TYPES_4(__VA_ARGS__)
3877 #define DROP_TYPES_4(a, b, ...) , b DROP_TYPES_2(__VA_ARGS__)
3878 #define DROP_TYPES_2(a, b, ...) , b
3879 #define DROP_TYPES_0()
3880
3881 // Parses lists of "type1, arg1, type2, arg2" into "arg1, arg2"
3882 #define DROP_TYPES(...) GET_INSTANCE(_0 __VA_OPT__(,) __VA_ARGS__, DROP_TYPES_8, DROP_TYPES_8, DROP_TYPES_6, DROP_TYPES_6, DROP_TYPES_4, DROP_TYPES_4, DROP_TYPES_2, DROP_TYPES_2, DROP_TYPES_0, DROP_TYPES_0)(__VA_ARGS__)
3883
3884 #define DROP_COMMAS_8(a, b, ...) , a b DROP_COMMAS_6(__VA_ARGS__)
3885 #define DROP_COMMAS_6(a, b, ...) , a b DROP_COMMAS_4(__VA_ARGS__)
3886 #define DROP_COMMAS_4(a, b, ...) , a b DROP_COMMAS_2(__VA_ARGS__)
3887 #define DROP_COMMAS_2(a, b) , a b
3888 #define DROP_COMMAS_0()
3889
3890 // Parses lists of "type1, arg1, type2, arg2" into "type1 arg1, type2 arg2"
3891 #define DROP_COMMAS(...) GET_INSTANCE(_0 __VA_OPT__(,) __VA_ARGS__, DROP_COMMAS_8, DROP_COMMAS_8, DROP_COMMAS_6, DROP_COMMAS_6, DROP_COMMAS_4, DROP_COMMAS_4, DROP_COMMAS_2, DROP_COMMAS_2, DROP_COMMAS_0)(__VA_ARGS__)
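/*
 * Worked example (mechanical expansion of the macros above):
 *   DROP_TYPES(vm_prot_t, cur, vm_prot_t, max)   =>  , cur, max
 *   DROP_COMMAS(vm_prot_t, cur, vm_prot_t, max)  =>  , vm_prot_t cur, vm_prot_t max
 * GET_INSTANCE merely selects DROP_*_N by argument count. Both forms emit a
 * leading comma so the result can be appended to a non-empty parameter list.
 */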
3892
3893 // specialize helpers into implementations of call functions that are still agnostic to the remap function
3894
3895 #define IMPL_ONE_FROM_HELPER(type, variant, flags, copy, ...) \
3896 static kern_return_t \
3897 call_remap_fn ## __ ## variant ## __ ## type(remap_fn_t fn, MAP_T map, mach_vm_address_t src, mach_vm_size_t size DROP_COMMAS(__VA_ARGS__)) { \
3898 return help_call_remap_fn__ ## type(fn, map, flags, copy, src, size DROP_TYPES(__VA_ARGS__)); \
3899 }
3900
3901 #define IMPL_FROM_HELPER(type, ...) \
3902 IMPL_ONE_FROM_HELPER(type, fixed, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, ##__VA_ARGS__) \
3903 IMPL_ONE_FROM_HELPER(type, fixed_copy, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, ##__VA_ARGS__) \
3904 IMPL_ONE_FROM_HELPER(type, anywhere, VM_FLAGS_ANYWHERE, false, ##__VA_ARGS__) \
3905
3906 IMPL_FROM_HELPER(dst_size);
3907 IMPL_FROM_HELPER(inherit, vm_inherit_t, inherit);
3908 IMPL_FROM_HELPER(prot_pairs, vm_prot_t, cur, vm_prot_t, max);
3909 IMPL_FROM_HELPER(src_dst_size, mach_vm_address_t, dst);
3910
3911 IMPL_ONE_FROM_HELPER(flags, nocopy, 0 /*ignored*/, false, int, flag)
3912 IMPL_ONE_FROM_HELPER(flags, copy, 0 /*ignored*/, true, int, flag)
3913
3914 IMPL_ONE_FROM_HELPER(src_size, nocopy, 0 /*ignored*/, false)
3915 IMPL_ONE_FROM_HELPER(src_size, copy, 0 /*ignored*/, true)
3916
3917 #undef IMPL_FROM_HELPER
3918 #undef IMPL_ONE_FROM_HELPER
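/*
 * For orientation (cmd+click won't resolve the generated names):
 * IMPL_ONE_FROM_HELPER(src_size, copy, 0, true) above expands to roughly
 *
 *   static kern_return_t
 *   call_remap_fn__copy__src_size(remap_fn_t fn, MAP_T map,
 *       mach_vm_address_t src, mach_vm_size_t size)
 *   {
 *       return help_call_remap_fn__src_size(fn, map, 0, true, src, size);
 *   }
 *
 * and the IMPL_REMAP_FN_* macros below specialize it per remap function,
 * e.g. producing call_mach_vm_remap_user__copy__src_size().
 */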
3919
3920 // define call functions that are specific to the remap function, and rely on implementations above under the hood
3921
3922 #define IMPL_REMAP_FN_HELPER(remap_fn, instance, type, ...) \
3923 static kern_return_t \
3924 call_ ## remap_fn ## __ ## instance ## __ ## type(MAP_T map DROP_COMMAS(__VA_ARGS__)) \
3925 { \
3926 return call_remap_fn__ ## instance ## __ ## type(remap_fn, map DROP_TYPES(__VA_ARGS__)); \
3927 }
3928
3929 #define IMPL_REMAP_FN_SRC_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, src_size, mach_vm_address_t, src, mach_vm_size_t, size)
3930 #define IMPL_REMAP_FN_DST_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, dst_size, mach_vm_address_t, src, mach_vm_size_t, size)
3931 #define IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, src_dst_size, mach_vm_address_t, src, mach_vm_size_t, size, mach_vm_address_t, dst)
3932 #define IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, inherit, mach_vm_address_t, src, mach_vm_size_t, size, vm_inherit_t, inherit)
3933 #define IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, flags, mach_vm_address_t, src, mach_vm_size_t, size, int, flags)
3934 #define IMPL_REMAP_FN_PROT_PAIRS(remap_fn, instance) IMPL_REMAP_FN_HELPER(remap_fn, instance, prot_pairs, mach_vm_address_t, src, mach_vm_size_t, size, vm_prot_t, cur, vm_prot_t, max)
3935
3936 #define IMPL(remap_fn) \
3937 IMPL_REMAP_FN_SRC_SIZE(remap_fn, nocopy); \
3938 IMPL_REMAP_FN_SRC_SIZE(remap_fn, copy); \
3939 \
3940 IMPL_REMAP_FN_DST_SIZE(remap_fn, fixed); \
3941 IMPL_REMAP_FN_DST_SIZE(remap_fn, fixed_copy); \
3942 IMPL_REMAP_FN_DST_SIZE(remap_fn, anywhere); \
3943 \
3944 IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, fixed); \
3945 IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, fixed_copy); \
3946 IMPL_REMAP_FN_SRC_SIZE_INHERIT(remap_fn, anywhere); \
3947 \
3948 IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, nocopy); \
3949 IMPL_REMAP_FN_SRC_SIZE_FLAGS(remap_fn, copy); \
3950 \
3951 IMPL_REMAP_FN_PROT_PAIRS(remap_fn, fixed); \
3952 IMPL_REMAP_FN_PROT_PAIRS(remap_fn, fixed_copy); \
3953 IMPL_REMAP_FN_PROT_PAIRS(remap_fn, anywhere); \
3954 \
3955 IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, fixed); \
3956 IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, fixed_copy); \
3957 IMPL_REMAP_FN_SRC_DST_SIZE(remap_fn, anywhere); \
3958
3959 static inline void
3960 check_mach_vm_map_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_address_t saved_addr,
3961 int flags, MAP_T map)
3962 {
3963 if (*kr == KERN_SUCCESS) {
3964 if (is_fixed(flags)) {
3965 if (addr != truncate_vm_map_addr_with_flags(map, saved_addr, flags)) {
3966 *kr = OUT_PARAM_BAD;
3967 }
3968 }
3969 } else {
3970 if (addr != saved_addr) {
3971 *kr = OUT_PARAM_BAD;
3972 }
3973 }
3974 }
3975
3976 static inline void
3977 check_mach_vm_remap_outparam_changes(kern_return_t * kr, mach_vm_address_t addr, mach_vm_address_t saved_addr,
3978 int flags, vm_prot_t cur_prot, vm_prot_t saved_cur_prot, vm_prot_t max_prot, vm_prot_t saved_max_prot, MAP_T map,
3979 mach_vm_address_t src_addr)
3980 {
3981 if (*kr == KERN_SUCCESS) {
3982 if (is_fixed(flags)) {
3983 mach_vm_address_t expected_misalignment = get_expected_remap_misalignment(map, src_addr, flags);
3984 if (addr != trunc_down_map(map, saved_addr) + expected_misalignment) {
3985 *kr = OUT_PARAM_BAD;
3986 }
3987 }
3988 } else {
3989 if ((addr != saved_addr) || (cur_prot != saved_cur_prot) ||
3990 (max_prot != saved_max_prot)) {
3991 *kr = OUT_PARAM_BAD;
3992 }
3993 }
3994 }
3995
3996 #if KERNEL
3997
3998 static inline kern_return_t
3999 mach_vm_remap_wrapped_kern(vm_map_t target_task,
4000 mach_vm_address_t *target_address,
4001 mach_vm_size_t size,
4002 mach_vm_offset_t mask,
4003 int flags,
4004 vm_map_t src_task,
4005 mach_vm_address_t src_address,
4006 boolean_t copy,
4007 vm_prot_t *cur_protection,
4008 vm_prot_t *max_protection,
4009 vm_inherit_t inheritance)
4010 {
4011 if (dealloc_would_time_out(*target_address, size, target_task)) {
4012 return ACCEPTABLE;
4013 }
4014
4015 mach_vm_address_t saved_addr = *target_address;
4016 vm_prot_t saved_cur_prot = *cur_protection;
4017 vm_prot_t saved_max_prot = *max_protection;
4018 kern_return_t kr = mach_vm_remap(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
4019 check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags,
4020 *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4021 return kr;
4022 }
4023 IMPL(mach_vm_remap_wrapped_kern)
4024
4025 static inline kern_return_t
4026 mach_vm_remap_new_kernel_wrapped(vm_map_t target_task,
4027 mach_vm_address_t *target_address,
4028 mach_vm_size_t size,
4029 mach_vm_offset_t mask,
4030 int flags,
4031 vm_map_t src_task,
4032 mach_vm_address_t src_address,
4033 boolean_t copy,
4034 vm_prot_t *cur_protection,
4035 vm_prot_t *max_protection,
4036 vm_inherit_t inheritance)
4037 {
4038 if (dealloc_would_time_out(*target_address, size, target_task)) {
4039 return ACCEPTABLE;
4040 }
4041
4042 mach_vm_address_t saved_addr = *target_address;
4043 vm_prot_t saved_cur_prot = *cur_protection;
4044 vm_prot_t saved_max_prot = *max_protection;
4045 kern_return_t kr = mach_vm_remap_new_kernel(target_task, target_address, size, mask, FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK), src_task, src_address, copy, cur_protection, max_protection, inheritance);
4046 // remap_new sets VM_FLAGS_RETURN_DATA_ADDR
4047 check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags | VM_FLAGS_RETURN_DATA_ADDR,
4048 *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4049 return kr;
4050 }
4051 IMPL(mach_vm_remap_new_kernel_wrapped)
4052
4053 #else /* !KERNEL */
4054
4055 static inline kern_return_t
4056 mach_vm_remap_user(vm_map_t target_task,
4057 mach_vm_address_t *target_address,
4058 mach_vm_size_t size,
4059 mach_vm_offset_t mask,
4060 int flags,
4061 vm_map_t src_task,
4062 mach_vm_address_t src_address,
4063 boolean_t copy,
4064 vm_prot_t *cur_protection,
4065 vm_prot_t *max_protection,
4066 vm_inherit_t inheritance)
4067 {
4068 mach_vm_address_t saved_addr = *target_address;
4069 vm_prot_t saved_cur_prot = *cur_protection;
4070 vm_prot_t saved_max_prot = *max_protection;
4071 kern_return_t kr = mach_vm_remap(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
4072 check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags,
4073 *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4074 return kr;
4075 }
4076 IMPL(mach_vm_remap_user)
4077
4078 static inline kern_return_t
4079 mach_vm_remap_new_user(vm_map_t target_task,
4080 mach_vm_address_t *target_address,
4081 mach_vm_size_t size,
4082 mach_vm_offset_t mask,
4083 int flags,
4084 vm_map_t src_task,
4085 mach_vm_address_t src_address,
4086 boolean_t copy,
4087 vm_prot_t *cur_protection,
4088 vm_prot_t *max_protection,
4089 vm_inherit_t inheritance)
4090 {
4091 mach_vm_address_t saved_addr = *target_address;
4092 vm_prot_t saved_cur_prot = *cur_protection;
4093 vm_prot_t saved_max_prot = *max_protection;
4094 kern_return_t kr = mach_vm_remap_new(target_task, target_address, size, mask, flags, src_task, src_address, copy, cur_protection, max_protection, inheritance);
4095 // remap_new sets VM_FLAGS_RETURN_DATA_ADDR
4096 check_mach_vm_remap_outparam_changes(&kr, *target_address, saved_addr, flags | VM_FLAGS_RETURN_DATA_ADDR,
4097 *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4098 return kr;
4099 }
4100 IMPL(mach_vm_remap_new_user)
4101
4102 #if TEST_OLD_STYLE_MACH
4103 static inline kern_return_t
4104 vm_remap_retyped(vm_map_t target_task,
4105 mach_vm_address_t *target_address,
4106 mach_vm_size_t size,
4107 mach_vm_offset_t mask,
4108 int flags,
4109 vm_map_t src_task,
4110 mach_vm_address_t src_address,
4111 boolean_t copy,
4112 vm_prot_t *cur_protection,
4113 vm_prot_t *max_protection,
4114 vm_inherit_t inheritance)
4115 {
4116 vm_address_t addr = (vm_address_t)*target_address;
4117 vm_prot_t saved_cur_prot = *cur_protection;
4118 vm_prot_t saved_max_prot = *max_protection;
4119 kern_return_t kr = vm_remap(target_task, &addr, (vm_size_t)size, (vm_address_t)mask, flags, src_task, (vm_address_t)src_address, copy, cur_protection, max_protection, inheritance);
4120 check_mach_vm_remap_outparam_changes(&kr, addr, (vm_address_t) *target_address, flags,
4121 *cur_protection, saved_cur_prot, *max_protection, saved_max_prot, target_task, src_address);
4122 *target_address = addr;
4123 return kr;
4124 }
4125
4126 IMPL(vm_remap_retyped)
4127
4128 #endif /* TEST_OLD_STYLE_MACH */
4129 #endif /* !KERNEL */
4130
4131 #undef IMPL
4132 #undef IMPL_REMAP_FN_SRC_SIZE
4133 #undef IMPL_REMAP_FN_DST_SIZE
4134 #undef IMPL_REMAP_FN_SRC_DST_SIZE
4135 #undef IMPL_REMAP_FN_SRC_SIZE_INHERIT
4136 #undef IMPL_REMAP_FN_SRC_SIZE_FLAGS
4137 #undef IMPL_REMAP_FN_PROT_PAIRS
4138 #undef IMPL_REMAP_FN_HELPER
4139
4140
4141 /////////////////////////////////////////////////////
4142 // Test runners for functions with commonly-used parameter types and setup code.
4143
4144 #define IMPL(NAME, T) \
4145 /* Test a Mach function */ \
4146 /* Run each trial with an allocated vm region and start/size parameters that reference it. */ \
4147 typedef kern_return_t (*NAME ## mach_with_start_size_fn)(MAP_T map, T start, T size); \
4148 \
4149 /* ...and the allocation has a specified minimum alignment */ \
4150 static results_t * __attribute__((used)) \
4151 test_ ## NAME ## mach_with_allocated_aligned_start_size(NAME ## mach_with_start_size_fn fn, T align_mask, const char *testname) \
4152 { \
4153 MAP_T map SMART_MAP; \
4154 allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT); \
4155 start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr); \
4156 results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count); \
4157 \
4158 for (unsigned i = 0; i < trials->count; i++) { \
4159 T start = (T)trials->list[i].start; \
4160 T size = (T)trials->list[i].size; \
4161 kern_return_t ret = fn(map, start, size); \
4162 append_result(results, ret, trials->list[i].name); \
4163 } \
4164 return results; \
4165 } \
4166 \
4167 /* ...and the allocation gets default alignment */ \
4168 static results_t * __attribute__((used)) \
4169 test_ ## NAME ## mach_with_allocated_start_size(NAME ## mach_with_start_size_fn fn, const char *testname) \
4170 { \
4171 return test_ ## NAME ## mach_with_allocated_aligned_start_size(fn, 0, testname); \
4172 } \
4173 \
4174 /* Test a Mach function. */ \
4175 /* Run each trial with an allocated vm region and an addr parameter that references it. */ \
4176 typedef kern_return_t (*NAME ## mach_with_addr_fn)(MAP_T map, T addr); \
4177 \
4178 static results_t * __attribute__((used)) \
4179 test_ ## NAME ## mach_with_allocated_addr_of_size_n(NAME ## mach_with_addr_fn fn, size_t obj_size, const char *testname) \
4180 { \
4181 MAP_T map SMART_MAP; \
4182 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4183 addr_trials_t *trials SMART_ADDR_TRIALS(base.addr); \
4184 /* Do all the addr trials and an additional trial such that obj_size + addr == 0 */ \
4185 uint64_t trial_args[TRIALSARGUMENTS_SIZE] = {base.addr, obj_size}; \
4186 results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, trial_args, TRIALSARGUMENTS_SIZE, trials->count+1); \
4187 \
4188 for (unsigned i = 0; i < trials->count; i++) { \
4189 T addr = (T)trials->list[i].addr; \
4190 kern_return_t ret = fn(map, addr); \
4191 append_result(results, ret, trials->list[i].name); \
4192 } \
4193 kern_return_t ret = fn(map, - ((T) obj_size)); \
4194 char *trial_desc; \
4195 kasprintf(&trial_desc, "addr: -0x%lx", obj_size); \
4196 append_result(results, ret, trial_desc); \
4197 kfree_str(trial_desc); \
4198 return results; \
4199 } \
4200 \
4201 /* Test a Mach function. */ \
4202 /* Run each trial with an allocated vm region and an addr parameter that references it. */ \
4203 typedef kern_return_t (*NAME ## mach_with_addr_fn)(MAP_T map, T addr); \
4204 \
4205 static results_t * __attribute__((used)) \
4206 test_ ## NAME ## mach_with_allocated_addr(NAME ## mach_with_addr_fn fn, const char *testname) \
4207 { \
4208 MAP_T map SMART_MAP; \
4209 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4210 addr_trials_t *trials SMART_ADDR_TRIALS(base.addr); \
4211 results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count); \
4212 \
4213 for (unsigned i = 0; i < trials->count; i++) { \
4214 T addr = (T)trials->list[i].addr; \
4215 kern_return_t ret = fn(map, addr); \
4216 append_result(results, ret, trials->list[i].name); \
4217 } \
4218 return results; \
4219 } \
4220 \
4221 static results_t * __attribute__((used)) \
4222 test_ ## NAME ## mach_with_allocated_purgeable_addr(NAME ## mach_with_addr_fn fn, const char *testname) \
4223 { \
4224 MAP_T map SMART_MAP; \
4225 allocation_t base SMART_ALLOCATE_PURGEABLE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4226 addr_trials_t *trials SMART_ADDR_TRIALS(base.addr); \
4227 results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count); \
4228 \
4229 for (unsigned i = 0; i < trials->count; i++) { \
4230 T addr = (T)trials->list[i].addr; \
4231 kern_return_t ret = fn(map, addr); \
4232 append_result(results, ret, trials->list[i].name); \
4233 } \
4234 return results; \
4235 } \
4236 \
4237 /* Test a Mach function. */ \
4238 /* Run each trial with a size parameter. */ \
4239 typedef kern_return_t (*NAME ## mach_with_size_fn)(MAP_T map, T size); \
4240 \
4241 static results_t * __attribute__((used)) \
4242 test_ ## NAME ## mach_with_size(NAME ## mach_with_size_fn fn, const char *testname) \
4243 { \
4244 MAP_T map SMART_MAP; \
4245 size_trials_t *trials SMART_SIZE_TRIALS(); \
4246 results_t *results = alloc_results(testname, eSMART_SIZE_TRIALS, trials->count); \
4247 \
4248 for (unsigned i = 0; i < trials->count; i++) { \
4249 T size = (T)trials->list[i].size; \
4250 kern_return_t ret = fn(map, size); \
4251 append_result(results, ret, trials->list[i].name); \
4252 } \
4253 return results; \
4254 } \
4255 \
4256 /* Test a Mach function. */ \
4257 /* Run each trial with an allocated vm region and start/size/offset/object-size parameters. */ \
4258 typedef kern_return_t (*NAME ## mach_with_start_size_offset_object_fn)(MAP_T map, T addr, T size, T offset, T obj_size); \
4259 \
4260 static results_t * __attribute__((used)) \
4261 test_ ## NAME ## mach_with_allocated_start_size_offset_object(NAME ## mach_with_start_size_offset_object_fn fn, const char *testname) \
4262 { \
4263 MAP_T map SMART_MAP; \
4264 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4265 start_size_offset_object_trials_t *trials SMART_START_SIZE_OFFSET_OBJECT_TRIALS(); \
4266 results_t *results = alloc_results(testname, eSMART_START_SIZE_OFFSET_OBJECT_TRIALS, trials->count); \
4267 \
4268 for (unsigned i = 0; i < trials->count; i++) { \
4269 start_size_offset_object_trial_t trial = slide_trial(trials->list[i], base.addr); \
4270 T start = (T)trial.start; \
4271 T size = (T)trial.size; \
4272 T offset = (T)trial.offset; \
4273 T obj_size = (T)trial.obj_size; \
4274 kern_return_t ret = fn(map, start, size, offset, obj_size); \
4275 append_result(results, ret, trials->list[i].name); \
4276 } \
4277 return results; \
4278 } \
4279 /* Test a Mach function. */ \
4280 /* Run each trial with an allocated vm region and start/size/offset parameters. */ \
4281 typedef kern_return_t (*NAME ## mach_with_start_size_offset_fn)(MAP_T map, T addr, T size, T offset, T obj_size); \
4282 \
4283 static results_t * __attribute__((used)) \
4284 test_ ## NAME ## mach_with_allocated_start_size_offset(NAME ## mach_with_start_size_offset_fn fn, const char *testname) \
4285 { \
4286 MAP_T map SMART_MAP; \
4287 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4288 start_size_offset_trials_t *trials SMART_START_SIZE_OFFSET_TRIALS(); \
4289 results_t *results = alloc_results(testname, eSMART_START_SIZE_OFFSET_TRIALS, trials->count); \
4290 \
4291 for (unsigned i = 0; i < trials->count; i++) { \
4292 start_size_offset_trial_t trial = slide_trial(trials->list[i], base.addr); \
4293 T start = (T)trial.start; \
4294 T size = (T)trial.size; \
4295 T offset = (T)trial.offset; \
4296 kern_return_t ret = fn(map, start, size, offset, 1); \
4297 append_result(results, ret, trials->list[i].name); \
4298 } \
4299 return results; \
4300 } \
4301 \
4302 /* Test a Mach function. */ \
4303 /* Run each trial with an allocated vm region and a set of mmap flags. */ \
4304 typedef kern_return_t (*NAME ## mach_with_allocated_mmap_flags_fn)(MAP_T map, T addr, T size, int flags); \
4305 \
4306 static results_t * __attribute__((used)) \
4307 test_ ## NAME ## mach_with_allocated_mmap_flags(NAME ## mach_with_allocated_mmap_flags_fn fn, const char *testname) \
4308 { \
4309 MAP_T map SMART_MAP; \
4310 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4311 mmap_flags_trials_t *trials SMART_MMAP_FLAGS_TRIALS(); \
4312 results_t *results = alloc_results(testname, eSMART_MMAP_FLAGS_TRIALS, trials->count); \
4313 \
4314 for (unsigned i = 0; i < trials->count; i++) { \
4315 int flags = trials->list[i].flags; \
4316 kern_return_t ret = fn(map, (T)base.addr, (T)base.size, flags); \
4317 append_result(results, ret, trials->list[i].name); \
4318 } \
4319 return results; \
4320 } \
4321 \
4322 /* Test a Mach function. */ \
4323 /* Run each trial with an allocated vm region and a generic 32 bit flag. */ \
4324 typedef kern_return_t (*NAME ## mach_with_allocated_generic_flag)(MAP_T map, T addr, T size, int flag); \
4325 \
4326 static results_t * __attribute__((used)) \
4327 test_ ## NAME ## mach_with_allocated_generic_flag(NAME ## mach_with_allocated_generic_flag fn, const char *testname) \
4328 { \
4329 MAP_T map SMART_MAP; \
4330 allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
4331 generic_flag_trials_t *trials SMART_GENERIC_FLAG_TRIALS(); \
4332 results_t *results = alloc_results(testname, eSMART_GENERIC_FLAG_TRIALS, trials->count); \
4333 \
4334 for (unsigned i = 0; i < trials->count; i++) { \
4335 int flag = trials->list[i].flag; \
4336 kern_return_t ret = fn(map, (T)base.addr, (T)base.size, flag); \
4337 append_result(results, ret, trials->list[i].name); \
4338 } \
4339 return results; \
4340 } \
4341 \
4342 /* Test a Mach function. */ \
4343 /* Run each trial with a vm_prot_t. */ \
4344 typedef kern_return_t (*NAME ## mach_with_prot_fn)(MAP_T map, T size, vm_prot_t prot); \
4345 \
4346 static results_t * __attribute__((used)) \
4347 test_ ## NAME ## mach_vm_prot(NAME ## mach_with_prot_fn fn, const char *testname) \
4348 { \
4349 MAP_T map SMART_MAP; \
4350 vm_prot_trials_t *trials SMART_VM_PROT_TRIALS(); \
4351 results_t *results = alloc_results(testname, eSMART_VM_PROT_TRIALS, trials->count); \
4352 \
4353 for (unsigned i = 0; i < trials->count; i++) { \
4354 kern_return_t ret = fn(map, TEST_ALLOC_SIZE, trials->list[i].prot); \
4355 append_result(results, ret, trials->list[i].name); \
4356 } \
4357 return results; \
4358 } \
4359 \
4360 /* Test a Mach function. */ \
4361 /* Run each trial with a pair of vm_prot_t's. */ \
4362 typedef kern_return_t (*NAME ## mach_with_prot_pair_fn)(MAP_T map, vm_prot_t cur, vm_prot_t max); \
4363 \
4364 static results_t * __attribute__((used)) \
4365 test_ ## NAME ## mach_vm_prot_pair(NAME ## mach_with_prot_pair_fn fn, const char *testname) \
4366 { \
4367 MAP_T map SMART_MAP; \
4368 vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS(); \
4369 results_t *results = alloc_results(testname, eSMART_VM_PROT_PAIR_TRIALS, trials->count); \
4370 \
4371 for (unsigned i = 0; i < trials->count; i++) { \
4372 kern_return_t ret = fn(map, trials->list[i].cur, trials->list[i].max); \
4373 append_result(results, ret, trials->list[i].name); \
4374 } \
4375 return results; \
4376 } \
4377 \
4378 /* Test a Mach function. */ \
4379 /* Run each trial with a pair of vm_prot_t's. */ \
typedef kern_return_t (*NAME ## mach_with_allocated_prot_pair_fn)(MAP_T map, T addr, T size, vm_prot_t cur, vm_prot_t max); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_vm_prot_pair(NAME ## mach_with_allocated_prot_pair_fn fn, const char *testname) \
{ \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
    vm_prot_pair_trials_t *trials SMART_VM_PROT_PAIR_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_PROT_PAIR_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        kern_return_t ret = fn(map, (T)base.addr, (T)base.size, trials->list[i].cur, trials->list[i].max); \
        append_result(results, ret, trials->list[i].name); \
    } \
    return results; \
} \
 \
/* Test a Mach function. */ \
/* Run each trial with an allocated vm region and a vm_prot_t. */ \
typedef kern_return_t (*NAME ## mach_with_allocated_prot_fn)(MAP_T map, T addr, T size, vm_prot_t prot); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_vm_prot_t(NAME ## mach_with_allocated_prot_fn fn, const char *testname) \
{ \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
    vm_prot_trials_t *trials SMART_VM_PROT_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_PROT_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        vm_prot_t prot = trials->list[i].prot; \
        kern_return_t ret = fn(map, (T)base.addr, (T)base.size, prot); \
        append_result(results, ret, trials->list[i].name); \
    } \
    return results; \
} \
 \
/* Test a Mach function. */ \
/* Run each trial with a ledger flag. */ \
typedef kern_return_t (*NAME ## mach_ledger_flag_fn)(MAP_T map, int ledger_flag); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_ledger_flag(NAME ## mach_ledger_flag_fn fn, const char *testname) \
{ \
    MAP_T map SMART_MAP; \
    ledger_flag_trials_t *trials SMART_LEDGER_FLAG_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_LEDGER_FLAG_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        kern_return_t ret = fn(map, trials->list[i].flag); \
        append_result(results, ret, trials->list[i].name); \
    } \
    return results; \
} \
/* Test a Mach function. */ \
/* Run each trial with a ledger tag. */ \
typedef kern_return_t (*NAME ## mach_ledger_tag_fn)(MAP_T map, int ledger_tag); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_ledger_tag(NAME ## mach_ledger_tag_fn fn, const char *testname) \
{ \
    MAP_T map SMART_MAP; \
    ledger_tag_trials_t *trials SMART_LEDGER_TAG_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_LEDGER_TAG_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        kern_return_t ret = fn(map, trials->list[i].tag); \
        append_result(results, ret, trials->list[i].name); \
    } \
    return results; \
} \
 \
/* Test a Mach function. */ \
/* Run each trial with an allocated region and a vm_inherit_t. */ \
typedef kern_return_t (*NAME ## mach_inherit_fn)(MAP_T map, T addr, T size, vm_inherit_t inherit); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_vm_inherit_t(NAME ## mach_inherit_fn fn, const char *testname) { \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
    vm_inherit_trials_t *trials SMART_VM_INHERIT_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_INHERIT_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        vm_inherit_trial_t trial = trials->list[i]; \
        int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
        append_result(results, ret, trial.name); \
    } \
    return results; \
} \
/* Test a Mach function. */ \
/* Run each trial with an allocated vm region and start/end parameters. */ \
typedef kern_return_t (*NAME ## with_start_end_fn)(MAP_T map, T addr, T end); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_start_end(NAME ## with_start_end_fn fn, const char *testname) \
{ \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
    start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr); \
    results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        T start = (T)trials->list[i].start; \
        T size = (T)trials->list[i].size; \
        kern_return_t ret = fn(map, start, start + size); \
        append_result(results, ret, trials->list[i].name); \
    } \
    return results; \
} \
/* Test a Mach function. */ \
/* Run each trial with an allocated vm region and a vm_tag_t. */ \
typedef kern_return_t (*NAME ## with_tag_fn)(MAP_T map, T addr, T end, vm_tag_t tag); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_tag(NAME ## with_tag_fn fn, const char *testname) \
{ \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
    vm_tag_trials_t *trials SMART_VM_TAG_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_TAG_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        kern_return_t ret = fn(map, (T)base.addr, (T)(base.addr + base.size), trials->list[i].tag); \
        append_result(results, ret, trials->list[i].name); \
    } \
    return results; \
} \
/* Test a Mach function. */ \
/* Run each trial with an allocated region and a vm_behavior_t. */ \
typedef kern_return_t (*NAME ## mach_behavior_fn)(MAP_T map, T addr, T size, vm_behavior_t behavior); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_aligned_vm_behavior_t(NAME ## mach_behavior_fn fn, mach_vm_size_t align_mask, const char *testname) { \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT); \
    vm_behavior_trials_t *trials SMART_VM_BEHAVIOR_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_BEHAVIOR_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        vm_behavior_trial_t trial = trials->list[i]; \
        int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
        append_result(results, ret, trial.name); \
    } \
    return results; \
} \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_vm_behavior_t(NAME ## mach_behavior_fn fn, const char *testname) { \
    return test_ ## NAME ## mach_with_allocated_aligned_vm_behavior_t(fn, 0, testname); \
} \
 \
/* Test a Mach function. */ \
/* Run each trial with an allocated region and a vm_sync_t. */ \
typedef kern_return_t (*NAME ## mach_sync_fn)(MAP_T map, T addr, T size, vm_sync_t sync); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_vm_sync_t(NAME ## mach_sync_fn fn, const char *testname) { \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
    vm_sync_trials_t *trials SMART_VM_SYNC_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_SYNC_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        vm_sync_trial_t trial = trials->list[i]; \
        int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
        append_result(results, ret, trial.name); \
    } \
    return results; \
} \
/* Test a Mach function. */ \
/* Run each trial with an allocated region and a vm_machine_attribute_t. */ \
typedef kern_return_t (*NAME ## mach_attribute_fn)(MAP_T map, T addr, T size, vm_machine_attribute_t attr); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_vm_machine_attribute_t(NAME ## mach_attribute_fn fn, const char *testname) { \
    MAP_T map SMART_MAP; \
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
    vm_machine_attribute_trials_t *trials SMART_VM_MACHINE_ATTRIBUTE_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_MACHINE_ATTRIBUTE_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        vm_machine_attribute_trial_t trial = trials->list[i]; \
        int ret = fn(map, (T)base.addr, (T)base.size, trial.value); \
        append_result(results, ret, trial.name); \
    } \
    return results; \
} \
/* Test a Mach function. */ \
/* Run each trial with an allocated region and a purgeable control/state pair. */ \
typedef kern_return_t (*NAME ## mach_purgable_fn)(MAP_T map, T addr, vm_purgable_t control, int state); \
 \
static results_t * __attribute__((used)) \
test_ ## NAME ## mach_with_allocated_purgeable_and_state(NAME ## mach_purgable_fn fn, const char *testname) { \
    MAP_T map SMART_MAP; \
    vm_purgeable_and_state_trials_t *trials SMART_VM_PURGEABLE_AND_STATE_TRIALS(); \
    results_t *results = alloc_results(testname, eSMART_VM_PURGEABLE_AND_STATE_TRIALS, trials->count); \
 \
    for (unsigned i = 0; i < trials->count; i++) { \
        allocation_t base SMART_ALLOCATE_PURGEABLE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT); \
        vm_purgeable_and_state_trial_t trial = trials->list[i]; \
        int ret = fn(map, (T)base.addr, trial.control, trial.state); \
        append_result(results, ret, trial.name); \
    } \
    return results; \
}

IMPL(, uint64_t)
#if TEST_OLD_STYLE_MACH
IMPL(old, uint32_t)
#endif
#undef IMPL
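
/*
 * The IMPL instantiations above generate the harness family twice: once for
 * the current 64-bit interfaces (empty NAME prefix, T = uint64_t) and, when
 * TEST_OLD_STYLE_MACH is set, once more for the old-style 32-bit Mach
 * interfaces (prefix "old", T = uint32_t).
 *
 * Hypothetical usage sketch (illustrative, not part of the original suite):
 * a call_ harness for mach_vm_protect plugged into the generated
 * test_mach_with_allocated_vm_prot_t harness.
 */
static kern_return_t __attribute__((used))
call_mach_vm_protect__vm_prot(MAP_T map, uint64_t start, uint64_t size, vm_prot_t prot)
{
    // set_maximum = FALSE: change the current protection, not the maximum
    return mach_vm_protect(map, start, size, FALSE, prot);
}
// results_t *results = test_mach_with_allocated_vm_prot_t(
//         call_mach_vm_protect__vm_prot, "mach_vm_protect");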

#if KERNEL && CONFIG_MAP_RANGES
/*
 * The vm_range_create tests assume that any trial expected to succeed uses a
 * size of at most 2 * PAGE_SIZE. This enforces that assumption: any larger
 * trial size must be so enormous that it is invalid by construction.
 */
void
verify_largest_valid_trial_size_fits(start_size_start_size_trial_t trial)
{
    if (trial.size > 2 * PAGE_SIZE) {
        assert(trial.size > 0xfffffffffffffff);
    }
    if (trial.second_size > 2 * PAGE_SIZE) {
        assert(trial.second_size > 0xfffffffffffffff);
    }
}

/* Run each trial with start/size/start/size parameters. */
typedef kern_return_t (mach_with_start_size_start_size_fn)(MAP_T map, mach_vm_address_t addr,
    mach_vm_size_t size, mach_vm_address_t second_addr, mach_vm_size_t second_size);

static results_t * __attribute__((used))
test_mach_vm_range_create(mach_with_start_size_start_size_fn fn, const char *testname)
{
    start_size_start_size_trials_t *trials SMART_START_SIZE_START_SIZE_TRIALS();
    results_t *results = alloc_results(testname, eSMART_START_SIZE_START_SIZE_TRIALS, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        /*
         * Allocate and configure a new map for every trial so that the map has no user ranges.
         */
        MAP_T map SMART_RANGE_MAP;
        bool has_ranges = vm_map_range_configure(map, false) == KERN_SUCCESS;
        bool has_space_in_ranges = false;

        struct mach_vm_range void1 = {
            .min_address = map->default_range.max_address,
            .max_address = map->data_range.min_address,
        };
        struct mach_vm_range void2 = {
            .min_address = map->data_range.max_address,
            .max_address = vm_map_max(map),
        };
        struct mach_vm_range range_to_test = {0};

        /*
         * For the tests to succeed in the good cases, yet still trigger
         * failures when overlap occurs, we need:
         * range1 = {.start = addr}, range2 = {.start = addr + PAGE_SIZE * 2}.
         * We also want at least 2 * PAGE_SIZE of memory available after the
         * start of range2, and we start the first range 2 * PAGE_SIZE past
         * the start of the void range; hence the 6-page minimum below.
         */
        if (void1.min_address + (PAGE_SIZE * 6) < void1.max_address) {
            range_to_test = void1;
            has_space_in_ranges = true;
        } else if (void2.min_address + (PAGE_SIZE * 6) < void2.max_address) {
            range_to_test = void2;
            has_space_in_ranges = true;
        }

        mach_vm_address_t addr_base = range_to_test.min_address + PAGE_SIZE * 2;
        if (has_ranges && has_space_in_ranges) {
            mach_vm_address_t second_addr_base = addr_base + PAGE_SIZE * 2;

            start_size_start_size_trial_t trial = slide_trial(trials->list[i], addr_base, second_addr_base);

            verify_largest_valid_trial_size_fits(trial);

            mach_vm_address_t start = trial.start;
            mach_vm_size_t size = trial.size;
            mach_vm_address_t second_start = trial.second_start;
            mach_vm_size_t second_size = trial.second_size;
            kern_return_t ret = fn(map, start, size, second_start, second_size);
            append_result(results, ret, trials->list[i].name);
        } else {
            append_result(results, IGNORED, trials->list[i].name);
        }
    }
    return results;
}
#endif /* KERNEL && CONFIG_MAP_RANGES */

// Test a mach allocation function with a start/size
static results_t *
test_mach_allocation_func_with_start_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size), const char *testname)
{
    MAP_T map SMART_MAP;
    start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);
    results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, 0, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        unallocation_t dst SMART_UNALLOCATE_VM(map, TEST_ALLOC_SIZE);
        start_size_trial_t trial = slide_trial(trials->list[i], dst.addr);
        mach_vm_address_t addr = trial.start;
        kern_return_t ret = func(map, &addr, trial.size);
        if (ret == 0) {
            (void)mach_vm_deallocate(map, addr, trial.size);
        }
        append_result(results, ret, trial.name);
    }
    return results;
}
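
/*
 * Hypothetical usage sketch (illustrative wrapper name): adapting
 * mach_vm_allocate to the start/size allocation harness above.
 */
static kern_return_t __attribute__((used))
call_mach_vm_allocate__start_size(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size)
{
    // VM_FLAGS_ANYWHERE: let the kernel pick an address at or above the hint
    return mach_vm_allocate(map, start, size, VM_FLAGS_ANYWHERE);
}
// results_t *results = test_mach_allocation_func_with_start_size(
//         call_mach_vm_allocate__start_size, "mach_vm_allocate");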

// Test a mach allocation function with a vm_map_kernel_flags_t
static results_t *
test_mach_allocation_func_with_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t *start, mach_vm_size_t size, int flags), const char *testname)
{
    MAP_T map SMART_MAP;
    vm_map_kernel_flags_trials_t *trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
    results_t *results = alloc_results(testname, eSMART_VM_MAP_KERNEL_FLAGS_TRIALS, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        allocation_t fixed_overwrite_dst SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
        vm_map_kernel_flags_trial_t trial = trials->list[i];
#if KERNEL
        if (is_random_anywhere(trial.flags)) {
            // RANDOM_ADDR is likely to fall outside pmap's range
            append_result(results, PANIC, trial.name);
            continue;
        }
#endif
        mach_vm_address_t addr = 0;
        if (is_fixed_overwrite(trial.flags)) {
            // use a pre-existing destination for fixed-overwrite
            addr = fixed_overwrite_dst.addr;
        }
        kern_return_t ret = func(map, &addr, TEST_ALLOC_SIZE, trial.flags);
        deallocate_if_not_fixed_overwrite(ret, map, addr, TEST_ALLOC_SIZE, trial.flags);
        append_result(results, ret, trial.name);
    }
    return results;
}

static results_t *
test_mach_with_allocated_vm_map_kernel_flags_t(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, int flags), const char *testname)
{
    MAP_T map SMART_MAP;

    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
    vm_map_kernel_flags_trials_t *trials SMART_VM_MAP_KERNEL_FLAGS_TRIALS();
    results_t *results = alloc_results(testname, eSMART_VM_MAP_KERNEL_FLAGS_TRIALS, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        kern_return_t ret = func(map, base.addr, base.size, trials->list[i].flags);
        append_result(results, ret, trials->list[i].name);
    }
    return results;
}

static results_t *
test_unix_with_allocated_vm_prot_t(int (*func)(void *start, size_t size, int prot), const char *testname)
{
    MAP_T map CURRENT_MAP;
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
    vm_prot_trials_t *trials SMART_VM_PROT_TRIALS();
    results_t *results = alloc_results(testname, eSMART_VM_PROT_TRIALS, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        int ret = func((void *)base.addr, (size_t)base.size, (int)trials->list[i].prot);
        append_result(results, ret, trials->list[i].name);
    }
    return results;
}

// Test a Unix function.
// Run each trial with an allocated vm region and start/size parameters that reference it.
typedef int (*unix_with_start_size_fn)(void *start, size_t size);

static results_t * __unused
test_unix_with_allocated_aligned_start_size(unix_with_start_size_fn fn, mach_vm_size_t align_mask, const char *testname)
{
    MAP_T map CURRENT_MAP;
    allocation_t base SMART_ALLOCATE_ALIGNED_VM(map, TEST_ALLOC_SIZE, align_mask, VM_PROT_DEFAULT);
    start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr);
    results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        addr_t start = trials->list[i].start;
        addr_t size = trials->list[i].size;
        int ret = fn((void *)(uintptr_t)start, (size_t)size);
        append_result(results, ret, trials->list[i].name);
    }
    return results;
}

static results_t * __unused
test_unix_with_allocated_start_size(unix_with_start_size_fn fn, const char *testname)
{
    return test_unix_with_allocated_aligned_start_size(fn, 0, testname);
}
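
/*
 * Hypothetical usage (userspace build): mlock() already has the
 * (start, size) shape expected by unix_with_start_size_fn, modulo the
 * const qualifier on its first parameter, e.g.
 *     results_t *results = test_unix_with_allocated_start_size(
 *             (unix_with_start_size_fn)mlock, "mlock");
 */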

#if KERNEL
static results_t * __unused
test_kext_unix_with_allocated_start_size(unix_with_start_size_fn fn, const char *testname)
{
    MAP_T map CURRENT_MAP;
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
    start_size_trials_t *trials SMART_START_SIZE_TRIALS(base.addr);
    results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, base.addr, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        addr_t start = trials->list[i].start;
        addr_t size = trials->list[i].size;
        int ret = fn((void *)(uintptr_t)start, (size_t)size);
        append_result(results, ret, trials->list[i].name);
    }
    return results;
}

/* Test a Kext function requiring memory allocated with a specific tag. */
/* Run each trial with an allocated vm region and an addr parameter that references it. */

static results_t * __attribute__((used))
test_kext_tagged_with_allocated_addr(kern_return_t (*func)(MAP_T map, mach_vm_address_t addr), const char *testname)
{
    MAP_T map CURRENT_MAP;
    allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
    addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);
    results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        mach_vm_address_t addr = (mach_vm_address_t)trials->list[i].addr;
        kern_return_t ret = func(map, addr);
        append_result(results, ret, trials->list[i].name);
    }
    return results;
}
#endif /* KERNEL */

static results_t * __attribute__((used))
test_with_int64(kern_return_t (*func)(int64_t), const char *testname)
{
    size_trials_t *trials SMART_SIZE_TRIALS();
    results_t *results = alloc_results(testname, eSMART_SIZE_TRIALS, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        int64_t val = (int64_t)trials->list[i].size;
        kern_return_t ret = func(val);
        append_result(results, ret, trials->list[i].name);
    }
    return results;
}


#if !KERNEL

// For deallocators like munmap and vm_deallocate.
// Return a non-zero error code if we should avoid performing this trial.
// Call this BEFORE sliding the trial to a non-zero base address.
extern
kern_return_t
short_circuit_deallocator(MAP_T map, start_size_trial_t trial);

// implemented in vm_parameter_validation.c

#else /* KERNEL */

static inline
kern_return_t
short_circuit_deallocator(MAP_T map __unused, start_size_trial_t trial __unused)
{
    // Kernel tests run with an empty vm_map so we're free to deallocate whatever we want.
    return 0;
}

#endif /* KERNEL */


// Test mach_vm_deallocate or munmap.
// Similar to test_mach_with_allocated_addr_size, but mach_vm_deallocate is destructive
// so we can't test all values and we need to re-allocate the vm allocation each time.
static results_t *
test_deallocator(kern_return_t (*func)(MAP_T map, mach_vm_address_t start, mach_vm_size_t size), const char *testname)
{
    MAP_T map SMART_MAP;

    // allocate trials relative to address zero
    // later we slide them to each allocation's address
    start_size_trials_t *trials SMART_START_SIZE_TRIALS(0);

    results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, 0, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        start_size_trial_t trial = trials->list[i];
        allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);

        // Avoid trials that might deallocate wildly.
        // Check this BEFORE sliding the trial.
        kern_return_t ret = short_circuit_deallocator(map, trial);
        if (ret == 0) {
            // Adjust start and/or size, if that value includes the allocated address
            trial = slide_trial(trial, base.addr);

            ret = func(map, trial.start, trial.size);
            if (ret == 0) {
                // Deallocation succeeded. Don't deallocate again.
                set_already_deallocated(&base);
            }
        }
        append_result(results, ret, trial.name);
    }

    return results;
}
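
/*
 * Hypothetical usage: mach_vm_deallocate itself matches the expected
 * (map, start, size) signature, e.g.
 *     results_t *results = test_deallocator(mach_vm_deallocate, "mach_vm_deallocate");
 */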

static results_t *
test_allocated_src_unallocated_dst_size(kern_return_t (*func)(MAP_T map, mach_vm_address_t src, mach_vm_size_t size, mach_vm_address_t dst), const char *testname)
{
    MAP_T map SMART_MAP;
    allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
    src_dst_size_trials_t *trials SMART_SRC_DST_SIZE_TRIALS();
    results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);

    for (unsigned i = 0; i < trials->count; i++) {
        /*
         * Require src < dst. Some tests may get different error codes if src > dst.
         *
         * Example: size == -dst-1 for functions like vm_remap where dst
         * is a hint (i.e. dst + size overflow is ok) (rdar://132099195).
         * If src > dst then src + size overflows and the
         * function returns KERN_INVALID_ARGUMENT.
         * If src < dst then src + size does not overflow and the
         * function fails and returns KERN_INVALID_ADDRESS because
         * [src, src + size) is an unreasonable address range.
         *
         * TODO: test both src < dst and src > dst.
         */
        src_dst_size_trial_t trial = trials->list[i];
        unallocation_t dst_base SMART_UNALLOCATE_VM_AFTER(map, src_base.addr, TEST_ALLOC_SIZE);
        assert(src_base.addr < dst_base.addr);

        trial = slide_trial_src(trial, src_base.addr);
        trial = slide_trial_dst(trial, dst_base.addr);
        int ret = func(map, trial.src, trial.size, trial.dst);
        // func deallocates its own allocation
        append_result(results, ret, trial.name);
    }
    return results;
}
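
/*
 * Hypothetical usage (illustrative wrapper name): a call_ wrapper around a
 * remap-style function whose dst is a hint, e.g.
 *     results_t *results = test_allocated_src_unallocated_dst_size(
 *             call_mach_vm_remap__src_size_dst, "mach_vm_remap");
 */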


static inline void
check_mach_vm_allocate_outparam_changes(kern_return_t *kr, mach_vm_address_t addr, mach_vm_size_t size,
    mach_vm_address_t saved_start, int flags, MAP_T map)
{
    if (*kr == KERN_SUCCESS) {
        if (size == 0) {
            if (addr != 0) {
                *kr = OUT_PARAM_BAD;
            }
        } else {
            if (is_fixed(flags)) {
                if (addr != trunc_down_map(map, saved_start)) {
                    *kr = OUT_PARAM_BAD;
                }
            }
        }
    } else {
        if (saved_start != addr) {
            *kr = OUT_PARAM_BAD;
        }
    }
}
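
/*
 * Hypothetical sketch (illustrative wrapper name) of driving the check
 * above: save the caller's address hint, call mach_vm_allocate, verify the
 * out-parameter, then clean up on success.
 */
static kern_return_t __attribute__((used))
call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, int flags)
{
    mach_vm_address_t addr = start;
    kern_return_t kr = mach_vm_allocate(map, &addr, size, flags);
    kern_return_t alloc_kr = kr;  // remember the real result before the check may overwrite it
    check_mach_vm_allocate_outparam_changes(&kr, addr, size, start, flags, map);
    if (alloc_kr == KERN_SUCCESS) {
        (void)mach_vm_deallocate(map, addr, size);
    }
    return kr;
}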

static kern_return_t
call_mach_vm_behavior_set__start_size__default(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
    kern_return_t kr = mach_vm_behavior_set(map, start, size, VM_BEHAVIOR_DEFAULT);
    return kr;
}

/*
 * VM_BEHAVIOR_CAN_REUSE is additionally tested as it uses slightly different page rounding semantics
 */
static kern_return_t
call_mach_vm_behavior_set__start_size__can_reuse(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
    kern_return_t kr = mach_vm_behavior_set(map, start, size, VM_BEHAVIOR_CAN_REUSE);
    return kr;
}

static kern_return_t
call_mach_vm_behavior_set__vm_behavior(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_behavior_t behavior)
{
    kern_return_t kr = mach_vm_behavior_set(map, start, size, behavior);
    return kr;
}
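
/*
 * One plausible pairing with the harnesses generated above, e.g.
 *     results_t *results = test_mach_with_allocated_vm_behavior_t(
 *             call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
 */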

static void
check_mach_vm_purgable_control_outparam_changes(kern_return_t *kr, int state, int saved_state, int control)
{
    if (*kr == KERN_SUCCESS) {
        if (control == VM_PURGABLE_PURGE_ALL) {
            if (state != saved_state) {
                *kr = OUT_PARAM_BAD;
            }
        }
        if (control == VM_PURGABLE_GET_STATE) {
            /*
             * The default state is VM_PURGABLE_NONVOLATILE for a newly created region
             */
            if (state != VM_PURGABLE_NONVOLATILE) {
                *kr = OUT_PARAM_BAD;
            }
        }
    } else {
        if (state != saved_state) {
            *kr = OUT_PARAM_BAD;
        }
    }
}
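
/*
 * Hypothetical sketch (illustrative wrapper name): driving the purgable
 * out-parameter check above with mach_vm_purgable_control, which treats
 * state as an in/out parameter.
 */
static kern_return_t __attribute__((used))
call_mach_vm_purgable_control__check(MAP_T map, mach_vm_address_t addr, vm_purgable_t control, int state)
{
    int state_inout = state;  // keep the caller's value as saved_state
    kern_return_t kr = mach_vm_purgable_control(map, addr, control, &state_inout);
    check_mach_vm_purgable_control_outparam_changes(&kr, state_inout, state, control);
    return kr;
}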

static void
check_mach_vm_region_outparam_changes(kern_return_t *kr, MAP_T map, void *info, void *saved_info, size_t info_size,
    mach_port_t object_name, mach_port_t saved_object_name, mach_vm_address_t addr, mach_vm_address_t saved_addr,
    mach_vm_size_t size, mach_vm_size_t saved_size)
{
    if (*kr == KERN_SUCCESS) {
        if (object_name != 0) {
            *kr = OUT_PARAM_BAD;
        }
        if (addr < trunc_down_map(map, saved_addr)) {
            *kr = OUT_PARAM_BAD;
        }
        if (size == saved_size) {
            *kr = OUT_PARAM_BAD;
        }
        if (memcmp(info, saved_info, info_size) == 0) {
            *kr = OUT_PARAM_BAD;
        }
    } else {
        if (object_name != saved_object_name || addr != saved_addr || size != saved_size || memcmp(info, saved_info, info_size) != 0) {
            *kr = OUT_PARAM_BAD;
        }
    }
}

static int
call_mach_vm_region(MAP_T map, mach_vm_address_t addr)
{
    mach_vm_address_t addr_cpy = addr;
    mach_vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
    mach_vm_size_t saved_size = size_out;
    mach_port_t object_name_out = UNLIKELY_INITIAL_MACH_PORT;
    mach_port_t saved_name = object_name_out;
    vm_region_basic_info_data_64_t info;
    info.inheritance = INVALID_INHERIT;
    vm_region_basic_info_data_64_t saved_info = info;

    mach_msg_type_number_t infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
    kern_return_t kr = mach_vm_region(map, &addr_cpy, &size_out, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
        &infoCnt, &object_name_out);
    check_mach_vm_region_outparam_changes(&kr, map, &info, &saved_info, sizeof(info), object_name_out, saved_name, addr_cpy, addr, size_out, saved_size);

    return kr;
}

#if TEST_OLD_STYLE_MACH || KERNEL
static int
call_vm_region(MAP_T map, vm_address_t addr)
{
    vm_address_t addr_cpy = addr;
    vm_size_t size_out = UNLIKELY_INITIAL_SIZE;
    vm_size_t saved_size = size_out;
    mach_port_t object_name_out = UNLIKELY_INITIAL_MACH_PORT;
    mach_port_t saved_name = object_name_out;
    vm_region_basic_info_data_64_t info;
    info.inheritance = INVALID_INHERIT;
    vm_region_basic_info_data_64_t saved_info = info;

    mach_msg_type_number_t infoCnt = VM_REGION_BASIC_INFO_COUNT_64;
    kern_return_t kr = vm_region(map, &addr_cpy, &size_out, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
        &infoCnt, &object_name_out);
    check_mach_vm_region_outparam_changes(&kr, map, &info, &saved_info, sizeof(info), object_name_out, saved_name, addr_cpy, addr, size_out, saved_size);

    return kr;
}
#endif /* TEST_OLD_STYLE_MACH || KERNEL */

static void
check_mach_vm_page_info_outparam_changes(kern_return_t *kr, vm_page_info_basic_data_t info, vm_page_info_basic_data_t saved_info,
    mach_msg_type_number_t count, mach_msg_type_number_t saved_count)
{
    if (*kr == KERN_SUCCESS) {
        if (memcmp(&info, &saved_info, sizeof(vm_page_info_basic_data_t)) == 0) {
            *kr = OUT_PARAM_BAD;
        }
    } else {
        if (memcmp(&info, &saved_info, sizeof(vm_page_info_basic_data_t)) != 0) {
            *kr = OUT_PARAM_BAD;
        }
    }
    if (count != saved_count) {
        *kr = OUT_PARAM_BAD;
    }
}
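
/*
 * Hypothetical sketch (illustrative wrapper name): driving the page info
 * out-parameter check above with mach_vm_page_info.
 */
static kern_return_t __attribute__((used))
call_mach_vm_page_info__check(MAP_T map, mach_vm_address_t addr)
{
    vm_page_info_basic_data_t info = {0};
    vm_page_info_basic_data_t saved_info = info;
    mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
    mach_msg_type_number_t saved_count = count;
    kern_return_t kr = mach_vm_page_info(map, addr, VM_PAGE_INFO_BASIC, (vm_page_info_t)&info, &count);
    check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);
    return kr;
}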

#pragma clang diagnostic pop

// VM_PARAMETER_VALIDATION_H
#endif