1 /* Mach virtual memory unit tests
2 *
3 * The main goal of this code is to facilitate the construction,
4 * running, result logging and clean up of a test suite, taking care
5 * of all the scaffolding. A test suite is a sequence of very targeted
6 * unit tests, each running as a separate process to isolate its
7 * address space.
8 * A unit test is abstracted as a unit_test_t structure, consisting of
9 * a test function and a logging identifier. A test suite is a suite_t
10 * structure, consisting of an unit_test_t array, fixture set up and
11 * tear down functions.
12 * Test suites are created dynamically. Each of its unit test runs in
13 * its own fork()d process, with the fixture set up and tear down
14 * running before and after each test. The parent process will log a
15 * pass result if the child exits normally, and a fail result in any
16 * other case (non-zero exit status, abnormal signal). The suite
17 * results are then aggregated and logged after the [SUMMARY] keyword,
18 * and finally the test suite is destroyed.
19 * The included test suites cover the Mach memory allocators,
20 * mach_vm_allocate() and mach_vm_map() with various options, and
21 * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(),
22 * mach_vm_protect(), mach_vm_copy().
23 *
24 * Author: Renaud Dreyer ([email protected])
25 *
26 * Transformed to libdarwintest by Tristan Ye ([email protected]) */
27
#include <darwintest.h>

#include <stdlib.h>
#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <math.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <getopt.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <time.h>
43
44 T_GLOBAL_META(
45 T_META_NAMESPACE("xnu.vm"),
46 T_META_RADAR_COMPONENT_NAME("xnu"),
47 T_META_RADAR_COMPONENT_VERSION("VM"));
48
49 /**************************/
50 /**************************/
51 /* Unit Testing Framework */
52 /**************************/
53 /**************************/
54
55 /*********************/
56 /* Private interface */
57 /*********************/
58
59 static const char frameworkname[] = "vm_unitester";
60
61 /* Type for test, fixture set up and fixture tear down functions. */
62 typedef void (*test_fn_t)();
63
64 /* Unit test structure. */
65 typedef struct {
66 const char * name;
67 test_fn_t test;
68 } unit_test_t;
69
70 /* Test suite structure. */
71 typedef struct {
72 const char * name;
73 int numoftests;
74 test_fn_t set_up;
75 unit_test_t * tests;
76 test_fn_t tear_down;
77 } suite_t;
78
79 int _quietness = 0;
80 int _expected_signal = 0;
81
82 struct {
83 uintmax_t numoftests;
84 uintmax_t passed_tests;
85 } results = {0, 0};
86
87 #define logr(format, ...) \
88 do { \
89 if (_quietness <= 1) { \
90 T_LOG(format, ## __VA_ARGS__); \
91 } \
92 } while (0)
93
94 #define logv(format, ...) \
95 do { \
96 if (_quietness == 0) { \
97 T_LOG(format, ## __VA_ARGS__); \
98 } \
99 } while (0)
100
101 static suite_t *
create_suite(const char * name,int numoftests,test_fn_t set_up,unit_test_t * tests,test_fn_t tear_down)102 create_suite(const char * name, int numoftests, test_fn_t set_up, unit_test_t * tests, test_fn_t tear_down)
103 {
104 suite_t * suite = (suite_t *)malloc(sizeof(suite_t));
105 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(suite, "malloc()");
106
107 suite->name = name;
108 suite->numoftests = numoftests;
109 suite->set_up = set_up;
110 suite->tests = tests;
111 suite->tear_down = tear_down;
112 return suite;
113 }
114
/* Free a suite allocated by create_suite(). The name and tests
 * pointers inside it are caller-owned and are not freed here. */
static void
destroy_suite(suite_t * suite)
{
	free(suite);
}
120
/* Log the suite banner and its test count (suppressed when
 * quietness > 1, see the logr macro). */
static void
log_suite_info(suite_t * suite)
{
	logr("[TEST] %s", suite->name);
	logr("Number of tests: %d\n", suite->numoftests);
}
127
/* Fold one suite's counts into the process-wide results aggregate
 * reported by log_aggregated_results(). */
static void
log_suite_results(suite_t * suite, int passed_tests)
{
	results.numoftests += (uintmax_t)suite->numoftests;
	results.passed_tests += (uintmax_t)passed_tests;
}
134
135 static void
log_test_info(unit_test_t * unit_test,unsigned test_num)136 log_test_info(unit_test_t * unit_test, unsigned test_num)
137 {
138 logr("[BEGIN] #%04d: %s", test_num, unit_test->name);
139 }
140
141 static void
log_test_result(unit_test_t * unit_test,boolean_t test_passed,unsigned test_num)142 log_test_result(unit_test_t * unit_test, boolean_t test_passed, unsigned test_num)
143 {
144 logr("[%s] #%04d: %s\n", test_passed ? "PASS" : "FAIL", test_num, unit_test->name);
145 }
146
/* Run a test with fixture set up and teardown, while enforcing the
 * time out constraint. */
static void
run_test(suite_t * suite, unit_test_t * unit_test, unsigned test_num)
{
	/* Log first so a crashing test is still attributed in the output. */
	log_test_info(unit_test, test_num);

	/* All three run inside the fork()d child (see child_test_passed()),
	 * so a crash in set up, the test, or tear down only kills the child. */
	suite->set_up();
	unit_test->test();
	suite->tear_down();
}
158
159 /* Check a child return status. */
160 static boolean_t
child_terminated_normally(int child_status)161 child_terminated_normally(int child_status)
162 {
163 boolean_t normal_exit = FALSE;
164
165 if (WIFEXITED(child_status)) {
166 int exit_status = WEXITSTATUS(child_status);
167 if (exit_status) {
168 T_LOG("Child process unexpectedly exited with code %d.",
169 exit_status);
170 } else if (!_expected_signal) {
171 normal_exit = TRUE;
172 }
173 } else if (WIFSIGNALED(child_status)) {
174 int signal = WTERMSIG(child_status);
175 if (signal == _expected_signal ||
176 (_expected_signal == -1 && (signal == SIGBUS || signal == SIGSEGV))) {
177 if (_quietness <= 0) {
178 T_LOG("Child process died with expected signal "
179 "%d.", signal);
180 }
181 normal_exit = TRUE;
182 } else {
183 T_LOG("Child process unexpectedly died with signal %d.",
184 signal);
185 }
186 } else {
187 T_LOG("Child process unexpectedly did not exit nor die");
188 }
189
190 return normal_exit;
191 }
192
/* Run a test in its own process, and report the result. */
static boolean_t
child_test_passed(suite_t * suite, unit_test_t * unit_test)
{
	int test_status;
	/* Monotonic counter across all suites run by this process. */
	static unsigned test_num = 0;

	test_num++;

	pid_t test_pid = fork();
	T_QUIET; T_ASSERT_POSIX_SUCCESS(test_pid, "fork()");
	if (!test_pid) {
		/* Child: run the test and exit cleanly; any other exit
		 * path is seen as a failure by the parent (unless a
		 * signal was expected). */
		run_test(suite, unit_test, test_num);
		exit(0);
	}
	/* Parent: reap the child, retrying if waitpid() returns early
	 * (e.g. interrupted by a signal). */
	while (waitpid(test_pid, &test_status, 0) != test_pid) {
		continue;
	}
	boolean_t test_result = child_terminated_normally(test_status);
	log_test_result(unit_test, test_result, test_num);
	return test_result;
}
215
216 /* Run each test in a suite, and report the results. */
217 static int
count_passed_suite_tests(suite_t * suite)218 count_passed_suite_tests(suite_t * suite)
219 {
220 int passed_tests = 0;
221 int i;
222
223 for (i = 0; i < suite->numoftests; i++) {
224 passed_tests += child_test_passed(suite, &(suite->tests[i]));
225 }
226 return passed_tests;
227 }
228
229 /********************/
230 /* Public interface */
231 /********************/
232
233 #define DEFAULT_QUIETNESS 0 /* verbose */
234 #define RESULT_ERR_QUIETNESS 1 /* result and error */
235 #define ERROR_ONLY_QUIETNESS 2 /* error only */
236
237 #define run_suite(set_up, tests, tear_down, ...) \
238 _run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__)
239
240 typedef unit_test_t UnitTests[];
241
242 void _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
243 __printflike(5, 6);
244
/* Build a suite whose name is formatted from the trailing printf-style
 * arguments, run all of its tests, fold the counts into the global
 * results, then destroy the suite. */
void
_run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
{
	va_list ap;
	char * name;

	va_start(ap, format);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(vasprintf(&name, format, ap), "vasprintf()");
	va_end(ap);
	suite_t * suite = create_suite(name, numoftests, set_up, tests, tear_down);
	log_suite_info(suite);
	log_suite_results(suite, count_passed_suite_tests(suite));
	/* name was allocated by vasprintf() and only borrowed by the
	 * suite; all logging of it is done by this point. */
	free(name);
	destroy_suite(suite);
}
260
261 /* Setters and getters for various test framework global
262 * variables. Should only be used outside of the test, set up and tear
263 * down functions. */
264
265 /* Expected signal for a test, default is 0. */
266 void
set_expected_signal(int signal)267 set_expected_signal(int signal)
268 {
269 _expected_signal = signal;
270 }
271
272 int
get_expected_signal()273 get_expected_signal()
274 {
275 return _expected_signal;
276 }
277
278 /* Logging verbosity. */
279 void
set_quietness(int value)280 set_quietness(int value)
281 {
282 _quietness = value;
283 }
284
285 int
get_quietness()286 get_quietness()
287 {
288 return _quietness;
289 }
290
/* For fixture set up and tear down functions, and units tests.
 * Deliberately empty: used wherever a suite needs no set up, tear
 * down, or test body. */
void
do_nothing()
{
}
296
297 void
log_aggregated_results()298 log_aggregated_results()
299 {
300 T_LOG("[SUMMARY] Aggregated Test Results\n");
301 T_LOG("Total: %ju", results.numoftests);
302 T_LOG("Passed: %ju", results.passed_tests);
303 T_LOG("Failed: %ju\n", results.numoftests - results.passed_tests);
304
305 T_QUIET; T_ASSERT_EQ(results.passed_tests, results.numoftests,
306 "%d passed of total %d tests",
307 results.passed_tests, results.numoftests);
308 }
309
310 /*******************************/
311 /*******************************/
312 /* Virtual memory unit testing */
313 /*******************************/
314 /*******************************/
315
316 /* Test exit values:
317 * 0: pass
318 * 1: fail, generic unexpected failure
319 * 2: fail, unexpected Mach return value
320 * 3: fail, time out */
321
322 #define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL))
323
324 #define POINTER(address) ((char *)(uintptr_t)(address))
325 #define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address)))
326
327 static int vm_address_size = sizeof(mach_vm_address_t);
328
329 static char *progname = "";
330
331 /*************************/
332 /* xnu version functions */
333 /*************************/
334
/* Find the xnu version string inside the kern.version sysctl value.
 * Returns a heap-allocated copy starting at "xnu-"; the copy is
 * intentionally leaked since callers treat it as static. */
char *
xnu_version_string()
{
	size_t length;
	int mib[2];
	mib[0] = CTL_KERN;
	mib[1] = KERN_VERSION;

	/* First sysctl() call only discovers the needed buffer length. */
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 2, NULL, &length, NULL, 0), "sysctl()");
	char * version = (char *)malloc(length);
	T_QUIET;
	T_WITH_ERRNO;
	T_ASSERT_NOTNULL(version, "malloc()");
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(sysctl(mib, 2, version, &length, NULL, 0), "sysctl()");
	if (T_RESULT == T_RESULT_FAIL) {
		free(version);
		T_END;
	}
	char * xnu_string = strstr(version, "xnu-");
	T_QUIET;
	T_ASSERT_NOTNULL(xnu_string, "%s: error finding xnu version string.", progname);
	/* Duplicate the substring BEFORE releasing the sysctl buffer.
	 * The original code freed version first and then returned a
	 * pointer into the freed buffer (use after free). */
	xnu_string = strdup(xnu_string);
	free(version);
	T_QUIET;
	T_WITH_ERRNO;
	T_ASSERT_NOTNULL(xnu_string, "strdup()");
	return xnu_string;
}
362
/* Find the xnu major version number. */
unsigned int
xnu_major_version()
{
	char * endptr;
	/* Skip the "xnu-" prefix to land on the major version digits. */
	char * xnu_substring = xnu_version_string() + 4;

	/* Clear errno first: strtoul() sets it on overflow (ERANGE)
	 * but never clears it on success. */
	errno = 0;
	/* NOTE(review): strtoul() returns unsigned long; the narrowing
	 * to unsigned int is assumed harmless for version numbers. */
	unsigned int xnu_version = strtoul(xnu_substring, &endptr, 0);
	T_QUIET;
	T_ASSERT_TRUE((errno != ERANGE && endptr != xnu_substring),
	    "%s: error finding xnu major version number.", progname);
	return xnu_version;
}
377
378 /*************************/
379 /* Mach assert functions */
380 /*************************/
381
/* Quietly fail the test unless kr equals expected_kr, reporting both
 * as human-readable Mach error strings for the named routine. */
static inline void
assert_mach_return(kern_return_t kr, kern_return_t expected_kr, const char * mach_routine)
{
	T_QUIET; T_ASSERT_EQ(kr, expected_kr,
	    "%s unexpectedly returned: %s."
	    "Should have returned: %s.",
	    mach_routine, mach_error_string(kr),
	    mach_error_string(expected_kr));
}
391
392 /*******************************/
393 /* Arrays for test suite loops */
394 /*******************************/
395
396 /* Memory allocators */
397 typedef kern_return_t (*allocate_fn_t)(vm_map_t, mach_vm_address_t *, mach_vm_size_t, int);
398
399
400 /*
401 * Remember any pre-reserved fixed address, which needs to be released prior to an allocation.
402 */
403 static mach_vm_address_t fixed_vm_address = 0x0;
404 static mach_vm_size_t fixed_vm_size = 0;
405
406 /* forward decl */
407 void assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size);
408
409 /*
410 * If trying to allocate at a fixed address, we need to do the delayed deallocate first.
411 */
/* Release the pre-reserved fixed range (fixed_vm_address/fixed_vm_size)
 * when the requested range lies entirely inside it, then forget the
 * reservation so it is only released once. */
static void
check_fixed_address(mach_vm_address_t *address, mach_vm_size_t size)
{
	if (fixed_vm_address != 0 &&
	    fixed_vm_address <= *address &&
	    *address + size <= fixed_vm_address + fixed_vm_size) {
		assert_deallocate_success(fixed_vm_address, fixed_vm_size);
		fixed_vm_address = 0;
		fixed_vm_size = 0;
	}
}
423
/* Allocator wrapper: release any overlapping pre-reserved fixed range
 * first, then forward the request to mach_vm_allocate(). */
kern_return_t
wrapper_mach_vm_allocate(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);

	kern_return_t result = mach_vm_allocate(map, address, size, flags);
	return result;
}
430
/* Allocator wrapper: anonymous mach_vm_map() with a zero alignment
 * mask and no backing object. */
kern_return_t
wrapper_mach_vm_map(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	const mach_vm_offset_t no_mask = (mach_vm_offset_t)0;

	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, no_mask, flags,
	    MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
438
/* Should have the same behavior as when mask is zero. */
kern_return_t
wrapper_mach_vm_map_4kB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	/* Low 12 bits masked: 4 kB address alignment. */
	const mach_vm_offset_t mask = (mach_vm_offset_t)0xFFF;

	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, mask, flags,
	    MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
447
/* Allocator wrapper: mach_vm_map() requesting 2 MB address alignment. */
kern_return_t
wrapper_mach_vm_map_2MB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	/* Low 21 bits masked: 2 MB address alignment. */
	const mach_vm_offset_t mask = (mach_vm_offset_t)0x1FFFFF;

	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, mask, flags,
	    MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
455
/* Create a new named memory entry of *size bytes. On success *size is
 * updated (expected: rounded up to a whole page) and *object_handle
 * holds a port the caller must eventually deallocate. */
kern_return_t
memory_entry(mach_vm_size_t * size, mach_port_t *object_handle)
{
	mach_vm_size_t original_size = *size;
	kern_return_t kr;

	kr = mach_make_memory_entry_64(mach_task_self(), size,
	    (memory_object_offset_t)0, (MAP_MEM_NAMED_CREATE | VM_PROT_ALL),
	    object_handle, 0);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* The kernel is expected to round the entry to page granularity. */
	T_QUIET; T_ASSERT_EQ(*size, round_page(original_size),
	    "mach_make_memory_entry_64() unexpectedly returned a named "
	    "entry of size 0x%jx (%ju).\n"
	    "Should have returned a "
	    "named entry of size 0x%jx (%ju).",
	    (uintmax_t)*size, (uintmax_t)*size, (uintmax_t)original_size, (uintmax_t)original_size);
	return KERN_SUCCESS;
}
476
/* Allocator wrapper: map a freshly created named entry with a zero
 * alignment mask. Note memory_entry() may round size up before the
 * mapping is attempted. */
kern_return_t
wrapper_mach_vm_map_named_entry(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	mach_port_t object_handle = MACH_PORT_NULL;
	kern_return_t kr = memory_entry(&size, &object_handle);

	if (kr != KERN_SUCCESS) {
		return kr;
	}
	check_fixed_address(address, size);
	kr = mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, object_handle, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	/* Always release our send right on the entry, whether or not
	 * the mapping succeeded. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), object_handle), "mach_port_deallocate()");
	return kr;
}
492
/* Table of allocator wrappers exercised by the suites; allocators_idx
 * selects the current entry and the enum below names each index. */
static struct {
	allocate_fn_t allocate;
	const char * description;
} allocators[] = {
	{wrapper_mach_vm_allocate, "mach_vm_allocate()"},
	{wrapper_mach_vm_map, "mach_vm_map() (zero mask)"},
	{wrapper_mach_vm_map_4kB,
	 "mach_vm_map() "
	 "(4 kB address alignment)"},
	{wrapper_mach_vm_map_2MB,
	 "mach_vm_map() "
	 "(2 MB address alignment)"},
	{wrapper_mach_vm_map_named_entry,
	 "mach_vm_map() (named "
	 "entry, zero mask)"},
};
static int numofallocators = sizeof(allocators) / sizeof(allocators[0]);
/* Current index into allocators[], driven by the suite loops. */
static int allocators_idx;
enum { MACH_VM_ALLOCATE, MACH_VM_MAP, MACH_VM_MAP_4kB, MACH_VM_MAP_2MB, MACH_VM_MAP_NAMED_ENTRY };
512
/* VM size */
static struct {
	mach_vm_size_t size;
	const char * description;
} vm_sizes[] = {
	{DEFAULT_VM_SIZE, "default/input"}, /* may be overridden via options */
	{0, "zero"},
	/* NOTE(review): the literals below assume 4 kB pages — confirm
	 * on configurations with a larger page size. */
	{4096ULL, "aligned"},
	{1ULL, "unaligned"},
	{4095ULL, "unaligned"},
	{4097ULL, "unaligned"},
};
static int numofsizes = sizeof(vm_sizes) / sizeof(vm_sizes[0]);
/* Indices into vm_sizes[] for the test region and the write buffer. */
static int sizes_idx;
static int buffer_sizes_idx;
enum { DEFAULT_INPUT, ZERO_BYTES, ONE_PAGE, ONE_BYTE, ONE_PAGE_MINUS_ONE_BYTE, ONE_PAGE_AND_ONE_BYTE };
529
/* Unspecified/fixed address allocation flags, indexed by flags_idx. */
static struct {
	int flag;
	const char * description;
} address_flags[] = {
	{VM_FLAGS_ANYWHERE, "unspecified"}, {VM_FLAGS_FIXED, "fixed"},
};
static int numofflags = sizeof(address_flags) / sizeof(address_flags[0]);
static int flags_idx;
enum { ANYWHERE, FIXED };

/* Address alignment variants, indexed by alignments_idx. */
static struct {
	boolean_t alignment;
	const char * description;
} address_alignments[] = {
	{TRUE, " aligned"}, {FALSE, " unaligned"},
};
static int numofalignments = sizeof(address_alignments) / sizeof(*address_alignments);
static int alignments_idx;
enum { ALIGNED, UNALIGNED };

/* Byte offsets applied to the mach_vm_write() buffer address. */
static struct {
	int offset;
	const char * description;
} buffer_offsets[] = {
	{0, ""}, {1, ""}, {2, ""},
};
static int numofoffsets = sizeof(buffer_offsets) / sizeof(buffer_offsets[0]);
static int offsets_idx;
enum { ZERO, ONE, TWO };

/* mach_vm_copy() post actions */
enum { VMCOPY_MODIFY_SRC, VMCOPY_MODIFY_DST, VMCOPY_MODIFY_SHARED_COPIED };

static struct {
	int action;
	const char * description;
} vmcopy_actions[] = {
	{VMCOPY_MODIFY_SRC, "modify vm_copy() source"},
	{VMCOPY_MODIFY_DST, "modify vm_copy() destination"},
	{VMCOPY_MODIFY_SHARED_COPIED,
	 "modify vm_copy source's shared "
	 "or copied from/to region"},
};
static int numofvmcopyactions = sizeof(vmcopy_actions) / sizeof(vmcopy_actions[0]);
static int vmcopy_action_idx;
578
579 /************************************/
580 /* Setters and getters for fixtures */
581 /************************************/
582
583 /* Allocation memory range. */
584 static allocate_fn_t _allocator = wrapper_mach_vm_allocate;
585 static mach_vm_size_t _vm_size = DEFAULT_VM_SIZE;
586 static int _address_flag = VM_FLAGS_ANYWHERE;
587 static boolean_t _address_alignment = TRUE;
588 static mach_vm_address_t _vm_address = 0x0;
589
590 /* Buffer for mach_vm_write(). */
591 static mach_vm_size_t _buffer_size = DEFAULT_VM_SIZE;
592 static mach_vm_address_t _buffer_address = 0x0;
593 static int _buffer_offset = 0;
594
595 /* Post action for mach_vm_copy(). */
596 static int _vmcopy_post_action = VMCOPY_MODIFY_SRC;
597
598 static void
set_allocator(allocate_fn_t allocate)599 set_allocator(allocate_fn_t allocate)
600 {
601 _allocator = allocate;
602 }
603
604 static allocate_fn_t
get_allocator()605 get_allocator()
606 {
607 return _allocator;
608 }
609
610 static void
set_vm_size(mach_vm_size_t size)611 set_vm_size(mach_vm_size_t size)
612 {
613 _vm_size = size;
614 }
615
616 static mach_vm_size_t
get_vm_size()617 get_vm_size()
618 {
619 return _vm_size;
620 }
621
622 static void
set_address_flag(int flag)623 set_address_flag(int flag)
624 {
625 _address_flag = flag;
626 }
627
628 static int
get_address_flag()629 get_address_flag()
630 {
631 return _address_flag;
632 }
633
634 static void
set_address_alignment(boolean_t alignment)635 set_address_alignment(boolean_t alignment)
636 {
637 _address_alignment = alignment;
638 }
639
640 static boolean_t
get_address_alignment()641 get_address_alignment()
642 {
643 return _address_alignment;
644 }
645
646 static void
set_vm_address(mach_vm_address_t address)647 set_vm_address(mach_vm_address_t address)
648 {
649 _vm_address = address;
650 }
651
652 static mach_vm_address_t
get_vm_address()653 get_vm_address()
654 {
655 return _vm_address;
656 }
657
658 static void
set_buffer_size(mach_vm_size_t size)659 set_buffer_size(mach_vm_size_t size)
660 {
661 _buffer_size = size;
662 }
663
664 static mach_vm_size_t
get_buffer_size()665 get_buffer_size()
666 {
667 return _buffer_size;
668 }
669
670 static void
set_buffer_address(mach_vm_address_t address)671 set_buffer_address(mach_vm_address_t address)
672 {
673 _buffer_address = address;
674 }
675
676 static mach_vm_address_t
get_buffer_address()677 get_buffer_address()
678 {
679 return _buffer_address;
680 }
681
682 static void
set_buffer_offset(int offset)683 set_buffer_offset(int offset)
684 {
685 _buffer_offset = offset;
686 }
687
688 static int
get_buffer_offset()689 get_buffer_offset()
690 {
691 return _buffer_offset;
692 }
693
694 static void
set_vmcopy_post_action(int action)695 set_vmcopy_post_action(int action)
696 {
697 _vmcopy_post_action = action;
698 }
699
700 static int
get_vmcopy_post_action()701 get_vmcopy_post_action()
702 {
703 return _vmcopy_post_action;
704 }
705
706 /*******************************/
707 /* Usage and option processing */
708 /*******************************/
/* One enable flag per test family; all off until process_options(). */
static boolean_t flag_run_allocate_test = FALSE;
static boolean_t flag_run_deallocate_test = FALSE;
static boolean_t flag_run_read_test = FALSE;
static boolean_t flag_run_write_test = FALSE;
static boolean_t flag_run_protect_test = FALSE;
static boolean_t flag_run_copy_test = FALSE;

/* Bit masks for test_option_t.to_flags; bit i corresponds to
 * test_info[i] (see process_options()). */
#define VM_TEST_ALLOCATE 0x00000001
#define VM_TEST_DEALLOCATE 0x00000002
#define VM_TEST_READ 0x00000004
#define VM_TEST_WRITE 0x00000008
#define VM_TEST_PROTECT 0x00000010
#define VM_TEST_COPY 0x00000020

/* Parsed option set handed to process_options(). */
typedef struct test_option {
	uint32_t to_flags;        /* OR of VM_TEST_* bits; 0 = run all */
	int to_quietness;         /* requested logging quietness */
	mach_vm_size_t to_vmsize; /* overrides vm_sizes[0] when non-zero */
} test_option_t;

/* Maps a test name to the flag that enables it. */
typedef struct test_info {
	char *ti_name;
	boolean_t *ti_flag;
} test_info_t;

static test_option_t test_options;

enum {ALLOCATE = 0, DEALLOCATE, READ, WRITE, PROTECT, COPY};

/* NULL-terminated table, indexed by the enum above. */
static test_info_t test_info[] = {
	{"allocate", &flag_run_allocate_test},
	{"deallocate", &flag_run_deallocate_test},
	{"read", &flag_run_read_test},
	{"write", &flag_run_write_test},
	{"protect", &flag_run_protect_test},
	{"copy", &flag_run_copy_test},
	{NULL, NULL}
};
747
/* Fail the test run when condition is non-zero, reporting the
 * offending option value string. */
static void
die_on_invalid_value(int condition, const char * value_string)
{
	T_QUIET;
	T_ASSERT_EQ(condition, 0, "%s: invalid value: %s.",
	    progname, value_string);
}
755
756 static void
process_options(test_option_t options)757 process_options(test_option_t options)
758 {
759 test_info_t *tp;
760
761 setvbuf(stdout, NULL, _IONBF, 0);
762
763 set_vm_size(DEFAULT_VM_SIZE);
764 set_quietness(DEFAULT_QUIETNESS);
765
766 if (NULL != getenv("LTERDOS")) {
767 logr("LTERDOS=YES this is LeanTestEnvironment\nIncreasing quietness by 1.");
768 set_quietness(get_quietness() + 1);
769 } else {
770 if (options.to_quietness > 0) {
771 set_quietness(options.to_quietness);
772 }
773 }
774
775 if (options.to_vmsize != 0) {
776 vm_sizes[0].size = options.to_vmsize;
777 }
778
779 if (options.to_flags == 0) {
780 for (tp = test_info; tp->ti_name != NULL; ++tp) {
781 *tp->ti_flag = TRUE;
782 }
783 } else {
784 if (options.to_flags & VM_TEST_ALLOCATE) {
785 *(test_info[ALLOCATE].ti_flag) = TRUE;
786 }
787
788 if (options.to_flags & VM_TEST_DEALLOCATE) {
789 *(test_info[DEALLOCATE].ti_flag) = TRUE;
790 }
791
792 if (options.to_flags & VM_TEST_READ) {
793 *(test_info[READ].ti_flag) = TRUE;
794 }
795
796 if (options.to_flags & VM_TEST_WRITE) {
797 *(test_info[WRITE].ti_flag) = TRUE;
798 }
799
800 if (options.to_flags & VM_TEST_PROTECT) {
801 *(test_info[PROTECT].ti_flag) = TRUE;
802 }
803
804 if (options.to_flags & VM_TEST_COPY) {
805 *(test_info[COPY].ti_flag) = TRUE;
806 }
807 }
808 }
809
810 /*****************/
811 /* Various tools */
812 /*****************/
813
/* Find the allocator address alignment mask. */
mach_vm_address_t
get_mask()
{
	/* Only the 2 MB wrapper requests a larger-than-page alignment;
	 * every other allocator is page-aligned. */
	if (get_allocator() == wrapper_mach_vm_map_2MB) {
		return (mach_vm_address_t)0x1FFFFF;
	}
	return vm_page_size - 1;
}
827
/* Find the size of the smallest aligned region containing a given
 * memory range. */
mach_vm_size_t
aligned_size(mach_vm_address_t address, mach_vm_size_t size)
{
	/* Offset of the range start within its page, plus the length,
	 * rounded up to page granularity. */
	return round_page(address - mach_vm_trunc_page(address) + size);
}
835
836 /********************/
837 /* Assert functions */
838 /********************/
839
/* Address is aligned on allocator boundary. */
static inline void
assert_aligned_address(mach_vm_address_t address)
{
	T_QUIET; T_ASSERT_EQ((address & get_mask()), 0,
	    "Address 0x%jx is unexpectedly "
	    "unaligned.",
	    (uintmax_t)address);
}

/* Address is truncated to allocator boundary. */
static inline void
assert_trunc_address(mach_vm_address_t address, mach_vm_address_t trunc_address)
{
	T_QUIET; T_ASSERT_EQ(trunc_address, (address & ~get_mask()),
	    "Address "
	    "0x%jx is unexpectedly not truncated to address 0x%jx.",
	    (uintmax_t)address, (uintmax_t)trunc_address);
}

/* Fail the test unless the pointer-sized word at address holds marker. */
static inline void
assert_address_value(mach_vm_address_t address, mach_vm_address_t marker)
{
	/* this assert is used so frequently so that we simply judge on
	 * its own instead of leaving this to LD macro for efficiency
	 */
	if (MACH_VM_ADDRESS_T(address) != marker) {
		T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, "
		    "instead of 0x%jx.", (uintmax_t)address,
		    (uintmax_t)MACH_VM_ADDRESS_T(address), (uintmax_t)marker);
	}
}
872
/* Invoke the currently selected allocator and assert it returns
 * expected_kr. */
void
assert_allocate_return(mach_vm_address_t * address, mach_vm_size_t size, int address_flag, kern_return_t expected_kr)
{
	assert_mach_return(get_allocator()(mach_task_self(), address, size, address_flag), expected_kr, "Allocator");
}

/* Allocation with the current allocator must succeed. */
void
assert_allocate_success(mach_vm_address_t * address, mach_vm_size_t size, int address_flag)
{
	assert_allocate_return(address, size, address_flag, KERN_SUCCESS);
}

/* mach_vm_deallocate() must return expected_kr. */
void
assert_deallocate_return(mach_vm_address_t address, mach_vm_size_t size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_deallocate(mach_task_self(), address, size), expected_kr, "mach_vm_deallocate()");
}

/* Deallocation must succeed. */
void
assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size)
{
	assert_deallocate_return(address, size, KERN_SUCCESS);
}
896
/* mach_vm_read() must return expected_kr; on success *data and
 * *data_size receive the copied-out buffer. */
void
assert_read_return(mach_vm_address_t address,
    mach_vm_size_t size,
    vm_offset_t * data,
    mach_msg_type_number_t * data_size,
    kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_read(mach_task_self(), address, size, data, data_size), expected_kr, "mach_vm_read()");
}

/* Read must succeed and return exactly as many bytes as requested. */
void
assert_read_success(mach_vm_address_t address, mach_vm_size_t size, vm_offset_t * data, mach_msg_type_number_t * data_size)
{
	assert_read_return(address, size, data, data_size, KERN_SUCCESS);
	T_QUIET; T_ASSERT_EQ(*data_size, size,
	    "Returned buffer size 0x%jx "
	    "(%ju) is unexpectedly different from source size 0x%jx "
	    "(%ju).",
	    (uintmax_t)*data_size, (uintmax_t)*data_size, (uintmax_t)size, (uintmax_t)size);
}

/* mach_vm_write() must return expected_kr. */
void
assert_write_return(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_write(mach_task_self(), address, data, data_size), expected_kr, "mach_vm_write()");
}

/* Write must succeed. */
void
assert_write_success(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size)
{
	assert_write_return(address, data, data_size, KERN_SUCCESS);
}
929
/* Allocate a destination region (must succeed), then assert that
 * mach_vm_copy() into it returns expected_kr; *dest receives the new
 * region's address either way. */
void
assert_allocate_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest, kern_return_t expected_kr)
{
	assert_allocate_success(dest, size, VM_FLAGS_ANYWHERE);
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, *dest), expected_kr, "mach_vm_copy()");
}

/* Allocate-and-copy must succeed. */
void
assert_allocate_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest)
{
	assert_allocate_copy_return(source, size, dest, KERN_SUCCESS);
}

/* mach_vm_copy() into an existing region must return expected_kr. */
void
assert_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, dest), expected_kr, "mach_vm_copy()");
}

/* Copy must succeed. */
void
assert_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest)
{
	assert_copy_return(source, size, dest, KERN_SUCCESS);
}
953
954 /*******************/
955 /* Memory patterns */
956 /*******************/
957
958 typedef boolean_t (*address_filter_t)(mach_vm_address_t);
959 typedef void (*address_action_t)(mach_vm_address_t, mach_vm_address_t);
960
961 /* Map over a memory region pattern and its complement, through a
962 * (possibly reversed) boolean filter and a starting value. */
/* Walk the region [address, address + size) one vm_address_size slot
 * at a time; run if_action on slots matched by the (possibly
 * reversed) filter, else_action on the rest. The value passed to the
 * action grows by vm_address_size per slot, starting at start_value. */
void
filter_addresses_do_else(address_filter_t filter,
    boolean_t reversed,
    mach_vm_address_t address,
    mach_vm_size_t size,
    address_action_t if_action,
    address_action_t else_action,
    mach_vm_address_t start_value)
{
	mach_vm_address_t i;
	/* NOTE(review): the `<` bound means the final slot of an exactly
	 * size-byte region (i == size - vm_address_size) is never visited;
	 * possibly intentional (guard slot) — confirm before changing. */
	for (i = 0; i + vm_address_size < size; i += vm_address_size) {
		if (filter(address + i) != reversed) {
			if_action(address + i, start_value + i);
		} else {
			else_action(address + i, start_value + i);
		}
	}
}
981
982 /* Various pattern actions. */
983 void
no_action(mach_vm_address_t i,mach_vm_address_t value)984 no_action(mach_vm_address_t i, mach_vm_address_t value)
985 {
986 }
987
/* Pattern action: assert the word at address i reads as zero
 * (the value argument is ignored). */
void
read_zero(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, 0);
}
993
/* Pattern action: assert the word at address i equals value. */
void
verify_address(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, value);
}
999
/* Pattern action: store value into the word at address i. */
void
write_address(mach_vm_address_t i, mach_vm_address_t value)
{
	MACH_VM_ADDRESS_T(i) = value;
}
1005
1006 /* Various patterns. */
1007 boolean_t
empty(mach_vm_address_t i)1008 empty(mach_vm_address_t i)
1009 {
1010 return FALSE;
1011 }
1012
1013 boolean_t
checkerboard(mach_vm_address_t i)1014 checkerboard(mach_vm_address_t i)
1015 {
1016 return !((i / vm_address_size) & 0x1);
1017 }
1018
1019 boolean_t
page_ends(mach_vm_address_t i)1020 page_ends(mach_vm_address_t i)
1021 {
1022 mach_vm_address_t residue = i % vm_page_size;
1023
1024 return residue == 0 || residue == vm_page_size - vm_address_size;
1025 }
1026
1027 /*************************************/
1028 /* Global variables set up functions */
1029 /*************************************/
1030
/* Install the allocator selected by the global allocators_idx after
 * bounds-checking the index. */
void
set_up_allocator()
{
	T_QUIET; T_ASSERT_TRUE(allocators_idx >= 0 && allocators_idx < numofallocators, "Invalid allocators[] index: %d.", allocators_idx);
	set_allocator(allocators[allocators_idx].allocate);
}
1037
1038 /* Find a fixed allocatable address by retrieving the address
1039 * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE. */
/* Return a page-aligned address where a region of the given size can
 * later be allocated with VM_FLAGS_FIXED. The probe allocation is
 * left in place (recorded in fixed_vm_address/fixed_vm_size) and is
 * deallocated lazily by the allocator wrappers. */
mach_vm_address_t
get_fixed_address(mach_vm_size_t size)
{
	/* mach_vm_map() starts looking for an address at 0x0. */
	mach_vm_address_t address = 0x0;

	/*
	 * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything
	 * non-zero to have at least an extra couple pages.
	 */
	if (size != 0) {
		size = round_page(size + 2 * vm_page_size);
	}

	assert_allocate_success(&address, size, VM_FLAGS_ANYWHERE);

	/*
	 * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can
	 * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will
	 * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper.
	 */
	T_QUIET; T_ASSERT_EQ(fixed_vm_address, 0, "previous fixed address not used");
	T_QUIET; T_ASSERT_EQ(fixed_vm_size, 0, "previous fixed size not used");
	fixed_vm_address = address;
	fixed_vm_size = size;

	assert_aligned_address(address);
	return address;
}
1069
1070 /* If needed, find an address at which a region of the specified size
1071 * can be allocated. Otherwise, set the address to 0x0. */
/* Configure the global VM address for the next allocation: for FIXED
 * allocations find a usable fixed address (bumped by one byte when an
 * unaligned address is requested), for ANYWHERE use 0x0. */
void
set_up_vm_address(mach_vm_size_t size)
{
	T_QUIET; T_ASSERT_TRUE(flags_idx >= 0 && flags_idx < numofflags, "Invalid address_flags[] index: %d.", flags_idx);
	T_QUIET; T_ASSERT_TRUE(alignments_idx >= 0 && alignments_idx < numofalignments, "Invalid address_alignments[] index: %d.", alignments_idx);
	set_address_flag(address_flags[flags_idx].flag);
	set_address_alignment(address_alignments[alignments_idx].alignment);

	if (!(get_address_flag() & VM_FLAGS_ANYWHERE)) {
		boolean_t aligned = get_address_alignment();
		logv(
		    "Looking for fixed %saligned address for allocation "
		    "of 0x%jx (%ju) byte%s...",
		    aligned ? "" : "un", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
		mach_vm_address_t address = get_fixed_address(size);
		if (!aligned) {
			address++;
		}
		set_vm_address(address);
		logv("Found %saligned fixed address 0x%jx.", aligned ? "" : "un", (uintmax_t)address);
	} else {
		/* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for
		 * an address at the one supplied and goes up, without
		 * wrapping around. */
		set_vm_address(0x0);
	}
}
1099
/* Install the VM size selected by the global sizes_idx. */
void
set_up_vm_size()
{
	T_QUIET; T_ASSERT_TRUE(sizes_idx >= 0 && sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", sizes_idx);
	set_vm_size(vm_sizes[sizes_idx].size);
}
1106
/* Install the buffer size selected by the global buffer_sizes_idx
 * (indexes the shared vm_sizes[] table). */
void
set_up_buffer_size()
{
	T_QUIET; T_ASSERT_TRUE(buffer_sizes_idx >= 0 && buffer_sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx);
	set_buffer_size(vm_sizes[buffer_sizes_idx].size);
}
1113
/* Install the buffer offset selected by the global offsets_idx. */
void
set_up_buffer_offset()
{
	T_QUIET; T_ASSERT_TRUE(offsets_idx >= 0 && offsets_idx < numofoffsets, "Invalid buffer_offsets[] index: %d.", offsets_idx);
	set_buffer_offset(buffer_offsets[offsets_idx].offset);
}
1120
/* Install the mach_vm_copy() post-action selected by the global
 * vmcopy_action_idx. */
void
set_up_vmcopy_action()
{
	T_QUIET; T_ASSERT_TRUE(vmcopy_action_idx >= 0 && vmcopy_action_idx < numofvmcopyactions, "Invalid vmcopy_actions[] index: %d.",
	    vmcopy_action_idx);
	set_vmcopy_post_action(vmcopy_actions[vmcopy_action_idx].action);
}
1128
/* Fixture: select the allocator, then the VM size. */
void
set_up_allocator_and_vm_size()
{
	set_up_allocator();
	set_up_vm_size();
}
1135
/* Fixture: select the VM size, then an address suitable for it. */
void
set_up_vm_variables()
{
	set_up_vm_size();
	set_up_vm_address(get_vm_size());
}
1142
/* Fixture: select the allocator, then size and address. */
void
set_up_allocator_and_vm_variables()
{
	set_up_allocator();
	set_up_vm_variables();
}
1149
/* Fixture: select the buffer size and offset. */
void
set_up_buffer_variables()
{
	set_up_buffer_size();
	set_up_buffer_offset();
}
1156
/* Fixture: select the mach_vm_copy() shared-mode post-action. */
void
set_up_copy_shared_mode_variables()
{
	set_up_vmcopy_action();
}
1162
1163 /*******************************/
1164 /* Allocation set up functions */
1165 /*******************************/
1166
1167 /* Allocate VM region of given size. */
/* Allocate a VM region of the given size with the globally selected
 * allocator, address and flags; store the resulting address back in
 * the global VM address. For FIXED allocations, verify the kernel
 * truncated the requested address to the allocator boundary. */
void
allocate(mach_vm_size_t size)
{
	mach_vm_address_t address = get_vm_address();
	int flag = get_address_flag();

	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv("...");
	assert_allocate_success(&address, size, flag);
	logv(
	    "Memory of rounded size 0x%jx (%ju) allocated at "
	    "address 0x%jx.",
	    (uintmax_t)round_page(size), (uintmax_t)round_page(size), (uintmax_t)address);
	/* Fixed allocation address is truncated to the allocator
	 * boundary. */
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		mach_vm_address_t old_address = get_vm_address();
		assert_trunc_address(old_address, address);
		logv(
		    "Address 0x%jx is correctly truncated to allocated "
		    "address 0x%jx.",
		    (uintmax_t)old_address, (uintmax_t)address);
	}
	set_vm_address(address);
}
1196
/* Allocate an ANYWHERE buffer of the given size, advance its start by
 * the global buffer offset, check the offset address still fits in a
 * vm_offset_t, and store it as the global buffer address. */
void
allocate_buffer(mach_vm_size_t buffer_size)
{
	mach_vm_address_t data = 0x0;

	logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)buffer_size, (uintmax_t)buffer_size, (buffer_size == 1) ? "" : "s");
	assert_allocate_success(&data, buffer_size, VM_FLAGS_ANYWHERE);
	logv(
	    "Memory of rounded size 0x%jx (%ju) allocated at "
	    "address 0x%jx.",
	    (uintmax_t)round_page(buffer_size), (uintmax_t)round_page(buffer_size), (uintmax_t)data);
	data += get_buffer_offset();
	T_QUIET; T_ASSERT_EQ((vm_offset_t)data, data,
	    "Address 0x%jx "
	    "unexpectedly overflows to 0x%jx when cast as "
	    "vm_offset_t type.",
	    (uintmax_t)data, (uintmax_t)(vm_offset_t)data);
	set_buffer_address(data);
}
1216
1217 /****************************************************/
1218 /* Global variables and allocation set up functions */
1219 /****************************************************/
1220
/* Fixture: select size and address, then allocate the region. */
void
set_up_vm_variables_and_allocate()
{
	set_up_vm_variables();
	allocate(get_vm_size());
}
1227
/* Fixture: select the allocator, then size/address, then allocate. */
void
set_up_allocator_and_vm_variables_and_allocate()
{
	set_up_allocator();
	set_up_vm_variables_and_allocate();
}
1234
/* Fixture: allocate one byte more than the selected size so unaligned
 * start addresses still have a full extra page behind them, then
 * restore the intentionally unaligned address if one was requested. */
void
set_up_vm_variables_and_allocate_extra_page()
{
	set_up_vm_size();
	/* Increment the size to ensure we get an extra allocated page
	 * for unaligned start addresses. */
	mach_vm_size_t allocation_size = get_vm_size() + 1;
	set_up_vm_address(allocation_size);

	allocate(allocation_size);
	/* In the fixed unaligned address case, restore the returned
	 * (truncated) allocation address to its unaligned value. */
	if (!get_address_alignment()) {
		set_vm_address(get_vm_address() + 1);
	}
}
1251
/* Fixture: select buffer size/offset and allocate the buffer with
 * room for the offset. */
void
set_up_buffer_variables_and_allocate_extra_page()
{
	set_up_buffer_variables();
	/* Increment the size to ensure we get an extra allocated page
	 * for unaligned start addresses. */
	allocate_buffer(get_buffer_size() + get_buffer_offset());
}
1260
1261 /* Allocate some destination and buffer memory for subsequent
1262 * writing, including extra pages for non-aligned start addresses. */
/* Fixture: allocate destination and buffer memory (each with the
 * extra page for unaligned starts) for a subsequent write. */
void
set_up_vm_and_buffer_variables_allocate_for_writing()
{
	set_up_vm_variables_and_allocate_extra_page();
	set_up_buffer_variables_and_allocate_extra_page();
}
1269
1270 /* Allocate some destination and source regions for subsequent
1271 * copying, including extra pages for non-aligned start addresses. */
/* Fixture: the copy tests reuse the write-test setup verbatim. */
void
set_up_vm_and_buffer_variables_allocate_for_copying()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
}
1277
1278 /************************************/
1279 /* Deallocation tear down functions */
1280 /************************************/
1281
/* Deallocate size bytes at address, asserting success. */
void
deallocate_range(mach_vm_address_t address, mach_vm_size_t size)
{
	logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_deallocate_success(address, size);
}
1289
/* Tear-down: deallocate the globally tracked region. */
void
deallocate()
{
	deallocate_range(get_vm_address(), get_vm_size());
}
1295
1296 /* Deallocate source memory, including the extra page for unaligned
1297 * start addresses. */
/* Tear-down: deallocate the region allocated with the extra byte,
 * truncating the possibly-unaligned global address back to the
 * original page-aligned allocation start. */
void
deallocate_extra_page()
{
	/* Set the address and size to their original allocation
	 * values. */
	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
}
1305
1306 /* Deallocate buffer and destination memory for mach_vm_write(),
1307 * including the extra page for unaligned start addresses. */
/* Tear-down: deallocate both the destination region and the buffer,
 * each restored to its original (aligned, full-size) allocation. */
void
deallocate_vm_and_buffer()
{
	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
	deallocate_range(mach_vm_trunc_page(get_buffer_address()), get_buffer_size() + get_buffer_offset());
}
1314
1315 /***********************************/
1316 /* mach_vm_read() set up functions */
1317 /***********************************/
1318
1319 /* Read the source memory into a buffer, deallocate the source, set
1320 * the global address and size from the buffer's. */
/* Read the global source region into a kernel-supplied buffer via
 * mach_vm_read(), deallocate the source (including its extra page),
 * then point the global address/size at the read buffer after
 * checking the narrower read types fit the mach_vm types. */
void
read_deallocate()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_success(address, size, &read_address, &read_size);
	logv(
	    "Memory of size 0x%jx (%ju) read into buffer of "
	    "address 0x%jx.",
	    (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)read_address);
	/* Deallocate the originally allocated memory, including the
	 * extra allocated page in
	 * set_up_vm_variables_and_allocate_extra_page(). */
	deallocate_range(mach_vm_trunc_page(address), size + 1);

	/* Promoting to mach_vm types after checking for overflow, and
	 * setting the global address from the buffer's. */
	T_QUIET; T_ASSERT_EQ((mach_vm_address_t)read_address, read_address,
	    "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
	    "as mach_vm_address_t type.",
	    (uintmax_t)read_address, (uintmax_t)(mach_vm_address_t)read_address);
	T_QUIET; T_ASSERT_EQ((mach_vm_size_t)read_size, read_size,
	    "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) "
	    "when cast as mach_vm_size_t type.",
	    (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)(mach_vm_size_t)read_size, (uintmax_t)(mach_vm_size_t)read_size);
	set_vm_address((mach_vm_address_t)read_address);
	set_vm_size((mach_vm_size_t)read_size);
}
1354
1355 /* Allocate some source memory, read it into a buffer, deallocate the
1356 * source, set the global address and size from the buffer's. */
/* Fixture: allocate a source region, read it into a buffer, free the
 * source, and retarget the globals at the buffer. */
void
set_up_vm_variables_allocate_read_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	read_deallocate();
}
1363
1364 /************************************/
1365 /* mach_vm_write() set up functions */
1366 /************************************/
1367
1368 /* Write the buffer into the destination memory. */
/* Write the global buffer into the global destination region via
 * mach_vm_write(), asserting success. */
void
write_buffer()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_success(address, data, buffer_size);
	logv("Buffer written.");
}
1383
1384 /* Allocate some destination and buffer memory, and write the buffer
1385 * into the destination memory. */
/* Fixture: allocate destination and buffer, then write the buffer
 * into the destination. */
void
set_up_vm_and_buffer_variables_allocate_write()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
	write_buffer();
}
1392
1393 /***********************************/
1394 /* mach_vm_copy() set up functions */
1395 /***********************************/
1396
1397 void
copy_deallocate(void)1398 copy_deallocate(void)
1399 {
1400 mach_vm_size_t size = get_vm_size();
1401 mach_vm_address_t source = get_vm_address();
1402 mach_vm_address_t dest = 0;
1403
1404 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1405 (uintmax_t)source);
1406 assert_allocate_copy_success(source, size, &dest);
1407 logv(
1408 "Memory of size 0x%jx (%ju) copy into region of "
1409 "address 0x%jx.",
1410 (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1411 /* Deallocate the originally allocated memory, including the
1412 * extra allocated page in
1413 * set_up_vm_variables_and_allocate_extra_page(). */
1414 deallocate_range(mach_vm_trunc_page(source), size + 1);
1415 /* Promoting to mach_vm types after checking for overflow, and
1416 * setting the global address from the buffer's. */
1417 T_QUIET; T_ASSERT_EQ((vm_offset_t)dest, dest,
1418 "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1419 "as mach_vm_address_t type.",
1420 (uintmax_t)dest, (uintmax_t)(vm_offset_t)dest);
1421 set_vm_address(dest);
1422 set_vm_size(size);
1423 }
1424
1425 /* Copy the source region into the destination region. */
1426 void
copy_region()1427 copy_region()
1428 {
1429 mach_vm_address_t source = get_vm_address();
1430 mach_vm_address_t dest = get_buffer_address();
1431 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
1432
1433 logv(
1434 "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
1435 "memory at address 0x%jx...",
1436 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1437 assert_copy_success(source, size, dest);
1438 logv("Buffer written.");
1439 }
1440
/* Allocate some source memory, copy it to another region, deallocate the
 * source, set the global address and size from the destination region. */
/* Fixture: allocate a source region, copy it to a new region, free
 * the source, and retarget the globals at the destination. */
void
set_up_vm_variables_allocate_copy_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	copy_deallocate();
}
1449
1450 /* Allocate some destination and source memory, and copy the source
1451 * into the destination memory. */
/* Fixture: allocate source and destination, then copy source into
 * destination. */
void
set_up_source_and_dest_variables_allocate_copy()
{
	set_up_vm_and_buffer_variables_allocate_for_copying();
	copy_region();
}
1458
1459 /**************************************/
1460 /* mach_vm_protect() set up functions */
1461 /**************************************/
1462
/* Fixture helper: allocate a region (with extra page), then set its
 * current protection to `protection`. protection_name is only used in
 * the log output (e.g. setting VM_PROT_WRITE is logged as
 * "read"-protecting since reads are removed). */
void
set_up_vm_variables_allocate_protect(vm_prot_t protection, const char * protection_name)
{
	set_up_vm_variables_and_allocate_extra_page();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();

	logv(
	    "Setting %s-protection on 0x%jx (%ju) byte%s at address "
	    "0x%jx...",
	    protection_name, (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, protection), "mach_vm_protect()");
	logv("Region %s-protected.", protection_name);
}
1477
/* Fixture: allocate, then remove read access (leave write-only). */
void
set_up_vm_variables_allocate_readprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_WRITE, "read");
}
1483
/* Fixture: allocate, then remove write access (leave read-only). */
void
set_up_vm_variables_allocate_writeprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_READ, "write");
}
1489
1490 /*****************/
1491 /* Address tests */
1492 /*****************/
1493
1494 /* Allocated address is nonzero iff size is nonzero. */
1495 void
test_nonzero_address_iff_nonzero_size()1496 test_nonzero_address_iff_nonzero_size()
1497 {
1498 mach_vm_address_t address = get_vm_address();
1499 mach_vm_size_t size = get_vm_size();
1500
1501 T_QUIET; T_ASSERT_TRUE((address && size) || (!address && !size), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address,
1502 address ? "non" : "");
1503 logv("Address 0x%jx is %szero as expected.", (uintmax_t)address, size ? "non" : "");
1504 }
1505
1506 /* Allocated address is aligned. */
/* The allocated address must be page-aligned. */
void
test_aligned_address()
{
	mach_vm_address_t address = get_vm_address();

	assert_aligned_address(address);
	logv("Address 0x%jx is aligned.", (uintmax_t)address);
}
1515
1516 /************************/
1517 /* Read and write tests */
1518 /************************/
1519
/* Verify the named pattern over [address, address + size): matched
 * slots must hold their expected value, unmatched slots must be zero. */
void
verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
	    "Verifying %s pattern on region of address 0x%jx "
	    "and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern verified.");
}
1531
1532 void
write_pattern(address_filter_t filter,boolean_t reversed,mach_vm_address_t address,mach_vm_size_t size,const char * pattern_name)1533 write_pattern(
1534 address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
1535 {
1536 logv(
1537 "Writing %s pattern on region of address 0x%jx "
1538 "and size 0x%jx (%ju)...",
1539 pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1540 filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
1541 logv("Pattern writen.");
1542 }
1543
/* Write the named pattern over the region, then immediately verify
 * it (matched slots hold their address, others read zero). */
void
write_and_verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
	    "Writing and verifying %s pattern on region of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern written and verified.");
}
1556
1557 /* Verify that the smallest aligned region containing the
1558 * given range is zero-filled. */
/* The smallest page-aligned region containing the range must be
 * entirely zero-filled (fresh allocations are zeroed). */
void
test_zero_filled()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()),
	    "zero-filled");
}
1565
/* Write and verify every slot (reversed empty filter matches all). */
void
test_write_address_filled()
{
	write_and_verify_pattern(empty, TRUE, get_vm_address(), round_page(get_vm_size()), "address-filled");
}
1571
/* Write and verify alternating (even) slots. */
void
test_write_checkerboard()
{
	write_and_verify_pattern(checkerboard, FALSE, get_vm_address(), round_page(get_vm_size()), "checkerboard");
}
1577
/* Write and verify alternating (odd) slots. */
void
test_write_reverse_checkerboard()
{
	write_and_verify_pattern(checkerboard, TRUE, get_vm_address(), round_page(get_vm_size()), "reverse checkerboard");
}
1583
/* Write and verify only the first/last slot of each page. */
void
test_write_page_ends()
{
	write_and_verify_pattern(page_ends, FALSE, get_vm_address(), round_page(get_vm_size()), "page ends");
}
1589
/* Write and verify every slot except the first/last of each page. */
void
test_write_page_interiors()
{
	write_and_verify_pattern(page_ends, TRUE, get_vm_address(), round_page(get_vm_size()), "page interiors");
}
1595
1596 /*********************************/
1597 /* Allocation error return tests */
1598 /*********************************/
1599
1600 /* Reallocating a page in the smallest aligned region containing the
1601 * given allocated range fails. */
/* Re-allocating (VM_FLAGS_FIXED) any page inside the smallest aligned
 * region containing the allocated range must fail with KERN_NO_SPACE. */
void
test_reallocate_pages()
{
	allocate_fn_t allocator = get_allocator();
	vm_map_t this_task = mach_task_self();
	mach_vm_address_t address = mach_vm_trunc_page(get_vm_address());
	mach_vm_size_t size = aligned_size(get_vm_address(), get_vm_size());
	mach_vm_address_t i;
	kern_return_t kr;

	logv(
	    "Reallocating pages in allocated region of address 0x%jx "
	    "and size 0x%jx (%ju)...",
	    (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	/* &i is passed as the in/out address argument; with FIXED flags
	 * the failing call is expected to leave it unchanged. */
	for (i = address; i < address + size; i += vm_page_size) {
		kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
		T_QUIET; T_ASSERT_EQ(kr, KERN_NO_SPACE,
		    "Allocator "
		    "at address 0x%jx unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)address, mach_error_string(kr), mach_error_string(KERN_NO_SPACE));
	}
	logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE));
}
1626
1627 /* Allocating in VM_MAP_NULL fails. */
/* Allocating in VM_MAP_NULL must fail with MACH_SEND_INVALID_DEST. */
void
test_allocate_in_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	int flag = get_address_flag();

	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	assert_mach_return(get_allocator()(VM_MAP_NULL, &address, size, flag), MACH_SEND_INVALID_DEST, "Allocator");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
1643
1644 /* Allocating with non-user flags fails. */
1645 void
test_allocate_with_kernel_flags()1646 test_allocate_with_kernel_flags()
1647 {
1648 allocate_fn_t allocator = get_allocator();
1649 vm_map_t this_task = mach_task_self();
1650 mach_vm_address_t address = get_vm_address();
1651 mach_vm_size_t size = get_vm_size();
1652 int flag = get_address_flag();
1653 int bad_flag, i;
1654 kern_return_t kr;
1655 int valid_flags = VM_FLAGS_USER_ALLOCATE | VM_FLAGS_USER_MAP | VM_FLAGS_USER_REMAP | VM_FLAGS_ALIAS_MASK;
1656
1657 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1658 if (!(flag & VM_FLAGS_ANYWHERE)) {
1659 logv(" at address 0x%jx", (uintmax_t)address);
1660 }
1661 logv(" with various invalid flags...");
1662 for (i = 0; i < sizeof(int) * 8; i++) {
1663 int test_flag = 1 << i;
1664
1665 /* Skip user valid flags */
1666 if (valid_flags & test_flag) {
1667 continue;
1668 }
1669
1670 bad_flag = test_flag | flag;
1671 kr = allocator(this_task, &address, size, bad_flag);
1672 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1673 "Allocator "
1674 "with invalid flag 0x%x unexpectedly returned: %s.\n"
1675 "Should have returned: %s.",
1676 bad_flag, mach_error_string(kr), mach_error_string(KERN_INVALID_ARGUMENT));
1677 }
1678 logv("Returned expected error with each invalid flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1679 }
1680
1681 /*****************************/
1682 /* mach_vm_map() error tests */
1683 /*****************************/
1684
1685 /* mach_vm_map() fails with invalid protection or inheritance
1686 * arguments. */
/* mach_vm_map() must fail with KERN_INVALID_ARGUMENT for invalid
 * protection or inheritance arguments. Iterates the cross product of
 * the cur_protections[4] x max_protections[4] x inheritances[3]
 * tables (loop bounds are hard-coded to those array lengths). */
void
test_mach_vm_map_protection_inheritance_error()
{
	kern_return_t kr;
	vm_map_t my_task = mach_task_self();
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	vm_map_offset_t mask = (get_allocator() == wrapper_mach_vm_map || get_allocator() == wrapper_mach_vm_map_named_entry)
	    ? (mach_vm_offset_t)0
	    : (mach_vm_offset_t)get_mask();
	int flag = get_address_flag();
	mach_port_t object_handle = MACH_PORT_NULL;
	vm_prot_t cur_protections[] = {VM_PROT_DEFAULT, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
	vm_prot_t max_protections[] = {VM_PROT_ALL, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
	vm_inherit_t inheritances[] = {VM_INHERIT_DEFAULT, VM_INHERIT_LAST_VALID + 1, UINT_MAX};
	int i, j, k;

	if (get_allocator() == wrapper_mach_vm_map_named_entry) {
		assert_mach_success(memory_entry(&size, &object_handle), "mach_make_memory_entry_64()");
	}
	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(
	    " with various invalid protection/inheritance "
	    "arguments...");

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			for (k = 0; k < 3; k++) {
				/* Skip the case with all valid arguments. */
				/* NOTE(review): `i == (j == (k == 0))` does skip
				 * (0,0,0), but it also skips other combinations
				 * (e.g. i==1,j==1,k==0) whose invalid arguments then
				 * go untested. Likely intended to be
				 * `i == 0 && j == 0 && k == 0` — confirm against the
				 * kernel's behavior before changing. */
				if (i == (j == (k == 0))) {
					continue;
				}
				kr = mach_vm_map(my_task, &address, size, mask, flag, object_handle, (memory_object_offset_t)0, FALSE,
				    cur_protections[i], max_protections[j], inheritances[k]);
				T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
				    "mach_vm_map() "
				    "with cur_protection 0x%x, max_protection 0x%x, "
				    "inheritance 0x%x unexpectedly returned: %s.\n"
				    "Should have returned: %s.",
				    cur_protections[i], max_protections[j], inheritances[k], mach_error_string(kr),
				    mach_error_string(KERN_INVALID_ARGUMENT));
			}
		}
	}
	logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
}
1736
1737 /* mach_vm_map() with unspecified address fails if the starting
1738 * address overflows when rounded up to a boundary value. */
/* mach_vm_map() with an unspecified address must fail when rounding
 * the start address up to an all-ones mask overflows. */
void
test_mach_vm_map_large_mask_overflow_error()
{
	mach_vm_address_t address = 0x1;
	mach_vm_size_t size = get_vm_size();
	mach_vm_offset_t mask = (mach_vm_offset_t)UINTMAX_MAX;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. */
	kern_return_t kr_expected = size ? KERN_NO_SPACE : KERN_INVALID_ARGUMENT;

	logv(
	    "Allocating 0x%jx (%ju) byte%s at an unspecified address "
	    "starting at 0x%jx with mask 0x%jx...",
	    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, (uintmax_t)mask);
	assert_mach_return(mach_vm_map(mach_task_self(), &address, size, mask, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT),
	    kr_expected, "mach_vm_map()");
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
1758
1759 /************************/
1760 /* Size edge case tests */
1761 /************************/
1762
/* Attempt an ANYWHERE allocation of an edge-case size and assert the
 * allocator returns exactly expected_kr. */
void
allocate_edge_size(mach_vm_address_t * address, mach_vm_size_t size, kern_return_t expected_kr)
{
	logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
	assert_allocate_return(address, size, VM_FLAGS_ANYWHERE, expected_kr);
	logv("Returned expected value: %s.", mach_error_string(expected_kr));
}
1770
/* Zero-size allocation: mach_vm_allocate() succeeds, the map-based
 * allocators fail (cannot map 0 bytes at an unspecified address). */
void
test_allocate_zero_size()
{
	mach_vm_address_t address = 0x0;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. Other allocators succeed. */
	kern_return_t kr_expected = (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	allocate_edge_size(&address, 0, kr_expected);
	if (kr_expected == KERN_SUCCESS) {
		deallocate_range(address, 0);
	}
}
1784
1785 /* Testing the allocation of the largest size that does not overflow
1786 * when rounded up to a page-aligned value. */
/* The largest size that still rounds up to a valid page-aligned value
 * must be rejected: KERN_NO_SPACE for the map allocators, and a
 * KERN_FAILURE from mach_make_memory_entry_64() for named entries. */
void
test_allocate_invalid_large_size()
{
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
	if (get_allocator() != wrapper_mach_vm_map_named_entry) {
		mach_vm_address_t address = 0x0;
		allocate_edge_size(&address, size, KERN_NO_SPACE);
	} else {
		/* Named entries cannot currently be bigger than 4 GB
		 * - 4 kb. */
		mach_port_t object_handle = MACH_PORT_NULL;
		logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
		assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)0,
		    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
		    KERN_FAILURE, "mach_make_memory_entry_64()");
		logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE));
	}
}
1805
1806 /* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a
1807 * page-aligned value. */
void
test_allocate_overflowing_size()
{
	mach_vm_address_t addr = 0x0;
	/* UINTMAX_MAX rounds up to 0 when page-aligned, so every
	 * allocator must reject it outright. */
	mach_vm_size_t overflowing_size = (mach_vm_size_t)UINTMAX_MAX;

	allocate_edge_size(&addr, overflowing_size, KERN_INVALID_ARGUMENT);
}
1815
1816 /****************************/
1817 /* Address allocation tests */
1818 /****************************/
1819
1820 /* Allocation at address zero fails iff size is nonzero. */
void
test_allocate_at_zero()
{
	mach_vm_address_t address = 0x0;
	mach_vm_size_t size = get_vm_size();
	kern_return_t kr_expected;

	/* A nonzero-sized fixed allocation at 0x0 fails; a zero-sized
	 * one is only accepted by the mach_vm_allocate() wrapper. */
	if (size) {
		kr_expected = KERN_INVALID_ADDRESS;
	} else if (get_allocator() != wrapper_mach_vm_allocate) {
		kr_expected = KERN_INVALID_ARGUMENT;
	} else {
		kr_expected = KERN_SUCCESS;
	}

	logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
	logv("Returned expected value: %s.", mach_error_string(kr_expected));
	if (kr_expected == KERN_SUCCESS) {
		T_QUIET; T_ASSERT_EQ(address, 0,
		    "Address 0x%jx is unexpectedly "
		    "nonzero.\n",
		    (uintmax_t)address);
		logv("Allocated address 0x%jx is zero.", (uintmax_t)address);
		deallocate_range(address, size);
	}
}
1842
1843 /* Allocation at page-aligned but 2 MB boundary-unaligned address
1844 * fails with KERN_NO_SPACE. */
void
test_allocate_2MB_boundary_unaligned_page_aligned_address()
{
	mach_vm_size_t size = get_vm_size();
	kern_return_t kr_expected;

	/* One page past a 2 MB-aligned fixed address: page aligned,
	 * but deliberately not 2 MB aligned. */
	mach_vm_address_t address = get_fixed_address(size + vm_page_size) + vm_page_size;
	logv(
	    "Found 2 MB boundary-unaligned, page aligned address "
	    "0x%jx.",
	    (uintmax_t)address);

	/* mach_vm_allocate() cannot allocate 0 bytes, and fails with a
	 * fixed boundary-unaligned truncated address. */
	if (!size && get_allocator() != wrapper_mach_vm_allocate) {
		kr_expected = KERN_INVALID_ARGUMENT;
	} else if (get_allocator() == wrapper_mach_vm_map_2MB) {
		kr_expected = KERN_NO_SPACE;
	} else {
		kr_expected = KERN_SUCCESS;
	}
	logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
	logv("Returned expected value: %s.", mach_error_string(kr_expected));
	if (kr_expected == KERN_SUCCESS) {
		deallocate_range(address, size);
	}
}
1869
1870 /* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for
1871 * an allocation address at 0x0, while mach_vm_map() starts at the
1872 * supplied address and does not wrap around. See 8016663. */
void
test_allocate_page_with_highest_address_hint()
{
	/* Highest valid page-aligned address. */
	mach_vm_address_t address = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv(
	    "Allocating one page with unspecified address, but hint at "
	    "0x%jx...",
	    (uintmax_t)address);
	if (get_allocator() != wrapper_mach_vm_allocate) {
		/* mach_vm_map() starts from the supplied address, and fails
		 * with KERN_NO_SPACE, see 8016663. */
		assert_allocate_return(&address, vm_page_size, VM_FLAGS_ANYWHERE, KERN_NO_SPACE);
		logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE));
	} else {
		/* mach_vm_allocate() starts from 0x0 and succeeds. */
		assert_allocate_success(&address, vm_page_size, VM_FLAGS_ANYWHERE);
		logv("Memory allocated at address 0x%jx.", (uintmax_t)address);
		assert_aligned_address(address);
		deallocate_range(address, vm_page_size);
	}
}
1896
1897 /* Allocators find an allocation address with a first fit strategy. */
void
test_allocate_first_fit_pages()
{
	allocate_fn_t allocator = get_allocator();
	vm_map_t self_map = mach_task_self();
	mach_vm_address_t first = 0x0;
	mach_vm_address_t page;
	kern_return_t kr;

	logv(
	    "Looking for first fit address for allocating one "
	    "page...");
	assert_allocate_success(&first, vm_page_size, VM_FLAGS_ANYWHERE);
	logv("Found address 0x%jx.", (uintmax_t)first);
	assert_aligned_address(first);
	mach_vm_address_t second = first;
	logv(
	    "Looking for next higher first fit address for allocating "
	    "one page...");
	assert_allocate_success(&second, vm_page_size, VM_FLAGS_ANYWHERE);
	logv("Found address 0x%jx.", (uintmax_t)second);
	assert_aligned_address(second);
	T_QUIET; T_ASSERT_GT(second, first,
	    "Second address 0x%jx is "
	    "unexpectedly not higher than first address 0x%jx.",
	    (uintmax_t)second, (uintmax_t)first);

	/* With first-fit, every page between the two found addresses
	 * must already be taken: each fixed allocation must fail. */
	logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)first, (uintmax_t)second);
	for (page = first; page <= second; page += vm_page_size) {
		kr = allocator(self_map, &page, vm_page_size, VM_FLAGS_FIXED);
		T_QUIET; T_ASSERT_NE(kr, KERN_SUCCESS,
		    "Allocator at address 0x%jx "
		    "unexpectedly succeeded.",
		    (uintmax_t)page);
	}
	logv("Expectedly returned error at each page.");
	deallocate_range(first, vm_page_size);
	deallocate_range(second, vm_page_size);
}
1937
1938 /*******************************/
1939 /* Deallocation segfault tests */
1940 /*******************************/
1941
1942 /* mach_vm_deallocate() deallocates the smallest aligned region
1943 * (integral number of pages) containing the given range. */
1944
1945 /* Addresses in deallocated range are inaccessible. */
void
access_deallocated_range_address(mach_vm_address_t address, const char * position)
{
	/* Deallocates the fixture range, then dereferences the given
	 * address inside it.  The read is expected to kill the child
	 * process with SIGSEGV; reaching the assertion means the
	 * deallocated memory was still readable, a test failure. */
	logv("Will deallocate and read from %s 0x%jx of deallocated range...", position, (uintmax_t)address);
	deallocate();
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx.\n"
	    "Should have died with signal SIGSEGV.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
1956
1957 /* Start of deallocated range is inaccessible. */
void
test_access_deallocated_range_start()
{
	mach_vm_address_t start = get_vm_address();

	access_deallocated_range_address(start, "start");
}
1963
1964 /* Middle of deallocated range is inaccessible. */
void
test_access_deallocated_range_middle()
{
	/* Halfway point of the rounded-up range. */
	mach_vm_address_t middle = get_vm_address() + (round_page(get_vm_size()) >> 1);

	access_deallocated_range_address(middle, "middle");
}
1970
1971 /* End of deallocated range is inaccessible. */
void
test_access_deallocated_range_end()
{
	/* Last accessible spot of the rounded-up range. */
	mach_vm_address_t end = get_vm_address() + round_page(get_vm_size()) - vm_address_size;

	access_deallocated_range_address(end, "end");
}
1977
1978 /* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We
1979 * deallocate the largest valid aligned size to avoid overflowing when
1980 * rounding up. */
void
test_deallocate_suicide()
{
	mach_vm_address_t address = 0x0;
	/* Largest aligned size that cannot overflow when rounded up. */
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
	kern_return_t kr;

	logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (uintmax_t)address);
	kr = mach_vm_deallocate(mach_task_self(), address, size);
	/* Unmapping nearly the whole address space should have torn
	 * the process down before the call could return. */
	T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and "
	    "size 0x%jx (%ju) unexpectedly returned: %s.\n"
	    "Should have died with signal SIGSEGV or SIGBUS.",
	    (uintmax_t)address, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr));
}
1994
1995 /***************************************/
1996 /* Deallocation and reallocation tests */
1997 /***************************************/
1998
1999 /* Deallocating memory twice succeeds. */
void
test_deallocate_twice()
{
	/* The second deallocation of the same fixture range must also
	 * succeed: deallocating already-deallocated VA is allowed. */
	deallocate();
	deallocate();
}
2006
2007 /* Deallocated and reallocated memory is zero-filled. Deallocated
2008 * memory is inaccessible since it can be reallocated. */
void
test_write_pattern_deallocate_reallocate_zero_filled()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();

	/* Dirty the range, release it, grab the same range back, and
	 * check it comes back zero-filled. */
	write_pattern(page_ends, FALSE, address, size, "page ends");
	logv("Deallocating, then Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	deallocate();
	assert_allocate_success(&address, size, VM_FLAGS_FIXED);
	logv("Memory allocated.");
	verify_pattern(empty, FALSE, address, size, "zero-filled");
	deallocate();
}
2024
2025 /********************************/
2026 /* Deallocation edge case tests */
2027 /********************************/
2028
2029 /* Zero size deallocation always succeeds. */
void
test_deallocate_zero_size_ranges()
{
	vm_map_t self_map = mach_task_self();
	kern_return_t kr;
	int i;
	/* Valid, boundary, and wildly invalid addresses alike: a
	 * zero-size deallocation must succeed at all of them. */
	mach_vm_address_t addresses[] = {0x0,
		                         0x1,
		                         vm_page_size - 1,
		                         vm_page_size,
		                         vm_page_size + 1,
		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
		                         (mach_vm_address_t)UINT_MAX,
		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
		                         (mach_vm_address_t)UINTMAX_MAX};
	int count = sizeof(addresses) / sizeof(addresses[0]);

	logv("Deallocating 0x0 (0) bytes at various addresses...");
	for (i = 0; i < count; i++) {
		kr = mach_vm_deallocate(self_map, addresses[i], 0);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate() at "
		    "address 0x%jx unexpectedly failed: %s.",
		    (uintmax_t)addresses[i], mach_error_string(kr));
	}
	logv("Deallocations successful.");
}
2056
2057 /* Deallocation succeeds if the end of the range rounds to 0x0. */
void
test_deallocate_rounded_zero_end_ranges()
{
	vm_map_t self_map = mach_task_self();
	kern_return_t kr;
	int i;
	/* Each range's end, rounded up to a page boundary, wraps to
	 * exactly 0x0; such deallocations must succeed. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{0x0, (mach_vm_size_t)UINTMAX_MAX},
		{0x0, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 2},
		{0x1, (mach_vm_size_t)UINTMAX_MAX - 1},
		{0x1, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
		{0x2, (mach_vm_size_t)UINTMAX_MAX - 2},
		{0x2, (mach_vm_size_t)UINTMAX_MAX - vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size - 1},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, 1},
		{(mach_vm_address_t)UINTMAX_MAX - 1, 1},
	};
	int count = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "Deallocating various memory ranges whose end rounds to "
	    "0x0...");
	for (i = 0; i < count; i++) {
		kr = mach_vm_deallocate(self_map, ranges[i].address, ranges[i].size);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
		    "mach_vm_deallocate() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have succeeded.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr));
	}
	logv("Deallocations successful.");
}
2093
2094 /* Deallocating a range wrapped around the address space fails. */
void
test_deallocate_wrapped_around_ranges()
{
	vm_map_t self_map = mach_task_self();
	kern_return_t kr;
	int i;
	/* Each range's end wraps past the top of the address space
	 * without landing on 0x0, which must be rejected. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{0x1, (mach_vm_size_t)UINTMAX_MAX},
		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int count = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "Deallocating various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < count; i++) {
		kr = mach_vm_deallocate(self_map, ranges[i].address, ranges[i].size);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
		    "mach_vm_deallocate() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ARGUMENT));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
}
2126
2127 /* Deallocating in VM_MAP_NULL fails. */
void
test_deallocate_in_null_map()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();
	int flag = get_address_flag();

	logv("Deallocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	/* Only fixed-address fixtures have a meaningful address to
	 * report. */
	if ((flag & VM_FLAGS_ANYWHERE) == 0) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	assert_mach_return(mach_vm_deallocate(VM_MAP_NULL, address, size), MACH_SEND_INVALID_DEST, "mach_vm_deallocate()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2143
2144 /*****************************/
2145 /* mach_vm_read() main tests */
2146 /*****************************/
2147
2148 /* Read memory of size less than a page has aligned starting
2149 * address. Otherwise, the destination buffer's starting address has
2150 * the same boundary offset as the source region's. */
void
test_read_address_offset()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();

	/* Large unaligned reads preserve the source's boundary offset
	 * (1 here); everything else lands on an aligned buffer. */
	if (size >= vm_page_size * 2 && !get_address_alignment()) {
		T_QUIET; T_ASSERT_EQ(((address - 1) & (vm_page_size - 1)), 0,
		    "Buffer "
		    "address 0x%jx does not have the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
		logv(
		    "Buffer address 0x%jx has the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
	} else {
		assert_aligned_address(address);
		logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address);
	}
}
2172
2173 /* Reading from VM_MAP_NULL fails. */
void
test_read_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	vm_offset_t out_data;
	mach_msg_type_number_t out_count;

	logv(
	    "Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM "
	    "map...",
	    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
	assert_mach_return(mach_vm_read(VM_MAP_NULL, address, size, &out_data, &out_count), MACH_SEND_INVALID_DEST,
	    "mach_vm_read()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2190
2191 /* Reading partially deallocated memory fails. */
void
test_read_partially_deallocated_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	/* Page-aligned address near the middle of the range. */
	mach_vm_address_t hole = mach_vm_trunc_page(address + size / 2);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)hole);
	assert_deallocate_success(hole, vm_page_size);
	logv("Page deallocated.");

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_return(address, size, &read_address, &read_size, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2210
2211 /* Reading partially read-protected memory fails. */
void
test_read_partially_unreadable_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t hole = mach_vm_trunc_page(address + size / 2);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;
	kern_return_t kr_expected;

	/* For sizes < msg_ool_size_small, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239. */
	if (size < vm_page_size * 2) {
		kr_expected = KERN_INVALID_ADDRESS;
	} else {
		kr_expected = KERN_PROTECTION_FAILURE;
	}

	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)hole);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), hole, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_return(address, size, &read_address, &read_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2235
2236 /**********************************/
2237 /* mach_vm_read() edge case tests */
2238 /**********************************/
2239
void
read_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
{
	vm_map_t self_map = mach_task_self();
	kern_return_t kr;
	int i;
	/* Boundary and out-of-range source addresses. */
	mach_vm_address_t addresses[] = {vm_page_size - 1,
		                         vm_page_size,
		                         vm_page_size + 1,
		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
		                         (mach_vm_address_t)UINT_MAX,
		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
		                         (mach_vm_address_t)UINTMAX_MAX};
	int count = sizeof(addresses) / sizeof(addresses[0]);
	vm_offset_t out_data;
	mach_msg_type_number_t out_count;

	logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
	for (i = 0; i < count; i++) {
		kr = mach_vm_read(self_map, addresses[i], size, &out_data, &out_count);
		T_QUIET; T_ASSERT_EQ(kr, expected_kr,
		    "mach_vm_read() at "
		    "address 0x%jx unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
	}
	logv(
	    "mach_vm_read() returned expected value in each case: "
	    "%s.",
	    mach_error_string(expected_kr));
}
2271
2272 /* Reading 0 bytes always succeeds. */
void
test_read_zero_size()
{
	/* A zero-length read succeeds regardless of source address. */
	mach_vm_size_t zero_size = 0;

	read_edge_size(zero_size, KERN_SUCCESS);
}
2278
2279 /* Reading 4GB or higher always fails. */
void
test_read_invalid_large_size()
{
	/* One past UINT_MAX: the smallest 4 GB-or-larger size. */
	mach_vm_size_t large_size = (mach_vm_size_t)UINT_MAX + 1;

	read_edge_size(large_size, KERN_INVALID_ARGUMENT);
}
2285
2286 /* Reading a range wrapped around the address space fails. */
void
test_read_wrapped_around_ranges()
{
	vm_map_t self_map = mach_task_self();
	kern_return_t kr;
	int i;
	/* Each range's end wraps past the top of the address space. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int count = sizeof(ranges) / sizeof(ranges[0]);
	vm_offset_t out_data;
	mach_msg_type_number_t out_count;

	logv(
	    "Reading various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < count; i++) {
		kr = mach_vm_read(self_map, ranges[i].address, ranges[i].size, &out_data, &out_count);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_read() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2320
2321 /********************************/
2322 /* mach_vm_read() pattern tests */
2323 /********************************/
2324
2325 /* Write a pattern on pre-allocated memory, read into a buffer and
2326 * verify the pattern on the buffer. */
void
write_read_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t source = get_vm_address();

	write_pattern(filter, reversed, source, get_vm_size(), pattern_name);
	read_deallocate();
	/* After read_deallocate(), the fixture address/size describe
	 * the read buffer, so fetch them afresh. */
	mach_vm_address_t buffer = get_vm_address();
	mach_vm_size_t buffer_size = get_vm_size();
	logv(
	    "Verifying %s pattern on buffer of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)buffer, (uintmax_t)buffer_size, (uintmax_t)buffer_size);
	filter_addresses_do_else(filter, reversed, buffer, buffer_size, verify_address, read_zero, source);
	logv("Pattern verified on destination buffer.");
}
2344
void
test_read_address_filled()
{
	const char *pattern = "address-filled";

	write_read_verify_pattern(empty, TRUE, pattern);
}
2350
void
test_read_checkerboard()
{
	const char *pattern = "checkerboard";

	write_read_verify_pattern(checkerboard, FALSE, pattern);
}
2356
void
test_read_reverse_checkerboard()
{
	const char *pattern = "reverse checkerboard";

	write_read_verify_pattern(checkerboard, TRUE, pattern);
}
2362
2363 /***********************************/
2364 /* mach_vm_write() edge case tests */
2365 /***********************************/
2366
2367 /* Writing in VM_MAP_NULL fails. */
void
test_write_null_map()
{
	mach_vm_address_t dest = get_vm_address();
	vm_offset_t src = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t src_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx in NULL VM MAP...",
	    (uintmax_t)src, (uintmax_t)src_size, (uintmax_t)src_size, (uintmax_t)dest);
	assert_mach_return(mach_vm_write(VM_MAP_NULL, dest, src, src_size), MACH_SEND_INVALID_DEST, "mach_vm_write()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2382
2383 /* Writing 0 bytes always succeeds. */
void
test_write_zero_size()
{
	/* Shrink the source buffer to 0 bytes; the subsequent
	 * mach_vm_write() through write_buffer() must still succeed. */
	set_buffer_size(0);
	write_buffer();
}
2390
2391 /*****************************************/
2392 /* mach_vm_write() inaccessibility tests */
2393 /*****************************************/
2394
2395 /* Writing a partially deallocated buffer fails. */
void
test_write_partially_deallocated_buffer()
{
	mach_vm_address_t dest = get_vm_address();
	vm_offset_t src = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t src_size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned address near the middle of the source buffer. */
	mach_vm_address_t hole = (mach_vm_address_t)mach_vm_trunc_page(src + src_size / 2);

	logv(
	    "Deallocating a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)hole);
	assert_deallocate_success(hole, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)src, (uintmax_t)src_size, (uintmax_t)src_size, (uintmax_t)dest);
	assert_write_return(dest, src, src_size, MACH_SEND_INVALID_MEMORY);
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
}
2418
2419 /* Writing a partially read-protected buffer fails. */
void
test_write_partially_unreadable_buffer()
{
	mach_vm_address_t dest = get_vm_address();
	vm_offset_t src = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t src_size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned address near the middle of the source buffer. */
	mach_vm_address_t hole = (mach_vm_address_t)mach_vm_trunc_page(src + src_size / 2);

	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)hole);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), hole, vm_page_size, FALSE, VM_PROT_WRITE),
	    "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)src, (uintmax_t)src_size, (uintmax_t)src_size, (uintmax_t)dest);
	assert_write_return(dest, src, src_size, MACH_SEND_INVALID_MEMORY);
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
}
2443
2444 /* Writing on partially deallocated memory fails. */
void
test_write_on_partially_deallocated_range()
{
	mach_vm_address_t dest = get_vm_address();
	mach_vm_address_t first_page = mach_vm_trunc_page(dest);
	vm_offset_t src = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t src_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)first_page);
	assert_deallocate_success(first_page, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)src, (uintmax_t)src_size, (uintmax_t)src_size, (uintmax_t)dest);
	assert_write_return(dest, src, src_size, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2467
2468 /* Writing on partially unwritable memory fails. */
void
test_write_on_partially_unwritable_range()
{
	mach_vm_address_t dest = get_vm_address();
	mach_vm_address_t first_page = mach_vm_trunc_page(dest);
	vm_offset_t src = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t src_size = (mach_msg_type_number_t)get_buffer_size();
	kern_return_t kr_expected;

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	if (src_size < vm_page_size * 2) {
		kr_expected = KERN_INVALID_ADDRESS;
	} else {
		kr_expected = KERN_PROTECTION_FAILURE;
	}

	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)first_page);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), first_page, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)src, (uintmax_t)src_size, (uintmax_t)src_size, (uintmax_t)dest);
	assert_write_return(dest, src, src_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2497
2498 /*********************************/
2499 /* mach_vm_write() pattern tests */
2500 /*********************************/
2501
2502 /* Verify that a zero-filled buffer and destination memory are still
2503 * zero-filled after writing. */
void
test_zero_filled_write()
{
	/* Check the destination range is zero-filled... */
	mach_vm_address_t dest_page = mach_vm_trunc_page(get_vm_address());
	verify_pattern(empty, FALSE, dest_page, round_page(get_vm_size() + 1), "zero-filled");

	/* ...and so is the source buffer, including its offset. */
	mach_vm_address_t buffer_page = mach_vm_trunc_page(get_buffer_address());
	verify_pattern(empty, FALSE, buffer_page,
	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2511
2512 /* Write a pattern on a buffer, write the buffer into some destination
2513 * memory, and verify the pattern on both buffer and destination. */
void
pattern_write(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	/* Writes the given pattern on the source buffer, writes the
	 * buffer onto the destination memory, then verifies the
	 * pattern on both the buffer and the destination.  Only the
	 * first buffer_size bytes of the destination are written and
	 * checked. */
	mach_vm_address_t address = get_vm_address();
	mach_vm_address_t buffer_address = get_buffer_address();
	mach_vm_size_t buffer_size = get_buffer_size();

	write_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
	write_buffer();
	verify_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
	/* Fix: the original logged the hexadecimal form of buffer_size
	 * but the decimal form of the destination size, which can
	 * disagree; log the verified (buffer) size consistently. */
	logv(
	    "Verifying %s pattern on destination of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)address, (uintmax_t)buffer_size, (uintmax_t)buffer_size);
	filter_addresses_do_else(filter, reversed, address, buffer_size, verify_address, read_zero, buffer_address);
	logv("Pattern verified on destination.");
}
2532
void
test_address_filled_write()
{
	const char *pattern = "address-filled";

	pattern_write(empty, TRUE, pattern);
}
2538
void
test_checkerboard_write()
{
	const char *pattern = "checkerboard";

	pattern_write(checkerboard, FALSE, pattern);
}
2544
void
test_reverse_checkerboard_write()
{
	const char *pattern = "reverse checkerboard";

	pattern_write(checkerboard, TRUE, pattern);
}
2550
2551 /**********************************/
2552 /* mach_vm_copy() edge case tests */
2553 /**********************************/
2554
2555 /* Copying in VM_MAP_NULL fails. */
void
test_copy_null_map()
{
	mach_vm_address_t src = get_vm_address();
	mach_vm_address_t dst = get_buffer_address();
	mach_msg_type_number_t copy_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Copying buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx in NULL VM MAP...",
	    (uintmax_t)dst, (uintmax_t)copy_size, (uintmax_t)copy_size, (uintmax_t)src);
	assert_mach_return(mach_vm_copy(VM_MAP_NULL, src, copy_size, dst), MACH_SEND_INVALID_DEST, "mach_vm_copy()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2570
void
copy_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
{
	/* Attempts to copy `size` bytes from a set of edge-case source
	 * addresses into a freshly allocated one-page destination, and
	 * asserts that each copy returns expected_kr. */
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	mach_vm_address_t addresses[] = {0x0,
		                         0x1,
		                         vm_page_size - 1,
		                         vm_page_size,
		                         vm_page_size + 1,
		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
		                         (mach_vm_address_t)UINT_MAX,
		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
		                         (mach_vm_address_t)UINTMAX_MAX};
	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
	mach_vm_size_t dest_size = 4096;
	mach_vm_address_t dest = 0;

	/* Fix: log the destination allocation's actual size; the
	 * original message reported the copy size even though a fixed
	 * 4096-byte destination is allocated (compare the matching
	 * message in test_copy_wrapped_around_ranges()). */
	logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)dest_size, (uintmax_t)dest_size);
	assert_allocate_success(&dest, dest_size, VM_FLAGS_ANYWHERE);
	logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
	for (i = 0; i < numofaddresses; i++) {
		kr = mach_vm_copy(this_task, addresses[i], size, dest);
		T_QUIET; T_ASSERT_EQ(kr, expected_kr,
		    "mach_vm_copy() at "
		    "address 0x%jx unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
	}
	logv(
	    "mach_vm_copy() returned expected value in each case: "
	    "%s.",
	    mach_error_string(expected_kr));

	deallocate_range(dest, dest_size);
}
2607
/* Copying 0 bytes always succeeds. */
void
test_copy_zero_size()
{
	/* A zero-length copy is expected to return KERN_SUCCESS at every
	 * probe address copy_edge_size() tries. */
	copy_edge_size(0, KERN_SUCCESS);
}
2614
/* Copying 4GB or higher always fails. */
void
test_copy_invalid_large_size()
{
	/* A near-4GB copy is expected to fail with KERN_INVALID_ADDRESS
	 * at every probe address copy_edge_size() tries. */
	copy_edge_size((mach_vm_size_t)UINT_MAX - 1, KERN_INVALID_ADDRESS);
}
2621
/* Copying a range wrapped around the address space fails.
 * (Comment previously said "Reading"; this test exercises
 * mach_vm_copy().) */
void
test_copy_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Source ranges whose end wraps past the top of the 64-bit
	 * address space. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
	    {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
	    {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
	    {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
	    {(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
	mach_vm_address_t dest = 0;

	logv("Allocating 0x1000 (4096) bytes...");
	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);

	logv(
	    "Copying various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_copy(this_task, ranges[i].address, ranges[i].size, dest);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_copy() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));

	deallocate_range(dest, 4096);
}
2660
2661 /********************************/
2662 /* mach_vm_copy() pattern tests */
2663 /********************************/
2664
/* Write a pattern on pre-allocated region, copy into another region
 * and verify the pattern in the region. */
void
write_copy_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t src_size = get_vm_size();
	write_pattern(filter, reversed, source, src_size, pattern_name);
	/* Getting the address and size of the dest region */
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_size_t dst_size = get_buffer_size();

	/* Only dst_size bytes are copied and verified; the source region
	 * may be larger. */
	logv(
	    "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)dst_size, (uintmax_t)dst_size, (uintmax_t)dest);
	assert_copy_success(source, dst_size, dest);
	logv(
	    "Verifying %s pattern in region of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)dest, (uintmax_t)dst_size, (uintmax_t)dst_size);
	filter_addresses_do_else(filter, reversed, dest, dst_size, verify_address, read_zero, source);
	logv("Pattern verified on destination region.");
}
2689
/* Copy test with the address-filled pattern (reversed empty filter). */
void
test_copy_address_filled()
{
	write_copy_verify_pattern(empty, TRUE, "address-filled");
}
2695
/* Copy test with the checkerboard pattern. */
void
test_copy_checkerboard()
{
	write_copy_verify_pattern(checkerboard, FALSE, "checkerboard");
}
2701
/* Copy test with the reverse checkerboard pattern. */
void
test_copy_reverse_checkerboard()
{
	write_copy_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
}
2707
/* Verify that a zero-filled source and destination memory are still
 * zero-filled after writing. */
void
test_zero_filled_copy_dest()
{
	/* Check the whole page-aligned extent of the source region... */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
	/* ...and of the destination buffer region. */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2717
2718 /****************************************/
2719 /* mach_vm_copy() inaccessibility tests */
2720 /****************************************/
2721
/* Copying partially deallocated memory fails. */
void
test_copy_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	/* Page-aligned address near the middle of the source range. */
	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
	mach_vm_address_t dest = 0;

	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	assert_deallocate_success(mid_point, vm_page_size);
	logv("Page deallocated.");

	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)source);

	/* The copy must fail because the source range now has a hole. */
	assert_allocate_copy_return(source, size, &dest, KERN_INVALID_ADDRESS);

	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));

	deallocate_range(dest, size);
}
2744
/* Copying partially read-protected memory fails. */
void
test_copy_partially_unreadable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
	mach_vm_address_t dest = 0;

	/* For sizes < 1 page, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239. */
	kern_return_t kr_expected = (size < vm_page_size) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	/* Setting the protection to VM_PROT_WRITE only removes read access. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)source);
	assert_allocate_copy_return(source, size, &dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));

	deallocate_range(dest, size);
}
2770
2771 /* Copying to a partially deallocated region fails. */
2772 void
test_copy_dest_partially_deallocated_region()2773 test_copy_dest_partially_deallocated_region()
2774 {
2775 mach_vm_address_t dest = get_vm_address();
2776 mach_vm_address_t source = get_buffer_address();
2777 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2778 mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2779 #if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
2780 logv(
2781 "Deallocating a mid-range source page at address "
2782 "0x%jx...",
2783 (uintmax_t)source_mid_point);
2784 assert_deallocate_success(source_mid_point, vm_page_size);
2785 logv("Page deallocated.");
2786
2787 logv(
2788 "Copying region of address 0x%jx and size 0x%jx (%ju), on "
2789 "memory at address 0x%jx...",
2790 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2791 assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2792 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2793 #else
2794 logv(
2795 "Bypassing partially deallocated region test "
2796 "(See <rdar://problem/12190999>)");
2797 #endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
2798 }
2799
/* Copying from a partially deallocated region fails. */
void
test_copy_source_partially_deallocated_region()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned address near the middle of the source region. */
	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);

	logv(
	    "Deallocating a mid-range source page at address "
	    "0x%jx...",
	    (uintmax_t)source_mid_point);
	assert_deallocate_success(source_mid_point, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Copying region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2823
/* Copying from a partially read-protected region fails. */
void
test_copy_source_partially_unreadable_region()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
	/* Small copies take the kernel-buffer path and surface
	 * KERN_INVALID_ADDRESS instead of KERN_PROTECTION_FAILURE
	 * (same distinction as in test_copy_partially_unreadable_range). */
	kern_return_t kr = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)mid_point);
	/* VM_PROT_WRITE only: removes read access from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Copying region at address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);

	assert_copy_return(source, size, dest, kr);
	logv("Returned expected error: %s.", mach_error_string(kr));
}
2849
2850 /* Copying to a partially write-protected region fails. */
2851 void
test_copy_dest_partially_unwriteable_region()2852 test_copy_dest_partially_unwriteable_region()
2853 {
2854 kern_return_t kr;
2855 mach_vm_address_t dest = get_vm_address();
2856 mach_vm_address_t source = get_buffer_address();
2857 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2858 mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2859
2860 #if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
2861 logv(
2862 "Read-protecting a mid-range buffer page at address "
2863 "0x%jx...",
2864 (uintmax_t)mid_point);
2865 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2866 logv("Page read-protected.");
2867 logv(
2868 "Copying region at address 0x%jx and size 0x%jx (%ju), on "
2869 "memory at address 0x%jx...",
2870 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2871 if (size >= vm_page_size) {
2872 kr = KERN_PROTECTION_FAILURE;
2873 } else {
2874 kr = KERN_INVALID_ADDRESS;
2875 }
2876 assert_copy_return(source, size, dest, kr);
2877 logv("Returned expected error: %s.", mach_error_string(kr));
2878 #else
2879 logv(
2880 "Bypassing partially unwriteable region test "
2881 "(See <rdar://problem/12190999>)");
2882 #endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
2883 }
2884
/* Copying on partially deallocated memory fails. */
void
test_copy_source_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_address_t start = mach_vm_trunc_page(source);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first source page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	/* NOTE(review): the log wording "Writing buffer" was carried over
	 * from the write tests; this exercises mach_vm_copy(). */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2908
/* Copying on partially deallocated memory fails (destination side). */
void
test_copy_dest_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	/* NOTE(review): the log wording "Writing buffer" was carried over
	 * from the write tests; this exercises mach_vm_copy(). */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2932
/* Copying on partially unwritable memory fails. */
void
test_copy_dest_on_partially_unwritable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	/* VM_PROT_READ only: removes write access from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	/* NOTE(review): "Writing buffer" wording carried over from the
	 * write tests; this exercises mach_vm_copy(). */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2962
/* Copying on partially unreadable memory fails. */
void
test_copy_source_on_partially_unreadable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_address_t start = mach_vm_trunc_page(source);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	/* NOTE(review): the page protected below is the first *source*
	 * page, despite the log saying "destination". */
	logv(
	    "Read-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2992
2993 /********************************/
2994 /* mach_vm_protect() main tests */
2995 /********************************/
2996
/* Verify the allocated region (extended to whole pages) is zero-filled. */
void
test_zero_filled_extended()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
}
3002
/* Allocated region is still zero-filled after read-protecting it and
 * then restoring read-access. */
void
test_zero_filled_readprotect()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size,
	    (size == 1) ? "" : "s", (uintmax_t)address);
	/* VM_PROT_DEFAULT restores read/write access to the range. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, VM_PROT_DEFAULT), "mach_vm_protect()");
	logv("Region has read access.");
	test_zero_filled_extended();
}
3017
3018 void
verify_protection(vm_prot_t protection,const char * protection_name)3019 verify_protection(vm_prot_t protection, const char * protection_name)
3020 {
3021 mach_vm_address_t address = get_vm_address();
3022 mach_vm_size_t size = get_vm_size();
3023 mach_vm_size_t original_size = size;
3024 vm_region_basic_info_data_64_t info;
3025 mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
3026 mach_port_t unused;
3027
3028 logv(
3029 "Verifying %s-protection on region of address 0x%jx and "
3030 "size 0x%jx (%ju) with mach_vm_region()...",
3031 protection_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
3032 T_QUIET; T_ASSERT_MACH_SUCCESS(
3033 mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused),
3034 "mach_vm_region()");
3035 if (original_size) {
3036 T_QUIET; T_ASSERT_EQ((info.protection & protection), 0,
3037 "Region "
3038 "is unexpectedly %s-unprotected.",
3039 protection_name);
3040 logv("Region is %s-protected as expected.", protection_name);
3041 } else {
3042 T_QUIET; T_ASSERT_NE(info.protection & protection, 0,
3043 "Region is "
3044 "unexpectedly %s-protected.",
3045 protection_name);
3046 logv("Region is %s-unprotected as expected.", protection_name);
3047 }
3048 }
3049
/* Verify the range was protected against read access. */
void
test_verify_readprotection()
{
	verify_protection(VM_PROT_READ, "read");
}
3055
/* Verify the range was protected against write access. */
void
test_verify_writeprotection()
{
	verify_protection(VM_PROT_WRITE, "write");
}
3061
3062 /******************************/
3063 /* Protection bus error tests */
3064 /******************************/
3065
3066 /* mach_vm_protect() affects the smallest aligned region (integral
3067 * number of pages) containing the given range. */
3068
/* Addresses in read-protected range are inaccessible. */
void
access_readprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Reading from %s 0x%jx of read-protected range...", position, (uintmax_t)address);
	/* This dereference is expected to kill the process with SIGBUS;
	 * reaching the assertion below means the protection failed. */
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx."
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
3079
/* Start of read-protected range is inaccessible. */
void
test_access_readprotected_range_start()
{
	access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3086
/* Middle of read-protected range is inaccessible. */
void
test_access_readprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Probe halfway into the page-aligned extent of the range. */
	access_readprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3094
/* End of read-protected range is inaccessible. */
void
test_access_readprotected_range_end()
{
	/* Probe the last word-sized slot before the rounded-up end. */
	access_readprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3101
/* Addresses in write-protected range are unwritable. */
void
write_writeprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Writing on %s 0x%jx of write-protected range...", position, (uintmax_t)address);
	/* This store is expected to kill the process with SIGBUS;
	 * reaching the assertion below means the protection failed. */
	MACH_VM_ADDRESS_T(address) = 0x0;
	T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx."
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)address);
}
3112
/* Start of write-protected range is unwritable. */
void
test_write_writeprotected_range_start()
{
	write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3119
/* Middle of write-protected range is unwritable. */
void
test_write_writeprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Probe halfway into the page-aligned extent of the range. */
	write_writeprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3127
/* End of write-protected range is unwritable. */
void
test_write_writeprotected_range_end()
{
	/* Probe the last word-sized slot before the rounded-up end. */
	write_writeprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3134
3135 /*************************************/
3136 /* mach_vm_protect() edge case tests */
3137 /*************************************/
3138
3139 void
protect_zero_size(vm_prot_t protection,const char * protection_name)3140 protect_zero_size(vm_prot_t protection, const char * protection_name)
3141 {
3142 int i;
3143 kern_return_t kr;
3144 vm_map_t this_task = mach_task_self();
3145 mach_vm_address_t addresses[] = {0x0,
3146 0x1,
3147 vm_page_size - 1,
3148 vm_page_size,
3149 vm_page_size + 1,
3150 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
3151 (mach_vm_address_t)UINT_MAX,
3152 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
3153 (mach_vm_address_t)UINTMAX_MAX};
3154 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
3155
3156 logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name);
3157 for (i = 0; i < numofaddresses; i++) {
3158 kr = mach_vm_protect(this_task, addresses[i], 0, FALSE, protection);
3159 T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3160 "mach_vm_protect() at "
3161 "address 0x%jx unexpectedly failed: %s.",
3162 (uintmax_t)addresses[i], mach_error_string(kr));
3163 }
3164 logv("Protection successful.");
3165 }
3166
/* Zero-size read-protection succeeds at any address. */
void
test_readprotect_zero_size()
{
	protect_zero_size(VM_PROT_READ, "Read");
}
3172
/* Zero-size write-protection succeeds at any address. */
void
test_writeprotect_zero_size()
{
	protect_zero_size(VM_PROT_WRITE, "Write");
}
3178
3179 /* Protecting a range wrapped around the address space fails. */
3180 void
protect_wrapped_around_ranges(vm_prot_t protection,const char * protection_name)3181 protect_wrapped_around_ranges(vm_prot_t protection, const char * protection_name)
3182 {
3183 int i;
3184 kern_return_t kr;
3185 vm_map_t this_task = mach_task_self();
3186 struct {
3187 mach_vm_address_t address;
3188 mach_vm_size_t size;
3189 } ranges[] = {
3190 {0x1, (mach_vm_size_t)UINTMAX_MAX},
3191 {vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
3192 {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
3193 {(mach_vm_address_t)UINTMAX_MAX, 1},
3194 };
3195 int numofranges = sizeof(ranges) / sizeof(ranges[0]);
3196
3197 logv(
3198 "%s-protecting various memory ranges wrapping around the "
3199 "address space...",
3200 protection_name);
3201 for (i = 0; i < numofranges; i++) {
3202 kr = mach_vm_protect(this_task, ranges[i].address, ranges[i].size, FALSE, protection);
3203 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
3204 "mach_vm_protect() with address 0x%jx and size "
3205 "0x%jx (%ju) unexpectedly returned: %s.\n"
3206 "Should have returned: %s.",
3207 (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
3208 mach_error_string(KERN_INVALID_ARGUMENT));
3209 }
3210 logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
3211 }
3212
/* Read-protecting wrapped-around ranges fails. */
void
test_readprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_READ, "Read");
}
3218
/* Write-protecting wrapped-around ranges fails. */
void
test_writeprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_WRITE, "Write");
}
3224
3225 /*******************/
3226 /* vm_copy() tests */
3227 /*******************/
3228
/* Verify the address space is being shared. */
void
assert_share_mode(mach_vm_address_t address, unsigned share_mode, const char * share_mode_name)
{
	mach_vm_size_t size = get_vm_size();
	vm_region_extended_info_data_t info;
	mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
	mach_port_t unused;

	/*
	 * XXX Fails on UVM kernel. See <rdar://problem/12164664>
	 */
	/* NOTE(review): `notyet` is never defined here, so the check below
	 * is currently always compiled out and only the bypass message runs. */
#if notyet /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
	logv(
	    "Verifying %s share mode on region of address 0x%jx and "
	    "size 0x%jx (%ju)...",
	    share_mode_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(
	    mach_vm_region(mach_task_self(), &address, &size, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&info, &count, &unused),
	    "mach_vm_region()");
	T_QUIET; T_ASSERT_EQ(info.share_mode, share_mode,
	    "Region's share mode "
	    " unexpectedly is not %s but %d.",
	    share_mode_name, info.share_mode);
	logv("Region has a share mode of %s as expected.", share_mode_name);
#else
	logv("Bypassing share_mode verification (See <rdar://problem/12164664>)");
#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
}
3258
3259 /* Do the vm_copy() and verify its success. */
3260 void
assert_vmcopy_success(vm_address_t src,vm_address_t dst,const char * source_name)3261 assert_vmcopy_success(vm_address_t src, vm_address_t dst, const char * source_name)
3262 {
3263 kern_return_t kr;
3264 mach_vm_size_t size = get_vm_size();
3265
3266 logv("Copying (using mach_vm_copy()) from a %s source...", source_name);
3267 kr = mach_vm_copy(mach_task_self(), src, size, dst);
3268 T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3269 "mach_vm_copy() with the source address "
3270 "0x%jx, designation address 0x%jx, and size 0x%jx (%ju) unexpectly "
3271 "returned %s.\n Should have returned: %s.",
3272 (uintmax_t)src, (uintmax_t)dst, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr),
3273 mach_error_string(KERN_SUCCESS));
3274 logv("Copy (mach_vm_copy()) was successful as expected.");
3275 }
3276
/* Fill the whole region with the test pattern starting at `start`.
 * Both filter branches use write_address, so every address in the
 * region is written regardless of the filter outcome. */
void
write_region(mach_vm_address_t address, mach_vm_size_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, write_address, write_address, start);
}
3284
/* Verify the whole region against the test pattern starting at
 * `start`. Both filter branches use verify_address, so every address
 * in the region is checked regardless of the filter outcome. */
void
verify_region(mach_vm_address_t address, mach_vm_address_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, verify_address, verify_address, start);
}
3292
3293 /* Perform the post vm_copy() action and verify its results. */
3294 void
modify_one_and_verify_all_regions(vm_address_t src,vm_address_t dst,vm_address_t shared_copied,boolean_t shared)3295 modify_one_and_verify_all_regions(vm_address_t src, vm_address_t dst, vm_address_t shared_copied, boolean_t shared)
3296 {
3297 mach_vm_size_t size = get_vm_size();
3298 int action = get_vmcopy_post_action();
3299
3300 /* Do the post vm_copy() action. */
3301 switch (action) {
3302 case VMCOPY_MODIFY_SRC:
3303 logv("Modifying: source%s...", shared ? " (shared with other region)" : "");
3304 write_region(src, 1);
3305 break;
3306
3307 case VMCOPY_MODIFY_DST:
3308 logv("Modifying: destination...");
3309 write_region(dst, 1);
3310 break;
3311
3312 case VMCOPY_MODIFY_SHARED_COPIED:
3313 /* If no shared_copied then no need to verify (nothing changed). */
3314 if (!shared_copied) {
3315 return;
3316 }
3317 logv("Modifying: shared/copied%s...", shared ? " (shared with source region)" : "");
3318 write_region(shared_copied, 1);
3319 break;
3320
3321 default:
3322 T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action);
3323 }
3324 logv("Modification was successful as expected.");
3325
3326 /* Verify all the regions with what is expected. */
3327 logv("Verifying: source... ");
3328 verify_region(src, (VMCOPY_MODIFY_SRC == action || (shared && VMCOPY_MODIFY_SHARED_COPIED == action)) ? 1 : 0);
3329 logv("destination... ");
3330 verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3331 if (shared_copied) {
3332 logv("shared/copied... ");
3333 verify_region(shared_copied, (VMCOPY_MODIFY_SHARED_COPIED == action || (shared && VMCOPY_MODIFY_SRC == action)) ? 1 : 0);
3334 }
3335 logv("Verification was successful as expected.");
3336 }
3337
3338 /* Test source being a simple fresh region. */
3339 void
test_vmcopy_fresh_source()3340 test_vmcopy_fresh_source()
3341 {
3342 mach_vm_size_t size = get_vm_size();
3343 mach_vm_address_t src, dst;
3344
3345 if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED) {
3346 /* No shared/copied region to modify so just return. */
3347 logv("No shared/copied region as expected.");
3348 return;
3349 }
3350
3351 assert_allocate_success(&src, size, TRUE);
3352
3353 assert_share_mode(src, SM_EMPTY, "SM_EMPTY");
3354
3355 write_region(src, 0);
3356
3357 assert_allocate_success(&dst, size, TRUE);
3358
3359 assert_vmcopy_success(src, dst, "freshly allocated");
3360
3361 modify_one_and_verify_all_regions(src, dst, 0, FALSE);
3362
3363 assert_deallocate_success(src, size);
3364 assert_deallocate_success(dst, size);
3365 }
3366
3367 /* Test source copied from a shared region. */
3368 void
test_vmcopy_shared_source()3369 test_vmcopy_shared_source()
3370 {
3371 mach_vm_size_t size = get_vm_size();
3372 mach_vm_address_t src, dst, shared;
3373 int action = get_vmcopy_post_action();
3374 int pid, status;
3375
3376 assert_allocate_success(&src, size, TRUE);
3377
3378 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_inherit(mach_task_self(), src, size, VM_INHERIT_SHARE), "mach_vm_inherit()");
3379
3380 write_region(src, 0);
3381
3382 pid = fork();
3383 if (pid == 0) {
3384 /* Verify that the child's 'src' is shared with the
3385 * parent's src */
3386 assert_share_mode(src, SM_SHARED, "SM_SHARED");
3387 assert_allocate_success(&dst, size, TRUE);
3388 assert_vmcopy_success(src, dst, "shared");
3389 if (VMCOPY_MODIFY_SHARED_COPIED == action) {
3390 logv("Modifying: shared...");
3391 write_region(src, 1);
3392 logv("Modification was successsful as expected.");
3393 logv("Verifying: source... ");
3394 verify_region(src, 1);
3395 logv("destination...");
3396 verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3397 logv("Verification was successful as expected.");
3398 } else {
3399 modify_one_and_verify_all_regions(src, dst, 0, TRUE);
3400 }
3401 assert_deallocate_success(dst, size);
3402 exit(0);
3403 } else if (pid > 0) {
3404 /* In the parent the src becomes the shared */
3405 shared = src;
3406 wait(&status);
3407 if (WEXITSTATUS(status) != 0) {
3408 exit(status);
3409 }
3410 /* verify shared (shared with child's src) */
3411 logv("Verifying: shared...");
3412 verify_region(shared, (VMCOPY_MODIFY_SHARED_COPIED == action || VMCOPY_MODIFY_SRC == action) ? 1 : 0);
3413 logv("Verification was successful as expected.");
3414 } else {
3415 T_WITH_ERRNO; T_ASSERT_FAIL("fork failed");
3416 }
3417
3418 assert_deallocate_success(src, size);
3419 }
3420
3421 /* Test source copied from another mapping. */
3422 void
test_vmcopy_copied_from_source()3423 test_vmcopy_copied_from_source()
3424 {
3425 mach_vm_size_t size = get_vm_size();
3426 mach_vm_address_t src, dst, copied;
3427
3428 assert_allocate_success(&copied, size, TRUE);
3429 write_region(copied, 0);
3430
3431 assert_allocate_success(&src, size, TRUE);
3432
3433 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), copied, size, src), "mach_vm_copy()");
3434
3435 assert_share_mode(src, SM_COW, "SM_COW");
3436
3437 assert_allocate_success(&dst, size, TRUE);
3438
3439 assert_vmcopy_success(src, dst, "copied from");
3440
3441 modify_one_and_verify_all_regions(src, dst, copied, FALSE);
3442
3443 assert_deallocate_success(src, size);
3444 assert_deallocate_success(dst, size);
3445 assert_deallocate_success(copied, size);
3446 }
3447
3448 /* Test source copied to another mapping. */
3449 void
test_vmcopy_copied_to_source()3450 test_vmcopy_copied_to_source()
3451 {
3452 mach_vm_size_t size = get_vm_size();
3453 mach_vm_address_t src, dst, copied;
3454
3455 assert_allocate_success(&src, size, TRUE);
3456 write_region(src, 0);
3457
3458 assert_allocate_success(&copied, size, TRUE);
3459
3460 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), src, size, copied), "mach_vm_copy()");
3461
3462 assert_share_mode(src, SM_COW, "SM_COW");
3463
3464 assert_allocate_success(&dst, size, TRUE);
3465
3466 assert_vmcopy_success(src, dst, "copied to");
3467
3468 modify_one_and_verify_all_regions(src, dst, copied, FALSE);
3469
3470 assert_deallocate_success(src, size);
3471 assert_deallocate_success(dst, size);
3472 assert_deallocate_success(copied, size);
3473 }
3474
3475 /* Test a truedshared source copied. */
3476 void
test_vmcopy_trueshared_source()3477 test_vmcopy_trueshared_source()
3478 {
3479 mach_vm_size_t size = get_vm_size();
3480 mach_vm_address_t src = 0x0, dst, shared;
3481 vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3482 vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3483 mem_entry_name_port_t mem_obj;
3484
3485 assert_allocate_success(&shared, size, TRUE);
3486 write_region(shared, 0);
3487
3488 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)shared, cur_protect, &mem_obj,
3489 (mem_entry_name_port_t)NULL),
3490 "mach_make_memory_entry_64()");
3491 T_QUIET; T_ASSERT_MACH_SUCCESS(
3492 mach_vm_map(mach_task_self(), &src, size, 0, TRUE, mem_obj, 0, FALSE, cur_protect, max_protect, VM_INHERIT_NONE),
3493 "mach_vm_map()");
3494
3495 assert_share_mode(src, SM_TRUESHARED, "SM_TRUESHARED");
3496
3497 assert_allocate_success(&dst, size, TRUE);
3498
3499 assert_vmcopy_success(src, dst, "true shared");
3500
3501 modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3502
3503 assert_deallocate_success(src, size);
3504 assert_deallocate_success(dst, size);
3505 assert_deallocate_success(shared, size);
3506 }
3507
3508 /* Test a private aliazed source copied. */
3509 void
test_vmcopy_private_aliased_source()3510 test_vmcopy_private_aliased_source()
3511 {
3512 mach_vm_size_t size = get_vm_size();
3513 mach_vm_address_t src = 0x0, dst, shared;
3514 vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3515 vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3516
3517 assert_allocate_success(&shared, size, TRUE);
3518 write_region(shared, 0);
3519
3520 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_remap(mach_task_self(), &src, size, 0, TRUE, mach_task_self(), shared, FALSE, &cur_protect,
3521 &max_protect, VM_INHERIT_NONE),
3522 "mach_vm_remap()");
3523
3524 assert_share_mode(src, SM_PRIVATE_ALIASED, "SM_PRIVATE_ALIASED");
3525
3526 assert_allocate_success(&dst, size, TRUE);
3527
3528 assert_vmcopy_success(src, dst, "true shared");
3529
3530 modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3531
3532 assert_deallocate_success(src, size);
3533 assert_deallocate_success(dst, size);
3534 assert_deallocate_success(shared, size);
3535 }
3536
3537 /*************/
3538 /* VM Suites */
3539 /*************/
3540
/* Drive every allocation test suite across the cross product of
 * allocator (mach_vm_allocate and mach_vm_map variants), VM size,
 * address flag (unspecified vs. fixed) and fixed-address alignment,
 * via the global *_idx loop variables, then run the allocator- and
 * size-independent edge-case suites. */
void
run_allocate_test_suites()
{
	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
	 * error finding xnu major version number. */
	/* unsigned int xnu_version = xnu_major_version(); */

	UnitTests allocate_main_tests = {
		{"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
		{"Allocated address is page-aligned", test_aligned_address},
		{"Allocated memory is zero-filled", test_zero_filled},
		{"Write and verify address-filled pattern", test_write_address_filled},
		{"Write and verify checkerboard pattern", test_write_checkerboard},
		{"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard},
		{"Write and verify page ends pattern", test_write_page_ends},
		{"Write and verify page interiors pattern", test_write_page_interiors},
		{"Reallocate allocated pages", test_reallocate_pages},
	};
	UnitTests allocate_address_error_tests = {
		{"Allocate at address zero", test_allocate_at_zero},
		{"Allocate at a 2 MB boundary-unaligned, page-aligned "
		"address",
		test_allocate_2MB_boundary_unaligned_page_aligned_address},
	};
	UnitTests allocate_argument_error_tests = {
		{"Allocate in NULL VM map", test_allocate_in_null_map}, {"Allocate with kernel flags", test_allocate_with_kernel_flags},
	};
	UnitTests allocate_fixed_size_tests = {
		{"Allocate zero size", test_allocate_zero_size},
		{"Allocate overflowing size", test_allocate_overflowing_size},
		{"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint},
		{"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages},
	};
	UnitTests allocate_invalid_large_size_test = {
		{"Allocate invalid large size", test_allocate_invalid_large_size},
	};
	UnitTests mach_vm_map_protection_inheritance_error_test = {
		{"mach_vm_map() with invalid protection/inheritance "
		"arguments",
		test_mach_vm_map_protection_inheritance_error},
	};
	UnitTests mach_vm_map_large_mask_overflow_error_test = {
		{"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error},
	};

	/* Run the test suites with various allocators and VM sizes, and
	 * unspecified or fixed (page-aligned or page-unaligned),
	 * addresses. */
	for (allocators_idx = 0; allocators_idx < numofallocators; allocators_idx++) {
		for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
			for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
				for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
					/* An allocated address will be page-aligned. */
					/* Only run the zero size mach_vm_map() error tests in the
					 * unspecified address case, since we won't be able to retrieve a
					 * fixed address for allocation. See 8003930. */
					if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED) ||
					    (allocators_idx != MACH_VM_ALLOCATE && sizes_idx == ZERO_BYTES && flags_idx == FIXED)) {
						continue;
					}
					run_suite(set_up_allocator_and_vm_variables, allocate_argument_error_tests, do_nothing,
					    "%s argument error tests, %s%s address, "
					    "%s size: 0x%jx (%ju)",
					    allocators[allocators_idx].description, address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* mach_vm_map() only protection and inheritance error
					 * tests. */
					if (allocators_idx != MACH_VM_ALLOCATE) {
						run_suite(set_up_allocator_and_vm_variables, mach_vm_map_protection_inheritance_error_test, do_nothing,
						    "%s protection and inheritance "
						    "error test, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
					/* mach_vm_map() cannot allocate 0 bytes, see 8003930. */
					if (allocators_idx == MACH_VM_ALLOCATE || sizes_idx != ZERO_BYTES) {
						run_suite(set_up_allocator_and_vm_variables_and_allocate, allocate_main_tests, deallocate,
						    "%s main "
						    "allocation tests, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
				}
			}
			/* Address-error suites depend only on allocator and size. */
			run_suite(set_up_allocator_and_vm_size, allocate_address_error_tests, do_nothing,
			    "%s address "
			    "error allocation tests, %s size: 0x%jx (%ju)",
			    allocators[allocators_idx].description, vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
			    (uintmax_t)vm_sizes[sizes_idx].size);
		}
		run_suite(set_up_allocator, allocate_fixed_size_tests, do_nothing, "%s fixed size allocation tests",
		    allocators[allocators_idx].description);
		/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
		 * error finding xnu major version number. */
		/* mach_vm_map() with a named entry triggers a panic with this test
		 * unless under xnu-1598 or later, see 8048580. */
		/* if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY
		 *  || xnu_version >= 1598) { */
		if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY) {
			run_suite(set_up_allocator, allocate_invalid_large_size_test, do_nothing, "%s invalid large size allocation test",
			    allocators[allocators_idx].description);
		}
	}
	/* mach_vm_map() only large mask overflow tests. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		run_suite(set_up_vm_size, mach_vm_map_large_mask_overflow_error_test, do_nothing,
		    "mach_vm_map() large mask overflow "
		    "error test, size: 0x%jx (%ju)",
		    (uintmax_t)vm_sizes[sizes_idx].size, (uintmax_t)vm_sizes[sizes_idx].size);
	}
}
3660
/* Drive the mach_vm_deallocate() test suites across VM sizes and
 * address specifications. Suites that access deallocated memory run
 * with an expected SIGSEGV (restored to 0 afterwards); the final
 * whole-address-space suite expects either SIGSEGV or SIGBUS (-1). */
void
run_deallocate_test_suites()
{
	UnitTests access_deallocated_memory_tests = {
		{"Read start of deallocated range", test_access_deallocated_range_start},
		{"Read middle of deallocated range", test_access_deallocated_range_middle},
		{"Read end of deallocated range", test_access_deallocated_range_end},
	};
	UnitTests deallocate_reallocate_tests = {
		{"Deallocate twice", test_deallocate_twice},
		{"Write pattern, deallocate, reallocate (deallocated "
		"memory is inaccessible), and verify memory is "
		"zero-filled",
		test_write_pattern_deallocate_reallocate_zero_filled},
	};
	UnitTests deallocate_null_map_test = {
		{"Deallocate in NULL VM map", test_deallocate_in_null_map},
	};
	UnitTests deallocate_edge_case_tests = {
		{"Deallocate zero size ranges", test_deallocate_zero_size_ranges},
		{"Deallocate memory ranges whose end rounds to 0x0", test_deallocate_rounded_zero_end_ranges},
		{"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges},
	};
	UnitTests deallocate_suicide_test = {
		{"Deallocate whole address space", test_deallocate_suicide},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				/* Accessing deallocated memory should cause a segmentation
				 * fault. */
				/* Nothing gets deallocated if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					set_expected_signal(SIGSEGV);
					run_suite(set_up_vm_variables_and_allocate, access_deallocated_memory_tests, do_nothing,
					    "Deallocated memory access tests, "
					    "%s%s address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					set_expected_signal(0);
				}
				run_suite(set_up_vm_variables_and_allocate, deallocate_reallocate_tests, do_nothing,
				    "Deallocation and reallocation tests, %s%s "
				    "address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables, deallocate_null_map_test, do_nothing,
				    "mach_vm_deallocate() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
			}
		}
	}
	run_suite(do_nothing, deallocate_edge_case_tests, do_nothing, "Edge case deallocation tests");

	set_expected_signal(-1); /* SIGSEGV or SIGBUS */
	run_suite(do_nothing, deallocate_suicide_test, do_nothing, "Whole address space deallocation test");
	set_expected_signal(0);
}
3737
/* Drive the mach_vm_read() test suites across VM sizes and address
 * specifications, skipping the meaningless ANYWHERE+UNALIGNED combo
 * and the inaccessibility suites for zero-size ranges (always
 * accessible). */
void
run_read_test_suites()
{
	UnitTests read_main_tests = {
		{"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
		{"Read address has the correct boundary offset", test_read_address_offset},
		{"Reallocate read pages", test_reallocate_pages},
		{"Read and verify zero-filled memory", test_zero_filled},
	};
	UnitTests read_pattern_tests = {
		{"Read address-filled pattern", test_read_address_filled},
		{"Read checkerboard pattern", test_read_checkerboard},
		{"Read reverse checkerboard pattern", test_read_reverse_checkerboard},
	};
	UnitTests read_null_map_test = {
		{"Read from NULL VM map", test_read_null_map},
	};
	UnitTests read_edge_case_tests = {
		{"Read zero size", test_read_zero_size},
		{"Read invalid large size", test_read_invalid_large_size},
		{"Read wrapped around memory ranges", test_read_wrapped_around_ranges},
	};
	UnitTests read_inaccessible_tests = {
		/* NOTE(review): "decallocated" is a typo ("deallocated") in this
		 * runtime description string; left unchanged here since a
		 * doc-only pass must not alter program output. */
		{"Read partially decallocated memory", test_read_partially_deallocated_range},
		{"Read partially read-protected memory", test_read_partially_unreadable_range},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned) addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_read_deallocate, read_main_tests, deallocate,
				    "mach_vm_read() "
				    "main tests, %s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_pattern_tests, deallocate,
				    "mach_vm_read() pattern tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_null_map_test, deallocate_extra_page,
				    "mach_vm_read() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* A zero size range is always accessible. */
				if (sizes_idx != ZERO_BYTES) {
					run_suite(set_up_vm_variables_and_allocate_extra_page, read_inaccessible_tests, deallocate_extra_page,
					    "mach_vm_read() inaccessibility tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
				}
			}
		}
	}
	run_suite(do_nothing, read_edge_case_tests, do_nothing, "mach_vm_read() fixed size tests");
}
3813
/* Drive the mach_vm_write() test suites across destination sizes,
 * address specifications, buffer sizes and buffer boundary offsets.
 * Inaccessibility suites are skipped for zero-size buffers, and the
 * main/pattern suites require the buffer to fit in the destination. */
void
run_write_test_suites()
{
	UnitTests write_main_tests = {
		{"Write and verify zero-filled memory", test_zero_filled_write},
	};
	UnitTests write_pattern_tests = {
		{"Write address-filled pattern", test_address_filled_write},
		{"Write checkerboard pattern", test_checkerboard_write},
		{"Write reverse checkerboard pattern", test_reverse_checkerboard_write},
	};
	UnitTests write_edge_case_tests = {
		{"Write into NULL VM map", test_write_null_map}, {"Write zero size", test_write_zero_size},
	};
	UnitTests write_inaccessible_tests = {
		/* NOTE(review): "decallocated" is a typo ("deallocated") in this
		 * runtime description string; left unchanged in a doc-only pass. */
		{"Write partially decallocated buffer", test_write_partially_deallocated_buffer},
		{"Write partially read-protected buffer", test_write_partially_unreadable_buffer},
		{"Write on partially deallocated range", test_write_on_partially_deallocated_range},
		{"Write on partially write-protected range", test_write_on_partially_unwritable_range},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various destination sizes and
	 * unspecified or fixed (page-aligned or page-unaligned)
	 * addresses, and various buffer sizes and boundary offsets. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
					for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
						/* An allocated address will be page-aligned. */
						if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_edge_case_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() edge case tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						/* A zero size buffer is always accessible. */
						if (buffer_sizes_idx != ZERO_BYTES) {
							run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_inaccessible_tests,
							    deallocate_vm_and_buffer,
							    "mach_vm_write() inaccessibility tests, "
							    "%s%s address, %s size: 0x%jx (%ju), buffer "
							    "%s size: 0x%jx (%ju), buffer boundary "
							    "offset: %d",
							    address_flags[flags_idx].description,
							    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
							    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
							    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
							    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
							    buffer_offsets[offsets_idx].offset);
						}
						/* The buffer cannot be larger than the destination. */
						if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_write, write_main_tests, deallocate_vm_and_buffer,
						    "mach_vm_write() main tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_pattern_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() pattern tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
					}
				}
			}
		}
	}
}
3907
/* Drive the mach_vm_protect() test suites across VM sizes and address
 * specifications. Suites that access protected memory run with an
 * expected SIGBUS (restored to 0 afterwards); zero-size ranges are
 * skipped for those since nothing gets protected. */
void
run_protect_test_suites()
{
	UnitTests readprotection_main_tests = {
		{"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect},
		{"Verify that region is read-protected iff size is "
		"nonzero",
		test_verify_readprotection},
	};
	UnitTests access_readprotected_memory_tests = {
		{"Read start of read-protected range", test_access_readprotected_range_start},
		{"Read middle of read-protected range", test_access_readprotected_range_middle},
		{"Read end of read-protected range", test_access_readprotected_range_end},
	};
	UnitTests writeprotection_main_tests = {
		{"Write-protect and verify zero-filled memory", test_zero_filled_extended},
		{"Verify that region is write-protected iff size is "
		"nonzero",
		test_verify_writeprotection},
	};
	UnitTests write_writeprotected_memory_tests = {
		{"Write at start of write-protected range", test_write_writeprotected_range_start},
		{"Write in middle of write-protected range", test_write_writeprotected_range_middle},
		{"Write at end of write-protected range", test_write_writeprotected_range_end},
	};
	UnitTests protect_edge_case_tests = {
		{"Read-protect zero size ranges", test_readprotect_zero_size},
		{"Write-protect zero size ranges", test_writeprotect_zero_size},
		{"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges},
		{"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_readprotect, readprotection_main_tests, deallocate_extra_page,
				    "Main read-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_allocate_writeprotect, writeprotection_main_tests, deallocate_extra_page,
				    "Main write-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* Nothing gets protected if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					set_expected_signal(SIGBUS);
					/* Accessing read-protected memory should cause a bus
					 * error. */
					run_suite(set_up_vm_variables_allocate_readprotect, access_readprotected_memory_tests, deallocate_extra_page,
					    "Read-protected memory access tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* Writing on write-protected memory should cause a bus
					 * error. */
					run_suite(set_up_vm_variables_allocate_writeprotect, write_writeprotected_memory_tests, deallocate_extra_page,
					    "Write-protected memory writing tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					set_expected_signal(0);
				}
			}
		}
	}
	run_suite(do_nothing, protect_edge_case_tests, do_nothing, "Edge case protection tests");
}
3994
3995 void
run_copy_test_suites()3996 run_copy_test_suites()
3997 {
3998 /* Copy tests */
3999 UnitTests copy_main_tests = {
4000 {"Copy and verify zero-filled memory", test_zero_filled_copy_dest},
4001 };
4002 UnitTests copy_pattern_tests = {
4003 {"Copy address-filled pattern", test_copy_address_filled},
4004 {"Copy checkerboard pattern", test_copy_checkerboard},
4005 {"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard},
4006 };
4007 UnitTests copy_edge_case_tests = {
4008 {"Copy with NULL VM map", test_copy_null_map},
4009 {"Copy zero size", test_copy_zero_size},
4010 {"Copy invalid large size", test_copy_invalid_large_size},
4011 {"Read wrapped around memory ranges", test_copy_wrapped_around_ranges},
4012 };
4013 UnitTests copy_inaccessible_tests = {
4014 {"Copy source partially decallocated region", test_copy_source_partially_deallocated_region},
4015 /* XXX */
4016 {"Copy destination partially decallocated region", test_copy_dest_partially_deallocated_region},
4017 {"Copy source partially read-protected region", test_copy_source_partially_unreadable_region},
4018 /* XXX */
4019 {"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region},
4020 {"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range},
4021 {"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range},
4022 {"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range},
4023 {"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range},
4024 };
4025
4026 UnitTests copy_shared_mode_tests = {
4027 {"Copy using freshly allocated source", test_vmcopy_fresh_source},
4028 {"Copy using shared source", test_vmcopy_shared_source},
4029 {"Copy using a \'copied from\' source", test_vmcopy_copied_from_source},
4030 {"Copy using a \'copied to\' source", test_vmcopy_copied_to_source},
4031 {"Copy using a true shared source", test_vmcopy_trueshared_source},
4032 {"Copy using a private aliased source", test_vmcopy_private_aliased_source},
4033 };
4034
4035 /* All allocations done with mach_vm_allocate(). */
4036 set_allocator(wrapper_mach_vm_allocate);
4037
4038 /* All the tests are done with page size regions. */
4039 set_vm_size(vm_page_size);
4040
4041 /* Run the test suites with various shared modes for source */
4042 for (vmcopy_action_idx = 0; vmcopy_action_idx < numofvmcopyactions; vmcopy_action_idx++) {
4043 run_suite(set_up_copy_shared_mode_variables, copy_shared_mode_tests, do_nothing, "Copy shared mode tests, %s",
4044 vmcopy_actions[vmcopy_action_idx].description);
4045 }
4046
4047 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
4048 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
4049 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
4050 for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
4051 for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
4052 /* An allocated address will be page-aligned. */
4053 if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
4054 continue;
4055 }
4056 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_edge_case_tests,
4057 deallocate_vm_and_buffer,
4058 "mach_vm_copy() edge case tests, %s%s address, %s "
4059 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
4060 "buffer boundary offset: %d",
4061 address_flags[flags_idx].description,
4062 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4063 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4064 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4065 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4066 buffer_offsets[offsets_idx].offset);
4067 /* The buffer cannot be larger than the destination. */
4068 if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
4069 continue;
4070 }
4071
4072 /* A zero size buffer is always accessible. */
4073 if (buffer_sizes_idx != ZERO_BYTES) {
4074 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_inaccessible_tests,
4075 deallocate_vm_and_buffer,
4076 "mach_vm_copy() inaccessibility tests, "
4077 "%s%s address, %s size: 0x%jx (%ju), buffer "
4078 "%s size: 0x%jx (%ju), buffer boundary "
4079 "offset: %d",
4080 address_flags[flags_idx].description,
4081 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4082 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4083 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4084 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4085 buffer_offsets[offsets_idx].offset);
4086 }
4087 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_main_tests, deallocate_vm_and_buffer,
4088 "mach_vm_copy() main tests, %s%s address, %s "
4089 "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
4090 "destination boundary offset: %d",
4091 address_flags[flags_idx].description,
4092 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4093 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4094 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4095 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4096 buffer_offsets[offsets_idx].offset);
4097 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_pattern_tests, deallocate_vm_and_buffer,
4098 "mach_vm_copy() pattern tests, %s%s address, %s "
4099 "size: 0x%jx (%ju) destination %s size: 0x%jx (%ju), "
4100 "destination boundary offset: %d",
4101 address_flags[flags_idx].description,
4102 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4103 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4104 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4105 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4106 buffer_offsets[offsets_idx].offset);
4107 }
4108 }
4109 }
4110 }
4111 }
4112 }
4113
void
perform_test_with_options(test_option_t options)
{
	/* Translate the caller-supplied option block into the global
	 * flag_run_* / size / quietness settings that the suite runners
	 * below consume. */
	process_options(options);

	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
	 * error finding xnu major version number. */
	/* printf("xnu version is %s.\n\n", xnu_version_string()); */

	/* The flags are independent, so a single invocation may run any
	 * combination of suites; they always execute in this fixed order. */
	if (flag_run_allocate_test) {
		run_allocate_test_suites();
	}

	if (flag_run_deallocate_test) {
		run_deallocate_test_suites();
	}

	if (flag_run_read_test) {
		run_read_test_suites();
	}

	if (flag_run_write_test) {
		run_write_test_suites();
	}

	if (flag_run_protect_test) {
		run_protect_test_suites();
	}

	if (flag_run_copy_test) {
		run_copy_test_suites();
	}

	/* Emit the aggregated pass/fail results (the [SUMMARY] section
	 * mentioned in the file header) for everything that ran. */
	log_aggregated_results();
}
4149
4150 T_DECL(vm_test_allocate, "Allocate VM unit test")
4151 {
4152 test_options.to_flags = VM_TEST_ALLOCATE;
4153 test_options.to_vmsize = 0;
4154 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4155
4156 perform_test_with_options(test_options);
4157 }
4158
4159 T_DECL(vm_test_deallocate, "Deallocate VM unit test",
4160 T_META_IGNORECRASHES(".*vm_allocation.*"))
4161 {
4162 test_options.to_flags = VM_TEST_DEALLOCATE;
4163 test_options.to_vmsize = 0;
4164 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4165
4166 perform_test_with_options(test_options);
4167 }
4168
4169 T_DECL(vm_test_read, "Read VM unit test")
4170 {
4171 test_options.to_flags = VM_TEST_READ;
4172 test_options.to_vmsize = 0;
4173 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4174
4175 perform_test_with_options(test_options);
4176 }
4177
4178 T_DECL(vm_test_write, "Write VM unit test")
4179 {
4180 test_options.to_flags = VM_TEST_WRITE;
4181 test_options.to_vmsize = 0;
4182 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4183
4184 perform_test_with_options(test_options);
4185 }
4186
4187 T_DECL(vm_test_protect, "Protect VM unit test",
4188 T_META_IGNORECRASHES(".*vm_allocation.*"))
4189 {
4190 test_options.to_flags = VM_TEST_PROTECT;
4191 test_options.to_vmsize = 0;
4192 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4193
4194 perform_test_with_options(test_options);
4195 }
4196
4197 T_DECL(vm_test_copy, "Copy VM unit test")
4198 {
4199 test_options.to_flags = VM_TEST_COPY;
4200 test_options.to_vmsize = 0;
4201 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4202
4203 perform_test_with_options(test_options);
4204 }
4205