/* Mach virtual memory unit tests
 *
 * The main goal of this code is to facilitate the construction,
 * running, result logging and clean up of a test suite, taking care
 * of all the scaffolding. A test suite is a sequence of very targeted
 * unit tests, each running as a separate process to isolate its
 * address space.
 * A unit test is abstracted as a unit_test_t structure, consisting of
 * a test function and a logging identifier. A test suite is a suite_t
 * structure, consisting of a unit_test_t array, fixture set up and
 * tear down functions.
 * Test suites are created dynamically. Each of its unit tests runs in
 * its own fork()d process, with the fixture set up and tear down
 * running before and after each test. The parent process will log a
 * pass result if the child exits normally, and a fail result in any
 * other case (non-zero exit status, abnormal signal). The suite
 * results are then aggregated and logged after the [SUMMARY] keyword,
 * and finally the test suite is destroyed.
 * The included test suites cover the Mach memory allocators,
 * mach_vm_allocate() and mach_vm_map() with various options, and
 * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(),
 * mach_vm_protect(), mach_vm_copy().
 *
 * Author: Renaud Dreyer ([email protected])
 *
 * Transformed to libdarwintest by Tristan Ye ([email protected]) */
27
#include <darwintest.h>

#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <math.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <sys/wait.h>
43
44 T_GLOBAL_META(
45 T_META_NAMESPACE("xnu.vm"),
46 T_META_RADAR_COMPONENT_NAME("xnu"),
47 T_META_RADAR_COMPONENT_VERSION("VM"));
48
49 /**************************/
50 /**************************/
51 /* Unit Testing Framework */
52 /**************************/
53 /**************************/
54
55 /*********************/
56 /* Private interface */
57 /*********************/
58
/* Logging identifier for the framework itself. */
static const char frameworkname[] = "vm_unitester";

/* Type for test, fixture set up and fixture tear down functions. */
typedef void (*test_fn_t)();

/* A single unit test: a test function plus its logging name. */
typedef struct {
	const char * name;
	test_fn_t test;
} unit_test_t;

/* A test suite: an array of unit tests bracketed by fixture
 * set up and tear down functions. */
typedef struct {
	const char * name;
	int numoftests;
	test_fn_t set_up;
	unit_test_t * tests;
	test_fn_t tear_down;
} suite_t;

/* Verbosity: 0 verbose, 1 result and error, 2 error only. */
int _quietness = 0;
/* Signal a child test is expected to die with; 0 means none,
 * -1 accepts either SIGBUS or SIGSEGV. */
int _expected_signal = 0;

/* Pass/fail counters aggregated across every suite. */
struct {
	uintmax_t numoftests;
	uintmax_t passed_tests;
} results = {0, 0};

/* Log at "result" verbosity or below. */
#define logr(format, ...)                              \
	do {                                           \
	        if (_quietness <= 1) {                 \
	                T_LOG(format, ## __VA_ARGS__); \
	        }                                      \
	} while (0)

/* Log only when fully verbose. */
#define logv(format, ...)                              \
	do {                                           \
	        if (_quietness == 0) {                 \
	                T_LOG(format, ## __VA_ARGS__); \
	        }                                      \
	} while (0)
100
101 static suite_t *
create_suite(const char * name,int numoftests,test_fn_t set_up,unit_test_t * tests,test_fn_t tear_down)102 create_suite(const char * name, int numoftests, test_fn_t set_up, unit_test_t * tests, test_fn_t tear_down)
103 {
104 suite_t * suite = (suite_t *)malloc(sizeof(suite_t));
105 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(suite, "malloc()");
106
107 suite->name = name;
108 suite->numoftests = numoftests;
109 suite->set_up = set_up;
110 suite->tests = tests;
111 suite->tear_down = tear_down;
112 return suite;
113 }
114
115 static void
destroy_suite(suite_t * suite)116 destroy_suite(suite_t * suite)
117 {
118 free(suite);
119 }
120
121 static void
log_suite_info(suite_t * suite)122 log_suite_info(suite_t * suite)
123 {
124 logr("[TEST] %s", suite->name);
125 logr("Number of tests: %d\n", suite->numoftests);
126 }
127
128 static void
log_suite_results(suite_t * suite,int passed_tests)129 log_suite_results(suite_t * suite, int passed_tests)
130 {
131 results.numoftests += (uintmax_t)suite->numoftests;
132 results.passed_tests += (uintmax_t)passed_tests;
133 }
134
135 static void
log_test_info(unit_test_t * unit_test,unsigned test_num)136 log_test_info(unit_test_t * unit_test, unsigned test_num)
137 {
138 logr("[BEGIN] #%04d: %s", test_num, unit_test->name);
139 }
140
141 static void
log_test_result(unit_test_t * unit_test,boolean_t test_passed,unsigned test_num)142 log_test_result(unit_test_t * unit_test, boolean_t test_passed, unsigned test_num)
143 {
144 logr("[%s] #%04d: %s\n", test_passed ? "PASS" : "FAIL", test_num, unit_test->name);
145 }
146
147 /* Run a test with fixture set up and teardown, while enforcing the
148 * time out constraint. */
149 static void
run_test(suite_t * suite,unit_test_t * unit_test,unsigned test_num)150 run_test(suite_t * suite, unit_test_t * unit_test, unsigned test_num)
151 {
152 log_test_info(unit_test, test_num);
153
154 suite->set_up();
155 unit_test->test();
156 suite->tear_down();
157 }
158
159 /* Check a child return status. */
160 static boolean_t
child_terminated_normally(int child_status)161 child_terminated_normally(int child_status)
162 {
163 boolean_t normal_exit = FALSE;
164
165 if (WIFEXITED(child_status)) {
166 int exit_status = WEXITSTATUS(child_status);
167 if (exit_status) {
168 T_LOG("Child process unexpectedly exited with code %d.",
169 exit_status);
170 } else if (!_expected_signal) {
171 normal_exit = TRUE;
172 }
173 } else if (WIFSIGNALED(child_status)) {
174 int signal = WTERMSIG(child_status);
175 if (signal == _expected_signal ||
176 (_expected_signal == -1 && (signal == SIGBUS || signal == SIGSEGV))) {
177 if (_quietness <= 0) {
178 T_LOG("Child process died with expected signal "
179 "%d.", signal);
180 }
181 normal_exit = TRUE;
182 } else {
183 T_LOG("Child process unexpectedly died with signal %d.",
184 signal);
185 }
186 } else {
187 T_LOG("Child process unexpectedly did not exit nor die");
188 }
189
190 return normal_exit;
191 }
192
193 /* Run a test in its own process, and report the result. */
194 static boolean_t
child_test_passed(suite_t * suite,unit_test_t * unit_test)195 child_test_passed(suite_t * suite, unit_test_t * unit_test)
196 {
197 int test_status;
198 static unsigned test_num = 0;
199
200 test_num++;
201
202 pid_t test_pid = fork();
203 T_QUIET; T_ASSERT_POSIX_SUCCESS(test_pid, "fork()");
204 if (!test_pid) {
205 run_test(suite, unit_test, test_num);
206 exit(0);
207 }
208 while (waitpid(test_pid, &test_status, 0) != test_pid) {
209 continue;
210 }
211 boolean_t test_result = child_terminated_normally(test_status);
212 log_test_result(unit_test, test_result, test_num);
213 return test_result;
214 }
215
216 /* Run each test in a suite, and report the results. */
217 static int
count_passed_suite_tests(suite_t * suite)218 count_passed_suite_tests(suite_t * suite)
219 {
220 int passed_tests = 0;
221 int i;
222
223 for (i = 0; i < suite->numoftests; i++) {
224 passed_tests += child_test_passed(suite, &(suite->tests[i]));
225 }
226 return passed_tests;
227 }
228
229 /********************/
230 /* Public interface */
231 /********************/
232
233 #define DEFAULT_QUIETNESS 0 /* verbose */
234 #define RESULT_ERR_QUIETNESS 1 /* result and error */
235 #define ERROR_ONLY_QUIETNESS 2 /* error only */
236
237 #define run_suite(set_up, tests, tear_down, ...) \
238 _run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__)
239
240 typedef unit_test_t UnitTests[];
241
242 void _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
243 __printflike(5, 6);
244
245 void
_run_suite(int numoftests,test_fn_t set_up,UnitTests tests,test_fn_t tear_down,const char * format,...)246 _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
247 {
248 va_list ap;
249 char * name;
250
251 va_start(ap, format);
252 T_QUIET; T_ASSERT_POSIX_SUCCESS(vasprintf(&name, format, ap), "vasprintf()");
253 va_end(ap);
254 suite_t * suite = create_suite(name, numoftests, set_up, tests, tear_down);
255 log_suite_info(suite);
256 log_suite_results(suite, count_passed_suite_tests(suite));
257 free(name);
258 destroy_suite(suite);
259 }
260
261 /* Setters and getters for various test framework global
262 * variables. Should only be used outside of the test, set up and tear
263 * down functions. */
264
265 /* Expected signal for a test, default is 0. */
266 void
set_expected_signal(int signal)267 set_expected_signal(int signal)
268 {
269 _expected_signal = signal;
270 }
271
272 int
get_expected_signal()273 get_expected_signal()
274 {
275 return _expected_signal;
276 }
277
278 /* Logging verbosity. */
279 void
set_quietness(int value)280 set_quietness(int value)
281 {
282 _quietness = value;
283 }
284
285 int
get_quietness()286 get_quietness()
287 {
288 return _quietness;
289 }
290
/* No-op placeholder usable as fixture set up/tear down or as a test. */
void
do_nothing()
{
}
296
297 void
log_aggregated_results()298 log_aggregated_results()
299 {
300 T_LOG("[SUMMARY] Aggregated Test Results\n");
301 T_LOG("Total: %ju", results.numoftests);
302 T_LOG("Passed: %ju", results.passed_tests);
303 T_LOG("Failed: %ju\n", results.numoftests - results.passed_tests);
304
305 T_QUIET; T_ASSERT_EQ(results.passed_tests, results.numoftests,
306 "%d passed of total %d tests",
307 results.passed_tests, results.numoftests);
308 }
309
310 /*******************************/
311 /*******************************/
312 /* Virtual memory unit testing */
313 /*******************************/
314 /*******************************/
315
316 /* Test exit values:
317 * 0: pass
318 * 1: fail, generic unexpected failure
319 * 2: fail, unexpected Mach return value
320 * 3: fail, time out */
321
322 #define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL))
323
324 #define POINTER(address) ((char *)(uintptr_t)(address))
325 #define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address)))
326
327 static int vm_address_size = sizeof(mach_vm_address_t);
328
329 static char *progname = "";
330
331 /*************************/
332 /* xnu version functions */
333 /*************************/
334
335 /* Find the xnu version string. */
/* Find the xnu version string ("xnu-...") inside KERN_VERSION.
 * Returns a heap-allocated copy; the previous implementation returned
 * a pointer into a buffer it had already free()d, handing the caller
 * dangling memory (use-after-free). */
char *
xnu_version_string()
{
	size_t length;
	int mib[2];
	mib[0] = CTL_KERN;
	mib[1] = KERN_VERSION;

	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 2, NULL, &length, NULL, 0), "sysctl()");
	char * version = malloc(length);
	T_QUIET;
	T_WITH_ERRNO;
	T_ASSERT_NOTNULL(version, "malloc()");
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(sysctl(mib, 2, version, &length, NULL, 0), "sysctl()");
	if (T_RESULT == T_RESULT_FAIL) {
		free(version);
		T_END;
	}
	char * xnu_string = strstr(version, "xnu-");
	T_QUIET;
	T_ASSERT_NOTNULL(xnu_string, "%s: error finding xnu version string.", progname);
	/* Duplicate the substring BEFORE freeing the sysctl buffer. */
	char * xnu_copy = strdup(xnu_string);
	free(version);
	T_QUIET;
	T_WITH_ERRNO;
	T_ASSERT_NOTNULL(xnu_copy, "strdup()");
	return xnu_copy;
}
362
363 /* Find the xnu major version number. */
364 unsigned int
xnu_major_version()365 xnu_major_version()
366 {
367 char * endptr;
368 char * xnu_substring = xnu_version_string() + 4;
369
370 errno = 0;
371 unsigned int xnu_version = strtoul(xnu_substring, &endptr, 0);
372 T_QUIET;
373 T_ASSERT_TRUE((errno != ERANGE && endptr != xnu_substring),
374 "%s: error finding xnu major version number.", progname);
375 return xnu_version;
376 }
377
378 /*************************/
379 /* Mach assert functions */
380 /*************************/
381
382 static inline void
assert_mach_return(kern_return_t kr,kern_return_t expected_kr,const char * mach_routine)383 assert_mach_return(kern_return_t kr, kern_return_t expected_kr, const char * mach_routine)
384 {
385 T_QUIET; T_ASSERT_EQ(kr, expected_kr,
386 "%s unexpectedly returned: %s."
387 "Should have returned: %s.",
388 mach_routine, mach_error_string(kr),
389 mach_error_string(expected_kr));
390 }
391
392 /*******************************/
393 /* Arrays for test suite loops */
394 /*******************************/
395
396 /* Memory allocators */
397 typedef kern_return_t (*allocate_fn_t)(vm_map_t, mach_vm_address_t *, mach_vm_size_t, int);
398
399
400 /*
401 * Remember any pre-reserved fixed address, which needs to be released prior to an allocation.
402 */
403 static mach_vm_address_t fixed_vm_address = 0x0;
404 static mach_vm_size_t fixed_vm_size = 0;
405
406 /* forward decl */
407 void assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size);
408
409 /*
410 * If trying to allocate at a fixed address, we need to do the delayed deallocate first.
411 */
412 static void
check_fixed_address(mach_vm_address_t * address,mach_vm_size_t size)413 check_fixed_address(mach_vm_address_t *address, mach_vm_size_t size)
414 {
415 if (fixed_vm_address != 0 &&
416 fixed_vm_address <= *address &&
417 *address + size <= fixed_vm_address + fixed_vm_size) {
418 assert_deallocate_success(fixed_vm_address, fixed_vm_size);
419 fixed_vm_address = 0;
420 fixed_vm_size = 0;
421 }
422 }
423
424 kern_return_t
wrapper_mach_vm_allocate(vm_map_t map,mach_vm_address_t * address,mach_vm_size_t size,int flags)425 wrapper_mach_vm_allocate(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
426 {
427 check_fixed_address(address, size);
428 return mach_vm_allocate(map, address, size, flags);
429 }
430
431 kern_return_t
wrapper_mach_vm_map(vm_map_t map,mach_vm_address_t * address,mach_vm_size_t size,int flags)432 wrapper_mach_vm_map(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
433 {
434 check_fixed_address(address, size);
435 return mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
436 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
437 }
438
439 /* Should have the same behavior as when mask is zero. */
440 kern_return_t
wrapper_mach_vm_map_4kB(vm_map_t map,mach_vm_address_t * address,mach_vm_size_t size,int flags)441 wrapper_mach_vm_map_4kB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
442 {
443 check_fixed_address(address, size);
444 return mach_vm_map(map, address, size, (mach_vm_offset_t)0xFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
445 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
446 }
447
448 kern_return_t
wrapper_mach_vm_map_2MB(vm_map_t map,mach_vm_address_t * address,mach_vm_size_t size,int flags)449 wrapper_mach_vm_map_2MB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
450 {
451 check_fixed_address(address, size);
452 return mach_vm_map(map, address, size, (mach_vm_offset_t)0x1FFFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
453 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
454 }
455
456 mach_port_t
memory_entry(mach_vm_size_t * size)457 memory_entry(mach_vm_size_t * size)
458 {
459 mach_port_t object_handle = MACH_PORT_NULL;
460 mach_vm_size_t original_size = *size;
461
462 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), size, (memory_object_offset_t)0,
463 (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
464 "mach_make_memory_entry_64()");
465 T_QUIET; T_ASSERT_EQ(*size, round_page(original_size),
466 "mach_make_memory_entry_64() unexpectedly returned a named "
467 "entry of size 0x%jx (%ju).\n"
468 "Should have returned a "
469 "named entry of size 0x%jx (%ju).",
470 (uintmax_t)*size, (uintmax_t)*size, (uintmax_t)original_size, (uintmax_t)original_size);
471 return object_handle;
472 }
473
474 kern_return_t
wrapper_mach_vm_map_named_entry(vm_map_t map,mach_vm_address_t * address,mach_vm_size_t size,int flags)475 wrapper_mach_vm_map_named_entry(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
476 {
477 mach_port_t object_handle = memory_entry(&size);
478 check_fixed_address(address, size);
479 kern_return_t kr = mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, object_handle, (memory_object_offset_t)0, FALSE,
480 VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
481 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), object_handle), "mach_port_deallocate()");
482 return kr;
483 }
484
485 static struct {
486 allocate_fn_t allocate;
487 const char * description;
488 } allocators[] = {
489 {wrapper_mach_vm_allocate, "mach_vm_allocate()"},
490 {wrapper_mach_vm_map, "mach_vm_map() (zero mask)"},
491 {wrapper_mach_vm_map_4kB,
492 "mach_vm_map() "
493 "(4 kB address alignment)"},
494 {wrapper_mach_vm_map_2MB,
495 "mach_vm_map() "
496 "(2 MB address alignment)"},
497 {wrapper_mach_vm_map_named_entry,
498 "mach_vm_map() (named "
499 "entry, zero mask)"},
500 };
501 static int numofallocators = sizeof(allocators) / sizeof(allocators[0]);
502 static int allocators_idx;
503 enum { MACH_VM_ALLOCATE, MACH_VM_MAP, MACH_VM_MAP_4kB, MACH_VM_MAP_2MB, MACH_VM_MAP_NAMED_ENTRY };
504
505 /* VM size */
506 static struct {
507 mach_vm_size_t size;
508 const char * description;
509 } vm_sizes[] = {
510 {DEFAULT_VM_SIZE, "default/input"},
511 {0, "zero"},
512 {4096ULL, "aligned"},
513 {1ULL, "unaligned"},
514 {4095ULL, "unaligned"},
515 {4097ULL, "unaligned"},
516 };
517 static int numofsizes = sizeof(vm_sizes) / sizeof(vm_sizes[0]);
518 static int sizes_idx;
519 static int buffer_sizes_idx;
520 enum { DEFAULT_INPUT, ZERO_BYTES, ONE_PAGE, ONE_BYTE, ONE_PAGE_MINUS_ONE_BYTE, ONE_PAGE_AND_ONE_BYTE };
521
522 /* Unspecified/fixed address */
523 static struct {
524 int flag;
525 const char * description;
526 } address_flags[] = {
527 {VM_FLAGS_ANYWHERE, "unspecified"}, {VM_FLAGS_FIXED, "fixed"},
528 };
529 static int numofflags = sizeof(address_flags) / sizeof(address_flags[0]);
530 static int flags_idx;
531 enum { ANYWHERE, FIXED };
532
533 /* Address alignment */
534 static struct {
535 boolean_t alignment;
536 const char * description;
537 } address_alignments[] = {
538 {TRUE, " aligned"}, {FALSE, " unaligned"},
539 };
540 static int numofalignments = sizeof(address_alignments) / sizeof(*address_alignments);
541 static int alignments_idx;
542 enum { ALIGNED, UNALIGNED };
543
544 /* Buffer offset */
545 static struct {
546 int offset;
547 const char * description;
548 } buffer_offsets[] = {
549 {0, ""}, {1, ""}, {2, ""},
550 };
551 static int numofoffsets = sizeof(buffer_offsets) / sizeof(buffer_offsets[0]);
552 static int offsets_idx;
553 enum { ZERO, ONE, TWO };
554
555 /* mach_vm_copy() post actions */
556 enum { VMCOPY_MODIFY_SRC, VMCOPY_MODIFY_DST, VMCOPY_MODIFY_SHARED_COPIED };
557
558 static struct {
559 int action;
560 const char * description;
561 } vmcopy_actions[] = {
562 {VMCOPY_MODIFY_SRC, "modify vm_copy() source"},
563 {VMCOPY_MODIFY_DST, "modify vm_copy() destination"},
564 {VMCOPY_MODIFY_SHARED_COPIED,
565 "modify vm_copy source's shared "
566 "or copied from/to region"},
567 };
568 static int numofvmcopyactions = sizeof(vmcopy_actions) / sizeof(vmcopy_actions[0]);
569 static int vmcopy_action_idx;
570
571 /************************************/
572 /* Setters and getters for fixtures */
573 /************************************/
574
575 /* Allocation memory range. */
576 static allocate_fn_t _allocator = wrapper_mach_vm_allocate;
577 static mach_vm_size_t _vm_size = DEFAULT_VM_SIZE;
578 static int _address_flag = VM_FLAGS_ANYWHERE;
579 static boolean_t _address_alignment = TRUE;
580 static mach_vm_address_t _vm_address = 0x0;
581
582 /* Buffer for mach_vm_write(). */
583 static mach_vm_size_t _buffer_size = DEFAULT_VM_SIZE;
584 static mach_vm_address_t _buffer_address = 0x0;
585 static int _buffer_offset = 0;
586
587 /* Post action for mach_vm_copy(). */
588 static int _vmcopy_post_action = VMCOPY_MODIFY_SRC;
589
590 static void
set_allocator(allocate_fn_t allocate)591 set_allocator(allocate_fn_t allocate)
592 {
593 _allocator = allocate;
594 }
595
596 static allocate_fn_t
get_allocator()597 get_allocator()
598 {
599 return _allocator;
600 }
601
602 static void
set_vm_size(mach_vm_size_t size)603 set_vm_size(mach_vm_size_t size)
604 {
605 _vm_size = size;
606 }
607
608 static mach_vm_size_t
get_vm_size()609 get_vm_size()
610 {
611 return _vm_size;
612 }
613
614 static void
set_address_flag(int flag)615 set_address_flag(int flag)
616 {
617 _address_flag = flag;
618 }
619
620 static int
get_address_flag()621 get_address_flag()
622 {
623 return _address_flag;
624 }
625
626 static void
set_address_alignment(boolean_t alignment)627 set_address_alignment(boolean_t alignment)
628 {
629 _address_alignment = alignment;
630 }
631
632 static boolean_t
get_address_alignment()633 get_address_alignment()
634 {
635 return _address_alignment;
636 }
637
638 static void
set_vm_address(mach_vm_address_t address)639 set_vm_address(mach_vm_address_t address)
640 {
641 _vm_address = address;
642 }
643
644 static mach_vm_address_t
get_vm_address()645 get_vm_address()
646 {
647 return _vm_address;
648 }
649
650 static void
set_buffer_size(mach_vm_size_t size)651 set_buffer_size(mach_vm_size_t size)
652 {
653 _buffer_size = size;
654 }
655
656 static mach_vm_size_t
get_buffer_size()657 get_buffer_size()
658 {
659 return _buffer_size;
660 }
661
662 static void
set_buffer_address(mach_vm_address_t address)663 set_buffer_address(mach_vm_address_t address)
664 {
665 _buffer_address = address;
666 }
667
668 static mach_vm_address_t
get_buffer_address()669 get_buffer_address()
670 {
671 return _buffer_address;
672 }
673
674 static void
set_buffer_offset(int offset)675 set_buffer_offset(int offset)
676 {
677 _buffer_offset = offset;
678 }
679
680 static int
get_buffer_offset()681 get_buffer_offset()
682 {
683 return _buffer_offset;
684 }
685
686 static void
set_vmcopy_post_action(int action)687 set_vmcopy_post_action(int action)
688 {
689 _vmcopy_post_action = action;
690 }
691
692 static int
get_vmcopy_post_action()693 get_vmcopy_post_action()
694 {
695 return _vmcopy_post_action;
696 }
697
698 /*******************************/
699 /* Usage and option processing */
700 /*******************************/
701 static boolean_t flag_run_allocate_test = FALSE;
702 static boolean_t flag_run_deallocate_test = FALSE;
703 static boolean_t flag_run_read_test = FALSE;
704 static boolean_t flag_run_write_test = FALSE;
705 static boolean_t flag_run_protect_test = FALSE;
706 static boolean_t flag_run_copy_test = FALSE;
707
708 #define VM_TEST_ALLOCATE 0x00000001
709 #define VM_TEST_DEALLOCATE 0x00000002
710 #define VM_TEST_READ 0x00000004
711 #define VM_TEST_WRITE 0x00000008
712 #define VM_TEST_PROTECT 0x00000010
713 #define VM_TEST_COPY 0x00000020
714
715 typedef struct test_option {
716 uint32_t to_flags;
717 int to_quietness;
718 mach_vm_size_t to_vmsize;
719 } test_option_t;
720
721 typedef struct test_info {
722 char *ti_name;
723 boolean_t *ti_flag;
724 } test_info_t;
725
726 static test_option_t test_options;
727
728 enum {ALLOCATE = 0, DEALLOCATE, READ, WRITE, PROTECT, COPY};
729
730 static test_info_t test_info[] = {
731 {"allocate", &flag_run_allocate_test},
732 {"deallocate", &flag_run_deallocate_test},
733 {"read", &flag_run_read_test},
734 {"write", &flag_run_write_test},
735 {"protect", &flag_run_protect_test},
736 {"copy", &flag_run_copy_test},
737 {NULL, NULL}
738 };
739
740 static void
die_on_invalid_value(int condition,const char * value_string)741 die_on_invalid_value(int condition, const char * value_string)
742 {
743 T_QUIET;
744 T_ASSERT_EQ(condition, 0, "%s: invalid value: %s.",
745 progname, value_string);
746 }
747
748 static void
process_options(test_option_t options)749 process_options(test_option_t options)
750 {
751 test_info_t *tp;
752
753 setvbuf(stdout, NULL, _IONBF, 0);
754
755 set_vm_size(DEFAULT_VM_SIZE);
756 set_quietness(DEFAULT_QUIETNESS);
757
758 if (NULL != getenv("LTERDOS")) {
759 logr("LTERDOS=YES this is LeanTestEnvironment\nIncreasing quietness by 1.");
760 set_quietness(get_quietness() + 1);
761 } else {
762 if (options.to_quietness > 0) {
763 set_quietness(options.to_quietness);
764 }
765 }
766
767 if (options.to_vmsize != 0) {
768 vm_sizes[0].size = options.to_vmsize;
769 }
770
771 if (options.to_flags == 0) {
772 for (tp = test_info; tp->ti_name != NULL; ++tp) {
773 *tp->ti_flag = TRUE;
774 }
775 } else {
776 if (options.to_flags & VM_TEST_ALLOCATE) {
777 *(test_info[ALLOCATE].ti_flag) = TRUE;
778 }
779
780 if (options.to_flags & VM_TEST_DEALLOCATE) {
781 *(test_info[DEALLOCATE].ti_flag) = TRUE;
782 }
783
784 if (options.to_flags & VM_TEST_READ) {
785 *(test_info[READ].ti_flag) = TRUE;
786 }
787
788 if (options.to_flags & VM_TEST_WRITE) {
789 *(test_info[WRITE].ti_flag) = TRUE;
790 }
791
792 if (options.to_flags & VM_TEST_PROTECT) {
793 *(test_info[PROTECT].ti_flag) = TRUE;
794 }
795
796 if (options.to_flags & VM_TEST_COPY) {
797 *(test_info[COPY].ti_flag) = TRUE;
798 }
799 }
800 }
801
802 /*****************/
803 /* Various tools */
804 /*****************/
805
806 /* Find the allocator address alignment mask. */
807 mach_vm_address_t
get_mask()808 get_mask()
809 {
810 mach_vm_address_t mask;
811
812 if (get_allocator() == wrapper_mach_vm_map_2MB) {
813 mask = (mach_vm_address_t)0x1FFFFF;
814 } else {
815 mask = vm_page_size - 1;
816 }
817 return mask;
818 }
819
820 /* Find the size of the smallest aligned region containing a given
821 * memory range. */
822 mach_vm_size_t
aligned_size(mach_vm_address_t address,mach_vm_size_t size)823 aligned_size(mach_vm_address_t address, mach_vm_size_t size)
824 {
825 return round_page(address - mach_vm_trunc_page(address) + size);
826 }
827
828 /********************/
829 /* Assert functions */
830 /********************/
831
832 /* Address is aligned on allocator boundary. */
833 static inline void
assert_aligned_address(mach_vm_address_t address)834 assert_aligned_address(mach_vm_address_t address)
835 {
836 T_QUIET; T_ASSERT_EQ((address & get_mask()), 0,
837 "Address 0x%jx is unexpectedly "
838 "unaligned.",
839 (uintmax_t)address);
840 }
841
842 /* Address is truncated to allocator boundary. */
843 static inline void
assert_trunc_address(mach_vm_address_t address,mach_vm_address_t trunc_address)844 assert_trunc_address(mach_vm_address_t address, mach_vm_address_t trunc_address)
845 {
846 T_QUIET; T_ASSERT_EQ(trunc_address, (address & ~get_mask()),
847 "Address "
848 "0x%jx is unexpectedly not truncated to address 0x%jx.",
849 (uintmax_t)address, (uintmax_t)trunc_address);
850 }
851
852 static inline void
assert_address_value(mach_vm_address_t address,mach_vm_address_t marker)853 assert_address_value(mach_vm_address_t address, mach_vm_address_t marker)
854 {
855 /* this assert is used so frequently so that we simply judge on
856 * its own instead of leaving this to LD macro for efficiency
857 */
858 if (MACH_VM_ADDRESS_T(address) != marker) {
859 T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, "
860 "instead of 0x%jx.", (uintmax_t)address,
861 (uintmax_t)MACH_VM_ADDRESS_T(address), (uintmax_t)marker);
862 }
863 }
864
865 void
assert_allocate_return(mach_vm_address_t * address,mach_vm_size_t size,int address_flag,kern_return_t expected_kr)866 assert_allocate_return(mach_vm_address_t * address, mach_vm_size_t size, int address_flag, kern_return_t expected_kr)
867 {
868 assert_mach_return(get_allocator()(mach_task_self(), address, size, address_flag), expected_kr, "Allocator");
869 }
870
871 void
assert_allocate_success(mach_vm_address_t * address,mach_vm_size_t size,int address_flag)872 assert_allocate_success(mach_vm_address_t * address, mach_vm_size_t size, int address_flag)
873 {
874 assert_allocate_return(address, size, address_flag, KERN_SUCCESS);
875 }
876
877 void
assert_deallocate_return(mach_vm_address_t address,mach_vm_size_t size,kern_return_t expected_kr)878 assert_deallocate_return(mach_vm_address_t address, mach_vm_size_t size, kern_return_t expected_kr)
879 {
880 assert_mach_return(mach_vm_deallocate(mach_task_self(), address, size), expected_kr, "mach_vm_deallocate()");
881 }
882
883 void
assert_deallocate_success(mach_vm_address_t address,mach_vm_size_t size)884 assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size)
885 {
886 assert_deallocate_return(address, size, KERN_SUCCESS);
887 }
888
889 void
assert_read_return(mach_vm_address_t address,mach_vm_size_t size,vm_offset_t * data,mach_msg_type_number_t * data_size,kern_return_t expected_kr)890 assert_read_return(mach_vm_address_t address,
891 mach_vm_size_t size,
892 vm_offset_t * data,
893 mach_msg_type_number_t * data_size,
894 kern_return_t expected_kr)
895 {
896 assert_mach_return(mach_vm_read(mach_task_self(), address, size, data, data_size), expected_kr, "mach_vm_read()");
897 }
898
899 void
assert_read_success(mach_vm_address_t address,mach_vm_size_t size,vm_offset_t * data,mach_msg_type_number_t * data_size)900 assert_read_success(mach_vm_address_t address, mach_vm_size_t size, vm_offset_t * data, mach_msg_type_number_t * data_size)
901 {
902 assert_read_return(address, size, data, data_size, KERN_SUCCESS);
903 T_QUIET; T_ASSERT_EQ(*data_size, size,
904 "Returned buffer size 0x%jx "
905 "(%ju) is unexpectedly different from source size 0x%jx "
906 "(%ju).",
907 (uintmax_t)*data_size, (uintmax_t)*data_size, (uintmax_t)size, (uintmax_t)size);
908 }
909
910 void
assert_write_return(mach_vm_address_t address,vm_offset_t data,mach_msg_type_number_t data_size,kern_return_t expected_kr)911 assert_write_return(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size, kern_return_t expected_kr)
912 {
913 assert_mach_return(mach_vm_write(mach_task_self(), address, data, data_size), expected_kr, "mach_vm_write()");
914 }
915
916 void
assert_write_success(mach_vm_address_t address,vm_offset_t data,mach_msg_type_number_t data_size)917 assert_write_success(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size)
918 {
919 assert_write_return(address, data, data_size, KERN_SUCCESS);
920 }
921
922 void
assert_allocate_copy_return(mach_vm_address_t source,mach_vm_size_t size,mach_vm_address_t * dest,kern_return_t expected_kr)923 assert_allocate_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest, kern_return_t expected_kr)
924 {
925 assert_allocate_success(dest, size, VM_FLAGS_ANYWHERE);
926 assert_mach_return(mach_vm_copy(mach_task_self(), source, size, *dest), expected_kr, "mach_vm_copy()");
927 }
928 void
assert_allocate_copy_success(mach_vm_address_t source,mach_vm_size_t size,mach_vm_address_t * dest)929 assert_allocate_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest)
930 {
931 assert_allocate_copy_return(source, size, dest, KERN_SUCCESS);
932 }
933
934 void
assert_copy_return(mach_vm_address_t source,mach_vm_size_t size,mach_vm_address_t dest,kern_return_t expected_kr)935 assert_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest, kern_return_t expected_kr)
936 {
937 assert_mach_return(mach_vm_copy(mach_task_self(), source, size, dest), expected_kr, "mach_vm_copy()");
938 }
939
940 void
assert_copy_success(mach_vm_address_t source,mach_vm_size_t size,mach_vm_address_t dest)941 assert_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest)
942 {
943 assert_copy_return(source, size, dest, KERN_SUCCESS);
944 }
945
946 /*******************/
947 /* Memory patterns */
948 /*******************/
949
950 typedef boolean_t (*address_filter_t)(mach_vm_address_t);
951 typedef void (*address_action_t)(mach_vm_address_t, mach_vm_address_t);
952
953 /* Map over a memory region pattern and its complement, through a
954 * (possibly reversed) boolean filter and a starting value. */
955 void
filter_addresses_do_else(address_filter_t filter,boolean_t reversed,mach_vm_address_t address,mach_vm_size_t size,address_action_t if_action,address_action_t else_action,mach_vm_address_t start_value)956 filter_addresses_do_else(address_filter_t filter,
957 boolean_t reversed,
958 mach_vm_address_t address,
959 mach_vm_size_t size,
960 address_action_t if_action,
961 address_action_t else_action,
962 mach_vm_address_t start_value)
963 {
964 mach_vm_address_t i;
965 for (i = 0; i + vm_address_size < size; i += vm_address_size) {
966 if (filter(address + i) != reversed) {
967 if_action(address + i, start_value + i);
968 } else {
969 else_action(address + i, start_value + i);
970 }
971 }
972 }
973
974 /* Various pattern actions. */
/* Pattern action that deliberately does nothing; used as the
 * complement action when only one side of a pattern is exercised. */
void
no_action(mach_vm_address_t i, mach_vm_address_t value)
{
}
979
/* Pattern action: assert that the word at address i reads as zero;
 * the value argument is ignored. */
void
read_zero(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, 0);
}
985
/* Pattern action: assert that the word at address i holds value. */
void
verify_address(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, value);
}
991
/* Pattern action: store value into the word at address i. */
void
write_address(mach_vm_address_t i, mach_vm_address_t value)
{
	MACH_VM_ADDRESS_T(i) = value;
}
997
998 /* Various patterns. */
/* Pattern that rejects every address; running it reversed accepts
 * every address instead. */
boolean_t
empty(mach_vm_address_t i)
{
	return FALSE;
}
1004
1005 boolean_t
checkerboard(mach_vm_address_t i)1006 checkerboard(mach_vm_address_t i)
1007 {
1008 return !((i / vm_address_size) & 0x1);
1009 }
1010
1011 boolean_t
page_ends(mach_vm_address_t i)1012 page_ends(mach_vm_address_t i)
1013 {
1014 mach_vm_address_t residue = i % vm_page_size;
1015
1016 return residue == 0 || residue == vm_page_size - vm_address_size;
1017 }
1018
1019 /*************************************/
1020 /* Global variables set up functions */
1021 /*************************************/
1022
/* Fixture: select the allocator under test from the global
 * allocators[] table, as indexed by allocators_idx. */
void
set_up_allocator()
{
	T_QUIET; T_ASSERT_TRUE(allocators_idx >= 0 && allocators_idx < numofallocators, "Invalid allocators[] index: %d.", allocators_idx);
	set_allocator(allocators[allocators_idx].allocate);
}
1029
1030 /* Find a fixed allocatable address by retrieving the address
1031 * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE. */
/* Returns a page-aligned address at which at least size bytes can be
 * allocated, by allocating with VM_FLAGS_ANYWHERE and recording the
 * result.  The region stays allocated (tracked in fixed_vm_address /
 * fixed_vm_size) until the allocation wrapper frees it just before
 * the test's own allocation. */
mach_vm_address_t
get_fixed_address(mach_vm_size_t size)
{
	/* mach_vm_map() starts looking for an address at 0x0. */
	mach_vm_address_t address = 0x0;

	/*
	 * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything
	 * non-zero to have at least an extra couple pages.
	 */
	if (size != 0) {
		size = round_page(size + 2 * vm_page_size);
	}

	assert_allocate_success(&address, size, VM_FLAGS_ANYWHERE);

	/*
	 * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can
	 * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will
	 * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper.
	 */
	T_QUIET; T_ASSERT_EQ(fixed_vm_address, 0, "previous fixed address not used");
	T_QUIET; T_ASSERT_EQ(fixed_vm_size, 0, "previous fixed size not used");
	fixed_vm_address = address;
	fixed_vm_size = size;

	assert_aligned_address(address);
	return address;
}
1061
1062 /* If needed, find an address at which a region of the specified size
1063 * can be allocated. Otherwise, set the address to 0x0. */
1064 void
set_up_vm_address(mach_vm_size_t size)1065 set_up_vm_address(mach_vm_size_t size)
1066 {
1067 T_QUIET; T_ASSERT_TRUE(flags_idx >= 0 && flags_idx < numofflags, "Invalid address_flags[] index: %d.", flags_idx);
1068 T_QUIET; T_ASSERT_TRUE(alignments_idx >= 0 && alignments_idx < numofalignments, "Invalid address_alignments[] index: %d.", alignments_idx);
1069 set_address_flag(address_flags[flags_idx].flag);
1070 set_address_alignment(address_alignments[alignments_idx].alignment);
1071
1072 if (!(get_address_flag() & VM_FLAGS_ANYWHERE)) {
1073 boolean_t aligned = get_address_alignment();
1074 logv(
1075 "Looking for fixed %saligned address for allocation "
1076 "of 0x%jx (%ju) byte%s...",
1077 aligned ? "" : "un", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1078 mach_vm_address_t address = get_fixed_address(size);
1079 if (!aligned) {
1080 address++;
1081 }
1082 set_vm_address(address);
1083 logv("Found %saligned fixed address 0x%jx.", aligned ? "" : "un", (uintmax_t)address);
1084 } else {
1085 /* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for
1086 * an address at the one supplied and goes up, without
1087 * wrapping around. */
1088 set_vm_address(0x0);
1089 }
1090 }
1091
/* Fixture: select the VM size under test from the global vm_sizes[]
 * table, as indexed by sizes_idx. */
void
set_up_vm_size()
{
	T_QUIET; T_ASSERT_TRUE(sizes_idx >= 0 && sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", sizes_idx);
	set_vm_size(vm_sizes[sizes_idx].size);
}
1098
/* Fixture: select the buffer size from the global vm_sizes[] table,
 * as indexed by buffer_sizes_idx. */
void
set_up_buffer_size()
{
	T_QUIET; T_ASSERT_TRUE(buffer_sizes_idx >= 0 && buffer_sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx);
	set_buffer_size(vm_sizes[buffer_sizes_idx].size);
}
1105
/* Fixture: select the buffer offset from the global buffer_offsets[]
 * table, as indexed by offsets_idx. */
void
set_up_buffer_offset()
{
	T_QUIET; T_ASSERT_TRUE(offsets_idx >= 0 && offsets_idx < numofoffsets, "Invalid buffer_offsets[] index: %d.", offsets_idx);
	set_buffer_offset(buffer_offsets[offsets_idx].offset);
}
1112
/* Fixture: select the mach_vm_copy() post-action from the global
 * vmcopy_actions[] table, as indexed by vmcopy_action_idx. */
void
set_up_vmcopy_action()
{
	T_QUIET; T_ASSERT_TRUE(vmcopy_action_idx >= 0 && vmcopy_action_idx < numofvmcopyactions, "Invalid vmcopy_actions[] index: %d.",
	    vmcopy_action_idx);
	set_vmcopy_post_action(vmcopy_actions[vmcopy_action_idx].action);
}
1120
/* Fixture: select both the allocator and the VM size under test. */
void
set_up_allocator_and_vm_size()
{
	set_up_allocator();
	set_up_vm_size();
}
1127
/* Fixture: select the VM size, then pick an address suited for an
 * allocation of that size. */
void
set_up_vm_variables()
{
	set_up_vm_size();
	set_up_vm_address(get_vm_size());
}
1134
/* Fixture: select the allocator, then the VM size and address. */
void
set_up_allocator_and_vm_variables()
{
	set_up_allocator();
	set_up_vm_variables();
}
1141
/* Fixture: select the buffer size and offset under test. */
void
set_up_buffer_variables()
{
	set_up_buffer_size();
	set_up_buffer_offset();
}
1148
/* Fixture: select the mach_vm_copy() post-action for the shared-mode
 * copy tests. */
void
set_up_copy_shared_mode_variables()
{
	set_up_vmcopy_action();
}
1154
1155 /*******************************/
1156 /* Allocation set up functions */
1157 /*******************************/
1158
1159 /* Allocate VM region of given size. */
1160 void
allocate(mach_vm_size_t size)1161 allocate(mach_vm_size_t size)
1162 {
1163 mach_vm_address_t address = get_vm_address();
1164 int flag = get_address_flag();
1165
1166 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1167 if (!(flag & VM_FLAGS_ANYWHERE)) {
1168 logv(" at address 0x%jx", (uintmax_t)address);
1169 }
1170 logv("...");
1171 assert_allocate_success(&address, size, flag);
1172 logv(
1173 "Memory of rounded size 0x%jx (%ju) allocated at "
1174 "address 0x%jx.",
1175 (uintmax_t)round_page(size), (uintmax_t)round_page(size), (uintmax_t)address);
1176 /* Fixed allocation address is truncated to the allocator
1177 * boundary. */
1178 if (!(flag & VM_FLAGS_ANYWHERE)) {
1179 mach_vm_address_t old_address = get_vm_address();
1180 assert_trunc_address(old_address, address);
1181 logv(
1182 "Address 0x%jx is correctly truncated to allocated "
1183 "address 0x%jx.",
1184 (uintmax_t)old_address, (uintmax_t)address);
1185 }
1186 set_vm_address(address);
1187 }
1188
1189 void
allocate_buffer(mach_vm_size_t buffer_size)1190 allocate_buffer(mach_vm_size_t buffer_size)
1191 {
1192 mach_vm_address_t data = 0x0;
1193
1194 logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)buffer_size, (uintmax_t)buffer_size, (buffer_size == 1) ? "" : "s");
1195 assert_allocate_success(&data, buffer_size, VM_FLAGS_ANYWHERE);
1196 logv(
1197 "Memory of rounded size 0x%jx (%ju) allocated at "
1198 "address 0x%jx.",
1199 (uintmax_t)round_page(buffer_size), (uintmax_t)round_page(buffer_size), (uintmax_t)data);
1200 data += get_buffer_offset();
1201 T_QUIET; T_ASSERT_EQ((vm_offset_t)data, data,
1202 "Address 0x%jx "
1203 "unexpectedly overflows to 0x%jx when cast as "
1204 "vm_offset_t type.",
1205 (uintmax_t)data, (uintmax_t)(vm_offset_t)data);
1206 set_buffer_address(data);
1207 }
1208
1209 /****************************************************/
1210 /* Global variables and allocation set up functions */
1211 /****************************************************/
1212
/* Fixture: select the VM size and address, then allocate the region. */
void
set_up_vm_variables_and_allocate()
{
	set_up_vm_variables();
	allocate(get_vm_size());
}
1219
/* Fixture: select the allocator, the VM variables, and allocate. */
void
set_up_allocator_and_vm_variables_and_allocate()
{
	set_up_allocator();
	set_up_vm_variables_and_allocate();
}
1226
/* Fixture: allocate the region with one extra byte so that an extra
 * page backs unaligned start addresses, then restore the unaligned
 * address if the run uses one. */
void
set_up_vm_variables_and_allocate_extra_page()
{
	set_up_vm_size();
	/* Increment the size to insure we get an extra allocated page
	 * for unaligned start addresses. */
	mach_vm_size_t allocation_size = get_vm_size() + 1;
	set_up_vm_address(allocation_size);

	allocate(allocation_size);
	/* In the fixed unaligned address case, restore the returned
	 * (truncated) allocation address to its unaligned value. */
	if (!get_address_alignment()) {
		set_vm_address(get_vm_address() + 1);
	}
}
1243
/* Fixture: select the buffer variables and allocate the buffer,
 * padded by the offset so unaligned starts stay in bounds. */
void
set_up_buffer_variables_and_allocate_extra_page()
{
	set_up_buffer_variables();
	/* Increment the size to insure we get an extra allocated page
	 * for unaligned start addresses. */
	allocate_buffer(get_buffer_size() + get_buffer_offset());
}
1252
1253 /* Allocate some destination and buffer memory for subsequent
1254 * writing, including extra pages for non-aligned start addresses. */
/* Fixture: allocate destination and buffer memory for subsequent
 * writing, each with an extra page for unaligned start addresses. */
void
set_up_vm_and_buffer_variables_allocate_for_writing()
{
	set_up_vm_variables_and_allocate_extra_page();
	set_up_buffer_variables_and_allocate_extra_page();
}
1261
1262 /* Allocate some destination and source regions for subsequent
1263 * copying, including extra pages for non-aligned start addresses. */
/* Fixture: the copy tests need the same setup as the write tests —
 * a source region and a destination region with extra pages. */
void
set_up_vm_and_buffer_variables_allocate_for_copying()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
}
1269
1270 /************************************/
1271 /* Deallocation tear down functions */
1272 /************************************/
1273
/* Deallocate size bytes at address, asserting success. */
void
deallocate_range(mach_vm_address_t address, mach_vm_size_t size)
{
	logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_deallocate_success(address, size);
}
1281
/* Tear-down: deallocate the region described by the global VM
 * address and size. */
void
deallocate()
{
	deallocate_range(get_vm_address(), get_vm_size());
}
1287
1288 /* Deallocate source memory, including the extra page for unaligned
1289 * start addresses. */
/* Tear-down: deallocate source memory, including the extra page
 * allocated for unaligned start addresses. */
void
deallocate_extra_page()
{
	/* Set the address and size to their original allocation
	 * values. */
	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
}
1297
1298 /* Deallocate buffer and destination memory for mach_vm_write(),
1299 * including the extra page for unaligned start addresses. */
/* Tear-down: deallocate both the destination region and the buffer,
 * including their extra pages for unaligned start addresses. */
void
deallocate_vm_and_buffer()
{
	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
	deallocate_range(mach_vm_trunc_page(get_buffer_address()), get_buffer_size() + get_buffer_offset());
}
1306
1307 /***********************************/
1308 /* mach_vm_read() set up functions */
1309 /***********************************/
1310
1311 /* Read the source memory into a buffer, deallocate the source, set
1312 * the global address and size from the buffer's. */
/* Read the source memory into a buffer with mach_vm_read(),
 * deallocate the source, then point the global address and size at
 * the buffer after checking that the vm_offset_t/
 * mach_msg_type_number_t values fit the wider mach_vm types. */
void
read_deallocate()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_success(address, size, &read_address, &read_size);
	logv(
	    "Memory of size 0x%jx (%ju) read into buffer of "
	    "address 0x%jx.",
	    (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)read_address);
	/* Deallocate the originally allocated memory, including the
	 * extra allocated page in
	 * set_up_vm_variables_and_allocate_extra_page(). */
	deallocate_range(mach_vm_trunc_page(address), size + 1);

	/* Promoting to mach_vm types after checking for overflow, and
	 * setting the global address from the buffer's. */
	T_QUIET; T_ASSERT_EQ((mach_vm_address_t)read_address, read_address,
	    "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
	    "as mach_vm_address_t type.",
	    (uintmax_t)read_address, (uintmax_t)(mach_vm_address_t)read_address);
	T_QUIET; T_ASSERT_EQ((mach_vm_size_t)read_size, read_size,
	    "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) "
	    "when cast as mach_vm_size_t type.",
	    (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)(mach_vm_size_t)read_size, (uintmax_t)(mach_vm_size_t)read_size);
	set_vm_address((mach_vm_address_t)read_address);
	set_vm_size((mach_vm_size_t)read_size);
}
1346
1347 /* Allocate some source memory, read it into a buffer, deallocate the
1348 * source, set the global address and size from the buffer's. */
/* Fixture: allocate some source memory, read it into a buffer,
 * deallocate the source, and retarget the globals at the buffer. */
void
set_up_vm_variables_allocate_read_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	read_deallocate();
}
1355
1356 /************************************/
1357 /* mach_vm_write() set up functions */
1358 /************************************/
1359
1360 /* Write the buffer into the destination memory. */
/* Write the buffer into the destination memory with
 * mach_vm_write(), asserting success. */
void
write_buffer()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_success(address, data, buffer_size);
	logv("Buffer written.");
}
1375
1376 /* Allocate some destination and buffer memory, and write the buffer
1377 * into the destination memory. */
/* Fixture: allocate destination and buffer memory, then write the
 * buffer into the destination. */
void
set_up_vm_and_buffer_variables_allocate_write()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
	write_buffer();
}
1384
1385 /***********************************/
1386 /* mach_vm_copy() set up functions */
1387 /***********************************/
1388
1389 void
copy_deallocate(void)1390 copy_deallocate(void)
1391 {
1392 mach_vm_size_t size = get_vm_size();
1393 mach_vm_address_t source = get_vm_address();
1394 mach_vm_address_t dest = 0;
1395
1396 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1397 (uintmax_t)source);
1398 assert_allocate_copy_success(source, size, &dest);
1399 logv(
1400 "Memory of size 0x%jx (%ju) copy into region of "
1401 "address 0x%jx.",
1402 (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1403 /* Deallocate the originally allocated memory, including the
1404 * extra allocated page in
1405 * set_up_vm_variables_and_allocate_extra_page(). */
1406 deallocate_range(mach_vm_trunc_page(source), size + 1);
1407 /* Promoting to mach_vm types after checking for overflow, and
1408 * setting the global address from the buffer's. */
1409 T_QUIET; T_ASSERT_EQ((vm_offset_t)dest, dest,
1410 "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1411 "as mach_vm_address_t type.",
1412 (uintmax_t)dest, (uintmax_t)(vm_offset_t)dest);
1413 set_vm_address(dest);
1414 set_vm_size(size);
1415 }
1416
1417 /* Copy the source region into the destination region. */
1418 void
copy_region()1419 copy_region()
1420 {
1421 mach_vm_address_t source = get_vm_address();
1422 mach_vm_address_t dest = get_buffer_address();
1423 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
1424
1425 logv(
1426 "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
1427 "memory at address 0x%jx...",
1428 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1429 assert_copy_success(source, size, dest);
1430 logv("Buffer written.");
1431 }
1432
1433 /* Allocate some source memory, copy it to another region, deallocate the
1434 * source, set the global address and size from the designation region. */
/* Fixture: allocate some source memory, copy it to another region,
 * deallocate the source, and retarget the globals at the copy. */
void
set_up_vm_variables_allocate_copy_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	copy_deallocate();
}
1441
1442 /* Allocate some destination and source memory, and copy the source
1443 * into the destination memory. */
/* Fixture: allocate destination and source memory, then copy the
 * source into the destination. */
void
set_up_source_and_dest_variables_allocate_copy()
{
	set_up_vm_and_buffer_variables_allocate_for_copying();
	copy_region();
}
1450
1451 /**************************************/
1452 /* mach_vm_protect() set up functions */
1453 /**************************************/
1454
/* Fixture: allocate a region (with extra page) and set the given
 * current protection on it; protection_name is only used for
 * logging. */
void
set_up_vm_variables_allocate_protect(vm_prot_t protection, const char * protection_name)
{
	set_up_vm_variables_and_allocate_extra_page();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();

	logv(
	    "Setting %s-protection on 0x%jx (%ju) byte%s at address "
	    "0x%jx...",
	    protection_name, (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, protection), "mach_vm_protect()");
	logv("Region %s-protected.", protection_name);
}
1469
/* Fixture: read-protect the region, i.e. leave only VM_PROT_WRITE so
 * reads fault. */
void
set_up_vm_variables_allocate_readprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_WRITE, "read");
}
1475
/* Fixture: write-protect the region, i.e. leave only VM_PROT_READ so
 * writes fault. */
void
set_up_vm_variables_allocate_writeprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_READ, "write");
}
1481
1482 /*****************/
1483 /* Address tests */
1484 /*****************/
1485
1486 /* Allocated address is nonzero iff size is nonzero. */
1487 void
test_nonzero_address_iff_nonzero_size()1488 test_nonzero_address_iff_nonzero_size()
1489 {
1490 mach_vm_address_t address = get_vm_address();
1491 mach_vm_size_t size = get_vm_size();
1492
1493 T_QUIET; T_ASSERT_TRUE((address && size) || (!address && !size), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address,
1494 address ? "non" : "");
1495 logv("Address 0x%jx is %szero as expected.", (uintmax_t)address, size ? "non" : "");
1496 }
1497
1498 /* Allocated address is aligned. */
/* Allocated address is page-aligned. */
void
test_aligned_address()
{
	mach_vm_address_t address = get_vm_address();

	assert_aligned_address(address);
	logv("Address 0x%jx is aligned.", (uintmax_t)address);
}
1507
1508 /************************/
1509 /* Read and write tests */
1510 /************************/
1511
/* Verify that the region holds the given pattern: pattern words must
 * contain their own address-derived value, the rest must read zero. */
void
verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
	    "Verifying %s pattern on region of address 0x%jx "
	    "and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern verified.");
}
1523
1524 void
write_pattern(address_filter_t filter,boolean_t reversed,mach_vm_address_t address,mach_vm_size_t size,const char * pattern_name)1525 write_pattern(
1526 address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
1527 {
1528 logv(
1529 "Writing %s pattern on region of address 0x%jx "
1530 "and size 0x%jx (%ju)...",
1531 pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1532 filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
1533 logv("Pattern writen.");
1534 }
1535
/* Write the given pattern on the region and immediately verify it:
 * pattern words are written and re-read, the rest must read zero. */
void
write_and_verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
	    "Writing and verifying %s pattern on region of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern written and verified.");
}
1548
1549 /* Verify that the smallest aligned region containing the
1550 * given range is zero-filled. */
/* Verify that the smallest aligned region containing the given range
 * is zero-filled. */
void
test_zero_filled()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()),
	    "zero-filled");
}
1557
/* Write every word with its own address value and verify it back. */
void
test_write_address_filled()
{
	write_and_verify_pattern(empty, TRUE, get_vm_address(), round_page(get_vm_size()), "address-filled");
}
1563
/* Write and verify a checkerboard pattern (every other word). */
void
test_write_checkerboard()
{
	write_and_verify_pattern(checkerboard, FALSE, get_vm_address(), round_page(get_vm_size()), "checkerboard");
}
1569
/* Write and verify the complementary checkerboard pattern. */
void
test_write_reverse_checkerboard()
{
	write_and_verify_pattern(checkerboard, TRUE, get_vm_address(), round_page(get_vm_size()), "reverse checkerboard");
}
1575
/* Write and verify only the first and last word of each page. */
void
test_write_page_ends()
{
	write_and_verify_pattern(page_ends, FALSE, get_vm_address(), round_page(get_vm_size()), "page ends");
}
1581
/* Write and verify every word except the first and last of each
 * page. */
void
test_write_page_interiors()
{
	write_and_verify_pattern(page_ends, TRUE, get_vm_address(), round_page(get_vm_size()), "page interiors");
}
1587
1588 /*********************************/
1589 /* Allocation error return tests */
1590 /*********************************/
1591
1592 /* Reallocating a page in the smallest aligned region containing the
1593 * given allocated range fails. */
1594 void
test_reallocate_pages()1595 test_reallocate_pages()
1596 {
1597 allocate_fn_t allocator = get_allocator();
1598 vm_map_t this_task = mach_task_self();
1599 mach_vm_address_t address = mach_vm_trunc_page(get_vm_address());
1600 mach_vm_size_t size = aligned_size(get_vm_address(), get_vm_size());
1601 mach_vm_address_t i;
1602 kern_return_t kr;
1603
1604 logv(
1605 "Reallocating pages in allocated region of address 0x%jx "
1606 "and size 0x%jx (%ju)...",
1607 (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1608 for (i = address; i < address + size; i += vm_page_size) {
1609 kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
1610 T_QUIET; T_ASSERT_EQ(kr, KERN_NO_SPACE,
1611 "Allocator "
1612 "at address 0x%jx unexpectedly returned: %s.\n"
1613 "Should have returned: %s.",
1614 (uintmax_t)address, mach_error_string(kr), mach_error_string(KERN_NO_SPACE));
1615 }
1616 logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE));
1617 }
1618
1619 /* Allocating in VM_MAP_NULL fails. */
/* Allocating in VM_MAP_NULL fails with MACH_SEND_INVALID_DEST. */
void
test_allocate_in_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	int flag = get_address_flag();

	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	assert_mach_return(get_allocator()(VM_MAP_NULL, &address, size, flag), MACH_SEND_INVALID_DEST, "Allocator");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
1635
1636 /* Allocating with non-user flags fails. */
1637 void
test_allocate_with_kernel_flags()1638 test_allocate_with_kernel_flags()
1639 {
1640 allocate_fn_t allocator = get_allocator();
1641 vm_map_t this_task = mach_task_self();
1642 mach_vm_address_t address = get_vm_address();
1643 mach_vm_size_t size = get_vm_size();
1644 int flag = get_address_flag();
1645 int bad_flag, i;
1646 kern_return_t kr;
1647 int valid_flags = VM_FLAGS_USER_ALLOCATE | VM_FLAGS_USER_MAP | VM_FLAGS_USER_REMAP | VM_FLAGS_ALIAS_MASK;
1648
1649 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1650 if (!(flag & VM_FLAGS_ANYWHERE)) {
1651 logv(" at address 0x%jx", (uintmax_t)address);
1652 }
1653 logv(" with various invalid flags...");
1654 for (i = 0; i < sizeof(int) * 8; i++) {
1655 int test_flag = 1 << i;
1656
1657 /* Skip user valid flags */
1658 if (valid_flags & test_flag) {
1659 continue;
1660 }
1661
1662 bad_flag = test_flag | flag;
1663 kr = allocator(this_task, &address, size, bad_flag);
1664 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1665 "Allocator "
1666 "with invalid flag 0x%x unexpectedly returned: %s.\n"
1667 "Should have returned: %s.",
1668 bad_flag, mach_error_string(kr), mach_error_string(KERN_INVALID_ARGUMENT));
1669 }
1670 logv("Returned expected error with each invalid flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1671 }
1672
1673 /*****************************/
1674 /* mach_vm_map() error tests */
1675 /*****************************/
1676
1677 /* mach_vm_map() fails with invalid protection or inheritance
1678 * arguments. */
1679 void
test_mach_vm_map_protection_inheritance_error()1680 test_mach_vm_map_protection_inheritance_error()
1681 {
1682 kern_return_t kr;
1683 vm_map_t my_task = mach_task_self();
1684 mach_vm_address_t address = get_vm_address();
1685 mach_vm_size_t size = get_vm_size();
1686 vm_map_offset_t mask = (get_allocator() == wrapper_mach_vm_map || get_allocator() == wrapper_mach_vm_map_named_entry)
1687 ? (mach_vm_offset_t)0
1688 : (mach_vm_offset_t)get_mask();
1689 int flag = get_address_flag();
1690 mach_port_t object_handle = (get_allocator() == wrapper_mach_vm_map_named_entry) ? memory_entry(&size) : MACH_PORT_NULL;
1691 vm_prot_t cur_protections[] = {VM_PROT_DEFAULT, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
1692 vm_prot_t max_protections[] = {VM_PROT_ALL, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
1693 vm_inherit_t inheritances[] = {VM_INHERIT_DEFAULT, VM_INHERIT_LAST_VALID + 1, UINT_MAX};
1694 int i, j, k;
1695
1696 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1697 if (!(flag & VM_FLAGS_ANYWHERE)) {
1698 logv(" at address 0x%jx", (uintmax_t)address);
1699 }
1700 logv(
1701 " with various invalid protection/inheritance "
1702 "arguments...");
1703
1704 for (i = 0; i < 4; i++) {
1705 for (j = 0; j < 4; j++) {
1706 for (k = 0; k < 3; k++) {
1707 /* Skip the case with all valid arguments. */
1708 if (i == (j == (k == 0))) {
1709 continue;
1710 }
1711 kr = mach_vm_map(my_task, &address, size, mask, flag, object_handle, (memory_object_offset_t)0, FALSE,
1712 cur_protections[i], max_protections[j], inheritances[k]);
1713 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1714 "mach_vm_map() "
1715 "with cur_protection 0x%x, max_protection 0x%x, "
1716 "inheritance 0x%x unexpectedly returned: %s.\n"
1717 "Should have returned: %s.",
1718 cur_protections[i], max_protections[j], inheritances[k], mach_error_string(kr),
1719 mach_error_string(KERN_INVALID_ARGUMENT));
1720 }
1721 }
1722 }
1723 logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1724 }
1725
1726 /* mach_vm_map() with unspecified address fails if the starting
1727 * address overflows when rounded up to a boundary value. */
/* mach_vm_map() with unspecified address fails if the starting
 * address overflows when rounded up to the boundary mask. */
void
test_mach_vm_map_large_mask_overflow_error()
{
	mach_vm_address_t address = 0x1;
	mach_vm_size_t size = get_vm_size();
	mach_vm_offset_t mask = (mach_vm_offset_t)UINTMAX_MAX;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. */
	kern_return_t kr_expected = size ? KERN_NO_SPACE : KERN_INVALID_ARGUMENT;

	logv(
	    "Allocating 0x%jx (%ju) byte%s at an unspecified address "
	    "starting at 0x%jx with mask 0x%jx...",
	    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, (uintmax_t)mask);
	assert_mach_return(mach_vm_map(mach_task_self(), &address, size, mask, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT),
	    kr_expected, "mach_vm_map()");
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
1747
1748 /************************/
1749 /* Size edge case tests */
1750 /************************/
1751
/* Attempt an anywhere-allocation of an edge-case size and assert the
 * expected kern_return_t. */
void
allocate_edge_size(mach_vm_address_t * address, mach_vm_size_t size, kern_return_t expected_kr)
{
	logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
	assert_allocate_return(address, size, VM_FLAGS_ANYWHERE, expected_kr);
	logv("Returned expected value: %s.", mach_error_string(expected_kr));
}
1759
/* Zero-size allocation: mach_vm_allocate() succeeds, the
 * mach_vm_map() variants reject it. */
void
test_allocate_zero_size()
{
	mach_vm_address_t address = 0x0;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. Other allocators succeed. */
	kern_return_t kr_expected = (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	allocate_edge_size(&address, 0, kr_expected);
	if (kr_expected == KERN_SUCCESS) {
		deallocate_range(address, 0);
	}
}
1773
1774 /* Testing the allocation of the largest size that does not overflow
1775 * when rounded up to a page-aligned value. */
/* Testing the allocation of the largest size that does not overflow
 * when rounded up to a page-aligned value; named-entry creation of
 * that size is expected to fail outright. */
void
test_allocate_invalid_large_size()
{
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
	if (get_allocator() != wrapper_mach_vm_map_named_entry) {
		mach_vm_address_t address = 0x0;
		allocate_edge_size(&address, size, KERN_NO_SPACE);
	} else {
		/* Named entries cannot currently be bigger than 4 GB
		 * - 4 kb. */
		mach_port_t object_handle = MACH_PORT_NULL;
		logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
		assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)0,
		    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
		    KERN_FAILURE, "mach_make_memory_entry_64()");
		logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE));
	}
}
1794
/* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a
 * page-aligned value. */
void
test_allocate_overflowing_size()
{
	mach_vm_address_t address = 0x0;

	/* The rounded-up size wraps to 0, which the kernel rejects. */
	allocate_edge_size(&address, (mach_vm_size_t)UINTMAX_MAX, KERN_INVALID_ARGUMENT);
}
1804
1805 /****************************/
1806 /* Address allocation tests */
1807 /****************************/
1808
1809 /* Allocation at address zero fails iff size is nonzero. */
1810 void
test_allocate_at_zero()1811 test_allocate_at_zero()
1812 {
1813 mach_vm_address_t address = 0x0;
1814 mach_vm_size_t size = get_vm_size();
1815
1816 kern_return_t kr_expected =
1817 size ? KERN_INVALID_ADDRESS : (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;
1818
1819 logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1820 assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
1821 logv("Returned expected value: %s.", mach_error_string(kr_expected));
1822 if (kr_expected == KERN_SUCCESS) {
1823 T_QUIET; T_ASSERT_EQ(address, 0,
1824 "Address 0x%jx is unexpectedly "
1825 "nonzero.\n",
1826 (uintmax_t)address);
1827 logv("Allocated address 0x%jx is zero.", (uintmax_t)address);
1828 deallocate_range(address, size);
1829 }
1830 }
1831
/* Allocation at page-aligned but 2 MB boundary-unaligned address
 * fails with KERN_NO_SPACE. */
void
test_allocate_2MB_boundary_unaligned_page_aligned_address()
{
	mach_vm_size_t size = get_vm_size();

	/* Ask for one extra page, then step one page in: the result is
	 * page-aligned but off any 2 MB boundary. */
	mach_vm_address_t address = get_fixed_address(size + vm_page_size) + vm_page_size;
	logv(
	    "Found 2 MB boundary-unaligned, page aligned address "
	    "0x%jx.",
	    (uintmax_t)address);

	/* mach_vm_allocate() cannot allocate 0 bytes, and fails with a
	 * fixed boundary-unaligned truncated address. */
	kern_return_t kr_expected = (!size && get_allocator() != wrapper_mach_vm_allocate)
	    ? KERN_INVALID_ARGUMENT
	    : (get_allocator() == wrapper_mach_vm_map_2MB) ? KERN_NO_SPACE : KERN_SUCCESS;
	logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
	logv("Returned expected value: %s.", mach_error_string(kr_expected));
	if (kr_expected == KERN_SUCCESS) {
		deallocate_range(address, size);
	}
}
1858
/* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for
 * an allocation address at 0x0, while mach_vm_map() starts at the
 * supplied address and does not wrap around. See 8016663. */
void
test_allocate_page_with_highest_address_hint()
{
	/* Highest valid page-aligned address. */
	mach_vm_address_t address = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv(
	    "Allocating one page with unspecified address, but hint at "
	    "0x%jx...",
	    (uintmax_t)address);
	if (get_allocator() == wrapper_mach_vm_allocate) {
		/* mach_vm_allocate() starts from 0x0 and succeeds. */
		assert_allocate_success(&address, vm_page_size, VM_FLAGS_ANYWHERE);
		logv("Memory allocated at address 0x%jx.", (uintmax_t)address);
		assert_aligned_address(address);
		deallocate_range(address, vm_page_size);
	} else {
		/* mach_vm_map() starts from the supplied address, and fails
		 * with KERN_NO_SPACE, see 8016663. */
		assert_allocate_return(&address, vm_page_size, VM_FLAGS_ANYWHERE, KERN_NO_SPACE);
		logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE));
	}
}
1885
1886 /* Allocators find an allocation address with a first fit strategy. */
1887 void
test_allocate_first_fit_pages()1888 test_allocate_first_fit_pages()
1889 {
1890 allocate_fn_t allocator = get_allocator();
1891 mach_vm_address_t address1 = 0x0;
1892 mach_vm_address_t i;
1893 kern_return_t kr;
1894 vm_map_t this_task = mach_task_self();
1895
1896 logv(
1897 "Looking for first fit address for allocating one "
1898 "page...");
1899 assert_allocate_success(&address1, vm_page_size, VM_FLAGS_ANYWHERE);
1900 logv("Found address 0x%jx.", (uintmax_t)address1);
1901 assert_aligned_address(address1);
1902 mach_vm_address_t address2 = address1;
1903 logv(
1904 "Looking for next higher first fit address for allocating "
1905 "one page...");
1906 assert_allocate_success(&address2, vm_page_size, VM_FLAGS_ANYWHERE);
1907 logv("Found address 0x%jx.", (uintmax_t)address2);
1908 assert_aligned_address(address2);
1909 T_QUIET; T_ASSERT_GT(address2, address1,
1910 "Second address 0x%jx is "
1911 "unexpectedly not higher than first address 0x%jx.",
1912 (uintmax_t)address2, (uintmax_t)address1);
1913
1914 logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)address1, (uintmax_t)address2);
1915 for (i = address1; i <= address2; i += vm_page_size) {
1916 kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
1917 T_QUIET; T_ASSERT_NE(kr, KERN_SUCCESS,
1918 "Allocator at address 0x%jx "
1919 "unexpectedly succeeded.",
1920 (uintmax_t)i);
1921 }
1922 logv("Expectedly returned error at each page.");
1923 deallocate_range(address1, vm_page_size);
1924 deallocate_range(address2, vm_page_size);
1925 }
1926
1927 /*******************************/
1928 /* Deallocation segfault tests */
1929 /*******************************/
1930
/* mach_vm_deallocate() deallocates the smallest aligned region
 * (integral number of pages) containing the given range. */

/* Addresses in deallocated range are inaccessible. */
void
access_deallocated_range_address(mach_vm_address_t address, const char * position)
{
	logv("Will deallocate and read from %s 0x%jx of deallocated range...", position, (uintmax_t)address);
	deallocate();
	/* MACH_VM_ADDRESS_T presumably dereferences the raw address
	 * (defined earlier in this file) — the test only passes if this
	 * access kills the forked child with SIGSEGV. */
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	/* Reaching this point means the read unexpectedly succeeded. */
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx.\n"
	    "Should have died with signal SIGSEGV.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
1945
/* Start of deallocated range is inaccessible. */
void
test_access_deallocated_range_start()
{
	access_deallocated_range_address(get_vm_address(), "start");
}

/* Middle of deallocated range is inaccessible. */
void
test_access_deallocated_range_middle()
{
	/* Probe the page-truncated midpoint of the region. */
	access_deallocated_range_address(get_vm_address() + (round_page(get_vm_size()) >> 1), "middle");
}

/* End of deallocated range is inaccessible. */
void
test_access_deallocated_range_end()
{
	/* Last addressable slot of the rounded-up region. */
	access_deallocated_range_address(round_page(get_vm_size()) - vm_address_size + get_vm_address(), "end");
}
1966
/* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We
 * deallocate the largest valid aligned size to avoid overflowing when
 * rounding up. */
void
test_deallocate_suicide()
{
	mach_vm_address_t address = 0x0;
	/* Largest size whose page round-up does not wrap to 0. */
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (uintmax_t)address);
	kern_return_t kr = mach_vm_deallocate(mach_task_self(), address, size);
	/* The call should tear down this process's own mappings; if we
	 * are still running afterwards, the test has failed. */
	T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and "
	    "size 0x%jx (%ju) unexpectedly returned: %s.\n"
	    "Should have died with signal SIGSEGV or SIGBUS.",
	    (uintmax_t)address, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr));
}
1983
1984 /***************************************/
1985 /* Deallocation and reallocation tests */
1986 /***************************************/
1987
/* Deallocating memory twice succeeds. */
void
test_deallocate_twice()
{
	/* The second deallocate hits an already-empty range, which
	 * mach_vm_deallocate() treats as success. */
	deallocate();
	deallocate();
}
1995
/* Deallocated and reallocated memory is zero-filled. Deallocated
 * memory is inaccessible since it can be reallocated. */
void
test_write_pattern_deallocate_reallocate_zero_filled()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	/* Dirty the pages first so stale data would be detectable. */
	write_pattern(page_ends, FALSE, address, size, "page ends");
	logv("Deallocating, then Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	deallocate();
	/* Reallocate the exact same fixed range and check it comes back
	 * clean. */
	assert_allocate_success(&address, size, VM_FLAGS_FIXED);
	logv("Memory allocated.");
	verify_pattern(empty, FALSE, address, size, "zero-filled");
	deallocate();
}
2013
2014 /********************************/
2015 /* Deallocation edge case tests */
2016 /********************************/
2017
2018 /* Zero size deallocation always succeeds. */
2019 void
test_deallocate_zero_size_ranges()2020 test_deallocate_zero_size_ranges()
2021 {
2022 int i;
2023 kern_return_t kr;
2024 vm_map_t this_task = mach_task_self();
2025 mach_vm_address_t addresses[] = {0x0,
2026 0x1,
2027 vm_page_size - 1,
2028 vm_page_size,
2029 vm_page_size + 1,
2030 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2031 (mach_vm_address_t)UINT_MAX,
2032 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2033 (mach_vm_address_t)UINTMAX_MAX};
2034 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2035
2036 logv("Deallocating 0x0 (0) bytes at various addresses...");
2037 for (i = 0; i < numofaddresses; i++) {
2038 kr = mach_vm_deallocate(this_task, addresses[i], 0);
2039 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate() at "
2040 "address 0x%jx unexpectedly failed: %s.",
2041 (uintmax_t)addresses[i], mach_error_string(kr));
2042 }
2043 logv("Deallocations successful.");
2044 }
2045
/* Deallocation succeeds if the end of the range rounds to 0x0. */
void
test_deallocate_rounded_zero_end_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each (address, size) pair ends within one page of the top of
	 * the address space, so the rounded-up end wraps to 0x0. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{0x0, (mach_vm_size_t)UINTMAX_MAX},
		{0x0, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 2},
		{0x1, (mach_vm_size_t)UINTMAX_MAX - 1},
		{0x1, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
		{0x2, (mach_vm_size_t)UINTMAX_MAX - 2},
		{0x2, (mach_vm_size_t)UINTMAX_MAX - vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size - 1},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, 1},
		{(mach_vm_address_t)UINTMAX_MAX - 1, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "Deallocating various memory ranges whose end rounds to "
	    "0x0...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
		    "mach_vm_deallocate() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have succeeded.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr));
	}
	logv("Deallocations successful.");
}
2082
/* Deallocating a range wrapped around the address space fails. */
void
test_deallocate_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each (address, size) pair overflows past the top of the
	 * address space by at least one page after rounding. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{0x1, (mach_vm_size_t)UINTMAX_MAX},
		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "Deallocating various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
		    "mach_vm_deallocate() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ARGUMENT));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
}
2115
/* Deallocating in VM_MAP_NULL fails. */
void
test_deallocate_in_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	int flag = get_address_flag();

	/* The log line is emitted in pieces; the address fragment is
	 * only printed for fixed (non-ANYWHERE) allocations. */
	logv("Deallocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	/* A NULL map is an invalid MIG destination port. */
	assert_mach_return(mach_vm_deallocate(VM_MAP_NULL, address, size), MACH_SEND_INVALID_DEST, "mach_vm_deallocate()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2132
2133 /*****************************/
2134 /* mach_vm_read() main tests */
2135 /*****************************/
2136
/* Read memory of size less than a page has aligned starting
 * address. Otherwise, the destination buffer's starting address has
 * the same boundary offset as the source region's. */
void
test_read_address_offset()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	if (size < vm_page_size * 2 || get_address_alignment()) {
		/* Small or aligned source: the buffer must be page-aligned. */
		assert_aligned_address(address);
		logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address);
	} else {
		/* Unaligned source: the fixture presumably offsets the
		 * region by 1 byte, and the buffer must mirror that offset. */
		T_QUIET; T_ASSERT_EQ(((address - 1) & (vm_page_size - 1)), 0,
		    "Buffer "
		    "address 0x%jx does not have the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
		logv(
		    "Buffer address 0x%jx has the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
	}
}
2161
/* Reading from VM_MAP_NULL fails. */
void
test_read_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv(
	    "Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM "
	    "map...",
	    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
	/* A NULL map is an invalid MIG destination port. */
	assert_mach_return(mach_vm_read(VM_MAP_NULL, address, size, &read_address, &read_size), MACH_SEND_INVALID_DEST,
	    "mach_vm_read()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2179
/* Reading partially deallocated memory fails. */
void
test_read_partially_deallocated_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	/* Page-truncated midpoint of the region: punching it out leaves
	 * a hole inside the range to be read. */
	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	assert_deallocate_success(mid_point, vm_page_size);
	logv("Page deallocated.");

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_return(address, size, &read_address, &read_size, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2199
/* Reading partially read-protected memory fails. */
void
test_read_partially_unreadable_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	/* Page-truncated midpoint: protecting it leaves an unreadable
	 * page inside the range to be read. */
	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	/* For sizes < msg_ool_size_small, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	/* VM_PROT_WRITE alone removes read permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_return(address, size, &read_address, &read_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2224
2225 /**********************************/
2226 /* mach_vm_read() edge case tests */
2227 /**********************************/
2228
2229 void
read_edge_size(mach_vm_size_t size,kern_return_t expected_kr)2230 read_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
2231 {
2232 int i;
2233 kern_return_t kr;
2234 vm_map_t this_task = mach_task_self();
2235 mach_vm_address_t addresses[] = {vm_page_size - 1,
2236 vm_page_size,
2237 vm_page_size + 1,
2238 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2239 (mach_vm_address_t)UINT_MAX,
2240 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2241 (mach_vm_address_t)UINTMAX_MAX};
2242 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2243 vm_offset_t read_address;
2244 mach_msg_type_number_t read_size;
2245
2246 logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
2247 for (i = 0; i < numofaddresses; i++) {
2248 kr = mach_vm_read(this_task, addresses[i], size, &read_address, &read_size);
2249 T_QUIET; T_ASSERT_EQ(kr, expected_kr,
2250 "mach_vm_read() at "
2251 "address 0x%jx unexpectedly returned: %s.\n"
2252 "Should have returned: %s.",
2253 (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
2254 }
2255 logv(
2256 "mach_vm_read() returned expected value in each case: "
2257 "%s.",
2258 mach_error_string(expected_kr));
2259 }
2260
/* Reading 0 bytes always succeeds. */
void
test_read_zero_size()
{
	read_edge_size(0, KERN_SUCCESS);
}

/* Reading 4GB or higher always fails. */
void
test_read_invalid_large_size()
{
	/* UINT_MAX + 1 == 4 GB, the smallest out-of-range size. */
	read_edge_size((mach_vm_size_t)UINT_MAX + 1, KERN_INVALID_ARGUMENT);
}
2274
/* Reading a range wrapped around the address space fails. */
void
test_read_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each (address, size) pair runs past the top of the address
	 * space by at least one byte. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv(
	    "Reading various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_read(this_task, ranges[i].address, ranges[i].size, &read_address, &read_size);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_read() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2309
2310 /********************************/
2311 /* mach_vm_read() pattern tests */
2312 /********************************/
2313
/* Write a pattern on pre-allocated memory, read into a buffer and
 * verify the pattern on the buffer. */
void
write_read_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t address = get_vm_address();

	write_pattern(filter, reversed, address, get_vm_size(), pattern_name);
	/* read_deallocate() presumably reads the region into a fresh
	 * buffer and updates the fixture's address/size to point at it. */
	read_deallocate();
	/* Getting the address and size of the read buffer. */
	mach_vm_address_t read_address = get_vm_address();
	mach_vm_size_t read_size = get_vm_size();
	logv(
	    "Verifying %s pattern on buffer of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)read_address, (uintmax_t)read_size, (uintmax_t)read_size);
	/* The original source address seeds the expected values for
	 * address-derived patterns. */
	filter_addresses_do_else(filter, reversed, read_address, read_size, verify_address, read_zero, address);
	logv("Pattern verified on destination buffer.");
}
2333
/* Round-trip each supported pattern through mach_vm_read(). */
void
test_read_address_filled()
{
	write_read_verify_pattern(empty, TRUE, "address-filled");
}

void
test_read_checkerboard()
{
	write_read_verify_pattern(checkerboard, FALSE, "checkerboard");
}

void
test_read_reverse_checkerboard()
{
	write_read_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
}
2351
2352 /***********************************/
2353 /* mach_vm_write() edge case tests */
2354 /***********************************/
2355
/* Writing in VM_MAP_NULL fails. */
void
test_write_null_map()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx in NULL VM MAP...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	/* A NULL map is an invalid MIG destination port. */
	assert_mach_return(mach_vm_write(VM_MAP_NULL, address, data, buffer_size), MACH_SEND_INVALID_DEST, "mach_vm_write()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2371
/* Writing 0 bytes always succeeds. */
void
test_write_zero_size()
{
	/* Shrink the fixture's source buffer to zero, then let the
	 * helper perform (and assert) the write. */
	set_buffer_size(0);
	write_buffer();
}
2379
2380 /*****************************************/
2381 /* mach_vm_write() inaccessibility tests */
2382 /*****************************************/
2383
/* Writing a partially deallocated buffer fails. */
void
test_write_partially_deallocated_buffer()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-truncated midpoint of the source buffer. */
	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);

	logv(
	    "Deallocating a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)buffer_mid_point);
	assert_deallocate_success(buffer_mid_point, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	/* The hole in the out-of-line source data makes the MIG send fail. */
	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
}
2407
/* Writing a partially read-protected buffer fails. */
void
test_write_partially_unreadable_buffer()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-truncated midpoint of the source buffer. */
	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);

	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)buffer_mid_point);
	/* VM_PROT_WRITE alone removes read permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), buffer_mid_point, vm_page_size, FALSE, VM_PROT_WRITE),
	    "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	/* The unreadable page in the source data makes the MIG send fail. */
	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
}
2432
/* Writing on partially deallocated memory fails. */
void
test_write_on_partially_deallocated_range()
{
	mach_vm_address_t address = get_vm_address();
	/* First page of the destination region. */
	mach_vm_address_t start = mach_vm_trunc_page(address);
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2456
/* Writing on partially unwritable memory fails. */
void
test_write_on_partially_unwritable_range()
{
	mach_vm_address_t address = get_vm_address();
	/* First page of the destination region. */
	mach_vm_address_t start = mach_vm_trunc_page(address);
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (buffer_size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	/* VM_PROT_READ alone removes write permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2486
2487 /*********************************/
2488 /* mach_vm_write() pattern tests */
2489 /*********************************/
2490
/* Verify that a zero-filled buffer and destination memory are still
 * zero-filled after writing. */
void
test_zero_filled_write()
{
	/* Check the whole pages covering the destination region... */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
	/* ...and the whole pages covering the source buffer. */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2500
2501 /* Write a pattern on a buffer, write the buffer into some destination
2502 * memory, and verify the pattern on both buffer and destination. */
2503 void
pattern_write(address_filter_t filter,boolean_t reversed,const char * pattern_name)2504 pattern_write(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2505 {
2506 mach_vm_address_t address = get_vm_address();
2507 mach_vm_size_t size = get_vm_size();
2508 mach_vm_address_t buffer_address = get_buffer_address();
2509 mach_vm_size_t buffer_size = get_buffer_size();
2510
2511 write_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2512 write_buffer();
2513 verify_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2514 logv(
2515 "Verifying %s pattern on destination of "
2516 "address 0x%jx and size 0x%jx (%ju)...",
2517 pattern_name, (uintmax_t)address, (uintmax_t)buffer_size, (uintmax_t)size);
2518 filter_addresses_do_else(filter, reversed, address, buffer_size, verify_address, read_zero, buffer_address);
2519 logv("Pattern verified on destination.");
2520 }
2521
/* Round-trip each supported pattern through mach_vm_write(). */
void
test_address_filled_write()
{
	pattern_write(empty, TRUE, "address-filled");
}

void
test_checkerboard_write()
{
	pattern_write(checkerboard, FALSE, "checkerboard");
}

void
test_reverse_checkerboard_write()
{
	pattern_write(checkerboard, TRUE, "reverse checkerboard");
}
2539
2540 /**********************************/
2541 /* mach_vm_copy() edge case tests */
2542 /**********************************/
2543
/* Copying in VM_MAP_NULL fails. */
void
test_copy_null_map()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Copying buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx in NULL VM MAP...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	/* A NULL map is an invalid MIG destination port. */
	assert_mach_return(mach_vm_copy(VM_MAP_NULL, source, size, dest), MACH_SEND_INVALID_DEST, "mach_vm_copy()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2559
/* Attempt mach_vm_copy() of `size` bytes from a fixed set of edge-case
 * source addresses (0, page boundaries, 32-bit and 64-bit extremes) into
 * a freshly allocated 4 KB destination, asserting every call returns
 * expected_kr. */
void
copy_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	mach_vm_address_t addresses[] = {0x0,
	                                 0x1,
	                                 vm_page_size - 1,
	                                 vm_page_size,
	                                 vm_page_size + 1,
	                                 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
	                                 (mach_vm_address_t)UINT_MAX,
	                                 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
	                                 (mach_vm_address_t)UINTMAX_MAX};
	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
	mach_vm_address_t dest = 0;

	/* NOTE(review): the log reports `size` but the destination is a
	 * fixed 4096-byte allocation — confirm this is intentional (the
	 * interesting sizes here are 0 or huge, never actually copied). */
	logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);
	logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
	for (i = 0; i < numofaddresses; i++) {
		kr = mach_vm_copy(this_task, addresses[i], size, dest);
		T_QUIET; T_ASSERT_EQ(kr, expected_kr,
		    "mach_vm_copy() at "
		    "address 0x%jx unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
	}
	logv(
	    "mach_vm_copy() returned expected value in each case: "
	    "%s.",
	    mach_error_string(expected_kr));

	deallocate_range(dest, 4096);
}
2596
/* Copying 0 bytes always succeeds, regardless of source address. */
void
test_copy_zero_size()
{
	copy_edge_size(0, KERN_SUCCESS);
}
2603
/* Copying 4GB or higher always fails. */
void
test_copy_invalid_large_size()
{
	/* UINT_MAX - 1 bytes: large enough that no source range is mapped. */
	copy_edge_size((mach_vm_size_t)UINT_MAX - 1, KERN_INVALID_ADDRESS);
}
2610
/* Copying a range wrapped around the address space fails. */
void
test_copy_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each (address, size) pair extends past UINTMAX_MAX, i.e. wraps. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
	mach_vm_address_t dest = 0;

	/* Valid destination so only the source range can fail. */
	logv("Allocating 0x1000 (4096) bytes...");
	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);

	logv(
	    "Copying various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_copy(this_task, ranges[i].address, ranges[i].size, dest);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_copy() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));

	deallocate_range(dest, 4096);
}
2649
2650 /********************************/
2651 /* mach_vm_copy() pattern tests */
2652 /********************************/
2653
/* Write a pattern on pre-allocated region, copy into another region
 * and verify the pattern in the region. */
void
write_copy_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t src_size = get_vm_size();
	/* Stamp the pattern on the source region first. */
	write_pattern(filter, reversed, source, src_size, pattern_name);
	/* Getting the address and size of the dest region */
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_size_t dst_size = get_buffer_size();

	/* NOTE(review): only dst_size bytes are copied; assumes the
	 * fixture guarantees dst_size <= src_size — confirm. */
	logv(
	    "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)dst_size, (uintmax_t)dst_size, (uintmax_t)dest);
	assert_copy_success(source, dst_size, dest);
	logv(
	    "Verifying %s pattern in region of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)dest, (uintmax_t)dst_size, (uintmax_t)dst_size);
	/* Pattern values are keyed off `source`, so verification of the
	 * destination passes `source` as the base value. */
	filter_addresses_do_else(filter, reversed, dest, dst_size, verify_address, read_zero, source);
	logv("Pattern verified on destination region.");
}
2678
/* Copy test: every address in the region carries its own value. */
void
test_copy_address_filled()
{
	write_copy_verify_pattern(empty, TRUE, "address-filled");
}
2684
/* Copy test with the checkerboard pattern. */
void
test_copy_checkerboard()
{
	write_copy_verify_pattern(checkerboard, FALSE, "checkerboard");
}
2690
/* Copy test with the inverted checkerboard pattern. */
void
test_copy_reverse_checkerboard()
{
	write_copy_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
}
2696
/* Verify that a zero-filled source and destination memory are still
 * zero-filled after writing. */
void
test_zero_filled_copy_dest()
{
	/* Check the whole page-aligned extent of both regions. */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2706
2707 /****************************************/
2708 /* mach_vm_copy() inaccessibility tests */
2709 /****************************************/
2710
2711 /* Copying partially deallocated memory fails. */
2712 void
test_copy_partially_deallocated_range()2713 test_copy_partially_deallocated_range()
2714 {
2715 mach_vm_address_t source = get_vm_address();
2716 mach_vm_size_t size = get_vm_size();
2717 mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
2718 mach_vm_address_t dest = 0;
2719
2720 logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2721 assert_deallocate_success(mid_point, vm_page_size);
2722 logv("Page deallocated.");
2723
2724 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2725 (uintmax_t)source);
2726
2727 assert_allocate_copy_return(source, size, &dest, KERN_INVALID_ADDRESS);
2728
2729 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2730
2731 deallocate_range(dest, size);
2732 }
2733
/* Copy partially read-protected memory fails. */
void
test_copy_partially_unreadable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
	mach_vm_address_t dest = 0;

	/* For sizes < 1 page, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239. */
	kern_return_t kr_expected = (size < vm_page_size) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	/* VM_PROT_WRITE only, i.e. read permission removed. */
	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)source);
	assert_allocate_copy_return(source, size, &dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));

	deallocate_range(dest, size);
}
2759
2760 /* Copying to a partially deallocated region fails. */
2761 void
test_copy_dest_partially_deallocated_region()2762 test_copy_dest_partially_deallocated_region()
2763 {
2764 mach_vm_address_t dest = get_vm_address();
2765 mach_vm_address_t source = get_buffer_address();
2766 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2767 mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2768 #if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
2769 logv(
2770 "Deallocating a mid-range source page at address "
2771 "0x%jx...",
2772 (uintmax_t)source_mid_point);
2773 assert_deallocate_success(source_mid_point, vm_page_size);
2774 logv("Page deallocated.");
2775
2776 logv(
2777 "Copying region of address 0x%jx and size 0x%jx (%ju), on "
2778 "memory at address 0x%jx...",
2779 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2780 assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2781 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2782 #else
2783 logv(
2784 "Bypassing partially deallocated region test "
2785 "(See <rdar://problem/12190999>)");
2786 #endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
2787 }
2788
2789 /* Copying from a partially deallocated region fails. */
2790 void
test_copy_source_partially_deallocated_region()2791 test_copy_source_partially_deallocated_region()
2792 {
2793 mach_vm_address_t source = get_vm_address();
2794 mach_vm_address_t dest = get_buffer_address();
2795 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2796 mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
2797
2798 logv(
2799 "Deallocating a mid-range source page at address "
2800 "0x%jx...",
2801 (uintmax_t)source_mid_point);
2802 assert_deallocate_success(source_mid_point, vm_page_size);
2803 logv("Page deallocated.");
2804
2805 logv(
2806 "Copying region of address 0x%jx and size 0x%jx (%ju), on "
2807 "memory at address 0x%jx...",
2808 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2809 assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2810 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2811 }
2812
/* Copying from a partially read-protected region fails. */
void
test_copy_source_partially_unreadable_region()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
	/* NOTE(review): threshold here is 2 pages, unlike the 1-page
	 * threshold in test_copy_partially_unreadable_range — presumably
	 * matching msg_ool_size_small; confirm against the kernel. */
	kern_return_t kr = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)mid_point);
	/* VM_PROT_WRITE only: removes read permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Copying region at address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);

	assert_copy_return(source, size, dest, kr);
	logv("Returned expected error: %s.", mach_error_string(kr));
}
2838
2839 /* Copying to a partially write-protected region fails. */
2840 void
test_copy_dest_partially_unwriteable_region()2841 test_copy_dest_partially_unwriteable_region()
2842 {
2843 kern_return_t kr;
2844 mach_vm_address_t dest = get_vm_address();
2845 mach_vm_address_t source = get_buffer_address();
2846 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2847 mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2848
2849 #if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
2850 logv(
2851 "Read-protecting a mid-range buffer page at address "
2852 "0x%jx...",
2853 (uintmax_t)mid_point);
2854 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2855 logv("Page read-protected.");
2856 logv(
2857 "Copying region at address 0x%jx and size 0x%jx (%ju), on "
2858 "memory at address 0x%jx...",
2859 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2860 if (size >= vm_page_size) {
2861 kr = KERN_PROTECTION_FAILURE;
2862 } else {
2863 kr = KERN_INVALID_ADDRESS;
2864 }
2865 assert_copy_return(source, size, dest, kr);
2866 logv("Returned expected error: %s.", mach_error_string(kr));
2867 #else
2868 logv(
2869 "Bypassing partially unwriteable region test "
2870 "(See <rdar://problem/12190999>)");
2871 #endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
2872 }
2873
/* Copying on partially deallocated memory fails. */
void
test_copy_source_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page of the source range. */
	mach_vm_address_t start = mach_vm_trunc_page(source);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first source page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	/* NOTE(review): log says "Writing" but the operation is a
	 * mach_vm_copy() — wording kept as-is. */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2897
/* Copying on partially deallocated memory fails. */
void
test_copy_dest_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page of the destination range. */
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2921
/* Copying on partially unwritable memory fails. */
void
test_copy_dest_on_partially_unwritable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	/* VM_PROT_READ only: write permission removed. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2951
2952 /* Copying on partially unreadable memory fails. */
2953 void
test_copy_source_on_partially_unreadable_range()2954 test_copy_source_on_partially_unreadable_range()
2955 {
2956 mach_vm_address_t source = get_vm_address();
2957 mach_vm_address_t dest = get_buffer_address();
2958 mach_vm_address_t start = mach_vm_trunc_page(source);
2959 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2960
2961 /* For sizes < msg_ool_size_small,
2962 * vm_map_copy_overwrite_nested() uses
2963 * vm_map_copyout_kernel_buffer() to read in the memory,
2964 * returning different errors, see 8217123. */
2965 kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2966
2967 logv(
2968 "Read-protecting the first destination page at address "
2969 "0x%jx...",
2970 (uintmax_t)start);
2971 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2972 logv("Page read-protected.");
2973
2974 logv(
2975 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2976 "memory at address 0x%jx...",
2977 (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2978 assert_copy_return(source, size, dest, kr_expected);
2979 logv("Returned expected error: %s.", mach_error_string(kr_expected));
2980 }
2981
2982 /********************************/
2983 /* mach_vm_protect() main tests */
2984 /********************************/
2985
/* Verify the fixture region is zero-filled over its full page-aligned
 * extent. */
void
test_zero_filled_extended()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
}
2991
/* Allocated region is still zero-filled after read-protecting it and
 * then restoring read-access. */
void
test_zero_filled_readprotect()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size,
	    (size == 1) ? "" : "s", (uintmax_t)address);
	/* Restore the default protection (read/write) on the region. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, VM_PROT_DEFAULT), "mach_vm_protect()");
	logv("Region has read access.");
	test_zero_filled_extended();
}
3006
/* Query the fixture region with mach_vm_region() and assert its
 * protection matches expectations: when the fixture size is non-zero
 * the given protection bit must be CLEAR (region protected); when the
 * size is zero no protection was applied, so the bit must be SET. */
void
verify_protection(vm_prot_t protection, const char * protection_name)
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_size_t original_size = size;
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t unused;

	logv(
	    "Verifying %s-protection on region of address 0x%jx and "
	    "size 0x%jx (%ju) with mach_vm_region()...",
	    protection_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	/* mach_vm_region() may update address/size to the containing
	 * region's bounds; original_size preserves the fixture value. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(
	    mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused),
	    "mach_vm_region()");
	if (original_size) {
		T_QUIET; T_ASSERT_EQ((info.protection & protection), 0,
		    "Region "
		    "is unexpectedly %s-unprotected.",
		    protection_name);
		logv("Region is %s-protected as expected.", protection_name);
	} else {
		T_QUIET; T_ASSERT_NE(info.protection & protection, 0,
		    "Region is "
		    "unexpectedly %s-protected.",
		    protection_name);
		logv("Region is %s-unprotected as expected.", protection_name);
	}
}
3038
/* Assert the fixture region is read-protected (see verify_protection). */
void
test_verify_readprotection()
{
	verify_protection(VM_PROT_READ, "read");
}
3044
/* Assert the fixture region is write-protected (see verify_protection). */
void
test_verify_writeprotection()
{
	verify_protection(VM_PROT_WRITE, "write");
}
3050
3051 /******************************/
3052 /* Protection bus error tests */
3053 /******************************/
3054
3055 /* mach_vm_protect() affects the smallest aligned region (integral
3056 * number of pages) containing the given range. */
3057
/* Addresses in read-protected range are inaccessible. */
void
access_readprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Reading from %s 0x%jx of read-protected range...", position, (uintmax_t)address);
	/* This dereference is expected to kill the test child with
	 * SIGBUS; reaching the assert below means protection failed. */
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx."
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
3068
/* Start of read-protected range is inaccessible. */
void
test_access_readprotected_range_start()
{
	access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3075
/* Middle of read-protected range is inaccessible. */
void
test_access_readprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Midpoint of the page-aligned extent of the range. */
	access_readprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3083
/* End of read-protected range is inaccessible. */
void
test_access_readprotected_range_end()
{
	/* Last naturally-aligned word inside the protected extent. */
	access_readprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3090
/* Addresses in write-protected range are unwritable. */
void
write_writeprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Writing on %s 0x%jx of write-protected range...", position, (uintmax_t)address);
	/* This store is expected to kill the test child with SIGBUS;
	 * reaching the assert below means protection failed. */
	MACH_VM_ADDRESS_T(address) = 0x0;
	T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx."
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)address);
}
3101
/* Start of write-protected range is unwritable. */
void
test_write_writeprotected_range_start()
{
	write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3108
/* Middle of write-protected range is unwritable. */
void
test_write_writeprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Midpoint of the page-aligned extent of the range. */
	write_writeprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3116
/* End of write-protected range is unwritable. */
void
test_write_writeprotected_range_end()
{
	/* Last naturally-aligned word inside the protected extent. */
	write_writeprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3123
3124 /*************************************/
3125 /* mach_vm_protect() edge case tests */
3126 /*************************************/
3127
3128 void
protect_zero_size(vm_prot_t protection,const char * protection_name)3129 protect_zero_size(vm_prot_t protection, const char * protection_name)
3130 {
3131 int i;
3132 kern_return_t kr;
3133 vm_map_t this_task = mach_task_self();
3134 mach_vm_address_t addresses[] = {0x0,
3135 0x1,
3136 vm_page_size - 1,
3137 vm_page_size,
3138 vm_page_size + 1,
3139 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
3140 (mach_vm_address_t)UINT_MAX,
3141 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
3142 (mach_vm_address_t)UINTMAX_MAX};
3143 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
3144
3145 logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name);
3146 for (i = 0; i < numofaddresses; i++) {
3147 kr = mach_vm_protect(this_task, addresses[i], 0, FALSE, protection);
3148 T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3149 "mach_vm_protect() at "
3150 "address 0x%jx unexpectedly failed: %s.",
3151 (uintmax_t)addresses[i], mach_error_string(kr));
3152 }
3153 logv("Protection successful.");
3154 }
3155
/* Zero-size read-protection succeeds everywhere. */
void
test_readprotect_zero_size()
{
	protect_zero_size(VM_PROT_READ, "Read");
}
3161
/* Zero-size write-protection succeeds everywhere. */
void
test_writeprotect_zero_size()
{
	protect_zero_size(VM_PROT_WRITE, "Write");
}
3167
/* Protecting a range wrapped around the address space fails. */
void
protect_wrapped_around_ranges(vm_prot_t protection, const char * protection_name)
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each (address, size) pair extends past UINTMAX_MAX, i.e. wraps. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{0x1, (mach_vm_size_t)UINTMAX_MAX},
		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "%s-protecting various memory ranges wrapping around the "
	    "address space...",
	    protection_name);
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_protect(this_task, ranges[i].address, ranges[i].size, FALSE, protection);
		/* Wrapped ranges are rejected as invalid arguments, not
		 * invalid addresses. */
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
		    "mach_vm_protect() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ARGUMENT));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
}
3201
/* Read-protecting wrapped ranges fails. */
void
test_readprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_READ, "Read");
}
3207
/* Write-protecting wrapped ranges fails. */
void
test_writeprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_WRITE, "Write");
}
3213
3214 /*******************/
3215 /* vm_copy() tests */
3216 /*******************/
3217
/* Verify the address space is being shared. */
void
assert_share_mode(mach_vm_address_t address, unsigned share_mode, const char * share_mode_name)
{
	mach_vm_size_t size = get_vm_size();
	vm_region_extended_info_data_t info;
	mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
	mach_port_t unused;

	/*
	 * XXX Fails on UVM kernel. See <rdar://problem/12164664>
	 */
	/* NOTE(review): with `notyet` undefined the whole check is
	 * compiled out and size/info/count/unused are unused — expect
	 * -Wunused warnings on the #else path. */
#if notyet /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
	logv(
	    "Verifying %s share mode on region of address 0x%jx and "
	    "size 0x%jx (%ju)...",
	    share_mode_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(
	    mach_vm_region(mach_task_self(), &address, &size, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&info, &count, &unused),
	    "mach_vm_region()");
	T_QUIET; T_ASSERT_EQ(info.share_mode, share_mode,
	    "Region's share mode "
	    " unexpectedly is not %s but %d.",
	    share_mode_name, info.share_mode);
	logv("Region has a share mode of %s as expected.", share_mode_name);
#else
	logv("Bypassing share_mode verification (See <rdar://problem/12164664>)");
#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
}
3247
3248 /* Do the vm_copy() and verify its success. */
3249 void
assert_vmcopy_success(vm_address_t src,vm_address_t dst,const char * source_name)3250 assert_vmcopy_success(vm_address_t src, vm_address_t dst, const char * source_name)
3251 {
3252 kern_return_t kr;
3253 mach_vm_size_t size = get_vm_size();
3254
3255 logv("Copying (using mach_vm_copy()) from a %s source...", source_name);
3256 kr = mach_vm_copy(mach_task_self(), src, size, dst);
3257 T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3258 "mach_vm_copy() with the source address "
3259 "0x%jx, designation address 0x%jx, and size 0x%jx (%ju) unexpectly "
3260 "returned %s.\n Should have returned: %s.",
3261 (uintmax_t)src, (uintmax_t)dst, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr),
3262 mach_error_string(KERN_SUCCESS));
3263 logv("Copy (mach_vm_copy()) was successful as expected.");
3264 }
3265
/* Fill the fixture-sized region at `address` with values derived from
 * `start` (every address matches the `empty` filter's else-branch too,
 * so write_address runs on all of them). */
void
write_region(mach_vm_address_t address, mach_vm_size_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, write_address, write_address, start);
}
3273
/* Verify every address of the fixture-sized region at `address` holds
 * the value write_region() would have written for base `start`. */
void
verify_region(mach_vm_address_t address, mach_vm_address_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, verify_address, verify_address, start);
}
3281
3282 /* Perform the post vm_copy() action and verify its results. */
3283 void
modify_one_and_verify_all_regions(vm_address_t src,vm_address_t dst,vm_address_t shared_copied,boolean_t shared)3284 modify_one_and_verify_all_regions(vm_address_t src, vm_address_t dst, vm_address_t shared_copied, boolean_t shared)
3285 {
3286 mach_vm_size_t size = get_vm_size();
3287 int action = get_vmcopy_post_action();
3288
3289 /* Do the post vm_copy() action. */
3290 switch (action) {
3291 case VMCOPY_MODIFY_SRC:
3292 logv("Modifying: source%s...", shared ? " (shared with other region)" : "");
3293 write_region(src, 1);
3294 break;
3295
3296 case VMCOPY_MODIFY_DST:
3297 logv("Modifying: destination...");
3298 write_region(dst, 1);
3299 break;
3300
3301 case VMCOPY_MODIFY_SHARED_COPIED:
3302 /* If no shared_copied then no need to verify (nothing changed). */
3303 if (!shared_copied) {
3304 return;
3305 }
3306 logv("Modifying: shared/copied%s...", shared ? " (shared with source region)" : "");
3307 write_region(shared_copied, 1);
3308 break;
3309
3310 default:
3311 T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action);
3312 }
3313 logv("Modification was successful as expected.");
3314
3315 /* Verify all the regions with what is expected. */
3316 logv("Verifying: source... ");
3317 verify_region(src, (VMCOPY_MODIFY_SRC == action || (shared && VMCOPY_MODIFY_SHARED_COPIED == action)) ? 1 : 0);
3318 logv("destination... ");
3319 verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3320 if (shared_copied) {
3321 logv("shared/copied... ");
3322 verify_region(shared_copied, (VMCOPY_MODIFY_SHARED_COPIED == action || (shared && VMCOPY_MODIFY_SRC == action)) ? 1 : 0);
3323 }
3324 logv("Verification was successful as expected.");
3325 }
3326
3327 /* Test source being a simple fresh region. */
3328 void
test_vmcopy_fresh_source()3329 test_vmcopy_fresh_source()
3330 {
3331 mach_vm_size_t size = get_vm_size();
3332 mach_vm_address_t src, dst;
3333
3334 if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED) {
3335 /* No shared/copied region to modify so just return. */
3336 logv("No shared/copied region as expected.");
3337 return;
3338 }
3339
3340 assert_allocate_success(&src, size, TRUE);
3341
3342 assert_share_mode(src, SM_EMPTY, "SM_EMPTY");
3343
3344 write_region(src, 0);
3345
3346 assert_allocate_success(&dst, size, TRUE);
3347
3348 assert_vmcopy_success(src, dst, "freshly allocated");
3349
3350 modify_one_and_verify_all_regions(src, dst, 0, FALSE);
3351
3352 assert_deallocate_success(src, size);
3353 assert_deallocate_success(dst, size);
3354 }
3355
/* Test source copied from a shared region.
 *
 * The region is marked VM_INHERIT_SHARE and the actual copy/verify work
 * happens in a fork()ed child whose `src` mapping is shared with the
 * parent; the parent then verifies the shared region reflects (or not)
 * the child's modifications. */
void
test_vmcopy_shared_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst, shared;
	int action = get_vmcopy_post_action();
	int pid, status;

	/* NOTE(review): wait() needs <sys/wait.h>, not visible among this
	 * file's includes — presumably pulled in transitively; confirm. */
	assert_allocate_success(&src, size, TRUE);

	/* Make the region survive fork() as shared memory. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_inherit(mach_task_self(), src, size, VM_INHERIT_SHARE), "mach_vm_inherit()");

	write_region(src, 0);

	pid = fork();
	if (pid == 0) {
		/* Verify that the child's 'src' is shared with the
		 * parent's src */
		assert_share_mode(src, SM_SHARED, "SM_SHARED");
		assert_allocate_success(&dst, size, TRUE);
		assert_vmcopy_success(src, dst, "shared");
		if (VMCOPY_MODIFY_SHARED_COPIED == action) {
			/* Here "shared" and "source" are the same mapping, so
			 * modifying one must be visible in the other, but not
			 * in the already-copied destination. */
			logv("Modifying: shared...");
			write_region(src, 1);
			logv("Modification was successsful as expected.");
			logv("Verifying: source... ");
			verify_region(src, 1);
			logv("destination...");
			verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
			logv("Verification was successful as expected.");
		} else {
			modify_one_and_verify_all_regions(src, dst, 0, TRUE);
		}
		assert_deallocate_success(dst, size);
		exit(0);
	} else if (pid > 0) {
		/* In the parent the src becomes the shared */
		shared = src;
		wait(&status);
		/* Propagate a child failure as this process's exit status. */
		if (WEXITSTATUS(status) != 0) {
			exit(status);
		}
		/* verify shared (shared with child's src) */
		logv("Verifying: shared...");
		verify_region(shared, (VMCOPY_MODIFY_SHARED_COPIED == action || VMCOPY_MODIFY_SRC == action) ? 1 : 0);
		logv("Verification was successful as expected.");
	} else {
		T_WITH_ERRNO; T_ASSERT_FAIL("fork failed");
	}

	assert_deallocate_success(src, size);
}
3409
/* Test source copied from another mapping. */
void
test_vmcopy_copied_from_source()
{
	mach_vm_size_t region_size = get_vm_size();
	mach_vm_address_t source, destination, donor;

	/* Seed a donor region with the known pattern. */
	assert_allocate_success(&donor, region_size, TRUE);
	write_region(donor, 0);

	/* Copying the donor into the source leaves the source COW. */
	assert_allocate_success(&source, region_size, TRUE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), donor, region_size, source), "mach_vm_copy()");
	assert_share_mode(source, SM_COW, "SM_COW");

	assert_allocate_success(&destination, region_size, TRUE);

	assert_vmcopy_success(source, destination, "copied from");

	modify_one_and_verify_all_regions(source, destination, donor, FALSE);

	assert_deallocate_success(source, region_size);
	assert_deallocate_success(destination, region_size);
	assert_deallocate_success(donor, region_size);
}
3436
/* Test source copied to another mapping. */
void
test_vmcopy_copied_to_source()
{
	mach_vm_size_t region_size = get_vm_size();
	mach_vm_address_t source, destination, sink;

	/* Seed the source, then copy it out to a sink region; the
	 * outgoing copy leaves the source in COW sharing mode. */
	assert_allocate_success(&source, region_size, TRUE);
	write_region(source, 0);

	assert_allocate_success(&sink, region_size, TRUE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), source, region_size, sink), "mach_vm_copy()");
	assert_share_mode(source, SM_COW, "SM_COW");

	assert_allocate_success(&destination, region_size, TRUE);

	assert_vmcopy_success(source, destination, "copied to");

	modify_one_and_verify_all_regions(source, destination, sink, FALSE);

	assert_deallocate_success(source, region_size);
	assert_deallocate_success(destination, region_size);
	assert_deallocate_success(sink, region_size);
}
3463
/* Test a true-shared source copied. */
void
test_vmcopy_trueshared_source()
{
	mach_vm_size_t region_size = get_vm_size();
	mach_vm_address_t source = 0x0, destination, backing;
	vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
	vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
	mem_entry_name_port_t mem_obj;

	/* Seed a backing region, wrap it in a named memory entry, and map
	 * that entry a second time without copying: the new mapping
	 * reports SM_TRUESHARED. */
	assert_allocate_success(&backing, region_size, TRUE);
	write_region(backing, 0);

	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), &region_size, (memory_object_offset_t)backing, cur_protect, &mem_obj,
	    (mem_entry_name_port_t)NULL),
	    "mach_make_memory_entry_64()");
	T_QUIET; T_ASSERT_MACH_SUCCESS(
	    mach_vm_map(mach_task_self(), &source, region_size, 0, TRUE, mem_obj, 0, FALSE, cur_protect, max_protect, VM_INHERIT_NONE),
	    "mach_vm_map()");

	assert_share_mode(source, SM_TRUESHARED, "SM_TRUESHARED");

	assert_allocate_success(&destination, region_size, TRUE);

	assert_vmcopy_success(source, destination, "true shared");

	modify_one_and_verify_all_regions(source, destination, backing, TRUE);

	assert_deallocate_success(source, region_size);
	assert_deallocate_success(destination, region_size);
	assert_deallocate_success(backing, region_size);
}
3496
/* Test a private aliased source copied. */
void
test_vmcopy_private_aliased_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src = 0x0, dst, shared;
	vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
	vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);

	assert_allocate_success(&shared, size, TRUE);
	write_region(shared, 0);

	/* Remap the region within the same task with copy = FALSE so that
	 * 'src' and 'shared' privately alias the same pages. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_remap(mach_task_self(), &src, size, 0, TRUE, mach_task_self(), shared, FALSE, &cur_protect,
	    &max_protect, VM_INHERIT_NONE),
	    "mach_vm_remap()");

	assert_share_mode(src, SM_PRIVATE_ALIASED, "SM_PRIVATE_ALIASED");

	assert_allocate_success(&dst, size, TRUE);

	/* The description previously said "true shared" (copy-paste from
	 * test_vmcopy_trueshared_source); this suite exercises a private
	 * aliased source, so log it as such. */
	assert_vmcopy_success(src, dst, "private aliased");

	modify_one_and_verify_all_regions(src, dst, shared, TRUE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
	assert_deallocate_success(shared, size);
}
3525
3526 /*************/
3527 /* VM Suites */
3528 /*************/
3529
/*
 * Drive the allocation test suites: mach_vm_allocate() and the
 * mach_vm_map() variants are exercised over every combination of
 * allocator, VM size, address flag (ANYWHERE/FIXED) and fixed-address
 * alignment. The loop counters (allocators_idx, sizes_idx, flags_idx,
 * alignments_idx) are file-scope; the set_up_* fixtures presumably read
 * them to configure each suite -- confirm against their definitions.
 */
void
run_allocate_test_suites()
{
	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
	 * error finding xnu major version number. */
	/* unsigned int xnu_version = xnu_major_version(); */

	/* Core behavior tests run against a pre-allocated region. */
	UnitTests allocate_main_tests = {
	    {"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
	    {"Allocated address is page-aligned", test_aligned_address},
	    {"Allocated memory is zero-filled", test_zero_filled},
	    {"Write and verify address-filled pattern", test_write_address_filled},
	    {"Write and verify checkerboard pattern", test_write_checkerboard},
	    {"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard},
	    {"Write and verify page ends pattern", test_write_page_ends},
	    {"Write and verify page interiors pattern", test_write_page_interiors},
	    {"Reallocate allocated pages", test_reallocate_pages},
	};
	/* Error paths for unusable fixed addresses. */
	UnitTests allocate_address_error_tests = {
	    {"Allocate at address zero", test_allocate_at_zero},
	    {"Allocate at a 2 MB boundary-unaligned, page-aligned "
	     "address",
	     test_allocate_2MB_boundary_unaligned_page_aligned_address},
	};
	/* Error paths for invalid call arguments. */
	UnitTests allocate_argument_error_tests = {
	    {"Allocate in NULL VM map", test_allocate_in_null_map}, {"Allocate with kernel flags", test_allocate_with_kernel_flags},
	};
	/* Size-edge tests that do not depend on the size/flag loops. */
	UnitTests allocate_fixed_size_tests = {
	    {"Allocate zero size", test_allocate_zero_size},
	    {"Allocate overflowing size", test_allocate_overflowing_size},
	    {"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint},
	    {"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages},
	};
	UnitTests allocate_invalid_large_size_test = {
	    {"Allocate invalid large size", test_allocate_invalid_large_size},
	};
	/* mach_vm_map()-only error suites (not applicable to
	 * mach_vm_allocate()). */
	UnitTests mach_vm_map_protection_inheritance_error_test = {
	    {"mach_vm_map() with invalid protection/inheritance "
	     "arguments",
	     test_mach_vm_map_protection_inheritance_error},
	};
	UnitTests mach_vm_map_large_mask_overflow_error_test = {
	    {"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error},
	};

	/* Run the test suites with various allocators and VM sizes, and
	 * unspecified or fixed (page-aligned or page-unaligned),
	 * addresses. */
	for (allocators_idx = 0; allocators_idx < numofallocators; allocators_idx++) {
		for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
			for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
				for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
					/* An allocated address will be page-aligned. */
					/* Only run the zero size mach_vm_map() error tests in the
					 * unspecified address case, since we won't be able to retrieve a
					 * fixed address for allocation. See 8003930. */
					if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED) ||
					    (allocators_idx != MACH_VM_ALLOCATE && sizes_idx == ZERO_BYTES && flags_idx == FIXED)) {
						continue;
					}
					run_suite(set_up_allocator_and_vm_variables, allocate_argument_error_tests, do_nothing,
					    "%s argument error tests, %s%s address, "
					    "%s size: 0x%jx (%ju)",
					    allocators[allocators_idx].description, address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* mach_vm_map() only protection and inheritance error
					 * tests. */
					if (allocators_idx != MACH_VM_ALLOCATE) {
						run_suite(set_up_allocator_and_vm_variables, mach_vm_map_protection_inheritance_error_test, do_nothing,
						    "%s protection and inheritance "
						    "error test, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
					/* mach_vm_map() cannot allocate 0 bytes, see 8003930. */
					if (allocators_idx == MACH_VM_ALLOCATE || sizes_idx != ZERO_BYTES) {
						run_suite(set_up_allocator_and_vm_variables_and_allocate, allocate_main_tests, deallocate,
						    "%s main "
						    "allocation tests, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
				}
			}
			/* Address error tests only depend on allocator and size,
			 * so they run once per size, outside the flag loops. */
			run_suite(set_up_allocator_and_vm_size, allocate_address_error_tests, do_nothing,
			    "%s address "
			    "error allocation tests, %s size: 0x%jx (%ju)",
			    allocators[allocators_idx].description, vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
			    (uintmax_t)vm_sizes[sizes_idx].size);
		}
		run_suite(set_up_allocator, allocate_fixed_size_tests, do_nothing, "%s fixed size allocation tests",
		    allocators[allocators_idx].description);
		/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
		 * error finding xnu major version number. */
		/* mach_vm_map() with a named entry triggers a panic with this test
		 * unless under xnu-1598 or later, see 8048580. */
		/* if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY
		 *  || xnu_version >= 1598) { */
		if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY) {
			run_suite(set_up_allocator, allocate_invalid_large_size_test, do_nothing, "%s invalid large size allocation test",
			    allocators[allocators_idx].description);
		}
	}
	/* mach_vm_map() only large mask overflow tests. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		run_suite(set_up_vm_size, mach_vm_map_large_mask_overflow_error_test, do_nothing,
		    "mach_vm_map() large mask overflow "
		    "error test, size: 0x%jx (%ju)",
		    (uintmax_t)vm_sizes[sizes_idx].size, (uintmax_t)vm_sizes[sizes_idx].size);
	}
}
3649
/*
 * Drive the mach_vm_deallocate() test suites over every VM size,
 * address flag and alignment combination. Suites that touch
 * deallocated memory are expected to crash (SIGSEGV), which is
 * declared to the framework via set_expected_signal() around the run.
 */
void
run_deallocate_test_suites()
{
	/* Tests expected to fault when reading freed memory. */
	UnitTests access_deallocated_memory_tests = {
	    {"Read start of deallocated range", test_access_deallocated_range_start},
	    {"Read middle of deallocated range", test_access_deallocated_range_middle},
	    {"Read end of deallocated range", test_access_deallocated_range_end},
	};
	UnitTests deallocate_reallocate_tests = {
	    {"Deallocate twice", test_deallocate_twice},
	    {"Write pattern, deallocate, reallocate (deallocated "
	     "memory is inaccessible), and verify memory is "
	     "zero-filled",
	     test_write_pattern_deallocate_reallocate_zero_filled},
	};
	UnitTests deallocate_null_map_test = {
	    {"Deallocate in NULL VM map", test_deallocate_in_null_map},
	};
	UnitTests deallocate_edge_case_tests = {
	    {"Deallocate zero size ranges", test_deallocate_zero_size_ranges},
	    {"Deallocate memory ranges whose end rounds to 0x0", test_deallocate_rounded_zero_end_ranges},
	    {"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges},
	};
	/* Deallocating the whole address space kills the test process;
	 * run last in this function with an expected fatal signal. */
	UnitTests deallocate_suicide_test = {
	    {"Deallocate whole address space", test_deallocate_suicide},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				/* Accessing deallocated memory should cause a segmentation
				 * fault. */
				/* Nothing gets deallocated if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					set_expected_signal(SIGSEGV);
					run_suite(set_up_vm_variables_and_allocate, access_deallocated_memory_tests, do_nothing,
					    "Deallocated memory access tests, "
					    "%s%s address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					set_expected_signal(0);
				}
				run_suite(set_up_vm_variables_and_allocate, deallocate_reallocate_tests, do_nothing,
				    "Deallocation and reallocation tests, %s%s "
				    "address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables, deallocate_null_map_test, do_nothing,
				    "mach_vm_deallocate() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
			}
		}
	}
	run_suite(do_nothing, deallocate_edge_case_tests, do_nothing, "Edge case deallocation tests");

	set_expected_signal(-1); /* SIGSEGV or SIGBUS */
	run_suite(do_nothing, deallocate_suicide_test, do_nothing, "Whole address space deallocation test");
	set_expected_signal(0);
}
3726
/*
 * Drive the mach_vm_read() test suites over every VM size, address
 * flag and alignment combination, plus fixed-size edge cases that do
 * not depend on the loop variables.
 */
void
run_read_test_suites()
{
	UnitTests read_main_tests = {
	    {"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
	    {"Read address has the correct boundary offset", test_read_address_offset},
	    {"Reallocate read pages", test_reallocate_pages},
	    {"Read and verify zero-filled memory", test_zero_filled},
	};
	UnitTests read_pattern_tests = {
	    {"Read address-filled pattern", test_read_address_filled},
	    {"Read checkerboard pattern", test_read_checkerboard},
	    {"Read reverse checkerboard pattern", test_read_reverse_checkerboard},
	};
	UnitTests read_null_map_test = {
	    {"Read from NULL VM map", test_read_null_map},
	};
	UnitTests read_edge_case_tests = {
	    {"Read zero size", test_read_zero_size},
	    {"Read invalid large size", test_read_invalid_large_size},
	    {"Read wrapped around memory ranges", test_read_wrapped_around_ranges},
	};
	/* Reads from ranges that are partially freed or read-protected
	 * are expected to fail gracefully, not crash. */
	UnitTests read_inaccessible_tests = {
	    {"Read partially decallocated memory", test_read_partially_deallocated_range},
	    {"Read partially read-protected memory", test_read_partially_unreadable_range},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned) addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_read_deallocate, read_main_tests, deallocate,
				    "mach_vm_read() "
				    "main tests, %s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_pattern_tests, deallocate,
				    "mach_vm_read() pattern tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_null_map_test, deallocate_extra_page,
				    "mach_vm_read() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* A zero size range is always accessible. */
				if (sizes_idx != ZERO_BYTES) {
					run_suite(set_up_vm_variables_and_allocate_extra_page, read_inaccessible_tests, deallocate_extra_page,
					    "mach_vm_read() inaccessibility tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
				}
			}
		}
	}
	run_suite(do_nothing, read_edge_case_tests, do_nothing, "mach_vm_read() fixed size tests");
}
3802
/*
 * Drive the mach_vm_write() test suites over every combination of
 * destination size, address flag, alignment, buffer size and buffer
 * boundary offset (five nested loops over the file-scope indices).
 */
void
run_write_test_suites()
{
	UnitTests write_main_tests = {
	    {"Write and verify zero-filled memory", test_zero_filled_write},
	};
	UnitTests write_pattern_tests = {
	    {"Write address-filled pattern", test_address_filled_write},
	    {"Write checkerboard pattern", test_checkerboard_write},
	    {"Write reverse checkerboard pattern", test_reverse_checkerboard_write},
	};
	UnitTests write_edge_case_tests = {
	    {"Write into NULL VM map", test_write_null_map}, {"Write zero size", test_write_zero_size},
	};
	/* Writes involving partially freed or protected source buffers
	 * or destination ranges are expected to fail gracefully. */
	UnitTests write_inaccessible_tests = {
	    {"Write partially decallocated buffer", test_write_partially_deallocated_buffer},
	    {"Write partially read-protected buffer", test_write_partially_unreadable_buffer},
	    {"Write on partially deallocated range", test_write_on_partially_deallocated_range},
	    {"Write on partially write-protected range", test_write_on_partially_unwritable_range},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various destination sizes and
	 * unspecified or fixed (page-aligned or page-unaligned)
	 * addresses, and various buffer sizes and boundary offsets. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
					for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
						/* An allocated address will be page-aligned. */
						if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_edge_case_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() edge case tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						/* A zero size buffer is always accessible. */
						if (buffer_sizes_idx != ZERO_BYTES) {
							run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_inaccessible_tests,
							    deallocate_vm_and_buffer,
							    "mach_vm_write() inaccessibility tests, "
							    "%s%s address, %s size: 0x%jx (%ju), buffer "
							    "%s size: 0x%jx (%ju), buffer boundary "
							    "offset: %d",
							    address_flags[flags_idx].description,
							    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
							    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
							    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
							    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
							    buffer_offsets[offsets_idx].offset);
						}
						/* The buffer cannot be larger than the destination. */
						if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_write, write_main_tests, deallocate_vm_and_buffer,
						    "mach_vm_write() main tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_pattern_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() pattern tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
					}
				}
			}
		}
	}
}
3896
/*
 * Drive the mach_vm_protect() test suites over every VM size, address
 * flag and alignment combination. Suites that touch protected memory
 * are expected to crash (SIGBUS), declared via set_expected_signal().
 */
void
run_protect_test_suites()
{
	UnitTests readprotection_main_tests = {
	    {"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect},
	    {"Verify that region is read-protected iff size is "
	     "nonzero",
	     test_verify_readprotection},
	};
	/* Tests expected to fault when reading protected memory. */
	UnitTests access_readprotected_memory_tests = {
	    {"Read start of read-protected range", test_access_readprotected_range_start},
	    {"Read middle of read-protected range", test_access_readprotected_range_middle},
	    {"Read end of read-protected range", test_access_readprotected_range_end},
	};
	UnitTests writeprotection_main_tests = {
	    {"Write-protect and verify zero-filled memory", test_zero_filled_extended},
	    {"Verify that region is write-protected iff size is "
	     "nonzero",
	     test_verify_writeprotection},
	};
	/* Tests expected to fault when writing protected memory. */
	UnitTests write_writeprotected_memory_tests = {
	    {"Write at start of write-protected range", test_write_writeprotected_range_start},
	    {"Write in middle of write-protected range", test_write_writeprotected_range_middle},
	    {"Write at end of write-protected range", test_write_writeprotected_range_end},
	};
	UnitTests protect_edge_case_tests = {
	    {"Read-protect zero size ranges", test_readprotect_zero_size},
	    {"Write-protect zero size ranges", test_writeprotect_zero_size},
	    {"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges},
	    {"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_readprotect, readprotection_main_tests, deallocate_extra_page,
				    "Main read-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_allocate_writeprotect, writeprotection_main_tests, deallocate_extra_page,
				    "Main write-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* Nothing gets protected if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					set_expected_signal(SIGBUS);
					/* Accessing read-protected memory should cause a bus
					 * error. */
					run_suite(set_up_vm_variables_allocate_readprotect, access_readprotected_memory_tests, deallocate_extra_page,
					    "Read-protected memory access tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* Writing on write-protected memory should cause a bus
					 * error. */
					run_suite(set_up_vm_variables_allocate_writeprotect, write_writeprotected_memory_tests, deallocate_extra_page,
					    "Write-protected memory writing tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					set_expected_signal(0);
				}
			}
		}
	}
	run_suite(do_nothing, protect_edge_case_tests, do_nothing, "Edge case protection tests");
}
3983
3984 void
run_copy_test_suites()3985 run_copy_test_suites()
3986 {
3987 /* Copy tests */
3988 UnitTests copy_main_tests = {
3989 {"Copy and verify zero-filled memory", test_zero_filled_copy_dest},
3990 };
3991 UnitTests copy_pattern_tests = {
3992 {"Copy address-filled pattern", test_copy_address_filled},
3993 {"Copy checkerboard pattern", test_copy_checkerboard},
3994 {"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard},
3995 };
3996 UnitTests copy_edge_case_tests = {
3997 {"Copy with NULL VM map", test_copy_null_map},
3998 {"Copy zero size", test_copy_zero_size},
3999 {"Copy invalid large size", test_copy_invalid_large_size},
4000 {"Read wrapped around memory ranges", test_copy_wrapped_around_ranges},
4001 };
4002 UnitTests copy_inaccessible_tests = {
4003 {"Copy source partially decallocated region", test_copy_source_partially_deallocated_region},
4004 /* XXX */
4005 {"Copy destination partially decallocated region", test_copy_dest_partially_deallocated_region},
4006 {"Copy source partially read-protected region", test_copy_source_partially_unreadable_region},
4007 /* XXX */
4008 {"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region},
4009 {"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range},
4010 {"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range},
4011 {"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range},
4012 {"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range},
4013 };
4014
4015 UnitTests copy_shared_mode_tests = {
4016 {"Copy using freshly allocated source", test_vmcopy_fresh_source},
4017 {"Copy using shared source", test_vmcopy_shared_source},
4018 {"Copy using a \'copied from\' source", test_vmcopy_copied_from_source},
4019 {"Copy using a \'copied to\' source", test_vmcopy_copied_to_source},
4020 {"Copy using a true shared source", test_vmcopy_trueshared_source},
4021 {"Copy using a private aliased source", test_vmcopy_private_aliased_source},
4022 };
4023
4024 /* All allocations done with mach_vm_allocate(). */
4025 set_allocator(wrapper_mach_vm_allocate);
4026
4027 /* All the tests are done with page size regions. */
4028 set_vm_size(vm_page_size);
4029
4030 /* Run the test suites with various shared modes for source */
4031 for (vmcopy_action_idx = 0; vmcopy_action_idx < numofvmcopyactions; vmcopy_action_idx++) {
4032 run_suite(set_up_copy_shared_mode_variables, copy_shared_mode_tests, do_nothing, "Copy shared mode tests, %s",
4033 vmcopy_actions[vmcopy_action_idx].description);
4034 }
4035
4036 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
4037 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
4038 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
4039 for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
4040 for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
4041 /* An allocated address will be page-aligned. */
4042 if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
4043 continue;
4044 }
4045 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_edge_case_tests,
4046 deallocate_vm_and_buffer,
4047 "mach_vm_copy() edge case tests, %s%s address, %s "
4048 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
4049 "buffer boundary offset: %d",
4050 address_flags[flags_idx].description,
4051 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4052 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4053 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4054 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4055 buffer_offsets[offsets_idx].offset);
4056 /* The buffer cannot be larger than the destination. */
4057 if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
4058 continue;
4059 }
4060
4061 /* A zero size buffer is always accessible. */
4062 if (buffer_sizes_idx != ZERO_BYTES) {
4063 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_inaccessible_tests,
4064 deallocate_vm_and_buffer,
4065 "mach_vm_copy() inaccessibility tests, "
4066 "%s%s address, %s size: 0x%jx (%ju), buffer "
4067 "%s size: 0x%jx (%ju), buffer boundary "
4068 "offset: %d",
4069 address_flags[flags_idx].description,
4070 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4071 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4072 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4073 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4074 buffer_offsets[offsets_idx].offset);
4075 }
4076 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_main_tests, deallocate_vm_and_buffer,
4077 "mach_vm_copy() main tests, %s%s address, %s "
4078 "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
4079 "destination boundary offset: %d",
4080 address_flags[flags_idx].description,
4081 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4082 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4083 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4084 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4085 buffer_offsets[offsets_idx].offset);
4086 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_pattern_tests, deallocate_vm_and_buffer,
4087 "mach_vm_copy() pattern tests, %s%s address, %s "
4088 "size: 0x%jx (%ju) destination %s size: 0x%jx (%ju), "
4089 "destination boundary offset: %d",
4090 address_flags[flags_idx].description,
4091 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4092 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4093 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4094 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4095 buffer_offsets[offsets_idx].offset);
4096 }
4097 }
4098 }
4099 }
4100 }
4101 }
4102
4103 void
perform_test_with_options(test_option_t options)4104 perform_test_with_options(test_option_t options)
4105 {
4106 process_options(options);
4107
4108 /* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
4109 * error finding xnu major version number. */
4110 /* printf("xnu version is %s.\n\n", xnu_version_string()); */
4111
4112 if (flag_run_allocate_test) {
4113 run_allocate_test_suites();
4114 }
4115
4116 if (flag_run_deallocate_test) {
4117 run_deallocate_test_suites();
4118 }
4119
4120 if (flag_run_read_test) {
4121 run_read_test_suites();
4122 }
4123
4124 if (flag_run_write_test) {
4125 run_write_test_suites();
4126 }
4127
4128 if (flag_run_protect_test) {
4129 run_protect_test_suites();
4130 }
4131
4132 if (flag_run_copy_test) {
4133 run_copy_test_suites();
4134 }
4135
4136 log_aggregated_results();
4137 }
4138
4139 T_DECL(vm_test_allocate, "Allocate VM unit test")
4140 {
4141 test_options.to_flags = VM_TEST_ALLOCATE;
4142 test_options.to_vmsize = 0;
4143 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4144
4145 perform_test_with_options(test_options);
4146 }
4147
4148 T_DECL(vm_test_deallocate, "Deallocate VM unit test",
4149 T_META_IGNORECRASHES(".*vm_allocation.*"))
4150 {
4151 test_options.to_flags = VM_TEST_DEALLOCATE;
4152 test_options.to_vmsize = 0;
4153 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4154
4155 perform_test_with_options(test_options);
4156 }
4157
4158 T_DECL(vm_test_read, "Read VM unit test")
4159 {
4160 test_options.to_flags = VM_TEST_READ;
4161 test_options.to_vmsize = 0;
4162 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4163
4164 perform_test_with_options(test_options);
4165 }
4166
4167 T_DECL(vm_test_write, "Write VM unit test")
4168 {
4169 test_options.to_flags = VM_TEST_WRITE;
4170 test_options.to_vmsize = 0;
4171 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4172
4173 perform_test_with_options(test_options);
4174 }
4175
4176 T_DECL(vm_test_protect, "Protect VM unit test",
4177 T_META_IGNORECRASHES(".*vm_allocation.*"))
4178 {
4179 test_options.to_flags = VM_TEST_PROTECT;
4180 test_options.to_vmsize = 0;
4181 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4182
4183 perform_test_with_options(test_options);
4184 }
4185
4186 T_DECL(vm_test_copy, "Copy VM unit test")
4187 {
4188 test_options.to_flags = VM_TEST_COPY;
4189 test_options.to_vmsize = 0;
4190 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4191
4192 perform_test_with_options(test_options);
4193 }
4194