1 /* Mach virtual memory unit tests
2 *
3 * The main goal of this code is to facilitate the construction,
4 * running, result logging and clean up of a test suite, taking care
5 * of all the scaffolding. A test suite is a sequence of very targeted
6 * unit tests, each running as a separate process to isolate its
7 * address space.
8 * A unit test is abstracted as a unit_test_t structure, consisting of
9 * a test function and a logging identifier. A test suite is a suite_t
10 * structure, consisting of an unit_test_t array, fixture set up and
11 * tear down functions.
12 * Test suites are created dynamically. Each of its unit test runs in
13 * its own fork()d process, with the fixture set up and tear down
14 * running before and after each test. The parent process will log a
15 * pass result if the child exits normally, and a fail result in any
16 * other case (non-zero exit status, abnormal signal). The suite
17 * results are then aggregated and logged after the [SUMMARY] keyword,
18 * and finally the test suite is destroyed.
19 * The included test suites cover the Mach memory allocators,
20 * mach_vm_allocate() and mach_vm_map() with various options, and
21 * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(),
22 * mach_vm_protect(), mach_vm_copy().
23 *
24 * Author: Renaud Dreyer ([email protected])
25 *
26 * Transformed to libdarwintest by Tristan Ye ([email protected]) */
27
#include <darwintest.h>

#include <stdlib.h>
#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <math.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <getopt.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <time.h>
43
44 T_GLOBAL_META(
45 T_META_NAMESPACE("xnu.vm"),
46 T_META_RADAR_COMPONENT_NAME("xnu"),
47 T_META_RADAR_COMPONENT_VERSION("VM"));
48
49 /**************************/
50 /**************************/
51 /* Unit Testing Framework */
52 /**************************/
53 /**************************/
54
55 /*********************/
56 /* Private interface */
57 /*********************/
58
/* Name used to identify this framework in log output. */
static const char frameworkname[] = "vm_unitester";

/* Type for test, fixture set up and fixture tear down functions. */
typedef void (*test_fn_t)();

/* Unit test structure: a test function plus its logging identifier. */
typedef struct {
	const char * name;
	test_fn_t test;
} unit_test_t;

/* Test suite structure: an array of unit tests bracketed by fixture
 * set up and tear down functions run before/after each test. */
typedef struct {
	const char * name;
	int numoftests;
	test_fn_t set_up;
	unit_test_t * tests;
	test_fn_t tear_down;
} suite_t;

/* Logging verbosity: 0 = verbose, 1 = results and errors, 2 = errors only. */
int _quietness = 0;
/* Signal a child test process is expected to die with
 * (0 = none expected, -1 = either SIGBUS or SIGSEGV). */
int _expected_signal = 0;

/* Aggregated pass/fail counts across all suites run so far. */
struct {
	uintmax_t numoftests;
	uintmax_t passed_tests;
} results = {0, 0};

/* Log at "result" level: emitted unless quietness exceeds 1. */
#define logr(format, ...) \
	do { \
	        if (_quietness <= 1) { \
	                T_LOG(format, ## __VA_ARGS__); \
	        } \
	} while (0)

/* Log at "verbose" level: emitted only when fully verbose. */
#define logv(format, ...) \
	do { \
	        if (_quietness == 0) { \
	                T_LOG(format, ## __VA_ARGS__); \
	        } \
	} while (0)
100
101 static suite_t *
create_suite(const char * name,int numoftests,test_fn_t set_up,unit_test_t * tests,test_fn_t tear_down)102 create_suite(const char * name, int numoftests, test_fn_t set_up, unit_test_t * tests, test_fn_t tear_down)
103 {
104 suite_t * suite = (suite_t *)malloc(sizeof(suite_t));
105 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(suite, "malloc()");
106
107 suite->name = name;
108 suite->numoftests = numoftests;
109 suite->set_up = set_up;
110 suite->tests = tests;
111 suite->tear_down = tear_down;
112 return suite;
113 }
114
/* Release a suite allocated by create_suite(). The suite's name and
 * tests array are borrowed and are not freed here. */
static void
destroy_suite(suite_t * suite)
{
	free(suite);
}
120
/* Log the suite banner and its test count (at result verbosity). */
static void
log_suite_info(suite_t * suite)
{
	logr("[TEST] %s", suite->name);
	logr("Number of tests: %d\n", suite->numoftests);
}
127
/* Fold this suite's totals into the global aggregated results, which
 * log_aggregated_results() reports at the end of the run. */
static void
log_suite_results(suite_t * suite, int passed_tests)
{
	results.numoftests += (uintmax_t)suite->numoftests;
	results.passed_tests += (uintmax_t)passed_tests;
}
134
135 static void
log_test_info(unit_test_t * unit_test,unsigned test_num)136 log_test_info(unit_test_t * unit_test, unsigned test_num)
137 {
138 logr("[BEGIN] #%04d: %s", test_num, unit_test->name);
139 }
140
141 static void
log_test_result(unit_test_t * unit_test,boolean_t test_passed,unsigned test_num)142 log_test_result(unit_test_t * unit_test, boolean_t test_passed, unsigned test_num)
143 {
144 logr("[%s] #%04d: %s\n", test_passed ? "PASS" : "FAIL", test_num, unit_test->name);
145 }
146
/* Run a test with fixture set up and tear down. Note: despite the
 * module's exit-code conventions, no time out is enforced here. */
/* Run a single unit test bracketed by the suite's fixture set up and
 * tear down functions. Executes inside the fork()ed child process. */
static void
run_test(suite_t * suite, unit_test_t * unit_test, unsigned test_num)
{
	log_test_info(unit_test, test_num);

	suite->set_up();
	unit_test->test();
	suite->tear_down();
}
158
159 /* Check a child return status. */
160 static boolean_t
child_terminated_normally(int child_status)161 child_terminated_normally(int child_status)
162 {
163 boolean_t normal_exit = FALSE;
164
165 if (WIFEXITED(child_status)) {
166 int exit_status = WEXITSTATUS(child_status);
167 if (exit_status) {
168 T_LOG("Child process unexpectedly exited with code %d.",
169 exit_status);
170 } else if (!_expected_signal) {
171 normal_exit = TRUE;
172 }
173 } else if (WIFSIGNALED(child_status)) {
174 int signal = WTERMSIG(child_status);
175 if (signal == _expected_signal ||
176 (_expected_signal == -1 && (signal == SIGBUS || signal == SIGSEGV))) {
177 if (_quietness <= 0) {
178 T_LOG("Child process died with expected signal "
179 "%d.", signal);
180 }
181 normal_exit = TRUE;
182 } else {
183 T_LOG("Child process unexpectedly died with signal %d.",
184 signal);
185 }
186 } else {
187 T_LOG("Child process unexpectedly did not exit nor die");
188 }
189
190 return normal_exit;
191 }
192
193 /* Run a test in its own process, and report the result. */
194 static boolean_t
child_test_passed(suite_t * suite,unit_test_t * unit_test)195 child_test_passed(suite_t * suite, unit_test_t * unit_test)
196 {
197 int test_status;
198 static unsigned test_num = 0;
199
200 test_num++;
201
202 pid_t test_pid = fork();
203 T_QUIET; T_ASSERT_POSIX_SUCCESS(test_pid, "fork()");
204 if (!test_pid) {
205 run_test(suite, unit_test, test_num);
206 exit(0);
207 }
208 while (waitpid(test_pid, &test_status, 0) != test_pid) {
209 continue;
210 }
211 boolean_t test_result = child_terminated_normally(test_status);
212 log_test_result(unit_test, test_result, test_num);
213 return test_result;
214 }
215
216 /* Run each test in a suite, and report the results. */
217 static int
count_passed_suite_tests(suite_t * suite)218 count_passed_suite_tests(suite_t * suite)
219 {
220 int passed_tests = 0;
221 int i;
222
223 for (i = 0; i < suite->numoftests; i++) {
224 passed_tests += child_test_passed(suite, &(suite->tests[i]));
225 }
226 return passed_tests;
227 }
228
229 /********************/
230 /* Public interface */
231 /********************/
232
#define DEFAULT_QUIETNESS 0 /* verbose */
#define RESULT_ERR_QUIETNESS 1 /* result and error */
#define ERROR_ONLY_QUIETNESS 2 /* error only */

/* Convenience wrapper deriving the test count from the array's static
 * size; the trailing variadic arguments format the suite name. Only
 * valid on actual arrays (not pointers). */
#define run_suite(set_up, tests, tear_down, ...) \
	_run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__)

typedef unit_test_t UnitTests[];

void _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
__printflike(5, 6);
244
245 void
_run_suite(int numoftests,test_fn_t set_up,UnitTests tests,test_fn_t tear_down,const char * format,...)246 _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
247 {
248 va_list ap;
249 char * name;
250
251 va_start(ap, format);
252 T_QUIET; T_ASSERT_POSIX_SUCCESS(vasprintf(&name, format, ap), "vasprintf()");
253 va_end(ap);
254 suite_t * suite = create_suite(name, numoftests, set_up, tests, tear_down);
255 log_suite_info(suite);
256 log_suite_results(suite, count_passed_suite_tests(suite));
257 free(name);
258 destroy_suite(suite);
259 }
260
261 /* Setters and getters for various test framework global
262 * variables. Should only be used outside of the test, set up and tear
263 * down functions. */
264
265 /* Expected signal for a test, default is 0. */
266 void
set_expected_signal(int signal)267 set_expected_signal(int signal)
268 {
269 _expected_signal = signal;
270 }
271
272 int
get_expected_signal()273 get_expected_signal()
274 {
275 return _expected_signal;
276 }
277
278 /* Logging verbosity. */
279 void
set_quietness(int value)280 set_quietness(int value)
281 {
282 _quietness = value;
283 }
284
285 int
get_quietness()286 get_quietness()
287 {
288 return _quietness;
289 }
290
291 /* For fixture set up and tear down functions, and units tests. */
292 void
do_nothing()293 do_nothing()
294 {
295 }
296
297 void
log_aggregated_results()298 log_aggregated_results()
299 {
300 T_LOG("[SUMMARY] Aggregated Test Results\n");
301 T_LOG("Total: %ju", results.numoftests);
302 T_LOG("Passed: %ju", results.passed_tests);
303 T_LOG("Failed: %ju\n", results.numoftests - results.passed_tests);
304
305 T_QUIET; T_ASSERT_EQ(results.passed_tests, results.numoftests,
306 "%d passed of total %d tests",
307 results.passed_tests, results.numoftests);
308 }
309
310 /*******************************/
311 /*******************************/
312 /* Virtual memory unit testing */
313 /*******************************/
314 /*******************************/
315
/* Test exit values:
 * 0: pass
 * 1: fail, generic unexpected failure
 * 2: fail, unexpected Mach return value
 * 3: fail, time out */

/* Default size of a test allocation: 1024 pages of 4 kB. */
#define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL))

/* Reinterpret a VM address as a char pointer, or as an lvalue word of
 * type mach_vm_address_t (used to read/write marker values in mapped
 * memory). */
#define POINTER(address) ((char *)(uintptr_t)(address))
#define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address)))

/* Width in bytes of the marker words written into test regions. */
static int vm_address_size = sizeof(mach_vm_address_t);

static char *progname = "";
330
331 /*************************/
332 /* xnu version functions */
333 /*************************/
334
335 /* Find the xnu version string. */
336 char *
xnu_version_string()337 xnu_version_string()
338 {
339 size_t length;
340 int mib[2];
341 mib[0] = CTL_KERN;
342 mib[1] = KERN_VERSION;
343
344 T_QUIET;
345 T_ASSERT_POSIX_SUCCESS(sysctl(mib, 2, NULL, &length, NULL, 0), "sysctl()");
346 char * version = (char *)malloc(length);
347 T_QUIET;
348 T_WITH_ERRNO;
349 T_ASSERT_NOTNULL(version, "malloc()");
350 T_QUIET;
351 T_EXPECT_POSIX_SUCCESS(sysctl(mib, 2, version, &length, NULL, 0), "sysctl()");
352 if (T_RESULT == T_RESULT_FAIL) {
353 free(version);
354 T_END;
355 }
356 char * xnu_string = strstr(version, "xnu-");
357 free(version);
358 T_QUIET;
359 T_ASSERT_NOTNULL(xnu_string, "%s: error finding xnu version string.", progname);
360 return xnu_string;
361 }
362
/* Find the xnu major version number (the integer following "xnu-"). */
unsigned int
xnu_major_version()
{
	char * endptr;
	/* Skip the 4-character "xnu-" prefix.
	 * NOTE(review): the string returned by xnu_version_string() is
	 * intentionally never freed here; confirm its lifetime contract. */
	char * xnu_substring = xnu_version_string() + 4;

	errno = 0;
	unsigned int xnu_version = strtoul(xnu_substring, &endptr, 0);
	T_QUIET;
	/* strtoul error protocol: ERANGE on overflow, endptr unchanged
	 * when no digits were consumed. */
	T_ASSERT_TRUE((errno != ERANGE && endptr != xnu_substring),
	    "%s: error finding xnu major version number.", progname);
	return xnu_version;
}
377
378 /*************************/
379 /* Mach assert functions */
380 /*************************/
381
/* Assert that a Mach routine returned the expected kern_return_t,
 * logging both the actual and expected values (as strings) on mismatch. */
static inline void
assert_mach_return(kern_return_t kr, kern_return_t expected_kr, const char * mach_routine)
{
	T_QUIET; T_ASSERT_EQ(kr, expected_kr,
	    "%s unexpectedly returned: %s."
	    "Should have returned: %s.",
	    mach_routine, mach_error_string(kr),
	    mach_error_string(expected_kr));
}
391
392 /*******************************/
393 /* Arrays for test suite loops */
394 /*******************************/
395
/* Memory allocators: common signature shared by all the allocation
 * wrappers exercised in the test loops below. */
typedef kern_return_t (*allocate_fn_t)(vm_map_t, mach_vm_address_t *, mach_vm_size_t, int);


/*
 * Remember any pre-reserved fixed address, which needs to be released prior to an allocation.
 */
static mach_vm_address_t fixed_vm_address = 0x0;
static mach_vm_size_t fixed_vm_size = 0;

/* forward decl */
void assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size);
408
409 /*
410 * If trying to allocate at a fixed address, we need to do the delayed deallocate first.
411 */
412 static void
check_fixed_address(mach_vm_address_t * address,mach_vm_size_t size)413 check_fixed_address(mach_vm_address_t *address, mach_vm_size_t size)
414 {
415 if (fixed_vm_address != 0 &&
416 fixed_vm_address <= *address &&
417 *address + size <= fixed_vm_address + fixed_vm_size) {
418 assert_deallocate_success(fixed_vm_address, fixed_vm_size);
419 fixed_vm_address = 0;
420 fixed_vm_size = 0;
421 }
422 }
423
/* Allocator wrapper: plain mach_vm_allocate(), releasing any
 * pre-reserved fixed region covering the request first. */
kern_return_t
wrapper_mach_vm_allocate(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_allocate(map, address, size, flags);
}

/* Allocator wrapper: anonymous mach_vm_map() with a zero mask. */
kern_return_t
wrapper_mach_vm_map(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}

/* Should have the same behavior as when mask is zero. */
/* Allocator wrapper: mach_vm_map() with a 4 kB (page) alignment mask. */
kern_return_t
wrapper_mach_vm_map_4kB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0xFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}

/* Allocator wrapper: mach_vm_map() with a 2 MB alignment mask. */
kern_return_t
wrapper_mach_vm_map_2MB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0x1FFFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
455
/* Create a named memory entry backed by freshly created anonymous
 * memory. On return *size holds the entry's actual size, asserted to
 * be the page-rounded input size. Caller owns the returned port. */
mach_port_t
memory_entry(mach_vm_size_t * size)
{
	mach_port_t object_handle = MACH_PORT_NULL;
	mach_vm_size_t original_size = *size;

	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), size, (memory_object_offset_t)0,
	    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
	    "mach_make_memory_entry_64()");
	T_QUIET; T_ASSERT_EQ(*size, round_page(original_size),
	    "mach_make_memory_entry_64() unexpectedly returned a named "
	    "entry of size 0x%jx (%ju).\n"
	    "Should have returned a "
	    "named entry of size 0x%jx (%ju).",
	    (uintmax_t)*size, (uintmax_t)*size, (uintmax_t)original_size, (uintmax_t)original_size);
	return object_handle;
}
473
/* Allocator wrapper: mach_vm_map() backed by a named entry (zero
 * mask). The temporary entry port is deallocated before returning. */
kern_return_t
wrapper_mach_vm_map_named_entry(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	mach_port_t object_handle = memory_entry(&size);
	check_fixed_address(address, size);
	kern_return_t kr = mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, object_handle, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), object_handle), "mach_port_deallocate()");
	return kr;
}
484
/* The allocator wrappers under test, paired with logging descriptions;
 * suites iterate these via allocators_idx. */
static struct {
	allocate_fn_t allocate;
	const char * description;
} allocators[] = {
	{wrapper_mach_vm_allocate, "mach_vm_allocate()"},
	{wrapper_mach_vm_map, "mach_vm_map() (zero mask)"},
	{wrapper_mach_vm_map_4kB,
	 "mach_vm_map() "
	 "(4 kB address alignment)"},
	{wrapper_mach_vm_map_2MB,
	 "mach_vm_map() "
	 "(2 MB address alignment)"},
	{wrapper_mach_vm_map_named_entry,
	 "mach_vm_map() (named "
	 "entry, zero mask)"},
};
static int numofallocators = sizeof(allocators) / sizeof(allocators[0]);
static int allocators_idx;
/* Symbolic indices into allocators[]. */
enum { MACH_VM_ALLOCATE, MACH_VM_MAP, MACH_VM_MAP_4kB, MACH_VM_MAP_2MB, MACH_VM_MAP_NAMED_ENTRY };

/* VM size */
static struct {
	mach_vm_size_t size;
	const char * description;
} vm_sizes[] = {
	{DEFAULT_VM_SIZE, "default/input"},
	{0, "zero"},
	{4096ULL, "aligned"},
	{1ULL, "unaligned"},
	{4095ULL, "unaligned"},
	{4097ULL, "unaligned"},
};
static int numofsizes = sizeof(vm_sizes) / sizeof(vm_sizes[0]);
static int sizes_idx;
static int buffer_sizes_idx;
/* Symbolic indices into vm_sizes[]. */
enum { DEFAULT_INPUT, ZERO_BYTES, ONE_PAGE, ONE_BYTE, ONE_PAGE_MINUS_ONE_BYTE, ONE_PAGE_AND_ONE_BYTE };

/* Unspecified/fixed address */
static struct {
	int flag;
	const char * description;
} address_flags[] = {
	{VM_FLAGS_ANYWHERE, "unspecified"}, {VM_FLAGS_FIXED, "fixed"},
};
static int numofflags = sizeof(address_flags) / sizeof(address_flags[0]);
static int flags_idx;
enum { ANYWHERE, FIXED };

/* Address alignment */
static struct {
	boolean_t alignment;
	const char * description;
} address_alignments[] = {
	{TRUE, " aligned"}, {FALSE, " unaligned"},
};
static int numofalignments = sizeof(address_alignments) / sizeof(*address_alignments);
static int alignments_idx;
enum { ALIGNED, UNALIGNED };

/* Buffer offset */
static struct {
	int offset;
	const char * description;
} buffer_offsets[] = {
	{0, ""}, {1, ""}, {2, ""},
};
static int numofoffsets = sizeof(buffer_offsets) / sizeof(buffer_offsets[0]);
static int offsets_idx;
enum { ZERO, ONE, TWO };

/* mach_vm_copy() post actions */
enum { VMCOPY_MODIFY_SRC, VMCOPY_MODIFY_DST, VMCOPY_MODIFY_SHARED_COPIED };

static struct {
	int action;
	const char * description;
} vmcopy_actions[] = {
	{VMCOPY_MODIFY_SRC, "modify vm_copy() source"},
	{VMCOPY_MODIFY_DST, "modify vm_copy() destination"},
	{VMCOPY_MODIFY_SHARED_COPIED,
	 "modify vm_copy source's shared "
	 "or copied from/to region"},
};
static int numofvmcopyactions = sizeof(vmcopy_actions) / sizeof(vmcopy_actions[0]);
static int vmcopy_action_idx;
570
571 /************************************/
572 /* Setters and getters for fixtures */
573 /************************************/
574
/* Fixture state, configured via the setters below before each test. */

/* Allocation memory range. */
static allocate_fn_t _allocator = wrapper_mach_vm_allocate;
static mach_vm_size_t _vm_size = DEFAULT_VM_SIZE;
static int _address_flag = VM_FLAGS_ANYWHERE;
static boolean_t _address_alignment = TRUE;
static mach_vm_address_t _vm_address = 0x0;

/* Buffer for mach_vm_write(). */
static mach_vm_size_t _buffer_size = DEFAULT_VM_SIZE;
static mach_vm_address_t _buffer_address = 0x0;
static int _buffer_offset = 0;

/* Post action for mach_vm_copy(). */
static int _vmcopy_post_action = VMCOPY_MODIFY_SRC;
589
590 static void
set_allocator(allocate_fn_t allocate)591 set_allocator(allocate_fn_t allocate)
592 {
593 _allocator = allocate;
594 }
595
596 static allocate_fn_t
get_allocator()597 get_allocator()
598 {
599 return _allocator;
600 }
601
602 static void
set_vm_size(mach_vm_size_t size)603 set_vm_size(mach_vm_size_t size)
604 {
605 _vm_size = size;
606 }
607
608 static mach_vm_size_t
get_vm_size()609 get_vm_size()
610 {
611 return _vm_size;
612 }
613
614 static void
set_address_flag(int flag)615 set_address_flag(int flag)
616 {
617 _address_flag = flag;
618 }
619
620 static int
get_address_flag()621 get_address_flag()
622 {
623 return _address_flag;
624 }
625
626 static void
set_address_alignment(boolean_t alignment)627 set_address_alignment(boolean_t alignment)
628 {
629 _address_alignment = alignment;
630 }
631
632 static boolean_t
get_address_alignment()633 get_address_alignment()
634 {
635 return _address_alignment;
636 }
637
638 static void
set_vm_address(mach_vm_address_t address)639 set_vm_address(mach_vm_address_t address)
640 {
641 _vm_address = address;
642 }
643
644 static mach_vm_address_t
get_vm_address()645 get_vm_address()
646 {
647 return _vm_address;
648 }
649
650 static void
set_buffer_size(mach_vm_size_t size)651 set_buffer_size(mach_vm_size_t size)
652 {
653 _buffer_size = size;
654 }
655
656 static mach_vm_size_t
get_buffer_size()657 get_buffer_size()
658 {
659 return _buffer_size;
660 }
661
662 static void
set_buffer_address(mach_vm_address_t address)663 set_buffer_address(mach_vm_address_t address)
664 {
665 _buffer_address = address;
666 }
667
668 static mach_vm_address_t
get_buffer_address()669 get_buffer_address()
670 {
671 return _buffer_address;
672 }
673
674 static void
set_buffer_offset(int offset)675 set_buffer_offset(int offset)
676 {
677 _buffer_offset = offset;
678 }
679
680 static int
get_buffer_offset()681 get_buffer_offset()
682 {
683 return _buffer_offset;
684 }
685
686 static void
set_vmcopy_post_action(int action)687 set_vmcopy_post_action(int action)
688 {
689 _vmcopy_post_action = action;
690 }
691
692 static int
get_vmcopy_post_action()693 get_vmcopy_post_action()
694 {
695 return _vmcopy_post_action;
696 }
697
698 /*******************************/
699 /* Usage and option processing */
700 /*******************************/
/* Which test groups to run; all off until process_options() decides. */
static boolean_t flag_run_allocate_test = FALSE;
static boolean_t flag_run_deallocate_test = FALSE;
static boolean_t flag_run_read_test = FALSE;
static boolean_t flag_run_write_test = FALSE;
static boolean_t flag_run_protect_test = FALSE;
static boolean_t flag_run_copy_test = FALSE;

/* Bit flags for test_option.to_flags; bit i selects test_info[i]. */
#define VM_TEST_ALLOCATE 0x00000001
#define VM_TEST_DEALLOCATE 0x00000002
#define VM_TEST_READ 0x00000004
#define VM_TEST_WRITE 0x00000008
#define VM_TEST_PROTECT 0x00000010
#define VM_TEST_COPY 0x00000020

/* Options supplied by the harness entry points. */
typedef struct test_option {
	uint32_t to_flags;        /* VM_TEST_* selection mask (0 = run all) */
	int to_quietness;         /* requested logging quietness */
	mach_vm_size_t to_vmsize; /* overrides vm_sizes[0] when non-zero */
} test_option_t;

/* Maps a test group name to its run flag. */
typedef struct test_info {
	char *ti_name;
	boolean_t *ti_flag;
} test_info_t;

static test_option_t test_options;

/* Symbolic indices into test_info[]. */
enum {ALLOCATE = 0, DEALLOCATE, READ, WRITE, PROTECT, COPY};

/* NULL-terminated table, indexed by the enum above. */
static test_info_t test_info[] = {
	{"allocate", &flag_run_allocate_test},
	{"deallocate", &flag_run_deallocate_test},
	{"read", &flag_run_read_test},
	{"write", &flag_run_write_test},
	{"protect", &flag_run_protect_test},
	{"copy", &flag_run_copy_test},
	{NULL, NULL}
};
739
/* Fail the run when condition is non-zero, reporting the offending
 * option value string. */
static void
die_on_invalid_value(int condition, const char * value_string)
{
	T_QUIET;
	T_ASSERT_EQ(condition, 0, "%s: invalid value: %s.",
	    progname, value_string);
}
747
748 static void
process_options(test_option_t options)749 process_options(test_option_t options)
750 {
751 test_info_t *tp;
752
753 setvbuf(stdout, NULL, _IONBF, 0);
754
755 set_vm_size(DEFAULT_VM_SIZE);
756 set_quietness(DEFAULT_QUIETNESS);
757
758 if (NULL != getenv("LTERDOS")) {
759 logr("LTERDOS=YES this is LeanTestEnvironment\nIncreasing quietness by 1.");
760 set_quietness(get_quietness() + 1);
761 } else {
762 if (options.to_quietness > 0) {
763 set_quietness(options.to_quietness);
764 }
765 }
766
767 if (options.to_vmsize != 0) {
768 vm_sizes[0].size = options.to_vmsize;
769 }
770
771 if (options.to_flags == 0) {
772 for (tp = test_info; tp->ti_name != NULL; ++tp) {
773 *tp->ti_flag = TRUE;
774 }
775 } else {
776 if (options.to_flags & VM_TEST_ALLOCATE) {
777 *(test_info[ALLOCATE].ti_flag) = TRUE;
778 }
779
780 if (options.to_flags & VM_TEST_DEALLOCATE) {
781 *(test_info[DEALLOCATE].ti_flag) = TRUE;
782 }
783
784 if (options.to_flags & VM_TEST_READ) {
785 *(test_info[READ].ti_flag) = TRUE;
786 }
787
788 if (options.to_flags & VM_TEST_WRITE) {
789 *(test_info[WRITE].ti_flag) = TRUE;
790 }
791
792 if (options.to_flags & VM_TEST_PROTECT) {
793 *(test_info[PROTECT].ti_flag) = TRUE;
794 }
795
796 if (options.to_flags & VM_TEST_COPY) {
797 *(test_info[COPY].ti_flag) = TRUE;
798 }
799 }
800 }
801
802 /*****************/
803 /* Various tools */
804 /*****************/
805
806 /* Find the allocator address alignment mask. */
807 mach_vm_address_t
get_mask()808 get_mask()
809 {
810 mach_vm_address_t mask;
811
812 if (get_allocator() == wrapper_mach_vm_map_2MB) {
813 mask = (mach_vm_address_t)0x1FFFFF;
814 } else {
815 mask = vm_page_size - 1;
816 }
817 return mask;
818 }
819
/* Find the size of the smallest aligned region containing a given
 * memory range. */
mach_vm_size_t
aligned_size(mach_vm_address_t address, mach_vm_size_t size)
{
	/* Page offset of the start plus the length, rounded up to pages. */
	return round_page(address - mach_vm_trunc_page(address) + size);
}
827
828 /********************/
829 /* Assert functions */
830 /********************/
831
/* Address is aligned on allocator boundary. */
static inline void
assert_aligned_address(mach_vm_address_t address)
{
	T_QUIET; T_ASSERT_EQ((address & get_mask()), 0,
	    "Address 0x%jx is unexpectedly "
	    "unaligned.",
	    (uintmax_t)address);
}

/* Address is truncated to allocator boundary. */
static inline void
assert_trunc_address(mach_vm_address_t address, mach_vm_address_t trunc_address)
{
	T_QUIET; T_ASSERT_EQ(trunc_address, (address & ~get_mask()),
	    "Address "
	    "0x%jx is unexpectedly not truncated to address 0x%jx.",
	    (uintmax_t)address, (uintmax_t)trunc_address);
}

/* The mach_vm_address_t-sized word at address holds the marker value. */
static inline void
assert_address_value(mach_vm_address_t address, mach_vm_address_t marker)
{
	/* this assert is used so frequently so that we simply judge on
	 * its own instead of leaving this to LD macro for efficiency
	 */
	if (MACH_VM_ADDRESS_T(address) != marker) {
		T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, "
		    "instead of 0x%jx.", (uintmax_t)address,
		    (uintmax_t)MACH_VM_ADDRESS_T(address), (uintmax_t)marker);
	}
}
864
/* Allocate with the fixture's current allocator and assert the
 * expected kern_return_t. */
void
assert_allocate_return(mach_vm_address_t * address, mach_vm_size_t size, int address_flag, kern_return_t expected_kr)
{
	assert_mach_return(get_allocator()(mach_task_self(), address, size, address_flag), expected_kr, "Allocator");
}

void
assert_allocate_success(mach_vm_address_t * address, mach_vm_size_t size, int address_flag)
{
	assert_allocate_return(address, size, address_flag, KERN_SUCCESS);
}

/* mach_vm_deallocate() with an asserted return value. */
void
assert_deallocate_return(mach_vm_address_t address, mach_vm_size_t size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_deallocate(mach_task_self(), address, size), expected_kr, "mach_vm_deallocate()");
}

void
assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size)
{
	assert_deallocate_return(address, size, KERN_SUCCESS);
}

/* mach_vm_read() with an asserted return value. */
void
assert_read_return(mach_vm_address_t address,
    mach_vm_size_t size,
    vm_offset_t * data,
    mach_msg_type_number_t * data_size,
    kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_read(mach_task_self(), address, size, data, data_size), expected_kr, "mach_vm_read()");
}

/* mach_vm_read() that must succeed and return exactly size bytes. */
void
assert_read_success(mach_vm_address_t address, mach_vm_size_t size, vm_offset_t * data, mach_msg_type_number_t * data_size)
{
	assert_read_return(address, size, data, data_size, KERN_SUCCESS);
	T_QUIET; T_ASSERT_EQ(*data_size, size,
	    "Returned buffer size 0x%jx "
	    "(%ju) is unexpectedly different from source size 0x%jx "
	    "(%ju).",
	    (uintmax_t)*data_size, (uintmax_t)*data_size, (uintmax_t)size, (uintmax_t)size);
}

/* mach_vm_write() with an asserted return value. */
void
assert_write_return(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_write(mach_task_self(), address, data, data_size), expected_kr, "mach_vm_write()");
}

void
assert_write_success(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size)
{
	assert_write_return(address, data, data_size, KERN_SUCCESS);
}

/* Allocate a destination region (stored through dest), then
 * mach_vm_copy() into it, asserting the copy's return value. */
void
assert_allocate_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest, kern_return_t expected_kr)
{
	assert_allocate_success(dest, size, VM_FLAGS_ANYWHERE);
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, *dest), expected_kr, "mach_vm_copy()");
}
void
assert_allocate_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest)
{
	assert_allocate_copy_return(source, size, dest, KERN_SUCCESS);
}

/* mach_vm_copy() into an existing destination, with an asserted
 * return value. */
void
assert_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, dest), expected_kr, "mach_vm_copy()");
}

void
assert_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest)
{
	assert_copy_return(source, size, dest, KERN_SUCCESS);
}
945
946 /*******************/
947 /* Memory patterns */
948 /*******************/
949
/* Predicate selecting addresses in a pattern, and the action applied
 * to each (address, pattern value) pair. */
typedef boolean_t (*address_filter_t)(mach_vm_address_t);
typedef void (*address_action_t)(mach_vm_address_t, mach_vm_address_t);

/* Map over a memory region pattern and its complement, through a
 * (possibly reversed) boolean filter and a starting value. */
void
filter_addresses_do_else(address_filter_t filter,
    boolean_t reversed,
    mach_vm_address_t address,
    mach_vm_size_t size,
    address_action_t if_action,
    address_action_t else_action,
    mach_vm_address_t start_value)
{
	mach_vm_address_t i;
	/* NOTE(review): the strict `<` bound stops one word short when
	 * size is an exact multiple of vm_address_size, so the region's
	 * final word is never visited — presumably intentional; verify
	 * against the callers before changing. */
	for (i = 0; i + vm_address_size < size; i += vm_address_size) {
		if (filter(address + i) != reversed) {
			if_action(address + i, start_value + i);
		} else {
			else_action(address + i, start_value + i);
		}
	}
}
973
974 /* Various pattern actions. */
975 void
no_action(mach_vm_address_t i,mach_vm_address_t value)976 no_action(mach_vm_address_t i, mach_vm_address_t value)
977 {
978 }
979
980 void
read_zero(mach_vm_address_t i,mach_vm_address_t value)981 read_zero(mach_vm_address_t i, mach_vm_address_t value)
982 {
983 assert_address_value(i, 0);
984 }
985
986 void
verify_address(mach_vm_address_t i,mach_vm_address_t value)987 verify_address(mach_vm_address_t i, mach_vm_address_t value)
988 {
989 assert_address_value(i, value);
990 }
991
992 void
write_address(mach_vm_address_t i,mach_vm_address_t value)993 write_address(mach_vm_address_t i, mach_vm_address_t value)
994 {
995 MACH_VM_ADDRESS_T(i) = value;
996 }
997
998 /* Various patterns. */
999 boolean_t
empty(mach_vm_address_t i)1000 empty(mach_vm_address_t i)
1001 {
1002 return FALSE;
1003 }
1004
1005 boolean_t
checkerboard(mach_vm_address_t i)1006 checkerboard(mach_vm_address_t i)
1007 {
1008 return !((i / vm_address_size) & 0x1);
1009 }
1010
1011 boolean_t
page_ends(mach_vm_address_t i)1012 page_ends(mach_vm_address_t i)
1013 {
1014 mach_vm_address_t residue = i % vm_page_size;
1015
1016 return residue == 0 || residue == vm_page_size - vm_address_size;
1017 }
1018
1019 /*************************************/
1020 /* Global variables set up functions */
1021 /*************************************/
1022
/* Install allocators[allocators_idx].allocate as the current
 * allocator, after bounds-checking the suite-selected index. */
void
set_up_allocator()
{
	T_QUIET; T_ASSERT_TRUE(allocators_idx >= 0 && allocators_idx < numofallocators, "Invalid allocators[] index: %d.", allocators_idx);
	set_allocator(allocators[allocators_idx].allocate);
}
1029
/* Find a fixed allocatable address by retrieving the address
 * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE.
 * The region stays allocated and is recorded in the fixed_vm_address/
 * fixed_vm_size globals; a later allocate path is expected to
 * deallocate it just before reusing the address (see comment below). */
mach_vm_address_t
get_fixed_address(mach_vm_size_t size)
{
	/* mach_vm_map() starts looking for an address at 0x0. */
	mach_vm_address_t address = 0x0;

	/*
	 * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything
	 * non-zero to have at least an extra couple pages.
	 */
	if (size != 0) {
		size = round_page(size + 2 * vm_page_size);
	}

	assert_allocate_success(&address, size, VM_FLAGS_ANYWHERE);

	/*
	 * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can
	 * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will
	 * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper.
	 */
	/* A leftover nonzero value here means a previous caller never
	 * consumed its fixed region - treat that as a test bug. */
	T_QUIET; T_ASSERT_EQ(fixed_vm_address, 0, "previous fixed address not used");
	T_QUIET; T_ASSERT_EQ(fixed_vm_size, 0, "previous fixed size not used");
	fixed_vm_address = address;
	fixed_vm_size = size;

	assert_aligned_address(address);
	return address;
}
1061
1062 /* If needed, find an address at which a region of the specified size
1063 * can be allocated. Otherwise, set the address to 0x0. */
1064 void
set_up_vm_address(mach_vm_size_t size)1065 set_up_vm_address(mach_vm_size_t size)
1066 {
1067 T_QUIET; T_ASSERT_TRUE(flags_idx >= 0 && flags_idx < numofflags, "Invalid address_flags[] index: %d.", flags_idx);
1068 T_QUIET; T_ASSERT_TRUE(alignments_idx >= 0 && alignments_idx < numofalignments, "Invalid address_alignments[] index: %d.", alignments_idx);
1069 set_address_flag(address_flags[flags_idx].flag);
1070 set_address_alignment(address_alignments[alignments_idx].alignment);
1071
1072 if (!(get_address_flag() & VM_FLAGS_ANYWHERE)) {
1073 boolean_t aligned = get_address_alignment();
1074 logv(
1075 "Looking for fixed %saligned address for allocation "
1076 "of 0x%jx (%ju) byte%s...",
1077 aligned ? "" : "un", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1078 mach_vm_address_t address = get_fixed_address(size);
1079 if (!aligned) {
1080 address++;
1081 }
1082 set_vm_address(address);
1083 logv("Found %saligned fixed address 0x%jx.", aligned ? "" : "un", (uintmax_t)address);
1084 } else {
1085 /* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for
1086 * an address at the one supplied and goes up, without
1087 * wrapping around. */
1088 set_vm_address(0x0);
1089 }
1090 }
1091
/* Set the global VM size from the suite-selected vm_sizes[] entry. */
void
set_up_vm_size()
{
	T_QUIET; T_ASSERT_TRUE(sizes_idx >= 0 && sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", sizes_idx);
	set_vm_size(vm_sizes[sizes_idx].size);
}
1098
/* Set the global buffer size from the suite-selected vm_sizes[]
 * entry (buffer sizes share the vm_sizes[] table). */
void
set_up_buffer_size()
{
	T_QUIET; T_ASSERT_TRUE(buffer_sizes_idx >= 0 && buffer_sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx);
	set_buffer_size(vm_sizes[buffer_sizes_idx].size);
}
1105
/* Set the global buffer offset from the suite-selected
 * buffer_offsets[] entry. */
void
set_up_buffer_offset()
{
	T_QUIET; T_ASSERT_TRUE(offsets_idx >= 0 && offsets_idx < numofoffsets, "Invalid buffer_offsets[] index: %d.", offsets_idx);
	set_buffer_offset(buffer_offsets[offsets_idx].offset);
}
1112
/* Set the global mach_vm_copy() post-action from the suite-selected
 * vmcopy_actions[] entry. */
void
set_up_vmcopy_action()
{
	T_QUIET; T_ASSERT_TRUE(vmcopy_action_idx >= 0 && vmcopy_action_idx < numofvmcopyactions, "Invalid vmcopy_actions[] index: %d.",
	    vmcopy_action_idx);
	set_vmcopy_post_action(vmcopy_actions[vmcopy_action_idx].action);
}
1120
/* Fixture: select the allocator, then the VM size. */
void
set_up_allocator_and_vm_size()
{
	set_up_allocator();
	set_up_vm_size();
}
1127
/* Fixture: select the VM size, then pick an address suitable for a
 * region of that size (order matters: the address probe needs the
 * size). */
void
set_up_vm_variables()
{
	set_up_vm_size();
	set_up_vm_address(get_vm_size());
}
1134
/* Fixture: select the allocator, then the VM size and address. */
void
set_up_allocator_and_vm_variables()
{
	set_up_allocator();
	set_up_vm_variables();
}
1141
/* Fixture: select the buffer size and offset. */
void
set_up_buffer_variables()
{
	set_up_buffer_size();
	set_up_buffer_offset();
}
1148
/* Fixture: select the mach_vm_copy() shared-mode post-action. */
void
set_up_copy_shared_mode_variables()
{
	set_up_vmcopy_action();
}
1154
1155 /*******************************/
1156 /* Allocation set up functions */
1157 /*******************************/
1158
1159 /* Allocate VM region of given size. */
1160 void
allocate(mach_vm_size_t size)1161 allocate(mach_vm_size_t size)
1162 {
1163 mach_vm_address_t address = get_vm_address();
1164 int flag = get_address_flag();
1165
1166 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1167 if (!(flag & VM_FLAGS_ANYWHERE)) {
1168 logv(" at address 0x%jx", (uintmax_t)address);
1169 }
1170 logv("...");
1171 assert_allocate_success(&address, size, flag);
1172 logv(
1173 "Memory of rounded size 0x%jx (%ju) allocated at "
1174 "address 0x%jx.",
1175 (uintmax_t)round_page(size), (uintmax_t)round_page(size), (uintmax_t)address);
1176 /* Fixed allocation address is truncated to the allocator
1177 * boundary. */
1178 if (!(flag & VM_FLAGS_ANYWHERE)) {
1179 mach_vm_address_t old_address = get_vm_address();
1180 assert_trunc_address(old_address, address);
1181 logv(
1182 "Address 0x%jx is correctly truncated to allocated "
1183 "address 0x%jx.",
1184 (uintmax_t)old_address, (uintmax_t)address);
1185 }
1186 set_vm_address(address);
1187 }
1188
1189 void
allocate_buffer(mach_vm_size_t buffer_size)1190 allocate_buffer(mach_vm_size_t buffer_size)
1191 {
1192 mach_vm_address_t data = 0x0;
1193
1194 logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)buffer_size, (uintmax_t)buffer_size, (buffer_size == 1) ? "" : "s");
1195 assert_allocate_success(&data, buffer_size, VM_FLAGS_ANYWHERE);
1196 logv(
1197 "Memory of rounded size 0x%jx (%ju) allocated at "
1198 "address 0x%jx.",
1199 (uintmax_t)round_page(buffer_size), (uintmax_t)round_page(buffer_size), (uintmax_t)data);
1200 data += get_buffer_offset();
1201 T_QUIET; T_ASSERT_EQ((vm_offset_t)data, data,
1202 "Address 0x%jx "
1203 "unexpectedly overflows to 0x%jx when cast as "
1204 "vm_offset_t type.",
1205 (uintmax_t)data, (uintmax_t)(vm_offset_t)data);
1206 set_buffer_address(data);
1207 }
1208
1209 /****************************************************/
1210 /* Global variables and allocation set up functions */
1211 /****************************************************/
1212
/* Fixture: set the VM size/address globals, then allocate the
 * region. */
void
set_up_vm_variables_and_allocate()
{
	set_up_vm_variables();
	allocate(get_vm_size());
}
1219
/* Fixture: select the allocator, then set the VM globals and
 * allocate. */
void
set_up_allocator_and_vm_variables_and_allocate()
{
	set_up_allocator();
	set_up_vm_variables_and_allocate();
}
1226
/* Fixture: allocate one byte more than the configured size so that an
 * unaligned (bumped-by-one) start address still has a full region
 * behind it, then restore the unaligned start if requested. */
void
set_up_vm_variables_and_allocate_extra_page()
{
	set_up_vm_size();
	/* Increment the size to insure we get an extra allocated page
	 * for unaligned start addresses. */
	mach_vm_size_t allocation_size = get_vm_size() + 1;
	set_up_vm_address(allocation_size);

	allocate(allocation_size);
	/* In the fixed unaligned address case, restore the returned
	 * (truncated) allocation address to its unaligned value. */
	if (!get_address_alignment()) {
		set_vm_address(get_vm_address() + 1);
	}
}
1243
/* Fixture: set the buffer size/offset globals and allocate the
 * buffer, padded by the offset so unaligned starts are covered. */
void
set_up_buffer_variables_and_allocate_extra_page()
{
	set_up_buffer_variables();
	/* Increment the size to insure we get an extra allocated page
	 * for unaligned start addresses. */
	allocate_buffer(get_buffer_size() + get_buffer_offset());
}
1252
/* Allocate some destination and buffer memory for subsequent
 * writing, including extra pages for non-aligned start addresses. */
void
set_up_vm_and_buffer_variables_allocate_for_writing()
{
	set_up_vm_variables_and_allocate_extra_page();
	set_up_buffer_variables_and_allocate_extra_page();
}
1261
/* Allocate some destination and source regions for subsequent
 * copying, including extra pages for non-aligned start addresses.
 * (Identical to the writing setup; kept separate for suite
 * readability.) */
void
set_up_vm_and_buffer_variables_allocate_for_copying()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
}
1269
1270 /************************************/
1271 /* Deallocation tear down functions */
1272 /************************************/
1273
/* Deallocate the given range, asserting success. */
void
deallocate_range(mach_vm_address_t address, mach_vm_size_t size)
{
	logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_deallocate_success(address, size);
}
1281
/* Tear-down: deallocate the region described by the global VM
 * address and size. */
void
deallocate()
{
	deallocate_range(get_vm_address(), get_vm_size());
}
1287
/* Deallocate source memory, including the extra page for unaligned
 * start addresses. */
void
deallocate_extra_page()
{
	/* Set the address and size to their original allocation
	 * values. */
	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
}
1297
1298 /* Deallocate buffer and destination memory for mach_vm_write(),
1299 * including the extra page for unaligned start addresses. */
1300 void
deallocate_vm_and_buffer()1301 deallocate_vm_and_buffer()
1302 {
1303 deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
1304 deallocate_range(mach_vm_trunc_page(get_buffer_address()), get_buffer_size() + get_buffer_offset());
1305 }
1306
1307 /***********************************/
1308 /* mach_vm_read() set up functions */
1309 /***********************************/
1310
/* Read the source memory into a buffer, deallocate the source, set
 * the global address and size from the buffer's. Assumes the source
 * was allocated via set_up_vm_variables_and_allocate_extra_page()
 * (the "size + 1" deallocation below depends on that extra page). */
void
read_deallocate()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_success(address, size, &read_address, &read_size);
	logv(
		"Memory of size 0x%jx (%ju) read into buffer of "
		"address 0x%jx.",
		(uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)read_address);
	/* Deallocate the originally allocated memory, including the
	 * extra allocated page in
	 * set_up_vm_variables_and_allocate_extra_page(). */
	deallocate_range(mach_vm_trunc_page(address), size + 1);

	/* Promoting to mach_vm types after checking for overflow, and
	 * setting the global address from the buffer's. */
	T_QUIET; T_ASSERT_EQ((mach_vm_address_t)read_address, read_address,
	    "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
	    "as mach_vm_address_t type.",
	    (uintmax_t)read_address, (uintmax_t)(mach_vm_address_t)read_address);
	T_QUIET; T_ASSERT_EQ((mach_vm_size_t)read_size, read_size,
	    "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) "
	    "when cast as mach_vm_size_t type.",
	    (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)(mach_vm_size_t)read_size, (uintmax_t)(mach_vm_size_t)read_size);
	set_vm_address((mach_vm_address_t)read_address);
	set_vm_size((mach_vm_size_t)read_size);
}
1346
/* Allocate some source memory, read it into a buffer, deallocate the
 * source, set the global address and size from the buffer's. */
void
set_up_vm_variables_allocate_read_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	read_deallocate();
}
1355
1356 /************************************/
1357 /* mach_vm_write() set up functions */
1358 /************************************/
1359
1360 /* Write the buffer into the destination memory. */
1361 void
write_buffer()1362 write_buffer()
1363 {
1364 mach_vm_address_t address = get_vm_address();
1365 vm_offset_t data = (vm_offset_t)get_buffer_address();
1366 mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
1367
1368 logv(
1369 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
1370 "memory at address 0x%jx...",
1371 (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
1372 assert_write_success(address, data, buffer_size);
1373 logv("Buffer written.");
1374 }
1375
/* Allocate some destination and buffer memory, and write the buffer
 * into the destination memory. */
void
set_up_vm_and_buffer_variables_allocate_write()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
	write_buffer();
}
1384
1385 /***********************************/
1386 /* mach_vm_copy() set up functions */
1387 /***********************************/
1388
1389 void
copy_deallocate(void)1390 copy_deallocate(void)
1391 {
1392 mach_vm_size_t size = get_vm_size();
1393 mach_vm_address_t source = get_vm_address();
1394 mach_vm_address_t dest = 0;
1395
1396 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1397 (uintmax_t)source);
1398 assert_allocate_copy_success(source, size, &dest);
1399 logv(
1400 "Memory of size 0x%jx (%ju) copy into region of "
1401 "address 0x%jx.",
1402 (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1403 /* Deallocate the originally allocated memory, including the
1404 * extra allocated page in
1405 * set_up_vm_variables_and_allocate_extra_page(). */
1406 deallocate_range(mach_vm_trunc_page(source), size + 1);
1407 /* Promoting to mach_vm types after checking for overflow, and
1408 * setting the global address from the buffer's. */
1409 T_QUIET; T_ASSERT_EQ((vm_offset_t)dest, dest,
1410 "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1411 "as mach_vm_address_t type.",
1412 (uintmax_t)dest, (uintmax_t)(vm_offset_t)dest);
1413 set_vm_address(dest);
1414 set_vm_size(size);
1415 }
1416
1417 /* Copy the source region into the destination region. */
1418 void
copy_region()1419 copy_region()
1420 {
1421 mach_vm_address_t source = get_vm_address();
1422 mach_vm_address_t dest = get_buffer_address();
1423 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
1424
1425 logv(
1426 "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
1427 "memory at address 0x%jx...",
1428 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1429 assert_copy_success(source, size, dest);
1430 logv("Buffer written.");
1431 }
1432
/* Allocate some source memory, copy it to another region, deallocate the
 * source, set the global address and size from the designation region. */
void
set_up_vm_variables_allocate_copy_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	copy_deallocate();
}
1441
/* Allocate some destination and source memory, and copy the source
 * into the destination memory. */
void
set_up_source_and_dest_variables_allocate_copy()
{
	set_up_vm_and_buffer_variables_allocate_for_copying();
	copy_region();
}
1450
1451 /**************************************/
1452 /* mach_vm_protect() set up functions */
1453 /**************************************/
1454
/* Fixture helper: allocate a region (with extra page) and set its
 * current protection to exactly `protection`. protection_name is only
 * used for logging; note the callers pass the protection that
 * REMAINS, e.g. "read"-protecting passes VM_PROT_WRITE. */
void
set_up_vm_variables_allocate_protect(vm_prot_t protection, const char * protection_name)
{
	set_up_vm_variables_and_allocate_extra_page();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t address = get_vm_address();

	logv(
		"Setting %s-protection on 0x%jx (%ju) byte%s at address "
		"0x%jx...",
		protection_name, (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
	/* FALSE: set the current protection, not the maximum. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, protection), "mach_vm_protect()");
	logv("Region %s-protected.", protection_name);
}
1469
/* Read-protect the region: leave only VM_PROT_WRITE, so reads fault.
 * (The protection argument is what remains allowed, hence
 * VM_PROT_WRITE for a "read"-protected region.) */
void
set_up_vm_variables_allocate_readprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_WRITE, "read");
}
1475
/* Write-protect the region: leave only VM_PROT_READ, so writes
 * fault. */
void
set_up_vm_variables_allocate_writeprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_READ, "write");
}
1481
1482 /*****************/
1483 /* Address tests */
1484 /*****************/
1485
1486 /* Allocated address is nonzero iff size is nonzero. */
1487 void
test_nonzero_address_iff_nonzero_size()1488 test_nonzero_address_iff_nonzero_size()
1489 {
1490 mach_vm_address_t address = get_vm_address();
1491 mach_vm_size_t size = get_vm_size();
1492
1493 T_QUIET; T_ASSERT_TRUE((address && size) || (!address && !size), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address,
1494 address ? "non" : "");
1495 logv("Address 0x%jx is %szero as expected.", (uintmax_t)address, size ? "non" : "");
1496 }
1497
/* Allocated address is aligned. */
void
test_aligned_address()
{
	mach_vm_address_t address = get_vm_address();

	assert_aligned_address(address);
	logv("Address 0x%jx is aligned.", (uintmax_t)address);
}
1507
1508 /************************/
1509 /* Read and write tests */
1510 /************************/
1511
/* Verify the named pattern over the region: matched cells must hold
 * their address-derived value, unmatched cells must read zero. */
void
verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
		"Verifying %s pattern on region of address 0x%jx "
		"and size 0x%jx (%ju)...",
		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern verified.");
}
1523
1524 void
write_pattern(address_filter_t filter,boolean_t reversed,mach_vm_address_t address,mach_vm_size_t size,const char * pattern_name)1525 write_pattern(
1526 address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
1527 {
1528 logv(
1529 "Writing %s pattern on region of address 0x%jx "
1530 "and size 0x%jx (%ju)...",
1531 pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1532 filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
1533 logv("Pattern writen.");
1534 }
1535
/* Write the named pattern over the region and immediately verify it:
 * matched cells are written with and checked for their
 * address-derived value, unmatched cells are left alone and must read
 * zero. */
void
write_and_verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
		"Writing and verifying %s pattern on region of "
		"address 0x%jx and size 0x%jx (%ju)...",
		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern written and verified.");
}
1548
/* Verify that the smallest aligned region containing the
 * given range is zero-filled. */
void
test_zero_filled()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()),
	    "zero-filled");
}
1557
/* Write and verify every cell with its own address (reversed empty
 * filter matches everything). */
void
test_write_address_filled()
{
	write_and_verify_pattern(empty, TRUE, get_vm_address(), round_page(get_vm_size()), "address-filled");
}
1563
/* Write and verify a checkerboard pattern (even cells written, odd
 * cells must stay zero). */
void
test_write_checkerboard()
{
	write_and_verify_pattern(checkerboard, FALSE, get_vm_address(), round_page(get_vm_size()), "checkerboard");
}
1569
/* Write and verify the complementary checkerboard (odd cells written,
 * even cells must stay zero). */
void
test_write_reverse_checkerboard()
{
	write_and_verify_pattern(checkerboard, TRUE, get_vm_address(), round_page(get_vm_size()), "reverse checkerboard");
}
1575
/* Write and verify only the first/last cell of each page. */
void
test_write_page_ends()
{
	write_and_verify_pattern(page_ends, FALSE, get_vm_address(), round_page(get_vm_size()), "page ends");
}
1581
/* Write and verify every cell except the first/last of each page. */
void
test_write_page_interiors()
{
	write_and_verify_pattern(page_ends, TRUE, get_vm_address(), round_page(get_vm_size()), "page interiors");
}
1587
1588 /*********************************/
1589 /* Allocation error return tests */
1590 /*********************************/
1591
/* Reallocating a page in the smallest aligned region containing the
 * given allocated range fails. */
void
test_reallocate_pages()
{
	allocate_fn_t allocator = get_allocator();
	vm_map_t this_task = mach_task_self();
	mach_vm_address_t address = mach_vm_trunc_page(get_vm_address());
	mach_vm_size_t size = aligned_size(get_vm_address(), get_vm_size());
	mach_vm_address_t i;
	kern_return_t kr;

	logv(
		"Reallocating pages in allocated region of address 0x%jx "
		"and size 0x%jx (%ju)...",
		(uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	/* Every page of the already-allocated region must refuse a
	 * VM_FLAGS_FIXED re-allocation with KERN_NO_SPACE. */
	for (i = address; i < address + size; i += vm_page_size) {
		kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
		T_QUIET; T_ASSERT_EQ(kr, KERN_NO_SPACE,
		    "Allocator "
		    "at address 0x%jx unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)address, mach_error_string(kr), mach_error_string(KERN_NO_SPACE));
	}
	logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE));
}
1618
/* Allocating in VM_MAP_NULL fails. */
void
test_allocate_in_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	int flag = get_address_flag();

	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	/* A NULL map port makes the MIG send fail before the kernel
	 * ever sees the request, hence MACH_SEND_INVALID_DEST. */
	assert_mach_return(get_allocator()(VM_MAP_NULL, &address, size, flag), MACH_SEND_INVALID_DEST, "Allocator");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
1635
1636 /* Allocating with non-user flags fails. */
1637 void
test_allocate_with_kernel_flags()1638 test_allocate_with_kernel_flags()
1639 {
1640 allocate_fn_t allocator = get_allocator();
1641 vm_map_t this_task = mach_task_self();
1642 mach_vm_address_t address = get_vm_address();
1643 mach_vm_size_t size = get_vm_size();
1644 int flag = get_address_flag();
1645 int bad_flag, i;
1646 kern_return_t kr;
1647 int kernel_flags[] = {0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x8000, INT_MAX};
1648 int numofflags = sizeof(kernel_flags) / sizeof(kernel_flags[0]);
1649
1650 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1651 if (!(flag & VM_FLAGS_ANYWHERE)) {
1652 logv(" at address 0x%jx", (uintmax_t)address);
1653 }
1654 logv(" with various kernel flags...");
1655 for (i = 0; i < numofflags; i++) {
1656 bad_flag = kernel_flags[i] | flag;
1657 kr = allocator(this_task, &address, size, bad_flag);
1658 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1659 "Allocator "
1660 "with kernel flag 0x%x unexpectedly returned: %s.\n"
1661 "Should have returned: %s.",
1662 bad_flag, mach_error_string(kr), mach_error_string(KERN_INVALID_ARGUMENT));
1663 }
1664 logv("Returned expected error with each kernel flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1665 }
1666
1667 /*****************************/
1668 /* mach_vm_map() error tests */
1669 /*****************************/
1670
1671 /* mach_vm_map() fails with invalid protection or inheritance
1672 * arguments. */
1673 void
test_mach_vm_map_protection_inheritance_error()1674 test_mach_vm_map_protection_inheritance_error()
1675 {
1676 kern_return_t kr;
1677 vm_map_t my_task = mach_task_self();
1678 mach_vm_address_t address = get_vm_address();
1679 mach_vm_size_t size = get_vm_size();
1680 vm_map_offset_t mask = (get_allocator() == wrapper_mach_vm_map || get_allocator() == wrapper_mach_vm_map_named_entry)
1681 ? (mach_vm_offset_t)0
1682 : (mach_vm_offset_t)get_mask();
1683 int flag = get_address_flag();
1684 mach_port_t object_handle = (get_allocator() == wrapper_mach_vm_map_named_entry) ? memory_entry(&size) : MACH_PORT_NULL;
1685 vm_prot_t cur_protections[] = {VM_PROT_DEFAULT, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
1686 vm_prot_t max_protections[] = {VM_PROT_ALL, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
1687 vm_inherit_t inheritances[] = {VM_INHERIT_DEFAULT, VM_INHERIT_LAST_VALID + 1, UINT_MAX};
1688 int i, j, k;
1689
1690 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1691 if (!(flag & VM_FLAGS_ANYWHERE)) {
1692 logv(" at address 0x%jx", (uintmax_t)address);
1693 }
1694 logv(
1695 " with various invalid protection/inheritance "
1696 "arguments...");
1697
1698 for (i = 0; i < 4; i++) {
1699 for (j = 0; j < 4; j++) {
1700 for (k = 0; k < 3; k++) {
1701 /* Skip the case with all valid arguments. */
1702 if (i == (j == (k == 0))) {
1703 continue;
1704 }
1705 kr = mach_vm_map(my_task, &address, size, mask, flag, object_handle, (memory_object_offset_t)0, FALSE,
1706 cur_protections[i], max_protections[j], inheritances[k]);
1707 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1708 "mach_vm_map() "
1709 "with cur_protection 0x%x, max_protection 0x%x, "
1710 "inheritance 0x%x unexpectedly returned: %s.\n"
1711 "Should have returned: %s.",
1712 cur_protections[i], max_protections[j], inheritances[k], mach_error_string(kr),
1713 mach_error_string(KERN_INVALID_ARGUMENT));
1714 }
1715 }
1716 }
1717 logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1718 }
1719
/* mach_vm_map() with unspecified address fails if the starting
 * address overflows when rounded up to a boundary value. */
void
test_mach_vm_map_large_mask_overflow_error()
{
	mach_vm_address_t address = 0x1;
	mach_vm_size_t size = get_vm_size();
	/* All-ones mask: rounding 0x1 up to this boundary overflows. */
	mach_vm_offset_t mask = (mach_vm_offset_t)UINTMAX_MAX;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. */
	kern_return_t kr_expected = size ? KERN_NO_SPACE : KERN_INVALID_ARGUMENT;

	logv(
		"Allocating 0x%jx (%ju) byte%s at an unspecified address "
		"starting at 0x%jx with mask 0x%jx...",
		(uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, (uintmax_t)mask);
	assert_mach_return(mach_vm_map(mach_task_self(), &address, size, mask, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT),
	    kr_expected, "mach_vm_map()");
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
1741
1742 /************************/
1743 /* Size edge case tests */
1744 /************************/
1745
/* Attempt an anywhere allocation of an edge-case size, asserting the
 * expected kern_return_t. */
void
allocate_edge_size(mach_vm_address_t * address, mach_vm_size_t size, kern_return_t expected_kr)
{
	logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
	assert_allocate_return(address, size, VM_FLAGS_ANYWHERE, expected_kr);
	logv("Returned expected value: %s.", mach_error_string(expected_kr));
}
1753
/* Zero-size allocation: only plain mach_vm_allocate() succeeds. */
void
test_allocate_zero_size()
{
	mach_vm_address_t address = 0x0;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. Other allocators succeed. */
	kern_return_t kr_expected = (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	allocate_edge_size(&address, 0, kr_expected);
	if (kr_expected == KERN_SUCCESS) {
		/* Deallocating zero bytes keeps the address space clean. */
		deallocate_range(address, 0);
	}
}
1767
/* Testing the allocation of the largest size that does not overflow
 * when rounded up to a page-aligned value. */
void
test_allocate_invalid_large_size()
{
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
	if (get_allocator() != wrapper_mach_vm_map_named_entry) {
		mach_vm_address_t address = 0x0;
		/* Rounds to the maximal page-aligned size; no address
		 * space can hold it. */
		allocate_edge_size(&address, size, KERN_NO_SPACE);
	} else {
		/* Named entries cannot currently be bigger than 4 GB
		 * - 4 kb. */
		mach_port_t object_handle = MACH_PORT_NULL;
		logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
		assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)0,
		    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
		    KERN_FAILURE, "mach_make_memory_entry_64()");
		logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE));
	}
}
1788
1789 /* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a
1790 * page-aligned value. */
void
test_allocate_overflowing_size()
{
	mach_vm_address_t address = 0x0;

	/* Every allocator must reject a size whose page rounding
	 * overflows to zero. */
	allocate_edge_size(&address, (mach_vm_size_t)UINTMAX_MAX, KERN_INVALID_ARGUMENT);
}
1798
1799 /****************************/
1800 /* Address allocation tests */
1801 /****************************/
1802
1803 /* Allocation at address zero fails iff size is nonzero. */
void
test_allocate_at_zero()
{
	mach_vm_address_t address = 0x0;
	mach_vm_size_t size = get_vm_size();

	/* A nonzero fixed allocation at 0x0 fails with
	 * KERN_INVALID_ADDRESS; a zero-size request succeeds only for
	 * mach_vm_allocate() (mach_vm_map() rejects zero sizes). */
	kern_return_t kr_expected =
	    size ? KERN_INVALID_ADDRESS : (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
	logv("Returned expected value: %s.", mach_error_string(kr_expected));
	if (kr_expected == KERN_SUCCESS) {
		/* A successful fixed zero-size allocation must not
		 * have moved the requested address. */
		T_QUIET; T_ASSERT_EQ(address, 0,
		    "Address 0x%jx is unexpectedly "
		    "nonzero.\n",
		    (uintmax_t)address);
		logv("Allocated address 0x%jx is zero.", (uintmax_t)address);
		deallocate_range(address, size);
	}
}
1825
1826 /* Allocation at page-aligned but 2 MB boundary-unaligned address
1827 * fails with KERN_NO_SPACE. */
void
test_allocate_2MB_boundary_unaligned_page_aligned_address()
{
	mach_vm_size_t size = get_vm_size();

	/* Reserve one extra page so that adding vm_page_size yields an
	 * address that is page-aligned but off any 2 MB boundary. */
	mach_vm_address_t address = get_fixed_address(size + vm_page_size) + vm_page_size;
	logv(
	    "Found 2 MB boundary-unaligned, page aligned address "
	    "0x%jx.",
	    (uintmax_t)address);

	/* Expected result: a zero size is rejected by every allocator
	 * except mach_vm_allocate(); otherwise the 2 MB wrapper cannot
	 * use this boundary-unaligned fixed address (KERN_NO_SPACE),
	 * and all other allocators succeed. */
	kern_return_t kr_expected = (!size && get_allocator() != wrapper_mach_vm_allocate)
	    ? KERN_INVALID_ARGUMENT
	    : (get_allocator() == wrapper_mach_vm_map_2MB) ? KERN_NO_SPACE : KERN_SUCCESS;
	logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
	logv("Returned expected value: %s.", mach_error_string(kr_expected));
	if (kr_expected == KERN_SUCCESS) {
		deallocate_range(address, size);
	}
}
1852
1853 /* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for
1854 * an allocation address at 0x0, while mach_vm_map() starts at the
1855 * supplied address and does not wrap around. See 8016663. */
void
test_allocate_page_with_highest_address_hint()
{
	/* Highest valid page-aligned address, used as the ANYWHERE
	 * hint. */
	mach_vm_address_t address = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv(
	    "Allocating one page with unspecified address, but hint at "
	    "0x%jx...",
	    (uintmax_t)address);
	if (get_allocator() == wrapper_mach_vm_allocate) {
		/* mach_vm_allocate() starts from 0x0 and succeeds. */
		assert_allocate_success(&address, vm_page_size, VM_FLAGS_ANYWHERE);
		logv("Memory allocated at address 0x%jx.", (uintmax_t)address);
		assert_aligned_address(address);
		deallocate_range(address, vm_page_size);
	} else {
		/* mach_vm_map() starts from the supplied address, and fails
		 * with KERN_NO_SPACE, see 8016663. */
		assert_allocate_return(&address, vm_page_size, VM_FLAGS_ANYWHERE, KERN_NO_SPACE);
		logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE));
	}
}
1879
1880 /* Allocators find an allocation address with a first fit strategy. */
void
test_allocate_first_fit_pages()
{
	allocate_fn_t allocator = get_allocator();
	mach_vm_address_t address1 = 0x0;
	mach_vm_address_t i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();

	logv(
	    "Looking for first fit address for allocating one "
	    "page...");
	assert_allocate_success(&address1, vm_page_size, VM_FLAGS_ANYWHERE);
	logv("Found address 0x%jx.", (uintmax_t)address1);
	assert_aligned_address(address1);
	/* Under first fit, a second ANYWHERE allocation must land
	 * strictly above the first one. */
	mach_vm_address_t address2 = address1;
	logv(
	    "Looking for next higher first fit address for allocating "
	    "one page...");
	assert_allocate_success(&address2, vm_page_size, VM_FLAGS_ANYWHERE);
	logv("Found address 0x%jx.", (uintmax_t)address2);
	assert_aligned_address(address2);
	T_QUIET; T_ASSERT_GT(address2, address1,
	    "Second address 0x%jx is "
	    "unexpectedly not higher than first address 0x%jx.",
	    (uintmax_t)address2, (uintmax_t)address1);

	/* First fit implies every page in between is already in use:
	 * fixed allocations there all have to fail. */
	logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)address1, (uintmax_t)address2);
	for (i = address1; i <= address2; i += vm_page_size) {
		kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
		T_QUIET; T_ASSERT_NE(kr, KERN_SUCCESS,
		    "Allocator at address 0x%jx "
		    "unexpectedly succeeded.",
		    (uintmax_t)i);
	}
	logv("Expectedly returned error at each page.");
	deallocate_range(address1, vm_page_size);
	deallocate_range(address2, vm_page_size);
}
1920
1921 /*******************************/
1922 /* Deallocation segfault tests */
1923 /*******************************/
1924
1925 /* mach_vm_deallocate() deallocates the smallest aligned region
1926 * (integral number of pages) containing the given range. */
1927
1928 /* Addresses in deallocated range are inaccessible. */
void
access_deallocated_range_address(mach_vm_address_t address, const char * position)
{
	/* Deallocate the fixture range, then dereference the given
	 * address inside it. The read is expected to kill the process
	 * with SIGSEGV, so reaching the assertion means failure. */
	logv("Will deallocate and read from %s 0x%jx of deallocated range...", position, (uintmax_t)address);
	deallocate();
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx.\n"
	    "Should have died with signal SIGSEGV.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
1939
1940 /* Start of deallocated range is inaccessible. */
1941 void
test_access_deallocated_range_start()1942 test_access_deallocated_range_start()
1943 {
1944 access_deallocated_range_address(get_vm_address(), "start");
1945 }
1946
1947 /* Middle of deallocated range is inaccessible. */
1948 void
test_access_deallocated_range_middle()1949 test_access_deallocated_range_middle()
1950 {
1951 access_deallocated_range_address(get_vm_address() + (round_page(get_vm_size()) >> 1), "middle");
1952 }
1953
1954 /* End of deallocated range is inaccessible. */
1955 void
test_access_deallocated_range_end()1956 test_access_deallocated_range_end()
1957 {
1958 access_deallocated_range_address(round_page(get_vm_size()) - vm_address_size + get_vm_address(), "end");
1959 }
1960
1961 /* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We
1962 * deallocate the largest valid aligned size to avoid overflowing when
1963 * rounding up. */
void
test_deallocate_suicide()
{
	mach_vm_address_t address = 0x0;
	/* Largest aligned size, so rounding up cannot overflow. */
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (uintmax_t)address);
	/* Unmapping essentially the whole address space should tear
	 * the process down with SIGSEGV or SIGBUS; returning here at
	 * all is a test failure. */
	kern_return_t kr = mach_vm_deallocate(mach_task_self(), address, size);
	T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and "
	    "size 0x%jx (%ju) unexpectedly returned: %s.\n"
	    "Should have died with signal SIGSEGV or SIGBUS.",
	    (uintmax_t)address, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr));
}
1977
1978 /***************************************/
1979 /* Deallocation and reallocation tests */
1980 /***************************************/
1981
1982 /* Deallocating memory twice succeeds. */
void
test_deallocate_twice()
{
	/* Both rounds must succeed: deallocating an already
	 * deallocated range is expected to be harmless. */
	int round;
	for (round = 0; round < 2; round++) {
		deallocate();
	}
}
1989
1990 /* Deallocated and reallocated memory is zero-filled. Deallocated
1991 * memory is inaccessible since it can be reallocated. */
void
test_write_pattern_deallocate_reallocate_zero_filled()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	/* Dirty the range first so stale data would be detected if
	 * the reallocated pages were not zero-filled. */
	write_pattern(page_ends, FALSE, address, size, "page ends");
	logv("Deallocating, then Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	deallocate();
	/* Reallocate exactly the same fixed range. */
	assert_allocate_success(&address, size, VM_FLAGS_FIXED);
	logv("Memory allocated.");
	verify_pattern(empty, FALSE, address, size, "zero-filled");
	deallocate();
}
2007
2008 /********************************/
2009 /* Deallocation edge case tests */
2010 /********************************/
2011
2012 /* Zero size deallocation always succeeds. */
2013 void
test_deallocate_zero_size_ranges()2014 test_deallocate_zero_size_ranges()
2015 {
2016 int i;
2017 kern_return_t kr;
2018 vm_map_t this_task = mach_task_self();
2019 mach_vm_address_t addresses[] = {0x0,
2020 0x1,
2021 vm_page_size - 1,
2022 vm_page_size,
2023 vm_page_size + 1,
2024 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2025 (mach_vm_address_t)UINT_MAX,
2026 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2027 (mach_vm_address_t)UINTMAX_MAX};
2028 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2029
2030 logv("Deallocating 0x0 (0) bytes at various addresses...");
2031 for (i = 0; i < numofaddresses; i++) {
2032 kr = mach_vm_deallocate(this_task, addresses[i], 0);
2033 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate() at "
2034 "address 0x%jx unexpectedly failed: %s.",
2035 (uintmax_t)addresses[i], mach_error_string(kr));
2036 }
2037 logv("Deallocations successful.");
2038 }
2039
2040 /* Deallocation succeeds if the end of the range rounds to 0x0. */
void
test_deallocate_rounded_zero_end_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* For each range, address + size rounded up to a page
	 * boundary wraps to exactly 0x0, which must be accepted. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
	    {0x0, (mach_vm_size_t)UINTMAX_MAX},
	    {0x0, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 2},
	    {0x1, (mach_vm_size_t)UINTMAX_MAX - 1},
	    {0x1, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
	    {0x2, (mach_vm_size_t)UINTMAX_MAX - 2},
	    {0x2, (mach_vm_size_t)UINTMAX_MAX - vm_page_size},
	    {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size - 1},
	    {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, 1},
	    {(mach_vm_address_t)UINTMAX_MAX - 1, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "Deallocating various memory ranges whose end rounds to "
	    "0x0...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
		    "mach_vm_deallocate() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have succeeded.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr));
	}
	logv("Deallocations successful.");
}
2076
2077 /* Deallocating a range wrapped around the address space fails. */
void
test_deallocate_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each range's end (address + size) wraps past 0x0 to a
	 * nonzero value, making the range invalid. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
	    {0x1, (mach_vm_size_t)UINTMAX_MAX},
	    {vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
	    {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
	    {(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "Deallocating various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
		    "mach_vm_deallocate() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ARGUMENT));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
}
2109
2110 /* Deallocating in VM_MAP_NULL fails. */
void
test_deallocate_in_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	int flag = get_address_flag();

	/* The log line is assembled from several logv() calls; the
	 * address is only reported for fixed-address fixtures. */
	logv("Deallocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	/* A null task port makes the MIG call fail at send time,
	 * hence MACH_SEND_INVALID_DEST rather than a kern error. */
	assert_mach_return(mach_vm_deallocate(VM_MAP_NULL, address, size), MACH_SEND_INVALID_DEST, "mach_vm_deallocate()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2126
2127 /*****************************/
2128 /* mach_vm_read() main tests */
2129 /*****************************/
2130
2131 /* Read memory of size less than a page has aligned starting
2132 * address. Otherwise, the destination buffer's starting address has
2133 * the same boundary offset as the source region's. */
void
test_read_address_offset()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	/* Small (under two pages) or deliberately aligned fixtures
	 * must yield a page-aligned read buffer; otherwise the buffer
	 * must carry the source's boundary offset, which the assert
	 * below expects to be exactly 1 (presumably set up by the
	 * fixture — confirm against the suite's set-up). */
	if (size < vm_page_size * 2 || get_address_alignment()) {
		assert_aligned_address(address);
		logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address);
	} else {
		T_QUIET; T_ASSERT_EQ(((address - 1) & (vm_page_size - 1)), 0,
		    "Buffer "
		    "address 0x%jx does not have the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
		logv(
		    "Buffer address 0x%jx has the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
	}
}
2155
2156 /* Reading from VM_MAP_NULL fails. */
void
test_read_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv(
	    "Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM "
	    "map...",
	    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
	/* A null task port fails at MIG send time. */
	assert_mach_return(mach_vm_read(VM_MAP_NULL, address, size, &read_address, &read_size), MACH_SEND_INVALID_DEST,
	    "mach_vm_read()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2173
2174 /* Reading partially deallocated memory fails. */
void
test_read_partially_deallocated_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	/* Page-aligned address of a page near the middle of the
	 * fixture range. */
	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	assert_deallocate_success(mid_point, vm_page_size);
	logv("Page deallocated.");

	/* Punching a hole in the source must make the whole read
	 * fail with KERN_INVALID_ADDRESS. */
	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_return(address, size, &read_address, &read_size, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2193
2194 /* Reading partially read-protected memory fails. */
void
test_read_partially_unreadable_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	/* For sizes < msg_ool_size_small, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	/* Setting the protection to VM_PROT_WRITE removes read
	 * permission from the page. */
	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_return(address, size, &read_address, &read_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2218
2219 /**********************************/
2220 /* mach_vm_read() edge case tests */
2221 /**********************************/
2222
void
read_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
{
	/* mach_vm_read() of <size> bytes from a set of edge-case
	 * addresses must return <expected_kr> for every one of them. */
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	mach_vm_address_t addresses[] = {vm_page_size - 1,
	    vm_page_size,
	    vm_page_size + 1,
	    (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
	    (mach_vm_address_t)UINT_MAX,
	    (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
	    (mach_vm_address_t)UINTMAX_MAX};
	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
	for (i = 0; i < numofaddresses; i++) {
		kr = mach_vm_read(this_task, addresses[i], size, &read_address, &read_size);
		T_QUIET; T_ASSERT_EQ(kr, expected_kr,
		    "mach_vm_read() at "
		    "address 0x%jx unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
	}
	logv(
	    "mach_vm_read() returned expected value in each case: "
	    "%s.",
	    mach_error_string(expected_kr));
}
2254
2255 /* Reading 0 bytes always succeeds. */
void
test_read_zero_size()
{
	/* Zero-length reads succeed regardless of source address. */
	read_edge_size(0, KERN_SUCCESS);
}
2261
2262 /* Reading 4GB or higher always fails. */
void
test_read_invalid_large_size()
{
	/* Sizes of 4 GB or more are rejected outright — presumably
	 * because the out-size is a 32-bit mach_msg_type_number_t. */
	read_edge_size((mach_vm_size_t)UINT_MAX + 1, KERN_INVALID_ARGUMENT);
}
2268
2269 /* Reading a range wrapped around the address space fails. */
void
test_read_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each range's end (address + size) wraps past the top of the
	 * address space, making the range invalid. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
	    {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
	    {(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
	    {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
	    {(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv(
	    "Reading various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_read(this_task, ranges[i].address, ranges[i].size, &read_address, &read_size);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_read() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2303
2304 /********************************/
2305 /* mach_vm_read() pattern tests */
2306 /********************************/
2307
2308 /* Write a pattern on pre-allocated memory, read into a buffer and
2309 * verify the pattern on the buffer. */
void
write_read_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t address = get_vm_address();

	write_pattern(filter, reversed, address, get_vm_size(), pattern_name);
	/* NOTE(review): read_deallocate() appears to replace the
	 * fixture with the read buffer — the getters below return the
	 * buffer's address/size; confirm against its definition. */
	read_deallocate();
	/* Getting the address and size of the read buffer. */
	mach_vm_address_t read_address = get_vm_address();
	mach_vm_size_t read_size = get_vm_size();
	logv(
	    "Verifying %s pattern on buffer of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)read_address, (uintmax_t)read_size, (uintmax_t)read_size);
	/* The original start address is passed so address-dependent
	 * patterns are checked against the source's values. */
	filter_addresses_do_else(filter, reversed, read_address, read_size, verify_address, read_zero, address);
	logv("Pattern verified on destination buffer.");
}
2327
void
test_read_address_filled()
{
	/* Round-trip an address-filled pattern through mach_vm_read(). */
	write_read_verify_pattern(empty, TRUE, "address-filled");
}
2333
void
test_read_checkerboard()
{
	/* Round-trip a checkerboard pattern through mach_vm_read(). */
	write_read_verify_pattern(checkerboard, FALSE, "checkerboard");
}
2339
void
test_read_reverse_checkerboard()
{
	/* Round-trip the inverse checkerboard through mach_vm_read(). */
	write_read_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
}
2345
2346 /***********************************/
2347 /* mach_vm_write() edge case tests */
2348 /***********************************/
2349
2350 /* Writing in VM_MAP_NULL fails. */
void
test_write_null_map()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx in NULL VM MAP...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	/* A null task port fails at MIG send time. */
	assert_mach_return(mach_vm_write(VM_MAP_NULL, address, data, buffer_size), MACH_SEND_INVALID_DEST, "mach_vm_write()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2365
2366 /* Writing 0 bytes always succeeds. */
void
test_write_zero_size()
{
	/* A zero-byte write must succeed; write_buffer() performs the
	 * mach_vm_write() call and its assertions. */
	set_buffer_size(0);
	write_buffer();
}
2373
2374 /*****************************************/
2375 /* mach_vm_write() inaccessibility tests */
2376 /*****************************************/
2377
2378 /* Writing a partially deallocated buffer fails. */
void
test_write_partially_deallocated_buffer()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned address of a page near the middle of the
	 * source buffer. */
	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);

	logv(
	    "Deallocating a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)buffer_mid_point);
	assert_deallocate_success(buffer_mid_point, vm_page_size);
	logv("Page deallocated.");

	/* The hole in the source buffer makes the send-side copy-in
	 * fail with MACH_SEND_INVALID_MEMORY. */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
}
2401
2402 /* Writing a partially read-protected buffer fails. */
void
test_write_partially_unreadable_buffer()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);

	/* Setting the protection to VM_PROT_WRITE removes read
	 * permission from the buffer page. */
	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)buffer_mid_point);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), buffer_mid_point, vm_page_size, FALSE, VM_PROT_WRITE),
	    "mach_vm_protect()");
	logv("Page read-protected.");

	/* An unreadable source page makes the send-side copy-in fail
	 * with MACH_SEND_INVALID_MEMORY. */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
}
2426
2427 /* Writing on partially deallocated memory fails. */
void
test_write_on_partially_deallocated_range()
{
	mach_vm_address_t address = get_vm_address();
	/* First page of the destination range. */
	mach_vm_address_t start = mach_vm_trunc_page(address);
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	/* Writing into a destination with a missing page fails with
	 * KERN_INVALID_ADDRESS. */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2450
2451 /* Writing on partially unwritable memory fails. */
void
test_write_on_partially_unwritable_range()
{
	mach_vm_address_t address = get_vm_address();
	/* First page of the destination range. */
	mach_vm_address_t start = mach_vm_trunc_page(address);
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (buffer_size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	/* Setting the protection to VM_PROT_READ removes write
	 * permission from the destination page. */
	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2480
2481 /*********************************/
2482 /* mach_vm_write() pattern tests */
2483 /*********************************/
2484
2485 /* Verify that a zero-filled buffer and destination memory are still
2486 * zero-filled after writing. */
void
test_zero_filled_write()
{
	/* Check whole pages: truncate the start addresses down and
	 * round the lengths up so partial edge pages are covered. */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2494
2495 /* Write a pattern on a buffer, write the buffer into some destination
2496 * memory, and verify the pattern on both buffer and destination. */
2497 void
pattern_write(address_filter_t filter,boolean_t reversed,const char * pattern_name)2498 pattern_write(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2499 {
2500 mach_vm_address_t address = get_vm_address();
2501 mach_vm_size_t size = get_vm_size();
2502 mach_vm_address_t buffer_address = get_buffer_address();
2503 mach_vm_size_t buffer_size = get_buffer_size();
2504
2505 write_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2506 write_buffer();
2507 verify_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2508 logv(
2509 "Verifying %s pattern on destination of "
2510 "address 0x%jx and size 0x%jx (%ju)...",
2511 pattern_name, (uintmax_t)address, (uintmax_t)buffer_size, (uintmax_t)size);
2512 filter_addresses_do_else(filter, reversed, address, buffer_size, verify_address, read_zero, buffer_address);
2513 logv("Pattern verified on destination.");
2514 }
2515
void
test_address_filled_write()
{
	/* Round-trip an address-filled pattern through mach_vm_write(). */
	pattern_write(empty, TRUE, "address-filled");
}
2521
void
test_checkerboard_write()
{
	/* Round-trip a checkerboard pattern through mach_vm_write(). */
	pattern_write(checkerboard, FALSE, "checkerboard");
}
2527
void
test_reverse_checkerboard_write()
{
	/* Round-trip the inverse checkerboard through mach_vm_write(). */
	pattern_write(checkerboard, TRUE, "reverse checkerboard");
}
2533
2534 /**********************************/
2535 /* mach_vm_copy() edge case tests */
2536 /**********************************/
2537
2538 /* Copying in VM_MAP_NULL fails. */
void
test_copy_null_map()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Copying buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx in NULL VM MAP...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	/* A null task port fails at MIG send time. */
	assert_mach_return(mach_vm_copy(VM_MAP_NULL, source, size, dest), MACH_SEND_INVALID_DEST, "mach_vm_copy()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2553
/* Copy `size` bytes from a set of edge-case source addresses and
 * assert that every attempt returns `expected_kr`. Used to probe
 * zero-size and oversized copies. */
void
copy_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Source addresses at the edges of the address space: zero
	 * page, page boundaries, the 32-bit limit and the 64-bit limit. */
	mach_vm_address_t addresses[] = {0x0,
	    0x1,
	    vm_page_size - 1,
	    vm_page_size,
	    vm_page_size + 1,
	    (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
	    (mach_vm_address_t)UINT_MAX,
	    (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
	    (mach_vm_address_t)UINTMAX_MAX};
	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
	mach_vm_address_t dest = 0;

	/* Destination is a fixed 4096-byte allocation: each copy is
	 * expected to either succeed trivially (size 0) or fail before
	 * writing, so its capacity is presumably irrelevant — TODO
	 * confirm on 16K-page platforms. */
	logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);
	logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
	for (i = 0; i < numofaddresses; i++) {
		kr = mach_vm_copy(this_task, addresses[i], size, dest);
		T_QUIET; T_ASSERT_EQ(kr, expected_kr,
		    "mach_vm_copy() at "
		    "address 0x%jx unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
	}
	logv(
	    "mach_vm_copy() returned expected value in each case: "
	    "%s.",
	    mach_error_string(expected_kr));

	deallocate_range(dest, 4096);
}
2590
2591 /* Copying 0 bytes always succeeds. */
void
test_copy_zero_size()
{
	/* Zero-length copies succeed regardless of source address. */
	copy_edge_size(0, KERN_SUCCESS);
}
2597
/* Copying a range of almost 4 GB (UINT_MAX - 1 bytes) always fails. */
void
test_copy_invalid_large_size()
{
	/* No source region this large exists at the probed addresses,
	 * so every copy must fail with KERN_INVALID_ADDRESS. */
	copy_edge_size((mach_vm_size_t)UINT_MAX - 1, KERN_INVALID_ADDRESS);
}
2604
/* Copying a range wrapped around the address space fails. */
void
test_copy_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each (address, size) pair overflows past UINTMAX_MAX, i.e.
	 * wraps around the 64-bit address space. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
	mach_vm_address_t dest = 0;

	logv("Allocating 0x1000 (4096) bytes...");
	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);

	logv(
	    "Copying various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_copy(this_task, ranges[i].address, ranges[i].size, dest);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_copy() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));

	deallocate_range(dest, 4096);
}
2643
2644 /********************************/
2645 /* mach_vm_copy() pattern tests */
2646 /********************************/
2647
2648 /* Write a pattern on pre-allocated region, copy into another region
2649 * and verify the pattern in the region. */
void
write_copy_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t src_size = get_vm_size();
	/* Stamp the pattern onto the source region first. */
	write_pattern(filter, reversed, source, src_size, pattern_name);
	/* Getting the address and size of the dest region */
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_size_t dst_size = get_buffer_size();

	/* Note: the copy length is the destination's size, which is
	 * presumably <= the source's size — TODO confirm fixture. */
	logv(
	    "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)dst_size, (uintmax_t)dst_size, (uintmax_t)dest);
	assert_copy_success(source, dst_size, dest);
	logv(
	    "Verifying %s pattern in region of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)dest, (uintmax_t)dst_size, (uintmax_t)dst_size);
	/* The pattern values are derived from the source base address,
	 * hence `source` is passed as the verification seed. */
	filter_addresses_do_else(filter, reversed, dest, dst_size, verify_address, read_zero, source);
	logv("Pattern verified on destination region.");
}
2672
/* Copy a region whose every address holds a derived value
 * (empty filter, reversed) and verify the destination. */
void
test_copy_address_filled()
{
	write_copy_verify_pattern(empty, TRUE, "address-filled");
}
2678
/* Copy a checkerboard-patterned region and verify the destination. */
void
test_copy_checkerboard()
{
	write_copy_verify_pattern(checkerboard, FALSE, "checkerboard");
}
2684
/* Copy a reverse-checkerboard-patterned region and verify the
 * destination. */
void
test_copy_reverse_checkerboard()
{
	write_copy_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
}
2690
2691 /* Verify that a zero-filled source and destination memory are still
2692 * zero-filled after writing. */
void
test_zero_filled_copy_dest()
{
	/* Check the whole page-aligned extent of both the source region
	 * and the destination buffer for any stray nonzero bytes. */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2700
2701 /****************************************/
2702 /* mach_vm_copy() inaccessibility tests */
2703 /****************************************/
2704
2705 /* Copying partially deallocated memory fails. */
void
test_copy_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	/* Page-aligned address near the middle of the source range. */
	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
	mach_vm_address_t dest = 0;

	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	assert_deallocate_success(mid_point, vm_page_size);
	logv("Page deallocated.");

	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)source);

	/* The hole in the source must make the copy fail. */
	assert_allocate_copy_return(source, size, &dest, KERN_INVALID_ADDRESS);

	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));

	deallocate_range(dest, size);
}
2727
2728 /* Copy partially read-protected memory fails. */
void
test_copy_partially_unreadable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
	mach_vm_address_t dest = 0;

	/* For sizes < 1 page, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239. */
	kern_return_t kr_expected = (size < vm_page_size) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	/* VM_PROT_WRITE alone removes read permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)source);
	assert_allocate_copy_return(source, size, &dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));

	deallocate_range(dest, size);
}
2753
2754 /* Copying to a partially deallocated region fails. */
/* Copying to a partially deallocated region fails. */
void
test_copy_dest_partially_deallocated_region()
{
	mach_vm_address_t dest = get_vm_address();
	mach_vm_address_t source = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned mid-point of the *destination* region: this test
	 * punches a hole in the copy destination, not the source. */
	mach_vm_address_t dest_mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
	/* Was misspelled __MAC_OX_X_VERSION_MIN_REQUIRED, which is
	 * undefined (0), so the test body was never compiled in. */
#if __MAC_OS_X_VERSION_MIN_REQUIRED > 1080
	logv(
	    "Deallocating a mid-range destination page at address "
	    "0x%jx...",
	    (uintmax_t)dest_mid_point);
	assert_deallocate_success(dest_mid_point, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Copying region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
#else
	logv(
	    "Bypassing partially deallocated region test "
	    "(See <rdar://problem/12190999>)");
#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED > 1080 */
}
2782
2783 /* Copying from a partially deallocated region fails. */
void
test_copy_source_partially_deallocated_region()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned mid-point of the source range to deallocate. */
	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);

	logv(
	    "Deallocating a mid-range source page at address "
	    "0x%jx...",
	    (uintmax_t)source_mid_point);
	assert_deallocate_success(source_mid_point, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Copying region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2806
2807 /* Copying from a partially read-protected region fails. */
void
test_copy_source_partially_unreadable_region()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
	/* Small copies go through the kernel-buffer path and report
	 * KERN_INVALID_ADDRESS instead of KERN_PROTECTION_FAILURE;
	 * threshold here is two pages (cf. the one-page threshold in
	 * test_copy_partially_unreadable_range — presumably matching
	 * msg_ool_size_small, TODO confirm). */
	kern_return_t kr = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)mid_point);
	/* VM_PROT_WRITE alone removes read permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Copying region at address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);

	assert_copy_return(source, size, dest, kr);
	logv("Returned expected error: %s.", mach_error_string(kr));
}
2832
2833 /* Copying to a partially write-protected region fails. */
/* Copying to a partially write-protected region fails. */
void
test_copy_dest_partially_unwriteable_region()
{
	kern_return_t kr;
	mach_vm_address_t dest = get_vm_address();
	mach_vm_address_t source = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);

	/* Was misspelled __MAC_OX_X_VERSION_MIN_REQUIRED, which is
	 * undefined (0), so the test body was never compiled in. */
#if __MAC_OS_X_VERSION_MIN_REQUIRED > 1080
	/* VM_PROT_READ alone removes write permission, so this
	 * write-protects the page (log text previously said "read"). */
	logv(
	    "Write-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)mid_point);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");
	logv(
	    "Copying region at address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
	/* Sub-page copies use the kernel-buffer path and report a
	 * different error, as in the other partial-protection tests. */
	if (size >= vm_page_size) {
		kr = KERN_PROTECTION_FAILURE;
	} else {
		kr = KERN_INVALID_ADDRESS;
	}
	assert_copy_return(source, size, dest, kr);
	logv("Returned expected error: %s.", mach_error_string(kr));
#else
	logv(
	    "Bypassing partially unwriteable region test "
	    "(See <rdar://problem/12190999>)");
#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED > 1080 */
}
2867
2868 /* Copying on partially deallocated memory fails. */
void
test_copy_source_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page of the source range. */
	mach_vm_address_t start = mach_vm_trunc_page(source);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first source page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2891
/* Copying onto a partially deallocated destination fails. */
void
test_copy_dest_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page of the destination range. */
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2915
2916 /* Copying on partially unwritable memory fails. */
void
test_copy_dest_on_partially_unwritable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page of the destination range. */
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	/* VM_PROT_READ alone removes write permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2945
2946 /* Copying on partially unreadable memory fails. */
/* Copying on partially unreadable memory fails. */
void
test_copy_source_on_partially_unreadable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page of the *source* range (the log previously said
	 * "destination", inherited from the sibling test). */
	mach_vm_address_t start = mach_vm_trunc_page(source);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Read-protecting the first source page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	/* VM_PROT_WRITE alone removes read permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2975
2976 /********************************/
2977 /* mach_vm_protect() main tests */
2978 /********************************/
2979
/* Verify that the page-aligned extent of the fixture's region is
 * entirely zero-filled. */
void
test_zero_filled_extended()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
}
2985
2986 /* Allocated region is still zero-filled after read-protecting it and
2987 * then restoring read-access. */
void
test_zero_filled_readprotect()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size,
	    (size == 1) ? "" : "s", (uintmax_t)address);
	/* VM_PROT_DEFAULT (read|write) restores read access that the
	 * fixture presumably removed — TODO confirm fixture setup. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, VM_PROT_DEFAULT), "mach_vm_protect()");
	logv("Region has read access.");
	test_zero_filled_extended();
}
3000
/* Query the region with mach_vm_region() and check the given
 * protection bit: when the fixture size is nonzero the region is
 * expected to have been protected (bit cleared); when it is zero no
 * protection was applied (bit still set in VM_PROT_DEFAULT). */
void
verify_protection(vm_prot_t protection, const char * protection_name)
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_size_t original_size = size;
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t unused;

	logv(
	    "Verifying %s-protection on region of address 0x%jx and "
	    "size 0x%jx (%ju) with mach_vm_region()...",
	    protection_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	/* mach_vm_region() may update address and size in place. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(
	    mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused),
	    "mach_vm_region()");
	if (original_size) {
		/* Bit cleared => access removed => region is protected. */
		T_QUIET; T_ASSERT_EQ((info.protection & protection), 0,
		    "Region "
		    "is unexpectedly %s-unprotected.",
		    protection_name);
		logv("Region is %s-protected as expected.", protection_name);
	} else {
		T_QUIET; T_ASSERT_NE(info.protection & protection, 0,
		    "Region is "
		    "unexpectedly %s-protected.",
		    protection_name);
		logv("Region is %s-unprotected as expected.", protection_name);
	}
}
3032
/* Verify read access was removed from the fixture's region. */
void
test_verify_readprotection()
{
	verify_protection(VM_PROT_READ, "read");
}
3038
/* Verify write access was removed from the fixture's region. */
void
test_verify_writeprotection()
{
	verify_protection(VM_PROT_WRITE, "write");
}
3044
3045 /******************************/
3046 /* Protection bus error tests */
3047 /******************************/
3048
3049 /* mach_vm_protect() affects the smallest aligned region (integral
3050 * number of pages) containing the given range. */
3051
3052 /* Addresses in read-protected range are inaccessible. */
void
access_readprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Reading from %s 0x%jx of read-protected range...", position, (uintmax_t)address);
	/* This dereference is expected to kill the process with SIGBUS;
	 * the parent records the child's death as a pass. Reaching the
	 * assertion below therefore means the protection failed. */
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx."
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
3062
3063 /* Start of read-protected range is inaccessible. */
void
test_access_readprotected_range_start()
{
	/* First protected page: protection is applied page-aligned. */
	access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3069
3070 /* Middle of read-protected range is inaccessible. */
void
test_access_readprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Mid-point of the aligned (page-rounded) protected extent. */
	access_readprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3077
3078 /* End of read-protected range is inaccessible. */
void
test_access_readprotected_range_end()
{
	/* Last addressable slot of the aligned protected extent. */
	access_readprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3084
3085 /* Addresses in write-protected range are unwritable. */
void
write_writeprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Writing on %s 0x%jx of write-protected range...", position, (uintmax_t)address);
	/* This store is expected to kill the process with SIGBUS; the
	 * parent records the child's death as a pass. Reaching the
	 * assertion below therefore means the protection failed. */
	MACH_VM_ADDRESS_T(address) = 0x0;
	T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx."
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)address);
}
3095
3096 /* Start of write-protected range is unwritable. */
void
test_write_writeprotected_range_start()
{
	/* First protected page: protection is applied page-aligned. */
	write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3102
3103 /* Middle of write-protected range is unwritable. */
void
test_write_writeprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Mid-point of the aligned (page-rounded) protected extent. */
	write_writeprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3110
3111 /* End of write-protected range is unwritable. */
void
test_write_writeprotected_range_end()
{
	/* Last addressable slot of the aligned protected extent. */
	write_writeprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3117
3118 /*************************************/
3119 /* mach_vm_protect() edge case tests */
3120 /*************************************/
3121
/* Zero-size mach_vm_protect() succeeds at any address, including
 * unmapped and edge-of-address-space addresses. */
void
protect_zero_size(vm_prot_t protection, const char * protection_name)
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Edge-case addresses: zero page, page boundaries, the 32-bit
	 * limit and the 64-bit limit. */
	mach_vm_address_t addresses[] = {0x0,
	    0x1,
	    vm_page_size - 1,
	    vm_page_size,
	    vm_page_size + 1,
	    (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
	    (mach_vm_address_t)UINT_MAX,
	    (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
	    (mach_vm_address_t)UINTMAX_MAX};
	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);

	logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name);
	for (i = 0; i < numofaddresses; i++) {
		kr = mach_vm_protect(this_task, addresses[i], 0, FALSE, protection);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
		    "mach_vm_protect() at "
		    "address 0x%jx unexpectedly failed: %s.",
		    (uintmax_t)addresses[i], mach_error_string(kr));
	}
	logv("Protection successful.");
}
3149
/* Zero-size read-protection always succeeds. */
void
test_readprotect_zero_size()
{
	protect_zero_size(VM_PROT_READ, "Read");
}
3155
/* Zero-size write-protection always succeeds. */
void
test_writeprotect_zero_size()
{
	protect_zero_size(VM_PROT_WRITE, "Write");
}
3161
3162 /* Protecting a range wrapped around the address space fails. */
void
protect_wrapped_around_ranges(vm_prot_t protection, const char * protection_name)
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* Each (address, size) pair overflows past UINTMAX_MAX, i.e.
	 * wraps around the 64-bit address space. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{0x1, (mach_vm_size_t)UINTMAX_MAX},
		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "%s-protecting various memory ranges wrapping around the "
	    "address space...",
	    protection_name);
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_protect(this_task, ranges[i].address, ranges[i].size, FALSE, protection);
		/* Note: wrapped protect ranges fail with
		 * KERN_INVALID_ARGUMENT, unlike wrapped copy ranges
		 * which fail with KERN_INVALID_ADDRESS. */
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
		    "mach_vm_protect() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ARGUMENT));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
}
3195
/* Read-protecting a wrapped-around range fails. */
void
test_readprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_READ, "Read");
}
3201
/* Write-protecting a wrapped-around range fails. */
void
test_writeprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_WRITE, "Write");
}
3207
3208 /*******************/
3209 /* vm_copy() tests */
3210 /*******************/
3211
3212 /* Verify the address space is being shared. */
void
assert_share_mode(mach_vm_address_t address, unsigned share_mode, const char * share_mode_name)
{
	mach_vm_size_t size = get_vm_size();
	vm_region_extended_info_data_t info;
	mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
	mach_port_t unused;

	/*
	 * XXX Fails on UVM kernel. See <rdar://problem/12164664>
	 */
	/* `notyet` is intentionally undefined: the whole verification is
	 * compiled out until the radar above is resolved, leaving the
	 * locals above unused on the #else path. */
#if notyet /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
	logv(
	    "Verifying %s share mode on region of address 0x%jx and "
	    "size 0x%jx (%ju)...",
	    share_mode_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(
	    mach_vm_region(mach_task_self(), &address, &size, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&info, &count, &unused),
	    "mach_vm_region()");
	T_QUIET; T_ASSERT_EQ(info.share_mode, share_mode,
	    "Region's share mode "
	    " unexpectedly is not %s but %d.",
	    share_mode_name, info.share_mode);
	logv("Region has a share mode of %s as expected.", share_mode_name);
#else
	logv("Bypassing share_mode verification (See <rdar://problem/12164664>)");
#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
}
3241
3242 /* Do the vm_copy() and verify its success. */
/* Do the vm_copy() and verify its success.
 * `source_name` only labels the log output. The copy length is the
 * fixture's region size. */
void
assert_vmcopy_success(vm_address_t src, vm_address_t dst, const char * source_name)
{
	kern_return_t kr;
	mach_vm_size_t size = get_vm_size();

	logv("Copying (using mach_vm_copy()) from a %s source...", source_name);
	kr = mach_vm_copy(mach_task_self(), src, size, dst);
	/* Message fixed: previously read "designation address" and
	 * "unexpectly". */
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
	    "mach_vm_copy() with the source address "
	    "0x%jx, destination address 0x%jx, and size 0x%jx (%ju) unexpectedly "
	    "returned %s.\n Should have returned: %s.",
	    (uintmax_t)src, (uintmax_t)dst, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr),
	    mach_error_string(KERN_SUCCESS));
	logv("Copy (mach_vm_copy()) was successful as expected.");
}
3259
/* Fill the fixture-sized region at `address` with values seeded by
 * `start` (write_address applied to every address). */
void
write_region(mach_vm_address_t address, mach_vm_size_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, write_address, write_address, start);
}
3267
/* Verify the fixture-sized region at `address` holds the values that
 * write_region(address, start) would have written. */
void
verify_region(mach_vm_address_t address, mach_vm_address_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, verify_address, verify_address, start);
}
3275
3276 /* Perform the post vm_copy() action and verify its results. */
3277 void
modify_one_and_verify_all_regions(vm_address_t src,vm_address_t dst,vm_address_t shared_copied,boolean_t shared)3278 modify_one_and_verify_all_regions(vm_address_t src, vm_address_t dst, vm_address_t shared_copied, boolean_t shared)
3279 {
3280 mach_vm_size_t size = get_vm_size();
3281 int action = get_vmcopy_post_action();
3282
3283 /* Do the post vm_copy() action. */
3284 switch (action) {
3285 case VMCOPY_MODIFY_SRC:
3286 logv("Modifying: source%s...", shared ? " (shared with other region)" : "");
3287 write_region(src, 1);
3288 break;
3289
3290 case VMCOPY_MODIFY_DST:
3291 logv("Modifying: destination...");
3292 write_region(dst, 1);
3293 break;
3294
3295 case VMCOPY_MODIFY_SHARED_COPIED:
3296 /* If no shared_copied then no need to verify (nothing changed). */
3297 if (!shared_copied) {
3298 return;
3299 }
3300 logv("Modifying: shared/copied%s...", shared ? " (shared with source region)" : "");
3301 write_region(shared_copied, 1);
3302 break;
3303
3304 default:
3305 T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action);
3306 }
3307 logv("Modification was successful as expected.");
3308
3309 /* Verify all the regions with what is expected. */
3310 logv("Verifying: source... ");
3311 verify_region(src, (VMCOPY_MODIFY_SRC == action || (shared && VMCOPY_MODIFY_SHARED_COPIED == action)) ? 1 : 0);
3312 logv("destination... ");
3313 verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3314 if (shared_copied) {
3315 logv("shared/copied... ");
3316 verify_region(shared_copied, (VMCOPY_MODIFY_SHARED_COPIED == action || (shared && VMCOPY_MODIFY_SRC == action)) ? 1 : 0);
3317 }
3318 logv("Verification was successful as expected.");
3319 }
3320
3321 /* Test source being a simple fresh region. */
3322 void
test_vmcopy_fresh_source()3323 test_vmcopy_fresh_source()
3324 {
3325 mach_vm_size_t size = get_vm_size();
3326 mach_vm_address_t src, dst;
3327
3328 if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED) {
3329 /* No shared/copied region to modify so just return. */
3330 logv("No shared/copied region as expected.");
3331 return;
3332 }
3333
3334 assert_allocate_success(&src, size, TRUE);
3335
3336 assert_share_mode(src, SM_EMPTY, "SM_EMPTY");
3337
3338 write_region(src, 0);
3339
3340 assert_allocate_success(&dst, size, TRUE);
3341
3342 assert_vmcopy_success(src, dst, "freshly allocated");
3343
3344 modify_one_and_verify_all_regions(src, dst, 0, FALSE);
3345
3346 assert_deallocate_success(src, size);
3347 assert_deallocate_success(dst, size);
3348 }
3349
3350 /* Test source copied from a shared region. */
/* Test source copied from a shared region.
 * The source is marked VM_INHERIT_SHARE and a fork()d child performs
 * the copy; the parent then verifies the shared pages reflect (or
 * don't reflect) the child's modification. */
void
test_vmcopy_shared_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst, shared;
	int action = get_vmcopy_post_action();
	int pid, status;

	assert_allocate_success(&src, size, TRUE);

	/* Share (rather than copy) the source across fork(). */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_inherit(mach_task_self(), src, size, VM_INHERIT_SHARE), "mach_vm_inherit()");

	write_region(src, 0);

	pid = fork();
	if (pid == 0) {
		/* Verify that the child's 'src' is shared with the
		 * parent's src */
		assert_share_mode(src, SM_SHARED, "SM_SHARED");
		assert_allocate_success(&dst, size, TRUE);
		assert_vmcopy_success(src, dst, "shared");
		if (VMCOPY_MODIFY_SHARED_COPIED == action) {
			/* The shared region IS the source here, so
			 * handle this case inline instead of via
			 * modify_one_and_verify_all_regions(). */
			logv("Modifying: shared...");
			write_region(src, 1);
			logv("Modification was successful as expected.");
			logv("Verifying: source... ");
			verify_region(src, 1);
			logv("destination...");
			verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
			logv("Verification was successful as expected.");
		} else {
			modify_one_and_verify_all_regions(src, dst, 0, TRUE);
		}
		assert_deallocate_success(dst, size);
		exit(0);
	} else if (pid > 0) {
		/* In the parent the src becomes the shared */
		shared = src;
		wait(&status);
		if (WEXITSTATUS(status) != 0) {
			/* Propagate the child's failure status. */
			exit(status);
		}
		/* verify shared (shared with child's src) */
		logv("Verifying: shared...");
		verify_region(shared, (VMCOPY_MODIFY_SHARED_COPIED == action || VMCOPY_MODIFY_SRC == action) ? 1 : 0);
		logv("Verification was successful as expected.");
	} else {
		T_WITH_ERRNO; T_ASSERT_FAIL("fork failed");
	}

	assert_deallocate_success(src, size);
}
3403
3404 /* Test source copied from another mapping. */
3405 void
test_vmcopy_copied_from_source()3406 test_vmcopy_copied_from_source()
3407 {
3408 mach_vm_size_t size = get_vm_size();
3409 mach_vm_address_t src, dst, copied;
3410
3411 assert_allocate_success(&copied, size, TRUE);
3412 write_region(copied, 0);
3413
3414 assert_allocate_success(&src, size, TRUE);
3415
3416 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), copied, size, src), "mach_vm_copy()");
3417
3418 assert_share_mode(src, SM_COW, "SM_COW");
3419
3420 assert_allocate_success(&dst, size, TRUE);
3421
3422 assert_vmcopy_success(src, dst, "copied from");
3423
3424 modify_one_and_verify_all_regions(src, dst, copied, FALSE);
3425
3426 assert_deallocate_success(src, size);
3427 assert_deallocate_success(dst, size);
3428 assert_deallocate_success(copied, size);
3429 }
3430
3431 /* Test source copied to another mapping. */
3432 void
test_vmcopy_copied_to_source()3433 test_vmcopy_copied_to_source()
3434 {
3435 mach_vm_size_t size = get_vm_size();
3436 mach_vm_address_t src, dst, copied;
3437
3438 assert_allocate_success(&src, size, TRUE);
3439 write_region(src, 0);
3440
3441 assert_allocate_success(&copied, size, TRUE);
3442
3443 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), src, size, copied), "mach_vm_copy()");
3444
3445 assert_share_mode(src, SM_COW, "SM_COW");
3446
3447 assert_allocate_success(&dst, size, TRUE);
3448
3449 assert_vmcopy_success(src, dst, "copied to");
3450
3451 modify_one_and_verify_all_regions(src, dst, copied, FALSE);
3452
3453 assert_deallocate_success(src, size);
3454 assert_deallocate_success(dst, size);
3455 assert_deallocate_success(copied, size);
3456 }
3457
3458 /* Test a truedshared source copied. */
3459 void
test_vmcopy_trueshared_source()3460 test_vmcopy_trueshared_source()
3461 {
3462 mach_vm_size_t size = get_vm_size();
3463 mach_vm_address_t src = 0x0, dst, shared;
3464 vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3465 vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3466 mem_entry_name_port_t mem_obj;
3467
3468 assert_allocate_success(&shared, size, TRUE);
3469 write_region(shared, 0);
3470
3471 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)shared, cur_protect, &mem_obj,
3472 (mem_entry_name_port_t)NULL),
3473 "mach_make_memory_entry_64()");
3474 T_QUIET; T_ASSERT_MACH_SUCCESS(
3475 mach_vm_map(mach_task_self(), &src, size, 0, TRUE, mem_obj, 0, FALSE, cur_protect, max_protect, VM_INHERIT_NONE),
3476 "mach_vm_map()");
3477
3478 assert_share_mode(src, SM_TRUESHARED, "SM_TRUESHARED");
3479
3480 assert_allocate_success(&dst, size, TRUE);
3481
3482 assert_vmcopy_success(src, dst, "true shared");
3483
3484 modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3485
3486 assert_deallocate_success(src, size);
3487 assert_deallocate_success(dst, size);
3488 assert_deallocate_success(shared, size);
3489 }
3490
3491 /* Test a private aliazed source copied. */
3492 void
test_vmcopy_private_aliased_source()3493 test_vmcopy_private_aliased_source()
3494 {
3495 mach_vm_size_t size = get_vm_size();
3496 mach_vm_address_t src = 0x0, dst, shared;
3497 vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3498 vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3499
3500 assert_allocate_success(&shared, size, TRUE);
3501 write_region(shared, 0);
3502
3503 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_remap(mach_task_self(), &src, size, 0, TRUE, mach_task_self(), shared, FALSE, &cur_protect,
3504 &max_protect, VM_INHERIT_NONE),
3505 "mach_vm_remap()");
3506
3507 assert_share_mode(src, SM_PRIVATE_ALIASED, "SM_PRIVATE_ALIASED");
3508
3509 assert_allocate_success(&dst, size, TRUE);
3510
3511 assert_vmcopy_success(src, dst, "true shared");
3512
3513 modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3514
3515 assert_deallocate_success(src, size);
3516 assert_deallocate_success(dst, size);
3517 assert_deallocate_success(shared, size);
3518 }
3519
3520 /*************/
3521 /* VM Suites */
3522 /*************/
3523
/*
 * Drive the allocation test suites over the cross product of allocator
 * (mach_vm_allocate() and the mach_vm_map() variants), VM size, address
 * flag (ANYWHERE/FIXED) and fixed-address alignment, as configured in
 * the file-scope tables.  Each run_suite() call forks one process per
 * unit test; results are aggregated by the framework.
 */
void
run_allocate_test_suites()
{
	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
	 * error finding xnu major version number. */
	/* unsigned int xnu_version = xnu_major_version(); */

	UnitTests allocate_main_tests = {
		{"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
		{"Allocated address is page-aligned", test_aligned_address},
		{"Allocated memory is zero-filled", test_zero_filled},
		{"Write and verify address-filled pattern", test_write_address_filled},
		{"Write and verify checkerboard pattern", test_write_checkerboard},
		{"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard},
		{"Write and verify page ends pattern", test_write_page_ends},
		{"Write and verify page interiors pattern", test_write_page_interiors},
		{"Reallocate allocated pages", test_reallocate_pages},
	};
	UnitTests allocate_address_error_tests = {
		{"Allocate at address zero", test_allocate_at_zero},
		{"Allocate at a 2 MB boundary-unaligned, page-aligned "
		 "address",
		 test_allocate_2MB_boundary_unaligned_page_aligned_address},
	};
	UnitTests allocate_argument_error_tests = {
		{"Allocate in NULL VM map", test_allocate_in_null_map}, {"Allocate with kernel flags", test_allocate_with_kernel_flags},
	};
	UnitTests allocate_fixed_size_tests = {
		{"Allocate zero size", test_allocate_zero_size},
		{"Allocate overflowing size", test_allocate_overflowing_size},
		{"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint},
		{"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages},
	};
	UnitTests allocate_invalid_large_size_test = {
		{"Allocate invalid large size", test_allocate_invalid_large_size},
	};
	UnitTests mach_vm_map_protection_inheritance_error_test = {
		{"mach_vm_map() with invalid protection/inheritance "
		 "arguments",
		 test_mach_vm_map_protection_inheritance_error},
	};
	UnitTests mach_vm_map_large_mask_overflow_error_test = {
		{"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error},
	};

	/* Run the test suites with various allocators and VM sizes, and
	 * unspecified or fixed (page-aligned or page-unaligned),
	 * addresses. */
	for (allocators_idx = 0; allocators_idx < numofallocators; allocators_idx++) {
		for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
			for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
				for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
					/* An allocated address will be page-aligned. */
					/* Only run the zero size mach_vm_map() error tests in the
					 * unspecified address case, since we won't be able to retrieve a
					 * fixed address for allocation. See 8003930. */
					if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED) ||
					    (allocators_idx != MACH_VM_ALLOCATE && sizes_idx == ZERO_BYTES && flags_idx == FIXED)) {
						continue;
					}
					run_suite(set_up_allocator_and_vm_variables, allocate_argument_error_tests, do_nothing,
					    "%s argument error tests, %s%s address, "
					    "%s size: 0x%jx (%ju)",
					    allocators[allocators_idx].description, address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* mach_vm_map() only protection and inheritance error
					 * tests. */
					if (allocators_idx != MACH_VM_ALLOCATE) {
						run_suite(set_up_allocator_and_vm_variables, mach_vm_map_protection_inheritance_error_test, do_nothing,
						    "%s protection and inheritance "
						    "error test, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
					/* mach_vm_map() cannot allocate 0 bytes, see 8003930. */
					if (allocators_idx == MACH_VM_ALLOCATE || sizes_idx != ZERO_BYTES) {
						run_suite(set_up_allocator_and_vm_variables_and_allocate, allocate_main_tests, deallocate,
						    "%s main "
						    "allocation tests, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
				}
			}
			/* Address-error suites need only the allocator and size; the
			 * address is chosen by the tests themselves. */
			run_suite(set_up_allocator_and_vm_size, allocate_address_error_tests, do_nothing,
			    "%s address "
			    "error allocation tests, %s size: 0x%jx (%ju)",
			    allocators[allocators_idx].description, vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
			    (uintmax_t)vm_sizes[sizes_idx].size);
		}
		run_suite(set_up_allocator, allocate_fixed_size_tests, do_nothing, "%s fixed size allocation tests",
		    allocators[allocators_idx].description);
		/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
		 * error finding xnu major version number. */
		/* mach_vm_map() with a named entry triggers a panic with this test
		 * unless under xnu-1598 or later, see 8048580. */
		/* if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY
		 *  || xnu_version >= 1598) { */
		if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY) {
			run_suite(set_up_allocator, allocate_invalid_large_size_test, do_nothing, "%s invalid large size allocation test",
			    allocators[allocators_idx].description);
		}
	}
	/* mach_vm_map() only large mask overflow tests. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		run_suite(set_up_vm_size, mach_vm_map_large_mask_overflow_error_test, do_nothing,
		    "mach_vm_map() large mask overflow "
		    "error test, size: 0x%jx (%ju)",
		    (uintmax_t)vm_sizes[sizes_idx].size, (uintmax_t)vm_sizes[sizes_idx].size);
	}
}
3643
/*
 * Drive the mach_vm_deallocate() test suites over VM size, address flag
 * and alignment.  Accessing deallocated memory is expected to kill the
 * forked test process with SIGSEGV, which the framework treats as a
 * pass when the expected signal is set.
 */
void
run_deallocate_test_suites()
{
	UnitTests access_deallocated_memory_tests = {
		{"Read start of deallocated range", test_access_deallocated_range_start},
		{"Read middle of deallocated range", test_access_deallocated_range_middle},
		{"Read end of deallocated range", test_access_deallocated_range_end},
	};
	UnitTests deallocate_reallocate_tests = {
		{"Deallocate twice", test_deallocate_twice},
		{"Write pattern, deallocate, reallocate (deallocated "
		 "memory is inaccessible), and verify memory is "
		 "zero-filled",
		 test_write_pattern_deallocate_reallocate_zero_filled},
	};
	UnitTests deallocate_null_map_test = {
		{"Deallocate in NULL VM map", test_deallocate_in_null_map},
	};
	UnitTests deallocate_edge_case_tests = {
		{"Deallocate zero size ranges", test_deallocate_zero_size_ranges},
		{"Deallocate memory ranges whose end rounds to 0x0", test_deallocate_rounded_zero_end_ranges},
		{"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges},
	};
	UnitTests deallocate_suicide_test = {
		{"Deallocate whole address space", test_deallocate_suicide},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				/* Accessing deallocated memory should cause a segmentation
				 * fault. */
				/* Nothing gets deallocated if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					set_expected_signal(SIGSEGV);
					run_suite(set_up_vm_variables_and_allocate, access_deallocated_memory_tests, do_nothing,
					    "Deallocated memory access tests, "
					    "%s%s address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					set_expected_signal(0);
				}
				run_suite(set_up_vm_variables_and_allocate, deallocate_reallocate_tests, do_nothing,
				    "Deallocation and reallocation tests, %s%s "
				    "address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables, deallocate_null_map_test, do_nothing,
				    "mach_vm_deallocate() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
			}
		}
	}
	run_suite(do_nothing, deallocate_edge_case_tests, do_nothing, "Edge case deallocation tests");

	/* Deallocating the whole address space kills the test process with
	 * either SIGSEGV or SIGBUS; -1 accepts both. */
	set_expected_signal(-1); /* SIGSEGV or SIGBUS */
	run_suite(do_nothing, deallocate_suicide_test, do_nothing, "Whole address space deallocation test");
	set_expected_signal(0);
}
3720
/*
 * Drive the mach_vm_read() test suites over VM size, address flag and
 * alignment.  Zero-size reads are always accessible, so the
 * inaccessibility suites are skipped for ZERO_BYTES.
 */
void
run_read_test_suites()
{
	UnitTests read_main_tests = {
		{"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
		{"Read address has the correct boundary offset", test_read_address_offset},
		{"Reallocate read pages", test_reallocate_pages},
		{"Read and verify zero-filled memory", test_zero_filled},
	};
	UnitTests read_pattern_tests = {
		{"Read address-filled pattern", test_read_address_filled},
		{"Read checkerboard pattern", test_read_checkerboard},
		{"Read reverse checkerboard pattern", test_read_reverse_checkerboard},
	};
	UnitTests read_null_map_test = {
		{"Read from NULL VM map", test_read_null_map},
	};
	UnitTests read_edge_case_tests = {
		{"Read zero size", test_read_zero_size},
		{"Read invalid large size", test_read_invalid_large_size},
		{"Read wrapped around memory ranges", test_read_wrapped_around_ranges},
	};
	UnitTests read_inaccessible_tests = {
		{"Read partially decallocated memory", test_read_partially_deallocated_range},
		{"Read partially read-protected memory", test_read_partially_unreadable_range},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned) addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_read_deallocate, read_main_tests, deallocate,
				    "mach_vm_read() "
				    "main tests, %s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_pattern_tests, deallocate,
				    "mach_vm_read() pattern tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_null_map_test, deallocate_extra_page,
				    "mach_vm_read() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* A zero size range is always accessible. */
				if (sizes_idx != ZERO_BYTES) {
					run_suite(set_up_vm_variables_and_allocate_extra_page, read_inaccessible_tests, deallocate_extra_page,
					    "mach_vm_read() inaccessibility tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
				}
			}
		}
	}
	run_suite(do_nothing, read_edge_case_tests, do_nothing, "mach_vm_read() fixed size tests");
}
3796
/*
 * Drive the mach_vm_write() test suites over the cross product of
 * destination size/address/alignment and buffer size/boundary offset.
 * Edge-case and inaccessibility suites run for every combination; the
 * main and pattern suites require the buffer to fit the destination.
 */
void
run_write_test_suites()
{
	UnitTests write_main_tests = {
		{"Write and verify zero-filled memory", test_zero_filled_write},
	};
	UnitTests write_pattern_tests = {
		{"Write address-filled pattern", test_address_filled_write},
		{"Write checkerboard pattern", test_checkerboard_write},
		{"Write reverse checkerboard pattern", test_reverse_checkerboard_write},
	};
	UnitTests write_edge_case_tests = {
		{"Write into NULL VM map", test_write_null_map}, {"Write zero size", test_write_zero_size},
	};
	UnitTests write_inaccessible_tests = {
		{"Write partially decallocated buffer", test_write_partially_deallocated_buffer},
		{"Write partially read-protected buffer", test_write_partially_unreadable_buffer},
		{"Write on partially deallocated range", test_write_on_partially_deallocated_range},
		{"Write on partially write-protected range", test_write_on_partially_unwritable_range},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various destination sizes and
	 * unspecified or fixed (page-aligned or page-unaligned)
	 * addresses, and various buffer sizes and boundary offsets. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
					for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
						/* An allocated address will be page-aligned. */
						if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_edge_case_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() edge case tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						/* A zero size buffer is always accessible. */
						if (buffer_sizes_idx != ZERO_BYTES) {
							run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_inaccessible_tests,
							    deallocate_vm_and_buffer,
							    "mach_vm_write() inaccessibility tests, "
							    "%s%s address, %s size: 0x%jx (%ju), buffer "
							    "%s size: 0x%jx (%ju), buffer boundary "
							    "offset: %d",
							    address_flags[flags_idx].description,
							    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
							    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
							    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
							    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
							    buffer_offsets[offsets_idx].offset);
						}
						/* The buffer cannot be larger than the destination. */
						if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_write, write_main_tests, deallocate_vm_and_buffer,
						    "mach_vm_write() main tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_pattern_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() pattern tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
					}
				}
			}
		}
	}
}
3890
/*
 * Drive the mach_vm_protect() test suites over VM size, address flag
 * and alignment.  Accessing protected memory is expected to kill the
 * forked test process with SIGBUS, which the framework treats as a
 * pass when the expected signal is set.
 */
void
run_protect_test_suites()
{
	UnitTests readprotection_main_tests = {
		{"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect},
		{"Verify that region is read-protected iff size is "
		 "nonzero",
		 test_verify_readprotection},
	};
	UnitTests access_readprotected_memory_tests = {
		{"Read start of read-protected range", test_access_readprotected_range_start},
		{"Read middle of read-protected range", test_access_readprotected_range_middle},
		{"Read end of read-protected range", test_access_readprotected_range_end},
	};
	UnitTests writeprotection_main_tests = {
		{"Write-protect and verify zero-filled memory", test_zero_filled_extended},
		{"Verify that region is write-protected iff size is "
		 "nonzero",
		 test_verify_writeprotection},
	};
	UnitTests write_writeprotected_memory_tests = {
		{"Write at start of write-protected range", test_write_writeprotected_range_start},
		{"Write in middle of write-protected range", test_write_writeprotected_range_middle},
		{"Write at end of write-protected range", test_write_writeprotected_range_end},
	};
	UnitTests protect_edge_case_tests = {
		{"Read-protect zero size ranges", test_readprotect_zero_size},
		{"Write-protect zero size ranges", test_writeprotect_zero_size},
		{"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges},
		{"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 * fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_readprotect, readprotection_main_tests, deallocate_extra_page,
				    "Main read-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_allocate_writeprotect, writeprotection_main_tests, deallocate_extra_page,
				    "Main write-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* Nothing gets protected if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					set_expected_signal(SIGBUS);
					/* Accessing read-protected memory should cause a bus
					 * error. */
					run_suite(set_up_vm_variables_allocate_readprotect, access_readprotected_memory_tests, deallocate_extra_page,
					    "Read-protected memory access tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* Writing on write-protected memory should cause a bus
					 * error. */
					run_suite(set_up_vm_variables_allocate_writeprotect, write_writeprotected_memory_tests, deallocate_extra_page,
					    "Write-protected memory writing tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					set_expected_signal(0);
				}
			}
		}
	}
	run_suite(do_nothing, protect_edge_case_tests, do_nothing, "Edge case protection tests");
}
3977
3978 void
run_copy_test_suites()3979 run_copy_test_suites()
3980 {
3981 /* Copy tests */
3982 UnitTests copy_main_tests = {
3983 {"Copy and verify zero-filled memory", test_zero_filled_copy_dest},
3984 };
3985 UnitTests copy_pattern_tests = {
3986 {"Copy address-filled pattern", test_copy_address_filled},
3987 {"Copy checkerboard pattern", test_copy_checkerboard},
3988 {"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard},
3989 };
3990 UnitTests copy_edge_case_tests = {
3991 {"Copy with NULL VM map", test_copy_null_map},
3992 {"Copy zero size", test_copy_zero_size},
3993 {"Copy invalid large size", test_copy_invalid_large_size},
3994 {"Read wrapped around memory ranges", test_copy_wrapped_around_ranges},
3995 };
3996 UnitTests copy_inaccessible_tests = {
3997 {"Copy source partially decallocated region", test_copy_source_partially_deallocated_region},
3998 /* XXX */
3999 {"Copy destination partially decallocated region", test_copy_dest_partially_deallocated_region},
4000 {"Copy source partially read-protected region", test_copy_source_partially_unreadable_region},
4001 /* XXX */
4002 {"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region},
4003 {"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range},
4004 {"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range},
4005 {"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range},
4006 {"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range},
4007 };
4008
4009 UnitTests copy_shared_mode_tests = {
4010 {"Copy using freshly allocated source", test_vmcopy_fresh_source},
4011 {"Copy using shared source", test_vmcopy_shared_source},
4012 {"Copy using a \'copied from\' source", test_vmcopy_copied_from_source},
4013 {"Copy using a \'copied to\' source", test_vmcopy_copied_to_source},
4014 {"Copy using a true shared source", test_vmcopy_trueshared_source},
4015 {"Copy using a private aliased source", test_vmcopy_private_aliased_source},
4016 };
4017
4018 /* All allocations done with mach_vm_allocate(). */
4019 set_allocator(wrapper_mach_vm_allocate);
4020
4021 /* All the tests are done with page size regions. */
4022 set_vm_size(vm_page_size);
4023
4024 /* Run the test suites with various shared modes for source */
4025 for (vmcopy_action_idx = 0; vmcopy_action_idx < numofvmcopyactions; vmcopy_action_idx++) {
4026 run_suite(set_up_copy_shared_mode_variables, copy_shared_mode_tests, do_nothing, "Copy shared mode tests, %s",
4027 vmcopy_actions[vmcopy_action_idx].description);
4028 }
4029
4030 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
4031 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
4032 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
4033 for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
4034 for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
4035 /* An allocated address will be page-aligned. */
4036 if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
4037 continue;
4038 }
4039 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_edge_case_tests,
4040 deallocate_vm_and_buffer,
4041 "mach_vm_copy() edge case tests, %s%s address, %s "
4042 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
4043 "buffer boundary offset: %d",
4044 address_flags[flags_idx].description,
4045 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4046 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4047 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4048 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4049 buffer_offsets[offsets_idx].offset);
4050 /* The buffer cannot be larger than the destination. */
4051 if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
4052 continue;
4053 }
4054
4055 /* A zero size buffer is always accessible. */
4056 if (buffer_sizes_idx != ZERO_BYTES) {
4057 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_inaccessible_tests,
4058 deallocate_vm_and_buffer,
4059 "mach_vm_copy() inaccessibility tests, "
4060 "%s%s address, %s size: 0x%jx (%ju), buffer "
4061 "%s size: 0x%jx (%ju), buffer boundary "
4062 "offset: %d",
4063 address_flags[flags_idx].description,
4064 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4065 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4066 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4067 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4068 buffer_offsets[offsets_idx].offset);
4069 }
4070 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_main_tests, deallocate_vm_and_buffer,
4071 "mach_vm_copy() main tests, %s%s address, %s "
4072 "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
4073 "destination boundary offset: %d",
4074 address_flags[flags_idx].description,
4075 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4076 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4077 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4078 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4079 buffer_offsets[offsets_idx].offset);
4080 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_pattern_tests, deallocate_vm_and_buffer,
4081 "mach_vm_copy() pattern tests, %s%s address, %s "
4082 "size: 0x%jx (%ju) destination %s size: 0x%jx (%ju), "
4083 "destination boundary offset: %d",
4084 address_flags[flags_idx].description,
4085 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4086 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4087 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4088 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4089 buffer_offsets[offsets_idx].offset);
4090 }
4091 }
4092 }
4093 }
4094 }
4095 }
4096
4097 void
perform_test_with_options(test_option_t options)4098 perform_test_with_options(test_option_t options)
4099 {
4100 process_options(options);
4101
4102 /* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
4103 * error finding xnu major version number. */
4104 /* printf("xnu version is %s.\n\n", xnu_version_string()); */
4105
4106 if (flag_run_allocate_test) {
4107 run_allocate_test_suites();
4108 }
4109
4110 if (flag_run_deallocate_test) {
4111 run_deallocate_test_suites();
4112 }
4113
4114 if (flag_run_read_test) {
4115 run_read_test_suites();
4116 }
4117
4118 if (flag_run_write_test) {
4119 run_write_test_suites();
4120 }
4121
4122 if (flag_run_protect_test) {
4123 run_protect_test_suites();
4124 }
4125
4126 if (flag_run_copy_test) {
4127 run_copy_test_suites();
4128 }
4129
4130 log_aggregated_results();
4131 }
4132
4133 T_DECL(vm_test_allocate, "Allocate VM unit test")
4134 {
4135 test_options.to_flags = VM_TEST_ALLOCATE;
4136 test_options.to_vmsize = 0;
4137 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4138
4139 perform_test_with_options(test_options);
4140 }
4141
4142 T_DECL(vm_test_deallocate, "Deallocate VM unit test",
4143 T_META_IGNORECRASHES(".*vm_allocation.*"))
4144 {
4145 test_options.to_flags = VM_TEST_DEALLOCATE;
4146 test_options.to_vmsize = 0;
4147 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4148
4149 perform_test_with_options(test_options);
4150 }
4151
4152 T_DECL(vm_test_read, "Read VM unit test")
4153 {
4154 test_options.to_flags = VM_TEST_READ;
4155 test_options.to_vmsize = 0;
4156 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4157
4158 perform_test_with_options(test_options);
4159 }
4160
4161 T_DECL(vm_test_write, "Write VM unit test")
4162 {
4163 test_options.to_flags = VM_TEST_WRITE;
4164 test_options.to_vmsize = 0;
4165 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4166
4167 perform_test_with_options(test_options);
4168 }
4169
4170 T_DECL(vm_test_protect, "Protect VM unit test",
4171 T_META_IGNORECRASHES(".*vm_allocation.*"))
4172 {
4173 test_options.to_flags = VM_TEST_PROTECT;
4174 test_options.to_vmsize = 0;
4175 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4176
4177 perform_test_with_options(test_options);
4178 }
4179
4180 T_DECL(vm_test_copy, "Copy VM unit test")
4181 {
4182 test_options.to_flags = VM_TEST_COPY;
4183 test_options.to_vmsize = 0;
4184 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4185
4186 perform_test_with_options(test_options);
4187 }
4188