/* xref: /xnu-12377.81.4/tests/vm/vm_allocation.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796) */
1 /* Mach virtual memory unit tests
2  *
3  * The main goal of this code is to facilitate the construction,
4  * running, result logging and clean up of a test suite, taking care
5  * of all the scaffolding. A test suite is a sequence of very targeted
6  * unit tests, each running as a separate process to isolate its
7  * address space.
8  * A unit test is abstracted as a unit_test_t structure, consisting of
9  * a test function and a logging identifier. A test suite is a suite_t
 * structure, consisting of a unit_test_t array, fixture set up and
11  * tear down functions.
12  * Test suites are created dynamically. Each of its unit test runs in
13  * its own fork()d process, with the fixture set up and tear down
14  * running before and after each test. The parent process will log a
15  * pass result if the child exits normally, and a fail result in any
16  * other case (non-zero exit status, abnormal signal). The suite
17  * results are then aggregated and logged after the [SUMMARY] keyword,
18  * and finally the test suite is destroyed.
19  * The included test suites cover the Mach memory allocators,
20  * mach_vm_allocate() and mach_vm_map() with various options, and
21  * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(),
22  * mach_vm_protect(), mach_vm_copy().
23  *
24  * Author: Renaud Dreyer ([email protected])
25  *
26  * Transformed to libdarwintest by Tristan Ye ([email protected]) */
27 
28 #include <darwintest.h>
29 
30 #include <stdlib.h>
31 #include <ctype.h>
32 #include <inttypes.h>
33 #include <stdio.h>
34 #include <math.h>
35 #include <errno.h>
36 #include <signal.h>
37 #include <getopt.h>
38 #include <mach/mach.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_vm.h>
41 #include <sys/sysctl.h>
42 #include <time.h>
43 #include <stdbool.h>
44 
/* darwintest metadata applied to every test in this file: namespace
 * and Radar routing for the xnu VM component. */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"));
49 
50 /**************************/
51 /**************************/
52 /* Unit Testing Framework */
53 /**************************/
54 /**************************/
55 
56 /*********************/
57 /* Private interface */
58 /*********************/
59 
/* Type for test, fixture set up and fixture tear down functions.
 * Empty (K&R) parameter list: these functions take no arguments. */
typedef void (*test_fn_t)();

/* Unit test structure. */
typedef struct {
	const char * name;      /* logging identifier */
	test_fn_t test;         /* test body, run in a fork()d child */
	int expected_signal;    /* signal the child should die with; 0 = normal exit */
} unit_test_t;

/* Test suite structure. */
typedef struct {
	const char * name;      /* logging identifier */
	int numoftests;         /* number of entries in tests[] */
	test_fn_t set_up;       /* fixture set up, run before each test */
	unit_test_t * tests;    /* array of unit tests */
	test_fn_t tear_down;    /* fixture tear down, run after each test */
} suite_t;
78 
/* Logging verbosity: 0 = verbose; higher values suppress more output
 * (see logr()/logv() and the *_QUIETNESS constants below). */
int _quietness        = 0;
/* Signal the current child test is expected to die with; 0 = a clean
 * exit is expected, -1 accepts SIGBUS/SIGSEGV/SIGKILL (see
 * child_terminated_normally()). */
int _expected_signal  = 0;
/* Signal expected from a fatal VM EXC_GUARD; set in process_options(). */
int _expected_vm_exc_guard_signal = 0;

/* Pass/fail counters aggregated across all suites and reported by
 * log_aggregated_results(). */
struct {
	uintmax_t numoftests;
	uintmax_t passed_tests;
} results = {0, 0};
87 
/* Log at "result and error" level: suppressed only when quietness > 1. */
#define logr(format, ...) \
	do { \
	        if (_quietness <= 1) { \
	                T_LOG(format, ## __VA_ARGS__); \
	        } \
	} while (0)

/* Log at full verbosity: emitted only when quietness == 0. */
#define logv(format, ...) \
	do { \
	        if (_quietness == 0) { \
	                T_LOG(format, ## __VA_ARGS__); \
	        } \
	} while (0)
101 
102 static suite_t *
create_suite(const char * name,int numoftests,test_fn_t set_up,unit_test_t * tests,test_fn_t tear_down)103 create_suite(const char * name, int numoftests, test_fn_t set_up, unit_test_t * tests, test_fn_t tear_down)
104 {
105 	suite_t * suite = (suite_t *)malloc(sizeof(suite_t));
106 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(suite, "malloc()");
107 
108 	suite->name       = name;
109 	suite->numoftests = numoftests;
110 	suite->set_up     = set_up;
111 	suite->tests      = tests;
112 	suite->tear_down  = tear_down;
113 	return suite;
114 }
115 
/* Release a suite created by create_suite(). Only the suite_t itself
 * is freed; the name and tests arrays are not owned here. */
static void
destroy_suite(suite_t * suite)
{
	free(suite);
}
121 
/* Emit the [TEST] header and test count for a suite. */
static void
log_suite_info(suite_t * suite)
{
	logr("[TEST] %s", suite->name);
	logr("Number of tests: %d", suite->numoftests);
}
128 
/* Fold one suite's outcome into the global counters. Despite the
 * name this only accumulates; the summary itself is printed later by
 * log_aggregated_results(). */
static void
log_suite_results(suite_t * suite, int passed_tests)
{
	results.numoftests += (uintmax_t)suite->numoftests;
	results.passed_tests += (uintmax_t)passed_tests;
}
135 
136 static void
log_test_info(unit_test_t * unit_test,unsigned test_num)137 log_test_info(unit_test_t * unit_test, unsigned test_num)
138 {
139 	if (unit_test->expected_signal) {
140 		logr("[BEGIN] #%04d: %s, SIGNAL(%d) expected", test_num, unit_test->name, unit_test->expected_signal);
141 	} else {
142 		logr("[BEGIN] #%04d: %s", test_num, unit_test->name);
143 	}
144 }
145 
146 static void
log_test_result(unit_test_t * unit_test,boolean_t test_passed,unsigned test_num)147 log_test_result(unit_test_t * unit_test, boolean_t test_passed, unsigned test_num)
148 {
149 	logr("[%s] #%04d: %s", test_passed ? "PASS" : "FAIL", test_num, unit_test->name);
150 }
151 
/* Run a test with fixture set up and teardown, while enforcing the
 * time out constraint. */
static void
run_test(suite_t * suite, unit_test_t * unit_test, unsigned test_num)
{
	log_test_info(unit_test, test_num);

	/* Executed inside the fork()d child (see child_test_passed()), so
	 * a crash or failed assertion here only takes down the child. */
	suite->set_up();
	unit_test->test();
	suite->tear_down();
}
163 
/* Expected signal for a test, default is 0 (normal exit expected).
 * -1 acts as a wildcard accepting SIGBUS/SIGSEGV/SIGKILL; see
 * child_terminated_normally(). */
void
set_expected_signal(int signal)
{
	_expected_signal = signal;
}

int
get_expected_signal()
{
	return _expected_signal;
}
176 
/* Check a child return status.
 * Returns TRUE iff the wait status matches expectations: a zero exit
 * when no signal is expected, or death by the expected signal.
 * _expected_signal == -1 is a wildcard accepting SIGBUS, SIGSEGV or
 * SIGKILL. Every mismatch is logged before returning FALSE. */
static boolean_t
child_terminated_normally(int child_status)
{
	boolean_t normal_exit = FALSE;

	if (WIFEXITED(child_status)) {
		int exit_status = WEXITSTATUS(child_status);
		if (exit_status) {
			/* Non-zero exit is always a failure. */
			T_LOG("Child process unexpectedly exited with code %d.",
			    exit_status);
		} else if (!_expected_signal) {
			normal_exit = TRUE;
		} else {
			/* Clean exit, but the test was supposed to die. */
			T_LOG(
				"Child process unexpectedly exited with zero, "
				"where signal %d is expected.", _expected_signal);
		}
	} else if (WIFSIGNALED(child_status)) {
		int signal = WTERMSIG(child_status);
		if (signal == _expected_signal ||
		    (_expected_signal == -1 && (signal == SIGBUS || signal == SIGSEGV || signal == SIGKILL))) {
			if (_quietness <= 0) {
				T_LOG("Child process died with expected signal "
				    "%d.", signal);
			}
			normal_exit = TRUE;
		} else {
			T_LOG("Child process unexpectedly died with signal %d.",
			    signal);
		}
	} else {
		/* Neither exited nor signaled (e.g. stopped): treat as failure. */
		T_LOG("Child process unexpectedly did not exit nor die");
	}

	return normal_exit;
}
214 
/* Run a test in its own process, and report the result.
 * Forks, runs the test (with fixtures) in the child, waits for it,
 * and judges the wait status via child_terminated_normally(). */
static boolean_t
child_test_passed(suite_t * suite, unit_test_t * unit_test)
{
	int test_status;
	/* Monotonic test counter across all suites in this process. */
	static unsigned test_num = 0;
	boolean_t use_default_expected_signal = FALSE;

	test_num++;

	pid_t test_pid = fork();
	T_QUIET; T_ASSERT_POSIX_SUCCESS(test_pid, "fork()");
	if (!test_pid) {
		/* Child: run the test and exit cleanly. Any other exit path
		 * (signal, non-zero status) is judged by the parent. */
		run_test(suite, unit_test, test_num);
		exit(0);
	}
	/* Parent: wait for this specific child. */
	while (waitpid(test_pid, &test_status, 0) != test_pid) {
		continue;
	}

	/*
	 * Allow overriding unit_test's default expected signal
	 */
	if ((get_expected_signal() == 0) &&
	    (unit_test->expected_signal != 0)) {
		set_expected_signal(unit_test->expected_signal);
		use_default_expected_signal = TRUE;
	}
	boolean_t test_result = child_terminated_normally(test_status);
	log_test_result(unit_test, test_result, test_num);
	if (use_default_expected_signal) {
		/* Restore the global so the next test starts clean. */
		set_expected_signal(0);
	}

	return test_result;
}
251 
252 /* Run each test in a suite, and report the results. */
253 static int
count_passed_suite_tests(suite_t * suite)254 count_passed_suite_tests(suite_t * suite)
255 {
256 	int passed_tests = 0;
257 	int i;
258 
259 	for (i = 0; i < suite->numoftests; i++) {
260 		passed_tests += child_test_passed(suite, &(suite->tests[i]));
261 	}
262 	return passed_tests;
263 }
264 
265 /********************/
266 /* Public interface */
267 /********************/
268 
#define DEFAULT_QUIETNESS    0 /* verbose */
#define RESULT_ERR_QUIETNESS 1 /* result and error */
#define ERROR_ONLY_QUIETNESS 2 /* error only */

/* Convenience wrapper deriving the test count from the array size.
 * Only valid when `tests` is an actual array, not a pointer. */
#define run_suite(set_up, tests, tear_down, ...) \
	_run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__)

typedef unit_test_t UnitTests[];

/* The printf-style format/... arguments build the suite's name. */
void _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
__printflike(5, 6);
280 
/* Create a suite named by the printf-style arguments, run all of its
 * tests (each in a child process), accumulate the results into the
 * global counters, then destroy the suite. */
void
_run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
{
	va_list ap;
	char * name;

	va_start(ap, format);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(vasprintf(&name, format, ap), "vasprintf()");
	va_end(ap);
	suite_t * suite = create_suite(name, numoftests, set_up, tests, tear_down);
	log_suite_info(suite);
	log_suite_results(suite, count_passed_suite_tests(suite));
	/* name was allocated by vasprintf() and only borrowed by the suite. */
	free(name);
	destroy_suite(suite);
}
296 
297 /* Setters and getters for various test framework global
298  * variables. Should only be used outside of the test, set up and tear
299  * down functions. */
300 
/* Logging verbosity. Higher values suppress more output; see the
 * logr()/logv() macros and the *_QUIETNESS constants. */
void
set_quietness(int value)
{
	_quietness = value;
}

int
get_quietness()
{
	return _quietness;
}
313 
/* For fixture set up and tear down functions, and unit tests.
 * Intentionally empty: used wherever a suite needs no fixture work. */
void
do_nothing()
{
}
319 
/* Print the [SUMMARY] section from the global counters and assert
 * that every test across all suites passed. */
void
log_aggregated_results()
{
	T_LOG("[SUMMARY] Aggregated Test Results");
	T_LOG("Total: %ju", results.numoftests);
	T_LOG("Passed: %ju", results.passed_tests);
	T_LOG("Failed: %ju", results.numoftests - results.passed_tests);

	T_ASSERT_EQ(results.passed_tests, results.numoftests,
	    "%ju passed of total %ju tests",
	    results.passed_tests, results.numoftests);
}
332 
333 /*******************************/
334 /*******************************/
335 /* Virtual memory unit testing */
336 /*******************************/
337 /*******************************/
338 
339 /* Test exit values:
340  * 0: pass
341  * 1: fail, generic unexpected failure
342  * 2: fail, unexpected Mach return value
343  * 3: fail, time out */
344 
/* Default allocation size: 1024 pages of 4 kB. */
#define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL))

/* Reinterpret an integer address as a char pointer, or as the
 * location of a mach_vm_address_t value. */
#define POINTER(address) ((char *)(uintptr_t)(address))
#define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address)))

/* sizeof(mach_vm_address_t), kept as an int; presumably the stride
 * used when writing address-sized markers — confirm in later tests. */
static int vm_address_size = sizeof(mach_vm_address_t);
351 
352 /*************************/
353 /* Mach assert functions */
354 /*************************/
355 
/* Assert a Mach call returned exactly expected_kr; the outer check
 * keeps the assertion machinery off the hot path when it would pass. */
#define assert_mach_return(kr, expected_kr, format, ...)                \
	do {                                                            \
	/* fixme T_QUIET is not working */                      \
	        if ((kr) != (expected_kr)) {                            \
	                T_QUIET; T_ASSERT_MACH_ERROR(kr, expected_kr, format, ##__VA_ARGS__); \
	        }                                                       \
	} while (0)

/* Assert a Mach call returned KERN_SUCCESS. */
#define assert_mach_success(kr, format, ...)                            \
	do {                                                            \
	/* fixme T_QUIET is not working */                      \
	        if (kr != KERN_SUCCESS) {                               \
	                T_QUIET; T_ASSERT_MACH_SUCCESS(kr, format, ##__VA_ARGS__); \
	        }                                                       \
	} while (0)                                                     \

/* Assert a Mach call returned anything but KERN_SUCCESS. */
#define assert_mach_failure(kr, format, ...)                            \
	do {                                                            \
	/* fixme T_QUIET is not working */                      \
	        if (kr == KERN_SUCCESS) {                               \
	                T_QUIET; T_ASSERT_NE(kr, KERN_SUCCESS, format, ##__VA_ARGS__); \
	        }                                                       \
	} while (0)                                                     \
379 
380 /* Determine if TASK_EXC_GUARD_VM_FATAL is enabled for task */
381 static boolean_t
get_task_exc_guard_vm_fatal(void)382 get_task_exc_guard_vm_fatal(void)
383 {
384 	task_exc_guard_behavior_t behavior;
385 
386 	assert_mach_success(task_get_exc_guard_behavior(mach_task_self(), &behavior), "task_get_exc_guard_behavior");
387 	if ((behavior & TASK_EXC_GUARD_VM_DELIVER) &&
388 	    (behavior & TASK_EXC_GUARD_VM_FATAL)) {
389 		return TRUE;
390 	} else {
391 		return FALSE;
392 	}
393 }
394 
395 /*******************************/
396 /* Arrays for test suite loops */
397 /*******************************/
398 
/* Memory allocators: every wrapper below shares this signature so
 * the suites can iterate over them interchangeably. */
typedef kern_return_t (*allocate_fn_t)(vm_map_t, mach_vm_address_t *, mach_vm_size_t, int);


/*
 * Remember any pre-reserved fixed address, which needs to be released prior to an allocation.
 */
static mach_vm_address_t fixed_vm_address = 0x0;
static mach_vm_size_t fixed_vm_size = 0;

/* forward decl */
void assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size);
411 
/*
 * If trying to allocate at a fixed address, we need to do the delayed deallocate first.
 */
static void
check_fixed_address(mach_vm_address_t *address, mach_vm_size_t size)
{
	/* Release the reservation only when the requested range lies
	 * entirely inside it, then clear the bookkeeping. */
	if (fixed_vm_address != 0 &&
	    fixed_vm_address <= *address &&
	    *address + size <= fixed_vm_address + fixed_vm_size) {
		assert_deallocate_success(fixed_vm_address, fixed_vm_size);
		fixed_vm_address = 0;
		fixed_vm_size = 0;
	}
}
426 
/* Allocate via mach_vm_allocate(), after releasing any pre-reserved
 * fixed range covering the request. */
kern_return_t
wrapper_mach_vm_allocate(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_allocate(map, address, size, flags);
}

/* Allocate anonymous memory via mach_vm_map() with a zero alignment
 * mask and no backing object (MACH_PORT_NULL). */
kern_return_t
wrapper_mach_vm_map(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}

/* Should have the same behavior as when mask is zero. */
kern_return_t
wrapper_mach_vm_map_4kB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	/* 0xFFF mask requests 4 kB address alignment. */
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0xFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}

/* Allocate via mach_vm_map() with a 0x1FFFFF mask, requesting 2 MB
 * address alignment. */
kern_return_t
wrapper_mach_vm_map_2MB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0x1FFFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	           VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
458 
/* Create a named memory entry (MAP_MEM_NAMED_CREATE, VM_PROT_ALL) of
 * at least *size bytes. On success *size is updated to the created
 * size, which is asserted to equal round_page(original size); any
 * mach_make_memory_entry_64() failure is returned unchanged. */
kern_return_t
memory_entry(mach_vm_size_t * size, mach_port_t *object_handle)
{
	mach_vm_size_t original_size = *size;
	kern_return_t kr;

	kr = mach_make_memory_entry_64(mach_task_self(), size,
	    (memory_object_offset_t)0, (MAP_MEM_NAMED_CREATE | VM_PROT_ALL),
	    object_handle, 0);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	T_QUIET; T_ASSERT_EQ(*size, round_page(original_size),
	    "mach_make_memory_entry_64() unexpectedly returned a named "
	    "entry of size 0x%jx (%ju). "
	    "Should have returned a "
	    "named entry of size 0x%jx (%ju).",
	    (uintmax_t)*size, (uintmax_t)*size, (uintmax_t)original_size, (uintmax_t)original_size);
	return KERN_SUCCESS;
}
479 
/* Allocate by mapping a freshly created named entry (zero mask).
 * The entry port is deallocated before returning, whatever the
 * mach_vm_map() result, so no port leaks on failure. */
kern_return_t
wrapper_mach_vm_map_named_entry(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	mach_port_t object_handle = MACH_PORT_NULL;
	/* memory_entry() may round size up to a page boundary. */
	kern_return_t kr = memory_entry(&size, &object_handle);

	if (kr != KERN_SUCCESS) {
		return kr;
	}
	check_fixed_address(address, size);
	kr = mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, object_handle, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	assert_mach_success(mach_port_deallocate(mach_task_self(), object_handle), "mach_port_deallocate()");
	return kr;
}
495 
/* Table of allocators exercised by the suites; the enum below indexes
 * it, so the orders must match. */
static struct {
	allocate_fn_t allocate;
	const char * description;
} allocators[] = {
	{wrapper_mach_vm_allocate, "mach_vm_allocate()"},
	{wrapper_mach_vm_map, "mach_vm_map() (zero mask)"},
	{wrapper_mach_vm_map_4kB,
	 "mach_vm_map() "
	 "(4 kB address alignment)"},
	{wrapper_mach_vm_map_2MB,
	 "mach_vm_map() "
	 "(2 MB address alignment)"},
	{wrapper_mach_vm_map_named_entry,
	 "mach_vm_map() (named "
	 "entry, zero mask)"},
};
static int numofallocators = sizeof(allocators) / sizeof(allocators[0]);
static int allocators_idx;	/* current index into allocators[] */
enum { MACH_VM_ALLOCATE, MACH_VM_MAP, MACH_VM_MAP_4kB, MACH_VM_MAP_2MB, MACH_VM_MAP_NAMED_ENTRY };

/* VM size */
static struct {
	mach_vm_size_t size;
	const char * description;
} vm_sizes[] = {
	{DEFAULT_VM_SIZE, "default/input"},	/* may be overridden by options.to_vmsize */
	{0, "zero"},
	{4096ULL, "aligned"},
	{1ULL, "unaligned"},
	{4095ULL, "unaligned"},
	{4097ULL, "unaligned"},
};
static int numofsizes = sizeof(vm_sizes) / sizeof(vm_sizes[0]);
static int sizes_idx;		/* current index into vm_sizes[] for the region */
static int buffer_sizes_idx;	/* current index into vm_sizes[] for the write buffer */
enum { DEFAULT_INPUT, ZERO_BYTES, ONE_PAGE, ONE_BYTE, ONE_PAGE_MINUS_ONE_BYTE, ONE_PAGE_AND_ONE_BYTE };
532 
/* Unspecified/fixed address */
static struct {
	int flag;
	const char * description;
} address_flags[] = {
	{VM_FLAGS_ANYWHERE, "unspecified"}, {VM_FLAGS_FIXED, "fixed"},
};
static int numofflags = sizeof(address_flags) / sizeof(address_flags[0]);
static int flags_idx;	/* current index into address_flags[] */
enum { ANYWHERE, FIXED };

/* Address alignment */
static struct {
	boolean_t alignment;
	const char * description;
} address_alignments[] = {
	{TRUE, " aligned"}, {FALSE, " unaligned"},
};
static int numofalignments = sizeof(address_alignments) / sizeof(*address_alignments);
static int alignments_idx;	/* current index into address_alignments[] */
enum { ALIGNED, UNALIGNED };

/* Buffer offset (in bytes) applied to the mach_vm_write() buffer. */
static struct {
	int offset;
	const char * description;
} buffer_offsets[] = {
	{0, ""}, {1, ""}, {2, ""},
};
static int numofoffsets = sizeof(buffer_offsets) / sizeof(buffer_offsets[0]);
static int offsets_idx;	/* current index into buffer_offsets[] */
enum { ZERO, ONE, TWO };

/* mach_vm_copy() post actions */
enum { VMCOPY_MODIFY_SRC, VMCOPY_MODIFY_DST, VMCOPY_MODIFY_SHARED_COPIED };

static struct {
	int action;
	const char * description;
} vmcopy_actions[] = {
	{VMCOPY_MODIFY_SRC, "modify vm_copy() source"},
	{VMCOPY_MODIFY_DST, "modify vm_copy() destination"},
	{VMCOPY_MODIFY_SHARED_COPIED,
	 "modify vm_copy source's shared "
	 "or copied from/to region"},
};
static int numofvmcopyactions = sizeof(vmcopy_actions) / sizeof(vmcopy_actions[0]);
static int vmcopy_action_idx;	/* current index into vmcopy_actions[] */
581 
582 /************************************/
583 /* Setters and getters for fixtures */
584 /************************************/
585 
/* Allocation memory range. These globals hold the per-test fixture
 * state, accessed through the setters/getters below. */
static allocate_fn_t _allocator      = wrapper_mach_vm_allocate;
static mach_vm_size_t _vm_size       = DEFAULT_VM_SIZE;
static int _address_flag             = VM_FLAGS_ANYWHERE;
static boolean_t _address_alignment  = TRUE;
static mach_vm_address_t _vm_address = 0x0;
static mach_vm_address_t _already_deallocated_vm_page = 0x0;

/* Buffer for mach_vm_write(). */
static mach_vm_size_t _buffer_size       = DEFAULT_VM_SIZE;
static mach_vm_address_t _buffer_address = 0x0;
static int _buffer_offset                = 0;
static mach_vm_address_t _already_deallocated_buffer_page = 0x0;

/* Post action for mach_vm_copy(). */
static int _vmcopy_post_action = VMCOPY_MODIFY_SRC;
602 
/* Allocator used for the test's main region. */
static void
set_allocator(allocate_fn_t allocate)
{
	_allocator = allocate;
}

static allocate_fn_t
get_allocator()
{
	return _allocator;
}

/* Size of the test's main region. */
static void
set_vm_size(mach_vm_size_t size)
{
	_vm_size = size;
}

static mach_vm_size_t
get_vm_size()
{
	return _vm_size;
}

/* VM_FLAGS_ANYWHERE or VM_FLAGS_FIXED for the main region. */
static void
set_address_flag(int flag)
{
	_address_flag = flag;
}

static int
get_address_flag()
{
	return _address_flag;
}

/* Whether the requested address is allocator-aligned. */
static void
set_address_alignment(boolean_t alignment)
{
	_address_alignment = alignment;
}

static boolean_t
get_address_alignment()
{
	return _address_alignment;
}

/* Address of the test's main region. */
static void
set_vm_address(mach_vm_address_t address)
{
	_vm_address = address;
}

static mach_vm_address_t
get_vm_address()
{
	return _vm_address;
}
662 
/* Page of the main region already deallocated by the fixture. */
static void
set_already_deallocated_vm_page(mach_vm_address_t address)
{
	_already_deallocated_vm_page = address;
}

static mach_vm_address_t
get_already_deallocated_vm_page()
{
	return _already_deallocated_vm_page;
}

/* Size of the mach_vm_write() buffer. */
static void
set_buffer_size(mach_vm_size_t size)
{
	_buffer_size = size;
}

static mach_vm_size_t
get_buffer_size()
{
	return _buffer_size;
}

/* Address of the mach_vm_write() buffer. */
static void
set_buffer_address(mach_vm_address_t address)
{
	_buffer_address = address;
}

static mach_vm_address_t
get_buffer_address()
{
	return _buffer_address;
}

/* Byte offset applied to the mach_vm_write() buffer. */
static void
set_buffer_offset(int offset)
{
	_buffer_offset = offset;
}

static int
get_buffer_offset()
{
	return _buffer_offset;
}

/* Page of the buffer already deallocated by the fixture. */
static void
set_already_deallocated_buffer_page(mach_vm_address_t address)
{
	_already_deallocated_buffer_page = address;
}

static mach_vm_address_t
get_already_deallocated_buffer_page()
{
	return _already_deallocated_buffer_page;
}

/* Post action (VMCOPY_MODIFY_*) for mach_vm_copy() tests. */
static void
set_vmcopy_post_action(int action)
{
	_vmcopy_post_action = action;
}

static int
get_vmcopy_post_action()
{
	return _vmcopy_post_action;
}
734 
735 /*******************************/
736 /* Usage and option processing */
737 /*******************************/
/* One enable flag per test family, selected via process_options(). */
static boolean_t flag_run_allocate_test = FALSE;
static boolean_t flag_run_deallocate_test = FALSE;
static boolean_t flag_run_read_test = FALSE;
static boolean_t flag_run_write_test = FALSE;
static boolean_t flag_run_protect_test = FALSE;
static boolean_t flag_run_copy_test = FALSE;

/* Bit masks for test_option_t.to_flags; bit i selects test_info[i]. */
#define VM_TEST_ALLOCATE   0x00000001
#define VM_TEST_DEALLOCATE 0x00000002
#define VM_TEST_READ       0x00000004
#define VM_TEST_WRITE      0x00000008
#define VM_TEST_PROTECT    0x00000010
#define VM_TEST_COPY       0x00000020

typedef struct test_option {
	uint32_t        to_flags;	/* VM_TEST_* selection; 0 = run everything */
	int             to_quietness;	/* requested verbosity, 0 = keep default */
	mach_vm_size_t  to_vmsize;	/* override for vm_sizes[0], 0 = keep default */
} test_option_t;

typedef struct test_info {
	char            *ti_name;	/* human-readable family name */
	boolean_t       *ti_flag;	/* enable flag to set */
} test_info_t;

static test_option_t test_options;

enum {ALLOCATE = 0, DEALLOCATE, READ, WRITE, PROTECT, COPY};

/* NULL-terminated; order must match the enum and VM_TEST_* bits. */
static test_info_t test_info[] = {
	{"allocate", &flag_run_allocate_test},
	{"deallocate", &flag_run_deallocate_test},
	{"read", &flag_run_read_test},
	{"write", &flag_run_write_test},
	{"protect", &flag_run_protect_test},
	{"copy", &flag_run_copy_test},
	{NULL, NULL}
};
776 
777 static void
process_options(test_option_t options)778 process_options(test_option_t options)
779 {
780 	test_info_t *tp;
781 
782 	setvbuf(stdout, NULL, _IONBF, 0);
783 
784 	set_vm_size(DEFAULT_VM_SIZE);
785 	set_quietness(DEFAULT_QUIETNESS);
786 	if (get_task_exc_guard_vm_fatal()) {
787 		_expected_vm_exc_guard_signal = SIGKILL;
788 	}
789 
790 	if (NULL != getenv("LTERDOS")) {
791 		logr("LTERDOS=YES this is LeanTestEnvironment. Increasing quietness by 1.");
792 		set_quietness(get_quietness() + 1);
793 	} else {
794 		if (options.to_quietness > 0) {
795 			set_quietness(options.to_quietness);
796 		}
797 	}
798 
799 	if (options.to_vmsize != 0) {
800 		vm_sizes[0].size = options.to_vmsize;
801 	}
802 
803 	if (options.to_flags == 0) {
804 		for (tp = test_info; tp->ti_name != NULL; ++tp) {
805 			*tp->ti_flag = TRUE;
806 		}
807 	} else {
808 		if (options.to_flags & VM_TEST_ALLOCATE) {
809 			*(test_info[ALLOCATE].ti_flag) = TRUE;
810 		}
811 
812 		if (options.to_flags & VM_TEST_DEALLOCATE) {
813 			*(test_info[DEALLOCATE].ti_flag) = TRUE;
814 		}
815 
816 		if (options.to_flags & VM_TEST_READ) {
817 			*(test_info[READ].ti_flag) = TRUE;
818 		}
819 
820 		if (options.to_flags & VM_TEST_WRITE) {
821 			*(test_info[WRITE].ti_flag) = TRUE;
822 		}
823 
824 		if (options.to_flags & VM_TEST_PROTECT) {
825 			*(test_info[PROTECT].ti_flag) = TRUE;
826 		}
827 
828 		if (options.to_flags & VM_TEST_COPY) {
829 			*(test_info[COPY].ti_flag) = TRUE;
830 		}
831 	}
832 }
833 
834 /*****************/
835 /* Various tools */
836 /*****************/
837 
/* Find the allocator address alignment mask: 2 MB - 1 for the 2 MB
 * mach_vm_map() wrapper, one page - 1 for every other allocator. */
mach_vm_address_t
get_mask()
{
	mach_vm_address_t mask;

	if (get_allocator() == wrapper_mach_vm_map_2MB) {
		mask = (mach_vm_address_t)0x1FFFFF;
	} else {
		mask = vm_page_size - 1;
	}
	return mask;
}
851 
/* Find the size of the smallest aligned region containing a given
 * memory range. */
mach_vm_size_t
aligned_size(mach_vm_address_t address, mach_vm_size_t size)
{
	/* Page-round the span from the start of address's page through
	 * address + size. */
	return round_page(address - mach_vm_trunc_page(address) + size);
}
859 
860 /********************/
861 /* Assert functions */
862 /********************/
863 
/* Address is aligned on allocator boundary. */
static inline void
assert_aligned_address(mach_vm_address_t address)
{
	T_QUIET; T_ASSERT_EQ((address & get_mask()), 0ull,
	    "Address 0x%jx is unexpectedly "
	    "unaligned.",
	    (uintmax_t)address);
}

/* Address is truncated to allocator boundary. */
static inline void
assert_trunc_address(mach_vm_address_t address, mach_vm_address_t trunc_address)
{
	T_QUIET; T_ASSERT_EQ(trunc_address, (address & ~get_mask()),
	    "Address "
	    "0x%jx is unexpectedly not truncated to address 0x%jx.",
	    (uintmax_t)address, (uintmax_t)trunc_address);
}

/* The mach_vm_address_t stored at `address` equals `marker`. */
static inline void
assert_address_value(mach_vm_address_t address, mach_vm_address_t marker)
{
	/* this assert is used so frequently so that we simply judge on
	 * its own instead of leaving this to LD macro for efficiency
	 */
	if (MACH_VM_ADDRESS_T(address) != marker) {
		T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, "
		    "instead of 0x%jx.", (uintmax_t)address,
		    (uintmax_t)MACH_VM_ADDRESS_T(address), (uintmax_t)marker);
	}
}
896 
/* The current allocator returns expected_kr for this request. */
void
assert_allocate_return(mach_vm_address_t * address, mach_vm_size_t size, int address_flag, kern_return_t expected_kr)
{
	assert_mach_return(get_allocator()(mach_task_self(), address, size, address_flag), expected_kr, "Allocator");
}

/* The current allocator succeeds for this request. */
void
assert_allocate_success(mach_vm_address_t * address, mach_vm_size_t size, int address_flag)
{
	assert_allocate_return(address, size, address_flag, KERN_SUCCESS);
}

/* mach_vm_deallocate() returns expected_kr for this range. */
void
assert_deallocate_return(mach_vm_address_t address, mach_vm_size_t size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_deallocate(mach_task_self(), address, size), expected_kr, "mach_vm_deallocate()");
}

/* mach_vm_deallocate() succeeds for this range. */
void
assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size)
{
	assert_deallocate_return(address, size, KERN_SUCCESS);
}
920 
/* mach_vm_read() returns expected_kr for this range. */
void
assert_read_return(mach_vm_address_t address,
    mach_vm_size_t size,
    vm_offset_t * data,
    mach_msg_type_number_t * data_size,
    kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_read(mach_task_self(), address, size, data, data_size), expected_kr, "mach_vm_read()");
}

/* mach_vm_read() succeeds and returns a buffer of exactly the
 * requested size. */
void
assert_read_success(mach_vm_address_t address, mach_vm_size_t size, vm_offset_t * data, mach_msg_type_number_t * data_size)
{
	assert_read_return(address, size, data, data_size, KERN_SUCCESS);
	T_QUIET; T_ASSERT_EQ((mach_vm_size_t)*data_size, size,
	    "Returned buffer size 0x%jx "
	    "(%ju) is unexpectedly different from source size 0x%jx "
	    "(%ju).",
	    (uintmax_t)*data_size, (uintmax_t)*data_size, (uintmax_t)size, (uintmax_t)size);
}

/* mach_vm_write() returns expected_kr for this buffer. */
void
assert_write_return(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_write(mach_task_self(), address, data, data_size), expected_kr, "mach_vm_write()");
}

/* mach_vm_write() succeeds for this buffer. */
void
assert_write_success(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size)
{
	assert_write_return(address, data, data_size, KERN_SUCCESS);
}
953 
/* Allocate a fresh destination region of the given size (anywhere),
 * then mach_vm_copy() size bytes from source into it, checking the
 * copy's return code.  *dest receives the destination address. */
void
assert_allocate_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest, kern_return_t expected_kr)
{
	assert_allocate_success(dest, size, VM_FLAGS_ANYWHERE);
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, *dest), expected_kr, "mach_vm_copy()");
}
960 void
assert_allocate_copy_success(mach_vm_address_t source,mach_vm_size_t size,mach_vm_address_t * dest)961 assert_allocate_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest)
962 {
963 	assert_allocate_copy_return(source, size, dest, KERN_SUCCESS);
964 }
965 
/* Copy size bytes from source to an existing dest region and check
 * the mach_vm_copy() return code. */
void
assert_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, dest), expected_kr, "mach_vm_copy()");
}
971 
972 void
assert_copy_success(mach_vm_address_t source,mach_vm_size_t size,mach_vm_address_t dest)973 assert_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest)
974 {
975 	assert_copy_return(source, size, dest, KERN_SUCCESS);
976 }
977 
/* Copy size bytes from source to dest and require that
 * mach_vm_copy() fails (any non-success return). */
void
assert_copy_failure(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest)
{
	assert_mach_failure(mach_vm_copy(mach_task_self(), source, size, dest), "mach_vm_copy()");
}
983 
984 
985 /*******************/
986 /* Memory patterns */
987 /*******************/
988 
989 typedef boolean_t (*address_filter_t)(mach_vm_address_t);
990 typedef void (*address_action_t)(mach_vm_address_t, mach_vm_address_t);
991 
/* Map over a memory region pattern and its complement, through a
 * (possibly reversed) boolean filter and a starting value.
 * Walks [address, address + size) one pointer-sized word at a time:
 * words for which filter() != reversed get if_action(), all others
 * get else_action(); each action also receives start_value + offset.
 * NOTE(review): the strict '<' means the final word of the region
 * (offset size - vm_address_size) is never visited; presumably
 * intentional -- confirm against the pattern verification tests. */
void
filter_addresses_do_else(address_filter_t filter,
    boolean_t reversed,
    mach_vm_address_t address,
    mach_vm_size_t size,
    address_action_t if_action,
    address_action_t else_action,
    mach_vm_address_t start_value)
{
	mach_vm_address_t i;
	for (i = 0; i + vm_address_size < size; i += vm_address_size) {
		if (filter(address + i) != reversed) {
			if_action(address + i, start_value + i);
		} else {
			else_action(address + i, start_value + i);
		}
	}
}
1012 
/* Various pattern actions. */

/* Deliberate no-op action: both parameters are intentionally unused. */
void
no_action(mach_vm_address_t i, mach_vm_address_t value)
{
}
1018 
/* Action: assert the word at address i is zero (value is unused). */
void
read_zero(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, 0);
}
1024 
/* Action: assert the word at address i holds exactly value. */
void
verify_address(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, value);
}
1030 
/* Action: store value into the word at address i. */
void
write_address(mach_vm_address_t i, mach_vm_address_t value)
{
	MACH_VM_ADDRESS_T(i) = value;
}
1036 
/* Various patterns. */

/* Pattern matching no address: always FALSE (parameter unused). */
boolean_t
empty(mach_vm_address_t i)
{
	return FALSE;
}
1043 
1044 boolean_t
checkerboard(mach_vm_address_t i)1045 checkerboard(mach_vm_address_t i)
1046 {
1047 	return !((i / vm_address_size) & 0x1);
1048 }
1049 
1050 boolean_t
page_ends(mach_vm_address_t i)1051 page_ends(mach_vm_address_t i)
1052 {
1053 	mach_vm_address_t residue = i % vm_page_size;
1054 
1055 	return residue == 0 || residue == vm_page_size - vm_address_size;
1056 }
1057 
1058 /*************************************/
1059 /* Global variables set up functions */
1060 /*************************************/
1061 
1062 void
set_up_allocator()1063 set_up_allocator()
1064 {
1065 	T_QUIET; T_ASSERT_TRUE(allocators_idx >= 0 && allocators_idx < numofallocators, "Invalid allocators[] index: %d.", allocators_idx);
1066 	set_allocator(allocators[allocators_idx].allocate);
1067 }
1068 
/* Find a fixed allocatable address by retrieving the address
 * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE.
 * Side effects: leaves the found region allocated and records it in
 * the fixed_vm_address/fixed_vm_size globals (asserting they were
 * previously consumed) so a later allocator wrapper can free it just
 * before the real allocation under test. */
mach_vm_address_t
get_fixed_address(mach_vm_size_t size)
{
	/* mach_vm_map() starts looking for an address at 0x0. */
	mach_vm_address_t address = 0x0;

	/*
	 * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything
	 * non-zero to have at least an extra couple pages.
	 */
	if (size != 0) {
		size = round_page(size + 2 * vm_page_size);
	}

	assert_allocate_success(&address, size, VM_FLAGS_ANYWHERE);

	/*
	 * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can
	 * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will
	 * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper.
	 */
	T_QUIET; T_ASSERT_EQ(fixed_vm_address, 0ull, "previous fixed address not used");
	T_QUIET; T_ASSERT_EQ(fixed_vm_size, 0ull, "previous fixed size not used");
	fixed_vm_address = address;
	fixed_vm_size = size;

	/* The address mach_vm_allocate() chose must be page aligned. */
	assert_aligned_address(address);
	return address;
}
1100 
/* If needed, find an address at which a region of the specified size
 * can be allocated. Otherwise, set the address to 0x0.
 * Reads the flags_idx/alignments_idx globals to pick the allocation
 * flag and alignment under test; for fixed allocations the found
 * address is bumped by one byte when the unaligned case is selected. */
void
set_up_vm_address(mach_vm_size_t size)
{
	T_QUIET; T_ASSERT_TRUE(flags_idx >= 0 && flags_idx < numofflags, "Invalid address_flags[] index: %d.", flags_idx);
	T_QUIET; T_ASSERT_TRUE(alignments_idx >= 0 && alignments_idx < numofalignments, "Invalid address_alignments[] index: %d.", alignments_idx);
	set_address_flag(address_flags[flags_idx].flag);
	set_address_alignment(address_alignments[alignments_idx].alignment);

	if (!(get_address_flag() & VM_FLAGS_ANYWHERE)) {
		boolean_t aligned = get_address_alignment();
		logv(
			"Looking for fixed %saligned address for allocation "
			"of 0x%jx (%ju) byte%s...",
			aligned ? "" : "un", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
		mach_vm_address_t address = get_fixed_address(size);
		/* Deliberately mis-align the address for the unaligned case. */
		if (!aligned) {
			address++;
		}
		set_vm_address(address);
		logv("Found %saligned fixed address 0x%jx.", aligned ? "" : "un", (uintmax_t)address);
	} else {
		/* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for
		 *  an address at the one supplied and goes up, without
		 *  wrapping around. */
		set_vm_address(0x0);
	}
}
1130 
1131 void
set_up_vm_size()1132 set_up_vm_size()
1133 {
1134 	T_QUIET; T_ASSERT_TRUE(sizes_idx >= 0 && sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", sizes_idx);
1135 	set_vm_size(vm_sizes[sizes_idx].size);
1136 }
1137 
1138 void
set_up_buffer_size()1139 set_up_buffer_size()
1140 {
1141 	T_QUIET; T_ASSERT_TRUE(buffer_sizes_idx >= 0 && buffer_sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx);
1142 	set_buffer_size(vm_sizes[buffer_sizes_idx].size);
1143 }
1144 
1145 void
set_up_buffer_offset()1146 set_up_buffer_offset()
1147 {
1148 	T_QUIET; T_ASSERT_TRUE(offsets_idx >= 0 && offsets_idx < numofoffsets, "Invalid buffer_offsets[] index: %d.", offsets_idx);
1149 	set_buffer_offset(buffer_offsets[offsets_idx].offset);
1150 }
1151 
1152 void
set_up_vmcopy_action()1153 set_up_vmcopy_action()
1154 {
1155 	T_QUIET; T_ASSERT_TRUE(vmcopy_action_idx >= 0 && vmcopy_action_idx < numofvmcopyactions, "Invalid vmcopy_actions[] index: %d.",
1156 	    vmcopy_action_idx);
1157 	set_vmcopy_post_action(vmcopy_actions[vmcopy_action_idx].action);
1158 }
1159 
/* Fixture: select both the allocator and the VM size under test.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_allocator_and_vm_size(void)
{
	set_up_allocator();
	set_up_vm_size();
}
1166 
/* Fixture: select the VM size, then derive the test address for it.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_vm_variables(void)
{
	set_up_vm_size();
	set_up_vm_address(get_vm_size());
}
1173 
/* Fixture: select the allocator, then the VM size and address.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_allocator_and_vm_variables(void)
{
	set_up_allocator();
	set_up_vm_variables();
}
1180 
/* Fixture: select the buffer size and buffer offset under test.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_buffer_variables(void)
{
	set_up_buffer_size();
	set_up_buffer_offset();
}
1187 
/* Fixture: select the mach_vm_copy() post-action under test.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_copy_shared_mode_variables(void)
{
	set_up_vmcopy_action();
}
1193 
1194 /*******************************/
1195 /* Allocation set up functions */
1196 /*******************************/
1197 
/* Verbose-log an upcoming allocation; the address is only mentioned
 * for fixed (non-ANYWHERE) allocations, where it is meaningful. */
static void
log_allocation(mach_vm_size_t size, int flags, mach_vm_address_t address, const char *message)
{
	if (flags & VM_FLAGS_ANYWHERE) {
		logv("Allocating 0x%jx (%ju) byte%s %s...",
		    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", message);
	} else {
		logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx %s...",
		    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, message);
	}
}
1209 
/* Allocate VM region of given size.
 * Uses the globally selected allocator flag/address; on success the
 * global VM address is updated to the actually allocated address and
 * the early-deallocation marker is cleared. */
void
allocate(mach_vm_size_t size)
{
	mach_vm_address_t address = get_vm_address();
	int flag                  = get_address_flag();

	log_allocation(size, flag, address, "");
	assert_allocate_success(&address, size, flag);
	logv(
		"Memory of rounded size 0x%jx (%ju) allocated at "
		"address 0x%jx.",
		(uintmax_t)round_page(size), (uintmax_t)round_page(size), (uintmax_t)address);
	/* Fixed allocation address is truncated to the allocator
	 *  boundary. */
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		mach_vm_address_t old_address = get_vm_address();
		assert_trunc_address(old_address, address);
		logv(
			"Address 0x%jx is correctly truncated to allocated "
			"address 0x%jx.",
			(uintmax_t)old_address, (uintmax_t)address);
	}
	set_vm_address(address);
	set_already_deallocated_vm_page(0x0);
}
1236 
/* Allocate an anywhere-placed buffer of buffer_size bytes, advance it
 * by the configured buffer offset, and record the (possibly
 * unaligned) result as the global buffer address.  The narrowing cast
 * check guards 32-bit vm_offset_t builds against address overflow. */
void
allocate_buffer(mach_vm_size_t buffer_size)
{
	mach_vm_address_t data = 0x0;

	log_allocation(buffer_size, VM_FLAGS_ANYWHERE, 0, "");
	assert_allocate_success(&data, buffer_size, VM_FLAGS_ANYWHERE);
	logv(
		"Memory of rounded size 0x%jx (%ju) allocated at "
		"address 0x%jx.",
		(uintmax_t)round_page(buffer_size), (uintmax_t)round_page(buffer_size), (uintmax_t)data);
	data += get_buffer_offset();
	T_QUIET; T_ASSERT_EQ((mach_vm_address_t)(vm_offset_t)data, data,
	    "Address 0x%jx "
	    "unexpectedly overflows to 0x%jx when cast as "
	    "vm_offset_t type.",
	    (uintmax_t)data, (uintmax_t)(vm_offset_t)data);
	set_buffer_address(data);
	set_already_deallocated_buffer_page(0x0);
}
1257 
1258 /****************************************************/
1259 /* Global variables and allocation set up functions */
1260 /****************************************************/
1261 
/* Fixture: select size/address variables, then perform the allocation.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_vm_variables_and_allocate(void)
{
	set_up_vm_variables();
	allocate(get_vm_size());
}
1268 
/* Fixture: select the allocator, the VM variables, and allocate.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_allocator_and_vm_variables_and_allocate(void)
{
	set_up_allocator();
	set_up_vm_variables_and_allocate();
}
1275 
1276 void
set_up_vm_variables_and_allocate_extra_page()1277 set_up_vm_variables_and_allocate_extra_page()
1278 {
1279 	set_up_vm_size();
1280 	/* Increment the size to insure we get an extra allocated page
1281 	 *  for unaligned start addresses. */
1282 	mach_vm_size_t allocation_size = get_vm_size() + 1;
1283 	set_up_vm_address(allocation_size);
1284 
1285 	allocate(allocation_size);
1286 	/* In the fixed unaligned address case, restore the returned
1287 	*  (truncated) allocation address to its unaligned value. */
1288 	if (!get_address_alignment()) {
1289 		set_vm_address(get_vm_address() + 1);
1290 	}
1291 }
1292 
/* Fixture: select buffer size/offset, then allocate the buffer with
 * room for the offset so unaligned starts are fully backed.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_buffer_variables_and_allocate_extra_page(void)
{
	set_up_buffer_variables();
	/* Increment the size to insure we get an extra allocated page
	 *  for unaligned start addresses. */
	allocate_buffer(get_buffer_size() + get_buffer_offset());
}
1301 
/* Allocate some destination and buffer memory for subsequent
 * writing, including extra pages for non-aligned start addresses.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_vm_and_buffer_variables_allocate_for_writing(void)
{
	set_up_vm_variables_and_allocate_extra_page();
	set_up_buffer_variables_and_allocate_extra_page();
}
1310 
/* Allocate some destination and source regions for subsequent
 * copying, including extra pages for non-aligned start addresses.
 * (Identical setup to the writing case.)
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_vm_and_buffer_variables_allocate_for_copying(void)
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
}
1318 
1319 /************************************/
1320 /* Deallocation tear down functions */
1321 /************************************/
1322 
/* Log and deallocate [address, address + size), asserting success. */
void
deallocate_range(mach_vm_address_t address, mach_vm_size_t size)
{
	logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_deallocate_success(address, size);
}
1330 
/*
 * Same as deallocate_range, but if already_deallocated_address
 * is not zero then that page of memory is not deallocated
 * (it was already freed by a *_page_early() helper).
 */
void
deallocate_range_except_page(mach_vm_address_t address, mach_vm_size_t size,
    mach_vm_address_t already_deallocated_address)
{
	if (already_deallocated_address != 0) {
		/* Free the two sub-ranges on either side of the skipped page. */
		mach_vm_address_t end = mach_vm_round_page(address + size);
		mach_vm_address_t already_deallocated_end = already_deallocated_address + vm_page_size;
		deallocate_range(address, already_deallocated_address - address);
		logv("Skipping already-deallocated page at 0x%jx (%ju bytes)",
		    (uintmax_t)already_deallocated_address, (uintmax_t)vm_page_size);
		deallocate_range(already_deallocated_end, end - already_deallocated_end);
	} else {
		deallocate_range(address, size);
	}
}
1350 
/* Tear down: deallocate the test region recorded in the globals,
 * skipping any page freed early, then clear the early-free marker.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
deallocate(void)
{
	deallocate_range_except_page(get_vm_address(), get_vm_size(), get_already_deallocated_vm_page());
	set_already_deallocated_vm_page(0x0);
}
1357 
/* Deallocate source memory, including the extra page for unaligned
 * start addresses.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
deallocate_extra_page(void)
{
	/* Set the address and size to their original allocation
	 *  values. */
	deallocate_range_except_page(
		mach_vm_trunc_page(get_vm_address()),
		get_vm_size() + 1,
		get_already_deallocated_vm_page());
	set_already_deallocated_vm_page(0x0);
}
1371 
/* Deallocate buffer and destination memory for mach_vm_write(),
 * including the extra page for unaligned start addresses.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
deallocate_vm_and_buffer(void)
{
	deallocate_range_except_page(
		mach_vm_trunc_page(get_vm_address()),
		get_vm_size() + 1,
		get_already_deallocated_vm_page());
	set_already_deallocated_vm_page(0x0);

	deallocate_range_except_page(
		mach_vm_trunc_page(get_buffer_address()),
		get_buffer_size() + get_buffer_offset(),
		get_already_deallocated_buffer_page());
	set_already_deallocated_buffer_page(0x0);
}
1389 
/*
 * Deallocate vm_page_size bytes within the source memory.
 * Later deallocate() or deallocate_extra_page() or deallocate_vm_and_buffer()
 * will not deallocate it again.
 * The address must be page aligned and wholly inside the source
 * region; only one early deallocation is allowed per test.
 */
void
deallocate_vm_page_early(mach_vm_address_t address)
{
	mach_vm_address_t vm_start = mach_vm_trunc_page(get_vm_address());
	mach_vm_address_t vm_end = mach_vm_round_page(vm_start + get_vm_size() + 1);
	T_QUIET; T_ASSERT_EQ(get_already_deallocated_vm_page(), 0ull,
	    "deallocate_vm_page_early can only be used once per test");
	T_QUIET; T_ASSERT_EQ(address, mach_vm_trunc_page(address),
	    "deallocate_vm_page_early address must be page aligned");
	T_QUIET; T_ASSERT_TRUE(address >= vm_start && address + vm_page_size <= vm_end,
	    "deallocate_vm_page_early address must be within source memory");

	assert_deallocate_success(address, vm_page_size);
	/* Remember the page so the teardown helpers skip it. */
	set_already_deallocated_vm_page(address);
}
1410 
/*
 * Deallocate vm_page_size bytes within the mach_vm_write() buffer.
 * Later deallocate_vm_and_buffer() will not deallocate it again.
 * The address must be page aligned and wholly inside the buffer;
 * only one early deallocation is allowed per test.
 */
void
deallocate_buffer_page_early(mach_vm_address_t address)
{
	mach_vm_address_t buffer_start = mach_vm_trunc_page(get_buffer_address());
	mach_vm_address_t buffer_end = mach_vm_round_page(buffer_start + get_buffer_size() + get_buffer_offset());
	T_QUIET; T_ASSERT_EQ(get_already_deallocated_buffer_page(), 0ull,
	    "deallocate_buffer_page_early can only be used once per test");
	T_QUIET; T_ASSERT_EQ(address, mach_vm_trunc_page(address),
	    "deallocate_buffer_page_early address must be page aligned");
	T_QUIET; T_ASSERT_TRUE(address >= buffer_start && address + vm_page_size <= buffer_end,
	    "deallocate_buffer_page_early address must be within buffer memory");

	assert_deallocate_success(address, vm_page_size);
	/* Remember the page so deallocate_vm_and_buffer() skips it. */
	set_already_deallocated_buffer_page(address);
}
1430 
1431 /***********************************/
1432 /* mach_vm_read() set up functions */
1433 /***********************************/
1434 
1435 /* Read the source memory into a buffer, deallocate the source, set
1436  * the global address and size from the buffer's. */
1437 void
read_deallocate()1438 read_deallocate()
1439 {
1440 	mach_vm_size_t size       = get_vm_size();
1441 	mach_vm_address_t address = get_vm_address();
1442 	vm_offset_t read_address;
1443 	mach_msg_type_number_t read_size;
1444 
1445 	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1446 	    (uintmax_t)address);
1447 	assert_read_success(address, size, &read_address, &read_size);
1448 	logv(
1449 		"Memory of size 0x%jx (%ju) read into buffer of "
1450 		"address 0x%jx.",
1451 		(uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)read_address);
1452 	/* Deallocate the originally allocated memory, including the
1453 	 *  extra allocated page in
1454 	 *  set_up_vm_variables_and_allocate_extra_page(). */
1455 	deallocate_range(mach_vm_trunc_page(address), size + 1);
1456 
1457 	/* Promoting to mach_vm types after checking for overflow, and
1458 	 *  setting the global address from the buffer's. */
1459 	T_QUIET; T_ASSERT_EQ((vm_offset_t)(mach_vm_address_t)read_address, read_address,
1460 	    "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1461 	    "as mach_vm_address_t type.",
1462 	    (uintmax_t)read_address, (uintmax_t)(mach_vm_address_t)read_address);
1463 	T_QUIET; T_ASSERT_EQ((mach_msg_type_number_t)(mach_vm_size_t)read_size, read_size,
1464 	    "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) "
1465 	    "when cast as mach_vm_size_t type.",
1466 	    (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)(mach_vm_size_t)read_size, (uintmax_t)(mach_vm_size_t)read_size);
1467 	set_vm_address((mach_vm_address_t)read_address);
1468 	set_vm_size((mach_vm_size_t)read_size);
1469 }
1470 
/* Allocate some source memory, read it into a buffer, deallocate the
 * source, set the global address and size from the buffer's.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_vm_variables_allocate_read_deallocate(void)
{
	set_up_vm_variables_and_allocate_extra_page();
	read_deallocate();
}
1479 
1480 /************************************/
1481 /* mach_vm_write() set up functions */
1482 /************************************/
1483 
1484 /* Write the buffer into the destination memory. */
1485 void
write_buffer()1486 write_buffer()
1487 {
1488 	mach_vm_address_t address          = get_vm_address();
1489 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
1490 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
1491 
1492 	logv(
1493 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
1494 		"memory at address 0x%jx...",
1495 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
1496 	assert_write_success(address, data, buffer_size);
1497 	logv("Buffer written.");
1498 }
1499 
/* Allocate some destination and buffer memory, and write the buffer
 * into the destination memory.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_vm_and_buffer_variables_allocate_write(void)
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
	write_buffer();
}
1508 
1509 /***********************************/
1510 /* mach_vm_copy() set up functions */
1511 /***********************************/
1512 
/* Copy the source region into a freshly allocated destination,
 * deallocate the source (plus its extra page), and retarget the
 * global address/size at the destination after an overflow check. */
void
copy_deallocate(void)
{
	mach_vm_size_t size      = get_vm_size();
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest   = 0;

	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)source);
	assert_allocate_copy_success(source, size, &dest);
	logv(
		"Memory of size 0x%jx (%ju) copy into region of "
		"address 0x%jx.",
		(uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
	/* Deallocate the originally allocated memory, including the
	 *  extra allocated page in
	 *  set_up_vm_variables_and_allocate_extra_page(). */
	deallocate_range(mach_vm_trunc_page(source), size + 1);
	/* Promoting to mach_vm types after checking for overflow, and
	 *  setting the global address from the buffer's. */
	T_QUIET; T_ASSERT_EQ((mach_vm_address_t)(vm_offset_t)dest, dest,
	    "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
	    "as mach_vm_address_t type.",
	    (uintmax_t)dest, (uintmax_t)(vm_offset_t)dest);
	set_vm_address(dest);
	set_vm_size(size);
}
1540 
1541 /* Copy the source region into the destination region. */
1542 void
copy_region()1543 copy_region()
1544 {
1545 	mach_vm_address_t source    = get_vm_address();
1546 	mach_vm_address_t dest      = get_buffer_address();
1547 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
1548 
1549 	logv(
1550 		"Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
1551 		"memory at address 0x%jx...",
1552 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1553 	assert_copy_success(source, size, dest);
1554 	logv("Buffer written.");
1555 }
1556 
/* Allocate some source memory, copy it to another region, deallocate the
 * source, set the global address and size from the destination region.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_vm_variables_allocate_copy_deallocate(void)
{
	set_up_vm_variables_and_allocate_extra_page();
	copy_deallocate();
}
1565 
/* Allocate some destination and source memory, and copy the source
 * into the destination memory.
 * Fix: prototyped as (void) instead of the unprototyped (). */
void
set_up_source_and_dest_variables_allocate_copy(void)
{
	set_up_vm_and_buffer_variables_allocate_for_copying();
	copy_region();
}
1574 
1575 /**************************************/
1576 /* mach_vm_protect() set up functions */
1577 /**************************************/
1578 
/* Allocate the test region (with extra page), then set the current
 * protection on it to exactly `protection` (set_maximum = FALSE);
 * protection_name is used only for logging. */
void
set_up_vm_variables_allocate_protect(vm_prot_t protection, const char * protection_name)
{
	set_up_vm_variables_and_allocate_extra_page();
	mach_vm_size_t size       = get_vm_size();
	mach_vm_address_t address = get_vm_address();

	logv(
		"Setting %s-protection on 0x%jx (%ju) byte%s at address "
		"0x%jx...",
		protection_name, (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
	assert_mach_success(mach_vm_protect(mach_task_self(), address, size, FALSE, protection), "mach_vm_protect()");
	logv("Region %s-protected.", protection_name);
}
1593 
1594 void
set_up_vm_variables_allocate_readprotect()1595 set_up_vm_variables_allocate_readprotect()
1596 {
1597 	set_up_vm_variables_allocate_protect(VM_PROT_WRITE, "read");
1598 }
1599 
1600 void
set_up_vm_variables_allocate_writeprotect()1601 set_up_vm_variables_allocate_writeprotect()
1602 {
1603 	set_up_vm_variables_allocate_protect(VM_PROT_READ, "write");
1604 }
1605 
1606 /*****************/
1607 /* Address tests */
1608 /*****************/
1609 
1610 /* Allocated address is nonzero iff size is nonzero. */
1611 void
test_nonzero_address_iff_nonzero_size()1612 test_nonzero_address_iff_nonzero_size()
1613 {
1614 	mach_vm_address_t address = get_vm_address();
1615 	mach_vm_size_t size       = get_vm_size();
1616 
1617 	T_QUIET; T_ASSERT_TRUE((address && size) || (!address && !size), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address,
1618 	    address ? "non" : "");
1619 	logv("Address 0x%jx is %szero as expected.", (uintmax_t)address, size ? "non" : "");
1620 }
1621 
1622 /* Allocated address is aligned. */
1623 void
test_aligned_address()1624 test_aligned_address()
1625 {
1626 	mach_vm_address_t address = get_vm_address();
1627 
1628 	assert_aligned_address(address);
1629 	logv("Address 0x%jx is aligned.", (uintmax_t)address);
1630 }
1631 
1632 /************************/
1633 /* Read and write tests */
1634 /************************/
1635 
/* Verify that every word matching the (possibly reversed) filter
 * holds its expected address-derived value and every other word is
 * zero; pattern_name is used only for logging. */
void
verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
		"Verifying %s pattern on region of address 0x%jx "
		"and size 0x%jx (%ju)...",
		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern verified.");
}
1647 
/* Write the address-derived value into every word matching the
 * (possibly reversed) filter, leaving other words untouched. */
void
write_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
		"Writing %s pattern on region of address 0x%jx "
		"and size 0x%jx (%ju)...",
		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
	logv("Pattern written.");
}
1659 
/* Write the pattern then immediately verify it: matching words must
 * hold their address-derived value, all other words must read zero. */
void
write_and_verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
		"Writing and verifying %s pattern on region of "
		"address 0x%jx and size 0x%jx (%ju)...",
		pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern written and verified.");
}
1672 
1673 /* Verify that the smallest aligned region containing the
1674  * given range is zero-filled. */
1675 void
test_zero_filled()1676 test_zero_filled()
1677 {
1678 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()),
1679 	    "zero-filled");
1680 }
1681 
1682 void
test_write_address_filled()1683 test_write_address_filled()
1684 {
1685 	write_and_verify_pattern(empty, TRUE, get_vm_address(), round_page(get_vm_size()), "address-filled");
1686 }
1687 
1688 void
test_write_checkerboard()1689 test_write_checkerboard()
1690 {
1691 	write_and_verify_pattern(checkerboard, FALSE, get_vm_address(), round_page(get_vm_size()), "checkerboard");
1692 }
1693 
1694 void
test_write_reverse_checkerboard()1695 test_write_reverse_checkerboard()
1696 {
1697 	write_and_verify_pattern(checkerboard, TRUE, get_vm_address(), round_page(get_vm_size()), "reverse checkerboard");
1698 }
1699 
1700 void
test_write_page_ends()1701 test_write_page_ends()
1702 {
1703 	write_and_verify_pattern(page_ends, FALSE, get_vm_address(), round_page(get_vm_size()), "page ends");
1704 }
1705 
1706 void
test_write_page_interiors()1707 test_write_page_interiors()
1708 {
1709 	write_and_verify_pattern(page_ends, TRUE, get_vm_address(), round_page(get_vm_size()), "page interiors");
1710 }
1711 
1712 /*********************************/
1713 /* Allocation error return tests */
1714 /*********************************/
1715 
1716 /* Reallocating a page in the smallest aligned region containing the
1717  * given allocated range fails. */
1718 void
test_reallocate_pages()1719 test_reallocate_pages()
1720 {
1721 	allocate_fn_t allocator   = get_allocator();
1722 	vm_map_t this_task        = mach_task_self();
1723 	mach_vm_address_t address = mach_vm_trunc_page(get_vm_address());
1724 	mach_vm_size_t size       = aligned_size(get_vm_address(), get_vm_size());
1725 	mach_vm_address_t i;
1726 	kern_return_t kr;
1727 
1728 	logv(
1729 		"Reallocating pages in allocated region of address 0x%jx "
1730 		"and size 0x%jx (%ju)...",
1731 		(uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1732 	for (i = address; i < address + size; i += vm_page_size) {
1733 		kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
1734 		assert_mach_return(kr, KERN_NO_SPACE,
1735 		    "Allocator at address 0x%jx expected KERN_NO_SPACE",
1736 		    (uintmax_t)address);
1737 	}
1738 	logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE));
1739 }
1740 
1741 /* Allocating in VM_MAP_NULL fails. */
1742 void
test_allocate_in_null_map()1743 test_allocate_in_null_map()
1744 {
1745 	mach_vm_address_t address = get_vm_address();
1746 	mach_vm_size_t size       = get_vm_size();
1747 	int flag                  = get_address_flag();
1748 
1749 	log_allocation(size, flag, address, "in NULL VM map");
1750 	assert_mach_return(get_allocator()(VM_MAP_NULL, &address, size, flag), MACH_SEND_INVALID_DEST, "Allocator");
1751 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
1752 }
1753 
1754 /* Allocating with non-user flags fails. */
1755 void
test_allocate_with_kernel_flags()1756 test_allocate_with_kernel_flags()
1757 {
1758 	allocate_fn_t allocator   = get_allocator();
1759 	vm_map_t this_task        = mach_task_self();
1760 	mach_vm_address_t address = get_vm_address();
1761 	mach_vm_size_t size       = get_vm_size();
1762 	int flag                  = get_address_flag();
1763 	int bad_flag, i;
1764 	kern_return_t kr;
1765 	int valid_flags = VM_FLAGS_USER_ALLOCATE | VM_FLAGS_USER_MAP | VM_FLAGS_USER_REMAP | VM_FLAGS_ALIAS_MASK;
1766 
1767 	log_allocation(size, flag, address, "with various invalid flags");
1768 	for (i = 0; i < sizeof(int) * 8; i++) {
1769 		int test_flag = 1 << i;
1770 
1771 		/* Skip user valid flags */
1772 		if (valid_flags & test_flag) {
1773 			continue;
1774 		}
1775 
1776 		bad_flag = test_flag | flag;
1777 		kr = allocator(this_task, &address, size, bad_flag);
1778 		assert_mach_return(kr, KERN_INVALID_ARGUMENT,
1779 		    "Allocator with invalid flag 0x%x expected KERN_INVALID_ARGUMENT.",
1780 		    bad_flag);
1781 	}
1782 	logv("Returned expected error with each invalid flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1783 }
1784 
/* Allocating super-page with incompatible flags fails. */
/* NOTE(review): body is intentionally empty in this revision — no
 * super-page scenario is exercised here. Confirm whether this is a
 * stub awaiting implementation or a deliberately disabled test kept
 * so the suite's test table stays complete. */
void
test_allocate_superpage_with_incompatible_flags()
{
}
1790 
1791 /*****************************/
1792 /* mach_vm_map() error tests */
1793 /*****************************/
1794 
1795 /* mach_vm_map() fails with invalid protection or inheritance
1796  *  arguments. */
1797 void
test_mach_vm_map_protection_inheritance_error()1798 test_mach_vm_map_protection_inheritance_error()
1799 {
1800 	kern_return_t kr;
1801 	vm_map_t my_task          = mach_task_self();
1802 	mach_vm_address_t address = get_vm_address();
1803 	mach_vm_size_t size       = get_vm_size();
1804 	vm_map_offset_t mask = (get_allocator() == wrapper_mach_vm_map || get_allocator() == wrapper_mach_vm_map_named_entry)
1805 	    ? (mach_vm_offset_t)0
1806 	    : (mach_vm_offset_t)get_mask();
1807 	int flag                    = get_address_flag();
1808 	mach_port_t object_handle   = MACH_PORT_NULL;
1809 	vm_prot_t cur_protections[] = {VM_PROT_DEFAULT, (VM_PROT_ALL | VM_PROT_ALLEXEC) + 1, ~VM_PROT_IS_MASK, INT_MAX};
1810 	vm_prot_t max_protections[] = {VM_PROT_ALL, (VM_PROT_ALL | VM_PROT_ALLEXEC) + 1, ~VM_PROT_IS_MASK, INT_MAX};
1811 	vm_inherit_t inheritances[] = {VM_INHERIT_DEFAULT, VM_INHERIT_LAST_VALID + 1, UINT_MAX};
1812 	int i, j, k;
1813 
1814 	if (get_allocator() == wrapper_mach_vm_map_named_entry) {
1815 		assert_mach_success(memory_entry(&size, &object_handle), "mach_make_memory_entry_64()");
1816 	}
1817 	log_allocation(size, flag, address, "with various invalid protection/inheritance arguments");
1818 
1819 	for (i = 0; i < 4; i++) {
1820 		for (j = 0; j < 4; j++) {
1821 			for (k = 0; k < 3; k++) {
1822 				/* Skip the case with all valid arguments. */
1823 				if (i == (j == (k == 0))) {
1824 					continue;
1825 				}
1826 				kr = mach_vm_map(my_task, &address, size, mask, flag, object_handle, (memory_object_offset_t)0, FALSE,
1827 				    cur_protections[i], max_protections[j], inheritances[k]);
1828 				assert_mach_return(kr, KERN_INVALID_ARGUMENT,
1829 				    "mach_vm_map() "
1830 				    "with cur_protection 0x%x, max_protection 0x%x, "
1831 				    "inheritance 0x%x expected KERN_INVALID_ARGUMENT",
1832 				    cur_protections[i], max_protections[j], inheritances[k]);
1833 			}
1834 		}
1835 	}
1836 	logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1837 }
1838 
1839 /* mach_vm_map() with unspecified address fails if the starting
1840  *  address overflows when rounded up to a boundary value. */
1841 void
test_mach_vm_map_large_mask_overflow_error()1842 test_mach_vm_map_large_mask_overflow_error()
1843 {
1844 	mach_vm_address_t address = 0x1;
1845 	mach_vm_size_t size       = get_vm_size();
1846 	mach_vm_offset_t mask     = (mach_vm_offset_t)UINTMAX_MAX;
1847 	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
1848 	 *  address, see 8003930. */
1849 	kern_return_t kr_expected = size ? KERN_NO_SPACE : KERN_INVALID_ARGUMENT;
1850 
1851 	logv(
1852 		"Allocating 0x%jx (%ju) byte%s at an unspecified address "
1853 		"starting at 0x%jx with mask 0x%jx...",
1854 		(uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, (uintmax_t)mask);
1855 	assert_mach_return(mach_vm_map(mach_task_self(), &address, size, mask, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
1856 	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT),
1857 	    kr_expected, "mach_vm_map()");
1858 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
1859 }
1860 
1861 /************************/
1862 /* Size edge case tests */
1863 /************************/
1864 
/* Helper for the size edge-case tests: attempt to allocate `size`
 * bytes at an unspecified address and assert that the allocator
 * returns exactly `expected_kr` (success or a specific error,
 * depending on the edge case being probed). On success, `*address`
 * holds the allocated address; the caller owns the deallocation. */
void
allocate_edge_size(mach_vm_address_t * address, mach_vm_size_t size, kern_return_t expected_kr)
{
	logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
	assert_allocate_return(address, size, VM_FLAGS_ANYWHERE, expected_kr);
	logv("Returned expected value: %s.", mach_error_string(expected_kr));
}
1872 
1873 void
test_allocate_zero_size()1874 test_allocate_zero_size()
1875 {
1876 	mach_vm_address_t address = 0x0;
1877 	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
1878 	 *  address, see 8003930. Other allocators succeed. */
1879 	kern_return_t kr_expected = (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;
1880 
1881 	allocate_edge_size(&address, 0, kr_expected);
1882 	if (kr_expected == KERN_SUCCESS) {
1883 		deallocate_range(address, 0);
1884 	}
1885 }
1886 
1887 /* Testing the allocation of the largest size that does not overflow
1888  * when rounded up to a page-aligned value. */
1889 void
test_allocate_invalid_large_size()1890 test_allocate_invalid_large_size()
1891 {
1892 	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
1893 	if (get_allocator() != wrapper_mach_vm_map_named_entry) {
1894 		mach_vm_address_t address = 0x0;
1895 		allocate_edge_size(&address, size, KERN_NO_SPACE);
1896 	} else {
1897 		/* Named entries cannot currently be bigger than 4 GB
1898 		 *  - 4 kb. */
1899 		mach_port_t object_handle = MACH_PORT_NULL;
1900 		logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
1901 		assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)0,
1902 		    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
1903 		    KERN_FAILURE, "mach_make_memory_entry_64()");
1904 		logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE));
1905 	}
1906 }
1907 
1908 /* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a
1909  * page-aligned value. */
1910 void
test_allocate_overflowing_size()1911 test_allocate_overflowing_size()
1912 {
1913 	mach_vm_address_t address = 0x0;
1914 
1915 	allocate_edge_size(&address, (mach_vm_size_t)UINTMAX_MAX, KERN_INVALID_ARGUMENT);
1916 }
1917 
1918 /****************************/
1919 /* Address allocation tests */
1920 /****************************/
1921 
1922 /* Allocation at address zero fails iff size is nonzero. */
1923 void
test_allocate_at_zero()1924 test_allocate_at_zero()
1925 {
1926 	mach_vm_address_t address = 0x0;
1927 	mach_vm_size_t size       = get_vm_size();
1928 
1929 	kern_return_t kr_expected =
1930 	    size ? KERN_INVALID_ADDRESS : (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;
1931 
1932 	logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1933 	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
1934 	logv("Returned expected value: %s.", mach_error_string(kr_expected));
1935 	if (kr_expected == KERN_SUCCESS) {
1936 		T_QUIET; T_ASSERT_EQ(address, 0ull,
1937 		    "Address 0x%jx is unexpectedly "
1938 		    "nonzero.",
1939 		    (uintmax_t)address);
1940 		logv("Allocated address 0x%jx is zero.", (uintmax_t)address);
1941 		deallocate_range(address, size);
1942 	}
1943 }
1944 
1945 /* Allocation at page-aligned but 2 MB boundary-unaligned address
1946  *  fails with KERN_NO_SPACE. */
1947 void
test_allocate_2MB_boundary_unaligned_page_aligned_address()1948 test_allocate_2MB_boundary_unaligned_page_aligned_address()
1949 {
1950 	mach_vm_size_t size = get_vm_size();
1951 
1952 	mach_vm_address_t address = get_fixed_address(size + vm_page_size) + vm_page_size;
1953 	logv(
1954 		"Found 2 MB boundary-unaligned, page aligned address "
1955 		"0x%jx.",
1956 		(uintmax_t)address);
1957 
1958 	/* mach_vm_allocate() cannot allocate 0 bytes, and fails with a
1959 	 *  fixed boundary-unaligned truncated address. */
1960 	kern_return_t kr_expected = (!size && get_allocator() != wrapper_mach_vm_allocate)
1961 	    ? KERN_INVALID_ARGUMENT
1962 	    : (get_allocator() == wrapper_mach_vm_map_2MB) ? KERN_NO_SPACE : KERN_SUCCESS;
1963 	logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1964 	    (uintmax_t)address);
1965 	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
1966 	logv("Returned expected value: %s.", mach_error_string(kr_expected));
1967 	if (kr_expected == KERN_SUCCESS) {
1968 		deallocate_range(address, size);
1969 	}
1970 }
1971 
1972 /* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for
1973  *  an allocation address at 0x0, while mach_vm_map() starts at the
1974  *  supplied address and does not wrap around. See 8016663. */
1975 void
test_allocate_page_with_highest_address_hint()1976 test_allocate_page_with_highest_address_hint()
1977 {
1978 	/* Highest valid page-aligned address. */
1979 	mach_vm_address_t address = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
1980 
1981 	logv(
1982 		"Allocating one page with unspecified address, but hint at "
1983 		"0x%jx...",
1984 		(uintmax_t)address);
1985 	if (get_allocator() == wrapper_mach_vm_allocate) {
1986 		/* mach_vm_allocate() starts from 0x0 and succeeds. */
1987 		assert_allocate_success(&address, vm_page_size, VM_FLAGS_ANYWHERE);
1988 		logv("Memory allocated at address 0x%jx.", (uintmax_t)address);
1989 		assert_aligned_address(address);
1990 		deallocate_range(address, vm_page_size);
1991 	} else {
1992 		/* mach_vm_map() starts from the supplied address, and fails
1993 		 *  with KERN_NO_SPACE, see 8016663. */
1994 		assert_allocate_return(&address, vm_page_size, VM_FLAGS_ANYWHERE, KERN_NO_SPACE);
1995 		logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE));
1996 	}
1997 }
1998 
1999 /* Allocators find an allocation address with a first fit strategy. */
2000 void
test_allocate_first_fit_pages()2001 test_allocate_first_fit_pages()
2002 {
2003 	allocate_fn_t allocator    = get_allocator();
2004 	mach_vm_address_t address1 = 0x0;
2005 	mach_vm_address_t i;
2006 	kern_return_t kr;
2007 	vm_map_t this_task = mach_task_self();
2008 
2009 	logv(
2010 		"Looking for first fit address for allocating one "
2011 		"page...");
2012 	assert_allocate_success(&address1, vm_page_size, VM_FLAGS_ANYWHERE);
2013 	logv("Found address 0x%jx.", (uintmax_t)address1);
2014 	assert_aligned_address(address1);
2015 	mach_vm_address_t address2 = address1;
2016 	logv(
2017 		"Looking for next higher first fit address for allocating "
2018 		"one page...");
2019 	assert_allocate_success(&address2, vm_page_size, VM_FLAGS_ANYWHERE);
2020 	logv("Found address 0x%jx.", (uintmax_t)address2);
2021 	assert_aligned_address(address2);
2022 	T_QUIET; T_ASSERT_GT(address2, address1,
2023 	    "Second address 0x%jx is "
2024 	    "unexpectedly not higher than first address 0x%jx.",
2025 	    (uintmax_t)address2, (uintmax_t)address1);
2026 
2027 	logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)address1, (uintmax_t)address2);
2028 	for (i = address1; i <= address2; i += vm_page_size) {
2029 		kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
2030 		assert_mach_failure(kr,
2031 		    "Allocator at address 0x%jx "
2032 		    "unexpectedly succeeded.",
2033 		    (uintmax_t)i);
2034 	}
2035 	logv("Expectedly returned error at each page.");
2036 	deallocate_range(address1, vm_page_size);
2037 	deallocate_range(address2, vm_page_size);
2038 }
2039 
2040 /*******************************/
2041 /* Deallocation segfault tests */
2042 /*******************************/
2043 
2044 /* mach_vm_deallocate() deallocates the smallest aligned region
2045  * (integral number of pages) containing the given range. */
2046 
/* Addresses in deallocated range are inaccessible. */
/* Helper: deallocate the fixture's region, then read one value at
 * `address` (described by `position` in the logs). The read is
 * expected to fault — the test passes by dying with SIGSEGV, which the
 * suite harness records as success for this kind of test. Reaching the
 * assertion below means the read unexpectedly succeeded. */
void
access_deallocated_range_address(mach_vm_address_t address, const char * position)
{
	logv("Will deallocate and read from %s 0x%jx of deallocated range...", position, (uintmax_t)address);
	deallocate();
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx. "
	    "Should have died with signal SIGSEGV.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
2058 
2059 /* Start of deallocated range is inaccessible. */
2060 void
test_access_deallocated_range_start()2061 test_access_deallocated_range_start()
2062 {
2063 	access_deallocated_range_address(get_vm_address(), "start");
2064 }
2065 
2066 /* Middle of deallocated range is inaccessible. */
2067 void
test_access_deallocated_range_middle()2068 test_access_deallocated_range_middle()
2069 {
2070 	access_deallocated_range_address(get_vm_address() + (round_page(get_vm_size()) >> 1), "middle");
2071 }
2072 
2073 /* End of deallocated range is inaccessible. */
2074 void
test_access_deallocated_range_end()2075 test_access_deallocated_range_end()
2076 {
2077 	access_deallocated_range_address(round_page(get_vm_size()) - vm_address_size + get_vm_address(), "end");
2078 }
2079 
/* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We
 * deallocate the largest valid aligned size to avoid overflowing when
 * rounding up. */
void
test_deallocate_suicide()
{
	mach_vm_address_t address = 0x0;
	/* Largest page-multiple size representable in mach_vm_size_t. */
	mach_vm_size_t size       = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (uintmax_t)address);
	/* This tears down the process's own mappings; returning from the
	 * call at all — with any status — is a test failure, since the
	 * harness expects the child to die with SIGSEGV or SIGBUS. */
	kern_return_t kr = mach_vm_deallocate(mach_task_self(), address, size);
	T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and "
	    "size 0x%jx (%ju) unexpectedly returned: %s. "
	    "Should have died with signal SIGSEGV or SIGBUS.",
	    (uintmax_t)address, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr));
}
2096 
2097 /***************************************/
2098 /* Deallocation and reallocation tests */
2099 /***************************************/
2100 
/* Deallocating memory twice succeeds. */
void
test_deallocate_twice()
{
	/* The second call targets a range the first call already
	 * released; the behavior under test is that this is accepted
	 * (the helper asserts success on each call). */
	deallocate();
	deallocate();
}
2108 
2109 /* Deallocated and reallocated memory is zero-filled. Deallocated
2110  * memory is inaccessible since it can be reallocated. */
2111 void
test_write_pattern_deallocate_reallocate_zero_filled()2112 test_write_pattern_deallocate_reallocate_zero_filled()
2113 {
2114 	mach_vm_address_t address = get_vm_address();
2115 	mach_vm_size_t size       = get_vm_size();
2116 
2117 	write_pattern(page_ends, FALSE, address, size, "page ends");
2118 	logv("Deallocating, then Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2119 	    (uintmax_t)address);
2120 	deallocate();
2121 	assert_allocate_success(&address, size, VM_FLAGS_FIXED);
2122 	logv("Memory allocated.");
2123 	verify_pattern(empty, FALSE, address, size, "zero-filled");
2124 	deallocate();
2125 }
2126 
2127 /********************************/
2128 /* Deallocation edge case tests */
2129 /********************************/
2130 
2131 /* Zero size deallocation always succeeds. */
2132 void
test_deallocate_zero_size_ranges()2133 test_deallocate_zero_size_ranges()
2134 {
2135 	int i;
2136 	kern_return_t kr;
2137 	vm_map_t this_task            = mach_task_self();
2138 	mach_vm_address_t addresses[] = {0x0,
2139 		                         0x1,
2140 		                         vm_page_size - 1,
2141 		                         vm_page_size,
2142 		                         vm_page_size + 1,
2143 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2144 		                         (mach_vm_address_t)UINT_MAX,
2145 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2146 		                         (mach_vm_address_t)UINTMAX_MAX};
2147 	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2148 
2149 	logv("Deallocating 0x0 (0) bytes at various addresses...");
2150 	for (i = 0; i < numofaddresses; i++) {
2151 		kr = mach_vm_deallocate(this_task, addresses[i], 0);
2152 		assert_mach_success(kr, "mach_vm_deallocate() at "
2153 		    "address 0x%jx unexpectedly failed",
2154 		    (uintmax_t)addresses[i]);
2155 	}
2156 	logv("Deallocations successful.");
2157 }
2158 
2159 /* Deallocating a range wrapped around the address space fails. */
2160 void
test_deallocate_wrapped_around_ranges()2161 test_deallocate_wrapped_around_ranges()
2162 {
2163 	int i;
2164 	kern_return_t kr;
2165 	vm_map_t this_task = mach_task_self();
2166 	struct {
2167 		mach_vm_address_t address;
2168 		mach_vm_size_t size;
2169 	} ranges[] = {
2170 		{0x1, (mach_vm_size_t)UINTMAX_MAX},
2171 		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
2172 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
2173 		{(mach_vm_address_t)UINTMAX_MAX, 1},
2174 	};
2175 	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
2176 
2177 	logv(
2178 		"Deallocating various memory ranges wrapping around the "
2179 		"address space...");
2180 
2181 	for (i = 0; i < numofranges; i++) {
2182 		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
2183 		assert_mach_return(kr, KERN_INVALID_ARGUMENT,
2184 		    "mach_vm_deallocate() with address 0x%jx and size "
2185 		    "0x%jx (%ju) expected KERN_INVALID_ARGUMENT",
2186 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size,
2187 		    (uintmax_t)ranges[i].size);
2188 	}
2189 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
2190 }
2191 
2192 /* Deallocating in VM_MAP_NULL fails. */
2193 void
test_deallocate_in_null_map()2194 test_deallocate_in_null_map()
2195 {
2196 	mach_vm_address_t address = get_vm_address();
2197 	mach_vm_size_t size       = get_vm_size();
2198 
2199 	logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx in NULL VM map...",
2200 	    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
2201 	assert_mach_return(mach_vm_deallocate(VM_MAP_NULL, address, size), MACH_SEND_INVALID_DEST, "mach_vm_deallocate()");
2202 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2203 }
2204 
2205 /*****************************/
2206 /* mach_vm_read() main tests */
2207 /*****************************/
2208 
2209 /* Read memory of size less than a page has aligned starting
2210  * address. Otherwise, the destination buffer's starting address has
2211  * the same boundary offset as the source region's. */
2212 void
test_read_address_offset()2213 test_read_address_offset()
2214 {
2215 	mach_vm_address_t address = get_vm_address();
2216 	mach_vm_size_t size       = get_vm_size();
2217 
2218 	if (size < vm_page_size * 2 || get_address_alignment()) {
2219 		assert_aligned_address(address);
2220 		logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address);
2221 	} else {
2222 		T_QUIET; T_ASSERT_EQ(((address - 1) & (vm_page_size - 1)), 0ull,
2223 		    "Buffer "
2224 		    "address 0x%jx does not have the expected boundary "
2225 		    "offset of 1.",
2226 		    (uintmax_t)address);
2227 		logv(
2228 			"Buffer address 0x%jx has the expected boundary "
2229 			"offset of 1.",
2230 			(uintmax_t)address);
2231 	}
2232 }
2233 
2234 /* Reading from VM_MAP_NULL fails. */
2235 void
test_read_null_map()2236 test_read_null_map()
2237 {
2238 	mach_vm_address_t address = get_vm_address();
2239 	mach_vm_size_t size       = get_vm_size();
2240 	vm_offset_t read_address;
2241 	mach_msg_type_number_t read_size;
2242 
2243 	logv(
2244 		"Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM "
2245 		"map...",
2246 		(uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
2247 	assert_mach_return(mach_vm_read(VM_MAP_NULL, address, size, &read_address, &read_size),
2248 	    MACH_SEND_INVALID_DEST, "mach_vm_read()");
2249 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2250 }
2251 
2252 /* Reading partially deallocated memory fails. */
2253 void
test_read_partially_deallocated_range()2254 test_read_partially_deallocated_range()
2255 {
2256 	mach_vm_address_t address   = get_vm_address();
2257 	mach_vm_size_t size         = get_vm_size();
2258 	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
2259 	vm_offset_t read_address;
2260 	mach_msg_type_number_t read_size;
2261 
2262 	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2263 	deallocate_vm_page_early(mid_point);
2264 	logv("Page deallocated.");
2265 
2266 	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2267 	    (uintmax_t)address);
2268 	assert_read_return(address, size, &read_address, &read_size, KERN_INVALID_ADDRESS);
2269 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2270 }
2271 
2272 /* Reading partially read-protected memory fails. */
2273 void
test_read_partially_unreadable_range()2274 test_read_partially_unreadable_range()
2275 {
2276 	mach_vm_address_t address   = get_vm_address();
2277 	mach_vm_size_t size         = get_vm_size();
2278 	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
2279 	vm_offset_t read_address;
2280 	mach_msg_type_number_t read_size;
2281 
2282 	/*  For sizes < msg_ool_size_small, vm_map_copyin_common() uses
2283 	 *  vm_map_copyin_kernel_buffer() to read in the memory,
2284 	 *  returning different errors, see 8182239. */
2285 	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2286 
2287 	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2288 	assert_mach_success(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2289 	logv("Page read-protected.");
2290 
2291 	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2292 	    (uintmax_t)address);
2293 	assert_read_return(address, size, &read_address, &read_size, kr_expected);
2294 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
2295 }
2296 
2297 /**********************************/
2298 /* mach_vm_read() edge case tests */
2299 /**********************************/
2300 
2301 void
read_edge_size(mach_vm_size_t size,kern_return_t expected_kr)2302 read_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
2303 {
2304 	int i;
2305 	kern_return_t kr;
2306 	vm_map_t this_task            = mach_task_self();
2307 	mach_vm_address_t addresses[] = {vm_page_size - 1,
2308 		                         vm_page_size,
2309 		                         vm_page_size + 1,
2310 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2311 		                         (mach_vm_address_t)UINT_MAX,
2312 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2313 		                         (mach_vm_address_t)UINTMAX_MAX};
2314 	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2315 	vm_offset_t read_address;
2316 	mach_msg_type_number_t read_size;
2317 
2318 	logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
2319 	for (i = 0; i < numofaddresses; i++) {
2320 		kr = mach_vm_read(this_task, addresses[i], size, &read_address, &read_size);
2321 		assert_mach_return(kr, expected_kr,
2322 		    "mach_vm_read() at "
2323 		    "address 0x%jx expected %s",
2324 		    (uintmax_t)addresses[i], mach_error_string(expected_kr));
2325 	}
2326 	logv(
2327 		"mach_vm_read() returned expected value in each case: "
2328 		"%s.",
2329 		mach_error_string(expected_kr));
2330 }
2331 
2332 /* Reading 0 bytes always succeeds. */
2333 void
test_read_zero_size()2334 test_read_zero_size()
2335 {
2336 	read_edge_size(0, KERN_SUCCESS);
2337 }
2338 
2339 /* Reading 4GB or higher always fails. */
2340 void
test_read_invalid_large_size()2341 test_read_invalid_large_size()
2342 {
2343 	read_edge_size((mach_vm_size_t)UINT_MAX + 1, KERN_INVALID_ARGUMENT);
2344 }
2345 
2346 /* Reading a range wrapped around the address space fails. */
2347 void
test_read_wrapped_around_ranges()2348 test_read_wrapped_around_ranges()
2349 {
2350 	int i;
2351 	kern_return_t kr;
2352 	vm_map_t this_task = mach_task_self();
2353 	struct {
2354 		mach_vm_address_t address;
2355 		mach_vm_size_t size;
2356 	} ranges[] = {
2357 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
2358 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
2359 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
2360 		{(mach_vm_address_t)UINTMAX_MAX, 1},
2361 	};
2362 	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
2363 	vm_offset_t read_address;
2364 	mach_msg_type_number_t read_size;
2365 
2366 	logv(
2367 		"Reading various memory ranges wrapping around the "
2368 		"address space...");
2369 	for (i = 0; i < numofranges; i++) {
2370 		kr = mach_vm_read(this_task, ranges[i].address, ranges[i].size, &read_address, &read_size);
2371 		assert_mach_return(kr, KERN_INVALID_ADDRESS,
2372 		    "mach_vm_read() at address 0x%jx with size "
2373 		    "0x%jx (%ju) expected KERN_INVALID_ADDRESS",
2374 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size);
2375 	}
2376 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2377 }
2378 
2379 /********************************/
2380 /* mach_vm_read() pattern tests */
2381 /********************************/
2382 
/* Write a pattern on pre-allocated memory, read into a buffer and
 * verify the pattern on the buffer. */
void
write_read_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t address = get_vm_address();

	write_pattern(filter, reversed, address, get_vm_size(), pattern_name);
	/* read_deallocate() repoints the fixture's address/size at the
	 * buffer produced by the read — presumably also releasing the
	 * source region; confirm against the helper's definition. */
	read_deallocate();
	/* Getting the address and size of the read buffer. */
	mach_vm_address_t read_address = get_vm_address();
	mach_vm_size_t read_size = get_vm_size();
	logv(
		"Verifying %s pattern on buffer of "
		"address 0x%jx and size 0x%jx (%ju)...",
		pattern_name, (uintmax_t)read_address, (uintmax_t)read_size, (uintmax_t)read_size);
	/* The trailing `address` argument passes the ORIGINAL region's
	 * start, so the buffer is verified against the pattern positions
	 * of the source range rather than of the buffer itself. */
	filter_addresses_do_else(filter, reversed, read_address, read_size, verify_address, read_zero, address);
	logv("Pattern verified on destination buffer.");
}
2402 
2403 void
test_read_address_filled()2404 test_read_address_filled()
2405 {
2406 	write_read_verify_pattern(empty, TRUE, "address-filled");
2407 }
2408 
2409 void
test_read_checkerboard()2410 test_read_checkerboard()
2411 {
2412 	write_read_verify_pattern(checkerboard, FALSE, "checkerboard");
2413 }
2414 
2415 void
test_read_reverse_checkerboard()2416 test_read_reverse_checkerboard()
2417 {
2418 	write_read_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
2419 }
2420 
2421 /***********************************/
2422 /* mach_vm_write() edge case tests */
2423 /***********************************/
2424 
2425 /* Writing in VM_MAP_NULL fails. */
2426 void
test_write_null_map()2427 test_write_null_map()
2428 {
2429 	mach_vm_address_t address          = get_vm_address();
2430 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2431 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2432 
2433 	logv(
2434 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2435 		"memory at address 0x%jx in NULL VM MAP...",
2436 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2437 	assert_mach_return(mach_vm_write(VM_MAP_NULL, address, data, buffer_size), MACH_SEND_INVALID_DEST, "mach_vm_write()");
2438 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2439 }
2440 
/* Writing 0 bytes always succeeds. */
void
test_write_zero_size()
{
	/* Shrink the fixture's source buffer to zero length, then
	 * perform the write; write_buffer() asserts the call succeeds. */
	set_buffer_size(0);
	write_buffer();
}
2448 
2449 /*****************************************/
2450 /* mach_vm_write() inaccessibility tests */
2451 /*****************************************/
2452 
2453 /* Writing a partially deallocated buffer fails. */
2454 void
test_write_partially_deallocated_buffer()2455 test_write_partially_deallocated_buffer()
2456 {
2457 	mach_vm_address_t address          = get_vm_address();
2458 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2459 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2460 	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);
2461 
2462 	logv(
2463 		"Deallocating a mid-range buffer page at address "
2464 		"0x%jx...",
2465 		(uintmax_t)buffer_mid_point);
2466 	deallocate_buffer_page_early(buffer_mid_point);
2467 	logv("Page deallocated.");
2468 
2469 	logv(
2470 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2471 		"memory at address 0x%jx...",
2472 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2473 	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
2474 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
2475 }
2476 
2477 /* Writing a partially read-protected buffer fails. */
2478 void
test_write_partially_unreadable_buffer()2479 test_write_partially_unreadable_buffer()
2480 {
2481 	mach_vm_address_t address          = get_vm_address();
2482 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2483 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2484 	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);
2485 
2486 	logv(
2487 		"Read-protecting a mid-range buffer page at address "
2488 		"0x%jx...",
2489 		(uintmax_t)buffer_mid_point);
2490 	assert_mach_success(mach_vm_protect(mach_task_self(), buffer_mid_point, vm_page_size, FALSE, VM_PROT_WRITE),
2491 	    "mach_vm_protect()");
2492 	logv("Page read-protected.");
2493 
2494 	logv(
2495 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2496 		"memory at address 0x%jx...",
2497 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2498 	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
2499 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
2500 }
2501 
2502 /* Writing on partially deallocated memory fails. */
2503 void
test_write_on_partially_deallocated_range()2504 test_write_on_partially_deallocated_range()
2505 {
2506 	mach_vm_address_t address          = get_vm_address();
2507 	mach_vm_address_t start            = mach_vm_trunc_page(address);
2508 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2509 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2510 
2511 	logv(
2512 		"Deallocating the first destination page at address "
2513 		"0x%jx...",
2514 		(uintmax_t)start);
2515 	deallocate_vm_page_early(start);
2516 	logv("Page deallocated.");
2517 
2518 	logv(
2519 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2520 		"memory at address 0x%jx...",
2521 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2522 	assert_write_return(address, data, buffer_size, KERN_INVALID_ADDRESS);
2523 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2524 }
2525 
2526 /* Writing on partially unwritable memory fails. */
2527 void
test_write_on_partially_unwritable_range()2528 test_write_on_partially_unwritable_range()
2529 {
2530 	mach_vm_address_t address          = get_vm_address();
2531 	mach_vm_address_t start            = mach_vm_trunc_page(address);
2532 	vm_offset_t data                   = (vm_offset_t)get_buffer_address();
2533 	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2534 
2535 	/*  For sizes < msg_ool_size_small,
2536 	 *  vm_map_copy_overwrite_nested() uses
2537 	 *  vm_map_copyout_kernel_buffer() to read in the memory,
2538 	 *  returning different errors, see 8217123. */
2539 	kern_return_t kr_expected = (buffer_size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2540 
2541 	logv(
2542 		"Write-protecting the first destination page at address "
2543 		"0x%jx...",
2544 		(uintmax_t)start);
2545 	assert_mach_success(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2546 	logv("Page write-protected.");
2547 
2548 	logv(
2549 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2550 		"memory at address 0x%jx...",
2551 		(uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2552 	assert_write_return(address, data, buffer_size, kr_expected);
2553 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
2554 }
2555 
2556 /*********************************/
2557 /* mach_vm_write() pattern tests */
2558 /*********************************/
2559 
2560 /* Verify that a zero-filled buffer and destination memory are still
2561  * zero-filled after writing. */
2562 void
test_zero_filled_write()2563 test_zero_filled_write()
2564 {
2565 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
2566 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
2567 	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
2568 }
2569 
2570 /* Write a pattern on a buffer, write the buffer into some destination
2571  * memory, and verify the pattern on both buffer and destination. */
2572 void
pattern_write(address_filter_t filter,boolean_t reversed,const char * pattern_name)2573 pattern_write(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2574 {
2575 	mach_vm_address_t address        = get_vm_address();
2576 	mach_vm_size_t size              = get_vm_size();
2577 	mach_vm_address_t buffer_address = get_buffer_address();
2578 	mach_vm_size_t buffer_size       = get_buffer_size();
2579 
2580 	write_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2581 	write_buffer();
2582 	verify_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2583 	logv(
2584 		"Verifying %s pattern on destination of "
2585 		"address 0x%jx and size 0x%jx (%ju)...",
2586 		pattern_name, (uintmax_t)address, (uintmax_t)buffer_size, (uintmax_t)size);
2587 	filter_addresses_do_else(filter, reversed, address, buffer_size, verify_address, read_zero, buffer_address);
2588 	logv("Pattern verified on destination.");
2589 }
2590 
/* Write and verify the address-filled pattern (every address outside
 * the empty filter, i.e. the whole range). */
void
test_address_filled_write()
{
	pattern_write(empty, TRUE, "address-filled");
}
2596 
/* Write and verify the checkerboard pattern. */
void
test_checkerboard_write()
{
	pattern_write(checkerboard, FALSE, "checkerboard");
}
2602 
/* Write and verify the complement of the checkerboard pattern. */
void
test_reverse_checkerboard_write()
{
	pattern_write(checkerboard, TRUE, "reverse checkerboard");
}
2608 
2609 /**********************************/
2610 /* mach_vm_copy() edge case tests */
2611 /**********************************/
2612 
2613 /* Copying in VM_MAP_NULL fails. */
2614 void
test_copy_null_map()2615 test_copy_null_map()
2616 {
2617 	mach_vm_address_t source    = get_vm_address();
2618 	mach_vm_address_t dest      = get_buffer_address();
2619 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2620 
2621 	logv(
2622 		"Copying buffer of address 0x%jx and size 0x%jx (%ju), on "
2623 		"memory at address 0x%jx in NULL VM MAP...",
2624 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2625 	assert_mach_return(mach_vm_copy(VM_MAP_NULL, source, size, dest), MACH_SEND_INVALID_DEST, "mach_vm_copy()");
2626 	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2627 }
2628 
2629 void
copy_edge_size(mach_vm_size_t size,kern_return_t expected_kr)2630 copy_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
2631 {
2632 	int i;
2633 	kern_return_t kr;
2634 	vm_map_t this_task            = mach_task_self();
2635 	mach_vm_address_t addresses[] = {0x0,
2636 		                         0x1,
2637 		                         vm_page_size - 1,
2638 		                         vm_page_size,
2639 		                         vm_page_size + 1,
2640 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2641 		                         (mach_vm_address_t)UINT_MAX,
2642 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2643 		                         (mach_vm_address_t)UINTMAX_MAX};
2644 	int numofaddresses     = sizeof(addresses) / sizeof(addresses[0]);
2645 	mach_vm_address_t dest = 0;
2646 
2647 	logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
2648 	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);
2649 	logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
2650 	for (i = 0; i < numofaddresses; i++) {
2651 		kr = mach_vm_copy(this_task, addresses[i], size, dest);
2652 		assert_mach_return(kr, expected_kr,
2653 		    "mach_vm_copy() at "
2654 		    "address 0x%jx expected %s",
2655 		    (uintmax_t)addresses[i], mach_error_string(expected_kr));
2656 	}
2657 	logv(
2658 		"mach_vm_copy() returned expected value in each case: "
2659 		"%s.",
2660 		mach_error_string(expected_kr));
2661 
2662 	deallocate_range(dest, 4096);
2663 }
2664 
/* Copying 0 bytes always succeeds, even from edge-case source
 * addresses (0, page boundaries, top of the address space). */
void
test_copy_zero_size()
{
	copy_edge_size(0, KERN_SUCCESS);
}
2671 
/* Copying a huge range (UINT_MAX - 1 bytes, just under 4 GB) always
 * fails with KERN_INVALID_ADDRESS from every probed source address. */
void
test_copy_invalid_large_size()
{
	copy_edge_size((mach_vm_size_t)UINT_MAX - 1, KERN_INVALID_ADDRESS);
}
2678 
/* Copying a range wrapped around the address space fails. */
2680 void
test_copy_wrapped_around_ranges()2681 test_copy_wrapped_around_ranges()
2682 {
2683 	int i;
2684 	kern_return_t kr;
2685 	vm_map_t this_task = mach_task_self();
2686 	struct {
2687 		mach_vm_address_t address;
2688 		mach_vm_size_t size;
2689 	} ranges[] = {
2690 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
2691 		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
2692 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
2693 		{(mach_vm_address_t)UINTMAX_MAX, 1},
2694 	};
2695 	int numofranges        = sizeof(ranges) / sizeof(ranges[0]);
2696 	mach_vm_address_t dest = 0;
2697 
2698 	logv("Allocating 0x1000 (4096) bytes...");
2699 	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);
2700 
2701 	logv(
2702 		"Copying various memory ranges wrapping around the "
2703 		"address space...");
2704 	for (i = 0; i < numofranges; i++) {
2705 		kr = mach_vm_copy(this_task, ranges[i].address, ranges[i].size, dest);
2706 		assert_mach_return(kr, KERN_INVALID_ADDRESS,
2707 		    "mach_vm_copy() at address 0x%jx with size "
2708 		    "0x%jx (%ju) expected KERN_INVALID_ADDRESS",
2709 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size);
2710 	}
2711 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2712 
2713 	deallocate_range(dest, 4096);
2714 }
2715 
2716 /********************************/
2717 /* mach_vm_copy() pattern tests */
2718 /********************************/
2719 
2720 /* Write a pattern on pre-allocated region, copy into another region
2721  * and verify the pattern in the region. */
2722 void
write_copy_verify_pattern(address_filter_t filter,boolean_t reversed,const char * pattern_name)2723 write_copy_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2724 {
2725 	mach_vm_address_t source = get_vm_address();
2726 	mach_vm_size_t src_size = get_vm_size();
2727 	write_pattern(filter, reversed, source, src_size, pattern_name);
2728 	/* Getting the address and size of the dest region */
2729 	mach_vm_address_t dest  = get_buffer_address();
2730 	mach_vm_size_t dst_size = get_buffer_size();
2731 
2732 	logv(
2733 		"Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
2734 		"memory at address 0x%jx...",
2735 		(uintmax_t)source, (uintmax_t)dst_size, (uintmax_t)dst_size, (uintmax_t)dest);
2736 	assert_copy_success(source, dst_size, dest);
2737 	logv(
2738 		"Verifying %s pattern in region of "
2739 		"address 0x%jx and size 0x%jx (%ju)...",
2740 		pattern_name, (uintmax_t)dest, (uintmax_t)dst_size, (uintmax_t)dst_size);
2741 	filter_addresses_do_else(filter, reversed, dest, dst_size, verify_address, read_zero, source);
2742 	logv("Pattern verified on destination region.");
2743 }
2744 
/* Copy and verify an address-filled source region. */
void
test_copy_address_filled()
{
	write_copy_verify_pattern(empty, TRUE, "address-filled");
}
2750 
/* Copy and verify a checkerboard-patterned source region. */
void
test_copy_checkerboard()
{
	write_copy_verify_pattern(checkerboard, FALSE, "checkerboard");
}
2756 
/* Copy and verify a reverse-checkerboard-patterned source region. */
void
test_copy_reverse_checkerboard()
{
	write_copy_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
}
2762 
2763 /* Verify that a zero-filled source and destination memory are still
2764  * zero-filled after writing. */
2765 void
test_zero_filled_copy_dest()2766 test_zero_filled_copy_dest()
2767 {
2768 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
2769 	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
2770 	    round_page(get_buffer_size() + get_buffer_offset()), "zero-filled");
2771 }
2772 
2773 /****************************************/
2774 /* mach_vm_copy() inaccessibility tests */
2775 /****************************************/
2776 
2777 /* Copying partially deallocated memory fails. */
2778 void
test_copy_partially_deallocated_range()2779 test_copy_partially_deallocated_range()
2780 {
2781 	mach_vm_address_t source    = get_vm_address();
2782 	mach_vm_size_t size         = get_vm_size();
2783 	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
2784 	mach_vm_address_t dest      = 0;
2785 
2786 	logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2787 	deallocate_vm_page_early(mid_point);
2788 	logv("Page deallocated.");
2789 
2790 	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2791 	    (uintmax_t)source);
2792 
2793 	assert_allocate_copy_return(source, size, &dest, KERN_INVALID_ADDRESS);
2794 
2795 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2796 
2797 	deallocate_range(dest, size);
2798 }
2799 
2800 /* Copy partially read-protected memory fails. */
2801 void
test_copy_partially_unreadable_range()2802 test_copy_partially_unreadable_range()
2803 {
2804 	mach_vm_address_t source    = get_vm_address();
2805 	mach_vm_size_t size         = get_vm_size();
2806 	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
2807 	mach_vm_address_t dest      = 0;
2808 
2809 	/*  For sizes < 1 page, vm_map_copyin_common() uses
2810 	 *  vm_map_copyin_kernel_buffer() to read in the memory,
2811 	 *  returning different errors, see 8182239. */
2812 	kern_return_t kr_expected = (size < vm_page_size) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2813 
2814 	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2815 	assert_mach_success(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2816 	logv("Page read-protected.");
2817 
2818 	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2819 	    (uintmax_t)source);
2820 	assert_allocate_copy_return(source, size, &dest, kr_expected);
2821 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
2822 
2823 	deallocate_range(dest, size);
2824 }
2825 
2826 /* Copying to a partially deallocated region fails. */
2827 void
test_copy_dest_partially_deallocated_region()2828 test_copy_dest_partially_deallocated_region()
2829 {
2830 	mach_vm_address_t dest             = get_vm_address();
2831 	mach_vm_address_t source           = get_buffer_address();
2832 	mach_msg_type_number_t size        = (mach_msg_type_number_t)get_buffer_size();
2833 	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2834 
2835 	logv(
2836 		"Deallocating a mid-range source page at address "
2837 		"0x%jx...",
2838 		(uintmax_t)source_mid_point);
2839 	deallocate_vm_page_early(source_mid_point);
2840 	logv("Page deallocated.");
2841 
2842 	logv(
2843 		"Copying region of address 0x%jx and size 0x%jx (%ju), on "
2844 		"memory at address 0x%jx...",
2845 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2846 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2847 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2848 }
2849 
2850 /* Copying from a partially deallocated region fails. */
2851 void
test_copy_source_partially_deallocated_region()2852 test_copy_source_partially_deallocated_region()
2853 {
2854 	mach_vm_address_t source           = get_vm_address();
2855 	mach_vm_address_t dest             = get_buffer_address();
2856 	mach_msg_type_number_t size        = (mach_msg_type_number_t)get_buffer_size();
2857 	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
2858 
2859 	logv(
2860 		"Deallocating a mid-range source page at address "
2861 		"0x%jx...",
2862 		(uintmax_t)source_mid_point);
2863 	deallocate_vm_page_early(source_mid_point);
2864 	logv("Page deallocated.");
2865 
2866 	logv(
2867 		"Copying region of address 0x%jx and size 0x%jx (%ju), on "
2868 		"memory at address 0x%jx...",
2869 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2870 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2871 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2872 }
2873 
2874 /* Copying from a partially read-protected region fails. */
2875 void
test_copy_source_partially_unreadable_region()2876 test_copy_source_partially_unreadable_region()
2877 {
2878 	mach_vm_address_t source    = get_vm_address();
2879 	mach_vm_address_t dest      = get_buffer_address();
2880 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2881 	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
2882 	kern_return_t kr            = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2883 
2884 	logv(
2885 		"Read-protecting a mid-range buffer page at address "
2886 		"0x%jx...",
2887 		(uintmax_t)mid_point);
2888 	assert_mach_success(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2889 	logv("Page read-protected.");
2890 
2891 	logv(
2892 		"Copying region at address 0x%jx and size 0x%jx (%ju), on "
2893 		"memory at address 0x%jx...",
2894 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2895 
2896 	assert_copy_return(source, size, dest, kr);
2897 	logv("Returned expected error: %s.", mach_error_string(kr));
2898 }
2899 
2900 /* Copying to a partially write-protected region fails. */
2901 void
test_copy_dest_partially_unwriteable_region()2902 test_copy_dest_partially_unwriteable_region()
2903 {
2904 	mach_vm_address_t dest      = get_vm_address();
2905 	mach_vm_address_t source    = get_buffer_address();
2906 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2907 	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2908 
2909 	logv(
2910 		"Read-protecting a mid-range buffer page at address "
2911 		"0x%jx...",
2912 		(uintmax_t)mid_point);
2913 	assert_mach_success(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2914 	logv("Page read-protected.");
2915 	logv(
2916 		"Copying region at address 0x%jx and size 0x%jx (%ju), on "
2917 		"memory at address 0x%jx...",
2918 		(uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2919 
2920 	// The type of failure is not guaranteed to be consistent between architectures, so we just make sure it fails.
2921 	assert_copy_failure(source, size, dest);
2922 	logv("Returned expected error.");
2923 }
2924 
2925 /* Copying on partially deallocated memory fails. */
2926 void
test_copy_source_on_partially_deallocated_range()2927 test_copy_source_on_partially_deallocated_range()
2928 {
2929 	mach_vm_address_t source    = get_vm_address();
2930 	mach_vm_address_t dest      = get_buffer_address();
2931 	mach_vm_address_t start     = mach_vm_trunc_page(source);
2932 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2933 
2934 	logv(
2935 		"Deallocating the first source page at address "
2936 		"0x%jx...",
2937 		(uintmax_t)start);
2938 	deallocate_vm_page_early(start);
2939 	logv("Page deallocated.");
2940 
2941 	logv(
2942 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2943 		"memory at address 0x%jx...",
2944 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2945 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2946 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2947 }
2948 
2949 /* Copying on partially deallocated memory fails. */
2950 void
test_copy_dest_on_partially_deallocated_range()2951 test_copy_dest_on_partially_deallocated_range()
2952 {
2953 	mach_vm_address_t source    = get_vm_address();
2954 	mach_vm_address_t dest      = get_buffer_address();
2955 	mach_vm_address_t start     = mach_vm_trunc_page(dest);
2956 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2957 
2958 	logv(
2959 		"Deallocating the first destination page at address "
2960 		"0x%jx...",
2961 		(uintmax_t)start);
2962 	deallocate_buffer_page_early(start);
2963 	logv("Page deallocated.");
2964 
2965 	logv(
2966 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2967 		"memory at address 0x%jx...",
2968 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2969 	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
2970 	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2971 }
2972 
2973 /* Copying on partially unwritable memory fails. */
2974 void
test_copy_dest_on_partially_unwritable_range()2975 test_copy_dest_on_partially_unwritable_range()
2976 {
2977 	mach_vm_address_t source    = get_vm_address();
2978 	mach_vm_address_t dest      = get_buffer_address();
2979 	mach_vm_address_t start     = mach_vm_trunc_page(dest);
2980 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2981 
2982 	/*  For sizes < msg_ool_size_small,
2983 	 *  vm_map_copy_overwrite_nested() uses
2984 	 *  vm_map_copyout_kernel_buffer() to read in the memory,
2985 	 *  returning different errors, see 8217123. */
2986 	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2987 
2988 	logv(
2989 		"Write-protecting the first destination page at address "
2990 		"0x%jx...",
2991 		(uintmax_t)start);
2992 	assert_mach_success(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2993 	logv("Page write-protected.");
2994 
2995 	logv(
2996 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2997 		"memory at address 0x%jx...",
2998 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2999 	assert_copy_return(source, size, dest, kr_expected);
3000 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
3001 }
3002 
3003 /* Copying on partially unreadable memory fails. */
3004 void
test_copy_source_on_partially_unreadable_range()3005 test_copy_source_on_partially_unreadable_range()
3006 {
3007 	mach_vm_address_t source    = get_vm_address();
3008 	mach_vm_address_t dest      = get_buffer_address();
3009 	mach_vm_address_t start     = mach_vm_trunc_page(source);
3010 	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
3011 
3012 	/*  For sizes < msg_ool_size_small,
3013 	 *  vm_map_copy_overwrite_nested() uses
3014 	 *  vm_map_copyout_kernel_buffer() to read in the memory,
3015 	 *  returning different errors, see 8217123. */
3016 	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
3017 
3018 	logv(
3019 		"Read-protecting the first destination page at address "
3020 		"0x%jx...",
3021 		(uintmax_t)start);
3022 	assert_mach_success(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
3023 	logv("Page read-protected.");
3024 
3025 	logv(
3026 		"Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
3027 		"memory at address 0x%jx...",
3028 		(uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
3029 	assert_copy_return(source, size, dest, kr_expected);
3030 	logv("Returned expected error: %s.", mach_error_string(kr_expected));
3031 }
3032 
3033 /********************************/
3034 /* mach_vm_protect() main tests */
3035 /********************************/
3036 
/* The whole page-aligned span covering the allocation (extended by one
 * byte before rounding) is still zero-filled. */
void
test_zero_filled_extended()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page(get_vm_size() + 1), "zero-filled");
}
3042 
3043 /* Allocated region is still zero-filled after read-protecting it and
3044  * then restoring read-access. */
3045 void
test_zero_filled_readprotect()3046 test_zero_filled_readprotect()
3047 {
3048 	mach_vm_address_t address = get_vm_address();
3049 	mach_vm_size_t size       = get_vm_size();
3050 
3051 	logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size,
3052 	    (size == 1) ? "" : "s", (uintmax_t)address);
3053 	assert_mach_success(mach_vm_protect(mach_task_self(), address, size, FALSE, VM_PROT_DEFAULT), "mach_vm_protect()");
3054 	logv("Region has read access.");
3055 	test_zero_filled_extended();
3056 }
3057 
/*
 * Query the test region with mach_vm_region() and check the given
 * protection bit, logging the outcome.
 * When the recorded region size is non-zero, the test expects the
 * access to have been removed (bit clear => "protected"); when it is
 * zero, the region should retain the access (bit set =>
 * "unprotected") — presumably because a zero-size protect is a no-op;
 * confirm against the protect_zero_size tests.
 */
void
verify_protection(vm_prot_t protection, const char * protection_name)
{
	mach_vm_address_t address    = get_vm_address();
	mach_vm_size_t size          = get_vm_size();
	mach_vm_size_t original_size = size; /* mach_vm_region() overwrites size below */
	vm_region_basic_info_data_64_t info;
	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
	mach_port_t unused;

	logv(
		"Verifying %s-protection on region of address 0x%jx and "
		"size 0x%jx (%ju) with mach_vm_region()...",
		protection_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	assert_mach_success(
		mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused),
		"mach_vm_region()");
	if (original_size) {
		/* Access bit must be clear: the access was removed. */
		T_QUIET; T_ASSERT_EQ((info.protection & protection), 0,
		    "Region "
		    "is unexpectedly %s-unprotected.",
		    protection_name);
		logv("Region is %s-protected as expected.", protection_name);
	} else {
		/* Access bit must still be set: the access was retained. */
		T_QUIET; T_ASSERT_NE(info.protection & protection, 0,
		    "Region is "
		    "unexpectedly %s-protected.",
		    protection_name);
		logv("Region is %s-unprotected as expected.", protection_name);
	}
}
3089 
/* Check the region's VM_PROT_READ bit via mach_vm_region(). */
void
test_verify_readprotection()
{
	verify_protection(VM_PROT_READ, "read");
}
3095 
/* Check the region's VM_PROT_WRITE bit via mach_vm_region(). */
void
test_verify_writeprotection()
{
	verify_protection(VM_PROT_WRITE, "write");
}
3101 
3102 /******************************/
3103 /* Protection bus error tests */
3104 /******************************/
3105 
3106 /* mach_vm_protect() affects the smallest aligned region (integral
3107  * number of pages) containing the given range. */
3108 
/* Addresses in read-protected range are inaccessible. */
void
access_readprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Reading from %s 0x%jx of read-protected range...", position, (uintmax_t)address);
	/* This load is expected to fault (the parent treats the SIGBUS
	 * death as a pass); reaching the assertion below means the
	 * read-protection did not take effect. */
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx. "
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
3119 
/* Start of read-protected range is inaccessible. */
void
test_access_readprotected_range_start()
{
	access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3126 
/* Middle of read-protected range is inaccessible. */
void
test_access_readprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Probe the midpoint of the aligned (page-rounded) range. */
	access_readprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3134 
/* End of read-protected range is inaccessible. */
void
test_access_readprotected_range_end()
{
	/* Probe the last naturally-aligned word before the range end. */
	access_readprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3141 
/* Addresses in write-protected range are unwritable. */
void
write_writeprotected_range_address(mach_vm_address_t address, const char * position)
{
	logv("Writing on %s 0x%jx of write-protected range...", position, (uintmax_t)address);
	/* This store is expected to fault (the parent treats the SIGBUS
	 * death as a pass); reaching the assertion below means the
	 * write-protection did not take effect. */
	MACH_VM_ADDRESS_T(address) = 0x0;
	T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx. "
	    "Should have died with signal SIGBUS.",
	    (uintmax_t)address);
}
3152 
/* Start of write-protected range is unwritable. */
void
test_write_writeprotected_range_start()
{
	write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3159 
/* Middle of write-protected range is unwritable. */
void
test_write_writeprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();
	/* Probe the midpoint of the aligned (page-rounded) range. */
	write_writeprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3167 
/* End of write-protected range is unwritable. */
void
test_write_writeprotected_range_end()
{
	/* Probe the last naturally-aligned word before the range end. */
	write_writeprotected_range_address(round_page(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3174 
3175 /*************************************/
3176 /* mach_vm_protect() edge case tests */
3177 /*************************************/
3178 
3179 void
protect_zero_size(vm_prot_t protection,const char * protection_name)3180 protect_zero_size(vm_prot_t protection, const char * protection_name)
3181 {
3182 	int i;
3183 	kern_return_t kr;
3184 	vm_map_t this_task            = mach_task_self();
3185 	mach_vm_address_t addresses[] = {0x0,
3186 		                         0x1,
3187 		                         vm_page_size - 1,
3188 		                         vm_page_size,
3189 		                         vm_page_size + 1,
3190 		                         (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
3191 		                         (mach_vm_address_t)UINT_MAX,
3192 		                         (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
3193 		                         (mach_vm_address_t)UINTMAX_MAX};
3194 	int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
3195 
3196 	logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name);
3197 	for (i = 0; i < numofaddresses; i++) {
3198 		kr = mach_vm_protect(this_task, addresses[i], 0, FALSE, protection);
3199 		assert_mach_success(kr,
3200 		    "mach_vm_protect() at "
3201 		    "address 0x%jx unexpectedly failed: %s.",
3202 		    (uintmax_t)addresses[i], mach_error_string(kr));
3203 	}
3204 	logv("Protection successful.");
3205 }
3206 
/* Zero-size read-protect succeeds everywhere. */
void
test_readprotect_zero_size()
{
	protect_zero_size(VM_PROT_READ, "Read");
}
3212 
/* Zero-size write-protect succeeds everywhere. */
void
test_writeprotect_zero_size()
{
	protect_zero_size(VM_PROT_WRITE, "Write");
}
3218 
3219 /* Protecting a range wrapped around the address space fails. */
3220 void
protect_wrapped_around_ranges(vm_prot_t protection,const char * protection_name)3221 protect_wrapped_around_ranges(vm_prot_t protection, const char * protection_name)
3222 {
3223 	int i;
3224 	kern_return_t kr;
3225 	vm_map_t this_task = mach_task_self();
3226 	struct {
3227 		mach_vm_address_t address;
3228 		mach_vm_size_t size;
3229 	} ranges[] = {
3230 		{0x1, (mach_vm_size_t)UINTMAX_MAX},
3231 		{vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
3232 		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
3233 		{(mach_vm_address_t)UINTMAX_MAX, 1},
3234 	};
3235 	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
3236 
3237 	logv(
3238 		"%s-protecting various memory ranges wrapping around the "
3239 		"address space...",
3240 		protection_name);
3241 	for (i = 0; i < numofranges; i++) {
3242 		kr = mach_vm_protect(this_task, ranges[i].address, ranges[i].size, FALSE, protection);
3243 		assert_mach_return(kr, KERN_INVALID_ARGUMENT,
3244 		    "mach_vm_protect() with address 0x%jx and size "
3245 		    "0x%jx (%ju) expected KERN_INVALID_ARGUMENT",
3246 		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size);
3247 	}
3248 	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
3249 }
3250 
void
test_readprotect_wrapped_around_ranges()
{
	/* Read-protecting a range that wraps the address space must fail
	 * with KERN_INVALID_ARGUMENT (asserted by the helper). */
	protect_wrapped_around_ranges(VM_PROT_READ, "Read");
}
3256 
void
test_writeprotect_wrapped_around_ranges()
{
	/* Write-protecting a range that wraps the address space must fail
	 * with KERN_INVALID_ARGUMENT (asserted by the helper). */
	protect_wrapped_around_ranges(VM_PROT_WRITE, "Write");
}
3262 
3263 /*******************/
3264 /* vm_copy() tests */
3265 /*******************/
3266 
3267 /* Verify the address space is being shared. */
3268 void
assert_share_mode(mach_vm_address_t address,unsigned share_mode,const char * share_mode_name)3269 assert_share_mode(mach_vm_address_t address, unsigned share_mode, const char * share_mode_name)
3270 {
3271 }
3272 
3273 /* Do the vm_copy() and verify its success. */
3274 void
assert_vmcopy_success(vm_address_t src,vm_address_t dst,const char * source_name)3275 assert_vmcopy_success(vm_address_t src, vm_address_t dst, const char * source_name)
3276 {
3277 	kern_return_t kr;
3278 	mach_vm_size_t size = get_vm_size();
3279 
3280 	logv("Copying (using mach_vm_copy()) from a %s source...", source_name);
3281 	kr = mach_vm_copy(mach_task_self(), src, size, dst);
3282 	assert_mach_success(kr,
3283 	    "mach_vm_copy() with the source address "
3284 	    "0x%jx, designation address 0x%jx, and size 0x%jx (%ju) "
3285 	    "unexpectedly failed.",
3286 	    (uintmax_t)src, (uintmax_t)dst, (uintmax_t)size, (uintmax_t)size);
3287 	logv("Copy (mach_vm_copy()) was successful as expected.");
3288 }
3289 
3290 void
write_region(mach_vm_address_t address,mach_vm_size_t start)3291 write_region(mach_vm_address_t address, mach_vm_size_t start)
3292 {
3293 	mach_vm_size_t size = get_vm_size();
3294 
3295 	filter_addresses_do_else(empty, FALSE, address, size, write_address, write_address, start);
3296 }
3297 
3298 void
verify_region(mach_vm_address_t address,mach_vm_address_t start)3299 verify_region(mach_vm_address_t address, mach_vm_address_t start)
3300 {
3301 	mach_vm_size_t size = get_vm_size();
3302 
3303 	filter_addresses_do_else(empty, FALSE, address, size, verify_address, verify_address, start);
3304 }
3305 
/* Perform the post vm_copy() action and verify its results.
 *
 * Exactly one region (chosen by get_vmcopy_post_action()) is modified,
 * then all regions are verified against the pattern offset they should
 * now hold:
 *   - src was written with offset 0 before the copy; it reads back 1 only
 *     if it was the one modified, or if it shares memory with the
 *     modified shared/copied region.
 *   - dst always starts as a copy of src (offset 0) and reads back 1 only
 *     if it was the one modified.
 *   - shared_copied (if non-zero) reads back 1 if it was the one
 *     modified, or if it shares memory with a modified src.
 * The `shared` flag says whether src and shared_copied alias the same
 * memory. */
void
modify_one_and_verify_all_regions(vm_address_t src, vm_address_t dst, vm_address_t shared_copied, boolean_t shared)
{
	int action          = get_vmcopy_post_action();

	/* Do the post vm_copy() action. */
	switch (action) {
	case VMCOPY_MODIFY_SRC:
		logv("Modifying: source%s...", shared ? " (shared with other region)" : "");
		write_region(src, 1);
		break;

	case VMCOPY_MODIFY_DST:
		logv("Modifying: destination...");
		write_region(dst, 1);
		break;

	case VMCOPY_MODIFY_SHARED_COPIED:
		/* If no shared_copied then no need to verify (nothing changed). */
		if (!shared_copied) {
			return;
		}
		logv("Modifying: shared/copied%s...", shared ? " (shared with source region)" : "");
		write_region(shared_copied, 1);
		break;

	default:
		T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action);
	}
	logv("Modification was successful as expected.");

	/* Verify all the regions with what is expected. */
	logv("Verifying: source... ");
	verify_region(src, (VMCOPY_MODIFY_SRC == action || (shared && VMCOPY_MODIFY_SHARED_COPIED == action)) ? 1 : 0);
	logv("destination... ");
	verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
	if (shared_copied) {
		logv("shared/copied... ");
		verify_region(shared_copied, (VMCOPY_MODIFY_SHARED_COPIED == action || (shared && VMCOPY_MODIFY_SRC == action)) ? 1 : 0);
	}
	logv("Verification was successful as expected.");
}
3349 
/* Test source being a simple fresh region. */
void
test_vmcopy_fresh_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst;

	if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED) {
		/* No shared/copied region to modify so just return. */
		logv("No shared/copied region as expected.");
		return;
	}

	assert_allocate_success(&src, size, TRUE);

	/* Freshly allocated, never-touched memory is expected to report
	 * SM_EMPTY. */
	assert_share_mode(src, SM_EMPTY, "SM_EMPTY");

	/* Stamp the source with the pattern at offset 0 before copying. */
	write_region(src, 0);

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "freshly allocated");

	/* No third (shared/copied) region in this scenario: pass 0. */
	modify_one_and_verify_all_regions(src, dst, 0, FALSE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
}
3378 
3379 /* Test source copied from a shared region. */
3380 void
test_vmcopy_shared_source()3381 test_vmcopy_shared_source()
3382 {
3383 	mach_vm_size_t size = get_vm_size();
3384 	mach_vm_address_t src, dst, shared;
3385 	int action = get_vmcopy_post_action();
3386 	int pid, status;
3387 
3388 	assert_allocate_success(&src, size, TRUE);
3389 
3390 	assert_mach_success(mach_vm_inherit(mach_task_self(), src, size, VM_INHERIT_SHARE), "mach_vm_inherit()");
3391 
3392 	write_region(src, 0);
3393 
3394 	pid = fork();
3395 	if (pid == 0) {
3396 		/* Verify that the child's 'src' is shared with the
3397 		 *  parent's src */
3398 		assert_share_mode(src, SM_SHARED, "SM_SHARED");
3399 		assert_allocate_success(&dst, size, TRUE);
3400 		assert_vmcopy_success(src, dst, "shared");
3401 		if (VMCOPY_MODIFY_SHARED_COPIED == action) {
3402 			logv("Modifying: shared...");
3403 			write_region(src, 1);
3404 			logv("Modification was successsful as expected.");
3405 			logv("Verifying: source... ");
3406 			verify_region(src, 1);
3407 			logv("destination...");
3408 			verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3409 			logv("Verification was successful as expected.");
3410 		} else {
3411 			modify_one_and_verify_all_regions(src, dst, 0, TRUE);
3412 		}
3413 		assert_deallocate_success(dst, size);
3414 		exit(0);
3415 	} else if (pid > 0) {
3416 		/* In the parent the src becomes the shared */
3417 		shared = src;
3418 		wait(&status);
3419 		if (WEXITSTATUS(status) != 0) {
3420 			exit(status);
3421 		}
3422 		/* verify shared (shared with child's src) */
3423 		logv("Verifying: shared...");
3424 		verify_region(shared, (VMCOPY_MODIFY_SHARED_COPIED == action || VMCOPY_MODIFY_SRC == action) ? 1 : 0);
3425 		logv("Verification was successful as expected.");
3426 	} else {
3427 		T_WITH_ERRNO; T_ASSERT_FAIL("fork failed");
3428 	}
3429 
3430 	assert_deallocate_success(src, size);
3431 }
3432 
/* Test source copied from another mapping. */
void
test_vmcopy_copied_from_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst, copied;

	/* Pattern-fill a region, then vm_copy() it onto src so that src is
	 * in a copied (COW) state before the copy under test. */
	assert_allocate_success(&copied, size, TRUE);
	write_region(copied, 0);

	assert_allocate_success(&src, size, TRUE);

	assert_mach_success(mach_vm_copy(mach_task_self(), copied, size, src), "mach_vm_copy()");

	assert_share_mode(src, SM_COW, "SM_COW");

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "copied from");

	/* 'copied' plays the shared/copied role; it does not alias src
	 * (copy semantics), hence shared == FALSE. */
	modify_one_and_verify_all_regions(src, dst, copied, FALSE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
	assert_deallocate_success(copied, size);
}
3459 
/* Test source copied to another mapping. */
void
test_vmcopy_copied_to_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst, copied;

	/* Pattern-fill src, then vm_copy() it out to 'copied' so that src is
	 * in a copied (COW) state before the copy under test. */
	assert_allocate_success(&src, size, TRUE);
	write_region(src, 0);

	assert_allocate_success(&copied, size, TRUE);

	assert_mach_success(mach_vm_copy(mach_task_self(), src, size, copied), "mach_vm_copy()");

	assert_share_mode(src, SM_COW, "SM_COW");

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "copied to");

	/* 'copied' plays the shared/copied role; it does not alias src
	 * (copy semantics), hence shared == FALSE. */
	modify_one_and_verify_all_regions(src, dst, copied, FALSE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
	assert_deallocate_success(copied, size);
}
3486 
/* Test a true-shared source copied. */
void
test_vmcopy_trueshared_source()
{
	mach_vm_size_t size   = get_vm_size();
	mach_vm_address_t src = 0x0, dst, shared;
	vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
	vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
	mem_entry_name_port_t mem_obj;

	assert_allocate_success(&shared, size, TRUE);
	write_region(shared, 0);

	/* Wrap 'shared' in a named memory entry and map it again at src, so
	 * src and shared are two mappings of the same memory object.
	 * NOTE: mach_make_memory_entry_64() takes &size and may round it;
	 * the rounded value is what the rest of the test uses. */
	assert_mach_success(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)shared, cur_protect, &mem_obj,
	    (mem_entry_name_port_t)NULL),
	    "mach_make_memory_entry_64()");
	assert_mach_success(
		mach_vm_map(mach_task_self(), &src, size, 0, TRUE, mem_obj, 0, FALSE, cur_protect, max_protect, VM_INHERIT_NONE),
		"mach_vm_map()");

	assert_share_mode(src, SM_TRUESHARED, "SM_TRUESHARED");

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "true shared");

	/* src and shared alias the same memory object: shared == TRUE. */
	modify_one_and_verify_all_regions(src, dst, shared, TRUE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
	assert_deallocate_success(shared, size);
}
3519 
3520 /* Test a private aliazed source copied. */
3521 void
test_vmcopy_private_aliased_source()3522 test_vmcopy_private_aliased_source()
3523 {
3524 	mach_vm_size_t size   = get_vm_size();
3525 	mach_vm_address_t src = 0x0, dst, shared;
3526 	vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3527 	vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3528 
3529 	assert_allocate_success(&shared, size, TRUE);
3530 	write_region(shared, 0);
3531 
3532 	assert_mach_success(mach_vm_remap(mach_task_self(), &src, size, 0, TRUE, mach_task_self(), shared, FALSE, &cur_protect,
3533 	    &max_protect, VM_INHERIT_NONE),
3534 	    "mach_vm_remap()");
3535 
3536 	assert_share_mode(src, SM_PRIVATE_ALIASED, "SM_PRIVATE_ALIASED");
3537 
3538 	assert_allocate_success(&dst, size, TRUE);
3539 
3540 	assert_vmcopy_success(src, dst, "true shared");
3541 
3542 	modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3543 
3544 	assert_deallocate_success(src, size);
3545 	assert_deallocate_success(dst, size);
3546 	assert_deallocate_success(shared, size);
3547 }
3548 
3549 /*************/
3550 /* VM Suites */
3551 /*************/
3552 
/* Drive the allocation test suites: builds the UnitTests tables, then
 * iterates the global allocator/size/address-flag/alignment axes, calling
 * run_suite() with the appropriate fixture set-up and tear-down for each
 * combination.  The loop indices (allocators_idx etc.) are file-scope
 * globals read by the set_up_* fixtures. */
void
run_allocate_test_suites()
{
	/* Each entry: description, test function, expected signal (0 = none). */
	UnitTests allocate_main_tests = {
		{"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size, 0},
		{"Allocated address is page-aligned", test_aligned_address, 0},
		{"Allocated memory is zero-filled", test_zero_filled, 0},
		{"Write and verify address-filled pattern", test_write_address_filled, 0},
		{"Write and verify checkerboard pattern", test_write_checkerboard, 0},
		{"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard, 0},
		{"Write and verify page ends pattern", test_write_page_ends, 0},
		{"Write and verify page interiors pattern", test_write_page_interiors, 0},
		{"Reallocate allocated pages", test_reallocate_pages, 0},
	};
	UnitTests allocate_address_error_tests = {
		{"Allocate at address zero", test_allocate_at_zero, 0},
		{"Allocate at a 2 MB boundary-unaligned, page-aligned "
		 "address",
		 test_allocate_2MB_boundary_unaligned_page_aligned_address, 0},
	};
	UnitTests allocate_argument_error_tests = {
		{"Allocate in NULL VM map", test_allocate_in_null_map, 0},
		{"Allocate with kernel flags", test_allocate_with_kernel_flags, 0},
		{"Allocate super-page with incompatible flags", test_allocate_superpage_with_incompatible_flags, 0},
	};
	UnitTests allocate_fixed_size_tests = {
		{"Allocate zero size", test_allocate_zero_size, 0},
		{"Allocate overflowing size", test_allocate_overflowing_size, 0},
		{"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint, 0},
		{"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages, 0},
	};
	UnitTests allocate_invalid_large_size_test = {
		{"Allocate invalid large size", test_allocate_invalid_large_size, 0},
	};
	UnitTests mach_vm_map_protection_inheritance_error_test = {
		{"mach_vm_map() with invalid protection/inheritance "
		 "arguments",
		 test_mach_vm_map_protection_inheritance_error, 0},
	};
	UnitTests mach_vm_map_large_mask_overflow_error_test = {
		{"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error, 0},
	};

	/* Run the test suites with various allocators and VM sizes, and
	 *  unspecified or fixed (page-aligned or page-unaligned),
	 *  addresses. */
	for (allocators_idx = 0; allocators_idx < numofallocators; allocators_idx++) {
		for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
			for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
				for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
					/* An allocated address will be page-aligned. */
					/* Only run the zero size mach_vm_map() error tests in the
					 *  unspecified address case, since we won't be able to retrieve a
					 *  fixed address for allocation. See 8003930. */
					if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED) ||
					    (allocators_idx != MACH_VM_ALLOCATE && sizes_idx == ZERO_BYTES && flags_idx == FIXED)) {
						continue;
					}
					run_suite(set_up_allocator_and_vm_variables, allocate_argument_error_tests, do_nothing,
					    "%s argument error tests, %s%s address, "
					    "%s size: 0x%jx (%ju)",
					    allocators[allocators_idx].description, address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* mach_vm_map() only protection and inheritance error
					 *  tests. */
					if (allocators_idx != MACH_VM_ALLOCATE) {
						run_suite(set_up_allocator_and_vm_variables, mach_vm_map_protection_inheritance_error_test, do_nothing,
						    "%s protection and inheritance "
						    "error test, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
					/* mach_vm_map() cannot allocate 0 bytes, see 8003930. */
					if (allocators_idx == MACH_VM_ALLOCATE || sizes_idx != ZERO_BYTES) {
						run_suite(set_up_allocator_and_vm_variables_and_allocate, allocate_main_tests, deallocate,
						    "%s main "
						    "allocation tests, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
				}
			}
			/* Address-error tests depend only on allocator and size. */
			run_suite(set_up_allocator_and_vm_size, allocate_address_error_tests, do_nothing,
			    "%s address "
			    "error allocation tests, %s size: 0x%jx (%ju)",
			    allocators[allocators_idx].description, vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
			    (uintmax_t)vm_sizes[sizes_idx].size);
		}
		/* Fixed-size tests depend only on the allocator. */
		run_suite(set_up_allocator, allocate_fixed_size_tests, do_nothing, "%s fixed size allocation tests",
		    allocators[allocators_idx].description);
		run_suite(set_up_allocator, allocate_invalid_large_size_test, do_nothing, "%s invalid large size allocation test",
		    allocators[allocators_idx].description);
	}
	/* mach_vm_map() only large mask overflow tests. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		run_suite(set_up_vm_size, mach_vm_map_large_mask_overflow_error_test, do_nothing,
		    "mach_vm_map() large mask overflow "
		    "error test, size: 0x%jx (%ju)",
		    (uintmax_t)vm_sizes[sizes_idx].size, (uintmax_t)vm_sizes[sizes_idx].size);
	}
}
3662 
/* Drive the deallocation test suites: all allocations use
 * mach_vm_allocate(), then the size/address-flag/alignment axes are
 * iterated and run_suite() is called per combination.  The loop indices
 * are file-scope globals read by the set_up_* fixtures. */
void
run_deallocate_test_suites()
{
	/* Each entry: description, test function, expected signal. */
	UnitTests access_deallocated_memory_tests = {
		{"Read start of deallocated range", test_access_deallocated_range_start, SIGSEGV},
		{"Read middle of deallocated range", test_access_deallocated_range_middle, SIGSEGV},
		{"Read end of deallocated range", test_access_deallocated_range_end, SIGSEGV},
	};
	UnitTests deallocate_reallocate_tests = {
		/* The omitted expected_signal field is implicitly
		 * zero-initialized here and overwritten per size below. */
		{"Deallocate twice", test_deallocate_twice},
		{"Write pattern, deallocate, reallocate (deallocated "
		 "memory is inaccessible), and verify memory is "
		 "zero-filled",
		 test_write_pattern_deallocate_reallocate_zero_filled, 0},
	};
	UnitTests deallocate_null_map_test = {
		{"Deallocate in NULL VM map", test_deallocate_in_null_map, 0},
	};
	UnitTests deallocate_edge_case_tests = {
		{"Deallocate zero size ranges", test_deallocate_zero_size_ranges, 0},
		{"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges, 0},
	};
	UnitTests deallocate_suicide_test = {
		{"Deallocate whole address space", test_deallocate_suicide, -1},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 *  fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				/* Accessing deallocated memory should cause a segmentation
				 *  fault. */
				/* Nothing gets deallocated if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					run_suite(set_up_vm_variables_and_allocate, access_deallocated_memory_tests, do_nothing,
					    "Deallocated memory access tests, "
					    "%s%s address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
				}
				/* Deallocating zero size range should pass */
				if (vm_sizes[sizes_idx].size == 0) {
					deallocate_reallocate_tests[0].expected_signal = 0;
				} else {
					deallocate_reallocate_tests[0].expected_signal = _expected_vm_exc_guard_signal;
				}
				run_suite(set_up_vm_variables_and_allocate, deallocate_reallocate_tests, do_nothing,
				    "Deallocation and reallocation tests, %s%s "
				    "address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables, deallocate_null_map_test, do_nothing,
				    "mach_vm_deallocate() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
			}
		}
	}
	run_suite(do_nothing, deallocate_edge_case_tests, do_nothing, "Edge case deallocation tests");
	run_suite(do_nothing, deallocate_suicide_test, do_nothing, "Whole address space deallocation test");
}
3739 
/* Drive the mach_vm_read() test suites: all allocations use
 * mach_vm_allocate(), then the size/address-flag/alignment axes are
 * iterated and run_suite() is called per combination.  The loop indices
 * are file-scope globals read by the set_up_* fixtures. */
void
run_read_test_suites()
{
	/* Each entry: description, test function, expected signal (0 = none). */
	UnitTests read_main_tests = {
		{"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size, 0},
		{"Read address has the correct boundary offset", test_read_address_offset, 0},
		{"Reallocate read pages", test_reallocate_pages, 0},
		{"Read and verify zero-filled memory", test_zero_filled, 0},
	};
	UnitTests read_pattern_tests = {
		{"Read address-filled pattern", test_read_address_filled, 0},
		{"Read checkerboard pattern", test_read_checkerboard, 0},
		{"Read reverse checkerboard pattern", test_read_reverse_checkerboard, 0},
	};
	UnitTests read_null_map_test = {
		{"Read from NULL VM map", test_read_null_map, 0},
	};
	UnitTests read_edge_case_tests = {
		{"Read zero size", test_read_zero_size, 0},
		{"Read invalid large size", test_read_invalid_large_size, 0},
		{"Read wrapped around memory ranges", test_read_wrapped_around_ranges, 0},
	};
	UnitTests read_inaccessible_tests = {
		{"Read partially deallocated memory", test_read_partially_deallocated_range, 0},
		{"Read partially read-protected memory", test_read_partially_unreadable_range, 0},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 *  fixed (page-aligned or page-unaligned) addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_read_deallocate, read_main_tests, deallocate,
				    "mach_vm_read() "
				    "main tests, %s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_pattern_tests, deallocate,
				    "mach_vm_read() pattern tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_and_allocate_extra_page, read_null_map_test, deallocate_extra_page,
				    "mach_vm_read() null map test, "
				    "%s%s address, %s size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* A zero size range is always accessible. */
				if (sizes_idx != ZERO_BYTES) {
					run_suite(set_up_vm_variables_and_allocate_extra_page, read_inaccessible_tests, deallocate_extra_page,
					    "mach_vm_read() inaccessibility tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
				}
			}
		}
	}
	run_suite(do_nothing, read_edge_case_tests, do_nothing, "mach_vm_read() fixed size tests");
}
3815 
/* Drive the mach_vm_write() test suites: all allocations use
 * mach_vm_allocate().  Five nested axes are iterated — destination size,
 * address flag, alignment, buffer size, buffer boundary offset — and
 * run_suite() is called per combination.  The loop indices are file-scope
 * globals read by the set_up_* fixtures. */
void
run_write_test_suites()
{
	/* Each entry: description, test function, expected signal (0 = none). */
	UnitTests write_main_tests = {
		{"Write and verify zero-filled memory", test_zero_filled_write, 0},
	};
	UnitTests write_pattern_tests = {
		{"Write address-filled pattern", test_address_filled_write, 0},
		{"Write checkerboard pattern", test_checkerboard_write, 0},
		{"Write reverse checkerboard pattern", test_reverse_checkerboard_write, 0},
	};
	UnitTests write_edge_case_tests = {
		{"Write into NULL VM map", test_write_null_map, 0}, {"Write zero size", test_write_zero_size, 0},
	};
	UnitTests write_inaccessible_tests = {
		{"Write partially deallocated buffer", test_write_partially_deallocated_buffer, 0},
		{"Write partially read-protected buffer", test_write_partially_unreadable_buffer, 0},
		{"Write on partially deallocated range", test_write_on_partially_deallocated_range, 0},
		{"Write on partially write-protected range", test_write_on_partially_unwritable_range, 0},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various destination sizes and
	 *  unspecified or fixed (page-aligned or page-unaligned)
	 *  addresses, and various buffer sizes and boundary offsets. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
					for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
						/* An allocated address will be page-aligned. */
						if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_edge_case_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() edge case tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						/* A zero size buffer is always accessible. */
						if (buffer_sizes_idx != ZERO_BYTES) {
							run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_inaccessible_tests,
							    deallocate_vm_and_buffer,
							    "mach_vm_write() inaccessibility tests, "
							    "%s%s address, %s size: 0x%jx (%ju), buffer "
							    "%s size: 0x%jx (%ju), buffer boundary "
							    "offset: %d",
							    address_flags[flags_idx].description,
							    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
							    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
							    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
							    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
							    buffer_offsets[offsets_idx].offset);
						}
						/* The buffer cannot be larger than the destination. */
						if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_write, write_main_tests, deallocate_vm_and_buffer,
						    "mach_vm_write() main tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_pattern_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_write() pattern tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
					}
				}
			}
		}
	}
}
3909 
void
run_protect_test_suites()
{
	/* mach_vm_protect() test suites: read-protection and
	 * write-protection semantics, faulting access to protected
	 * ranges, and edge cases (zero sizes, wrapped-around ranges).
	 * Iterates the file-global size/flag/alignment tables through
	 * the shared sizes_idx/flags_idx/alignments_idx indices, which
	 * the suite set-up fixtures consume. */
	UnitTests readprotection_main_tests = {
		{"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect, 0},
		{"Verify that region is read-protected iff size is "
		 "nonzero",
		 test_verify_readprotection, 0},
	};
	/* Reads anywhere inside a read-protected range are expected to
	 * kill the child with SIGBUS (third field is presumably the
	 * expected fatal signal — confirm against run_suite()). */
	UnitTests access_readprotected_memory_tests = {
		{"Read start of read-protected range", test_access_readprotected_range_start, SIGBUS},
		{"Read middle of read-protected range", test_access_readprotected_range_middle, SIGBUS},
		{"Read end of read-protected range", test_access_readprotected_range_end, SIGBUS},
	};
	UnitTests writeprotection_main_tests = {
		{"Write-protect and verify zero-filled memory", test_zero_filled_extended, 0},
		{"Verify that region is write-protected iff size is nonzero",
		 test_verify_writeprotection, 0},
	};
	/* Writes anywhere inside a write-protected range are likewise
	 * expected to raise SIGBUS. */
	UnitTests write_writeprotected_memory_tests = {
		{"Write at start of write-protected range", test_write_writeprotected_range_start, SIGBUS},
		{"Write in middle of write-protected range", test_write_writeprotected_range_middle, SIGBUS},
		{"Write at end of write-protected range", test_write_writeprotected_range_end, SIGBUS},
	};
	UnitTests protect_edge_case_tests = {
		{"Read-protect zero size ranges", test_readprotect_zero_size, 0},
		{"Write-protect zero size ranges", test_writeprotect_zero_size, 0},
		{"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges, 0},
		{"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges, 0},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* Run the test suites with various VM sizes, and unspecified or
	 *  fixed (page-aligned or page-unaligned), addresses. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				/* An allocated address will be page-aligned. */
				if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
					continue;
				}
				run_suite(set_up_vm_variables_allocate_readprotect, readprotection_main_tests, deallocate_extra_page,
				    "Main read-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				run_suite(set_up_vm_variables_allocate_writeprotect, writeprotection_main_tests, deallocate_extra_page,
				    "Main write-protection tests, %s%s address, %s "
				    "size: 0x%jx (%ju)",
				    address_flags[flags_idx].description,
				    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
				    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
				    (uintmax_t)vm_sizes[sizes_idx].size);
				/* Nothing gets protected if size is zero. */
				if (sizes_idx != ZERO_BYTES) {
					/* Accessing read-protected memory should cause a bus
					 *  error. */
					run_suite(set_up_vm_variables_allocate_readprotect, access_readprotected_memory_tests, deallocate_extra_page,
					    "Read-protected memory access tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* Writing on write-protected memory should cause a bus
					 *  error. */
					run_suite(set_up_vm_variables_allocate_writeprotect, write_writeprotected_memory_tests, deallocate_extra_page,
					    "Write-protected memory writing tests, %s%s "
					    "address, %s size: 0x%jx (%ju)",
					    address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
				}
			}
		}
	}
	/* Edge cases need no per-suite fixture: do_nothing for both
	 * set-up and tear-down. */
	run_suite(do_nothing, protect_edge_case_tests, do_nothing, "Edge case protection tests");
}
3993 
void
run_copy_test_suites()
{
	/* mach_vm_copy() test suites: basic copy into a zero-filled
	 * destination, pattern integrity after a copy, edge cases (NULL
	 * map, zero/invalid sizes, wrap-around), partially inaccessible
	 * source/destination regions, and copies from sources in
	 * various sharing states. Iterates the file-global parameter
	 * tables through the shared *_idx indices that the suite
	 * set-up fixtures consume. */
	/* Copy tests */
	UnitTests copy_main_tests = {
		{"Copy and verify zero-filled memory", test_zero_filled_copy_dest, 0},
	};
	UnitTests copy_pattern_tests = {
		{"Copy address-filled pattern", test_copy_address_filled, 0},
		{"Copy checkerboard pattern", test_copy_checkerboard, 0},
		{"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard, 0},
	};
	UnitTests copy_edge_case_tests = {
		{"Copy with NULL VM map", test_copy_null_map, 0},
		{"Copy zero size", test_copy_zero_size, 0},
		{"Copy invalid large size", test_copy_invalid_large_size, 0},
		{"Read wrapped around memory ranges", test_copy_wrapped_around_ranges, 0},
	};
	UnitTests copy_inaccessible_tests = {
		{"Copy source partially deallocated region", test_copy_source_partially_deallocated_region, 0},
		/* XXX */
		{"Copy destination partially deallocated region", test_copy_dest_partially_deallocated_region, 0},
		{"Copy source partially read-protected region", test_copy_source_partially_unreadable_region, 0},
		/* XXX */
		{"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region, 0},
		{"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range, 0},
		{"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range, 0},
		{"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range, 0},
		{"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range, 0},
	};

	/* Source sharing-state variations (fresh, shared, copied-from,
	 * copied-to, true-shared, privately aliased). */
	UnitTests copy_shared_mode_tests = {
		{"Copy using freshly allocated source", test_vmcopy_fresh_source, 0},
		{"Copy using shared source", test_vmcopy_shared_source, 0},
		{"Copy using a \'copied from\' source", test_vmcopy_copied_from_source, 0},
		{"Copy using a \'copied to\' source", test_vmcopy_copied_to_source, 0},
		{"Copy using a true shared source", test_vmcopy_trueshared_source, 0},
		{"Copy using a private aliased source", test_vmcopy_private_aliased_source, 0},
	};

	/* All allocations done with mach_vm_allocate(). */
	set_allocator(wrapper_mach_vm_allocate);

	/* All the tests are done with page size regions. */
	set_vm_size(vm_page_size);

	/* Run the test suites with various shared modes for source */
	for (vmcopy_action_idx = 0; vmcopy_action_idx < numofvmcopyactions; vmcopy_action_idx++) {
		run_suite(set_up_copy_shared_mode_variables, copy_shared_mode_tests, do_nothing, "Copy shared mode tests, %s",
		    vmcopy_actions[vmcopy_action_idx].description);
	}

	/* Cross product of region size, address flag, alignment, buffer
	 * size and buffer boundary offset. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
			for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
				for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
					for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
						/* An allocated address will be page-aligned. */
						if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
							continue;
						}
						run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_edge_case_tests,
						    deallocate_vm_and_buffer,
						    "mach_vm_copy() edge case tests, %s%s address, %s "
						    "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
						    "buffer boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						/* The buffer cannot be larger than the destination. */
						if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
							continue;
						}

						/* A zero size buffer is always accessible. */
						if (buffer_sizes_idx != ZERO_BYTES) {
							run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_inaccessible_tests,
							    deallocate_vm_and_buffer,
							    "mach_vm_copy() inaccessibility tests, "
							    "%s%s address, %s size: 0x%jx (%ju), buffer "
							    "%s size: 0x%jx (%ju), buffer boundary "
							    "offset: %d",
							    address_flags[flags_idx].description,
							    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
							    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
							    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
							    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
							    buffer_offsets[offsets_idx].offset);
						}
						run_suite(set_up_source_and_dest_variables_allocate_copy, copy_main_tests, deallocate_vm_and_buffer,
						    "mach_vm_copy() main tests, %s%s address, %s "
						    "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
						    "destination boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
						run_suite(set_up_source_and_dest_variables_allocate_copy, copy_pattern_tests, deallocate_vm_and_buffer,
						    "mach_vm_copy() pattern tests, %s%s address, %s "
						    "size: 0x%jx (%ju) destination %s size: 0x%jx (%ju), "
						    "destination boundary offset: %d",
						    address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
						    (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
						    buffer_offsets[offsets_idx].offset);
					}
				}
			}
		}
	}
}
4112 
/*
 * Toggle the debug.disable_vm_sanitize_telemetry sysctl.
 *
 * val: 1 disables VM sanitize telemetry, 0 re-enables it.
 * Returns the sysctlbyname() result (0 on success). A failure is
 * logged but not treated as fatal.
 */
static int
set_disable_vm_sanitize_telemetry_via_sysctl(uint32_t val)
{
	int rc;

	rc = sysctlbyname("debug.disable_vm_sanitize_telemetry",
	    NULL, NULL, &val, sizeof(val));
	if (rc) {
		T_LOG("telemetry sysctl failed with errno %d.", errno);
	}
	return rc;
}
4122 
/* Turn off VM sanitize telemetry for the duration of the test run —
 * presumably to keep the deliberately-invalid VM calls made by the
 * edge-case tests from generating telemetry (TODO: confirm).
 * Returns 0 on success. */
static int
disable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(1);
}
4128 
/* Restore VM sanitize telemetry after the test run (counterpart of
 * disable_vm_sanitize_telemetry()). Returns 0 on success. */
static int
reenable_vm_sanitize_telemetry(void)
{
	return set_disable_vm_sanitize_telemetry_via_sysctl(0);
}
4134 
4135 void
perform_test_with_options(test_option_t options)4136 perform_test_with_options(test_option_t options)
4137 {
4138 	disable_vm_sanitize_telemetry();
4139 
4140 	process_options(options);
4141 
4142 	if (flag_run_allocate_test) {
4143 		run_allocate_test_suites();
4144 	}
4145 
4146 	if (flag_run_deallocate_test) {
4147 		run_deallocate_test_suites();
4148 	}
4149 
4150 	if (flag_run_read_test) {
4151 		run_read_test_suites();
4152 	}
4153 
4154 	if (flag_run_write_test) {
4155 		run_write_test_suites();
4156 	}
4157 
4158 	if (flag_run_protect_test) {
4159 		run_protect_test_suites();
4160 	}
4161 
4162 	if (flag_run_copy_test) {
4163 		run_copy_test_suites();
4164 	}
4165 
4166 	log_aggregated_results();
4167 	reenable_vm_sanitize_telemetry();
4168 }
4169 
4170 T_DECL(vm_test_allocate, "Allocate VM unit test")
4171 {
4172 	test_options.to_flags = VM_TEST_ALLOCATE;
4173 	test_options.to_vmsize = 0;
4174 	test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4175 
4176 	perform_test_with_options(test_options);
4177 }
4178 
4179 T_DECL(vm_test_deallocate, "Deallocate VM unit test",
4180     T_META_ENABLED(!TARGET_OS_BRIDGE),  /* disabled on bridgeOS due to failures, rdar://137493917 */
4181     T_META_IGNORECRASHES(".*vm_allocation.*"))
4182 {
4183 	test_options.to_flags = VM_TEST_DEALLOCATE;
4184 	test_options.to_vmsize = 0;
4185 	test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4186 
4187 	perform_test_with_options(test_options);
4188 }
4189 
4190 T_DECL(vm_test_read, "Read VM unit test")
4191 {
4192 	test_options.to_flags = VM_TEST_READ;
4193 	test_options.to_vmsize = 0;
4194 	test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4195 
4196 	perform_test_with_options(test_options);
4197 }
4198 
4199 T_DECL(vm_test_write, "Write VM unit test")
4200 {
4201 	test_options.to_flags = VM_TEST_WRITE;
4202 	test_options.to_vmsize = 0;
4203 	test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4204 
4205 	perform_test_with_options(test_options);
4206 }
4207 
4208 T_DECL(vm_test_protect, "Protect VM unit test",
4209     T_META_IGNORECRASHES(".*vm_allocation.*"))
4210 {
4211 	test_options.to_flags = VM_TEST_PROTECT;
4212 	test_options.to_vmsize = 0;
4213 	test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4214 
4215 	perform_test_with_options(test_options);
4216 }
4217 
4218 T_DECL(vm_test_copy, "Copy VM unit test")
4219 {
4220 	test_options.to_flags = VM_TEST_COPY;
4221 	test_options.to_vmsize = 0;
4222 	test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4223 
4224 	perform_test_with_options(test_options);
4225 }
4226