xref: /xnu-12377.41.6/tests/vm/configurator/vm_configurator_tests.h (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * vm_configurator_tests.h
31  *
32  * Virtual memory configurations and a test wrapper
33  * available for use by tests that use vm_configurator.
34  */
35 
36 #ifndef VM_CONFIGURATOR_TESTS_H
37 #define VM_CONFIGURATOR_TESTS_H
38 
39 #include "vm_configurator.h"
40 
41 /*
42  * Tests
43  *
44  * To add a new configuration for all VM API to be tested with:
45  * 1. Add a function definition `configure_<testname>`
46  *    that returns a vm_config_t representing the VM state
47  *    and address range to be tested.
48  * 2. Add a field named `<testname>` to struct vm_tests_t.
49  * 3. Add a call to `RUN_TEST(<testname>)` in run_vm_tests() below.
50  *
51  * To help debug failing tests:
52  * - Run a test executable with environment variable VERBOSE=1
53  *   to print the checker and VM state frequently.
54  * - Run a test executable with only a single VM configuration
55  *   by naming that configuration on the command line.
56  * Example of verbosely running only one read fault test:
57  *   env VERBOSE=1 /path/to/configurator_fault -n fault_read permanent_before_allocation
58  */
59 
/*
 * Factory for a test's VM configuration: builds and returns the
 * entry/object layout plus the address range the test operates on.
 */
typedef vm_config_t *(*configure_fn_t)(void);

/*
 * A single VM API test. Runs against the address range
 * [start, start + size); checker_list describes the expected VM state.
 */
typedef test_result_t (*test_fn_t)(
	checker_list_t *checker_list,
	mach_vm_address_t start,
	mach_vm_size_t size);
66 
67 /* single entry */
68 
69 static inline vm_config_t *
configure_single_entry_1(void)70 configure_single_entry_1(void)
71 {
72 	/* one entry, tested address range is the entire entry */
73 	vm_entry_template_t templates[] = {
74 		vm_entry_template(),
75 		END_ENTRIES
76 	};
77 	return make_vm_config("single entry > entire entry", templates);
78 }
79 
80 static inline vm_config_t *
configure_single_entry_2(void)81 configure_single_entry_2(void)
82 {
83 	/* one entry, tested address range includes only the first part of it */
84 	vm_entry_template_t templates[] = {
85 		vm_entry_template(),
86 		END_ENTRIES
87 	};
88 	return make_vm_config("single entry > first pages", templates,
89 	           0, -DEFAULT_PARTIAL_ENTRY_SIZE);
90 }
91 
92 static inline vm_config_t *
configure_single_entry_3(void)93 configure_single_entry_3(void)
94 {
95 	/* one entry, tested address range includes only the last part of it */
96 	vm_entry_template_t templates[] = {
97 		vm_entry_template(),
98 		END_ENTRIES
99 	};
100 	return make_vm_config("single entry > last pages", templates,
101 	           DEFAULT_PARTIAL_ENTRY_SIZE, 0);
102 }
103 
104 static inline vm_config_t *
configure_single_entry_4(void)105 configure_single_entry_4(void)
106 {
107 	/* one entry, tested address range includes only the middle part of it */
108 	vm_entry_template_t templates[] = {
109 		vm_entry_template(),
110 		END_ENTRIES
111 	};
112 	return make_vm_config("single entry > middle pages", templates,
113 	           DEFAULT_PARTIAL_ENTRY_SIZE / 2, -(DEFAULT_PARTIAL_ENTRY_SIZE / 2));
114 }
115 
116 /* multiple entries */
117 
118 static inline vm_config_t *
configure_multiple_entries_1(void)119 configure_multiple_entries_1(void)
120 {
121 	/* two entries, tested address range includes both */
122 	vm_entry_template_t templates[] = {
123 		vm_entry_template(),
124 		vm_entry_template(),
125 		END_ENTRIES
126 	};
127 	return make_vm_config("multiple entries > two entries", templates);
128 }
129 
130 static inline vm_config_t *
configure_multiple_entries_2(void)131 configure_multiple_entries_2(void)
132 {
133 	/* three entries, tested address range includes all of them */
134 	vm_entry_template_t templates[] = {
135 		vm_entry_template(),
136 		vm_entry_template(),
137 		vm_entry_template(),
138 		END_ENTRIES
139 	};
140 	return make_vm_config("multiple entries > three entries", templates);
141 }
142 
143 static inline vm_config_t *
configure_multiple_entries_3(void)144 configure_multiple_entries_3(void)
145 {
146 	/* many entries, tested address range includes all of them */
147 	vm_entry_template_t templates[] = {
148 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
149 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
150 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
151 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
152 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
153 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
154 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
155 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
156 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
157 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
158 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
159 		vm_entry_template(), vm_entry_template(), vm_entry_template(), vm_entry_template(),
160 		END_ENTRIES
161 	};
162 	return make_vm_config("multiple entries > many entries", templates);
163 }
164 
165 static inline vm_config_t *
configure_multiple_entries_4(void)166 configure_multiple_entries_4(void)
167 {
168 	/* three entries, tested address range excludes the end of the last one */
169 	vm_entry_template_t templates[] = {
170 		vm_entry_template(),
171 		vm_entry_template(),
172 		vm_entry_template(),
173 		END_ENTRIES
174 	};
175 	return make_vm_config("multiple entries > three entries, except the last pages", templates,
176 	           0, -DEFAULT_PARTIAL_ENTRY_SIZE);
177 }
178 
179 static inline vm_config_t *
configure_multiple_entries_5(void)180 configure_multiple_entries_5(void)
181 {
182 	/* three entries, tested address range excludes the start of the first one */
183 	vm_entry_template_t templates[] = {
184 		vm_entry_template(),
185 		vm_entry_template(),
186 		vm_entry_template(),
187 		END_ENTRIES
188 	};
189 	return make_vm_config("multiple entries > three entries, except the first pages", templates,
190 	           DEFAULT_PARTIAL_ENTRY_SIZE, 0);
191 }
192 
193 static inline vm_config_t *
configure_multiple_entries_6(void)194 configure_multiple_entries_6(void)
195 {
196 	/*
197 	 * three entries, tested address range excludes both
198 	 * the start of the first one and the end of the last one
199 	 */
200 	vm_entry_template_t templates[] = {
201 		vm_entry_template(),
202 		vm_entry_template(),
203 		vm_entry_template(),
204 		END_ENTRIES
205 	};
206 	assert(DEFAULT_PARTIAL_ENTRY_SIZE / 2 > 0);
207 	return make_vm_config("multiple entries > three entries, except the first and last pages", templates,
208 	           DEFAULT_PARTIAL_ENTRY_SIZE / 2, -(DEFAULT_PARTIAL_ENTRY_SIZE / 2));
209 }
210 
211 /* some holes but not entirely holes */
212 
213 static inline vm_config_t *
configure_some_holes_1(void)214 configure_some_holes_1(void)
215 {
216 	/* test address range begins in a hole and ends in an allocation */
217 	vm_entry_template_t templates[] = {
218 		hole_template,
219 		vm_entry_template(),
220 		END_ENTRIES
221 	};
222 	return make_vm_config("some holes > hole then one entry", templates);
223 }
224 
225 static inline vm_config_t *
configure_some_holes_2(void)226 configure_some_holes_2(void)
227 {
228 	/* test address range begins in a hole and ends in three allocation */
229 	vm_entry_template_t templates[] = {
230 		hole_template,
231 		vm_entry_template(),
232 		vm_entry_template(),
233 		vm_entry_template(),
234 		END_ENTRIES
235 	};
236 	return make_vm_config("some holes > hole then multiple entries", templates);
237 }
238 
239 static inline vm_config_t *
configure_some_holes_3(void)240 configure_some_holes_3(void)
241 {
242 	/* test address range begins in a hole and ends in the middle of an allocation */
243 	vm_entry_template_t templates[] = {
244 		hole_template,
245 		vm_entry_template(),
246 		END_ENTRIES
247 	};
248 	return make_vm_config("some holes > hole then partial entry", templates,
249 	           0, -DEFAULT_PARTIAL_ENTRY_SIZE);
250 }
251 
252 static inline vm_config_t *
configure_some_holes_4(void)253 configure_some_holes_4(void)
254 {
255 	/*
256 	 * test address range begins in a hole, covers two allocations,
257 	 * and ends in the middle of a third allocation
258 	 */
259 	vm_entry_template_t templates[] = {
260 		hole_template,
261 		vm_entry_template(),
262 		vm_entry_template(),
263 		vm_entry_template(),
264 		END_ENTRIES
265 	};
266 	return make_vm_config("some holes > hole then multiple entries then partial entry", templates,
267 	           0, -DEFAULT_PARTIAL_ENTRY_SIZE);
268 }
269 
270 static inline vm_config_t *
configure_some_holes_5(void)271 configure_some_holes_5(void)
272 {
273 	/* test address range begins at an allocation and ends in a hole */
274 	vm_entry_template_t templates[] = {
275 		vm_entry_template(),
276 		hole_template,
277 		END_ENTRIES
278 	};
279 	return make_vm_config("some holes > one entry then hole", templates);
280 }
281 
282 static inline vm_config_t *
configure_some_holes_6(void)283 configure_some_holes_6(void)
284 {
285 	/*
286 	 * test address range begins at an allocation, covers two more allocations,
287 	 * and ends in a hole
288 	 */
289 	vm_entry_template_t templates[] = {
290 		vm_entry_template(),
291 		vm_entry_template(),
292 		vm_entry_template(),
293 		hole_template,
294 		END_ENTRIES
295 	};
296 	return make_vm_config("some holes > multiple entries then hole", templates);
297 }
298 
299 static inline vm_config_t *
configure_some_holes_7(void)300 configure_some_holes_7(void)
301 {
302 	/* test address range begins in the middle of an allocation and ends in a hole */
303 	vm_entry_template_t templates[] = {
304 		vm_entry_template(),
305 		hole_template,
306 		END_ENTRIES
307 	};
308 	return make_vm_config("some holes > partial entry then hole", templates,
309 	           DEFAULT_PARTIAL_ENTRY_SIZE, 0);
310 }
311 
312 static inline vm_config_t *
configure_some_holes_8(void)313 configure_some_holes_8(void)
314 {
315 	/*
316 	 * test address range begins in the middle of an allocation, covers
317 	 * two more allocations, and ends in a hole
318 	 */
319 	vm_entry_template_t templates[] = {
320 		vm_entry_template(),
321 		vm_entry_template(),
322 		vm_entry_template(),
323 		hole_template,
324 		END_ENTRIES
325 	};
326 	return make_vm_config("some holes > partial entry then multiple entries then hole", templates,
327 	           DEFAULT_PARTIAL_ENTRY_SIZE, 0);
328 }
329 
330 static inline vm_config_t *
configure_some_holes_9(void)331 configure_some_holes_9(void)
332 {
333 	/* test address range is an allocation, then a hole, then an allocation */
334 	vm_entry_template_t templates[] = {
335 		vm_entry_template(),
336 		hole_template,
337 		vm_entry_template(),
338 		END_ENTRIES
339 	};
340 	return make_vm_config("some holes > hole in the middle", templates);
341 }
342 
343 static inline vm_config_t *
configure_some_holes_10(void)344 configure_some_holes_10(void)
345 {
346 	/* test address range is allocation-hole-allocation-hole-allocation */
347 	vm_entry_template_t templates[] = {
348 		vm_entry_template(),
349 		hole_template,
350 		vm_entry_template(),
351 		hole_template,
352 		vm_entry_template(),
353 		END_ENTRIES
354 	};
355 	return make_vm_config("some holes > two holes, three entries", templates);
356 }
357 
358 static inline vm_config_t *
configure_some_holes_11(void)359 configure_some_holes_11(void)
360 {
361 	/*
362 	 * test address range is
363 	 * two allocations-hole-two allocations-hole-two allocations
364 	 */
365 	vm_entry_template_t templates[] = {
366 		vm_entry_template(),
367 		vm_entry_template(),
368 		hole_template,
369 		vm_entry_template(),
370 		vm_entry_template(),
371 		hole_template,
372 		vm_entry_template(),
373 		vm_entry_template(),
374 		END_ENTRIES
375 	};
376 	return make_vm_config("some holes > two holes, six entries", templates);
377 }
378 
379 static inline vm_config_t *
configure_some_holes_12(void)380 configure_some_holes_12(void)
381 {
382 	/*
383 	 * test address range is
384 	 * three allocations-hole-three allocations-hole-three allocations
385 	 */
386 	vm_entry_template_t templates[] = {
387 		vm_entry_template(),
388 		vm_entry_template(),
389 		vm_entry_template(),
390 		hole_template,
391 		vm_entry_template(),
392 		vm_entry_template(),
393 		vm_entry_template(),
394 		hole_template,
395 		vm_entry_template(),
396 		vm_entry_template(),
397 		vm_entry_template(),
398 		END_ENTRIES
399 	};
400 	return make_vm_config("some holes > two holes, nine entries", templates);
401 }
402 
403 /* all holes */
404 
405 static inline vm_config_t *
configure_all_holes_1(void)406 configure_all_holes_1(void)
407 {
408 	/* test address range is unallocated, with allocations on both sides */
409 	vm_entry_template_t templates[] = {
410 		vm_entry_template(),
411 		hole_template,
412 		vm_entry_template(),
413 		END_ENTRIES
414 	};
415 	return make_vm_config("all holes > hole with entries on both sides", templates,
416 	           DEFAULT_ENTRY_SIZE, -DEFAULT_ENTRY_SIZE);
417 }
418 
419 static inline vm_config_t *
configure_all_holes_2(void)420 configure_all_holes_2(void)
421 {
422 	/*
423 	 * test address range is unallocated, with an allocation before
424 	 * and more unallocated space after
425 	 */
426 	vm_entry_template_t templates[] = {
427 		vm_entry_template(),
428 		hole_template,
429 		END_ENTRIES
430 	};
431 	return make_vm_config("all holes > hole with entry before and hole after", templates,
432 	           DEFAULT_ENTRY_SIZE, -DEFAULT_PARTIAL_ENTRY_SIZE);
433 }
434 
435 static inline vm_config_t *
configure_all_holes_3(void)436 configure_all_holes_3(void)
437 {
438 	/*
439 	 * test address range is unallocated, with more unallocated space before
440 	 * and an allocation after
441 	 */
442 	vm_entry_template_t templates[] = {
443 		hole_template,
444 		vm_entry_template(),
445 		END_ENTRIES
446 	};
447 	return make_vm_config("all holes > hole with hole before and entry after", templates,
448 	           DEFAULT_PARTIAL_ENTRY_SIZE, -DEFAULT_ENTRY_SIZE);
449 }
450 
451 static inline vm_config_t *
configure_all_holes_4(void)452 configure_all_holes_4(void)
453 {
454 	/* test address range is unallocated, with more unallocated space before and after */
455 	vm_entry_template_t templates[] = {
456 		hole_template,
457 		END_ENTRIES
458 	};
459 	return make_vm_config("all holes > hole with holes on both sides", templates,
460 	           DEFAULT_PARTIAL_ENTRY_SIZE / 2, -(DEFAULT_PARTIAL_ENTRY_SIZE / 2));
461 }
462 
463 /* residency and sharing */
464 
465 static inline vm_config_t *
configure_null_entry(void)466 configure_null_entry(void)
467 {
468 	vm_entry_template_t templates[] = {
469 		vm_entry_template(.share_mode = SM_EMPTY),
470 		END_ENTRIES
471 	};
472 	return make_vm_config("residency > null entry", templates);
473 }
474 
475 static inline vm_config_t *
configure_nonresident_entry(void)476 configure_nonresident_entry(void)
477 {
478 	vm_entry_template_t templates[] = {
479 		vm_entry_template(.share_mode = SM_PRIVATE),
480 		END_ENTRIES
481 	};
482 	return make_vm_config("residency > nonresident entry", templates);
483 }
484 
485 static inline vm_config_t *
configure_resident_entry(void)486 configure_resident_entry(void)
487 {
488 	vm_object_template_t object_templates[] = {
489 		vm_object_template(.fill_pattern = {Fill, 0}),
490 		END_OBJECTS
491 	};
492 	vm_entry_template_t templates[] = {
493 		vm_entry_template(.share_mode = SM_PRIVATE, .object = &object_templates[0]),
494 		END_ENTRIES
495 	};
496 	return make_vm_config("residency > resident entry", templates, object_templates);
497 }
498 
499 static inline vm_config_t *
configure_shared_entry(void)500 configure_shared_entry(void)
501 {
502 	/*
503 	 * Two entries sharing the same object.
504 	 * The address range covers only the left entry
505 	 */
506 	vm_object_template_t object_templates[] = {
507 		vm_object_template(),
508 		END_OBJECTS
509 	};
510 	vm_entry_template_t templates[] = {
511 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0]),
512 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0]),
513 		END_ENTRIES
514 	};
515 	return make_vm_config("sharing > simple shared entry", templates, object_templates,
516 	           0, -DEFAULT_ENTRY_SIZE);
517 }
518 
519 static inline vm_config_t *
configure_shared_entry_discontiguous(void)520 configure_shared_entry_discontiguous(void)
521 {
522 	/*
523 	 * Two entries sharing the same object,
524 	 * but not the same range inside that object.
525 	 * The address range covers only the left entry.
526 	 */
527 	vm_object_template_t object_templates[] = {
528 		vm_object_template(),
529 		END_OBJECTS
530 	};
531 	vm_entry_template_t templates[] = {
532 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0],
533 	    .offset = 0),
534 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0],
535 	    .offset = DEFAULT_ENTRY_SIZE),
536 		END_ENTRIES
537 	};
538 	return make_vm_config("sharing > discontiguous shared entry", templates, object_templates,
539 	           0, -DEFAULT_ENTRY_SIZE);
540 }
541 
542 static inline vm_config_t *
configure_shared_entry_partial(void)543 configure_shared_entry_partial(void)
544 {
545 	/*
546 	 * Two entries sharing the same object,
547 	 * but only partly overlap inside that object.
548 	 * The address range covers only the left entry.
549 	 */
550 	vm_object_template_t object_templates[] = {
551 		vm_object_template(),
552 		END_OBJECTS
553 	};
554 	vm_entry_template_t templates[] = {
555 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0],
556 	    .offset = 0),
557 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0],
558 	    .offset = DEFAULT_PARTIAL_ENTRY_SIZE),
559 		END_ENTRIES
560 	};
561 	return make_vm_config("sharing > partial shared entry", templates, object_templates,
562 	           0, -DEFAULT_ENTRY_SIZE);
563 }
564 
565 static inline vm_config_t *
configure_shared_entry_pairs(void)566 configure_shared_entry_pairs(void)
567 {
568 	/*
569 	 * Four entries. The first and last are shared. The middle two are
570 	 * also shared, independently.
571 	 * The address range covers all four entries.
572 	 */
573 	vm_object_template_t object_templates[] = {
574 		vm_object_template(.fill_pattern = {Fill, 0x1111111111111111}),
575 		vm_object_template(.fill_pattern = {Fill, 0x2222222222222222}),
576 		END_OBJECTS
577 	};
578 	vm_entry_template_t templates[] = {
579 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0]),
580 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[1]),
581 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[1]),
582 		vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0]),
583 		END_ENTRIES
584 	};
585 	return make_vm_config("sharing > two pairs of shared entries", templates, object_templates);
586 }
587 
588 static inline vm_config_t *
configure_shared_entry_x1000(void)589 configure_shared_entry_x1000(void)
590 {
591 	/*
592 	 * Many entries, all shared.
593 	 * The address range covers all entries.
594 	 */
595 	vm_object_template_t object_templates[] = {
596 		vm_object_template(.size = PAGE_SIZE),
597 		END_OBJECTS
598 	};
599 
600 	const unsigned count = 1000;  /* 1000 shared entries */
601 	vm_entry_template_t *templates = calloc(sizeof(templates[0]), count + 1);  /* ... plus 1 END_ENTRIES entry */
602 	for (unsigned i = 0; i < count; i++) {
603 		templates[i] = vm_entry_template(.share_mode = SM_SHARED, .object = &object_templates[0], .size = PAGE_SIZE);
604 	}
605 	templates[count] = END_ENTRIES;
606 	vm_config_t *result = make_vm_config("sharing > 1000 shared entries", templates, object_templates);
607 	free(templates);
608 	return result;
609 }
610 
611 static inline vm_config_t *
configure_cow_entry(void)612 configure_cow_entry(void)
613 {
614 	/*
615 	 * two entries that are COW copies of the same underlying object
616 	 * Operating range includes only the first entry.
617 	 */
618 	vm_object_template_t object_templates[] = {
619 		/* fixme must use a fill pattern to get a non-null object to copy */
620 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
621 		END_OBJECTS
622 	};
623 	vm_entry_template_t templates[] = {
624 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
625 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
626 		END_ENTRIES
627 	};
628 	return make_vm_config("cow > one COW entry", templates, object_templates,
629 	           0, -DEFAULT_ENTRY_SIZE);
630 }
631 
632 static inline vm_config_t *
configure_cow_unreferenced(void)633 configure_cow_unreferenced(void)
634 {
635 	/*
636 	 * one COW entry but the memory being copied has no other references
637 	 */
638 	vm_object_template_t object_templates[] = {
639 		/* fixme must use a fill pattern to get a non-null object to copy */
640 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
641 		END_OBJECTS
642 	};
643 	vm_entry_template_t templates[] = {
644 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
645 		END_ENTRIES
646 	};
647 	return make_vm_config("cow > COW with no other references", templates, object_templates);
648 }
649 
650 static inline vm_config_t *
configure_cow_nocow(void)651 configure_cow_nocow(void)
652 {
653 	/*
654 	 * one entry that is COW, then one ordinary entry.
655 	 * Additional out-of-range entry is a second reference to the COW memory.
656 	 */
657 	vm_object_template_t object_templates[] = {
658 		/* fixme must use a fill pattern to get a non-null object to copy */
659 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
660 		END_OBJECTS
661 	};
662 	vm_entry_template_t templates[] = {
663 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
664 		vm_entry_template(.share_mode = SM_PRIVATE),
665 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
666 		END_ENTRIES
667 	};
668 	return make_vm_config("cow > COW then not-COW", templates, object_templates,
669 	           0, -DEFAULT_ENTRY_SIZE);
670 }
671 
672 static inline vm_config_t *
configure_nocow_cow(void)673 configure_nocow_cow(void)
674 {
675 	/*
676 	 * one ordinary entry, then one entry that is COW.
677 	 * Additional out-of-range entry is a second reference to the COW memory.
678 	 */
679 	vm_object_template_t object_templates[] = {
680 		/* fixme must use a fill pattern to get a non-null object to copy */
681 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
682 		END_OBJECTS
683 	};
684 	vm_entry_template_t templates[] = {
685 		vm_entry_template(.share_mode = SM_PRIVATE),
686 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
687 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
688 		END_ENTRIES
689 	};
690 	return make_vm_config("cow > not-COW then COW", templates, object_templates,
691 	           0, -DEFAULT_ENTRY_SIZE);
692 }
693 
694 static inline vm_config_t *
configure_cow_unreadable(void)695 configure_cow_unreadable(void)
696 {
697 	/*
698 	 * COW entry that is unreadable.
699 	 * Additional out-of-range entry is a second reference to the COW memory.
700 	 */
701 	vm_object_template_t object_templates[] = {
702 		/* fixme must use a fill pattern to get a non-null object to copy */
703 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
704 		END_OBJECTS
705 	};
706 	vm_entry_template_t templates[] = {
707 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0],
708 	    .protection = VM_PROT_NONE),
709 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
710 		END_ENTRIES
711 	};
712 	return make_vm_config("cow > COW but unreadable", templates, object_templates,
713 	           0, -DEFAULT_ENTRY_SIZE);
714 }
715 
716 static inline vm_config_t *
configure_cow_unwriteable(void)717 configure_cow_unwriteable(void)
718 {
719 	/*
720 	 * COW entry that is readable but unwriteable.
721 	 * Additional out-of-range entry is a second reference to the COW memory.
722 	 */
723 	vm_object_template_t object_templates[] = {
724 		/* fixme must use a fill pattern to get a non-null object to copy */
725 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
726 		END_OBJECTS
727 	};
728 	vm_entry_template_t templates[] = {
729 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0],
730 	    .protection = VM_PROT_READ),
731 		vm_entry_template(.share_mode = SM_COW, .object = &object_templates[0]),
732 		END_ENTRIES
733 	};
734 	return make_vm_config("cow > COW but unwriteable", templates, object_templates,
735 	           0, -DEFAULT_ENTRY_SIZE);
736 }
737 
738 
739 static inline vm_config_t *
configure_permanent_entry(void)740 configure_permanent_entry(void)
741 {
742 	/* one permanent entry */
743 	vm_object_template_t object_templates[] = {
744 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
745 		END_OBJECTS
746 	};
747 	vm_entry_template_t templates[] = {
748 		vm_entry_template(.permanent = true, .object = &object_templates[0]),
749 		END_ENTRIES
750 	};
751 	return make_vm_config("permanent > one permanent entry",
752 	           templates, object_templates);
753 }
754 
755 static inline vm_config_t *
configure_permanent_before_permanent(void)756 configure_permanent_before_permanent(void)
757 {
758 	/* two permanent entries, both in-range */
759 	vm_object_template_t object_templates[] = {
760 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
761 		END_OBJECTS
762 	};
763 	vm_entry_template_t templates[] = {
764 		vm_entry_template(.permanent = true, .object = &object_templates[0]),
765 		vm_entry_template(.permanent = true, .share_mode = SM_EMPTY),
766 		END_ENTRIES
767 	};
768 	return make_vm_config("permanent > two permanent entries",
769 	           templates, object_templates);
770 }
771 
772 static inline vm_config_t *
configure_permanent_before_allocation(void)773 configure_permanent_before_allocation(void)
774 {
775 	/*
776 	 * permanent entry followed by allocation
777 	 * The third entry, outside the tested address range,
778 	 * is an unallocated hole. This tests rdar://144128567
779 	 * along with test configure_permanent_before_allocation_2
780 	 */
781 	vm_object_template_t object_templates[] = {
782 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
783 		END_OBJECTS
784 	};
785 	vm_entry_template_t templates[] = {
786 		vm_entry_template(.permanent = true, .object = &object_templates[0]),
787 		vm_entry_template(),
788 		hole_template,
789 		END_ENTRIES
790 	};
791 	return make_vm_config("permanent > permanent entry before allocation, hole outside",
792 	           templates, object_templates, 0, -DEFAULT_ENTRY_SIZE);
793 }
794 
795 static inline vm_config_t *
configure_permanent_before_allocation_2(void)796 configure_permanent_before_allocation_2(void)
797 {
798 	/*
799 	 * permanent entry followed by allocation
800 	 * The third entry, outside the tested address range,
801 	 * is an allocation to provoke rdar://144128567.
802 	 * Other than that bug the behavior should be
803 	 * identical to configure_permanent_before_allocation.
804 	 */
805 	vm_object_template_t object_templates[] = {
806 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
807 		END_OBJECTS
808 	};
809 	vm_entry_template_t templates[] = {
810 		vm_entry_template(.permanent = true, .object = &object_templates[0]),
811 		vm_entry_template(),
812 		vm_entry_template(),
813 		END_ENTRIES
814 	};
815 	return make_vm_config("permanent > permanent entry before allocation, allocation outside",
816 	           templates, object_templates, 0, -DEFAULT_ENTRY_SIZE);
817 }
818 
819 static inline vm_config_t *
configure_permanent_before_hole(void)820 configure_permanent_before_hole(void)
821 {
822 	/* permanent entry followed by a hole */
823 	vm_object_template_t object_templates[] = {
824 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
825 		END_OBJECTS
826 	};
827 	vm_entry_template_t templates[] = {
828 		vm_entry_template(.permanent = true, .object = &object_templates[0]),
829 		hole_template,
830 		END_ENTRIES
831 	};
832 	return make_vm_config("permanent > permanent entry before hole",
833 	           templates, object_templates);
834 }
835 
836 static inline vm_config_t *
configure_permanent_after_allocation(void)837 configure_permanent_after_allocation(void)
838 {
839 	/* allocation followed by a permanent entry */
840 	vm_object_template_t object_templates[] = {
841 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
842 		END_OBJECTS
843 	};
844 	vm_entry_template_t templates[] = {
845 		vm_entry_template(),
846 		vm_entry_template(.permanent = true, .object = &object_templates[0]),
847 		END_ENTRIES
848 	};
849 	return make_vm_config("permanent > permanent entry after allocation",
850 	           templates, object_templates);
851 }
852 
853 static inline vm_config_t *
configure_permanent_after_hole(void)854 configure_permanent_after_hole(void)
855 {
856 	/* hole followed by a permanent entry */
857 	vm_object_template_t object_templates[] = {
858 		vm_object_template(.fill_pattern = {Fill, 0x1234567890abcdef}),
859 		END_OBJECTS
860 	};
861 	vm_entry_template_t templates[] = {
862 		hole_template,
863 		vm_entry_template(.permanent = true, .object = &object_templates[0]),
864 		END_ENTRIES
865 	};
866 	return make_vm_config("permanent > permanent entry after hole",
867 	           templates, object_templates);
868 }
869 
870 
871 static inline vm_config_t *
configure_protection_single_common(vm_prot_t prot,vm_prot_t max)872 configure_protection_single_common(vm_prot_t prot, vm_prot_t max)
873 {
874 	vm_entry_template_t templates[] = {
875 		vm_entry_template(.protection = prot, .max_protection = max),
876 		END_ENTRIES
877 	};
878 
879 	TEMP_CSTRING(name, "protection > single entry prot/max %s/%s",
880 	    name_for_prot(prot), name_for_prot(max));
881 	return make_vm_config(name, templates);
882 }
883 
884 static inline vm_config_t *
configure_protection_pairs_common(vm_prot_t prot_left,vm_prot_t prot_right)885 configure_protection_pairs_common(vm_prot_t prot_left, vm_prot_t prot_right)
886 {
887 	vm_prot_t max_prot = VM_PROT_READ | VM_PROT_WRITE;
888 	vm_entry_template_t templates[] = {
889 		vm_entry_template(.protection = prot_left, .max_protection = max_prot),
890 		vm_entry_template(.protection = prot_right, .max_protection = max_prot),
891 		END_ENTRIES
892 	};
893 
894 	TEMP_CSTRING(name, "protection > two entries prot/max %s/%s and %s/%s",
895 	    name_for_prot(prot_left), name_for_prot(max_prot),
896 	    name_for_prot(prot_right), name_for_prot(max_prot));
897 	return make_vm_config(name, templates);
898 }
899 
/* single entry with every prot/max combination (fixme no PROT_EXEC) */
/* Combinations where prot exceeds max are disallowed and listed as comments only. */

/* prot/max ---/--- */
static inline vm_config_t *
configure_protection_single_000_000(void)
{
	return configure_protection_single_common(VM_PROT_NONE, VM_PROT_NONE);
}

/* prot/max r--/--- is disallowed */

/* prot/max -w-/--- is disallowed */

/* prot/max rw-/--- is disallowed */


/* prot/max ---/r-- */
static inline vm_config_t *
configure_protection_single_000_r00(void)
{
	return configure_protection_single_common(VM_PROT_NONE, VM_PROT_READ);
}

/* prot/max r--/r-- */
static inline vm_config_t *
configure_protection_single_r00_r00(void)
{
	return configure_protection_single_common(VM_PROT_READ, VM_PROT_READ);
}

/* prot/max -w-/r-- is disallowed */

/* prot/max rw-/r-- is disallowed */


/* prot/max ---/w-- */
static inline vm_config_t *
configure_protection_single_000_0w0(void)
{
	return configure_protection_single_common(VM_PROT_NONE, VM_PROT_WRITE);
}

/* prot/max r--/-w- is disallowed */

/* prot/max -w-/-w- */
static inline vm_config_t *
configure_protection_single_0w0_0w0(void)
{
	return configure_protection_single_common(VM_PROT_WRITE, VM_PROT_WRITE);
}

/* prot/max rw-/-w- is disallowed */


/* prot/max ---/rw- */
static inline vm_config_t *
configure_protection_single_000_rw0(void)
{
	return configure_protection_single_common(VM_PROT_NONE, VM_PROT_READ | VM_PROT_WRITE);
}

/* prot/max r--/rw- */
static inline vm_config_t *
configure_protection_single_r00_rw0(void)
{
	return configure_protection_single_common(VM_PROT_READ, VM_PROT_READ | VM_PROT_WRITE);
}

/* prot/max -w-/rw- */
static inline vm_config_t *
configure_protection_single_0w0_rw0(void)
{
	return configure_protection_single_common(VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE);
}

/* prot/max rw-/rw- */
static inline vm_config_t *
configure_protection_single_rw0_rw0(void)
{
	return configure_protection_single_common(VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE);
}
981 
982 
/* two entries with every pair of protections (fixme no PROT_EXEC) */
/* Both entries get max protection rw-; names encode the left/right cur protections. */

/* left ---, right --- */
static inline vm_config_t *
configure_protection_pairs_000_000(void)
{
	return configure_protection_pairs_common(VM_PROT_NONE, VM_PROT_NONE);
}

/* left ---, right r-- */
static inline vm_config_t *
configure_protection_pairs_000_r00(void)
{
	return configure_protection_pairs_common(VM_PROT_NONE, VM_PROT_READ);
}

/* left ---, right -w- */
static inline vm_config_t *
configure_protection_pairs_000_0w0(void)
{
	return configure_protection_pairs_common(VM_PROT_NONE, VM_PROT_WRITE);
}

/* left ---, right rw- */
static inline vm_config_t *
configure_protection_pairs_000_rw0(void)
{
	return configure_protection_pairs_common(VM_PROT_NONE, VM_PROT_READ | VM_PROT_WRITE);
}


/* left r--, right --- */
static inline vm_config_t *
configure_protection_pairs_r00_000(void)
{
	return configure_protection_pairs_common(VM_PROT_READ, VM_PROT_NONE);
}

/* left r--, right r-- */
static inline vm_config_t *
configure_protection_pairs_r00_r00(void)
{
	return configure_protection_pairs_common(VM_PROT_READ, VM_PROT_READ);
}

/* left r--, right -w- */
static inline vm_config_t *
configure_protection_pairs_r00_0w0(void)
{
	return configure_protection_pairs_common(VM_PROT_READ, VM_PROT_WRITE);
}

/* left r--, right rw- */
static inline vm_config_t *
configure_protection_pairs_r00_rw0(void)
{
	return configure_protection_pairs_common(VM_PROT_READ, VM_PROT_READ | VM_PROT_WRITE);
}


/* left -w-, right --- */
static inline vm_config_t *
configure_protection_pairs_0w0_000(void)
{
	return configure_protection_pairs_common(VM_PROT_WRITE, VM_PROT_NONE);
}

/* left -w-, right r-- */
static inline vm_config_t *
configure_protection_pairs_0w0_r00(void)
{
	return configure_protection_pairs_common(VM_PROT_WRITE, VM_PROT_READ);
}

/* left -w-, right -w- */
static inline vm_config_t *
configure_protection_pairs_0w0_0w0(void)
{
	return configure_protection_pairs_common(VM_PROT_WRITE, VM_PROT_WRITE);
}

/* left -w-, right rw- */
static inline vm_config_t *
configure_protection_pairs_0w0_rw0(void)
{
	return configure_protection_pairs_common(VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE);
}


/* left rw-, right --- */
static inline vm_config_t *
configure_protection_pairs_rw0_000(void)
{
	return configure_protection_pairs_common(VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE);
}

/* left rw-, right r-- */
static inline vm_config_t *
configure_protection_pairs_rw0_r00(void)
{
	return configure_protection_pairs_common(VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ);
}

/* left rw-, right -w- */
static inline vm_config_t *
configure_protection_pairs_rw0_0w0(void)
{
	return configure_protection_pairs_common(VM_PROT_READ | VM_PROT_WRITE, VM_PROT_WRITE);
}

/* left rw-, right rw- */
static inline vm_config_t *
configure_protection_pairs_rw0_rw0(void)
{
	return configure_protection_pairs_common(VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE);
}
1083 
1084 
1085 /* submaps */
1086 
/*
 * Common code for tests that are a single submap whose contents are a single entry
 * but test at different start and end offsets within that entry.
 *
 * start_offset and end_offset are forwarded to make_vm_config to trim the
 * tested range: callers pass a positive start_offset to exclude pages at
 * the start, and a negated size as end_offset to exclude pages at the end
 * (see the configure_single_submap_single_entry_* wrappers below).
 */
static inline vm_config_t *
configure_single_submap_single_entry_common(
	const char *testname,
	mach_vm_size_t start_offset,
	mach_vm_size_t end_offset)
{
	/* the submap's own contents: one entry backed by a filled object */
	vm_object_template_t submap_objects[] = {
		vm_object_template(.fill_pattern = {Fill, 0x1111111111111111}),
		END_OBJECTS
	};
	vm_entry_template_t submap_entries[] = {
		vm_entry_template(.object = &submap_objects[0]),
		END_ENTRIES
	};
	/* the parent map: a single entry mapping the submap */
	vm_object_template_t object_templates[] = {
		submap_object_template(
			.submap.entries = submap_entries,
			.submap.objects = submap_objects),
		END_OBJECTS
	};
	vm_entry_template_t entry_templates[] = {
		submap_entry_template(.object = &object_templates[0]),
		END_ENTRIES
	};
	return make_vm_config(testname,
	           entry_templates, object_templates, submap_entries, submap_objects,
	           start_offset, end_offset);
}
1119 
static inline vm_config_t *
configure_single_submap_single_entry(void)
{
	/*
	 * test range consists of a single submap mapping
	 * which in turn contains a single entry
	 * (zero offsets: the entire entry is tested)
	 */
	return configure_single_submap_single_entry_common(
		"submap > single entry > entire entry",
		0, 0 /* start and end offsets */);
}

static inline vm_config_t *
configure_single_submap_single_entry_first_pages(void)
{
	/*
	 * test range consists of a single submap mapping
	 * which in turn contains a single entry
	 * and the address range to be tested
	 * excludes the end of that entry
	 * (negated end offset trims from the end)
	 */
	return configure_single_submap_single_entry_common(
		"submap > single entry > first pages",
		0, -DEFAULT_PARTIAL_ENTRY_SIZE /* start and end offsets */);
}

static inline vm_config_t *
configure_single_submap_single_entry_last_pages(void)
{
	/*
	 * test range consists of a single submap mapping
	 * which in turn contains a single entry
	 * and the address range to be tested
	 * excludes the start of that entry
	 */
	return configure_single_submap_single_entry_common(
		"submap > single entry > last pages",
		DEFAULT_PARTIAL_ENTRY_SIZE, 0 /* start and end offsets */);
}

static inline vm_config_t *
configure_single_submap_single_entry_middle_pages(void)
{
	/*
	 * test range consists of a single submap mapping
	 * which in turn contains a single entry
	 * and the address range to be tested
	 * excludes the start and end of that entry
	 * (half of DEFAULT_PARTIAL_ENTRY_SIZE trimmed from each side)
	 */
	return configure_single_submap_single_entry_common(
		"submap > single entry > middle pages",
		DEFAULT_PARTIAL_ENTRY_SIZE / 2, -(DEFAULT_PARTIAL_ENTRY_SIZE / 2) /* start and end offsets */);
}
1173 
1174 
static inline vm_config_t *
configure_single_submap_oversize_entry_common(
	const char *testname,
	mach_vm_address_t parent_offset,
	mach_vm_size_t parent_size)
{
	/*
	 * submap contains a single entry of default size,
	 * parent map's view of the submap excludes some part of that entry
	 *
	 * parent_offset/parent_size select the window of the submap entry
	 * that the parent map actually maps; the window must lie entirely
	 * within the DEFAULT_ENTRY_SIZE submap entry.
	 */
	assert(parent_offset < DEFAULT_ENTRY_SIZE);
	assert(parent_offset + parent_size <= DEFAULT_ENTRY_SIZE);

	/* the submap's own contents: one entry backed by a filled object */
	vm_object_template_t submap_objects[] = {
		vm_object_template(.fill_pattern = {Fill, 0x1111111111111111}),
		END_OBJECTS
	};
	vm_entry_template_t submap_entries[] = {
		vm_entry_template(.object = &submap_objects[0]),
		END_ENTRIES
	};
	vm_object_template_t object_templates[] = {
		submap_object_template(
			.submap.entries = submap_entries,
			.submap.objects = submap_objects),
		END_OBJECTS
	};
	/* parent map entry maps only [parent_offset, parent_offset+parent_size) */
	vm_entry_template_t entry_templates[] = {
		submap_entry_template(
			.object = &object_templates[0],
			.offset = parent_offset,
			.size = parent_size),
		END_ENTRIES
	};
	return make_vm_config(testname,
	           entry_templates, object_templates,
	           submap_entries, submap_objects,
	           0, 0);
}
1214 
static inline vm_config_t *
configure_single_submap_oversize_entry_at_start(void)
{
	/*
	 * submap contains a single entry,
	 * parent map's view of the submap excludes the start of that entry
	 * (maps only the second half)
	 */
	return configure_single_submap_oversize_entry_common(
		"submap > oversize entry > oversize at start",
		DEFAULT_ENTRY_SIZE / 2 /* parent_offset */,
		DEFAULT_ENTRY_SIZE / 2 /* parent_size */);
}

static inline vm_config_t *
configure_single_submap_oversize_entry_at_end(void)
{
	/*
	 * submap contains a single entry,
	 * parent map's view of the submap excludes the end of that entry
	 * (maps only the first half)
	 */
	return configure_single_submap_oversize_entry_common(
		"submap > oversize entry > oversize at end",
		0 /* parent_offset */,
		DEFAULT_ENTRY_SIZE / 2 /* parent_size */);
}

static inline vm_config_t *
configure_single_submap_oversize_entry_at_both(void)
{
	/*
	 * submap contains a single entry,
	 * parent map's view of the submap excludes the start and end of that entry
	 * (maps only the middle half)
	 */
	return configure_single_submap_oversize_entry_common(
		"submap > oversize entry > oversize at both start and end",
		DEFAULT_ENTRY_SIZE / 4 /* parent_offset */,
		DEFAULT_ENTRY_SIZE / 2 /* parent_size */);
}
1253 
1254 
1255 /*
1256  * Common code for tests of a submap before or after a hole or allocation.
1257  */
1258 static inline vm_config_t *
configure_submap_beafterfore_entry(const char * testname,vm_entry_template_kind_t first,vm_entry_template_kind_t second,int submap_protection)1259 configure_submap_beafterfore_entry(
1260 	const char *testname,
1261 	vm_entry_template_kind_t first,
1262 	vm_entry_template_kind_t second,
1263 	int submap_protection)
1264 {
1265 	vm_object_template_t submap_objects[] = {
1266 		vm_object_template(.fill_pattern = {Fill, 0x1111111111111111}),
1267 		END_OBJECTS
1268 	};
1269 	vm_entry_template_t submap_entries[] = {
1270 		vm_entry_template(
1271 			.object = &submap_objects[0],
1272 			.protection = submap_protection,
1273 			.max_protection = submap_protection),
1274 		END_ENTRIES
1275 	};
1276 	vm_object_template_t object_templates[] = {
1277 		submap_object_template(
1278 			.submap.entries = submap_entries,
1279 			.submap.objects = submap_objects),
1280 		END_OBJECTS
1281 	};
1282 	vm_entry_template_t template_options[] = {
1283 		[Hole] = hole_template,
1284 		[Allocation] = vm_entry_template(),
1285 		[Submap] = submap_entry_template(.object = &object_templates[0])
1286 	};
1287 	/* entries must be Hole or Allocation or Submap */
1288 	assert(first == Hole || first == Allocation || first == Submap);
1289 	assert(second == Hole || second == Allocation || second == Submap);
1290 	/* exactly one entry must be Submap */
1291 	assert((first == Submap && second != Submap) ||
1292 	    (first != Submap && second == Submap));
1293 	vm_entry_template_t entry_templates[] = {
1294 		template_options[first],
1295 		template_options[second],
1296 		END_ENTRIES
1297 	};
1298 	return make_vm_config(testname,
1299 	           entry_templates, object_templates, submap_entries, submap_objects,
1300 	           0, 0);
1301 }
1302 
/* rw- submap entry mapped before an allocation */
static inline vm_config_t *
configure_submap_before_allocation(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap before allocation", Submap, Allocation,
		VM_PROT_READ | VM_PROT_WRITE);
}

/* read-only submap entry mapped before an allocation */
static inline vm_config_t *
configure_submap_before_allocation_ro(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap before allocation, read-only", Submap, Allocation,
		VM_PROT_READ);
}

/* rw- submap entry mapped after an allocation */
static inline vm_config_t *
configure_submap_after_allocation(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap after allocation", Allocation, Submap,
		VM_PROT_READ | VM_PROT_WRITE);
}

/* read-only submap entry mapped after an allocation */
static inline vm_config_t *
configure_submap_after_allocation_ro(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap after allocation, read-only", Allocation, Submap,
		VM_PROT_READ);
}

/* rw- submap entry mapped before a hole */
static inline vm_config_t *
configure_submap_before_hole(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap before hole", Submap, Hole,
		VM_PROT_READ | VM_PROT_WRITE);
}

/* read-only submap entry mapped before a hole */
static inline vm_config_t *
configure_submap_before_hole_ro(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap before hole, read-only", Submap, Hole,
		VM_PROT_READ);
}

/* rw- submap entry mapped after a hole */
static inline vm_config_t *
configure_submap_after_hole(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap after hole", Hole, Submap,
		VM_PROT_READ | VM_PROT_WRITE);
}

/* read-only submap entry mapped after a hole */
static inline vm_config_t *
configure_submap_after_hole_ro(void)
{
	return configure_submap_beafterfore_entry(
		"submap > submap after hole, read-only", Hole, Submap,
		VM_PROT_READ);
}
1366 
1367 static inline vm_config_t *
configure_submap_allocation_submap_one_entry_common(const char * testname,int submap_protection)1368 configure_submap_allocation_submap_one_entry_common(
1369 	const char *testname,
1370 	int submap_protection)
1371 {
1372 	/*
1373 	 * submap has a single entry, but parent map entries are
1374 	 * submap-allocation-submap, as if part of the submap mapping
1375 	 * had been deallocated or unnested
1376 	 */
1377 
1378 	vm_object_template_t submap_objects[] = {
1379 		vm_object_template(.fill_pattern = {Fill, 0x1111111111111111}),
1380 		END_OBJECTS
1381 	};
1382 	vm_entry_template_t submap_entries[] = {
1383 		vm_entry_template(
1384 			.object = &submap_objects[0],
1385 			.size = DEFAULT_ENTRY_SIZE * 3,
1386 			.protection = submap_protection,
1387 			.max_protection = submap_protection),
1388 		END_ENTRIES
1389 	};
1390 	vm_object_template_t object_templates[] = {
1391 		submap_object_template(
1392 			.submap.entries = submap_entries,
1393 			.submap.objects = submap_objects),
1394 		END_OBJECTS
1395 	};
1396 	vm_entry_template_t entry_templates[] = {
1397 		submap_entry_template(
1398 			.object = &object_templates[0],
1399 			.offset = 0),
1400 		vm_entry_template(),
1401 		submap_entry_template(
1402 			.object = &object_templates[0],
1403 			.offset = DEFAULT_ENTRY_SIZE * 2),
1404 		END_ENTRIES
1405 	};
1406 	return make_vm_config(testname,
1407 	           entry_templates, object_templates,
1408 	           submap_entries, submap_objects,
1409 	           0, 0);
1410 }
1411 
/* submap-allocation-submap with one rw- entry in the submap */
static inline vm_config_t *
configure_submap_allocation_submap_one_entry(void)
{
	return configure_submap_allocation_submap_one_entry_common(
		"submap > submap-allocation-submap, one entry in submap",
		VM_PROT_READ | VM_PROT_WRITE);
}

/* submap-allocation-submap with one read-only entry in the submap */
static inline vm_config_t *
configure_submap_allocation_submap_one_entry_ro(void)
{
	return configure_submap_allocation_submap_one_entry_common(
		"submap > submap-allocation-submap, one entry in submap, read-only",
		VM_PROT_READ);
}
1427 
1428 static inline vm_config_t *
configure_submap_allocation_submap_two_entries_common(const char * testname,int submap_protection)1429 configure_submap_allocation_submap_two_entries_common(
1430 	const char *testname,
1431 	int submap_protection)
1432 {
1433 	/*
1434 	 * submap has two entries, but parent map entries are
1435 	 * submap-allocation-submap, as if part of the submap mapping
1436 	 * had been deallocated or unnested (not matching the submap
1437 	 * entry boundaries)
1438 	 */
1439 
1440 	const mach_vm_size_t parent_entry_size = DEFAULT_ENTRY_SIZE;
1441 	const mach_vm_size_t total_size = parent_entry_size * 3;
1442 	const mach_vm_size_t submap_entry_size = total_size / 2;
1443 	assert(parent_entry_size * 3 == submap_entry_size * 2);
1444 
1445 	vm_object_template_t submap_objects[] = {
1446 		vm_object_template(.fill_pattern = {Fill, 0x1111111111111111}),
1447 		vm_object_template(.fill_pattern = {Fill, 0x2222222222222222}),
1448 		END_OBJECTS
1449 	};
1450 	vm_entry_template_t submap_entries[] = {
1451 		vm_entry_template(
1452 			.object = &submap_objects[0],
1453 			.size = submap_entry_size,
1454 			.protection = submap_protection,
1455 			.max_protection = submap_protection),
1456 		vm_entry_template(
1457 			.object = &submap_objects[1],
1458 			.size = submap_entry_size,
1459 			.protection = submap_protection,
1460 			.max_protection = submap_protection),
1461 		END_ENTRIES
1462 	};
1463 	vm_object_template_t object_templates[] = {
1464 		submap_object_template(
1465 			.submap.entries = submap_entries,
1466 			.submap.objects = submap_objects),
1467 		END_OBJECTS
1468 	};
1469 	vm_entry_template_t entry_templates[] = {
1470 		submap_entry_template(
1471 			.object = &object_templates[0],
1472 			.offset = 0,
1473 			.size = parent_entry_size),
1474 		vm_entry_template(),
1475 		submap_entry_template(
1476 			.object = &object_templates[0],
1477 			.offset = parent_entry_size * 2,
1478 			.size = parent_entry_size),
1479 		END_ENTRIES
1480 	};
1481 	return make_vm_config(testname,
1482 	           entry_templates, object_templates,
1483 	           submap_entries, submap_objects,
1484 	           0, 0);
1485 }
1486 
/* submap-allocation-submap with two rw- entries in the submap */
static inline vm_config_t *
configure_submap_allocation_submap_two_entries(void)
{
	return configure_submap_allocation_submap_two_entries_common(
		"submap > submap-allocation-submap, two entries in submap",
		VM_PROT_READ | VM_PROT_WRITE);
}

/* submap-allocation-submap with two read-only entries in the submap */
static inline vm_config_t *
configure_submap_allocation_submap_two_entries_ro(void)
{
	return configure_submap_allocation_submap_two_entries_common(
		"submap > submap-allocation-submap, two entries in submap, read-only",
		VM_PROT_READ);
}
1502 
1503 static inline vm_config_t *
configure_submap_allocation_submap_three_entries_common(const char * testname,int submap_protection)1504 configure_submap_allocation_submap_three_entries_common(
1505 	const char *testname,
1506 	int submap_protection)
1507 {
1508 	/*
1509 	 * submap has three entries, parent map entries are
1510 	 * submap-allocation-submap, as if part of the submap mapping
1511 	 * had been deallocated or unnested on the submap entry boundaries
1512 	 */
1513 
1514 	vm_object_template_t submap_objects[] = {
1515 		vm_object_template(.fill_pattern = {Fill, 0x1111111111111111}),
1516 		vm_object_template(.fill_pattern = {Fill, 0x2222222222222222}),
1517 		vm_object_template(.fill_pattern = {Fill, 0x3333333333333333}),
1518 		END_OBJECTS
1519 	};
1520 	vm_entry_template_t submap_entries[] = {
1521 		vm_entry_template(
1522 			.object = &submap_objects[0],
1523 			.protection = submap_protection,
1524 			.max_protection = submap_protection),
1525 		vm_entry_template(
1526 			.object = &submap_objects[1],
1527 			.protection = submap_protection,
1528 			.max_protection = submap_protection),
1529 		vm_entry_template(
1530 			.object = &submap_objects[2],
1531 			.protection = submap_protection,
1532 			.max_protection = submap_protection),
1533 		END_ENTRIES
1534 	};
1535 	vm_object_template_t object_templates[] = {
1536 		submap_object_template(
1537 			.submap.entries = submap_entries,
1538 			.submap.objects = submap_objects),
1539 		END_OBJECTS
1540 	};
1541 	vm_entry_template_t entry_templates[] = {
1542 		submap_entry_template(
1543 			.object = &object_templates[0],
1544 			.offset = 0),
1545 		vm_entry_template(),
1546 		submap_entry_template(
1547 			.object = &object_templates[0],
1548 			.offset = DEFAULT_ENTRY_SIZE * 2),
1549 		END_ENTRIES
1550 	};
1551 	return make_vm_config(testname,
1552 	           entry_templates, object_templates,
1553 	           submap_entries, submap_objects,
1554 	           0, 0);
1555 }
1556 
/* submap-allocation-submap with three rw- entries in the submap */
static inline vm_config_t *
configure_submap_allocation_submap_three_entries(void)
{
	return configure_submap_allocation_submap_three_entries_common(
		"submap > submap-allocation-submap, three entries in submap",
		VM_PROT_READ | VM_PROT_WRITE);
}

/* submap-allocation-submap with three read-only entries in the submap */
static inline vm_config_t *
configure_submap_allocation_submap_three_entries_ro(void)
{
	return configure_submap_allocation_submap_three_entries_common(
		"submap > submap-allocation-submap, three entries in submap, read-only",
		VM_PROT_READ);
}
1572 
1573 
1574 /* add new tests here (configure_<testname> functions) */
1575 
1576 
/*
 * vm_tests_t: one test callback per VM configuration.
 * run_vm_tests() pairs each member with the corresponding
 * configure_<name> function.  Test files that deliberately
 * do not cover a case may set that member to test_is_unimplemented.
 */
typedef struct {
	test_fn_t single_entry_1;
	test_fn_t single_entry_2;
	test_fn_t single_entry_3;
	test_fn_t single_entry_4;

	test_fn_t multiple_entries_1;
	test_fn_t multiple_entries_2;
	test_fn_t multiple_entries_3;
	test_fn_t multiple_entries_4;
	test_fn_t multiple_entries_5;
	test_fn_t multiple_entries_6;

	test_fn_t some_holes_1;
	test_fn_t some_holes_2;
	test_fn_t some_holes_3;
	test_fn_t some_holes_4;
	test_fn_t some_holes_5;
	test_fn_t some_holes_6;
	test_fn_t some_holes_7;
	test_fn_t some_holes_8;
	test_fn_t some_holes_9;
	test_fn_t some_holes_10;
	test_fn_t some_holes_11;
	test_fn_t some_holes_12;

	test_fn_t all_holes_1;
	test_fn_t all_holes_2;
	test_fn_t all_holes_3;
	test_fn_t all_holes_4;

	test_fn_t null_entry;
	test_fn_t nonresident_entry;
	test_fn_t resident_entry;

	test_fn_t shared_entry;
	test_fn_t shared_entry_discontiguous;
	test_fn_t shared_entry_partial;
	test_fn_t shared_entry_pairs;
	test_fn_t shared_entry_x1000;

	test_fn_t cow_entry;
	test_fn_t cow_unreferenced;
	test_fn_t cow_nocow;
	test_fn_t nocow_cow;
	test_fn_t cow_unreadable;
	test_fn_t cow_unwriteable;

	test_fn_t permanent_entry;
	test_fn_t permanent_before_permanent;
	test_fn_t permanent_before_allocation;
	test_fn_t permanent_before_allocation_2;
	test_fn_t permanent_before_hole;
	test_fn_t permanent_after_allocation;
	test_fn_t permanent_after_hole;

	test_fn_t single_submap_single_entry;
	test_fn_t single_submap_single_entry_first_pages;
	test_fn_t single_submap_single_entry_last_pages;
	test_fn_t single_submap_single_entry_middle_pages;
	test_fn_t single_submap_oversize_entry_at_start;
	test_fn_t single_submap_oversize_entry_at_end;
	test_fn_t single_submap_oversize_entry_at_both;

	/* read-only variants of the single-submap tests above */
	test_fn_t single_submap_single_entry_ro;
	test_fn_t single_submap_single_entry_first_pages_ro;
	test_fn_t single_submap_single_entry_last_pages_ro;
	test_fn_t single_submap_single_entry_middle_pages_ro;
	test_fn_t single_submap_oversize_entry_at_start_ro;
	test_fn_t single_submap_oversize_entry_at_end_ro;
	test_fn_t single_submap_oversize_entry_at_both_ro;

	test_fn_t submap_before_allocation;
	test_fn_t submap_after_allocation;
	test_fn_t submap_before_hole;
	test_fn_t submap_after_hole;
	test_fn_t submap_allocation_submap_one_entry;
	test_fn_t submap_allocation_submap_two_entries;
	test_fn_t submap_allocation_submap_three_entries;

	/* read-only variants of the submap tests above */
	test_fn_t submap_before_allocation_ro;
	test_fn_t submap_after_allocation_ro;
	test_fn_t submap_before_hole_ro;
	test_fn_t submap_after_hole_ro;
	test_fn_t submap_allocation_submap_one_entry_ro;
	test_fn_t submap_allocation_submap_two_entries_ro;
	test_fn_t submap_allocation_submap_three_entries_ro;

	test_fn_t protection_single_000_000;
	test_fn_t protection_single_000_r00;
	test_fn_t protection_single_000_0w0;
	test_fn_t protection_single_000_rw0;
	test_fn_t protection_single_r00_r00;
	test_fn_t protection_single_r00_rw0;
	test_fn_t protection_single_0w0_0w0;
	test_fn_t protection_single_0w0_rw0;
	test_fn_t protection_single_rw0_rw0;

	test_fn_t protection_pairs_000_000;
	test_fn_t protection_pairs_000_r00;
	test_fn_t protection_pairs_000_0w0;
	test_fn_t protection_pairs_000_rw0;
	test_fn_t protection_pairs_r00_000;
	test_fn_t protection_pairs_r00_r00;
	test_fn_t protection_pairs_r00_0w0;
	test_fn_t protection_pairs_r00_rw0;
	test_fn_t protection_pairs_0w0_000;
	test_fn_t protection_pairs_0w0_r00;
	test_fn_t protection_pairs_0w0_0w0;
	test_fn_t protection_pairs_0w0_rw0;
	test_fn_t protection_pairs_rw0_000;
	test_fn_t protection_pairs_rw0_r00;
	test_fn_t protection_pairs_rw0_0w0;
	test_fn_t protection_pairs_rw0_rw0;

	/* add new tests here */
} vm_tests_t;
1694 
1695 
1696 /*
1697  * test_is_unimplemented is used by test files
1698  * as a value in struct vm_tests_t to indicate that
1699  * a particular test case is deliberately not implemented.
1700  */
1701 extern test_result_t
1702 test_is_unimplemented(
1703 	checker_list_t *checker_list,
1704 	mach_vm_address_t start,
1705 	mach_vm_size_t size);
1706 
1707 /*
1708  * Return true if the process is running under Rosetta translation
1709  * https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment#Determine-Whether-Your-App-Is-Running-as-a-Translated-Binary
1710  */
1711 static bool
isRosetta()1712 isRosetta()
1713 {
1714 #if KERNEL
1715 	return false;
1716 #else
1717 	int out_value = 0;
1718 	size_t io_size = sizeof(out_value);
1719 	if (sysctlbyname("sysctl.proc_translated", &out_value, &io_size, NULL, 0) == 0) {
1720 		assert(io_size >= sizeof(out_value));
1721 		return out_value;
1722 	}
1723 	return false;
1724 #endif
1725 }
1726 
1727 /*
1728  * Return true if the task map's page size is less than the VM page size.
1729  * (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT)
1730  * for example, Rosetta Intel on ARM
1731  */
static inline bool
task_page_size_less_than_vm_page_size(void)
{
	/* page size of this task's map, per the PAGE_SIZE this code was built with */
	size_t map_page_size = PAGE_SIZE;
	/* the kernel's VM page size, per the vm.pagesize sysctl */
	uint32_t vm_page_size = 0;
	size_t len = sizeof(vm_page_size);
	int err = sysctlbyname("vm.pagesize", &vm_page_size, &len, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname('vm.pagesize')");
	T_QUIET; T_ASSERT_GE(len, sizeof(vm_page_size), "sysctl result size");
	return map_page_size < vm_page_size;
}
1743 
1744 extern void
1745 run_one_vm_test(
1746 	const char *filename,
1747 	const char *funcname,
1748 	const char *testname,
1749 	configure_fn_t configure_fn,
1750 	test_fn_t test_fn);
1751 
1752 static inline void
run_vm_tests(const char * funcname,const char * filename,vm_tests_t * tests,int argc,char * const * argv)1753 run_vm_tests(
1754 	const char *funcname,
1755 	const char *filename,
1756 	vm_tests_t *tests,
1757 	int argc,
1758 	char * const *argv)
1759 {
1760 	/* Allow naming a single test to run on the command line. */
1761 	const char *test_to_run = NULL;
1762 	bool ran_a_test = false;
1763 	if (argc == 1) {
1764 		test_to_run = argv[0];
1765 		T_LOG("RUNNING ONLY ONE TEST: %s %s", funcname, test_to_run);
1766 	}
1767 
1768 	/*
1769 	 * rdar://138495830 tests fail on Rosetta because of allocation holes
1770 	 * We run tests that don't have holes and skip those that do.
1771 	 */
1772 	bool test_holes = true;
1773 	if (isRosetta()) {
1774 		T_LOG("SKIPPING TESTS of allocation holes on Rosetta (rdar://138495830)");
1775 		test_holes = false;
1776 	}
1777 
1778 #define RUN_TEST(testname)                                      \
1779 	({                                                              \
1780 	    if (test_to_run == NULL || 0 == strcmp(#testname, test_to_run)) { \
1781 	            ran_a_test = true;                                  \
1782 	            run_one_vm_test(filename, funcname, #testname,      \
1783 	                configure_##testname, tests->testname);         \
1784 	    }                                                           \
1785 	})
1786 
1787 	/* single vm map entry and parts thereof, no holes */
1788 	RUN_TEST(single_entry_1);
1789 	RUN_TEST(single_entry_2);
1790 	RUN_TEST(single_entry_3);
1791 	RUN_TEST(single_entry_4);
1792 
1793 	/* multiple map entries and parts thereof, no holes */
1794 	RUN_TEST(multiple_entries_1);
1795 	RUN_TEST(multiple_entries_2);
1796 	RUN_TEST(multiple_entries_3);
1797 	RUN_TEST(multiple_entries_4);
1798 	RUN_TEST(multiple_entries_5);
1799 	RUN_TEST(multiple_entries_6);
1800 
1801 	/* ranges with holes */
1802 	if (test_holes) {
1803 		RUN_TEST(some_holes_1);
1804 		RUN_TEST(some_holes_2);
1805 		RUN_TEST(some_holes_3);
1806 		RUN_TEST(some_holes_4);
1807 		RUN_TEST(some_holes_5);
1808 		RUN_TEST(some_holes_6);
1809 		RUN_TEST(some_holes_7);
1810 		RUN_TEST(some_holes_8);
1811 		RUN_TEST(some_holes_9);
1812 		RUN_TEST(some_holes_10);
1813 		RUN_TEST(some_holes_11);
1814 		RUN_TEST(some_holes_12);
1815 	}
1816 
1817 	/* ranges that are nothing but holes */
1818 	if (test_holes) {
1819 		RUN_TEST(all_holes_1);
1820 		RUN_TEST(all_holes_2);
1821 		RUN_TEST(all_holes_3);
1822 		RUN_TEST(all_holes_4);
1823 	}
1824 
1825 	/* residency */
1826 	RUN_TEST(null_entry);
1827 	RUN_TEST(nonresident_entry);  // fixme broken in create_vm_state
1828 	RUN_TEST(resident_entry);
1829 
1830 	/* sharing */
1831 	RUN_TEST(shared_entry);
1832 	RUN_TEST(shared_entry_discontiguous);
1833 	RUN_TEST(shared_entry_partial);
1834 	RUN_TEST(shared_entry_pairs);
1835 	RUN_TEST(shared_entry_x1000);
1836 
1837 	/* cow */
1838 	RUN_TEST(cow_entry);
1839 	RUN_TEST(cow_unreferenced);
1840 	RUN_TEST(cow_nocow);
1841 	RUN_TEST(nocow_cow);
1842 	RUN_TEST(cow_unreadable);
1843 	RUN_TEST(cow_unwriteable);
1844 
1845 	/* permanent */
1846 	RUN_TEST(permanent_entry);
1847 	RUN_TEST(permanent_before_permanent);
1848 	if (test_holes) {
1849 		/* this test does have a required hole, after the other allocations */
1850 		RUN_TEST(permanent_before_allocation);
1851 	}
1852 	RUN_TEST(permanent_before_allocation_2);
1853 	if (test_holes) {
1854 		RUN_TEST(permanent_before_hole);
1855 	}
1856 	RUN_TEST(permanent_after_allocation);
1857 	if (test_holes) {
1858 		RUN_TEST(permanent_after_hole);
1859 	}
1860 
1861 	/* submaps */
1862 	RUN_TEST(single_submap_single_entry);
1863 	RUN_TEST(single_submap_single_entry_first_pages);
1864 	RUN_TEST(single_submap_single_entry_last_pages);
1865 	RUN_TEST(single_submap_single_entry_middle_pages);
1866 	RUN_TEST(single_submap_oversize_entry_at_start);
1867 	RUN_TEST(single_submap_oversize_entry_at_end);
1868 	RUN_TEST(single_submap_oversize_entry_at_both);
1869 
1870 	RUN_TEST(submap_before_allocation);
1871 	RUN_TEST(submap_before_allocation_ro);
1872 	RUN_TEST(submap_after_allocation);
1873 	RUN_TEST(submap_after_allocation_ro);
1874 	if (test_holes) {
1875 		RUN_TEST(submap_before_hole);
1876 		RUN_TEST(submap_before_hole_ro);
1877 		RUN_TEST(submap_after_hole);
1878 		RUN_TEST(submap_after_hole_ro);
1879 	}
1880 	RUN_TEST(submap_allocation_submap_one_entry);
1881 	RUN_TEST(submap_allocation_submap_one_entry_ro);
1882 	RUN_TEST(submap_allocation_submap_two_entries);
1883 	RUN_TEST(submap_allocation_submap_two_entries_ro);
1884 	RUN_TEST(submap_allocation_submap_three_entries);
1885 	RUN_TEST(submap_allocation_submap_three_entries_ro);
1886 
1887 	/* protection */
1888 	RUN_TEST(protection_single_000_000);
1889 	RUN_TEST(protection_single_000_r00);
1890 	RUN_TEST(protection_single_r00_r00);
1891 	RUN_TEST(protection_single_000_0w0);
1892 	RUN_TEST(protection_single_0w0_0w0);
1893 	RUN_TEST(protection_single_000_rw0);
1894 	RUN_TEST(protection_single_r00_rw0);
1895 	RUN_TEST(protection_single_0w0_rw0);
1896 	RUN_TEST(protection_single_rw0_rw0);
1897 
1898 	RUN_TEST(protection_pairs_000_000);
1899 	RUN_TEST(protection_pairs_000_r00);
1900 	RUN_TEST(protection_pairs_000_0w0);
1901 	RUN_TEST(protection_pairs_000_rw0);
1902 	RUN_TEST(protection_pairs_r00_000);
1903 	RUN_TEST(protection_pairs_r00_r00);
1904 	RUN_TEST(protection_pairs_r00_0w0);
1905 	RUN_TEST(protection_pairs_r00_rw0);
1906 	RUN_TEST(protection_pairs_0w0_000);
1907 	RUN_TEST(protection_pairs_0w0_r00);
1908 	RUN_TEST(protection_pairs_0w0_0w0);
1909 	RUN_TEST(protection_pairs_0w0_rw0);
1910 	RUN_TEST(protection_pairs_rw0_000);
1911 	RUN_TEST(protection_pairs_rw0_r00);
1912 	RUN_TEST(protection_pairs_rw0_0w0);
1913 	RUN_TEST(protection_pairs_rw0_rw0);
1914 
1915 	/* add new tests here */
1916 
1917 #undef RUN_TEST
1918 
1919 	if (test_to_run != NULL && !ran_a_test) {
1920 		T_FAIL("no test named '%s'", test_to_run);
1921 	}
1922 }
1923 
1924 #endif  /* VM_CONFIGURATOR_TESTS_H */
1925