xref: /xnu-12377.81.4/tests/vm/configurator/vm_configurator.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <stdint.h>
32 #include <stdbool.h>
33 #include <unistd.h>
34 #include <assert.h>
35 #include <machine/endian.h>
36 
37 #include "try_read_write.h"
38 #include "exc_helpers.h"
39 #include "exc_guard_helper.h"
40 #include "vm_configurator.h"
41 #include "vm_configurator_tests.h"
42 
43 #pragma clang diagnostic ignored "-Wgnu-conditional-omitted-operand"
44 #pragma clang diagnostic ignored "-Wformat-pedantic"
45 
/* When true, print extra diagnostic output during test runs. */
bool Verbose = false;

/* TODO: sufficiently new SDK defines this */
#ifndef VM_BEHAVIOR_LAST_VALID
#define VM_BEHAVIOR_LAST_VALID VM_BEHAVIOR_ZERO
#endif

/* convenient size constants (bytes) */
#define KB16 16384
#define MB (1024*1024)

/* pretty printing */

/* indentation printed in front of submap contents */
#define SUBMAP_PREFIX "    .   "
60 
/*
 * Used when printing attributes of checkers and vm regions.
 * BadHighlight gets a highlighted color and "*" marker.
 * NormalHighlight gets normal color.
 * IgnoredHighlight gets dimmed color.
 */
typedef enum {
	BadHighlight = 0,
	NormalHighlight,
	IgnoredHighlight,
	HighlightCount  /* count of styles; not itself a valid style */
} attribute_highlight_t;

/*
 * Specify highlights for all entry and object attributes.
 * Used when printing entire checkers or VM states.
 */
typedef struct {
	attribute_highlight_t highlighting;  /* style used for selected attributes */
	vm_entry_attribute_list_t entry;     /* which entry attributes are selected */
	vm_object_attribute_list_t object;   /* which object attributes are selected */
} attribute_highlights_t;
83 
84 /*
85  * Print all attributes as NormalHighlight.
86  */
87 static attribute_highlights_t
normal_highlights(void)88 normal_highlights(void)
89 {
90 	return (attribute_highlights_t) {
91 		       .highlighting = NormalHighlight,
92 		       .entry = vm_entry_attributes_with_default(true),
93 		       .object = vm_object_attributes_with_default(true),
94 	};
95 }
96 
97 /*
98  * Print bad_entry_attr and bad_object_attr as BadHighlight.
99  * Print other attributes as IgnoredHighlight.
100  */
101 static attribute_highlights_t
bad_or_ignored_highlights(vm_entry_attribute_list_t bad_entry_attr,vm_object_attribute_list_t bad_object_attr)102 bad_or_ignored_highlights(
103 	vm_entry_attribute_list_t bad_entry_attr,
104 	vm_object_attribute_list_t bad_object_attr)
105 {
106 	return (attribute_highlights_t) {
107 		       .highlighting = BadHighlight,
108 		       .entry = bad_entry_attr,
109 		       .object = bad_object_attr,
110 	};
111 }
112 
113 /*
114  * Print normal_entry_attr and normal_object_attr as NormalHighlight.
115  * Print other attributes as IgnoredHighlight.
116  */
117 static attribute_highlights_t
normal_or_ignored_highlights(vm_entry_attribute_list_t normal_entry_attr,vm_object_attribute_list_t normal_object_attr)118 normal_or_ignored_highlights(
119 	vm_entry_attribute_list_t normal_entry_attr,
120 	vm_object_attribute_list_t normal_object_attr)
121 {
122 	return (attribute_highlights_t) {
123 		       .highlighting = NormalHighlight,
124 		       .entry = normal_entry_attr,
125 		       .object = normal_object_attr,
126 	};
127 }
128 
/* Return true if we should print terminal color codes. */
static bool
use_colors(void)
{
	/* cache the tty check: -1 = not yet queried */
	static int cached_tty_state = -1;
	if (cached_tty_state < 0) {
		cached_tty_state = isatty(STDOUT_FILENO) ? 1 : 0;
	}
	return cached_tty_state != 0;
}
139 
140 #pragma clang diagnostic push
141 #pragma clang diagnostic ignored "-Wpedantic"
142 /* -Wpedantic doesn't like "\e" */
143 
144 #define ANSI_DIM "\e[2m"
145 #define ANSI_UNDIM "\e[22m"
146 
147 /*
148  * Returns a highlighting prefix string.
149  * Its printed length is one character, either ' ' or '*'
150  * It may include ANSI color codes.
151  */
152 static const char *
highlight_prefix(attribute_highlight_t highlight)153 highlight_prefix(attribute_highlight_t highlight)
154 {
155 	assert(highlight >= 0 && highlight < HighlightCount);
156 	static const char * highlights[2][HighlightCount] = {
157 		[0] = {
158 			/* no tty, omit color codes */
159 			[BadHighlight] = "*",
160 			[NormalHighlight] = " ",
161 			[IgnoredHighlight] = " ",
162 		},
163 		[1] = {
164 			/* tty, add color codes */
165 			[BadHighlight] = "*",
166 			[NormalHighlight] = " ",
167 			[IgnoredHighlight] = ANSI_DIM " ",
168 		}
169 	};
170 
171 	return highlights[use_colors()][highlight];
172 }
173 
174 /*
175  * Returns a highlighting suffix string.
176  * Its printed length is zero characters.
177  * It may include ANSI color codes.
178  */
179 static const char *
highlight_suffix(attribute_highlight_t highlight __unused)180 highlight_suffix(attribute_highlight_t highlight __unused)
181 {
182 	if (use_colors()) {
183 		return ANSI_UNDIM;
184 	} else {
185 		return "";
186 	}
187 }
188 
189 #pragma clang diagnostic pop  /* ignored -Wpedantic */
190 
191 /*
192  * Format a value with highlighting.
193  * Usage:
194  *     printf("%sFFFF%s", HIGHLIGHT(value, entry.some_attr));
195  * where "FFFF" is the format string for `value`
196  * and `highlights.entry.some_attr` is true for highlighted values.
197  *
198  * Uses `highlights.highlighting` if `highlights.entry.some_attr` is true.
199  * Uses `IgnoredHighlight` if `highlights.entry.some_attr` is false.
200  */
201 #define HIGHLIGHT(value, attr_path)                             \
202 	    highlight_prefix(highlights.attr_path ? highlights.highlighting : IgnoredHighlight), \
203 	    (value),                                            \
204 	    highlight_suffix(highlights.attr_path ? highlights.highlighting : IgnoredHighlight)
205 
206 
207 /* host_priv port wrappers */
208 
209 host_priv_t
host_priv(void)210 host_priv(void)
211 {
212 	host_priv_t result;
213 	kern_return_t kr = host_get_host_priv_port(mach_host_self(), &result);
214 	assert(kr == 0 && "cannot get host_priv port; try running as root");
215 	return result;
216 }
217 
218 bool
host_priv_allowed(void)219 host_priv_allowed(void)
220 {
221 	host_priv_t result;
222 	kern_return_t kr = host_get_host_priv_port(mach_host_self(), &result);
223 	return kr == 0;
224 }
225 
226 /* math */
227 
/* Return true if n is a nonzero power of two. */
static bool
is_power_of_two(mach_vm_size_t n)
{
	if (n == 0) {
		return false;
	}
	/* strip trailing zero bits; a power of two reduces to exactly 1 */
	while ((n & 1) == 0) {
		n >>= 1;
	}
	return n == 1;
}
233 
/*
 * Return true if mask is a valid alignment mask: a (possibly empty)
 * run of contiguous low-order one bits. That is, mask + 1 is a power
 * of two, or mask is 0, or mask is ~0 (where mask + 1 wraps to zero).
 */
static bool
is_valid_alignment_mask(mach_vm_size_t mask)
{
	/*
	 * Adding 1 to a value whose set bits are exactly the low bits
	 * clears all of them; any other value keeps a bit in common.
	 */
	return (mask & (mask + 1)) == 0;
}
248 
249 
250 /*
251  * Some vm_behavior_t values have a persistent effect on the vm entry.
252  * Other behavior values are really one-shot memory operations.
253  */
static bool
is_persistent_vm_behavior(vm_behavior_t behavior)
{
	switch (behavior) {
	case VM_BEHAVIOR_DEFAULT:
	case VM_BEHAVIOR_RANDOM:
	case VM_BEHAVIOR_SEQUENTIAL:
	case VM_BEHAVIOR_RSEQNTL:
		/* access-pattern hints stick to the entry */
		return true;
	default:
		/* everything else is a one-shot memory operation */
		return false;
	}
}
263 
264 
/* Return a printable name for an entry-template kind. */
const char *
name_for_entry_kind(vm_entry_template_kind_t kind)
{
	static const char *const kind_names[] = {
		"END_ENTRIES", "allocation", "hole", "submap parent"
	};
	assert(kind < sizeof(kind_names) / sizeof(kind_names[0]));
	return kind_names[kind];
}
274 
/* Return the symbolic name for a kern_return_t, or "??" if unknown. */
const char *
name_for_kr(kern_return_t kr)
{
	static const char *const kr_names[] = {
		"KERN_SUCCESS", "KERN_INVALID_ADDRESS",
		"KERN_PROTECTION_FAILURE", "KERN_NO_SPACE",
		"KERN_INVALID_ARGUMENT", "KERN_FAILURE",
		"KERN_RESOURCE_SHORTAGE", "KERN_NOT_RECEIVER",
		"KERN_NO_ACCESS", "KERN_MEMORY_FAILURE",
		"KERN_MEMORY_ERROR", "KERN_ALREADY_IN_SET",
		"KERN_NOT_IN_SET", "KERN_NAME_EXISTS",
		"KERN_ABORTED", "KERN_INVALID_NAME",
		"KERN_INVALID_TASK", "KERN_INVALID_RIGHT",
		"KERN_INVALID_VALUE", "KERN_UREFS_OVERFLOW",
		"KERN_INVALID_CAPABILITY", "KERN_RIGHT_EXISTS",
		"KERN_INVALID_HOST", "KERN_MEMORY_PRESENT",
		/* add other kern_return.h values here if desired */
	};

	if (kr >= 0 && (size_t)kr < sizeof(kr_names) / sizeof(kr_names[0])) {
		return kr_names[kr];
	}

	/* TODO: recognize and/or decode mach_error format? */

	return "??";
}
302 
303 const char *
name_for_prot(vm_prot_t prot)304 name_for_prot(vm_prot_t prot)
305 {
306 	assert(prot_contains_all(VM_PROT_ALL /* rwx */, prot));
307 	/* TODO: uexec? */
308 	static const char *prot_name[] = {
309 		"---", "r--", "-w-", "rw-",
310 		"--x", "r-x", "-wx", "rwx"
311 	};
312 	return prot_name[prot];
313 }
314 
/* Return the symbolic name for a vm_inherit_t value. */
const char *
name_for_inherit(vm_inherit_t inherit)
{
	static const char *const inherit_names[] = {
		[VM_INHERIT_SHARE] = "VM_INHERIT_SHARE",
		[VM_INHERIT_COPY]  = "VM_INHERIT_COPY",
		[VM_INHERIT_NONE]  = "VM_INHERIT_NONE",
	};
	static_assert(sizeof(inherit_names) / sizeof(inherit_names[0]) ==
	    VM_INHERIT_LAST_VALID + 1,
	    "new vm_inherit_t values need names");

	assert(inherit <= VM_INHERIT_LAST_VALID);
	return inherit_names[inherit];
}
329 
/* Return the symbolic name for a vm_behavior_t value. */
const char *
name_for_behavior(vm_behavior_t behavior)
{
	static const char *const behavior_names[] = {
		[VM_BEHAVIOR_DEFAULT]          = "VM_BEHAVIOR_DEFAULT",
		[VM_BEHAVIOR_RANDOM]           = "VM_BEHAVIOR_RANDOM",
		[VM_BEHAVIOR_SEQUENTIAL]       = "VM_BEHAVIOR_SEQUENTIAL",
		[VM_BEHAVIOR_RSEQNTL]          = "VM_BEHAVIOR_RSEQNTL",
		[VM_BEHAVIOR_WILLNEED]         = "VM_BEHAVIOR_WILLNEED",
		[VM_BEHAVIOR_DONTNEED]         = "VM_BEHAVIOR_DONTNEED",
		[VM_BEHAVIOR_FREE]             = "VM_BEHAVIOR_FREE",
		[VM_BEHAVIOR_ZERO_WIRED_PAGES] = "VM_BEHAVIOR_ZERO_WIRED_PAGES",
		[VM_BEHAVIOR_REUSABLE]         = "VM_BEHAVIOR_REUSABLE",
		[VM_BEHAVIOR_REUSE]            = "VM_BEHAVIOR_REUSE",
		[VM_BEHAVIOR_CAN_REUSE]        = "VM_BEHAVIOR_CAN_REUSE",
		[VM_BEHAVIOR_PAGEOUT]          = "VM_BEHAVIOR_PAGEOUT",
		[VM_BEHAVIOR_ZERO]             = "VM_BEHAVIOR_ZERO",
	};
	static_assert(sizeof(behavior_names) / sizeof(behavior_names[0]) ==
	    VM_BEHAVIOR_LAST_VALID + 1,
	    "new vm_behavior_t values need names");

	assert(behavior >= 0 && behavior <= VM_BEHAVIOR_LAST_VALID);
	return behavior_names[behavior];
}
354 
/* Return the symbolic name for a vm_region share mode (SM_*). */
const char *
name_for_share_mode(uint8_t share_mode)
{
	assert(share_mode > 0);
	static const char *const share_mode_names[] = {
		[0]                  = "(0)",
		[SM_COW]             = "SM_COW",
		[SM_PRIVATE]         = "SM_PRIVATE",
		[SM_EMPTY]           = "SM_EMPTY",
		[SM_SHARED]          = "SM_SHARED",
		[SM_TRUESHARED]      = "SM_TRUESHARED",
		[SM_PRIVATE_ALIASED] = "SM_PRIVATE_ALIASED",
		[SM_SHARED_ALIASED]  = "SM_SHARED_ALIASED",
		[SM_LARGE_PAGE]      = "SM_LARGE_PAGE"
	};

	assert(share_mode < sizeof(share_mode_names) / sizeof(share_mode_names[0]));
	return share_mode_names[share_mode];
}
374 
/*
 * Return a printable name for a boolean_t.
 * boolean_t is an integer type, so values other than 0/1 are possible.
 */
const char *
name_for_bool(boolean_t value)
{
	if (value == 0) {
		return "false";
	}
	return value == 1 ? "true" : "true-but-not-1";
}
384 
385 
/*
 * Clamp [*inout_start, *inout_end) to [limit_start, limit_end).
 * If the two ranges do not overlap, the result is an empty range
 * positioned at the clamped start.
 */
void
clamp_start_end_to_start_end(
	mach_vm_address_t   * const inout_start,
	mach_vm_address_t   * const inout_end,
	mach_vm_address_t           limit_start,
	mach_vm_address_t           limit_end)
{
	mach_vm_address_t start = *inout_start;
	mach_vm_address_t end = *inout_end;

	start = (start < limit_start) ? limit_start : start;
	end = (end > limit_end) ? limit_end : end;
	if (end < start) {
		/* no-overlap case: collapse to an empty range */
		end = start;
	}

	*inout_start = start;
	*inout_end = end;
}
406 
/*
 * Clamp the range [*inout_address, *inout_address + *inout_size)
 * to [limit_address, limit_address + limit_size).
 * Non-overlapping ranges collapse to an empty range.
 */
void
clamp_address_size_to_address_size(
	mach_vm_address_t   * const inout_address,
	mach_vm_size_t      * const inout_size,
	mach_vm_address_t           limit_address,
	mach_vm_size_t              limit_size)
{
	mach_vm_address_t start = *inout_address;
	mach_vm_address_t end = start + *inout_size;
	mach_vm_address_t limit_end = limit_address + limit_size;

	/* same clamping as clamp_start_end_to_start_end, inlined */
	if (start < limit_address) {
		start = limit_address;
	}
	if (end > limit_end) {
		end = limit_end;
	}
	if (start > end) {
		/* no-overlap case: empty range */
		end = start;
	}

	*inout_address = start;
	*inout_size = end - start;
}
419 
/*
 * Clamp [*inout_address, *inout_address + *inout_size) to the
 * address range covered by `checker`.
 * Non-overlapping ranges collapse to an empty range.
 */
void
clamp_address_size_to_checker(
	mach_vm_address_t   * const inout_address,
	mach_vm_size_t      * const inout_size,
	vm_entry_checker_t         *checker)
{
	clamp_address_size_to_address_size(
		inout_address, inout_size,
		checker->address, checker->size);
}
430 
/*
 * Clamp [*inout_start, *inout_end) to the address range covered
 * by `checker`. Non-overlapping ranges collapse to an empty range.
 */
void
clamp_start_end_to_checker(
	mach_vm_address_t   * const inout_start,
	mach_vm_address_t   * const inout_end,
	vm_entry_checker_t         *checker)
{
	clamp_start_end_to_start_end(
		inout_start, inout_end,
		checker->address, checker_end_address(checker));
}
441 
442 
/*
 * Return the full VM object id backing `address`.
 * Asserts that `address` itself is mapped (the region lookup
 * must not skip forward to a later mapping).
 */
uint64_t
get_object_id_for_address(mach_vm_address_t address)
{
	mach_vm_address_t info_address = address;
	mach_vm_size_t info_size;
	vm_region_submap_info_data_64_t info;

	bool found = get_info_for_address_fast(&info_address, &info_size, &info);
	assert(found);
	/* lookup must not have advanced past an unmapped query address */
	assert(info_address == address);
	return info.object_id_full;
}
455 
456 uint16_t
get_user_tag_for_address(mach_vm_address_t address)457 get_user_tag_for_address(mach_vm_address_t address)
458 {
459 	mach_vm_address_t info_address = address;
460 	mach_vm_size_t info_size;
461 	vm_region_submap_info_data_64_t info;
462 
463 	bool found = get_info_for_address_fast(&info_address, &info_size, &info);
464 	if (found) {
465 		return info.user_tag;
466 	} else {
467 		return 0;
468 	}
469 }
470 
471 uint16_t
get_app_specific_user_tag_for_address(mach_vm_address_t address)472 get_app_specific_user_tag_for_address(mach_vm_address_t address)
473 {
474 	uint16_t tag = get_user_tag_for_address(address);
475 	if (tag < VM_MEMORY_APPLICATION_SPECIFIC_1 ||
476 	    tag > VM_MEMORY_APPLICATION_SPECIFIC_16) {
477 		/* tag is outside app-specific range, override it */
478 		return 0;
479 	}
480 	return tag;
481 }
482 
/*
 * Set the vm.self_region_footprint sysctl for this process.
 * This changes which values mach_vm_region_recurse reports;
 * see get_info_for_address() for how the two modes are combined.
 */
static void
set_vm_self_region_footprint(bool value)
{
	int value_storage = value;
	int error = sysctlbyname("vm.self_region_footprint", NULL, NULL, &value_storage, sizeof(value_storage));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "sysctl(vm.self_region_footprint)");
}
490 
/*
 * Look up region info for *inout_address at submap_depth with a
 * single mach_vm_region_recurse call (no footprint adjustment).
 *
 * Returns true if *inout_address itself is mapped: *inout_address is
 * unchanged and *out_size / *out_info describe its region.
 * Returns false if it is unmapped: *inout_address is advanced to the
 * start of the next mapped region, or to ~0 with *out_size == 0 when
 * nothing further is mapped.
 */
bool __attribute__((overloadable))
get_info_for_address_fast(
	mach_vm_address_t * const inout_address,
	mach_vm_size_t * const out_size,
	vm_region_submap_info_data_64_t * const out_info,
	uint32_t submap_depth)
{
	kern_return_t kr;

	mach_vm_address_t query_address = *inout_address;
	mach_vm_address_t actual_address = query_address;
	uint32_t actual_depth = submap_depth;
	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &actual_address, out_size, &actual_depth,
	    (vm_region_recurse_info_t)out_info,
	    &count);

	/* a result above the requested depth means nothing mapped at our depth */
	if (kr == KERN_INVALID_ADDRESS || actual_depth < submap_depth) {
		/* query_address is unmapped, and so is everything after it */
		*inout_address = ~(mach_vm_address_t)0;
		*out_size = 0;
		return false;
	}
	assert(kr == 0);
	if (actual_address > query_address) {
		/* query_address is unmapped, but there is a subsequent mapping */
		*inout_address = actual_address;
		/* *out_size already set */
		return false;
	}

	/* query_address is mapped */
	*inout_address = actual_address;
	/* *out_size already set */
	return true;
}
528 
/*
 * Look up region info for *inout_address at submap_depth.
 * Same contract as get_info_for_address_fast(), but blends values
 * from two queries made with different footprint settings (below).
 */
bool __attribute__((overloadable))
get_info_for_address(
	mach_vm_address_t * const inout_address,
	mach_vm_size_t * const out_size,
	vm_region_submap_info_data_64_t * const out_info,
	uint32_t submap_depth)
{
	mach_vm_address_t addr1, addr2;
	mach_vm_size_t size1 = 0, size2 = 0;
	vm_region_submap_info_data_64_t info1, info2;
	bool result1, result2;

	/*
	 * VM's task_self_region_footprint() changes
	 * how vm_map_region_walk() counts things.
	 *
	 * We want the ref_count and shadow_depth from footprint==true
	 * (ignoring the specific pages in the objects)
	 * but we want pages_resident from footprint==false.
	 *
	 * Here we call vm_region once with footprint and once without,
	 * and pick out the values we want to return.
	 */

	set_vm_self_region_footprint(true);
	addr1 = *inout_address;
	result1 = get_info_for_address_fast(&addr1, &size1, &info1, submap_depth);

	set_vm_self_region_footprint(false);
	addr2 = *inout_address;
	result2 = get_info_for_address_fast(&addr2, &size2, &info2, submap_depth);
	/* both queries must agree on the region's location and mapped-ness */
	assert(addr1 == addr2);
	assert(size1 == size2);
	assert(result1 == result2);

	/* footprint==true values win, except pages_resident from footprint==false */
	info1.pages_resident = info2.pages_resident;
	*out_info = info1;
	*inout_address = addr1;
	*out_size = size1;

	return result1;
}
571 
572 static bool
is_mapped(mach_vm_address_t address,uint32_t submap_depth)573 is_mapped(mach_vm_address_t address, uint32_t submap_depth)
574 {
575 	mach_vm_size_t size;
576 	vm_region_submap_info_data_64_t info;
577 	return get_info_for_address_fast(&address, &size, &info, submap_depth);
578 }
579 
580 
/*
 * Print one mapped region's attributes via T_LOG.
 * `highlights` selects which attributes get the highlighted style;
 * unselected attributes print as IgnoredHighlight (the HIGHLIGHT
 * macro reads the local variable named `highlights`).
 */
static void
dump_region_info(
	mach_vm_address_t address,
	mach_vm_size_t size,
	uint32_t submap_depth,
	vm_region_submap_info_data_64_t *info,
	attribute_highlights_t highlights)
{
	mach_vm_address_t end = address + size;

	const char *suffix = "";
	if (info->is_submap) {
		suffix = " (submap parent)";
	} else if (submap_depth > 0) {
		suffix = " (allocation in submap)";
	}

	const char *submap_prefix = submap_depth > 0 ? SUBMAP_PREFIX : "";

	/* Output order should match dump_checker_info() for the reader's convenience. */

	T_LOG("%sMAPPING   0x%llx..0x%llx (size 0x%llx)%s", submap_prefix, address, end, size, suffix);
	T_LOG("%s    %sprotection:     %s%s", submap_prefix, HIGHLIGHT(name_for_prot(info->protection), entry.protection_attr));
	T_LOG("%s    %smax protection: %s%s", submap_prefix, HIGHLIGHT(name_for_prot(info->max_protection), entry.max_protection_attr));
	T_LOG("%s    %sinheritance:    %s%s", submap_prefix, HIGHLIGHT(name_for_inherit(info->inheritance), entry.inheritance_attr));
	T_LOG("%s    %sbehavior:       %s%s", submap_prefix, HIGHLIGHT(name_for_behavior(info->behavior), entry.behavior_attr));
	T_LOG("%s    %suser wired count:  %d%s", submap_prefix, HIGHLIGHT(info->user_wired_count, entry.user_wired_count_attr));
	T_LOG("%s    %suser tag:       %d%s", submap_prefix, HIGHLIGHT(info->user_tag, entry.user_tag_attr));
	T_LOG("%s    %sobject offset:  0x%llx%s", submap_prefix, HIGHLIGHT(info->offset, entry.object_offset_attr));
	T_LOG("%s    %sobject id:      0x%llx%s", submap_prefix, HIGHLIGHT(info->object_id_full, object.object_id_attr));
	T_LOG("%s    %sref count:      %u%s", submap_prefix, HIGHLIGHT(info->ref_count, object.ref_count_attr));
	T_LOG("%s    %sshadow depth:   %hu%s", submap_prefix, HIGHLIGHT(info->shadow_depth, object.shadow_depth_attr));
	T_LOG("%s    %spages resident: %u%s", submap_prefix, HIGHLIGHT(info->pages_resident, entry.pages_resident_attr));
	/* the following attributes are never checked, so always print dimmed */
	T_LOG("%s    %spages shared now private: %u%s", submap_prefix, highlight_prefix(IgnoredHighlight), info->pages_shared_now_private, highlight_suffix(IgnoredHighlight));
	T_LOG("%s    %spages swapped out: %u%s", submap_prefix, highlight_prefix(IgnoredHighlight), info->pages_swapped_out, highlight_suffix(IgnoredHighlight));
	T_LOG("%s    %spages dirtied:  %u%s", submap_prefix, highlight_prefix(IgnoredHighlight), info->pages_dirtied, highlight_suffix(IgnoredHighlight));
	T_LOG("%s    %sexternal pager: %hhu%s", submap_prefix, highlight_prefix(IgnoredHighlight), info->external_pager, highlight_suffix(IgnoredHighlight));
	T_LOG("%s    %sshare mode:     %s%s", submap_prefix, HIGHLIGHT(name_for_share_mode(info->share_mode), entry.share_mode_attr));
	T_LOG("%s    %sis submap:      %s%s", submap_prefix, HIGHLIGHT(name_for_bool(info->is_submap), entry.is_submap_attr));
	T_LOG("%s    %ssubmap depth:   %u%s", submap_prefix, HIGHLIGHT(submap_depth, entry.submap_depth_attr));
}
622 
/*
 * Print a description of an unmapped range [address, address+size).
 * submap_depth > 0 means the hole lies inside a submap.
 */
static void
dump_hole_info(
	mach_vm_address_t address,
	mach_vm_size_t size,
	uint32_t submap_depth,
	attribute_highlights_t highlights)
{
	mach_vm_address_t end = address + size;
	const char *submap_prefix = submap_depth > 0 ? SUBMAP_PREFIX : "";
	const char *suffix = "";
	if (submap_depth > 0) {
		suffix = " (unallocated in submap)";
	}

	T_LOG("%sHOLE 0x%llx..0x%llx (size 0x%llx)%s",
	    submap_prefix, address, end, size, suffix);
	if (submap_depth > 0) {
		/* print submap depth to avoid confusion about holes inside submaps */
		T_LOG("%s    %ssubmap depth:   %u%s", submap_prefix, HIGHLIGHT(submap_depth, entry.submap_depth_attr));
	}
}
644 
/*
 * Walk [range_start, range_start + range_size) at submap_depth,
 * printing every hole and mapped region in address order.
 * When `recurse` is true, also print the contents of any submap
 * encountered, clipped to the window being printed.
 */
__attribute__((overloadable))
static void
dump_region_info_in_range(
	mach_vm_address_t range_start,
	mach_vm_size_t range_size,
	uint32_t submap_depth,
	bool recurse,
	attribute_highlights_t highlights)
{
	mach_vm_address_t range_end = range_start + range_size;
	mach_vm_address_t prev_end = range_start;
	do {
		mach_vm_address_t address = prev_end;
		mach_vm_size_t size = 0;
		vm_region_submap_info_data_64_t info;
		(void)get_info_for_address(&address, &size, &info, submap_depth);
		/*
		 * [address, address+size) is the next mapped region,
		 * or [~0, ~0) if there is no next mapping.
		 * There may be a hole preceding that region.
		 * That region may be beyond our range.
		 */
		if (address > prev_end) {
			/* don't report any part of the hole beyond range_end */
			mach_vm_address_t hole_end = min(address, range_end);
			dump_hole_info(prev_end, hole_end - prev_end, submap_depth, highlights);
		}
		if (address < range_end) {
			dump_region_info(address, size, submap_depth, &info, highlights);
			if (info.is_submap && recurse) {
				/* print submap contents within this window */
				mach_vm_address_t submap_start = max(prev_end, address);
				mach_vm_address_t submap_end = min(range_end, address + size);
				dump_region_info_in_range(submap_start, submap_end - submap_start,
				    submap_depth + 1, true, highlights);
			}
		}
		prev_end = address + size;
	} while (prev_end < range_end);
}
685 
686 
/*
 * Print the VM state covering one checker's address range.
 * Prints only at the checker's own submap depth; does not recurse.
 */
static void
dump_region_info_for_entry(
	vm_entry_checker_t *checker,
	attribute_highlights_t highlights)
{
	/* Try to print at the checker's submap depth only. Don't recurse. */
	dump_region_info_in_range(checker->address, checker->size,
	    checker->submap_depth, false /* recurse */, highlights);
}
696 
/*
 * Print the VM state covering an entire checker list's address range.
 */
void
dump_region_info_for_entries(entry_checker_range_t list)
{
	/*
	 * Ignore the submap depth of the checkers themselves.
	 * Print starting at submap depth 0 and recurse.
	 * Don't specially highlight any attributes.
	 */
	mach_vm_address_t start = checker_range_start_address(list);
	mach_vm_address_t end = checker_range_end_address(list);
	dump_region_info_in_range(
		start, end - start,
		0 /* submap depth */, true /* recurse */,
		normal_highlights());
}
712 
/*
 * Count the number of templates in an END_ENTRIES-terminated list.
 * A NULL list counts as empty.
 */
static unsigned
count_entry_templates(const vm_entry_template_t *templates)
{
	unsigned count = 0;
	if (templates != NULL) {
		while (templates[count].kind != EndEntries) {
			count++;
		}
	}
	return count;
}
728 
/*
 * Count the number of templates in an END_OBJECTS-terminated list.
 * A NULL list counts as empty.
 */
static unsigned
count_object_templates(const vm_object_template_t *templates)
{
	unsigned count = 0;
	if (templates != NULL) {
		while (templates[count].kind != EndObjects) {
			count++;
		}
	}
	return count;
}
744 
/* conveniences for some macros elsewhere */
/* Count templates in an END_OBJECTS-terminated submap object list. */
static unsigned
count_submap_object_templates(const vm_object_template_t *templates)
{
	return count_object_templates(templates);
}
/* Count templates in an END_ENTRIES-terminated submap entry list. */
static unsigned
count_submap_entry_templates(const vm_entry_template_t *templates)
{
	return count_entry_templates(templates);
}
756 
757 
/*
 * Allocate one zero-filled object checker.
 * Caller owns the result; release with object_checker_free().
 */
static vm_object_checker_t *
object_checker_new(void)
{
	/* calloc takes (count, size); the original had the arguments swapped */
	return calloc(1, sizeof(vm_object_checker_t));
}
763 
/*
 * Returns true if obj_checker refers to a NULL vm object.
 * A NULL checker pointer also counts as the null object.
 */
static bool
object_is_null(vm_object_checker_t *obj_checker)
{
	if (obj_checker == NULL) {
		return true;
	}
	/* must not be queried on dead or sentinel checkers */
	assert(obj_checker->kind != Deinited);
	assert(obj_checker->kind != FreedObject);
	assert(obj_checker->kind != EndObjects);
	if (obj_checker->object_id_mode == object_has_known_id) {
		/* object id 0 is the VM's null object */
		return obj_checker->object_id == 0;
	}
	/* unknown id: assume a real object */
	return false;
}
781 
782 static unsigned
object_checker_get_shadow_depth(vm_object_checker_t * obj_checker)783 object_checker_get_shadow_depth(vm_object_checker_t *obj_checker)
784 {
785 	if (obj_checker == NULL || obj_checker->shadow == NULL) {
786 		return 0;
787 	}
788 	assert(!object_is_null(obj_checker));  /* null object must have no shadow */
789 	return 1 + object_checker_get_shadow_depth(obj_checker->shadow);
790 }
791 
792 static unsigned
object_checker_get_self_ref_count(vm_object_checker_t * obj_checker)793 object_checker_get_self_ref_count(vm_object_checker_t *obj_checker)
794 {
795 	if (object_is_null(obj_checker)) {
796 		/* null object always has zero self_ref_count */
797 		return 0;
798 	} else {
799 		return obj_checker->self_ref_count;
800 	}
801 }
802 
803 /*
804  * ref_count as reported by vm_region is:
805  * this object's self_ref_count
806  * plus all object self_ref_counts in its shadow chain
807  * minus the number of objects in its shadow chain
808  * (i.e. discounting the references internal to the shadow chain)
809  * TODO: also discounting references due to paging_in_progress
810  */
811 static unsigned
object_checker_get_vm_region_ref_count(vm_object_checker_t * obj_checker)812 object_checker_get_vm_region_ref_count(vm_object_checker_t *obj_checker)
813 {
814 	unsigned count = object_checker_get_self_ref_count(obj_checker);
815 	while ((obj_checker = obj_checker->shadow)) {
816 		count += object_checker_get_self_ref_count(obj_checker) - 1;
817 	}
818 	return count;
819 }
820 
821 /*
822  * Increments an object checker's refcount, mirroring the VM's refcount.
823  */
824 static void
object_checker_reference(vm_object_checker_t * obj_checker)825 object_checker_reference(vm_object_checker_t *obj_checker)
826 {
827 	if (!object_is_null(obj_checker)) {
828 		obj_checker->self_ref_count++;
829 	}
830 }
831 
832 static void object_checker_deinit(vm_object_checker_t *obj_checker); /* forward */
833 static void checker_list_free(checker_list_t *checker_list); /* forward */
834 
835 /*
836  * Decrements an object checker's refcount, mirroring the VM's refcount.
837  */
838 static void
object_checker_dereference(vm_object_checker_t * obj_checker)839 object_checker_dereference(vm_object_checker_t *obj_checker)
840 {
841 	if (!object_is_null(obj_checker)) {
842 		assert(obj_checker->self_ref_count > 0);
843 		obj_checker->self_ref_count--;
844 		if (obj_checker->self_ref_count == 0) {
845 			/*
846 			 * We can't free this object checker because
847 			 * a checker list may still point to it.
848 			 * But we do tear down some of its contents.
849 			 */
850 			object_checker_deinit(obj_checker);
851 		}
852 	}
853 }
854 
/*
 * Tear down an object checker's contents without freeing it:
 * drop its shadow reference, free any submap checker list,
 * and scrub its identity. Idempotent.
 */
static void
object_checker_deinit(vm_object_checker_t *obj_checker)
{
	if (obj_checker->kind != Deinited) {
		object_checker_dereference(obj_checker->shadow);
		obj_checker->shadow = NULL;

		if (obj_checker->submap_checkers) {
			assert(obj_checker->kind == SubmapObject);
			/* submap checker list must not store objects */
			assert(obj_checker->submap_checkers->objects == NULL);
			checker_list_free(obj_checker->submap_checkers);
		}

		/*
		 * Previously we kept the object_id intact so we could
		 * detect usage of an object that the checkers thought
		 * was dead. This caused false failures when the VM's
		 * vm_object_t allocator re-used an object pointer.
		 * Now we scrub the object_id of deinited objects
		 * so that vm_object_t pointer reuse is allowed.
		 */
		obj_checker->object_id_mode = object_has_known_id;
		obj_checker->object_id = ~0;
		obj_checker->kind = Deinited;
	}
}
882 
883 static void
object_checker_free(vm_object_checker_t * obj_checker)884 object_checker_free(vm_object_checker_t *obj_checker)
885 {
886 	object_checker_deinit(obj_checker);
887 	free(obj_checker);
888 }
889 
890 vm_object_checker_t *
object_checker_clone(vm_object_checker_t * obj_checker)891 object_checker_clone(vm_object_checker_t *obj_checker)
892 {
893 	assert(obj_checker->kind != SubmapObject);  /* unimplemented */
894 
895 	vm_object_checker_t *result = object_checker_new();
896 	*result = *obj_checker;
897 
898 	result->self_ref_count = 0;
899 	result->object_id_mode = object_is_unknown;
900 	result->object_id = 0;
901 	result->shadow = NULL;
902 
903 	result->next = NULL;
904 	result->prev = NULL;
905 
906 	return result;
907 }
908 
909 
910 /*
911  * Search a checker list for an object with the given object_id.
912  * Returns if no object is known to have that id.
913  */
914 static vm_object_checker_t *
find_object_checker_for_object_id(checker_list_t * list,uint64_t object_id)915 find_object_checker_for_object_id(checker_list_t *list, uint64_t object_id)
916 {
917 	/* object list is only stored in the top-level checker list */
918 	if (list->parent) {
919 		return find_object_checker_for_object_id(list->parent, object_id);
920 	}
921 
922 	/* first object must be the null object */
923 	assert(list->objects && object_is_null(list->objects));
924 
925 	FOREACH_OBJECT_CHECKER(obj_checker, list) {
926 		assert(obj_checker->kind != FreedObject);
927 		switch (obj_checker->object_id_mode) {
928 		case object_is_unknown:
929 		case object_has_unknown_nonnull_id:
930 			/* nope */
931 			break;
932 		case object_has_known_id:
933 			if (object_id == obj_checker->object_id) {
934 				assert(obj_checker->kind != Deinited);
935 				return obj_checker;
936 			}
937 			break;
938 		}
939 	}
940 
941 	return NULL;
942 }
943 
944 /*
945  * Create a new object checker for the null vm object.
946  */
947 static vm_object_checker_t *
make_null_object_checker(checker_list_t * checker_list)948 make_null_object_checker(checker_list_t *checker_list)
949 {
950 	vm_object_checker_t *obj_checker = object_checker_new();
951 	obj_checker->kind = Anonymous;
952 	obj_checker->verify = vm_object_attributes_with_default(true);
953 
954 	obj_checker->object_id_mode = object_has_known_id;
955 	obj_checker->object_id = 0;
956 
957 	obj_checker->size = ~0u;
958 	obj_checker->self_ref_count = 0;
959 	obj_checker->fill_pattern.mode = DontFill;
960 
961 	obj_checker->next = NULL;
962 	obj_checker->prev = NULL;
963 
964 	/* null object must be the first in the list */
965 	assert(checker_list->objects == NULL);
966 	checker_list->objects = obj_checker;
967 
968 	return obj_checker;
969 }
970 
971 /*
972  * Create a new object checker for anonymous memory.
973  * The new object checker is added to the checker list.
974  */
975 static vm_object_checker_t *
make_anonymous_object_checker(checker_list_t * checker_list,mach_vm_size_t size)976 make_anonymous_object_checker(checker_list_t *checker_list, mach_vm_size_t size)
977 {
978 	vm_object_checker_t *obj_checker = object_checker_new();
979 	obj_checker->kind = Anonymous;
980 	obj_checker->verify = vm_object_attributes_with_default(true);
981 
982 	/* don't know the object's id yet, we'll look it up later */
983 	obj_checker->object_id_mode = object_is_unknown;
984 	obj_checker->object_id = 0;
985 
986 	obj_checker->size = size;
987 	obj_checker->self_ref_count = 0;
988 	obj_checker->fill_pattern.mode = DontFill;
989 
990 	obj_checker->next = NULL;
991 	obj_checker->prev = NULL;
992 
993 	checker_list_append_object(checker_list, obj_checker);
994 
995 	return obj_checker;
996 }
997 
998 static void checker_list_move_objects_to_parent(checker_list_t *submap_list); /* forward */
999 
/*
 * Create a new object checker for a parent map submap entry's object.
 * The submap's contents are verified using submap_checkers.
 * The new object checker takes ownership of submap_checkers.
 * The new object checker is added to the checker list.
 */
static vm_object_checker_t *
make_submap_object_checker(
	checker_list_t *checker_list,
	checker_list_t *submap_checkers)
{
	/* address range where the submap is currently mapped */
	mach_vm_address_t submap_start = checker_range_start_address(submap_checkers->entries);
	mach_vm_address_t submap_size = checker_range_size(submap_checkers->entries);
	vm_object_checker_t *obj_checker = object_checker_new();
	obj_checker->kind = SubmapObject;
	obj_checker->verify = vm_object_attributes_with_default(true);

	/* Look up the object_id stored in the parent map's submap entry. */
	obj_checker->object_id = get_object_id_for_address(submap_start); /* submap_depth==0 */
	obj_checker->object_id_mode = object_has_known_id;

	obj_checker->size = submap_size;
	obj_checker->self_ref_count = 0;
	obj_checker->fill_pattern.mode = DontFill;

	/* not linked into the object list yet */
	obj_checker->next = NULL;
	obj_checker->prev = NULL;

	/* ownership of submap_checkers transfers to this object checker */
	obj_checker->submap_checkers = submap_checkers;

	/*
	 * Slide the submap checkers as if they were
	 * checking a submap remapping at address 0.
	 */
	FOREACH_CHECKER(submap_checker, submap_checkers->entries) {
		submap_checker->address -= submap_start;
	}

	/* Move the submap list's object checkers into the parent list. */
	submap_checkers->parent = checker_list;
	checker_list_move_objects_to_parent(submap_checkers);

	checker_list_append_object(checker_list, obj_checker);

	return obj_checker;
}
1047 
1048 static vm_entry_checker_t *
checker_new(void)1049 checker_new(void)
1050 {
1051 	return calloc(sizeof(vm_entry_checker_t), 1);
1052 }
1053 
1054 static void
checker_free(vm_entry_checker_t * checker)1055 checker_free(vm_entry_checker_t *checker)
1056 {
1057 	object_checker_dereference(checker->object);
1058 	free(checker);
1059 }
1060 
1061 
1062 static checker_list_t *
checker_list_new(void)1063 checker_list_new(void)
1064 {
1065 	checker_list_t *list = calloc(sizeof(*list), 1);
1066 
1067 	list->entries.head = NULL;
1068 	list->entries.tail = NULL;
1069 
1070 	make_null_object_checker(list);
1071 
1072 	return list;
1073 }
1074 
1075 void
checker_list_append_object(checker_list_t * list,vm_object_checker_t * obj_checker)1076 checker_list_append_object(
1077 	checker_list_t *list,
1078 	vm_object_checker_t *obj_checker)
1079 {
1080 	/* object list is only stored in the top-level checker list */
1081 	if (list->parent) {
1082 		return checker_list_append_object(list, obj_checker);
1083 	}
1084 
1085 	/* first object must be the null object */
1086 	assert(list->objects && object_is_null(list->objects));
1087 
1088 	/* no additional null objects are allowed */
1089 	assert(!object_is_null(obj_checker));
1090 
1091 	/* new object must be currently unlinked */
1092 	assert(obj_checker->next == NULL && obj_checker->prev == NULL);
1093 
1094 	/* no duplicate IDs allowed */
1095 	if (obj_checker->object_id_mode == object_has_known_id) {
1096 		assert(!find_object_checker_for_object_id(list, obj_checker->object_id));
1097 	}
1098 
1099 	/* insert object after the null object */
1100 	vm_object_checker_t *left = list->objects;
1101 	vm_object_checker_t *right = list->objects->next;
1102 	obj_checker->prev = left;
1103 	obj_checker->next = right;
1104 	left->next = obj_checker;
1105 	if (right) {
1106 		right->prev = obj_checker;
1107 	}
1108 }
1109 
/*
 * Move object checkers from a submap checker list to its parent.
 * Submap checker lists do not store objects.
 * The submap's own null object is freed, since the parent
 * already has one; everything else is relinked into the parent.
 */
static void
checker_list_move_objects_to_parent(checker_list_t *submap_list)
{
	vm_object_checker_t *obj_checker = submap_list->objects;

	checker_list_t *parent_list = submap_list->parent;
	assert(parent_list != NULL);

	/* skip submap's null object, the parent should already have one */
	assert(obj_checker != NULL && object_is_null(obj_checker));
	obj_checker = obj_checker->next;

	while (obj_checker != NULL) {
		vm_object_checker_t *cur = obj_checker;
		/* save the next pointer before cur is relinked */
		obj_checker = obj_checker->next;

		/* append requires the object to be unlinked */
		cur->prev = cur->next = NULL;
		checker_list_append_object(parent_list, cur);
	}

	/* free submap's null object */
	object_checker_free(submap_list->objects);
	submap_list->objects = NULL;
}
1138 
1139 unsigned
checker_range_count(entry_checker_range_t entry_range)1140 checker_range_count(entry_checker_range_t entry_range)
1141 {
1142 	unsigned count = 0;
1143 	FOREACH_CHECKER(checker, entry_range) {
1144 		count++;
1145 	}
1146 	return count;
1147 }
1148 
1149 mach_vm_address_t
checker_range_start_address(entry_checker_range_t checker_range)1150 checker_range_start_address(entry_checker_range_t checker_range)
1151 {
1152 	return checker_range.head->address;
1153 }
1154 
1155 mach_vm_address_t
checker_range_end_address(entry_checker_range_t checker_range)1156 checker_range_end_address(entry_checker_range_t checker_range)
1157 {
1158 	return checker_end_address(checker_range.tail);
1159 }
1160 
1161 mach_vm_size_t
checker_range_size(entry_checker_range_t checker_range)1162 checker_range_size(entry_checker_range_t checker_range)
1163 {
1164 	return checker_range_end_address(checker_range) - checker_range_start_address(checker_range);
1165 }
1166 
1167 /*
1168  * Add a checker to the end of a checker range.
1169  */
1170 static void
checker_range_append(entry_checker_range_t * list,vm_entry_checker_t * inserted)1171 checker_range_append(entry_checker_range_t *list, vm_entry_checker_t *inserted)
1172 {
1173 	inserted->prev = list->tail;
1174 	if (!list->head) {
1175 		list->head = inserted;
1176 	}
1177 	if (list->tail) {
1178 		list->tail->next = inserted;
1179 	}
1180 	list->tail = inserted;
1181 }
1182 
1183 /*
1184  * Free a range of checkers.
1185  * You probably don't want to call this.
1186  * Use checker_list_free() or checker_list_free_range() instead.
1187  */
1188 static void
checker_range_free(entry_checker_range_t range)1189 checker_range_free(entry_checker_range_t range)
1190 {
1191 	/* not FOREACH_CHECKER due to use-after-free */
1192 	vm_entry_checker_t *checker = range.head;
1193 	vm_entry_checker_t *end = range.tail->next;
1194 	while (checker != end) {
1195 		vm_entry_checker_t *dead = checker;
1196 		checker = checker->next;
1197 		checker_free(dead);
1198 	}
1199 }
1200 
1201 static void
checker_list_free(checker_list_t * list)1202 checker_list_free(checker_list_t *list)
1203 {
1204 	/* Free map entry checkers */
1205 	checker_range_free(list->entries);
1206 
1207 	/* Free object checkers. */
1208 	vm_object_checker_t *obj_checker = list->objects;
1209 	while (obj_checker) {
1210 		vm_object_checker_t *dead = obj_checker;
1211 		obj_checker = obj_checker->next;
1212 		object_checker_free(dead);
1213 	}
1214 
1215 	free(list);
1216 }
1217 
1218 /*
1219  * Clone a vm entry checker.
1220  * The new clone increases its object's refcount.
1221  * The new clone is unlinked from the checker list.
1222  */
1223 static vm_entry_checker_t *
checker_clone(vm_entry_checker_t * old)1224 checker_clone(vm_entry_checker_t *old)
1225 {
1226 	vm_entry_checker_t *new_checker = checker_new();
1227 	*new_checker = *old;
1228 	object_checker_reference(new_checker->object);
1229 	new_checker->prev = NULL;
1230 	new_checker->next = NULL;
1231 	return new_checker;
1232 }
1233 
1234 static void
checker_set_pages_resident(vm_entry_checker_t * checker,mach_vm_size_t pages)1235 checker_set_pages_resident(vm_entry_checker_t *checker, mach_vm_size_t pages)
1236 {
1237 	checker->pages_resident = (uint32_t)pages;
1238 }
1239 
1240 /*
1241  * Return the nth checker in a linked list of checkers.
1242  * Includes holes.
1243  */
1244 static vm_entry_checker_t *
checker_nth(vm_entry_checker_t * checkers,unsigned n)1245 checker_nth(vm_entry_checker_t *checkers, unsigned n)
1246 {
1247 	assert(checkers != NULL);
1248 	if (n == 0) {
1249 		return checkers;
1250 	} else {
1251 		return checker_nth(checkers->next, n - 1);
1252 	}
1253 }
1254 
1255 /*
1256  * Return the nth checker in a checker list.
1257  * Includes holes.
1258  */
1259 vm_entry_checker_t *
checker_list_nth(checker_list_t * list,unsigned n)1260 checker_list_nth(checker_list_t *list, unsigned n)
1261 {
1262 	return checker_nth(list->entries.head, n);
1263 }
1264 
/*
 * Offset every entry checker's address in the list by `slide`.
 */
static void
checker_list_apply_slide(checker_list_t *checker_list, mach_vm_address_t slide)
{
	FOREACH_CHECKER(checker, checker_list->entries) {
		checker->address += slide;
	}
}
1272 
/*
 * Fetch a submap parent entry's submap checker list and slide it
 * so the checkers describe the submap as mapped through the parent
 * entry. Must be balanced by unslide_submap_checkers().
 */
checker_list_t *
checker_get_and_slide_submap_checkers(vm_entry_checker_t *submap_parent)
{
	assert(submap_parent->kind == Submap);
	assert(submap_parent->object);
	checker_list_t *submap_checkers = submap_parent->object->submap_checkers;
	/* sliding twice without unsliding would corrupt the addresses */
	assert(!submap_checkers->is_slid);
	submap_checkers->is_slid = true;
	/* submap checkers are based at 0; account for the parent's offset */
	submap_checkers->submap_slide = submap_parent->address - submap_parent->object_offset;
	checker_list_apply_slide(submap_checkers, submap_checkers->submap_slide);
	return submap_checkers;
}
1285 
/*
 * Undo checker_get_and_slide_submap_checkers(), returning the
 * submap checkers to their submap-relative (base 0) addresses.
 */
void
unslide_submap_checkers(checker_list_t *submap_checkers)
{
	assert(submap_checkers->is_slid);
	submap_checkers->is_slid = false;
	/* unsigned negation: well-defined wraparound subtracts the slide */
	checker_list_apply_slide(submap_checkers, -submap_checkers->submap_slide);
	submap_checkers->submap_slide = 0;
}
1294 
1295 
1296 /*
1297  * vm_region of submap contents clamps the reported
1298  * address range to the parent map's submap entry,
1299  * and also modifies some (but not all) fields to match.
1300  * Our submap checkers model the submap's real contents.
1301  * When verifying VM state, we "tweak" the checkers
1302  * of submap contents to match what vm_region will
1303  * report, and "untweak" the checkers afterwards.
1304  *
1305  * Note that these submap "tweaks" are separate from the
1306  * submap "slide" (checker_get_and_slide_submap_checkers).
1307  * Submap slide is applied any time the submap contents are used.
1308  * Submap tweaks are applied only when comparing checkers to vm_region output.
1309  */
1310 
/* Per-entry values saved so one tweaked checker can be restored. */
typedef struct {
	mach_vm_address_t address;
	/* NOTE(review): declared with an address type; presumably holds a size — confirm */
	mach_vm_address_t size;
	uint32_t pages_resident;
} checker_tweaks_t;

/* Everything needed to undo submap_checkers_tweak_for_vm_region(). */
typedef struct {
	/* save the checker list so we can use attribute(cleanup) */
	checker_list_t *tweaked_checker_list;

	/* some entries are removed from the list; save them here */
	entry_checker_range_t original_entries;

	/* some entries are modified; save their old values here */
	vm_entry_checker_t new_head_original_contents;
	vm_entry_checker_t new_tail_original_contents;
} checker_list_tweaks_t;
1328 
/*
 * Narrow one submap checker to the window visible through the
 * parent map's submap entry, matching what vm_region reports.
 */
static void
checker_tweak_for_vm_region(vm_entry_checker_t *checker, vm_entry_checker_t *submap_parent)
{
	/* clamp checker bounds to the submap window */
	mach_vm_size_t old_size = checker->size;
	clamp_address_size_to_checker(&checker->address, &checker->size, submap_parent);

	/*
	 * scale pages_resident, on the assumption that either
	 * all pages are resident, or none of them (TODO page modeling)
	 */
	if (checker->size != old_size) {
		/* clamping can only shrink the checker */
		assert(checker->size < old_size);
		double scale = (double)checker->size / old_size;
		checker->pages_resident *= scale;
	}

	/*
	 * vm_region does NOT adjust the reported object offset,
	 * so don't tweak it here
	 */
}
1351 
/*
 * Tweak a submap checker list to match what vm_region reports
 * through the given parent-map submap entry. Returns the saved
 * state that cleanup_submap_checkers_untweak() needs to restore
 * the list afterwards.
 */
static checker_list_tweaks_t
submap_checkers_tweak_for_vm_region(
	checker_list_t *submap_checkers,
	vm_entry_checker_t *submap_parent)
{
	assert(submap_checkers->is_slid);

	checker_list_tweaks_t tweaks;
	tweaks.tweaked_checker_list = submap_checkers;

	/* The order below must reverse submap_checkers_untweak() */

	/*
	 * Remove entries from the list that fall outside this submap window.
	 * (we don't actually change the linked list,
	 * only the checker list's head and tail)
	 */
	tweaks.original_entries = submap_checkers->entries;
	submap_checkers->entries = checker_list_find_range_including_holes(submap_checkers,
	    submap_parent->address, submap_parent->size);

	/* "clip" the new head and tail to the submap parent's bounds */
	vm_entry_checker_t *new_head = submap_checkers->entries.head;
	vm_entry_checker_t *new_tail = submap_checkers->entries.tail;

	/* save full copies so the clip can be undone exactly */
	tweaks.new_head_original_contents = *new_head;
	tweaks.new_tail_original_contents = *new_tail;
	checker_tweak_for_vm_region(new_head, submap_parent);
	checker_tweak_for_vm_region(new_tail, submap_parent);

	return tweaks;
}
1384 
/*
 * attribute(cleanup) callback (see DEFER_UNTWEAK below): undoes
 * submap_checkers_tweak_for_vm_region() by restoring the clipped
 * head/tail contents and the original entry range.
 */
static void
cleanup_submap_checkers_untweak(checker_list_tweaks_t *tweaks)
{
	checker_list_t *submap_checkers = tweaks->tweaked_checker_list;

	/* The order below must reverse submap_checkers_tweak_for_vm_region() */

	/* restore contents of narrowed head and tail */
	*submap_checkers->entries.tail = tweaks->new_tail_original_contents;
	*submap_checkers->entries.head = tweaks->new_head_original_contents;

	/*
	 * restore entries clipped from the list
	 *
	 * old_prefix->head..old_prefix->tail <-> head..tail <-> old_suffix->head..old_suffix->tail
	 */
	submap_checkers->entries = tweaks->original_entries;
}
1403 
1404 #define DEFER_UNTWEAK __attribute__((cleanup(cleanup_submap_checkers_untweak)))
1405 
/*
 * Set an entry checker's object checker.
 * Adjusts the refcount of the new object checker and (if any) the old object checker.
 * Updates the entry's resident page count if the object has a fill pattern.
 */
void
checker_set_object(vm_entry_checker_t *checker, vm_object_checker_t *obj_checker)
{
	/*
	 * Take the new reference before dropping the old one, so that
	 * re-setting the same object does not transiently free it.
	 */
	object_checker_reference(obj_checker);
	if (checker->object) {
		object_checker_dereference(checker->object);
	}
	checker->object = obj_checker;

	/* if the object has a fill pattern then the pages will be resident already */
	if (checker->object->fill_pattern.mode == Fill) {
		checker_set_pages_resident(checker, checker->size / PAGE_SIZE);
	}
}
1425 
/*
 * Give an entry checker a new shadow object, mirroring the VM's
 * copy-on-write behavior: the entry's current object becomes the
 * shadow of a new clone that is registered in the checker list.
 */
void
checker_make_shadow_object(checker_list_t *list, vm_entry_checker_t *checker)
{
	vm_object_checker_t *old_object = checker->object;
	vm_object_checker_t *new_object = object_checker_clone(checker->object);
	checker_list_append_object(list, new_object);

	/* the shadow object covers exactly this entry, starting at offset 0 */
	new_object->size = checker->size;
	checker->object_offset = 0;

	/* the old object gains a reference as the new object's shadow */
	new_object->shadow = old_object;
	object_checker_reference(old_object);
	checker_set_object(checker, new_object);
}
1440 
1441 /*
1442  * Set an entry checker's object to the null object.
1443  */
1444 void
checker_set_null_object(checker_list_t * list,vm_entry_checker_t * checker)1445 checker_set_null_object(checker_list_t *list, vm_entry_checker_t *checker)
1446 {
1447 	checker_set_object(checker, find_object_checker_for_object_id(list, 0));
1448 }
1449 
1450 /*
1451  * vm_region computes share_mode from several other entry and object attributes.
1452  * Mimic that here.
1453  */
1454 uint8_t
checker_share_mode(vm_entry_checker_t * checker)1455 checker_share_mode(vm_entry_checker_t *checker)
1456 {
1457 	vm_object_checker_t *obj_checker = checker->object;
1458 
1459 	if (object_is_null(obj_checker)) {
1460 		return SM_EMPTY;
1461 	}
1462 	if (checker_is_submap(checker)) {
1463 		return SM_PRIVATE;
1464 	}
1465 	if (object_checker_get_shadow_depth(obj_checker) > 0) {
1466 		return SM_COW;
1467 	}
1468 	if (checker->needs_copy) {
1469 		return SM_COW;
1470 	}
1471 	if (object_checker_get_self_ref_count(obj_checker) == 1) {
1472 		/* TODO: self_ref_count == 2 && named */
1473 		return SM_PRIVATE;
1474 	}
1475 
1476 	return SM_SHARED;
1477 }
1478 
1479 
/*
 * Translate a share mode into a "narrowed" form.
 * - SM_TRUESHARED is mapped to SM_SHARED
 * - SM_SHARED_ALIASED is unsupported.
 * - TODO: SM_LARGE_PAGE
 */
static unsigned
narrow_share_mode(unsigned share_mode)
{
	switch (share_mode) {
	case SM_TRUESHARED:
		return SM_SHARED;
	case SM_PRIVATE_ALIASED:
		return SM_PRIVATE_ALIASED;
	case SM_SHARED_ALIASED:
		T_FAIL("unexpected/unimplemented share mode SM_SHARED_ALIASED");
	/* NOTE(review): no break — if T_FAIL returns, control falls into the next case; confirm intended */
	case SM_LARGE_PAGE:
		T_FAIL("unexpected/unimplemented share mode SM_LARGE_PAGE");
	/* NOTE(review): falls through to default if T_FAIL returns */
	default:
		return share_mode;
	}
}
1502 
1503 /*
1504  * Return true if a region and a checker have the same share_mode,
1505  * after accounting for share mode distinctions that the checkers do not enforce.
1506  */
1507 static bool
same_share_mode(vm_region_submap_info_data_64_t * info,vm_entry_checker_t * checker)1508 same_share_mode(vm_region_submap_info_data_64_t *info, vm_entry_checker_t *checker)
1509 {
1510 	return narrow_share_mode(info->share_mode) ==
1511 	       narrow_share_mode(checker_share_mode(checker));
1512 }
1513 
1514 /*
1515  * Allocate an entry checker using designated initializer syntax.
1516  */
1517 #define vm_entry_checker(...)                                   \
1518 	checker_clone(&(vm_entry_checker_t){ __VA_ARGS__ })
1519 
1520 /*
1521  * Allocate a new checker for an unallocated hole.
1522  * The new checker is not linked into the list.
1523  */
1524 static vm_entry_checker_t *
make_checker_for_hole(mach_vm_address_t address,mach_vm_size_t size)1525 make_checker_for_hole(mach_vm_address_t address, mach_vm_size_t size)
1526 {
1527 	return vm_entry_checker(
1528 		.address = address,
1529 		.size = size,
1530 		.kind = Hole,
1531 		.verify = vm_entry_attributes_with_default(true)
1532 		);
1533 }
1534 
/*
 * Build a checker for a freshly created private anonymous entry.
 * The checker starts with the null object (no pages touched yet)
 * and is not linked into the list.
 */
static vm_entry_checker_t *
make_checker_for_anonymous_private(
	checker_list_t *list,
	vm_entry_template_kind_t kind,
	mach_vm_address_t address,
	mach_vm_size_t size,
	vm_prot_t protection,
	vm_prot_t max_protection,
	uint16_t user_tag,
	bool permanent)
{
	// fixme hack: if you ask for protection --x you get r-x
	// fixme arm only?
	if (protection == VM_PROT_EXECUTE) {
		protection = VM_PROT_READ | VM_PROT_EXECUTE;
	}

	/* the tag must fit the checker's 8-bit user_tag field */
	assert(user_tag < 256);

	vm_entry_checker_t *checker = vm_entry_checker(
		.kind = kind,

		.address = address,
		.size = size,

		.object = NULL, /* set below */

		.protection = protection,
		.max_protection = max_protection,
		.inheritance = VM_INHERIT_DEFAULT,
		.behavior = VM_BEHAVIOR_DEFAULT,
		.permanent = permanent,

		.user_wired_count = 0,
		.user_tag = (uint8_t)user_tag,

		.object_offset = 0,
		.pages_resident = 0,
		.needs_copy = false,

		.verify = vm_entry_attributes_with_default(true)
		);

	checker_set_null_object(list, checker);

	return checker;
}
1582 
/*
 * Build a checker modeling the result of mach_vm_allocate().
 * flags_and_tag is the VM_FLAGS_* word (including the tag bits)
 * passed to the allocation call; flags this code does not
 * understand are rejected by assertion.
 */
vm_entry_checker_t *
make_checker_for_vm_allocate(
	checker_list_t *list,
	mach_vm_address_t address,
	mach_vm_size_t size,
	int flags_and_tag)
{
	/* Complain about flags not understood by this code. */

	/* these flags are permitted but have no effect on the checker */
	int ignored_flags =
	    VM_FLAGS_FIXED | VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR |
	    VM_FLAGS_OVERWRITE;

	/* these flags are handled by this code */
	int handled_flags = VM_FLAGS_ALIAS_MASK /* tag */ | VM_FLAGS_PERMANENT;

	int allowed_flags = ignored_flags | handled_flags;
	assert((flags_and_tag & ~allowed_flags) == 0);

	bool permanent = flags_and_tag & VM_FLAGS_PERMANENT;
	uint16_t tag;
	VM_GET_FLAGS_ALIAS(flags_and_tag, tag);

	/* vm_allocate memory is read/write with full max protection */
	return make_checker_for_anonymous_private(
		list, Allocation, address, size,
		VM_PROT_DEFAULT, VM_PROT_ALL, tag, permanent);
}
1611 
/*
 * Build a vm_checker for a newly-created shared memory region.
 * The region is assumed to be a remapping of anonymous memory.
 * Attributes not otherwise specified are assumed to have
 * default values as set by mach_vm_map().
 * The new checker is not linked into the list.
 */
static vm_entry_checker_t *
make_checker_for_shared(
	checker_list_t *list __unused,
	vm_entry_template_kind_t kind,
	mach_vm_address_t address,
	mach_vm_size_t size,
	mach_vm_address_t object_offset,
	vm_prot_t protection,
	vm_prot_t max_protection,
	uint16_t user_tag,
	bool permanent,
	vm_object_checker_t *obj_checker)
{
	// fixme hack: if you ask for protection --x you get r-x
	// fixme arm only?
	if (protection == VM_PROT_EXECUTE) {
		protection = VM_PROT_READ | VM_PROT_EXECUTE;
	}

	/* the tag must fit the checker's 8-bit user_tag field */
	assert(user_tag < 256);
	vm_entry_checker_t *checker = vm_entry_checker(
		.kind = kind,

		.address = address,
		.size = size,

		.object = NULL, /* set below */

		.protection = protection,
		.max_protection = max_protection,
		.inheritance = VM_INHERIT_DEFAULT,
		.behavior = VM_BEHAVIOR_DEFAULT,
		.permanent = permanent,

		.user_wired_count = 0,
		.user_tag = (uint8_t)user_tag,

		.object_offset = object_offset,
		.pages_resident = 0,
		.needs_copy = false,

		.verify = vm_entry_attributes_with_default(true)
		);

	/* share the caller-provided object (takes a reference) */
	checker_set_object(checker, obj_checker);

	return checker;
}
1667 
/*
 * Build a checker for a parent map's submap entry.
 * The entry references the given submap object checker
 * and models the protections a shared-region submap gets.
 */
vm_entry_checker_t *
make_checker_for_submap(
	mach_vm_address_t address,
	mach_vm_size_t size,
	mach_vm_address_t object_offset,
	vm_object_checker_t *submap_object_checker)
{
	vm_entry_checker_t *checker = vm_entry_checker(
		.kind = Submap,
		.address = address,
		.size = size,
		.object = NULL, /* set below */
		.protection = VM_PROT_READ,
		.max_protection = 0, /* set below */
		.inheritance = VM_INHERIT_SHARE,
		.behavior = VM_BEHAVIOR_DEFAULT,
		.permanent = false, /* see comment below */
		.user_wired_count = 0,
		.user_tag = 0,
		.submap_depth = 0,
		.object_offset = object_offset,
		.pages_resident = 0,
		.needs_copy = false,

		.verify = vm_entry_attributes_with_default(true),
		);

	/*
	 * Submap max_protection differs on x86_64.
	 * (see VM_MAP_POLICY_WRITABLE_SHARED_REGION
	 *  and vm_shared_region_insert_submap)
	 */
#if __x86_64__
	checker->max_protection = VM_PROT_ALL;
#else
	checker->max_protection = VM_PROT_READ;
#endif

	/* takes a reference on the submap object checker */
	checker_set_object(checker, submap_object_checker);

	/*
	 * Real submap entries for the shared region are sometimes
	 * permanent (see shared_region_make_permanent()).
	 * This test does not attempt to duplicate that because
	 * permanent entries are difficult to manage in userspace.
	 */

	return checker;
}
1720 
1721 
1722 /*
1723  * Print a checker's fields with optional highlighting.
1724  */
1725 static void
dump_checker_info_with_highlighting(vm_entry_checker_t * checker,attribute_highlights_t highlights)1726 dump_checker_info_with_highlighting(
1727 	vm_entry_checker_t *checker,
1728 	attribute_highlights_t highlights)
1729 {
1730 	const char *submap_prefix = checker->submap_depth > 0 ? SUBMAP_PREFIX : "";
1731 
1732 	/* Output order should match dump_region_info() for the reader's convenience. */
1733 
1734 	T_LOG("%sCHECKER   %s0x%llx%s..%s0x%llx%s %s(size 0x%llx)%s (%s%s)",
1735 	    submap_prefix,
1736 	    HIGHLIGHT(checker->address, entry.address_attr),
1737 	    HIGHLIGHT(checker_end_address(checker), entry.size_attr),
1738 	    HIGHLIGHT(checker->size, entry.size_attr),
1739 	    name_for_entry_kind(checker->kind),
1740 	    checker->submap_depth > 0 ? " in submap" : "");
1741 
1742 	if (checker->kind == Hole) {
1743 		if (checker->submap_depth != 0) {
1744 			/* print submap depth to avoid confusion about holes inside submaps */
1745 			T_LOG("%s    %ssubmap_depth:   %u%s", submap_prefix, HIGHLIGHT(checker->submap_depth, entry.submap_depth_attr));
1746 		}
1747 		return;
1748 	}
1749 
1750 	T_LOG("%s    %sprotection:     %s%s", submap_prefix, HIGHLIGHT(name_for_prot(checker->protection), entry.protection_attr));
1751 	T_LOG("%s    %smax protection: %s%s", submap_prefix, HIGHLIGHT(name_for_prot(checker->max_protection), entry.max_protection_attr));
1752 	T_LOG("%s    %sinheritance:    %s%s", submap_prefix, HIGHLIGHT(name_for_inherit(checker->inheritance), entry.inheritance_attr));
1753 	T_LOG("%s    %sbehavior:       %s%s", submap_prefix, HIGHLIGHT(name_for_behavior(checker->behavior), entry.behavior_attr));
1754 	T_LOG("%s    %suser wired count:  %d%s", submap_prefix, HIGHLIGHT(checker->user_wired_count, entry.user_wired_count_attr));
1755 	T_LOG("%s    %suser tag:       %d%s", submap_prefix, HIGHLIGHT(checker->user_tag, entry.user_tag_attr));
1756 	T_LOG("%s    %sobject offset:  0x%llx%s", submap_prefix, HIGHLIGHT(checker->object_offset, entry.object_offset_attr));
1757 
1758 	vm_object_checker_t *obj_checker = checker->object;
1759 	if (object_is_null(obj_checker)) {
1760 		T_LOG("%s    %sobject id:      %d%s", submap_prefix, HIGHLIGHT(0, entry.object_attr));
1761 	} else if (obj_checker->object_id_mode == object_is_unknown) {
1762 		T_LOG("%s    %sobject id:      %s%s", submap_prefix, HIGHLIGHT("unknown", entry.object_attr));
1763 	} else if (obj_checker->object_id_mode == object_has_unknown_nonnull_id) {
1764 		T_LOG("%s    %sobject id:      %s%s", submap_prefix, HIGHLIGHT("unknown, not null", entry.object_attr));
1765 	} else {
1766 		assert(obj_checker->object_id_mode == object_has_known_id);
1767 		T_LOG("%s    %sobject id:      0x%llx%s", submap_prefix, HIGHLIGHT(obj_checker->object_id, object.object_id_attr));
1768 		for (vm_object_checker_t *shadow = obj_checker->shadow; shadow; shadow = shadow->shadow) {
1769 			T_LOG("%s        %sshadow:         0x%llx%s", submap_prefix, HIGHLIGHT(shadow->object_id, object.object_id_attr));
1770 		}
1771 		T_LOG("%s    %sobject size:    0x%llx%s", submap_prefix, HIGHLIGHT(obj_checker->size, object.size_attr));
1772 		T_LOG("%s    %sref_count:      %u%s", submap_prefix, HIGHLIGHT(object_checker_get_vm_region_ref_count(obj_checker), object.ref_count_attr));
1773 		T_LOG("%s    %sshadow_depth:   %u%s", submap_prefix, HIGHLIGHT(object_checker_get_shadow_depth(obj_checker), object.shadow_depth_attr));
1774 		T_LOG("%s    %sself_ref_count: %u%s", submap_prefix, HIGHLIGHT(object_checker_get_self_ref_count(obj_checker), object.ref_count_attr));
1775 	}
1776 
1777 	T_LOG("%s    %spages resident: %u%s", submap_prefix, HIGHLIGHT(checker->pages_resident, entry.pages_resident_attr));
1778 	T_LOG("%s    %sshare mode:     %s%s", submap_prefix, HIGHLIGHT(name_for_share_mode(checker_share_mode(checker)), entry.share_mode_attr));
1779 	T_LOG("%s    %sis submap:      %s%s", submap_prefix, HIGHLIGHT(name_for_bool(checker_is_submap(checker)), entry.is_submap_attr));
1780 	T_LOG("%s    %ssubmap_depth:   %u%s", submap_prefix, HIGHLIGHT(checker->submap_depth, entry.submap_depth_attr));
1781 	T_LOG("%s    %spermanent:      %s%s", submap_prefix, HIGHLIGHT(name_for_bool(checker->permanent), entry.permanent_attr));
1782 }
1783 
1784 
1785 static void
dump_checker_info(vm_entry_checker_t * checker)1786 dump_checker_info(vm_entry_checker_t *checker)
1787 {
1788 	/*
1789 	 * Verified attributes are printed normally.
1790 	 * Unverified attributes are printed ignored.
1791 	 */
1792 	vm_entry_attribute_list_t verified_entry_attr = checker->verify;
1793 	vm_object_attribute_list_t verified_object_attr;
1794 	if (checker->verify.object_attr == false) {
1795 		/* object verification disabled entirely */
1796 		verified_object_attr = vm_object_attributes_with_default(false);
1797 	} else if (checker->object == NULL) {
1798 		verified_object_attr = vm_object_attributes_with_default(true);
1799 	} else {
1800 		verified_object_attr = checker->object->verify;
1801 	}
1802 
1803 	dump_checker_info_with_highlighting(checker,
1804 	    normal_or_ignored_highlights(verified_entry_attr, verified_object_attr));
1805 }
1806 
void
dump_checker_range(
	entry_checker_range_t list)
{
	/*
	 * Dump every checker in the list.
	 * For submap entries, recursively dump the checkers for the
	 * submap's contents too. checker_get_and_slide_submap_checkers()
	 * appears to rebase the submap checkers to this mapping's address
	 * range; DEFER_UNSLIDE presumably undoes that when the variable
	 * leaves scope (TODO confirm against its definition).
	 */
	FOREACH_CHECKER(checker, list) {
		dump_checker_info(checker);
		if (checker_is_submap(checker)) {
			checker_list_t *submap_checkers DEFER_UNSLIDE =
			    checker_get_and_slide_submap_checkers(checker);
			dump_checker_range(submap_checkers->entries);
		}
	}
}
1820 
1821 /*
1822  * Print a checker that failed verification,
1823  * and the real VM regions overlapping it.
1824  * Attributes in bad_entry_attr and bad_object_attr are printed as BadHighlight.
1825  * Other attributes are printed as IgnoredHighlight.
1826  */
1827 static void
warn_bad_checker(vm_entry_checker_t * checker,vm_entry_attribute_list_t bad_entry_attr,vm_object_attribute_list_t bad_object_attr,const char * message)1828 warn_bad_checker(
1829 	vm_entry_checker_t *checker,
1830 	vm_entry_attribute_list_t bad_entry_attr,
1831 	vm_object_attribute_list_t bad_object_attr,
1832 	const char *message)
1833 {
1834 	attribute_highlights_t highlights =
1835 	    bad_or_ignored_highlights(bad_entry_attr, bad_object_attr);
1836 	T_LOG("*** %s: expected ***", message);
1837 	dump_checker_info_with_highlighting(checker, highlights);
1838 	T_LOG("*** %s: actual ***", message);
1839 	dump_region_info_for_entry(checker, highlights);
1840 }
1841 
1842 static mach_vm_size_t
overestimate_size(const vm_entry_template_t templates[],unsigned count)1843 overestimate_size(const vm_entry_template_t templates[], unsigned count)
1844 {
1845 	mach_vm_size_t size = 0;
1846 	for (unsigned i = 0; i < count; i++) {
1847 		bool overflowed = __builtin_add_overflow(size, templates[i].size, &size);
1848 		assert(!overflowed);
1849 	}
1850 	return size;
1851 }
1852 
1853 /*
1854  * The arena is a contiguous address range where the VM regions for
1855  * a test are placed. Here we allocate the entire space to reserve it.
1856  * Later, it is overwritten by each desired map entry or unallocated hole.
1857  *
1858  * Problem: We want to generate unallocated holes and verify later that
1859  * they are still unallocated. But code like Rosetta compilation and
1860  * Mach exceptions can allocate VM space outside out control. If those
1861  * allocations land in our unallocated holes then a test may spuriously fail.
1862  * Solution: The arena is allocated with VM_FLAGS_RANDOM_ADDR to keep it
1863  * well away from the VM's allocation frontier. This does not prevent the
1864  * problem entirely but so far it appears to dodge it with high probability.
1865  * TODO: make this more reliable or completely safe somehow.
1866  */
1867 static void
allocate_arena(mach_vm_size_t arena_size,mach_vm_size_t arena_alignment_mask,mach_vm_address_t * const out_arena_address)1868 allocate_arena(
1869 	mach_vm_size_t arena_size,
1870 	mach_vm_size_t arena_alignment_mask,
1871 	mach_vm_address_t * const out_arena_address)
1872 {
1873 	mach_vm_size_t arena_unaligned_size;
1874 	mach_vm_address_t allocated = 0;
1875 	kern_return_t kr;
1876 
1877 	/*
1878 	 * VM_FLAGS_RANDOM_ADDR will often spuriously fail
1879 	 * when using a large alignment mask.
1880 	 * We instead allocate oversized and perform the alignment manually.
1881 	 */
1882 	if (arena_alignment_mask > PAGE_MASK) {
1883 		arena_unaligned_size = arena_size + arena_alignment_mask + 1;
1884 	} else {
1885 		arena_unaligned_size = arena_size;
1886 	}
1887 
1888 	kr = mach_vm_map(mach_task_self(), &allocated, arena_unaligned_size,
1889 	    0 /* alignment mask */, VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR,
1890 	    0, 0, 0, 0, 0, 0);
1891 
1892 	if (kr == KERN_NO_SPACE) {
1893 		/*
1894 		 * VM_FLAGS_RANDOM_ADDR can spuriously fail even without alignment.
1895 		 * Try again without it.
1896 		 */
1897 		kr = mach_vm_map(mach_task_self(), &allocated, arena_unaligned_size,
1898 		    0 /* alignment mask */, VM_FLAGS_ANYWHERE,
1899 		    0, 0, 0, 0, 0, 0);
1900 		if (kr == KERN_SUCCESS) {
1901 			T_LOG("note: forced to allocate arena without VM_FLAGS_RANDOM_ADDR");
1902 		}
1903 	}
1904 
1905 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "arena allocation "
1906 	    "(size 0x%llx, alignment 0x%llx)", arena_size, arena_alignment_mask);
1907 
1908 	if (arena_alignment_mask > PAGE_MASK) {
1909 		/* Align manually within the oversized allocation. */
1910 		mach_vm_address_t aligned = (allocated & ~arena_alignment_mask) + arena_alignment_mask + 1;
1911 		mach_vm_address_t aligned_end = aligned + arena_size;
1912 		mach_vm_address_t allocated_end = allocated + arena_unaligned_size;
1913 
1914 		assert(aligned >= allocated && aligned_end <= allocated_end);
1915 		assert((aligned & arena_alignment_mask) == 0);
1916 		assert((aligned & PAGE_MASK) == 0);
1917 
1918 		/* trim the overallocation */
1919 		(void)mach_vm_deallocate(mach_task_self(), allocated, aligned - allocated);
1920 		(void)mach_vm_deallocate(mach_task_self(), aligned_end, allocated_end - aligned_end);
1921 
1922 		*out_arena_address = aligned;
1923 	} else {
1924 		/* No alignment needed. */
1925 		*out_arena_address = allocated;
1926 	}
1927 }
1928 
1929 static void
write_fill_pattern(mach_vm_address_t start,mach_vm_size_t size,fill_pattern_t fill_pattern)1930 write_fill_pattern(
1931 	mach_vm_address_t start,
1932 	mach_vm_size_t size,
1933 	fill_pattern_t fill_pattern)
1934 {
1935 	assert(start % sizeof(uint64_t) == 0);
1936 	if (fill_pattern.mode == Fill) {
1937 		for (mach_vm_address_t c = start;
1938 		    c < start + size;
1939 		    c += sizeof(uint64_t)) {
1940 			*(uint64_t *)c = fill_pattern.pattern;
1941 		}
1942 	}
1943 }
1944 
1945 /*
1946  * Returns true if the memory contents of [start, start + size)
1947  * matches the fill pattern.
1948  * A fill pattern of DontFill always matches and never reads the memory.
1949  * If the pattern did not match, *first_bad_address is set to the
1950  * first address (uint64_t aligned) that did not match.
1951  */
1952 static bool
verify_fill_pattern(mach_vm_address_t start,mach_vm_size_t size,fill_pattern_t fill_pattern,mach_vm_address_t * const first_bad_address)1953 verify_fill_pattern(
1954 	mach_vm_address_t start,
1955 	mach_vm_size_t size,
1956 	fill_pattern_t fill_pattern,
1957 	mach_vm_address_t * const first_bad_address)
1958 {
1959 	mach_vm_address_t end = start + size;
1960 	bool good = true;
1961 	assert(start % sizeof(uint64_t) == 0);
1962 	if (fill_pattern.mode == Fill) {
1963 		for (mach_vm_address_t c = start;
1964 		    c < end;
1965 		    c += sizeof(uint64_t)) {
1966 			if (*(uint64_t *)c != fill_pattern.pattern) {
1967 				if (first_bad_address) {
1968 					*first_bad_address = c;
1969 				}
1970 				good = false;
1971 				break;
1972 			}
1973 		}
1974 	}
1975 
1976 	return good;
1977 }
1978 
/* Debug syscall to manipulate submaps. */

/* Operation selector for the vm.submap_test_ctl sysctl. */
typedef enum {
	vsto_make_submap = 1, /* make submap from entries in current_map() at start..end, offset ignored */
	vsto_remap_submap = 2, /* map in current_map() at start..end, from submap address offset */
	vsto_end
} vm_submap_test_op;

/* Argument block passed to the vm.submap_test_ctl sysctl. */
typedef struct {
	vm_submap_test_op op;                  /* which operation to perform */
	mach_vm_address_t submap_base_address; /* base of existing submap (vsto_remap_submap only) */
	mach_vm_address_t start;               /* start of the affected address range */
	mach_vm_address_t end;                 /* end of the affected address range */
	mach_vm_address_t offset;              /* VME_OFFSET for the remapped entry (vsto_remap_submap only) */
} vm_submap_test_args;
1994 
1995 static void
submap_op(vm_submap_test_args * args)1996 submap_op(vm_submap_test_args *args)
1997 {
1998 	int err = sysctlbyname("vm.submap_test_ctl",
1999 	    NULL, NULL, args, sizeof(*args));
2000 	T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "sysctl(vm.submap_test_ctl)");
2001 }
2002 
2003 /* Lower address range [start..end) into a submap at that same address. */
2004 static void
submapify(mach_vm_address_t start,mach_vm_address_t end)2005 submapify(mach_vm_address_t start, mach_vm_address_t end)
2006 {
2007 	vm_submap_test_args args = {
2008 		.op = vsto_make_submap,
2009 		.submap_base_address = 0,
2010 		.start = start,
2011 		.end = end,
2012 		.offset = 0,
2013 	};
2014 	submap_op(&args);
2015 }
2016 
2017 /*
2018  * submap_base_address is the start of a submap created with submapify().
2019  * Remap that submap or a portion thereof at [start, end).
2020  * Use offset as the VME_OFFSET field in the parent map's submap entry.
2021  */
2022 static void
remap_submap(mach_vm_address_t submap_base_address,mach_vm_address_t start,mach_vm_size_t size,mach_vm_address_t offset)2023 remap_submap(
2024 	mach_vm_address_t submap_base_address,
2025 	mach_vm_address_t start,
2026 	mach_vm_size_t size,
2027 	mach_vm_address_t offset)
2028 {
2029 	vm_submap_test_args args = {
2030 		.op = vsto_remap_submap,
2031 		.submap_base_address = submap_base_address,
2032 		.start = start,
2033 		.end = start + size,
2034 		.offset = offset,
2035 	};
2036 	submap_op(&args);
2037 }
2038 
2039 /*
2040  * Temporary scratch space for newly-created VM objects.
2041  * Used by create_vm_state() and its helpers.
2042  */
2043 typedef struct {
2044 	/* computed from entry templates */
2045 	unsigned entry_count;
2046 	bool is_private;
2047 	mach_vm_size_t min_size;  /* size required by entries that use it */
2048 
2049 	/*
2050 	 * set when allocating the object's temporary backing storage
2051 	 */
2052 	mach_vm_address_t allocated_address;
2053 	mach_vm_size_t allocated_size;
2054 	vm_object_checker_t *checker;
2055 } object_scratch_t;
2056 
/*
 * Build the backing storage and object checker for a SubmapObject template.
 * Creates the submap's contents in temporary address space, lowers that
 * range into a real submap, and records an object checker that owns the
 * entry/object checkers for the submap's contents.
 */
static void
allocate_submap_storage_and_checker(
	checker_list_t *checker_list,
	const vm_object_template_t *object_tmpl,
	object_scratch_t *object_scratch)
{
	assert(object_tmpl->kind == SubmapObject);
	assert(object_tmpl->size == 0);
	assert(object_scratch->min_size > 0);
	assert(object_scratch->entry_count > 0);

	/*
	 * Submap size is determined by its contents.
	 * min_size is the minimum size required for
	 * the offset/size of the parent map entries
	 * that remap this submap.
	 * We allocate the submap first, then check min_size.
	 */

	/*
	 * Check some preconditions on the submap contents.
	 * This is in addition to the checks performed by create_vm_state().
	 */
	for (unsigned i = 0; i < object_tmpl->submap.entry_count; i++) {
		const vm_entry_template_t *tmpl = &object_tmpl->submap.entries[i];

		assert(tmpl->kind != Hole);  /* no holes, vm_map_seal fills them */
		assert(tmpl->kind != Submap);  /* no nested submaps */
	}

	/*
	 * Allocate the submap's entries into temporary space,
	 * lower them into a submap, and build checkers for them.
	 * Later there will be entry templates in the parent map that
	 * remap this space and clone these checkers.
	 * This temporary space will be cleaned up when
	 * the object_scratch is destroyed at the end of create_vm_state().
	 */
	checker_list_t *submap_checkers = create_vm_state(
		object_tmpl->submap.entries, object_tmpl->submap.entry_count,
		object_tmpl->submap.objects, object_tmpl->submap.object_count,
		SUBMAP_ALIGNMENT_MASK, "submap construction");

	/*
	 * Update the returned submap checkers for vm_map_seal and submap lowering.
	 * - set the submap depth
	 * - resolve null objects
	 * - disable share mode verification (TODO vm_region says SM_COW, we say SM_PRIVATE)
	 * - TODO resolve needs_copy COW and change to COPY_DELAY
	 */
	FOREACH_CHECKER(submap_checker, submap_checkers->entries) {
		T_QUIET; T_ASSERT_EQ(submap_checker->submap_depth, 0, "nested submaps not allowed");
		submap_checker->submap_depth = 1;
		checker_resolve_null_vm_object(submap_checkers, submap_checker);
		submap_checker->verify.share_mode_attr = false;
	}

	mach_vm_address_t submap_start = checker_range_start_address(submap_checkers->entries);
	mach_vm_address_t submap_end = checker_range_end_address(submap_checkers->entries);
	assert(submap_start < submap_end);

	/* verify that the submap is bigger than min_size */
	T_QUIET; T_ASSERT_GE(submap_end - submap_start, object_scratch->min_size,
	    "some submap entry extends beyond the end of the submap object");

	/* make it a real boy^W submap */
	submapify(submap_start, submap_end);

	/*
	 * Make an object checker for the entire submap.
	 * This checker stores the entry and object checkers for the submap's contents.
	 */
	vm_object_checker_t *obj_checker = make_submap_object_checker(
		checker_list, submap_checkers);

	object_scratch->allocated_address = submap_start;
	object_scratch->allocated_size = submap_end - submap_start;
	object_scratch->checker = obj_checker;
}
2136 
/*
 * Build the backing storage and object checker for a non-submap object
 * template. Private objects defer allocation until their entry is created;
 * shared/COW anonymous objects get temporary backing storage here.
 */
static void
allocate_object_storage_and_checker(
	checker_list_t *checker_list,
	const vm_object_template_t *object_tmpl,
	object_scratch_t *object_scratch)
{
	kern_return_t kr;

	assert(object_tmpl->kind != EndObjects);
	assert(object_scratch->entry_count > 0);
	assert(object_scratch->min_size > 0);

	/*
	 * min_size is the required object size as determined by
	 * the entries using this object and their sizes and offsets.
	 *
	 * tmpl->size may be zero, in which case we allocate min_size bytes
	 * OR tmpl->size may be non-zero, in which case we allocate tmpl->size bytes
	 * and verify that it is at least as large as min_size.
	 */
	mach_vm_size_t size = object_tmpl->size ?: object_scratch->min_size;
	assert(size >= object_scratch->min_size);

	if (object_scratch->is_private == 1) {
		/*
		 * Object is private memory for a single entry.
		 * It will be allocated when the entry is created.
		 */
		assert(object_scratch->entry_count == 1);
		object_scratch->allocated_address = 0;
		object_scratch->allocated_size = 0;
		object_scratch->checker = NULL;
	} else if (object_tmpl->kind == Anonymous) {
		/*
		 * Object is anonymous memory and shared or COW
		 * by multiple entries. Allocate temporary space now.
		 * Each entry will copy or share it when the entries
		 * are created. Then this temporary allocation will be freed.
		 */
		// fixme double-check that freeing this backing store
		// does not interfere with COW state
		/* NOTE(review): SCENEKIT tag presumably just a distinctive marker for debugging — confirm */
		mach_vm_address_t address = 0;
		kr = mach_vm_allocate(mach_task_self(), &address, size,
		    VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_SCENEKIT));
		assert(kr == 0);

		object_scratch->allocated_address = address;
		object_scratch->allocated_size = size;

		object_scratch->checker = make_anonymous_object_checker(
			checker_list, size);

		/* seed the backing store so later copies/shares can be verified */
		write_fill_pattern(address, size, object_tmpl->fill_pattern);
		object_scratch->checker->fill_pattern = object_tmpl->fill_pattern;
	} else {
		T_FAIL("unexpected/unimplemented: object is neither private nor anonymous nor submap");
	}
}
2195 
2196 
2197 /*
2198  * Choose an entry's user_tag value.
2199  * If the requested value is an ordinary tag, use it.
2200  * If the requested value is autoincrementing, pick the next
2201  * autoincrementing tag. *inc stores the persistent increment
2202  * state and should be cleared before the first call.
2203  */
2204 static uint8_t
choose_user_tag(uint16_t requested_tag,uint8_t * inc)2205 choose_user_tag(uint16_t requested_tag, uint8_t *inc)
2206 {
2207 	uint8_t assigned_tag;
2208 	if (requested_tag == VM_MEMORY_TAG_AUTOINCREMENTING) {
2209 		/* choose an incrementing tag 1..16 */
2210 		assigned_tag = VM_MEMORY_APPLICATION_SPECIFIC_1 + *inc;
2211 		*inc = (*inc + 1) % 16;
2212 	} else {
2213 		/* ordinary tag */
2214 		assert(requested_tag < 256);
2215 		assigned_tag = (uint8_t)requested_tag;
2216 	}
2217 	return assigned_tag;
2218 }
2219 
2220 
2221 /*
2222  * SM_EMPTY is the default template share mode,
2223  * but we allow other template values to implicitly
2224  * override it.
2225  */
2226 static uint8_t
template_real_share_mode(const vm_entry_template_t * tmpl)2227 template_real_share_mode(const vm_entry_template_t *tmpl)
2228 {
2229 	if (tmpl->share_mode != SM_EMPTY) {
2230 		return tmpl->share_mode;
2231 	}
2232 
2233 	/* things that can override SM_EMPTY */
2234 	if (tmpl->user_wired_count > 0) {
2235 		return SM_PRIVATE;
2236 	}
2237 	if (tmpl->object && tmpl->object->fill_pattern.mode == Fill) {
2238 		return SM_PRIVATE;
2239 	}
2240 
2241 	return SM_EMPTY;
2242 }
2243 
2244 static void
create_vm_hole(const vm_entry_template_t * tmpl,mach_vm_address_t dest_address,checker_list_t * checker_list)2245 create_vm_hole(
2246 	const vm_entry_template_t *tmpl,
2247 	mach_vm_address_t dest_address,
2248 	checker_list_t *checker_list)
2249 {
2250 	kern_return_t kr;
2251 
2252 	assert(dest_address % PAGE_SIZE == 0);
2253 	assert(tmpl->size % PAGE_SIZE == 0);
2254 	assert(tmpl->object == NULL);
2255 
2256 	/* deallocate the hole */
2257 	kr = mach_vm_deallocate(mach_task_self(),
2258 	    dest_address, tmpl->size);
2259 	assert(kr == 0);
2260 
2261 	/* add a checker for the unallocated space */
2262 	checker_range_append(&checker_list->entries,
2263 	    make_checker_for_hole(dest_address, tmpl->size));
2264 }
2265 
2266 static void
create_vm_submap(const vm_entry_template_t * tmpl,object_scratch_t * object_scratch,mach_vm_address_t dest_address,checker_list_t * checker_list)2267 create_vm_submap(
2268 	const vm_entry_template_t *tmpl,
2269 	object_scratch_t *object_scratch,
2270 	mach_vm_address_t dest_address,
2271 	checker_list_t *checker_list)
2272 {
2273 	kern_return_t kr;
2274 
2275 	/* entry must not extend beyond submap's backing store */
2276 	assert(tmpl->offset + tmpl->size <= object_scratch->allocated_size);
2277 
2278 	/* deallocate space for the new submap entry */
2279 	/* TODO vsto_remap_submap should copy-overwrite */
2280 	kr = mach_vm_deallocate(mach_task_self(),
2281 	    dest_address, tmpl->size);
2282 	assert(kr == 0);
2283 
2284 	remap_submap(object_scratch->allocated_address,
2285 	    dest_address, tmpl->size, tmpl->offset);
2286 
2287 	/*
2288 	 * Create a map entry checker for the parent map's submap entry.
2289 	 * Its object checker is the submap checker, which in turn
2290 	 * contains the entry checkers for the submap's contents.
2291 	 */
2292 	checker_range_append(&checker_list->entries,
2293 	    make_checker_for_submap(dest_address, tmpl->size, tmpl->offset,
2294 	    object_scratch->checker));
2295 }
2296 
2297 __attribute__((overloadable))
2298 checker_list_t *
create_vm_state(const vm_entry_template_t entry_templates[],unsigned entry_template_count,const vm_object_template_t object_templates[],unsigned object_template_count,mach_vm_size_t alignment_mask,const char * message)2299 create_vm_state(
2300 	const vm_entry_template_t entry_templates[],
2301 	unsigned entry_template_count,
2302 	const vm_object_template_t object_templates[],
2303 	unsigned object_template_count,
2304 	mach_vm_size_t alignment_mask,
2305 	const char *message)
2306 {
2307 	const vm_object_template_t *start_object_templates = &object_templates[0];
2308 	const vm_object_template_t *end_object_templates = &object_templates[object_template_count];
2309 	checker_list_t *checker_list = checker_list_new();
2310 	uint8_t tag_increment = 0;
2311 	kern_return_t kr;
2312 
2313 	/* temporary scratch space for new objects for shared and COW entries */
2314 	object_scratch_t *new_objects =
2315 	    calloc(sizeof(object_scratch_t), object_template_count);
2316 
2317 	/* Check some preconditions */
2318 
2319 	assert(is_valid_alignment_mask(alignment_mask));
2320 	assert(entry_template_count > 0);
2321 
2322 	/*
2323 	 * Check preconditions of each entry template
2324 	 * and accumulate some info about their respective objects.
2325 	 */
2326 	for (unsigned i = 0; i < entry_template_count; i++) {
2327 		const vm_entry_template_t *tmpl = &entry_templates[i];
2328 
2329 		assert(tmpl->kind != EndEntries);
2330 		assert(tmpl->size > 0);
2331 		assert(tmpl->size % PAGE_SIZE == 0);
2332 		assert(tmpl->inheritance <= VM_INHERIT_LAST_VALID);
2333 
2334 		/* reject VM_PROT_EXEC; TODO: support it somehow */
2335 		T_QUIET; T_ASSERT_TRUE(prot_contains_all(VM_PROT_READ | VM_PROT_WRITE, tmpl->protection),
2336 		    "entry template #%u protection 0x%x exceeds VM_PROT_READ | VM_PROT_WRITE", i, tmpl->protection);
2337 
2338 		T_QUIET; T_ASSERT_TRUE(prot_contains_all(VM_PROT_ALL, tmpl->max_protection),
2339 		    "entry template #%u max_protection 0x%x exceeds VM_PROT_ALL", i, tmpl->max_protection);
2340 
2341 		T_QUIET; T_ASSERT_TRUE(prot_contains_all(tmpl->max_protection, tmpl->protection),
2342 		    "entry template #%u protection exceeds max_protection (%s/%s)",
2343 		    i, name_for_prot(tmpl->protection), name_for_prot(tmpl->max_protection));
2344 
2345 		/* entry can't be COW and wired at the same time */
2346 		assert(!(tmpl->user_wired_count > 0 && template_real_share_mode(tmpl) == SM_COW));
2347 
2348 		/*
2349 		 * We only allow vm_behavior_t values that are stored
2350 		 * persistently in the entry.
2351 		 * Non-persistent behaviors don't make sense here because
2352 		 * they're really more like one-shot memory operations.
2353 		 */
2354 		assert(is_persistent_vm_behavior(tmpl->behavior));
2355 
2356 		/*
2357 		 * Non-zero offset in object not implemented for
2358 		 * SM_EMPTY and SM_PRIVATE.
2359 		 * (TODO might be possible for SM_PRIVATE.)
2360 		 */
2361 		if (tmpl->kind != Submap) {
2362 			switch (template_real_share_mode(tmpl)) {
2363 			case SM_EMPTY:
2364 			case SM_PRIVATE:
2365 				assert(tmpl->offset == 0); /* unimplemented */
2366 				break;
2367 			default:
2368 				break;
2369 			}
2370 		} else {
2371 			/* Submap entries are SM_PRIVATE and can be offset. */
2372 		}
2373 
2374 		/* entry's object template must be NULL or in the object list */
2375 		object_scratch_t *object_scratch = NULL;
2376 		if (tmpl->object) {
2377 			assert(tmpl->object >= start_object_templates &&
2378 			    tmpl->object < end_object_templates);
2379 
2380 			object_scratch =
2381 			    &new_objects[tmpl->object - start_object_templates];
2382 
2383 			/* object size must be large enough to span this entry */
2384 			mach_vm_size_t min_size = tmpl->offset + tmpl->size;
2385 			if (object_scratch->min_size < min_size) {
2386 				object_scratch->min_size = min_size;
2387 			}
2388 		}
2389 
2390 		if (tmpl->kind == Submap) {
2391 			/* submap */
2392 			assert(tmpl->object);
2393 			assert(tmpl->object->kind == SubmapObject);
2394 			object_scratch->entry_count++;
2395 			object_scratch->is_private = false;
2396 		} else {
2397 			/* not submap */
2398 			assert(tmpl->object == NULL || tmpl->object->kind != SubmapObject);
2399 
2400 			/*
2401 			 * object entry_count is the number of entries that use it
2402 			 *
2403 			 * object is_private if its only reference
2404 			 * is an entry with share mode private
2405 			 */
2406 			switch (template_real_share_mode(tmpl)) {
2407 			case SM_EMPTY:
2408 				/*
2409 				 * empty may not have an object
2410 				 * (but note that some options may override SM_EMPTY,
2411 				 * see template_real_share_mode())
2412 				 */
2413 				assert(tmpl->object == NULL);
2414 				break;
2415 			case SM_PRIVATE:
2416 				/*
2417 				 * private:
2418 				 * object is optional
2419 				 * object must not be used already
2420 				 * object will be private
2421 				 */
2422 				if (tmpl->object) {
2423 					assert(object_scratch->entry_count == 0 &&
2424 					    "SM_PRIVATE entry template may not share "
2425 					    "its object template with any other entry");
2426 					object_scratch->entry_count = 1;
2427 					object_scratch->is_private = true;
2428 				}
2429 				break;
2430 			case SM_SHARED:
2431 			/* case SM_TRUESHARED, TODO maybe */
2432 			case SM_COW:
2433 				/*
2434 				 * shared or cow:
2435 				 * object is required
2436 				 * object must not be private already
2437 				 */
2438 				assert(tmpl->object);
2439 				assert(object_scratch->is_private == false);
2440 				object_scratch->entry_count++;
2441 				break;
2442 			default:
2443 				T_FAIL("unexpected/unimplemented: unsupported share mode");
2444 			}
2445 		}
2446 	}
2447 
2448 	/*
2449 	 * Check that every SM_SHARED entry really does share
2450 	 * its object with at least one other entry.
2451 	 */
2452 	for (unsigned i = 0; i < entry_template_count; i++) {
2453 		const vm_entry_template_t *tmpl = &entry_templates[i];
2454 		const vm_object_template_t *object_tmpl = tmpl->object;
2455 		object_scratch_t *object_scratch =
2456 		    tmpl->object ? &new_objects[object_tmpl - start_object_templates] : NULL;
2457 
2458 		if (template_real_share_mode(tmpl) == SM_SHARED) {
2459 			assert(tmpl->object != NULL &&
2460 			    "SM_SHARED entry template must have an object template");
2461 			assert(object_scratch->entry_count > 1 &&
2462 			    "SM_SHARED entry's object template must be used by at least one other entry");
2463 		}
2464 	}
2465 
2466 	/*
2467 	 * Check some preconditions of object templates,
2468 	 * and allocate backing storage and checkers for objects that are shared.
2469 	 * (Objects that are private are handled when the entry is created.)
2470 	 *
2471 	 * This also allocates backing storage and checkers for submaps in a
2472 	 * similar way to shared non-submaps. The submap mapping(s) into this
2473 	 * arena's address range, and the checkers thereof, are handled later.
2474 	 */
2475 	for (unsigned i = 0; i < object_template_count; i++) {
2476 		const vm_object_template_t *object_tmpl = &object_templates[i];
2477 		object_scratch_t *object_scratch = &new_objects[i];
2478 
2479 		if (object_tmpl->kind == SubmapObject) {
2480 			allocate_submap_storage_and_checker(
2481 				checker_list, object_tmpl, object_scratch);
2482 		} else {
2483 			allocate_object_storage_and_checker(
2484 				checker_list, object_tmpl, object_scratch);
2485 		}
2486 	}
2487 
2488 	/* Allocate a range large enough to span all requested entries. */
2489 	mach_vm_address_t arena_address = 0;
2490 	mach_vm_address_t arena_end = 0;
2491 	{
2492 		mach_vm_size_t arena_size =
2493 		    overestimate_size(entry_templates, entry_template_count);
2494 		allocate_arena(arena_size, alignment_mask, &arena_address);
2495 		arena_end = arena_address + arena_size;
2496 	}
2497 
2498 	/* Carve up the allocated range into the requested entries. */
2499 	for (unsigned i = 0; i < entry_template_count; i++) {
2500 		const vm_entry_template_t *tmpl = &entry_templates[i];
2501 		const vm_object_template_t *object_tmpl = tmpl->object;
2502 		object_scratch_t *object_scratch =
2503 		    tmpl->object ? &new_objects[object_tmpl - start_object_templates] : NULL;
2504 
2505 		/*
2506 		 * Assign a user_tag, resolving autoincrementing if requested.
2507 		 */
2508 		uint8_t assigned_tag = choose_user_tag(tmpl->user_tag, &tag_increment);
2509 
2510 		unsigned permanent_flag = tmpl->permanent ? VM_FLAGS_PERMANENT : 0;
2511 
2512 		/* Allocate the entry. */
2513 
2514 		if (tmpl->kind == Hole) {
2515 			create_vm_hole(tmpl, arena_address, checker_list);
2516 			arena_address += tmpl->size;
2517 			continue;
2518 		} else if (tmpl->kind == Submap) {
2519 			create_vm_submap(tmpl, object_scratch, arena_address, checker_list);
2520 			arena_address += tmpl->size;
2521 			continue;
2522 		} else {
2523 			assert(tmpl->kind == Allocation);
2524 		}
2525 
2526 		/* new entry is a real allocation */
2527 		if (template_real_share_mode(tmpl) == SM_SHARED) {
2528 			/*
2529 			 * New map entry is shared: it shares
2530 			 * the same object as some other map entry.
2531 			 *
2532 			 * Create the entry using mach_make_memory_entry()
2533 			 * and mach_vm_map(). The source is the object's
2534 			 * temporary backing store (or a portion thereof).
2535 			 *
2536 			 * We don't use vm_remap to share because it can't
2537 			 * set the user_tag.
2538 			 */
2539 
2540 			/* must not extend beyond object's temporary backing store */
2541 			assert(tmpl->offset + tmpl->size <= object_scratch->allocated_size);
2542 
2543 			/* create the memory entry covering the entire source object */
2544 			mach_vm_size_t size = tmpl->size;
2545 			mach_port_t memory_entry_port;
2546 			kr = mach_make_memory_entry_64(mach_task_self(),
2547 			    &size,
2548 			    object_scratch->allocated_address + tmpl->offset, /* src */
2549 			    tmpl->protection | MAP_MEM_VM_SHARE,
2550 			    &memory_entry_port, MEMORY_OBJECT_NULL);
2551 			assert(kr == 0);
2552 			assert(size == tmpl->size);
2553 
2554 			/* map the memory entry */
2555 			mach_vm_address_t allocated_address = arena_address;
2556 			kr = mach_vm_map(mach_task_self(),
2557 			    &allocated_address,
2558 			    tmpl->size,
2559 			    0,             /* alignment mask */
2560 			    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(assigned_tag) | permanent_flag,
2561 			    memory_entry_port,  /* src */
2562 			    0, /* offset - already applied during mmme */
2563 			    false, /* copy */
2564 			    tmpl->protection,
2565 			    tmpl->max_protection,
2566 			    VM_INHERIT_DEFAULT);
2567 			assert(kr == 0);
2568 			assert(allocated_address == arena_address);
2569 
2570 			/* tear down the memory entry */
2571 			mach_port_deallocate(mach_task_self(), memory_entry_port);
2572 
2573 			/* set up the checkers */
2574 			vm_entry_checker_t *checker = make_checker_for_shared(
2575 				checker_list, tmpl->kind,
2576 				allocated_address, tmpl->size, tmpl->offset,
2577 				tmpl->protection, tmpl->max_protection,
2578 				assigned_tag, tmpl->permanent, object_scratch->checker);
2579 			checker_range_append(&checker_list->entries, checker);
2580 
2581 			arena_address = allocated_address + tmpl->size;
2582 		} else if (tmpl->object == NULL || tmpl->object->kind == Anonymous) {
2583 			/*
2584 			 * New entry's object is null or anonymous private memory.
2585 			 * Create the entry using mach_vm_map.
2586 			 */
2587 
2588 			/*
2589 			 * We attempt to map the memory with the correct protections
2590 			 * from the start, because this is more capable than
2591 			 * mapping with more permissive protections and then
2592 			 * calling vm_protect.
2593 			 *
2594 			 * But sometimes we need to read or write the memory
2595 			 * during setup. In that case we are forced to map
2596 			 * permissively and vm_protect later.
2597 			 */
2598 			vm_prot_t initial_protection = tmpl->protection;
2599 			vm_prot_t initial_max_protection = tmpl->max_protection;
2600 			bool protect_last = false;
2601 			if (template_real_share_mode(tmpl) == SM_PRIVATE ||
2602 			    tmpl->object != NULL) {
2603 				protect_last = true;
2604 				initial_protection |= VM_PROT_READ | VM_PROT_WRITE;
2605 				initial_max_protection |= VM_PROT_READ | VM_PROT_WRITE;
2606 			}
2607 
2608 			mach_vm_address_t allocated_address = arena_address;
2609 			kr = mach_vm_map(mach_task_self(),
2610 			    &allocated_address,
2611 			    tmpl->size,
2612 			    0,     /* alignment mask */
2613 			    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(assigned_tag) | permanent_flag,
2614 			    0,     /* memory object */
2615 			    0,     /* object offset */
2616 			    false, /* copy */
2617 			    initial_protection,
2618 			    initial_max_protection,
2619 			    VM_INHERIT_DEFAULT);
2620 			assert(kr == 0);
2621 			assert(allocated_address == arena_address);
2622 
2623 			vm_entry_checker_t *checker = make_checker_for_anonymous_private(
2624 				checker_list,
2625 				tmpl->kind, allocated_address, tmpl->size,
2626 				tmpl->protection, tmpl->max_protection, assigned_tag,
2627 				tmpl->permanent);
2628 			checker_range_append(&checker_list->entries, checker);
2629 
2630 			arena_address = allocated_address + tmpl->size;
2631 
2632 			if (template_real_share_mode(tmpl) == SM_PRIVATE) {
2633 				/*
2634 				 * New entry needs a non-null object.
2635 				 * tmpl->object may be NULL or have no fill pattern,
2636 				 * in which case the caller wants a non-null
2637 				 * object with no resident pages.
2638 				 */
2639 				vm_object_checker_t *obj_checker =
2640 				    make_anonymous_object_checker(checker_list,
2641 				    checker->object_offset + checker->size);
2642 				if (tmpl->object) {
2643 					obj_checker->fill_pattern = tmpl->object->fill_pattern;
2644 					write_fill_pattern(checker->address, checker->size,
2645 					    obj_checker->fill_pattern);
2646 				} else {
2647 					/*
2648 					 * no object template: fill with zeros
2649 					 * to get a vm object, then kill its pages.
2650 					 */
2651 					write_fill_pattern(checker->address, checker->size,
2652 					    (fill_pattern_t){Fill, 0});
2653 					kr = mach_vm_behavior_set(mach_task_self(),
2654 					    checker->address, checker->size, VM_BEHAVIOR_FREE);
2655 					assert(kr == 0);
2656 					kr = mach_vm_behavior_set(mach_task_self(),
2657 					    checker->address, checker->size, VM_BEHAVIOR_PAGEOUT);
2658 					assert(kr == 0);
2659 				}
2660 				checker_set_object(checker, obj_checker);
2661 			} else if (tmpl->object != NULL) {
2662 				/*
2663 				 * New entry needs a real object for COW.
2664 				 * (SM_SHARED was handled above)
2665 				 */
2666 				assert(template_real_share_mode(tmpl) == SM_COW);
2667 				kr = mach_vm_copy(mach_task_self(),
2668 				    object_scratch->allocated_address + tmpl->offset,
2669 				    tmpl->size, allocated_address);
2670 				assert(kr == 0);
2671 				checker_set_object(checker, object_scratch->checker);
2672 				checker->needs_copy = true;
2673 			}
2674 
2675 			if (protect_last) {
2676 				/*
2677 				 * Set protection and max_protection
2678 				 * if we couldn't do it up front.
2679 				 */
2680 				kr = mach_vm_protect(mach_task_self(),
2681 				    allocated_address, tmpl->size, false /*set_max*/, tmpl->protection);
2682 				assert(kr == 0);
2683 				kr = mach_vm_protect(mach_task_self(),
2684 				    allocated_address, tmpl->size, true /*set_max*/, tmpl->max_protection);
2685 				assert(kr == 0);
2686 			}
2687 		} else if (template_real_share_mode(tmpl) == SM_PRIVATE) {
2688 			/*
2689 			 * New entry's object is private non-anonymous memory
2690 			 * TODO named entries
2691 			 */
2692 			T_FAIL("unexpected/unimplemented: non-anonymous memory unimplemented");
2693 		} else {
2694 			T_FAIL("unexpected/unimplemented: unrecognized share mode");
2695 		}
2696 	}
2697 
2698 	/*
2699 	 * All entries now have their objects set.
2700 	 * Deallocate temporary storage for shared objects.
2701 	 * Do this before verifying share_mode: any sharing from
2702 	 * the temporary object storage itself should not count.
2703 	 */
2704 	for (unsigned i = 0; i < object_template_count; i++) {
2705 		object_scratch_t *object_scratch = &new_objects[i];
2706 
2707 		if (object_scratch->allocated_address > 0) {
2708 			kr = mach_vm_deallocate(mach_task_self(),
2709 			    object_scratch->allocated_address,
2710 			    object_scratch->allocated_size);
2711 			assert(kr == 0);
2712 			object_scratch->allocated_address = 0;
2713 			object_scratch->allocated_size = 0;
2714 		}
2715 	}
2716 
2717 	/*
2718 	 * All of the entries and checkers are in place.
2719 	 * Now set each entry's properties.
2720 	 */
2721 	for (unsigned i = 0; i < entry_template_count; i++) {
2722 		const vm_entry_template_t *tmpl = &entry_templates[i];
2723 		vm_entry_checker_t *checker =
2724 		    checker_list_nth(checker_list, i);
2725 
2726 		if (tmpl->kind == Hole) {
2727 			continue;  /* nothing else to do for holes */
2728 		}
2729 		if (tmpl->kind == Submap) {
2730 			continue;  /* nothing else to do for submaps */
2731 		}
2732 		assert(tmpl->kind == Allocation);
2733 
2734 		/* user_tag - already set */
2735 
2736 		/* permanent - already set */
2737 
2738 		/*
2739 		 * protection, max_protection - already set
2740 		 * We set these in mach_vm_map() because setting default
2741 		 * values in mach_vm_map() and then adjusting them with
2742 		 * mach_vm_protect() is less capable.
2743 		 */
2744 
2745 		/* inheritance */
2746 		if (tmpl->inheritance != VM_INHERIT_DEFAULT) {
2747 			kr = mach_vm_inherit(mach_task_self(),
2748 			    checker->address, checker->size,
2749 			    tmpl->inheritance);
2750 			assert(kr == 0);
2751 			checker->inheritance = tmpl->inheritance;
2752 		}
2753 
2754 		/* behavior */
2755 		if (tmpl->behavior != VM_BEHAVIOR_DEFAULT) {
2756 			checker->behavior = tmpl->behavior;
2757 			kr = mach_vm_behavior_set(mach_task_self(),
2758 			    checker->address, checker->size, tmpl->behavior);
2759 			assert(kr == 0);
2760 		}
2761 
2762 		/* user_wired_count */
2763 		if (tmpl->user_wired_count > 0) {
2764 			checker_resolve_null_vm_object(checker_list, checker);
2765 			checker->user_wired_count = tmpl->user_wired_count;
2766 			for (uint16_t w = 0; w < tmpl->user_wired_count; w++) {
2767 				kr = mach_vm_wire(host_priv(), mach_task_self(),
2768 				    checker->address, checker->size, VM_PROT_READ);
2769 				assert(kr == 0);
2770 			}
2771 		}
2772 
2773 		/*
2774 		 * Verify that the template's share mode matches
2775 		 * the checker's share mode, after allowing for
2776 		 * some mismatches for usability purposes.
2777 		 * Do this last.
2778 		 */
2779 		assert(template_real_share_mode(tmpl) == checker_share_mode(checker));
2780 	}
2781 
2782 	/* Deallocate any remaining arena space */
2783 	kr = mach_vm_deallocate(mach_task_self(),
2784 	    arena_address, arena_end - arena_address);
2785 	assert(kr == 0);
2786 
2787 	/* Deallocate scratch space */
2788 	free(new_objects);
2789 
2790 	/* Verify that our entries and checkers match. */
2791 	assert(verify_vm_state(checker_list, message));
2792 
2793 	return checker_list;
2794 }
2795 
2796 void
create_vm_state_from_config(vm_config_t * config,checker_list_t ** const out_checker_list,mach_vm_address_t * const out_start_address,mach_vm_address_t * const out_end_address)2797 create_vm_state_from_config(
2798 	vm_config_t *config,
2799 	checker_list_t ** const out_checker_list,
2800 	mach_vm_address_t * const out_start_address,
2801 	mach_vm_address_t * const out_end_address)
2802 {
2803 	checker_list_t *list = create_vm_state(
2804 		config->entry_templates, config->entry_template_count,
2805 		config->object_templates, config->object_template_count,
2806 		config->alignment_mask, "before test");
2807 
2808 	/*
2809 	 * Adjusted start and end address are relative to the
2810 	 * templates' first and last entry (holes ARE included)
2811 	 */
2812 
2813 	*out_start_address = list->entries.head->address + config->start_adjustment;
2814 	*out_end_address = checker_end_address(list->entries.tail) + config->end_adjustment;
2815 	assert(*out_start_address < *out_end_address);
2816 
2817 	*out_checker_list = list;
2818 }
2819 
2820 
2821 /*
2822  * Deallocate the real memory and update the checker for the end of a test.
2823  * The checker itself may be deallocated and replaced.
2824  */
2825 static void
checker_deallocate_allocation(checker_list_t * list,vm_entry_checker_t * checker)2826 checker_deallocate_allocation(checker_list_t *list, vm_entry_checker_t *checker)
2827 {
2828 	assert(checker->kind == Allocation || checker->kind == Submap);
2829 
2830 	kern_return_t kr = mach_vm_deallocate(mach_task_self(),
2831 	    checker->address, checker->size);
2832 	assert(kr == 0);
2833 
2834 	if (checker->permanent) {
2835 		/* permanent entry becomes inaccessible */
2836 		checker->protection = VM_PROT_NONE;
2837 		checker->max_protection = VM_PROT_NONE;
2838 
2839 		/*
2840 		 * hack: disable verification of some attributes
2841 		 * that verify_vm_faultability perturbed
2842 		 */
2843 		checker->verify.object_attr = false;
2844 		checker->verify.share_mode_attr = false;
2845 		checker->verify.pages_resident_attr = false;
2846 
2847 		/*
2848 		 * Don't verify fill pattern because the verifier
2849 		 * is noisy when the memory is inaccessible.
2850 		 */
2851 		if (checker->object) {
2852 			checker->object->verify.fill_pattern_attr = false;
2853 		}
2854 	} else {
2855 		/* nonpermanent entry becomes a deallocated hole */
2856 		vm_entry_checker_t *new_hole =
2857 		    make_checker_for_hole(checker->address, checker->size);
2858 		checker_list_replace_checker(list, checker, new_hole);
2859 	}
2860 }
2861 
2862 /*
2863  * Deallocate the VM allocations covered by the checkers.
2864  * Updates the checkers so that entry permanence can be verified later.
2865  *
2866  * Not recommended after verification errors because the
2867  * true VM allocations may not match the checkers' list.
2868  */
2869 static void
deallocate_vm_allocations(checker_list_t * list)2870 deallocate_vm_allocations(checker_list_t *list)
2871 {
2872 	/* not FOREACH_CHECKER due to use-after-free */
2873 	vm_entry_checker_t *checker = list->entries.head;
2874 	vm_entry_checker_t *end = list->entries.tail->next;
2875 	while (checker != end) {
2876 		vm_entry_checker_t *next = checker->next;
2877 
2878 		if (checker->kind == Allocation || checker->kind == Submap) {
2879 			checker_deallocate_allocation(list, checker);
2880 		}
2881 
2882 		checker = next;
2883 	}
2884 }
2885 
2886 static void
learn_object_id(checker_list_t * checker_list,vm_object_checker_t * obj_checker,uint64_t object_id,vm_entry_attribute_list_t * const out_bad_entry_attr,vm_object_attribute_list_t * const out_bad_object_attr,const char * message)2887 learn_object_id(
2888 	checker_list_t *checker_list,
2889 	vm_object_checker_t *obj_checker,
2890 	uint64_t object_id,
2891 	vm_entry_attribute_list_t * const out_bad_entry_attr,
2892 	vm_object_attribute_list_t * const out_bad_object_attr,
2893 	const char *message)
2894 {
2895 	assert(obj_checker->object_id_mode != object_has_known_id);
2896 
2897 	if (find_object_checker_for_object_id(checker_list, object_id)) {
2898 		/*
2899 		 * This object should have its own id,
2900 		 * but we already have another object
2901 		 * checker with this id. That's bad.
2902 		 */
2903 		T_FAIL("%s: wrong object id (expected new id, got existing id)", message);
2904 		out_bad_entry_attr->object_attr = true;
2905 		out_bad_object_attr->object_id_attr = true;
2906 	} else {
2907 		/*
2908 		 * Remember this object id.
2909 		 * If other entries should have the same object
2910 		 * but don't then the mismatch will be
2911 		 * detected when they are verified.
2912 		 */
2913 		obj_checker->object_id_mode = object_has_known_id;
2914 		obj_checker->object_id = object_id;
2915 	}
2916 }
2917 
2918 /*
2919  * Verify VM state of an address range that is expected to be an allocation.
2920  * Returns true if it looks correct.
2921  * T_FAILs and logs details and returns false if it looks wrong.
2922  */
static bool
verify_allocation(
	checker_list_t *checker_list,
	vm_entry_checker_t *checker,
	const char *message)
{
	/*
	 * Accumulate the mismatched attributes here; they are reported
	 * together via warn_bad_checker() before returning false.
	 */
	vm_entry_attribute_list_t bad_entry_attr =
	    vm_entry_attributes_with_default(false);
	vm_object_attribute_list_t bad_object_attr =
	    vm_object_attributes_with_default(false);

	assert(checker->kind == Allocation || checker->kind == Submap);

	/* Call vm_region to get the actual VM state */
	mach_vm_address_t actual_address = checker->address;
	mach_vm_size_t actual_size = 0;
	vm_region_submap_info_data_64_t info;
	if (!get_info_for_address(&actual_address, &actual_size, &info, checker->submap_depth)) {
		/* address was unmapped - not a valid allocation */
		if (checker->submap_depth && is_mapped(checker->address, 0)) {
			/* address was mapped, but checker wanted a submap */
			T_FAIL("%s: allocation was expected to be in a submap", message);
		} else {
			/* address was unmapped at every submap depth */
			T_FAIL("%s: allocation was not mapped", message);
		}
		bad_entry_attr.is_submap_attr = true;
		bad_entry_attr.submap_depth_attr = true;
		warn_bad_checker(checker, bad_entry_attr, bad_object_attr, message);
		return false;
	}

	/*
	 * Report any differences between the checker and the actual state.
	 * Each comparison below is individually gated by the checker's
	 * verify flags, so tests can suppress checks they perturb.
	 */

	if (actual_address != checker->address ||
	    actual_size != checker->size) {
		/* address is mapped, but region doesn't match template exactly */
		T_FAIL("%s: entry bounds did not match", message);
		bad_entry_attr.address_attr = true;
		bad_entry_attr.size_attr = true;
	}

	if (checker->verify.protection_attr &&
	    info.protection != checker->protection) {
		T_FAIL("%s: wrong protection", message);
		bad_entry_attr.protection_attr = true;
	}
	if (checker->verify.max_protection_attr &&
	    info.max_protection != checker->max_protection) {
		T_FAIL("%s: wrong max protection", message);
		bad_entry_attr.max_protection_attr = true;
	}
	if (checker->verify.inheritance_attr &&
	    info.inheritance != checker->inheritance) {
		T_FAIL("%s: wrong inheritance", message);
		bad_entry_attr.inheritance_attr = true;
	}
	if (checker->verify.behavior_attr &&
	    info.behavior != checker->behavior) {
		T_FAIL("%s: wrong behavior", message);
		bad_entry_attr.behavior_attr = true;
	}
	if (checker->verify.user_wired_count_attr &&
	    info.user_wired_count != checker->user_wired_count) {
		T_FAIL("%s: wrong user wired count", message);
		bad_entry_attr.user_wired_count_attr = true;
	}
	if (checker->verify.user_tag_attr &&
	    info.user_tag != checker->user_tag) {
		T_FAIL("%s: wrong user tag", message);
		bad_entry_attr.user_tag_attr = true;
	}
	if (checker->verify.is_submap_attr &&
	    info.is_submap != checker_is_submap(checker)) {
		T_FAIL("%s: wrong is_submap", message);
		bad_entry_attr.is_submap_attr = true;
		bad_entry_attr.submap_depth_attr = true;
	}

	if (checker->verify.object_offset_attr &&
	    info.offset != checker->object_offset) {
		T_FAIL("%s: wrong object offset", message);
		bad_entry_attr.object_offset_attr = true;
	}

	/* Verify the entry's VM object against its object checker. */
	if (checker->verify.object_attr) {
		vm_object_checker_t *obj_checker = checker->object;
		assert(obj_checker != NULL);
		assert(obj_checker->kind != Deinited);

		unsigned vm_region_ref_count = object_checker_get_vm_region_ref_count(obj_checker);
		unsigned shadow_depth = object_checker_get_shadow_depth(obj_checker);

		if (obj_checker->verify.object_id_attr) {
			switch (obj_checker->object_id_mode) {
			case object_is_unknown:
				/* first sighting: record the actual id for later comparisons */
				learn_object_id(checker_list, obj_checker, info.object_id_full,
				    &bad_entry_attr, &bad_object_attr, message);
				break;
			case object_has_unknown_nonnull_id:
				/*
				 * We don't know the right object id,
				 * but we know that zero is wrong.
				 */
				if (info.object_id_full == 0) {
					T_FAIL("%s: wrong object id (expected nonzero)", message);
					bad_entry_attr.object_attr = true;
					bad_object_attr.object_id_attr = true;
					break;
				}
				learn_object_id(checker_list, obj_checker, info.object_id_full,
				    &bad_entry_attr, &bad_object_attr, message);
				break;
			case object_has_known_id:
				if (info.object_id_full != obj_checker->object_id) {
					T_FAIL("%s: wrong object id", message);
					bad_entry_attr.object_attr = true;
					bad_object_attr.object_id_attr = true;
				}
				break;
			}
		}

		/*
		 * can't check object's true size, but we can
		 * check that it is big enough for this vm entry
		 */
		if (obj_checker->verify.size_attr &&
		    info.offset + actual_size > obj_checker->size) {
			T_FAIL("%s: entry extends beyond object's expected size", message);
			bad_entry_attr.object_attr = true;
			bad_object_attr.size_attr = true;
		}

		if (obj_checker->verify.ref_count_attr &&
		    info.ref_count != vm_region_ref_count) {
			T_FAIL("%s: wrong object ref count (want %u got %u)",
			    message, vm_region_ref_count, info.ref_count);
			bad_entry_attr.object_attr = true;
			bad_object_attr.ref_count_attr = true;
		}

		if (obj_checker->verify.shadow_depth_attr &&
		    info.shadow_depth != shadow_depth) {
			T_FAIL("%s: wrong object shadow depth (want %u got %u)",
			    message, shadow_depth, info.shadow_depth);
			bad_entry_attr.object_attr = true;
			bad_object_attr.shadow_depth_attr = true;
		}

		/* Verify fill pattern after checking the rest of the object */
		if (!obj_checker->verify.fill_pattern_attr) {
			/* fill pattern check disabled */
		} else if (bad_entry_attr.address_attr || bad_entry_attr.size_attr) {
			/* don't try to verify fill if the address or size were bad */
		} else if (obj_checker->fill_pattern.mode == DontFill) {
			/* no fill pattern set, don't verify it */
		} else if (!(info.protection & VM_PROT_READ)) {
			/* protection disallows read, can't verify fill pattern */
			T_LOG("note: %s: can't verify fill pattern of unreadable memory (%s/%s)",
			    message, name_for_prot(info.protection), name_for_prot(info.max_protection));
		} else {
			/* verify the fill pattern */
			mach_vm_address_t first_bad_address;
			if (!verify_fill_pattern(actual_address, actual_size,
			    obj_checker->fill_pattern, &first_bad_address)) {
				T_FAIL("%s: wrong fill at address 0x%llx "
				    "(expected 0x%016llx, got 0x%016llx)",
				    message, first_bad_address,
				    obj_checker->fill_pattern.pattern,
				    *(uint64_t *)first_bad_address);
				bad_entry_attr.object_attr = true;
				bad_object_attr.fill_pattern_attr = true;
			}
		}
	}

	/* do this after checking the object */
	if (checker->verify.share_mode_attr &&
	    !same_share_mode(&info, checker)) {
		T_FAIL("%s: wrong share mode", message);
		bad_entry_attr.share_mode_attr = true;
	}

	/* do this after checking the object */
	if (checker->verify.pages_resident_attr &&
	    info.pages_resident != checker->pages_resident) {
		T_FAIL("%s: wrong pages resident count (want %d, got %d)",
		    message, checker->pages_resident, info.pages_resident);
		bad_entry_attr.pages_resident_attr = true;
	}

	/*
	 * checker->permanent can only be tested destructively.
	 * We don't verify it until the end of the test.
	 */

	if (bad_entry_attr.bits != 0 || bad_object_attr.bits != 0) {
		warn_bad_checker(checker, bad_entry_attr, bad_object_attr, message);
		return false;
	}

	return true;
}
3127 
3128 
3129 /*
3130  * Verify VM state of an address range that is
3131  * expected to be an unallocated hole.
3132  * Returns true if it looks correct.
3133  * T_FAILs and logs details and returns false if it looks wrong.
3134  */
3135 static bool
verify_hole(vm_entry_checker_t * checker,const char * message)3136 verify_hole(vm_entry_checker_t *checker, const char *message)
3137 {
3138 	bool good = true;
3139 
3140 	assert(checker->kind == Hole);
3141 
3142 	/* zero-size hole is always presumed valid */
3143 	if (checker->size == 0) {
3144 		return true;
3145 	}
3146 
3147 	mach_vm_address_t actual_address = checker->address;
3148 	mach_vm_size_t actual_size = 0;
3149 	vm_region_submap_info_data_64_t info;
3150 	if (get_info_for_address_fast(&actual_address, &actual_size, &info)) {
3151 		/* address was mapped - not a hole */
3152 		T_FAIL("%s: expected hole is not a hole", message);
3153 		good = false;
3154 	} else if (actual_address < checker_end_address(checker)) {
3155 		/* [address, address + size) was partly mapped - not a hole */
3156 		T_FAIL("%s: expected hole is not a hole", message);
3157 		good = false;
3158 	} else {
3159 		/* [address, address + size) was entirely unmapped */
3160 	}
3161 
3162 	if (!good) {
3163 		warn_bad_checker(checker,
3164 		    vm_entry_attributes_with_default(true),
3165 		    vm_object_attributes_with_default(true),
3166 		    message);
3167 	}
3168 	return good;
3169 }
3170 
/*
 * Verify every checker in checker_list against the actual VM state,
 * recursing into submap entries (in_submap is true for those
 * recursive calls). All checkers are verified even after a failure,
 * so one run reports every mismatch.
 * Returns TestSucceeded if everything matched, TestFailed otherwise.
 */
test_result_t
verify_vm_state_nested(checker_list_t *checker_list, bool in_submap, const char *message)
{
	bool good = true;

	if (Verbose) {
		T_LOG("*** %s: verifying vm entries %s ***",
		    message, in_submap ? "(in submap) " : "");
	}

	vm_entry_checker_t *last_checked = NULL;
	FOREACH_CHECKER(checker, checker_list->entries) {
		last_checked = checker;

		switch (checker->kind) {
		case Allocation:
			/* accumulate failures; don't stop at the first one */
			good &= verify_allocation(checker_list, checker, message);
			break;
		case Hole:
			good &= verify_hole(checker, message);
			break;
		case Submap: {
			/* Verify the submap entry in the parent map. */
			good &= verify_allocation(checker_list, checker, message);

			/* Verify the submap's contents. */

			/*
			 * Adjust the submap content checkers to match
			 * vm_region output within this submap entry.
			 * Undo those adjustments at end of scope.
			 * (DEFER_UNSLIDE / DEFER_UNTWEAK attach cleanup
			 * to these variables; `tweaks` exists only so its
			 * cleanup runs when this scope exits.)
			 */
			checker_list_t *submap_checkers DEFER_UNSLIDE =
			    checker_get_and_slide_submap_checkers(checker);
			checker_list_tweaks_t tweaks DEFER_UNTWEAK =
			    submap_checkers_tweak_for_vm_region(submap_checkers, checker);

			good &= verify_vm_state_nested(submap_checkers, true, message);
			break;
		}
		case EndEntries:
		default:
			assert(0);
		}
	}
	/* sanity: the loop must have visited through the tail checker */
	assert(last_checked == checker_list->entries.tail);

	if (in_submap) {
		/* don't dump submap alone, wait until we're back at the top level */
	} else if (!good || Verbose) {
		T_LOG("*** %s: all expected ***", message);
		dump_checker_range(checker_list->entries);
		T_LOG("*** %s: all actual ***", message);
		dump_region_info_for_entries(checker_list->entries);
	}

	return good ? TestSucceeded : TestFailed;
}
3229 
3230 test_result_t
verify_vm_state(checker_list_t * checker_list,const char * message)3231 verify_vm_state(checker_list_t *checker_list, const char *message)
3232 {
3233 	assert(!checker_list->is_slid);
3234 	return verify_vm_state_nested(checker_list, false, message);
3235 }
3236 
3237 
/*
 * Get the expected errors for read and write faults
 * inside the given checker's memory.
 * The expected error values are:
 *     0                        (mapped and readable / writeable)
 *     KERN_PROTECTION_FAILURE  (mapped but not readable / writeable)
 *     KERN_INVALID_ADDRESS     (unmapped)
 */
3246 static void
get_expected_errors_for_faults(vm_entry_checker_t * checker,kern_return_t * const out_read_error,kern_return_t * const out_write_error)3247 get_expected_errors_for_faults(
3248 	vm_entry_checker_t *checker,
3249 	kern_return_t * const out_read_error,
3250 	kern_return_t * const out_write_error)
3251 {
3252 	switch (checker->kind) {
3253 	case Allocation:
3254 		/* mapped: error is either none or protection failure */
3255 		switch (checker->protection & (VM_PROT_READ | VM_PROT_WRITE)) {
3256 		case VM_PROT_READ | VM_PROT_WRITE:
3257 			/* mapped, read/write */
3258 			*out_read_error = 0;
3259 			*out_write_error = 0;
3260 			break;
3261 		case VM_PROT_READ:
3262 			/* mapped, read-only */
3263 			*out_read_error = 0;
3264 			*out_write_error = KERN_PROTECTION_FAILURE;
3265 			break;
3266 		case VM_PROT_WRITE:
3267 			/* mapped, "write-only" but inaccessible to faults */
3268 			*out_read_error = KERN_PROTECTION_FAILURE;
3269 			*out_write_error = KERN_PROTECTION_FAILURE;
3270 			break;
3271 		case 0:
3272 			/* mapped, inaccessible */
3273 			*out_read_error = KERN_PROTECTION_FAILURE;
3274 			*out_write_error = KERN_PROTECTION_FAILURE;
3275 			break;
3276 		default:
3277 			T_FAIL("unexpected protection %s", name_for_prot(checker->protection));
3278 		}
3279 		break;
3280 	case Hole:
3281 		/* unmapped: error is invalid address */
3282 		*out_read_error = KERN_INVALID_ADDRESS;
3283 		*out_write_error = KERN_INVALID_ADDRESS;
3284 		break;
3285 	case EndEntries:
3286 	default:
3287 		assert(0);
3288 	}
3289 }
3290 
3291 
3292 static fill_pattern_t
checker_fill_pattern(vm_entry_checker_t * checker)3293 checker_fill_pattern(vm_entry_checker_t *checker)
3294 {
3295 	if (checker->object == NULL) {
3296 		return (fill_pattern_t){ .mode = DontFill, .pattern = 0 };
3297 	}
3298 	return checker->object->fill_pattern;
3299 }
3300 
3301 static bool
checker_should_verify_fill_pattern(vm_entry_checker_t * checker)3302 checker_should_verify_fill_pattern(vm_entry_checker_t *checker)
3303 {
3304 	return checker->verify.object_attr &&
3305 	       checker->object != NULL &&
3306 	       checker->object->verify.fill_pattern_attr &&
3307 	       checker->object->fill_pattern.mode == Fill;
3308 }
3309 
3310 /*
3311  * Verify read and/or write faults on every page of checker's address range.
3312  */
3313 bool
verify_checker_faultability(vm_entry_checker_t * checker,const char * message,bool verify_reads,bool verify_writes)3314 verify_checker_faultability(
3315 	vm_entry_checker_t *checker,
3316 	const char *message,
3317 	bool verify_reads,
3318 	bool verify_writes)
3319 {
3320 	return verify_checker_faultability_in_address_range(checker, message,
3321 	           verify_reads, verify_writes, checker->address, checker->size);
3322 }
3323 
/*
 * Verify read and/or write faults on every page of
 * [checked_address, checked_address + checked_size), which must lie
 * entirely within checker's range.
 * Returns true if every probed page behaved as expected;
 * T_FAILs, logs the checker, and returns false on the first mismatch.
 */
bool
verify_checker_faultability_in_address_range(
	vm_entry_checker_t *checker,
	const char *message,
	bool verify_reads,
	bool verify_writes,
	mach_vm_address_t checked_address,
	mach_vm_size_t checked_size)
{
	assert(verify_reads || verify_writes);

	if (Verbose) {
		const char *faults;
		if (verify_reads && verify_writes) {
			faults = "read and write";
		} else if (verify_reads) {
			faults = "read";
		} else {
			faults = "write";
		}
		T_LOG("%s: trying %s faults in [0x%llx..0x%llx)",
		    message, faults, checked_address, checked_address + checked_size);
	}

	/* range to be checked must fall within the checker */
	assert(checked_size > 0);
	assert(checker_contains_address(checker, checked_address));
	assert(checker_contains_address(checker, checked_address + checked_size - 1));

	/* read and write use the fill pattern if any */
	fill_pattern_t fill_pattern = checker_fill_pattern(checker);
	bool enforce_expected_byte = checker_should_verify_fill_pattern(checker);
	/* first in-memory byte of the 64-bit fill pattern, per host endianness */
#if BYTE_ORDER == LITTLE_ENDIAN
	uint8_t expected_byte = fill_pattern.pattern & 0xff;
#else
	uint8_t expected_byte = fill_pattern.pattern >> 56;
#endif

	bool good = true;
	kern_return_t expected_read_error, expected_write_error;
	get_expected_errors_for_faults(checker,
	    &expected_read_error, &expected_write_error);

	/* Probe one byte per page; stop at the first mismatch. */
	mach_vm_address_t start = checked_address;
	mach_vm_address_t end = checked_address + checked_size;
	for (mach_vm_address_t addr = start; addr < end; addr += PAGE_SIZE) {
		if (verify_reads) {
			uint8_t actual_byte;
			kern_return_t actual_read_error;
			try_read_byte(addr, &actual_byte, &actual_read_error);
			if (expected_read_error != actual_read_error) {
				T_FAIL("%s: wrong error %d %s (expected %d %s) "
				    "when reading from address 0x%llx",
				    message, actual_read_error, name_for_kr(actual_read_error),
				    expected_read_error, name_for_kr(expected_read_error), addr);
				good = false;
				break;
			}
			/* only compare contents when the read actually succeeded */
			if (enforce_expected_byte &&
			    actual_read_error == KERN_SUCCESS &&
			    expected_byte != actual_byte) {
				T_FAIL("%s: wrong byte 0x%hhx (expected 0x%hhx) "
				    "read from address 0x%llx",
				    message, actual_byte, expected_byte, addr);
				good = false;
				break;
			}
		}

		if (verify_writes) {
			/* write the fill byte back so the pattern stays intact */
			kern_return_t actual_write_error;
			try_write_byte(addr, expected_byte, &actual_write_error);
			if (expected_write_error != actual_write_error) {
				T_FAIL("%s: wrong error %d %s (expected %d %s) "
				    "when writing to address 0x%llx",
				    message, actual_write_error, name_for_kr(actual_write_error),
				    expected_write_error, name_for_kr(expected_write_error), addr);
				good = false;
				break;
			}
		}
	}

	if (!good) {
		warn_bad_checker(checker,
		    vm_entry_attributes_with_default(true),
		    vm_object_attributes_with_default(true),
		    message);
	}

	return good;
}
3416 
3417 
3418 static test_result_t
verify_vm_faultability_nested(checker_list_t * checker_list,const char * message,bool verify_reads,bool verify_writes,bool in_submap)3419 verify_vm_faultability_nested(
3420 	checker_list_t *checker_list,
3421 	const char *message,
3422 	bool verify_reads,
3423 	bool verify_writes,
3424 	bool in_submap)
3425 {
3426 	bool good = true;
3427 
3428 	if (Verbose) {
3429 		T_LOG("*** %s: verifying vm faultability %s ***",
3430 		    message, in_submap ? "(in submap) " : "");
3431 	}
3432 
3433 	FOREACH_CHECKER(checker, checker_list->entries) {
3434 		bool really_verify_writes = verify_writes;
3435 
3436 		if (prot_contains_all(checker->protection, VM_PROT_READ | VM_PROT_WRITE)) {
3437 			/*
3438 			 * Don't try writing to "writeable" submap allocations.
3439 			 * That provokes unnesting which confuses us, because
3440 			 * we don't update the checkers for that unnesting here.
3441 			 * TODO: implement write fault testing in writeable submaps
3442 			 */
3443 			if (checker_is_submap(checker)) {
3444 				/* checker is parent map's submap entry with +rw */
3445 				really_verify_writes = false;
3446 			} else if (in_submap) {
3447 				/* checker is submap contents with +rw */
3448 				really_verify_writes = false;
3449 			}
3450 		}
3451 
3452 		/* Read and/or write from the checker's memory. */
3453 
3454 		if (checker_is_submap(checker)) {
3455 			/* Verify based on submap contents. */
3456 			T_QUIET; T_ASSERT_FALSE(in_submap, "nested submaps not allowed");
3457 
3458 			/*
3459 			 * Adjust the submap content checkers to match
3460 			 * vm_region output within this submap entry.
3461 			 * Undo those adjustments at end of scope.
3462 			 */
3463 			checker_list_t *submap_checkers DEFER_UNSLIDE =
3464 			    checker_get_and_slide_submap_checkers(checker);
3465 			checker_list_tweaks_t tweaks DEFER_UNTWEAK =
3466 			    submap_checkers_tweak_for_vm_region(submap_checkers, checker);
3467 
3468 			good &= verify_vm_faultability_nested(submap_checkers, message,
3469 			    verify_reads, really_verify_writes, true /* in_submap */);
3470 		} else {
3471 			good &= verify_checker_faultability(checker,
3472 			    message, verify_reads, verify_writes);
3473 		}
3474 	}
3475 
3476 	if (in_submap) {
3477 		/* don't dump submap alone, wait until we're back at the top level */
3478 	} else if (!good || Verbose) {
3479 		T_LOG("*** %s: all expected ***", message);
3480 		dump_checker_range(checker_list->entries);
3481 		T_LOG("*** %s: all actual ***", message);
3482 		dump_region_info_for_entries(checker_list->entries);
3483 	}
3484 
3485 	return good ? TestSucceeded : TestFailed;
3486 }
3487 
3488 test_result_t
verify_vm_faultability(checker_list_t * checker_list,const char * message,bool verify_reads,bool verify_writes)3489 verify_vm_faultability(
3490 	checker_list_t *checker_list,
3491 	const char *message,
3492 	bool verify_reads,
3493 	bool verify_writes)
3494 {
3495 	return verify_vm_faultability_nested(checker_list, message,
3496 	           verify_reads, verify_writes, false /* in_submap */);
3497 }
3498 
3499 
3500 /* Inserts new_left to the left of old_right. */
3501 static void
checker_insert_left(vm_entry_checker_t * new_left,vm_entry_checker_t * old_right)3502 checker_insert_left(
3503 	vm_entry_checker_t *new_left,
3504 	vm_entry_checker_t *old_right)
3505 {
3506 	assert(new_left);
3507 	assert(old_right);
3508 
3509 	new_left->prev = old_right->prev;
3510 	new_left->next = old_right;
3511 
3512 	old_right->prev = new_left;
3513 	if (new_left->prev) {
3514 		new_left->prev->next = new_left;
3515 	}
3516 }
3517 
3518 /* Inserts new_right to the right of old_left. */
3519 static void
checker_insert_right(vm_entry_checker_t * old_left,vm_entry_checker_t * new_right)3520 checker_insert_right(
3521 	vm_entry_checker_t *old_left,
3522 	vm_entry_checker_t *new_right)
3523 {
3524 	assert(old_left);
3525 	assert(new_right);
3526 
3527 	new_right->prev = old_left;
3528 	new_right->next = old_left->next;
3529 
3530 	old_left->next = new_right;
3531 	if (new_right->next) {
3532 		new_right->next->prev = new_right;
3533 	}
3534 }
3535 
3536 /*
3537  * Split a checker into two checkers at an address.
3538  * On entry, the checker has already been cloned into two identical checkers.
3539  * This function modifies the clones to make two separate checkers.
3540  */
3541 static void
checker_split_clones(vm_entry_checker_t * left,vm_entry_checker_t * right,mach_vm_address_t split)3542 checker_split_clones(
3543 	vm_entry_checker_t *left,
3544 	vm_entry_checker_t *right,
3545 	mach_vm_address_t split)
3546 {
3547 	mach_vm_address_t start = left->address;
3548 	mach_vm_address_t end = checker_end_address(left);
3549 
3550 	assert(split > start);
3551 	assert(split < end);
3552 
3553 	assert(left->next == right);
3554 	assert(right->prev == left);
3555 
3556 	left->address = start;
3557 	left->size = split - start;
3558 	right->address = split;
3559 	right->size = end - split;
3560 
3561 	right->object_offset = left->object_offset + left->size;
3562 }
3563 
3564 vm_entry_checker_t *
checker_clip_right(checker_list_t * list,vm_entry_checker_t * left,mach_vm_address_t split)3565 checker_clip_right(
3566 	checker_list_t *list,
3567 	vm_entry_checker_t *left,
3568 	mach_vm_address_t split)
3569 {
3570 	if (split > left->address && split < checker_end_address(left)) {
3571 		vm_entry_checker_t *right = checker_clone(left);
3572 		checker_insert_right(left, right);
3573 		checker_split_clones(left, right, split);
3574 		if (list && list->entries.tail == left) {
3575 			list->entries.tail = right;
3576 		}
3577 		return right;
3578 	}
3579 	return NULL;
3580 }
3581 
3582 vm_entry_checker_t *
checker_clip_left(checker_list_t * list,vm_entry_checker_t * right,mach_vm_address_t split)3583 checker_clip_left(
3584 	checker_list_t *list,
3585 	vm_entry_checker_t *right,
3586 	mach_vm_address_t split)
3587 {
3588 	if (split > right->address && split < checker_end_address(right)) {
3589 		vm_entry_checker_t *left = checker_clone(right);
3590 		checker_insert_left(left, right);
3591 		checker_split_clones(left, right, split);
3592 		if (list && list->entries.head == right) {
3593 			list->entries.head = left;
3594 		}
3595 		return left;
3596 	}
3597 	return NULL;
3598 }
3599 
3600 static entry_checker_range_t
checker_list_try_find_range_including_holes(checker_list_t * list,mach_vm_address_t start,mach_vm_size_t size)3601 checker_list_try_find_range_including_holes(
3602 	checker_list_t *list,
3603 	mach_vm_address_t start,
3604 	mach_vm_size_t size)
3605 {
3606 	mach_vm_address_t end = start + size;
3607 	vm_entry_checker_t *first = NULL;
3608 	vm_entry_checker_t *last = NULL;
3609 
3610 	assert(start >= list->entries.head->address);
3611 	assert(end <= checker_end_address(list->entries.tail));
3612 	assert(end >= start);
3613 
3614 	FOREACH_CHECKER(checker, list->entries) {
3615 		/* find the first entry that ends after the start address */
3616 		if (checker_end_address(checker) > start && !first) {
3617 			first = checker;
3618 		}
3619 		/* find the last entry that begins before the end address */
3620 		if (checker->address < end) {
3621 			last = checker;
3622 		}
3623 	}
3624 
3625 	return (entry_checker_range_t){ first, last };
3626 }
3627 
3628 entry_checker_range_t
checker_list_find_range_including_holes(checker_list_t * list,mach_vm_address_t start,mach_vm_size_t size)3629 checker_list_find_range_including_holes(
3630 	checker_list_t *list,
3631 	mach_vm_address_t start,
3632 	mach_vm_size_t size)
3633 {
3634 	entry_checker_range_t result =
3635 	    checker_list_try_find_range_including_holes(list, start, size);
3636 	vm_entry_checker_t *first = result.head;
3637 	vm_entry_checker_t *last = result.tail;
3638 
3639 	assert(first && last);
3640 	assert(first->address <= last->address);
3641 
3642 	return result;
3643 }
3644 
3645 entry_checker_range_t
checker_list_find_range(checker_list_t * list,mach_vm_address_t start,mach_vm_size_t size)3646 checker_list_find_range(
3647 	checker_list_t *list,
3648 	mach_vm_address_t start,
3649 	mach_vm_size_t size)
3650 {
3651 	entry_checker_range_t result =
3652 	    checker_list_find_range_including_holes(list, start, size);
3653 
3654 	FOREACH_CHECKER(checker, result) {
3655 		assert(checker->kind != Hole);
3656 	}
3657 
3658 	return result;
3659 }
3660 
3661 vm_entry_checker_t *
checker_list_find_checker(checker_list_t * list,mach_vm_address_t addr)3662 checker_list_find_checker(checker_list_t *list, mach_vm_address_t addr)
3663 {
3664 	entry_checker_range_t found =
3665 	    checker_list_try_find_range_including_holes(list, addr, 0);
3666 	vm_entry_checker_t *checker = found.head;
3667 
3668 	if (!checker) {
3669 		return NULL;
3670 	}
3671 	if (addr < checker->address || addr >= checker_end_address(checker)) {
3672 		return NULL;
3673 	}
3674 
3675 	return checker;
3676 }
3677 
3678 vm_entry_checker_t *
checker_list_find_allocation(checker_list_t * list,mach_vm_address_t addr)3679 checker_list_find_allocation(checker_list_t *list, mach_vm_address_t addr)
3680 {
3681 	vm_entry_checker_t *checker = checker_list_find_checker(list, addr);
3682 
3683 	if (checker->kind != Allocation) {
3684 		return NULL;
3685 	}
3686 
3687 	return checker;
3688 }
3689 
3690 entry_checker_range_t
checker_list_find_and_clip(checker_list_t * list,mach_vm_address_t start,mach_vm_size_t size)3691 checker_list_find_and_clip(
3692 	checker_list_t *list,
3693 	mach_vm_address_t start,
3694 	mach_vm_size_t size)
3695 {
3696 	entry_checker_range_t limit = checker_list_find_range(list, start, size);
3697 	checker_clip_left(list, limit.head, start);
3698 	checker_clip_right(list, limit.tail, start + size);
3699 	return limit;
3700 }
3701 
3702 entry_checker_range_t
checker_list_find_and_clip_including_holes(checker_list_t * list,mach_vm_address_t start,mach_vm_size_t size)3703 checker_list_find_and_clip_including_holes(
3704 	checker_list_t *list,
3705 	mach_vm_address_t start,
3706 	mach_vm_size_t size)
3707 {
3708 	mach_vm_address_t end = start + size;
3709 	entry_checker_range_t limit =
3710 	    checker_list_find_range_including_holes(list, start, size);
3711 
3712 	if (checker_contains_address(limit.head, start)) {
3713 		checker_clip_left(list, limit.head, start);
3714 		assert(limit.head->address == start);
3715 	}
3716 	if (checker_contains_address(limit.tail, end)) {
3717 		checker_clip_right(list, limit.tail, end);
3718 		assert(checker_end_address(limit.tail) == end);
3719 	}
3720 
3721 	return limit;
3722 }
3723 
3724 static bool
can_simplify_kind(vm_entry_checker_t * left,vm_entry_checker_t * right)3725 can_simplify_kind(vm_entry_checker_t *left, vm_entry_checker_t *right)
3726 {
3727 	return (left->kind == Allocation && right->kind == Allocation) ||
3728 	       (left->kind == Submap && right->kind == Submap);
3729 }
3730 
/*
 * Try to merge `right` with its immediate left neighbor.
 * The merge happens only when both checkers have compatible kinds and
 * identical mergeable attributes, and their object offsets are contiguous.
 * No-op if `right` is the list head or the entries differ.
 */
void
checker_simplify_left(checker_list_t * list,vm_entry_checker_t * right)3732 checker_simplify_left(
	checker_list_t *list,
	vm_entry_checker_t *right)
{
	vm_entry_checker_t *left = right->prev;
	if (!left) {
		/* no left neighbor to merge with */
		return;
	}
	/* all of these attributes must match for the entries to coalesce */
	if (can_simplify_kind(left, right) &&
	    left->protection == right->protection &&
	    left->max_protection == right->max_protection &&
	    left->inheritance == right->inheritance &&
	    left->behavior == right->behavior &&
	    left->user_wired_count == right->user_wired_count &&
	    left->user_tag == right->user_tag &&
	    left->submap_depth == right->submap_depth &&
	    left->object == right->object &&
	    left->object_offset + left->size == right->object_offset &&
	    left->permanent == right->permanent) {
		/* kill left and keep right so the simplify loop works unimpeded */
		right->address = left->address;
		right->size += left->size;
		right->object_offset = left->object_offset;

		/* update other properties that may differ */

		if (left->verify.pages_resident_attr != right->verify.pages_resident_attr) {
			T_LOG("note: can't verify page counts after simplify "
			    "merged two entries with different page count verification");
		}
		right->pages_resident += left->pages_resident;

		/*
		 * unlink and free left checker
		 * update the checker list if we are deleting its head
		 */
		right->prev = left->prev;
		if (left->prev) {
			left->prev->next = right;
		}
		if (list->entries.head == left) {
			list->entries.head = right;
		}
		checker_free(left);
	}
}
3778 
3779 void
checker_list_simplify(checker_list_t * list,mach_vm_address_t start,mach_vm_size_t size)3780 checker_list_simplify(
3781 	checker_list_t *list,
3782 	mach_vm_address_t start,
3783 	mach_vm_size_t size)
3784 {
3785 	mach_vm_address_t end = start + size;
3786 	entry_checker_range_t limit = checker_list_find_range_including_holes(list, start, size);
3787 
3788 	/* vm_map_simplify_range() also includes any entry that starts at `end` */
3789 	if (limit.tail && limit.tail->next && limit.tail->next->address == end) {
3790 		limit.tail = limit.tail->next;
3791 	}
3792 
3793 	FOREACH_CHECKER(checker, limit) {
3794 		checker_simplify_left(list, checker);
3795 	}
3796 }
3797 
3798 void
checker_list_replace_range(checker_list_t * list,entry_checker_range_t old_range,entry_checker_range_t new_range)3799 checker_list_replace_range(
3800 	checker_list_t *list,
3801 	entry_checker_range_t old_range,
3802 	entry_checker_range_t new_range)
3803 {
3804 	/* old_range and new_range must coincide */
3805 	assert(checker_range_start_address(old_range) == checker_range_start_address(new_range));
3806 	assert(checker_range_end_address(old_range) == checker_range_end_address(new_range));
3807 
3808 	/*
3809 	 * Unlink old_range and link in new_range.
3810 	 * Update list->entries if necessary.
3811 	 *
3812 	 * before: ... prev old_range next ...
3813 	 * after:  ... prev new_range next ...
3814 	 * a.k.a:  ... prev new_left ... new_right next ...
3815 	 */
3816 	vm_entry_checker_t *prev = old_range.head->prev;
3817 	vm_entry_checker_t *new_left = new_range.head;
3818 	new_left->prev = prev;
3819 	if (prev) {
3820 		prev->next = new_left;
3821 	} else {
3822 		list->entries.head = new_left;
3823 	}
3824 
3825 	vm_entry_checker_t *next = old_range.tail->next;
3826 	vm_entry_checker_t *new_right = new_range.tail;
3827 	new_right->next = next;
3828 	if (next) {
3829 		next->prev = new_right;
3830 	} else {
3831 		list->entries.tail = new_right;
3832 	}
3833 
3834 	/* Destroy the removed entries. */
3835 	/* TODO: update checker state to account for the removal? */
3836 	checker_range_free(old_range);
3837 }
3838 
3839 void
checker_list_free_range(checker_list_t * list,entry_checker_range_t range)3840 checker_list_free_range(
3841 	checker_list_t *list,
3842 	entry_checker_range_t range)
3843 {
3844 	/* Make a new hole checker covering the removed range. */
3845 	vm_entry_checker_t *new_hole = make_checker_for_hole(
3846 		checker_range_start_address(range),
3847 		checker_range_size(range));
3848 	entry_checker_range_t new_range = { new_hole, new_hole };
3849 
3850 	/* Remove checkers in the old range and insert the new hole. */
3851 	checker_list_replace_range(list, range, new_range);
3852 }
3853 
3854 
3855 static bool
checker_has_null_vm_object(vm_entry_checker_t * checker)3856 checker_has_null_vm_object(vm_entry_checker_t *checker)
3857 {
3858 	return object_is_null(checker->object);
3859 }
3860 
3861 void
checker_resolve_null_vm_object(checker_list_t * checker_list,vm_entry_checker_t * checker)3862 checker_resolve_null_vm_object(
3863 	checker_list_t *checker_list,
3864 	vm_entry_checker_t *checker)
3865 {
3866 	if (checker_has_null_vm_object(checker)) {
3867 		/* entry's object offset is reset to zero */
3868 		checker->object_offset = 0;
3869 
3870 		/* entry gets a new object */
3871 		vm_object_checker_t *obj_checker =
3872 		    make_anonymous_object_checker(checker_list, checker->size);
3873 		checker_set_object(checker, obj_checker);
3874 
3875 		/* don't know the object's id yet, but we know it isn't zero */
3876 		obj_checker->object_id_mode = object_has_unknown_nonnull_id;
3877 	}
3878 }
3879 
3880 void
checker_fault_for_prot_not_cow(checker_list_t * checker_list,vm_entry_checker_t * checker,vm_prot_t fault_prot)3881 checker_fault_for_prot_not_cow(
3882 	checker_list_t *checker_list,
3883 	vm_entry_checker_t *checker,
3884 	vm_prot_t fault_prot)
3885 {
3886 	assert(fault_prot != VM_PROT_NONE);
3887 
3888 	/* write fault also requires read permission */
3889 	vm_prot_t required_prot = fault_prot;
3890 	if (prot_contains_all(required_prot, VM_PROT_WRITE)) {
3891 		required_prot |= VM_PROT_READ;
3892 	}
3893 	if (!prot_contains_all(checker->protection, required_prot)) {
3894 		/* access denied */
3895 		return;
3896 	}
3897 
3898 	checker_resolve_null_vm_object(checker_list, checker);
3899 	if (fault_prot & VM_PROT_WRITE) {
3900 		/* cow resolution is hard, don't try it here */
3901 		assert(checker_share_mode(checker) != SM_COW);
3902 	}
3903 
3904 	/* entry is 100% resident */
3905 	checker_set_pages_resident(checker, checker->size / PAGE_SIZE);
3906 }
3907 
/*
 * Model the unnesting of one entry within a submap.
 * Looks up the submap content checker at *inout_next_address inside
 * submap_parent, advances *inout_next_address past it, and — if the
 * content requires unnesting — clips submap_parent to the content's
 * bounds and converts the clipped checker in place into a regular
 * Allocation checker (mirroring vm_map_lookup_and_lock_object(),
 * which also edits the parent map entry in place).
 *
 * unnest_readonly: unnest read-only submap contents too.
 * all_overwritten: caller promises the unnested range will be entirely
 *   overwritten, so the COW copy modeling can be skipped.
 * Returns the unnested checker, or NULL when nothing was unnested
 * (read-only content without unnest_readonly, or a hole).
 */
vm_entry_checker_t *
checker_list_try_unnest_one_entry_in_submap(checker_list_t * checker_list,vm_entry_checker_t * submap_parent,bool unnest_readonly,bool all_overwritten,mach_vm_address_t * const inout_next_address)3909 checker_list_try_unnest_one_entry_in_submap(
	checker_list_t *checker_list,
	vm_entry_checker_t *submap_parent,
	bool unnest_readonly,
	bool all_overwritten,
	mach_vm_address_t * const inout_next_address)
{
	mach_vm_address_t unnest_start;
	mach_vm_address_t unnest_end;
	vm_entry_checker_t *unnested_checker;
	vm_prot_t submap_protection;
	vm_prot_t submap_max_protection;
	vm_object_checker_t *obj_checker;

	{
		/* Find the checker for the entry inside the submap at this parent map address. */
		checker_list_t *submap_checkers DEFER_UNSLIDE =
		    checker_get_and_slide_submap_checkers(submap_parent);
		vm_entry_checker_t *submap_content =
		    checker_list_find_checker(submap_checkers, *inout_next_address);

		/* Compute the range to be unnested if required, and advance past it. */
		unnest_start = submap_content->address;
		unnest_end = checker_end_address(submap_content);
		clamp_start_end_to_checker(&unnest_start, &unnest_end, submap_parent);
		*inout_next_address = unnest_end;

		/* Return now if the submap content does not need to be unnested. */
		switch (submap_content->kind) {
		case Allocation:
			if (!(submap_content->protection & VM_PROT_WRITE) && !unnest_readonly) {
				/*
				 * Allocation is read-only and unnest_readonly is not set.
				 * Don't unnest this.
				 */
				return NULL;
			}
			break;
		case Hole:
			/* Unallocated in submap. Don't unnest. */
			return NULL;
		case Submap:
			assert(0 && "nested submaps not allowed");
		default:
			assert(0 && "unknown checker kind");
		}

		/* capture the content's attributes before the submap checkers go away */
		submap_protection = submap_content->protection;
		submap_max_protection = submap_content->max_protection;
		obj_checker = submap_content->object;

		/*
		 * Unslide the submap checkers now at end of scope.
		 * Changing the submap parent map entry from a submap
		 * to an allocation (below) may leave the submap checkers
		 * unreferenced and thus deallocated.
		 */
	}

	/* Clip the submap parent to the unnest bounds. */
	checker_clip_left(checker_list, submap_parent, unnest_start);
	checker_clip_right(checker_list, submap_parent, unnest_end);

	/*
	 * unnested_checker (nee submap_parent) now matches the unnesting bounds.
	 * Change its object and other attributes to become the unnested entry.
	 * (this matches the behavior of vm_map_lookup_and_lock_object(),
	 * which also edits the parent map entry in place)
	 */

	unnested_checker = submap_parent;
	unnested_checker->kind = Allocation;

	/*
	 * Set unnested_checker's protection and inheritance.
	 * Copied from vm_map_lookup_and_lock_object.
	 */
	if (unnested_checker->protection != VM_PROT_READ) {
		/*
		 * Someone has already altered the top entry's
		 * protections via vm_protect(VM_PROT_COPY).
		 * Respect these new values and ignore the
		 * submap entry's protections.
		 */
	} else {
		/*
		 * Regular copy-on-write: propagate the submap
		 * entry's protections to the top map entry.
		 */
		unnested_checker->protection |= submap_protection;
	}
	unnested_checker->max_protection |= submap_max_protection;
	if (unnested_checker->inheritance == VM_INHERIT_SHARE) {
		unnested_checker->inheritance = VM_INHERIT_COPY;
	}

	/*
	 * Set unnested_checker's vm object.
	 * unnesting is a copy-on-write copy, but in our
	 * tests it is sometimes immediately overwritten so we skip that step.
	 */
	checker_set_object(unnested_checker, obj_checker);
	bool is_null = object_is_null(obj_checker);
	if (is_null && all_overwritten) {
		checker_resolve_null_vm_object(checker_list, unnested_checker);
	} else if (is_null) {
		/* no object change */
	} else if (all_overwritten && (submap_protection & VM_PROT_WRITE)) {
		/* writeable and will be overwritten - skip COW representation */
		obj_checker = object_checker_clone(obj_checker);
		checker_list_append_object(checker_list, obj_checker);
		unnested_checker->needs_copy = false;
		checker_set_object(unnested_checker, obj_checker);
		unnested_checker->object_offset = 0;
	} else {
		/* won't be overwritten - model a COW copy */
		checker_make_shadow_object(checker_list, unnested_checker);
	}

	/* TODO: tpro, permanent, VM_PROT_EXEC */

	assert(*inout_next_address == checker_end_address(unnested_checker));

	return unnested_checker;
}
4034 
/*
 * Build a heap-allocated vm_config_t from the given template lists.
 * Copies the name string and all four template arrays, then fixes up
 * the pointers inside the copied templates (entry->object references
 * and submap template lists) so they point into the copies instead of
 * the caller's arrays. Free the result with free_vm_config().
 *
 * NOTE(review): calloc() results are not checked here; in this test
 * harness an OOM would crash at the following memcpy/strdup use.
 */
__attribute__((overloadable))
vm_config_t *
make_vm_config(const char * name,vm_entry_template_t * entry_templates,vm_object_template_t * object_templates,vm_entry_template_t * submap_entry_templates,vm_object_template_t * submap_object_templates,mach_vm_size_t start_adjustment,mach_vm_size_t end_adjustment,mach_vm_size_t alignment_mask)4037 make_vm_config(
	const char *name,
	vm_entry_template_t *entry_templates,
	vm_object_template_t *object_templates,
	vm_entry_template_t *submap_entry_templates,
	vm_object_template_t *submap_object_templates,
	mach_vm_size_t start_adjustment,
	mach_vm_size_t end_adjustment,
	mach_vm_size_t alignment_mask)
{
	/*
	 * Allocate a new vm_config_t and populate it with
	 * copies of the name string and all of the templates.
	 */
	vm_config_t *result = calloc(sizeof(vm_config_t), 1);

	result->config_name = strdup(name);
	result->start_adjustment = start_adjustment;
	result->end_adjustment = end_adjustment;
	result->alignment_mask = alignment_mask;

	/* memcpy the templates */

	/* copies T##_templates into result->T##_templates and records the count */
#define COPY_TEMPLATE_LIST(T)                                           \
	unsigned T##_template_count = count_##T##_templates(T##_templates); \
	size_t T##_template_bytes = T##_template_count * sizeof(T##_templates[0]); \
	result->T##_templates = calloc(1, T##_template_bytes);          \
	result->T##_template_count = T##_template_count;                \
	memcpy(result->T##_templates, T##_templates, T##_template_bytes)

	COPY_TEMPLATE_LIST(entry);
	COPY_TEMPLATE_LIST(object);
	COPY_TEMPLATE_LIST(submap_entry);
	COPY_TEMPLATE_LIST(submap_object);

	/* fix up the pointers inside the templates */
	/* TODO: use indexes instead of pointers so that they don't need fixup */

	/* sanity check: `ptr` must point into `array[0..array_count)` */
#define ASSERT_IS_WITHIN(ptr, array, array_count) \
	assert((ptr) >= (array) && (ptr) < (array) + (array_count))

	for (unsigned i = 0; i < result->entry_template_count; i++) {
		vm_entry_template_t *tmpl = &result->entry_templates[i];
		if (tmpl->object) {
			/* fix up entry's object to point into the copied templates */
			ASSERT_IS_WITHIN(tmpl->object, object_templates, object_template_count);
			tmpl->object = &result->object_templates[tmpl->object - object_templates];
		}
	}
	for (unsigned i = 0; i < result->submap_entry_template_count; i++) {
		vm_entry_template_t *tmpl = &result->submap_entry_templates[i];
		if (tmpl->object) {
			/* fix up submap entry's object to point into the copied submap templates */
			ASSERT_IS_WITHIN(tmpl->object, submap_object_templates, submap_object_template_count);
			tmpl->object = &result->submap_object_templates[tmpl->object - submap_object_templates];
		}
	}
	for (unsigned i = 0; i < result->object_template_count; i++) {
		vm_object_template_t *tmpl = &result->object_templates[i];
		if (tmpl->kind != SubmapObject) {
			continue;
		}
		/* fix up submap's template lists to point into the copied submap templates */
		assert(tmpl->submap.entries);  /* submap must contain at least one entry */
		ASSERT_IS_WITHIN(tmpl->submap.entries, submap_entry_templates, submap_entry_template_count);
		ptrdiff_t submap_index = tmpl->submap.entries - submap_entry_templates;
		tmpl->submap.entries = &result->submap_entry_templates[submap_index];
		if (tmpl->submap.entry_count == 0) {
			/* count of 0 means "through the end of the template list" */
			tmpl->submap.entry_count = submap_entry_template_count - submap_index;
		}
		assert(submap_index + tmpl->submap.entry_count <= submap_entry_template_count);

		if (tmpl->submap.objects) {
			ASSERT_IS_WITHIN(tmpl->submap.objects, submap_object_templates, submap_object_template_count);
			ptrdiff_t object_index = tmpl->submap.objects - submap_object_templates;
			tmpl->submap.objects = &result->submap_object_templates[object_index];
			if (tmpl->submap.object_count == 0) {
				/* count of 0 means "through the end of the template list" */
				tmpl->submap.object_count = submap_object_template_count - object_index;
			}
			assert(object_index + tmpl->submap.object_count <= submap_object_template_count);
		}
	}
	for (unsigned i = 0; i < result->submap_object_template_count; i++) {
		/* no fixups needed inside submap_object_templates, they can't be nested submap objects */
		vm_object_template_t *tmpl = &result->submap_object_templates[i];
		assert(tmpl->kind != SubmapObject);
	}

#undef ASSERT_IS_WITHIN

	return result;
}
4129 
4130 
4131 static void
free_vm_config(vm_config_t * config)4132 free_vm_config(vm_config_t *config)
4133 {
4134 	free(config->entry_templates);
4135 	free(config->object_templates);
4136 	free(config->config_name);
4137 	free(config);
4138 }
4139 
4140 
4141 /*
4142  * templates are initialized by vm_configurator_init()
4143  * because PAGE_SIZE is not a compile-time constant
4144  */
4145 vm_object_template_t END_OBJECTS;
4146 vm_entry_template_t END_ENTRIES = {};
4147 vm_entry_template_t guard_entry_template = {};
4148 vm_entry_template_t hole_template = {};
4149 
4150 __attribute__((constructor))
4151 static void
vm_configurator_init(void)4152 vm_configurator_init(void)
4153 {
4154 	/*
4155 	 * Set Verbose if environment variable VERBOSE is set.
4156 	 * Also set verbose_exc_helper to match.
4157 	 */
4158 	char *env_verbose = getenv("VERBOSE");
4159 	if (env_verbose) {
4160 		if (0 == strcasecmp(env_verbose, "0") ||
4161 		    0 == strcasecmp(env_verbose, "false") ||
4162 		    0 == strcasecmp(env_verbose, "no")) {
4163 			/*
4164 			 * VERBOSE is set to something false-ish like "NO".
4165 			 * Don't enable it.
4166 			 */
4167 		} else {
4168 			Verbose = true;
4169 		}
4170 	}
4171 
4172 	verbose_exc_helper = Verbose;
4173 
4174 	/*
4175 	 * Verify some preconditions about page sizes.
4176 	 * These would be static_asserts but PAGE_SIZE isn't constant.
4177 	 */
4178 	assert(DEFAULT_PARTIAL_ENTRY_SIZE > 0);
4179 	assert(DEFAULT_PARTIAL_ENTRY_SIZE / 2 > 0);
4180 
4181 	/*
4182 	 * Initialize some useful templates.
4183 	 * These would be static initializers but PAGE_SIZE isn't constant.
4184 	 */
4185 	guard_entry_template = vm_entry_template(
4186 		.protection = 0, .max_protection = 0,
4187 		.user_tag = VM_MEMORY_GUARD /* 31 */);
4188 	hole_template =
4189 	    vm_entry_template(.kind = Hole);
4190 	END_ENTRIES =
4191 	    vm_entry_template(.kind = EndEntries);
4192 	END_OBJECTS = (vm_object_template_t){.kind = EndObjects, .size = 0};
4193 
4194 	/*
4195 	 * Initialize fault exception and guard exception handlers.
4196 	 * Do this explicitly in the hope of avoiding memory allocations
4197 	 * inside our unallocated address ranges later.
4198 	 */
4199 	exc_guard_helper_init();
4200 	{
4201 		static const char unwriteable = 1;
4202 		kern_return_t kr;
4203 		bool succeeded = try_write_byte((mach_vm_address_t)&unwriteable, 0, &kr);
4204 		assert(!succeeded);
4205 		assert(kr == KERN_PROTECTION_FAILURE);
4206 	}
4207 
4208 	/*
4209 	 * host_priv is looked up lazily so we don't
4210 	 * unnecessarily fail tests that don't need it.
4211 	 */
4212 }
4213 
/*
 * Placeholder for tests that are deliberately left unimplemented.
 * run_one_vm_test() recognizes this function by address and reports
 * T_PASS without invoking it; actually calling it is an error.
 */
test_result_t
test_is_unimplemented(checker_list_t * checker_list __unused,mach_vm_address_t start __unused,mach_vm_size_t size __unused)4215 test_is_unimplemented(
	checker_list_t *checker_list __unused,
	mach_vm_address_t start __unused,
	mach_vm_size_t size __unused)
{
	T_FAIL("don't call test_is_unimplemented()");
	return TestFailed;
}
4223 
4224 void
run_one_vm_test(const char * filename,const char * funcname,const char * testname,configure_fn_t configure_fn,test_fn_t test_fn)4225 run_one_vm_test(
4226 	const char *filename,
4227 	const char *funcname,
4228 	const char *testname,
4229 	configure_fn_t configure_fn,
4230 	test_fn_t test_fn)
4231 {
4232 	vm_config_t *config;
4233 	checker_list_t *checker_list;
4234 	mach_vm_address_t vm_state_start_address;
4235 	mach_vm_address_t vm_state_end_address;
4236 	mach_vm_address_t test_fn_start_address;
4237 	mach_vm_address_t test_fn_end_address;
4238 	test_result_t result;
4239 
4240 	const char *short_filename = strstr(filename, "tests/") ?: filename;
4241 
4242 	if (test_fn == NULL) {
4243 		/* vm_tests_t field not set. The test file needs to be updated. */
4244 		T_FAIL("test %s.%s not present in test file %s; please write it",
4245 		    funcname, testname, short_filename);
4246 		return;
4247 	} else if (test_fn == test_is_unimplemented) {
4248 		/* Test is deliberately not implemented. */
4249 		T_PASS("unimplemented test: %s %s %s",
4250 		    short_filename, funcname, testname);
4251 		return;
4252 	}
4253 
4254 	/* Prepare the VM state. */
4255 	config = configure_fn();
4256 	T_LOG("note: starting test: %s %s (%s) ...", funcname, testname, config->config_name);
4257 
4258 	create_vm_state_from_config(config, &checker_list,
4259 	    &test_fn_start_address, &test_fn_end_address);
4260 	vm_state_start_address = checker_range_start_address(checker_list->entries);
4261 	vm_state_end_address = checker_range_end_address(checker_list->entries);
4262 
4263 	if (vm_state_start_address != test_fn_start_address ||
4264 	    vm_state_end_address != test_fn_end_address) {
4265 		T_LOG("note: prepared vm state is 0x%llx..0x%llx; calling tested function on 0x%llx..0x%llx",
4266 		    vm_state_start_address, vm_state_end_address,
4267 		    test_fn_start_address, test_fn_end_address);
4268 	} else {
4269 		T_LOG("note: prepared vm state is 0x%llx..0x%llx; calling tested function on the entire range",
4270 		    vm_state_start_address, vm_state_end_address);
4271 	}
4272 
4273 	/* Run the test. */
4274 	result = test_fn(checker_list, test_fn_start_address,
4275 	    test_fn_end_address - test_fn_start_address);
4276 
4277 	/*
4278 	 * Verify and/or deallocate depending on the initial test result.
4279 	 * These operations may change the result to a failure.
4280 	 */
4281 	switch (result) {
4282 	case TestSucceeded:
4283 		/*
4284 		 * Verify one more time, then perform
4285 		 * destructive verifications and deallocate.
4286 		 */
4287 		result = verify_vm_state(checker_list, "after test");
4288 		if (result == TestSucceeded) {
4289 			result = verify_vm_faultability(checker_list, "final faultability check", true, true);
4290 		}
4291 		if (result == TestSucceeded) {
4292 			deallocate_vm_allocations(checker_list);
4293 			result = verify_vm_state(checker_list, "after final deallocation");
4294 		}
4295 		break;
4296 	case TestFailed:
4297 		/*
4298 		 * we don't attempt to deallocate after a failure
4299 		 * because we don't know where the real allocations are
4300 		 */
4301 		break;
4302 	}
4303 
4304 	checker_list_free(checker_list);
4305 
4306 	/* Report the final test result. */
4307 	if (result == TestFailed) {
4308 		/* executable name is basename(short_filename) minus ".c" suffix */
4309 		const char *exe_name = strrchr(short_filename, '/');
4310 		exe_name = exe_name ? exe_name + 1 : short_filename;
4311 		int exe_name_len = strrchr(exe_name, '.') - exe_name;
4312 		const char *arch_cmd = isRosetta() ? "arch -x86_64 " : "";
4313 		T_FAIL("%s %s %s (%s) failed above; run it locally with `env VERBOSE=1 %s%.*s -n %s %s`",
4314 		    short_filename, funcname, testname, config->config_name,
4315 		    arch_cmd, exe_name_len, exe_name, funcname, testname);
4316 	} else {
4317 		T_PASS("%s %s %s (%s)",
4318 		    short_filename, funcname, testname, config->config_name);
4319 	}
4320 
4321 	free_vm_config(config);
4322 }
4323