1 /*
2 * Copyright (c) 2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * vm_configurator.h
31 *
32 * Generator and checker of userspace virtual memory configurations.
33 */
34
35 #ifndef VM_CONFIGURATOR_H
36 #define VM_CONFIGURATOR_H
37
38 /*
39 * -- Dramatis personae --
40 *
41 * vm_entry_template_t
42 * Specification of a VM entry to create,
43 * or a hole in VM address space to skip over.
44 * Used to describe and create the VM state for the start of a test.
45 *
46 * vm_object_template_t
47 * Specification of a VM object to create for entries to copy or share.
48 * Used to describe and create the VM state for the start of a test.
49 *
50 * vm_config_t
51 * Specification of one or more contiguous VM entries,
52 * plus a test name and an address range within that VM
53 * space that is the range to be tested.
54 * Used to describe and create the VM state for the start of a test.
55 *
56 * vm_entry_checker_t
57 * Describes the expected state of a VM entry or a hole,
58 * and verifies that the live VM state matches the expected state.
59 * Updated by test code as test operations are performed.
60 * Used to verify the VM state during and after a test.
61 *
62 * vm_object_checker_t
63 * Describes the expected state of a VM object
 * and verifies that the live VM state matches the expected state.
65 * Updated by test code as test operations are performed.
66 * Used to verify the VM state during and after a test.
67 *
68 * -- Outline of a test --
69 *
70 * 1. Describe the desired initial memory state
71 * with arrays of vm_entry_template_t and vm_object_template_t.
72 * 2. Call create_vm_state() to allocate the specified VM entries
73 * and lists of vm_entry_checker_t and vm_object_checker_t
74 * that match the newly-allocated state.
75 * 3. Perform the VM operations to be tested. Update the checkers
76 * with the state changes that you expect. If some field's value
77 * becomes indeterminate, or difficult to specify and unimportant
78 * for your test, disable that field in the checker.
79 * 4. Call verify_vm_state() to compare the live
80 * VM state to the checker's expected state.
81 * 5. Optionally repeat steps 3 and 4 to test a sequence of VM operations.
82 *
83 * See vm_configurator_tests.h for a set of templates used by
84 * many VM syscall tests, and some details on how to run them.
85 */
86
87 #include <stdio.h>
88 #include <stdint.h>
89 #include <stdlib.h>
90 #include <stdbool.h>
91 #include <assert.h>
92
93 #include <mach/mach.h>
94 #include <mach/mach_vm.h>
95 #include <mach/vm_prot.h>
96 #include <mach/vm_param.h>
97 #include <mach/vm_region.h>
98 #include <mach/vm_inherit.h>
99 #include <mach/vm_behavior.h>
100 #include <mach/vm_statistics.h>
101
102 #include <darwintest.h>
103 #include <darwintest_utils.h>
104 #include <test_utils.h>
105
106 /*
107 * Set Verbose = true to log the complete VM state, both expected and actual,
108 * every time it is checked.
109 * Initialized from environment variable VERBOSE
110 */
111 extern bool Verbose;
112
/*
 * Return values from individual test functions.
 * These are ordered from "best" to "worst":
 * comparing two results with `>` selects the worse outcome.
 */
typedef enum {
	TestSucceeded = 1,
	TestFailed,
} test_result_t;

/*
 * Return the worst result among `count` results in `list`.
 * Returns TestSucceeded for an empty list (`list` may then be NULL).
 */
static inline test_result_t
worst_result(const test_result_t *list, unsigned count)
{
	test_result_t worst = TestSucceeded;
	for (unsigned i = 0; i < count; i++) {
		if (list[i] > worst) {
			worst = list[i];
		}
	}
	return worst;
}
135
/*
 * fill_pattern_t: whether (and with what value) newly created memory
 * should be filled.
 * DontFill must be zero — presumably so that a zero-initialized
 * fill_pattern_t defaults to "don't fill"; TODO confirm.
 */
typedef enum {
	DontFill = 0, /* must be zero */
	Fill = 1
} fill_pattern_mode_t;

typedef struct {
	fill_pattern_mode_t mode;
	/* 64-bit value used as the fill when mode == Fill */
	uint64_t pattern;
} fill_pattern_t;
145
/*
 * FreedObject: the zero value; indicates use of a freed template
 * (use after free, shouldn't happen)
 * EndObjects: for END_OBJECTS array terminator
 * Deinited: an object that is no longer referenced and whose checker is now
 * depopulated but is still allocated because some checker list may point to it
 * Anonymous: anonymous memory such as vm_allocate()
 * SubmapObject: an "object" that is really a submap
 * TODO: support named/pageable objects
 */
typedef enum {
	FreedObject = 0, /* use after free, shouldn't happen */
	EndObjects,
	Deinited,
	Anonymous,
	SubmapObject,
} vm_object_template_kind_t;
161
/*
 * struct vm_object_template_t
 * Declaratively specify VM objects to be created.
 */
typedef struct vm_object_template_s {
	vm_object_template_kind_t kind;

	mach_vm_size_t size; /* size 0 means auto-compute from entry sizes */

	/* initial contents written into the object's pages */
	fill_pattern_t fill_pattern;

	/* submap contents — presumably used only when kind == SubmapObject; TODO confirm */
	struct {
		struct vm_entry_template_s *entries;
		struct vm_object_template_s *objects;
		unsigned entry_count;
		unsigned object_count;
	} submap;
} vm_object_template_t;
179
/*
 * Convenience macro for initializing a vm_object_template_t.
 * The macro sets all template fields to a default value.
 * You may override any field using designated initializer syntax.
 * (The _Pragma lines silence the warnings that the intentional
 * duplicate/partial initializers would otherwise provoke.)
 *
 * Example usage:
 *     // all default values
 *     vm_object_template()
 *
 *     // default, with custom size and fill pattern
 *     vm_object_template(
 *         .size = 20 * PAGE_SIZE,
 *         .fill_pattern = 0x1234567890abcdef)
 */
#pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
#define vm_object_template(...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Winitializer-overrides\"") \
	_Pragma("clang diagnostic ignored \"-Wmissing-field-initializers\"") \
	(vm_object_template_t){ \
	        .size = 0, /* auto-computed */ \
	        .kind = Anonymous, \
	        .fill_pattern = {.mode = DontFill}, \
	        __VA_ARGS__ \
	} \
	_Pragma("clang diagnostic pop")

/* Convenience for submap objects */
#define submap_object_template(...) \
	vm_object_template(.kind = SubmapObject, __VA_ARGS__)
210
/*
 * EndEntries: for END_ENTRIES array terminator (note: the zero value)
 * Allocation: an ordinary VM entry
 * Hole: an unallocated range of the address space.
 * Submap: a mapping of a submap
 */
typedef enum {
	EndEntries = 0,
	Allocation,
	Hole,
	Submap,
} vm_entry_template_kind_t;
223
/*
 * struct vm_entry_template_t
 * Declaratively specify VM entries to be created.
 */
typedef struct vm_entry_template_s {
	mach_vm_size_t size;
	vm_entry_template_kind_t kind;

	/*
	 * NULL object means either null vm_object_t or anonymous zerofilled
	 * memory, depending on the requirements of the other settings.
	 * (For example, non-zero wire count faults in the pages
	 * so it is no longer a null vm_object_t.)
	 * Used when .kind == Allocation.
	 */
	vm_object_template_t *object;

	/* presumably the offset into `object` where the mapping starts — TODO confirm */
	mach_vm_offset_t offset;

	/* standard Mach VM entry attributes */
	vm_prot_t protection;
	vm_prot_t max_protection;
	vm_inherit_t inheritance;
	vm_behavior_t behavior;
	bool permanent;

	/* New entry gets vm_wire'd this many times. */
	uint16_t user_wired_count;

	/*
	 * User tag may be a specific value, or autoincrementing.
	 *
	 * An autoincrementing tag is assigned by create_vm_state()
	 * in the VM_MEMORY_APPLICATION_SPECIFIC_1-16 range. Adjacent
	 * autoincrementing entries get distinct tags. This can be
	 * used to stop the VM from simplifying/coalescing vm entries
	 * that you want to remain separate.
	 */
	uint16_t user_tag;
#define VM_MEMORY_TAG_AUTOINCREMENTING 256

	/* SM_* share mode; defaults to SM_EMPTY in vm_entry_template() */
	uint8_t share_mode;

	/*
	 * Code to update when adding new fields:
	 *     vm_entry_template() macro
	 *     create_vm_state() function
	 */
} vm_entry_template_t;
272
273
/*
 * Default size for vm_entries created by this generator
 * Some tests require that this be above some minimum.
 * 64 * PAGE_SIZE is big enough that 1/4 of an entry is
 * still over the 32KB physical copy limit inside vm_map_copyin.
 */
#define DEFAULT_ENTRY_SIZE (64 * (mach_vm_address_t)PAGE_SIZE)

/*
 * Default size for address ranges that cover only part of a vm_entry.
 * Some tests require that this be above some minimum.
 */
#define DEFAULT_PARTIAL_ENTRY_SIZE (DEFAULT_ENTRY_SIZE / 2u)

/*
 * Unnesting of submap nested pmaps occurs at L[N-1] page table
 * boundaries (pmap "twig"). By default we avoid crossing those
 * boundaries in tests because it affects the unnested map entries
 * in the parent map.
 * 0x2000000 == 32MB, so ranges are kept within a 32MB-aligned window.
 * TODO: don't hardcode this, get it from pmap somehow
 */
#define SUBMAP_ALIGNMENT_MASK (0x2000000ull - 1)
296
/*
 * Convenience macro for initializing a vm_entry_template_t.
 * The macro sets all template fields to a default value.
 * You may override any field using designated initializer syntax.
 * Defaults: DEFAULT_ENTRY_SIZE, Allocation kind, rw-/rw- protections,
 * VM_INHERIT_DEFAULT, not wired, autoincrementing user tag.
 *
 * Example usage:
 *     // all default values
 *     vm_entry_template()
 *
 *     // default, with custom size and protections
 *     vm_entry_template(
 *         .size = 20 * PAGE_SIZE,
 *         .protection = VM_PROT_READ,
 *         .max_protection = VM_PROT_READ | VM_PROT_WRITE)
 */
#define vm_entry_template(...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Winitializer-overrides\"") \
	_Pragma("clang diagnostic ignored \"-Wmissing-field-initializers\"") \
	(vm_entry_template_t){ \
	        .size = DEFAULT_ENTRY_SIZE, \
	        .kind = Allocation, \
	        .object = NULL, \
	        .offset = 0, \
	        .protection = VM_PROT_READ | VM_PROT_WRITE, \
	        .max_protection = VM_PROT_READ | VM_PROT_WRITE, \
	        .inheritance = VM_INHERIT_DEFAULT, /* inherit_copy */ \
	        .behavior = VM_BEHAVIOR_DEFAULT, \
	        .permanent = false, \
	        .user_wired_count = 0, \
	        .user_tag = VM_MEMORY_TAG_AUTOINCREMENTING, \
	        .share_mode = SM_EMPTY, \
	        __VA_ARGS__ \
	} \
	_Pragma("clang diagnostic pop")

/* Convenience for submap entries */
#define submap_entry_template(...) \
	vm_entry_template(.kind = Submap, __VA_ARGS__)
336
337 /*
338 * Convenience templates.
339 * END_ENTRIES and END_OBJECTS: terminates a template list
340 * passed to create_vm_state() instead of passing an array count.
341 * (useful for hand-written template array initializers)
342 * guard_entry_template: an allocation that defaults to
343 * prot/max NONE/NONE and tag VM_MEMORY_GUARD
344 * hole_template: an unallocated hole in the address space.
345 */
346 extern vm_object_template_t END_OBJECTS;
347 extern vm_entry_template_t END_ENTRIES;
348 extern vm_entry_template_t guard_entry_template;
349 extern vm_entry_template_t hole_template;
350
351 /*
 * Count the number of templates in an END_ENTRIES-terminated array.
353 */
354 extern unsigned
355 count_templates(const vm_entry_template_t *templates);
356
357
/*
 * struct vm_entry_attribute_list_t
 * A list of checkable entry attributes with one bool for each.
 * Used to record which attributes should be verified by a checker,
 * or which attributes failed to match during verification.
 * The anonymous union lets `bits` read or clear all flags at once.
 */
typedef struct {
	union {
		uint64_t bits; /* all attribute flags as one word */
		struct {
			uint64_t address_attr:1;
			uint64_t size_attr:1;
			uint64_t object_attr:1;
			uint64_t protection_attr:1;
			uint64_t max_protection_attr:1;
			uint64_t inheritance_attr:1;
			uint64_t behavior_attr:1;
			uint64_t permanent_attr:1;
			uint64_t user_wired_count_attr:1;
			uint64_t user_tag_attr:1;
			uint64_t is_submap_attr:1;
			uint64_t submap_depth_attr:1;
			uint64_t object_offset_attr:1;
			uint64_t pages_resident_attr:1;
			uint64_t share_mode_attr:1;
		};
	};

	/*
	 * Code to update when adding new fields:
	 *     dump_checker_info()
	 *     vm_entry_attributes_with_default macro
	 *     verify_allocation()
	 */
} vm_entry_attribute_list_t;
393
/*
 * struct vm_object_attribute_list_t
 * A list of checkable object attributes with one bool for each.
 * Used to record which attributes should be verified by a checker,
 * or which attributes failed to match during verification.
 * The anonymous union lets `bits` read or clear all flags at once.
 */
typedef struct {
	union {
		uint64_t bits; /* all attribute flags as one word */
		struct {
			uint64_t object_id_attr:1;
			uint64_t size_attr:1;
			uint64_t ref_count_attr:1;
			uint64_t shadow_depth_attr:1;
			uint64_t fill_pattern_attr:1;
		};
	};

	/*
	 * Code to update when adding new fields:
	 *     dump_checker_info()
	 *     vm_object_attributes_with_default macro
	 *     verify_allocation()
	 */
} vm_object_attribute_list_t;
419
/*
 * vm_entry_attributes_with_default() returns a vm_entry_attribute_list_t,
 * with all attributes set to `default_value`, and the caller can set individual
 * attributes to other values using designated initializer syntax.
 * `default_value` should be 0 or 1; each attribute is a one-bit field.
 */
#define vm_entry_attributes_with_default(default_value, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Winitializer-overrides\"") \
	_Pragma("clang diagnostic ignored \"-Wmissing-field-initializers\"") \
	(vm_entry_attribute_list_t){ \
	        .address_attr = (default_value), \
	        .size_attr = (default_value), \
	        .object_attr = (default_value), \
	        .protection_attr = (default_value), \
	        .max_protection_attr = (default_value), \
	        .inheritance_attr = (default_value), \
	        .behavior_attr = (default_value), \
	        .permanent_attr = (default_value), \
	        .user_wired_count_attr = (default_value), \
	        .user_tag_attr = (default_value), \
	        .is_submap_attr = (default_value), \
	        .submap_depth_attr = (default_value), \
	        .object_offset_attr = (default_value), \
	        .pages_resident_attr = (default_value), \
	        .share_mode_attr = (default_value), \
	        __VA_ARGS__ \
	} \
	_Pragma("clang diagnostic pop")
448
/*
 * vm_object_attributes_with_default() returns a vm_object_attribute_list_t,
 * with all attributes set to `default_value`, and the caller can set individual
 * attributes to other values using designated initializer syntax.
 * `default_value` should be 0 or 1; each attribute is a one-bit field.
 */
#define vm_object_attributes_with_default(default_value, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Winitializer-overrides\"") \
	_Pragma("clang diagnostic ignored \"-Wmissing-field-initializers\"") \
	(vm_object_attribute_list_t){ \
	        .object_id_attr = (default_value), \
	        .size_attr = (default_value), \
	        .ref_count_attr = (default_value), \
	        .shadow_depth_attr = (default_value), \
	        .fill_pattern_attr = (default_value), \
	        __VA_ARGS__ \
	} \
	_Pragma("clang diagnostic pop")
467
/*
 * Description of a checker's current knowledge of an object's ID.
 * object_is_unknown: object's ID is unknown; it may be null
 * object_has_unknown_nonnull_id: object's ID is expected to be non-null,
 *     but its actual value is unknown
 * object_has_known_id: object's ID is expected to be checker->object_id
 *
 * During verification unknown object IDs are learned by reading them from the
 * actual VM state. The learned IDs are applied to subsequent verifications or
 * to subsequent uses of the same object in the same verification.
 */
typedef enum {
	object_is_unknown = 0,
	object_has_unknown_nonnull_id,
	object_has_known_id
} object_id_mode_t;
484
/*
 * struct vm_object_checker_t
 * Maintain and verify expected state of a VM object.
 * Object checkers live on a doubly-linked list (prev/next); the
 * top-level checker_list_t holds the single global list of objects.
 */
typedef struct vm_object_checker_s {
	struct vm_object_checker_s *prev;
	struct vm_object_checker_s *next;

	vm_object_template_kind_t kind;
	vm_object_attribute_list_t verify; /* which attributes to check */
	bool deinited; /* see Deinited in vm_object_template_kind_t */

	uint64_t object_id;
	object_id_mode_t object_id_mode; /* how much of object_id is known */

	/*
	 * This is the count of references to this object specifically.
	 * vm_region's reported ref_count also includes references to
	 * the shadow chain's objects, minus the shadow chain's references
	 * to each other.
	 */
	unsigned self_ref_count;
	mach_vm_size_t size;
	fill_pattern_t fill_pattern; /* expected contents of the object's pages */

	/*
	 * Shadow chain.
	 * object->shadow moves away from entry.
	 * object->shadow is refcounted.
	 */
	struct vm_object_checker_s *shadow;

	/*
	 * Checkers for submap contents.
	 * These checkers are configured for a mapping of the whole
	 * submap at address 0. Verification of actual remappings will
	 * need to compensate for address offsets and bounds clipping.
	 */
	struct checker_list_s *submap_checkers;

	/*
	 * Code to update when adding new fields:
	 *     struct vm_object_attribute_list_t
	 *     make_null_object_checker()
	 *     make_anonymous_object_checker()
	 *     make_submap_object_checker()
	 *     dump_checker_info()
	 *     verify_allocation()
	 *     object_checker_clone()
	 */
} vm_object_checker_t;
536
537 /*
538 * Create a new object checker duplicating an existing checker.
539 * The new object is:
540 * - zero self_ref_count
541 * - unknown object_id
542 * - not linked into any checker_list
543 */
544 extern vm_object_checker_t *
545 object_checker_clone(vm_object_checker_t *obj_checker);
546
/*
 * struct vm_entry_checker_t
 * Maintain and verify expected state of a VM map entry.
 *
 * The `verify` bitmap specifies which properties should be checked.
 * If a property's value is indeterminate, or is difficult to specify
 * and not important to the test, that check can be disabled.
 *
 * Checkers are kept in a doubly-linked list in address order,
 * similar to vm_map_entry_t but it is not a circular list.
 * Submaps are recursive: the top-level list contains a Submap checker,
 * and the Submap checker has its own list of contained checkers.
 */
typedef struct vm_entry_checker_s {
	struct vm_entry_checker_s *prev;
	struct vm_entry_checker_s *next;

	vm_entry_template_kind_t kind;
	vm_entry_attribute_list_t verify; /* which attributes to check */

	mach_vm_address_t address;
	mach_vm_size_t size;

	/* expected backing object; refcounted via the object's self_ref_count */
	vm_object_checker_t *object;

	vm_prot_t protection;
	vm_prot_t max_protection;
	vm_inherit_t inheritance;
	vm_behavior_t behavior;
	bool permanent;

	uint16_t user_wired_count;
	uint8_t user_tag;

	bool is_submap; /* true when entry is a parent map's submap entry */
	uint32_t submap_depth; /* non-zero when entry is a submap's content */

	uint64_t object_offset;
	uint32_t pages_resident; /* TODO: track this in the object checker instead */

	/* NOTE(review): presumably mirrors vm_map_entry's COW needs_copy bit — confirm */
	bool needs_copy;

	/* share_mode is computed from other entry and object attributes */

	/*
	 * Code to update when adding new fields:
	 *     struct vm_entry_attribute_list_t
	 *     make_checker_for_anonymous_private()
	 *     make_checker_for_vm_allocate()
	 *     make_checker_for_shared()
	 *     make_checker_for_submap()
	 *     dump_checker_info()
	 *     verify_allocation()
	 *     checker_simplify_left()
	 */
} vm_entry_checker_t;
603
/*
 * A list of consecutive entry checkers. May be a subset of the entire doubly-linked list.
 * Both head and tail are inclusive; head == tail describes a single checker.
 */
typedef struct {
	vm_entry_checker_t *head;
	vm_entry_checker_t *tail;
} entry_checker_range_t;
611
612 /*
613 * Count the number of entries between
614 * checker_range->head and checker_range->tail, inclusive.
615 */
616 extern unsigned
617 checker_range_count(entry_checker_range_t checker_range);
618
619 /*
620 * Return the start address of the first entry in a range.
621 */
622 extern mach_vm_address_t
623 checker_range_start_address(entry_checker_range_t checker_range);
624
625 /*
626 * Return the end address of the last entry in a range.
627 */
628 extern mach_vm_address_t
629 checker_range_end_address(entry_checker_range_t checker_range);
630
631 /*
632 * Return size of all entries in a range.
633 */
634 extern mach_vm_size_t
635 checker_range_size(entry_checker_range_t checker_range);
636
/*
 * Loop over all checkers between
 * entry_range->head and entry_range->tail, inclusive.
 * Does visit any submap parent entry.
 * Does not descend into submap contents.
 *
 * You may clip_left the current checker. The new left entry is not visited.
 * You may clip_right the current checker. The new right entry is visited next.
 * You may not delete the current checker, unless you also immediately break the loop.
 *
 * Implementation note: `(entry_range).tail->next` is the loop sentinel,
 * so tail must be reachable from head via `next` links.
 */
#define FOREACH_CHECKER(checker, entry_range) \
	for (vm_entry_checker_t *checker = (entry_range).head; \
	    checker != (entry_range).tail->next; \
	    checker = checker->next)
651
/*
 * The list of all entry and object checkers.
 * The first and last entries may be changed by the test.
 * The first object is the common null object, so it should not change.
 *
 * Submaps get their own checker_list_t. A submap checker
 * list stores checkers for the submap's map entries.
 * It does not store any objects; a single global list of objects is
 * maintained in the top-level checker list so it can be searched by ID.
 *
 * submap_slide keeps track of a temporary address offset applied
 * to the contained checkers. This is used for submap contents.
 */
typedef struct checker_list_s {
	/* enclosing list for a submap list — presumably NULL at top level; TODO confirm */
	struct checker_list_s *parent;
	entry_checker_range_t entries;
	vm_object_checker_t *objects; /* must be NULL in submaps */
	uint64_t submap_slide; /* temporary address offset for submap contents */
	bool is_slid; /* true while submap_slide is currently applied */
} checker_list_t;
672
/*
 * Loop over every object checker in a checker list,
 * walking forward from list->objects until NULL.
 * Only the top-level list has objects; submap lists store none
 * (see checker_list_t).
 */
#define FOREACH_OBJECT_CHECKER(obj_checker, list) \
	for (vm_object_checker_t *obj_checker = (list)->objects; \
	    obj_checker != NULL; \
	    obj_checker = obj_checker->next)
677
678 /*
679 * Return the nth checker in the list. Aborts if n is out of range.
680 */
681 extern vm_entry_checker_t *
682 checker_list_nth(checker_list_t *list, unsigned n);
683
684 /*
685 * Search a list of checkers for an allocation that contains the given address.
686 * Returns NULL if no checker contains the address.
687 * Returns NULL if a non-Allocation checker contains the address.
688 * Does not descend into submaps.
689 */
690 extern vm_entry_checker_t *
691 checker_list_find_allocation(checker_list_t *list, mach_vm_address_t addr);
692
693 /*
694 * Search a list of checkers for a checker that contains the given address.
695 * May return checkers for holes.
696 * Returns NULL if no checker contains the address.
697 * Does not descend into submaps.
698 */
699 extern vm_entry_checker_t *
700 checker_list_find_checker(checker_list_t *list, mach_vm_address_t addr);
701
702 /*
703 * Add a new vm object checker to the list.
704 * Aborts if the new object is null and the list already has its null object.
705 * Aborts if the object's ID is the same as some other object.
706 */
707 extern void
708 checker_list_append_object(
709 checker_list_t *list,
710 vm_object_checker_t *obj_checker);
711
712 /*
713 * Return the list of entry checkers covering an address range.
714 * Aborts if the range includes any hole checkers.
715 */
716 extern entry_checker_range_t
717 checker_list_find_range(
718 checker_list_t *list,
719 mach_vm_address_t start,
720 mach_vm_size_t size);
721
722 /*
723 * Return the list of entry checkers covering an address range.
724 * Hole checkers are allowed.
725 */
726 extern entry_checker_range_t
727 checker_list_find_range_including_holes(
728 checker_list_t *list,
729 mach_vm_address_t start,
730 mach_vm_size_t size);
731
732 /*
733 * Like checker_list_find_range(),
734 * but the first and last entries are clipped to the address range.
735 */
736 extern entry_checker_range_t
737 checker_list_find_and_clip(
738 checker_list_t *list,
739 mach_vm_address_t start,
740 mach_vm_size_t size);
741
742 /*
743 * Like checker_list_find_range_including_holes(),
744 * but the first and last entries (if any) are clipped to the address range.
745 */
746 extern entry_checker_range_t
747 checker_list_find_and_clip_including_holes(
748 checker_list_t *list,
749 mach_vm_address_t start,
750 mach_vm_size_t size);
751
752 /*
753 * Attempts to simplify all entries in an address range.
754 */
755 extern void
756 checker_list_simplify(
757 checker_list_t *list,
758 mach_vm_address_t start,
759 mach_vm_size_t size);
760
761 /*
762 * Replace and delete checkers in old_range
763 * with the checkers in new_range.
764 * The two ranges must have the same start address and size.
765 * Updates list->head and/or list->tail if necessary.
766 */
767 extern void
768 checker_list_replace_range(
769 checker_list_t *list,
770 entry_checker_range_t old_range,
771 entry_checker_range_t new_range);
772
773 /*
774 * Convenience function to replace one checker with another.
775 * The two checkers must have the same start address and size.
776 */
777 static inline void
checker_list_replace_checker(checker_list_t * list,vm_entry_checker_t * old_checker,vm_entry_checker_t * new_checker)778 checker_list_replace_checker(
779 checker_list_t *list,
780 vm_entry_checker_t *old_checker,
781 vm_entry_checker_t *new_checker)
782 {
783 checker_list_replace_range(list,
784 (entry_checker_range_t){ old_checker, old_checker },
785 (entry_checker_range_t){ new_checker, new_checker });
786 }
787
788 /*
789 * Convenience function to replace one checker with several checkers.
790 * The old and the new must have the same start address and size.
791 */
792 static inline void
checker_list_replace_checker_with_range(checker_list_t * list,vm_entry_checker_t * old_checker,entry_checker_range_t new_checkers)793 checker_list_replace_checker_with_range(
794 checker_list_t *list,
795 vm_entry_checker_t *old_checker,
796 entry_checker_range_t new_checkers)
797 {
798 checker_list_replace_range(list,
799 (entry_checker_range_t){ old_checker, old_checker },
800 new_checkers);
801 }
802
803 /*
804 * Remove a contiguous range of checkers from a checker list.
805 * The checkers are freed.
806 * The checkers are replaced by a new hole checker.
807 * VM allocations are unaffected.
808 */
809 extern void
810 checker_list_free_range(
811 checker_list_t *list,
812 entry_checker_range_t range);
813
814 /* Convenience function for checker_list_remove_range() of a single checker. */
815 static inline void
checker_list_free_checker(checker_list_t * list,vm_entry_checker_t * checker)816 checker_list_free_checker(
817 checker_list_t *list,
818 vm_entry_checker_t *checker)
819 {
820 checker_list_free_range(list, (entry_checker_range_t){ checker, checker });
821 }
822
823 /*
824 * Compute the end address of an entry.
825 * `checker->address + checker->size`, with integer overflow protection.
826 */
827 static inline mach_vm_address_t
checker_end_address(vm_entry_checker_t * checker)828 checker_end_address(vm_entry_checker_t *checker)
829 {
830 mach_vm_address_t end;
831 bool overflowed = __builtin_add_overflow(checker->address, checker->size, &end);
832 assert(!overflowed);
833 return end;
834 }
835
836 /*
837 * Return true if address is within checker's [start, end)
838 */
839 static inline bool
checker_contains_address(vm_entry_checker_t * checker,mach_vm_address_t address)840 checker_contains_address(vm_entry_checker_t *checker, mach_vm_address_t address)
841 {
842 return address >= checker->address && address < checker_end_address(checker);
843 }
844
845 /*
846 * Compute the share_mode value of an entry.
847 * This value is computed from other values in the checker and its object.
848 */
849 extern uint8_t
850 checker_share_mode(
851 vm_entry_checker_t *checker);
852
853 /*
854 * Compute the is_submap value of a map entry.
855 */
856 static inline bool
checker_is_submap(vm_entry_checker_t * checker)857 checker_is_submap(vm_entry_checker_t *checker)
858 {
859 return checker->kind == Submap;
860 }
861
862 /*
863 * Submap slide (checker_get_and_slide_submap_checkers)
864 *
865 * We want a 1:1 relationship between checkers and map entries.
866 * This is complicated in submaps, where the parent map's view
867 * of the submap uses different addresses.
868 *
869 * Our solution:
870 * 1. Submap content checkers store the address as if inside the submap.
871 * 2. When using a submap content checker in a parent map context,
872 * the checker is temporarily modified to use parent-relative
873 * addresses instead ("slide").
874 *
875 * The checker_list_t for the submap keeps track of the slide state
876 * of its checkers. Some places assert that the submap is or is not slid.
877 *
878 * Note that this code only deals with constant submaps; therefore
879 * we don't need to worry about changing checker bounds while they
880 * are temporarily slid.
881 */
882
883 /*
884 * Return the nested checkers for a parent map's submap entry.
885 * Returns NULL if the checker is not a submap entry.
886 * The caller must call unslide_submap_checkers() when finished.
887 */
888 extern checker_list_t *
889 checker_get_and_slide_submap_checkers(vm_entry_checker_t *checker);
890
891 /*
892 * Undo the effects of get_and_slide_submap_checkers().
893 */
894 extern void
895 unslide_submap_checkers(checker_list_t *submap_checkers);
896
897 /*
898 * Convenience macro to call unslide_submap_checkers() at end of scope.
899 * The caller may manually unslide and then set their variable to NULL
900 * to cancel the automatic unslide.
901 */
902 static inline void
cleanup_unslide_submap_checkers(checker_list_t ** inout_submap_checkers)903 cleanup_unslide_submap_checkers(checker_list_t **inout_submap_checkers)
904 {
905 if (*inout_submap_checkers) {
906 unslide_submap_checkers(*inout_submap_checkers);
907 *inout_submap_checkers = NULL;
908 }
909 }
910 #define DEFER_UNSLIDE \
911 __attribute__((cleanup(cleanup_unslide_submap_checkers)))
912
913
914 /*
915 * Adjust a start/end so that it does not extend beyond a limit.
916 * If start/end falls outside the limit, the output's size will
917 * be zero and its start will be indeterminate.
918 */
919 extern void
920 clamp_start_end_to_start_end(
921 mach_vm_address_t * const inout_start,
922 mach_vm_address_t * const inout_end,
923 mach_vm_address_t limit_start,
924 mach_vm_address_t limit_end);
925
926
927 /*
 * Adjust an address/size so that it does not extend beyond a limit.
 * If address/size falls outside the limit, the output size will
 * be zero and the start will be indeterminate.
931 */
932 extern void
933 clamp_address_size_to_address_size(
934 mach_vm_address_t * const inout_address,
935 mach_vm_size_t * const inout_size,
936 mach_vm_address_t limit_address,
937 mach_vm_size_t limit_size);
938
939
940 /*
941 * Adjust an address range so it does not extend beyond an entry's bounds.
942 * When clamping to a submap entry:
943 * checker is a submap entry in the parent map.
944 * address and size are in the parent map's address space on entry and on exit.
945 */
946 extern void
947 clamp_address_size_to_checker(
948 mach_vm_address_t * const inout_address,
949 mach_vm_size_t * const inout_size,
950 vm_entry_checker_t *checker);
951
952 /*
953 * Adjust an address range so it does not extend beyond an entry's bounds.
954 * When clamping to a submap entry:
955 * checker is a submap entry in the parent map.
956 * address and size are in the parent map's address space on entry and on exit.
957 */
958 extern void
959 clamp_start_end_to_checker(
960 mach_vm_address_t * const inout_start,
961 mach_vm_address_t * const inout_end,
962 vm_entry_checker_t *checker);
963
964
965 /*
966 * Set the VM object that an entry points to.
967 * Replaces any existing object. Updates self_ref_count of any objects.
968 */
969 extern void
970 checker_set_object(vm_entry_checker_t *checker, vm_object_checker_t *obj_checker);
971
972 /*
973 * Set an entry's object to the null object.
974 * Identical to `checker_set_object(checker, find_object_checker_for_object_id(list, 0))`
975 */
976 extern void
977 checker_set_null_object(checker_list_t *list, vm_entry_checker_t *checker);
978
979 /*
980 * Set an entry's object to a copy of its current object,
981 * with the new_object->shadow = old_object.
982 * The entry's current object must not be null.
983 */
984 extern void
985 checker_make_shadow_object(checker_list_t *list, vm_entry_checker_t *checker);
986
987 /*
988 * If checker has a null VM object, change it to a new anonymous object.
989 */
990 extern void
991 checker_resolve_null_vm_object(
992 checker_list_t *checker_list,
993 vm_entry_checker_t *checker);
994
995 /*
996 * Update an entry's checker as if a fault occurred inside it.
997 * Assumes that all pages in the entry were faulted.
998 * Aborts if the fault appears to be a copy-on-write fault; this code does
999 * not attempt to handle that case.
1000 *
1001 * - resolves null objects
1002 * - sets the resident page count
1003 */
1004 extern void
1005 checker_fault_for_prot_not_cow(
1006 checker_list_t *checker_list,
1007 vm_entry_checker_t *checker,
1008 vm_prot_t fault_prot);
1009
1010
1011 /*
1012 * Conditionally unnest one checker in a submap.
1013 *
1014 * submap_parent is a parent map's submap entry.
1015 * *inout_next_address is the current address in the parent map,
1016 * within the bounds of submap_parent.
1017 * If the entry inside the submap that contains *inout_next_address is:
1018 * - unallocated:
1019 * advance *inout_next_address past the unallocated space and return NULL
1020 * - a writeable allocation:
1021 * unnest the appropriate range in the parent map,
1022 * advance *inout_next_address past the unnested range,
1023 * and return the unnested range's new checker
1024 * - a readable allocation:
1025 * - (unnest_readonly == false) advance past it, same as for unallocated holes
1026 * - (unnest_readonly == true) unnest it, same as for writeable allocations
1027 *
1028 * Set all_overwritten = true if the newly-unnested memory will
1029 * be promptly written to (thus resolving null objects and collapsing COW shadow chains).
1030 */
1031 extern vm_entry_checker_t *
1032 checker_list_try_unnest_one_entry_in_submap(
1033 checker_list_t *checker_list,
1034 vm_entry_checker_t *submap_parent,
1035 bool unnest_readonly,
1036 bool all_overwritten,
1037 mach_vm_address_t * const inout_next_address);
1038
1039 /*
1040 * Perform a clip-left operation on a checker, similar to vm_map_clip_left.
1041 * Entry `right` is divided at `split`.
1042 * Returns the new left-hand entry.
1043 * Returns NULL if no split occurred.
1044 * Updates list->head and/or list->tail if necessary.
1045 */
1046 extern vm_entry_checker_t *
1047 checker_clip_left(
1048 checker_list_t *list,
1049 vm_entry_checker_t *right,
1050 mach_vm_address_t split);
1051
1052 /*
1053 * Perform a clip-right operation on a checker, similar to vm_map_clip_right.
1054 * Entry `left` is divided at `split`.
1055 * Returns the new right-hand entry.
1056 * Returns NULL if no split occurred.
1057 * Updates list->head and/or list->tail if necessary.
1058 */
1059 extern vm_entry_checker_t *
1060 checker_clip_right(
1061 checker_list_t *list,
1062 vm_entry_checker_t *left,
1063 mach_vm_address_t split);
1064
1065 /*
1066 * Perform a simplify operation on a checker and the entry to its left.
1067 * If coalescing occurs, `right` is preserved and
1068 * the entry to the left is destroyed.
1069 */
1070 extern void
1071 checker_simplify_left(
1072 checker_list_t *list,
1073 vm_entry_checker_t *right);
1074
1075
1076 /*
1077 * Build a vm_checker for a newly-created memory region.
1078 * The region is assumed to be the result of vm_allocate().
1079 * The new checker is not linked into the list.
1080 */
1081 extern vm_entry_checker_t *
1082 make_checker_for_vm_allocate(
1083 checker_list_t *list,
1084 mach_vm_address_t address,
1085 mach_vm_size_t size,
1086 int flags_and_tag);
1087
1088 /*
1089 * Create VM entries and VM entry checkers
1090 * for the given VM entry templates.
1091 *
1092 * Entries will be created consecutively in contiguous memory, as specified.
1093 * "Holes" will be deallocated during construction;
1094 * be warned that the holes may become filled by other allocations
1095 * including Rosetta's translations, which will cause the checker to
1096 * fail later.
1097 *
1098 * Alignment handling:
1099 * The first entry gets `alignment_mask` alignment.
1100 * After that it is the caller's responsibility to arrange their
1101 * templates in a way that yields the alignments they want.
1102 */
1103 extern __attribute__((overloadable))
1104 checker_list_t *
1105 create_vm_state(
1106 const vm_entry_template_t entry_templates[],
1107 unsigned entry_template_count,
1108 const vm_object_template_t object_templates[],
1109 unsigned object_template_count,
1110 mach_vm_size_t alignment_mask,
1111 const char *message);
1112
1113 static inline __attribute__((overloadable))
1114 checker_list_t *
create_vm_state(const vm_entry_template_t templates[],unsigned count,mach_vm_size_t alignment_mask)1115 create_vm_state(
1116 const vm_entry_template_t templates[],
1117 unsigned count,
1118 mach_vm_size_t alignment_mask)
1119 {
1120 return create_vm_state(templates, count, NULL, 0,
1121 alignment_mask, "create_vm_state");
1122 }
1123
1124 /*
1125 * Like create_vm_state, but the alignment mask defaults to PAGE_MASK
1126 * and the template list is terminated by END_ENTRIES
1127 */
1128 static inline __attribute__((overloadable))
1129 checker_list_t *
create_vm_state(const vm_entry_template_t templates[])1130 create_vm_state(const vm_entry_template_t templates[])
1131 {
1132 return create_vm_state(templates, count_templates(templates), PAGE_MASK);
1133 }
1134
1135 /*
1136 * Like create_vm_state, but the alignment mask defaults to PAGE_MASK.
1137 */
1138 static inline __attribute__((overloadable))
1139 checker_list_t *
create_vm_state(const vm_entry_template_t templates[],unsigned count)1140 create_vm_state(const vm_entry_template_t templates[], unsigned count)
1141 {
1142 return create_vm_state(templates, count, PAGE_MASK);
1143 }
1144
1145
1146 /*
1147 * Verify that the VM's state (as determined by vm_region)
1148 * matches the expected state from a list of checkers.
1149 *
1150 * Returns TestSucceeded if the state is good, TestFailed otherwise.
1151 *
1152 * Failures are also reported as darwintest failures (typically T_FAIL)
1153 * and failure details of expected and actual state are reported with T_LOG.
1154 */
1155 extern test_result_t
1156 verify_vm_state(checker_list_t *checker_list, const char *message);
1157
1158 /*
1159 * Perform VM read and/or write faults on every page spanned by a list of checkers,
1160 * and verify that exceptions are delivered (or not) as expected.
1161 * This is a destructive test: the faults may change VM state (for example
1162 * resolving COW) but the checkers are not updated.
1163 *
1164 * Returns TestSucceeded if the state is good, TestFailed otherwise.
1165 *
1166 * Failures are also reported as darwintest failures (typically T_FAIL)
1167 * and failure details of expected and actual state are reported with T_LOG.
1168 */
1169 extern test_result_t
1170 verify_vm_faultability(
1171 checker_list_t *checker_list,
1172 const char *message,
1173 bool verify_reads,
1174 bool verify_writes);
1175
1176 /*
1177 * Like verify_vm_faultability, but reads and/or writes
1178 * from a single checker's memory.
1179 * Returns true if the verification succeeded.
1180 */
1181 extern bool
1182 verify_checker_faultability(
1183 vm_entry_checker_t *checker,
1184 const char *message,
1185 bool verify_reads,
1186 bool verify_writes);
1187
1188 /*
1189 * Like verify_checker_faultability, but reads and/or writes
1190 * only part of a single checker's memory.
1191 * Returns true if the verification succeeded.
1192 */
1193 extern bool
1194 verify_checker_faultability_in_address_range(
1195 vm_entry_checker_t *checker,
1196 const char *message,
1197 bool verify_reads,
1198 bool verify_writes,
1199 mach_vm_address_t checked_address,
1200 mach_vm_size_t checked_size);
1201
1202 /*
1203 * Specification for a single trial:
1204 * - the test's name
1205 * - the templates for the virtual memory layout
1206 * - the address range within that virtual memory
1207 * layout that the tested operation should use.
1208 */
1209 typedef struct vm_config_s {
1210 char *config_name;
1211
1212 /*
1213 * Test's start address is the start of the first
1214 * entry plus start_adjustment. Test's end address
1215 * is the end of the last entry plus end_adjustment.
1216 * When not zero, start_adjustment is typically positive
1217 * and end_adjustment is typically negative.
1218 */
1219 mach_vm_size_t start_adjustment;
1220 mach_vm_size_t end_adjustment;
1221
1222 /* First map entry gets this alignment. */
1223 mach_vm_size_t alignment_mask;
1224
1225 vm_entry_template_t *entry_templates;
1226 unsigned entry_template_count;
1227 vm_object_template_t *object_templates;
1228 unsigned object_template_count;
1229
1230 vm_entry_template_t *submap_entry_templates;
1231 unsigned submap_entry_template_count;
1232 vm_object_template_t *submap_object_templates;
1233 unsigned submap_object_template_count;
1234 } vm_config_t;
1235
1236 __attribute__((overloadable))
1237 extern vm_config_t *
1238 make_vm_config(
1239 const char *name,
1240 vm_entry_template_t *entry_templates,
1241 vm_object_template_t *object_templates,
1242 vm_entry_template_t *submap_entry_templates,
1243 vm_object_template_t *submap_object_templates,
1244 mach_vm_size_t start_adjustment,
1245 mach_vm_size_t end_adjustment,
1246 mach_vm_size_t alignment_mask);
1247
1248 /*
1249 * make_vm_config() variants with fewer parameters
1250 * (convenient for hardcoded initializer syntax)
1251 *
1252 * Variants that allow submap entries force submap-compatible alignment.
1253 * Variants without submap entries use no alignment.
1254 */
1255
1256 __attribute__((overloadable))
1257 static inline vm_config_t *
make_vm_config(const char * name,vm_entry_template_t * entry_templates,vm_object_template_t * object_templates,vm_entry_template_t * submap_entry_templates,vm_object_template_t * submap_object_templates,mach_vm_size_t start_adjustment,mach_vm_size_t end_adjustment)1258 make_vm_config(
1259 const char *name,
1260 vm_entry_template_t *entry_templates,
1261 vm_object_template_t *object_templates,
1262 vm_entry_template_t *submap_entry_templates,
1263 vm_object_template_t *submap_object_templates,
1264 mach_vm_size_t start_adjustment,
1265 mach_vm_size_t end_adjustment)
1266 {
1267 return make_vm_config(name, entry_templates, object_templates,
1268 submap_entry_templates, submap_object_templates,
1269 start_adjustment, end_adjustment, SUBMAP_ALIGNMENT_MASK);
1270 }
1271
1272 __attribute__((overloadable))
1273 static inline vm_config_t *
make_vm_config(const char * name,vm_entry_template_t * entry_templates,vm_object_template_t * object_templates,mach_vm_size_t start_adjustment,mach_vm_size_t end_adjustment)1274 make_vm_config(
1275 const char *name,
1276 vm_entry_template_t *entry_templates,
1277 vm_object_template_t *object_templates,
1278 mach_vm_size_t start_adjustment,
1279 mach_vm_size_t end_adjustment)
1280 {
1281 return make_vm_config(name, entry_templates, object_templates,
1282 NULL, NULL,
1283 start_adjustment, end_adjustment, 0);
1284 }
1285
1286 __attribute__((overloadable))
1287 static inline vm_config_t *
make_vm_config(const char * name,vm_entry_template_t * entry_templates,mach_vm_size_t start_adjustment,mach_vm_size_t end_adjustment)1288 make_vm_config(
1289 const char *name,
1290 vm_entry_template_t *entry_templates,
1291 mach_vm_size_t start_adjustment,
1292 mach_vm_size_t end_adjustment)
1293 {
1294 return make_vm_config(name, entry_templates, NULL,
1295 NULL, NULL,
1296 start_adjustment, end_adjustment, 0);
1297 }
1298
1299 __attribute__((overloadable))
1300 static inline vm_config_t *
make_vm_config(const char * name,vm_entry_template_t * entry_templates,vm_object_template_t * object_templates)1301 make_vm_config(
1302 const char *name,
1303 vm_entry_template_t *entry_templates,
1304 vm_object_template_t *object_templates)
1305 {
1306 return make_vm_config(name, entry_templates, object_templates,
1307 NULL, NULL,
1308 0, 0, 0);
1309 }
1310
1311 __attribute__((overloadable))
1312 static inline vm_config_t *
make_vm_config(const char * name,vm_entry_template_t * entry_templates)1313 make_vm_config(
1314 const char *name,
1315 vm_entry_template_t *entry_templates)
1316 {
1317 return make_vm_config(name, entry_templates, NULL,
1318 NULL, NULL,
1319 0, 0, 0);
1320 }
1321
1322
1323 /*
1324 * Like create_vm_state, but also computes the config's desired address range.
1325 */
1326 extern void
1327 create_vm_state_from_config(
1328 vm_config_t *config,
1329 checker_list_t ** const out_checker_list,
1330 mach_vm_address_t * const out_start_address,
1331 mach_vm_address_t * const out_end_address);
1332
1333
1334 /*
1335 * Logs the contents of checkers.
1336 * Also logs the contents of submap checkers recursively.
1337 */
1338 extern void
1339 dump_checker_range(entry_checker_range_t list);
1340
1341 /*
1342 * Logs info from vm_region() for the address ranges spanned by the checkers.
1343 * Also logs the contents of submaps recursively.
1344 */
1345 extern void
1346 dump_region_info_for_entries(entry_checker_range_t list);
1347
1348
1349 /*
1350 * Convenience functions for logging.
1351 */
1352
1353 extern const char *
1354 name_for_entry_kind(vm_entry_template_kind_t kind);
1355
1356 extern const char *
1357 name_for_kr(kern_return_t kr);
1358
1359 extern const char *
1360 name_for_prot(vm_prot_t prot);
1361
1362 extern const char *
1363 name_for_inherit(vm_inherit_t inheritance);
1364
1365 extern const char *
1366 name_for_behavior(vm_behavior_t behavior);
1367
1368 extern const char *
1369 name_for_bool(boolean_t value);
1370
1371 extern const char *
1372 name_for_share_mode(uint8_t share_mode);
1373
1374 /* Convenience macro for compile-time array size */
1375 #define countof(array) \
1376 _Pragma("clang diagnostic push") \
1377 _Pragma("clang diagnostic error \"-Wsizeof-pointer-div\"") \
1378 (sizeof(array)/sizeof((array)[0])) \
1379 _Pragma("clang diagnostic pop")
1380
/* Convenience macro for a heap allocated formatted string deallocated at end of scope. */
static inline void
cleanup_cstring(char **ptr)
{
	free(*ptr);     /* free(NULL) is a harmless no-op */
	*ptr = NULL;    /* defend against dangling use and double cleanup */
}
#define CLEANUP_CSTRING __attribute__((cleanup(cleanup_cstring)))
/*
 * str is initialized to NULL so that if asprintf() fails (leaving its
 * output indeterminate) the scope-end cleanup never frees a garbage pointer.
 */
#define TEMP_CSTRING(str, format, ...) \
	char *str CLEANUP_CSTRING = NULL; \
	asprintf(&str, format, __VA_ARGS__)
1391
1392 /*
1393 * Returns true if each bit set in `values` is also set in `container`.
1394 */
1395 static inline bool
prot_contains_all(vm_prot_t container,vm_prot_t values)1396 prot_contains_all(vm_prot_t container, vm_prot_t values)
1397 {
1398 return (container & values) == values;
1399 }
1400
1401 /*
1402 * Convenience functions for address arithmetic
1403 */
1404
1405 static inline mach_vm_address_t
max(mach_vm_address_t a,mach_vm_address_t b)1406 max(mach_vm_address_t a, mach_vm_address_t b)
1407 {
1408 if (a > b) {
1409 return a;
1410 } else {
1411 return b;
1412 }
1413 }
1414
1415 static inline mach_vm_address_t
min(mach_vm_address_t a,mach_vm_address_t b)1416 min(mach_vm_address_t a, mach_vm_address_t b)
1417 {
1418 if (a < b) {
1419 return a;
1420 } else {
1421 return b;
1422 }
1423 }
1424
1425
1426 /*
1427 * Call vm_region on an address.
1428 * If the query address is mapped at that submap depth:
1429 * - Sets *inout_address and *out_size to that map entry's address and size.
1430 * [*inout_address, *inout_address + *out_size) contains the query address.
1431 * - Sets the info from vm_region.
1432 * - Returns true.
1433 * If the query address is unmapped, or not mapped at that submap depth:
1434 * - Sets *inout_address to the address of the next map entry, or ~0 if there is none.
1435 * - Sets *out_size to zero.
1436 * - Returns false.
1437 */
1438 __attribute__((overloadable))
1439 extern bool
1440 get_info_for_address(
1441 mach_vm_address_t *inout_address,
1442 mach_vm_size_t *out_size,
1443 vm_region_submap_info_data_64_t *out_info,
1444 uint32_t submap_depth);
1445
1446 __attribute__((overloadable))
1447 static inline bool
get_info_for_address(mach_vm_address_t * const inout_address,mach_vm_size_t * const out_size,vm_region_submap_info_data_64_t * const out_info)1448 get_info_for_address(
1449 mach_vm_address_t * const inout_address,
1450 mach_vm_size_t * const out_size,
1451 vm_region_submap_info_data_64_t * const out_info)
1452 {
1453 return get_info_for_address(inout_address, out_size, out_info, 0);
1454 }
1455
1456 /*
1457 * Like get_info_for_address(), but
1458 * (1) it's faster, and
1459 * (2) it does not get the right ref_count or shadow_depth values from vm_region.
1460 */
1461 __attribute__((overloadable))
1462 extern bool
1463 get_info_for_address_fast(
1464 mach_vm_address_t *inout_address,
1465 mach_vm_size_t *out_size,
1466 vm_region_submap_info_data_64_t *out_info,
1467 uint32_t submap_depth);
1468
1469 __attribute__((overloadable))
1470 static inline bool
get_info_for_address_fast(mach_vm_address_t * const inout_address,mach_vm_size_t * const out_size,vm_region_submap_info_data_64_t * const out_info)1471 get_info_for_address_fast(
1472 mach_vm_address_t * const inout_address,
1473 mach_vm_size_t * const out_size,
1474 vm_region_submap_info_data_64_t * const out_info)
1475 {
1476 return get_info_for_address_fast(inout_address, out_size, out_info, 0);
1477 }
1478
1479 /*
1480 * Convenience function to get object_id_full from vm_region at an address.
1481 * Returns zero if the address is mapped but has a null object.
1482 * Aborts if the address is not mapped.
1483 */
1484 extern uint64_t
1485 get_object_id_for_address(mach_vm_address_t address);
1486
1487 /*
1488 * Convenience function to get user_tag from vm_region at an address.
1489 * Returns zero if the address is not mapped.
1490 */
1491 extern uint16_t
1492 get_user_tag_for_address(mach_vm_address_t address);
1493
1494 /*
1495 * Convenience function to get user_tag from vm_region at an address,
1496 * if that tag is within the app-specific tag range.
1497 * Returns zero if the address is not mapped.
1498 * Returns zero if the address's tag is not within the app-specific range
1499 * [VM_MEMORY_APPLICATION_SPECIFIC_1, VM_MEMORY_APPLICATION_SPECIFIC_16]
1500 *
1501 * This is used by tests that copy user tags from nearby memory.
1502 * The "nearby" memory might not be part of the tested range.
1503 * Copying an arbitrary user tag from outside is undesirable
1504 * because the VM changes some of its behavior for some tag
1505 * values and the tests need to see consistent behavior instead.
1506 */
1507 extern uint16_t
1508 get_app_specific_user_tag_for_address(mach_vm_address_t address);
1509
1510 /*
1511 * Convenience functions for vm_wire's host_priv port.
1512 * host_priv() returns the port, or halts if it can't.
1513 * host_priv_allowed() returns true or false.
1514 * The host_priv port requires root on macOS.
1515 */
1516 extern host_priv_t
1517 host_priv(void);
1518
1519 extern bool
1520 host_priv_allowed(void);
1521
1522 #endif /* VM_CONFIGURATOR_H */
1523