xref: /xnu-10063.141.1/tests/vm/vm_ranges.c (revision d8b80295118ef25ac3a784134bcf95cd8e88109f)
#include <darwintest.h>
#include <darwintest_utils.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/mman.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_types.h>
#include <TargetConditionals.h>
12 
13 enum {
14 	DEFAULT = 0,
15 	HEAP
16 };
17 
18 #define ALLOCATION_SIZE (PAGE_SIZE)
19 static char _filepath[MAXPATHLEN];
20 static struct mach_vm_range parent_default;
21 static struct mach_vm_range parent_heap;
22 
23 #define CHILD_PROCESS_COUNT     (20)
24 #define MAX_VM_ADDRESS          (0xFC0000000ULL)
25 #undef KiB
26 #undef MiB
27 #undef GiB
28 #define KiB(x)  ((uint64_t)(x) << 10)
29 #define MiB(x)  ((uint64_t)(x) << 20)
30 #define GiB(x)  ((uint64_t)(x) << 30)
31 
32 /*
33  * Choose an arbitrary memory tag which applies to each of default/heap range
34  * for testing placement of allocations.
35  */
36 #define VM_MEMORY_RANGE_DEFAULT (VM_MAKE_TAG(VM_MEMORY_STACK))
37 #define VM_MEMORY_RANGE_HEAP    (VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL))
38 
39 #define RANGE_DEFAULT_FLAGS     (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_DEFAULT)
40 #define RANGE_HEAP_FLAGS        (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_HEAP)
41 
42 T_GLOBAL_META(
43 	T_META_NAMESPACE("xnu.vm"),
44 	T_META_RADAR_COMPONENT_NAME("xnu"),
45 	T_META_RADAR_COMPONENT_VERSION("VM"),
46 	T_META_ENABLED(!TARGET_OS_OSX),
47 	T_META_OWNER("mmorran")
48 	);
49 
static bool
ranges_enabled(void)
{
	/*
	 * Probe the default-range sysctl to detect support.  The call fails
	 * with ENOENT or EINVAL when ranges are either not supported or not
	 * enabled for this process.
	 */
	struct mach_vm_range probe = { 0 };
	size_t probe_sz = sizeof(probe);
	int rc;

	rc = sysctlbyname("vm.vm_map_user_range_default", &probe, &probe_sz,
	    NULL, 0);
	return rc == 0;
}
65 
/*
 * Skip the current test unless VM map ranges are enabled for this process.
 * Wrapped in do/while (0) so the macro expands to a single statement and is
 * safe inside an unbraced if/else (the bare-if form had a dangling-else
 * hazard).
 */
#define CHECK_RANGES_ENABLED() \
	do { \
	        if (!ranges_enabled()) { \
	                T_SKIP("VM map ranges not enabled"); \
	        } \
	} while (0)
70 
/*
 * Query the kernel for the requested user VM range (DEFAULT or HEAP).
 * Asserts on sysctl failure.  An unknown target_range previously fell
 * through silently and returned a zeroed range; now it fails the test.
 */
static struct mach_vm_range
get_range(int target_range)
{
	int ret;
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);

	bzero(&range, sizeof(range));

	switch (target_range) {
	case DEFAULT:
		ret = sysctlbyname("vm.vm_map_user_range_default", &range, &range_sz, NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user default range");
		break;

	case HEAP:
		ret = sysctlbyname("vm.vm_map_user_range_heap", &range, &range_sz, NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user heap range");
		break;

	default:
		/* was: silent fall-through returning a zeroed range */
		T_FAIL("get_range: invalid target_range %d", target_range);
		break;
	}

	return range;
}
100 
101 static mach_vm_address_t
assert_allocate(mach_vm_address_t dst,int vm_flags)102 assert_allocate(mach_vm_address_t dst, int vm_flags)
103 {
104 	int ret = mach_vm_allocate(mach_task_self(), &dst, ALLOCATION_SIZE, vm_flags);
105 	T_ASSERT_MACH_SUCCESS(ret, "vm_allocate");
106 	return dst;
107 }
108 
109 static void
assert_in_range(struct mach_vm_range range,mach_vm_offset_t addr)110 assert_in_range(struct mach_vm_range range, mach_vm_offset_t addr)
111 {
112 	T_LOG("checking if %llx <= %llx <= %llx", range.min_address, addr,
113 	    range.max_address);
114 	T_EXPECT_GE(addr, range.min_address, "allocation above heap min address");
115 	T_EXPECT_LE(addr, range.max_address, "allocation below heap max address");
116 }
117 
118 static void
assert_in_heap_range(mach_vm_offset_t addr)119 assert_in_heap_range(mach_vm_offset_t addr)
120 {
121 	struct mach_vm_range range = get_range(HEAP);
122 
123 	assert_in_range(range, addr);
124 }
125 
126 static void *
assert_mmap(void * addr,int fd,int flags)127 assert_mmap(void *addr, int fd, int flags)
128 {
129 	void *ret = mmap(addr, ALLOCATION_SIZE, VM_PROT_READ | VM_PROT_WRITE,
130 	    flags, fd, 0);
131 	T_EXPECT_NE(ret, MAP_FAILED, "mmap should not have MAP_FAILED");
132 	T_EXPECT_NE(ret, NULL, "mmap should have returned a valid pointer");
133 	return ret;
134 }
135 
136 static void
assert_allocate_eq(mach_vm_address_t dst,int vm_flags)137 assert_allocate_eq(mach_vm_address_t dst, int vm_flags)
138 {
139 	mach_vm_address_t target = dst;
140 
141 	T_ASSERT_MACH_SUCCESS(mach_vm_allocate(mach_task_self(), &target,
142 	    ALLOCATION_SIZE, vm_flags), "vm_allocate");
143 
144 	T_EXPECT_EQ(target, dst, "target/dst differ");
145 }
146 
147 static mach_vm_address_t
assert_allocate_in_range(int target_range,mach_vm_address_t dst,int vm_flags)148 assert_allocate_in_range(int target_range, mach_vm_address_t dst, int vm_flags)
149 {
150 	struct mach_vm_range range = get_range(target_range);
151 	dst = assert_allocate(dst, vm_flags);
152 
153 	assert_in_range(range, (mach_vm_offset_t)dst);
154 
155 	return dst;
156 }
157 
158 static void *
assert_mmap_in_range(void * addr,int target_range,int fd,int flags)159 assert_mmap_in_range(void *addr, int target_range, int fd, int flags)
160 {
161 	struct mach_vm_range range = get_range(target_range);
162 	void *dst = assert_mmap(addr, fd, flags);
163 
164 	assert_in_range(range, (mach_vm_offset_t)dst);
165 
166 	return dst;
167 }
168 
/*
 * Run child_test in a forked child process and wait up to 30s for the
 * child to exit.  Used to verify that VM range state survives fork().
 */
__attribute__((overloadable))
static void
fork_child_test(void (^child_test)(void))
{
	pid_t child_pid;
	int err;

	child_pid = fork();

	if (child_pid == 0) {
		/* child process: run the test body, then exit cleanly */
		T_LOG("in child");
		child_test();
		exit(0);
	} else {
		/* parent: fork() returns -1 on failure, child pid otherwise */
		T_QUIET; T_ASSERT_POSIX_SUCCESS(child_pid, "fork process");

		/* wait for child process to exit (30s timeout) */
		if (dt_waitpid(child_pid, &err, NULL, 30) == false) {
			T_FAIL("dt_waitpid() failed on child pid %d", child_pid);
		}
	}
}
192 
193 __attribute__((overloadable))
194 static void
fork_child_test(void (* child_test)(void))195 fork_child_test(void (*child_test)(void))
196 {
197 	fork_child_test(^{
198 		child_test();
199 	});
200 }
201 
202 static void
cleanup_file(void)203 cleanup_file(void)
204 {
205 	unlink(_filepath);
206 	bzero(_filepath, MAXPATHLEN);
207 }
208 
T_DECL(range_allocate_heap,
    "ensure malloc tagged memory is allocated within the heap range")
{
	CHECK_RANGES_ENABLED();

	/* MALLOC_SMALL-tagged anywhere-allocation must land in the heap range. */
	assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
}
216 
T_DECL(range_allocate_anywhere,
    "ensure allocation is within target range when hint is outwith range")
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);

	/* Hint just below the heap range; allocation must still land inside it. */
	assert_allocate_in_range(HEAP, range.min_address - ALLOCATION_SIZE, RANGE_HEAP_FLAGS);
}
226 
T_DECL(range_allocate_stack,
    "ensure a stack allocation is in the default range")
{
	CHECK_RANGES_ENABLED();

	/* STACK-tagged anywhere-allocation must land in the default range. */
	assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
}
234 
/*
 * Pick an address inside the range selected by `heap` (a VM tag), then
 * verify VM_FLAGS_FIXED allocations at that address succeed regardless of
 * which range tag accompanies them (fixed target wins over the tag).
 * Order matters: each pair first allocates at the free address, then
 * re-allocates with VM_FLAGS_OVERWRITE over the live mapping.
 */
static void
ensure_fixed_mappings_succeed_cross(int heap)
{
	vm_map_address_t addr;

	/* discover a valid address in the tag's range, then free it */
	addr = assert_allocate(0, VM_FLAGS_ANYWHERE | heap);
	vm_deallocate(mach_task_self(), addr, ALLOCATION_SIZE);

	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_MEMORY_RANGE_DEFAULT);
	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MEMORY_RANGE_DEFAULT);
	vm_deallocate(mach_task_self(), addr, ALLOCATION_SIZE);

	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_MEMORY_RANGE_HEAP);
	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MEMORY_RANGE_HEAP);
	vm_deallocate(mach_task_self(), addr, ALLOCATION_SIZE);
}
251 
/*
 * A VM_FLAGS_FIXED allocation at an address outside every user range must
 * fail with KERN_INVALID_ADDRESS.
 */
static void
ensure_rogue_fixed_fails(void)
{
	struct mach_vm_range def = get_range(DEFAULT);
	struct mach_vm_range heap = get_range(HEAP);
	mach_vm_address_t addr;
	kern_return_t kr;

	if (def.max_address + 3 * ALLOCATION_SIZE <= heap.min_address) {
		/* gap between default and heap is big enough: aim inside it */
		addr = heap.min_address - 2 * ALLOCATION_SIZE;
	} else {
		/*
		 * in the unlikely event when there's no space
		 * between default and heap, then there must be
		 * a hole after heap.
		 */
		addr = heap.max_address + ALLOCATION_SIZE;
	}

	kr = mach_vm_allocate(mach_task_self(), &addr,
	    ALLOCATION_SIZE, VM_FLAGS_FIXED);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "should fail");
}
275 
276 static void
ensure_fixed_mapping(void)277 ensure_fixed_mapping(void)
278 {
279 	ensure_fixed_mappings_succeed_cross(VM_MEMORY_RANGE_DEFAULT);
280 	ensure_fixed_mappings_succeed_cross(VM_MEMORY_RANGE_HEAP);
281 
282 	ensure_rogue_fixed_fails();
283 }
284 
T_DECL(range_allocate_fixed, "ensure fixed target is honored (even with an incorrect tag)")
{
	CHECK_RANGES_ENABLED();

	/* run once in this process and once in a forked child */
	ensure_fixed_mapping();
	fork_child_test(ensure_fixed_mapping);
}
292 
T_DECL(range_mmap_anon, "ensure anon mapping within HEAP range")
{
	CHECK_RANGES_ENABLED();

	/* untagged anonymous mmap defaults to the heap range */
	assert_mmap_in_range(NULL, HEAP, -1, MAP_ANON | MAP_PRIVATE);
}
299 
T_DECL(range_mmap_file, "ensure file is mapped within HEAP range")
{
	CHECK_RANGES_ENABLED();

	int fd = -1;

	/* prepare temp file (one page long), removed via atexit handler */
	strncpy(_filepath, "/tmp/mapfile.XXXXXX", MAXPATHLEN);
	T_ASSERT_POSIX_SUCCESS(fd = mkstemp(_filepath), NULL);
	atexit(cleanup_file);

	T_ASSERT_POSIX_SUCCESS(ftruncate(fd, (off_t)ALLOCATION_SIZE), NULL);

	/* map it into the expected range (differs by platform) */
#if TARGET_OS_OSX
	T_LOG("mapping file in DEFAULT range");
	assert_mmap_in_range(NULL, DEFAULT, fd, MAP_FILE | MAP_SHARED);
#else
	T_LOG("mapping file in HEAP range");
	assert_mmap_in_range(NULL, HEAP, fd, MAP_FILE | MAP_SHARED);
#endif
}
322 
323 
T_DECL(range_mmap_alias_tag, "ensure anon mapping with tag is honored")
{
	CHECK_RANGES_ENABLED();

	/*
	 * For MAP_ANON mappings on Darwin, the fd argument carries the Mach
	 * VM tag (see mmap(2)) — passing VM_MEMORY_RANGE_DEFAULT as "fd"
	 * here is intentional, not a mistake.
	 */
	assert_mmap_in_range(NULL, DEFAULT, VM_MEMORY_RANGE_DEFAULT, MAP_ANON | MAP_PRIVATE);
}
330 
T_DECL(range_mmap_with_low_hint,
    "ensure allocation is within target range when hint is below range")
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	/* hint one allocation below the heap range's floor */
	mach_vm_address_t target = range.min_address - ALLOCATION_SIZE;

	assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);
}
341 
T_DECL(range_mmap_with_high_hint,
    "ensure allocation is within target range when hint is within range")
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	/* hint well inside the heap range; expected to be honored exactly */
	mach_vm_address_t target = range.max_address - 100 * ALLOCATION_SIZE;

	void *dst = assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);

	T_EXPECT_EQ((mach_vm_address_t)dst, target, "unexpected allocation address");
}
354 
T_DECL(range_mmap_with_bad_hint,
    "ensure allocation fails when hint is above range")
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	/* hint 4GiB above the heap range ceiling */
	mach_vm_address_t target = range.max_address + 0x100000000;

	/* mmap should retry with 0 base on initial KERN_NO_SPACE failure */
	assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);
}
366 
T_DECL(range_mach_vm_map_with_bad_hint,
    "ensure mach_vm_map fails when hint is above range")
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	/* hint 4GiB above the heap range ceiling */
	mach_vm_address_t addr = range.max_address + 0x100000000;

	/*
	 * unlike mmap & vm_allocate, mach_vm_map should fail when given a hint
	 * outwith the target range (no retry at base 0).
	 */
	int ret = mach_vm_map(mach_task_self(), &addr, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_EQ(ret, KERN_NO_SPACE, "expected KERN_NO_SPACE");
}
385 
T_DECL(range_mach_vm_remap_default,
    "ensure mach_vm_remap is successful in default range")
{
	CHECK_RANGES_ENABLED();

	vm_prot_t curprot;
	vm_prot_t maxprot;

	/* source allocation in the default range; remap hint just above it */
	mach_vm_address_t addr = assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
	mach_vm_address_t target = addr + ALLOCATION_SIZE;

	int ret = mach_vm_remap(mach_task_self(), &target, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
	    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
	T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
}
402 
T_DECL(range_mach_vm_remap_heap_with_hint,
    "ensure mach_vm_remap is successful in heap range")
{
	CHECK_RANGES_ENABLED();

	vm_prot_t curprot;
	vm_prot_t maxprot;

	/* source allocation in the heap range; remap hint just above it */
	mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
	mach_vm_address_t target = addr + ALLOCATION_SIZE;

	int ret = mach_vm_remap(mach_task_self(), &target, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
	    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
	T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
	/* the remapped region must also land in the heap range */
	assert_in_heap_range(target);
}
420 
T_DECL(range_mach_vm_remap_heap,
    "ensure mach_vm_remap remains in same range")
{
	CHECK_RANGES_ENABLED();

	vm_prot_t curprot;
	vm_prot_t maxprot;

	/* source allocation in the heap range; remap with no hint (target = 0) */
	mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
	mach_vm_address_t target = 0;

	int ret = mach_vm_remap(mach_task_self(), &target, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
	    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
	T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
	/* the remapped region must stay in the same (heap) range */
	assert_in_heap_range(target);
}
438 
439 static void
ensure_range(void)440 ensure_range(void)
441 {
442 	struct mach_vm_range def = get_range(DEFAULT);
443 	struct mach_vm_range heap = get_range(HEAP);
444 
445 	T_EXPECT_GT(heap.min_address, def.max_address,
446 	    "ranges should not overlap");
447 	T_EXPECT_LE(heap.max_address, MAX_VM_ADDRESS,
448 	    "expected max <= %llx", MAX_VM_ADDRESS);
449 	T_EXPECT_EQ(heap.min_address,
450 	    heap.min_address & (unsigned long)~0x1FFFFF,
451 	    "expected alignment on 2MB TT boundary");
452 }
453 
454 static void
ensure_child_range(void)455 ensure_child_range(void)
456 {
457 	struct mach_vm_range def = get_range(DEFAULT);
458 	struct mach_vm_range heap = get_range(HEAP);
459 
460 	T_QUIET; T_EXPECT_EQ(def.min_address, parent_default.min_address,
461 	    "expected forked default min to be equal");
462 	T_QUIET; T_EXPECT_EQ(def.max_address, parent_default.max_address,
463 	    "expected forked default max to be equal");
464 	T_QUIET; T_EXPECT_EQ(heap.min_address, parent_heap.min_address,
465 	    "expected forked heap min to be equal");
466 	T_QUIET; T_EXPECT_EQ(heap.max_address, parent_heap.max_address,
467 	    "expected forked heap max to be equal");
468 }
469 
T_DECL(range_ensure_bounds, "ensure ranges respect map bounds")
{
	CHECK_RANGES_ENABLED();

	/* record parent ranges so forked children can compare against them */
	parent_default = get_range(DEFAULT);
	parent_heap = get_range(HEAP);

	ensure_range();

	/* fork several children; each must observe identical ranges */
	for (uint32_t i = 0; i < CHILD_PROCESS_COUNT; i++) {
		fork_child_test(ensure_child_range);
	}
}
483 
/*
 * Parse the "vm.malloc_ranges" sysctl string, formatted as two hex ranges
 * "min:max min:max", into void1/void2.  Returns false when the sysctl does
 * not exist (feature unsupported); asserts on any other failure or on a
 * malformed string.
 */
static bool
parse_void_ranges(struct mach_vm_range *void1, struct mach_vm_range *void2)
{
	char buf[256];
	size_t bsz = sizeof(buf) - 1;   /* reserve one byte for the NUL below */
	char *s;

	if (sysctlbyname("vm.malloc_ranges", buf, &bsz, NULL, 0) == -1) {
		if (errno == ENOENT) {
			return false;
		}
		T_ASSERT_POSIX_SUCCESS(-1, "sysctlbyname(vm.malloc_ranges)");
	}
	buf[bsz] = '\0';

	s = buf;

	/* first range: "<min>:<max> " */
	void1->min_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, ':', "should have a ':'");
	s++;

	void1->max_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, ' ', "should have a ' '");
	s++;

	/* second range: "<min>:<max>" terminated by NUL */
	void2->min_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, ':', "should have a ':'");
	s++;

	void2->max_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, '\0', "should be done");

	return true;
}
518 
T_DECL(create_range, "ensure create ranges kinda works")
{
	struct mach_vm_range void1, void2, *r;

	mach_vm_range_recipe_v1_t array[10];
	uint32_t nranges = 0;

	if (!parse_void_ranges(&void1, &void2)) {
		T_SKIP("malloc_ranges not supported");
	}

	T_LOG("Ranges are %#llx:%#llx %#llx:%#llx",
	    void1.min_address, void1.max_address,
	    void2.min_address, void2.max_address);

/*
 * Helpers to build a recipe array incrementally and hand it to
 * mach_vm_range_create(); #undef'd at the end of this test.
 */
#define reset() \
	nranges = 0
#define add_range(l, r) \
	array[nranges++] = (mach_vm_range_recipe_v1_t){ \
	    .range = { l, r }, .range_tag = MACH_VM_RANGE_FIXED, \
	}
#define create_ranges() \
	mach_vm_range_create(mach_task_self(), MACH_VM_RANGE_FLAVOR_V1, \
	    (mach_vm_range_recipes_raw_t)array, sizeof(array[0]) * nranges)

	/* pick whichever void has at least 128MiB of room to play in */
	if (void1.min_address + MiB(128) > void1.max_address) {
		r = &void2;
	} else {
		r = &void1;
	}

	reset();
	add_range(void1.min_address - MiB(10), void1.min_address);
	T_EXPECT_MACH_ERROR(create_ranges(), KERN_INVALID_ARGUMENT,
	    "should fail: range outside of voids");

	reset();
	add_range(r->min_address + MiB(1), r->min_address + MiB(3));
	add_range(r->min_address, r->min_address + MiB(2));
	T_EXPECT_MACH_ERROR(create_ranges(), KERN_INVALID_ARGUMENT,
	    "should fail: overlapping ranges");

	reset();
	add_range(r->min_address, r->min_address + MiB(1));
	add_range(r->min_address + MiB(2), r->min_address + MiB(3));
	T_EXPECT_MACH_SUCCESS(create_ranges(), "should succeed");

	/* re-creating the same ranges must be rejected */
	reset();
	add_range(r->min_address, r->min_address + MiB(1));
	add_range(r->min_address + MiB(2), r->min_address + MiB(3));
	T_EXPECT_MACH_ERROR(create_ranges(), KERN_MEMORY_PRESENT,
	    "should fail: already allocated");

	reset();
	add_range(r->min_address + MiB(4), r->min_address + MiB(5));
	add_range(r->min_address + MiB(6), r->min_address + MiB(7));
	T_EXPECT_MACH_SUCCESS(create_ranges(), "should succeed");

	/* offs advances each run so parent and child use distinct addresses */
	__block vm_offset_t offs = 0;

	void (^check_works)(void) = ^{
		mach_vm_address_t addr;
		kern_return_t kr;

		offs += PAGE_SIZE;
		addr  = r->min_address + offs;
		assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);

		addr  = r->min_address + MiB(2) + offs;
		assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);

		/* between the two created ranges: fixed allocation must fail */
		addr  = r->min_address + MiB(1);
		kr = mach_vm_allocate(mach_task_self(), &addr, ALLOCATION_SIZE,
		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
		T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "should fail");
	};

	check_works();
	fork_child_test(check_works);

#undef create_ranges
#undef add_range
#undef reset
}
603