#include <darwintest.h>
#include <darwintest_utils.h>

#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/task_info.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <TargetConditionals.h>

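/*
 * Selectors for the per-process VM map ranges exposed through the
 * vm.vm_map_user_range_* sysctls; see get_range() below.
 */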
enum {
	DEFAULT = 0,
	HEAP,
	LARGE_FILE
};

static char _filepath[MAXPATHLEN];
static struct mach_vm_range parent_default;
static struct mach_vm_range parent_heap;

#define CHILD_PROCESS_COUNT (20)
#undef KiB
#undef MiB
#undef GiB
#define KiB(x) ((uint64_t)(x) << 10)
#define MiB(x) ((uint64_t)(x) << 20)
#define GiB(x) ((uint64_t)(x) << 30)

#define ALLOCATION_SIZE (PAGE_SIZE)
#define LARGE_ALLOCATION_SIZE (GiB(1))
#define PER_ALLOC_AMT_GB (GiB(256))
#define N_ALLOC 5


/*
 * Choose arbitrary memory tags, one mapping to the default range and one to
 * the heap range, for testing placement of allocations.
 */
#define VM_MEMORY_RANGE_DEFAULT (VM_MAKE_TAG(VM_MEMORY_STACK))
#define VM_MEMORY_RANGE_HEAP (VM_MAKE_TAG(VM_MEMORY_MALLOC_SMALL))

#define RANGE_DEFAULT_FLAGS (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_DEFAULT)
#define RANGE_HEAP_FLAGS (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_HEAP)

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_ENABLED(!TARGET_OS_OSX),
	T_META_OWNER("mmorran")
	);

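/*
 * Returns true when VM map ranges are supported and enabled for this
 * process.
 */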
static bool
ranges_enabled(void)
{
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);

	bzero(&range, sizeof(range));

	/*
	 * We will fail with ENOENT or EINVAL if ranges are either not supported
	 * or not enabled on our process.
	 */
	return sysctlbyname("vm.vm_map_user_range_default",
	    &range, &range_sz, NULL, 0) == 0;
}

#define CHECK_RANGES_ENABLED() \
	if (!ranges_enabled()) { \
		T_SKIP("VM map ranges not enabled"); \
	}

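/*
 * Query the requested user range via sysctl. Asserts on sysctl failure; an
 * unknown selector falls through and returns a zeroed range.
 */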
static struct mach_vm_range
get_range(int target_range)
{
	int ret = EINVAL;
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);

	bzero(&range, sizeof(range));

	switch (target_range) {
	case DEFAULT:
		ret = sysctlbyname("vm.vm_map_user_range_default", &range, &range_sz, NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user default range");
		break;

	case HEAP:
		ret = sysctlbyname("vm.vm_map_user_range_heap", &range, &range_sz, NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user heap range");
		break;

	case LARGE_FILE:
		ret = sysctlbyname("vm.vm_map_user_range_large_file", &range, &range_sz, NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user large file range");
		break;

	default:
		/* Fall through with EINVAL */
		break;
	}

	return range;
}

static task_vm_info_data_t
get_vm_task_info(void)
{
	task_vm_info_data_t ti;

	mach_msg_type_number_t count = TASK_VM_INFO_COUNT;
	kern_return_t const kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t)&ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "get task_info()");
	return ti;
}

static mach_vm_address_t
assert_allocate(mach_vm_address_t dst, int vm_flags)
{
	int ret = mach_vm_allocate(mach_task_self(), &dst, ALLOCATION_SIZE, vm_flags);
	T_ASSERT_MACH_SUCCESS(ret, "vm_allocate");
	return dst;
}

static void
assert_in_range(struct mach_vm_range range, mach_vm_offset_t addr)
{
	T_LOG("checking if %llx <= %llx <= %llx", range.min_address, addr,
	    range.max_address);
	T_EXPECT_GE(addr, range.min_address, "allocation above min address");
	T_EXPECT_LE(addr, range.max_address, "allocation below max address");
}

static void
assert_in_heap_range(mach_vm_offset_t addr)
{
	struct mach_vm_range range = get_range(HEAP);

	assert_in_range(range, addr);
}

static void *
assert_mmap(void *addr, size_t sz, int fd, int flags)
{
	void *ret = mmap(addr, sz, PROT_READ | PROT_WRITE,
	    flags, fd, 0);
	T_EXPECT_NE_PTR(ret, MAP_FAILED, "mmap should not have MAP_FAILED");
	T_EXPECT_NOTNULL(ret, "mmap should have returned a valid pointer");
	return ret;
}

static void
assert_allocate_eq(mach_vm_address_t dst, int vm_flags)
{
	mach_vm_address_t target = dst;

	T_ASSERT_MACH_SUCCESS(mach_vm_allocate(mach_task_self(), &target,
	    ALLOCATION_SIZE, vm_flags), "vm_allocate");

	T_EXPECT_EQ(target, dst, "target/dst differ");
}

static mach_vm_address_t
assert_allocate_in_range(int target_range, mach_vm_address_t dst, int vm_flags)
{
	struct mach_vm_range range = get_range(target_range);
	dst = assert_allocate(dst, vm_flags);

	assert_in_range(range, (mach_vm_offset_t)dst);

	return dst;
}

static void *
assert_mmap_in_range(void *addr, int target_range, size_t sz, int fd, int flags)
{
	struct mach_vm_range range = get_range(target_range);
	void *dst = assert_mmap(addr, sz, fd, flags);

	assert_in_range(range, (mach_vm_offset_t)dst);

	return dst;
}

static void
ensure_mmap_fails(void *addr, size_t sz, int flags, int fd)
{
	void *ret = mmap(addr, sz, PROT_READ | PROT_WRITE, flags, fd, 0);
	T_QUIET;
	T_EXPECT_EQ_PTR(ret, MAP_FAILED, "mmap should fail");
}

__attribute__((overloadable))
static void
fork_child_test(void (^child_test)(void))
{
	pid_t child_pid;
	int err;

	child_pid = fork();

	if (child_pid == 0) {
		/* child process */
		T_LOG("in child");
		child_test();
		exit(0);
	} else {
		T_QUIET; T_ASSERT_POSIX_SUCCESS(child_pid, "fork process");

		/* wait for child process to exit */
		if (dt_waitpid(child_pid, &err, NULL, 30) == false) {
			T_FAIL("dt_waitpid() failed on child pid %d", child_pid);
		}
	}
}

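/*
 * Overload that wraps a plain function pointer in a block, so tests can
 * pass either form to fork_child_test().
 */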
__attribute__((overloadable))
static void
fork_child_test(void (*child_test)(void))
{
	fork_child_test(^{
		child_test();
	});
}

static void
cleanup_file(void)
{
	unlink(_filepath);
	bzero(_filepath, MAXPATHLEN);
}

T_DECL(range_allocate_heap,
    "ensure malloc tagged memory is allocated within the heap range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
}

T_DECL(range_allocate_anywhere,
    "ensure allocation is within target range when hint is outside the range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);

	assert_allocate_in_range(HEAP, range.min_address - ALLOCATION_SIZE, RANGE_HEAP_FLAGS);
}

T_DECL(range_allocate_stack,
    "ensure a stack allocation is in the default range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
}

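/*
 * Allocate anywhere with the given tag, then verify that VM_FLAGS_FIXED
 * re-allocations at that address succeed regardless of which range the
 * requested tag maps to.
 */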
static void
ensure_fixed_mappings_succeed_cross(int heap)
{
	vm_map_address_t addr;

	addr = assert_allocate(0, VM_FLAGS_ANYWHERE | heap);
	vm_deallocate(mach_task_self(), addr, ALLOCATION_SIZE);

	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_MEMORY_RANGE_DEFAULT);
	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MEMORY_RANGE_DEFAULT);
	vm_deallocate(mach_task_self(), addr, ALLOCATION_SIZE);

	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_MEMORY_RANGE_HEAP);
	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MEMORY_RANGE_HEAP);
	vm_deallocate(mach_task_self(), addr, ALLOCATION_SIZE);
}

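/*
 * Pick an address in a hole outside both the default and heap ranges and
 * verify that a VM_FLAGS_FIXED allocation there is rejected.
 */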
static void
ensure_rogue_fixed_fails(void)
{
	struct mach_vm_range def = get_range(DEFAULT);
	struct mach_vm_range heap = get_range(HEAP);
	mach_vm_address_t addr;
	kern_return_t kr;

	if (def.max_address + 3 * ALLOCATION_SIZE <= heap.min_address) {
		addr = heap.min_address - 2 * ALLOCATION_SIZE;
	} else {
		/*
		 * In the unlikely event that there is no space between the
		 * default and heap ranges, there must be a hole after heap.
		 */
		addr = heap.max_address + ALLOCATION_SIZE;
	}

	kr = mach_vm_allocate(mach_task_self(), &addr,
	    ALLOCATION_SIZE, VM_FLAGS_FIXED);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "should fail");
}

static void
ensure_fixed_mapping(void)
{
	ensure_fixed_mappings_succeed_cross(VM_MEMORY_RANGE_DEFAULT);
	ensure_fixed_mappings_succeed_cross(VM_MEMORY_RANGE_HEAP);

	ensure_rogue_fixed_fails();
}

T_DECL(range_allocate_fixed, "ensure fixed target is honored (even with an incorrect tag)", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	ensure_fixed_mapping();
	fork_child_test(ensure_fixed_mapping);
}

T_DECL(range_mmap_anon, "ensure anon mapping within HEAP range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	assert_mmap_in_range(NULL, HEAP, ALLOCATION_SIZE, -1, MAP_ANON | MAP_PRIVATE);
}

T_DECL(range_mmap_file, "ensure file is mapped within HEAP range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	int fd = -1;

	/* prepare temp file */
	strncpy(_filepath, "/tmp/mapfile.XXXXXX", MAXPATHLEN);
	T_ASSERT_POSIX_SUCCESS(fd = mkstemp(_filepath), NULL);
	atexit(cleanup_file);

	T_ASSERT_POSIX_SUCCESS(ftruncate(fd, (off_t)ALLOCATION_SIZE), NULL);

	/* map it into the target range */
#if TARGET_OS_OSX
	T_LOG("mapping file in DEFAULT range");
	assert_mmap_in_range(NULL, DEFAULT, ALLOCATION_SIZE, fd, MAP_FILE | MAP_SHARED);
#else
	T_LOG("mapping file in HEAP range");
	assert_mmap_in_range(NULL, HEAP, ALLOCATION_SIZE, fd, MAP_FILE | MAP_SHARED);
#endif
}


T_DECL(range_mmap_alias_tag, "ensure anon mapping with tag is honored", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

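	/*
	 * With MAP_ANON, mmap's fd argument carries the VM tag (VM_MAKE_TAG),
	 * so this anonymous mapping should land in the DEFAULT range.
	 */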
	assert_mmap_in_range(NULL, DEFAULT, ALLOCATION_SIZE, VM_MEMORY_RANGE_DEFAULT, MAP_ANON | MAP_PRIVATE);
}

T_DECL(range_mmap_with_low_hint,
    "ensure allocation is within target range when hint is below range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	mach_vm_address_t target = range.min_address - ALLOCATION_SIZE;

	assert_mmap_in_range((void *)target, HEAP, ALLOCATION_SIZE, -1, MAP_ANON | MAP_PRIVATE);
}

T_DECL(range_mmap_with_high_hint,
    "ensure allocation is within target range when hint is within range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	mach_vm_address_t target = range.max_address - 100 * ALLOCATION_SIZE;

	void *dst = assert_mmap_in_range((void *)target, HEAP, ALLOCATION_SIZE, -1, MAP_ANON | MAP_PRIVATE);

	T_EXPECT_EQ((mach_vm_address_t)dst, target, "unexpected allocation address");
}

T_DECL(range_mmap_with_bad_hint,
    "ensure allocation falls back within target range when hint is above range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	mach_vm_address_t target = range.max_address + 0x100000000;

	/* mmap should retry with 0 base on initial KERN_NO_SPACE failure */
	assert_mmap_in_range((void *)target, HEAP, ALLOCATION_SIZE, -1, MAP_ANON | MAP_PRIVATE);
}

T_DECL(range_mach_vm_map_with_bad_hint,
    "ensure mach_vm_map fails when hint is above range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	struct mach_vm_range range = get_range(HEAP);
	mach_vm_address_t addr = range.max_address + 0x100000000;

	/*
	 * Unlike mmap & vm_allocate, mach_vm_map should fail when given a hint
	 * outside the target range.
	 */
	int ret = mach_vm_map(mach_task_self(), &addr, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_EQ(ret, KERN_NO_SPACE, "expected KERN_NO_SPACE");
}

T_DECL(range_mach_vm_remap_default,
    "ensure mach_vm_remap is successful in default range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	vm_prot_t curprot;
	vm_prot_t maxprot;

	mach_vm_address_t addr = assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
	mach_vm_address_t target = addr + ALLOCATION_SIZE;

	int ret = mach_vm_remap(mach_task_self(), &target, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
	    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
	T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
}

T_DECL(range_mach_vm_remap_heap_with_hint,
    "ensure mach_vm_remap is successful in heap range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	vm_prot_t curprot;
	vm_prot_t maxprot;

	mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
	mach_vm_address_t target = addr + ALLOCATION_SIZE;

	int ret = mach_vm_remap(mach_task_self(), &target, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
	    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
	T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
	assert_in_heap_range(target);
}

T_DECL(range_mach_vm_remap_heap,
    "ensure mach_vm_remap remains in same range", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	vm_prot_t curprot;
	vm_prot_t maxprot;

	mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
	mach_vm_address_t target = 0;

	int ret = mach_vm_remap(mach_task_self(), &target, ALLOCATION_SIZE,
	    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
	    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
	T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
	assert_in_heap_range(target);
}

static void
ensure_range(void)
{
	struct mach_vm_range def = get_range(DEFAULT);
	struct mach_vm_range heap = get_range(HEAP);

	T_EXPECT_GT(heap.min_address, def.max_address,
	    "ranges should not overlap");
	T_EXPECT_LE(heap.max_address, MACH_VM_MAX_ADDRESS,
	    "expected max <= %llx", MACH_VM_MAX_ADDRESS);
	T_EXPECT_EQ(heap.min_address,
	    heap.min_address & (unsigned long)~0x1FFFFF,
	    "expected alignment on 2MB TT boundary");
}

static void
ensure_child_range(void)
{
	struct mach_vm_range def = get_range(DEFAULT);
	struct mach_vm_range heap = get_range(HEAP);

	T_QUIET; T_EXPECT_EQ(def.min_address, parent_default.min_address,
	    "expected forked default min to be equal");
	T_QUIET; T_EXPECT_EQ(def.max_address, parent_default.max_address,
	    "expected forked default max to be equal");
	T_QUIET; T_EXPECT_EQ(heap.min_address, parent_heap.min_address,
	    "expected forked heap min to be equal");
	T_QUIET; T_EXPECT_EQ(heap.max_address, parent_heap.max_address,
	    "expected forked heap max to be equal");
}

T_DECL(range_ensure_bounds, "ensure ranges respect map bounds", T_META_TAG_VM_PREFERRED)
{
	CHECK_RANGES_ENABLED();

	parent_default = get_range(DEFAULT);
	parent_heap = get_range(HEAP);

	ensure_range();

	for (uint32_t i = 0; i < CHILD_PROCESS_COUNT; i++) {
		fork_child_test(ensure_child_range);
	}
}

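/*
 * Parse the two "min:max" hex pairs ("min1:max1 min2:max2") reported by the
 * vm.malloc_ranges sysctl; returns false when the sysctl is unsupported.
 */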
static bool
parse_void_ranges(struct mach_vm_range *void1, struct mach_vm_range *void2)
{
	char buf[256];
	size_t bsz = sizeof(buf) - 1;
	char *s;

	if (sysctlbyname("vm.malloc_ranges", buf, &bsz, NULL, 0) == -1) {
		if (errno == ENOENT) {
			return false;
		}
		T_ASSERT_POSIX_SUCCESS(-1, "sysctlbyname(vm.malloc_ranges)");
	}
	buf[bsz] = '\0';

	s = buf;

	void1->min_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, ':', "should have a ':'");
	s++;

	void1->max_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, ' ', "should have a ' '");
	s++;

	void2->min_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, ':', "should have a ':'");
	s++;

	void2->max_address = strtoull(s, &s, 16);
	T_QUIET; T_ASSERT_EQ(*s, '\0', "should be done");

	return true;
}

T_DECL(create_range, "ensure create ranges kinda works", T_META_TAG_VM_PREFERRED)
{
	struct mach_vm_range void1, void2, *r;

	mach_vm_range_recipe_v1_t array[10];
	uint32_t nranges = 0;

	if (!parse_void_ranges(&void1, &void2)) {
		T_SKIP("malloc_ranges not supported");
	}

	T_LOG("Ranges are %#llx:%#llx %#llx:%#llx",
	    void1.min_address, void1.max_address,
	    void2.min_address, void2.max_address);

#define reset() \
	nranges = 0
#define add_range(l, r) \
	array[nranges++] = (mach_vm_range_recipe_v1_t){ \
		.range = { l, r }, .range_tag = MACH_VM_RANGE_FIXED, \
	}
#define create_ranges() \
	mach_vm_range_create(mach_task_self(), MACH_VM_RANGE_FLAVOR_V1, \
	    (mach_vm_range_recipes_raw_t)array, sizeof(array[0]) * nranges)

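	/* Work in whichever void range has at least 128MiB of room. */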
	if (void1.min_address + MiB(128) > void1.max_address) {
		r = &void2;
	} else {
		r = &void1;
	}

	reset();
	add_range(void1.min_address - MiB(10), void1.min_address);
	T_EXPECT_MACH_ERROR(create_ranges(), KERN_INVALID_ARGUMENT,
	    "should fail: range outside of voids");

	reset();
	add_range(r->min_address + MiB(1), r->min_address + MiB(3));
	add_range(r->min_address, r->min_address + MiB(2));
	T_EXPECT_MACH_ERROR(create_ranges(), KERN_INVALID_ARGUMENT,
	    "should fail: overlapping ranges");

	reset();
	add_range(r->min_address, r->min_address + MiB(1));
	add_range(r->min_address + MiB(2), r->min_address + MiB(3));
	T_EXPECT_MACH_SUCCESS(create_ranges(), "should succeed");

	reset();
	add_range(r->min_address, r->min_address + MiB(1));
	add_range(r->min_address + MiB(2), r->min_address + MiB(3));
	T_EXPECT_MACH_ERROR(create_ranges(), KERN_MEMORY_PRESENT,
	    "should fail: already allocated");

	reset();
	add_range(r->min_address + MiB(4), r->min_address + MiB(5));
	add_range(r->min_address + MiB(6), r->min_address + MiB(7));
	T_EXPECT_MACH_SUCCESS(create_ranges(), "should succeed");

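	/*
	 * Created ranges should accept fixed allocations inside them and
	 * reject the gap between them, both here and in a forked child.
	 */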
	__block vm_offset_t offs = 0;

	void (^check_works)(void) = ^{
		mach_vm_address_t addr;
		kern_return_t kr;

		offs += PAGE_SIZE;
		addr = r->min_address + offs;
		assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);

		addr = r->min_address + MiB(2) + offs;
		assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);

		addr = r->min_address + MiB(1);
		kr = mach_vm_allocate(mach_task_self(), &addr, ALLOCATION_SIZE,
		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
		T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "should fail");
	};

	check_works();
	fork_child_test(check_works);

#undef create_ranges
#undef add_range
#undef reset
}

T_DECL(range_mmap_too_large, "ensure mmap fails when allocation is too large", T_META_TAG_VM_PREFERRED)
{
	// Get VM map min_offset and max_offset
	task_vm_info_data_t const ti = get_vm_task_info();
	T_LOG("task_info range: 0x%llx-0x%llx, covering %llu bytes of memory",
	    ti.min_address, ti.max_address, ti.max_address - ti.min_address);

	// Try to mmap more memory than the address space can handle
	size_t const sz_too_large = ti.max_address - ti.min_address + 1;
	T_LOG("Trying to allocate %zu bytes", sz_too_large);
	ensure_mmap_fails(NULL, sz_too_large, MAP_ANON | MAP_PRIVATE, -1);
}

T_DECL(range_mmap_outside_map_range_fixed,
    "ensure mmap fails when making a fixed allocation beyond VM map max address", T_META_TAG_VM_PREFERRED)
{
	// Get VM map min_offset and max_offset
	task_vm_info_data_t const ti = get_vm_task_info();
	T_LOG("task_info range: 0x%llx-0x%llx", ti.min_address, ti.max_address);

	// Try to allocate a page between VM map max_offset and MACH_VM_MAX_ADDRESS
	mach_vm_address_t const target = ti.max_address + PAGE_SIZE;
	T_LOG("Trying to allocate memory at 0x%llx", target);
	ensure_mmap_fails((void *)target, ALLOCATION_SIZE, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1);
}

static bool
large_file_range_enabled(void)
{
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);

	bzero(&range, sizeof(range));

	/*
	 * We will fail with ENOENT or EINVAL if ranges are either not supported
	 * or not enabled on our process.
	 */
	int const ret = sysctlbyname("vm.vm_map_user_range_large_file",
	    &range, &range_sz, NULL, 0);
	if (ret) {
		T_LOG("vm.vm_map_user_range_large_file errno: %d", errno);
	} else {
		T_LOG("large file range: (%llx, %llx)",
		    range.min_address, range.max_address);
	}
	return ret == 0;
}

T_DECL(range_mmap_large_file,
    "ensure large file is mapped within LARGE_FILE range", T_META_TAG_VM_PREFERRED)
{
	if (!large_file_range_enabled()) {
		T_SKIP("large file range not enabled");
	}

	void *ptrs[N_ALLOC];
	uint32_t i;

	int fd = -1;
	/* prepare temp file */
	char const *tmp_dir = dt_tmpdir();
	snprintf(_filepath, MAXPATHLEN, "%s/maplargefile.XXXXXX", tmp_dir);
	T_ASSERT_POSIX_SUCCESS(fd = mkstemp(_filepath), NULL);
	atexit(cleanup_file);

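	/*
	 * Only VA space is reserved below; the mappings are never touched, so
	 * the empty temp file never needs backing pages.
	 */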
	T_LOG("Attempting to allocate VA space in %llu GB chunks.",
	    LARGE_ALLOCATION_SIZE / GiB(1));
	for (i = 0; i < N_ALLOC; ++i) {
		void *p = assert_mmap_in_range(NULL, LARGE_FILE, LARGE_ALLOCATION_SIZE,
		    fd, MAP_SHARED | MAP_FILE);
		if (p == MAP_FAILED) {
			if (errno != ENOMEM) {
				T_WITH_ERRNO;
				T_LOG("mmap failed: stopped at %u of %d/%llu GB chunks",
				    i + 1, N_ALLOC, LARGE_ALLOCATION_SIZE / GiB(1));
			}
			break;
		} else {
			T_LOG("allocation %u: %p", i + 1, p);
		}

		T_QUIET; T_ASSERT_NOTNULL(p, "mmap");
		ptrs[i] = p;
	}

	T_EXPECT_GE_UINT(i, N_ALLOC, "Allocate at least %u/%d %llu-GB chunks of VA space",
	    i, N_ALLOC, (LARGE_ALLOCATION_SIZE / GiB(1)));

	T_LOG("Unmapping memory");
	for (uint32_t j = 0; j < i; ++j) {
		int const res = munmap(ptrs[j], LARGE_ALLOCATION_SIZE);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(res, "munmap");
	}
}