#include <darwintest.h>
#include <darwintest_utils.h>

#include <sys/types.h>
#include <sys/sysctl.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_types.h>
#include <sys/mman.h>
#include <unistd.h>
#include <TargetConditionals.h>

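/* Signature of a test body run in a forked child by fork_child_test(). */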
typedef void (*child_test)(void);

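/* Identifiers for the two user VM map ranges exposed by the kernel. */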
enum {
	DEFAULT = 0,
	HEAP
};

static vm_size_t _allocation_size = 0;
static char _filepath[MAXPATHLEN];
static struct mach_vm_range parent_default;
static struct mach_vm_range parent_heap;
#define CHILD_PROCESS_COUNT (20)
#define MAX_VM_ADDRESS (0xFC0000000ULL)

/*
 * Choose an arbitrary memory tag for each of the default/heap ranges,
 * for testing placement of allocations.
 */
#define VM_MEMORY_RANGE_DEFAULT (VM_MAKE_TAG(VM_MEMORY_STACK))
#define VM_MEMORY_RANGE_HEAP (VM_MAKE_TAG(VM_MEMORY_MALLOC))

#define RANGE_DEFAULT_FLAGS (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_DEFAULT)
#define RANGE_HEAP_FLAGS (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_HEAP)

#define TARGET_OS_OTHER 0

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_ENABLED(TARGET_OS_IOS || TARGET_OS_OTHER),
	T_META_OWNER("mmorran")
	);

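/* Record the allocation size used by the assert_* helpers below. */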
static void
set_allocation_size(size_t sz)
{
	_allocation_size = sz;
}

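/* Returns true if the kernel has VM map ranges enabled for this process. */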
static bool
ranges_enabled(void)
{
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);

	bzero(&range, sizeof(range));

	/*
	 * This will fail with ENOENT or EINVAL if ranges are either not
	 * supported or not enabled for our process.
	 */
	return sysctlbyname("vm.vm_map_user_range_default",
	           &range, &range_sz, NULL, 0) == 0;
}

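/* Run a test block, skipping when VM map ranges are not enabled. */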
static void
do_test(void (^test)(void))
{
	if (!ranges_enabled()) {
		T_SKIP("VM map ranges not enabled");
	}

	test();
}

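/* Fetch the bounds of the requested user range from the kernel via sysctl. */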
static struct mach_vm_range
get_range(int target_range)
{
	int ret = EINVAL;
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);

	bzero(&range, sizeof(range));

	switch (target_range) {
	case DEFAULT:
		ret = sysctlbyname("vm.vm_map_user_range_default", &range, &range_sz, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user default range");
		break;

	case HEAP:
		ret = sysctlbyname("vm.vm_map_user_range_heap", &range, &range_sz, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user heap range");
		break;

	default:
		/* unknown range: fall through and return the zeroed range */
		break;
	}

	return range;
}

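/* Allocate _allocation_size bytes with the given flags, asserting success. */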
static mach_vm_address_t
assert_allocate(mach_vm_address_t dst, int vm_flags)
{
	kern_return_t ret = mach_vm_allocate(mach_task_self(), &dst, _allocation_size, vm_flags);
	T_ASSERT_MACH_SUCCESS(ret, "mach_vm_allocate");
	return dst;
}

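/* Expect addr to lie within [min_address, max_address] of the given range. */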
static void
assert_in_range(struct mach_vm_range range, mach_vm_offset_t addr)
{
	T_LOG("checking if %llx <= %llx <= %llx", range.min_address, addr,
	    range.max_address);
	T_EXPECT_GE(addr, range.min_address, "allocation at or above range min address");
	T_EXPECT_LE(addr, range.max_address, "allocation at or below range max address");
}

static void
assert_in_heap_range(mach_vm_offset_t addr)
{
	struct mach_vm_range range = get_range(HEAP);

	assert_in_range(range, addr);
}

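/* mmap() _allocation_size bytes read/write, expecting a valid mapping. */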
static void *
assert_mmap(void *addr, int fd, int flags)
{
	void *ret = mmap(addr, _allocation_size, PROT_READ | PROT_WRITE,
	    flags, fd, 0);
	T_EXPECT_NE(ret, MAP_FAILED, "mmap should not have MAP_FAILED");
	T_EXPECT_NE(ret, NULL, "mmap should have returned a valid pointer");
	return ret;
}

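/* Allocate at exactly dst and verify the kernel honored the fixed address. */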
static void
assert_allocate_eq(mach_vm_address_t dst, int vm_flags)
{
	mach_vm_address_t target = dst;

	T_ASSERT_MACH_SUCCESS(mach_vm_allocate(mach_task_self(), &target,
	    _allocation_size, vm_flags), "mach_vm_allocate");

	T_EXPECT_EQ(target, dst, "allocation landed at the requested address");
}

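/* Allocate with the given flags and expect the result inside target_range. */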
static mach_vm_address_t
assert_allocate_in_range(int target_range, mach_vm_address_t dst, int vm_flags)
{
	struct mach_vm_range range = get_range(target_range);
	dst = assert_allocate(dst, vm_flags);

	assert_in_range(range, (mach_vm_offset_t)dst);

	return dst;
}

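/* mmap() with the given flags and expect the mapping inside target_range. */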
static void *
assert_mmap_in_range(void *addr, int target_range, int fd, int flags)
{
	struct mach_vm_range range = get_range(target_range);
	void *dst = assert_mmap(addr, fd, flags);

	assert_in_range(range, (mach_vm_offset_t)dst);

	return dst;
}

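/* Fork, run func in the child, and wait up to 30s for the child to exit. */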
static void
fork_child_test(child_test func)
{
	pid_t child_pid;
	int err;

	child_pid = fork();

	if (child_pid == 0) {
		/* child process */
		func();
		exit(0);
	} else {
		T_QUIET; T_ASSERT_POSIX_SUCCESS(child_pid, "fork process");

		/* wait for child process to exit */
		if (dt_waitpid(child_pid, &err, NULL, 30) == false) {
			T_FAIL("dt_waitpid() failed on child pid %d", child_pid);
		}
	}
}

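/* atexit() handler: remove the temporary file created by range_mmap_file. */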
static void
cleanup_file(void)
{
	unlink(_filepath);
	bzero(_filepath, MAXPATHLEN);
}

T_DECL(range_allocate_heap,
    "ensure malloc-tagged memory is allocated within the heap range")
{
	do_test(^{
		set_allocation_size(PAGE_SIZE);
		assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
	});
}

T_DECL(range_allocate_anywhere,
    "ensure allocation is within target range when hint is outwith range")
{
	do_test(^{
		struct mach_vm_range range = get_range(HEAP);

		set_allocation_size(PAGE_SIZE);
		assert_allocate_in_range(HEAP, range.min_address - _allocation_size, RANGE_HEAP_FLAGS);
	});
}

T_DECL(range_allocate_stack,
    "ensure a stack-tagged allocation lands in the default range")
{
	do_test(^{
		set_allocation_size(PAGE_SIZE);
		assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
	});
}

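/*
 * Allocate anywhere with the given tag, deallocate, then re-allocate the
 * same address with VM_FLAGS_FIXED under each range's tag: a fixed request
 * must be honored regardless of the tag supplied.
 */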
static void
ensure_fixed_mappings_succeed_cross(int tag)
{
	mach_vm_address_t addr;

	addr = assert_allocate(0, VM_FLAGS_ANYWHERE | tag);
	mach_vm_deallocate(mach_task_self(), addr, _allocation_size);

	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_MEMORY_RANGE_DEFAULT);
	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MEMORY_RANGE_DEFAULT);
	mach_vm_deallocate(mach_task_self(), addr, _allocation_size);

	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_MEMORY_RANGE_HEAP);
	assert_allocate_eq(addr, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MEMORY_RANGE_HEAP);
	mach_vm_deallocate(mach_task_self(), addr, _allocation_size);
}

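/*
 * Pick an address outwith both ranges and confirm that a fixed allocation
 * there is rejected.
 */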
static void
ensure_rogue_fixed_fails(void)
{
	struct mach_vm_range def = get_range(DEFAULT);
	struct mach_vm_range heap = get_range(HEAP);
	mach_vm_address_t addr;
	kern_return_t kr;

	if (def.max_address + 3 * _allocation_size <= heap.min_address) {
		addr = heap.min_address - 2 * _allocation_size;
	} else {
		/*
		 * In the unlikely event that there is no space between the
		 * default and heap ranges, there must be a hole after heap.
		 */
		addr = heap.max_address + _allocation_size;
	}

	kr = mach_vm_allocate(mach_task_self(), &addr,
	    _allocation_size, VM_FLAGS_FIXED);
	T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "fixed allocation outwith any range should fail");
}

static void
ensure_fixed_mapping(void)
{
	set_allocation_size(PAGE_SIZE);

	ensure_fixed_mappings_succeed_cross(VM_MEMORY_RANGE_DEFAULT);
	ensure_fixed_mappings_succeed_cross(VM_MEMORY_RANGE_HEAP);

	ensure_rogue_fixed_fails();
}

T_DECL(range_allocate_fixed, "ensure a fixed target is honored (even with an incorrect tag)")
{
	do_test(^{
		ensure_fixed_mapping();
		fork_child_test(ensure_fixed_mapping);
	});
}

T_DECL(range_mmap_anon, "ensure an anonymous mapping lands within the HEAP range")
{
	do_test(^{
		set_allocation_size(PAGE_SIZE);

		assert_mmap_in_range(NULL, HEAP, -1, MAP_ANON | MAP_PRIVATE);
	});
}

T_DECL(range_mmap_file, "ensure a file is mapped within the HEAP range")
{
	do_test(^{
		int fd = -1;

		set_allocation_size(PAGE_SIZE);

		/* prepare a temp file */
		strncpy(_filepath, "/tmp/mapfile.XXXXXX", MAXPATHLEN);
		T_ASSERT_POSIX_SUCCESS(fd = mkstemp(_filepath), NULL);
		atexit(cleanup_file);

		T_ASSERT_POSIX_SUCCESS(ftruncate(fd, (off_t)_allocation_size), NULL);

		/* map it into the heap range */
		T_LOG("mapping file in HEAP range");
		assert_mmap_in_range(NULL, HEAP, fd, MAP_FILE | MAP_SHARED);
	});
}

T_DECL(range_mmap_alias_tag, "ensure an anonymous mapping with a tag is honored")
{
	do_test(^{
		set_allocation_size(PAGE_SIZE);
		/* for MAP_ANON, the fd argument carries the VM tag */
		assert_mmap_in_range(NULL, DEFAULT, VM_MEMORY_RANGE_DEFAULT, MAP_ANON | MAP_PRIVATE);
	});
}

T_DECL(range_mmap_with_low_hint,
    "ensure allocation is within target range when hint is below range")
{
	do_test(^{
		struct mach_vm_range range = get_range(HEAP);

		set_allocation_size(PAGE_SIZE);

		mach_vm_address_t target = range.min_address - _allocation_size;
		assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);
	});
}

T_DECL(range_mmap_with_high_hint,
    "ensure allocation is placed at the hint when the hint is within range")
{
	do_test(^{
		struct mach_vm_range range = get_range(HEAP);
		mach_vm_address_t target = range.min_address + 0x100000000;

		set_allocation_size(PAGE_SIZE);
		void *dst = assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);

		T_EXPECT_EQ((mach_vm_address_t)dst, target, "unexpected allocation address");
	});
}

T_DECL(range_mmap_with_bad_hint,
    "ensure allocation still lands in range when hint is above range")
{
	do_test(^{
		struct mach_vm_range range = get_range(HEAP);
		mach_vm_address_t target = range.max_address + 0x100000000;

		set_allocation_size(PAGE_SIZE);

		/* mmap should retry with a 0 base on the initial KERN_NO_SPACE failure */
		assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);
	});
}

T_DECL(range_mach_vm_map_with_bad_hint,
    "ensure mach_vm_map fails when hint is above range")
{
	do_test(^{
		struct mach_vm_range range = get_range(HEAP);
		mach_vm_address_t addr = range.max_address + 0x100000000;

		set_allocation_size(PAGE_SIZE);

		/*
		 * Unlike mmap and vm_allocate, mach_vm_map should fail when
		 * given a hint outwith the target range.
		 */
		kern_return_t ret = mach_vm_map(mach_task_self(), &addr, _allocation_size,
		    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
		    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
		    VM_INHERIT_DEFAULT);
		T_QUIET; T_EXPECT_EQ(ret, KERN_NO_SPACE, "expected KERN_NO_SPACE");
	});
}

T_DECL(range_mach_vm_remap_default,
    "ensure mach_vm_remap is successful in the default range")
{
	do_test(^{
		vm_prot_t curprot;
		vm_prot_t maxprot;

		set_allocation_size(PAGE_SIZE);

		mach_vm_address_t addr = assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
		mach_vm_address_t target = addr + _allocation_size;

		kern_return_t ret = mach_vm_remap(mach_task_self(), &target, _allocation_size,
		    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
		    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
		T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
	});
}

T_DECL(range_mach_vm_remap_heap_with_hint,
    "ensure mach_vm_remap with a hint stays in the heap range")
{
	do_test(^{
		vm_prot_t curprot;
		vm_prot_t maxprot;

		set_allocation_size(PAGE_SIZE);

		mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
		mach_vm_address_t target = addr + _allocation_size;

		kern_return_t ret = mach_vm_remap(mach_task_self(), &target, _allocation_size,
		    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
		    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
		T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
		assert_in_heap_range(target);
	});
}

T_DECL(range_mach_vm_remap_heap,
    "ensure mach_vm_remap remains in the same range")
{
	do_test(^{
		vm_prot_t curprot;
		vm_prot_t maxprot;

		set_allocation_size(PAGE_SIZE);

		mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
		mach_vm_address_t target = 0;

		kern_return_t ret = mach_vm_remap(mach_task_self(), &target, _allocation_size,
		    (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
		    addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
		T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
		assert_in_heap_range(target);
	});
}

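/*
 * Sanity-check the ranges themselves: no overlap, heap below the expected
 * VM ceiling, and the heap base aligned to a 2MB translation-table boundary.
 */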
static void
ensure_range(void)
{
	struct mach_vm_range def = get_range(DEFAULT);
	struct mach_vm_range heap = get_range(HEAP);

	T_EXPECT_GT(heap.min_address, def.max_address,
	    "ranges should not overlap");
	T_EXPECT_LE(heap.max_address, MAX_VM_ADDRESS,
	    "expected max <= %llx", MAX_VM_ADDRESS);
	T_EXPECT_EQ(heap.min_address,
	    heap.min_address & (unsigned long)~0x1FFFFF,
	    "expected alignment on 2MB TT boundary");
}

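/* Confirm a forked child inherits the parent's default and heap ranges. */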
static void
ensure_child_range(void)
{
	struct mach_vm_range def = get_range(DEFAULT);
	struct mach_vm_range heap = get_range(HEAP);

	T_QUIET; T_EXPECT_EQ(def.min_address, parent_default.min_address,
	    "expected forked default min to be equal");
	T_QUIET; T_EXPECT_EQ(def.max_address, parent_default.max_address,
	    "expected forked default max to be equal");
	T_QUIET; T_EXPECT_EQ(heap.min_address, parent_heap.min_address,
	    "expected forked heap min to be equal");
	T_QUIET; T_EXPECT_EQ(heap.max_address, parent_heap.max_address,
	    "expected forked heap max to be equal");
}

T_DECL(range_ensure_bounds, "ensure ranges respect map bounds")
{
	do_test(^{
		parent_default = get_range(DEFAULT);
		parent_heap = get_range(HEAP);

		ensure_range();

		for (uint32_t i = 0; i < CHILD_PROCESS_COUNT; i++) {
			fork_child_test(ensure_child_range);
		}
	});
}