1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3
4 #include <sys/types.h>
5 #include <sys/sysctl.h>
6 #include <mach/mach.h>
7 #include <mach/mach_vm.h>
8 #include <mach/vm_types.h>
9 #include <sys/mman.h>
10 #include <unistd.h>
11 #include <TargetConditionals.h>
12
/* Signature of a test body executed in a forked child (see fork_child_test()). */
typedef void (*child_test)(void);

/* Selectors for the per-process VM map ranges queried via sysctl in get_range(). */
enum {
	DEFAULT = 0,
	HEAP
};

/* Size, in bytes, used by the allocation/mmap helpers below. */
static vm_size_t _allocation_size = 0;
/* Path of the temp file created by range_mmap_file; removed by cleanup_file(). */
static char _filepath[MAXPATHLEN];
/* Ranges captured in the parent process, compared against forked children. */
static struct mach_vm_range parent_default;
static struct mach_vm_range parent_heap;

#define CHILD_PROCESS_COUNT (20)
#define MAX_VM_ADDRESS (0xFC0000000ULL)

/*
 * Choose an arbitrary memory tag which applies to each of default/heap range
 * for testing placement of allocations.
 */
#define VM_MEMORY_RANGE_DEFAULT (VM_MAKE_TAG(VM_MEMORY_STACK))
#define VM_MEMORY_RANGE_HEAP (VM_MAKE_TAG(VM_MEMORY_MALLOC))

#define RANGE_DEFAULT_FLAGS (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_DEFAULT)
#define RANGE_HEAP_FLAGS (VM_FLAGS_ANYWHERE | VM_MEMORY_RANGE_HEAP)

#define TARGET_OS_OTHER 0
39
/*
 * Suite metadata: xnu.vm namespace; enabled on iOS only, since
 * TARGET_OS_OTHER is defined to 0 above.
 */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"),
	T_META_ENABLED(TARGET_OS_IOS || TARGET_OS_OTHER),
	T_META_OWNER("mmorran")
	);
47
48 static void
set_allocation_size(size_t sz)49 set_allocation_size(size_t sz)
50 {
51 _allocation_size = sz;
52 }
53
/*
 * Returns true when VM map ranges are supported and enabled for this
 * process, determined by probing the range sysctl.
 *
 * Note: declared (void) — an empty parameter list in C (pre-C23) declares
 * an unprototyped function rather than one taking no arguments.
 */
static bool
ranges_enabled(void)
{
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);

	bzero(&range, sizeof(range));

	/*
	 * We will fail with ENOENT or EINVAL if ranges are either not supported
	 * or not enabled on our process.
	 */
	return sysctlbyname("vm.vm_map_user_range_default",
	           &range, &range_sz, NULL, 0) == 0;
}
69
70 static void
71 do_test(void (^test)(void))
72 {
73 if (!ranges_enabled()) {
74 T_SKIP("VM map ranges not enabled");
75 }
76
77 test();
78 }
79
/*
 * Fetch the requested user VM map range (DEFAULT or HEAP) via sysctl.
 * Asserts on sysctl failure; fails the test outright on an invalid
 * selector (previously a bad selector silently returned a zeroed range,
 * because the EINVAL placeholder in `ret` was never checked).
 */
static struct mach_vm_range
get_range(int target_range)
{
	const char *sysctl_name = NULL;
	struct mach_vm_range range;
	size_t range_sz = sizeof(range);
	int ret;

	bzero(&range, sizeof(range));

	switch (target_range) {
	case DEFAULT:
		sysctl_name = "vm.vm_map_user_range_default";
		break;

	case HEAP:
		sysctl_name = "vm.vm_map_user_range_heap";
		break;

	default:
		T_ASSERT_FAIL("invalid target_range %d", target_range);
	}

	ret = sysctlbyname(sysctl_name, &range, &range_sz, NULL, 0);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(ret, "successfully retrieved user range %s",
	    sysctl_name);

	return range;
}
109
110 static mach_vm_address_t
assert_allocate(mach_vm_address_t dst,int vm_flags)111 assert_allocate(mach_vm_address_t dst, int vm_flags)
112 {
113 int ret = mach_vm_allocate(mach_task_self(), &dst, _allocation_size, vm_flags);
114 T_ASSERT_MACH_SUCCESS(ret, "vm_allocate");
115 return dst;
116 }
117
118 static void
assert_in_range(struct mach_vm_range range,mach_vm_offset_t addr)119 assert_in_range(struct mach_vm_range range, mach_vm_offset_t addr)
120 {
121 T_LOG("checking if %llx <= %llx <= %llx", range.min_address, addr,
122 range.max_address);
123 T_EXPECT_GE(addr, range.min_address, "allocation above heap min address");
124 T_EXPECT_LE(addr, range.max_address, "allocation below heap max address");
125 }
126
127 static void
assert_in_heap_range(mach_vm_offset_t addr)128 assert_in_heap_range(mach_vm_offset_t addr)
129 {
130 struct mach_vm_range range = get_range(HEAP);
131
132 assert_in_range(range, addr);
133 }
134
135 static void *
assert_mmap(void * addr,int fd,int flags)136 assert_mmap(void *addr, int fd, int flags)
137 {
138 void *ret = mmap(addr, _allocation_size, VM_PROT_READ | VM_PROT_WRITE,
139 flags, fd, 0);
140 T_EXPECT_NE(ret, MAP_FAILED, "mmap should not have MAP_FAILED");
141 T_EXPECT_NE(ret, NULL, "mmap should have returned a valid pointer");
142 return ret;
143 }
144
145 static void
assert_allocate_eq(mach_vm_address_t dst,int vm_flags)146 assert_allocate_eq(mach_vm_address_t dst, int vm_flags)
147 {
148 mach_vm_address_t target = dst;
149
150 T_ASSERT_MACH_SUCCESS(mach_vm_allocate(mach_task_self(), &target,
151 _allocation_size, vm_flags), "vm_allocate");
152
153 T_EXPECT_EQ(target, dst, "target/dst differ");
154 }
155
156 static mach_vm_address_t
assert_allocate_in_range(int target_range,mach_vm_address_t dst,int vm_flags)157 assert_allocate_in_range(int target_range, mach_vm_address_t dst, int vm_flags)
158 {
159 struct mach_vm_range range = get_range(target_range);
160 dst = assert_allocate(dst, vm_flags);
161
162 assert_in_range(range, (mach_vm_offset_t)dst);
163
164 return dst;
165 }
166
167 static void *
assert_mmap_in_range(void * addr,int target_range,int fd,int flags)168 assert_mmap_in_range(void *addr, int target_range, int fd, int flags)
169 {
170 struct mach_vm_range range = get_range(target_range);
171 void *dst = assert_mmap(addr, fd, flags);
172
173 assert_in_range(range, (mach_vm_offset_t)dst);
174
175 return dst;
176 }
177
178 static void
fork_child_test(child_test func)179 fork_child_test(child_test func)
180 {
181 pid_t child_pid;
182 int err;
183
184 child_pid = fork();
185
186 if (child_pid == 0) {
187 /* child process */
188 func();
189 exit(0);
190 } else {
191 T_QUIET; T_ASSERT_POSIX_SUCCESS(child_pid, "fork process");
192
193 /* wait for child process to exit */
194 if (dt_waitpid(child_pid, &err, NULL, 30) == false) {
195 T_FAIL("dt_waitpid() failed on child pid %d", child_pid);
196 }
197 }
198 }
199
200 static void
cleanup_file(void)201 cleanup_file(void)
202 {
203 unlink(_filepath);
204 bzero(_filepath, MAXPATHLEN);
205 }
206
207 T_DECL(range_allocate_heap,
208 "ensure malloc tagged memory is allocated within the heap range")
209 {
210 do_test(^{
211 set_allocation_size(PAGE_SIZE);
212 assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
213 });
214 }
215
216 T_DECL(range_allocate_anywhere,
217 "ensure allocation is within target range when hint is outwith range")
218 {
219 do_test(^{
220 struct mach_vm_range range = get_range(HEAP);
221
222 set_allocation_size(PAGE_SIZE);
223 assert_allocate_in_range(HEAP, range.min_address - _allocation_size, RANGE_HEAP_FLAGS);
224 });
225 }
226
227 T_DECL(range_allocate_stack,
228 "ensure a stack allocation is in the default range")
229 {
230 do_test(^{
231 set_allocation_size(PAGE_SIZE);
232 assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
233 });
234 }
235
236 T_DECL(range_allocate_fixed, "ensure fixed target is honored")
237 {
238 do_test(^{
239 struct mach_vm_range range = get_range(HEAP);
240
241 set_allocation_size(PAGE_SIZE);
242 assert_allocate_eq(range.min_address - _allocation_size, VM_FLAGS_FIXED | VM_MEMORY_RANGE_HEAP);
243 });
244 }
245
246 T_DECL(range_mmap_anon, "ensure anon mapping within HEAP range")
247 {
248 do_test(^{
249 set_allocation_size(PAGE_SIZE);
250
251 assert_mmap_in_range(NULL, HEAP, -1, MAP_ANON | MAP_PRIVATE);
252 });
253 }
254
255 T_DECL(range_mmap_file, "ensure file is mapped within HEAP range")
256 {
257 do_test(^{
258 int fd = -1;
259
260 set_allocation_size(PAGE_SIZE);
261
262 /* prepare temp file */
263 strncpy(_filepath, "/tmp/mapfile.XXXXXX", MAXPATHLEN);
264 T_ASSERT_POSIX_SUCCESS(fd = mkstemp(_filepath), NULL);
265 atexit(cleanup_file);
266
267 T_ASSERT_POSIX_SUCCESS(ftruncate(fd, (off_t)_allocation_size), NULL);
268
269 /* map it in to the heap rage */
270 T_LOG("mapping file in HEAP range");
271 assert_mmap_in_range(NULL, HEAP, fd, MAP_FILE | MAP_SHARED);
272 });
273 }
274
275
T_DECL(range_mmap_alias_tag, "ensure anon mapping with tag is honored")
{
	do_test(^{
		set_allocation_size(PAGE_SIZE);
		/*
		 * For MAP_ANON mappings on Darwin the fd argument carries the
		 * VM tag (VM_MAKE_TAG) — it is not a file descriptor here.
		 * Passing the DEFAULT-range tag should steer the mapping into
		 * the default range rather than HEAP.
		 */
		assert_mmap_in_range(NULL, DEFAULT, VM_MEMORY_RANGE_DEFAULT, MAP_ANON | MAP_PRIVATE);
	});
}
283
284 T_DECL(range_mmap_with_low_hint,
285 "ensure allocation is within target range when hint is below range")
286 {
287 do_test(^{
288 struct mach_vm_range range = get_range(HEAP);
289 mach_vm_address_t target = range.min_address - _allocation_size;
290
291 set_allocation_size(PAGE_SIZE);
292 assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);
293 });
294 }
295
296 T_DECL(range_mmap_with_high_hint,
297 "ensure allocation is within target range when hint is within range")
298 {
299 do_test(^{
300 struct mach_vm_range range = get_range(HEAP);
301 mach_vm_address_t target = range.min_address + 0x100000000;
302
303 set_allocation_size(PAGE_SIZE);
304 void *dst = assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);
305
306 T_EXPECT_EQ((mach_vm_address_t)dst, target, "unexpected allocation address");
307 });
308 }
309
310 T_DECL(range_mmap_with_bad_hint,
311 "ensure allocation fails when hint is above range")
312 {
313 do_test(^{
314 struct mach_vm_range range = get_range(HEAP);
315 mach_vm_address_t target = range.max_address + 0x100000000;
316
317 set_allocation_size(PAGE_SIZE);
318
319 /* mmap should retry with 0 base on initial KERN_NO_SPACE failure */
320 assert_mmap_in_range((void *)target, HEAP, -1, MAP_ANON | MAP_PRIVATE);
321 });
322 }
323
324 T_DECL(range_mach_vm_map_with_bad_hint,
325 "ensure mach_vm_map fails when hint is above range")
326 {
327 do_test(^{
328 struct mach_vm_range range = get_range(HEAP);
329 mach_vm_address_t addr = range.max_address + 0x100000000;
330
331 set_allocation_size(PAGE_SIZE);
332
333 /*
334 * unlike mmap & vm_allocate, mach_vm_map should fail when given a hint
335 * out with the target range.
336 */
337 int ret = mach_vm_map(mach_task_self(), &addr, _allocation_size,
338 (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
339 (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
340 VM_INHERIT_DEFAULT);
341 T_QUIET; T_EXPECT_EQ(ret, KERN_NO_SPACE, "expected KERN_NO_SPACE");
342 });
343 }
344
345 T_DECL(range_mach_vm_remap_default,
346 "ensure mach_vm_remap is successful in default range")
347 {
348 do_test(^{
349 vm_prot_t curprot;
350 vm_prot_t maxprot;
351
352 set_allocation_size(PAGE_SIZE);
353
354 mach_vm_address_t addr = assert_allocate_in_range(DEFAULT, 0, RANGE_DEFAULT_FLAGS);
355 mach_vm_address_t target = addr + _allocation_size;
356
357 int ret = mach_vm_remap(mach_task_self(), &target, _allocation_size,
358 (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
359 addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
360 T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
361 });
362 }
363
364 T_DECL(range_mach_vm_remap_heap_with_hint,
365 "ensure mach_vm_remap is successful in heap range")
366 {
367 do_test(^{
368 vm_prot_t curprot;
369 vm_prot_t maxprot;
370
371 set_allocation_size(PAGE_SIZE);
372
373 mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
374 mach_vm_address_t target = addr + _allocation_size;
375
376 int ret = mach_vm_remap(mach_task_self(), &target, _allocation_size,
377 (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
378 addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
379 T_QUIET; T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
380 assert_in_heap_range(target);
381 });
382 }
383
384 T_DECL(range_mach_vm_remap_heap,
385 "ensure mach_vm_remap remains in same range")
386 {
387 do_test(^{
388 vm_prot_t curprot;
389 vm_prot_t maxprot;
390
391 set_allocation_size(PAGE_SIZE);
392
393 mach_vm_address_t addr = assert_allocate_in_range(HEAP, 0, RANGE_HEAP_FLAGS);
394 mach_vm_address_t target = 0;
395
396 int ret = mach_vm_remap(mach_task_self(), &target, _allocation_size,
397 (mach_vm_offset_t)0, VM_FLAGS_ANYWHERE, mach_task_self(),
398 addr, FALSE, &curprot, &maxprot, VM_INHERIT_NONE);
399 T_EXPECT_EQ(ret, KERN_SUCCESS, "expected KERN_SUCCESS");
400 assert_in_heap_range(target);
401 });
402 }
403
404 static void
ensure_range()405 ensure_range()
406 {
407 struct mach_vm_range def = get_range(DEFAULT);
408 struct mach_vm_range heap = get_range(HEAP);
409
410 T_EXPECT_GT(heap.min_address, def.max_address,
411 "ranges should not overlap");
412 T_EXPECT_LE(heap.max_address, MAX_VM_ADDRESS,
413 "expected max <= %llx", MAX_VM_ADDRESS);
414 T_EXPECT_EQ(heap.min_address,
415 heap.min_address & (unsigned long)~0x1FFFFF,
416 "expected alignment on 2MB TT boundary");
417 }
418
419 static void
ensure_child_range()420 ensure_child_range()
421 {
422 struct mach_vm_range def = get_range(DEFAULT);
423 struct mach_vm_range heap = get_range(HEAP);
424
425 T_QUIET; T_EXPECT_EQ(def.min_address, parent_default.min_address,
426 "expected forked default min to be equal");
427 T_QUIET; T_EXPECT_EQ(def.max_address, parent_default.max_address,
428 "expected forked default max to be equal");
429 T_QUIET; T_EXPECT_EQ(heap.min_address, parent_heap.min_address,
430 "expected forked heap min to be equal");
431 T_QUIET; T_EXPECT_EQ(heap.max_address, parent_heap.max_address,
432 "expected forked heap max to be equal");
433 }
434
435 T_DECL(range_ensure_bounds, "ensure ranges respect map bounds")
436 {
437 do_test(^{
438 parent_default = get_range(DEFAULT);
439 parent_heap = get_range(HEAP);
440
441 ensure_range();
442
443 for (uint32_t i = 0; i < CHILD_PROCESS_COUNT; i++) {
444 fork_child_test(ensure_child_range);
445 }
446 });
447 }
448