#include <darwintest.h>

#include <stdlib.h>

#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>

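/*
 * Exercises several ways of trying to make a read-only shared-region
 * mapping writable (vm_allocate(VM_FLAGS_OVERWRITE),
 * vm_remap(VM_FLAGS_OVERWRITE), vm_protect(VM_PROT_COPY), vm_write())
 * and verifies that each attempt fails without the mapping ever
 * becoming writable.
 */
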
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"));

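/* human-readable rendering of the rwx bits of a vm_prot_t */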
char *_prot_str[] = {
	/* 0 */ "---",
	/* 1 */ "r--",
	/* 2 */ "-w-",
	/* 3 */ "rw-",
	/* 4 */ "--x",
	/* 5 */ "r-x",
	/* 6 */ "-wx",
	/* 7 */ "rwx"
};
char *
prot_str(vm_prot_t prot)
{
	return _prot_str[prot & VM_PROT_ALL];
}

void
print_region_info(
	mach_vm_address_t vmaddr,
	mach_vm_size_t vmsize,
	vm_region_submap_short_info_64_t ri,
	mach_vm_address_t vmaddr2,
	mach_vm_size_t vmsize2,
	vm_region_submap_short_info_64_t ri2)
{
	T_LOG("   [ 0x%016llx - 0x%016llx ] size 0x%016llx prot 0x%x/0x%x %s/%s submap %d\n",
	    (uint64_t)vmaddr, (uint64_t)(vmaddr + vmsize), (uint64_t)vmsize,
	    ri->protection, ri->max_protection,
	    prot_str(ri->protection),
	    prot_str(ri->max_protection),
	    ri->is_submap);
	if (ri2) {
		T_LOG("-> [ 0x%016llx - 0x%016llx ] size 0x%016llx prot 0x%x/0x%x %s/%s submap %d\n",
		    (uint64_t)vmaddr2, (uint64_t)(vmaddr2 + vmsize2),
		    (uint64_t)vmsize2,
		    ri2->protection, ri2->max_protection,
		    prot_str(ri2->protection), prot_str(ri2->max_protection),
		    ri2->is_submap);
	}
}

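/*
 * Scan the address space for a mapping that is nested in a submap
 * (i.e. part of the shared region) and whose maximum protection is
 * read-only.  Returns its address and size via the out parameters.
 */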
static bool
find_nested_read_only_mapping(
	mach_vm_address_t *vmaddr_p,
	mach_vm_size_t *vmsize_p,
	vm_region_submap_short_info_64_t ri)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr_sub;
	mach_vm_size_t vmsize_sub;
	natural_t depth;
	mach_msg_type_number_t count;

	T_LOG("===== Looking for read-only mapping in shared region");
	/* find a read-only mapping in the shared region */
	for (vmaddr_sub = 0;; vmaddr_sub += vmsize_sub) {
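		/*
		 * Allow one level of submap recursion; on return, "depth"
		 * reports the level at which the mapping was found, so
		 * depth == 0 means a top-level (non-nested) mapping.
		 */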
		depth = 1;
		count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_sub,
		    &vmsize_sub,
		    &depth,
		    (vm_region_recurse_info_t)ri,
		    &count);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
		if (kr != KERN_SUCCESS) {
			/* end of address space */
			T_FAIL("could not find shared region");
			return false;
		}
		if (depth == 0) {
			/* not the shared region, keep looking */
			continue;
		}
		if (ri->max_protection != VM_PROT_READ) {
			/* not read-only: keep looking */
			continue;
		}
		T_PASS("Found read-only mapping in shared region at 0x%llx size 0x%llx\n", (uint64_t)vmaddr_sub, (uint64_t)vmsize_sub);
		*vmaddr_p = vmaddr_sub;
		*vmsize_p = vmsize_sub;
		return true;
	}
	return false;
}

T_DECL(vm_test_shreg_ro, "Tests that read-only shared-region mappings can't be overwritten", T_META_TAG_VM_PREFERRED)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr, vmaddr_sub, vmaddr_tmp, vmaddr_buf;
	mach_vm_size_t vmsize, vmsize_sub, vmsize_tmp;
	natural_t depth1, depth2;
	mach_msg_type_number_t count;
	vm_region_submap_short_info_data_64_t ri1, ri2;
	vm_prot_t cur_prot, max_prot;

#if __x86_64__
	T_SKIP("x86_64: read-only shared region mappings are not protected");
	return;
#endif /* __x86_64__ */

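/*
 * After each overwrite attempt, check that the mapping's address and size
 * are unchanged (or merely clipped, when unnesting is expected) and that
 * its current and maximum protections are still at most VM_PROT_READ.
 */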
#define ASSERT_UNCHANGED(clip_ok) \
	do { \
		if (clip_ok) { \
			T_EXPECT_GE(vmaddr_tmp, vmaddr, \
			    "vmaddr clipped 0x%llx -> 0x%llx", \
			    (uint64_t)vmaddr, (uint64_t)vmaddr_tmp); \
			T_EXPECT_LE(vmsize_tmp, vmsize, \
			    "vmsize clipped 0x%llx -> 0x%llx", \
			    (uint64_t)vmsize, (uint64_t)vmsize_tmp); \
		} else { \
			T_EXPECT_EQ(vmaddr_tmp, vmaddr, \
			    "vmaddr unchanged 0x%llx -> 0x%llx", \
			    (uint64_t)vmaddr, (uint64_t)vmaddr_tmp); \
			T_EXPECT_EQ(vmsize_tmp, vmsize, \
			    "vmsize unchanged 0x%llx -> 0x%llx", \
			    (uint64_t)vmsize, (uint64_t)vmsize_tmp); \
		} \
		T_EXPECT_LE(ri2.protection, VM_PROT_READ, \
		    "should not become writable"); \
		T_EXPECT_LE(ri2.max_protection, VM_PROT_READ, \
		    "should not be able to become writable"); \
	} while (0)

	T_LOG("==========================================");
	if (!find_nested_read_only_mapping(&vmaddr_sub, &vmsize_sub, &ri2)) {
		T_FAIL("could not find appropriate mapping");
		return;
	}
	T_LOG("==========================================");
	/* get top-level mapping protections */
	vmaddr = vmaddr_sub;
	depth1 = 0;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr,
	    &vmsize,
	    &depth1,
	    (vm_region_recurse_info_t)&ri1,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_sub, vmsize_sub, &ri2);

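	/*
	 * Attempt to replace the nested read-only mapping with a fresh
	 * anonymous allocation at the same address; the kernel should
	 * refuse and leave the mapping untouched.
	 */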
	/* vm_allocate(VM_FLAGS_OVERWRITE) on top of submap */
	T_LOG("==========================================");
	T_LOG("===== vm_allocate(VM_FLAGS_OVERWRITE) on nested mapping");
	T_LOG("==========================================");
	T_EXPECT_EQ(ri1.is_submap, 1, "mapping should be nested");
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_tmp,
	    vmsize_sub,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "vm_allocate(OVERWRITE) fails with KERN_PROTECTION_FAILURE");
	T_EXPECT_EQ(vmaddr_sub, vmaddr_tmp, "vmaddr is unchanged");
	/* check protections again */
	depth2 = 0;
	vmaddr_tmp = vmaddr_sub;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(false);
	T_EXPECT_EQ(ri2.is_submap, 1, "mapping should still be nested");

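	/*
	 * Attempt to remap the mapping over itself with copy=TRUE; this
	 * should be refused just like the allocation above.
	 */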
	/* vm_remap(VM_FLAGS_OVERWRITE) on top of submap */
	T_LOG("==========================================");
	T_LOG("===== vm_remap(VM_FLAGS_OVERWRITE) on nested mapping");
	T_LOG("==========================================");
	T_EXPECT_EQ(ri1.is_submap, 1, "mapping should be nested");
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr_tmp,
	    vmsize_sub,
	    0,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
	    mach_task_self(),
	    vmaddr_sub,
	    TRUE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "vm_remap(OVERWRITE) fails with KERN_PROTECTION_FAILURE");
	T_EXPECT_EQ(vmaddr_sub, vmaddr_tmp, "vmaddr is unchanged");
	/* check protections again */
	depth2 = 0;
	vmaddr_tmp = vmaddr_sub;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(false);
	T_EXPECT_EQ(ri2.is_submap, 1, "mapping should still be nested");

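	/*
	 * vm_protect() with VM_PROT_COPY asks the kernel for a private,
	 * writable copy of the mapping even though its maximum protection
	 * is read-only.  The request must fail, but it is expected to
	 * unnest the entry from the shared-region submap as a side effect,
	 * which can clip the region's boundaries.
	 */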
	/* vm_protect(VM_PROT_COPY) on submap mapping */
	T_LOG("==========================================");
	T_LOG("===== vm_protect(VM_PROT_COPY) on nested mapping");
	T_LOG("==========================================");
	ri1 = ri2;
	T_EXPECT_EQ(ri1.is_submap, 1, "mapping should be nested");
	kr = mach_vm_protect(mach_task_self(),
	    vmaddr_sub,
	    vmsize_sub,
	    FALSE, /* set_maximum */
	    VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "vm_protect(VM_PROT_COPY) fails with KERN_PROTECTION_FAILURE");
	/* check protections again */
	depth2 = 0;
	vmaddr_tmp = vmaddr_sub;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(true);
	/* clipping expected (pmap unnesting), so reset expectations */
	vmaddr = vmaddr_tmp;
	vmsize = vmsize_tmp;
	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should now be unnested");

	/* vm_protect(VM_PROT_COPY) on unnested mapping */
	T_LOG("==========================================");
	T_LOG("===== vm_protect(VM_PROT_COPY) on unnested mapping");
	T_LOG("==========================================");
	ri1 = ri2;
	T_EXPECT_EQ(ri1.is_submap, 0, "mapping should not be nested");
	kr = mach_vm_protect(mach_task_self(),
	    vmaddr_sub,
	    vmsize_sub,
	    FALSE, /* set_maximum */
	    VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
	T_EXPECT_MACH_ERROR(kr, KERN_NO_SPACE,
	    "vm_protect(VM_PROT_COPY) fails with KERN_NO_SPACE");
	/* check protections again */
	depth2 = 0;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(false);
	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should still be unnested");

	/* vm_allocate(VM_FLAGS_OVERWRITE) on top of unnested mapping */
	T_LOG("==========================================");
	T_LOG("===== vm_allocate(VM_FLAGS_OVERWRITE) on unnested mapping");
	T_LOG("==========================================");
	ri1 = ri2;
	T_EXPECT_EQ(ri1.is_submap, 0, "mapping should be unnested");
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_tmp,
	    vmsize_sub,
	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
	T_EXPECT_MACH_ERROR(kr, KERN_NO_SPACE, "vm_allocate(OVERWRITE) fails with KERN_NO_SPACE");
	T_EXPECT_EQ(vmaddr, vmaddr_tmp, "vmaddr is unchanged");
	/* check protections again */
	depth2 = 0;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(false);
	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should still be unnested");

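	/*
	 * The VM_PROT_COPY attempts above unnested the first mapping, so
	 * find a fresh nested read-only mapping for the vm_write() tests.
	 */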
	/* find a new nested read-only mapping */
	T_LOG("==========================================");
	T_LOG("");
	T_LOG("==========================================");
	if (!find_nested_read_only_mapping(&vmaddr_sub, &vmsize_sub, &ri2)) {
		T_FAIL("could not find appropriate mapping");
		return;
	}
	T_LOG("==========================================");
	/* get top-level mapping protections */
	vmaddr = vmaddr_sub;
	depth1 = 0;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr,
	    &vmsize,
	    &depth1,
	    (vm_region_recurse_info_t)&ri1,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_sub, vmsize_sub, &ri2);

	/* vm_write() on top of submap */
	T_LOG("==========================================");
	T_LOG("===== vm_write() on nested mapping");
	T_LOG("==========================================");
	T_EXPECT_EQ(ri1.is_submap, 1, "mapping should be nested");
	/* get a temporary buffer */
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_buf,
	    vmsize_sub,
	    VM_FLAGS_ANYWHERE);
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(0x%llx)", vmsize_sub);
	/*
	 * Fill the buffer with the mapping's own data, so that a (wrongly)
	 * successful vm_write() would not corrupt the shared region and
	 * cause an undue crash.
	 */
	memcpy((char *)(uintptr_t)vmaddr_buf,
	    (char *)(uintptr_t)vmaddr_sub,
	    (size_t)vmsize_sub);
	kr = mach_vm_write(mach_task_self(),
	    vmaddr_sub,                /* destination address */
	    vmaddr_buf,                /* source buffer */
	    vmsize_sub);
	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE,
	    "vm_write() on nested mapping fails with KERN_PROTECTION_FAILURE");
	/* check protections again */
	depth2 = 0;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr_sub, vmsize_sub, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(true);
	T_EXPECT_EQ(ri2.is_submap, 1, "mapping should still be nested");
	/* clipping expected (pmap unnesting), so reset expectations */
	vmaddr = vmaddr_tmp;
	vmsize = vmsize_tmp;

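	/*
	 * This vm_protect(VM_PROT_COPY) call is expected to fail, but to
	 * unnest the mapping from the shared-region submap as a side
	 * effect, possibly clipping the region in the process.
	 */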
	/* force un-nesting of that mapping */
	T_LOG("==========================================");
	T_LOG("===== unnesting the mapping");
	T_LOG("==========================================");
	ri1 = ri2;
	T_EXPECT_EQ(ri1.is_submap, 1, "mapping should be nested");
	kr = mach_vm_protect(mach_task_self(),
	    vmaddr_sub,
	    vmsize_sub,
	    FALSE, /* set_maximum */
	    VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "vm_protect(0x%llx,0x%llx,VM_PROT_COPY) fails with KERN_PROTECTION_FAILURE", (uint64_t)vmaddr_sub, (uint64_t)vmsize_sub);
	/* check protections again */
	depth2 = 0;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(true);
	/* clipping expected (pmap unnesting), so reset expectations */
	vmaddr = vmaddr_tmp;
	vmsize = vmsize_tmp;
	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should now be unnested");

	/* vm_write() on top of unnested mapping */
	T_LOG("==========================================");
	T_LOG("===== vm_write() on unnested mapping");
	T_LOG("==========================================");
	ri1 = ri2;
	T_EXPECT_EQ(ri1.is_submap, 0, "mapping should be unnested");
	/* we re-use vmaddr_buf from the test above... */
	kr = mach_vm_write(mach_task_self(),
	    vmaddr_sub,                /* destination address */
	    vmaddr_buf,                /* source buffer */
	    vmsize_sub);
	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE,
	    "vm_write() on unnested mapping fails with KERN_PROTECTION_FAILURE");
	/* check protections again */
	depth2 = 0;
	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
	vmaddr_tmp = vmaddr_sub;
	kr = mach_vm_region_recurse(mach_task_self(),
	    &vmaddr_tmp,
	    &vmsize_tmp,
	    &depth2,
	    (vm_region_recurse_info_t)&ri2,
	    &count);
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_sub);
	print_region_info(vmaddr_sub, vmsize_sub, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
	ASSERT_UNCHANGED(false);
	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should still be unnested");

//	T_LOG("pausing..."); getchar();
}

T_DECL(shared_region_x86_writable, "Tests that setting PROT_WRITE on a shared-region mapping succeeds on Intel only and fails on all other architectures",
    T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	mach_vm_address_t vmaddr_sub;
	mach_vm_size_t vmsize_sub;
	vm_region_submap_short_info_data_64_t ri;

	if (!find_nested_read_only_mapping(&vmaddr_sub, &vmsize_sub, &ri)) {
		T_FAIL("could not find appropriate mapping");
		return;
	}

	kern_return_t kr;
	kr = mach_vm_protect(mach_task_self(), vmaddr_sub, vmsize_sub, FALSE, VM_PROT_READ | VM_PROT_WRITE);

#if defined(__x86_64__) || defined(__i386__)
	T_ASSERT_MACH_SUCCESS(kr, "mach_vm_protect()");
#else /* defined(__x86_64__) || defined(__i386__) */
	T_ASSERT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "mach_vm_protect()");
#endif /* defined(__x86_64__) || defined(__i386__) */
}