/* xref: /xnu-12377.41.6/tests/vm/vm_test_linkedit_permanent.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828) */
#include <darwintest.h>

#include <stdlib.h>
#include <string.h>

#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
/* darwintest metadata: file this test under the xnu VM component. */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"));
13 
14 static char *_prot_str[] = {
15 	/* 0 */ "---",
16 	/* 1 */ "r--",
17 	/* 2 */ "-w-",
18 	/* 3 */ "rw-",
19 	/* 4 */ "--x",
20 	/* 5 */ "r-x",
21 	/* 6 */ "-wx",
22 	/* 7 */ "rwx"
23 };
24 static char *
prot_str(vm_prot_t prot)25 prot_str(vm_prot_t prot)
26 {
27 	return _prot_str[prot & VM_PROT_ALL];
28 }
29 
30 static void
print_region_info(mach_vm_address_t vmaddr,mach_vm_size_t vmsize,vm_region_submap_short_info_64_t ri,mach_vm_address_t vmaddr2,mach_vm_size_t vmsize2,vm_region_submap_short_info_64_t ri2)31 print_region_info(
32 	mach_vm_address_t vmaddr,
33 	mach_vm_size_t vmsize,
34 	vm_region_submap_short_info_64_t ri,
35 	mach_vm_address_t vmaddr2,
36 	mach_vm_size_t vmsize2,
37 	vm_region_submap_short_info_64_t ri2)
38 {
39 	T_LOG("   [ 0x%016llx - 0x%016llx ] size 0x%016llx prot 0x%x/0x%x %s/%s submap %d\n",
40 	    (uint64_t)vmaddr, (uint64_t)(vmaddr + vmsize), (uint64_t)vmsize,
41 	    ri->protection, ri->max_protection,
42 	    prot_str(ri->protection),
43 	    prot_str(ri->max_protection),
44 	    ri->is_submap);
45 	if (ri2) {
46 		T_LOG("-> [ 0x%016llx - 0x%016llx ] size 0x%016llx prot 0x%x/0x%x %s/%s submap %d\n",
47 		    (uint64_t)vmaddr2, (uint64_t)(vmaddr2 + vmsize2),
48 		    (uint64_t)vmsize2,
49 		    ri2->protection, ri2->max_protection,
50 		    prot_str(ri2->protection), prot_str(ri2->max_protection),
51 		    ri2->is_submap);
52 	}
53 }
54 
55 static bool
find_first_read_only_mapping(mach_vm_address_t * vmaddr_p,mach_vm_size_t * vmsize_p,vm_region_submap_short_info_64_t ri)56 find_first_read_only_mapping(
57 	mach_vm_address_t *vmaddr_p,
58 	mach_vm_size_t *vmsize_p,
59 	vm_region_submap_short_info_64_t ri)
60 {
61 	kern_return_t kr;
62 	mach_vm_address_t vmaddr;
63 	mach_vm_size_t vmsize;
64 	natural_t depth;
65 	mach_msg_type_number_t count;
66 
67 	T_LOG("===== Looking for first read-only mapping");
68 	/* find the first read-only mapping */
69 	for (vmaddr = 0;; vmaddr += vmsize) {
70 		depth = 0;
71 		count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
72 		kr = mach_vm_region_recurse(mach_task_self(),
73 		    &vmaddr,
74 		    &vmsize,
75 		    &depth,
76 		    (vm_region_recurse_info_t)ri,
77 		    &count);
78 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr);
79 		if (kr != KERN_SUCCESS) {
80 			/* end of address space */
81 			T_FAIL("could not find first read-only mapping");
82 			return false;
83 		}
84 		if (ri->is_submap) {
85 			/* submap: keep looking */
86 			continue;
87 		}
88 		if (ri->max_protection != VM_PROT_READ) {
89 			/* not read-only: keep looking */
90 			continue;
91 		}
92 		T_PASS("Found first read-only mapping at 0x%llx size 0x%llx\n", (uint64_t)vmaddr, (uint64_t)vmsize);
93 		*vmaddr_p = vmaddr;
94 		*vmsize_p = vmsize;
95 		return true;
96 	}
97 	return false;
98 }
99 
100 T_DECL(vm_test_linkedit_permanent, "Tests that LINKEDIT mapping can't be overwritten", T_META_TAG_VM_PREFERRED)
101 {
102 	kern_return_t kr;
103 	mach_vm_address_t vmaddr, vmaddr_linkedit, vmaddr_tmp, vmaddr_buf;
104 	mach_vm_size_t vmsize, vmsize_linkedit, vmsize_tmp;
105 	natural_t depth1, depth2;
106 	mach_msg_type_number_t count;
107 	vm_region_submap_short_info_data_64_t ri1, ri2;
108 	vm_prot_t cur_prot, max_prot;
109 
110 #if __x86_64__
111 	T_SKIP("x86_64: LINKEDIT mappings are not protected");
112 	return;
113 #endif /* __x86_64 */
114 
115 #define ASSERT_UNCHANGED(clip_ok, object_change_ok)                     \
116 	do {                                                            \
117 	        if (clip_ok) {                                          \
118 	                T_EXPECT_GE(vmaddr_tmp, vmaddr,                 \
119 	                    "vmaddr clipped 0x%llx -> 0x%llx",          \
120 	                    (uint64_t)vmaddr, (uint64_t)vmaddr_tmp);    \
121 	                T_EXPECT_LE(vmsize_tmp, vmsize,                 \
122 	                    "vmsize clipped 0x%llx -> 0x%llx",          \
123 	                    (uint64_t)vmsize, (uint64_t)vmsize_tmp);    \
124 	        } else {                                                \
125 	                T_EXPECT_EQ(vmaddr_tmp, vmaddr,                 \
126 	                    "vmaddr unchanged 0x%llx -> 0x%llx",        \
127 	                    (uint64_t)vmaddr, (uint64_t)vmaddr_tmp);    \
128 	                T_EXPECT_EQ(vmsize_tmp, vmsize,                 \
129 	                            "vmsize unchanged 0x%llx -> 0x%llx", \
130 	                            (uint64_t)vmsize, (uint64_t)vmsize_tmp); \
131 	        }                                                       \
132 	        T_EXPECT_LE(ri2.protection, VM_PROT_READ,               \
133 	                    "should not become writable");              \
134 	        T_EXPECT_LE(ri2.max_protection, VM_PROT_READ,           \
135 	                    "should not become able to become writable"); \
136 	        if (!object_change_ok) {                                \
137 	                T_EXPECT_EQ(ri2.object_id, ri1.object_id,       \
138 	                            "object id should not change");     \
139 	        }                                                       \
140 	} while (0)
141 
142 	T_LOG("==========================================");
143 	if (!find_first_read_only_mapping(&vmaddr_linkedit, &vmsize_linkedit, &ri2)) {
144 		T_FAIL("could not find appropriate mapping");
145 		return;
146 	}
147 	T_LOG("==========================================");
148 	/* get top-level mapping protections */
149 	vmaddr = vmaddr_linkedit;
150 	depth1 = 0;
151 	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
152 	kr = mach_vm_region_recurse(mach_task_self(),
153 	    &vmaddr,
154 	    &vmsize,
155 	    &depth1,
156 	    (vm_region_recurse_info_t)&ri1,
157 	    &count);
158 	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_linkedit);
159 	print_region_info(vmaddr, vmsize, &ri1, vmaddr_linkedit, vmsize_linkedit, &ri2);
160 
161 
162 
163 	/* vm_write() on LINKEDIT mapping */
164 	T_LOG("==========================================");
165 	T_LOG("===== vm_write() on LINKEDIT mapping");
166 	T_LOG("==========================================");
167 	T_EXPECT_EQ(ri1.is_submap, 0, "mapping should be nested");
168 	/* get a temporary buffer */
169 	kr = mach_vm_allocate(mach_task_self(),
170 	    &vmaddr_buf,
171 	    vmsize_linkedit,
172 	    VM_FLAGS_ANYWHERE);
173 	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(0x%llx)", vmsize_linkedit);
174 	/* copy the data to avoid undue crash */
175 	memcpy((char *)(uintptr_t)vmaddr_buf,
176 	    (char *)(uintptr_t)vmaddr_linkedit,
177 	    (size_t)vmsize_linkedit);
178 	kr = mach_vm_write(mach_task_self(),
179 	    vmaddr_linkedit,           /* destination address */
180 	    vmaddr_buf,                /* source buffer */
181 	    (mach_msg_type_number_t) vmsize_linkedit);
182 	T_EXPECT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE,
183 	    "vm_write() on LINKEDIT mapping fails with KERN_PROTECTION_FAILURE");
184 	/* check protections again */
185 	depth2 = 0;
186 	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
187 	vmaddr_tmp = vmaddr_linkedit;
188 	kr = mach_vm_region_recurse(mach_task_self(),
189 	    &vmaddr_tmp,
190 	    &vmsize_tmp,
191 	    &depth2,
192 	    (vm_region_recurse_info_t)&ri2,
193 	    &count);
194 	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_linkedit);
195 	print_region_info(vmaddr_linkedit, vmsize_linkedit, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
196 	ASSERT_UNCHANGED(false, false);
197 	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should not be nested");
198 
199 	/* vm_allocate(VM_FLAGS_OVERWRITE) on top of LINKEDIT */
200 	T_LOG("==========================================");
201 	T_LOG("===== vm_allocate(VM_FLAGS_OVERWRITE) on LINKEDIT mapping");
202 	T_LOG("==========================================");
203 	T_EXPECT_EQ(ri1.is_submap, 0, "mapping should not be nested");
204 	vmaddr_tmp = vmaddr_linkedit;
205 	kr = mach_vm_allocate(mach_task_self(),
206 	    &vmaddr_tmp,
207 	    vmsize_linkedit,
208 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE);
209 	T_EXPECT_MACH_ERROR(kr, KERN_NO_SPACE, "vm_allocate(OVERWRITE) fails with KERN_NO_SPACE");
210 	T_EXPECT_EQ(vmaddr_linkedit, vmaddr_tmp, "vmaddr is unchanged");
211 	/* check protections again */
212 	depth2 = 0;
213 	vmaddr_tmp = vmaddr_linkedit;
214 	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
215 	kr = mach_vm_region_recurse(mach_task_self(),
216 	    &vmaddr_tmp,
217 	    &vmsize_tmp,
218 	    &depth2,
219 	    (vm_region_recurse_info_t)&ri2,
220 	    &count);
221 	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_linkedit);
222 	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
223 	ASSERT_UNCHANGED(false, false);
224 	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should not be nested");
225 
226 	/* vm_remap(VM_FLAGS_OVERWRITE) on top of submap */
227 	T_LOG("==========================================");
228 	T_LOG("===== vm_remap(VM_FLAGS_OVERWRITE) on LINKEDIT mapping");
229 	T_LOG("==========================================");
230 	T_EXPECT_EQ(ri1.is_submap, 0, "mapping should not be nested");
231 	vmaddr_tmp = vmaddr_linkedit;
232 	kr = mach_vm_remap(mach_task_self(),
233 	    &vmaddr_tmp,
234 	    vmsize_linkedit,
235 	    0,
236 	    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
237 	    mach_task_self(),
238 	    vmaddr_buf,
239 	    TRUE,
240 	    &cur_prot,
241 	    &max_prot,
242 	    VM_INHERIT_DEFAULT);
243 	T_EXPECT_MACH_ERROR(kr, KERN_NO_SPACE, "vm_remap(OVERWRITE) fails with KERN_NO_SPACE");
244 	T_EXPECT_EQ(vmaddr_linkedit, vmaddr_tmp, "vmaddr is unchanged");
245 	/* check protections again */
246 	depth2 = 0;
247 	vmaddr_tmp = vmaddr_linkedit;
248 	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
249 	kr = mach_vm_region_recurse(mach_task_self(),
250 	    &vmaddr_tmp,
251 	    &vmsize_tmp,
252 	    &depth2,
253 	    (vm_region_recurse_info_t)&ri2,
254 	    &count);
255 	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_linkedit);
256 	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
257 	ASSERT_UNCHANGED(false, false);
258 	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should not be nested");
259 
260 	/* vm_protect(VM_PROT_COPY) on LINKEDIT mapping */
261 	T_LOG("==========================================");
262 	T_LOG("===== vm_protect(VM_PROT_COPY) on LINKEDIT mapping");
263 	T_LOG("==========================================");
264 	ri1 = ri2;
265 	T_EXPECT_EQ(ri1.is_submap, 0, "mapping should not be nested");
266 	kr = mach_vm_protect(mach_task_self(),
267 	    vmaddr_linkedit,
268 	    vmsize_linkedit,
269 	    FALSE,                  /* set_maximum */
270 	    VM_PROT_COPY | VM_PROT_READ | VM_PROT_WRITE);
271 	T_EXPECT_MACH_ERROR(kr, KERN_NO_SPACE,
272 	    "vm_protect(VM_PROT_COPY) fails with KERN_NO_SPACE");
273 	/* check protections again */
274 	depth2 = 0;
275 	count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
276 	vmaddr_tmp = vmaddr_linkedit;
277 	kr = mach_vm_region_recurse(mach_task_self(),
278 	    &vmaddr_tmp,
279 	    &vmsize_tmp,
280 	    &depth2,
281 	    (vm_region_recurse_info_t)&ri2,
282 	    &count);
283 	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_region_recurse(0x%llx)\n", vmaddr_linkedit);
284 	print_region_info(vmaddr, vmsize, &ri1, vmaddr_tmp, vmsize_tmp, &ri2);
285 	ASSERT_UNCHANGED(false, true);
286 	T_EXPECT_EQ(ri2.is_submap, 0, "mapping should not be nested");
287 
288 //	T_LOG("pausing..."); getchar();
289 }
290