/* xref: /xnu-11417.140.69/tests/vm/vm_unaligned_copy_switch_race.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4) */
#include <darwintest.h>
#include <darwintest_utils.h>

#include <dispatch/dispatch.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
8 T_GLOBAL_META(
9 	T_META_NAMESPACE("xnu.vm"),
10 	T_META_RADAR_COMPONENT_NAME("xnu"),
11 	T_META_RADAR_COMPONENT_VERSION("VM"));
12 
/*
 * State shared between the main test thread and the "switcheroo" thread
 * that races mapping switches against an unaligned VM copy.
 */
struct context1 {
	vm_size_t obj_size;               /* size of each mapping, in bytes */
	vm_address_t e0;                  /* address whose mapping is toggled RO <-> RW */
	mach_port_t mem_entry_ro;         /* read-only memory entry for the attack target */
	mach_port_t mem_entry_rw;         /* read-write memory entry (decoy mapping) */
	dispatch_semaphore_t running_sem; /* signaled once the racing thread is running */
	pthread_mutex_t mtx;              /* serializes mapping switches vs. the copy */
	bool done;                        /* set by the main thread to stop the racer */
};
22 
/*
 * Racing thread: repeatedly remaps ctx->e0, toggling it between the
 * read-write memory entry and the original read-only one, until the
 * main thread sets ctx->done.  The mapping switches race against the
 * unaligned vm_read_overwrite() performed by the main thread; ctx->mtx
 * keeps the switches from overlapping the main thread's setup/teardown.
 */
static void *
switcheroo_thread(__unused void *arg)
{
	kern_return_t kr;
	struct context1 *ctx;

	ctx = (struct context1 *)arg;
	/* tell main thread we're ready to run */
	dispatch_semaphore_signal(ctx->running_sem);
	while (!ctx->done) {
		/* wait for main thread to be done setting things up */
		pthread_mutex_lock(&ctx->mtx);
		if (ctx->done) {
			/* don't exit while still holding the mutex */
			pthread_mutex_unlock(&ctx->mtx);
			break;
		}
		/* switch e0 to RW mapping */
		kr = vm_map(mach_task_self(),
		    &ctx->e0,
		    ctx->obj_size,
		    0,         /* mask */
		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
		    ctx->mem_entry_rw,
		    0,
		    FALSE,         /* copy */
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_PROT_READ | VM_PROT_WRITE,
		    VM_INHERIT_DEFAULT);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_map() RW");
		/* wait a little bit */
		usleep(100);
		/* switch back to original RO mapping */
		kr = vm_map(mach_task_self(),
		    &ctx->e0,
		    ctx->obj_size,
		    0,         /* mask */
		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
		    ctx->mem_entry_ro,
		    0,
		    FALSE,         /* copy */
		    VM_PROT_READ,
		    VM_PROT_READ,
		    VM_INHERIT_DEFAULT);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_map() RO");
		/* tell main thread we're done switching mappings */
		pthread_mutex_unlock(&ctx->mtx);
		usleep(100);
	}
	return NULL;
}
72 
/*
 * Regression test for an unaligned-copy / mapping-switch race:
 * a racing thread toggles a mapping between a read-write memory entry
 * and a read-only one while the main thread performs an unaligned
 * vm_read_overwrite() whose destination straddles into that mapping.
 * Whatever the race outcome, the read-only target memory must never
 * actually be modified.
 */
T_DECL(unaligned_copy_switch_race,
    "Test that unaligned copy respects read-only mapping", T_META_TAG_VM_PREFERRED)
{
	pthread_t th = NULL;
	int ret;
	kern_return_t kr;
	time_t start, duration;
	vm_size_t copied_size;
	int loops;
	vm_address_t e2, e5;
	struct context1 context1, *ctx;
	int kern_success = 0, kern_protection_failure = 0, kern_other = 0;
	vm_address_t ro_addr, tmp_addr;
	memory_object_size_t mo_size;

	ctx = &context1;
	ctx->obj_size = 256 * 1024;
	ctx->e0 = 0;
	ctx->running_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NE(ctx->running_sem, NULL, "dispatch_semaphore_create");
	ret = pthread_mutex_init(&ctx->mtx, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pthread_mutex_init");
	ctx->done = false;
	ctx->mem_entry_rw = MACH_PORT_NULL;
	ctx->mem_entry_ro = MACH_PORT_NULL;

	/* allocate our attack target memory */
	kr = vm_allocate(mach_task_self(),
	    &ro_addr,
	    ctx->obj_size,
	    VM_FLAGS_ANYWHERE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate ro_addr");
	/* initialize to 'A' */
	memset((char *)ro_addr, 'A', ctx->obj_size);
	/* make it read-only (current and maximum protections) */
	kr = vm_protect(mach_task_self(),
	    ro_addr,
	    ctx->obj_size,
	    TRUE,             /* set_maximum */
	    VM_PROT_READ);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_protect ro_addr");
	/* make sure we can't get a read-write handle on that target memory */
	mo_size = ctx->obj_size;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &mo_size,
	    ro_addr,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &ctx->mem_entry_ro,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "make_mem_entry() RO");
	/* take a read-only handle on that target memory */
	mo_size = ctx->obj_size;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &mo_size,
	    ro_addr,
	    MAP_MEM_VM_SHARE | VM_PROT_READ,
	    &ctx->mem_entry_ro,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "make_mem_entry() RO");
	T_QUIET; T_ASSERT_EQ(mo_size, (memory_object_size_t)ctx->obj_size, "wrong mem_entry size");
	/* make sure we can't map target memory as writable */
	tmp_addr = 0;
	kr = vm_map(mach_task_self(),
	    &tmp_addr,
	    ctx->obj_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE,
	    ctx->mem_entry_ro,
	    0,
	    FALSE,         /* copy */
	    VM_PROT_READ,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_ERROR(kr, KERN_INVALID_RIGHT, " vm_map() mem_entry_ro max RW");
	tmp_addr = 0;
	kr = vm_map(mach_task_self(),
	    &tmp_addr,
	    ctx->obj_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE,
	    ctx->mem_entry_ro,
	    0,
	    FALSE,         /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET; T_EXPECT_MACH_ERROR(kr, KERN_INVALID_RIGHT, " vm_map() mem_entry_ro cur RW");

	/* allocate a source buffer for the unaligned copy */
	kr = vm_allocate(mach_task_self(),
	    &e5,
	    ctx->obj_size,
	    VM_FLAGS_ANYWHERE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate e5");
	/* initialize to 'C' */
	memset((char *)e5, 'C', ctx->obj_size);

	/*
	 * get a handle on some writable memory that will be temporarily
	 * switched with the read-only mapping of our target memory to try
	 * and trick copy_unaligned to write to our read-only target.
	 */
	tmp_addr = 0;
	kr = vm_allocate(mach_task_self(),
	    &tmp_addr,
	    ctx->obj_size,
	    VM_FLAGS_ANYWHERE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate() some rw memory");
	/* initialize to 'D' */
	memset((char *)tmp_addr, 'D', ctx->obj_size);
	/* get a memory entry handle for that RW memory */
	mo_size = ctx->obj_size;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &mo_size,
	    tmp_addr,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &ctx->mem_entry_rw,
	    MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "make_mem_entry() RW");
	T_QUIET; T_ASSERT_EQ(mo_size, (memory_object_size_t)ctx->obj_size, "wrong mem_entry size");
	/* the memory entry keeps the object alive; drop the mapping */
	kr = vm_deallocate(mach_task_self(), tmp_addr, ctx->obj_size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate() tmp_addr 0x%llx", (uint64_t)tmp_addr);
	tmp_addr = 0;

	pthread_mutex_lock(&ctx->mtx);

	/* start racing thread */
	ret = pthread_create(&th, NULL, switcheroo_thread, (void *)ctx);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pthread_create");

	/* wait for racing thread to be ready to run */
	dispatch_semaphore_wait(ctx->running_sem, DISPATCH_TIME_FOREVER);

	duration = 10; /* 10 seconds */
	T_LOG("Testing for %ld seconds...", duration);
	for (start = time(NULL), loops = 0;
	    time(NULL) < start + duration;
	    loops++) {
		/* reserve space for our 2 contiguous allocations */
		e2 = 0;
		kr = vm_allocate(mach_task_self(),
		    &e2,
		    2 * ctx->obj_size,
		    VM_FLAGS_ANYWHERE);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate to reserve e2+e0");

		/* make 1st allocation in our reserved space */
		kr = vm_allocate(mach_task_self(),
		    &e2,
		    ctx->obj_size,
		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(240));
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate e2");
		/* initialize to 'B' */
		memset((char *)e2, 'B', ctx->obj_size);

		/* map our read-only target memory right after */
		ctx->e0 = e2 + ctx->obj_size;
		kr = vm_map(mach_task_self(),
		    &ctx->e0,
		    ctx->obj_size,
		    0,         /* mask */
		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(241),
		    ctx->mem_entry_ro,
		    0,
		    FALSE,         /* copy */
		    VM_PROT_READ,
		    VM_PROT_READ,
		    VM_INHERIT_DEFAULT);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_map() mem_entry_ro");

		/* let the racing thread go */
		pthread_mutex_unlock(&ctx->mtx);
		/* wait a little bit */
		usleep(100);

		/*
		 * trigger copy_unaligned while racing with the other thread:
		 * the +1 destination offset makes the copy unaligned and
		 * makes it spill from e2 into the e0 mapping being toggled.
		 */
		kr = vm_read_overwrite(mach_task_self(),
		    e5,
		    ctx->obj_size,
		    e2 + 1,
		    &copied_size);
		T_QUIET;
		T_ASSERT_TRUE(kr == KERN_SUCCESS || kr == KERN_PROTECTION_FAILURE,
		    "vm_read_overwrite kr %d", kr);
		switch (kr) {
		case KERN_SUCCESS:
			/* the target was RW */
			kern_success++;
			break;
		case KERN_PROTECTION_FAILURE:
			/* the target was RO */
			kern_protection_failure++;
			break;
		default:
			/* should not happen */
			kern_other++;
			break;
		}
		/* check that our read-only memory was not modified */
		T_QUIET; T_ASSERT_EQ(*(char *)ro_addr, 'A', "RO mapping was modified");

		/* tell racing thread to stop toggling mappings */
		pthread_mutex_lock(&ctx->mtx);

		/* clean up before next loop */
		vm_deallocate(mach_task_self(), ctx->e0, ctx->obj_size);
		ctx->e0 = 0;
		vm_deallocate(mach_task_self(), e2, ctx->obj_size);
		e2 = 0;
	}

	ctx->done = true;
	pthread_mutex_unlock(&ctx->mtx);
	pthread_join(th, NULL);

	kr = mach_port_deallocate(mach_task_self(), ctx->mem_entry_rw);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate(me_rw)");
	kr = mach_port_deallocate(mach_task_self(), ctx->mem_entry_ro);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate(me_ro)");
	kr = vm_deallocate(mach_task_self(), ro_addr, ctx->obj_size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate(ro_addr)");
	kr = vm_deallocate(mach_task_self(), e5, ctx->obj_size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate(e5)");

	T_LOG("vm_read_overwrite: KERN_SUCCESS:%d KERN_PROTECTION_FAILURE:%d other:%d",
	    kern_success, kern_protection_failure, kern_other);
	T_PASS("Ran %d times in %ld seconds with no failure", loops, duration);
}
303