/* xref: /xnu-8792.61.2/tests/vm/vm_unaligned_copy_switch_race.c (revision 42e220869062b56f8d7d0726fd4c88954f87902c) */
#include <darwintest.h>
#include <darwintest_utils.h>

#include <dispatch/dispatch.h>
#include <pthread.h>
#include <stdbool.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
/* darwintest metadata: file this suite's results under the xnu.vm namespace. */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"));
12 
/*
 * State shared between the main test thread and the "switcheroo" racer
 * thread that toggles a mapping between read-only and read-write entries.
 */
struct context1 {
	vm_size_t obj_size;               /* size of every mapping/allocation in the test */
	vm_address_t e0;                  /* address whose mapping the racer toggles RO<->RW */
	mach_port_t mem_entry_ro;         /* read-only memory entry for the target memory */
	mach_port_t mem_entry_rw;         /* read-write memory entry (decoy memory) */
	dispatch_semaphore_t running_sem; /* signaled once the racer thread is running */
	pthread_mutex_t mtx;              /* serializes mapping toggles vs. setup/teardown */
	bool done;                        /* set by the main thread to stop the racer */
};
22 
23 static void *
switcheroo_thread(__unused void * arg)24 switcheroo_thread(__unused void *arg)
25 {
26 	kern_return_t kr;
27 	struct context1 *ctx;
28 
29 	ctx = (struct context1 *)arg;
30 	/* tell main thread we're ready to run */
31 	dispatch_semaphore_signal(ctx->running_sem);
32 	while (!ctx->done) {
33 		/* wait for main thread to be done setting things up */
34 		pthread_mutex_lock(&ctx->mtx);
35 		/* switch e0 to RW mapping */
36 		kr = vm_map(mach_task_self(),
37 		    &ctx->e0,
38 		    ctx->obj_size,
39 		    0,         /* mask */
40 		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
41 		    ctx->mem_entry_rw,
42 		    0,
43 		    FALSE,         /* copy */
44 		    VM_PROT_READ | VM_PROT_WRITE,
45 		    VM_PROT_READ | VM_PROT_WRITE,
46 		    VM_INHERIT_DEFAULT);
47 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_map() RW");
48 		/* wait a little bit */
49 		usleep(100);
50 		/* switch bakc to original RO mapping */
51 		kr = vm_map(mach_task_self(),
52 		    &ctx->e0,
53 		    ctx->obj_size,
54 		    0,         /* mask */
55 		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
56 		    ctx->mem_entry_ro,
57 		    0,
58 		    FALSE,         /* copy */
59 		    VM_PROT_READ,
60 		    VM_PROT_READ,
61 		    VM_INHERIT_DEFAULT);
62 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_map() RO");
63 		/* tell main thread we're don switching mappings */
64 		pthread_mutex_unlock(&ctx->mtx);
65 		usleep(100);
66 	}
67 	return NULL;
68 }
69 
70 T_DECL(unaligned_copy_switch_race,
71     "Test that unaligned copy respects read-only mapping")
72 {
73 	pthread_t th = NULL;
74 	int ret;
75 	kern_return_t kr;
76 	time_t start, duration;
77 	mach_msg_type_number_t cow_read_size;
78 	vm_size_t copied_size;
79 	int loops;
80 	vm_address_t e2, e5;
81 	struct context1 context1, *ctx;
82 	int kern_success = 0, kern_protection_failure = 0, kern_other = 0;
83 	vm_address_t ro_addr, tmp_addr;
84 	memory_object_size_t mo_size;
85 
86 	ctx = &context1;
87 	ctx->obj_size = 256 * 1024;
88 	ctx->e0 = 0;
89 	ctx->running_sem = dispatch_semaphore_create(0);
90 	T_QUIET; T_ASSERT_NE(ctx->running_sem, NULL, "dispatch_semaphore_create");
91 	ret = pthread_mutex_init(&ctx->mtx, NULL);
92 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pthread_mutex_init");
93 	ctx->done = false;
94 	ctx->mem_entry_rw = MACH_PORT_NULL;
95 	ctx->mem_entry_ro = MACH_PORT_NULL;
96 
97 	/* allocate our attack target memory */
98 	kr = vm_allocate(mach_task_self(),
99 	    &ro_addr,
100 	    ctx->obj_size,
101 	    VM_FLAGS_ANYWHERE);
102 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate ro_addr");
103 	/* initialize to 'A' */
104 	memset((char *)ro_addr, 'A', ctx->obj_size);
105 	/* make it read-only */
106 	kr = vm_protect(mach_task_self(),
107 	    ro_addr,
108 	    ctx->obj_size,
109 	    TRUE,             /* set_maximum */
110 	    VM_PROT_READ);
111 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_protect ro_addr");
112 	/* make sure we can't get read-write handle on that target memory */
113 	mo_size = ctx->obj_size;
114 	kr = mach_make_memory_entry_64(mach_task_self(),
115 	    &mo_size,
116 	    ro_addr,
117 	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
118 	    &ctx->mem_entry_ro,
119 	    MACH_PORT_NULL);
120 	T_QUIET; T_ASSERT_MACH_ERROR(kr, KERN_PROTECTION_FAILURE, "make_mem_entry() RO");
121 	/* take read-only handle on that target memory */
122 	mo_size = ctx->obj_size;
123 	kr = mach_make_memory_entry_64(mach_task_self(),
124 	    &mo_size,
125 	    ro_addr,
126 	    MAP_MEM_VM_SHARE | VM_PROT_READ,
127 	    &ctx->mem_entry_ro,
128 	    MACH_PORT_NULL);
129 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "make_mem_entry() RO");
130 	T_QUIET; T_ASSERT_EQ(mo_size, (memory_object_size_t)ctx->obj_size, "wrong mem_entry size");
131 	/* make sure we can't map target memory as writable */
132 	tmp_addr = 0;
133 	kr = vm_map(mach_task_self(),
134 	    &tmp_addr,
135 	    ctx->obj_size,
136 	    0,         /* mask */
137 	    VM_FLAGS_ANYWHERE,
138 	    ctx->mem_entry_ro,
139 	    0,
140 	    FALSE,         /* copy */
141 	    VM_PROT_READ,
142 	    VM_PROT_READ | VM_PROT_WRITE,
143 	    VM_INHERIT_DEFAULT);
144 	T_QUIET; T_EXPECT_MACH_ERROR(kr, KERN_INVALID_RIGHT, " vm_map() mem_entry_rw");
145 	tmp_addr = 0;
146 	kr = vm_map(mach_task_self(),
147 	    &tmp_addr,
148 	    ctx->obj_size,
149 	    0,         /* mask */
150 	    VM_FLAGS_ANYWHERE,
151 	    ctx->mem_entry_ro,
152 	    0,
153 	    FALSE,         /* copy */
154 	    VM_PROT_READ | VM_PROT_WRITE,
155 	    VM_PROT_READ | VM_PROT_WRITE,
156 	    VM_INHERIT_DEFAULT);
157 	T_QUIET; T_EXPECT_MACH_ERROR(kr, KERN_INVALID_RIGHT, " vm_map() mem_entry_rw");
158 
159 	/* allocate a source buffer for the unaligned copy */
160 	kr = vm_allocate(mach_task_self(),
161 	    &e5,
162 	    ctx->obj_size,
163 	    VM_FLAGS_ANYWHERE);
164 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate e5");
165 	/* initialize to 'C' */
166 	memset((char *)e5, 'C', ctx->obj_size);
167 
168 	/*
169 	 * get a handle on some writable memory that will be temporarily
170 	 * switched with the read-only mapping of our target memory to try
171 	 * and trick copy_unaligned to write to our read-only target.
172 	 */
173 	tmp_addr = 0;
174 	kr = vm_allocate(mach_task_self(),
175 	    &tmp_addr,
176 	    ctx->obj_size,
177 	    VM_FLAGS_ANYWHERE);
178 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate() some rw memory");
179 	/* initialize to 'D' */
180 	memset((char *)tmp_addr, 'D', ctx->obj_size);
181 	/* get a memory entry handle for that RW memory */
182 	mo_size = ctx->obj_size;
183 	kr = mach_make_memory_entry_64(mach_task_self(),
184 	    &mo_size,
185 	    tmp_addr,
186 	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
187 	    &ctx->mem_entry_rw,
188 	    MACH_PORT_NULL);
189 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "make_mem_entry() RW");
190 	T_QUIET; T_ASSERT_EQ(mo_size, (memory_object_size_t)ctx->obj_size, "wrong mem_entry size");
191 	kr = vm_deallocate(mach_task_self(), tmp_addr, ctx->obj_size);
192 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate() tmp_addr 0x%llx", (uint64_t)tmp_addr);
193 	tmp_addr = 0;
194 
195 	pthread_mutex_lock(&ctx->mtx);
196 
197 	/* start racing thread */
198 	ret = pthread_create(&th, NULL, switcheroo_thread, (void *)ctx);
199 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pthread_create");
200 
201 	/* wait for racing thread to be ready to run */
202 	dispatch_semaphore_wait(ctx->running_sem, DISPATCH_TIME_FOREVER);
203 
204 	duration = 10; /* 10 seconds */
205 	T_LOG("Testing for %ld seconds...", duration);
206 	for (start = time(NULL), loops = 0;
207 	    time(NULL) < start + duration;
208 	    loops++) {
209 		/* reserve space for our 2 contiguous allocations */
210 		e2 = 0;
211 		kr = vm_allocate(mach_task_self(),
212 		    &e2,
213 		    2 * ctx->obj_size,
214 		    VM_FLAGS_ANYWHERE);
215 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate to reserve e2+e0");
216 
217 		/* make 1st allocation in our reserved space */
218 		kr = vm_allocate(mach_task_self(),
219 		    &e2,
220 		    ctx->obj_size,
221 		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(240));
222 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate e2");
223 		/* initialize to 'B' */
224 		memset((char *)e2, 'B', ctx->obj_size);
225 
226 		/* map our read-only target memory right after */
227 		ctx->e0 = e2 + ctx->obj_size;
228 		kr = vm_map(mach_task_self(),
229 		    &ctx->e0,
230 		    ctx->obj_size,
231 		    0,         /* mask */
232 		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(241),
233 		    ctx->mem_entry_ro,
234 		    0,
235 		    FALSE,         /* copy */
236 		    VM_PROT_READ,
237 		    VM_PROT_READ,
238 		    VM_INHERIT_DEFAULT);
239 		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_map() mem_entry_ro");
240 
241 		/* let the racing thread go */
242 		pthread_mutex_unlock(&ctx->mtx);
243 		/* wait a little bit */
244 		usleep(100);
245 
246 		/* trigger copy_unaligned while racing with other thread */
247 		kr = vm_read_overwrite(mach_task_self(),
248 		    e5,
249 		    ctx->obj_size,
250 		    e2 + 1,
251 		    &copied_size);
252 		T_QUIET;
253 		T_ASSERT_TRUE(kr == KERN_SUCCESS || kr == KERN_PROTECTION_FAILURE,
254 		    "vm_read_overwrite kr %d", kr);
255 		switch (kr) {
256 		case KERN_SUCCESS:
257 			/* the target was RW */
258 			kern_success++;
259 			break;
260 		case KERN_PROTECTION_FAILURE:
261 			/* the target was RO */
262 			kern_protection_failure++;
263 			break;
264 		default:
265 			/* should not happen */
266 			kern_other++;
267 			break;
268 		}
269 		/* check that our read-only memory was not modified */
270 		T_QUIET; T_ASSERT_EQ(*(char *)ro_addr, 'A', "RO mapping was modified");
271 
272 		/* tell racing thread to stop toggling mappings */
273 		pthread_mutex_lock(&ctx->mtx);
274 
275 		/* clean up before next loop */
276 		vm_deallocate(mach_task_self(), ctx->e0, ctx->obj_size);
277 		ctx->e0 = 0;
278 		vm_deallocate(mach_task_self(), e2, ctx->obj_size);
279 		e2 = 0;
280 	}
281 
282 	ctx->done = true;
283 	pthread_join(th, NULL);
284 
285 	kr = mach_port_deallocate(mach_task_self(), ctx->mem_entry_rw);
286 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate(me_rw)");
287 	kr = mach_port_deallocate(mach_task_self(), ctx->mem_entry_ro);
288 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_deallocate(me_ro)");
289 	kr = vm_deallocate(mach_task_self(), ro_addr, ctx->obj_size);
290 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate(ro_addr)");
291 	kr = vm_deallocate(mach_task_self(), e5, ctx->obj_size);
292 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate(e5)");
293 
294 
295 	T_LOG("vm_read_overwrite: KERN_SUCCESS:%d KERN_PROTECTION_FAILURE:%d other:%d",
296 	    kern_success, kern_protection_failure, kern_other);
297 	T_PASS("Ran %d times in %ld seconds with no failure", loops, duration);
298 }
299