1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kalloc.h>
30 #include <kern/task.h>
31 #include <kern/thread.h>
32 #include <libkern/libkern.h>
33 #include <mach/mach_vm.h>
34 #include <mach/semaphore.h>
35 #include <mach/task.h>
36 #include <vm/vm_kern_xnu.h>
37 #include <vm/vm_map.h>
38 #include <vm/vm_map_xnu.h>
39 #include <vm/vm_protos.h>
40 #include <sys/errno.h>
41 #include <sys/proc.h>
42 #include <sys/proc_internal.h>
43 #include <sys/vm.h>
44 #include <tests/ktest.h>
45
/* Entry point, invoked by the kernel test harness (see bottom of file). */
kern_return_t copyio_test(void);

/* Size of the user/kernel shared region exercised by every test. */
#define copyio_test_buf_size (PAGE_SIZE * 16)
/*
 * Test pattern: a NUL-terminated string followed by extra non-NUL bytes,
 * so the string tests can detect reads or writes past the terminator.
 */
static const char copyio_test_string[] = {'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g', '!', '\0', 'A', 'B', 'C'};
50
/*
 * Shared state for all copyio tests: the userspace mappings under test plus a
 * kernel-side alias of the same physical pages, used to seed and verify data.
 */
struct copyio_test_data {
	/* VM map of the current userspace process. */
	vm_map_t user_map;
	/* The start of a `copyio_test_buf_size'-sized region mapped into userspace. */
	user_addr_t user_addr;
	/* The start of a page-sized region that guaranteed to be unmapped in userspace. */
	user_addr_t unmapped_addr;
	/* The start of a page-sized region mapped at the largest possible userspace address. */
	user_addr_t user_lastpage_addr;
	/* Kernel mapping of the physical pages mapped at `user_addr'. */
	void *kern_addr;

	/* Scratch buffers of size `copyio_test_buf_size'. */
	char *buf1, *buf2;
	/* Scratch data to pass to helper threads (pointer or integer form). */
	union {
		void *thread_ptr;
		uint64_t thread_data;
	};
};
71
/* Signature of a test helper that can be run on a separate kernel thread. */
typedef int (*copyio_thread_fn_t)(struct copyio_test_data *);

/* Bookkeeping for running one helper on a kernel_task thread. */
struct copyio_test_thread_data {
	copyio_thread_fn_t fn;          /* helper to invoke */
	struct copyio_test_data *data;  /* argument passed to `fn' */
	int ret;                        /* return value of `fn' */
	semaphore_t done;               /* signalled once `fn' has returned */
};
80
81 static void
copyio_thread_call_fn(void * arg,wait_result_t __unused res)82 copyio_thread_call_fn(void *arg, wait_result_t __unused res)
83 {
84 struct copyio_test_thread_data *tdata = arg;
85 tdata->ret = tdata->fn(tdata->data);
86 semaphore_signal(tdata->done);
87 }
88
89 static int
copyio_test_run_in_thread(copyio_thread_fn_t fn,struct copyio_test_data * data)90 copyio_test_run_in_thread(copyio_thread_fn_t fn, struct copyio_test_data *data)
91 {
92 struct copyio_test_thread_data tdata = {
93 .fn = fn,
94 .data = data,
95 };
96 thread_t thread;
97
98 semaphore_create(current_task(), &tdata.done, SYNC_POLICY_FIFO, 0);
99 kernel_thread_start(copyio_thread_call_fn, &tdata, &thread);
100
101 semaphore_wait(tdata.done);
102
103 thread_deallocate(thread);
104 semaphore_destroy(current_task(), tdata.done);
105
106 return tdata.ret;
107 }
108
109 static void
copyio_test_protect(struct copyio_test_data * data,vm_prot_t prot)110 copyio_test_protect(struct copyio_test_data *data, vm_prot_t prot)
111 {
112 __assert_only kern_return_t ret = mach_vm_protect(data->user_map, data->user_addr, copyio_test_buf_size, false, prot);
113 assert(ret == KERN_SUCCESS);
114 }
115
116 static int
copyin_from_kernel(struct copyio_test_data * data)117 copyin_from_kernel(struct copyio_test_data *data)
118 {
119 char *in_buf = data->buf2;
120 return copyin((uintptr_t)data->kern_addr, in_buf, copyio_test_buf_size);
121 }
122
/*
 * Exercise copyin() against valid, zero-length, unmapped, partially-mapped,
 * out-of-range, kernel-source, and write-only source addresses.
 */
static void
copyin_test(struct copyio_test_data *data)
{
	char *out_buf = data->buf1;
	char *in_buf = data->buf2;

	/* Seed the shared region with a known pattern via the kernel alias. */
	for (size_t i = 0; i < copyio_test_buf_size; i++) {
		out_buf[i] = (char)i;
	}
	memcpy(data->kern_addr, out_buf, copyio_test_buf_size);

	int err = copyin(data->user_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, 0, "copyin() with valid parameters should succeed");
	int cmp = memcmp(out_buf, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyin() should correctly copy in data");

	/* A zero-length copy must not fault, even on an unmapped address. */
	err = copyin(data->unmapped_addr, NULL, 0);
	T_EXPECT_EQ_INT(err, 0, "copyin() with 0 size should always succeed");

	err = copyin(data->unmapped_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from unmapped userspace address should return EFAULT");
	err = copyin(data->unmapped_addr - PAGE_SIZE, in_buf, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from partially valid userspace range should return EFAULT");
	err = copyin(data->user_lastpage_addr, in_buf, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() past end of userspace address space should return EFAULT");

	/*
	 * A kernel source address is accepted only on kernel_task threads;
	 * from any other thread it must fault.
	 */
	bzero(in_buf, copyio_test_buf_size);
	err = copyio_test_run_in_thread(copyin_from_kernel, data);
	T_EXPECT_EQ_INT(err, 0, "copyin() from kernel address in kernel_task thread should succeed");
	cmp = memcmp(data->kern_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyin() from kernel address should correctly copy in data");
	err = copyin_from_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from kernel address in other threads should return EFAULT");

	/* Reading through a write-only mapping must fault. */
	copyio_test_protect(data, VM_PROT_WRITE);
	err = copyin(data->user_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from write-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
162
163 static int
copyout_to_kernel(struct copyio_test_data * data)164 copyout_to_kernel(struct copyio_test_data *data)
165 {
166 char *out_buf = data->buf1;
167 return copyout(out_buf, (uintptr_t)data->kern_addr, copyio_test_buf_size);
168 }
169
/*
 * Exercise copyout() against valid, zero-length, unmapped, partially-mapped,
 * out-of-range, kernel-destination, and read-only destination addresses.
 */
static void
copyout_test(struct copyio_test_data *data)
{
	char *out_buf = data->buf1;

	bzero(data->kern_addr, copyio_test_buf_size);

	/* Use the complement of the copyin pattern so stale data can't match. */
	for (size_t i = 0; i < copyio_test_buf_size; i++) {
		out_buf[i] = ~(char)i;
	}
	int err = copyout(out_buf, data->user_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, 0, "copyout() with valid parameters should succeed");

	/* Verify through the kernel alias of the same physical pages. */
	int cmp = memcmp(data->kern_addr, out_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyout() should correctly copy out data");

	/* A zero-length copy must not fault, even on an unmapped address. */
	err = copyout(NULL, data->unmapped_addr, 0);
	T_EXPECT_EQ_INT(err, 0, "copyout() with 0 size should always succeed");

	err = copyout(out_buf, data->unmapped_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to unmapped userspace address should return EFAULT");
	err = copyout(out_buf, data->unmapped_addr - PAGE_SIZE, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to partially valid userspace range should return EFAULT");
	err = copyout(out_buf, data->user_lastpage_addr, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() past end of userspace address space should return EFAULT");

	bzero(data->kern_addr, copyio_test_buf_size);

	/*
	 * A kernel destination address is accepted only on kernel_task
	 * threads; from any other thread it must fault.
	 */
	err = copyio_test_run_in_thread(copyout_to_kernel, data);
	T_EXPECT_EQ_INT(err, 0, "copyout() to kernel address in kernel_task thread should succeed");
	cmp = memcmp(out_buf, data->kern_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyout() to kernel address should correctly copy out data");
	err = copyout_to_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to kernel address in other threads should return EFAULT");

	/* Writing through a read-only mapping must fault. */
	copyio_test_protect(data, VM_PROT_READ);
	err = copyout(out_buf, data->user_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to read-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
210
211 static int
copyinstr_from_kernel(struct copyio_test_data * data)212 copyinstr_from_kernel(struct copyio_test_data *data)
213 {
214 char *in_buf = data->buf1;
215 size_t *lencopied = data->thread_ptr;
216 return copyinstr((user_addr_t)data->kern_addr, in_buf, copyio_test_buf_size, lencopied);
217 }
218
/*
 * Exercise copyinstr(): full copy, truncation, unmapped/out-of-range
 * addresses, kernel source addresses (arch-dependent), write-only source,
 * and an unterminated string at the end of the mapped region.
 */
static void
copyinstr_test(struct copyio_test_data *data)
{
	char *in_buf = data->buf1;

	/* Place the test string (terminator + trailing bytes) in userspace. */
	memcpy(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));

	bzero(in_buf, copyio_test_buf_size);
	size_t lencopied;
	int err = copyinstr(data->user_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, 0, "copyinstr() with valid parameters should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyinstr() with a large enough buffer should read entire string");

	int cmp = strncmp(in_buf, copyio_test_string, lencopied);
	T_EXPECT_EQ_INT(cmp, 0, "copyinstr() should correctly copy string up to NULL terminator");
	/* The bytes after the terminator ('A','B','C') must not be copied. */
	cmp = memcmp(in_buf, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyinstr() should not read past NULL terminator");

	/* Truncated copy: buffer shorter than the string. */
	bzero(in_buf, copyio_test_buf_size);
	const vm_size_t trunc_size = strlen(copyio_test_string) - 4;
	err = copyinstr(data->user_addr, in_buf, trunc_size, &lencopied);
	T_EXPECT_EQ_INT(err, ENAMETOOLONG, "truncated copyinstr() should return ENAMETOOLONG");
	T_EXPECT_EQ_ULONG(lencopied, trunc_size, "truncated copyinstr() should copy exactly `maxlen' bytes");
	cmp = memcmp(in_buf, copyio_test_string, trunc_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyinstr() should correctly copy in truncated string");
	cmp = memcmp(in_buf, copyio_test_string, strlen(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyinstr() should stop copying at `maxlen' bytes");

	err = copyinstr(data->unmapped_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from unmapped userspace address should return EFAULT");
	err = copyinstr(data->user_lastpage_addr, in_buf, PAGE_SIZE * 2, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() past end of userspace address space should return EFAULT");

	bzero(in_buf, copyio_test_buf_size);
	data->thread_ptr = &lencopied;

	/*
	 * Kernel source address: the expected result is arch-dependent, as
	 * encoded by the T_EXPECTs below (EFAULT on arm64 even from a
	 * kernel_task thread; success elsewhere).
	 */
	err = copyio_test_run_in_thread(copyinstr_from_kernel, data);
#if defined (__arm64__)
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from kernel address in kernel_task thread should return EFAULT");
#else
	T_EXPECT_EQ_INT(err, 0, "copyinstr() from kernel address in kernel_task thread should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyinstr() from kernel address should read entire string");
	cmp = strncmp(in_buf, copyio_test_string, lencopied);
	T_EXPECT_EQ_INT(cmp, 0, "copyinstr() from kernel address should correctly copy string up to NULL terminator");
	cmp = memcmp(in_buf, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyinstr() from kernel address should not read past NULL terminator");
#endif
	err = copyinstr_from_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from kernel address in other threads should return EFAULT");

	/* Reading through a write-only mapping must fault. */
	copyio_test_protect(data, VM_PROT_WRITE);
	err = copyinstr(data->user_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from write-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);

	/* Place an unterminated string at the end of the mapped region */
	const size_t unterminated_size = 16;
	char *kern_unterminated_addr = (char *)data->kern_addr + copyio_test_buf_size - unterminated_size;
	memset(kern_unterminated_addr, 'A', unterminated_size);

	/*
	 * The search for a terminator runs off the end of the mapping into
	 * `unmapped_addr', so this must fault rather than over-read.
	 */
	user_addr_t user_unterminated_addr = data->user_addr + copyio_test_buf_size - unterminated_size;
	err = copyinstr(user_unterminated_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from userspace region without NULL terminator should return EFAULT");
}
283
284 static int
copyoutstr_to_kernel(struct copyio_test_data * data)285 copyoutstr_to_kernel(struct copyio_test_data *data)
286 {
287 size_t *lencopied = data->thread_ptr;
288 return copyoutstr(copyio_test_string, (user_addr_t)data->kern_addr, sizeof(copyio_test_string), lencopied);
289 }
290
/*
 * Exercise copyoutstr(): full copy, truncation, unmapped/partially-mapped/
 * out-of-range destinations, kernel destinations (arch-dependent), and a
 * read-only destination.
 */
static void
copyoutstr_test(struct copyio_test_data *data)
{
	bzero(data->kern_addr, sizeof(copyio_test_string));

	size_t lencopied;
	int err = copyoutstr(copyio_test_string, data->user_addr, sizeof(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, 0, "copyoutstr() with valid parameters should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyoutstr() should copy string up to NULL terminator");

	/* Verify through the kernel alias of the same physical pages. */
	int cmp = strncmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_EQ_INT(cmp, 0, "copyoutstr() should correctly copy out string");
	/* The bytes after the terminator ('A','B','C') must not be copied. */
	cmp = memcmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyoutstr() should stop copying at NULL terminator");

	bzero(data->kern_addr, sizeof(copyio_test_string));

	/* Truncated copy: maxlen shorter than the string. */
	const vm_size_t trunc_size = strlen(copyio_test_string) - 4;
	err = copyoutstr(copyio_test_string, data->user_addr, trunc_size, &lencopied);
	T_EXPECT_EQ_INT(err, ENAMETOOLONG, "truncated copyoutstr() should return ENAMETOOLONG");
	T_EXPECT_EQ_ULONG(lencopied, trunc_size, "truncated copyoutstr() should copy exactly `maxlen' bytes");
	cmp = strncmp(data->kern_addr, copyio_test_string, trunc_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyoutstr() should correctly copy out truncated string");
	cmp = memcmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyoutstr() should stop copying at `maxlen' bytes");

	err = copyoutstr(copyio_test_string, data->unmapped_addr, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to unmapped userspace address should return EFAULT");
	err = copyoutstr(copyio_test_string, data->unmapped_addr - 1, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to partially valid userspace range should return EFAULT");
	err = copyoutstr(copyio_test_string, data->user_lastpage_addr + PAGE_SIZE - 1, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() past end of userspace address space should return EFAULT");

	bzero(data->kern_addr, sizeof(copyio_test_string));
	data->thread_ptr = &lencopied;

	/*
	 * Kernel destination address: the expected result is arch-dependent,
	 * as encoded by the T_EXPECTs below (EFAULT on arm64 even from a
	 * kernel_task thread; success elsewhere).
	 */
	err = copyio_test_run_in_thread(copyoutstr_to_kernel, data);
#if defined (__arm64__)
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to kernel address in kernel_task thread should return EFAULT");
#else
	T_EXPECT_EQ_INT(err, 0, "copyoutstr() to kernel address in kernel_task thread should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyoutstr() to kernel address should copy string up to NULL terminator");
	cmp = strncmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_EQ_INT(cmp, 0, "copyoutstr() to kernel address should correctly copy out data");
#endif
	err = copyoutstr_to_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to kernel address in other threads should return EFAULT");

	/* Writing through a read-only mapping must fault. */
	copyio_test_protect(data, VM_PROT_READ);
	err = copyoutstr(copyio_test_string, data->user_addr, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to read-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
344
345 static int
copyin_atomic32_from_kernel(struct copyio_test_data * data)346 copyin_atomic32_from_kernel(struct copyio_test_data *data)
347 {
348 return copyin_atomic32((uintptr_t)data->kern_addr, data->thread_ptr);
349 }
350
351 static int
copyin_atomic64_from_kernel(struct copyio_test_data * data)352 copyin_atomic64_from_kernel(struct copyio_test_data *data)
353 {
354 return copyin_atomic64((uintptr_t)data->kern_addr, data->thread_ptr);
355 }
356
357 static int
copyout_atomic32_to_kernel(struct copyio_test_data * data)358 copyout_atomic32_to_kernel(struct copyio_test_data *data)
359 {
360 return copyout_atomic32((uint32_t)data->thread_data, (user_addr_t)data->kern_addr);
361 }
362
363 static int
copyout_atomic64_to_kernel(struct copyio_test_data * data)364 copyout_atomic64_to_kernel(struct copyio_test_data *data)
365 {
366 return copyout_atomic64(data->thread_data, (user_addr_t)data->kern_addr);
367 }
368
/**
 * Note: we can't test atomic copyio calls which go past the end of the
 * userspace address space, since there's no way to provide a range
 * that straddles the userspace address boundary while being suitably
 * aligned for the copy.
 */
#define copyin_atomic_test(data, word_t, copyin_fn, copyin_from_kernel_fn) \
	do { \
	        const word_t word_out = (word_t)0x123456789ABCDEF0UL; \
	        word_t word_in = 0; \
	        memcpy(data->kern_addr, &word_out, sizeof(word_out)); \
	\
	        /* Aligned copy from a mapped user address must succeed. */ \
	        int err = copyin_fn(data->user_addr, &word_in); \
	        T_EXPECT_EQ_INT(err, 0, #copyin_fn "() with valid parameters should succeed"); \
	\
	        int cmp = memcmp(&word_in, &word_out, sizeof(word_t)); \
	        T_EXPECT_EQ_INT(cmp, 0, #copyin_fn "() should correctly copy word"); \
	\
	        /* Every misaligned source offset must be rejected. */ \
	        for (unsigned int offset = 1; offset < sizeof(word_t); offset++) { \
	                err = copyin_fn(data->user_addr + offset, &word_in); \
	                T_EXPECT_EQ_INT(err, EINVAL, \
	                    #copyin_fn "() from unaligned userspace address should return EINVAL (offset = %u)", \
	                    offset); \
	        } \
	        err = copyin_fn(data->unmapped_addr, &word_in); \
	        T_EXPECT_EQ_INT(err, EFAULT, #copyin_fn "() from unmapped userspace address should return EFAULT"); \
	\
	        data->thread_ptr = &word_in; \
	\
	        /* Kernel source addresses fault on every thread type. */ \
	        err = copyio_test_run_in_thread(copyin_from_kernel_fn, data); \
	        T_EXPECT_EQ_INT(err, EFAULT, \
	            #copyin_fn "() from kernel address in kernel_task threads should return EFAULT"); \
	        err = copyin_from_kernel_fn(data); \
	        T_EXPECT_EQ_INT(err, EFAULT, \
	            #copyin_fn "() from kernel address in other threads should return EFAULT"); \
	\
	        /* Reading through a write-only mapping must fault. */ \
	        copyio_test_protect(data, VM_PROT_WRITE); \
	        err = copyin_fn(data->user_addr, &word_in); \
	        T_EXPECT_EQ_INT(err, EFAULT, #copyin_fn "() from write-only address should return EFAULT"); \
	        copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE); \
	} while (0)
410
/*
 * Exercise an atomic copyout function of width `word_t' against valid,
 * misaligned, unmapped, kernel-destination, and read-only addresses.
 */
#define copyout_atomic_test(data, word_t, copyout_fn, copyout_to_kernel_fn) \
	do { \
	        const word_t word_out = (word_t)0x123456789ABCDEF0UL; \
	        bzero(data->kern_addr, sizeof(word_t)); \
	\
	        /* Aligned copy to a mapped user address must succeed. */ \
	        int err = copyout_fn(word_out, data->user_addr); \
	        T_EXPECT_EQ_INT(err, 0, #copyout_fn "() with valid parameters should succeed"); \
	\
	        int cmp = memcmp(data->kern_addr, &word_out, sizeof(word_t)); \
	        T_EXPECT_EQ_INT(cmp, 0, #copyout_fn "() should correctly copy word"); \
	\
	        /* Every misaligned destination offset must be rejected. */ \
	        for (unsigned int offset = 1; offset < sizeof(word_t); offset++) { \
	                err = copyout_fn(word_out, data->user_addr + offset); \
	                T_EXPECT_EQ_INT(err, EINVAL, \
	                    #copyout_fn "() to unaligned userspace address should return EINVAL (offset = %u)", \
	                    offset); \
	        } \
	        err = copyout_fn(word_out, data->unmapped_addr); \
	        T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to unmapped userspace address should return EFAULT"); \
	        err = copyout_fn(word_out, (uintptr_t)data->kern_addr); \
	        T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to kernel address should return EFAULT"); \
	\
	        data->thread_data = word_out; \
	\
	        /* Kernel destination addresses fault on every thread type. */ \
	        err = copyio_test_run_in_thread(copyout_to_kernel_fn, data); \
	        T_EXPECT_EQ_INT(err, EFAULT, \
	            #copyout_fn "() to kernel address in kernel_task thread should return EFAULT"); \
	        err = copyout_to_kernel_fn(data); \
	        T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to kernel address in other threads should return EFAULT"); \
	\
	        /* Writing through a read-only mapping must fault. */ \
	        copyio_test_protect(data, VM_PROT_READ); \
	        err = copyout_fn(word_out, data->user_addr); \
	        T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to read-only address should return EFAULT"); \
	        copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE); \
	} while (0)
446
/*
 * Instantiate both atomic tests for a given word size: `size' is 32 or 64
 * and selects uint<size>_t, copyin/copyout_atomic<size>, and the matching
 * kernel-address helpers via token pasting.
 */
#define copyio_atomic_test(data, size) \
	do { \
	        copyin_atomic_test((data), uint ## size ## _t, copyin_atomic ## size, \
	            copyin_atomic ## size ## _from_kernel); \
	        copyout_atomic_test((data), uint ## size ## _t, copyout_atomic ## size, \
	            copyout_atomic ## size ## _to_kernel); \
	} while (0)
454
455 static int
copyin_atomic32_wait_if_equals_from_kernel(struct copyio_test_data * data)456 copyin_atomic32_wait_if_equals_from_kernel(struct copyio_test_data *data)
457 {
458 return copyin_atomic32_wait_if_equals((uintptr_t)data->kern_addr, (uint32_t)data->thread_data);
459 }
460
/*
 * Exercise copyin_atomic32_wait_if_equals(): equal/unequal values,
 * misaligned, unmapped, kernel, and write-only addresses.
 */
static void
copyin_atomic32_wait_if_equals_test(struct copyio_test_data *data)
{
	/* The user word is zeroed via the kernel alias first. */
	bzero(data->kern_addr, sizeof(uint32_t));
	int err = copyin_atomic32_wait_if_equals(data->user_addr, 0);
	T_EXPECT_EQ_INT(err, 0, "copyin_atomic32_wait_if_equals() should return 0 when equals");
	err = copyin_atomic32_wait_if_equals(data->user_addr, ~0U);
	T_EXPECT_EQ_INT(err, ESTALE, "copyin_atomic32_wait_if_equals() should return ESTALE when not equals");

	/* Every misaligned offset must be rejected. */
	for (unsigned int offset = 1; offset < sizeof(uint32_t); offset++) {
		err = copyin_atomic32_wait_if_equals(data->user_addr + offset, 0);
		T_EXPECT_EQ_INT(err, EINVAL,
		    "copyin_atomic32_wait_if_equals() on unaligned userspace address should return EINVAL (offset = %u)",
		    offset);
	}
	err = copyin_atomic32_wait_if_equals(data->unmapped_addr, 0);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() on unmapped userspace address should return EFAULT");

	data->thread_data = 0;

	/* Kernel addresses fault on every thread type. */
	err = copyio_test_run_in_thread(copyin_atomic32_wait_if_equals_from_kernel, data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() from kernel address in kernel_task thread should return EFAULT");
	err = copyin_atomic32_wait_if_equals_from_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() from kernel address in other threads should return EFAULT");

	/* Reading through a write-only mapping must fault. */
	copyio_test_protect(data, VM_PROT_WRITE);
	err = copyin_atomic32_wait_if_equals(data->user_addr, 0);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() on write-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
491
/*
 * Test entry point: build the userspace/kernel mappings described by
 * `struct copyio_test_data', run every copyio test against them, then tear
 * everything down in reverse order via the goto-cleanup ladder.
 */
kern_return_t
copyio_test(void)
{
	struct copyio_test_data data = {};
	mach_vm_offset_t user_addr = 0;
	kern_return_t ret = KERN_SUCCESS;

	/* Kernel scratch buffers used to seed and verify copies. */
	data.buf1 = kalloc_data(copyio_test_buf_size, Z_WAITOK);
	data.buf2 = kalloc_data(copyio_test_buf_size, Z_WAITOK);
	if (!data.buf1 || !data.buf2) {
		T_FAIL("failed to allocate scratch buffers");
		ret = KERN_NO_SPACE;
		goto err_kalloc;
	}

	/**
	 * This test needs to manipulate the current userspace process's
	 * address space. This is okay to do at the specific point in time
	 * when bsd_do_post() runs: current_proc() points to the init process,
	 * which has been set up to the point of having a valid vm_map, but
	 * not to the point of actually execing yet.
	 */
	proc_t proc = current_proc();
	assert(proc_getpid(proc) == 1);
	data.user_map = get_task_map_reference(proc_task(proc));

	/*
	 * Allocate the test region plus one extra page; the extra page is
	 * deallocated below to create `unmapped_addr'.  data.user_addr is
	 * still 0 here (zero-initialized above), so this is an ANYWHERE
	 * allocation with no placement hint.
	 */
	user_addr = data.user_addr;
	ret = mach_vm_allocate_kernel(data.user_map, &user_addr,
	    copyio_test_buf_size + PAGE_SIZE, VM_MAP_KERNEL_FLAGS_ANYWHERE());
	if (ret) {
		T_FAIL("mach_vm_allocate_kernel(user_addr) failed: %d", ret);
		goto err_user_alloc;
	}
	data.user_addr = (user_addr_t)user_addr;

	/* Map one page fixed at the very top of the userspace map. */
	user_addr = get_map_max(data.user_map) - PAGE_SIZE;
	ret = mach_vm_allocate_kernel(data.user_map, &user_addr, PAGE_SIZE,
	    VM_MAP_KERNEL_FLAGS_FIXED());
	if (ret) {
		T_FAIL("mach_vm_allocate_kernel(user_lastpage_addr) failed: %d", ret);
		goto err_user_lastpage_alloc;
	}
	data.user_lastpage_addr = (user_addr_t)user_addr;

	/* Punch out the extra page to make a guaranteed-unmapped address. */
	data.unmapped_addr = data.user_addr + copyio_test_buf_size;
	mach_vm_deallocate(data.user_map, data.unmapped_addr, PAGE_SIZE);

	/*
	 * Remap the user test region into the kernel map so the tests can
	 * seed and inspect its contents without going through copyio.
	 */
	vm_prot_t cur_protection, max_protection;
	mach_vm_offset_t kern_addr = 0;
	ret = mach_vm_remap(kernel_map, &kern_addr, copyio_test_buf_size,
	    VM_PROT_READ | VM_PROT_WRITE, VM_FLAGS_ANYWHERE,
	    data.user_map, data.user_addr, false,
	    &cur_protection, &max_protection, VM_INHERIT_NONE);
	if (ret) {
		T_FAIL("mach_vm_remap() failed: %d", ret);
		goto err_kern_remap;
	}
	data.kern_addr = (void *)kern_addr;

	copyin_test(&data);
	copyout_test(&data);
	copyinstr_test(&data);
	copyoutstr_test(&data);
	copyio_atomic_test(&data, 32);
	copyio_atomic_test(&data, 64);
	copyin_atomic32_wait_if_equals_test(&data);

	/* Unwind in reverse order of setup; labels fall through on error. */
	mach_vm_deallocate(kernel_map, kern_addr, copyio_test_buf_size);
err_kern_remap:
	mach_vm_deallocate(data.user_map, data.user_lastpage_addr, PAGE_SIZE);
err_user_lastpage_alloc:
	mach_vm_deallocate(data.user_map, data.user_addr, copyio_test_buf_size);
err_user_alloc:
	vm_map_deallocate(data.user_map);
err_kalloc:
	/* kfree_data() handles NULL, so partial-allocation failures are safe. */
	kfree_data(data.buf2, copyio_test_buf_size);
	kfree_data(data.buf1, copyio_test_buf_size);
	return ret;
}
571
572