1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/kalloc.h>
30 #include <kern/task.h>
31 #include <kern/thread.h>
32 #include <libkern/libkern.h>
33 #include <mach/mach_vm.h>
34 #include <mach/semaphore.h>
35 #include <mach/task.h>
36 #include <vm/vm_kern_xnu.h>
37 #include <vm/vm_map.h>
38 #include <vm/vm_map_xnu.h>
39 #include <vm/vm_protos.h>
40 #include <sys/errno.h>
41 #include <sys/proc.h>
42 #include <sys/proc_internal.h>
43 #include <sys/vm.h>
44 #include <tests/ktest.h>
45
/* Test entry points, called by the kernel test framework. */
kern_return_t copyio_test(void);
#if HAS_MTE
kern_return_t copyio_unprivileged_test(void);
#endif

/* Size of the userspace test region and of each kernel scratch buffer. */
#define copyio_test_buf_size (PAGE_SIZE * 16)
/*
 * Test string with extra bytes after the NUL terminator, so the string
 * tests can verify that copyinstr()/copyoutstr() stop at the terminator.
 */
static const char copyio_test_string[] = {'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g', '!', '\0', 'A', 'B', 'C'};
53
/*
 * Shared state for all copyio tests; set up once by copyio_test() and
 * passed to every individual test routine.
 */
struct copyio_test_data {
	/* VM map of the current userspace process. */
	vm_map_t user_map;
	/* The start of a `copyio_test_buf_size'-sized region mapped into userspace. */
	user_addr_t user_addr;
	/* The start of a page-sized region that guaranteed to be unmapped in userspace. */
	user_addr_t unmapped_addr;
	/* The start of a page-sized region mapped at the largest possible userspace address. */
	user_addr_t user_lastpage_addr;
	/* Kernel mapping of the physical pages mapped at `user_addr'. */
	void *kern_addr;

	/* Scratch buffers of size `copyio_test_buf_size'. */
	char *buf1, *buf2;
	/* Scratch data to pass to helper threads */
	union {
		void *thread_ptr;       /* out-pointer used by the copyin/copyinstr helpers */
		uint64_t thread_data;   /* value used by the copyout/wait_if_equals helpers */
	};
};
74
/* Signature shared by every helper run via copyio_test_run_in_thread(). */
typedef int (*copyio_thread_fn_t)(struct copyio_test_data *);

/* Arguments and result exchanged with the spawned kernel thread. */
struct copyio_test_thread_data {
	copyio_thread_fn_t fn;          /* helper to invoke on the new thread */
	struct copyio_test_data *data;  /* argument forwarded to `fn' */
	int ret;                        /* return value of `fn' */
	semaphore_t done;               /* signalled once `fn' has returned */
};
83
84 static void
copyio_thread_call_fn(void * arg,wait_result_t __unused res)85 copyio_thread_call_fn(void *arg, wait_result_t __unused res)
86 {
87 struct copyio_test_thread_data *tdata = arg;
88 tdata->ret = tdata->fn(tdata->data);
89 semaphore_signal(tdata->done);
90 }
91
92 static int
copyio_test_run_in_thread(copyio_thread_fn_t fn,struct copyio_test_data * data)93 copyio_test_run_in_thread(copyio_thread_fn_t fn, struct copyio_test_data *data)
94 {
95 struct copyio_test_thread_data tdata = {
96 .fn = fn,
97 .data = data,
98 };
99 thread_t thread;
100
101 semaphore_create(current_task(), &tdata.done, SYNC_POLICY_FIFO, 0);
102 kernel_thread_start(copyio_thread_call_fn, &tdata, &thread);
103
104 semaphore_wait(tdata.done);
105
106 thread_deallocate(thread);
107 semaphore_destroy(current_task(), tdata.done);
108
109 return tdata.ret;
110 }
111
112 static void
copyio_test_protect(struct copyio_test_data * data,vm_prot_t prot)113 copyio_test_protect(struct copyio_test_data *data, vm_prot_t prot)
114 {
115 __assert_only kern_return_t ret = mach_vm_protect(data->user_map, data->user_addr, copyio_test_buf_size, false, prot);
116 assert(ret == KERN_SUCCESS);
117 }
118
119 static int
copyin_from_kernel(struct copyio_test_data * data)120 copyin_from_kernel(struct copyio_test_data *data)
121 {
122 char *in_buf = data->buf2;
123 return copyin((uintptr_t)data->kern_addr, in_buf, copyio_test_buf_size);
124 }
125
/*
 * Exercise copyin(): valid copies, zero-length copies, unmapped /
 * partially-mapped / end-of-address-space sources, kernel-space sources,
 * and write-only source pages.
 */
static void
copyin_test(struct copyio_test_data *data)
{
	char *out_buf = data->buf1;
	char *in_buf = data->buf2;

	/* Seed the shared user/kernel buffer with a known pattern. */
	for (size_t i = 0; i < copyio_test_buf_size; i++) {
		out_buf[i] = (char)i;
	}
	memcpy(data->kern_addr, out_buf, copyio_test_buf_size);

	int err = copyin(data->user_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, 0, "copyin() with valid parameters should succeed");
	int cmp = memcmp(out_buf, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyin() should correctly copy in data");

	err = copyin(data->unmapped_addr, NULL, 0);
	T_EXPECT_EQ_INT(err, 0, "copyin() with 0 size should always succeed");

	/* Faulting source ranges: fully unmapped, straddling, and at the map top. */
	err = copyin(data->unmapped_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from unmapped userspace address should return EFAULT");
	err = copyin(data->unmapped_addr - PAGE_SIZE, in_buf, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from partially valid userspace range should return EFAULT");
	err = copyin(data->user_lastpage_addr, in_buf, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() past end of userspace address space should return EFAULT");

	/* Kernel-space source: allowed from a kernel_task thread, EFAULT here. */
	bzero(in_buf, copyio_test_buf_size);
	err = copyio_test_run_in_thread(copyin_from_kernel, data);
	T_EXPECT_EQ_INT(err, 0, "copyin() from kernel address in kernel_task thread should succeed");
	cmp = memcmp(data->kern_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyin() from kernel address should correctly copy in data");
	err = copyin_from_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from kernel address in other threads should return EFAULT");

	/* A write-only source page must fault; restore full access afterwards. */
	copyio_test_protect(data, VM_PROT_WRITE);
	err = copyin(data->user_addr, in_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin() from write-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
165
166 static int
copyout_to_kernel(struct copyio_test_data * data)167 copyout_to_kernel(struct copyio_test_data *data)
168 {
169 char *out_buf = data->buf1;
170 return copyout(out_buf, (uintptr_t)data->kern_addr, copyio_test_buf_size);
171 }
172
/*
 * Exercise copyout(): valid copies, zero-length copies, unmapped /
 * partially-mapped / end-of-address-space destinations, kernel-space
 * destinations, and read-only destination pages.
 */
static void
copyout_test(struct copyio_test_data *data)
{
	char *out_buf = data->buf1;

	bzero(data->kern_addr, copyio_test_buf_size);

	/* Inverse of copyin_test()'s pattern so stale data can't pass. */
	for (size_t i = 0; i < copyio_test_buf_size; i++) {
		out_buf[i] = ~(char)i;
	}
	int err = copyout(out_buf, data->user_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, 0, "copyout() with valid parameters should succeed");

	/* Verify through the kernel alias of the same physical pages. */
	int cmp = memcmp(data->kern_addr, out_buf, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyout() should correctly copy out data");

	err = copyout(NULL, data->unmapped_addr, 0);
	T_EXPECT_EQ_INT(err, 0, "copyout() with 0 size should always succeed");

	/* Faulting destination ranges. */
	err = copyout(out_buf, data->unmapped_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to unmapped userspace address should return EFAULT");
	err = copyout(out_buf, data->unmapped_addr - PAGE_SIZE, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to partially valid userspace range should return EFAULT");
	err = copyout(out_buf, data->user_lastpage_addr, PAGE_SIZE * 2);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() past end of userspace address space should return EFAULT");

	bzero(data->kern_addr, copyio_test_buf_size);

	/* Kernel-space destination: allowed from kernel_task thread, EFAULT here. */
	err = copyio_test_run_in_thread(copyout_to_kernel, data);
	T_EXPECT_EQ_INT(err, 0, "copyout() to kernel address in kernel_task thread should succeed");
	cmp = memcmp(out_buf, data->kern_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyout() to kernel address should correctly copy out data");
	err = copyout_to_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to kernel address in other threads should return EFAULT");

	/* A read-only destination page must fault; restore access afterwards. */
	copyio_test_protect(data, VM_PROT_READ);
	err = copyout(out_buf, data->user_addr, copyio_test_buf_size);
	T_EXPECT_EQ_INT(err, EFAULT, "copyout() to read-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
213
214 static int
copyinstr_from_kernel(struct copyio_test_data * data)215 copyinstr_from_kernel(struct copyio_test_data *data)
216 {
217 char *in_buf = data->buf1;
218 size_t *lencopied = data->thread_ptr;
219 return copyinstr((user_addr_t)data->kern_addr, in_buf, copyio_test_buf_size, lencopied);
220 }
221
/*
 * Exercise copyinstr(): full and truncated string copies, faulting source
 * ranges, kernel-space sources, write-only pages, and an unterminated
 * string placed at the very end of the mapped region.
 */
static void
copyinstr_test(struct copyio_test_data *data)
{
	char *in_buf = data->buf1;

	/* Seed the user-visible buffer with the test string via its kernel alias. */
	memcpy(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));

	bzero(in_buf, copyio_test_buf_size);
	size_t lencopied;
	int err = copyinstr(data->user_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, 0, "copyinstr() with valid parameters should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyinstr() with a large enough buffer should read entire string");

	/* copyio_test_string carries bytes past its NUL; they must not be copied. */
	int cmp = strncmp(in_buf, copyio_test_string, lencopied);
	T_EXPECT_EQ_INT(cmp, 0, "copyinstr() should correctly copy string up to NULL terminator");
	cmp = memcmp(in_buf, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyinstr() should not read past NULL terminator");

	/* Truncated copy: destination shorter than the string. */
	bzero(in_buf, copyio_test_buf_size);
	const vm_size_t trunc_size = strlen(copyio_test_string) - 4;
	err = copyinstr(data->user_addr, in_buf, trunc_size, &lencopied);
	T_EXPECT_EQ_INT(err, ENAMETOOLONG, "truncated copyinstr() should return ENAMETOOLONG");
	T_EXPECT_EQ_ULONG(lencopied, trunc_size, "truncated copyinstr() should copy exactly `maxlen' bytes");
	cmp = memcmp(in_buf, copyio_test_string, trunc_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyinstr() should correctly copy in truncated string");
	cmp = memcmp(in_buf, copyio_test_string, strlen(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyinstr() should stop copying at `maxlen' bytes");

	err = copyinstr(data->unmapped_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from unmapped userspace address should return EFAULT");
	err = copyinstr(data->user_lastpage_addr, in_buf, PAGE_SIZE * 2, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() past end of userspace address space should return EFAULT");

	bzero(in_buf, copyio_test_buf_size);
	data->thread_ptr = &lencopied;

	/* Kernel-space source: per the expectations below, arm64 rejects it even
	 * from a kernel_task thread; other architectures allow it there. */
	err = copyio_test_run_in_thread(copyinstr_from_kernel, data);
#if defined (__arm64__)
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from kernel address in kernel_task thread should return EFAULT");
#else
	T_EXPECT_EQ_INT(err, 0, "copyinstr() from kernel address in kernel_task thread should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyinstr() from kernel address should read entire string");
	cmp = strncmp(in_buf, copyio_test_string, lencopied);
	T_EXPECT_EQ_INT(cmp, 0, "copyinstr() from kernel address should correctly copy string up to NULL terminator");
	cmp = memcmp(in_buf, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyinstr() from kernel address should not read past NULL terminator");
#endif
	err = copyinstr_from_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from kernel address in other threads should return EFAULT");

	/* A write-only source page must fault; restore access afterwards. */
	copyio_test_protect(data, VM_PROT_WRITE);
	err = copyinstr(data->user_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from write-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);

	/* Place an unterminated string at the end of the mapped region */
	const size_t unterminated_size = 16;
	char *kern_unterminated_addr = (char *)data->kern_addr + copyio_test_buf_size - unterminated_size;
	memset(kern_unterminated_addr, 'A', unterminated_size);

	/* The terminator scan runs off into the unmapped page, so it must fault. */
	user_addr_t user_unterminated_addr = data->user_addr + copyio_test_buf_size - unterminated_size;
	err = copyinstr(user_unterminated_addr, in_buf, copyio_test_buf_size, &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyinstr() from userspace region without NULL terminator should return EFAULT");
}
286
287 static int
copyoutstr_to_kernel(struct copyio_test_data * data)288 copyoutstr_to_kernel(struct copyio_test_data *data)
289 {
290 size_t *lencopied = data->thread_ptr;
291 return copyoutstr(copyio_test_string, (user_addr_t)data->kern_addr, sizeof(copyio_test_string), lencopied);
292 }
293
/*
 * Exercise copyoutstr(): full and truncated string copies, faulting
 * destination ranges, kernel-space destinations, and read-only pages.
 */
static void
copyoutstr_test(struct copyio_test_data *data)
{
	bzero(data->kern_addr, sizeof(copyio_test_string));

	size_t lencopied;
	int err = copyoutstr(copyio_test_string, data->user_addr, sizeof(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, 0, "copyoutstr() with valid parameters should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyoutstr() should copy string up to NULL terminator");

	/* copyio_test_string carries bytes past its NUL; they must not be copied. */
	int cmp = strncmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_EQ_INT(cmp, 0, "copyoutstr() should correctly copy out string");
	cmp = memcmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyoutstr() should stop copying at NULL terminator");

	bzero(data->kern_addr, sizeof(copyio_test_string));

	/* Truncated copy: maxlen shorter than the string. */
	const vm_size_t trunc_size = strlen(copyio_test_string) - 4;
	err = copyoutstr(copyio_test_string, data->user_addr, trunc_size, &lencopied);
	T_EXPECT_EQ_INT(err, ENAMETOOLONG, "truncated copyoutstr() should return ENAMETOOLONG");
	T_EXPECT_EQ_ULONG(lencopied, trunc_size, "truncated copyoutstr() should copy exactly `maxlen' bytes");
	cmp = strncmp(data->kern_addr, copyio_test_string, trunc_size);
	T_EXPECT_EQ_INT(cmp, 0, "copyoutstr() should correctly copy out truncated string");
	cmp = memcmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_NE_INT(cmp, 0, "copyoutstr() should stop copying at `maxlen' bytes");

	/* Faulting destination ranges. */
	err = copyoutstr(copyio_test_string, data->unmapped_addr, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to unmapped userspace address should return EFAULT");
	err = copyoutstr(copyio_test_string, data->unmapped_addr - 1, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to partially valid userspace range should return EFAULT");
	err = copyoutstr(copyio_test_string, data->user_lastpage_addr + PAGE_SIZE - 1, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() past end of userspace address space should return EFAULT");

	bzero(data->kern_addr, sizeof(copyio_test_string));
	data->thread_ptr = &lencopied;

	/* Kernel-space destination: per the expectations below, arm64 rejects it
	 * even from a kernel_task thread; other architectures allow it there. */
	err = copyio_test_run_in_thread(copyoutstr_to_kernel, data);
#if defined (__arm64__)
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to kernel address in kernel_task thread should return EFAULT");
#else
	T_EXPECT_EQ_INT(err, 0, "copyoutstr() to kernel address in kernel_task thread should succeed");
	T_EXPECT_EQ_ULONG(lencopied, strlen(copyio_test_string) + 1, "copyoutstr() to kernel address should copy string up to NULL terminator");
	cmp = strncmp(data->kern_addr, copyio_test_string, sizeof(copyio_test_string));
	T_EXPECT_EQ_INT(cmp, 0, "copyoutstr() to kernel address should correctly copy out data");
#endif
	err = copyoutstr_to_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to kernel address in other threads should return EFAULT");

	/* A read-only destination page must fault; restore access afterwards. */
	copyio_test_protect(data, VM_PROT_READ);
	err = copyoutstr(copyio_test_string, data->user_addr, strlen(copyio_test_string), &lencopied);
	T_EXPECT_EQ_INT(err, EFAULT, "copyoutstr() to read-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
347
348 static int
copyin_atomic32_from_kernel(struct copyio_test_data * data)349 copyin_atomic32_from_kernel(struct copyio_test_data *data)
350 {
351 return copyin_atomic32((uintptr_t)data->kern_addr, data->thread_ptr);
352 }
353
354 static int
copyin_atomic64_from_kernel(struct copyio_test_data * data)355 copyin_atomic64_from_kernel(struct copyio_test_data *data)
356 {
357 return copyin_atomic64((uintptr_t)data->kern_addr, data->thread_ptr);
358 }
359
360 static int
copyout_atomic32_to_kernel(struct copyio_test_data * data)361 copyout_atomic32_to_kernel(struct copyio_test_data *data)
362 {
363 return copyout_atomic32((uint32_t)data->thread_data, (user_addr_t)data->kern_addr);
364 }
365
366 static int
copyout_atomic64_to_kernel(struct copyio_test_data * data)367 copyout_atomic64_to_kernel(struct copyio_test_data *data)
368 {
369 return copyout_atomic64(data->thread_data, (user_addr_t)data->kern_addr);
370 }
371
372 /**
373 * Note: we can't test atomic copyio calls which go past the end of the
374 * userspace address space, since there's no way to provide a range
375 * that straddles the userspace address boundary while being suitably
376 * aligned for the copy.
377 */
378 #define copyin_atomic_test(data, word_t, copyin_fn, copyin_from_kernel_fn) \
379 do { \
380 const word_t word_out = (word_t)0x123456789ABCDEF0UL; \
381 word_t word_in = 0; \
382 memcpy(data->kern_addr, &word_out, sizeof(word_out)); \
383 \
384 int err = copyin_fn(data->user_addr, &word_in); \
385 T_EXPECT_EQ_INT(err, 0, #copyin_fn "() with valid parameters should succeed"); \
386 \
387 int cmp = memcmp(&word_in, &word_out, sizeof(word_t)); \
388 T_EXPECT_EQ_INT(cmp, 0, #copyin_fn "() should correctly copy word"); \
389 \
390 for (unsigned int offset = 1; offset < sizeof(word_t); offset++) { \
391 err = copyin_fn(data->user_addr + offset, &word_in); \
392 T_EXPECT_EQ_INT(err, EINVAL, \
393 #copyin_fn "() from unaligned userspace address should return EINVAL (offset = %u)", \
394 offset); \
395 }; \
396 err = copyin_fn(data->unmapped_addr, &word_in); \
397 T_EXPECT_EQ_INT(err, EFAULT, #copyin_fn "() from unmapped userspace address should return EFAULT"); \
398 \
399 data->thread_ptr = &word_in; \
400 \
401 err = copyio_test_run_in_thread(copyin_from_kernel_fn, data); \
402 T_EXPECT_EQ_INT(err, EFAULT, \
403 #copyin_fn "() from kernel address in kernel_task threads should return EFAULT"); \
404 err = copyin_from_kernel_fn(data); \
405 T_EXPECT_EQ_INT(err, EFAULT, \
406 #copyin_fn "() from kernel address in other threads should return EFAULT"); \
407 \
408 copyio_test_protect(data, VM_PROT_WRITE); \
409 err = copyin_fn(data->user_addr, &word_in); \
410 T_EXPECT_EQ_INT(err, EFAULT, #copyin_fn "() from write-only address should return EFAULT"); \
411 copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE); \
412 } while (0)
413
/*
 * copyout_atomic_test() exercises one word size of atomic copyout: a valid
 * aligned copy, every unaligned offset (EINVAL), unmapped and kernel-space
 * destinations (EFAULT, from any thread), and a read-only destination page
 * (EFAULT). A macro for the same stringization reason as copyin_atomic_test().
 */
#define copyout_atomic_test(data, word_t, copyout_fn, copyout_to_kernel_fn) \
	do { \
		const word_t word_out = (word_t)0x123456789ABCDEF0UL; \
		bzero(data->kern_addr, sizeof(word_t)); \
		\
		int err = copyout_fn(word_out, data->user_addr); \
		T_EXPECT_EQ_INT(err, 0, #copyout_fn "() with valid parameters should succeed"); \
		\
		int cmp = memcmp(data->kern_addr, &word_out, sizeof(word_t)); \
		T_EXPECT_EQ_INT(cmp, 0, #copyout_fn "() should correctly copy word"); \
		\
		for (unsigned int offset = 1; offset < sizeof(word_t); offset++) { \
			err = copyout_fn(word_out, data->user_addr + offset); \
			T_EXPECT_EQ_INT(err, EINVAL, \
			    #copyout_fn "() to unaligned userspace address should return EINVAL (offset = %u)", \
			    offset); \
		}; \
		err = copyout_fn(word_out, data->unmapped_addr); \
		T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to unmapped userspace address should return EFAULT"); \
		err = copyout_fn(word_out, (uintptr_t)data->kern_addr); \
		T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to kernel address should return EFAULT"); \
		\
		data->thread_data = word_out; \
		\
		err = copyio_test_run_in_thread(copyout_to_kernel_fn, data); \
		T_EXPECT_EQ_INT(err, EFAULT, \
		    #copyout_fn "() to kernel address in kernel_task thread should return EFAULT"); \
		err = copyout_to_kernel_fn(data); \
		T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to kernel address in other threads should return EFAULT"); \
		\
		copyio_test_protect(data, VM_PROT_READ); \
		err = copyout_fn(word_out, data->user_addr); \
		T_EXPECT_EQ_INT(err, EFAULT, #copyout_fn "() to read-only address should return EFAULT"); \
		copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE); \
	} while (0)
449
/*
 * Run both the copyin and copyout atomic tests for the given word size
 * (32 or 64); token-pastes the matching uintN_t type, copy{in,out}_atomicN
 * function, and from/to-kernel helper names.
 */
#define copyio_atomic_test(data, size) \
	do { \
		copyin_atomic_test((data), uint ## size ## _t, copyin_atomic ## size, \
		    copyin_atomic ## size ## _from_kernel); \
		copyout_atomic_test((data), uint ## size ## _t, copyout_atomic ## size, \
		    copyout_atomic ## size ## _to_kernel); \
	} while (0)
457
458 static int
copyin_atomic32_wait_if_equals_from_kernel(struct copyio_test_data * data)459 copyin_atomic32_wait_if_equals_from_kernel(struct copyio_test_data *data)
460 {
461 return copyin_atomic32_wait_if_equals((uintptr_t)data->kern_addr, (uint32_t)data->thread_data);
462 }
463
/*
 * Exercise copyin_atomic32_wait_if_equals(): match (0) and mismatch
 * (ESTALE) results, unaligned addresses (EINVAL), unmapped and
 * kernel-space sources (EFAULT), and write-only pages (EFAULT).
 */
static void
copyin_atomic32_wait_if_equals_test(struct copyio_test_data *data)
{
	/* User word is zeroed, so comparing against 0 matches, ~0U does not. */
	bzero(data->kern_addr, sizeof(uint32_t));
	int err = copyin_atomic32_wait_if_equals(data->user_addr, 0);
	T_EXPECT_EQ_INT(err, 0, "copyin_atomic32_wait_if_equals() should return 0 when equals");
	err = copyin_atomic32_wait_if_equals(data->user_addr, ~0U);
	T_EXPECT_EQ_INT(err, ESTALE, "copyin_atomic32_wait_if_equals() should return ESTALE when not equals");

	for (unsigned int offset = 1; offset < sizeof(uint32_t); offset++) {
		err = copyin_atomic32_wait_if_equals(data->user_addr + offset, 0);
		T_EXPECT_EQ_INT(err, EINVAL,
		    "copyin_atomic32_wait_if_equals() on unaligned userspace address should return EINVAL (offset = %u)",
		    offset);
	}
	err = copyin_atomic32_wait_if_equals(data->unmapped_addr, 0);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() on unmapped userspace address should return EFAULT");

	data->thread_data = 0;

	/* Kernel-space sources must fault regardless of which thread calls. */
	err = copyio_test_run_in_thread(copyin_atomic32_wait_if_equals_from_kernel, data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() from kernel address in kernel_task thread should return EFAULT");
	err = copyin_atomic32_wait_if_equals_from_kernel(data);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() from kernel address in other threads should return EFAULT");

	/* A write-only source page must fault; restore access afterwards. */
	copyio_test_protect(data, VM_PROT_WRITE);
	err = copyin_atomic32_wait_if_equals(data->user_addr, 0);
	T_EXPECT_EQ_INT(err, EFAULT, "copyin_atomic32_wait_if_equals() on write-only address should return EFAULT");
	copyio_test_protect(data, VM_PROT_READ | VM_PROT_WRITE);
}
494
/*
 * Main test entry point. Builds the mappings described by `struct
 * copyio_test_data', runs every copyio test against them, then tears
 * everything down through the goto-based cleanup chain.
 */
kern_return_t
copyio_test(void)
{
	struct copyio_test_data data = {};
	mach_vm_offset_t user_addr = 0;
	kern_return_t ret = KERN_SUCCESS;

	data.buf1 = kalloc_data(copyio_test_buf_size, Z_WAITOK);
	data.buf2 = kalloc_data(copyio_test_buf_size, Z_WAITOK);
	if (!data.buf1 || !data.buf2) {
		T_FAIL("failed to allocate scratch buffers");
		ret = KERN_NO_SPACE;
		goto err_kalloc;
	}

	/**
	 * This test needs to manipulate the current userspace process's
	 * address space. This is okay to do at the specific point in time
	 * when bsd_do_post() runs: current_proc() points to the init process,
	 * which has been set up to the point of having a valid vm_map, but
	 * not to the point of actually execing yet.
	 */
	proc_t proc = current_proc();
	assert(proc_getpid(proc) == 1);
	data.user_map = get_task_map_reference(proc_task(proc));

	/*
	 * Allocate the test buffer plus one extra page; the extra page is
	 * deallocated below to provide a known-unmapped address.
	 */
	user_addr = data.user_addr;
	ret = mach_vm_allocate_kernel(data.user_map, &user_addr,
	    copyio_test_buf_size + PAGE_SIZE, VM_MAP_KERNEL_FLAGS_ANYWHERE());
	if (ret) {
		T_FAIL("mach_vm_allocate_kernel(user_addr) failed: %d", ret);
		goto err_user_alloc;
	}
	data.user_addr = (user_addr_t)user_addr;

	/* Map a single page at the very top of the userspace address space. */
	user_addr = get_map_max(data.user_map) - PAGE_SIZE;
	ret = mach_vm_allocate_kernel(data.user_map, &user_addr, PAGE_SIZE,
	    VM_MAP_KERNEL_FLAGS_FIXED());
	if (ret) {
		T_FAIL("mach_vm_allocate_kernel(user_lastpage_addr) failed: %d", ret);
		goto err_user_lastpage_alloc;
	}
	data.user_lastpage_addr = (user_addr_t)user_addr;

	/* Punch out the trailing page to create `unmapped_addr'. */
	data.unmapped_addr = data.user_addr + copyio_test_buf_size;
	mach_vm_deallocate(data.user_map, data.unmapped_addr, PAGE_SIZE);

	/*
	 * Double-map the user buffer into the kernel map so the tests can
	 * seed and inspect its contents without going through copyio.
	 */
	vm_prot_t cur_protection, max_protection;
	mach_vm_offset_t kern_addr = 0;
	ret = mach_vm_remap(kernel_map, &kern_addr, copyio_test_buf_size,
	    VM_PROT_READ | VM_PROT_WRITE, VM_FLAGS_ANYWHERE,
	    data.user_map, data.user_addr, false,
	    &cur_protection, &max_protection, VM_INHERIT_NONE);
	if (ret) {
		T_FAIL("mach_vm_remap() failed: %d", ret);
		goto err_kern_remap;
	}
	data.kern_addr = (void *)kern_addr;

	copyin_test(&data);
	copyout_test(&data);
	copyinstr_test(&data);
	copyoutstr_test(&data);
	copyio_atomic_test(&data, 32);
	copyio_atomic_test(&data, 64);
	copyin_atomic32_wait_if_equals_test(&data);

	/* Teardown: labels unwind in the reverse order of setup. */
	mach_vm_deallocate(kernel_map, kern_addr, copyio_test_buf_size);
err_kern_remap:
	mach_vm_deallocate(data.user_map, data.user_lastpage_addr, PAGE_SIZE);
err_user_lastpage_alloc:
	mach_vm_deallocate(data.user_map, data.user_addr, copyio_test_buf_size);
err_user_alloc:
	vm_map_deallocate(data.user_map);
err_kalloc:
	/* May be reached with one or both buffers NULL (allocation failure);
	 * this path relies on kfree_data() accepting NULL. */
	kfree_data(data.buf2, copyio_test_buf_size);
	kfree_data(data.buf1, copyio_test_buf_size);
	return ret;
}
574
#if HAS_MTE
/*
 * Re-run the full copyio test suite with the task's security state
 * cleared and the map marked for security access (HAS_MTE builds only),
 * restoring both to their original settings afterwards.
 */
kern_return_t
copyio_unprivileged_test(void)
{
	task_t task = current_task();
	vm_map_t map = current_map();

	/* Remember the current state so it can be restored on the way out. */
	bool had_sec = task_has_sec(task);
	bool had_sec_access = vm_map_has_sec_access(map);

	task_clear_sec(task);
	vm_map_mark_has_sec_access(map);

	kern_return_t ret = copyio_test();

	if (had_sec) {
		task_set_sec(task);
	}
	if (!had_sec_access) {
		vm_map_remove_sec_access(map);
	}

	return ret;
}
#endif /* HAS_MTE */
599