xref: /xnu-12377.41.6/osfmk/arm64/copyio.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
/*
 * Copyright (c) 2012-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <sys/errno.h>
#include <vm/pmap.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_memtag.h>
#include <san/kasan.h>
#include <arm/pmap.h>
#include <arm64/speculation.h>

#undef copyin
#undef copyout

extern int _bcopyin(const user_addr_t src, char *dst, vm_size_t len);
extern int _bcopyinstr(const user_addr_t src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, user_addr_t dst, vm_size_t len);
extern int _copyin_atomic32(const user_addr_t src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const user_addr_t src, uint32_t value);
extern int _copyin_atomic64(const user_addr_t src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, user_addr_t dst);
extern int _copyout_atomic64(uint64_t u64, user_addr_t dst);

#if HAS_MTE
extern int _unprivileged_bcopyin(const user_addr_t src, char *dst, vm_size_t len);
extern int _unprivileged_bcopyinstr(const user_addr_t src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _unprivileged_bcopyout(const char *src, user_addr_t dst, vm_size_t len);
extern int _unprivileged_copyin_atomic32(const user_addr_t src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals_unchecked(const user_addr_t src, uint32_t value);
extern int _unprivileged_copyin_atomic64(const user_addr_t src, uint64_t *dst);
extern int _unprivileged_copyout_atomic32(uint32_t u32, user_addr_t dst);
extern int _unprivileged_copyout_atomic64(uint64_t u64, user_addr_t dst);

extern int _copyin_mte_load_tag(const user_addr_t src, user_addr_t* out);
#endif /* HAS_MTE */

extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;
/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used, and only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic; ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
	COPYIO_IN                       = 0x0001,
	COPYIO_OUT                      = 0x0002,
	COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
	COPYIO_VALIDATE_USER_ONLY       = 0x0008,
	COPYIO_ATOMIC                   = 0x0010,
});
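
/*
 * Illustrative note (not in the original source): callers compose these
 * flags per operation.  copyin() below, for example, validates with
 * COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL so that a copy staged from a
 * kernel-owned map can fall back to a plain bcopy via copyin_kern() when
 * copy_validate() reports EXDEV.
 */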

typedef enum {
	USER_ACCESS_READ,
	USER_ACCESS_WRITE
} user_access_direction_t;

#if HAS_MTE
static inline void
unprivileged_user_access_enable(__unused user_access_direction_t user_access_direction,
    pmap_t __unused pmap)
{
}

static inline void
unprivileged_user_access_disable(__unused user_access_direction_t user_access_direction,
    pmap_t __unused pmap)
{
	current_thread()->machine.in_unprivileged_access = false;
}
#endif /* HAS_MTE */

static inline void
user_access_enable(__unused user_access_direction_t user_access_direction, pmap_t __unused pmap)
{
#if __ARM_PAN_AVAILABLE__
	assert(__builtin_arm_rsr("pan") != 0);
	__builtin_arm_wsr("pan", 0);
#endif  /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(__unused user_access_direction_t user_access_direction, pmap_t __unused pmap)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif  /* __ARM_PAN_AVAILABLE__ */
}

#if HAS_MTE
static inline bool
userspace_access_is_tagged(vm_map_t map)
{
	if (task_has_sec_never_check(current_task())) {
		return false;
	}
	if (ml_thread_get_sec_override(current_thread())) {
		return false;
	}
	return current_task_has_sec_enabled() || !vm_map_has_sec_access(map);
}

#define WRAP_COPYIO_UNPRIVILEGED(_dir, _map, _op)                           \
	({                                                                      \
	        int _ret;                                                       \
	        unprivileged_user_access_enable(_dir, (_map)->pmap);            \
	        _ret = _op;                                                     \
	        unprivileged_user_access_disable(_dir, (_map)->pmap);           \
	        _ret;                                                           \
	})
#endif /* HAS_MTE */

#define WRAP_COPYIO_PAN(_dir, _map, _op)                                    \
	({                                                                      \
	        int _ret;                                                       \
	        user_access_enable(_dir, (_map)->pmap);                         \
	        _ret = _op;                                                     \
	        user_access_disable(_dir, (_map)->pmap);                        \
	        _ret;                                                           \
	})

#if HAS_MTE
/* BEGIN IGNORE CODESTYLE */
/**
 * Wraps a low-level assembly copyio handler.
 *
 * Depending on how the target address space is configured, this macro will
 * choose between a "privileged" handler (ldr/str + PAN=0) and an unprivileged
 * equivalent (ldtr/sttr).  Privileged handler calls are wrapped with
 * user_access_{enable,disable} as needed.
 *
 * @param _dir USER_ACCESS_READ if this handler reads from userspace memory, or
 *             USER_ACCESS_WRITE if it writes to userspace memory
 * @param _map the VM map to read from
 * @param _op the copyio handler to invoke, including parameters
 */
#define WRAP_COPYIO(_dir, _map, _op)                                                            \
	({                                                                                          \
	        int _ret2;                                                                          \
	        if (userspace_access_is_tagged(_map)) {                                             \
	                _ret2 = WRAP_COPYIO_PAN(_dir, _map, _op);                                   \
	                if (_ret2 == EAGAIN) {                                                      \
	                        /*                                                                  \
	                         * The exception handler enabled MTE soft mode.                     \
	                         * Try again with a handler that respects ATA0/TCF0.                \
	                         */                                                                 \
	                        _ret2 = WRAP_COPYIO_UNPRIVILEGED(_dir, _map, _unprivileged ## _op); \
	                }                                                                           \
	        } else {                                                                            \
	                _ret2 = WRAP_COPYIO_UNPRIVILEGED(_dir, _map, _unprivileged ## _op);         \
	        }                                                                                   \
	        _ret2;                                                                              \
	})
/* END IGNORE CODESTYLE */
#else
#define WRAP_COPYIO(_dir, _map, _op) WRAP_COPYIO_PAN(_dir, _map, _op)
#endif
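
/*
 * Illustrative sketch (not part of the original source): given a call such as
 *
 *	WRAP_COPYIO(USER_ACCESS_READ, map, _bcopyin(uaddr, kbuf, len));
 *
 * the HAS_MTE variant token-pastes "_unprivileged" onto the handler call, so
 * the ldtr/sttr path expands to _unprivileged_bcopyin(uaddr, kbuf, len),
 * while the tagged path runs _bcopyin() bracketed by
 * user_access_enable()/user_access_disable().
 */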

/*
 * Copy sizes bigger than this value are rejected (copy_validate()
 * below fails them with EINVAL).
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

static inline bool
is_kernel_to_kernel_copy(pmap_t pmap)
{
	return pmap == kernel_pmap;
}

/**
 * In order to prevent copies from speculatively targeting the wrong address
 * space, force kernel-to-kernel copies to target the kernel address space
 * (TTBR1) and non-kernel copies to target the user address space (TTBR0).
 *
 * This should have no non-speculative effect, as any address which passes
 * validation should already have bit 55 (the address space select bit) set
 * appropriately. If the address would change (i.e. addr is invalid for the
 * copy type), this function panics, so it must only be called after all other
 * verification has completed.
 */
static user_addr_t
copy_ensure_address_space_spec(vm_map_t map, const user_addr_t addr)
{
	user_addr_t new_addr = 0;
	user_addr_t kaddr = addr | BIT(55);
	user_addr_t uaddr = addr & (~BIT(55));

	/*
	 * new_addr = is_kernel_to_kernel_copy(...) ? kaddr : uaddr
	 *
	 * The check must be performed explicitly, as the compiler lowering of the
	 * actual call may be subject to prediction.
	 */
	SPECULATION_GUARD_SELECT_XXX(
		/* out */ new_addr,
		/* cmp_1  */ map->pmap, /* cmp_2 */ kernel_pmap,
		/* cc   */ "eq", /* sel_1 */ kaddr,
		/* n_cc */ "ne", /* sel_2 */ uaddr);

	/*
	 * Since we're modifying the address past the validation point, let's be
	 * sure we didn't erroneously change address spaces.
	 *
	 * We have to be careful to hide this check from the optimizer: if it
	 * learns that new_addr == addr, then it is free to (and, indeed, does) use
	 * addr everywhere that new_addr is referenced, which breaks our hardening.
	 */
	user_addr_t new_addr_opt_hidden = new_addr;
	__compiler_materialize_and_prevent_reordering_on(new_addr_opt_hidden);
	if (new_addr_opt_hidden != addr) {
		panic("copy_ensure_address_space_spec changed address: 0x%llx->0x%llx",
		    addr, new_addr);
	}

	return new_addr;
}
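
/*
 * Worked example (illustrative only): for a canonical user address such as
 * 0x0000000170001000, bit 55 is already clear, so the "ne" arm selects
 * uaddr == addr and the input is returned unchanged; for a kernel-to-kernel
 * copy the address already has bit 55 set and the "eq" arm selects
 * kaddr == addr.  Any mismatch means the address was invalid for the copy
 * type, and the panic above fires.
 */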

static int
copy_validate_user_addr(vm_map_t map, const user_addr_t user_addr, vm_size_t nbytes)
{
	user_addr_t canonicalized_user_addr = user_addr;
	user_addr_t user_addr_last;
	bool is_kernel_to_kernel = is_kernel_to_kernel_copy(map->pmap);

#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
	/*
	 *  `user_addr` could be tagged. Canonicalize the address so we perform
	 *  range checks with canonical addresses.
	 *
	 *  Emulated processes are allowed to pass tagged pointers into the kernel.
	 *  Though we do not currently check tags for emulated processes, on real
	 *  silicon, copyio will check tags, so we want to propagate them here
	 *  for testing purposes. This works because TCR.TBI0 is set, which
	 *  enables TBI for access to TTBR0 at all ELx.
	 *
	 *  We use vm_memtag_canonicalize() here, rather than vm_map_strip_addr()
	 *  as TBI-tagged addresses are explicitly banned from copyio.
	 */
	if (current_task_has_sec_enabled()) {
		canonicalized_user_addr = (user_addr_t)vm_memtag_canonicalize(map, user_addr);
	}
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */

	if (__improbable(canonicalized_user_addr < vm_map_min(map) ||
	    os_add_overflow(canonicalized_user_addr, nbytes, &user_addr_last) ||
	    user_addr_last > vm_map_max(map))) {
		return EFAULT;
	}

	if (!is_kernel_to_kernel) {
		if (__improbable(canonicalized_user_addr & ARM_TBI_USER_MASK)) {
			return EINVAL;
		}
	}

	return 0;
}
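
/*
 * Illustrative note (not in the original source): the os_add_overflow()
 * check above rejects ranges that wrap the 64-bit address space.  A request
 * such as
 *
 *	copy_validate_user_addr(map, 0xfffffffffffff000, 0x2000)
 *
 * overflows user_addr_last and fails with EFAULT before the vm_map_max()
 * comparison could be fooled by the wrapped value.
 */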

static void
copy_validate_kernel_addr(uintptr_t kernel_addr, vm_size_t nbytes)
{
	uintptr_t kernel_addr_last;

	if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
		panic("%s(%p, %lu) - kaddr not in kernel", __func__,
		    (void *)kernel_addr, nbytes);
	}

	bool in_kva = (VM_KERNEL_STRIP_PTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
	bool in_physmap = (VM_KERNEL_STRIP_PTR(kernel_addr) >= physmap_base) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= physmap_end);

	if (__improbable(!(in_kva || in_physmap))) {
		panic("%s(%p, %lu) - kaddr not in kernel", __func__,
		    (void *)kernel_addr, nbytes);
	}

	zone_element_bounds_check(kernel_addr, nbytes);
}

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread's pmap is the kernel's,
 * which is non-fatal for certain routines.
 */
static inline __attribute__((always_inline)) int
copy_validate(vm_map_t map, const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
	int ret;

	if (__improbable(nbytes > copysize_limit_panic)) {
		return EINVAL;
	}

	ret = copy_validate_user_addr(map, user_addr, nbytes);
	if (__improbable(ret)) {
		return ret;
	}

	if (flags & COPYIO_ATOMIC) {
		if (__improbable(user_addr & (nbytes - 1))) {
			return EINVAL;
		}
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		copy_validate_kernel_addr(kernel_addr, nbytes);
#if KASAN
		/* For user copies, asan-check the kernel-side buffer */
		if (flags & COPYIO_IN) {
			__asan_storeN(kernel_addr, nbytes);
		} else {
			__asan_loadN(kernel_addr, nbytes);
		}
#endif
	}

	if (is_kernel_to_kernel_copy(map->pmap)) {
		if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
			return EFAULT;
		}
		return EXDEV;
	}

	return 0;
}
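
/*
 * Illustrative note (not in the original source): EXDEV is the one "soft"
 * failure above.  Callers that pass COPYIO_ALLOW_KERNEL_TO_KERNEL treat it
 * as a dispatch hint rather than an error, e.g. (abridged from copyin()):
 *
 *	result = copy_validate(map, uaddr, kaddr, n,
 *	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
 *	if (result == EXDEV) {
 *	        return copyin_kern(uaddr, kaddr, n);    // plain bcopy path
 *	}
 */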

int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	bcopy((const char*)(uintptr_t)user_addr, kernel_addr, nbytes);

	return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

	return 0;
}

int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
	vm_map_t map = current_thread()->map;
	user_addr_t guarded_user_addr;
	int result;

	if (__improbable(nbytes == 0)) {
		return 0;
	}

	result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
		return copyin_kern(guarded_user_addr, kernel_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}

	guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _bcopyin(guarded_user_addr, kernel_addr, nbytes));
}
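
/*
 * Usage sketch (hypothetical caller, not in the original source):
 *
 *	struct example_args args;   // hypothetical argument block
 *	int err = copyin(uap_args_addr, &args, sizeof(args));
 *	if (err != 0) {
 *	        return err;         // EFAULT, EINVAL, ...
 *	}
 *
 * Note that a zero-length request returns 0 without touching either buffer.
 */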

/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from/to userspace as a single memory
 * transaction. These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, 4,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _copyin_atomic32(guarded_user_addr, kernel_addr));
}

#if HAS_MTE
static inline int
_unprivileged_copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
	vm_map_t map = current_thread()->map;

	assert(__builtin_arm_rsr64("TCO") == 0);
	return WRAP_COPYIO_PAN(USER_ACCESS_READ, map,
	           _copyin_atomic32_wait_if_equals_unchecked(user_addr, value));
}

/*
 * Retrieves the associated MTE tag, if any, for a user space address.
 * Returns the input pointer with any associated MTE tag merged into the
 * architecturally specified bitfield in `out`.
 */
int
copyin_mte_load_tag(const user_addr_t user_addr, user_addr_t* out)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, (uintptr_t)out, sizeof(user_addr_t),
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	int ret = WRAP_COPYIO_PAN(USER_ACCESS_READ, map,
	    _copyin_mte_load_tag(guarded_user_addr, out));
	return ret;
}

#endif /* HAS_MTE */

int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _copyin_atomic32_wait_if_equals(guarded_user_addr, value));
}
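
/*
 * Illustrative note (not in the original source): the wait-if-equals form
 * backs userspace synchronization primitives.  A caller sketch:
 *
 *	// Load *uaddr as a single 32-bit transaction; the assembly handler
 *	// may also wait (e.g. WFE) while the value still equals `expected`.
 *	int err = copyin_atomic32_wait_if_equals(uaddr, expected);
 *
 * As with the other atomic variants, the address must be 4-byte aligned or
 * copy_validate() fails with EINVAL.
 */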

int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, 8,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _copyin_atomic64(guarded_user_addr, kernel_addr));
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_WRITE, map,
	           _copyout_atomic32(value, guarded_user_addr));
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, 0, 8,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_WRITE, map,
	           _copyout_atomic64(value, guarded_user_addr));
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	vm_map_t map = current_thread()->map;
	int result;
	vm_size_t bytes_copied = 0;

	*lencopied = 0;
	if (__improbable(nbytes == 0)) {
		return ENAMETOOLONG;
	}

	result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	result = WRAP_COPYIO(USER_ACCESS_READ, map,
	    _bcopyinstr(guarded_user_addr, kernel_addr, nbytes, &bytes_copied));

	if (result != EFAULT) {
		*lencopied = bytes_copied;
	}
	return result;
}
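
/*
 * Usage sketch (hypothetical caller, not in the original source):
 *
 *	char path[MAXPATHLEN];
 *	vm_size_t done = 0;
 *	int err = copyinstr(user_path_addr, path, sizeof(path), &done);
 *
 * On success, `done` counts the bytes copied (including the NUL terminator,
 * per the usual copyinstr contract); ENAMETOOLONG means the buffer was
 * exhausted (or nbytes was 0) before a terminator was found.
 */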

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	vm_map_t map = current_thread()->map;
	int result;
	user_addr_t guarded_user_addr;

	if (nbytes == 0) {
		return 0;
	}

	result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
		return copyout_kern(kernel_addr, guarded_user_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}

	guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_WRITE, map,
	           _bcopyout(kernel_addr, guarded_user_addr, nbytes));
}

int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
	vm_map_t map = current_thread()->map;

	if (__improbable(is_kernel_to_kernel_copy(map->pmap))) {
		return EFAULT;
	}

	return 0;
}

#if (DEBUG || DEVELOPMENT)
int
verify_write(const void *source, void *dst, size_t size)
{
	int rc;
	disable_preemption();
	rc = _bcopyout((const char*)source, (user_addr_t)dst, size);
	enable_preemption();
	return rc;
}
#endif