/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <sys/errno.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <san/kasan.h>
#include <arm/pmap.h>

#undef copyin
#undef copyout

extern int _bcopyin(const char *src, char *dst, vm_size_t len);
extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, char *dst, vm_size_t len);
extern int _copyin_atomic32(const char *src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
extern int _copyin_atomic64(const char *src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, const char *dst);
extern int _copyout_atomic64(uint64_t u64, const char *dst);

extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;

/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used, and only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic; ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
	COPYIO_IN                       = 0x0001,
	COPYIO_OUT                      = 0x0002,
	COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
	COPYIO_VALIDATE_USER_ONLY       = 0x0008,
	COPYIO_ATOMIC                   = 0x0010,
});
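
/*
 * Illustrative sketch of how these flags combine (both combinations are
 * taken verbatim from the entry points below): a plain copyin tolerates
 * kernel-to-kernel copies, while the atomic entry points add COPYIO_ATOMIC
 * so that alignment is enforced and skip kernel-side validation:
 *
 *	copy_validate(uaddr, (uintptr_t)kaddr, nbytes,
 *	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
 *	copy_validate(uaddr, 0, 8,
 *	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
 */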

typedef enum {
	USER_ACCESS_READ,
	USER_ACCESS_WRITE
} user_access_direction_t;

static inline void
user_access_enable(__unused user_access_direction_t user_access_direction)
{
#if __ARM_PAN_AVAILABLE__
	assert(__builtin_arm_rsr("pan") != 0);
	__builtin_arm_wsr("pan", 0);
#endif  /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(__unused user_access_direction_t user_access_direction)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif  /* __ARM_PAN_AVAILABLE__ */
}
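
/*
 * Every user copy below follows the same bracket pattern: PAN (Privileged
 * Access Never) is dropped only for the duration of the access and restored
 * immediately afterwards. A minimal sketch of the shape each entry point
 * takes (illustrative, not a new API):
 *
 *	user_access_enable(USER_ACCESS_READ);
 *	result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
 *	user_access_disable(USER_ACCESS_READ);
 */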

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

static inline bool
is_kernel_to_kernel_copy()
{
	return current_thread()->map->pmap == kernel_pmap;
}

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread's pmap is the kernel's,
 * which is non-fatal for certain routines.
 */
static int
copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
	thread_t self = current_thread();

	user_addr_t user_addr_last;
	uintptr_t kernel_addr_last;
	user_addr_t canonicalized_user_addr = user_addr;

	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	if (__improbable((canonicalized_user_addr < vm_map_min(self->map)) ||
	    os_add_overflow(canonicalized_user_addr, nbytes, &user_addr_last) ||
	    (user_addr_last > vm_map_max(self->map)))) {
		return EFAULT;
	}

	if (flags & COPYIO_ATOMIC) {
		if (__improbable(user_addr & (nbytes - 1))) {
			return EINVAL;
		}
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}

		bool in_kva = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
		    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
		bool in_physmap = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= physmap_base) &&
		    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= physmap_end);

		if (__improbable(!(in_kva || in_physmap))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}
	}

	if (is_kernel_to_kernel_copy()) {
		if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
			return EFAULT;
		}
		return EXDEV;
	}

	if (__improbable(canonicalized_user_addr & ARM_TBI_USER_MASK)) {
		return EINVAL;
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		zone_element_bounds_check(kernel_addr, nbytes);
#if KASAN
		/* For user copies, asan-check the kernel-side buffer */
		if (flags & COPYIO_IN) {
			__asan_storeN(kernel_addr, nbytes);
		} else {
			__asan_loadN(kernel_addr, nbytes);
		}
#endif
	}
	return 0;
}
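
/*
 * Callers that pass COPYIO_ALLOW_KERNEL_TO_KERNEL treat EXDEV as a signal
 * to fall back to a plain kernel bcopy rather than as an error. A sketch
 * of the pattern used by copyin/copyout below:
 *
 *	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
 *	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
 *	if (result == EXDEV) {
 *		return copyin_kern(user_addr, kernel_addr, nbytes);
 *	}
 */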

int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	bcopy((const char*)(uintptr_t)user_addr, kernel_addr, nbytes);

	return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

	return 0;
}

int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
	int result;

	if (__improbable(nbytes == 0)) {
		return 0;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		return copyin_kern(user_addr, kernel_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}

	user_access_enable(USER_ACCESS_READ);
	result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
	user_access_disable(USER_ACCESS_READ);
	return result;
}
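
/*
 * Usage sketch (illustrative; `ubuf` and `struct req` are hypothetical):
 *
 *	struct req req;
 *	if (copyin(ubuf, &req, sizeof(req)) != 0) {
 *		return EFAULT;
 *	}
 *
 * A zero-length copyin succeeds trivially; oversized requests panic in
 * copy_validate rather than returning an error.
 */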

/*
 * copy{in,out}_atomic{32,64}
 * Read or write an aligned value in userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 4,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable(USER_ACCESS_READ);
	result = _copyin_atomic32((const char *)user_addr, kernel_addr);
	user_access_disable(USER_ACCESS_READ);
	return result;
}

int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
	int result = copy_validate(user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable(USER_ACCESS_READ);
	result = _copyin_atomic32_wait_if_equals((const char *)user_addr, value);
	user_access_disable(USER_ACCESS_READ);
	return result;
}

int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 8,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable(USER_ACCESS_READ);
	result = _copyin_atomic64((const char *)user_addr, kernel_addr);
	user_access_disable(USER_ACCESS_READ);
	return result;
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
	int result = copy_validate(user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable(USER_ACCESS_WRITE);
	result = _copyout_atomic32(value, (const char *)user_addr);
	user_access_disable(USER_ACCESS_WRITE);
	return result;
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
	int result = copy_validate(user_addr, 0, 8,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable(USER_ACCESS_WRITE);
	result = _copyout_atomic64(value, (const char *)user_addr);
	user_access_disable(USER_ACCESS_WRITE);
	return result;
}
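
/*
 * Usage sketch (illustrative; `uaddr` is a hypothetical user pointer to a
 * naturally aligned 32-bit lock word):
 *
 *	uint32_t cur;
 *	if (copyin_atomic32(uaddr, &cur) != 0) {
 *		return EFAULT;
 *	}
 *
 * A misaligned uaddr yields EINVAL from copy_validate; these entry points
 * never fall back to a byte-wise copy.
 */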

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	int result;
	vm_size_t bytes_copied = 0;

	*lencopied = 0;
	if (__improbable(nbytes == 0)) {
		return ENAMETOOLONG;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
	if (__improbable(result)) {
		return result;
	}
	user_access_enable(USER_ACCESS_READ);
	result = _bcopyinstr((const char *)user_addr, kernel_addr, nbytes,
	    &bytes_copied);
	user_access_disable(USER_ACCESS_READ);
	if (result != EFAULT) {
		*lencopied = bytes_copied;
	}
	return result;
}
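
/*
 * Usage sketch (illustrative; `upath` is a hypothetical user pointer):
 * copying a user-supplied path into a fixed kernel buffer, where `done`
 * receives the number of bytes copied (by BSD copyinstr convention this
 * count includes the terminating NUL):
 *
 *	char path[MAXPATHLEN];
 *	vm_size_t done;
 *	int err = copyinstr(upath, path, sizeof(path), &done);
 *
 * ENAMETOOLONG indicates the string, including its NUL terminator, did not
 * fit in nbytes.
 */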

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	int result;

	if (nbytes == 0) {
		return 0;
	}

	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		return copyout_kern(kernel_addr, user_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}
	user_access_enable(USER_ACCESS_WRITE);
	result = _bcopyout(kernel_addr, (char *)user_addr, nbytes);
	user_access_disable(USER_ACCESS_WRITE);
	return result;
}

int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
	if (__improbable(is_kernel_to_kernel_copy())) {
		return EFAULT;
	}

	return 0;
}

#if (DEBUG || DEVELOPMENT)
/*
 * Development/debug helper: attempt the write through the fault-tolerant
 * _bcopyout path with preemption disabled, so a bad destination is
 * reported as an error return rather than taken as a fatal fault.
 */
int
verify_write(const void *source, void *dst, size_t size)
{
	int rc;
	disable_preemption();
	rc = _bcopyout((const char*)source, (char*)dst, size);
	enable_preemption();
	return rc;
}
#endif