xref: /xnu-8019.80.24/osfmk/arm64/copyio.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <arm/cpu_data_internal.h>
30 #include <arm/misc_protos.h>
31 #include <kern/thread.h>
32 #include <kern/zalloc_internal.h>
33 #include <sys/errno.h>
34 #include <vm/pmap.h>
35 #include <vm/vm_map.h>
36 #include <san/kasan.h>
37 
38 #undef copyin
39 #undef copyout
40 
41 extern int _bcopyin(const char *src, char *dst, vm_size_t len);
42 extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
43 extern int _bcopyout(const char *src, char *dst, vm_size_t len);
44 extern int _copyin_atomic32(const char *src, uint32_t *dst);
45 extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
46 extern int _copyin_atomic64(const char *src, uint64_t *dst);
47 extern int _copyout_atomic32(uint32_t u32, const char *dst);
48 extern int _copyout_atomic64(uint64_t u64, const char *dst);
49 
50 extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);
51 
52 extern const vm_map_address_t physmap_base;
53 extern const vm_map_address_t physmap_end;
54 
55 /*!
56  * @typedef copyio_flags_t
57  *
58  * @const COPYIO_IN
59  * The copy is user -> kernel.
60  * One of COPYIO_IN or COPYIO_OUT should always be specified.
61  *
62  * @const COPYIO_OUT
63  * The copy is kernel -> user
64  * One of COPYIO_IN or COPYIO_OUT should always be specified.
65  *
66  * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
67  * The "user_address" is allowed to be in the VA space of the kernel.
68  *
69  * @const COPYIO_VALIDATE_USER_ONLY
70  * There isn't really a kernel address used, and only the user address
71  * needs to be validated.
72  *
73  * @const COPYIO_ATOMIC
74  * The copyio operation is atomic, ensure that it is properly aligned.
75  */
76 __options_decl(copyio_flags_t, uint32_t, {
77 	COPYIO_IN                       = 0x0001,
78 	COPYIO_OUT                      = 0x0002,
79 	COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
80 	COPYIO_VALIDATE_USER_ONLY       = 0x0008,
81 	COPYIO_ATOMIC                   = 0x0010,
82 });
83 
static inline void
user_access_enable(void)
{
#if __ARM_PAN_AVAILABLE__
	/*
	 * PAN (Privileged Access Never) must be enabled on entry
	 * (PSTATE.PAN == 1).  Clearing it opens a window during which
	 * privileged code may directly access user mappings; the caller
	 * must pair this with user_access_disable() immediately after
	 * the copy routine returns.
	 */
	assert(__builtin_arm_rsr("pan") != 0);
	__builtin_arm_wsr("pan", 0);
#endif  /* __ARM_PAN_AVAILABLE__ */
}
92 
static inline void
user_access_disable(void)
{
#if __ARM_PAN_AVAILABLE__
	/* Re-enable PAN, closing the user-access window opened by user_access_enable(). */
	__builtin_arm_wsr("pan", 1);
#endif  /* __ARM_PAN_AVAILABLE__ */
}
100 
101 /*
102  * Copy sizes bigger than this value will cause a kernel panic.
103  *
104  * Yes, this is an arbitrary fixed limit, but it's almost certainly
105  * a programming error to be copying more than this amount between
106  * user and wired kernel memory in a single invocation on this
107  * platform.
108  */
109 const int copysize_limit_panic = (64 * 1024 * 1024);
110 
111 static inline bool
is_kernel_to_kernel_copy()112 is_kernel_to_kernel_copy()
113 {
114 	return current_thread()->map->pmap == kernel_pmap;
115 }
116 
117 /*
118  * Validate the arguments to copy{in,out} on this platform.
119  *
120  * Returns EXDEV when the current thread pmap is the kernel's
121  * which is non fatal for certain routines.
122  */
static int
copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
	thread_t self = current_thread();

	user_addr_t user_addr_last;
	uintptr_t kernel_addr_last;

	/* Oversized transfers are treated as programming errors (see copysize_limit_panic). */
	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	/*
	 * The user range [user_addr, user_addr + nbytes) must not wrap
	 * and must lie entirely inside the current thread's map.
	 */
	if (__improbable((user_addr < vm_map_min(self->map)) ||
	    os_add_overflow(user_addr, nbytes, &user_addr_last) ||
	    (user_addr_last > vm_map_max(self->map)))) {
		return EFAULT;
	}

	/* Atomic accesses must be naturally aligned (nbytes is 4 or 8 here). */
	if (flags & COPYIO_ATOMIC) {
		if (__improbable(user_addr & (nbytes - 1))) {
			return EINVAL;
		}
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		/* A wrapping kernel-side range is fatal, not a recoverable error. */
		if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}

		/*
		 * The kernel buffer must be wholly contained either in the
		 * kernel VA range or in the physical aperture (physmap).
		 */
		bool in_kva = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
		    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
		bool in_physmap = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= physmap_base) &&
		    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= physmap_end);

		if (__improbable(!(in_kva || in_physmap))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}
	}

	/*
	 * Threads running on the kernel pmap get EXDEV (non-fatal) so that
	 * callers passing COPYIO_ALLOW_KERNEL_TO_KERNEL can fall back to a
	 * plain bcopy path; all other callers see EFAULT.
	 */
	if (is_kernel_to_kernel_copy()) {
		if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
			return EFAULT;
		}
		return EXDEV;
	}

	/* Reject user pointers that carry bits in the tag byte (TBI_MASK). */
	if (__improbable(user_addr & TBI_MASK)) {
		return EINVAL;
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		/*
		 * If the kernel buffer is a zone element, verify the copy
		 * does not overrun it.
		 */
		if (__probable(!zalloc_disable_copyio_check)) {
			zone_t src_zone = NULL;
			vm_size_t kernel_buf_size = zone_element_size((void *)kernel_addr, &src_zone);
			/*
			 * Size of elements in the permanent zone is not saved as a part of the
			 * zone's info
			 */
			if (__improbable(src_zone && !src_zone->z_permanent &&
			    kernel_buf_size < nbytes)) {
				panic("copyio_preflight: kernel buffer 0x%lx has size %lu < nbytes %lu",
				    kernel_addr, kernel_buf_size, nbytes);
			}
		}

#if KASAN
		/* For user copies, asan-check the kernel-side buffer */
		if (flags & COPYIO_IN) {
			__asan_storeN(kernel_addr, nbytes);
		} else {
			__asan_loadN(kernel_addr, nbytes);
		}
#endif
	}
	return 0;
}
203 
204 int
copyin_kern(const user_addr_t user_addr,char * kernel_addr,vm_size_t nbytes)205 copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
206 {
207 	bcopy((const char*)(uintptr_t)user_addr, kernel_addr, nbytes);
208 
209 	return 0;
210 }
211 
212 int
copyout_kern(const char * kernel_addr,user_addr_t user_addr,vm_size_t nbytes)213 copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
214 {
215 	bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);
216 
217 	return 0;
218 }
219 
220 int
copyin(const user_addr_t user_addr,void * kernel_addr,vm_size_t nbytes)221 copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
222 {
223 	int result;
224 
225 	if (__improbable(nbytes == 0)) {
226 		return 0;
227 	}
228 
229 	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
230 	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
231 	if (result == EXDEV) {
232 		return copyin_kern(user_addr, kernel_addr, nbytes);
233 	}
234 	if (__improbable(result)) {
235 		return result;
236 	}
237 
238 	user_access_enable();
239 	result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
240 	user_access_disable();
241 	return result;
242 }
243 
244 /*
245  * copy{in,out}_atomic{32,64}
246  * Read or store an aligned value from userspace as a single memory transaction.
247  * These functions support userspace synchronization features
248  */
249 int
copyin_atomic32(const user_addr_t user_addr,uint32_t * kernel_addr)250 copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
251 {
252 	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 4,
253 	    COPYIO_IN | COPYIO_ATOMIC);
254 	if (__improbable(result)) {
255 		return result;
256 	}
257 	user_access_enable();
258 	result = _copyin_atomic32((const char *)user_addr, kernel_addr);
259 	user_access_disable();
260 	return result;
261 }
262 
263 int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr,uint32_t value)264 copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
265 {
266 	int result = copy_validate(user_addr, 0, 4,
267 	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
268 	if (__improbable(result)) {
269 		return result;
270 	}
271 	user_access_enable();
272 	result = _copyin_atomic32_wait_if_equals((const char *)user_addr, value);
273 	user_access_disable();
274 	return result;
275 }
276 
277 int
copyin_atomic64(const user_addr_t user_addr,uint64_t * kernel_addr)278 copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
279 {
280 	int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 8,
281 	    COPYIO_IN | COPYIO_ATOMIC);
282 	if (__improbable(result)) {
283 		return result;
284 	}
285 	user_access_enable();
286 	result = _copyin_atomic64((const char *)user_addr, kernel_addr);
287 	user_access_disable();
288 	return result;
289 }
290 
291 int
copyout_atomic32(uint32_t value,user_addr_t user_addr)292 copyout_atomic32(uint32_t value, user_addr_t user_addr)
293 {
294 	int result = copy_validate(user_addr, 0, 4,
295 	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
296 	if (__improbable(result)) {
297 		return result;
298 	}
299 	user_access_enable();
300 	result = _copyout_atomic32(value, (const char *)user_addr);
301 	user_access_disable();
302 	return result;
303 }
304 
305 int
copyout_atomic64(uint64_t value,user_addr_t user_addr)306 copyout_atomic64(uint64_t value, user_addr_t user_addr)
307 {
308 	int result = copy_validate(user_addr, 0, 8,
309 	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
310 	if (__improbable(result)) {
311 		return result;
312 	}
313 	user_access_enable();
314 	result = _copyout_atomic64(value, (const char *)user_addr);
315 	user_access_disable();
316 	return result;
317 }
318 
319 int
copyinstr(const user_addr_t user_addr,char * kernel_addr,vm_size_t nbytes,vm_size_t * lencopied)320 copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
321 {
322 	int result;
323 	vm_size_t bytes_copied = 0;
324 
325 	*lencopied = 0;
326 	if (__improbable(nbytes == 0)) {
327 		return ENAMETOOLONG;
328 	}
329 
330 	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
331 	if (__improbable(result)) {
332 		return result;
333 	}
334 	user_access_enable();
335 	result = _bcopyinstr((const char *)user_addr, kernel_addr, nbytes,
336 	    &bytes_copied);
337 	user_access_disable();
338 	if (result != EFAULT) {
339 		*lencopied = bytes_copied;
340 	}
341 	return result;
342 }
343 
344 int
copyout(const void * kernel_addr,user_addr_t user_addr,vm_size_t nbytes)345 copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
346 {
347 	int result;
348 
349 	if (nbytes == 0) {
350 		return 0;
351 	}
352 
353 	result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
354 	    COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
355 	if (result == EXDEV) {
356 		return copyout_kern(kernel_addr, user_addr, nbytes);
357 	}
358 	if (__improbable(result)) {
359 		return result;
360 	}
361 	user_access_enable();
362 	result = _bcopyout(kernel_addr, (char *)user_addr, nbytes);
363 	user_access_disable();
364 	return result;
365 }
366 
367 int
copyoutstr_prevalidate(const void * __unused kaddr,user_addr_t __unused uaddr,size_t __unused len)368 copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
369 {
370 	if (__improbable(is_kernel_to_kernel_copy())) {
371 		return EFAULT;
372 	}
373 
374 	return 0;
375 }
376 
377 #if (DEBUG || DEVELOPMENT)
378 int
verify_write(const void * source,void * dst,size_t size)379 verify_write(const void *source, void *dst, size_t size)
380 {
381 	int rc;
382 	disable_preemption();
383 	rc = _bcopyout((const char*)source, (char*)dst, size);
384 	enable_preemption();
385 	return rc;
386 }
387 #endif
388