/*
 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <sys/errno.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <san/kasan.h>
#include <arm/pmap.h>

#undef copyin
#undef copyout

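/*
 * Fault-recoverable copy primitives. These are implemented outside this file
 * (machine-dependent copyio routines) and report a faulting user access as an
 * error return rather than taking the fault.
 */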
extern int _bcopyin(const char *src, char *dst, vm_size_t len);
extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, char *dst, vm_size_t len);
extern int _copyin_atomic32(const char *src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
extern int _copyin_atomic64(const char *src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, const char *dst);
extern int _copyout_atomic64(uint64_t u64, const char *dst);

extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;

/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used, and only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic, ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
        COPYIO_IN                       = 0x0001,
        COPYIO_OUT                      = 0x0002,
        COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
        COPYIO_VALIDATE_USER_ONLY       = 0x0008,
        COPYIO_ATOMIC                   = 0x0010,
});

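/*
 * user_access_enable()/user_access_disable()
 * Bracket the copy routines below: clearing the PAN (Privileged Access Never)
 * bit lets privileged code access user mappings for the duration of the copy,
 * and it is set again as soon as the copy is done.
 */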
static inline void
user_access_enable(void)
{
#if __ARM_PAN_AVAILABLE__
        assert(__builtin_arm_rsr("pan") != 0);
        __builtin_arm_wsr("pan", 0);
#endif /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(void)
{
#if __ARM_PAN_AVAILABLE__
        __builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
}

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

static inline bool
is_kernel_to_kernel_copy()
{
        return current_thread()->map->pmap == kernel_pmap;
}

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread's pmap is the kernel pmap,
 * which is non-fatal for certain routines (they fall back to a plain
 * kernel-to-kernel bcopy instead).
 */
static int
copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
        thread_t self = current_thread();

        user_addr_t user_addr_last;
        uintptr_t kernel_addr_last;

        if (__improbable(nbytes > copysize_limit_panic)) {
                panic("%s(%p, %p, %lu) - transfer too large", __func__,
                    (void *)user_addr, (void *)kernel_addr, nbytes);
        }

        if (__improbable((user_addr < vm_map_min(self->map)) ||
            os_add_overflow(user_addr, nbytes, &user_addr_last) ||
            (user_addr_last > vm_map_max(self->map)))) {
                return EFAULT;
        }

        if (flags & COPYIO_ATOMIC) {
                if (__improbable(user_addr & (nbytes - 1))) {
                        return EINVAL;
                }
        }

        if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
                if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
                        panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
                            (void *)user_addr, (void *)kernel_addr, nbytes);
                }

                bool in_kva = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
                    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
                bool in_physmap = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= physmap_base) &&
                    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= physmap_end);

                if (__improbable(!(in_kva || in_physmap))) {
                        panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
                            (void *)user_addr, (void *)kernel_addr, nbytes);
                }
        }

        if (is_kernel_to_kernel_copy()) {
                if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
                        return EFAULT;
                }
                return EXDEV;
        }

        if (__improbable(user_addr & TBI_MASK)) {
                return EINVAL;
        }

        if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
                zone_element_bounds_check(kernel_addr, nbytes);
#if KASAN
                /* For user copies, asan-check the kernel-side buffer */
                if (flags & COPYIO_IN) {
                        __asan_storeN(kernel_addr, nbytes);
                } else {
                        __asan_loadN(kernel_addr, nbytes);
                }
#endif
        }
        return 0;
}

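/*
 * Kernel-to-kernel variants: copy_validate() returns EXDEV when the current
 * thread runs on the kernel pmap, in which case the "user" address is really
 * a kernel address and a plain bcopy (with no PAN toggling) is sufficient.
 */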
int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
        bcopy((const char*)(uintptr_t)user_addr, kernel_addr, nbytes);

        return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

        return 0;
}

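/*
 * copyin
 * Copy nbytes from user space into a wired kernel buffer.
 * Returns 0 on success, or EFAULT/EINVAL if validation or the copy fails.
 */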
int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
        int result;

        if (__improbable(nbytes == 0)) {
                return 0;
        }

        result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
            COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
        if (result == EXDEV) {
                return copyin_kern(user_addr, kernel_addr, nbytes);
        }
        if (__improbable(result)) {
                return result;
        }

        user_access_enable();
        result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
        user_access_disable();
        return result;
}

/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
        int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 4,
            COPYIO_IN | COPYIO_ATOMIC);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyin_atomic32((const char *)user_addr, kernel_addr);
        user_access_disable();
        return result;
}

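/*
 * Load a 32-bit value from user space and, if it still matches "value",
 * briefly wait for an event before returning; used by userspace
 * synchronization primitives to park instead of spinning.
 */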
int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
        int result = copy_validate(user_addr, 0, 4,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyin_atomic32_wait_if_equals((const char *)user_addr, value);
        user_access_disable();
        return result;
}

int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
        int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 8,
            COPYIO_IN | COPYIO_ATOMIC);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyin_atomic64((const char *)user_addr, kernel_addr);
        user_access_disable();
        return result;
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
        int result = copy_validate(user_addr, 0, 4,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyout_atomic32(value, (const char *)user_addr);
        user_access_disable();
        return result;
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
        int result = copy_validate(user_addr, 0, 8,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _copyout_atomic64(value, (const char *)user_addr);
        user_access_disable();
        return result;
}

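/*
 * copyinstr
 * Copy a NUL-terminated string of at most nbytes bytes from user space;
 * the number of bytes copied (including the terminating NUL) is returned
 * through lencopied. Returns ENAMETOOLONG when the string does not fit.
 */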
int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
        int result;
        vm_size_t bytes_copied = 0;

        *lencopied = 0;
        if (__improbable(nbytes == 0)) {
                return ENAMETOOLONG;
        }

        result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _bcopyinstr((const char *)user_addr, kernel_addr, nbytes,
            &bytes_copied);
        user_access_disable();
        if (result != EFAULT) {
                *lencopied = bytes_copied;
        }
        return result;
}

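/*
 * copyout
 * Copy nbytes from a kernel buffer out to user space.
 * Returns 0 on success, or EFAULT/EINVAL if validation or the copy fails.
 */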
int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        int result;

        if (nbytes == 0) {
                return 0;
        }

        result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
            COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
        if (result == EXDEV) {
                return copyout_kern(kernel_addr, user_addr, nbytes);
        }
        if (__improbable(result)) {
                return result;
        }
        user_access_enable();
        result = _bcopyout(kernel_addr, (char *)user_addr, nbytes);
        user_access_disable();
        return result;
}

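/*
 * Called before a string copy-out to reject destinations that are really
 * kernel addresses; kernel-to-kernel copies are not supported on this path.
 */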
int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
        if (__improbable(is_kernel_to_kernel_copy())) {
                return EFAULT;
        }

        return 0;
}

#if (DEBUG || DEVELOPMENT)
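/*
 * DEBUG/DEVELOPMENT only: attempt a fault-recoverable write of size bytes
 * from source to dst with preemption disabled, returning an error instead
 * of faulting if the destination is not writable.
 */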
int
verify_write(const void *source, void *dst, size_t size)
{
        int rc;
        disable_preemption();
        rc = _bcopyout((const char*)source, (char*)dst, size);
        enable_preemption();
        return rc;
}
#endif
