1 /*
2 * Copyright (c) 2012-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <arm/cpu_data_internal.h>
30 #include <arm/misc_protos.h>
31 #include <kern/thread.h>
32 #include <kern/zalloc_internal.h>
33 #include <sys/errno.h>
34 #include <vm/pmap.h>
35 #include <vm/vm_map.h>
36 #include <san/kasan.h>
37
38 #undef copyin
39 #undef copyout
40
41 extern int _bcopyin(const char *src, char *dst, vm_size_t len);
42 extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
43 extern int _bcopyout(const char *src, char *dst, vm_size_t len);
44 extern int _copyin_atomic32(const char *src, uint32_t *dst);
45 extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
46 extern int _copyin_atomic64(const char *src, uint64_t *dst);
47 extern int _copyout_atomic32(uint32_t u32, const char *dst);
48 extern int _copyout_atomic64(uint64_t u64, const char *dst);
49
50 extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);
51
52 extern const vm_map_address_t physmap_base;
53 extern const vm_map_address_t physmap_end;
54
/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used, and only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic, ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
	COPYIO_IN                       = 0x0001,
	COPYIO_OUT                      = 0x0002,
	COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
	COPYIO_VALIDATE_USER_ONLY       = 0x0008,
	COPYIO_ATOMIC                   = 0x0010,
});
83
/*
 * Open a window during which the kernel may directly dereference
 * user-space addresses, by clearing PSTATE.PAN (Privileged Access
 * Never).  No-op when the core does not implement PAN.
 */
static inline void
user_access_enable(void)
{
#if __ARM_PAN_AVAILABLE__
	/* PAN must currently be set: user-access windows never nest. */
	assert(__builtin_arm_rsr("pan") != 0);
	__builtin_arm_wsr("pan", 0);
#endif  /* __ARM_PAN_AVAILABLE__ */
}
92
/*
 * Close the user-access window opened by user_access_enable() by
 * re-setting PSTATE.PAN.  No-op when the core does not implement PAN.
 */
static inline void
user_access_disable(void)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif  /* __ARM_PAN_AVAILABLE__ */
}
100
/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);   /* 64 MiB */
110
111 static inline bool
is_kernel_to_kernel_copy()112 is_kernel_to_kernel_copy()
113 {
114 return current_thread()->map->pmap == kernel_pmap;
115 }
116
/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread pmap is the kernel's
 * which is non fatal for certain routines.
 *
 * Other returns: 0 on success, EFAULT for a user address outside the
 * current map, EINVAL for a misaligned atomic or a tagged user pointer.
 * Bogus kernel-side arguments panic rather than return.
 */
static int
copy_validate(const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
	thread_t self = current_thread();

	user_addr_t user_addr_last;
	uintptr_t kernel_addr_last;

	/* Oversized transfers are treated as programming errors: panic. */
	if (__improbable(nbytes > copysize_limit_panic)) {
		panic("%s(%p, %p, %lu) - transfer too large", __func__,
		    (void *)user_addr, (void *)kernel_addr, nbytes);
	}

	/*
	 * The user range [user_addr, user_addr + nbytes) must lie entirely
	 * within the current map's bounds and must not wrap the address space.
	 */
	if (__improbable((user_addr < vm_map_min(self->map)) ||
	    os_add_overflow(user_addr, nbytes, &user_addr_last) ||
	    (user_addr_last > vm_map_max(self->map)))) {
		return EFAULT;
	}

	if (flags & COPYIO_ATOMIC) {
		/*
		 * Natural-alignment check; assumes nbytes is a power of two
		 * (callers pass 4 or 8).
		 */
		if (__improbable(user_addr & (nbytes - 1))) {
			return EINVAL;
		}
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		/*
		 * The kernel buffer must not wrap and must sit entirely inside
		 * either the kernel VA range or the physical aperture
		 * [physmap_base, physmap_end].  Pointer signature bits are
		 * stripped before the range comparisons.
		 */
		if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}

		bool in_kva = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
		    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
		bool in_physmap = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= physmap_base) &&
		    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= physmap_end);

		if (__improbable(!(in_kva || in_physmap))) {
			panic("%s(%p, %p, %lu) - kaddr not in kernel", __func__,
			    (void *)user_addr, (void *)kernel_addr, nbytes);
		}
	}

	if (is_kernel_to_kernel_copy()) {
		/* Non-fatal sentinel; callers that allow it take a bcopy path. */
		if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
			return EFAULT;
		}
		return EXDEV;
	}

	/* Reject user pointers carrying top-byte (TBI) tag bits. */
	if (__improbable(user_addr & TBI_MASK)) {
		return EINVAL;
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		if (__probable(!zalloc_disable_copyio_check)) {
			zone_t src_zone = NULL;
			vm_offset_t oob_offs, size;

			/*
			 * If the kernel buffer comes from a zone, make sure
			 * the copy cannot overflow the zone element.
			 */
			size = zone_element_size((void *)kernel_addr,
			    &src_zone, false, &oob_offs);
			size -= oob_offs;

			/*
			 * Size of elements in the permanent zone is not saved as a part of the
			 * zone's info
			 */
			if (__improbable(src_zone && !src_zone->z_permanent &&
			    size < nbytes)) {
				panic("copyio_preflight: kernel buffer %p "
				    "has size %lu < nbytes %lu",
				    (void *)kernel_addr, size, nbytes);
			}
		}

#if KASAN
		/* For user copies, asan-check the kernel-side buffer */
		if (flags & COPYIO_IN) {
			__asan_storeN(kernel_addr, nbytes);
		} else {
			__asan_loadN(kernel_addr, nbytes);
		}
#endif
	}
	return 0;
}
209
210 int
copyin_kern(const user_addr_t user_addr,char * kernel_addr,vm_size_t nbytes)211 copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
212 {
213 bcopy((const char*)(uintptr_t)user_addr, kernel_addr, nbytes);
214
215 return 0;
216 }
217
218 int
copyout_kern(const char * kernel_addr,user_addr_t user_addr,vm_size_t nbytes)219 copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
220 {
221 bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);
222
223 return 0;
224 }
225
226 int
copyin(const user_addr_t user_addr,void * kernel_addr,vm_size_t nbytes)227 copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
228 {
229 int result;
230
231 if (__improbable(nbytes == 0)) {
232 return 0;
233 }
234
235 result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
236 COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
237 if (result == EXDEV) {
238 return copyin_kern(user_addr, kernel_addr, nbytes);
239 }
240 if (__improbable(result)) {
241 return result;
242 }
243
244 user_access_enable();
245 result = _bcopyin((const char *)user_addr, kernel_addr, nbytes);
246 user_access_disable();
247 return result;
248 }
249
250 /*
251 * copy{in,out}_atomic{32,64}
252 * Read or store an aligned value from userspace as a single memory transaction.
253 * These functions support userspace synchronization features
254 */
255 int
copyin_atomic32(const user_addr_t user_addr,uint32_t * kernel_addr)256 copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
257 {
258 int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 4,
259 COPYIO_IN | COPYIO_ATOMIC);
260 if (__improbable(result)) {
261 return result;
262 }
263 user_access_enable();
264 result = _copyin_atomic32((const char *)user_addr, kernel_addr);
265 user_access_disable();
266 return result;
267 }
268
269 int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr,uint32_t value)270 copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
271 {
272 int result = copy_validate(user_addr, 0, 4,
273 COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
274 if (__improbable(result)) {
275 return result;
276 }
277 user_access_enable();
278 result = _copyin_atomic32_wait_if_equals((const char *)user_addr, value);
279 user_access_disable();
280 return result;
281 }
282
283 int
copyin_atomic64(const user_addr_t user_addr,uint64_t * kernel_addr)284 copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
285 {
286 int result = copy_validate(user_addr, (uintptr_t)kernel_addr, 8,
287 COPYIO_IN | COPYIO_ATOMIC);
288 if (__improbable(result)) {
289 return result;
290 }
291 user_access_enable();
292 result = _copyin_atomic64((const char *)user_addr, kernel_addr);
293 user_access_disable();
294 return result;
295 }
296
297 int
copyout_atomic32(uint32_t value,user_addr_t user_addr)298 copyout_atomic32(uint32_t value, user_addr_t user_addr)
299 {
300 int result = copy_validate(user_addr, 0, 4,
301 COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
302 if (__improbable(result)) {
303 return result;
304 }
305 user_access_enable();
306 result = _copyout_atomic32(value, (const char *)user_addr);
307 user_access_disable();
308 return result;
309 }
310
311 int
copyout_atomic64(uint64_t value,user_addr_t user_addr)312 copyout_atomic64(uint64_t value, user_addr_t user_addr)
313 {
314 int result = copy_validate(user_addr, 0, 8,
315 COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
316 if (__improbable(result)) {
317 return result;
318 }
319 user_access_enable();
320 result = _copyout_atomic64(value, (const char *)user_addr);
321 user_access_disable();
322 return result;
323 }
324
325 int
copyinstr(const user_addr_t user_addr,char * kernel_addr,vm_size_t nbytes,vm_size_t * lencopied)326 copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
327 {
328 int result;
329 vm_size_t bytes_copied = 0;
330
331 *lencopied = 0;
332 if (__improbable(nbytes == 0)) {
333 return ENAMETOOLONG;
334 }
335
336 result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
337 if (__improbable(result)) {
338 return result;
339 }
340 user_access_enable();
341 result = _bcopyinstr((const char *)user_addr, kernel_addr, nbytes,
342 &bytes_copied);
343 user_access_disable();
344 if (result != EFAULT) {
345 *lencopied = bytes_copied;
346 }
347 return result;
348 }
349
350 int
copyout(const void * kernel_addr,user_addr_t user_addr,vm_size_t nbytes)351 copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
352 {
353 int result;
354
355 if (nbytes == 0) {
356 return 0;
357 }
358
359 result = copy_validate(user_addr, (uintptr_t)kernel_addr, nbytes,
360 COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
361 if (result == EXDEV) {
362 return copyout_kern(kernel_addr, user_addr, nbytes);
363 }
364 if (__improbable(result)) {
365 return result;
366 }
367 user_access_enable();
368 result = _bcopyout(kernel_addr, (char *)user_addr, nbytes);
369 user_access_disable();
370 return result;
371 }
372
373 int
copyoutstr_prevalidate(const void * __unused kaddr,user_addr_t __unused uaddr,size_t __unused len)374 copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
375 {
376 if (__improbable(is_kernel_to_kernel_copy())) {
377 return EFAULT;
378 }
379
380 return 0;
381 }
382
383 #if (DEBUG || DEVELOPMENT)
384 int
verify_write(const void * source,void * dst,size_t size)385 verify_write(const void *source, void *dst, size_t size)
386 {
387 int rc;
388 disable_preemption();
389 rc = _bcopyout((const char*)source, (char*)dst, size);
390 enable_preemption();
391 return rc;
392 }
393 #endif
394