/*
 * Copyright (c) 2012-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <sys/errno.h>
#include <vm/pmap.h>
#include <vm/vm_map_xnu.h>
#include <san/kasan.h>
#include <arm/pmap.h>
#include <arm64/speculation.h>

#undef copyin
#undef copyout

extern int _bcopyin(const char *src, char *dst, vm_size_t len);
extern int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, char *dst, vm_size_t len);
extern int _copyin_atomic32(const char *src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const char *src, uint32_t dst);
extern int _copyin_atomic64(const char *src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, const char *dst);
extern int _copyout_atomic64(uint64_t u64, const char *dst);

extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;
/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used; only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic; ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
        COPYIO_IN                       = 0x0001,
        COPYIO_OUT                      = 0x0002,
        COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
        COPYIO_VALIDATE_USER_ONLY       = 0x0008,
        COPYIO_ATOMIC                   = 0x0010,
});
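
/*
 * For reference, the combinations used by the routines in this file:
 *
 *      copyin():           COPYIO_IN  | COPYIO_ALLOW_KERNEL_TO_KERNEL
 *      copyin_atomic32():  COPYIO_IN  | COPYIO_ATOMIC
 *      copyout_atomic32(): COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY
 *      copyinstr():        COPYIO_IN
 */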

typedef enum {
        USER_ACCESS_READ,
        USER_ACCESS_WRITE
} user_access_direction_t;

static inline void
user_access_enable(__unused user_access_direction_t user_access_direction, pmap_t __unused pmap)
{
#if __ARM_PAN_AVAILABLE__
        /* PAN must already be set; clear it to open a user-access window. */
        assert(__builtin_arm_rsr("pan") != 0);
        __builtin_arm_wsr("pan", 0);
#endif /* __ARM_PAN_AVAILABLE__ */
}

static inline void
user_access_disable(__unused user_access_direction_t user_access_direction, pmap_t __unused pmap)
{
#if __ARM_PAN_AVAILABLE__
        /* Re-enable PAN so stray kernel accesses to user VAs fault again. */
        __builtin_arm_wsr("pan", 1);
#endif /* __ARM_PAN_AVAILABLE__ */
}

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

static inline bool
is_kernel_to_kernel_copy(pmap_t pmap)
{
        return pmap == kernel_pmap;
}

/**
 * In order to prevent copies from speculatively targeting the wrong address
 * space, force kernel-to-kernel copies to target the kernel address space
 * (TTBR1) and non-kernel copies to target the user address space (TTBR0).
 *
 * This should have no non-speculative effect, as any address which passes
 * validation already has bit 55 (the address space select bit) set
 * appropriately. If the address would change (i.e. addr is invalid for the
 * copy type), this function panics, so it must only be called after all
 * other verification has completed.
 */
static user_addr_t
copy_ensure_address_space_spec(vm_map_t map, const user_addr_t addr)
{
        user_addr_t new_addr = 0;
        user_addr_t kaddr = addr | BIT(55);
        user_addr_t uaddr = addr & (~BIT(55));

        /*
         * new_addr = is_kernel_to_kernel_copy(...) ? kaddr : uaddr
         *
         * The check must be performed explicitly, as the compiler lowering
         * of the actual call may be subject to prediction.
         */
        SPECULATION_GUARD_SELECT_XXX(
                /* out */ new_addr,
                /* cmp_1 */ map->pmap, /* cmp_2 */ kernel_pmap,
                /* cc */ "eq", /* sel_1 */ kaddr,
                /* n_cc */ "ne", /* sel_2 */ uaddr);

        /*
         * Since we're modifying the address past the validation point, let's
         * be sure we didn't erroneously change address spaces.
         *
         * We have to be careful to hide this check from the optimizer: if it
         * learns that new_addr == addr, then it is free to (and, indeed, does)
         * use addr everywhere that new_addr is referenced, which breaks our
         * hardening.
         */
        user_addr_t new_addr_opt_hidden = new_addr;
        __compiler_materialize_and_prevent_reordering_on(new_addr_opt_hidden);
        if (new_addr_opt_hidden != addr) {
                panic("copy_ensure_address_space_spec changed address: 0x%llx->0x%llx",
                    addr, new_addr);
        }

        return new_addr;
}
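
/*
 * Worked example for copy_ensure_address_space_spec(): a user copy of
 * address 0x0000000012345000 computes kaddr = 0x0080000012345000 and
 * uaddr = 0x0000000012345000. Because map->pmap != kernel_pmap, the guard
 * selects uaddr, which equals the original address, so the copy proceeds
 * against TTBR0 with no non-speculative change. Any mismatch with the
 * original address means validation and selection disagreed, and we panic.
 */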

static int
copy_validate_user_addr(vm_map_t map, const user_addr_t user_addr, vm_size_t nbytes)
{
        user_addr_t canonicalized_user_addr = user_addr;
        user_addr_t user_addr_last;
        bool is_kernel_to_kernel = is_kernel_to_kernel_copy(map->pmap);

        if (__improbable(canonicalized_user_addr < vm_map_min(map) ||
            os_add_overflow(canonicalized_user_addr, nbytes, &user_addr_last) ||
            user_addr_last > vm_map_max(map))) {
                return EFAULT;
        }

        if (!is_kernel_to_kernel) {
                /* User addresses must not carry TBI tag bits. */
                if (__improbable(canonicalized_user_addr & ARM_TBI_USER_MASK)) {
                        return EINVAL;
                }
        }

        return 0;
}

static void
copy_validate_kernel_addr(uintptr_t kernel_addr, vm_size_t nbytes)
{
        uintptr_t kernel_addr_last;

        if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
                panic("%s(%p, %lu) - kaddr not in kernel", __func__,
                    (void *)kernel_addr, nbytes);
        }

        bool in_kva = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
            (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
        bool in_physmap = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= physmap_base) &&
            (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= physmap_end);

        if (__improbable(!(in_kva || in_physmap))) {
                panic("%s(%p, %lu) - kaddr not in kernel", __func__,
                    (void *)kernel_addr, nbytes);
        }

        zone_element_bounds_check(kernel_addr, nbytes);
}
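
/*
 * Illustrative consequence (assuming zone_element_bounds_check() panics on
 * ranges that escape their zone element): a copyin() into a zalloc'ed
 * buffer must fit entirely within that element, so a user-controlled
 * length that would spill into the neighboring element panics here rather
 * than corrupting the heap.
 */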

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread's pmap is the kernel's,
 * which is non-fatal for certain routines.
 */
static inline __attribute__((always_inline)) int
copy_validate(vm_map_t map, const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
        int ret;

        if (__improbable(nbytes > copysize_limit_panic)) {
                return EINVAL;
        }

        ret = copy_validate_user_addr(map, user_addr, nbytes);
        if (__improbable(ret)) {
                return ret;
        }

        if (flags & COPYIO_ATOMIC) {
                if (__improbable(user_addr & (nbytes - 1))) {
                        return EINVAL;
                }
        }

        if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
                copy_validate_kernel_addr(kernel_addr, nbytes);
#if KASAN
                /* For user copies, asan-check the kernel-side buffer */
                if (flags & COPYIO_IN) {
                        __asan_storeN(kernel_addr, nbytes);
                } else {
                        __asan_loadN(kernel_addr, nbytes);
                }
#endif /* KASAN */
        }

        if (is_kernel_to_kernel_copy(map->pmap)) {
                if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
                        return EFAULT;
                }
                return EXDEV;
        }

        return 0;
}
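
/*
 * Caller pattern, a sketch of what copyin()/copyout() below do: EXDEV is
 * not a failure; it signals a kernel-to-kernel copy that the caller opted
 * into via COPYIO_ALLOW_KERNEL_TO_KERNEL and may service with a plain bcopy:
 *
 *      result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes,
 *          COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
 *      if (result == EXDEV) {
 *              return copyin_kern(user_addr, kernel_addr, nbytes);
 *      }
 *
 * (The real callers also re-derive the address through
 * copy_ensure_address_space_spec() before the fallback copy.)
 */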

int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
        bcopy((const char *)(uintptr_t)user_addr, kernel_addr, nbytes);

        return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

        return 0;
}

int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
        vm_map_t map = current_thread()->map;
        user_addr_t guarded_user_addr;
        pmap_t pmap = map->pmap;
        int result;

        if (__improbable(nbytes == 0)) {
                return 0;
        }

        result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes,
            COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
        if (result == EXDEV) {
                guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
                return copyin_kern(guarded_user_addr, kernel_addr, nbytes);
        }
        if (__improbable(result)) {
                return result;
        }

        guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_READ, pmap);
        result = _bcopyin((const char *)guarded_user_addr, kernel_addr, nbytes);
        user_access_disable(USER_ACCESS_READ, pmap);
        return result;
}

/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
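
/*
 * Usage sketch (hypothetical caller; lock_uaddr and LOCKED_EXPECTED are
 * illustrative names, not kernel API): a futex-style wait reads the shared
 * user word as one aligned transaction, then blocks only while it still
 * holds the expected value:
 *
 *      uint32_t uval;
 *      int err = copyin_atomic32(lock_uaddr, &uval);
 *      if (err == 0 && uval == LOCKED_EXPECTED) {
 *              err = copyin_atomic32_wait_if_equals(lock_uaddr, uval);
 *      }
 */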
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
        vm_map_t map = current_thread()->map;
        pmap_t pmap = map->pmap;
        int result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, 4,
            COPYIO_IN | COPYIO_ATOMIC);
        if (__improbable(result)) {
                return result;
        }

        user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_READ, pmap);
        result = _copyin_atomic32((const char *)guarded_user_addr, kernel_addr);
        user_access_disable(USER_ACCESS_READ, pmap);
        return result;
}

int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
        vm_map_t map = current_thread()->map;
        pmap_t pmap = map->pmap;
        int result = copy_validate(map, user_addr, 0, 4,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }

        user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_READ, pmap);
        result = _copyin_atomic32_wait_if_equals((const char *)guarded_user_addr, value);
        user_access_disable(USER_ACCESS_READ, pmap);
        return result;
}

int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
        vm_map_t map = current_thread()->map;
        pmap_t pmap = map->pmap;
        int result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, 8,
            COPYIO_IN | COPYIO_ATOMIC);
        if (__improbable(result)) {
                return result;
        }

        user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_READ, pmap);
        result = _copyin_atomic64((const char *)guarded_user_addr, kernel_addr);
        user_access_disable(USER_ACCESS_READ, pmap);
        return result;
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
        vm_map_t map = current_thread()->map;
        pmap_t pmap = map->pmap;
        int result = copy_validate(map, user_addr, 0, 4,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }

        user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_WRITE, pmap);
        result = _copyout_atomic32(value, (const char *)guarded_user_addr);
        user_access_disable(USER_ACCESS_WRITE, pmap);
        return result;
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
        vm_map_t map = current_thread()->map;
        pmap_t pmap = map->pmap;
        int result = copy_validate(map, user_addr, 0, 8,
            COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
        if (__improbable(result)) {
                return result;
        }

        user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_WRITE, pmap);
        result = _copyout_atomic64(value, (const char *)guarded_user_addr);
        user_access_disable(USER_ACCESS_WRITE, pmap);
        return result;
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
        vm_map_t map = current_thread()->map;
        pmap_t pmap = map->pmap;
        int result;
        vm_size_t bytes_copied = 0;

        *lencopied = 0;
        if (__improbable(nbytes == 0)) {
                return ENAMETOOLONG;
        }

        result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
        if (__improbable(result)) {
                return result;
        }

        user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_READ, pmap);
        result = _bcopyinstr((const char *)guarded_user_addr, kernel_addr, nbytes,
            &bytes_copied);
        user_access_disable(USER_ACCESS_READ, pmap);
        if (result != EFAULT) {
                *lencopied = bytes_copied;
        }
        return result;
}
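
/*
 * Usage sketch (illustrative): copying a NUL-terminated string, e.g. a
 * path, from user space:
 *
 *      char path[MAXPATHLEN];
 *      vm_size_t len;
 *      int err = copyinstr(user_path, path, sizeof(path), &len);
 *
 * On success, len counts the bytes copied including the terminating NUL;
 * ENAMETOOLONG means the string (with its NUL) did not fit in the buffer.
 */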

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
        vm_map_t map = current_thread()->map;
        pmap_t pmap = map->pmap;
        int result;
        user_addr_t guarded_user_addr;

        if (nbytes == 0) {
                return 0;
        }

        result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes,
            COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
        if (result == EXDEV) {
                guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
                return copyout_kern(kernel_addr, guarded_user_addr, nbytes);
        }
        if (__improbable(result)) {
                return result;
        }

        guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
        user_access_enable(USER_ACCESS_WRITE, pmap);
        result = _bcopyout(kernel_addr, (char *)guarded_user_addr, nbytes);
        user_access_disable(USER_ACCESS_WRITE, pmap);
        return result;
}

int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
        vm_map_t map = current_thread()->map;

        if (__improbable(is_kernel_to_kernel_copy(map->pmap))) {
                return EFAULT;
        }

        return 0;
}

#if (DEBUG || DEVELOPMENT)
int
verify_write(const void *source, void *dst, size_t size)
{
        int rc;
        disable_preemption();
        rc = _bcopyout((const char *)source, (char *)dst, size);
        enable_preemption();
        return rc;
}
#endif /* DEBUG || DEVELOPMENT */