/*
 * Copyright (c) 2012-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <kern/thread.h>
#include <kern/zalloc_internal.h>
#include <sys/errno.h>
#include <vm/pmap.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_memtag.h>
#include <san/kasan.h>
#include <arm/pmap.h>
#include <arm64/speculation.h>

#undef copyin
#undef copyout

extern int _bcopyin(const user_addr_t src, char *dst, vm_size_t len);
extern int _bcopyinstr(const user_addr_t src, char *dst, vm_size_t max, vm_size_t *actual);
extern int _bcopyout(const char *src, user_addr_t dst, vm_size_t len);
extern int _copyin_atomic32(const user_addr_t src, uint32_t *dst);
extern int _copyin_atomic32_wait_if_equals(const user_addr_t src, uint32_t value);
extern int _copyin_atomic64(const user_addr_t src, uint64_t *dst);
extern int _copyout_atomic32(uint32_t u32, user_addr_t dst);
extern int _copyout_atomic64(uint64_t u64, user_addr_t dst);


extern int copyoutstr_prevalidate(const void *kaddr, user_addr_t uaddr, size_t len);

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;

/*!
 * @typedef copyio_flags_t
 *
 * @const COPYIO_IN
 * The copy is user -> kernel.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_OUT
 * The copy is kernel -> user.
 * One of COPYIO_IN or COPYIO_OUT should always be specified.
 *
 * @const COPYIO_ALLOW_KERNEL_TO_KERNEL
 * The "user_address" is allowed to be in the VA space of the kernel.
 *
 * @const COPYIO_VALIDATE_USER_ONLY
 * There isn't really a kernel address used, and only the user address
 * needs to be validated.
 *
 * @const COPYIO_ATOMIC
 * The copyio operation is atomic; ensure that it is properly aligned.
 */
__options_decl(copyio_flags_t, uint32_t, {
	COPYIO_IN                       = 0x0001,
	COPYIO_OUT                      = 0x0002,
	COPYIO_ALLOW_KERNEL_TO_KERNEL   = 0x0004,
	COPYIO_VALIDATE_USER_ONLY       = 0x0008,
	COPYIO_ATOMIC                   = 0x0010,
});
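
/*
 * Summary (editorial addition, not part of the original source): the entry
 * points later in this file pass the following flag combinations to
 * copy_validate():
 *
 *	copyin(), copyout():    COPYIO_IN / COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL
 *	copyin_atomic32(), copyin_atomic64():
 *	                        COPYIO_IN | COPYIO_ATOMIC
 *	copyout_atomic32(), copyout_atomic64(), copyin_atomic32_wait_if_equals():
 *	                        COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY
 *	copyinstr():            COPYIO_IN
 */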

typedef enum {
	USER_ACCESS_READ,
	USER_ACCESS_WRITE
} user_access_direction_t;


static inline void
user_access_enable(__unused user_access_direction_t user_access_direction, pmap_t __unused pmap)
{
#if __ARM_PAN_AVAILABLE__
	assert(__builtin_arm_rsr("pan") != 0);
	__builtin_arm_wsr("pan", 0);
#endif  /* __ARM_PAN_AVAILABLE__ */

}

static inline void
user_access_disable(__unused user_access_direction_t user_access_direction, pmap_t __unused pmap)
{
#if __ARM_PAN_AVAILABLE__
	__builtin_arm_wsr("pan", 1);
#endif  /* __ARM_PAN_AVAILABLE__ */

}


#define WRAP_COPYIO_PAN(_dir, _map, _op)                                    \
	({                                                                      \
	        int _ret;                                                       \
	        user_access_enable(_dir, (_map)->pmap);                         \
	        _ret = _op;                                                     \
	        user_access_disable(_dir, (_map)->pmap);                        \
	        _ret;                                                           \
	})

#define WRAP_COPYIO(_dir, _map, _op) WRAP_COPYIO_PAN(_dir, _map, _op)
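
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a call such as
 *
 *	WRAP_COPYIO(USER_ACCESS_READ, map, _bcopyin(uaddr, kaddr, nbytes))
 *
 * (where uaddr, kaddr and nbytes are placeholder arguments) expands to roughly
 * the following statement expression, so PAN is only disabled for the duration
 * of the copy routine itself:
 *
 *	({
 *	        int _ret;
 *	        user_access_enable(USER_ACCESS_READ, map->pmap);
 *	        _ret = _bcopyin(uaddr, kaddr, nbytes);
 *	        user_access_disable(USER_ACCESS_READ, map->pmap);
 *	        _ret;
 *	})
 */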

/*
 * Copy sizes bigger than this value will cause a kernel panic.
 *
 * Yes, this is an arbitrary fixed limit, but it's almost certainly
 * a programming error to be copying more than this amount between
 * user and wired kernel memory in a single invocation on this
 * platform.
 */
const int copysize_limit_panic = (64 * 1024 * 1024);

static inline bool
is_kernel_to_kernel_copy(pmap_t pmap)
{
	return pmap == kernel_pmap;
}

/**
 * In order to prevent copies from speculatively targeting the wrong address
 * space, force kernel-to-kernel copies to target the kernel address space
 * (TTBR1) and non-kernel copies to target the user address space (TTBR0).
 *
 * This should have no non-speculative effect as any address which passes
 * validation should already have bit 55 (the address space select bit) set
 * appropriately. If the address would change (i.e. addr is invalid for the copy
 * type), this function panics and so it must only be called after all other
 * verification has completed.
 */
static user_addr_t
copy_ensure_address_space_spec(vm_map_t map, const user_addr_t addr)
{
	user_addr_t new_addr = 0;
	user_addr_t kaddr = addr | BIT(55);
	user_addr_t uaddr = addr & (~BIT(55));

	/*
	 * new_addr = is_kernel_to_kernel_copy(...) ? kaddr : uaddr
	 *
	 * The check must be performed explicitly as the compiler lowering of the
	 * actual call may be subject to prediction.
	 */
	SPECULATION_GUARD_SELECT_XXX(
		/* out */ new_addr,
		/* cmp_1  */ map->pmap, /* cmp_2 */ kernel_pmap,
		/* cc   */ "eq", /* sel_1 */ kaddr,
		/* n_cc */ "ne", /* sel_2 */ uaddr);

	/*
	 * Since we're modifying the address past the validation point, let's be
	 * sure we didn't erroneously change address spaces.
	 *
	 * We have to be careful to hide this check from the optimizer, because if
	 * it learns that new_addr == addr, then it is free to (and, indeed, does)
	 * use addr everywhere that new_addr is referenced, which breaks our
	 * hardening.
	 */
	user_addr_t new_addr_opt_hidden = new_addr;
	__compiler_materialize_and_prevent_reordering_on(new_addr_opt_hidden);
	if (new_addr_opt_hidden != addr) {
		panic("copy_ensure_address_space_spec changed address: 0x%llx->0x%llx",
		    addr, new_addr);
	}

	return new_addr;
}
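
/*
 * Worked example (editorial addition, not part of the original source): for a
 * hypothetical user address 0x0000000170004000, kaddr above would be
 * 0x0080000170004000 (bit 55 set) and uaddr is the address unchanged. A
 * non-kernel pmap selects uaddr, which equals the validated address, so the
 * panic check passes; a kernel-to-kernel copy of a kernel VA (bit 55 already
 * set) likewise selects kaddr without changing the address.
 */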

static int
copy_validate_user_addr(vm_map_t map, const user_addr_t user_addr, vm_size_t nbytes)
{
	user_addr_t canonicalized_user_addr = user_addr;
	user_addr_t user_addr_last;
	bool is_kernel_to_kernel = is_kernel_to_kernel_copy(map->pmap);


	if (__improbable(canonicalized_user_addr < vm_map_min(map) ||
	    os_add_overflow(canonicalized_user_addr, nbytes, &user_addr_last) ||
	    user_addr_last > vm_map_max(map))) {
		return EFAULT;
	}


	if (!is_kernel_to_kernel) {
		if (__improbable(canonicalized_user_addr & ARM_TBI_USER_MASK)) {
			return EINVAL;
		}
	}

	return 0;
}

static void
copy_validate_kernel_addr(uintptr_t kernel_addr, vm_size_t nbytes)
{
	uintptr_t kernel_addr_last;

	if (__improbable(os_add_overflow(kernel_addr, nbytes, &kernel_addr_last))) {
		panic("%s(%p, %lu) - kaddr not in kernel", __func__,
		    (void *)kernel_addr, nbytes);
	}

	bool in_kva = (VM_KERNEL_STRIP_PTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
	bool in_physmap = (VM_KERNEL_STRIP_PTR(kernel_addr) >= physmap_base) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= physmap_end);

	if (__improbable(!(in_kva || in_physmap))) {
		panic("%s(%p, %lu) - kaddr not in kernel", __func__,
		    (void *)kernel_addr, nbytes);
	}

	zone_element_bounds_check(kernel_addr, nbytes);
}

/*
 * Validate the arguments to copy{in,out} on this platform.
 *
 * Returns EXDEV when the current thread pmap is the kernel's,
 * which is non-fatal for certain routines.
 */
static inline __attribute__((always_inline)) int
copy_validate(vm_map_t map, const user_addr_t user_addr, uintptr_t kernel_addr,
    vm_size_t nbytes, copyio_flags_t flags)
{
	int ret;

	if (__improbable(nbytes > copysize_limit_panic)) {
		return EINVAL;
	}

	ret = copy_validate_user_addr(map, user_addr, nbytes);
	if (__improbable(ret)) {
		return ret;
	}

	if (flags & COPYIO_ATOMIC) {
		if (__improbable(user_addr & (nbytes - 1))) {
			return EINVAL;
		}
	}

	if ((flags & COPYIO_VALIDATE_USER_ONLY) == 0) {
		copy_validate_kernel_addr(kernel_addr, nbytes);
#if KASAN
		/* For user copies, asan-check the kernel-side buffer */
		if (flags & COPYIO_IN) {
			__asan_storeN(kernel_addr, nbytes);
		} else {
			__asan_loadN(kernel_addr, nbytes);
		}
#endif
	}

	if (is_kernel_to_kernel_copy(map->pmap)) {
		if (__improbable((flags & COPYIO_ALLOW_KERNEL_TO_KERNEL) == 0)) {
			return EFAULT;
		}
		return EXDEV;
	}

	return 0;
}

/*
 * Kernel-to-kernel variants, used when copy_validate() reports EXDEV: the
 * "user" address has already been validated against the kernel map, so a
 * plain bcopy suffices.
 */
int
copyin_kern(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes)
{
	bcopy((const char*)(uintptr_t)user_addr, kernel_addr, nbytes);

	return 0;
}

int
copyout_kern(const char *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	bcopy(kernel_addr, (char *)(uintptr_t)user_addr, nbytes);

	return 0;
}

int
copyin(const user_addr_t user_addr, void *kernel_addr, vm_size_t nbytes)
{
	vm_map_t map = current_thread()->map;
	user_addr_t guarded_user_addr;
	int result;

	if (__improbable(nbytes == 0)) {
		return 0;
	}

	result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_IN | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
		return copyin_kern(guarded_user_addr, kernel_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}

	guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _bcopyin(guarded_user_addr, kernel_addr, nbytes));
}

/*
 * copy{in,out}_atomic{32,64}
 * Read or store an aligned value from userspace as a single memory transaction.
 * These functions support userspace synchronization features.
 */
int
copyin_atomic32(const user_addr_t user_addr, uint32_t *kernel_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, 4,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _copyin_atomic32(guarded_user_addr, kernel_addr));
}
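
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a caller implementing a userspace-synchronization primitive might snapshot
 * a 32-bit word atomically and only block while it still holds the expected
 * value; `uaddr` and `expected` are hypothetical.
 *
 *	uint32_t observed;
 *	int err = copyin_atomic32(uaddr, &observed);
 *	if (err == 0 && observed == expected) {
 *	        err = copyin_atomic32_wait_if_equals(uaddr, expected);
 *	}
 */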


int
copyin_atomic32_wait_if_equals(const user_addr_t user_addr, uint32_t value)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _copyin_atomic32_wait_if_equals(guarded_user_addr, value));
}

int
copyin_atomic64(const user_addr_t user_addr, uint64_t *kernel_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, 8,
	    COPYIO_IN | COPYIO_ATOMIC);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_READ, map,
	           _copyin_atomic64(guarded_user_addr, kernel_addr));
}

int
copyout_atomic32(uint32_t value, user_addr_t user_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, 0, 4,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_WRITE, map,
	           _copyout_atomic32(value, guarded_user_addr));
}

int
copyout_atomic64(uint64_t value, user_addr_t user_addr)
{
	vm_map_t map = current_thread()->map;
	int result = copy_validate(map, user_addr, 0, 8,
	    COPYIO_OUT | COPYIO_ATOMIC | COPYIO_VALIDATE_USER_ONLY);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_WRITE, map,
	           _copyout_atomic64(value, guarded_user_addr));
}

int
copyinstr(const user_addr_t user_addr, char *kernel_addr, vm_size_t nbytes, vm_size_t *lencopied)
{
	vm_map_t map = current_thread()->map;
	int result;
	vm_size_t bytes_copied = 0;

	*lencopied = 0;
	if (__improbable(nbytes == 0)) {
		return ENAMETOOLONG;
	}

	result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes, COPYIO_IN);
	if (__improbable(result)) {
		return result;
	}

	user_addr_t guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	result = WRAP_COPYIO(USER_ACCESS_READ, map,
	    _bcopyinstr(guarded_user_addr, kernel_addr, nbytes, &bytes_copied));

	if (result != EFAULT) {
		*lencopied = bytes_copied;
	}
	return result;
}
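
/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a typical copyinstr() caller copies a NUL-terminated string into a
 * fixed-size kernel buffer and distinguishes truncation from other errors;
 * `upath` is hypothetical.
 *
 *	char path[MAXPATHLEN];
 *	vm_size_t copied = 0;
 *	int err = copyinstr(upath, path, sizeof(path), &copied);
 *	if (err == ENAMETOOLONG) {
 *	        // the string (including its NUL terminator) did not fit
 *	} else if (err == 0) {
 *	        // `copied` bytes, counting the terminating NUL, were written
 *	}
 */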

int
copyout(const void *kernel_addr, user_addr_t user_addr, vm_size_t nbytes)
{
	vm_map_t map = current_thread()->map;
	int result;
	user_addr_t guarded_user_addr;

	if (nbytes == 0) {
		return 0;
	}

	result = copy_validate(map, user_addr, (uintptr_t)kernel_addr, nbytes,
	    COPYIO_OUT | COPYIO_ALLOW_KERNEL_TO_KERNEL);
	if (result == EXDEV) {
		guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);
		return copyout_kern(kernel_addr, guarded_user_addr, nbytes);
	}
	if (__improbable(result)) {
		return result;
	}

	guarded_user_addr = copy_ensure_address_space_spec(map, user_addr);

	return WRAP_COPYIO(USER_ACCESS_WRITE, map,
	           _bcopyout(kernel_addr, guarded_user_addr, nbytes));
}

int
copyoutstr_prevalidate(const void *__unused kaddr, user_addr_t __unused uaddr, size_t __unused len)
{
	vm_map_t map = current_thread()->map;

	if (__improbable(is_kernel_to_kernel_copy(map->pmap))) {
		return EFAULT;
	}

	return 0;
}

#if (DEBUG || DEVELOPMENT)
/*
 * Test helper (DEBUG/DEVELOPMENT only): calls _bcopyout() directly with
 * preemption disabled and returns its result.
 */
int
verify_write(const void *source, void *dst, size_t size)
{
	int rc;
	disable_preemption();
	rc = _bcopyout((const char*)source, (user_addr_t)dst, size);
	enable_preemption();
	return rc;
}
#endif