/* xref: /xnu-12377.61.12/osfmk/vm/vm_far.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8) */
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #pragma once
30 #ifdef KERNEL_PRIVATE
31 #include <stdint.h>
32 #include <stddef.h>
33 #include <sys/cdefs.h>
34 #include <stdbool.h>
35 #include <kern/panic_call.h>
36 
37 #ifdef __arm64__
38 #include <arm64/speculation.h>
39 #endif /* __arm64__ */
40 
/*
 * The VM_FAR poison is set in a pointer's top 16 bits when its offset exceeds
 * the VM_FAR bounds.  Replacing the top bits makes the pointer non-canonical,
 * so a later dereference faults, and the well-known 0x2BAD code makes such
 * faults easy to recognize.
 */
#define VM_FAR_POISON_VALUE (0x2BADULL)
#define VM_FAR_POISON_SHIFT (48)
/* Mask covering the 16 pointer bits that poisoning replaces. */
#define VM_FAR_POISON_MASK (0xFFFFULL << VM_FAR_POISON_SHIFT)
/* The poison code shifted into its final position in the pointer. */
#define VM_FAR_POISON_BITS (VM_FAR_POISON_VALUE << VM_FAR_POISON_SHIFT)

/*
 * Annotation for VM_FAR accessor functions; expands to nothing here.
 * NOTE(review): its intended semantics are not visible in this header —
 * confirm against the call sites that use it.
 */
#define VM_FAR_ACCESSOR
51 
52 __pure2
53 __attribute__((always_inline))
54 static inline void *
vm_far_add_ptr_internal(void * ptr,uint64_t idx,size_t elem_size,bool __unused idx_small)55 vm_far_add_ptr_internal(void *ptr, uint64_t idx, size_t elem_size,
56     bool __unused idx_small)
57 {
58 
59 	uintptr_t ptr_i = (uintptr_t)(ptr);
60 	uintptr_t new_ptr_i = ptr_i + (idx * elem_size);
61 
62 #if HAS_MTE
63 	/*
64 	 * Since pointer math through integers doesn't get CPA, emulate it by hand.
65 	 * Like the compiler, however, we can elide the check when the tag won't
66 	 * overflow in a useful way (such as when the index is 32-bits or smaller)
67 	 */
68 	if (!__builtin_constant_p(idx_small) || !idx_small) {
69 		if (__improbable((ptr_i ^ new_ptr_i) & (0xFFC0000000000000ULL))) {
70 			/* Poison the top 16-bits in the same way the compiler does */
71 			new_ptr_i &= ~(0xFFFFULL << 48);
72 			new_ptr_i |= 0x0080ULL << 48;
73 		}
74 	}
75 #endif /* HAS_MTE */
76 
77 	return __unsafe_forge_single(void *, new_ptr_i);
78 }
79 
/*
 * Compute &ptr[idx] (elements of elem_size bytes) with a fatal bounds check:
 * the kernel panics unless idx < count.  On arm64 the check is performed
 * through a speculation guard so a mispredicted branch cannot forward the
 * out-of-bounds pointer (the ZEROING guard presumably substitutes zero under
 * misprediction — semantics live in arm64/speculation.h).
 *
 * Not __pure2: the panic on failure is a side effect.
 */
__attribute__((always_inline))
static inline void *
vm_far_add_ptr_bounded_fatal_unsigned_internal(void *ptr, uint64_t idx,
    size_t count, size_t elem_size, bool __unused idx_small)
{
	/* Candidate pointer; only returned if the guard below passes. */
	void *__single new_ptr = vm_far_add_ptr_internal(
		ptr, idx, elem_size,
		/*
		 * Since we're bounds checking the index, we can support small index
		 * optimizations even when the index is large.
		 */
		/* idx_small */ false);

	bool guarded_ptr_valid;
	void *__single guarded_ptr;
#if __arm64__
	/* Guard passes if idx < count ("LO" = unsigned lower) */
	SPECULATION_GUARD_ZEROING_XXX(
		/* out */ guarded_ptr, /* out_valid */ guarded_ptr_valid,
		/* value */ new_ptr,
		/* cmp1 */ idx, /* cmp2 */ count,
		/* cc */ "LO");
#else
	/*
	 * We don't support guards on this target, so just perform a normal bounds
	 * check.
	 */
	guarded_ptr_valid = idx < count;
	guarded_ptr = new_ptr;
#endif /* __arm64__ */

	/* Architecturally out of bounds: fatal. */
	if (__improbable(!guarded_ptr_valid)) {
		panic("vm_far bounds check failed idx=%llu/count=%zu", idx, count);
	}

	return guarded_ptr;
}
117 
/*
 * Compute &ptr[idx] (elements of elem_size bytes) with a poisoning bounds
 * check: when idx >= count the returned pointer has its top 16 bits replaced
 * with the VM_FAR poison code, so any later dereference faults in a
 * recognizable way instead of accessing out-of-bounds memory.  On arm64 the
 * selection between the real and poisoned pointer is done through a
 * speculation guard so a mispredicted branch cannot observe the unpoisoned
 * out-of-bounds pointer.
 */
__pure2
__attribute__((always_inline))
static inline void *
vm_far_add_ptr_bounded_poison_unsigned_internal(void *ptr, uint64_t idx,
    size_t count, size_t elem_size, bool __unused idx_small)
{
	/* Candidate pointer; poisoned below if idx is out of bounds. */
	void *__single new_ptr = vm_far_add_ptr_internal(
		ptr, idx, elem_size,
		/*
		 * Since we're bounds checking the index, we can support small index
		 * optimizations even when the index is large.
		 */
		/* idx_small */ false);

	void *__single guarded_ptr;

	/*
	 * Poison the top 16-bits with a well-known code so that later dereferences
	 * of the poisoned pointer are easy to identify.
	 */
	uintptr_t poisoned_ptr_i = (uintptr_t)new_ptr;
	poisoned_ptr_i &= ~VM_FAR_POISON_MASK;
	poisoned_ptr_i |= VM_FAR_POISON_BITS;

#if __arm64__
	/* idx < count ("LO") selects new_ptr; idx >= count ("HS") selects poison */
	SPECULATION_GUARD_SELECT_XXX(
		/* out  */ guarded_ptr,
		/* cmp1 */ idx, /* cmp2 */ count,
		/* cc   */ "LO", /* value_cc */ (uintptr_t)new_ptr,
		/* n_cc */ "HS", /* value_n_cc */ poisoned_ptr_i);
#else
	/*
	 * We don't support guards on this target, so just perform a normal bounds
	 * check.
	 */
	if (__probable(idx < count)) {
		guarded_ptr = new_ptr;
	} else {
		guarded_ptr = __unsafe_forge_single(void *, poisoned_ptr_i);
	}
#endif /* __arm64__ */

	return guarded_ptr;
}
162 
/**
 * Evaluate to &PTR[IDX] without enforcing VM_FAR.
 *
 * No bounds checking is applied to IDX in this variant; the final argument
 * merely hints whether the index type is 32 bits or narrower.
 */
#define VM_FAR_ADD_PTR_UNBOUNDED(ptr, idx) \
	((__typeof__((ptr))) vm_far_add_ptr_internal( \
	        (ptr), (idx), sizeof(*(ptr)), sizeof((idx)) <= 4))
171 
/**
 * Evaluate to &PTR[IDX] without enforcing VM_FAR.
 *
 * The unsigned IDX is bounds checked against COUNT; any IDX that is not
 * strictly below COUNT triggers a panic.
 */
#define VM_FAR_ADD_PTR_BOUNDED_FATAL_UNSIGNED(ptr, idx, count) \
	((__typeof__((ptr))) vm_far_add_ptr_bounded_fatal_unsigned_internal( \
	        (ptr), (idx), (count), sizeof(*(ptr)), \
	        sizeof((idx)) <= 4))
181 
/**
 * Evaluate to &PTR[IDX] without enforcing VM_FAR.
 *
 * The unsigned IDX is bounds checked against COUNT; any IDX that is not
 * strictly below COUNT yields a poisoned pointer whose dereference faults.
 */
#define VM_FAR_ADD_PTR_BOUNDED_POISON_UNSIGNED(ptr, idx, count) \
	((__typeof__((ptr))) vm_far_add_ptr_bounded_poison_unsigned_internal( \
	        (ptr), (idx), (count), sizeof(*(ptr)), \
	        sizeof((idx)) <= 4))
192 
193 #endif /* KERNEL_PRIVATE */
194