1 /*
2 * Copyright (c) 2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #pragma once
30 #ifdef KERNEL_PRIVATE
31 #include <stdint.h>
32 #include <stddef.h>
33 #include <sys/cdefs.h>
34 #include <stdbool.h>
35 #include <kern/panic_call.h>
36
37 #ifdef __arm64__
38 #include <arm64/speculation.h>
39 #endif /* __arm64__ */
40
/*
 * The VM_FAR poison is set in a pointer's top 16 bits when its offset exceeds
 * the VM_FAR bounds.  The well-known 0x2BAD pattern makes a fault on a
 * poisoned pointer easy to attribute to a VM_FAR bounds violation (see the
 * poison variant below, which installs these bits on out-of-bounds indices).
 */
#define VM_FAR_POISON_VALUE (0x2BADULL)
#define VM_FAR_POISON_SHIFT (48)
#define VM_FAR_POISON_MASK (0xFFFFULL << VM_FAR_POISON_SHIFT)
#define VM_FAR_POISON_BITS (VM_FAR_POISON_VALUE << VM_FAR_POISON_SHIFT)
49
50 #define VM_FAR_ACCESSOR
51
52 __pure2
53 __attribute__((always_inline))
54 static inline void *
vm_far_add_ptr_internal(void * ptr,uint64_t idx,size_t elem_size,bool __unused idx_small)55 vm_far_add_ptr_internal(void *ptr, uint64_t idx, size_t elem_size,
56 bool __unused idx_small)
57 {
58
59 uintptr_t ptr_i = (uintptr_t)(ptr);
60 uintptr_t new_ptr_i = ptr_i + (idx * elem_size);
61
62
63 return __unsafe_forge_single(void *, new_ptr_i);
64 }
65
/*
 * Compute &ptr[idx], panicking unless the unsigned index is strictly below
 * count.
 *
 * On arm64 the comparison runs under a speculation guard (see
 * <arm64/speculation.h>) so the out-of-bounds pointer is zeroed rather than
 * merely branched around; elsewhere a plain comparison is used.  Not __pure2:
 * the panic path is a side effect.
 */
__attribute__((always_inline))
static inline void *
vm_far_add_ptr_bounded_fatal_unsigned_internal(void *ptr, uint64_t idx,
    size_t count, size_t elem_size, bool __unused idx_small)
{
	void *__single new_ptr = vm_far_add_ptr_internal(
		ptr, idx, elem_size,
		/*
		 * Since we're bounds checking the index, we can support small index
		 * optimizations even when the index is large.
		 */
		/* idx_small */ false);

	bool guarded_ptr_valid;
	void *__single guarded_ptr;
#if __arm64__
	/* Guard passes if idx < count ("LO" = unsigned lower). */
	SPECULATION_GUARD_ZEROING_XXX(
		/* out */ guarded_ptr, /* out_valid */ guarded_ptr_valid,
		/* value */ new_ptr,
		/* cmp1 */ idx, /* cmp2 */ count,
		/* cc */ "LO");
#else
	/*
	 * We don't support guards on this target, so just perform a normal bounds
	 * check.
	 */
	guarded_ptr_valid = idx < count;
	guarded_ptr = new_ptr;
#endif /* __arm64__ */

	if (__improbable(!guarded_ptr_valid)) {
		panic("vm_far bounds check failed idx=%llu/count=%zu", idx, count);
	}

	return guarded_ptr;
}
103
/*
 * Compute &ptr[idx]; when the unsigned index is not strictly below count,
 * return a pointer whose top 16 bits carry VM_FAR_POISON_BITS instead of
 * panicking, deferring the failure to the eventual dereference.
 */
__pure2
__attribute__((always_inline))
static inline void *
vm_far_add_ptr_bounded_poison_unsigned_internal(void *ptr, uint64_t idx,
    size_t count, size_t elem_size, bool __unused idx_small)
{
	void *__single new_ptr = vm_far_add_ptr_internal(
		ptr, idx, elem_size,
		/*
		 * Since we're bounds checking the index, we can support small index
		 * optimizations even when the index is large.
		 */
		/* idx_small */ false);

	void *__single guarded_ptr;

	/*
	 * Poison the top 16-bits with a well-known code so that later dereferences
	 * of the poisoned pointer are easy to identify.
	 */
	uintptr_t poisoned_ptr_i = (uintptr_t)new_ptr;
	poisoned_ptr_i &= ~VM_FAR_POISON_MASK;
	poisoned_ptr_i |= VM_FAR_POISON_BITS;

#if __arm64__
	/*
	 * Speculation-guarded select: "LO" (idx < count, unsigned) yields the
	 * real pointer, "HS" (idx >= count) yields the poisoned value, without
	 * a predictable branch.
	 */
	SPECULATION_GUARD_SELECT_XXX(
		/* out */ guarded_ptr,
		/* cmp1 */ idx, /* cmp2 */ count,
		/* cc */ "LO", /* value_cc */ (uintptr_t)new_ptr,
		/* n_cc */ "HS", /* value_n_cc */ poisoned_ptr_i);
#else
	/*
	 * We don't support guards on this target, so just perform a normal bounds
	 * check.
	 */
	if (__probable(idx < count)) {
		guarded_ptr = new_ptr;
	} else {
		guarded_ptr = __unsafe_forge_single(void *, poisoned_ptr_i);
	}
#endif /* __arm64__ */

	return guarded_ptr;
}
148
/**
 * Compute &PTR[IDX] without enforcing VM_FAR.
 *
 * In this variant, IDX will not be bounds checked.  The result is cast back
 * to PTR's type; the final argument hints whether IDX's type is at most
 * 32 bits wide (currently unused by the implementation).
 */
#define VM_FAR_ADD_PTR_UNBOUNDED(ptr, idx) \
	((__typeof__((ptr))) vm_far_add_ptr_internal( \
	(ptr), (idx), sizeof(__typeof__(*(ptr))), sizeof((idx)) <= 4))
157
/**
 * Compute &PTR[IDX] without enforcing VM_FAR.
 *
 * If the unsigned IDX value is not strictly less than COUNT (i.e. IDX >=
 * COUNT), trigger a panic.
 */
#define VM_FAR_ADD_PTR_BOUNDED_FATAL_UNSIGNED(ptr, idx, count) \
	((__typeof__((ptr))) vm_far_add_ptr_bounded_fatal_unsigned_internal( \
	(ptr), (idx), (count), sizeof(__typeof__(*(ptr))), \
	sizeof((idx)) <= 4))
167
/**
 * Compute &PTR[IDX] without enforcing VM_FAR.
 *
 * If the unsigned IDX value is not strictly less than COUNT (i.e. IDX >=
 * COUNT), poison the pointer such that attempting to dereference it will
 * fault.
 */
#define VM_FAR_ADD_PTR_BOUNDED_POISON_UNSIGNED(ptr, idx, count) \
	((__typeof__((ptr))) vm_far_add_ptr_bounded_poison_unsigned_internal( \
	(ptr), (idx), (count), sizeof(__typeof__(*(ptr))), \
	sizeof((idx)) <= 4))
178
179 #endif /* KERNEL_PRIVATE */
180