1 /*
2 * Copyright (c) 2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * vm_sanitize_error_compat.h
31 * Error code rewriting functions to preserve historical error values.
32 */
33
34 #include <vm/vm_map_xnu.h>
35 #include <vm/vm_sanitize_internal.h>
36
/* Don't use errno values in this file. Everything here should be kern_return. */
#undef EINVAL
#define EINVAL DONT_USE_EINVAL
#undef EAGAIN
#define EAGAIN DONT_USE_EAGAIN
/*
 * The POSIX errno macro is spelled EACCES; guard that spelling so it
 * cannot be used here. Keep the common EACCESS misspelling guarded too
 * in case some header defines it.
 */
#undef EACCES
#define EACCES DONT_USE_EACCES
#undef EACCESS
#define EACCESS DONT_USE_EACCESS
#undef ENOMEM
#define ENOMEM DONT_USE_ENOMEM
#undef EPERM
#define EPERM DONT_USE_EPERM
48
/*
 * KERN_SUCCESS is ambiguous here. Don't use it. Instead:
 * VM_ERR_RETURN_NOW: "stop the calling function now and return success"
 * VM_SANITIZE_FALLTHROUGH: "don't stop the calling function"
 * These values are intended for vm_sanitize_get_kr().
 */
#undef KERN_SUCCESS
#define KERN_SUCCESS DONT_USE_KERN_SUCCESS
/* 0 is KERN_SUCCESS's numeric value; this name makes the intent explicit. */
#define VM_SANITIZE_FALLTHROUGH 0
58
59 /* Don't rewrite this result or telemeter anything. */
60 static inline __result_use_check
61 vm_sanitize_compat_rewrite_t
vm_sanitize_make_policy_dont_rewrite_err(kern_return_t err)62 vm_sanitize_make_policy_dont_rewrite_err(kern_return_t err)
63 {
64 return (vm_sanitize_compat_rewrite_t) {
65 .compat_kr = err,
66 .should_rewrite = false,
67 .should_telemeter = false
68 };
69 }
70
71 /*
72 * Telemeter this result. Don't rewrite it.
73 * compat_kr is advisory only: telemetry reports it as the value
74 * we might return in the future, but we don't use it now.
75 */
76 static inline __result_use_check
77 vm_sanitize_compat_rewrite_t
vm_sanitize_make_policy_telemeter_dont_rewrite_err(kern_return_t err)78 vm_sanitize_make_policy_telemeter_dont_rewrite_err(kern_return_t err)
79 {
80 return (vm_sanitize_compat_rewrite_t) {
81 .compat_kr = err,
82 .should_rewrite = false,
83 .should_telemeter = true
84 };
85 }
86
87 /* Rewrite and telemeter this result. */
88 static inline __result_use_check
89 vm_sanitize_compat_rewrite_t
vm_sanitize_make_policy_telemeter_and_rewrite_err(kern_return_t err)90 vm_sanitize_make_policy_telemeter_and_rewrite_err(kern_return_t err)
91 {
92 return (vm_sanitize_compat_rewrite_t) {
93 .compat_kr = err,
94 .should_rewrite = true,
95 .should_telemeter = true
96 };
97 }
98
99
100 /*
101 * Similar to vm_map_range_overflows()
102 * but size zero is not unconditionally allowed
103 */
104 static bool __unused
vm_sanitize_range_overflows_strict_zero(vm_address_t start,vm_size_t size,vm_offset_t pgmask)105 vm_sanitize_range_overflows_strict_zero(vm_address_t start, vm_size_t size, vm_offset_t pgmask)
106 {
107 vm_address_t sum;
108 if (__builtin_add_overflow(start, size, &sum)) {
109 return true;
110 }
111
112 vm_address_t aligned_start = vm_map_trunc_page_mask(start, pgmask);
113 vm_address_t aligned_end = vm_map_round_page_mask(start + size, pgmask);
114 if (aligned_end <= aligned_start) {
115 return true;
116 }
117
118 return false;
119 }
120
121 /*
122 * Similar to vm_map_range_overflows()
123 * including unconditional acceptance of zero
124 */
125 static bool __unused
vm_sanitize_range_overflows_allow_zero(vm_address_t start,vm_size_t size,vm_offset_t pgmask)126 vm_sanitize_range_overflows_allow_zero(vm_address_t start, vm_size_t size, vm_offset_t pgmask)
127 {
128 if (size == 0) {
129 return false;
130 }
131
132 vm_address_t sum;
133 if (__builtin_add_overflow(start, size, &sum)) {
134 return true;
135 }
136
137 vm_address_t aligned_start = vm_map_trunc_page_mask(start, pgmask);
138 vm_address_t aligned_end = vm_map_round_page_mask(start + size, pgmask);
139 if (aligned_end <= aligned_start) {
140 return true;
141 }
142
143 return false;
144 }
145
146
147 /*
148 * Error rewriting functions and the sanitization caller description
149 * for each VM API.
150 */
151
/* memory entry */

/*
 * No compat hooks are registered for these callers: their historical
 * error values need no rewriting.
 */
VM_SANITIZE_DEFINE_CALLER(MACH_MAKE_MEMORY_ENTRY);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_ENTRY_PAGE_OP);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_ENTRY_RANGE_OP);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_ENTRY_MAP_SIZE);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_OBJECT_MEMORY_ENTRY);
159
160 /* alloc/dealloc */
161
162 static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_allocate_fixed(kern_return_t initial_kr,vm_address_t start,vm_size_t size,vm_offset_t pgmask)163 vm_sanitize_err_compat_addr_size_vm_allocate_fixed(
164 kern_return_t initial_kr,
165 vm_address_t start,
166 vm_size_t size,
167 vm_offset_t pgmask)
168 {
169 /*
170 * vm_allocate(VM_FLAGS_FIXED) historically returned
171 * KERN_INVALID_ADDRESS instead of KERN_INVALID_ARGUMENT
172 * for some invalid input ranges.
173 */
174 if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
175 vm_map_round_page_mask(size, pgmask) != 0) {
176 return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
177 }
178 return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
179 }
180
/* VM_FLAGS_FIXED allocations need the KERN_INVALID_ADDRESS compat above. */
VM_SANITIZE_DEFINE_CALLER(VM_ALLOCATE_FIXED,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_allocate_fixed);

VM_SANITIZE_DEFINE_CALLER(VM_ALLOCATE_ANYWHERE, /* no error compat needed */);
185
186 static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_deallocate(kern_return_t initial_kr,vm_address_t start,vm_size_t size,vm_offset_t pgmask)187 vm_sanitize_err_compat_addr_size_vm_deallocate(
188 kern_return_t initial_kr,
189 vm_address_t start,
190 vm_size_t size,
191 vm_offset_t pgmask)
192 {
193 /*
194 * vm_deallocate historically did nothing and
195 * returned success for some invalid input ranges.
196 * We currently telemeter this case but
197 * return an error without rewriting it to success.
198 * If we did rewrite it, we would use VM_ERR_RETURN_NOW to return
199 * success immediately and bypass the rest of vm_deallocate.
200 */
201 if (vm_sanitize_range_overflows_strict_zero(start, size, pgmask) &&
202 start + size >= start) {
203 return vm_sanitize_make_policy_telemeter_dont_rewrite_err(VM_ERR_RETURN_NOW);
204 }
205 return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
206 }
207
/* vm_deallocate needs the degenerate-range compat above; munmap does not. */
VM_SANITIZE_DEFINE_CALLER(VM_DEALLOCATE,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_deallocate);

VM_SANITIZE_DEFINE_CALLER(MUNMAP, /* no error compat needed */);
212
213 /* map/remap */
214
215 static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_cur_and_max_prots_vm_map(kern_return_t initial_kr,vm_prot_t * cur_prot_inout,vm_prot_t * max_prot_inout,vm_prot_t extra_mask __unused)216 vm_sanitize_err_compat_cur_and_max_prots_vm_map(
217 kern_return_t initial_kr,
218 vm_prot_t *cur_prot_inout,
219 vm_prot_t *max_prot_inout,
220 vm_prot_t extra_mask __unused)
221 {
222 /*
223 * Invalid but historically accepted for some APIs: cur and max
224 * each within limits, but max less permissive than cur.
225 * We telemeter this and rewrite away the error and allow
226 * the calling function to proceed after removing
227 * permissions from cur to make it match max.
228 *
229 * We assume the individual prot values are legal
230 * because they were checked individually first.
231 */
232 if (__improbable((*cur_prot_inout & *max_prot_inout) != *cur_prot_inout)) {
233 *cur_prot_inout &= *max_prot_inout;
234 return vm_sanitize_make_policy_telemeter_and_rewrite_err(VM_SANITIZE_FALLTHROUGH);
235 }
236 return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
237 }
238
/*
 * vm_remap and vm_remap_new do not need cur/max error compat.
 * In all flavors either cur/max is an out parameter only
 * or it has historically already rejected inconsistent cur/max.
 */
VM_SANITIZE_DEFINE_CALLER(VM_MAP_REMAP);

/* mmap has new successes that we can't rewrite or telemeter */
VM_SANITIZE_DEFINE_CALLER(MMAP, /* no error compat needed */);

VM_SANITIZE_DEFINE_CALLER(MAP_WITH_LINKING_NP);

VM_SANITIZE_DEFINE_CALLER(MREMAP_ENCRYPTED, /* no error compat needed */);

/*
 * vm_map does need cur/max compat
 * compat for vm_map_enter_mem_object includes all vm_map flavors
 */
VM_SANITIZE_DEFINE_CALLER(ENTER_MEM_OBJ,
    .err_compat_prot_cur_max = &vm_sanitize_err_compat_cur_and_max_prots_vm_map);
VM_SANITIZE_DEFINE_CALLER(ENTER_MEM_OBJ_CTL,
    .err_compat_prot_cur_max = &vm_sanitize_err_compat_cur_and_max_prots_vm_map);
261
262 /* wire/unwire */
263
264 static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_wire_user(kern_return_t initial_kr,vm_address_t start,vm_size_t size,vm_offset_t pgmask)265 vm_sanitize_err_compat_addr_size_vm_wire_user(
266 kern_return_t initial_kr,
267 vm_address_t start,
268 vm_size_t size,
269 vm_offset_t pgmask)
270 {
271 /*
272 * vm_wire historically did nothing and
273 * returned success for some invalid input ranges.
274 * We currently telemeter this case but
275 * return an error without rewriting it to success.
276 * If we did rewrite it, we would use VM_ERR_RETURN_NOW to return
277 * success immediately and bypass the rest of vm_wire.
278 */
279 if (vm_sanitize_range_overflows_strict_zero(start, size, pgmask) &&
280 start + size >= start) {
281 return vm_sanitize_make_policy_telemeter_dont_rewrite_err(VM_ERR_RETURN_NOW);
282 }
283 return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
284 }
285
/* Both user-facing wire entry points share the same compat hook. */
VM_SANITIZE_DEFINE_CALLER(VM_WIRE_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_wire_user);
VM_SANITIZE_DEFINE_CALLER(VM_UNWIRE_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_wire_user);

/* The in-kernel vm_map_wire/unwire paths have no historical quirks. */
VM_SANITIZE_DEFINE_CALLER(VM_MAP_WIRE, /* no error compat needed */);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_UNWIRE, /* no error compat needed */);
293
#if XNU_PLATFORM_MacOSX
/*
 * Shared compat hook for vslock and vsunlock address/size errors
 * (macOS only; other platforms register no compat hook below).
 * All parameters are ignored: every bogus range is rewritten to
 * an immediate success, matching historical behavior.
 */
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vslock(
	kern_return_t initial_kr __unused,
	vm_address_t start __unused,
	vm_size_t size __unused,
	vm_offset_t pgmask __unused)
{
	/*
	 * vslock and vsunlock historically did nothing
	 * and returned success for every start/size value.
	 * We telemeter bogus values and early return success.
	 */
	return vm_sanitize_make_policy_telemeter_and_rewrite_err(VM_ERR_RETURN_NOW);
}

VM_SANITIZE_DEFINE_CALLER(VSLOCK,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vslock);
VM_SANITIZE_DEFINE_CALLER(VSUNLOCK,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vslock);
#else /* XNU_PLATFORM_MacOSX */
VM_SANITIZE_DEFINE_CALLER(VSLOCK, /* no error compat needed */);
VM_SANITIZE_DEFINE_CALLER(VSUNLOCK, /* no error compat needed */);
#endif /* XNU_PLATFORM_MacOSX */
318
319 /* copyin/copyout */
320
321 static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_map_copyio(kern_return_t initial_kr,vm_address_t start,vm_size_t size,vm_offset_t pgmask)322 vm_sanitize_err_compat_addr_size_vm_map_copyio(
323 kern_return_t initial_kr,
324 vm_address_t start,
325 vm_size_t size,
326 vm_offset_t pgmask)
327 {
328 /*
329 * vm_map_copyin and vm_map_copyout (and functions based on them)
330 * historically returned KERN_INVALID_ADDRESS
331 * instead of KERN_INVALID_ARGUMENT.
332 */
333 if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
334 initial_kr == KERN_INVALID_ARGUMENT) {
335 return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
336 }
337
338 return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
339 }
340
/* All copyin/copyout-based entry points share the same compat hook. */
VM_SANITIZE_DEFINE_CALLER(VM_MAP_COPY_OVERWRITE,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_COPYIN,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_READ_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_WRITE_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);
349
/*
 * Section placeholders for API families that currently register
 * no callers in this file.
 */

/* inherit */

/* protect */

/* behavior */

/* msync */

/* machine attribute */

/* page info */

/* test */
VM_SANITIZE_DEFINE_CALLER(TEST);
364