/*
 * Copyright (c) 2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * vm_sanitize_error_compat.h
 * Error code rewriting functions to preserve historical error values.
 */

/* avoid includes here; we want these pragmas to also affect included inline functions */
/* Disabling optimizations makes it impossible to optimize out UBSan checks */
#if !__OPTIMIZE__
#pragma clang attribute push (__attribute__((no_sanitize("undefined", \
    "integer", "unsigned-shift-base", "nullability", "bounds"))), apply_to=function)
#endif

#include <vm/vm_map_xnu.h>
#include <vm/vm_sanitize_internal.h>

/* Don't use errno values in this file. Everything here should be kern_return. */
#undef EINVAL
#define EINVAL DONT_USE_EINVAL
#undef EAGAIN
#define EAGAIN DONT_USE_EAGAIN
#undef EACCES
#define EACCES DONT_USE_EACCES
#undef ENOMEM
#define ENOMEM DONT_USE_ENOMEM
#undef EPERM
#define EPERM DONT_USE_EPERM

/*
 * KERN_SUCCESS is ambiguous here. Don't use it. Instead:
 * VM_ERR_RETURN_NOW: "stop the calling function now and return success"
 * VM_SANITIZE_FALLTHROUGH: "don't stop the calling function"
 * These values are intended for vm_sanitize_get_kr().
 */
#undef KERN_SUCCESS
#define KERN_SUCCESS DONT_USE_KERN_SUCCESS
#define VM_SANITIZE_FALLTHROUGH 0
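
/*
 * Rough usage sketch (illustrative only, not how any particular caller is
 * written; `kr` is a hypothetical variable): a VM entry point in another
 * file, where KERN_SUCCESS is not poisoned, is expected to consume these
 * values roughly like this:
 *
 *    if (kr == VM_ERR_RETURN_NOW) {
 *        return KERN_SUCCESS;            // stop here and report success
 *    } else if (kr != VM_SANITIZE_FALLTHROUGH) {
 *        return kr;                      // genuine error, propagate it
 *    }
 *    // otherwise fall through and keep executing the function
 */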

/*
 * For compatibility reasons, vm_sanitize_err_compat functions deliberately
 * contain the same checks as the old source code did, which frequently used
 * unsigned overflow intentionally. Since we want to replicate the same behavior
 * here, we don't want UBSan to generate checks for these functions.
 */
#define NO_SANITIZE_UNSIGNED_OVERFLOW __attribute__((no_sanitize("unsigned-integer-overflow")))
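
/*
 * For example (sketch only, not tied to any one function below), the
 * historical wraparound test that several of the compat checks in this
 * file preserve looks like:
 *
 *    if (start + size < start) {
 *        // the range wraps around the end of the address space
 *    }
 *
 * which relies on well-defined unsigned wraparound and would otherwise be
 * flagged by the "unsigned-integer-overflow" sanitizer.
 */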

/* Don't rewrite this result or telemeter anything. */
static inline __result_use_check
vm_sanitize_compat_rewrite_t
vm_sanitize_make_policy_dont_rewrite_err(kern_return_t err)
{
	return (vm_sanitize_compat_rewrite_t) {
		.compat_kr = err,
		.should_rewrite = false,
		.should_telemeter = false
	};
}

/*
 * Telemeter this result. Don't rewrite it.
 * compat_kr is advisory only: telemetry reports it as the value
 * we might return in the future, but we don't use it now.
 */
static inline __result_use_check
vm_sanitize_compat_rewrite_t
vm_sanitize_make_policy_telemeter_dont_rewrite_err(kern_return_t err)
{
	return (vm_sanitize_compat_rewrite_t) {
		.compat_kr = err,
		.should_rewrite = false,
		.should_telemeter = true
	};
}

/* Rewrite and telemeter this result. */
static inline __result_use_check
vm_sanitize_compat_rewrite_t
vm_sanitize_make_policy_telemeter_and_rewrite_err(kern_return_t err)
{
	return (vm_sanitize_compat_rewrite_t) {
		.compat_kr = err,
		.should_rewrite = true,
		.should_telemeter = true
	};
}


/*
 * Similar to vm_map_range_overflows()
 * but size zero is not unconditionally allowed
 */
static bool __unused
vm_sanitize_range_overflows_strict_zero(vm_address_t start, vm_size_t size, vm_offset_t pgmask)
{
	vm_address_t sum;
	if (__builtin_add_overflow(start, size, &sum)) {
		return true;
	}

	vm_address_t aligned_start = vm_map_trunc_page_mask(start, pgmask);
	vm_address_t aligned_end = vm_map_round_page_mask(start + size, pgmask);
	if (aligned_end <= aligned_start) {
		return true;
	}

	return false;
}

/*
 * Similar to vm_map_range_overflows()
 * including unconditional acceptance of zero
 */
static bool __unused
vm_sanitize_range_overflows_allow_zero(vm_address_t start, vm_size_t size, vm_offset_t pgmask)
{
	if (size == 0) {
		return false;
	}

	vm_address_t sum;
	if (__builtin_add_overflow(start, size, &sum)) {
		return true;
	}

	vm_address_t aligned_start = vm_map_trunc_page_mask(start, pgmask);
	vm_address_t aligned_end = vm_map_round_page_mask(start + size, pgmask);
	if (aligned_end <= aligned_start) {
		return true;
	}

	return false;
}
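
/*
 * Worked example of the difference (sketch, assuming a 4K page mask of
 * 0xFFF): for start = 0x1000 and size = 0,
 * vm_sanitize_range_overflows_allow_zero() returns false because a zero
 * size is unconditionally accepted, while
 * vm_sanitize_range_overflows_strict_zero() returns true because the
 * rounded range [0x1000, 0x1000) is empty (aligned_end <= aligned_start).
 */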


/*
 * Error rewriting functions and the sanitization caller description
 * for each VM API.
 */

/* memory entry */

VM_SANITIZE_DEFINE_CALLER(MACH_MAKE_MEMORY_ENTRY);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_ENTRY_PAGE_OP);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_ENTRY_RANGE_OP);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_ENTRY_MAP_SIZE);
VM_SANITIZE_DEFINE_CALLER(MACH_MEMORY_OBJECT_MEMORY_ENTRY);

/* alloc/dealloc */

static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_allocate_fixed(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_allocate(VM_FLAGS_FIXED) historically returned
	 * KERN_INVALID_ADDRESS instead of KERN_INVALID_ARGUMENT
	 * for some invalid input ranges.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
	    vm_map_round_page_mask(size, pgmask) != 0) {
		return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_ALLOCATE_FIXED,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_allocate_fixed);

VM_SANITIZE_DEFINE_CALLER(VM_ALLOCATE_ANYWHERE, /* no error compat needed */);

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_deallocate(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_deallocate historically did nothing and
	 * returned success for some invalid input ranges.
	 * We currently telemeter this case but
	 * return an error without rewriting it to success.
	 * If we did rewrite it, we would use VM_ERR_RETURN_NOW to return
	 * success immediately and bypass the rest of vm_deallocate.
	 */
	if (vm_sanitize_range_overflows_strict_zero(start, size, pgmask) &&
	    start + size >= start) {
		return vm_sanitize_make_policy_telemeter_dont_rewrite_err(VM_ERR_RETURN_NOW);
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_DEALLOCATE,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_deallocate);

VM_SANITIZE_DEFINE_CALLER(MUNMAP, /* no error compat needed */);

/* map/remap */

static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_cur_and_max_prots_vm_map(
	kern_return_t initial_kr,
	vm_prot_t *cur_prot_inout,
	vm_prot_t *max_prot_inout,
	vm_prot_t extra_mask __unused)
{
	/*
	 * Invalid but historically accepted for some APIs: cur and max
	 * each within limits, but max less permissive than cur.
	 * We telemeter this, rewrite away the error, and allow
	 * the calling function to proceed after removing
	 * permissions from cur so that it is a subset of max.
	 *
	 * We assume the individual prot values are legal
	 * because they were checked individually first.
	 */
	if (__improbable((*cur_prot_inout & *max_prot_inout) != *cur_prot_inout)) {
		*cur_prot_inout &= *max_prot_inout;
		return vm_sanitize_make_policy_telemeter_and_rewrite_err(VM_SANITIZE_FALLTHROUGH);
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

/*
 * vm_remap and vm_remap_new do not need cur/max error compat.
 * In all flavors, either cur/max is an out parameter only,
 * or the call has historically already rejected inconsistent cur/max.
 */
VM_SANITIZE_DEFINE_CALLER(VM_MAP_REMAP);

/* mmap has new successes that we can't rewrite or telemeter */
VM_SANITIZE_DEFINE_CALLER(MMAP, /* no error compat needed */);

VM_SANITIZE_DEFINE_CALLER(MAP_WITH_LINKING_NP);

VM_SANITIZE_DEFINE_CALLER(MREMAP_ENCRYPTED, /* no error compat needed */);

/*
 * vm_map does need cur/max compat.
 * Attaching it to vm_map_enter_mem_object covers all vm_map flavors.
 */
VM_SANITIZE_DEFINE_CALLER(ENTER_MEM_OBJ,
    .err_compat_prot_cur_max = &vm_sanitize_err_compat_cur_and_max_prots_vm_map);
VM_SANITIZE_DEFINE_CALLER(ENTER_MEM_OBJ_CTL,
    .err_compat_prot_cur_max = &vm_sanitize_err_compat_cur_and_max_prots_vm_map);

/* wire/unwire */

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_wire_user(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_wire historically did nothing and
	 * returned success for some invalid input ranges.
	 * We currently telemeter this case but
	 * return an error without rewriting it to success.
	 * If we did rewrite it, we would use VM_ERR_RETURN_NOW to return
	 * success immediately and bypass the rest of vm_wire.
	 */
	if (vm_sanitize_range_overflows_strict_zero(start, size, pgmask) &&
	    start + size >= start) {
		return vm_sanitize_make_policy_telemeter_dont_rewrite_err(VM_ERR_RETURN_NOW);
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_WIRE_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_wire_user);
VM_SANITIZE_DEFINE_CALLER(VM_UNWIRE_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_wire_user);

VM_SANITIZE_DEFINE_CALLER(VM_MAP_WIRE, /* no error compat needed */);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_UNWIRE, /* no error compat needed */);

#if XNU_PLATFORM_MacOSX
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vslock(
	kern_return_t initial_kr __unused,
	vm_address_t start __unused,
	vm_size_t size __unused,
	vm_offset_t pgmask __unused,
	vm_map_t map_or_null __unused)
{
	/*
	 * vslock and vsunlock historically did nothing
	 * and returned success for every start/size value.
	 * We telemeter unsanitary values and early-return success.
	 */
	return vm_sanitize_make_policy_telemeter_and_rewrite_err(VM_ERR_RETURN_NOW);
}

VM_SANITIZE_DEFINE_CALLER(VSLOCK,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vslock);
VM_SANITIZE_DEFINE_CALLER(VSUNLOCK,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vslock);
#else /* XNU_PLATFORM_MacOSX */
VM_SANITIZE_DEFINE_CALLER(VSLOCK, /* no error compat needed */);
VM_SANITIZE_DEFINE_CALLER(VSUNLOCK, /* no error compat needed */);
#endif /* XNU_PLATFORM_MacOSX */

/* copyin/copyout */

static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_map_copyio(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_map_copyin and vm_map_copyout (and functions based on them)
	 * historically returned KERN_INVALID_ADDRESS
	 * instead of KERN_INVALID_ARGUMENT.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
	    initial_kr == KERN_INVALID_ARGUMENT) {
		return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
	}

	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_MAP_COPY_OVERWRITE,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_COPYIN,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_READ_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);
VM_SANITIZE_DEFINE_CALLER(VM_MAP_WRITE_USER,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_copyio);

/* inherit */

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_map_inherit(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_inherit historically did nothing and
	 * returned success for some invalid input ranges.
	 * We currently telemeter this case but
	 * return an error without rewriting it to success.
	 * If we did rewrite it, we would use VM_ERR_RETURN_NOW to return
	 * success immediately and bypass the rest of vm_inherit.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
	    start + size >= start) {
		return vm_sanitize_make_policy_telemeter_dont_rewrite_err(VM_ERR_RETURN_NOW);
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_MAP_INHERIT,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_inherit);
VM_SANITIZE_DEFINE_CALLER(MINHERIT, /* no error compat needed */);

/* protect */

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_protect(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_protect historically returned KERN_INVALID_ADDRESS
	 * instead of KERN_INVALID_ARGUMENT for some invalid input ranges.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask)) {
		vm_address_t aligned_start = vm_map_trunc_page(start, pgmask);
		vm_address_t aligned_end = vm_map_round_page(start + size, pgmask);
		if (start + size < start) {
			/* fall through - this was KERN_INVALID_ARGUMENT historically too */
		} else if (vm_sanitize_range_overflows_allow_zero(aligned_start, aligned_end - aligned_start, pgmask)) {
			/* fall through - this was KERN_INVALID_ARGUMENT historically too */
		} else {
			return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
		}
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_MAP_PROTECT,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_protect);
VM_SANITIZE_DEFINE_CALLER(MPROTECT, /* no error compat needed */);

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_useracc(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null)
{
	/*
	 * We require the map be passed in from
	 * VM_SANITIZE_CALLER_USERACC call sites.
	 */
	assert(map_or_null);

	/*
	 * useracc historically returned TRUE (success) for some
	 * invalid input ranges. We currently telemeter this case but
	 * return an error without rewriting it.
	 * If we did rewrite it, we would use VM_ERR_RETURN_NOW to return
	 * success immediately and bypass the rest of useracc.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask)) {
		vm_address_t aligned_start = vm_map_trunc_page(start, pgmask);
		vm_address_t aligned_end = vm_map_round_page(start + size, pgmask);
		vm_map_entry_t tmp_entry;
		bool have_entry;

		vm_map_lock(map_or_null);
		have_entry = vm_map_lookup_entry(map_or_null, aligned_start, &tmp_entry);
		vm_map_unlock(map_or_null);

		if (vm_sanitize_range_overflows_allow_zero(aligned_start, aligned_end - aligned_start, pgmask) ||
		    aligned_start < vm_map_min(map_or_null) ||
		    aligned_end > vm_map_max(map_or_null) ||
		    aligned_start > aligned_end ||
		    !have_entry) {
			/* fall through - these were errors historically too */
		} else {
			/* this was a possible success historically */
			return vm_sanitize_make_policy_telemeter_dont_rewrite_err(VM_ERR_RETURN_NOW);
		}
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(USERACC,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_useracc);

/* behavior */

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_behavior_set(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null)
{
	/*
	 * We require the map be passed in from
	 * VM_SANITIZE_CALLER_VM_BEHAVIOR_SET call sites.
	 */
	assert(map_or_null);

	/*
	 * vm_behavior_set historically returned KERN_NO_SPACE
	 * or KERN_INVALID_ADDRESS for some invalid input ranges.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask)) {
		vm_address_t aligned_start = vm_map_trunc_page(start, pgmask);
		vm_address_t aligned_end = vm_map_round_page(start + size, pgmask);
		if (start + size < start) {
			/* fall through - this was KERN_INVALID_ARGUMENT historically too */
		} else if (aligned_start > aligned_end ||
		    aligned_start < vm_map_min(map_or_null) ||
		    aligned_end > vm_map_max(map_or_null)) {
			return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_NO_SPACE);
		} else {
			return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
		}
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_BEHAVIOR_SET,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_behavior_set);
VM_SANITIZE_DEFINE_CALLER(MADVISE);

/* msync */

static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_msync(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_msync historically returned KERN_INVALID_ADDRESS
	 * instead of KERN_INVALID_ARGUMENT.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
	    initial_kr == KERN_INVALID_ARGUMENT) {
		return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
	}

	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_MAP_MSYNC,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_msync);

VM_SANITIZE_DEFINE_CALLER(MSYNC);

/* machine attribute */

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_machine_attribute(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_machine_attribute historically returned KERN_INVALID_ADDRESS
	 * instead of KERN_INVALID_ARGUMENT for some invalid input ranges.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
	    start + size >= start) {
		return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_MAP_MACHINE_ATTRIBUTE,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_machine_attribute);

/* page info */

static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_vm_map_page_range_info(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * vm_map_page_range_info (and functions based on it)
	 * historically returned KERN_INVALID_ADDRESS
	 * instead of KERN_INVALID_ARGUMENT.
	 */
	if (vm_sanitize_range_overflows_allow_zero(start, size, pgmask) &&
	    initial_kr == KERN_INVALID_ARGUMENT) {
		return vm_sanitize_make_policy_telemeter_and_rewrite_err(KERN_INVALID_ADDRESS);
	}

	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(VM_MAP_PAGE_RANGE_INFO,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_vm_map_page_range_info);

/*
 * mach_vm_page_range_query now returns success in a few size==0 cases that
 * were historically failures. We do not telemeter or rewrite them.
 */
VM_SANITIZE_DEFINE_CALLER(VM_MAP_PAGE_RANGE_QUERY, /* no error compat */);

NO_SANITIZE_UNSIGNED_OVERFLOW
static vm_sanitize_compat_rewrite_t
vm_sanitize_err_compat_addr_size_mincore(
	kern_return_t initial_kr,
	vm_address_t start,
	vm_size_t size,
	vm_offset_t pgmask,
	vm_map_t map_or_null __unused)
{
	/*
	 * mincore historically did nothing and
	 * returned success for some invalid input ranges.
	 * We currently telemeter this case but
	 * return an error without rewriting it to success.
	 * If we did rewrite it, we would use VM_ERR_RETURN_NOW to return
	 * success immediately and bypass the rest of mincore.
	 */
	if (vm_sanitize_range_overflows_strict_zero(start, size, pgmask)) {
		vm_address_t aligned_start = vm_map_trunc_page(start, pgmask);
		vm_address_t aligned_end = vm_map_round_page(start + size, pgmask);
		if (aligned_end == aligned_start) {
			return vm_sanitize_make_policy_telemeter_dont_rewrite_err(VM_ERR_RETURN_NOW);
		}
	}
	return vm_sanitize_make_policy_dont_rewrite_err(initial_kr);
}

VM_SANITIZE_DEFINE_CALLER(MINCORE,
    .err_compat_addr_size = &vm_sanitize_err_compat_addr_size_mincore);

/* single */
VM_SANITIZE_DEFINE_CALLER(MACH_VM_DEFERRED_RECLAMATION_BUFFER_INIT);
VM_SANITIZE_DEFINE_CALLER(MACH_VM_RANGE_CREATE);
VM_SANITIZE_DEFINE_CALLER(SHARED_REGION_MAP_AND_SLIDE_2_NP);

/* test */
VM_SANITIZE_DEFINE_CALLER(TEST);

#if !__OPTIMIZE__
#pragma clang attribute pop
#endif