/*
 * Copyright (c) 2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* avoid includes here; we want these pragmas to also affect included inline functions */
#include <mach/machine/vm_param.h> /* to get PAGE_SHIFT without the inline functions from mach/vm_param.h */
/*
 * On 4k-hardware-page arm64 systems, the PAGE_SHIFT macro does not resolve to
 * a constant, but instead to a variable whose value is determined at boot
 * depending on the amount of RAM installed.
 *
 * In these cases, actual instructions need to be emitted to compute values like
 * PAGE_SIZE = (1 << PAGE_SHIFT), which means UBSan checks will be generated
 * as well, since the values cannot be computed at compile time.
 *
 * Therefore, we disable arithmetic UBSan checks on these configurations. We
 * detect them with PAGE_SHIFT == 0: in #if expressions during preprocessing,
 * any remaining identifiers resolve to 0, whereas PAGE_SHIFT resolves to its
 * actual nonzero value if it is defined as a constant macro.
 */
#if PAGE_SHIFT == 0
#pragma clang attribute push (__attribute__((no_sanitize("signed-integer-overflow", \
        "unsigned-integer-overflow", "shift", "unsigned-shift-base"))), apply_to=function)
#endif

/* Disabling optimizations makes it impossible to optimize out UBSan checks */
#if !__OPTIMIZE__
#pragma clang attribute push (__attribute__((no_sanitize("undefined", \
        "integer", "unsigned-shift-base", "nullability", "bounds"))), apply_to=function)
#endif

#include <vm/vm_map_xnu.h>
#include <vm/vm_sanitize_internal.h>
#include <vm/vm_object_internal.h>

#if HAS_MTE
#include <arm64/mte_xnu.h>
#endif /* HAS_MTE */

#define VM_SANITIZE_PROT_ALLOWED (VM_PROT_ALL | VM_PROT_ALLEXEC)

// TODO: enable telemetry and ktriage separately?

/* Also send telemetry output to kernel serial console? */
static TUNABLE(bool, vm_sanitize_telemeter_to_serial,
    "vm_sanitize_telemeter_to_serial", false);

/*
 * Arithmetic macros that suppress UBSan. os_xyz_overflow does not generate a
 * UBSan overflow check, since it indicates to the compiler that overflow is
 * (potentially) intentional and well-defined.
 *
 * These macros ignore the value that indicates whether overflow actually
 * occurred, so a comment should be left explaining why it is unlikely to
 * happen or is otherwise not a concern.
 */
#define vm_add_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_add_overflow(a, b, &TMP); TMP; })
#define vm_sub_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_sub_overflow(a, b, &TMP); TMP; })
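
/*
 * Illustrative sketch (not part of the build): with UBSan enabled, a plain
 * `addr + size` on unsanitized values could trap on overflow, while the
 * macro form wraps silently and defers the check. The locals below are
 * hypothetical.
 *
 *	mach_vm_address_t addr = 0xFFFFFFFFFFFFF000ULL;
 *	mach_vm_size_t    size = 0x2000;
 *	// Wraps to 0x1000 instead of tripping UBSan; callers must re-validate.
 *	mach_vm_address_t end  = vm_add_no_ubsan(addr, size);
 */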

static inline
kern_return_t
vm_sanitize_apply_err_rewrite_policy(kern_return_t initial_kr, vm_sanitize_compat_rewrite_t rewrite)
{
	return rewrite.should_rewrite ? rewrite.compat_kr : initial_kr;
}

__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_wrap_addr(vm_address_t val)
{
	return (vm_addr_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_wrap_size(vm_size_t val)
{
	return (vm_size_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm32_size_struct_t
vm32_sanitize_wrap_size(vm32_size_t val)
{
	return (vm32_size_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_prot_ut
vm_sanitize_wrap_prot(vm_prot_t val)
{
	return (vm_prot_ut) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_inherit_ut
vm_sanitize_wrap_inherit(vm_inherit_t val)
{
	return (vm_inherit_ut) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_behavior_ut
vm_sanitize_wrap_behavior(vm_behavior_t val)
{
	return (vm_behavior_ut) { .UNSAFE = val };
}

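/*
 * Illustrative sketch (not part of the build): an entry point might wrap
 * raw user-supplied values before passing them to the sanitizing helpers.
 * The function `example_entry` and its arguments are hypothetical.
 *
 *	kern_return_t
 *	example_entry(vm_address_t raw_addr, vm_size_t raw_size)
 *	{
 *		vm_addr_struct_t addr_u = vm_sanitize_wrap_addr(raw_addr);
 *		vm_size_struct_t size_u = vm_sanitize_wrap_size(raw_size);
 *		// ... hand addr_u/size_u to a vm_sanitize_* checker ...
 *	}
 */
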
#ifdef  MACH_KERNEL_PRIVATE
__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_expand_addr_to_64(vm32_address_ut val)
{
	return (vm_addr_struct_t) { .UNSAFE = val.UNSAFE };
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_expand_size_to_64(vm32_size_ut val)
{
	return (vm_size_struct_t) { .UNSAFE = val.UNSAFE };
}

__attribute__((always_inline, warn_unused_result))
vm32_address_ut
vm_sanitize_trunc_addr_to_32(vm_addr_struct_t val)
{
	vm32_address_ut ret;

	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_address_t, val.UNSAFE);
	return ret;
}

__attribute__((always_inline, warn_unused_result))
vm32_size_ut
vm_sanitize_trunc_size_to_32(vm_size_struct_t val)
{
	vm32_size_ut ret;

	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_size_t, val.UNSAFE);
	return ret;
}

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm32_address_ut         addr_u,
	vm32_size_ut            size_u,
	vm32_address_ut        *addr_out_u)
{
	vm32_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm32_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
}
#endif  /* MACH_KERNEL_PRIVATE */

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_addr_struct_t       *addr_out_u)
{
	mach_vm_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	mach_vm_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
}

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm_size_struct_t        size1_u,
	vm_size_struct_t        size2_u,
	vm_size_struct_t       *size_out_u)
{
	mach_vm_size_t size1 = VM_SANITIZE_UNSAFE_UNWRAP(size1_u);
	mach_vm_size_t size2 = VM_SANITIZE_UNSAFE_UNWRAP(size2_u);

	return os_add_overflow(size1, size2, &size_out_u->UNSAFE);
}
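
/*
 * Illustrative sketch (not part of the build): the overloads above let
 * callers bounds-check unsafe values before unwrapping them. The locals
 * are hypothetical.
 *
 *	vm_addr_struct_t end_u;
 *	if (vm_sanitize_add_overflow(addr_u, size_u, &end_u)) {
 *		return KERN_INVALID_ARGUMENT;
 *	}
 */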

/*
 * vm_*_no_ubsan is acceptable in these functions since they operate on unsafe
 * types. The return value is also an unsafe type and must be sanitized before
 * it can be used in other functions.
 */
__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_compute_ut_end(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u)
{
	vm_addr_struct_t end_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_size_t size_local = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	VM_SANITIZE_UT_SET(end_u, vm_add_no_ubsan(addr_local, size_local));
	return end_u;
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_compute_ut_size(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u)
{
	vm_size_struct_t size_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_address_t end_local = VM_SANITIZE_UNSAFE_UNWRAP(end_u);

	VM_SANITIZE_UT_SET(size_u, vm_sub_no_ubsan(end_local, addr_local));
	return size_u;
}
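
/*
 * Illustrative sketch (not part of the build): these helpers convert
 * between (addr, size) and (addr, end) while staying in unsafe types, so a
 * wrapping sum is carried as-is until a checker validates it.
 *
 *	vm_addr_struct_t end_u   = vm_sanitize_compute_ut_end(addr_u, size_u);
 *	vm_size_struct_t size_u2 = vm_sanitize_compute_ut_size(addr_u, end_u);
 */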

__attribute__((always_inline, warn_unused_result))
mach_vm_address_t
vm_sanitize_addr(
	vm_map_t                map,
	vm_addr_struct_t        addr_u)
{
	mach_vm_address_t addr   = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_map_offset_t   pgmask = vm_map_page_mask(map);

	return vm_map_trunc_page_mask(addr, pgmask);
}

__attribute__((always_inline, warn_unused_result))
mach_vm_offset_t
vm_sanitize_offset_in_page(
	vm_map_offset_t         mask,
	vm_addr_struct_t        addr_u)
{
	return VM_SANITIZE_UNSAFE_UNWRAP(addr_u) & mask;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_offset(
	vm_addr_struct_t        offset_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_address_t        addr,
	vm_map_address_t        end,
	vm_map_offset_t        *offset)
{
	*offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);

	if ((*offset < addr) || (*offset > end)) {
		*offset = 0;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_mask(
	vm_addr_struct_t        mask_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_offset_t        *mask)
{
	*mask = VM_SANITIZE_UNSAFE_UNWRAP(mask_u);

	/*
	 * Adding validation to mask has high ABI risk and low security value.
	 * The only internal function that deals with mask is vm_map_locate_space,
	 * and it currently ensures that addresses are aligned to a page boundary
	 * even for weird alignment requests.
	 *
	 * rdar://120445665
	 */

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_object_size(
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_object_offset_t     *size)
{
	mach_vm_size_t  size_aligned;

	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	size_aligned = vm_map_round_page_mask(*size, PAGE_MASK);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
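
/*
 * Illustrative sketch (not part of the build): VM_ERR_RETURN_NOW asks the
 * caller to stop and report success rather than proceed with a zero size.
 * This assumes a helper like vm_sanitize_get_kr() that maps
 * VM_ERR_RETURN_NOW back to KERN_SUCCESS; the shape of the call is
 * hypothetical.
 *
 *	kr = vm_sanitize_object_size(size_u, caller, flags, &size);
 *	if (kr != KERN_SUCCESS) {
 *		return vm_sanitize_get_kr(kr);
 *	}
 */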

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_size(
	vm_addr_struct_t        offset_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map,
	vm_sanitize_flags_t     flags,
	mach_vm_size_t         *size)
{
	mach_vm_size_t  offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
	vm_map_offset_t pgmask = vm_map_page_mask(map);
	mach_vm_size_t  size_aligned;

	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	/*
	 * Ensure that offset and size don't overflow when referring to the
	 * vm_object
	 */
	if (os_add_overflow(*size, offset, &size_aligned)) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * This rounding is a check on the vm_object and thus uses the kernel's PAGE_MASK
	 */
	if (vm_map_round_page_mask(size_aligned, PAGE_MASK) == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Check that a non-zero size being mapped doesn't round to 0
	 *
	 * vm_sub_no_ubsan is acceptable here since the subtraction is guaranteed
	 * not to overflow. We know size_aligned = *size + offset, and since that
	 * addition did not overflow and offset >= (offset & ~pgmask), this
	 * subtraction also cannot overflow.
	 */
	size_aligned = vm_sub_no_ubsan(size_aligned, offset & ~pgmask);

	/*
	 * This rounding is a check on the specified map and thus uses its pgmask
	 */
	size_aligned  = vm_map_round_page_mask(size_aligned, pgmask);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
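
/*
 * Worked example (illustrative, 4K pages): offset = 0x1800, *size = 0x800.
 * size_aligned = round(0x800 + 0x1800) = 0x2000; subtracting the
 * page-aligned part of the offset (0x1000) leaves 0x1000, i.e. the single
 * page the request actually touches, which then rounds to itself against
 * the map's pgmask.
 */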

static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_addr_size(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null)
{
	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	if (vm_sanitize_caller->err_compat_addr_size) {
		compat = (vm_sanitize_caller->err_compat_addr_size)
		    (initial_kr, VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u),
		    pgmask, map_or_null);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary addr 0x%llx size 0x%llx pgmask "
			    "0x%llx passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u), pgmask,
			    vm_sanitize_caller->vmsc_caller_name, initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_ADDR_SIZE,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			VM_SANITIZE_UNSAFE_UNWRAP(addr_u),
			VM_SANITIZE_UNSAFE_UNWRAP(size_u),
			pgmask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	return vm_sanitize_apply_err_rewrite_policy(initial_kr, compat);
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_size(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *addr,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	/*
	 * map_or_null is not available from all call sites.
	 * Use pgmask instead of vm_map_page_mask(map) for alignment.
	 */

	vm_map_offset_t addr_aligned = 0;
	vm_map_offset_t end_aligned = 0, end_unaligned = 0;
	kern_return_t kr;

	*addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	*size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		assert(!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES));
	}

#if KASAN_TBI
	if (flags & VM_SANITIZE_FLAGS_CANONICALIZE) {
		*addr = vm_memtag_canonicalize_kernel(*addr);
	}
#endif /* KASAN_TBI */

#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
	/*
	 * The next two flag checks are complementary.
	 * VM_SANITIZE_FLAGS_STRIP_ADDR ensures that the address is stripped of
	 * all its metadata bits (PAC, TBI, MTE). This is used by kernel entrypoints
	 * that are expected to handle metadata-filled addresses, to ease adoption.
	 *
	 * VM_SANITIZE_FLAGS_DENY_NON_CANONICAL_ADDR instead is for entrypoints where
	 * we require the caller to have performed the necessary stripping of metadata,
	 * and we expect the address to be in its canonical form.
	 *
	 * Both these calls _require_ the map to be available, as it is used to determine
	 * whether the user or kernel canonicalization rules should be applied (we cannot
	 * rely on the TTBR selector bit - bit 55 - as that one is under the caller's
	 * control).
	 */
	if (flags & VM_SANITIZE_FLAGS_STRIP_ADDR) {
		/* strip sites must pass map. */
		assert(map_or_null != NULL);
		assert(!(flags & VM_SANITIZE_FLAGS_DENY_NON_CANONICAL_ADDR));
		*addr = vm_map_strip_addr(map_or_null, *addr);
	}

	if (flags & VM_SANITIZE_FLAGS_DENY_NON_CANONICAL_ADDR) {
		/* counterpart to strip; also requires a valid map */
		assert(map_or_null != NULL);
		if (vm_map_strip_addr(map_or_null, *addr) != *addr) {
#if HAS_MTE
			mte_report_non_canonical_address((caddr_t)*addr, map_or_null, __func__);
#endif /* HAS_MTE */
			kr = KERN_INVALID_ARGUMENT;
			goto unsanitary;
		}
	}
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */

	addr_aligned = vm_map_trunc_page_mask(*addr, pgmask);

	/*
	 * Ensure that the address is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_START) && (*addr & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Ensure that the size is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_SIZE) && (*size & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		/*
		 * NOTE: these early returns bypass the VM_SANITIZE_FLAGS_CHECK_ADDR_RANGE
		 * check. Since the size is 0, the range [start, end) is empty and thus
		 * no values within this range can overflow the upper bits.
		 */
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			*addr = 0;
			*end = 0;
			/* size is already 0 */
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			kr = KERN_INVALID_ARGUMENT;
			goto unsanitary;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
				/* addr is already set */
				*end = *addr;
				/* size is already 0 */
				return KERN_SUCCESS;
			} else {
				*addr = addr_aligned;
				*end = addr_aligned;
				/* size is already 0 */
				return KERN_SUCCESS;
			}
		}
	}

	/*
	 * Compute the aligned end now
	 */
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		*addr = addr_aligned;
	}
	if (__improbable(os_add_overflow(*addr, *size, &end_unaligned))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	end_aligned = vm_map_round_page_mask(end_unaligned, pgmask);
	if (__improbable(end_aligned <= addr_aligned)) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
		/* addr and size are already set */
		*end = end_unaligned;
	} else {
		*addr = addr_aligned;
		*end = end_aligned;
		/*
		 * vm_sub_no_ubsan is acceptable since the subtraction is guaranteed
		 * not to overflow, as we have already verified end_aligned > addr_aligned.
		 */
		*size = vm_sub_no_ubsan(end_aligned, addr_aligned);
	}

	if (flags & VM_SANITIZE_FLAGS_CHECK_ADDR_RANGE) {
#if defined(__arm64__) && MACH_ASSERT
		/*
		 * Make sure that this fails noisily if someone adds support for large
		 * VA extensions. With such extensions, this code will have to check
		 * ID_AA64MMFR2_EL1 to get the actual max VA size for the system,
		 * instead of assuming it is 48 bits.
		 */
		assert((__builtin_arm_rsr64("ID_AA64MMFR2_EL1") & ID_AA64MMFR2_EL1_VARANGE_MASK) == 0);
#endif /* defined(__arm64__) && MACH_ASSERT */
		const uint64_t max_va_bits = 48;
		const mach_vm_offset_t va_range_upper_bound = (1ULL << max_va_bits);
		const mach_vm_offset_t va_mask = va_range_upper_bound - 1;
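
		/*
		 * Worked example (illustrative): addr = 0x0000FFFFFFFFF000 with
		 * size = 0x2000 gives end = 0x0001000000001000, whose bits above
		 * bit 47 differ from addr's, so the range is rejected. If end is
		 * exactly 1ULL << 48, the half-open range [addr, end) still fits
		 * in 48 bits and is allowed below.
		 */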

		if ((*addr & ~va_mask) != (*end & ~va_mask)) {
			if (*end == va_range_upper_bound) {
				/*
				 * Since the range is exclusive of `end`, the range [start, end)
				 * does not include any invalid values in this case. Therefore,
				 * we treat this as a success and fall through.
				 */
			} else {
				/*
				 * This means iterating within the range [start, end) may
				 * overflow above the VA bits supported by the system. Since
				 * these bits may be used by the kernel or hardware to store
				 * other values, we should not allow the operation to proceed.
				 */
				kr = KERN_INVALID_ADDRESS;
				goto unsanitary;
			}
		}
	}

	return KERN_SUCCESS;

unsanitary:
	*addr = 0;
	*end = 0;
	*size = 0;
	return vm_sanitize_err_compat_addr_size(kr, vm_sanitize_caller,
	           addr_u, size_u, pgmask, map_or_null);
}
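
/*
 * Illustrative sketch (not part of the build): a typical call validates an
 * address/size pair against a map's page mask in one shot. The caller
 * constant VM_SANITIZE_CALLER_EXAMPLE and the locals are hypothetical.
 *
 *	vm_map_offset_t start, end;
 *	vm_map_size_t   size;
 *	kr = vm_sanitize_addr_size(addr_u, size_u, VM_SANITIZE_CALLER_EXAMPLE,
 *	        vm_map_page_mask(map), map,
 *	        VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS | VM_SANITIZE_FLAGS_CHECK_ADDR_RANGE,
 *	        &start, &end, &size);
 */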

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_end(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        mask,
	vm_map_t                map_or_null,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *start,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	vm_size_struct_t size_u = vm_sanitize_compute_ut_size(addr_u, end_u);

	return vm_sanitize_addr_size(addr_u, size_u, vm_sanitize_caller, mask,
	           map_or_null, flags, start, end, size);
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_prot(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map __unused,
	vm_prot_t               extra_mask,
	vm_prot_t              *prot)
{
	*prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	if (__improbable(*prot & ~(VM_SANITIZE_PROT_ALLOWED | extra_mask))) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

#if defined(__x86_64__)
	if ((*prot & VM_PROT_UEXEC) &&
	    !pmap_supported_feature(map->pmap, PMAP_FEAT_UEXEC)) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}
#endif

	return KERN_SUCCESS;
}
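
/*
 * Illustrative sketch (not part of the build): extra_mask widens the set of
 * acceptable bits for a specific entry point. A caller that also accepts
 * VM_PROT_COPY might pass it like this (the locals are hypothetical).
 *
 *	vm_prot_t prot;
 *	kr = vm_sanitize_prot(prot_u, vm_sanitize_caller, map, VM_PROT_COPY, &prot);
 */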

/*
 * *out_cur and *out_max are modified when there is an err compat rewrite;
 * otherwise they are left unchanged.
 */
static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_cur_and_max_prots(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_prot_t               extra_mask,
	vm_prot_t              *out_cur,
	vm_prot_t              *out_max)
{
	vm_prot_t initial_cur_prot = VM_SANITIZE_UNSAFE_UNWRAP(cur_prot_u);
	vm_prot_t initial_max_prot = VM_SANITIZE_UNSAFE_UNWRAP(max_prot_u);

	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	vm_prot_t compat_cur_prot = initial_cur_prot;
	vm_prot_t compat_max_prot = initial_max_prot;
	if (vm_sanitize_caller->err_compat_prot_cur_max) {
		compat = (vm_sanitize_caller->err_compat_prot_cur_max)
		    (initial_kr, &compat_cur_prot, &compat_max_prot, extra_mask);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary vm_prot cur %d max %d "
			    "passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    initial_cur_prot, initial_max_prot,
			    vm_sanitize_caller->vmsc_caller_name,
			    initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_PROT_CUR_MAX,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			initial_cur_prot,
			initial_max_prot,
			extra_mask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	if (compat.should_rewrite) {
		*out_cur = compat_cur_prot;
		*out_max = compat_max_prot;
		return compat.compat_kr;
	} else {
		/* out_cur and out_max unchanged */
		return initial_kr;
	}
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_cur_and_max_prots(
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_map_t                map,
	vm_prot_t               extra_mask,
	vm_prot_t              *cur_prot,
	vm_prot_t              *max_prot)
{
	kern_return_t kr;

	kr = vm_sanitize_prot(cur_prot_u, vm_sanitize_caller, map, extra_mask, cur_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}

	kr = vm_sanitize_prot(max_prot_u, vm_sanitize_caller, map, extra_mask, max_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}

	/*
	 * This check needs to be performed on the actual protection bits.
	 * vm_sanitize_prot restricts cur and max prot to
	 * (VM_PROT_ALL | VM_PROT_ALLEXEC | extra_mask), but we don't enforce
	 * ordering on the extra_mask bits.
	 */
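	/*
	 * Worked example (illustrative): cur = VM_PROT_READ | VM_PROT_WRITE
	 * with max = VM_PROT_READ fails, because (cur & max) == VM_PROT_READ,
	 * which differs from cur: cur would be more permissive than max.
	 */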
	if (__improbable((*cur_prot & *max_prot & VM_SANITIZE_PROT_ALLOWED) !=
	    (*cur_prot & VM_SANITIZE_PROT_ALLOWED))) {
		/* cur is more permissive than max */
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	return KERN_SUCCESS;

unsanitary:
	*cur_prot = VM_PROT_NONE;
	*max_prot = VM_PROT_NONE;
	/* error compat may set cur/max to something other than 0/0 */
	return vm_sanitize_err_compat_cur_and_max_prots(kr, vm_sanitize_caller,
	           cur_prot_u, max_prot_u, extra_mask, cur_prot, max_prot);
}

__attribute__((always_inline, warn_unused_result))
vm_prot_t
vm_sanitize_prot_bsd(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused)
{
	vm_prot_t prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	/*
	 * Strip all protections that are not allowed
	 */
	prot &= (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ);
	return prot;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_memory_entry_perm(
	vm_prot_ut              perm_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_prot_t               extra_mask,
	vm_prot_t              *perm)
{
	vm_prot_t prot;
	vm_prot_t map_mem_flags;
	vm_prot_t access;

	*perm = VM_SANITIZE_UNSAFE_UNWRAP(perm_u);
	prot = *perm & MAP_MEM_PROT_MASK;
	map_mem_flags = *perm & MAP_MEM_FLAGS_MASK;
	access = GET_MAP_MEM(*perm);

	if ((flags & VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS) &&
	    (map_mem_flags & ~MAP_MEM_FLAGS_USER)) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		*perm = VM_PROT_NONE;
		return KERN_INVALID_VALUE;
	}

	/*
	 * Clear prot bits in perm and set them to only allowed values
	 */
	*perm &= ~MAP_MEM_PROT_MASK;
	*perm |= (prot & (VM_PROT_ALL | extra_mask));

	/*
	 * No checks on access
	 */
	(void) access;

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_inherit(
	vm_inherit_ut           inherit_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_inherit_t           *inherit)
{
	*inherit = VM_SANITIZE_UNSAFE_UNWRAP(inherit_u);

	if (__improbable(*inherit > VM_INHERIT_LAST_VALID)) {
		*inherit = VM_INHERIT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_behavior(
	vm_behavior_ut          behavior_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_behavior_t          *behavior)
{
	*behavior = VM_SANITIZE_UNSAFE_UNWRAP(behavior_u);

	if (__improbable((*behavior > VM_BEHAVIOR_LAST_VALID)
	    || (*behavior < 0))) {
		*behavior = VM_BEHAVIOR_DEFAULT;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}


#if PAGE_SHIFT == 0
#pragma clang attribute pop
#endif

#if !__OPTIMIZE__
#pragma clang attribute pop
#endif