/*
 * Copyright (c) 2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* avoid includes here; we want these pragmas to also affect included inline functions */
#include <mach/machine/vm_param.h> /* to get PAGE_SHIFT without the inline functions from mach/vm_param.h */
/*
 * On 4k-hardware-page arm64 systems, the PAGE_SHIFT macro does not resolve to
 * a constant, but instead to a variable whose value is determined at boot
 * depending on the amount of RAM installed.
 *
 * In these cases, actual instructions need to be emitted to compute values like
 * PAGE_SIZE = (1 << PAGE_SHIFT), which means UBSan checks will be generated
 * as well, since the values cannot be computed at compile time.
 *
 * Therefore, we disable arithmetic UBSan checks on these configurations. We
 * detect them with PAGE_SHIFT == 0: during preprocessing, an identifier that
 * is not defined as a macro evaluates to 0, whereas PAGE_SHIFT resolves to
 * its actual nonzero value when it is defined as a macro.
 */
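/*
 * Illustrative sketch (not part of this file's logic): in a preprocessor
 * conditional, any identifier left after macro expansion evaluates to 0,
 * so a variable-backed PAGE_SHIFT is detectable at preprocessing time:
 *
 *	#define CONSTANT_SHIFT 14
 *	#if CONSTANT_SHIFT == 0   // false: expands to 14
 *	#endif
 *	#if VARIABLE_SHIFT == 0   // true: no macro definition, reads as 0
 *	#endif
 */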
#if PAGE_SHIFT == 0
#pragma clang attribute push (__attribute__((no_sanitize("signed-integer-overflow", \
        "unsigned-integer-overflow", "shift", "unsigned-shift-base"))), apply_to=function)
#endif

/* Disabling optimizations makes it impossible to optimize out UBSan checks */
#if !__OPTIMIZE__
#pragma clang attribute push (__attribute__((no_sanitize("undefined", \
        "integer", "unsigned-shift-base", "nullability", "bounds"))), apply_to=function)
#endif

#include <vm/vm_map_xnu.h>
#include <vm/vm_sanitize_internal.h>
#include <vm/vm_object_internal.h>


#define VM_SANITIZE_PROT_ALLOWED (VM_PROT_ALL | VM_PROT_ALLEXEC)

// TODO: enable telemetry and ktriage separately?

/* Also send telemetry output to kernel serial console? */
static TUNABLE(bool, vm_sanitize_telemeter_to_serial,
    "vm_sanitize_telemeter_to_serial", false);

/*
 * Arithmetic macros that suppress UBSan. os_xyz_overflow does not generate a
 * UBSan overflow check, since it indicates to the compiler that overflow is
 * (potentially) intentional and well-defined.
 *
 * These macros ignore the value that indicates whether overflow actually
 * occurred, so a comment should be left explaining why it is unlikely to
 * happen or is otherwise not a concern.
 */
#define vm_add_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_add_overflow(a, b, &TMP); TMP; })
#define vm_sub_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_sub_overflow(a, b, &TMP); TMP; })
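
/*
 * Usage sketch (hypothetical caller, not from this file): the justification
 * comment is the part these macros cannot supply on their own.
 *
 *	// Cannot overflow: size was bounds-checked against the map above.
 *	vm_map_offset_t end = vm_add_no_ubsan(addr, size);
 */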

static inline
kern_return_t
vm_sanitize_apply_err_rewrite_policy(kern_return_t initial_kr, vm_sanitize_compat_rewrite_t rewrite)
{
	return rewrite.should_rewrite ? rewrite.compat_kr : initial_kr;
}

__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_wrap_addr(vm_address_t val)
{
	return (vm_addr_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_wrap_size(vm_size_t val)
{
	return (vm_size_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm32_size_struct_t
vm32_sanitize_wrap_size(vm32_size_t val)
{
	return (vm32_size_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_prot_ut
vm_sanitize_wrap_prot(vm_prot_t val)
{
	return (vm_prot_ut) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_inherit_ut
vm_sanitize_wrap_inherit(vm_inherit_t val)
{
	return (vm_inherit_ut) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_behavior_ut
vm_sanitize_wrap_behavior(vm_behavior_t val)
{
	return (vm_behavior_ut) { .UNSAFE = val };
}

#ifdef  MACH_KERNEL_PRIVATE
__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_expand_addr_to_64(vm32_address_ut val)
{
	return (vm_addr_struct_t) { .UNSAFE = val.UNSAFE };
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_expand_size_to_64(vm32_size_ut val)
{
	return (vm_size_struct_t) { .UNSAFE = val.UNSAFE };
}

__attribute__((always_inline, warn_unused_result))
vm32_address_ut
vm_sanitize_trunc_addr_to_32(vm_addr_struct_t val)
{
	vm32_address_ut ret;

	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_address_t, val.UNSAFE);
	return ret;
}

__attribute__((always_inline, warn_unused_result))
vm32_size_ut
vm_sanitize_trunc_size_to_32(vm_size_struct_t val)
{
	vm32_size_ut ret;

	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_size_t, val.UNSAFE);
	return ret;
}

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm32_address_ut         addr_u,
	vm32_size_ut            size_u,
	vm32_address_ut        *addr_out_u)
{
	vm32_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm32_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
}
#endif  /* MACH_KERNEL_PRIVATE */

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_addr_struct_t       *addr_out_u)
{
	mach_vm_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	mach_vm_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
}

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm_size_struct_t        size1_u,
	vm_size_struct_t        size2_u,
	vm_size_struct_t       *size_out_u)
{
	mach_vm_size_t    size1 = VM_SANITIZE_UNSAFE_UNWRAP(size1_u);
	mach_vm_size_t    size2 = VM_SANITIZE_UNSAFE_UNWRAP(size2_u);

	return os_add_overflow(size1, size2, &size_out_u->UNSAFE);
}
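
/*
 * Usage sketch (hypothetical caller, not from this file): the overloads
 * above let callers overflow-check wrapped values without unwrapping them.
 *
 *	vm_addr_struct_t end_u;
 *	if (vm_sanitize_add_overflow(addr_u, size_u, &end_u)) {
 *		return KERN_INVALID_ARGUMENT;
 *	}
 */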

/*
 * vm_*_no_ubsan is acceptable in these functions since they operate on unsafe
 * types. The return value is also an unsafe type and must be sanitized before
 * it can be used in other functions.
 */
__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_compute_ut_end(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u)
{
	vm_addr_struct_t end_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_size_t size_local = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	VM_SANITIZE_UT_SET(end_u, vm_add_no_ubsan(addr_local, size_local));
	return end_u;
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_compute_ut_size(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u)
{
	vm_size_struct_t size_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_address_t end_local = VM_SANITIZE_UNSAFE_UNWRAP(end_u);

	VM_SANITIZE_UT_SET(size_u, vm_sub_no_ubsan(end_local, addr_local));
	return size_u;
}
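
/*
 * Usage sketch: an addr/end API can be reduced to an addr/size one while
 * staying in the unsafe-type domain (vm_sanitize_addr_end below does exactly
 * this); the result is still unsanitized and must go through a
 * vm_sanitize_* checker before use.
 *
 *	vm_size_struct_t size_u = vm_sanitize_compute_ut_size(addr_u, end_u);
 */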

__attribute__((always_inline, warn_unused_result))
mach_vm_address_t
vm_sanitize_addr(
	vm_map_t                map,
	vm_addr_struct_t        addr_u)
{
	mach_vm_address_t addr   = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_map_offset_t   pgmask = vm_map_page_mask(map);

	return vm_map_trunc_page_mask(addr, pgmask);
}
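
/*
 * Example (illustrative values): with 16 KiB map pages, pgmask == 0x3fff,
 * so an unwrapped address of 0x100002345 truncates to the page boundary
 * 0x100000000.
 */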

__attribute__((always_inline, warn_unused_result))
mach_vm_offset_t
vm_sanitize_offset_in_page(
	vm_map_offset_t         mask,
	vm_addr_struct_t        addr_u)
{
	return VM_SANITIZE_UNSAFE_UNWRAP(addr_u) & mask;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_offset(
	vm_addr_struct_t        offset_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_address_t        addr,
	vm_map_address_t        end,
	vm_map_offset_t        *offset)
{
	*offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);

	if ((*offset < addr) || (*offset > end)) {
		*offset = 0;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_mask(
	vm_addr_struct_t        mask_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_offset_t        *mask)
{
	*mask = VM_SANITIZE_UNSAFE_UNWRAP(mask_u);

	/*
	 * Adding validation to mask has high ABI risk and low security value.
	 * The only internal function that deals with mask is vm_map_locate_space,
	 * and it currently ensures that addresses are aligned to a page boundary
	 * even for weird alignment requests.
	 *
	 * rdar://120445665
	 */

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_object_size(
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_object_offset_t     *size)
{
	mach_vm_size_t  size_aligned;

	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	size_aligned = vm_map_round_page_mask(*size, PAGE_MASK);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
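
/*
 * Caller-side sketch (hypothetical): a routine that treats size == 0 as a
 * trivial success passes VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS and converts
 * the VM_ERR_RETURN_NOW sentinel into its own early return.
 *
 *	kr = vm_sanitize_object_size(size_u, caller,
 *	    VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS, &size);
 *	if (kr == VM_ERR_RETURN_NOW) {
 *		return KERN_SUCCESS;
 *	}
 */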

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_size(
	vm_addr_struct_t        offset_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map,
	vm_sanitize_flags_t     flags,
	mach_vm_size_t         *size)
{
	mach_vm_size_t  offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
	vm_map_offset_t pgmask = vm_map_page_mask(map);
	mach_vm_size_t  size_aligned;

	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	/*
	 * Ensure that offset and size don't overflow when referring to the
	 * vm_object
	 */
	if (os_add_overflow(*size, offset, &size_aligned)) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * This rounding is a check on the vm_object and thus uses the kernel's PAGE_MASK
	 */
	if (vm_map_round_page_mask(size_aligned, PAGE_MASK) == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Check that a nonzero size being mapped doesn't round to 0
	 *
	 * vm_sub_no_ubsan is acceptable here since the subtraction is guaranteed
	 * not to overflow. We know size_aligned = *size + offset, and since that
	 * addition did not overflow and offset >= (offset & ~pgmask), this
	 * subtraction also cannot overflow.
	 */
	size_aligned = vm_sub_no_ubsan(size_aligned, offset & ~pgmask);

	/*
	 * This rounding is a check on the specified map and thus uses its pgmask
	 */
	size_aligned  = vm_map_round_page_mask(size_aligned, pgmask);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
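
/*
 * Worked example (illustrative, 4 KiB pages everywhere, pgmask == 0xfff):
 * offset == 0x1800 and size == 0x800 refer to object bytes [0x1800, 0x2000).
 * size + offset == 0x2000 does not overflow and rounds to a nonzero value;
 * subtracting the page-truncated offset (0x1000) leaves 0x1000, which rounds
 * to one 4 KiB page: the span the map must cover starting at offset 0x1000.
 */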

static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_addr_size(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null)
{
	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	if (vm_sanitize_caller->err_compat_addr_size) {
		compat = (vm_sanitize_caller->err_compat_addr_size)
		    (initial_kr, VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u),
		    pgmask, map_or_null);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary addr 0x%llx size 0x%llx pgmask "
			    "0x%llx passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u), pgmask,
			    vm_sanitize_caller->vmsc_caller_name, initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_ADDR_SIZE,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			VM_SANITIZE_UNSAFE_UNWRAP(addr_u),
			VM_SANITIZE_UNSAFE_UNWRAP(size_u),
			pgmask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	return vm_sanitize_apply_err_rewrite_policy(initial_kr, compat);
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_size(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *addr,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	/*
	 * map_or_null is not available from all call sites.
	 * Use pgmask instead of vm_map_page_mask(map) for alignment.
	 */

	vm_map_offset_t addr_aligned = 0;
	vm_map_offset_t end_aligned = 0, end_unaligned = 0;
	kern_return_t kr;

	*addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	*size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		assert(!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES));
	}

#if KASAN_TBI
	if (flags & VM_SANITIZE_FLAGS_CANONICALIZE) {
		*addr = vm_memtag_canonicalize_kernel(*addr);
	}
#endif /* KASAN_TBI */


	addr_aligned = vm_map_trunc_page_mask(*addr, pgmask);

	/*
	 * Ensure that the address is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_START) && (*addr & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Ensure that the size is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_SIZE) && (*size & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		/*
		 * NOTE: these early returns bypass the VM_SANITIZE_FLAGS_CHECK_ADDR_RANGE
		 * check. Since the size is 0, the range [start, end) is empty and thus
		 * no values within this range can overflow the upper bits.
		 */
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			*addr = 0;
			*end = 0;
			/* size is already 0 */
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			kr = KERN_INVALID_ARGUMENT;
			goto unsanitary;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
				/* addr is already set */
				*end = *addr;
				/* size is already 0 */
				return KERN_SUCCESS;
			} else {
				*addr = addr_aligned;
				*end = addr_aligned;
				/* size is already 0 */
				return KERN_SUCCESS;
			}
		}
	}

	/*
	 * Compute the aligned end now
	 */
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		*addr = addr_aligned;
	}
	if (__improbable(os_add_overflow(*addr, *size, &end_unaligned))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	end_aligned = vm_map_round_page_mask(end_unaligned, pgmask);
	if (__improbable(end_aligned <= addr_aligned)) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
		/* addr and size are already set */
		*end = end_unaligned;
	} else {
		*addr = addr_aligned;
		*end = end_aligned;
		/*
		 * vm_sub_no_ubsan is acceptable since the subtraction is guaranteed to
		 * not overflow, as we have already verified end_aligned > addr_aligned.
		 */
		*size = vm_sub_no_ubsan(end_aligned, addr_aligned);
	}

	if (flags & VM_SANITIZE_FLAGS_CHECK_ADDR_RANGE) {
#if defined(__arm64__) && MACH_ASSERT
		/*
		 * Make sure that this fails noisily if someone adds support for large
		 * VA extensions. With such extensions, this code will have to check
		 * ID_AA64MMFR2_EL1 to get the actual max VA size for the system,
		 * instead of assuming it is 48 bits.
		 */
		assert((__builtin_arm_rsr64("ID_AA64MMFR2_EL1") & ID_AA64MMFR2_EL1_VARANGE_MASK) == 0);
#endif /* defined(__arm64__) && MACH_ASSERT */
		const uint64_t max_va_bits = 48;
		const mach_vm_offset_t va_range_upper_bound = (1ULL << max_va_bits);
		const mach_vm_offset_t va_mask = va_range_upper_bound - 1;

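		/*
		 * Example (illustrative): with 48-bit VAs, the range
		 * [0xffffffffff00, 0x1000000000f00) straddles the 1ULL << 48
		 * boundary, so iterating it would wrap into the upper bits and
		 * it is rejected below; a range ending exactly at 1ULL << 48
		 * is allowed because end is exclusive.
		 */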
		if ((*addr & ~va_mask) != (*end & ~va_mask)) {
			if (*end == va_range_upper_bound) {
				/*
				 * Since the range is exclusive of `end`, the range [start, end)
				 * does not include any invalid values in this case. Therefore,
				 * we treat this as a success and fall through.
				 */
			} else {
				/*
				 * This means iterating within the range [start, end) may
				 * overflow above the VA bits supported by the system. Since
				 * these bits may be used by the kernel or hardware to store
				 * other values, we should not allow the operation to proceed.
				 */
				kr = KERN_INVALID_ADDRESS;
				goto unsanitary;
			}
		}
	}

	return KERN_SUCCESS;

unsanitary:
	*addr = 0;
	*end = 0;
	*size = 0;
	return vm_sanitize_err_compat_addr_size(kr, vm_sanitize_caller,
	           addr_u, size_u, pgmask, map_or_null);
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_end(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        mask,
	vm_map_t                map_or_null,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *start,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	vm_size_struct_t size_u = vm_sanitize_compute_ut_size(addr_u, end_u);

	return vm_sanitize_addr_size(addr_u, size_u, vm_sanitize_caller, mask,
	           map_or_null, flags, start, end, size);
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_prot(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map __unused,
	vm_prot_t               extra_mask,
	vm_prot_t              *prot)
{
	*prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	if (__improbable(*prot & ~(VM_SANITIZE_PROT_ALLOWED | extra_mask))) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

#if defined(__x86_64__)
	if ((*prot & VM_PROT_UEXEC) &&
	    !pmap_supported_feature(map->pmap, PMAP_FEAT_UEXEC)) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}
#endif

	return KERN_SUCCESS;
}

/*
 * *out_cur and *out_max are modified when there is an err compat rewrite;
 * otherwise they are left unchanged.
 */
static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_cur_and_max_prots(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_prot_t               extra_mask,
	vm_prot_t              *out_cur,
	vm_prot_t              *out_max)
{
	vm_prot_t initial_cur_prot = VM_SANITIZE_UNSAFE_UNWRAP(cur_prot_u);
	vm_prot_t initial_max_prot = VM_SANITIZE_UNSAFE_UNWRAP(max_prot_u);

	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	vm_prot_t compat_cur_prot = initial_cur_prot;
	vm_prot_t compat_max_prot = initial_max_prot;
	if (vm_sanitize_caller->err_compat_prot_cur_max) {
		compat = (vm_sanitize_caller->err_compat_prot_cur_max)
		    (initial_kr, &compat_cur_prot, &compat_max_prot, extra_mask);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary vm_prot cur %d max %d "
			    "passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    initial_cur_prot, initial_max_prot,
			    vm_sanitize_caller->vmsc_caller_name,
			    initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_PROT_CUR_MAX,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			initial_cur_prot,
			initial_max_prot,
			extra_mask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	if (compat.should_rewrite) {
		*out_cur = compat_cur_prot;
		*out_max = compat_max_prot;
		return compat.compat_kr;
	} else {
		/* out_cur and out_max unchanged */
		return initial_kr;
	}
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_cur_and_max_prots(
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_map_t                map,
	vm_prot_t               extra_mask,
	vm_prot_t              *cur_prot,
	vm_prot_t              *max_prot)
{
	kern_return_t kr;

	kr = vm_sanitize_prot(cur_prot_u, vm_sanitize_caller, map, extra_mask, cur_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}

	kr = vm_sanitize_prot(max_prot_u, vm_sanitize_caller, map, extra_mask, max_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}


	/*
	 * This check needs to be performed on the actual protection bits.
	 * vm_sanitize_prot restricts cur and max prot to
	 * (VM_PROT_ALL | VM_PROT_ALLEXEC | extra_mask), but we don't enforce
	 * ordering on the extra_mask bits. (cur & max) == cur tests that cur
	 * is a subset of max: e.g. cur = R with max = RW passes, while
	 * cur = RW with max = R fails.
	 */
	if (__improbable((*cur_prot & *max_prot & VM_SANITIZE_PROT_ALLOWED) !=
	    (*cur_prot & VM_SANITIZE_PROT_ALLOWED))) {
		/* cur is more permissive than max */
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	return KERN_SUCCESS;

unsanitary:
	*cur_prot = VM_PROT_NONE;
	*max_prot = VM_PROT_NONE;
	/* error compat may set cur/max to something other than 0/0 */
	return vm_sanitize_err_compat_cur_and_max_prots(kr, vm_sanitize_caller,
	           cur_prot_u, max_prot_u, extra_mask, cur_prot, max_prot);
}

__attribute__((always_inline, warn_unused_result))
vm_prot_t
vm_sanitize_prot_bsd(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused)
{
	vm_prot_t prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	/*
	 * Strip all protections that are not allowed
	 */
	prot &= (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ);
	return prot;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_memory_entry_perm(
	vm_prot_ut              perm_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_prot_t               extra_mask,
	vm_prot_t              *perm)
{
	vm_prot_t prot;
	vm_prot_t map_mem_flags;
	vm_prot_t access;

	*perm = VM_SANITIZE_UNSAFE_UNWRAP(perm_u);
	prot = *perm & MAP_MEM_PROT_MASK;
	map_mem_flags = *perm & MAP_MEM_FLAGS_MASK;
	access = GET_MAP_MEM(*perm);

	if ((flags & VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS) &&
	    (map_mem_flags & ~MAP_MEM_FLAGS_USER)) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		*perm = VM_PROT_NONE;
		return KERN_INVALID_VALUE;
	}

	/*
	 * Clear the prot bits in perm and set only the allowed values
	 */
	*perm &= ~MAP_MEM_PROT_MASK;
	*perm |= (prot & (VM_PROT_ALL | extra_mask));

	/*
	 * No checks on access
	 */
	(void) access;

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_inherit(
	vm_inherit_ut           inherit_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_inherit_t           *inherit)
{
	*inherit = VM_SANITIZE_UNSAFE_UNWRAP(inherit_u);

	if (__improbable(*inherit > VM_INHERIT_LAST_VALID)) {
		*inherit = VM_INHERIT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_behavior(
	vm_behavior_ut          behavior_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_behavior_t          *behavior)
{
	*behavior = VM_SANITIZE_UNSAFE_UNWRAP(behavior_u);

	if (__improbable((*behavior > VM_BEHAVIOR_LAST_VALID)
	    || (*behavior < 0))) {
		*behavior = VM_BEHAVIOR_DEFAULT;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}


#if PAGE_SHIFT == 0
#pragma clang attribute pop
#endif

#if !__OPTIMIZE__
#pragma clang attribute pop
#endif
865