/*
 * Copyright (c) 2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* avoid includes here; we want these pragmas to also affect included inline functions */
#include <mach/machine/vm_param.h> /* to get PAGE_SHIFT without the inline functions from mach/vm_param.h */
/*
 * On 4k-hardware-page arm64 systems, the PAGE_SHIFT macro does not resolve to
 * a constant, but instead to a variable whose value is determined at boot
 * depending on the amount of RAM installed.
 *
 * In these cases, actual instructions need to be emitted to compute values like
 * PAGE_SIZE = (1 << PAGE_SHIFT), which means UBSan checks will be generated
 * as well since the values cannot be computed at compile time.
 *
 * Therefore, we disable arithmetic UBSan checks on these configurations. We
 * detect them with PAGE_SHIFT == 0, since (during the preprocessing phase)
 * undefined symbols resolve to 0, whereas PAGE_SHIFT will resolve to its
 * actual nonzero value if it is defined as a macro.
 */
#if PAGE_SHIFT == 0
#pragma clang attribute push (__attribute__((no_sanitize("signed-integer-overflow", \
        "unsigned-integer-overflow", "shift", "unsigned-shift-base"))), apply_to=function)
#endif

/* Disabling optimizations makes it impossible to optimize out UBSan checks */
#if !__OPTIMIZE__
#pragma clang attribute push (__attribute__((no_sanitize("undefined", \
        "integer", "unsigned-shift-base", "nullability", "bounds"))), apply_to=function)
#endif

#include <vm/vm_map_xnu.h>
#include <vm/vm_sanitize_internal.h>
#include <vm/vm_object_internal.h>

#define VM_SANITIZE_PROT_ALLOWED (VM_PROT_ALL | VM_PROT_ALLEXEC)

// TODO: enable telemetry and ktriage separately?

/* Also send telemetry output to kernel serial console? */
static TUNABLE(bool, vm_sanitize_telemeter_to_serial,
    "vm_sanitize_telemeter_to_serial", false);

/*
 * Arithmetic macros that suppress UBSan. os_xyz_overflow does not generate a
 * UBSan overflow check, since it indicates to the compiler that overflow is
 * (potentially) intentional and well-defined.
 *
 * These macros ignore the value that indicates whether overflow actually
 * occurred, so a comment should be left explaining why it is unlikely to
 * happen or is otherwise not a concern.
 */
#define vm_add_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_add_overflow(a, b, &TMP); TMP; })
#define vm_sub_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_sub_overflow(a, b, &TMP); TMP; })
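
/*
 * Illustrative sketch only (hypothetical locals, not code from this file):
 * a typical use wraps arithmetic that may intentionally overflow, then
 * rejects wrapped values with an explicit check instead of relying on UBSan.
 *
 *     vm_address_t end = vm_add_no_ubsan(addr, size);  // may wrap, by design
 *     if (end < addr) {
 *             return KERN_INVALID_ARGUMENT;            // wrap caught manually
 *     }
 */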

static inline
kern_return_t
vm_sanitize_apply_err_rewrite_policy(kern_return_t initial_kr, vm_sanitize_compat_rewrite_t rewrite)
{
	return rewrite.should_rewrite ? rewrite.compat_kr : initial_kr;
}

__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_wrap_addr(vm_address_t val)
{
	return (vm_addr_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_wrap_size(vm_size_t val)
{
	return (vm_size_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm32_size_struct_t
vm32_sanitize_wrap_size(vm32_size_t val)
{
	return (vm32_size_struct_t) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_prot_ut
vm_sanitize_wrap_prot(vm_prot_t val)
{
	return (vm_prot_ut) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_inherit_ut
vm_sanitize_wrap_inherit(vm_inherit_t val)
{
	return (vm_inherit_ut) { .UNSAFE = val };
}

__attribute__((always_inline, warn_unused_result))
vm_behavior_ut
vm_sanitize_wrap_behavior(vm_behavior_t val)
{
	return (vm_behavior_ut) { .UNSAFE = val };
}
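
/*
 * Illustrative sketch (hypothetical entry point, not part of this file):
 * a trusted boundary wraps raw user-supplied values into unsafe types as
 * early as possible, so that only vm_sanitize_* checkers unwrap them.
 *
 *     kern_return_t
 *     example_syscall(vm_address_t raw_addr, vm_size_t raw_size)
 *     {
 *             vm_addr_struct_t addr_u = vm_sanitize_wrap_addr(raw_addr);
 *             vm_size_struct_t size_u = vm_sanitize_wrap_size(raw_size);
 *             // ... hand addr_u/size_u to a sanitizer before any use ...
 *             return KERN_SUCCESS;
 *     }
 */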

#ifdef  MACH_KERNEL_PRIVATE
__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_expand_addr_to_64(vm32_address_ut val)
{
	return (vm_addr_struct_t) { .UNSAFE = val.UNSAFE };
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_expand_size_to_64(vm32_size_ut val)
{
	return (vm_size_struct_t) { .UNSAFE = val.UNSAFE };
}

__attribute__((always_inline, warn_unused_result))
vm32_address_ut
vm_sanitize_trunc_addr_to_32(vm_addr_struct_t val)
{
	vm32_address_ut ret;

	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_address_t, val.UNSAFE);
	return ret;
}

__attribute__((always_inline, warn_unused_result))
vm32_size_ut
vm_sanitize_trunc_size_to_32(vm_size_struct_t val)
{
	vm32_size_ut ret;

	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_size_t, val.UNSAFE);
	return ret;
}

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm32_address_ut         addr_u,
	vm32_size_ut            size_u,
	vm32_address_ut        *addr_out_u)
{
	vm32_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm32_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
}
#endif  /* MACH_KERNEL_PRIVATE */

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_addr_struct_t       *addr_out_u)
{
	mach_vm_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	mach_vm_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
}

__attribute__((always_inline, warn_unused_result, overloadable))
bool
vm_sanitize_add_overflow(
	vm_size_struct_t        size1_u,
	vm_size_struct_t        size2_u,
	vm_size_struct_t       *size_out_u)
{
	mach_vm_size_t    size1 = VM_SANITIZE_UNSAFE_UNWRAP(size1_u);
	mach_vm_size_t    size2 = VM_SANITIZE_UNSAFE_UNWRAP(size2_u);

	return os_add_overflow(size1, size2, &size_out_u->UNSAFE);
}
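
/*
 * Illustrative sketch (hypothetical locals): the overflow-checked addition
 * keeps its result wrapped in an unsafe type, so even a checked sum cannot
 * leak into trusted code without passing through a sanitizer first.
 *
 *     vm_addr_struct_t end_u;
 *     if (vm_sanitize_add_overflow(addr_u, size_u, &end_u)) {
 *             return KERN_INVALID_ARGUMENT;   // addr + size wrapped around
 *     }
 */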

/*
 * vm_*_no_ubsan is acceptable in these functions since they operate on unsafe
 * types. The return value is also an unsafe type and must be sanitized before
 * it can be used in other functions.
 */
__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_compute_ut_end(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u)
{
	vm_addr_struct_t end_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_size_t size_local = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	VM_SANITIZE_UT_SET(end_u, vm_add_no_ubsan(addr_local, size_local));
	return end_u;
}

__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_compute_ut_size(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u)
{
	vm_size_struct_t size_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_address_t end_local = VM_SANITIZE_UNSAFE_UNWRAP(end_u);

	VM_SANITIZE_UT_SET(size_u, vm_sub_no_ubsan(end_local, addr_local));
	return size_u;
}

__attribute__((always_inline, warn_unused_result))
mach_vm_address_t
vm_sanitize_addr(
	vm_map_t                map,
	vm_addr_struct_t        addr_u)
{
	mach_vm_address_t addr   = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_map_offset_t   pgmask = vm_map_page_mask(map);

	return vm_map_trunc_page_mask(addr, pgmask);
}

__attribute__((always_inline, warn_unused_result))
mach_vm_offset_t
vm_sanitize_offset_in_page(
	vm_map_offset_t         mask,
	vm_addr_struct_t        addr_u)
{
	return VM_SANITIZE_UNSAFE_UNWRAP(addr_u) & mask;
}
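
/*
 * Worked example (assuming a 16K-page map, so the page mask is 0x3FFF):
 *
 *     vm_sanitize_addr(map, addr_u)             // 0x14003 -> 0x14000
 *     vm_sanitize_offset_in_page(mask, addr_u)  // 0x14003 -> 0x3
 */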

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_offset(
	vm_addr_struct_t        offset_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_address_t        addr,
	vm_map_address_t        end,
	vm_map_offset_t        *offset)
{
	*offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);

	if ((*offset < addr) || (*offset > end)) {
		*offset = 0;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_mask(
	vm_addr_struct_t        mask_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_offset_t        *mask)
{
	*mask = VM_SANITIZE_UNSAFE_UNWRAP(mask_u);

	/*
	 * Adding validation to mask has high ABI risk and low security value.
	 * The only internal function that deals with mask is vm_map_locate_space,
	 * and it currently ensures that addresses are aligned to a page boundary
	 * even for weird alignment requests.
	 *
	 * rdar://120445665
	 */

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_object_size(
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_object_offset_t     *size)
{
	mach_vm_size_t  size_aligned;

	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	size_aligned = vm_map_round_page_mask(*size, PAGE_MASK);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
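
/*
 * Illustrative sketch (the caller constant is hypothetical): the zero-size
 * flags let each API keep its historical semantics. VM_ERR_RETURN_NOW asks
 * the calling wrapper to stop early and report success.
 *
 *     kr = vm_sanitize_object_size(size_u, VM_SANITIZE_CALLER_EXAMPLE,
 *         VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS, &size);
 *     if (kr != KERN_SUCCESS) {
 *             return vm_sanitize_get_kr(kr);  // VM_ERR_RETURN_NOW -> KERN_SUCCESS
 *     }
 */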

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_size(
	vm_addr_struct_t        offset_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map,
	vm_sanitize_flags_t     flags,
	mach_vm_size_t         *size)
{
	mach_vm_size_t  offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
	vm_map_offset_t pgmask = vm_map_page_mask(map);
	mach_vm_size_t  size_aligned;

	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	/*
	 * Ensure that offset and size don't overflow when referring to the
	 * vm_object
	 */
	if (os_add_overflow(*size, offset, &size_aligned)) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * This rounding is a check on the vm_object and thus uses the kernel's PAGE_MASK
	 */
	if (vm_map_round_page_mask(size_aligned, PAGE_MASK) == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Check that a non-zero size being mapped doesn't round to 0
	 *
	 * vm_sub_no_ubsan is acceptable here since the subtraction is guaranteed
	 * not to overflow. We know size_aligned = *size + offset, and since that
	 * addition did not overflow and offset >= (offset & ~pgmask), this
	 * subtraction also cannot overflow.
	 */
	size_aligned = vm_sub_no_ubsan(size_aligned, offset & ~pgmask);

	/*
	 * This rounding is a check on the specified map and thus uses its pgmask
	 */
	size_aligned  = vm_map_round_page_mask(size_aligned, pgmask);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
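
/*
 * Worked example for the rounding above (assuming a 4K-page map, pgmask ==
 * 0xFFF): offset 0x1800, size 0x400. Then size + offset = 0x1C00, and
 * subtracting the page-truncated offset 0x1000 leaves 0xC00, which rounds
 * up to 0x1000: the mapping spans exactly one page of the target map.
 */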

static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_addr_size(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null)
{
	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	if (vm_sanitize_caller->err_compat_addr_size) {
		compat = (vm_sanitize_caller->err_compat_addr_size)
		    (initial_kr, VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u),
		    pgmask, map_or_null);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary addr 0x%llx size 0x%llx pgmask "
			    "0x%llx passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u), pgmask,
			    vm_sanitize_caller->vmsc_caller_name, initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_ADDR_SIZE,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			VM_SANITIZE_UNSAFE_UNWRAP(addr_u),
			VM_SANITIZE_UNSAFE_UNWRAP(size_u),
			pgmask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	return vm_sanitize_apply_err_rewrite_policy(initial_kr, compat);
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_size(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *addr,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	/*
	 * map_or_null is not available from all call sites.
	 * Use pgmask instead of vm_map_page_mask(map) for alignment.
	 */

	vm_map_offset_t addr_aligned = 0;
	vm_map_offset_t end_aligned = 0, end_unaligned = 0;
	kern_return_t kr;

	*addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	*size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		assert(!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES));
	}

#if CONFIG_KERNEL_TAGGING
	if (flags & VM_SANITIZE_FLAGS_CANONICALIZE) {
		*addr = vm_memtag_canonicalize_kernel(*addr);
	}
#endif /* CONFIG_KERNEL_TAGGING */
	addr_aligned = vm_map_trunc_page_mask(*addr, pgmask);

	/*
	 * Ensure that the address is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_START) && (*addr & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Ensure that the size is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_SIZE) && (*size & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		/*
		 * NOTE: these early returns bypass the VM_SANITIZE_FLAGS_CHECK_ADDR_RANGE
		 * check. Since the size is 0, the range [start, end) is empty and thus
		 * no values within this range can overflow the upper bits.
		 */
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			*addr = 0;
			*end = 0;
			/* size is already 0 */
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			kr = KERN_INVALID_ARGUMENT;
			goto unsanitary;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
				/* addr is already set */
				*end = *addr;
				/* size is already 0 */
				return KERN_SUCCESS;
			} else {
				*addr = addr_aligned;
				*end = addr_aligned;
				/* size is already 0 */
				return KERN_SUCCESS;
			}
		}
	}

	/*
	 * Compute the aligned end now
	 */
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		*addr = addr_aligned;
	}
	if (__improbable(os_add_overflow(*addr, *size, &end_unaligned))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	end_aligned = vm_map_round_page_mask(end_unaligned, pgmask);
	if (__improbable(end_aligned <= addr_aligned)) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
		/* addr and size are already set */
		*end = end_unaligned;
	} else {
		*addr = addr_aligned;
		*end = end_aligned;
		/*
		 * vm_sub_no_ubsan is acceptable since the subtraction is guaranteed
		 * not to overflow, as we have already verified end_aligned > addr_aligned.
		 */
		*size = vm_sub_no_ubsan(end_aligned, addr_aligned);
	}

	if (flags & VM_SANITIZE_FLAGS_CHECK_ADDR_RANGE) {
#if defined(__arm64__) && MACH_ASSERT
		/*
		 * Make sure that this fails noisily if someone adds support for large
		 * VA extensions. With such extensions, this code will have to check
		 * ID_AA64MMFR2_EL1 to get the actual max VA size for the system,
		 * instead of assuming it is 48 bits.
		 */
		assert((__builtin_arm_rsr64("ID_AA64MMFR2_EL1") & ID_AA64MMFR2_EL1_VARANGE_MASK) == 0);
#endif /* defined(__arm64__) && MACH_ASSERT */
		const uint64_t max_va_bits = 48;
		const mach_vm_offset_t va_range_upper_bound = (1ULL << max_va_bits);
		const mach_vm_offset_t va_mask = va_range_upper_bound - 1;

		if ((*addr & ~va_mask) != (*end & ~va_mask)) {
			if (*end == va_range_upper_bound) {
				/*
				 * Since the range is exclusive of `end`, the range [start, end)
				 * does not include any invalid values in this case. Therefore,
				 * we treat this as a success and fall through.
				 */
			} else {
				/*
				 * This means iterating within the range [start, end) may
				 * overflow above the VA bits supported by the system. Since
				 * these bits may be used by the kernel or hardware to store
				 * other values, we should not allow the operation to proceed.
				 */
				kr = KERN_INVALID_ADDRESS;
				goto unsanitary;
			}
		}
	}

	return KERN_SUCCESS;

unsanitary:
	*addr = 0;
	*end = 0;
	*size = 0;
	return vm_sanitize_err_compat_addr_size(kr, vm_sanitize_caller,
	           addr_u, size_u, pgmask, map_or_null);
}
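
/*
 * Worked example (assuming a 4K-page map, pgmask == 0xFFF, and no strict
 * alignment flags): addr 0x1F00, size 0x200. Then addr_aligned = 0x1000,
 * end_unaligned = 0x2100, end_aligned = 0x3000, so the sanitized triple
 * becomes addr 0x1000, end 0x3000, size 0x2000 (two whole pages).
 */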

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_end(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        mask,
	vm_map_t                map_or_null,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *start,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	vm_size_struct_t size_u = vm_sanitize_compute_ut_size(addr_u, end_u);

	return vm_sanitize_addr_size(addr_u, size_u, vm_sanitize_caller, mask,
	           map_or_null, flags, start, end, size);
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_prot(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map __unused,
	vm_prot_t               extra_mask,
	vm_prot_t              *prot)
{
	*prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	if (__improbable(*prot & ~(VM_SANITIZE_PROT_ALLOWED | extra_mask))) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

#if defined(__x86_64__)
	if ((*prot & VM_PROT_UEXEC) &&
	    !pmap_supported_feature(map->pmap, PMAP_FEAT_UEXEC)) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}
#endif

	return KERN_SUCCESS;
}
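
/*
 * Illustrative example (hypothetical values): with extra_mask == VM_PROT_COPY,
 * a request of VM_PROT_READ | VM_PROT_COPY is accepted, while any request
 * carrying a bit outside VM_SANITIZE_PROT_ALLOWED | VM_PROT_COPY fails with
 * KERN_INVALID_ARGUMENT and reports VM_PROT_NONE through *prot.
 */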

/*
 * *out_cur and *out_max are modified when there is an err compat rewrite;
 * otherwise they are left unchanged.
 */
static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_cur_and_max_prots(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_prot_t               extra_mask,
	vm_prot_t              *out_cur,
	vm_prot_t              *out_max)
{
	vm_prot_t initial_cur_prot = VM_SANITIZE_UNSAFE_UNWRAP(cur_prot_u);
	vm_prot_t initial_max_prot = VM_SANITIZE_UNSAFE_UNWRAP(max_prot_u);

	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	vm_prot_t compat_cur_prot = initial_cur_prot;
	vm_prot_t compat_max_prot = initial_max_prot;
	if (vm_sanitize_caller->err_compat_prot_cur_max) {
		compat = (vm_sanitize_caller->err_compat_prot_cur_max)
		    (initial_kr, &compat_cur_prot, &compat_max_prot, extra_mask);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary vm_prot cur %d max %d "
			    "passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    initial_cur_prot, initial_max_prot,
			    vm_sanitize_caller->vmsc_caller_name,
			    initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_PROT_CUR_MAX,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			initial_cur_prot,
			initial_max_prot,
			extra_mask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	if (compat.should_rewrite) {
		*out_cur = compat_cur_prot;
		*out_max = compat_max_prot;
		return compat.compat_kr;
	} else {
		/* out_cur and out_max unchanged */
		return initial_kr;
	}
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_cur_and_max_prots(
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_map_t                map,
	vm_prot_t               extra_mask,
	vm_prot_t              *cur_prot,
	vm_prot_t              *max_prot)
{
	kern_return_t kr;

	kr = vm_sanitize_prot(cur_prot_u, vm_sanitize_caller, map, extra_mask, cur_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}

	kr = vm_sanitize_prot(max_prot_u, vm_sanitize_caller, map, extra_mask, max_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}

	/*
	 * This check needs to be performed on the actual protection bits.
	 * vm_sanitize_prot restricts cur and max prot to
	 * (VM_PROT_ALL | VM_PROT_ALLEXEC | extra_mask), but we don't enforce
	 * ordering on the extra_mask bits.
	 */
	if (__improbable((*cur_prot & *max_prot & VM_SANITIZE_PROT_ALLOWED) !=
	    (*cur_prot & VM_SANITIZE_PROT_ALLOWED))) {
		/* cur is more permissive than max */
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	return KERN_SUCCESS;

unsanitary:
	*cur_prot = VM_PROT_NONE;
	*max_prot = VM_PROT_NONE;
	/* error compat may set cur/max to something other than 0/0 */
	return vm_sanitize_err_compat_cur_and_max_prots(kr, vm_sanitize_caller,
	           cur_prot_u, max_prot_u, extra_mask, cur_prot, max_prot);
}
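
/*
 * Illustrative example: cur = VM_PROT_READ | VM_PROT_WRITE with
 * max = VM_PROT_READ is rejected, since (cur & max) == VM_PROT_READ != cur:
 * the current protection must be a subset of the maximum protection.
 */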

__attribute__((always_inline, warn_unused_result))
vm_prot_t
vm_sanitize_prot_bsd(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused)
{
	vm_prot_t prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	/*
	 * Strip all protections that are not allowed
	 */
	prot &= (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ);
	return prot;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_memory_entry_perm(
	vm_prot_ut              perm_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_prot_t               extra_mask,
	vm_prot_t              *perm)
{
	vm_prot_t prot;
	vm_prot_t map_mem_flags;
	vm_prot_t access;

	*perm = VM_SANITIZE_UNSAFE_UNWRAP(perm_u);
	prot = *perm & MAP_MEM_PROT_MASK;
	map_mem_flags = *perm & MAP_MEM_FLAGS_MASK;
	access = GET_MAP_MEM(*perm);

	if ((flags & VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS) &&
	    (map_mem_flags & ~MAP_MEM_FLAGS_USER)) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		*perm = VM_PROT_NONE;
		return KERN_INVALID_VALUE;
	}

	/*
	 * Clear prot bits in perm and set them to only allowed values
	 */
	*perm &= ~MAP_MEM_PROT_MASK;
	*perm |= (prot & (VM_PROT_ALL | extra_mask));

	/*
	 * No checks on access
	 */
	(void) access;

	return KERN_SUCCESS;
}
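
/*
 * Illustrative sketch (hypothetical value): a perm word packs protection
 * bits, MAP_MEM_* flags, and an access field. For instance, with
 * perm = MAP_MEM_NAMED_CREATE | VM_PROT_READ, the flag bit and VM_PROT_READ
 * survive, while any prot bit outside VM_PROT_ALL | extra_mask is silently
 * stripped rather than rejected.
 */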

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_inherit(
	vm_inherit_ut           inherit_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_inherit_t           *inherit)
{
	*inherit = VM_SANITIZE_UNSAFE_UNWRAP(inherit_u);

	if (__improbable(*inherit > VM_INHERIT_LAST_VALID)) {
		*inherit = VM_INHERIT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_behavior(
	vm_behavior_ut          behavior_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_behavior_t          *behavior)
{
	*behavior = VM_SANITIZE_UNSAFE_UNWRAP(behavior_u);

	if (__improbable((*behavior > VM_BEHAVIOR_LAST_VALID)
	    || (*behavior < 0))) {
		*behavior = VM_BEHAVIOR_DEFAULT;
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}

#if PAGE_SHIFT == 0
#pragma clang attribute pop
#endif

#if !__OPTIMIZE__
#pragma clang attribute pop
#endif