xref: /xnu-11215.81.4/osfmk/vm/vm_sanitize.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /* avoid includes here; we want these pragmas to also affect included inline functions */
30 #include <mach/machine/vm_param.h> /* to get PAGE_SHIFT without the inline functions from mach/vm_param.h */
31 /*
32  * On 4k-hardware-page arm64 systems, the PAGE_SHIFT macro does not resolve to
33  * a constant, but instead a variable whose value is determined on boot depending
34  * on the amount of RAM installed.
35  *
36  * In these cases, actual instructions need to be emitted to compute values like
37  * PAGE_SIZE = (1 << PAGE_SHIFT), which means UBSan checks will be generated
38  * as well since the values cannot be computed at compile time.
39  *
40  * Therefore, we disable arithmetic UBSan checks on these configurations. We
 * detect them with PAGE_SHIFT == 0, since (during the preprocessing phase)
 * undefined symbols resolve to 0, whereas PAGE_SHIFT will resolve to its
 * actual nonzero value if it is defined as a macro.
44  */
45 #if PAGE_SHIFT == 0
46 #pragma clang attribute push (__attribute__((no_sanitize("signed-integer-overflow", \
47         "unsigned-integer-overflow", "shift", "unsigned-shift-base"))), apply_to=function)
48 #endif
49 
50 /* Disabling optimizations makes it impossible to optimize out UBSan checks */
51 #if !__OPTIMIZE__
52 #pragma clang attribute push (__attribute__((no_sanitize("undefined", \
53         "integer", "unsigned-shift-base", "nullability", "bounds"))), apply_to=function)
54 #endif
55 
56 #include <vm/vm_map_xnu.h>
57 #include <vm/vm_sanitize_internal.h>
58 #include <vm/vm_object_internal.h>
59 
/* Protection bits the sanitizers accept: VM_PROT_ALL plus the ALLEXEC bits. */
#define VM_SANITIZE_PROT_ALLOWED (VM_PROT_ALL | VM_PROT_ALLEXEC)

// TODO: enable telemetry and ktriage separately?

/* Also send telemetry output to kernel serial console? */
/* Boot-time tunable; checked by the DEVELOPMENT/DEBUG printf sites below. */
static TUNABLE(bool, vm_sanitize_telemeter_to_serial,
    "vm_sanitize_telemeter_to_serial", false);
67 
/*
 * Arithmetic macros that suppress UBSan. os_xyz_overflow does not generate a
 * UBSan overflow check, since it indicates to the compiler that overflow is
 * (potentially) intentional and well-defined.
 *
 * These macros ignore the value that indicates whether overflow actually
 * occurred, so a comment should be left explaining why it is unlikely to
 * happen or is otherwise not a concern.
 */
#define vm_add_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_add_overflow(a, b, &TMP); TMP; })
#define vm_sub_no_ubsan(a, b) ({ typeof(a+b) TMP; (void) os_sub_overflow(a, b, &TMP); TMP; })
79 
80 static inline
81 kern_return_t
vm_sanitize_apply_err_rewrite_policy(kern_return_t initial_kr,vm_sanitize_compat_rewrite_t rewrite)82 vm_sanitize_apply_err_rewrite_policy(kern_return_t initial_kr, vm_sanitize_compat_rewrite_t rewrite)
83 {
84 	return rewrite.should_rewrite ? rewrite.compat_kr : initial_kr;
85 }
86 
87 __attribute__((always_inline, warn_unused_result))
88 vm_addr_struct_t
vm_sanitize_wrap_addr(vm_address_t val)89 vm_sanitize_wrap_addr(vm_address_t val)
90 {
91 	return (vm_addr_struct_t) { .UNSAFE = val };
92 }
93 
94 __attribute__((always_inline, warn_unused_result))
95 vm_size_struct_t
vm_sanitize_wrap_size(vm_size_t val)96 vm_sanitize_wrap_size(vm_size_t val)
97 {
98 	return (vm_size_struct_t) { .UNSAFE = val };
99 }
100 
101 __attribute__((always_inline, warn_unused_result))
102 vm32_size_struct_t
vm32_sanitize_wrap_size(vm32_size_t val)103 vm32_sanitize_wrap_size(vm32_size_t val)
104 {
105 	return (vm32_size_struct_t) { .UNSAFE = val };
106 }
107 
108 __attribute__((always_inline, warn_unused_result))
109 vm_prot_ut
vm_sanitize_wrap_prot(vm_prot_t val)110 vm_sanitize_wrap_prot(vm_prot_t val)
111 {
112 	return (vm_prot_ut) { .UNSAFE = val };
113 }
114 
115 __attribute__((always_inline, warn_unused_result))
116 vm_inherit_ut
vm_sanitize_wrap_inherit(vm_inherit_t val)117 vm_sanitize_wrap_inherit(vm_inherit_t val)
118 {
119 	return (vm_inherit_ut) { .UNSAFE = val };
120 }
121 
122 __attribute__((always_inline, warn_unused_result))
123 vm_behavior_ut
vm_sanitize_wrap_behavior(vm_behavior_t val)124 vm_sanitize_wrap_behavior(vm_behavior_t val)
125 {
126 	return (vm_behavior_ut) { .UNSAFE = val };
127 }
128 
129 #ifdef  MACH_KERNEL_PRIVATE
130 __attribute__((always_inline, warn_unused_result))
131 vm_addr_struct_t
vm_sanitize_expand_addr_to_64(vm32_address_ut val)132 vm_sanitize_expand_addr_to_64(vm32_address_ut val)
133 {
134 	return (vm_addr_struct_t) { .UNSAFE = val.UNSAFE };
135 }
136 
137 __attribute__((always_inline, warn_unused_result))
138 vm_size_struct_t
vm_sanitize_expand_size_to_64(vm32_size_ut val)139 vm_sanitize_expand_size_to_64(vm32_size_ut val)
140 {
141 	return (vm_size_struct_t) { .UNSAFE = val.UNSAFE };
142 }
143 
144 __attribute__((always_inline, warn_unused_result))
145 vm32_address_ut
vm_sanitize_trunc_addr_to_32(vm_addr_struct_t val)146 vm_sanitize_trunc_addr_to_32(vm_addr_struct_t val)
147 {
148 	vm32_address_ut ret;
149 
150 	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_address_t, val.UNSAFE);
151 	return ret;
152 }
153 
154 __attribute__((always_inline, warn_unused_result))
155 vm32_size_ut
vm_sanitize_trunc_size_to_32(vm_size_struct_t val)156 vm_sanitize_trunc_size_to_32(vm_size_struct_t val)
157 {
158 	vm32_size_ut ret;
159 
160 	ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_size_t, val.UNSAFE);
161 	return ret;
162 }
163 
164 __attribute__((always_inline, warn_unused_result, overloadable))
165 bool
vm_sanitize_add_overflow(vm32_address_ut addr_u,vm32_size_ut size_u,vm32_address_ut * addr_out_u)166 vm_sanitize_add_overflow(
167 	vm32_address_ut         addr_u,
168 	vm32_size_ut            size_u,
169 	vm32_address_ut        *addr_out_u)
170 {
171 	vm32_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
172 	vm32_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
173 
174 	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
175 }
176 #endif  /* MACH_KERNEL_PRIVATE */
177 
178 __attribute__((always_inline, warn_unused_result, overloadable))
179 bool
vm_sanitize_add_overflow(vm_addr_struct_t addr_u,vm_size_struct_t size_u,vm_addr_struct_t * addr_out_u)180 vm_sanitize_add_overflow(
181 	vm_addr_struct_t        addr_u,
182 	vm_size_struct_t        size_u,
183 	vm_addr_struct_t       *addr_out_u)
184 {
185 	mach_vm_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
186 	mach_vm_size_t    size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
187 
188 	return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
189 }
190 
191 __attribute__((always_inline, warn_unused_result, overloadable))
192 bool
vm_sanitize_add_overflow(vm_size_struct_t size1_u,vm_size_struct_t size2_u,vm_size_struct_t * size_out_u)193 vm_sanitize_add_overflow(
194 	vm_size_struct_t        size1_u,
195 	vm_size_struct_t        size2_u,
196 	vm_size_struct_t       *size_out_u)
197 {
198 	mach_vm_address_t size1 = VM_SANITIZE_UNSAFE_UNWRAP(size1_u);
199 	mach_vm_size_t    size2 = VM_SANITIZE_UNSAFE_UNWRAP(size2_u);
200 
201 	return os_add_overflow(size1, size2, &size_out_u->UNSAFE);
202 }
203 
204 /*
205  * vm_*_no_ubsan is acceptable in these functions since they operate on unsafe
206  * types. The return value is also an unsafe type and must be sanitized before
207  * it can be used in other functions.
208  */
__attribute__((always_inline, warn_unused_result))
vm_addr_struct_t
vm_sanitize_compute_ut_end(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u)
{
	vm_addr_struct_t end_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_size_t size_local = VM_SANITIZE_UNSAFE_UNWRAP(size_u);

	/* Overflow is tolerated: the result remains an unsafe type that must
	 * be sanitized before use (see comment above). */
	VM_SANITIZE_UT_SET(end_u, vm_add_no_ubsan(addr_local, size_local));
	return end_u;
}
222 
__attribute__((always_inline, warn_unused_result))
vm_size_struct_t
vm_sanitize_compute_ut_size(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u)
{
	vm_size_struct_t size_u = { 0 };
	vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	vm_address_t end_local = VM_SANITIZE_UNSAFE_UNWRAP(end_u);

	/* Underflow (end < addr) is tolerated: the result remains an unsafe
	 * type that must be sanitized before use. */
	VM_SANITIZE_UT_SET(size_u, vm_sub_no_ubsan(end_local, addr_local));
	return size_u;
}
236 
237 __attribute__((always_inline, warn_unused_result))
238 mach_vm_address_t
vm_sanitize_addr(vm_map_t map,vm_addr_struct_t addr_u)239 vm_sanitize_addr(
240 	vm_map_t                map,
241 	vm_addr_struct_t        addr_u)
242 {
243 	mach_vm_address_t addr   = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
244 	vm_map_offset_t   pgmask = vm_map_page_mask(map);
245 
246 	return vm_map_trunc_page_mask(addr, pgmask);
247 }
248 
249 __attribute__((always_inline, warn_unused_result))
250 mach_vm_offset_t
vm_sanitize_offset_in_page(vm_map_offset_t mask,vm_addr_struct_t addr_u)251 vm_sanitize_offset_in_page(
252 	vm_map_offset_t         mask,
253 	vm_addr_struct_t        addr_u)
254 {
255 	return VM_SANITIZE_UNSAFE_UNWRAP(addr_u) & mask;
256 }
257 
258 __attribute__((always_inline, warn_unused_result))
259 kern_return_t
vm_sanitize_offset(vm_addr_struct_t offset_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_map_address_t addr,vm_map_address_t end,vm_map_offset_t * offset)260 vm_sanitize_offset(
261 	vm_addr_struct_t        offset_u,
262 	vm_sanitize_caller_t    vm_sanitize_caller __unused,
263 	vm_map_address_t        addr,
264 	vm_map_address_t        end,
265 	vm_map_offset_t        *offset)
266 {
267 	*offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
268 
269 	if ((*offset < addr) || (*offset > end)) {
270 		*offset = 0;
271 		return KERN_INVALID_ARGUMENT;
272 	}
273 
274 	return KERN_SUCCESS;
275 }
276 
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_mask(
	vm_addr_struct_t        mask_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_offset_t        *mask)
{
	/* Pass the alignment mask through unchecked; see rationale below. */
	*mask = VM_SANITIZE_UNSAFE_UNWRAP(mask_u);

	/*
	 * Adding validation to mask has high ABI risk and low security value.
	 * The only internal function that deals with mask is vm_map_locate_space
	 * and it currently ensures that addresses are aligned to page boundary
	 * even for weird alignment requests.
	 *
	 * rdar://120445665
	 */

	return KERN_SUCCESS;
}
297 
298 __attribute__((always_inline, warn_unused_result))
299 kern_return_t
vm_sanitize_object_size(vm_size_struct_t size_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_sanitize_flags_t flags,vm_object_offset_t * size)300 vm_sanitize_object_size(
301 	vm_size_struct_t        size_u,
302 	vm_sanitize_caller_t    vm_sanitize_caller __unused,
303 	vm_sanitize_flags_t     flags,
304 	vm_object_offset_t     *size)
305 {
306 	mach_vm_size_t  size_aligned;
307 
308 	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
309 	/*
310 	 * Handle size zero as requested by the caller
311 	 */
312 	if (*size == 0) {
313 		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
314 			return VM_ERR_RETURN_NOW;
315 		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
316 			return KERN_INVALID_ARGUMENT;
317 		} else {
318 			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
319 			return KERN_SUCCESS;
320 		}
321 	}
322 
323 	size_aligned = vm_map_round_page_mask(*size, PAGE_MASK);
324 	if (size_aligned == 0) {
325 		*size = 0;
326 		return KERN_INVALID_ARGUMENT;
327 	}
328 
329 	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
330 		*size = size_aligned;
331 	}
332 	return KERN_SUCCESS;
333 }
334 
/*
 * Sanitize an (offset, size) pair destined for a mapping into `map`.
 * On success, *size holds the sanitized size (page-rounded to the map's
 * page mask unless VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES is set).
 * On failure, *size is zeroed.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_size(
	vm_addr_struct_t        offset_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map,
	vm_sanitize_flags_t     flags,
	mach_vm_size_t         *size)
{
	mach_vm_size_t  offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
	vm_map_offset_t pgmask = vm_map_page_mask(map);
	mach_vm_size_t  size_aligned;

	*size   = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	/*
	 * Ensure that offset and size don't overflow when referring to the
	 * vm_object
	 */
	if (os_add_overflow(*size, offset, &size_aligned)) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * This rounding is a check on the vm_object and thus uses the kernel's PAGE_MASK
	 */
	if (vm_map_round_page_mask(size_aligned, PAGE_MASK) == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Check that a non zero size being mapped doesn't round to 0
	 *
	 * vm_sub_no_ubsan is acceptable here since the subtraction is guaranteed to
	 * not overflow. We know size_aligned = *size + offset, and since that
	 * addition did not overflow and offset >= offset & ~pgmask, this
	 * subtraction also cannot overflow.
	 */
	size_aligned = vm_sub_no_ubsan(size_aligned, offset & ~pgmask);

	/*
	 * This rounding is a check on the specified map and thus uses its pgmask
	 */
	size_aligned  = vm_map_round_page_mask(size_aligned, pgmask);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
404 
/*
 * Apply the caller's address/size error-compat policy to initial_kr.
 * If the caller registered a policy hook, it may request a rewritten
 * return code and/or telemetry for the rejected (addr, size) pair.
 */
static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_addr_size(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null)
{
	/* Default: keep initial_kr, no rewrite, no telemetry. */
	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	if (vm_sanitize_caller->err_compat_addr_size) {
		compat = (vm_sanitize_caller->err_compat_addr_size)
		    (initial_kr, VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u),
		    pgmask, map_or_null);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		/* Optionally mirror the report to the serial console (tunable). */
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary addr 0x%llx size 0x%llx pgmask "
			    "0x%llx passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u), pgmask,
			    vm_sanitize_caller->vmsc_caller_name, initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_ADDR_SIZE,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			VM_SANITIZE_UNSAFE_UNWRAP(addr_u),
			VM_SANITIZE_UNSAFE_UNWRAP(size_u),
			pgmask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	return vm_sanitize_apply_err_rewrite_policy(initial_kr, compat);
}
448 
/*
 * Sanitize an (addr, size) pair against pgmask according to `flags`,
 * producing addr/end/size outputs (aligned, unless
 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES is set). On rejection all three
 * outputs are zeroed and the error code may be rewritten by the caller's
 * error-compat policy.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_size(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        pgmask,
	vm_map_t                map_or_null,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *addr,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	/*
	 * map_or_null is not available from all call sites.
	 * Use pgmask instead of vm_map_page_mask(map) for alignment.
	 */

	vm_map_offset_t addr_aligned = 0;
	vm_map_offset_t end_aligned = 0, end_unaligned = 0;
	kern_return_t kr;

	*addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	*size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/* Realigning the start and returning unaligned values are mutually
	 * exclusive requests. */
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		assert(!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES));
	}

#if CONFIG_KERNEL_TAGGING
	/* Canonicalize tagged addresses before any arithmetic on them. */
	if (flags & VM_SANITIZE_FLAGS_CANONICALIZE) {
		*addr = vm_memtag_canonicalize_address(*addr);
	}
#endif /* CONFIG_KERNEL_TAGGING */
	addr_aligned = vm_map_trunc_page_mask(*addr, pgmask);

	/*
	 * Ensure that the address is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_START) && (*addr & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Ensure that the size is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_SIZE) && (*size & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			*addr = 0;
			*end = 0;
			/* size is already 0 */
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			kr = KERN_INVALID_ARGUMENT;
			goto unsanitary;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
				/* addr is already set */
				*end = *addr;
				/* size is already 0 */
				return KERN_SUCCESS;
			} else {
				*addr = addr_aligned;
				*end = addr_aligned;
				/* size is already 0 */
				return KERN_SUCCESS;
			}
		}
	}

	/*
	 * Compute the aligned end now
	 */
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		*addr = addr_aligned;
	}
	if (__improbable(os_add_overflow(*addr, *size, &end_unaligned))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/* Rounding the end must not wrap past addr_aligned or produce an
	 * empty range. */
	end_aligned = vm_map_round_page_mask(end_unaligned, pgmask);
	if (__improbable(end_aligned <= addr_aligned)) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}


	if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
		/* addr and size are already set */
		*end = end_unaligned;
	} else {
		*addr = addr_aligned;
		*end = end_aligned;
		/*
		 * vm_sub_no_ubsan is acceptable since the subtraction is guaranteed to
		 * not overflow, as we have already verified end_aligned > addr_aligned.
		 */
		*size = vm_sub_no_ubsan(end_aligned, addr_aligned);
	}
	return KERN_SUCCESS;

unsanitary:
	/* Zero all outputs; the compat policy may still rewrite the error. */
	*addr = 0;
	*end = 0;
	*size = 0;
	return vm_sanitize_err_compat_addr_size(kr, vm_sanitize_caller,
	           addr_u, size_u, pgmask, map_or_null);
}
567 
568 __attribute__((always_inline, warn_unused_result))
569 kern_return_t
vm_sanitize_addr_end(vm_addr_struct_t addr_u,vm_addr_struct_t end_u,vm_sanitize_caller_t vm_sanitize_caller,mach_vm_offset_t mask,vm_map_t map_or_null,vm_sanitize_flags_t flags,vm_map_offset_t * start,vm_map_offset_t * end,vm_map_size_t * size)570 vm_sanitize_addr_end(
571 	vm_addr_struct_t        addr_u,
572 	vm_addr_struct_t        end_u,
573 	vm_sanitize_caller_t    vm_sanitize_caller,
574 	mach_vm_offset_t        mask,
575 	vm_map_t                map_or_null,
576 	vm_sanitize_flags_t     flags,
577 	vm_map_offset_t        *start,
578 	vm_map_offset_t        *end,
579 	vm_map_size_t          *size)
580 {
581 	vm_size_struct_t size_u = vm_sanitize_compute_ut_size(addr_u, end_u);
582 
583 	return vm_sanitize_addr_size(addr_u, size_u, vm_sanitize_caller, mask,
584 	           map_or_null, flags, start, end, size);
585 }
586 
/*
 * Sanitize a single protection value. Any bit outside
 * VM_SANITIZE_PROT_ALLOWED plus the caller-supplied extra_mask is rejected;
 * on failure *prot is VM_PROT_NONE.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_prot(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map __unused,
	vm_prot_t               extra_mask,
	vm_prot_t              *prot)
{
	*prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	if (__improbable(*prot & ~(VM_SANITIZE_PROT_ALLOWED | extra_mask))) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

#if defined(__x86_64__)
	/* VM_PROT_UEXEC is only acceptable when the map's pmap supports it. */
	if ((*prot & VM_PROT_UEXEC) &&
	    !pmap_supported_feature(map->pmap, PMAP_FEAT_UEXEC)) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}
#endif

	return KERN_SUCCESS;
}
613 
614 /*
615  * *out_cur and *out_max are modified when there is an err compat rewrite
616  * otherwise they are left unchanged
617  */
static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_cur_and_max_prots(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_prot_t               extra_mask,
	vm_prot_t              *out_cur,
	vm_prot_t              *out_max)
{
	vm_prot_t initial_cur_prot = VM_SANITIZE_UNSAFE_UNWRAP(cur_prot_u);
	vm_prot_t initial_max_prot = VM_SANITIZE_UNSAFE_UNWRAP(max_prot_u);

	/* Default: keep initial_kr, no rewrite, no telemetry. */
	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	vm_prot_t compat_cur_prot = initial_cur_prot;
	vm_prot_t compat_max_prot = initial_max_prot;
	/* The policy hook may rewrite the prots in place through the pointers. */
	if (vm_sanitize_caller->err_compat_prot_cur_max) {
		compat = (vm_sanitize_caller->err_compat_prot_cur_max)
		    (initial_kr, &compat_cur_prot, &compat_max_prot, extra_mask);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		/* Optionally mirror the report to the serial console (tunable). */
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary vm_prot cur %d max %d "
			    "passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    initial_cur_prot, initial_max_prot,
			    vm_sanitize_caller->vmsc_caller_name,
			    initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vmsc_telemetry_id,
			VM_SANITIZE_CHECKER_PROT_CUR_MAX,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vmsc_ktriage_id,
			initial_cur_prot,
			initial_max_prot,
			extra_mask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	if (compat.should_rewrite) {
		*out_cur = compat_cur_prot;
		*out_max = compat_max_prot;
		return compat.compat_kr;
	} else {
		/* out_cur and out_max unchanged */
		return initial_kr;
	}
}
674 
/*
 * Sanitize cur/max protections together, additionally requiring that cur
 * is not more permissive than max on the standard protection bits. On
 * failure both outputs are VM_PROT_NONE unless the caller's error-compat
 * policy rewrites them.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_cur_and_max_prots(
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_map_t                map,
	vm_prot_t               extra_mask,
	vm_prot_t              *cur_prot,
	vm_prot_t              *max_prot)
{
	kern_return_t kr;

	kr = vm_sanitize_prot(cur_prot_u, vm_sanitize_caller, map, extra_mask, cur_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}

	kr = vm_sanitize_prot(max_prot_u, vm_sanitize_caller, map, extra_mask, max_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}


	/*
	 * This check needs to be performed on the actual protection bits.
	 * vm_sanitize_prot restricts cur and max prot to
	 * (VM_PROT_ALL | VM_PROT_ALLEXEC | extra_mask), but we don't enforce
	 * ordering on the extra_mask bits.
	 */
	if (__improbable((*cur_prot & *max_prot & VM_SANITIZE_PROT_ALLOWED) !=
	    (*cur_prot & VM_SANITIZE_PROT_ALLOWED))) {
		/* cur is more permissive than max */
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	return KERN_SUCCESS;

unsanitary:
	*cur_prot = VM_PROT_NONE;
	*max_prot = VM_PROT_NONE;
	/* error compat may set cur/max to something other than 0/0 */
	return vm_sanitize_err_compat_cur_and_max_prots(kr, vm_sanitize_caller,
	           cur_prot_u, max_prot_u, extra_mask, cur_prot, max_prot);
}
724 
725 __attribute__((always_inline, warn_unused_result))
726 vm_prot_t
vm_sanitize_prot_bsd(vm_prot_ut prot_u,vm_sanitize_caller_t vm_sanitize_caller __unused)727 vm_sanitize_prot_bsd(
728 	vm_prot_ut              prot_u,
729 	vm_sanitize_caller_t    vm_sanitize_caller __unused)
730 {
731 	vm_prot_t prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);
732 
733 	/*
734 	 * Strip all protections that are not allowed
735 	 */
736 	prot &= (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ);
737 	return prot;
738 }
739 
/*
 * Sanitize a memory-entry permission word, which packs protection bits
 * (MAP_MEM_PROT_MASK), MAP_MEM_* flags (MAP_MEM_FLAGS_MASK), and an
 * access field (GET_MAP_MEM). Unknown user flags are rejected when
 * requested; protection bits are clamped to VM_PROT_ALL | extra_mask.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_memory_entry_perm(
	vm_prot_ut              perm_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_prot_t               extra_mask,
	vm_prot_t              *perm)
{
	vm_prot_t prot;
	vm_prot_t map_mem_flags;
	vm_prot_t access;

	/* Split the packed word into its three fields. */
	*perm = VM_SANITIZE_UNSAFE_UNWRAP(perm_u);
	prot = *perm & MAP_MEM_PROT_MASK;
	map_mem_flags = *perm & MAP_MEM_FLAGS_MASK;
	access = GET_MAP_MEM(*perm);

	if ((flags & VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS) &&
	    (map_mem_flags & ~MAP_MEM_FLAGS_USER)) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		*perm = VM_PROT_NONE;
		return KERN_INVALID_VALUE;
	}

	/*
	 * Clear prot bits in perm and set them to only allowed values
	 */
	*perm &= ~MAP_MEM_PROT_MASK;
	*perm |= (prot & (VM_PROT_ALL | extra_mask));

	/*
	 * No checks on access
	 */
	(void) access;

	return KERN_SUCCESS;
}
780 
781 __attribute__((always_inline, warn_unused_result))
782 kern_return_t
vm_sanitize_inherit(vm_inherit_ut inherit_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_inherit_t * inherit)783 vm_sanitize_inherit(
784 	vm_inherit_ut           inherit_u,
785 	vm_sanitize_caller_t    vm_sanitize_caller __unused,
786 	vm_inherit_t           *inherit)
787 {
788 	*inherit = VM_SANITIZE_UNSAFE_UNWRAP(inherit_u);
789 
790 	if (__improbable(*inherit > VM_INHERIT_LAST_VALID)) {
791 		*inherit = VM_INHERIT_NONE;
792 		return KERN_INVALID_ARGUMENT;
793 	}
794 
795 	return KERN_SUCCESS;
796 }
797 
798 __attribute__((always_inline, warn_unused_result))
799 kern_return_t
vm_sanitize_behavior(vm_behavior_ut behavior_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_behavior_t * behavior)800 vm_sanitize_behavior(
801 	vm_behavior_ut           behavior_u,
802 	vm_sanitize_caller_t    vm_sanitize_caller __unused,
803 	vm_behavior_t           *behavior)
804 {
805 	*behavior = VM_SANITIZE_UNSAFE_UNWRAP(behavior_u);
806 
807 	if (__improbable((*behavior > VM_BEHAVIOR_LAST_VALID)
808 	    || (*behavior < 0))) {
809 		*behavior = VM_BEHAVIOR_DEFAULT;
810 		return KERN_INVALID_ARGUMENT;
811 	}
812 
813 	return KERN_SUCCESS;
814 }
815 
816 #if DEBUG || DEVELOPMENT
817 
/*
 * Unit test: vm_sanitize_offset must reject offsets outside [addr, end].
 */
static bool
vm_sanitize_offset_test(void)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_map_offset_t offset;
	vm_map_address_t addr, end;
	vm_addr_struct_t offset_u;

	/*
	 * Offset that is less than lower bound
	 */
	offset_u = vm_sanitize_wrap_addr(0);
	addr = 5;
	end = 10;
	kr = vm_sanitize_offset(offset_u, VM_SANITIZE_CALLER_TEST, addr, end, &offset);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for addr %p end %p offset %p\n",
		    __func__, (void *)addr, (void *)end,
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u));
		return false;
	}

	/*
	 * Offset that is greater than upper bound
	 * (comment previously said "less than lower bound" — copy/paste)
	 */
	offset_u = vm_sanitize_wrap_addr(11);
	addr = 5;
	end = 10;
	kr = KERN_SUCCESS;
	kr = vm_sanitize_offset(offset_u, VM_SANITIZE_CALLER_TEST, addr, end, &offset);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for addr %p end %p offset %p\n",
		    __func__, (void *)addr, (void *)end,
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u));
		return false;
	}

	printf("%s: passed\n", __func__);

	return true;
}
861 
/*
 * Unit test for vm_sanitize_size(): exercises each zero-size policy flag
 * (SUCCEEDS / FAILS / FALLTHROUGH), the unaligned-values flag, and
 * offset+size combinations that overflow (directly or when rounding).
 */
static bool
vm_sanitize_size_test(void)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_map_size_t size;
	vm_addr_struct_t offset_u;
	vm_size_struct_t size_u;

	/*
	 * VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS should return VM_ERR_RETURN_NOW for size = 0
	 * for callers that need to return success early
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(0);
	kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
	    VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS, &size);

	/*
	 * VM_ERR_RETURN_NOW must itself unwrap to KERN_SUCCESS; both checks
	 * together pin the exact expected return value.
	 */
	if (vm_sanitize_get_kr(kr) != KERN_SUCCESS ||
	    kr != VM_ERR_RETURN_NOW) {
		printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS should return failure for size = 0
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(0);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS, &size);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH should return success for size = 0
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(0);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);

	if (vm_sanitize_get_kr(kr) != KERN_SUCCESS) {
		printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH failed for offset %p "
		    "size %p\n", __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES should return unaligned values
	 * (size comes back exactly as passed in, not rounded to a page boundary)
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(PAGE_SIZE + 1);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
	    VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
	    &size);

	if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
	    (size != PAGE_SIZE + 1)) {
		printf("%s: VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * Values that overflow
	 * (-PAGE_SIZE - 1 wraps to a huge unsigned size, so offset + size
	 * cannot be represented)
	 */
	offset_u = vm_sanitize_wrap_addr(2 * PAGE_SIZE);
	size_u = vm_sanitize_wrap_size(-PAGE_SIZE - 1);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * Values that overflow when rounding
	 */
	offset_u = vm_sanitize_wrap_addr(0);
	size_u = vm_sanitize_wrap_size(-1);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * Values that overflow when rounding
	 */
	offset_u = vm_sanitize_wrap_addr(-2);
	size_u = vm_sanitize_wrap_size(1);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	printf("%s: passed\n", __func__);

	return true;
}
989 
/*
 * Unit test for vm_sanitize_addr_size(): exercises start-alignment
 * enforcement, the zero-size policy flags, unaligned pass-through,
 * start realignment, and overflowing address/size combinations.
 * Unlike vm_sanitize_size(), this variant also produces the computed
 * start and end of the range.
 */
static bool
vm_sanitize_addr_size_test(void)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_map_address_t start, end;
	vm_map_size_t size;
	vm_addr_struct_t offset_u;
	vm_size_struct_t size_u;

	/*
	 * VM_SANITIZE_FLAGS_CHECK_ALIGNED_START should fail on passing unaligned offset
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(PAGE_SIZE);

	kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
	    VM_SANITIZE_FLAGS_CHECK_ALIGNED_START | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
	    &start, &end, &size);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: VM_SANITIZE_FLAGS_CHECK_ALIGNED_START failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS should return VM_ERR_RETURN_NOW for size = 0
	 * for callers that need to return success early
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(0);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
	    VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS, &start, &end,
	    &size);

	/* VM_ERR_RETURN_NOW must itself unwrap to KERN_SUCCESS */
	if (vm_sanitize_get_kr(kr) != KERN_SUCCESS ||
	    kr != VM_ERR_RETURN_NOW) {
		printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS should return failure for size = 0
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(0);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS, &start, &end,
	    &size);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH should return success for size = 0
	 * (start/end are truncated to the page boundary, yielding an empty range)
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(0);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &start,
	    &end, &size);

	if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
	    (start != PAGE_SIZE) || (end != PAGE_SIZE)) {
		printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH failed for offset %p "
		    "size %p\n", __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES should return unaligned values
	 * (start and end come back exactly as supplied, no page rounding)
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(PAGE_SIZE);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
	    VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
	    &start, &end, &size);

	if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
	    (start != PAGE_SIZE + 1) || (end != 2 * PAGE_SIZE + 1)) {
		printf("%s: VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}


	/*
	 * VM_SANITIZE_FLAGS_REALIGN_START should not use unaligned values for sanitization
	 * (start is truncated to the page boundary and end derived from it)
	 */
	offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
	size_u = vm_sanitize_wrap_size(PAGE_SIZE);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
	    VM_SANITIZE_FLAGS_REALIGN_START | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
	    &start, &end, &size);

	if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
	    (start != PAGE_SIZE) || (end != 2 * PAGE_SIZE)) {
		printf("%s: VM_SANITIZE_FLAGS_REALIGN_START failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	/*
	 * Values that overflow
	 * (-PAGE_SIZE - 1 wraps to a huge unsigned size)
	 */
	offset_u = vm_sanitize_wrap_addr(2 * PAGE_SIZE);
	size_u = vm_sanitize_wrap_size(-PAGE_SIZE - 1);
	kr = KERN_SUCCESS;
	kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
	    VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &start,
	    &end, &size);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for offset %p size %p\n",
		    __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
		return false;
	}

	printf("%s: passed\n", __func__);

	return true;
}
1128 
1129 static bool
vm_sanitize_prot_test(void)1130 vm_sanitize_prot_test(void)
1131 {
1132 	kern_return_t kr = KERN_SUCCESS;
1133 	vm_prot_ut prot_u;
1134 	vm_prot_t prot;
1135 
1136 	prot_u = vm_sanitize_wrap_prot(VM_PROT_NO_CHANGE_LEGACY |
1137 	    VM_PROT_NO_CHANGE |
1138 	    VM_PROT_COPY |
1139 	    VM_PROT_WANTS_COPY |
1140 	    VM_PROT_TRUSTED |
1141 	    VM_PROT_IS_MASK |
1142 	    VM_PROT_STRIP_READ |
1143 	    VM_PROT_EXECUTE_ONLY |
1144 	    VM_PROT_COPY_FAIL_IF_EXECUTABLE |
1145 	    VM_PROT_TPRO);
1146 
1147 	kr = vm_sanitize_prot(prot_u, VM_SANITIZE_CALLER_TEST, current_map(),
1148 	    VM_PROT_NONE, &prot);
1149 
1150 	if (kr == KERN_SUCCESS) {
1151 		printf("%s: failed for invalid set of permissions\n", __func__);
1152 		return false;
1153 	}
1154 
1155 	printf("%s: passed\n", __func__);
1156 
1157 	return true;
1158 }
1159 
1160 static bool
vm_sanitize_cur_and_max_prots_test(void)1161 vm_sanitize_cur_and_max_prots_test(void)
1162 {
1163 	kern_return_t kr = KERN_SUCCESS;
1164 	vm_prot_ut cur_prot_u, max_prot_u;
1165 	vm_prot_t cur_prot, max_prot;
1166 
1167 	/*
1168 	 * Validate that incompatible prots are rejected
1169 	 */
1170 	cur_prot_u = vm_sanitize_wrap_prot(VM_PROT_ALL);
1171 	max_prot_u = vm_sanitize_wrap_prot(VM_PROT_READ);
1172 	kr = vm_sanitize_cur_and_max_prots(cur_prot_u, max_prot_u, VM_SANITIZE_CALLER_TEST,
1173 	    current_map(), VM_PROT_NONE, &cur_prot,
1174 	    &max_prot);
1175 
1176 	if (kr == KERN_SUCCESS) {
1177 		printf("%s: failed for invalid set of permissions\n", __func__);
1178 		return false;
1179 	}
1180 	printf("%s: passed\n", __func__);
1181 
1182 	return true;
1183 }
1184 
1185 static bool
vm_sanitize_prot_bsd_test(void)1186 vm_sanitize_prot_bsd_test(void)
1187 {
1188 	vm_prot_ut prot_u;
1189 	vm_prot_t prot;
1190 
1191 	prot_u = vm_sanitize_wrap_prot(VM_PROT_NO_CHANGE_LEGACY |
1192 	    VM_PROT_NO_CHANGE |
1193 	    VM_PROT_COPY |
1194 	    VM_PROT_WANTS_COPY |
1195 	    VM_PROT_IS_MASK |
1196 	    VM_PROT_COPY_FAIL_IF_EXECUTABLE |
1197 	    VM_PROT_TPRO);
1198 
1199 	prot = vm_sanitize_prot_bsd(prot_u, VM_SANITIZE_CALLER_TEST);
1200 
1201 	if (prot != VM_PROT_NONE) {
1202 		printf("%s: failed to strip invalid permissions\n", __func__);
1203 		return false;
1204 	}
1205 
1206 	printf("%s: passed\n", __func__);
1207 
1208 	return true;
1209 }
1210 
/*
 * Unit test for vm_sanitize_memory_entry_perm(): invalid map_mem_flags
 * must be rejected outright, while invalid protection bits are cleared
 * from the returned permission.
 */
static bool
vm_sanitize_memory_entry_perm_test(void)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_prot_ut perm_u;
	vm_prot_t perm;

	/*
	 * Ensure invalid map_mem_flags is rejected
	 * (0x001000 is not a prot bit; presumably it lands in the
	 * map_mem_flags field checked by
	 * VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS)
	 */
	perm_u = vm_sanitize_wrap_prot(0x001000);
	kr = vm_sanitize_memory_entry_perm(perm_u, VM_SANITIZE_CALLER_TEST,
	    VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS,
	    VM_PROT_IS_MASK, &perm);

	if (kr == KERN_SUCCESS) {
		printf("%s: failed to reject invalid map_mem_flags\n", __func__);
		return false;
	}

	/*
	 * Ensure invalid prot bits are cleared
	 *
	 * NOTE(review): kr from this second call is assigned but never
	 * re-checked; the test only verifies that perm was stripped to
	 * VM_PROT_NONE — confirm that ignoring kr here is intentional.
	 */
	kr = KERN_SUCCESS;
	perm_u = vm_sanitize_wrap_prot(VM_PROT_NO_CHANGE_LEGACY |
	    VM_PROT_NO_CHANGE |
	    VM_PROT_COPY |
	    VM_PROT_WANTS_COPY |
	    VM_PROT_EXECUTE_ONLY |
	    VM_PROT_COPY_FAIL_IF_EXECUTABLE |
	    VM_PROT_TPRO);
	kr = vm_sanitize_memory_entry_perm(perm_u, VM_SANITIZE_CALLER_TEST,
	    VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS,
	    VM_PROT_IS_MASK, &perm);

	if (perm != VM_PROT_NONE) {
		printf("%s: failed to clear invalid prot bits\n", __func__);
		return false;
	}

	printf("%s: passed\n", __func__);

	return true;
}
1255 
1256 static bool
vm_sanitize_inherit_test(void)1257 vm_sanitize_inherit_test(void)
1258 {
1259 	kern_return_t kr = KERN_SUCCESS;
1260 	vm_inherit_ut inherit_u;
1261 	vm_inherit_t inherit;
1262 
1263 	/*
1264 	 * Ensure invalid values are rejected
1265 	 */
1266 	inherit_u = vm_sanitize_wrap_inherit(VM_INHERIT_DONATE_COPY);
1267 	kr = vm_sanitize_inherit(inherit_u, VM_SANITIZE_CALLER_TEST, &inherit);
1268 
1269 	if (kr == KERN_SUCCESS) {
1270 		printf("%s: failed to reject invalid inherit values\n", __func__);
1271 		return false;
1272 	}
1273 	printf("%s: passed\n", __func__);
1274 
1275 	return true;
1276 }
1277 
1278 static bool
vm_sanitize_behavior_test(void)1279 vm_sanitize_behavior_test(void)
1280 {
1281 	kern_return_t kr = KERN_SUCCESS;
1282 	vm_behavior_ut behavior_u;
1283 	vm_behavior_t behavior;
1284 
1285 	/*
1286 	 * Ensure invalid values are rejected
1287 	 */
1288 	behavior_u = vm_sanitize_wrap_behavior(VM_BEHAVIOR_LAST_VALID + 1);
1289 	kr = vm_sanitize_behavior(behavior_u, VM_SANITIZE_CALLER_TEST, &behavior);
1290 
1291 	if (kr == KERN_SUCCESS) {
1292 		printf("%s: failed to reject invalid behavior value\n", __func__);
1293 		return false;
1294 	}
1295 	printf("%s: passed\n", __func__);
1296 
1297 	return true;
1298 }
1299 
/*
 * Verify that u0..u15 == t_arg[0]..t_arg[15], then return ret.
 * If there are ABI problems then the parameters or return
 * value may be passed incorrectly. We use a large number
 * of parameters in order to fill the ABI's parameter
 * registers and spill onto the stack.
 *
 * The static_asserts pin size and alignment; the generated function
 * checks the calling convention at run time. The empty asm("")
 * presumably keeps the compiler from collapsing the call — confirm
 * against the original intent if this is ever revisited.
 */
#define TEST_UT_TYPE_ABI_UT_CALLEE(type_t, type_ut, equal, t_arg)       \
	static_assert(sizeof(type_ut) == sizeof(type_t));               \
	static_assert(__alignof__(type_ut) == __alignof__(type_t));     \
                                                                        \
	__attribute__((used, noinline))                                 \
	static type_ut                                                  \
	vm_sanitize_test_##type_ut##_callee(                            \
	        type_ut u0,  type_ut u1,  type_ut u2,  type_ut u3,      \
	        type_ut u4,  type_ut u5,  type_ut u6,  type_ut u7,      \
	        type_ut u8,  type_ut u9,  type_ut u10, type_ut u11,     \
	        type_ut u12, type_ut u13, type_ut u14, type_ut u15,     \
	        type_ut ret,                                            \
	        bool *out_params_ok) {                                  \
	        asm("");                                                \
                                                                        \
	        *out_params_ok = (                                      \
	                equal(u0.UNSAFE,  t_arg[0])  && equal(u1.UNSAFE,  t_arg[1])  && \
	                equal(u2.UNSAFE,  t_arg[2])  && equal(u3.UNSAFE,  t_arg[3])  && \
	                equal(u4.UNSAFE,  t_arg[4])  && equal(u5.UNSAFE,  t_arg[5])  && \
	                equal(u6.UNSAFE,  t_arg[6])  && equal(u7.UNSAFE,  t_arg[7])  && \
	                equal(u8.UNSAFE,  t_arg[8])  && equal(u9.UNSAFE,  t_arg[9])  && \
	                equal(u10.UNSAFE, t_arg[10]) && equal(u11.UNSAFE, t_arg[11]) && \
	                equal(u12.UNSAFE, t_arg[12]) && equal(u13.UNSAFE, t_arg[13]) && \
	                equal(u14.UNSAFE, t_arg[14]) && equal(u15.UNSAFE, t_arg[15]) \
	                );                                              \
                                                                        \
	/* return value is checked by the caller */             \
	        return ret;                                             \
}
1336 
/*
 * Make a function pointer that points to the function above,
 * but with a function type that has type_t parameters
 * instead of type_ut.
 *
 * This is the same type mismatch that occurs when
 * the call site is outside the trust boundary and
 * the callee is inside it.
 *
 * The pointer is volatile so the compiler must perform a genuine
 * indirect call instead of folding the cast away, which would defeat
 * the ABI check.
 */
#define TEST_UT_TYPE_ABI_T_CALLEE(type_t, type_ut)                      \
	typedef type_t                                                  \
	(*vm_sanitize_test_##type_t##_callee_t)(                        \
	        type_t u0,  type_t u1,  type_t u2,  type_t u3,          \
	        type_t u4,  type_t u5,  type_t u6,  type_t u7,          \
	        type_t u8,  type_t u9,  type_t u10, type_t u11,         \
	        type_t u12, type_t u13, type_t u14, type_t u15,         \
	        type_t ret,                                             \
	        bool *out_params_ok);                                   \
	_Pragma("clang diagnostic push")                                \
	_Pragma("clang diagnostic ignored \"-Wcast-align\"")            \
	_Pragma("clang diagnostic ignored \"-Wcast-function-type\"")    \
	static vm_sanitize_test_##type_t##_callee_t                     \
	volatile vm_sanitize_test_##type_t##_callee =                   \
	    (vm_sanitize_test_##type_t##_callee_t)                      \
	    vm_sanitize_test_##type_ut##_callee;                        \
	_Pragma("clang diagnostic pop")
1363 
/*
 * Verify type_t actual parameters passed to type_ut formal parameters.
 * Calls through the cast function pointer from TEST_UT_TYPE_ABI_T_CALLEE,
 * then checks that the returned value survived the round trip.
 */
#define TEST_UT_TYPE_ABI_T_CALLER(type_t, type_ut, equal, t_arg)        \
	static void                                                     \
	vm_sanitize_test_##type_t##_abi(                                \
	        bool *out_params_ok,                                    \
	        bool *out_ret_ok) {                                     \
                                                                        \
	        type_t t_ret = vm_sanitize_test_##type_t##_callee(      \
	                t_arg[0],  t_arg[1],  t_arg[2],  t_arg[3],      \
	                t_arg[4],  t_arg[5],  t_arg[6],  t_arg[7],      \
	                t_arg[8],  t_arg[9],  t_arg[10], t_arg[11],     \
	                t_arg[12], t_arg[13], t_arg[14], t_arg[15],     \
	                t_arg[16],                                      \
	                out_params_ok);                                 \
	        *out_ret_ok = equal(t_ret, t_arg[16]);                  \
	}
1380 
/*
 * Verify type_ut actual parameters passed to type_ut formal parameters.
 * This is the control case: both sides use the wrapper type (built via
 * compound literals), so a failure here indicates a problem with the
 * wrapper type itself rather than a t/ut mismatch.
 */
#define TEST_UT_TYPE_ABI_UT_CALLER(type_t, type_ut, equal, t_arg)       \
	static void                                                     \
	vm_sanitize_test_##type_ut##_abi(                               \
	        bool *out_params_ok,                                    \
	        bool *out_ret_ok) {                                     \
                                                                        \
	        type_ut ut_ret = vm_sanitize_test_##type_ut##_callee(   \
	                (type_ut){t_arg[0]},  (type_ut){t_arg[1]},      \
	                (type_ut){t_arg[2]},  (type_ut){t_arg[3]},      \
	                (type_ut){t_arg[4]},  (type_ut){t_arg[5]},      \
	                (type_ut){t_arg[6]},  (type_ut){t_arg[7]},      \
	                (type_ut){t_arg[8]},  (type_ut){t_arg[9]},      \
	                (type_ut){t_arg[10]}, (type_ut){t_arg[11]},     \
	                (type_ut){t_arg[12]}, (type_ut){t_arg[13]},     \
	                (type_ut){t_arg[14]}, (type_ut){t_arg[15]},     \
	                (type_ut){t_arg[16]},                           \
	                out_params_ok);                                 \
	        *out_ret_ok = equal(ut_ret.UNSAFE, t_arg[16]);          \
	}
1401 
/*
 * Generate ABI testing functions for one type `type_t`
 * and its unsafe type `type_ut`.
 *
 * `equal(a, b)` is a function or macro that compares two `type_t`.
 *
 * Expands to: the ut-typed callee (plus size/alignment static_asserts),
 * the cast t-typed function pointer, and both caller variants, all
 * driven by the vm_sanitize_test_<type_t>_args table.
 */
#define TEST_UT_TYPE_ABI(type_t, type_ut, equal) \
	TEST_UT_TYPE_ABI_UT_CALLEE(type_t, type_ut, equal, vm_sanitize_test_##type_t##_args) \
	TEST_UT_TYPE_ABI_T_CALLEE(type_t, type_ut)                      \
	TEST_UT_TYPE_ABI_T_CALLER(type_t, type_ut, equal, vm_sanitize_test_##type_t##_args) \
	TEST_UT_TYPE_ABI_UT_CALLER(type_t, type_ut, equal, vm_sanitize_test_##type_t##_args) \
1413 
/* our test macro requires a single-token type name, not `struct mach_vm_range` */
typedef struct mach_vm_range mach_vm_range_s;

/* mach_vm_range_us does not exist outside this test; generate the unsafe wrapper type here */
VM_GENERATE_UNSAFE_TYPE(mach_vm_range_s, mach_vm_range_us);
1419 
1420 /*
1421  * Parameter values to test. 16 input values plus 1 return value.
1422  * These arrays are used inside the macros above under the name `t_arg`.
1423  */
1424 #define V 0xF89aFb00
1425 static vm_behavior_t vm_sanitize_test_vm_behavior_t_args[17] = {
1426 	V + 0, V + 1, V + 2, V + 3,
1427 	V + 4, V + 5, V + 6, V + 7,
1428 	V + 8, V + 9, V + 10, V + 11,
1429 	V + 12, V + 13, V + 14, V + 15,
1430 	V + 16
1431 };
1432 #undef V
1433 #define V 0xF89aFb00u
1434 static const vm_inherit_t vm_sanitize_test_vm_inherit_t_args[17] = {
1435 	V + 0, V + 1, V + 2, V + 3,
1436 	V + 4, V + 5, V + 6, V + 7,
1437 	V + 8, V + 9, V + 10, V + 11,
1438 	V + 12, V + 13, V + 14, V + 15,
1439 	V + 16
1440 };
1441 #undef V
1442 #define V 0xF1234567F89aFb00ull
1443 static const mach_vm_address_t vm_sanitize_test_mach_vm_address_t_args[17] = {
1444 	V + 0, V + 1, V + 2, V + 3,
1445 	V + 4, V + 5, V + 6, V + 7,
1446 	V + 8, V + 9, V + 10, V + 11,
1447 	V + 12, V + 13, V + 14, V + 15,
1448 	V + 16
1449 };
1450 #undef V
1451 #define V 0xF1234567F89aFb00ull
1452 static const mach_vm_range_s vm_sanitize_test_mach_vm_range_s_args[17] = {
1453 	{V, V + 0x10000000100 * 0}, {V, V + 0x10000000100 * 1},
1454 	{V, V + 0x10000000100 * 2}, {V, V + 0x10000000100 * 3},
1455 	{V, V + 0x10000000100 * 4}, {V, V + 0x10000000100 * 5},
1456 	{V, V + 0x10000000100 * 6}, {V, V + 0x10000000100 * 7},
1457 	{V, V + 0x10000000100 * 8}, {V, V + 0x10000000100 * 9},
1458 	{V, V + 0x10000000100 * 10}, {V, V + 0x10000000100 * 11},
1459 	{V, V + 0x10000000100 * 12}, {V, V + 0x10000000100 * 13},
1460 	{V, V + 0x10000000100 * 14}, {V, V + 0x10000000100 * 15},
1461 	{V, V + 0x10000000100 * 16},
1462 };
1463 #undef V
1464 
/*
 * Comparison helpers for the ABI test macros. Fully parenthesized so the
 * expansion is safe in any expression context (CERT PRE01-C / PRE02-C).
 */
#define equal_scalar(a, b) ((a) == (b))
#define equal_range(a, b) \
	((a).min_address == (b).min_address && (a).max_address == (b).max_address)

/* signed int */
TEST_UT_TYPE_ABI(vm_behavior_t, vm_behavior_ut, equal_scalar);
/* unsigned int */
TEST_UT_TYPE_ABI(vm_inherit_t, vm_inherit_ut, equal_scalar);
/* pointer-size int */
TEST_UT_TYPE_ABI(mach_vm_address_t, mach_vm_address_ut, equal_scalar);
/* struct of two pointer-sized ints */
TEST_UT_TYPE_ABI(mach_vm_range_s, mach_vm_range_us, equal_range);

/*
 * The second #undef previously named `equal_struct`, which was never
 * defined, leaving `equal_range` defined past its intended scope.
 */
#undef equal_scalar
#undef equal_range
1480 
1481 /* Call the ABI test function for one type and complain if it failed. */
1482 static bool
vm_sanitize_test_one_abi(void (* test_t)(bool *,bool *),const char * t_name)1483 vm_sanitize_test_one_abi(void (*test_t)(bool*, bool*), const char *t_name)
1484 {
1485 	bool params_ok, ret_ok;
1486 
1487 	test_t(&params_ok, &ret_ok);
1488 
1489 	if (!params_ok) {
1490 		printf("vm_sanitize_ut_type_abi_test: mismatched %s parameter\n", t_name);
1491 		return false;
1492 	}
1493 	if (!ret_ok) {
1494 		printf("vm_sanitize_ut_type_abi_test: mismatched %s return\n", t_name);
1495 		return false;
1496 	}
1497 	return true;
1498 }
1499 
1500 /*
1501  * Verify that the ABI conventions of a _ut type and its _t counterpart match.
1502  *
1503  * Sanitized types means that external call sites use type_t parameters and
1504  * internal definitions use type_ut parameters. Any mismatch between
1505  * type_t and type_ut at the function call ABI level may cause misinterpreted
1506  * parameter values or data type layouts.
1507  *
1508  * - same sizeof
1509  * - same alignof
1510  * - type_t actual parameter passed to type_ut formal parameter
1511  * - type_ut actual return value returned to type_t formal return
1512  */
1513 static bool
vm_sanitize_ut_type_abi_test(void)1514 vm_sanitize_ut_type_abi_test(void)
1515 {
1516 	bool passed =
1517 	    vm_sanitize_test_one_abi(vm_sanitize_test_vm_behavior_t_abi, "vm_behavior_t") &&
1518 	    vm_sanitize_test_one_abi(vm_sanitize_test_vm_behavior_ut_abi, "vm_behavior_ut") &&
1519 	    vm_sanitize_test_one_abi(vm_sanitize_test_vm_inherit_t_abi, "vm_inherit_t") &&
1520 	    vm_sanitize_test_one_abi(vm_sanitize_test_vm_inherit_ut_abi, "vm_inherit_ut") &&
1521 	    vm_sanitize_test_one_abi(vm_sanitize_test_mach_vm_address_t_abi, "mach_vm_address_t") &&
1522 	    vm_sanitize_test_one_abi(vm_sanitize_test_mach_vm_address_ut_abi, "mach_vm_address_ut") &&
1523 	    vm_sanitize_test_one_abi(vm_sanitize_test_mach_vm_range_s_abi, "mach_vm_range_s") &&
1524 	    vm_sanitize_test_one_abi(vm_sanitize_test_mach_vm_range_us_abi, "mach_vm_range_us");
1525 
1526 	if (passed) {
1527 		printf("%s: passed\n", __func__);
1528 	}
1529 	return passed;
1530 }
1531 
1532 static int
vm_sanitize_run_test(int64_t in __unused,int64_t * out)1533 vm_sanitize_run_test(int64_t in __unused, int64_t *out)
1534 {
1535 	*out = 0;
1536 
1537 	if (!vm_sanitize_offset_test() ||
1538 	    !vm_sanitize_size_test() ||
1539 	    !vm_sanitize_addr_size_test() ||
1540 	    !vm_sanitize_prot_test() ||
1541 	    !vm_sanitize_cur_and_max_prots_test() ||
1542 	    !vm_sanitize_prot_bsd_test() ||
1543 	    !vm_sanitize_memory_entry_perm_test() ||
1544 	    !vm_sanitize_inherit_test() ||
1545 	    !vm_sanitize_behavior_test() ||
1546 	    !vm_sanitize_ut_type_abi_test()) {
1547 		return 0;
1548 	}
1549 
1550 	printf("%s: All tests passed\n", __func__);
1551 	*out = 1;
1552 	return 0;
1553 }
1554 SYSCTL_TEST_REGISTER(vm_sanitize_test, vm_sanitize_run_test);
1555 #endif /* DEBUG || DEVELOPMENT */
1556 
1557 #if PAGE_SHIFT == 0
1558 #pragma clang attribute pop
1559 #endif
1560 
1561 #if !__OPTIMIZE__
1562 #pragma clang attribute pop
1563 #endif
1564