1 /*
2 * Copyright (c) 2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <vm/vm_map_xnu.h>
30 #include <vm/vm_sanitize_internal.h>
31 #include <vm/vm_object_internal.h>
32
33 // TODO: enable telemetry and ktriage separately?
34
35 /* Also send telemetry output to kernel serial console? */
36 static TUNABLE(bool, vm_sanitize_telemeter_to_serial,
37 "vm_sanitize_telemeter_to_serial", false);
38
39 static inline
40 kern_return_t
vm_sanitize_apply_err_rewrite_policy(kern_return_t initial_kr,vm_sanitize_compat_rewrite_t rewrite)41 vm_sanitize_apply_err_rewrite_policy(kern_return_t initial_kr, vm_sanitize_compat_rewrite_t rewrite)
42 {
43 return rewrite.should_rewrite ? rewrite.compat_kr : initial_kr;
44 }
45
46 __attribute__((always_inline, warn_unused_result))
47 vm_addr_struct_t
vm_sanitize_wrap_addr(vm_address_t val)48 vm_sanitize_wrap_addr(vm_address_t val)
49 {
50 return (vm_addr_struct_t) { .UNSAFE = val };
51 }
52
53 __attribute__((always_inline, warn_unused_result))
54 vm_size_struct_t
vm_sanitize_wrap_size(vm_size_t val)55 vm_sanitize_wrap_size(vm_size_t val)
56 {
57 return (vm_size_struct_t) { .UNSAFE = val };
58 }
59
60 __attribute__((always_inline, warn_unused_result))
61 vm32_size_struct_t
vm32_sanitize_wrap_size(vm32_size_t val)62 vm32_sanitize_wrap_size(vm32_size_t val)
63 {
64 return (vm32_size_struct_t) { .UNSAFE = val };
65 }
66
67 __attribute__((always_inline, warn_unused_result))
68 vm_prot_ut
vm_sanitize_wrap_prot(vm_prot_t val)69 vm_sanitize_wrap_prot(vm_prot_t val)
70 {
71 return (vm_prot_ut) { .UNSAFE = val };
72 }
73
74 __attribute__((always_inline, warn_unused_result))
75 vm_inherit_ut
vm_sanitize_wrap_inherit(vm_inherit_t val)76 vm_sanitize_wrap_inherit(vm_inherit_t val)
77 {
78 return (vm_inherit_ut) { .UNSAFE = val };
79 }
80
81 #ifdef MACH_KERNEL_PRIVATE
82 __attribute__((always_inline, warn_unused_result))
83 vm_addr_struct_t
vm_sanitize_expand_addr_to_64(vm32_address_ut val)84 vm_sanitize_expand_addr_to_64(vm32_address_ut val)
85 {
86 return (vm_addr_struct_t) { .UNSAFE = val.UNSAFE };
87 }
88
89 __attribute__((always_inline, warn_unused_result))
90 vm_size_struct_t
vm_sanitize_expand_size_to_64(vm32_size_ut val)91 vm_sanitize_expand_size_to_64(vm32_size_ut val)
92 {
93 return (vm_size_struct_t) { .UNSAFE = val.UNSAFE };
94 }
95
96 __attribute__((always_inline, warn_unused_result))
97 kern_return_t
vm_sanitize_expand_addr_size_to_64(vm32_address_ut addr32,vm32_size_ut size32,vm_address_ut * addr,vm_size_ut * size)98 vm_sanitize_expand_addr_size_to_64(
99 vm32_address_ut addr32,
100 vm32_size_ut size32,
101 vm_address_ut *addr,
102 vm_size_ut *size)
103 {
104 uint32_t discard;
105
106 if (__improbable(os_add_overflow(addr32.UNSAFE, size32.UNSAFE, &discard))) {
107 addr->UNSAFE = 0;
108 size->UNSAFE = 0;
109 return KERN_INVALID_ARGUMENT;
110 }
111
112 addr->UNSAFE = addr32.UNSAFE;
113 size->UNSAFE = size32.UNSAFE;
114 return KERN_SUCCESS;
115 }
116
117 __attribute__((always_inline, warn_unused_result))
118 vm32_address_ut
vm_sanitize_trunc_addr_to_32(vm_addr_struct_t val)119 vm_sanitize_trunc_addr_to_32(vm_addr_struct_t val)
120 {
121 vm32_address_ut ret;
122
123 ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_address_t, val.UNSAFE);
124 return ret;
125 }
126
127 __attribute__((always_inline, warn_unused_result))
128 vm32_size_ut
vm_sanitize_trunc_size_to_32(vm_size_struct_t val)129 vm_sanitize_trunc_size_to_32(vm_size_struct_t val)
130 {
131 vm32_size_ut ret;
132
133 ret.UNSAFE = CAST_DOWN_EXPLICIT(vm32_size_t, val.UNSAFE);
134 return ret;
135 }
136 #endif /* MACH_KERNEL_PRIVATE */
137
138 __attribute__((always_inline, warn_unused_result, overloadable))
139 bool
vm_sanitize_add_overflow(vm_addr_struct_t addr_u,vm_size_struct_t size_u,vm_addr_struct_t * addr_out_u)140 vm_sanitize_add_overflow(
141 vm_addr_struct_t addr_u,
142 vm_size_struct_t size_u,
143 vm_addr_struct_t *addr_out_u)
144 {
145 mach_vm_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
146 mach_vm_size_t size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
147
148 return os_add_overflow(addr, size, &addr_out_u->UNSAFE);
149 }
150
151 __attribute__((always_inline, warn_unused_result, overloadable))
152 bool
vm_sanitize_add_overflow(vm_size_struct_t size1_u,vm_size_struct_t size2_u,vm_size_struct_t * size_out_u)153 vm_sanitize_add_overflow(
154 vm_size_struct_t size1_u,
155 vm_size_struct_t size2_u,
156 vm_size_struct_t *size_out_u)
157 {
158 mach_vm_address_t size1 = VM_SANITIZE_UNSAFE_UNWRAP(size1_u);
159 mach_vm_size_t size2 = VM_SANITIZE_UNSAFE_UNWRAP(size2_u);
160
161 return os_add_overflow(size1, size2, &size_out_u->UNSAFE);
162 }
163
164 __attribute__((always_inline, warn_unused_result))
165 vm_addr_struct_t
vm_sanitize_compute_unsafe_end(vm_addr_struct_t addr_u,vm_size_struct_t size_u)166 vm_sanitize_compute_unsafe_end(
167 vm_addr_struct_t addr_u,
168 vm_size_struct_t size_u)
169 {
170 vm_addr_struct_t end_u = { 0 };
171 vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
172 vm_size_t size_local = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
173
174 VM_SANITIZE_UNSAFE_SET(end_u, addr_local + size_local);
175 return end_u;
176 }
177
178 __attribute__((always_inline, warn_unused_result))
179 vm_size_struct_t
vm_sanitize_compute_unsafe_size(vm_addr_struct_t addr_u,vm_addr_struct_t end_u)180 vm_sanitize_compute_unsafe_size(
181 vm_addr_struct_t addr_u,
182 vm_addr_struct_t end_u)
183 {
184 vm_size_struct_t size_u = { 0 };
185 vm_address_t addr_local = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
186 vm_address_t end_local = VM_SANITIZE_UNSAFE_UNWRAP(end_u);
187
188 VM_SANITIZE_UNSAFE_SET(size_u, end_local - addr_local);
189 return size_u;
190 }
191
192 __attribute__((always_inline, warn_unused_result))
193 mach_vm_address_t
vm_sanitize_addr(vm_map_t map,vm_addr_struct_t addr_u)194 vm_sanitize_addr(
195 vm_map_t map,
196 vm_addr_struct_t addr_u)
197 {
198 mach_vm_address_t addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
199 vm_map_offset_t pgmask = vm_map_page_mask(map);
200
201 return vm_map_trunc_page_mask(addr, pgmask);
202 }
203
204 __attribute__((always_inline, warn_unused_result))
205 mach_vm_offset_t
vm_sanitize_offset_in_page(vm_map_t map,vm_addr_struct_t addr_u)206 vm_sanitize_offset_in_page(
207 vm_map_t map,
208 vm_addr_struct_t addr_u)
209 {
210 return VM_SANITIZE_UNSAFE_UNWRAP(addr_u) & vm_map_page_mask(map);
211 }
212
213 __attribute__((always_inline, warn_unused_result))
214 kern_return_t
vm_sanitize_offset(vm_addr_struct_t offset_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_map_address_t addr,vm_map_address_t end,vm_map_offset_t * offset)215 vm_sanitize_offset(
216 vm_addr_struct_t offset_u,
217 vm_sanitize_caller_t vm_sanitize_caller __unused,
218 vm_map_address_t addr,
219 vm_map_address_t end,
220 vm_map_offset_t *offset)
221 {
222 *offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
223
224 if ((*offset < addr) || (*offset > end)) {
225 *offset = 0;
226 return KERN_INVALID_ARGUMENT;
227 }
228
229 return KERN_SUCCESS;
230 }
231
232 __attribute__((always_inline, warn_unused_result))
233 kern_return_t
vm_sanitize_mask(vm_addr_struct_t mask_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_map_offset_t * mask)234 vm_sanitize_mask(
235 vm_addr_struct_t mask_u,
236 vm_sanitize_caller_t vm_sanitize_caller __unused,
237 vm_map_offset_t *mask)
238 {
239 *mask = VM_SANITIZE_UNSAFE_UNWRAP(mask_u);
240
241 /*
242 * Adding validation to mask has high ABI risk and low security value.
243 * The only internal function that deals with mask is vm_map_locate_space
244 * and it currently ensures that addresses are aligned to page boundary
245 * even for weird alignment requests.
246 *
247 * rdar://120445665
248 */
249
250 return KERN_SUCCESS;
251 }
252
/*
 * Sanitize an unsafe size destined for a vm_object.
 *
 * Zero sizes are handled per `flags`: return-now, fail, or fall through
 * as success. A nonzero size is rejected if rounding it to the kernel's
 * PAGE_MASK wraps to 0. On success *size is the rounded size, unless
 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES keeps the raw value; on failure
 * *size is zeroed.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_object_size(
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_object_offset_t     *size)
{
	mach_vm_size_t size_aligned;

	*size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	/* A nonzero size that rounds to 0 overflowed the address space. */
	size_aligned = vm_map_round_page_mask(*size, PAGE_MASK);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
289
/*
 * Sanitize an unsafe (offset, size) pair destined for a mapping in `map`.
 *
 * Zero sizes are handled per `flags` (return-now / fail / fall through).
 * A nonzero size fails when offset + size overflows, when the sum rounded
 * to the kernel's PAGE_MASK wraps to 0, or when the map-relative rounded
 * size wraps to 0. On success *size is the map-page-rounded size, unless
 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES keeps the raw value; on failure
 * *size is zeroed.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_size(
	vm_addr_struct_t        offset_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map,
	vm_sanitize_flags_t     flags,
	mach_vm_size_t         *size)
{
	mach_vm_size_t offset = VM_SANITIZE_UNSAFE_UNWRAP(offset_u);
	vm_map_offset_t pgmask = vm_map_page_mask(map);
	mach_vm_size_t size_aligned;

	*size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			return KERN_INVALID_ARGUMENT;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			return KERN_SUCCESS;
		}
	}

	/*
	 * Ensure that offset and size don't overflow when referring to the
	 * vm_object
	 */
	if (os_add_overflow(*size, offset, &size_aligned)) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * This rounding is a check on the vm_object and thus uses the kernel's PAGE_MASK
	 */
	if (vm_map_round_page_mask(size_aligned, PAGE_MASK) == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Check that a non zero size being mapped doesn't round to 0
	 */
	/* size_aligned becomes size plus the offset's intra-page remainder */
	size_aligned -= offset & ~pgmask;
	/*
	 * This rounding is a check on the specified map and thus uses its pgmask
	 */
	size_aligned = vm_map_round_page_mask(size_aligned, pgmask);
	if (size_aligned == 0) {
		*size = 0;
		return KERN_INVALID_ARGUMENT;
	}

	if (!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES)) {
		*size = size_aligned;
	}
	return KERN_SUCCESS;
}
353
/*
 * Apply the caller's error-compat policy for an unsanitary (addr, size)
 * pair: the policy may rewrite the return code for backward compatibility,
 * and may request that the event be telemetered (and, on DEVELOPMENT or
 * DEBUG kernels with the tunable set, echoed to the serial console).
 *
 * Returns the (possibly rewritten) error code to hand back to the caller.
 */
static __attribute__((warn_unused_result))
kern_return_t
vm_sanitize_err_compat_addr_size(
	kern_return_t           initial_kr,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	mach_vm_offset_t        pgmask)
{
	/* Default: no rewrite, no telemetry, keep initial_kr. */
	vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
	if (vm_sanitize_caller->err_compat_addr_size) {
		compat = (vm_sanitize_caller->err_compat_addr_size)
		    (initial_kr, VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u),
		    pgmask);
	}

	if (compat.should_telemeter) {
#if DEVELOPMENT || DEBUG
		if (vm_sanitize_telemeter_to_serial) {
			printf("VM API - [%s] unsanitary addr 0x%llx size 0x%llx pgmask "
			    "0x%llx passed to %s; error code %d may become %d\n",
			    proc_best_name(current_proc()),
			    VM_SANITIZE_UNSAFE_UNWRAP(addr_u), VM_SANITIZE_UNSAFE_UNWRAP(size_u), pgmask,
			    vm_sanitize_caller->vm_sanitize_caller_name, initial_kr, compat.compat_kr);
		}
#endif /* DEVELOPMENT || DEBUG */

		vm_sanitize_send_telemetry(
			vm_sanitize_caller->vm_sanitize_telemetry_id,
			VM_SANITIZE_CHECKER_ADDR_SIZE,
			VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
			vm_sanitize_caller->vm_sanitize_ktriage_id,
			VM_SANITIZE_UNSAFE_UNWRAP(addr_u),
			VM_SANITIZE_UNSAFE_UNWRAP(size_u),
			pgmask,
			0 /* arg4 */,
			initial_kr,
			compat.compat_kr);
	}

	return vm_sanitize_apply_err_rewrite_policy(initial_kr, compat);
}
396
/*
 * Sanitize an unsafe (addr, size) pair against the given page mask.
 *
 * On success (*addr, *end, *size) describe a page-aligned range, unless
 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES requests the raw values. Zero
 * sizes are handled per `flags` (return-now / fail / fall through). On
 * failure all three outputs are zeroed and the caller's error-compat
 * policy may rewrite the returned error code.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_size(
	vm_addr_struct_t        addr_u,
	vm_size_struct_t        size_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	mach_vm_offset_t        pgmask,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *addr,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	vm_map_offset_t addr_aligned = 0;
	vm_map_offset_t end_aligned = 0;
	kern_return_t kr;

	*addr = VM_SANITIZE_UNSAFE_UNWRAP(addr_u);
	*size = VM_SANITIZE_UNSAFE_UNWRAP(size_u);
	/* Realigning the start only makes sense when aligned values are returned. */
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		assert(!(flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES));
	}

#if CONFIG_KERNEL_TAGGING
	/* Canonicalize tagged addresses before any alignment arithmetic. */
	if (flags & VM_SANITIZE_FLAGS_CANONICALIZE) {
		*addr = vm_memtag_canonicalize_address(*addr);
	}
#endif /* CONFIG_KERNEL_TAGGING */
	addr_aligned = vm_map_trunc_page_mask(*addr, pgmask);

	/*
	 * Ensure that the address is aligned
	 */
	if (__improbable((flags & VM_SANITIZE_FLAGS_CHECK_ALIGNED_START) && (*addr & pgmask))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/*
	 * Handle size zero as requested by the caller
	 */
	if (*size == 0) {
		if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS) {
			*addr = 0;
			*end = 0;
			/* size is already 0 */
			return VM_ERR_RETURN_NOW;
		} else if (flags & VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS) {
			kr = KERN_INVALID_ARGUMENT;
			goto unsanitary;
		} else {
			/* VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH - nothing to do */
			if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
				/* addr is already set */
				*end = *addr;
				/* size is already 0 */
				return KERN_SUCCESS;
			} else {
				*addr = addr_aligned;
				*end = addr_aligned;
				/* size is already 0 */
				return KERN_SUCCESS;
			}
		}
	}

	/*
	 * Compute the aligned end now
	 */
	if (flags & VM_SANITIZE_FLAGS_REALIGN_START) {
		*addr = addr_aligned;
	}
	if (__improbable(os_add_overflow(*addr, *size, &end_aligned))) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	/* Rounding the end may wrap to 0; an empty or inverted range is rejected. */
	end_aligned = vm_map_round_page_mask(end_aligned, pgmask);
	if (__improbable(end_aligned <= addr_aligned)) {
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}

	if (flags & VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES) {
		/* addr and size are already set */
		*end = *addr + *size;
	} else {
		*addr = addr_aligned;
		*end = end_aligned;
		*size = end_aligned - addr_aligned;
	}
	return KERN_SUCCESS;

unsanitary:
	/* Zero all outputs; error compat may substitute a legacy return code. */
	*addr = 0;
	*end = 0;
	*size = 0;
	return vm_sanitize_err_compat_addr_size(kr, vm_sanitize_caller,
	           addr_u, size_u, pgmask);
}
496
/*
 * Sanitize an unsafe (addr, end) pair by deriving the (possibly
 * underflowing) unsafe size and deferring to vm_sanitize_addr_size.
 *
 * NOTE(review): `map` is passed where the vm_sanitize_addr_size definition
 * above declares a pgmask; presumably an overloadable variant taking
 * vm_map_t is declared in the header — confirm.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_addr_end(
	vm_addr_struct_t        addr_u,
	vm_addr_struct_t        end_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_map_t                map,
	vm_sanitize_flags_t     flags,
	vm_map_offset_t        *start,
	vm_map_offset_t        *end,
	vm_map_size_t          *size)
{
	vm_size_struct_t size_u = vm_sanitize_compute_unsafe_size(addr_u, end_u);

	return vm_sanitize_addr_size(addr_u, size_u, vm_sanitize_caller, map, flags,
	           start, end, size);
}
514
/*
 * Validate an unsafe protection value. Any bit outside
 * (VM_PROT_ALL | VM_PROT_ALLEXEC | extra_mask) is rejected. On x86_64,
 * VM_PROT_UEXEC is additionally rejected unless the map's pmap supports
 * it — `map` is only consulted there, hence the __unused annotation.
 * On failure *prot is set to VM_PROT_NONE.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_prot(
	vm_prot_ut              prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_map_t                map __unused,
	vm_prot_t               extra_mask,
	vm_prot_t              *prot)
{
	*prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);

	if (__improbable(*prot & ~(VM_PROT_ALL | VM_PROT_ALLEXEC | extra_mask))) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}

#if defined(__x86_64__)
	if ((*prot & VM_PROT_UEXEC) &&
	    !pmap_supported_feature(map->pmap, PMAP_FEAT_UEXEC)) {
		*prot = VM_PROT_NONE;
		return KERN_INVALID_ARGUMENT;
	}
#endif

	return KERN_SUCCESS;
}
541
542 /*
543 * *out_cur and *out_max are modified when there is an err compat rewrite
544 * otherwise they are left unchanged
545 */
546 static __attribute__((warn_unused_result))
547 kern_return_t
vm_sanitize_err_compat_cur_and_max_prots(kern_return_t initial_kr,vm_sanitize_caller_t vm_sanitize_caller,vm_prot_ut cur_prot_u,vm_prot_ut max_prot_u,vm_prot_t extra_mask,vm_prot_t * out_cur,vm_prot_t * out_max)548 vm_sanitize_err_compat_cur_and_max_prots(
549 kern_return_t initial_kr,
550 vm_sanitize_caller_t vm_sanitize_caller,
551 vm_prot_ut cur_prot_u,
552 vm_prot_ut max_prot_u,
553 vm_prot_t extra_mask,
554 vm_prot_t *out_cur,
555 vm_prot_t *out_max)
556 {
557 vm_prot_t initial_cur_prot = VM_SANITIZE_UNSAFE_UNWRAP(cur_prot_u);
558 vm_prot_t initial_max_prot = VM_SANITIZE_UNSAFE_UNWRAP(max_prot_u);
559
560 vm_sanitize_compat_rewrite_t compat = {initial_kr, false, false};
561 vm_prot_t compat_cur_prot = initial_cur_prot;
562 vm_prot_t compat_max_prot = initial_max_prot;
563 if (vm_sanitize_caller->err_compat_prot_cur_max) {
564 compat = (vm_sanitize_caller->err_compat_prot_cur_max)
565 (initial_kr, &compat_cur_prot, &compat_max_prot, extra_mask);
566 }
567
568 if (compat.should_telemeter) {
569 #if DEVELOPMENT || DEBUG
570 if (vm_sanitize_telemeter_to_serial) {
571 printf("VM API - [%s] unsanitary vm_prot cur %d max %d "
572 "passed to %s; error code %d may become %d\n",
573 proc_best_name(current_proc()),
574 initial_cur_prot, initial_max_prot,
575 vm_sanitize_caller->vm_sanitize_caller_name,
576 initial_kr, compat.compat_kr);
577 }
578 #endif /* DEVELOPMENT || DEBUG */
579
580 vm_sanitize_send_telemetry(
581 vm_sanitize_caller->vm_sanitize_telemetry_id,
582 VM_SANITIZE_CHECKER_PROT_CUR_MAX,
583 VM_SANITIZE_CHECKER_COUNT_1 /* fixme */,
584 vm_sanitize_caller->vm_sanitize_ktriage_id,
585 initial_cur_prot,
586 initial_max_prot,
587 extra_mask,
588 0 /* arg4 */,
589 initial_kr,
590 compat.compat_kr);
591 }
592
593 if (compat.should_rewrite) {
594 *out_cur = compat_cur_prot;
595 *out_max = compat_max_prot;
596 return compat.compat_kr;
597 } else {
598 /* out_cur and out_max unchanged */
599 return initial_kr;
600 }
601 }
602
/*
 * Sanitize current and maximum protections together. Each value must pass
 * vm_sanitize_prot individually, and cur must not be more permissive than
 * max (ignoring extra_mask bits). On failure both outputs are set to
 * VM_PROT_NONE; for the cur-vs-max check only, the caller's error-compat
 * policy may then rewrite the error and the output protections.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_cur_and_max_prots(
	vm_prot_ut              cur_prot_u,
	vm_prot_ut              max_prot_u,
	vm_sanitize_caller_t    vm_sanitize_caller,
	vm_map_t                map,
	vm_prot_t               extra_mask,
	vm_prot_t              *cur_prot,
	vm_prot_t              *max_prot)
{
	kern_return_t kr;

	/* Individual prot failures return directly, without the compat path. */
	kr = vm_sanitize_prot(cur_prot_u, vm_sanitize_caller, map, extra_mask, cur_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}

	kr = vm_sanitize_prot(max_prot_u, vm_sanitize_caller, map, extra_mask, max_prot);
	if (__improbable(kr != KERN_SUCCESS)) {
		*cur_prot = VM_PROT_NONE;
		*max_prot = VM_PROT_NONE;
		return kr;
	}


	/*
	 * This check needs to be performed on the actual protection bits.
	 * vm_sanitize_prot restricts cur and max prot to
	 * (VM_PROT_ALL | VM_PROT_ALLEXEC | extra_mask). Therefore strip
	 * extra_mask while performing this check.
	 */
	if (__improbable((*cur_prot & *max_prot & ~extra_mask) !=
	    (*cur_prot & ~extra_mask))) {
		/* cur is more permissive than max */
		kr = KERN_INVALID_ARGUMENT;
		goto unsanitary;
	}
	return KERN_SUCCESS;

unsanitary:
	*cur_prot = VM_PROT_NONE;
	*max_prot = VM_PROT_NONE;
	/* error compat may set cur/max to something other than 0/0 */
	return vm_sanitize_err_compat_cur_and_max_prots(kr, vm_sanitize_caller,
	           cur_prot_u, max_prot_u, extra_mask, cur_prot, max_prot);
}
652
653 __attribute__((always_inline, warn_unused_result))
654 kern_return_t
vm_sanitize_prot_bsd(vm_prot_ut prot_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_prot_t * prot)655 vm_sanitize_prot_bsd(
656 vm_prot_ut prot_u,
657 vm_sanitize_caller_t vm_sanitize_caller __unused,
658 vm_prot_t *prot)
659 {
660 *prot = VM_SANITIZE_UNSAFE_UNWRAP(prot_u);
661
662 /*
663 * Strip all protections that are not allowed
664 */
665 *prot &= (VM_PROT_ALL | VM_PROT_TRUSTED | VM_PROT_STRIP_READ);
666 return KERN_SUCCESS;
667 }
668
/*
 * Sanitize a memory-entry permission word, which packs protection bits
 * (MAP_MEM_PROT_MASK), MAP_MEM_* flags (MAP_MEM_FLAGS_MASK) and an access
 * field (GET_MAP_MEM). When VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS is
 * set, any flag outside MAP_MEM_FLAGS_USER is rejected with
 * KERN_INVALID_VALUE. Protection bits are clamped to
 * (VM_PROT_ALL | extra_mask); the access field is passed through unchecked.
 */
__attribute__((always_inline, warn_unused_result))
kern_return_t
vm_sanitize_memory_entry_perm(
	vm_prot_ut              perm_u,
	vm_sanitize_caller_t    vm_sanitize_caller __unused,
	vm_sanitize_flags_t     flags,
	vm_prot_t               extra_mask,
	vm_prot_t              *perm)
{
	vm_prot_t prot;
	vm_prot_t map_mem_flags;
	vm_prot_t access;

	*perm = VM_SANITIZE_UNSAFE_UNWRAP(perm_u);
	prot = *perm & MAP_MEM_PROT_MASK;
	map_mem_flags = *perm & MAP_MEM_FLAGS_MASK;
	access = GET_MAP_MEM(*perm);

	if ((flags & VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS) &&
	    (map_mem_flags & ~MAP_MEM_FLAGS_USER)) {
		/*
		 * Unknown flag: reject for forward compatibility.
		 */
		*perm = VM_PROT_NONE;
		return KERN_INVALID_VALUE;
	}

	/*
	 * Clear prot bits in perm and set them to only allowed values
	 */
	*perm &= ~MAP_MEM_PROT_MASK;
	*perm |= (prot & (VM_PROT_ALL | extra_mask));

	/*
	 * No checks on access
	 */
	(void) access;

	return KERN_SUCCESS;
}
709
710 __attribute__((always_inline, warn_unused_result))
711 kern_return_t
vm_sanitize_inherit(vm_inherit_ut inherit_u,vm_sanitize_caller_t vm_sanitize_caller __unused,vm_inherit_t * inherit)712 vm_sanitize_inherit(
713 vm_inherit_ut inherit_u,
714 vm_sanitize_caller_t vm_sanitize_caller __unused,
715 vm_inherit_t *inherit)
716 {
717 *inherit = VM_SANITIZE_UNSAFE_UNWRAP(inherit_u);
718
719 if (__improbable(*inherit > VM_INHERIT_LAST_VALID)) {
720 *inherit = VM_INHERIT_NONE;
721 return KERN_INVALID_ARGUMENT;
722 }
723
724 return KERN_SUCCESS;
725 }
726
727 #if DEBUG || DEVELOPMENT
728
/*
 * Unit test for vm_sanitize_offset: offsets outside the inclusive range
 * [addr, end] must be rejected. Returns true on pass, false on failure.
 */
static bool
vm_sanitize_offset_test(void)
{
	kern_return_t kr = KERN_SUCCESS;
	vm_map_offset_t offset;
	vm_map_address_t addr, end;
	vm_addr_struct_t offset_u;

	/*
	 * Offset that is less than lower bound
	 */
	offset_u = vm_sanitize_wrap_addr(0);
	addr = 5;
	end = 10;
	kr = vm_sanitize_offset(offset_u, VM_SANITIZE_CALLER_TEST, addr, end, &offset);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for addr %p end %p offset %p\n",
		    __func__, (void *)addr, (void *)end,
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u));
		return false;
	}

	/*
	 * Offset that is greater than upper bound
	 * (comment previously duplicated the "less than lower bound" text)
	 */
	offset_u = vm_sanitize_wrap_addr(11);
	addr = 5;
	end = 10;
	kr = KERN_SUCCESS;
	kr = vm_sanitize_offset(offset_u, VM_SANITIZE_CALLER_TEST, addr, end, &offset);

	if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
		printf("%s: failed for addr %p end %p offset %p\n",
		    __func__, (void *)addr, (void *)end,
		    (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u));
		return false;
	}

	printf("%s: passed\n", __func__);

	return true;
}
772
773 static bool
vm_sanitize_size_test(void)774 vm_sanitize_size_test(void)
775 {
776 kern_return_t kr = KERN_SUCCESS;
777 vm_map_size_t size;
778 vm_addr_struct_t offset_u;
779 vm_size_struct_t size_u;
780
781 /*
782 * VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS should return VM_ERR_RETURN_NOW for size = 0
783 * for callers that need to return success early
784 */
785 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
786 size_u = vm_sanitize_wrap_size(0);
787 kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
788 VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS, &size);
789
790 if (vm_sanitize_get_kr(kr) != KERN_SUCCESS ||
791 kr != VM_ERR_RETURN_NOW) {
792 printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS failed for offset %p size %p\n",
793 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
794 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
795 return false;
796 }
797
798 /*
799 * VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS should return failure for size = 0
800 */
801 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
802 size_u = vm_sanitize_wrap_size(0);
803 kr = KERN_SUCCESS;
804 kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
805 VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS, &size);
806
807 if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
808 printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS failed for offset %p size %p\n",
809 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
810 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
811 return false;
812 }
813
814 /*
815 * VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH should return success for size = 0
816 */
817 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
818 size_u = vm_sanitize_wrap_size(0);
819 kr = KERN_SUCCESS;
820 kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
821 VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);
822
823 if (vm_sanitize_get_kr(kr) != KERN_SUCCESS) {
824 printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH failed for offset %p "
825 "size %p\n", __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
826 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
827 return false;
828 }
829
830 /*
831 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES should return unaligned values
832 */
833 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
834 size_u = vm_sanitize_wrap_size(PAGE_SIZE + 1);
835 kr = KERN_SUCCESS;
836 kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
837 VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
838 &size);
839
840 if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
841 (size != PAGE_SIZE + 1)) {
842 printf("%s: VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES failed for offset %p size %p\n",
843 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
844 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
845 return false;
846 }
847
848 /*
849 * Values that overflow
850 */
851 offset_u = vm_sanitize_wrap_addr(2 * PAGE_SIZE);
852 size_u = vm_sanitize_wrap_size(-PAGE_SIZE - 1);
853 kr = KERN_SUCCESS;
854 kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
855 VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);
856
857 if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
858 printf("%s: failed for offset %p size %p\n",
859 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
860 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
861 return false;
862 }
863
864 /*
865 * Values that overflow when rounding
866 */
867 offset_u = vm_sanitize_wrap_addr(0);
868 size_u = vm_sanitize_wrap_size(-1);
869 kr = KERN_SUCCESS;
870 kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
871 VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);
872
873 if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
874 printf("%s: failed for offset %p size %p\n",
875 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
876 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
877 return false;
878 }
879
880 /*
881 * Values that overflow when rounding
882 */
883 offset_u = vm_sanitize_wrap_addr(-2);
884 size_u = vm_sanitize_wrap_size(1);
885 kr = KERN_SUCCESS;
886 kr = vm_sanitize_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, current_map(),
887 VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &size);
888
889 if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
890 printf("%s: failed for offset %p size %p\n",
891 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
892 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
893 printf("Deepti: size = %p\n", (void *)size);
894 return false;
895 }
896
897 printf("%s: passed\n", __func__);
898
899 return true;
900 }
901
902 static bool
vm_sanitize_addr_size_test(void)903 vm_sanitize_addr_size_test(void)
904 {
905 kern_return_t kr = KERN_SUCCESS;
906 vm_map_address_t start, end;
907 vm_map_size_t size;
908 vm_addr_struct_t offset_u;
909 vm_size_struct_t size_u;
910
911 /*
912 * VM_SANITIZE_FLAGS_CHECK_ALIGNED_START should fail on passing unaligned offset
913 */
914 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
915 size_u = vm_sanitize_wrap_size(PAGE_SIZE);
916
917 kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
918 VM_SANITIZE_FLAGS_CHECK_ALIGNED_START | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
919 &start, &end, &size);
920
921 if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
922 printf("%s: VM_SANITIZE_FLAGS_CHECK_ALIGNED_START failed for offset %p size %p\n",
923 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
924 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
925 return false;
926 }
927
928 /*
929 * VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS should return VM_ERR_RETURN_NOW for size = 0
930 * for callers that need to return success early
931 */
932 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
933 size_u = vm_sanitize_wrap_size(0);
934 kr = KERN_SUCCESS;
935 kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
936 VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS, &start, &end,
937 &size);
938
939 if (vm_sanitize_get_kr(kr) != KERN_SUCCESS ||
940 kr != VM_ERR_RETURN_NOW) {
941 printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_SUCCEEDS failed for offset %p size %p\n",
942 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
943 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
944 return false;
945 }
946
947 /*
948 * VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS should return failure for size = 0
949 */
950 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
951 size_u = vm_sanitize_wrap_size(0);
952 kr = KERN_SUCCESS;
953 kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
954 VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS, &start, &end,
955 &size);
956
957 if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
958 printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FAILS failed for offset %p size %p\n",
959 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
960 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
961 return false;
962 }
963
964 /*
965 * VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH should return success for size = 0
966 */
967 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
968 size_u = vm_sanitize_wrap_size(0);
969 kr = KERN_SUCCESS;
970 kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
971 VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &start,
972 &end, &size);
973
974 if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
975 (start != PAGE_SIZE) || (end != PAGE_SIZE)) {
976 printf("%s: VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH failed for offset %p "
977 "size %p\n", __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
978 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
979 return false;
980 }
981
982 /*
983 * VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES should return unaligned values
984 */
985 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
986 size_u = vm_sanitize_wrap_size(PAGE_SIZE);
987 kr = KERN_SUCCESS;
988 kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
989 VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
990 &start, &end, &size);
991
992 if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
993 (start != PAGE_SIZE + 1) || (end != 2 * PAGE_SIZE + 1)) {
994 printf("%s: VM_SANITIZE_FLAGS_GET_UNALIGNED_VALUES failed for offset %p size %p\n",
995 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
996 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
997 return false;
998 }
999
1000
1001 /*
1002 * VM_SANITIZE_FLAGS_REALIGN_START should not use unaligned values for sanitization
1003 */
1004 offset_u = vm_sanitize_wrap_addr(PAGE_SIZE + 1);
1005 size_u = vm_sanitize_wrap_size(PAGE_SIZE);
1006 kr = KERN_SUCCESS;
1007 kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
1008 VM_SANITIZE_FLAGS_REALIGN_START | VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH,
1009 &start, &end, &size);
1010
1011 if ((vm_sanitize_get_kr(kr) != KERN_SUCCESS) ||
1012 (start != PAGE_SIZE) || (end != 2 * PAGE_SIZE)) {
1013 printf("%s: VM_SANITIZE_FLAGS_REALIGN_START failed for offset %p size %p\n",
1014 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
1015 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
1016 return false;
1017 }
1018
1019 /*
1020 * Values that overflow
1021 */
1022 offset_u = vm_sanitize_wrap_addr(2 * PAGE_SIZE);
1023 size_u = vm_sanitize_wrap_size(-PAGE_SIZE - 1);
1024 kr = KERN_SUCCESS;
1025 kr = vm_sanitize_addr_size(offset_u, size_u, VM_SANITIZE_CALLER_TEST, PAGE_MASK,
1026 VM_SANITIZE_FLAGS_SIZE_ZERO_FALLTHROUGH, &start,
1027 &end, &size);
1028
1029 if (vm_sanitize_get_kr(kr) == KERN_SUCCESS) {
1030 printf("%s: failed for offset %p size %p\n",
1031 __func__, (void *)VM_SANITIZE_UNSAFE_UNWRAP(offset_u),
1032 (void *)VM_SANITIZE_UNSAFE_UNWRAP(size_u));
1033 return false;
1034 }
1035
1036 printf("%s: passed\n", __func__);
1037
1038 return true;
1039 }
1040
1041 static bool
vm_sanitize_prot_test(void)1042 vm_sanitize_prot_test(void)
1043 {
1044 kern_return_t kr = KERN_SUCCESS;
1045 vm_prot_ut prot_u;
1046 vm_prot_t prot;
1047
1048 prot_u = vm_sanitize_wrap_prot(VM_PROT_NO_CHANGE_LEGACY |
1049 VM_PROT_NO_CHANGE |
1050 VM_PROT_COPY |
1051 VM_PROT_WANTS_COPY |
1052 VM_PROT_TRUSTED |
1053 VM_PROT_IS_MASK |
1054 VM_PROT_STRIP_READ |
1055 VM_PROT_EXECUTE_ONLY |
1056 VM_PROT_COPY_FAIL_IF_EXECUTABLE |
1057 VM_PROT_TPRO);
1058
1059 kr = vm_sanitize_prot(prot_u, VM_SANITIZE_CALLER_TEST, current_map(),
1060 VM_PROT_NONE, &prot);
1061
1062 if (kr == KERN_SUCCESS) {
1063 printf("%s: failed for invalid set of permissions\n", __func__);
1064 return false;
1065 }
1066
1067 printf("%s: passed\n", __func__);
1068
1069 return true;
1070 }
1071
1072 static bool
vm_sanitize_cur_and_max_prots_test(void)1073 vm_sanitize_cur_and_max_prots_test(void)
1074 {
1075 kern_return_t kr = KERN_SUCCESS;
1076 vm_prot_ut cur_prot_u, max_prot_u;
1077 vm_prot_t cur_prot, max_prot;
1078
1079 /*
1080 * Validate that incompatible prots are rejected
1081 */
1082 cur_prot_u = vm_sanitize_wrap_prot(VM_PROT_ALL);
1083 max_prot_u = vm_sanitize_wrap_prot(VM_PROT_READ);
1084 kr = vm_sanitize_cur_and_max_prots(cur_prot_u, max_prot_u, VM_SANITIZE_CALLER_TEST,
1085 current_map(), VM_PROT_NONE, &cur_prot,
1086 &max_prot);
1087
1088 if (kr == KERN_SUCCESS) {
1089 printf("%s: failed for invalid set of permissions\n", __func__);
1090 return false;
1091 }
1092 printf("%s: passed\n", __func__);
1093
1094 return true;
1095 }
1096
1097 static bool
vm_sanitize_prot_bsd_test(void)1098 vm_sanitize_prot_bsd_test(void)
1099 {
1100 kern_return_t kr = KERN_SUCCESS;
1101 vm_prot_ut prot_u;
1102 vm_prot_t prot;
1103
1104 prot_u = vm_sanitize_wrap_prot(VM_PROT_NO_CHANGE_LEGACY |
1105 VM_PROT_NO_CHANGE |
1106 VM_PROT_COPY |
1107 VM_PROT_WANTS_COPY |
1108 VM_PROT_IS_MASK |
1109 VM_PROT_COPY_FAIL_IF_EXECUTABLE |
1110 VM_PROT_TPRO);
1111
1112 kr = vm_sanitize_prot_bsd(prot_u, VM_SANITIZE_CALLER_TEST, &prot);
1113
1114 if (prot != VM_PROT_NONE) {
1115 printf("%s: failed to strip invalid permissions\n", __func__);
1116 return false;
1117 }
1118
1119 printf("%s: passed\n", __func__);
1120
1121 return true;
1122 }
1123
1124 static bool
vm_sanitize_memory_entry_perm_test(void)1125 vm_sanitize_memory_entry_perm_test(void)
1126 {
1127 kern_return_t kr = KERN_SUCCESS;
1128 vm_prot_ut perm_u;
1129 vm_prot_t perm;
1130
1131 /*
1132 * Ensure invalid map_mem_flags is rejected
1133 */
1134 perm_u = vm_sanitize_wrap_prot(0x001000);
1135 kr = vm_sanitize_memory_entry_perm(perm_u, VM_SANITIZE_CALLER_TEST,
1136 VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS,
1137 VM_PROT_IS_MASK, &perm);
1138
1139 if (kr == KERN_SUCCESS) {
1140 printf("%s: failed to reject invalid map_mem_flags\n", __func__);
1141 return false;
1142 }
1143
1144 /*
1145 * Ensure invalid prot bits are cleared
1146 */
1147 kr = KERN_SUCCESS;
1148 perm_u = vm_sanitize_wrap_prot(VM_PROT_NO_CHANGE_LEGACY |
1149 VM_PROT_NO_CHANGE |
1150 VM_PROT_COPY |
1151 VM_PROT_WANTS_COPY |
1152 VM_PROT_EXECUTE_ONLY |
1153 VM_PROT_COPY_FAIL_IF_EXECUTABLE |
1154 VM_PROT_TPRO);
1155 kr = vm_sanitize_memory_entry_perm(perm_u, VM_SANITIZE_CALLER_TEST,
1156 VM_SANITIZE_FLAGS_CHECK_USER_MEM_MAP_FLAGS,
1157 VM_PROT_IS_MASK, &perm);
1158
1159 if (perm != VM_PROT_NONE) {
1160 printf("%s: failed to clear invalid prot bits\n", __func__);
1161 return false;
1162 }
1163
1164 printf("%s: passed\n", __func__);
1165
1166 return true;
1167 }
1168
1169 static bool
vm_sanitize_inherit_test(void)1170 vm_sanitize_inherit_test(void)
1171 {
1172 kern_return_t kr = KERN_SUCCESS;
1173 vm_inherit_ut inherit_u;
1174 vm_inherit_t inherit;
1175
1176 /*
1177 * Ensure invalid values are rejected
1178 */
1179 inherit_u = vm_sanitize_wrap_inherit(VM_INHERIT_DONATE_COPY);
1180 kr = vm_sanitize_inherit(inherit_u, VM_SANITIZE_CALLER_TEST, &inherit);
1181
1182 if (kr == KERN_SUCCESS) {
1183 printf("%s: failed to reject invalid inherit values\n", __func__);
1184 return false;
1185 }
1186 printf("%s: passed\n", __func__);
1187
1188 return true;
1189 }
1190
1191
1192 static int
vm_sanitize_run_test(int64_t in __unused,int64_t * out)1193 vm_sanitize_run_test(int64_t in __unused, int64_t *out)
1194 {
1195 *out = 0;
1196
1197 if (!vm_sanitize_offset_test() ||
1198 !vm_sanitize_size_test() ||
1199 !vm_sanitize_addr_size_test() ||
1200 !vm_sanitize_prot_test() ||
1201 !vm_sanitize_cur_and_max_prots_test() ||
1202 !vm_sanitize_prot_bsd_test() ||
1203 !vm_sanitize_memory_entry_perm_test() ||
1204 !vm_sanitize_inherit_test()) {
1205 return 0;
1206 }
1207
1208 printf("%s: All tests passed\n", __func__);
1209 *out = 1;
1210 return 0;
1211 }
1212 SYSCTL_TEST_REGISTER(vm_sanitize_test, vm_sanitize_run_test);
1213 #endif /* DEBUG || DEVELOPMENT */
1214