1 #include <kern/zalloc.h>
2 #include <kern/thread_test_context.h>
3
4 #include "vm_parameter_validation.h"
5
6 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
7 #pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
8 #pragma clang diagnostic ignored "-Wmissing-prototypes"
9 #pragma clang diagnostic ignored "-Wpedantic"
10 #pragma clang diagnostic ignored "-Wgcc-compat"
11
12
13 DEFINE_TEST_IDENTITY(test_identity_vm_parameter_validation_kern);
14
// vprintf() to a userspace buffer
// output is incremented to point at the new nul terminator
// On buffer exhaustion, writes a one-time truncation error message at the
// very end of the buffer; subsequent calls become no-ops.
static void
user_vprintf(user_addr_t *output, user_addr_t output_end, const char *format, va_list args) __printflike(3, 0)
{
	extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
	char linebuf[1024];
	size_t printed;

	// Format into a kernel-side staging buffer first; a single line must fit.
	printed = vsnprintf(linebuf, sizeof(linebuf), format, args);
	assert(printed < sizeof(linebuf) - 1);
	if (*output + printed + 1 < output_end) {
		// Common case: the text plus its nul terminator fits.
		// copyout includes the terminator but *output stays on it,
		// so the next call overwrites the nul.
		copyout(linebuf, *output, printed + 1);
		*output += printed;

		/* *output + 1 == output_end occurs only after the error case below */
		assert(*output + 1 < output_end);
	} else if (*output + 1 < output_end) {
		/*
		 * Not enough space in the output buffer for this text.
		 * Print as much as we can, then rewind and terminate
		 * the buffer with an error message.
		 * The tests will continue to run after this, but they
		 * won't be able to output anything more.
		 */
		static const char err_msg[] =
		    KERN_RESULT_DELIMITER KERN_FAILURE_DELIMITER
		    "kernel output buffer full, output truncated\n";
		size_t err_len = strlen(err_msg);
		size_t printable = output_end - *output - 1;
		assert(printable <= printed);
		copyout(linebuf, *output, printable + 1);
		// Overwrite the tail of the buffer with the error marker,
		// terminator included.
		copyout(err_msg, output_end - err_len - 1, err_len + 1);
		// Park *output one byte before the end so the branch below
		// recognizes the "already truncated" state on future calls.
		*output = output_end - 1;
	} else {
		/*
		 * Not enough space in the output buffer,
		 * and we already inserted the error message.
		 * Do nothing.
		 */
		assert(*output + 1 == output_end);
	}
}
58
59 void
testprintf(const char * format,...)60 testprintf(const char *format, ...)
61 {
62 vm_parameter_validation_kern_thread_context_t *globals = get_globals();
63
64 va_list args;
65 va_start(args, format);
66 user_vprintf(&globals->output_buffer_cur, globals->output_buffer_end, format, args);
67 va_end(args);
68 }
69
70 // Utils
71
72 static mach_port_t
make_a_mem_object(vm_size_t size)73 make_a_mem_object(vm_size_t size)
74 {
75 ipc_port_t out_handle;
76 kern_return_t kr = mach_memory_object_memory_entry_64((host_t)1, /*internal=*/ true, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
77 assert(kr == 0);
78 return out_handle;
79 }
80
81 static mach_port_t
make_a_mem_entry(MAP_T map,vm_size_t size)82 make_a_mem_entry(MAP_T map, vm_size_t size)
83 {
84 mach_port_t port;
85 memory_object_size_t s = (memory_object_size_t)size;
86 kern_return_t kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
87 assert(kr == 0);
88 return port;
89 }
90
91 // Test functions
92
93 static results_t *
test_vm_map_copy_overwrite(kern_return_t (* func)(MAP_T dst_map,vm_map_copy_t copy,mach_vm_address_t start,mach_vm_size_t size),const char * testname)94 test_vm_map_copy_overwrite(kern_return_t (*func)(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t start, mach_vm_size_t size), const char * testname)
95 {
96 // source map: has an allocation bigger than our
97 // "reasonable" trial sizes, to copy from
98 MAP_T src_map SMART_MAP;
99 allocation_t src_alloc SMART_ALLOCATE_VM(src_map, TEST_ALLOC_SIZE, VM_PROT_READ);
100
101 // dest map: has an allocation bigger than our
102 // "reasonable" trial sizes, to copy-overwrite on
103 MAP_T dst_map SMART_MAP;
104 allocation_t dst_alloc SMART_ALLOCATE_VM(dst_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
105
106 // We test dst/size parameters.
107 // We don't test the contents of the vm_map_copy_t.
108 start_size_trials_t *trials SMART_START_SIZE_TRIALS(dst_alloc.addr);
109 results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, dst_alloc.addr, trials->count);
110
111 for (unsigned i = 0; i < trials->count; i++) {
112 start_size_trial_t trial = trials->list[i];
113
114 // Copy from the source.
115 vm_map_copy_t copy;
116 kern_return_t kr = vm_map_copyin(src_map, src_alloc.addr, src_alloc.size, false, ©);
117 assert(kr == 0);
118 assert(copy); // null copy won't exercise the sanitization path
119
120 // Copy-overwrite to the destination.
121 kern_return_t ret = func(dst_map, copy, trial.start, trial.size);
122
123 if (ret != KERN_SUCCESS) {
124 vm_map_copy_discard(copy);
125 }
126 append_result(results, ret, trial.name);
127 }
128 return results;
129 }
130
131 /*
132 * This function temporarily allocates a writeable allocation in kernel_map, and a read only allocation in a temporary map.
133 * It's used to test a function such as vm_map_read_user which copies in data to a kernel pointer that must be writeable.
134 */
135 static results_t *
test_src_kerneldst_size(kern_return_t (* func)(MAP_T map,vm_map_offset_t src,void * dst,vm_size_t length),const char * testname)136 test_src_kerneldst_size(kern_return_t (*func)(MAP_T map, vm_map_offset_t src, void * dst, vm_size_t length), const char * testname)
137 {
138 MAP_T map SMART_MAP;
139 allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_READ);
140 allocation_t dst_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
141 src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
142 results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
143
144 for (unsigned i = 0; i < trials->count; i++) {
145 src_dst_size_trial_t trial = trials->list[i];
146 trial = slide_trial_src(trial, src_base.addr);
147 trial = slide_trial_dst(trial, dst_base.addr);
148 int ret = func(map, trial.src, (void *)trial.dst, trial.size);
149 append_result(results, ret, trial.name);
150 }
151 return results;
152 }
153
154 /*
155 * This function temporarily allocates a read only allocation in kernel_map, and a writeable allocation in a temporary map.
156 * It's used to test a function such as vm_map_write_user which copies data from a kernel pointer to a writeable userspace address.
157 */
158 static results_t *
test_kernelsrc_dst_size(kern_return_t (* func)(MAP_T map,void * src,vm_map_offset_t dst,vm_size_t length),const char * testname)159 test_kernelsrc_dst_size(kern_return_t (*func)(MAP_T map, void *src, vm_map_offset_t dst, vm_size_t length), const char * testname)
160 {
161 MAP_T map SMART_MAP;
162 allocation_t src_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_READ);
163 allocation_t dst_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
164 src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
165 results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
166
167 for (unsigned i = 0; i < trials->count; i++) {
168 src_dst_size_trial_t trial = trials->list[i];
169 trial = slide_trial_src(trial, src_base.addr);
170 trial = slide_trial_dst(trial, dst_base.addr);
171 int ret = func(map, (void *)trial.src, trial.dst, trial.size);
172 append_result(results, ret, trial.name);
173 }
174 return results;
175 }
176
177
178 /////////////////////////////////////////////////////
179 // Mach tests
180
181
182 static kern_return_t
call_mach_vm_read(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)183 call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
184 {
185 vm_offset_t out_addr;
186 mach_msg_type_number_t out_size;
187 kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
188 if (kr == 0) {
189 // we didn't call through MIG so out_addr is really a vm_map_copy_t
190 vm_map_copy_discard((vm_map_copy_t)out_addr);
191 }
192 return kr;
193 }
194
195 static inline void
check_vm_map_copyin_outparam_changes(kern_return_t * kr,vm_map_copy_t copy,vm_map_copy_t saved_copy)196 check_vm_map_copyin_outparam_changes(kern_return_t * kr, vm_map_copy_t copy, vm_map_copy_t saved_copy)
197 {
198 if (*kr == KERN_SUCCESS) {
199 if (copy == saved_copy) {
200 *kr = OUT_PARAM_BAD;
201 }
202 } else {
203 if (copy != saved_copy) {
204 *kr = OUT_PARAM_BAD;
205 }
206 }
207 }
208
209 static kern_return_t
call_vm_map_copyin(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)210 call_vm_map_copyin(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
211 {
212 vm_map_copy_t invalid_initial_value = INVALID_VM_MAP_COPY;
213 vm_map_copy_t copy = invalid_initial_value;
214 kern_return_t kr = vm_map_copyin(map, start, size, false, ©);
215 if (kr == 0) {
216 vm_map_copy_discard(copy);
217 }
218 check_vm_map_copyin_outparam_changes(&kr, copy, invalid_initial_value);
219 return kr;
220 }
221
222 static kern_return_t
call_copyoutmap_atomic32(MAP_T map,vm_map_offset_t addr)223 call_copyoutmap_atomic32(MAP_T map, vm_map_offset_t addr)
224 {
225 uint32_t data = 0;
226 kern_return_t kr = copyoutmap_atomic32(map, data, addr);
227 return kr;
228 }
229
230
231 static kern_return_t
call_mach_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)232 call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
233 {
234 mach_vm_address_t saved_start = *start;
235 kern_return_t kr = mach_vm_allocate_external(map, start, size, flags);
236 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
237 return kr;
238 }
239
240 static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)241 call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
242 {
243 mach_vm_address_t saved_start = *start;
244 kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_FIXED);
245 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
246 return kr;
247 }
248
249 static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)250 call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
251 {
252 mach_vm_address_t saved_start = *start;
253 kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_ANYWHERE);
254 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
255 return kr;
256 }
257
258 static kern_return_t
call_mach_vm_allocate_kernel__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)259 call_mach_vm_allocate_kernel__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
260 {
261 mach_vm_address_t saved_start = *start;
262 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
263 FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK));
264 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
265 return kr;
266 }
267
268 static kern_return_t
call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)269 call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
270 {
271 if (dealloc_would_time_out(*start, size, map)) {
272 return ACCEPTABLE;
273 }
274
275 mach_vm_address_t saved_start = *start;
276 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
277 FLAGS_AND_TAG(VM_FLAGS_FIXED, VM_KERN_MEMORY_OSFMK));
278 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
279 return kr;
280 }
281
282 static kern_return_t
call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)283 call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
284 {
285 if (dealloc_would_time_out(*start, size, map)) {
286 return ACCEPTABLE;
287 }
288
289 mach_vm_address_t saved_start = *start;
290 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
291 FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
292 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
293 return kr;
294 }
295
296
297
298 static kern_return_t
call_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)299 call_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
300 {
301 mach_vm_address_t saved_start = *start;
302 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, flags);
303 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
304 return kr;
305 }
306
307 static kern_return_t
call_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)308 call_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
309 {
310 mach_vm_address_t saved_start = *start;
311 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_FIXED);
312 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
313 return kr;
314 }
315
316 static kern_return_t
call_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)317 call_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
318 {
319 mach_vm_address_t saved_start = *start;
320 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_ANYWHERE);
321 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
322 return kr;
323 }
324
325 static kern_return_t
call_mach_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)326 call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
327 {
328 kern_return_t kr = mach_vm_deallocate(map, start, size);
329 return kr;
330 }
331
332 static kern_return_t
call_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)333 call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
334 {
335 kern_return_t kr = vm_deallocate(map, (vm_address_t) start, (vm_size_t) size);
336 return kr;
337 }
338
339 // Including sys/systm.h caused things to blow up
340 int vslock(user_addr_t addr, user_size_t len);
341 int vsunlock(user_addr_t addr, user_size_t len, int dirtied);
342 static int
call_vslock(void * start,size_t size)343 call_vslock(void * start, size_t size)
344 {
345 int kr = vslock((user_addr_t) start, (user_size_t) size);
346 if (kr == KERN_SUCCESS) {
347 (void) vsunlock((user_addr_t) start, (user_size_t) size, 0);
348 }
349
350 return kr;
351 }
352
353 static int
call_vsunlock_undirtied(void * start,size_t size)354 call_vsunlock_undirtied(void * start, size_t size)
355 {
356 int kr = vslock((user_addr_t) start, (user_size_t) size);
357 if (kr == EINVAL) {
358 // Invalid vslock arguments should also be
359 // invalid vsunlock arguments. Test it.
360 } else if (kr != KERN_SUCCESS) {
361 // vslock failed, and vsunlock of non-locked memory panics
362 return PANIC;
363 }
364 kr = vsunlock((user_addr_t) start, (user_size_t) size, 0);
365 return kr;
366 }
367
368 static int
call_vsunlock_dirtied(void * start,size_t size)369 call_vsunlock_dirtied(void * start, size_t size)
370 {
371 int kr = vslock((user_addr_t) start, (user_size_t) size);
372 if (kr == EINVAL) {
373 // Invalid vslock arguments should also be
374 // invalid vsunlock arguments. Test it.
375 } else if (kr != KERN_SUCCESS) {
376 // vslock failed, and vsunlock of non-locked memory panics
377 return PANIC;
378 }
379 kr = vsunlock((user_addr_t) start, (user_size_t) size, 1);
380 return kr;
381 }
382
383 extern kern_return_t vm_map_wire_external(
384 vm_map_t map,
385 vm_map_offset_t start,
386 vm_map_offset_t end,
387 vm_prot_t access_type,
388 boolean_t user_wire);
389
390
391 typedef kern_return_t (*wire_fn_t)(
392 vm_map_t task,
393 mach_vm_address_t start,
394 mach_vm_address_t end,
395 vm_prot_t prot,
396 vm_tag_t tag,
397 boolean_t user_wire);
398
399
400 /*
401 * Tell vm_tag_bt() to change its behavior so our calls to
402 * vm_map_wire_external and vm_map_wire_and_extract do not panic.
403 */
404 static void
prevent_wire_tag_panic(bool prevent)405 prevent_wire_tag_panic(bool prevent)
406 {
407 thread_set_test_option(test_option_vm_prevent_wire_tag_panic, prevent);
408 }
409
410 #if XNU_PLATFORM_MacOSX
411 // vm_map_wire_and_extract() implemented on macOS only
412
413
414 /*
415 * wire_nested requires a range of exactly one page when passed a physpage pointer.
416 * wire_and_extract is meant to provide that, but as a result of round introduced, unaligned values don't follow that.
417 */
418 static bool
will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map,mach_vm_address_t start)419 will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map, mach_vm_address_t start)
420 {
421 mach_vm_address_t end = start + VM_MAP_PAGE_SIZE(map);
422 if (round_up_map(map, end) - trunc_down_map(map, start) != VM_MAP_PAGE_SIZE(map)) {
423 return true;
424 }
425 return false;
426 }
427
428 static inline void
check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr,ppnum_t physpage)429 check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr, ppnum_t physpage)
430 {
431 if (*kr != KERN_SUCCESS) {
432 if (physpage != 0) {
433 *kr = OUT_PARAM_BAD;
434 }
435 }
436 }
437
438 static kern_return_t
vm_map_wire_and_extract_retyped(vm_map_t map,mach_vm_address_t start,mach_vm_address_t end __unused,vm_prot_t prot,vm_tag_t tag __unused,boolean_t user_wire)439 vm_map_wire_and_extract_retyped(
440 vm_map_t map,
441 mach_vm_address_t start,
442 mach_vm_address_t end __unused,
443 vm_prot_t prot,
444 vm_tag_t tag __unused,
445 boolean_t user_wire)
446 {
447 if (will_vm_map_wire_nested_panic_due_to_invalid_range_size(map, start)) {
448 return PANIC;
449 }
450
451 ppnum_t physpage = UNLIKELY_INITIAL_PPNUM;
452 kern_return_t kr = vm_map_wire_and_extract(map, start, prot, user_wire, &physpage);
453 check_vm_map_wire_and_extract_outparam_changes(&kr, physpage);
454 return kr;
455 }
456 #endif // XNU_PLATFORM_MacOSX
457
458
459 static kern_return_t
vm_map_wire_external_retyped(vm_map_t map,mach_vm_address_t start,mach_vm_address_t end,vm_prot_t prot,vm_tag_t tag __unused,boolean_t user_wire)460 vm_map_wire_external_retyped(
461 vm_map_t map,
462 mach_vm_address_t start,
463 mach_vm_address_t end,
464 vm_prot_t prot,
465 vm_tag_t tag __unused,
466 boolean_t user_wire)
467 {
468 return vm_map_wire_external(map, start, end, prot, user_wire);
469 }
470
471 static kern_return_t
wire_call_impl(wire_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t end,vm_prot_t prot,vm_tag_t tag,bool user_wire)472 wire_call_impl(wire_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t end, vm_prot_t prot, vm_tag_t tag, bool user_wire)
473 {
474 if (tag == VM_KERN_MEMORY_NONE) {
475 return PANIC;
476 }
477 prevent_wire_tag_panic(true);
478 kern_return_t kr = fn(map, start, end, prot, tag, user_wire);
479 prevent_wire_tag_panic(false);
480 if (kr == KERN_SUCCESS) {
481 (void) vm_map_unwire(map, start, end, user_wire);
482 }
483 return kr;
484 }
485
/*
 * Generate four call_* trial wrappers for a wire_fn_t `FN`:
 *   __start_end:  wire [start, end) with default prot and the OSFMK tag
 *   __prot:       wire [start, start+size) with caller-chosen prot
 *                 (returns BUSTED if start+size overflows)
 *   __tag:        wire [start, end) with caller-chosen tag
 *   __start:      wire with end=0 (degenerate range), defaults otherwise
 * `user_wire` is baked into each generated name and forwarded to
 * wire_call_impl. __attribute__((used)) keeps the unreferenced variants.
 */
#define WIRE_IMPL(FN, user_wire) \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __start_end__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end) \
	{ \
	        return wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	} \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __prot__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot) \
	{ \
	        mach_vm_address_t end; \
	        if (__builtin_add_overflow(start, size, &end)) { \
	                return BUSTED; \
	        } \
	        return wire_call_impl(FN, map, start, end, prot, VM_KERN_MEMORY_OSFMK, user_wire); \
	} \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __tag__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end, vm_tag_t tag) \
	{ \
	        kern_return_t kr = wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, tag, user_wire); \
	        return kr; \
	} \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __start__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start) \
	{ \
	        return wire_call_impl(FN, map, start, 0, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	} \

516
// Instantiate the wire trial wrappers for each entry point, both
// user-wired and kernel-wired variants.
WIRE_IMPL(vm_map_wire_external_retyped, true)
WIRE_IMPL(vm_map_wire_external_retyped, false)
WIRE_IMPL(vm_map_wire_kernel, true)
WIRE_IMPL(vm_map_wire_kernel, false)

#if XNU_PLATFORM_MacOSX
// vm_map_wire_and_extract() implemented on macOS only
WIRE_IMPL(vm_map_wire_and_extract_retyped, true)
WIRE_IMPL(vm_map_wire_and_extract_retyped, false)
#endif
526
// Exercise mach_vm_wire_level_monitor's requested_pages validation.
// Normalizes KERN_RESOURCE_SHORTAGE to KERN_SUCCESS because either is an
// acceptable outcome depending on available memory at test time.
static kern_return_t
call_mach_vm_wire_level_monitor(int64_t requested_pages)
{
	kern_return_t kr = mach_vm_wire_level_monitor(requested_pages);
	/*
	 * KERN_RESOURCE_SHORTAGE and KERN_SUCCESS are
	 * equivalent acceptable results for this test.
	 */
	if (kr == KERN_RESOURCE_SHORTAGE) {
#if !defined(XNU_TARGET_OS_BRIDGE)
		kr = KERN_SUCCESS;
#else /* defined(XNU_TARGET_OS_BRIDGE) */
		/*
		 * ...but the bridgeOS golden file recorded
		 * KERN_RESOURCE_SHORTAGE for some values so
		 * match that to avoid a golden file update.
		 * This code can be removed during any golden file update.
		 */
		if (requested_pages == 1 || requested_pages == 2) {
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_RESOURCE_SHORTAGE;
		}
#endif /* defined(XNU_TARGET_OS_BRIDGE) */
	}
	return kr;
}
554
555 static kern_return_t
call_vm_map_unwire_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)556 call_vm_map_unwire_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
557 {
558 kern_return_t kr = vm_map_unwire(map, start, end, TRUE);
559 return kr;
560 }
561
562
563 static kern_return_t
call_vm_map_unwire_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)564 call_vm_map_unwire_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
565 {
566 kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, FALSE);
567 if (kr) {
568 return PANIC;
569 }
570 kr = vm_map_unwire(map, start, end, FALSE);
571 return kr;
572 }
573
574 #ifndef __x86_64__
575 extern const vm_map_address_t physmap_base;
576 extern const vm_map_address_t physmap_end;
577 #endif
578
579 /*
580 * This function duplicates the panicking checks done in copy_validate.
581 * size==0 is returned as success earlier in copyin/out than copy_validate is called, so we ignore that case.
582 */
583 static bool
will_copyio_panic_in_copy_validate(void * kernel_addr,vm_size_t size)584 will_copyio_panic_in_copy_validate(void *kernel_addr, vm_size_t size)
585 {
586 if (size == 0) {
587 return false;
588 }
589 extern const int copysize_limit_panic;
590 if (size > copysize_limit_panic) {
591 return true;
592 }
593
594 /*
595 * copyio is architecture specific and has different checks per arch.
596 */
597 #ifdef __x86_64__
598 if ((vm_offset_t) kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
599 return true;
600 }
601 #else /* not __x86_64__ */
602 uintptr_t kernel_addr_last;
603 if (os_add_overflow((uintptr_t) kernel_addr, size, &kernel_addr_last)) {
604 return true;
605 }
606
607 bool in_kva = (VM_KERNEL_STRIP_PTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
608 (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
609 bool in_physmap = (VM_KERNEL_STRIP_PTR(kernel_addr) >= physmap_base) &&
610 (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= physmap_end);
611
612 if (!(in_kva || in_physmap)) {
613 return true;
614 }
615 #endif /* not __x86_64__ */
616
617 return false;
618 }
619
620 static kern_return_t
call_copyinmap(MAP_T map,vm_map_offset_t fromaddr,void * todata,vm_size_t length)621 call_copyinmap(MAP_T map, vm_map_offset_t fromaddr, void * todata, vm_size_t length)
622 {
623 if (will_copyio_panic_in_copy_validate(todata, length)) {
624 return PANIC;
625 }
626
627 kern_return_t kr = copyinmap(map, fromaddr, todata, length);
628 return kr;
629 }
630
631 static kern_return_t
call_copyoutmap(MAP_T map,void * fromdata,vm_map_offset_t toaddr,vm_size_t length)632 call_copyoutmap(MAP_T map, void * fromdata, vm_map_offset_t toaddr, vm_size_t length)
633 {
634 if (will_copyio_panic_in_copy_validate(fromdata, length)) {
635 return PANIC;
636 }
637
638 kern_return_t kr = copyoutmap(map, fromdata, toaddr, length);
639 return kr;
640 }
641
642 static kern_return_t
call_vm_map_read_user(MAP_T map,vm_map_address_t src_addr,void * ptr,vm_size_t size)643 call_vm_map_read_user(MAP_T map, vm_map_address_t src_addr, void * ptr, vm_size_t size)
644 {
645 if (will_copyio_panic_in_copy_validate(ptr, size)) {
646 return PANIC;
647 }
648
649 kern_return_t kr = vm_map_read_user(map, src_addr, ptr, size);
650 return kr;
651 }
652
653 static kern_return_t
call_vm_map_write_user(MAP_T map,void * ptr,vm_map_address_t dst_addr,vm_size_t size)654 call_vm_map_write_user(MAP_T map, void * ptr, vm_map_address_t dst_addr, vm_size_t size)
655 {
656 if (will_copyio_panic_in_copy_validate(ptr, size)) {
657 return PANIC;
658 }
659
660 kern_return_t kr = vm_map_write_user(map, ptr, dst_addr, size);
661 return kr;
662 }
663
664 static kern_return_t
call_vm_map_copy_overwrite_interruptible(MAP_T dst_map,vm_map_copy_t copy,mach_vm_address_t dst_addr,mach_vm_size_t copy_size)665 call_vm_map_copy_overwrite_interruptible(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t dst_addr, mach_vm_size_t copy_size)
666 {
667 kern_return_t kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, copy_size,
668 TRUE);
669
670 return kr;
671 }
672
673 static kern_return_t
call_mach_vm_protect__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)674 call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
675 {
676 kern_return_t kr = mach_vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
677 return kr;
678 }
679 static kern_return_t
call_mach_vm_protect__vm_prot(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)680 call_mach_vm_protect__vm_prot(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
681 {
682 kern_return_t kr = mach_vm_protect(map, start, size, 0, prot);
683 return kr;
684 }
685
686 static kern_return_t
call_vm_protect__start_size(MAP_T map,vm_address_t start,vm_size_t size)687 call_vm_protect__start_size(MAP_T map, vm_address_t start, vm_size_t size)
688 {
689 kern_return_t kr = vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
690 return kr;
691 }
692
693 static kern_return_t
call_vm_protect__vm_prot(MAP_T map,vm_address_t start,vm_size_t size,vm_prot_t prot)694 call_vm_protect__vm_prot(MAP_T map, vm_address_t start, vm_size_t size, vm_prot_t prot)
695 {
696 kern_return_t kr = vm_protect(map, start, size, 0, prot);
697 return kr;
698 }
699
700 /*
701 * VME_OFFSET_SET will panic due to an assertion if passed an address that is not aligned to VME_ALIAS_BITS
702 * VME_OFFSET_SET is called by _vm_map_clip_(start/end)
703 * vm_map_protect -> vm_map_clip_end -> _vm_map_clip_end -> VME_OFFSET_SET
704 */
705 static bool
will_vm_map_protect_panic(mach_vm_address_t start,mach_vm_address_t end)706 will_vm_map_protect_panic(mach_vm_address_t start, mach_vm_address_t end)
707 {
708 bool start_aligned = start == ((start >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
709 bool end_aligned = end == ((end >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
710 return !(start_aligned && end_aligned);
711 }
712
713 static kern_return_t
call_vm_map_protect__start_size__no_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)714 call_vm_map_protect__start_size__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
715 {
716 mach_vm_address_t end = start + size;
717 if (will_vm_map_protect_panic(start, end)) {
718 return PANIC;
719 }
720
721 kern_return_t kr = vm_map_protect(map, start, end, 0, VM_PROT_READ | VM_PROT_WRITE);
722 return kr;
723 }
724
725 static kern_return_t
call_vm_map_protect__start_size__set_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)726 call_vm_map_protect__start_size__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
727 {
728 mach_vm_address_t end = start + size;
729 if (will_vm_map_protect_panic(start, end)) {
730 return PANIC;
731 }
732
733 kern_return_t kr = vm_map_protect(map, start, end, 1, VM_PROT_READ | VM_PROT_WRITE);
734 return kr;
735 }
736
737 static kern_return_t
call_vm_map_protect__vm_prot__no_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)738 call_vm_map_protect__vm_prot__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
739 {
740 mach_vm_address_t end = start + size;
741 if (will_vm_map_protect_panic(start, end)) {
742 return PANIC;
743 }
744
745 kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
746 return kr;
747 }
748
749 static kern_return_t
call_vm_map_protect__vm_prot__set_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)750 call_vm_map_protect__vm_prot__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
751 {
752 mach_vm_address_t end = start + size;
753 if (will_vm_map_protect_panic(start, end)) {
754 return PANIC;
755 }
756
757 kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
758 return kr;
759 }
760
761 // Fwd decl to avoid including bsd headers
762 int useracc(user_addr_t addr, user_size_t len, int prot);
763
764 static int
call_useracc__start_size(void * start,size_t size)765 call_useracc__start_size(void * start, size_t size)
766 {
767 int result = useracc((user_addr_t) start, (user_addr_t) size, VM_PROT_READ);
768 return result;
769 }
770
771 static int
call_useracc__vm_prot(void * start,size_t size,int prot)772 call_useracc__vm_prot(void * start, size_t size, int prot)
773 {
774 return useracc((user_addr_t) start, (user_addr_t) size, prot);
775 }
776
777 static int
call_vm_map_purgable_control__address__get(MAP_T map,mach_vm_address_t addr)778 call_vm_map_purgable_control__address__get(MAP_T map, mach_vm_address_t addr)
779 {
780 int state = INVALID_PURGABLE_STATE;
781 int initial_state = state;
782 kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
783 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
784 return kr;
785 }
786
787 static int
call_vm_map_purgable_control__address__purge_all(MAP_T map,mach_vm_address_t addr)788 call_vm_map_purgable_control__address__purge_all(MAP_T map, mach_vm_address_t addr)
789 {
790 int state = INVALID_PURGABLE_STATE;
791 int initial_state = state;
792 kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
793 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
794 return kr;
795 }
796
797 static int
call_vm_map_purgable_control__purgeable_state(MAP_T map,vm_address_t addr,vm_purgable_t control,int state)798 call_vm_map_purgable_control__purgeable_state(MAP_T map, vm_address_t addr, vm_purgable_t control, int state)
799 {
800 int state_copy = state;
801 kern_return_t kr = vm_map_purgable_control(map, addr, control, &state_copy);
802 check_mach_vm_purgable_control_outparam_changes(&kr, state_copy, state, control);
803
804 return kr;
805 }
806
// Exercise vm_map_page_info(VM_PAGE_INFO_BASIC) at addr and verify its
// out-params. Temporarily overrides the caller's region page shift when the
// target map's page size differs (see comment below), restoring it after.
static kern_return_t
call_vm_map_page_info(MAP_T map, mach_vm_address_t addr)
{
	vm_page_info_flavor_t flavor = VM_PAGE_INFO_BASIC;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
	mach_msg_type_number_t saved_count = count;
	// Sentinel depth so the outparam check can detect whether it was written.
	vm_page_info_basic_data_t info = {0};
	info.depth = -1;
	vm_page_info_basic_data_t saved_info = info;

	/*
	 * If this test is invoked from a rosetta process,
	 * vm_map_page_range_info_internal doesn't know what
	 * effective_page_shift to use and returns KERN_INVALID_ARGUMENT.
	 * To fix this, we can set the region_page_shift to the page_shift
	 * used for map
	 */
	int saved_page_shift = thread_self_region_page_shift();
	if (PAGE_SIZE == KB16) {
		if (VM_MAP_PAGE_SHIFT(current_map()) != VM_MAP_PAGE_SHIFT(map)) {
			thread_self_region_page_shift_set(VM_MAP_PAGE_SHIFT(map));
		}
	}

	kern_return_t kr = vm_map_page_info(map, addr, flavor, (vm_page_info_t)&info, &count);

	// Restore the caller's page shift before checking results.
	thread_self_region_page_shift_set(saved_page_shift);

	check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);

	return kr;
}
839
#if CONFIG_MAP_RANGES
static kern_return_t
call_mach_vm_range_create(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, mach_vm_address_t second_start, mach_vm_size_t second_size)
{
	// Two fixed-tag v1 recipes: [start, start+size) and
	// [second_start, second_start+second_size).
	mach_vm_range_recipe_v1_t recipes[2] = {
		{ .range = { start, start + size },
		  .range_tag = MACH_VM_RANGE_FIXED, },
		{ .range = { second_start, second_start + second_size },
		  .range_tag = MACH_VM_RANGE_FIXED, },
	};

	// mach_vm_range_create requires map == current_map().
	// Temporarily install `map` as the task map, call, then restore.
	vm_map_t previous_map = swap_task_map(current_task(), current_thread(), map);

	kern_return_t kr = mach_vm_range_create(map, MACH_VM_RANGE_FLAVOR_V1,
	    (mach_vm_range_recipes_raw_t)recipes, sizeof(recipes[0]) * 2);

	swap_task_map(current_task(), current_thread(), previous_map);

	return kr;
}
#endif /* CONFIG_MAP_RANGES */
862
// Mach memory entry ownership

// Forward declaration for the ownership tests below.
// NOTE(review): presumably not exported in a header we can include here —
// confirm the declaration stays in sync with the kernel definition.
extern kern_return_t
mach_memory_entry_ownership(
	ipc_port_t entry_port,
	task_t owner,
	int ledger_tag,
	int ledger_flags);
871
872 static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused,int ledger_tag)873 call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
874 {
875 mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
876 kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, ledger_tag, 0);
877 mach_memory_entry_port_release(mementry);
878 return kr;
879 }
880
881 static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused,int ledger_flag)882 call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
883 {
884 mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
885 kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, VM_LEDGER_TAG_DEFAULT, ledger_flag);
886 mach_memory_entry_port_release(mementry);
887 return kr;
888 }
889
890 static inline void
check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr,mach_vm_size_t map_size,mach_vm_size_t invalid_initial_size)891 check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr, mach_vm_size_t map_size,
892 mach_vm_size_t invalid_initial_size)
893 {
894 if (*kr == KERN_SUCCESS) {
895 if (map_size == invalid_initial_size) {
896 *kr = OUT_PARAM_BAD;
897 }
898 } else {
899 if (map_size != 0) {
900 *kr = OUT_PARAM_BAD;
901 }
902 }
903 }
904
905 static kern_return_t
call_mach_memory_entry_map_size__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)906 call_mach_memory_entry_map_size__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
907 {
908 mach_port_t mementry;
909 mach_vm_address_t addr;
910 memory_object_size_t s = (memory_object_size_t)TEST_ALLOC_SIZE + 1;
911 /*
912 * UNLIKELY_INITIAL_SIZE is guaranteed to never be the correct map_size
913 * from the mach_memory_entry_map_size calls we make. map_size should represent the size of the
914 * copy that would result, and UNLIKELY_INITIAL_SIZE is completely unrelated to the sizes we pass
915 * and not page aligned.
916 */
917 mach_vm_size_t invalid_initial_size = UNLIKELY_INITIAL_SIZE;
918
919 mach_vm_size_t map_size = invalid_initial_size;
920
921 kern_return_t kr = mach_vm_allocate_kernel(map, &addr, s, FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
922 assert(kr == 0);
923 kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)addr, MAP_MEM_VM_SHARE, &mementry, MACH_PORT_NULL);
924 assert(kr == 0);
925 kr = mach_memory_entry_map_size(mementry, map, start, size, &map_size);
926 check_mach_memory_entry_map_size_outparam_changes(&kr, map_size, invalid_initial_size);
927 mach_memory_entry_port_release(mementry);
928 (void)mach_vm_deallocate(map, addr, s);
929 return kr;
930 }
931
// Bundle returned by get_control_from_fd(): the vnode's memory object
// control plus the references that must be dropped afterwards via
// cleanup_control_related_data().
struct file_control_return {
	void * control;  // cast to memory_object_control_t by callers
	void * fp;       // NOTE(review): presumably the fileproc reference — confirm
	void * vp;       // vnode pointer; passed to vnode_vid()
	int fd;          // originating file descriptor
};
struct file_control_return get_control_from_fd(int fd);
void cleanup_control_related_data(struct file_control_return info);
uint32_t vnode_vid(void * vp);
941
/*
 * Validate task_find_region_details out-params.
 * On failure (result == 0) every out-param must be zeroed.
 * On success, each out-param must have been overwritten from its seeded
 * sentinel, and vid must match the vnode returned in vp.
 */
static void
check_task_find_region_details_outparam_changes(int * result,
    uintptr_t vp, uintptr_t saved_vp,
    uint32_t vid,
    bool is_map_shared,
    uint64_t start, uint64_t saved_start,
    uint64_t len, uint64_t saved_len)
{
	// task_find_region_details returns a bool. 0 means failure, 1 success
	if (*result == 0) {
		if (vp != 0 || vid != 0 || is_map_shared != 0 || start != 0 || len != 0) {
			*result = OUT_PARAM_BAD;
		}
	} else {
		// Sentinels must have been replaced by real values.
		if (vp == saved_vp || start == saved_start || len == saved_len) {
			*result = OUT_PARAM_BAD;
		}
		if (vid != (uint32_t)vnode_vid((void *)vp)) {
			*result = OUT_PARAM_BAD;
		}
		// is_map_shared seems to check if the relevant entry is shadowed by another
		// we don't set up any shadow entries for this test
		// NOTE(review): check deliberately disabled below — confirm whether
		// is_map_shared can legitimately be true here before re-enabling.
		if (is_map_shared) {
			// *result = OUT_PARAM_BAD;
		}
	}
}
969
970
971 static int
call_task_find_region_details(MAP_T map,mach_vm_address_t addr)972 call_task_find_region_details(MAP_T map, mach_vm_address_t addr)
973 {
974 (void) map;
975 uint64_t len = UNLIKELY_INITIAL_SIZE, start = UNLIKELY_INITIAL_ADDRESS;
976 uint64_t saved_len = len, saved_start = start;
977 bool is_map_shared = true;
978 uintptr_t vp = (uintptr_t) INVALID_VNODE_PTR;
979 uintptr_t saved_vp = vp;
980 uint32_t vid = UNLIKELY_INITIAL_VID;
981
982 /*
983 * task_find_region_details operates on task->map. Our setup code does allocations
984 * that otherwise could theoretically overwrite existing ones, so we don't want to
985 * operate on current_map
986 */
987 vm_map_t saved_map = swap_task_map(current_task(), current_thread(), map);
988
989 int kr = task_find_region_details(current_task(), addr, FIND_REGION_DETAILS_AT_OFFSET, &vp, &vid, &is_map_shared, &start, &len);
990
991 swap_task_map(current_task(), current_thread(), saved_map);
992
993 check_task_find_region_details_outparam_changes(&kr, vp, saved_vp, vid, is_map_shared, start, saved_start, len, saved_len);
994 return kr;
995 }
996
/*
 * Run `func` once per address trial, with a vnode-backed mapping installed
 * at each trial address first. Trials where the file mapping itself is
 * rejected (KERN_INVALID_ARGUMENT) are recorded as IGNORED since the
 * function under test can't be reached with such a mapping.
 * Returns the allocated results list; caller owns it.
 */
static results_t * __attribute__((used))
test_kext_unix_with_allocated_vnode_addr(kern_return_t (*func)(MAP_T dst_map, mach_vm_address_t start), const char *testname)
{
	// SMART_* declarations attach cleanup handlers (freed at scope exit).
	MAP_T map SMART_MAP;
	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);
	results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		mach_vm_address_t addr = (mach_vm_address_t)trials->list[i].addr;

		// Fresh control (and file/vnode refs) per trial; released below.
		int file_descriptor = get_globals()->file_descriptor;
		struct file_control_return control_info = get_control_from_fd(file_descriptor);
		vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = true);
		kern_return_t kr = vm_map_enter_mem_object_control(map, &addr, TEST_ALLOC_SIZE, 0, vmk_flags, (memory_object_control_t) control_info.control, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
		if (kr == KERN_INVALID_ARGUMENT) {
			// can't map a file at that address, so we can't pass
			// such a mapping to the function being tested
			append_result(results, IGNORED, trials->list[i].name);
			cleanup_control_related_data(control_info);
			continue;
		}
		assert(kr == KERN_SUCCESS);

		kern_return_t ret = func(map, addr);
		append_result(results, ret, trials->list[i].name);
		cleanup_control_related_data(control_info);
	}
	return results;
}
1027
extern uint64_t vm_reclaim_max_threshold;

#if 0
/*
 * Disabled test: exercises the deferred-reclamation buffer init path while
 * vm_reclaim_max_threshold is temporarily lowered to KB16, restoring the
 * original threshold afterwards.
 * NOTE(review): compiled out — confirm why before re-enabling.
 */
static kern_return_t
test_mach_vm_deferred_reclamation_buffer_init(MAP_T map __unused, mach_vm_address_t address, mach_vm_size_t size)
{
	uint64_t vm_reclaim_max_threshold_orig = vm_reclaim_max_threshold;
	kern_return_t kr = 0;

	vm_reclaim_max_threshold = KB16;
	kr = call_mach_vm_deferred_reclamation_buffer_init(current_task(), address, size);
	vm_reclaim_max_threshold = vm_reclaim_max_threshold_orig;

	return kr;
}
#endif
1044
1045
1046 // mach_make_memory_entry and variants
1047
1048 static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr,mach_vm_size_t size,mach_port_t out_handle)1049 check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_vm_size_t size,
1050 mach_port_t out_handle)
1051 {
1052 /*
1053 * mach_make_memory_entry overwrites *size to be 0 on failure.
1054 */
1055 if (*kr != KERN_SUCCESS) {
1056 if (size != 0) {
1057 *kr = OUT_PARAM_BAD;
1058 }
1059 if (out_handle != 0) {
1060 *kr = OUT_PARAM_BAD;
1061 }
1062 }
1063 }
1064
/*
 * IMPL(FN, T) stamps out one call_FN__... wrapper per memory-entry
 * permission flavor (MAP_MEM_ONLY, MAP_MEM_NAMED_CREATE, MAP_MEM_VM_COPY,
 * MAP_MEM_VM_SHARE, MAP_MEM_NAMED_REUSE) plus one taking vm_prot_t
 * directly. Each wrapper creates a parent memory object, calls FN with the
 * out-handle seeded to an unlikely sentinel, releases the returned handle
 * (if any) and the parent object, then verifies the out-params via
 * check_mach_memory_entry_outparam_changes.
 */
#define IMPL(FN, T)                                                          \
	static kern_return_t                                                 \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size)     \
	{                                                                    \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
	        T io_size = size;                                            \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT; \
	        mach_port_t out_handle = invalid_handle_value;               \
	        kern_return_t kr = FN(map, &io_size, start,                  \
	            VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject);    \
	        if (kr == 0) {                                               \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                            \
	        mach_memory_entry_port_release(memobject);                   \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                   \
	}                                                                    \
	                                                                     \
	static kern_return_t                                                 \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size) \
	{                                                                    \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
	        T io_size = size;                                            \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT; \
	        mach_port_t out_handle = invalid_handle_value;               \
	        kern_return_t kr = FN(map, &io_size, start,                  \
	            VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
	        if (kr == 0) {                                               \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                            \
	        mach_memory_entry_port_release(memobject);                   \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                   \
	}                                                                    \
	                                                                     \
	static kern_return_t                                                 \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size)        \
	{                                                                    \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
	        T io_size = size;                                            \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT; \
	        mach_port_t out_handle = invalid_handle_value;               \
	        kern_return_t kr = FN(map, &io_size, start,                  \
	            VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
	        if (kr == 0) {                                               \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                            \
	        mach_memory_entry_port_release(memobject);                   \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                   \
	}                                                                    \
	                                                                     \
	static kern_return_t                                                 \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size)       \
	{                                                                    \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
	        T io_size = size;                                            \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT; \
	        mach_port_t out_handle = invalid_handle_value;               \
	        kern_return_t kr = FN(map, &io_size, start,                  \
	            VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
	        if (kr == 0) {                                               \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                            \
	        mach_memory_entry_port_release(memobject);                   \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                   \
	}                                                                    \
	                                                                     \
	static kern_return_t                                                 \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size)  \
	{                                                                    \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
	        T io_size = size;                                            \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT; \
	        mach_port_t out_handle = invalid_handle_value;               \
	        kern_return_t kr = FN(map, &io_size, start,                  \
	            VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
	        if (kr == 0) {                                               \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                            \
	        mach_memory_entry_port_release(memobject);                   \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                   \
	}                                                                    \
	                                                                     \
	static kern_return_t                                                 \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{                                                                    \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
	        T io_size = size;                                            \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT; \
	        mach_port_t out_handle = invalid_handle_value;               \
	        kern_return_t kr = FN(map, &io_size, start,                  \
	            prot, &out_handle, memobject);                           \
	        if (kr == 0) {                                               \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                            \
	        mach_memory_entry_port_release(memobject);                   \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                   \
	}
1167
IMPL(mach_make_memory_entry_64,mach_vm_address_t)1168 IMPL(mach_make_memory_entry_64, mach_vm_address_t)
1169 IMPL(mach_make_memory_entry, vm_size_t)
1170 static kern_return_t
1171 mach_make_memory_entry_internal_retyped(
1172 vm_map_t target_map,
1173 memory_object_size_t *size,
1174 memory_object_offset_t offset,
1175 vm_prot_t permission,
1176 ipc_port_t *object_handle,
1177 ipc_port_t parent_handle)
1178 {
1179 vm_named_entry_kernel_flags_t vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
1180 if (permission & MAP_MEM_LEDGER_TAGGED) {
1181 vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
1182 }
1183 return mach_make_memory_entry_internal(target_map, size, offset, permission, vmne_kflags, object_handle, parent_handle);
1184 }
1185 IMPL(mach_make_memory_entry_internal_retyped, mach_vm_address_t)
1186
1187 #undef IMPL
1188
// mach_vm_map/mach_vm_map_external/mach_vm_map_kernel/vm_map/vm_map_external infra

// Common signature shared by the mach_vm_map-style entry points so the
// call_map_fn__* helpers below can be reused for each of them.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
1202
1203 static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1204 call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1205 {
1206 mach_vm_address_t out_addr = start;
1207 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1208 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1209 // fixed-overwrite with pre-existing allocation, don't deallocate
1210 return kr;
1211 }
1212
1213 static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1214 call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1215 {
1216 mach_vm_address_t out_addr = start;
1217 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1218 0, 0, true, 0, 0, VM_INHERIT_NONE);
1219 // fixed-overwrite with pre-existing allocation, don't deallocate
1220 return kr;
1221 }
1222
1223 static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1224 call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1225 {
1226 mach_vm_address_t out_addr = start_hint;
1227 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1228 if (kr == 0) {
1229 (void)mach_vm_deallocate(map, out_addr, size);
1230 }
1231 return kr;
1232 }
1233
1234 static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1235 call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1236 {
1237 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1238 mach_vm_address_t out_addr = start;
1239 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1240 memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1241 // fixed-overwrite with pre-existing allocation, don't deallocate
1242 mach_memory_entry_port_release(memobject);
1243 return kr;
1244 }
1245
1246 static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1247 call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1248 {
1249 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1250 mach_vm_address_t out_addr = start;
1251 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1252 memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1253 // fixed-overwrite with pre-existing allocation, don't deallocate
1254 mach_memory_entry_port_release(memobject);
1255 return kr;
1256 }
1257
1258 static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1259 call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1260 {
1261 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1262 mach_vm_address_t out_addr = start_hint;
1263 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, memobject,
1264 KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1265 if (kr == 0) {
1266 (void)mach_vm_deallocate(map, out_addr, size);
1267 }
1268 mach_memory_entry_port_release(memobject);
1269 return kr;
1270 }
1271
1272 static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1273 helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1274 {
1275 mach_port_t memobject = make_a_mem_object(obj_size);
1276 mach_vm_address_t out_addr = start;
1277 kern_return_t kr = fn(map, &out_addr, size, 0, flags, memobject,
1278 offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1279 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1280 mach_memory_entry_port_release(memobject);
1281 return kr;
1282 }
1283
1284 static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1285 call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1286 {
1287 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
1288 }
1289
1290 static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1291 call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1292 {
1293 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
1294 }
1295
1296 static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1297 call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1298 {
1299 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
1300 }
1301
1302 static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1303 help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1304 {
1305 mach_vm_address_t out_addr = start;
1306 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1307 0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1308 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1309 return kr;
1310 }
1311
1312 static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1313 call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1314 {
1315 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1316 }
1317
1318 static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1319 call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1320 {
1321 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1322 }
1323
1324 static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1325 call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1326 {
1327 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1328 }
1329
1330 static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1331 help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1332 {
1333 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1334 mach_vm_address_t out_addr = start;
1335 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1336 memobject, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1337 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1338 mach_memory_entry_port_release(memobject);
1339 return kr;
1340 }
1341
1342 static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1343 call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1344 {
1345 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1346 }
1347
1348 static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1349 call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1350 {
1351 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1352 }
1353
1354 static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1355 call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1356 {
1357 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1358 }
1359
1360 static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1361 call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1362 {
1363 kern_return_t kr = fn(map, start, size, 0, flags,
1364 0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1365 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1366 return kr;
1367 }
1368
1369 static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1370 call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1371 {
1372 kern_return_t kr = fn(map, start, size, 0, flags,
1373 0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1374 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1375 return kr;
1376 }
1377
1378 static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1379 call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1380 {
1381 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1382 kern_return_t kr = fn(map, start, size, 0, flags,
1383 memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1384 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1385 mach_memory_entry_port_release(memobject);
1386 return kr;
1387 }
1388
1389 static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1390 call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1391 {
1392 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1393 kern_return_t kr = fn(map, start, size, 0, flags,
1394 memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1395 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1396 mach_memory_entry_port_release(memobject);
1397 return kr;
1398 }
1399
1400 static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1401 help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1402 {
1403 mach_vm_address_t out_addr = 0;
1404 kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1405 0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1406 deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1407 return kr;
1408 }
1409
1410 static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1411 call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1412 {
1413 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1414 }
1415
1416 static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1417 call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1418 {
1419 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1420 }
1421
1422 static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1423 call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1424 {
1425 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1426 }
1427
1428 static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1429 help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1430 {
1431 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1432 mach_vm_address_t out_addr = 0;
1433 kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1434 memobject, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1435 deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1436 mach_memory_entry_port_release(memobject);
1437 return kr;
1438 }
1439
1440 static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1441 call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1442 {
1443 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1444 }
1445
1446 static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1447 call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1448 {
1449 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1450 }
1451
1452 static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1453 call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1454 {
1455 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1456 }
1457
1458 // wrappers
1459
1460 kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1461 mach_vm_map_wrapped(vm_map_t target_task,
1462 mach_vm_address_t *address,
1463 mach_vm_size_t size,
1464 mach_vm_offset_t mask,
1465 int flags,
1466 mem_entry_name_port_t object,
1467 memory_object_offset_t offset,
1468 boolean_t copy,
1469 vm_prot_t cur_protection,
1470 vm_prot_t max_protection,
1471 vm_inherit_t inheritance)
1472 {
1473 if (dealloc_would_time_out(*address, size, target_task)) {
1474 return ACCEPTABLE;
1475 }
1476
1477 mach_vm_address_t saved_addr = *address;
1478 kern_return_t kr = mach_vm_map(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1479 check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1480 return kr;
1481 }
1482
1483 // missing forward declaration
1484 kern_return_t
1485 mach_vm_map_external(
1486 vm_map_t target_map,
1487 mach_vm_offset_t *address,
1488 mach_vm_size_t initial_size,
1489 mach_vm_offset_t mask,
1490 int flags,
1491 ipc_port_t port,
1492 vm_object_offset_t offset,
1493 boolean_t copy,
1494 vm_prot_t cur_protection,
1495 vm_prot_t max_protection,
1496 vm_inherit_t inheritance);
1497 kern_return_t
mach_vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1498 mach_vm_map_external_wrapped(vm_map_t target_task,
1499 mach_vm_address_t *address,
1500 mach_vm_size_t size,
1501 mach_vm_offset_t mask,
1502 int flags,
1503 mem_entry_name_port_t object,
1504 memory_object_offset_t offset,
1505 boolean_t copy,
1506 vm_prot_t cur_protection,
1507 vm_prot_t max_protection,
1508 vm_inherit_t inheritance)
1509 {
1510 if (dealloc_would_time_out(*address, size, target_task)) {
1511 return ACCEPTABLE;
1512 }
1513
1514 mach_vm_address_t saved_addr = *address;
1515 kern_return_t kr = mach_vm_map_external(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1516 check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1517 return kr;
1518 }
1519
1520 kern_return_t
mach_vm_map_kernel_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1521 mach_vm_map_kernel_wrapped(vm_map_t target_task,
1522 mach_vm_address_t *address,
1523 mach_vm_size_t size,
1524 mach_vm_offset_t mask,
1525 int flags,
1526 mem_entry_name_port_t object,
1527 memory_object_offset_t offset,
1528 boolean_t copy,
1529 vm_prot_t cur_protection,
1530 vm_prot_t max_protection,
1531 vm_inherit_t inheritance)
1532 {
1533 if (dealloc_would_time_out(*address, size, target_task)) {
1534 return ACCEPTABLE;
1535 }
1536
1537 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1538
1539 vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1540 mach_vm_address_t saved_addr = *address;
1541 kern_return_t kr = mach_vm_map_kernel(target_task, address, size, mask, vmk_flags, object, offset, copy, cur_protection, max_protection, inheritance);
1542 check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1543 return kr;
1544 }
1545
1546 static inline void
check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_address_t saved_start,int flags,MAP_T map)1547 check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr, mach_vm_address_t addr,
1548 mach_vm_address_t saved_start, int flags, MAP_T map)
1549 {
1550 if (*kr == KERN_SUCCESS) {
1551 if (is_fixed(flags)) {
1552 if (addr != truncate_vm_map_addr_with_flags(map, saved_start, flags)) {
1553 *kr = OUT_PARAM_BAD;
1554 }
1555 }
1556 } else {
1557 if (saved_start != addr) {
1558 *kr = OUT_PARAM_BAD;
1559 }
1560 }
1561 }
1562
1563 kern_return_t
vm_map_enter_mem_object_control_wrapped(vm_map_t target_map,mach_vm_address_t * address,mach_vm_size_t size,vm_map_offset_t mask,int flags,mem_entry_name_port_t object __unused,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1564 vm_map_enter_mem_object_control_wrapped(
1565 vm_map_t target_map,
1566 mach_vm_address_t *address,
1567 mach_vm_size_t size,
1568 vm_map_offset_t mask,
1569 int flags,
1570 mem_entry_name_port_t object __unused,
1571 memory_object_offset_t offset,
1572 boolean_t copy,
1573 vm_prot_t cur_protection,
1574 vm_prot_t max_protection,
1575 vm_inherit_t inheritance)
1576 {
1577 if (dealloc_would_time_out(*address, size, target_map)) {
1578 return ACCEPTABLE;
1579 }
1580
1581 vm_map_offset_t vmmaddr = (vm_map_offset_t) *address;
1582 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1583
1584 vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1585 int file_descriptor = get_globals()->file_descriptor;
1586 struct file_control_return control_info = get_control_from_fd(file_descriptor);
1587 kern_return_t kr = vm_map_enter_mem_object_control(target_map, &vmmaddr, size, mask, vmk_flags, (memory_object_control_t) control_info.control, offset, copy, cur_protection, max_protection, inheritance);
1588 check_vm_map_enter_mem_object_control_outparam_changes(&kr, vmmaddr, *address, flags, target_map);
1589
1590 *address = vmmaddr;
1591
1592 cleanup_control_related_data(control_info);
1593
1594 return kr;
1595 }
1596
1597 kern_return_t
vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1598 vm_map_wrapped(vm_map_t target_task,
1599 mach_vm_address_t *address,
1600 mach_vm_size_t size,
1601 mach_vm_offset_t mask,
1602 int flags,
1603 mem_entry_name_port_t object,
1604 memory_object_offset_t offset,
1605 boolean_t copy,
1606 vm_prot_t cur_protection,
1607 vm_prot_t max_protection,
1608 vm_inherit_t inheritance)
1609 {
1610 if (dealloc_would_time_out(*address, size, target_task)) {
1611 return ACCEPTABLE;
1612 }
1613
1614 vm_address_t addr = (vm_address_t)*address;
1615 kern_return_t kr = vm_map(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1616 check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1617 *address = addr;
1618 return kr;
1619 }
1620
1621 kern_return_t
1622 vm_map_external(
1623 vm_map_t target_map,
1624 vm_offset_t *address,
1625 vm_size_t size,
1626 vm_offset_t mask,
1627 int flags,
1628 ipc_port_t port,
1629 vm_offset_t offset,
1630 boolean_t copy,
1631 vm_prot_t cur_protection,
1632 vm_prot_t max_protection,
1633 vm_inherit_t inheritance);
1634 kern_return_t
vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1635 vm_map_external_wrapped(vm_map_t target_task,
1636 mach_vm_address_t *address,
1637 mach_vm_size_t size,
1638 mach_vm_offset_t mask,
1639 int flags,
1640 mem_entry_name_port_t object,
1641 memory_object_offset_t offset,
1642 boolean_t copy,
1643 vm_prot_t cur_protection,
1644 vm_prot_t max_protection,
1645 vm_inherit_t inheritance)
1646 {
1647 if (dealloc_would_time_out(*address, size, target_task)) {
1648 return ACCEPTABLE;
1649 }
1650
1651 vm_address_t addr = (vm_address_t)*address;
1652 kern_return_t kr = vm_map_external(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1653 check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1654 *address = addr;
1655 return kr;
1656 }
1657
1658 // implementations
1659
/*
 * Trampoline generators. For each wrapped map function, these macros stamp
 * out small adapter functions named call_<map_fn>__<instance>[__<params>]
 * that forward to the matching call_map_fn__* helper defined above. The
 * generated names are what the RUN_* test macros later expect to find.
 */

/* adapter: fixed start address + size */
#define IMPL_MAP_FN_START_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
	{ \
	        return call_map_fn__ ## instance(map_fn, map, start, size); \
	}

/* adapter: placement hint + size (anywhere variants) */
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
	{ \
	        return call_map_fn__ ## instance(map_fn, map, start_hint, size); \
	}

/* adapter: start + size + object offset + object size */
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
	{ \
	        return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size); \
	}

/* adapter: start + size + vm_inherit_t under test */
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
	{ \
	        return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit); \
	}

/* adapter: start + size + vm flags under test */
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
	{ \
	        return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags); \
	}

/* adapter: cur/max protection pair under test */
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
	{ \
	        return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max); \
	}

/* full adapter suite for one map function */
#define IMPL(map_fn) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere) \

// Instantiate the full adapter suite for each wrapped map entry point.
IMPL(mach_vm_map_wrapped)
IMPL(mach_vm_map_external_wrapped)
IMPL(mach_vm_map_kernel_wrapped)
IMPL(vm_map_wrapped)
IMPL(vm_map_external_wrapped)
IMPL(vm_map_enter_mem_object_control_wrapped)

#undef IMPL
1737
// Cleanup handler used via __attribute__((cleanup)) on the per-thread test
// context: tears down the embedded thread test context when the variable
// goes out of scope.
static void
cleanup_context(vm_parameter_validation_kern_thread_context_t *ctx)
{
	thread_cleanup_test_context(&ctx->ttc);
}
1743
1744 static results_t *
process_results(results_t * results)1745 process_results(results_t *results)
1746 {
1747 if (get_globals()->generate_golden) {
1748 return dump_golden_results(results);
1749 } else {
1750 return __dump_results(results);
1751 }
1752 }
1753
1754 static int
vm_parameter_validation_kern_test(int64_t in_value,int64_t * out_value)1755 vm_parameter_validation_kern_test(int64_t in_value, int64_t *out_value)
1756 {
1757 // Copyin the arguments from userspace.
1758 // Fail if the structure sizes don't match.
1759 vm_parameter_validation_kern_args_t args;
1760 if (copyin(in_value, &args, sizeof(args)) != 0 ||
1761 args.sizeof_args != sizeof(args)) {
1762 *out_value = KERN_TEST_BAD_ARGS;
1763 return 0;
1764 }
1765
1766 // Use the thread test context to store our "global" variables.
1767 vm_parameter_validation_kern_thread_context_t ctx
1768 __attribute__((cleanup(cleanup_context))) = {
1769 .ttc = {
1770 .ttc_identity = test_identity_vm_parameter_validation_kern,
1771 // - avoid panics for untagged wired memory (set to true during some tests)
1772 // - clamp vm addresses before passing to pmap to avoid pmap panics
1773 .test_option_vm_prevent_wire_tag_panic = false,
1774 .test_option_vm_map_clamp_pmap_remove = true,
1775 },
1776 .output_buffer_start = args.output_buffer_address,
1777 .output_buffer_cur = args.output_buffer_address,
1778 .output_buffer_end = args.output_buffer_address + args.output_buffer_size,
1779 .file_descriptor = (int)args.file_descriptor,
1780 .generate_golden = args.generate_golden,
1781 };
1782 thread_set_test_context(&ctx.ttc);
1783
1784 #if !CONFIG_SPTM && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)
1785 if (get_globals()->generate_golden) {
1786 // Some devices skip some trials to avoid timeouts.
1787 // Golden files cannot be generated on these devices.
1788 testprintf("Can't generate golden files on this device "
1789 "(PPL && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)). "
1790 "Try again on a different device.\n");
1791 *out_value = KERN_TEST_FAILED;
1792 return 0;
1793 }
1794 #else
1795 #pragma clang diagnostic ignored "-Wunused-label"
1796 #endif
1797
1798 /*
1799 * -- memory entry functions --
1800 * The memory entry test functions use macros to generate each flavor of memory entry function.
1801 * For more context on why, see the matching comment in vm_parameter_validation.c
1802 */
1803
1804 #define RUN_START_SIZE(fn, variant, name) dealloc_results(process_results(test_mach_with_allocated_start_size(call_ ## fn ## __start_size__ ## variant, name " (start/size)")))
1805 #define RUN_PROT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __vm_prot , name " (vm_prot_t)")))
1806
1807 #define RUN_ALL(fn, name) \
1808 RUN_START_SIZE(fn, copy, #name " (copy)"); \
1809 RUN_START_SIZE(fn, memonly, #name " (memonly)"); \
1810 RUN_START_SIZE(fn, namedcreate, #name " (namedcreate)"); \
1811 RUN_START_SIZE(fn, share, #name " (share)"); \
1812 RUN_START_SIZE(fn, namedreuse, #name " (namedreuse)"); \
1813 RUN_PROT(fn, #name " (vm_prot_t)"); \
1814
1815 RUN_ALL(mach_make_memory_entry_64, mach_make_memory_entry_64);
1816 RUN_ALL(mach_make_memory_entry, mach_make_memory_entry);
1817 RUN_ALL(mach_make_memory_entry_internal_retyped, mach_make_memory_entry_internal);
1818 #undef RUN_ALL
1819 #undef RUN_START_SIZE
1820 #undef RUN_PROT
1821
1822 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
1823 RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
1824 #undef RUN
1825
1826 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
1827 RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
1828 #undef RUN
1829
1830 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
1831 RUN(call_mach_memory_entry_map_size__start_size, "mach_memory_entry_map_size");
1832 #undef RUN
1833
1834 /*
1835 * -- allocate/deallocate functions --
1836 */
1837
1838 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
1839 RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate_external (fixed) (realigned start/size)");
1840 RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate_external (anywhere) (hint/size)");
1841 RUN(call_mach_vm_allocate_kernel__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
1842 RUN(call_mach_vm_allocate_kernel__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
1843 #undef RUN
1844
1845 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1846 RUN(call_mach_vm_allocate__flags, "mach_vm_allocate_external");
1847 RUN(call_mach_vm_allocate_kernel__flags, "mach_vm_allocate_kernel");
1848 #undef RUN
1849
1850 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
1851 RUN(call_vm_allocate__start_size_fixed, "vm_allocate (fixed) (realigned start/size)");
1852 RUN(call_vm_allocate__start_size_anywhere, "vm_allocate (anywhere) (hint/size)");
1853 #undef RUN
1854
1855 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1856 RUN(call_vm_allocate__flags, "vm_allocate");
1857 #undef RUN
1858 dealloc_results(process_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
1859 dealloc_results(process_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
1860
1861 /*
1862 * -- map/remap functions --
1863 * These functions rely heavily on macros.
1864 * For more context on why, see the matching comment in vm_parameter_validation.c
1865 */
1866
1867 // map tests
1868
1869 #define RUN_START_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
1870 #define RUN_HINT_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
1871 #define RUN_PROT_PAIR(fn, name) dealloc_results(process_results(test_mach_vm_prot_pair(fn, name " (vm_prot_t pair)")))
1872 #define RUN_INHERIT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
1873 #define RUN_FLAGS(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1874 #define RUN_SSOO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset_object(fn, name " (start/size/offset/object)")))
1875
1876 #define RUN_ALL(fn, name) \
1877 RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
1878 RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
1879 RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
1880 RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
1881 RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
1882 RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
1883 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
1884 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
1885 RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
1886 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
1887 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
1888 RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
1889 RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
1890 RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
1891 RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
1892 RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
1893 RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
1894 RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
1895 RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
1896 RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
1897 RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
1898 RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
1899 RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
1900 RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
1901 RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \
1902
1903 RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
1904 RUN_ALL(mach_vm_map_external_wrapped, mach_vm_map_external);
1905 RUN_ALL(mach_vm_map_kernel_wrapped, mach_vm_map_kernel);
1906 RUN_ALL(vm_map_wrapped, vm_map);
1907 RUN_ALL(vm_map_external_wrapped, vm_map_external);
1908
1909 #define RUN_SSO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset(fn, name " (start/size/offset)")))
1910
1911 #define RUN_ALL_CTL(fn, name) \
1912 RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
1913 RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
1914 RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
1915 RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
1916 RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
1917 RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
1918 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
1919 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
1920 RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
1921 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
1922 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
1923 RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
1924 RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
1925 RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
1926 RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
1927 RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
1928 RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
1929 RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
1930 RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
1931 RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
1932 RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
1933 RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
1934 RUN_SSO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
1935 RUN_SSO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
1936 RUN_SSO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \
1937
1938 RUN_ALL_CTL(vm_map_enter_mem_object_control_wrapped, vm_map_enter_mem_object_control);
1939
1940 #undef RUN_ALL
1941 #undef RUN_START_SIZE
1942 #undef RUN_HINT_SIZE
1943 #undef RUN_PROT_PAIR
1944 #undef RUN_INHERIT
1945 #undef RUN_FLAGS
1946 #undef RUN_SSOO
1947 #undef RUN_ALL_CTL
1948 #undef RUN_SSO
1949
1950 // remap tests
1951
1952 #define FN_NAME(fn, variant, type) call_ ## fn ## __ ## variant ## __ ## type
1953 #define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(process_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
1954 #define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
1955 #define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
1956 #define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
1957 #define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
1958 #define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
1959 #define RUN_SRC_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_allocated_src_unallocated_dst_size, fn, variant, src_dst_size, type_name, name)
1960
1961 #define RUN_ALL(fn, realigned, name) \
1962 RUN_SRC_SIZE(fn, copy, realigned "src/size", name); \
1963 RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name); \
1964 RUN_DST_SIZE(fn, fixed, "realigned dst/size", name); \
1965 RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name); \
1966 RUN_DST_SIZE(fn, anywhere, "hint/size", name); \
1967 RUN_INHERIT(fn, fixed, name); \
1968 RUN_INHERIT(fn, fixed_copy, name); \
1969 RUN_INHERIT(fn, anywhere, name); \
1970 RUN_FLAGS(fn, nocopy, name); \
1971 RUN_FLAGS(fn, copy, name); \
1972 RUN_PROT_PAIRS(fn, fixed, name); \
1973 RUN_PROT_PAIRS(fn, fixed_copy, name); \
1974 RUN_PROT_PAIRS(fn, anywhere, name); \
1975 RUN_SRC_DST_SIZE(fn, fixed, "src/dst/size", name); \
1976 RUN_SRC_DST_SIZE(fn, fixed_copy, "src/dst/size", name); \
1977 RUN_SRC_DST_SIZE(fn, anywhere, "src/dst/size", name); \
1978
1979 RUN_ALL(mach_vm_remap_wrapped_kern, "realigned ", mach_vm_remap);
1980 RUN_ALL(mach_vm_remap_new_kernel_wrapped, , mach_vm_remap_new_kernel);
1981
1982 #undef RUN_ALL
1983 #undef RUN_HELPER
1984 #undef RUN_SRC_SIZE
1985 #undef RUN_DST_SIZE
1986 #undef RUN_PROT_PAIRS
1987 #undef RUN_INHERIT
1988 #undef RUN_FLAGS
1989 #undef RUN_SRC_DST_SIZE
1990
1991 /*
1992 * -- wire/unwire functions --
1993 * Some wire functions (vm_map_wire_and_extract, vm_map_wire_external, vm_map_wire_kernel)
1994 * are implemented with macros to avoid code duplication that would happen otherwise from the multiple
1995 * entrypoints, multiple params under test, and user/non user wired paths
1996 */
1997
1998 #define RUN(fn, name) dealloc_results(process_results(test_kext_unix_with_allocated_start_size(fn, name " (start/size)")))
1999 RUN(call_vslock, "vslock");
2000 RUN(call_vsunlock_undirtied, "vsunlock (undirtied)");
2001 RUN(call_vsunlock_dirtied, "vsunlock (dirtied)");
2002 #undef RUN
2003
2004 #define RUN_PROT(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __prot__user_wired_ ## wired ## _, name " (vm_prot_t)")))
2005 #define RUN_START(fn, wired, name) dealloc_results(process_results(test_kext_tagged_with_allocated_addr(call_ ## fn ## __start__user_wired_ ## wired ## _, name " (addr)")))
2006 #define RUN_START_END(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_start_end(call_ ## fn ## __start_end__user_wired_ ## wired ## _, name " (start/end)")))
2007 #define RUN_TAG(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_tag(call_ ## fn ## __tag__user_wired_ ## wired ## _, name " (tag)")))
2008
2009 #if XNU_PLATFORM_MacOSX
2010 // vm_map_wire_and_extract is implemented on macOS only
2011
2012 #define RUN_ALL_WIRE_AND_EXTRACT(fn, name) \
2013 RUN_PROT(fn, true, #name " (user wired)"); \
2014 RUN_PROT(fn, false, #name " (non user wired)"); \
2015 RUN_START(fn, true, #name " (user wired)"); \
2016 RUN_START(fn, false, #name " (non user wired)");
2017
2018 RUN_ALL_WIRE_AND_EXTRACT(vm_map_wire_and_extract_retyped, vm_map_wire_and_extract);
2019 #undef RUN_ALL_WIRE_AND_EXTRACT
2020 #endif // XNU_PLATFORM_MacOSX
2021
2022 #define RUN_ALL_WIRE_EXTERNAL(fn, name) \
2023 RUN_PROT(fn, true, #name " (user wired)"); \
2024 RUN_PROT(fn, false, #name " (non user wired))"); \
2025 RUN_START_END(fn, true, #name " (user wired)"); \
2026 RUN_START_END(fn, false, #name " (non user wired)");
2027
2028 RUN_ALL_WIRE_EXTERNAL(vm_map_wire_external_retyped, vm_map_wire_external);
2029 #undef RUN_ALL_WIRE_EXTERNAL
2030
2031 #define RUN_ALL_WIRE_KERNEL(fn, name) \
2032 RUN_PROT(fn, false, #name " (non user wired))"); \
2033 RUN_PROT(fn, true, #name " (user wired)"); \
2034 RUN_START_END(fn, true, #name " (user wired)"); \
2035 RUN_START_END(fn, false, #name " (non user wired)"); \
2036 RUN_TAG(fn, true, #name " (user wired)"); \
2037 RUN_TAG(fn, false, #name " (non user wired)");
2038
2039 RUN_ALL_WIRE_KERNEL(vm_map_wire_kernel, vm_map_wire_kernel);
2040 #undef RUN_ALL_WIRE_KERNEL
2041
2042 #undef RUN_PROT
2043 #undef RUN_START
2044 #undef RUN_START_END
2045 #undef RUN_TAG
2046
2047 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_end(fn, name " (start/end)")))
2048 RUN(call_vm_map_unwire_user_wired, "vm_map_unwire (user_wired)");
2049 RUN(call_vm_map_unwire_non_user_wired, "vm_map_unwire (non user_wired)");
2050 #undef RUN
2051
2052 #define RUN(fn, name) dealloc_results(process_results(test_with_int64(fn, name " (int64)")))
2053 RUN(call_mach_vm_wire_level_monitor, "mach_vm_wire_level_monitor");
2054 #undef RUN
2055
2056 /*
2057 * -- copyin/copyout functions --
2058 */
2059
2060 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2061 RUN(call_vm_map_copyin, "vm_map_copyin");
2062 RUN(call_mach_vm_read, "mach_vm_read");
2063 // vm_map_copyin_common is covered well by the vm_map_copyin test
2064 // RUN(call_vm_map_copyin_common, "vm_map_copyin_common");
2065 #undef RUN
2066
2067 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr_of_size_n(fn, sizeof(uint32_t), name " (start)")))
2068 RUN(call_copyoutmap_atomic32, "copyoutmap_atomic32");
2069 #undef RUN
2070
2071 #define RUN(fn, name) dealloc_results(process_results(test_src_kerneldst_size(fn, name " (src/dst/size)")))
2072 RUN(call_copyinmap, "copyinmap");
2073 RUN(call_vm_map_read_user, "vm_map_read_user");
2074 #undef RUN
2075
2076 #define RUN(fn, name) dealloc_results(process_results(test_kernelsrc_dst_size(fn, name " (src/dst/size)")))
2077 RUN(call_vm_map_write_user, "vm_map_write_user");
2078 RUN(call_copyoutmap, "copyoutmap");
2079 #undef RUN
2080
2081 dealloc_results(process_results(test_vm_map_copy_overwrite(call_vm_map_copy_overwrite_interruptible, "vm_map_copy_overwrite (start/size)")));
2082
2083 /*
2084 * -- protection functions --
2085 */
2086
2087 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2088 RUN(call_mach_vm_protect__start_size, "mach_vm_protect");
2089 RUN(call_vm_protect__start_size, "vm_protect");
2090 RUN(call_vm_map_protect__start_size__no_max, "vm_map_protect (no max)");
2091 RUN(call_vm_map_protect__start_size__set_max, "vm_map_protect (set max)");
2092 #undef RUN
2093
2094 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2095 RUN(call_mach_vm_protect__vm_prot, "mach_vm_protect");
2096 RUN(call_vm_protect__vm_prot, "vm_protect");
2097 RUN(call_vm_map_protect__vm_prot__no_max, "vm_map_protect (no max)");
2098 RUN(call_vm_map_protect__vm_prot__set_max, "vm_map_protect (set max)");
2099 #undef RUN
2100
2101 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
2102 RUN(call_useracc__start_size, "useracc");
2103 #undef RUN
2104 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2105 RUN(call_useracc__vm_prot, "useracc");
2106 #undef RUN
2107
2108 /*
2109 * -- madvise/behavior functions --
2110 */
2111
2112 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2113 RUN(call_mach_vm_behavior_set__start_size__default, "mach_vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
2114 RUN(call_mach_vm_behavior_set__start_size__can_reuse, "mach_vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
2115 #undef RUN
2116 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_behavior_t(fn, name " (vm_behavior_t)")))
2117 RUN(call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
2118 #undef RUN
2119
2120 /*
2121 * -- purgability/purgeability functions --
2122 */
2123
2124 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_addr(fn, name " (addr)")))
2125 RUN(call_vm_map_purgable_control__address__get, "vm_map_purgable_control (get)");
2126 RUN(call_vm_map_purgable_control__address__purge_all, "vm_map_purgable_control (purge all)");
2127 #undef RUN
2128
2129 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
2130 RUN(call_vm_map_purgable_control__purgeable_state, "vm_map_purgable_control");
2131 #undef RUN
2132
2133 /*
2134 * -- region info functions --
2135 */
2136
2137 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
2138 RUN(call_mach_vm_region, "mach_vm_region");
2139 RUN(call_vm_region, "vm_region");
2140 #undef RUN
2141
2142 /*
2143 * -- page info functions --
2144 */
2145
2146 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
2147 RUN(call_vm_map_page_info, "vm_map_page_info");
2148 #undef RUN
2149
2150 /*
2151 * -- miscellaneous functions --
2152 */
2153
2154 #if CONFIG_MAP_RANGES
2155 dealloc_results(process_results(test_mach_vm_range_create(call_mach_vm_range_create, "mach_vm_range_create (start/size/start2/size2)")));
2156 #endif
2157
2158 dealloc_results(process_results(test_kext_unix_with_allocated_vnode_addr(call_task_find_region_details, "task_find_region_details (addr)")));
2159
2160 *out_value = KERN_TEST_SUCCESS;
2161 return 0;
2162 }
2163
// Register this test so it can be invoked from userspace via the
// "vm_parameter_validation_kern_v2" debug sysctl.
// The "_v2" suffix is here because sysctl "vm_parameter_validation_kern" was an
// older version of this test that used incompatibly different sysctl parameters.
SYSCTL_TEST_REGISTER(vm_parameter_validation_kern_v2, vm_parameter_validation_kern_test);
2167