1 #include <kern/zalloc.h>
2 #include <kern/thread_test_context.h>
3
4 #include "vm_parameter_validation.h"
5
6 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
7 #pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
8 #pragma clang diagnostic ignored "-Wmissing-prototypes"
9 #pragma clang diagnostic ignored "-Wpedantic"
10 #pragma clang diagnostic ignored "-Wgcc-compat"
11
12
// Kernel sysctl test prints its output into a userspace buffer.
// fixme these global variables prevent test concurrency

// Current write cursor into the userspace output buffer; advanced by
// user_vprintf() as lines are emitted.
static user_addr_t SYSCTL_OUTPUT_BUF;
// End (exclusive) of the userspace output buffer; writes must stay below this.
static user_addr_t SYSCTL_OUTPUT_END;

// This is a read/write fd passed from userspace.
// It's passed to make it easier for kernel tests to interact with a file.
static int file_descriptor;

// Output to create a golden test result in kern test, controlled by
// MSB in file_descriptor and set by GENERATE_GOLDEN_IMAGE from userspace.
bool kernel_generate_golden = FALSE;
26
27 // vprintf() to a userspace buffer
28 // output is incremented to point at the new nul terminator
29 static void
user_vprintf(user_addr_t * output,user_addr_t output_end,const char * format,va_list args)30 user_vprintf(user_addr_t *output, user_addr_t output_end, const char *format, va_list args) __printflike(3, 0)
31 {
32 extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
33 char linebuf[1024];
34 size_t printed;
35
36 printed = vsnprintf(linebuf, sizeof(linebuf), format, args);
37 assert(printed < sizeof(linebuf) - 1);
38 assert(*output + printed + 1 < output_end);
39 copyout(linebuf, *output, printed + 1);
40 *output += printed;
41 }
42
// printf()-style output for kernel tests: formats into the shared userspace
// output buffer via user_vprintf(), advancing SYSCTL_OUTPUT_BUF.
void
testprintf(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	user_vprintf(&SYSCTL_OUTPUT_BUF, SYSCTL_OUTPUT_END, format, args);
	va_end(args);
}
51
52 // Utils
53
54 static mach_port_t
make_a_mem_object(vm_size_t size)55 make_a_mem_object(vm_size_t size)
56 {
57 ipc_port_t out_handle;
58 kern_return_t kr = mach_memory_object_memory_entry_64((host_t)1, /*internal=*/ true, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
59 assert(kr == 0);
60 return out_handle;
61 }
62
63 static mach_port_t
make_a_mem_entry(MAP_T map,vm_size_t size)64 make_a_mem_entry(MAP_T map, vm_size_t size)
65 {
66 mach_port_t port;
67 memory_object_size_t s = (memory_object_size_t)size;
68 kern_return_t kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
69 assert(kr == 0);
70 return port;
71 }
72
73 // Test functions
74
75 static results_t *
test_vm_map_copy_overwrite(kern_return_t (* func)(MAP_T dst_map,vm_map_copy_t copy,mach_vm_address_t start,mach_vm_size_t size),const char * testname)76 test_vm_map_copy_overwrite(kern_return_t (*func)(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t start, mach_vm_size_t size), const char * testname)
77 {
78 // source map: has an allocation bigger than our
79 // "reasonable" trial sizes, to copy from
80 MAP_T src_map SMART_MAP;
81 allocation_t src_alloc SMART_ALLOCATE_VM(src_map, TEST_ALLOC_SIZE, VM_PROT_READ);
82
83 // dest map: has an allocation bigger than our
84 // "reasonable" trial sizes, to copy-overwrite on
85 MAP_T dst_map SMART_MAP;
86 allocation_t dst_alloc SMART_ALLOCATE_VM(dst_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
87
88 // We test dst/size parameters.
89 // We don't test the contents of the vm_map_copy_t.
90 start_size_trials_t *trials SMART_START_SIZE_TRIALS(dst_alloc.addr);
91 results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, dst_alloc.addr, trials->count);
92
93 for (unsigned i = 0; i < trials->count; i++) {
94 start_size_trial_t trial = trials->list[i];
95
96 // Copy from the source.
97 vm_map_copy_t copy;
98 kern_return_t kr = vm_map_copyin(src_map, src_alloc.addr, src_alloc.size, false, ©);
99 assert(kr == 0);
100 assert(copy); // null copy won't exercise the sanitization path
101
102 // Copy-overwrite to the destination.
103 kern_return_t ret = func(dst_map, copy, trial.start, trial.size);
104
105 if (ret != KERN_SUCCESS) {
106 vm_map_copy_discard(copy);
107 }
108 append_result(results, ret, trial.name);
109 }
110 return results;
111 }
112
113 /*
114 * This function temporarily allocates a writeable allocation in kernel_map, and a read only allocation in a temporary map.
115 * It's used to test a function such as vm_map_read_user which copies in data to a kernel pointer that must be writeable.
116 */
117 static results_t *
test_src_kerneldst_size(kern_return_t (* func)(MAP_T map,vm_map_offset_t src,void * dst,vm_size_t length),const char * testname)118 test_src_kerneldst_size(kern_return_t (*func)(MAP_T map, vm_map_offset_t src, void * dst, vm_size_t length), const char * testname)
119 {
120 MAP_T map SMART_MAP;
121 allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_READ);
122 allocation_t dst_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
123 src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
124 results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
125
126 for (unsigned i = 0; i < trials->count; i++) {
127 src_dst_size_trial_t trial = trials->list[i];
128 trial = slide_trial_src(trial, src_base.addr);
129 trial = slide_trial_dst(trial, dst_base.addr);
130 int ret = func(map, trial.src, (void *)trial.dst, trial.size);
131 append_result(results, ret, trial.name);
132 }
133 return results;
134 }
135
136 /*
137 * This function temporarily allocates a read only allocation in kernel_map, and a writeable allocation in a temporary map.
138 * It's used to test a function such as vm_map_write_user which copies data from a kernel pointer to a writeable userspace address.
139 */
140 static results_t *
test_kernelsrc_dst_size(kern_return_t (* func)(MAP_T map,void * src,vm_map_offset_t dst,vm_size_t length),const char * testname)141 test_kernelsrc_dst_size(kern_return_t (*func)(MAP_T map, void *src, vm_map_offset_t dst, vm_size_t length), const char * testname)
142 {
143 MAP_T map SMART_MAP;
144 allocation_t src_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_READ);
145 allocation_t dst_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
146 src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
147 results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
148
149 for (unsigned i = 0; i < trials->count; i++) {
150 src_dst_size_trial_t trial = trials->list[i];
151 trial = slide_trial_src(trial, src_base.addr);
152 trial = slide_trial_dst(trial, dst_base.addr);
153 int ret = func(map, (void *)trial.src, trial.dst, trial.size);
154 append_result(results, ret, trial.name);
155 }
156 return results;
157 }
158
159
160 /////////////////////////////////////////////////////
161 // Mach tests
162
163
// Wrapper for mach_vm_read() that discards the returned data so the test
// doesn't leak a copy object on every successful trial.
static kern_return_t
call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	vm_offset_t out_addr;
	mach_msg_type_number_t out_size;
	kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
	if (kr == 0) {
		// we didn't call through MIG so out_addr is really a vm_map_copy_t
		vm_map_copy_discard((vm_map_copy_t)out_addr);
	}
	return kr;
}
176
177 static inline void
check_vm_map_copyin_outparam_changes(kern_return_t * kr,vm_map_copy_t copy,vm_map_copy_t saved_copy)178 check_vm_map_copyin_outparam_changes(kern_return_t * kr, vm_map_copy_t copy, vm_map_copy_t saved_copy)
179 {
180 if (*kr == KERN_SUCCESS) {
181 if (copy == saved_copy) {
182 *kr = OUT_PARAM_BAD;
183 }
184 } else {
185 if (copy != saved_copy) {
186 *kr = OUT_PARAM_BAD;
187 }
188 }
189 }
190
191 static kern_return_t
call_vm_map_copyin(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)192 call_vm_map_copyin(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
193 {
194 vm_map_copy_t invalid_initial_value = INVALID_VM_MAP_COPY;
195 vm_map_copy_t copy = invalid_initial_value;
196 kern_return_t kr = vm_map_copyin(map, start, size, false, ©);
197 if (kr == 0) {
198 vm_map_copy_discard(copy);
199 }
200 check_vm_map_copyin_outparam_changes(&kr, copy, invalid_initial_value);
201 return kr;
202 }
203
204 static kern_return_t
call_copyoutmap_atomic32(MAP_T map,vm_map_offset_t addr)205 call_copyoutmap_atomic32(MAP_T map, vm_map_offset_t addr)
206 {
207 uint32_t data = 0;
208 kern_return_t kr = copyoutmap_atomic32(map, data, addr);
209 return kr;
210 }
211
212
213 static kern_return_t
call_mach_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)214 call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
215 {
216 mach_vm_address_t saved_start = *start;
217 kern_return_t kr = mach_vm_allocate_external(map, start, size, flags);
218 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
219 return kr;
220 }
221
222 static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)223 call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
224 {
225 mach_vm_address_t saved_start = *start;
226 kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_FIXED);
227 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
228 return kr;
229 }
230
231 static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)232 call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
233 {
234 mach_vm_address_t saved_start = *start;
235 kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_ANYWHERE);
236 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
237 return kr;
238 }
239
240 static kern_return_t
call_mach_vm_allocate_kernel__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)241 call_mach_vm_allocate_kernel__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
242 {
243 mach_vm_address_t saved_start = *start;
244 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
245 FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK));
246 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
247 return kr;
248 }
249
250 static kern_return_t
call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)251 call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
252 {
253 if (dealloc_would_time_out(*start, size, map)) {
254 return ACCEPTABLE;
255 }
256
257 mach_vm_address_t saved_start = *start;
258 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
259 FLAGS_AND_TAG(VM_FLAGS_FIXED, VM_KERN_MEMORY_OSFMK));
260 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
261 return kr;
262 }
263
264 static kern_return_t
call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)265 call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
266 {
267 if (dealloc_would_time_out(*start, size, map)) {
268 return ACCEPTABLE;
269 }
270
271 mach_vm_address_t saved_start = *start;
272 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
273 FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
274 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
275 return kr;
276 }
277
278
279
280 static kern_return_t
call_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)281 call_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
282 {
283 mach_vm_address_t saved_start = *start;
284 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, flags);
285 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
286 return kr;
287 }
288
289 static kern_return_t
call_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)290 call_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
291 {
292 mach_vm_address_t saved_start = *start;
293 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_FIXED);
294 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
295 return kr;
296 }
297
298 static kern_return_t
call_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)299 call_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
300 {
301 mach_vm_address_t saved_start = *start;
302 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_ANYWHERE);
303 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
304 return kr;
305 }
306
307 static kern_return_t
call_mach_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)308 call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
309 {
310 kern_return_t kr = mach_vm_deallocate(map, start, size);
311 return kr;
312 }
313
314 static kern_return_t
call_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)315 call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
316 {
317 kern_return_t kr = vm_deallocate(map, (vm_address_t) start, (vm_size_t) size);
318 return kr;
319 }
320
321 // Including sys/systm.h caused things to blow up
322 int vslock(user_addr_t addr, user_size_t len);
323 int vsunlock(user_addr_t addr, user_size_t len, int dirtied);
324 static int
call_vslock(void * start,size_t size)325 call_vslock(void * start, size_t size)
326 {
327 int kr = vslock((user_addr_t) start, (user_size_t) size);
328 if (kr == KERN_SUCCESS) {
329 (void) vsunlock((user_addr_t) start, (user_size_t) size, 0);
330 }
331
332 return kr;
333 }
334
335 static int
call_vsunlock_undirtied(void * start,size_t size)336 call_vsunlock_undirtied(void * start, size_t size)
337 {
338 int kr = vslock((user_addr_t) start, (user_size_t) size);
339 if (kr == EINVAL) {
340 // Invalid vslock arguments should also be
341 // invalid vsunlock arguments. Test it.
342 } else if (kr != KERN_SUCCESS) {
343 // vslock failed, and vsunlock of non-locked memory panics
344 return PANIC;
345 }
346 kr = vsunlock((user_addr_t) start, (user_size_t) size, 0);
347 return kr;
348 }
349
350 static int
call_vsunlock_dirtied(void * start,size_t size)351 call_vsunlock_dirtied(void * start, size_t size)
352 {
353 int kr = vslock((user_addr_t) start, (user_size_t) size);
354 if (kr == EINVAL) {
355 // Invalid vslock arguments should also be
356 // invalid vsunlock arguments. Test it.
357 } else if (kr != KERN_SUCCESS) {
358 // vslock failed, and vsunlock of non-locked memory panics
359 return PANIC;
360 }
361 kr = vsunlock((user_addr_t) start, (user_size_t) size, 1);
362 return kr;
363 }
364
365 extern kern_return_t vm_map_wire_external(
366 vm_map_t map,
367 vm_map_offset_t start,
368 vm_map_offset_t end,
369 vm_prot_t access_type,
370 boolean_t user_wire);
371
372
373 typedef kern_return_t (*wire_fn_t)(
374 vm_map_t task,
375 mach_vm_address_t start,
376 mach_vm_address_t end,
377 vm_prot_t prot,
378 vm_tag_t tag,
379 boolean_t user_wire);
380
381
382 /*
383 * Tell vm_tag_bt() to change its behavior so our calls to
384 * vm_map_wire_external and vm_map_wire_and_extract do not panic.
385 */
// Toggle the per-thread test option that makes vm_tag_bt() tolerate our
// synthetic call stacks instead of panicking.
static void
prevent_wire_tag_panic(bool prevent)
{
	thread_set_test_option(test_option_vm_prevent_wire_tag_panic, prevent);
}
391
392 #if XNU_PLATFORM_MacOSX
393 // vm_map_wire_and_extract() implemented on macOS only
394
395
396 /*
397 * wire_nested requires a range of exactly one page when passed a physpage pointer.
 * wire_and_extract is meant to provide that, but because of the rounding it introduces, unaligned start addresses do not satisfy that requirement.
399 */
400 static bool
will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map,mach_vm_address_t start)401 will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map, mach_vm_address_t start)
402 {
403 mach_vm_address_t end = start + VM_MAP_PAGE_SIZE(map);
404 if (round_up_map(map, end) - trunc_down_map(map, start) != VM_MAP_PAGE_SIZE(map)) {
405 return true;
406 }
407 return false;
408 }
409
410 static inline void
check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr,ppnum_t physpage)411 check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr, ppnum_t physpage)
412 {
413 if (*kr != KERN_SUCCESS) {
414 if (physpage != 0) {
415 *kr = OUT_PARAM_BAD;
416 }
417 }
418 }
419
// Adapter matching wire_fn_t for vm_map_wire_and_extract(), which wires a
// single page rather than a start/end range (end and tag are unused).
static kern_return_t
vm_map_wire_and_extract_retyped(
	vm_map_t map,
	mach_vm_address_t start,
	mach_vm_address_t end __unused,
	vm_prot_t prot,
	vm_tag_t tag __unused,
	boolean_t user_wire)
{
	// wire_nested panics unless the rounded range is exactly one map page
	if (will_vm_map_wire_nested_panic_due_to_invalid_range_size(map, start)) {
		return PANIC;
	}

	// sentinel: on failure the callee must reset physpage to 0
	ppnum_t physpage = UNLIKELY_INITIAL_PPNUM;
	kern_return_t kr = vm_map_wire_and_extract(map, start, prot, user_wire, &physpage);
	check_vm_map_wire_and_extract_outparam_changes(&kr, physpage);
	return kr;
}
438 #endif // XNU_PLATFORM_MacOSX
439
440
// Adapter matching wire_fn_t: drops the (unused) tag and forwards to
// vm_map_wire_external().
static kern_return_t
vm_map_wire_external_retyped(
	vm_map_t map,
	mach_vm_address_t start,
	mach_vm_address_t end,
	vm_prot_t prot,
	vm_tag_t tag __unused,
	boolean_t user_wire)
{
	return vm_map_wire_external(map, start, end, prot, user_wire);
}
452
// Common driver for the WIRE_IMPL-generated wrappers: calls wire function
// `fn` on [start, end) and unwires on success so wired memory isn't leaked.
static kern_return_t
wire_call_impl(wire_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t end, vm_prot_t prot, vm_tag_t tag, bool user_wire)
{
	// a tag of VM_KERN_MEMORY_NONE panics inside the wire path
	if (tag == VM_KERN_MEMORY_NONE) {
		return PANIC;
	}
	// keep vm_tag_bt() from panicking on our synthetic call stacks
	prevent_wire_tag_panic(true);
	kern_return_t kr = fn(map, start, end, prot, tag, user_wire);
	prevent_wire_tag_panic(false);
	if (kr == KERN_SUCCESS) {
		(void) vm_map_unwire(map, start, end, user_wire);
	}
	return kr;
}
467
/*
 * WIRE_IMPL(FN, user_wire) generates four trial entry points around wire
 * function FN, each varying a different parameter:
 *   __start_end  - caller supplies start and end directly
 *   __prot       - caller supplies start/size (overflow -> BUSTED) and prot
 *   __tag        - caller supplies the vm_tag_t
 *   __start      - start only; end is 0 (degenerate range)
 * __attribute__((used)) keeps the generated-but-unreferenced functions from
 * being stripped.
 */
#define WIRE_IMPL(FN, user_wire) \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __start_end__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end) \
	{ \
	        return wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	} \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __prot__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot) \
	{ \
	        mach_vm_address_t end; \
	        if (__builtin_add_overflow(start, size, &end)) { \
	                return BUSTED; \
	        } \
	        return wire_call_impl(FN, map, start, end, prot, VM_KERN_MEMORY_OSFMK, user_wire); \
	} \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __tag__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end, vm_tag_t tag) \
	{ \
	        kern_return_t kr = wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, tag, user_wire); \
	        return kr; \
	} \
	static kern_return_t \
	__attribute__((used)) \
	call_ ## FN ## __start__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start) \
	{ \
	        return wire_call_impl(FN, map, start, 0, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	} \

WIRE_IMPL(vm_map_wire_external_retyped, true)
WIRE_IMPL(vm_map_wire_external_retyped, false)
WIRE_IMPL(vm_map_wire_kernel, true)
WIRE_IMPL(vm_map_wire_kernel, false)

#if XNU_PLATFORM_MacOSX
// vm_map_wire_and_extract() implemented on macOS only
WIRE_IMPL(vm_map_wire_and_extract_retyped, true)
WIRE_IMPL(vm_map_wire_and_extract_retyped, false)
#endif
508
509 static kern_return_t
510 call_mach_vm_wire_level_monitor(int64_t requested_pages)
511 {
512 kern_return_t kr = mach_vm_wire_level_monitor(requested_pages);
513 return kr;
514 }
515
516 static kern_return_t
call_vm_map_unwire_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)517 call_vm_map_unwire_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
518 {
519 kern_return_t kr = vm_map_unwire(map, start, end, TRUE);
520 return kr;
521 }
522
523
524 static kern_return_t
call_vm_map_unwire_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)525 call_vm_map_unwire_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
526 {
527 kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, FALSE);
528 if (kr) {
529 return PANIC;
530 }
531 kr = vm_map_unwire(map, start, end, FALSE);
532 return kr;
533 }
534
535 #ifndef __x86_64__
536 extern const vm_map_address_t physmap_base;
537 extern const vm_map_address_t physmap_end;
538 #endif
539
540 /*
541 * This function duplicates the panicking checks done in copy_validate.
542 * size==0 is returned as success earlier in copyin/out than copy_validate is called, so we ignore that case.
543 */
static bool
will_copyio_panic_in_copy_validate(void *kernel_addr, vm_size_t size)
{
	// size==0 returns success before copy_validate runs, so it can't panic
	if (size == 0) {
		return false;
	}
	extern const int copysize_limit_panic;
	if (size > copysize_limit_panic) {
		return true;
	}

	/*
	 * copyio is architecture specific and has different checks per arch.
	 */
#ifdef __x86_64__
	if ((vm_offset_t) kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		return true;
	}
#else /* not __x86_64__ */
	// the kernel range must not wrap around the address space
	uintptr_t kernel_addr_last;
	if (os_add_overflow((uintptr_t) kernel_addr, size, &kernel_addr_last)) {
		return true;
	}

	// the whole range must lie inside the kernel VA range or the physmap
	bool in_kva = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
	    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
	bool in_physmap = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= physmap_base) &&
	    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= physmap_end);

	if (!(in_kva || in_physmap)) {
		return true;
	}
#endif /* not __x86_64__ */

	return false;
}
580
581 static kern_return_t
call_copyinmap(MAP_T map,vm_map_offset_t fromaddr,void * todata,vm_size_t length)582 call_copyinmap(MAP_T map, vm_map_offset_t fromaddr, void * todata, vm_size_t length)
583 {
584 if (will_copyio_panic_in_copy_validate(todata, length)) {
585 return PANIC;
586 }
587
588 kern_return_t kr = copyinmap(map, fromaddr, todata, length);
589 return kr;
590 }
591
592 static kern_return_t
call_copyoutmap(MAP_T map,void * fromdata,vm_map_offset_t toaddr,vm_size_t length)593 call_copyoutmap(MAP_T map, void * fromdata, vm_map_offset_t toaddr, vm_size_t length)
594 {
595 if (will_copyio_panic_in_copy_validate(fromdata, length)) {
596 return PANIC;
597 }
598
599 kern_return_t kr = copyoutmap(map, fromdata, toaddr, length);
600 return kr;
601 }
602
603 static kern_return_t
call_vm_map_read_user(MAP_T map,vm_map_address_t src_addr,void * ptr,vm_size_t size)604 call_vm_map_read_user(MAP_T map, vm_map_address_t src_addr, void * ptr, vm_size_t size)
605 {
606 if (will_copyio_panic_in_copy_validate(ptr, size)) {
607 return PANIC;
608 }
609
610 kern_return_t kr = vm_map_read_user(map, src_addr, ptr, size);
611 return kr;
612 }
613
614 static kern_return_t
call_vm_map_write_user(MAP_T map,void * ptr,vm_map_address_t dst_addr,vm_size_t size)615 call_vm_map_write_user(MAP_T map, void * ptr, vm_map_address_t dst_addr, vm_size_t size)
616 {
617 if (will_copyio_panic_in_copy_validate(ptr, size)) {
618 return PANIC;
619 }
620
621 kern_return_t kr = vm_map_write_user(map, ptr, dst_addr, size);
622 return kr;
623 }
624
// Call vm_map_copy_overwrite with interruptible=TRUE.
static kern_return_t
call_vm_map_copy_overwrite_interruptible(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t dst_addr, mach_vm_size_t copy_size)
{
	kern_return_t kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, copy_size, TRUE);

	// NOTE(review): special-case a destination that starts inside the
	// 48-bit VA space and ends exactly at its upper boundary — presumably
	// such a range can legitimately fail with KERN_INVALID_ADDRESS on some
	// configurations, so that result is accepted; confirm against trials.
	const mach_vm_size_t va_mask = ((1ULL << 48) - 1);
	if ((dst_addr & ~va_mask) == 0ULL && ((dst_addr + copy_size) & ~va_mask) == ~va_mask) {
		if (kr == KERN_INVALID_ADDRESS) {
			return ACCEPTABLE;
		}
	}
	return kr;
}
638
639 static kern_return_t
call_mach_vm_protect__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)640 call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
641 {
642 kern_return_t kr = mach_vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
643 return kr;
644 }
645 static kern_return_t
call_mach_vm_protect__vm_prot(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)646 call_mach_vm_protect__vm_prot(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
647 {
648 kern_return_t kr = mach_vm_protect(map, start, size, 0, prot);
649 return kr;
650 }
651
652 static kern_return_t
call_vm_protect__start_size(MAP_T map,vm_address_t start,vm_size_t size)653 call_vm_protect__start_size(MAP_T map, vm_address_t start, vm_size_t size)
654 {
655 kern_return_t kr = vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
656 return kr;
657 }
658
659 static kern_return_t
call_vm_protect__vm_prot(MAP_T map,vm_address_t start,vm_size_t size,vm_prot_t prot)660 call_vm_protect__vm_prot(MAP_T map, vm_address_t start, vm_size_t size, vm_prot_t prot)
661 {
662 kern_return_t kr = vm_protect(map, start, size, 0, prot);
663 return kr;
664 }
665
666 /*
667 * VME_OFFSET_SET will panic due to an assertion if passed an address that is not aligned to VME_ALIAS_BITS
668 * VME_OFFSET_SET is called by _vm_map_clip_(start/end)
669 * vm_map_protect -> vm_map_clip_end -> _vm_map_clip_end -> VME_OFFSET_SET
670 */
671 static bool
will_vm_map_protect_panic(mach_vm_address_t start,mach_vm_address_t end)672 will_vm_map_protect_panic(mach_vm_address_t start, mach_vm_address_t end)
673 {
674 bool start_aligned = start == ((start >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
675 bool end_aligned = end == ((end >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
676 return !(start_aligned && end_aligned);
677 }
678
679 static kern_return_t
call_vm_map_protect__start_size__no_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)680 call_vm_map_protect__start_size__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
681 {
682 mach_vm_address_t end = start + size;
683 if (will_vm_map_protect_panic(start, end)) {
684 return PANIC;
685 }
686
687 kern_return_t kr = vm_map_protect(map, start, end, 0, VM_PROT_READ | VM_PROT_WRITE);
688 return kr;
689 }
690
691 static kern_return_t
call_vm_map_protect__start_size__set_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)692 call_vm_map_protect__start_size__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
693 {
694 mach_vm_address_t end = start + size;
695 if (will_vm_map_protect_panic(start, end)) {
696 return PANIC;
697 }
698
699 kern_return_t kr = vm_map_protect(map, start, end, 1, VM_PROT_READ | VM_PROT_WRITE);
700 return kr;
701 }
702
703 static kern_return_t
call_vm_map_protect__vm_prot__no_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)704 call_vm_map_protect__vm_prot__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
705 {
706 mach_vm_address_t end = start + size;
707 if (will_vm_map_protect_panic(start, end)) {
708 return PANIC;
709 }
710
711 kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
712 return kr;
713 }
714
715 static kern_return_t
call_vm_map_protect__vm_prot__set_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)716 call_vm_map_protect__vm_prot__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
717 {
718 mach_vm_address_t end = start + size;
719 if (will_vm_map_protect_panic(start, end)) {
720 return PANIC;
721 }
722
723 kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
724 return kr;
725 }
726
727 // Fwd decl to avoid including bsd headers
728 int useracc(user_addr_t addr, user_size_t len, int prot);
729
730 static int
call_useracc__start_size(void * start,size_t size)731 call_useracc__start_size(void * start, size_t size)
732 {
733 int result = useracc((user_addr_t) start, (user_addr_t) size, VM_PROT_READ);
734 return result;
735 }
736
737 static int
call_useracc__vm_prot(void * start,size_t size,int prot)738 call_useracc__vm_prot(void * start, size_t size, int prot)
739 {
740 return useracc((user_addr_t) start, (user_addr_t) size, prot);
741 }
742
743 static int
call_vm_map_purgable_control__address__get(MAP_T map,mach_vm_address_t addr)744 call_vm_map_purgable_control__address__get(MAP_T map, mach_vm_address_t addr)
745 {
746 int state = INVALID_PURGABLE_STATE;
747 int initial_state = state;
748 kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
749 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
750 return kr;
751 }
752
753 static int
call_vm_map_purgable_control__address__purge_all(MAP_T map,mach_vm_address_t addr)754 call_vm_map_purgable_control__address__purge_all(MAP_T map, mach_vm_address_t addr)
755 {
756 int state = INVALID_PURGABLE_STATE;
757 int initial_state = state;
758 kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
759 check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
760 return kr;
761 }
762
763 static int
call_vm_map_purgable_control__purgeable_state(MAP_T map,vm_address_t addr,vm_purgable_t control,int state)764 call_vm_map_purgable_control__purgeable_state(MAP_T map, vm_address_t addr, vm_purgable_t control, int state)
765 {
766 int state_copy = state;
767 kern_return_t kr = vm_map_purgable_control(map, addr, control, &state_copy);
768 check_mach_vm_purgable_control_outparam_changes(&kr, state_copy, state, control);
769
770 return kr;
771 }
772
773 #if XNU_PLATFORM_MacOSX
// Flag OUT_PARAM_BAD when vm_region_object_create left its handle NULL.
// NOTE(review): this fires even when *kr is already a failure, where a NULL
// handle might be the expected outcome — presumably the call always produces
// a handle when it returns at all; confirm against vm_region_object_create.
static void
check_vm_region_object_create_outparam_changes(kern_return_t * kr, ipc_port_t handle)
{
	if (handle == NULL) {
		*kr = OUT_PARAM_BAD;
	}
}
781
// Exercise vm_region_object_create, releasing the created memory-entry port
// on success so the test doesn't leak a port reference per trial.
static kern_return_t
call_vm_region_object_create(MAP_T map, vm_size_t size)
{
	ipc_port_t handle = NULL;
	kern_return_t kr = vm_region_object_create(map, size, &handle);
	check_vm_region_object_create_outparam_changes(&kr, handle);

	if (kr == KERN_SUCCESS) {
		mach_memory_entry_port_release(handle);
	}

	return kr;
}
795 #endif /* #if XNU_PLATFORM_MacOSX */
796
// Exercise vm_map_page_info(VM_PAGE_INFO_BASIC) and verify its out-parameter
// discipline against saved sentinel copies of info/count.
static kern_return_t
call_vm_map_page_info(MAP_T map, mach_vm_address_t addr)
{
	vm_page_info_flavor_t flavor = VM_PAGE_INFO_BASIC;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
	mach_msg_type_number_t saved_count = count;
	// sentinel depth so we can tell whether the call wrote the out-struct
	vm_page_info_basic_data_t info = {0};
	info.depth = -1;
	vm_page_info_basic_data_t saved_info = info;

	/*
	 * If this test is invoked from a rosetta process,
	 * vm_map_page_range_info_internal doesn't know what
	 * effective_page_shift to use and returns KERN_INVALID_ARGUMENT.
	 * To fix this, we can set the region_page_shift to the page_shift
	 * used for map
	 */
	int saved_page_shift = thread_self_region_page_shift();
	if (PAGE_SIZE == KB16) {
		if (VM_MAP_PAGE_SHIFT(current_map()) != VM_MAP_PAGE_SHIFT(map)) {
			thread_self_region_page_shift_set(VM_MAP_PAGE_SHIFT(map));
		}
	}

	kern_return_t kr = vm_map_page_info(map, addr, flavor, (vm_page_info_t)&info, &count);

	// restore the caller's region page shift before checking results
	thread_self_region_page_shift_set(saved_page_shift);

	check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);

	return kr;
}
829
830 #if CONFIG_MAP_RANGES
831 static kern_return_t
call_mach_vm_range_create(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,mach_vm_address_t second_start,mach_vm_size_t second_size)832 call_mach_vm_range_create(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, mach_vm_address_t second_start, mach_vm_size_t second_size)
833 {
834 mach_vm_range_recipe_v1_t array[2];
835 array[0] = (mach_vm_range_recipe_v1_t){
836 .range = { start, start + size }, .range_tag = MACH_VM_RANGE_FIXED,
837 };
838 array[1] = (mach_vm_range_recipe_v1_t){
839 .range = { second_start, second_start + second_size }, .range_tag = MACH_VM_RANGE_FIXED,
840 };
841
842 // mach_vm_range_create requires map == current_map(). Patch it up, do the call, and then restore it.
843 vm_map_t saved_map = swap_task_map(current_task(), current_thread(), map);
844
845 kern_return_t kr = mach_vm_range_create(map, MACH_VM_RANGE_FLAVOR_V1, (mach_vm_range_recipes_raw_t)array, sizeof(array[0]) * 2);
846
847 swap_task_map(current_task(), current_thread(), saved_map);
848
849 return kr;
850 }
851 #endif /* CONFIG_MAP_RANGES */
852
853 // Mach memory entry ownership
854
855 extern kern_return_t
856 mach_memory_entry_ownership(
857 ipc_port_t entry_port,
858 task_t owner,
859 int ledger_tag,
860 int ledger_flags);
861
862 static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused,int ledger_tag)863 call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
864 {
865 mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
866 kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, ledger_tag, 0);
867 mach_memory_entry_port_release(mementry);
868 return kr;
869 }
870
871 static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused,int ledger_flag)872 call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
873 {
874 mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
875 kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, VM_LEDGER_TAG_DEFAULT, ledger_flag);
876 mach_memory_entry_port_release(mementry);
877 return kr;
878 }
879
880 static inline void
check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr,mach_vm_size_t map_size,mach_vm_size_t invalid_initial_size)881 check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr, mach_vm_size_t map_size,
882 mach_vm_size_t invalid_initial_size)
883 {
884 if (*kr == KERN_SUCCESS) {
885 if (map_size == invalid_initial_size) {
886 *kr = OUT_PARAM_BAD;
887 }
888 } else {
889 if (map_size != 0) {
890 *kr = OUT_PARAM_BAD;
891 }
892 }
893 }
894
// Allocate backing memory, wrap it in a MAP_MEM_VM_SHARE memory entry,
// then call mach_memory_entry_map_size() with the trial start/size and
// validate its out-parameter against a seeded sentinel.
static kern_return_t
call_mach_memory_entry_map_size__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_port_t mementry;
	mach_vm_address_t addr;
	memory_object_size_t s = (memory_object_size_t)TEST_ALLOC_SIZE + 1;
	/*
	 * UNLIKELY_INITIAL_SIZE is guaranteed to never be the correct map_size
	 * from the mach_memory_entry_map_size calls we make. map_size should represent the size of the
	 * copy that would result, and UNLIKELY_INITIAL_SIZE is completely unrelated to the sizes we pass
	 * and not page aligned.
	 */
	mach_vm_size_t invalid_initial_size = UNLIKELY_INITIAL_SIZE;

	mach_vm_size_t map_size = invalid_initial_size;

	// Setup is expected to succeed; only the map_size call is under test.
	kern_return_t kr = mach_vm_allocate_kernel(map, &addr, s, FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
	assert(kr == 0);
	kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)addr, MAP_MEM_VM_SHARE, &mementry, MACH_PORT_NULL);
	assert(kr == 0);
	kr = mach_memory_entry_map_size(mementry, map, start, size, &map_size);
	check_mach_memory_entry_map_size_outparam_changes(&kr, map_size, invalid_initial_size);
	mach_memory_entry_port_release(mementry);
	(void)mach_vm_deallocate(map, addr, s);
	return kr;
}
921
// Bundle of objects derived from a userspace file descriptor by
// get_control_from_fd(); released via cleanup_control_related_data().
struct file_control_return {
	void * control;  // cast to memory_object_control_t by callers
	void * fp;       // presumably the file structure — confirm in get_control_from_fd()
	void * vp;       // vnode pointer; passed to vnode_vid()
	int fd;          // the originating file descriptor
};
928 struct file_control_return get_control_from_fd(int fd);
929 void cleanup_control_related_data(struct file_control_return info);
930 uint32_t vnode_vid(void * vp);
931
/*
 * Validate the out-parameters of task_find_region_details() against the
 * sentinel values the caller seeded them with: on failure every out-param
 * must have been zeroed; on success each must have been overwritten with
 * something other than its sentinel, and vid must match the vnode's id.
 */
static void
check_task_find_region_details_outparam_changes(int * result,
    uintptr_t vp, uintptr_t saved_vp,
    uint32_t vid,
    bool is_map_shared,
    uint64_t start, uint64_t saved_start,
    uint64_t len, uint64_t saved_len)
{
	// task_find_region_details returns a bool. 0 means failure, 1 success
	if (*result == 0) {
		if (vp != 0 || vid != 0 || is_map_shared != 0 || start != 0 || len != 0) {
			*result = OUT_PARAM_BAD;
		}
	} else {
		// Success must have replaced the seeded sentinel values.
		if (vp == saved_vp || start == saved_start || len == saved_len) {
			*result = OUT_PARAM_BAD;
		}
		if (vid != (uint32_t)vnode_vid((void *)vp)) {
			*result = OUT_PARAM_BAD;
		}
		// is_map_shared seems to check if the relevant entry is shadowed by another
		// we don't set up any shadow entries for this test
		if (is_map_shared) {
			// NOTE(review): check deliberately disabled, see comment above.
			// *result = OUT_PARAM_BAD;
		}
	}
}
959
960
// Run task_find_region_details() against `map` for one trial address and
// validate that its out-parameters changed (or not) as the result demands.
static int
call_task_find_region_details(MAP_T map, mach_vm_address_t addr)
{
	(void) map;
	// Seed all out-params with unlikely sentinel values so writes are detectable.
	uint64_t len = UNLIKELY_INITIAL_SIZE, start = UNLIKELY_INITIAL_ADDRESS;
	uint64_t saved_len = len, saved_start = start;
	bool is_map_shared = true;
	uintptr_t vp = (uintptr_t) INVALID_VNODE_PTR;
	uintptr_t saved_vp = vp;
	uint32_t vid = UNLIKELY_INITIAL_VID;

	/*
	 * task_find_region_details operates on task->map. Our setup code does allocations
	 * that otherwise could theoretically overwrite existing ones, so we don't want to
	 * operate on current_map
	 */
	vm_map_t saved_map = swap_task_map(current_task(), current_thread(), map);

	int kr = task_find_region_details(current_task(), addr, FIND_REGION_DETAILS_AT_OFFSET, &vp, &vid, &is_map_shared, &start, &len);

	// Restore the original task map before validating results.
	swap_task_map(current_task(), current_thread(), saved_map);

	check_task_find_region_details_outparam_changes(&kr, vp, saved_vp, vid, is_map_shared, start, saved_start, len, saved_len);
	return kr;
}
986
// For each address trial: map the test file's memory object control at the
// trial address (fixed-overwrite), then run `func` on that mapping and
// record its result. Addresses where the file mapping itself is rejected
// are recorded as IGNORED rather than failing the trial.
static results_t * __attribute__((used))
test_kext_unix_with_allocated_vnode_addr(kern_return_t (*func)(MAP_T dst_map, mach_vm_address_t start), const char *testname)
{
	MAP_T map SMART_MAP;
	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);
	results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		mach_vm_address_t addr = (mach_vm_address_t)trials->list[i].addr;

		// file_descriptor is the read/write fd handed down from userspace
		struct file_control_return control_info = get_control_from_fd(file_descriptor);
		vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = true);
		kern_return_t kr = vm_map_enter_mem_object_control(map, &addr, TEST_ALLOC_SIZE, 0, vmk_flags, (memory_object_control_t) control_info.control, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
		if (kr == KERN_INVALID_ARGUMENT) {
			// can't map a file at that address, so we can't pass
			// such a mapping to the function being tested
			append_result(results, IGNORED, trials->list[i].name);
			cleanup_control_related_data(control_info);
			continue;
		}
		assert(kr == KERN_SUCCESS);

		kern_return_t ret = func(map, addr);
		append_result(results, ret, trials->list[i].name);
		cleanup_control_related_data(control_info);
	}
	return results;
}
1016
1017 extern uint64_t vm_reclaim_max_threshold;
1018
1019 #if 0
1020 static kern_return_t
1021 test_mach_vm_deferred_reclamation_buffer_init(MAP_T map __unused, mach_vm_address_t address, mach_vm_size_t size)
1022 {
1023 uint64_t vm_reclaim_max_threshold_orig = vm_reclaim_max_threshold;
1024 kern_return_t kr = 0;
1025
1026 vm_reclaim_max_threshold = KB16;
1027 kr = call_mach_vm_deferred_reclamation_buffer_init(current_task(), address, size);
1028 vm_reclaim_max_threshold = vm_reclaim_max_threshold_orig;
1029
1030 return kr;
1031 }
1032 #endif
1033
1034
1035 // mach_make_memory_entry and variants
1036
1037 static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr,mach_vm_size_t size,mach_port_t out_handle)1038 check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_vm_size_t size,
1039 mach_port_t out_handle)
1040 {
1041 /*
1042 * mach_make_memory_entry overwrites *size to be 0 on failure.
1043 */
1044 if (*kr != KERN_SUCCESS) {
1045 if (size != 0) {
1046 *kr = OUT_PARAM_BAD;
1047 }
1048 if (out_handle != 0) {
1049 *kr = OUT_PARAM_BAD;
1050 }
1051 }
1052 }
1053
/*
 * IMPL(FN, T) instantiates a family of test drivers for the memory-entry
 * creation routine FN, whose address/size arguments have type T.
 *
 * call_FN__vm_prot() is the general form: it creates a parent memory
 * object, calls FN with the given permission/flag bits, releases any
 * handle returned on success, releases the parent, and validates the
 * out-parameters. The five named variants forward to it with one fixed
 * MAP_MEM_* mode OR'ed with VM_PROT_READ (previously each variant
 * duplicated the whole body, differing only in that one flag).
 */
#define IMPL(FN, T)                                                     \
	static kern_return_t                                            \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
	{                                                               \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
	        T io_size = size;                                       \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT; \
	        mach_port_t out_handle = invalid_handle_value;          \
	        kern_return_t kr = FN(map, &io_size, start,             \
	            prot, &out_handle, memobject);                      \
	        if (kr == 0) {                                          \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                       \
	        mach_memory_entry_port_release(memobject);              \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                              \
	}                                                               \
	                                                                \
	static kern_return_t                                            \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size) \
	{                                                               \
	        return call_ ## FN ## __vm_prot(map, start, size,       \
	            VM_PROT_READ | MAP_MEM_ONLY);                       \
	}                                                               \
	                                                                \
	static kern_return_t                                            \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size) \
	{                                                               \
	        return call_ ## FN ## __vm_prot(map, start, size,       \
	            VM_PROT_READ | MAP_MEM_NAMED_CREATE);               \
	}                                                               \
	                                                                \
	static kern_return_t                                            \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size)   \
	{                                                               \
	        return call_ ## FN ## __vm_prot(map, start, size,       \
	            VM_PROT_READ | MAP_MEM_VM_COPY);                    \
	}                                                               \
	                                                                \
	static kern_return_t                                            \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size)  \
	{                                                               \
	        return call_ ## FN ## __vm_prot(map, start, size,       \
	            VM_PROT_READ | MAP_MEM_VM_SHARE);                   \
	}                                                               \
	                                                                \
	static kern_return_t                                            \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size) \
	{                                                               \
	        return call_ ## FN ## __vm_prot(map, start, size,       \
	            VM_PROT_READ | MAP_MEM_NAMED_REUSE);                \
	}
1156
// Instantiate the memory-entry driver family for the 64-bit and legacy entry points.
IMPL(mach_make_memory_entry_64, mach_vm_address_t)
IMPL(mach_make_memory_entry, vm_size_t)
1159 static kern_return_t
1160 mach_make_memory_entry_internal_retyped(
1161 vm_map_t target_map,
1162 memory_object_size_t *size,
1163 memory_object_offset_t offset,
1164 vm_prot_t permission,
1165 ipc_port_t *object_handle,
1166 ipc_port_t parent_handle)
1167 {
1168 vm_named_entry_kernel_flags_t vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
1169 if (permission & MAP_MEM_LEDGER_TAGGED) {
1170 vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
1171 }
1172 return mach_make_memory_entry_internal(target_map, size, offset, permission, vmne_kflags, object_handle, parent_handle);
1173 }
// Instantiate the driver family for the internal (kernel-flags) variant.
IMPL(mach_make_memory_entry_internal_retyped, mach_vm_address_t)

#undef IMPL
1177
1178 // mach_vm_map/mach_vm_map_external/mach_vm_map_kernel/vm_map/vm_map_external infra
1179
// Common signature shared by mach_vm_map and its external/kernel/vm_map
// wrappers, so one set of call_map_fn__* drivers can exercise all of them.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
1191
1192 static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1193 call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1194 {
1195 mach_vm_address_t out_addr = start;
1196 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1197 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1198 // fixed-overwrite with pre-existing allocation, don't deallocate
1199 return kr;
1200 }
1201
1202 static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1203 call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1204 {
1205 mach_vm_address_t out_addr = start;
1206 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1207 0, 0, true, 0, 0, VM_INHERIT_NONE);
1208 // fixed-overwrite with pre-existing allocation, don't deallocate
1209 return kr;
1210 }
1211
1212 static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1213 call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1214 {
1215 mach_vm_address_t out_addr = start_hint;
1216 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1217 if (kr == 0) {
1218 (void)mach_vm_deallocate(map, out_addr, size);
1219 }
1220 return kr;
1221 }
1222
1223 static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1224 call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1225 {
1226 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1227 mach_vm_address_t out_addr = start;
1228 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1229 memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1230 // fixed-overwrite with pre-existing allocation, don't deallocate
1231 mach_memory_entry_port_release(memobject);
1232 return kr;
1233 }
1234
1235 static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1236 call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1237 {
1238 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1239 mach_vm_address_t out_addr = start;
1240 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1241 memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1242 // fixed-overwrite with pre-existing allocation, don't deallocate
1243 mach_memory_entry_port_release(memobject);
1244 return kr;
1245 }
1246
1247 static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1248 call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1249 {
1250 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1251 mach_vm_address_t out_addr = start_hint;
1252 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, memobject,
1253 KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1254 if (kr == 0) {
1255 (void)mach_vm_deallocate(map, out_addr, size);
1256 }
1257 mach_memory_entry_port_release(memobject);
1258 return kr;
1259 }
1260
1261 static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1262 helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1263 {
1264 mach_port_t memobject = make_a_mem_object(obj_size);
1265 mach_vm_address_t out_addr = start;
1266 kern_return_t kr = fn(map, &out_addr, size, 0, flags, memobject,
1267 offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1268 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1269 mach_memory_entry_port_release(memobject);
1270 return kr;
1271 }
1272
1273 static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1274 call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1275 {
1276 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
1277 }
1278
1279 static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1280 call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1281 {
1282 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
1283 }
1284
1285 static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1286 call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1287 {
1288 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
1289 }
1290
1291 static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1292 help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1293 {
1294 mach_vm_address_t out_addr = start;
1295 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1296 0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1297 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1298 return kr;
1299 }
1300
1301 static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1302 call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1303 {
1304 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1305 }
1306
1307 static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1308 call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1309 {
1310 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1311 }
1312
1313 static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1314 call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1315 {
1316 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1317 }
1318
1319 static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1320 help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1321 {
1322 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1323 mach_vm_address_t out_addr = start;
1324 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1325 memobject, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1326 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1327 mach_memory_entry_port_release(memobject);
1328 return kr;
1329 }
1330
1331 static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1332 call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1333 {
1334 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1335 }
1336
1337 static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1338 call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1339 {
1340 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1341 }
1342
1343 static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1344 call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1345 {
1346 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1347 }
1348
1349 static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1350 call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1351 {
1352 kern_return_t kr = fn(map, start, size, 0, flags,
1353 0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1354 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1355 return kr;
1356 }
1357
1358 static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1359 call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1360 {
1361 kern_return_t kr = fn(map, start, size, 0, flags,
1362 0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1363 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1364 return kr;
1365 }
1366
1367 static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1368 call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1369 {
1370 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1371 kern_return_t kr = fn(map, start, size, 0, flags,
1372 memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1373 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1374 mach_memory_entry_port_release(memobject);
1375 return kr;
1376 }
1377
1378 static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1379 call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1380 {
1381 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1382 kern_return_t kr = fn(map, start, size, 0, flags,
1383 memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1384 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1385 mach_memory_entry_port_release(memobject);
1386 return kr;
1387 }
1388
1389 static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1390 help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1391 {
1392 mach_vm_address_t out_addr = 0;
1393 kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1394 0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1395 deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1396 return kr;
1397 }
1398
1399 static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1400 call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1401 {
1402 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1403 }
1404
1405 static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1406 call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1407 {
1408 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1409 }
1410
1411 static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1412 call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1413 {
1414 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1415 }
1416
1417 static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1418 help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1419 {
1420 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1421 mach_vm_address_t out_addr = 0;
1422 kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1423 memobject, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1424 deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1425 mach_memory_entry_port_release(memobject);
1426 return kr;
1427 }
1428
1429 static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1430 call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1431 {
1432 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1433 }
1434
1435 static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1436 call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1437 {
1438 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1439 }
1440
1441 static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1442 call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1443 {
1444 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1445 }
1446
1447 // wrappers
1448
1449 kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1450 mach_vm_map_wrapped(vm_map_t target_task,
1451 mach_vm_address_t *address,
1452 mach_vm_size_t size,
1453 mach_vm_offset_t mask,
1454 int flags,
1455 mem_entry_name_port_t object,
1456 memory_object_offset_t offset,
1457 boolean_t copy,
1458 vm_prot_t cur_protection,
1459 vm_prot_t max_protection,
1460 vm_inherit_t inheritance)
1461 {
1462 if (dealloc_would_time_out(*address, size, target_task)) {
1463 return ACCEPTABLE;
1464 }
1465
1466 mach_vm_address_t saved_addr = *address;
1467 kern_return_t kr = mach_vm_map(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1468 check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1469 return kr;
1470 }
1471
1472 // missing forward declaration
1473 kern_return_t
1474 mach_vm_map_external(
1475 vm_map_t target_map,
1476 mach_vm_offset_t *address,
1477 mach_vm_size_t initial_size,
1478 mach_vm_offset_t mask,
1479 int flags,
1480 ipc_port_t port,
1481 vm_object_offset_t offset,
1482 boolean_t copy,
1483 vm_prot_t cur_protection,
1484 vm_prot_t max_protection,
1485 vm_inherit_t inheritance);
1486 kern_return_t
mach_vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1487 mach_vm_map_external_wrapped(vm_map_t target_task,
1488 mach_vm_address_t *address,
1489 mach_vm_size_t size,
1490 mach_vm_offset_t mask,
1491 int flags,
1492 mem_entry_name_port_t object,
1493 memory_object_offset_t offset,
1494 boolean_t copy,
1495 vm_prot_t cur_protection,
1496 vm_prot_t max_protection,
1497 vm_inherit_t inheritance)
1498 {
1499 if (dealloc_would_time_out(*address, size, target_task)) {
1500 return ACCEPTABLE;
1501 }
1502
1503 mach_vm_address_t saved_addr = *address;
1504 kern_return_t kr = mach_vm_map_external(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1505 check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1506 return kr;
1507 }
1508
1509 kern_return_t
mach_vm_map_kernel_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1510 mach_vm_map_kernel_wrapped(vm_map_t target_task,
1511 mach_vm_address_t *address,
1512 mach_vm_size_t size,
1513 mach_vm_offset_t mask,
1514 int flags,
1515 mem_entry_name_port_t object,
1516 memory_object_offset_t offset,
1517 boolean_t copy,
1518 vm_prot_t cur_protection,
1519 vm_prot_t max_protection,
1520 vm_inherit_t inheritance)
1521 {
1522 if (dealloc_would_time_out(*address, size, target_task)) {
1523 return ACCEPTABLE;
1524 }
1525
1526 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1527
1528 vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1529 mach_vm_address_t saved_addr = *address;
1530 kern_return_t kr = mach_vm_map_kernel(target_task, address, size, mask, vmk_flags, object, offset, copy, cur_protection, max_protection, inheritance);
1531 check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1532 return kr;
1533 }
1534
1535 static inline void
check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_address_t saved_start,int flags,MAP_T map)1536 check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr, mach_vm_address_t addr,
1537 mach_vm_address_t saved_start, int flags, MAP_T map)
1538 {
1539 if (*kr == KERN_SUCCESS) {
1540 if (is_fixed(flags)) {
1541 if (addr != truncate_vm_map_addr_with_flags(map, saved_start, flags)) {
1542 *kr = OUT_PARAM_BAD;
1543 }
1544 }
1545 } else {
1546 if (saved_start != addr) {
1547 *kr = OUT_PARAM_BAD;
1548 }
1549 }
1550 }
1551
1552 kern_return_t
vm_map_enter_mem_object_control_wrapped(vm_map_t target_map,mach_vm_address_t * address,mach_vm_size_t size,vm_map_offset_t mask,int flags,mem_entry_name_port_t object __unused,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1553 vm_map_enter_mem_object_control_wrapped(
1554 vm_map_t target_map,
1555 mach_vm_address_t *address,
1556 mach_vm_size_t size,
1557 vm_map_offset_t mask,
1558 int flags,
1559 mem_entry_name_port_t object __unused,
1560 memory_object_offset_t offset,
1561 boolean_t copy,
1562 vm_prot_t cur_protection,
1563 vm_prot_t max_protection,
1564 vm_inherit_t inheritance)
1565 {
1566 if (dealloc_would_time_out(*address, size, target_map)) {
1567 return ACCEPTABLE;
1568 }
1569
1570 vm_map_offset_t vmmaddr = (vm_map_offset_t) *address;
1571 vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1572
1573 vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1574 struct file_control_return control_info = get_control_from_fd(file_descriptor);
1575 kern_return_t kr = vm_map_enter_mem_object_control(target_map, &vmmaddr, size, mask, vmk_flags, (memory_object_control_t) control_info.control, offset, copy, cur_protection, max_protection, inheritance);
1576 check_vm_map_enter_mem_object_control_outparam_changes(&kr, vmmaddr, *address, flags, target_map);
1577
1578 *address = vmmaddr;
1579
1580 cleanup_control_related_data(control_info);
1581
1582 return kr;
1583 }
1584
1585 kern_return_t
vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1586 vm_map_wrapped(vm_map_t target_task,
1587 mach_vm_address_t *address,
1588 mach_vm_size_t size,
1589 mach_vm_offset_t mask,
1590 int flags,
1591 mem_entry_name_port_t object,
1592 memory_object_offset_t offset,
1593 boolean_t copy,
1594 vm_prot_t cur_protection,
1595 vm_prot_t max_protection,
1596 vm_inherit_t inheritance)
1597 {
1598 if (dealloc_would_time_out(*address, size, target_task)) {
1599 return ACCEPTABLE;
1600 }
1601
1602 vm_address_t addr = (vm_address_t)*address;
1603 kern_return_t kr = vm_map(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1604 check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1605 *address = addr;
1606 return kr;
1607 }
1608
1609 kern_return_t
1610 vm_map_external(
1611 vm_map_t target_map,
1612 vm_offset_t *address,
1613 vm_size_t size,
1614 vm_offset_t mask,
1615 int flags,
1616 ipc_port_t port,
1617 vm_offset_t offset,
1618 boolean_t copy,
1619 vm_prot_t cur_protection,
1620 vm_prot_t max_protection,
1621 vm_inherit_t inheritance);
1622 kern_return_t
vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1623 vm_map_external_wrapped(vm_map_t target_task,
1624 mach_vm_address_t *address,
1625 mach_vm_size_t size,
1626 mach_vm_offset_t mask,
1627 int flags,
1628 mem_entry_name_port_t object,
1629 memory_object_offset_t offset,
1630 boolean_t copy,
1631 vm_prot_t cur_protection,
1632 vm_prot_t max_protection,
1633 vm_inherit_t inheritance)
1634 {
1635 if (dealloc_would_time_out(*address, size, target_task)) {
1636 return ACCEPTABLE;
1637 }
1638
1639 vm_address_t addr = (vm_address_t)*address;
1640 kern_return_t kr = vm_map_external(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1641 check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1642 *address = addr;
1643 return kr;
1644 }
1645
1646 // implementations
1647
// Each IMPL_* macro stamps out a thin adapter that binds one mapping
// entry point (map_fn) to one of the call_map_fn__<instance> helpers
// defined above. The generated call_<map_fn>__<instance>* functions are
// what the RUN_* macros in the test driver below invoke.

// Adapter for fixed-address (start/size) trials.
#define IMPL_MAP_FN_START_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
	{ \
	        return call_map_fn__ ## instance(map_fn, map, start, size); \
	}

// Adapter for anywhere-placement (hint/size) trials.
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
	{ \
	        return call_map_fn__ ## instance(map_fn, map, start_hint, size); \
	}

// Adapter for start/size/offset/object-size trials (memory objects only).
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
	{ \
	        return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size); \
	}

// Adapter for vm_inherit_t trials.
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
	{ \
	        return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit); \
	}

// Adapter for flags trials.
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
	{ \
	        return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags); \
	}

// Adapter for cur/max protection-pair trials.
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
	{ \
	        return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max); \
	}

// Generate the complete adapter set for one mapping entry point.
#define IMPL(map_fn) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere) \

// Instantiate the adapters for every wrapped mapping function under test.
IMPL(mach_vm_map_wrapped)
IMPL(mach_vm_map_external_wrapped)
IMPL(mach_vm_map_kernel_wrapped)
IMPL(vm_map_wrapped)
IMPL(vm_map_external_wrapped)
IMPL(vm_map_enter_mem_object_control_wrapped)

#undef IMPL
1725
// Kernel-side driver for the VM parameter validation test, invoked via
// sysctl. in_value packs the userspace output buffer address (KB16
// aligned) with a file descriptor in the low bits; results are printed
// into the userspace buffer via testprintf(). *out_value is set to 1 on
// success and 0 on failure; the sysctl handler itself always returns 0.
static int
vm_parameter_validation_kern_test(int64_t in_value, int64_t *out_value)
{
	// in_value has the userspace address of the fixed-size output buffer and a file descriptor.
	// The address is KB16 aligned, so the bottom bits are used for the fd.
	// The top bit of the fd field (KB16 >> 1) indicates if we want to generate
	// golden results (set by GENERATE_GOLDEN_IMAGE from userspace).
	// in_value is KB16 aligned
	uint64_t fd_mask = KB16 - 1;
	file_descriptor = (int)(((uint64_t) in_value) & fd_mask);
	uint64_t buffer_address = in_value - file_descriptor;
	SYSCTL_OUTPUT_BUF = buffer_address;
	SYSCTL_OUTPUT_END = SYSCTL_OUTPUT_BUF + SYSCTL_OUTPUT_BUFFER_SIZE;

	// check if running to generate golden result list via boot-arg
	kernel_generate_golden = (file_descriptor & (KB16 >> 1)) > 0;
	if (kernel_generate_golden) {
		// Strip the flag bit so file_descriptor is a plain fd again.
		file_descriptor &= ~(KB16 >> 1);
	}

	// Test options:
	// - avoid panics for untagged wired memory (set to true during some tests)
	// - clamp vm addresses before passing to pmap to avoid pmap panics
	thread_test_context_t ctx CLEANUP_THREAD_TEST_CONTEXT = {
		.test_option_vm_prevent_wire_tag_panic = false,
		.test_option_vm_map_clamp_pmap_remove = true,
	};
	thread_set_test_context(&ctx);

#if !CONFIG_SPTM && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)
	if (kernel_generate_golden) {
		// Some devices skip some trials to avoid timeouts.
		// Golden files cannot be generated on these devices.
		testprintf("Can't generate golden files on this device "
		    "(PPL && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)). "
		    "Try again on a different device.\n");
		*out_value = 0; // failure
		goto done;
	}
#else
	// The `done:` label is only reached via goto in the branch above.
#pragma clang diagnostic ignored "-Wunused-label"
#endif

	/*
	 * -- memory entry functions --
	 * The memory entry test functions use macros to generate each flavor of memory entry function.
	 * For more context on why, see the matching comment in vm_parameter_validation.c
	 */

#define RUN_START_SIZE(fn, variant, name) dealloc_results(process_results(test_mach_with_allocated_start_size(call_ ## fn ## __start_size__ ## variant, name " (start/size)")))
#define RUN_PROT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __vm_prot , name " (vm_prot_t)")))

#define RUN_ALL(fn, name)                                       \
	RUN_START_SIZE(fn, copy, #name " (copy)");              \
	RUN_START_SIZE(fn, memonly, #name " (memonly)");        \
	RUN_START_SIZE(fn, namedcreate, #name " (namedcreate)"); \
	RUN_START_SIZE(fn, share, #name " (share)");            \
	RUN_START_SIZE(fn, namedreuse, #name " (namedreuse)");  \
	RUN_PROT(fn, #name " (vm_prot_t)");                     \

	RUN_ALL(mach_make_memory_entry_64, mach_make_memory_entry_64);
	RUN_ALL(mach_make_memory_entry, mach_make_memory_entry);
	RUN_ALL(mach_make_memory_entry_internal_retyped, mach_make_memory_entry_internal);
#undef RUN_ALL
#undef RUN_START_SIZE
#undef RUN_PROT

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
	RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
	RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mach_memory_entry_map_size__start_size, "mach_memory_entry_map_size");
#undef RUN

	/*
	 * -- allocate/deallocate functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
	RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate_external (fixed) (realigned start/size)");
	RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate_external (anywhere) (hint/size)");
	RUN(call_mach_vm_allocate_kernel__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
	RUN(call_mach_vm_allocate_kernel__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
	RUN(call_mach_vm_allocate__flags, "mach_vm_allocate_external");
	RUN(call_mach_vm_allocate_kernel__flags, "mach_vm_allocate_kernel");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
	RUN(call_vm_allocate__start_size_fixed, "vm_allocate (fixed) (realigned start/size)");
	RUN(call_vm_allocate__start_size_anywhere, "vm_allocate (anywhere) (hint/size)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
	RUN(call_vm_allocate__flags, "vm_allocate");
#undef RUN
	dealloc_results(process_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
	dealloc_results(process_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));

	/*
	 * -- map/remap functions --
	 * These functions rely heavily on macros.
	 * For more context on why, see the matching comment in vm_parameter_validation.c
	 */

	// map tests

#define RUN_START_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
#define RUN_HINT_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
#define RUN_PROT_PAIR(fn, name) dealloc_results(process_results(test_mach_vm_prot_pair(fn, name " (vm_prot_t pair)")))
#define RUN_INHERIT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
#define RUN_FLAGS(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
#define RUN_SSOO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset_object(fn, name " (start/size/offset/object)")))

	// Run every adapter generated by IMPL() for one mapping entry point.
#define RUN_ALL(fn, name)                                       \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
	RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
	RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
	RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \

	RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
	RUN_ALL(mach_vm_map_external_wrapped, mach_vm_map_external);
	RUN_ALL(mach_vm_map_kernel_wrapped, mach_vm_map_kernel);
	RUN_ALL(vm_map_wrapped, vm_map);
	RUN_ALL(vm_map_external_wrapped, vm_map_external);

#define RUN_SSO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset(fn, name " (start/size/offset)")))

	// Same as RUN_ALL, but the start/size/offset/object trials use the
	// start/size/offset harness (the control-backed entry point has no
	// separate object size parameter under test).
#define RUN_ALL_CTL(fn, name)                                       \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
	RUN_SSO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
	RUN_SSO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
	RUN_SSO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \

	RUN_ALL_CTL(vm_map_enter_mem_object_control_wrapped, vm_map_enter_mem_object_control);

#undef RUN_ALL
#undef RUN_START_SIZE
#undef RUN_HINT_SIZE
#undef RUN_PROT_PAIR
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SSOO
#undef RUN_ALL_CTL
#undef RUN_SSO

	// remap tests

#define FN_NAME(fn, variant, type) call_ ## fn ## __ ## variant ## __ ## type
#define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(process_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
#define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
#define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
#define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
#define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
#define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
#define RUN_SRC_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_allocated_src_unallocated_dst_size, fn, variant, src_dst_size, type_name, name)

	// `realigned` lets callers label entry points that realign src/size.
#define RUN_ALL(fn, realigned, name)                            \
	RUN_SRC_SIZE(fn, copy, realigned "src/size", name);     \
	RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name);   \
	RUN_DST_SIZE(fn, fixed, "realigned dst/size", name);    \
	RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name); \
	RUN_DST_SIZE(fn, anywhere, "hint/size", name);          \
	RUN_INHERIT(fn, fixed, name);                           \
	RUN_INHERIT(fn, fixed_copy, name);                      \
	RUN_INHERIT(fn, anywhere, name);                        \
	RUN_FLAGS(fn, nocopy, name);                            \
	RUN_FLAGS(fn, copy, name);                              \
	RUN_PROT_PAIRS(fn, fixed, name);                        \
	RUN_PROT_PAIRS(fn, fixed_copy, name);                   \
	RUN_PROT_PAIRS(fn, anywhere, name);                     \
	RUN_SRC_DST_SIZE(fn, fixed, "src/dst/size", name);      \
	RUN_SRC_DST_SIZE(fn, fixed_copy, "src/dst/size", name); \
	RUN_SRC_DST_SIZE(fn, anywhere, "src/dst/size", name);   \

	RUN_ALL(mach_vm_remap_wrapped_kern, "realigned ", mach_vm_remap);
	RUN_ALL(mach_vm_remap_new_kernel_wrapped, , mach_vm_remap_new_kernel);

#undef RUN_ALL
#undef RUN_HELPER
#undef RUN_SRC_SIZE
#undef RUN_DST_SIZE
#undef RUN_PROT_PAIRS
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SRC_DST_SIZE

	/*
	 * -- wire/unwire functions --
	 * Some wire functions (vm_map_wire_and_extract, vm_map_wire_external, vm_map_wire_kernel)
	 * are implemented with macros to avoid code duplication that would happen otherwise from the multiple
	 * entrypoints, multiple params under test, and user/non user wired paths
	 */

#define RUN(fn, name) dealloc_results(process_results(test_kext_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_vslock, "vslock");
	RUN(call_vsunlock_undirtied, "vsunlock (undirtied)");
	RUN(call_vsunlock_dirtied, "vsunlock (dirtied)");
#undef RUN

#define RUN_PROT(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __prot__user_wired_ ## wired ## _, name " (vm_prot_t)")))
#define RUN_START(fn, wired, name) dealloc_results(process_results(test_kext_tagged_with_allocated_addr(call_ ## fn ## __start__user_wired_ ## wired ## _, name " (addr)")))
#define RUN_START_END(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_start_end(call_ ## fn ## __start_end__user_wired_ ## wired ## _, name " (start/end)")))
#define RUN_TAG(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_tag(call_ ## fn ## __tag__user_wired_ ## wired ## _, name " (tag)")))

#if XNU_PLATFORM_MacOSX
	// vm_map_wire_and_extract is implemented on macOS only

#define RUN_ALL_WIRE_AND_EXTRACT(fn, name)              \
	RUN_PROT(fn, true, #name " (user wired)");      \
	RUN_PROT(fn, false, #name " (non user wired)"); \
	RUN_START(fn, true, #name " (user wired)");     \
	RUN_START(fn, false, #name " (non user wired)");

	RUN_ALL_WIRE_AND_EXTRACT(vm_map_wire_and_extract_retyped, vm_map_wire_and_extract);
#undef RUN_ALL_WIRE_AND_EXTRACT
#endif // XNU_PLATFORM_MacOSX

#define RUN_ALL_WIRE_EXTERNAL(fn, name)                  \
	RUN_PROT(fn, true, #name " (user wired)");       \
	RUN_PROT(fn, false, #name " (non user wired))"); \
	RUN_START_END(fn, true, #name " (user wired)");  \
	RUN_START_END(fn, false, #name " (non user wired)");

	RUN_ALL_WIRE_EXTERNAL(vm_map_wire_external_retyped, vm_map_wire_external);
#undef RUN_ALL_WIRE_EXTERNAL

#define RUN_ALL_WIRE_KERNEL(fn, name)                    \
	RUN_PROT(fn, false, #name " (non user wired))"); \
	RUN_PROT(fn, true, #name " (user wired)");       \
	RUN_START_END(fn, true, #name " (user wired)");  \
	RUN_START_END(fn, false, #name " (non user wired)"); \
	RUN_TAG(fn, true, #name " (user wired)");        \
	RUN_TAG(fn, false, #name " (non user wired)");

	RUN_ALL_WIRE_KERNEL(vm_map_wire_kernel, vm_map_wire_kernel);
#undef RUN_ALL_WIRE_KERNEL

#undef RUN_PROT
#undef RUN_START
#undef RUN_START_END
#undef RUN_TAG

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_end(fn, name " (start/end)")))
	RUN(call_vm_map_unwire_user_wired, "vm_map_unwire (user_wired)");
	RUN(call_vm_map_unwire_non_user_wired, "vm_map_unwire (non user_wired)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_with_int64(fn, name " (int64)")))
	RUN(call_mach_vm_wire_level_monitor, "mach_vm_wire_level_monitor");
#undef RUN

	/*
	 * -- copyin/copyout functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_vm_map_copyin, "vm_map_copyin");
	RUN(call_mach_vm_read, "mach_vm_read");
	// vm_map_copyin_common is covered well by the vm_map_copyin test
	// RUN(call_vm_map_copyin_common, "vm_map_copyin_common");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr_of_size_n(fn, sizeof(uint32_t), name " (start)")))
	RUN(call_copyoutmap_atomic32, "copyoutmap_atomic32");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_src_kerneldst_size(fn, name " (src/dst/size)")))
	RUN(call_copyinmap, "copyinmap");
	RUN(call_vm_map_read_user, "vm_map_read_user");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_kernelsrc_dst_size(fn, name " (src/dst/size)")))
	RUN(call_vm_map_write_user, "vm_map_write_user");
	RUN(call_copyoutmap, "copyoutmap");
#undef RUN

	dealloc_results(process_results(test_vm_map_copy_overwrite(call_vm_map_copy_overwrite_interruptible, "vm_map_copy_overwrite (start/size)")));

	/*
	 * -- protection functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mach_vm_protect__start_size, "mach_vm_protect");
	RUN(call_vm_protect__start_size, "vm_protect");
	RUN(call_vm_map_protect__start_size__no_max, "vm_map_protect (no max)");
	RUN(call_vm_map_protect__start_size__set_max, "vm_map_protect (set max)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_mach_vm_protect__vm_prot, "mach_vm_protect");
	RUN(call_vm_protect__vm_prot, "vm_protect");
	RUN(call_vm_map_protect__vm_prot__no_max, "vm_map_protect (no max)");
	RUN(call_vm_map_protect__vm_prot__set_max, "vm_map_protect (set max)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_useracc__start_size, "useracc");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
	RUN(call_useracc__vm_prot, "useracc");
#undef RUN

	/*
	 * -- madvise/behavior functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_mach_vm_behavior_set__start_size__default, "mach_vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
	RUN(call_mach_vm_behavior_set__start_size__can_reuse, "mach_vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
#undef RUN
#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_behavior_t(fn, name " (vm_behavior_t)")))
	RUN(call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
#undef RUN

	/*
	 * -- purgability/purgeability functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_addr(fn, name " (addr)")))
	RUN(call_vm_map_purgable_control__address__get, "vm_map_purgable_control (get)");
	RUN(call_vm_map_purgable_control__address__purge_all, "vm_map_purgable_control (purge all)");
#undef RUN

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
	RUN(call_vm_map_purgable_control__purgeable_state, "vm_map_purgable_control");
#undef RUN

	/*
	 * -- region info functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
	RUN(call_mach_vm_region, "mach_vm_region");
	RUN(call_vm_region, "vm_region");
#undef RUN
#if XNU_PLATFORM_MacOSX
#define RUN(fn, name) dealloc_results(process_results(test_mach_with_size(fn, name " (size)")))
	RUN(call_vm_region_object_create, "vm_region_object_create");
#undef RUN
#endif

	/*
	 * -- page info functions --
	 */

#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
	RUN(call_vm_map_page_info, "vm_map_page_info");
#undef RUN

	/*
	 * -- miscellaneous functions --
	 */

#if CONFIG_MAP_RANGES
	dealloc_results(process_results(test_mach_vm_range_create(call_mach_vm_range_create, "mach_vm_range_create (start/size/start2/size2)")));
#endif

	dealloc_results(process_results(test_kext_unix_with_allocated_vnode_addr(call_task_find_region_details, "task_find_region_details (addr)")));

	*out_value = 1; // success
done:
	// Reset the output pointers so a stale buffer address is never reused.
	SYSCTL_OUTPUT_BUF = 0;
	SYSCTL_OUTPUT_END = 0;
	return 0;
}
2141
// Register the test so it is reachable via the debug.test sysctl interface.
SYSCTL_TEST_REGISTER(vm_parameter_validation_kern, vm_parameter_validation_kern_test);
2143