xref: /xnu-11417.140.69/osfmk/tests/vm_parameter_validation_kern.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 #include <kern/zalloc.h>
2 #include <kern/thread_test_context.h>
3 
4 #include "vm_parameter_validation.h"
5 
6 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
7 #pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
8 #pragma clang diagnostic ignored "-Wmissing-prototypes"
9 #pragma clang diagnostic ignored "-Wpedantic"
10 #pragma clang diagnostic ignored "-Wgcc-compat"
11 
12 
// Kernel sysctl test prints its output into a userspace buffer.
// fixme these global variables prevent test concurrency

// Current write cursor into the userspace output buffer; user_vprintf()
// advances it to sit on the trailing nul terminator.
static user_addr_t SYSCTL_OUTPUT_BUF;
// One past the last usable byte of the userspace output buffer.
static user_addr_t SYSCTL_OUTPUT_END;

// This is a read/write fd passed from userspace.
// It's passed to make it easier for kernel tests to interact with a file.
static int file_descriptor;

// Output to create a golden test result in kern test, controlled by
// MSB in file_descriptor and set by GENERATE_GOLDEN_IMAGE from userspace.
bool kernel_generate_golden = FALSE;
26 
27 // vprintf() to a userspace buffer
28 // output is incremented to point at the new nul terminator
29 static void
user_vprintf(user_addr_t * output,user_addr_t output_end,const char * format,va_list args)30 user_vprintf(user_addr_t *output, user_addr_t output_end, const char *format, va_list args) __printflike(3, 0)
31 {
32 	extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
33 	char linebuf[1024];
34 	size_t printed;
35 
36 	printed = vsnprintf(linebuf, sizeof(linebuf), format, args);
37 	assert(printed < sizeof(linebuf) - 1);
38 	assert(*output + printed + 1 < output_end);
39 	copyout(linebuf, *output, printed + 1);
40 	*output += printed;
41 }
42 
// printf() replacement for kernel tests: formats into the global
// userspace output buffer via user_vprintf().
void
testprintf(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	user_vprintf(&SYSCTL_OUTPUT_BUF, SYSCTL_OUTPUT_END, format, args);
	va_end(args);
}
51 
52 // Utils
53 
54 static mach_port_t
make_a_mem_object(vm_size_t size)55 make_a_mem_object(vm_size_t size)
56 {
57 	ipc_port_t out_handle;
58 	kern_return_t kr = mach_memory_object_memory_entry_64((host_t)1, /*internal=*/ true, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
59 	assert(kr == 0);
60 	return out_handle;
61 }
62 
63 static mach_port_t
make_a_mem_entry(MAP_T map,vm_size_t size)64 make_a_mem_entry(MAP_T map, vm_size_t size)
65 {
66 	mach_port_t port;
67 	memory_object_size_t s = (memory_object_size_t)size;
68 	kern_return_t kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
69 	assert(kr == 0);
70 	return port;
71 }
72 
// Test functions

// Run start/size trials against a vm_map_copy_overwrite-style function.
// For each trial a fresh copy object is made from a source allocation and
// overwrite-copied to a destination allocation at the trial's start/size.
static results_t *
test_vm_map_copy_overwrite(kern_return_t (*func)(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t start, mach_vm_size_t size), const char * testname)
{
	// source map: has an allocation bigger than our
	// "reasonable" trial sizes, to copy from
	MAP_T src_map SMART_MAP;
	allocation_t src_alloc SMART_ALLOCATE_VM(src_map, TEST_ALLOC_SIZE, VM_PROT_READ);

	// dest map: has an allocation bigger than our
	// "reasonable" trial sizes, to copy-overwrite on
	MAP_T dst_map SMART_MAP;
	allocation_t dst_alloc SMART_ALLOCATE_VM(dst_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);

	// We test dst/size parameters.
	// We don't test the contents of the vm_map_copy_t.
	start_size_trials_t *trials SMART_START_SIZE_TRIALS(dst_alloc.addr);
	results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, dst_alloc.addr, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		start_size_trial_t trial = trials->list[i];

		// Copy from the source.
		vm_map_copy_t copy;
		kern_return_t kr = vm_map_copyin(src_map, src_alloc.addr, src_alloc.size, false, &copy);
		assert(kr == 0);
		assert(copy);  // null copy won't exercise the sanitization path

		// Copy-overwrite to the destination.
		kern_return_t ret = func(dst_map, copy, trial.start, trial.size);

		// On success the overwrite consumed the copy object; on failure
		// ownership stays with us, so discard it to avoid a leak.
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(copy);
		}
		append_result(results, ret, trial.name);
	}
	return results;
}
112 
/*
 * This function temporarily allocates a writeable allocation in kernel_map, and a read only allocation in a temporary map.
 * It's used to test a function such as vm_map_read_user which copies in data to a kernel pointer that must be writeable.
 */
static results_t *
test_src_kerneldst_size(kern_return_t (*func)(MAP_T map, vm_map_offset_t src, void * dst, vm_size_t length), const char * testname)
{
	MAP_T map SMART_MAP;
	// src: readable user-side range; dst: writeable kernel-side range.
	allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_READ);
	allocation_t dst_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
	results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		src_dst_size_trial_t trial = trials->list[i];
		// Rebase the trial's relative src/dst offsets onto the allocations.
		trial = slide_trial_src(trial, src_base.addr);
		trial = slide_trial_dst(trial, dst_base.addr);
		int ret = func(map, trial.src, (void *)trial.dst, trial.size);
		append_result(results, ret, trial.name);
	}
	return results;
}
135 
/*
 * This function temporarily allocates a read only allocation in kernel_map, and a writeable allocation in a temporary map.
 * It's used to test a function such as vm_map_write_user which copies data from a kernel pointer to a writeable userspace address.
 */
static results_t *
test_kernelsrc_dst_size(kern_return_t (*func)(MAP_T map, void *src, vm_map_offset_t dst, vm_size_t length), const char * testname)
{
	MAP_T map SMART_MAP;
	// src: readable kernel-side range; dst: writeable user-side range.
	allocation_t src_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_READ);
	allocation_t dst_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
	results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		src_dst_size_trial_t trial = trials->list[i];
		// Rebase the trial's relative src/dst offsets onto the allocations.
		trial = slide_trial_src(trial, src_base.addr);
		trial = slide_trial_dst(trial, dst_base.addr);
		int ret = func(map, (void *)trial.src, trial.dst, trial.size);
		append_result(results, ret, trial.name);
	}
	return results;
}
158 
159 
/////////////////////////////////////////////////////
// Mach tests


// Exercise mach_vm_read(); on success, discard the returned copy
// object so the trial does not leak it.
static kern_return_t
call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	vm_offset_t out_addr;
	mach_msg_type_number_t out_size;
	kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
	if (kr == 0) {
		// we didn't call through MIG so out_addr is really a vm_map_copy_t
		vm_map_copy_discard((vm_map_copy_t)out_addr);
	}
	return kr;
}
176 
177 static inline void
check_vm_map_copyin_outparam_changes(kern_return_t * kr,vm_map_copy_t copy,vm_map_copy_t saved_copy)178 check_vm_map_copyin_outparam_changes(kern_return_t * kr, vm_map_copy_t copy, vm_map_copy_t saved_copy)
179 {
180 	if (*kr == KERN_SUCCESS) {
181 		if (copy == saved_copy) {
182 			*kr = OUT_PARAM_BAD;
183 		}
184 	} else {
185 		if (copy != saved_copy) {
186 			*kr = OUT_PARAM_BAD;
187 		}
188 	}
189 }
190 
// Exercise vm_map_copyin() and verify its out-parameter contract.
static kern_return_t
call_vm_map_copyin(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// Seed the out parameter with a sentinel so we can tell whether
	// vm_map_copyin wrote to it.
	vm_map_copy_t invalid_initial_value = INVALID_VM_MAP_COPY;
	vm_map_copy_t copy = invalid_initial_value;
	kern_return_t kr = vm_map_copyin(map, start, size, false, &copy);
	if (kr == 0) {
		// Discard the copy object so the trial doesn't leak it.
		vm_map_copy_discard(copy);
	}
	check_vm_map_copyin_outparam_changes(&kr, copy, invalid_initial_value);
	return kr;
}

// Exercise copyoutmap_atomic32() writing a zero value at addr.
static kern_return_t
call_copyoutmap_atomic32(MAP_T map, vm_map_offset_t addr)
{
	uint32_t data = 0;
	kern_return_t kr = copyoutmap_atomic32(map, data, addr);
	return kr;
}
211 
212 
// Exercise mach_vm_allocate_external() with caller-controlled flags.
static kern_return_t
call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate_external(map, start, size, flags);
	// Verify the out parameter obeys the success/failure contract.
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
	return kr;
}

// Exercise mach_vm_allocate_external() at a caller-fixed address.
static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_FIXED);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
	return kr;
}

// Exercise mach_vm_allocate_external() letting the kernel pick the address.
static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_ANYWHERE);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
	return kr;
}

// Exercise the in-kernel allocate entry point with caller-controlled
// flags and a fixed OSFMK tag.
static kern_return_t
call_mach_vm_allocate_kernel__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
	    FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK));
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
	return kr;
}

// Exercise mach_vm_allocate_kernel() at a caller-fixed address.
static kern_return_t
call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	// Skip trials whose cleanup deallocation would be too slow to run.
	if (dealloc_would_time_out(*start, size, map)) {
		return ACCEPTABLE;
	}

	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
	    FLAGS_AND_TAG(VM_FLAGS_FIXED, VM_KERN_MEMORY_OSFMK));
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
	return kr;
}

// Exercise mach_vm_allocate_kernel() letting the kernel pick the address.
static kern_return_t
call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	// Skip trials whose cleanup deallocation would be too slow to run.
	if (dealloc_would_time_out(*start, size, map)) {
		return ACCEPTABLE;
	}

	mach_vm_address_t saved_start = *start;
	kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
	    FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
	return kr;
}
277 
278 
279 
// Exercise the vm_* (narrow-type) allocate entry point with
// caller-controlled flags.
static kern_return_t
call_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, flags);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
	return kr;
}

// Exercise vm_allocate() at a caller-fixed address.
static kern_return_t
call_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_FIXED);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
	return kr;
}

// Exercise vm_allocate() letting the kernel pick the address.
static kern_return_t
call_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
{
	mach_vm_address_t saved_start = *start;
	kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_ANYWHERE);
	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
	return kr;
}
306 
307 static kern_return_t
call_mach_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)308 call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
309 {
310 	kern_return_t kr = mach_vm_deallocate(map, start, size);
311 	return kr;
312 }
313 
314 static kern_return_t
call_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)315 call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
316 {
317 	kern_return_t kr = vm_deallocate(map, (vm_address_t) start, (vm_size_t) size);
318 	return kr;
319 }
320 
// Including sys/systm.h caused things to blow up
int     vslock(user_addr_t addr, user_size_t len);
int     vsunlock(user_addr_t addr, user_size_t len, int dirtied);

// Exercise vslock(); on success, immediately unwire the range so the
// trial leaves no wired memory behind.
static int
call_vslock(void * start, size_t size)
{
	int kr = vslock((user_addr_t) start, (user_size_t) size);
	if (kr == KERN_SUCCESS) {
		(void) vsunlock((user_addr_t) start, (user_size_t) size, 0);
	}

	return kr;
}
334 
// Exercise vsunlock() with dirtied=0. The range is vslock()ed first
// because vsunlock of never-locked memory panics.
static int
call_vsunlock_undirtied(void * start, size_t size)
{
	int kr = vslock((user_addr_t) start, (user_size_t) size);
	if (kr == EINVAL) {
		// Invalid vslock arguments should also be
		// invalid vsunlock arguments. Test it.
	} else if (kr != KERN_SUCCESS) {
		// vslock failed, and vsunlock of non-locked memory panics
		return PANIC;
	}
	kr = vsunlock((user_addr_t) start, (user_size_t) size, 0);
	return kr;
}

// Same as call_vsunlock_undirtied, but marks the pages dirty (dirtied=1).
static int
call_vsunlock_dirtied(void * start, size_t size)
{
	int kr = vslock((user_addr_t) start, (user_size_t) size);
	if (kr == EINVAL) {
		// Invalid vslock arguments should also be
		// invalid vsunlock arguments. Test it.
	} else if (kr != KERN_SUCCESS) {
		// vslock failed, and vsunlock of non-locked memory panics
		return PANIC;
	}
	kr = vsunlock((user_addr_t) start, (user_size_t) size, 1);
	return kr;
}
364 
extern kern_return_t    vm_map_wire_external(
	vm_map_t                map,
	vm_map_offset_t         start,
	vm_map_offset_t         end,
	vm_prot_t               access_type,
	boolean_t               user_wire);


// Common signature shared by all wire-style functions under test;
// adapters below retype the real entry points to this shape.
typedef kern_return_t (*wire_fn_t)(
	vm_map_t task,
	mach_vm_address_t start,
	mach_vm_address_t end,
	vm_prot_t prot,
	vm_tag_t tag,
	boolean_t user_wire);


/*
 * Tell vm_tag_bt() to change its behavior so our calls to
 * vm_map_wire_external and vm_map_wire_and_extract do not panic.
 */
static void
prevent_wire_tag_panic(bool prevent)
{
	thread_set_test_option(test_option_vm_prevent_wire_tag_panic, prevent);
}
391 
392 #if XNU_PLATFORM_MacOSX
393 // vm_map_wire_and_extract() implemented on macOS only
394 
395 
396 /*
397  * wire_nested requires a range of exactly one page when passed a physpage pointer.
398  * wire_and_extract is meant to provide that, but as a result of round introduced, unaligned values don't follow that.
399  */
400 static bool
will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map,mach_vm_address_t start)401 will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map, mach_vm_address_t start)
402 {
403 	mach_vm_address_t end = start + VM_MAP_PAGE_SIZE(map);
404 	if (round_up_map(map, end) - trunc_down_map(map, start) != VM_MAP_PAGE_SIZE(map)) {
405 		return true;
406 	}
407 	return false;
408 }
409 
410 static inline void
check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr,ppnum_t physpage)411 check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr, ppnum_t physpage)
412 {
413 	if (*kr != KERN_SUCCESS) {
414 		if (physpage != 0) {
415 			*kr = OUT_PARAM_BAD;
416 		}
417 	}
418 }
419 
// Adapter giving vm_map_wire_and_extract() the common wire_fn_t shape.
// The end/tag parameters are accepted but unused: wire_and_extract
// always wires exactly one page at `start`.
static kern_return_t
vm_map_wire_and_extract_retyped(
	vm_map_t                map,
	mach_vm_address_t       start,
	mach_vm_address_t       end __unused,
	vm_prot_t               prot,
	vm_tag_t                tag __unused,
	boolean_t               user_wire)
{
	// Bail out where rounding would make the nested range != one page.
	if (will_vm_map_wire_nested_panic_due_to_invalid_range_size(map, start)) {
		return PANIC;
	}

	// Seed physpage with a sentinel so the out-param check can tell
	// whether the call wrote to it.
	ppnum_t physpage = UNLIKELY_INITIAL_PPNUM;
	kern_return_t kr = vm_map_wire_and_extract(map, start, prot, user_wire, &physpage);
	check_vm_map_wire_and_extract_outparam_changes(&kr, physpage);
	return kr;
}
438 #endif // XNU_PLATFORM_MacOSX
439 
440 
441 static kern_return_t
vm_map_wire_external_retyped(vm_map_t map,mach_vm_address_t start,mach_vm_address_t end,vm_prot_t prot,vm_tag_t tag __unused,boolean_t user_wire)442 vm_map_wire_external_retyped(
443 	vm_map_t                map,
444 	mach_vm_address_t       start,
445 	mach_vm_address_t       end,
446 	vm_prot_t               prot,
447 	vm_tag_t                tag __unused,
448 	boolean_t               user_wire)
449 {
450 	return vm_map_wire_external(map, start, end, prot, user_wire);
451 }
452 
// Common driver for all wire trials:
// - tag VM_KERN_MEMORY_NONE would panic inside the wire path, so it is
//   reported as PANIC without calling in;
// - vm_tag_bt()'s tag panic is suppressed only for the duration of the call;
// - on success the range is unwired so the trial leaves nothing wired.
static kern_return_t
wire_call_impl(wire_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t end, vm_prot_t prot, vm_tag_t tag, bool user_wire)
{
	if (tag == VM_KERN_MEMORY_NONE) {
		return PANIC;
	}
	prevent_wire_tag_panic(true);
	kern_return_t kr = fn(map, start, end, prot, tag, user_wire);
	prevent_wire_tag_panic(false);
	if (kr == KERN_SUCCESS) {
		(void) vm_map_unwire(map, start, end, user_wire);
	}
	return kr;
}
467 
/*
 * For wire function FN and a fixed user_wire value, generate four trial
 * entry points, each varying one parameter group:
 *   __start_end:  caller-controlled [start, end) range
 *   __prot:       caller-controlled protection (end computed from size,
 *                 overflow reported as BUSTED)
 *   __tag:        caller-controlled tag
 *   __start:      start only, with an empty (end == 0) range
 * All delegate to wire_call_impl().
 */
#define WIRE_IMPL(FN, user_wire)                                                  \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __start_end__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end) \
	{                                                                         \
	        return wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	}                                                                         \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __prot__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot) \
	{                                                                         \
	        mach_vm_address_t end;                                            \
	        if (__builtin_add_overflow(start, size, &end)) {                  \
	                return BUSTED;                                            \
	        }                                                                 \
	        return wire_call_impl(FN, map, start, end, prot, VM_KERN_MEMORY_OSFMK, user_wire); \
	}                                                                         \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __tag__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end, vm_tag_t tag) \
	{                                                                         \
	        kern_return_t kr = wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, tag, user_wire); \
	        return kr;                                                        \
	}                                                                         \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __start__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start) \
	{                                                                         \
	        return wire_call_impl(FN, map, start, 0, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	}                                                                         \

// Instantiate the trial set for each wire entry point, user and kernel wired.
WIRE_IMPL(vm_map_wire_external_retyped, true)
WIRE_IMPL(vm_map_wire_external_retyped, false)
WIRE_IMPL(vm_map_wire_kernel, true)
WIRE_IMPL(vm_map_wire_kernel, false)

#if XNU_PLATFORM_MacOSX
WIRE_IMPL(vm_map_wire_and_extract_retyped, true)
WIRE_IMPL(vm_map_wire_and_extract_retyped, false)
#endif
508 
509 static kern_return_t
510 call_mach_vm_wire_level_monitor(int64_t requested_pages)
511 {
512 	kern_return_t kr = mach_vm_wire_level_monitor(requested_pages);
513 	return kr;
514 }
515 
516 static kern_return_t
call_vm_map_unwire_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)517 call_vm_map_unwire_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
518 {
519 	kern_return_t kr = vm_map_unwire(map, start, end, TRUE);
520 	return kr;
521 }
522 
523 
// Exercise vm_map_unwire() with user_wired=FALSE. The range is
// kernel-wired first because unwiring a non-wired range panics; a
// failed wire is reported as PANIC since the unwire cannot be tried.
static kern_return_t
call_vm_map_unwire_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
{
	kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, FALSE);
	if (kr) {
		return PANIC;
	}
	kr = vm_map_unwire(map, start, end, FALSE);
	return kr;
}
534 
#ifndef __x86_64__
// Bounds of the physical aperture; copy_validate consults these on
// non-x86 architectures.
extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;
#endif

/*
 * This function duplicates the panicking checks done in copy_validate.
 * size==0 is returned as success earlier in copyin/out than copy_validate is called, so we ignore that case.
 */
static bool
will_copyio_panic_in_copy_validate(void *kernel_addr, vm_size_t size)
{
	if (size == 0) {
		return false;
	}
	// Oversized copies panic rather than fail.
	extern const int copysize_limit_panic;
	if (size > copysize_limit_panic) {
		return true;
	}

	/*
	 * copyio is architecture specific and has different checks per arch.
	 */
#ifdef __x86_64__
	// x86 rejects kernel pointers below the kernel/kext base.
	if ((vm_offset_t) kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		return true;
	}
#else /* not __x86_64__ */
	uintptr_t kernel_addr_last;
	if (os_add_overflow((uintptr_t) kernel_addr, size, &kernel_addr_last)) {
		return true;
	}

	// The buffer must lie entirely within kernel VA or the physmap
	// (pointer auth/tag bits stripped before the range comparison).
	bool in_kva = (VM_KERNEL_STRIP_PTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
	bool in_physmap = (VM_KERNEL_STRIP_PTR(kernel_addr) >= physmap_base) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= physmap_end);

	if (!(in_kva || in_physmap)) {
		return true;
	}
#endif /* not __x86_64__ */

	return false;
}
580 
// Exercise copyinmap(); trials that would trip copy_validate's panic
// checks are reported as PANIC instead of being executed.
static kern_return_t
call_copyinmap(MAP_T map, vm_map_offset_t fromaddr, void * todata, vm_size_t length)
{
	if (will_copyio_panic_in_copy_validate(todata, length)) {
		return PANIC;
	}

	kern_return_t kr = copyinmap(map, fromaddr, todata, length);
	return kr;
}

// Exercise copyoutmap(); same panic prediction on the kernel-side buffer.
static kern_return_t
call_copyoutmap(MAP_T map, void * fromdata, vm_map_offset_t toaddr, vm_size_t length)
{
	if (will_copyio_panic_in_copy_validate(fromdata, length)) {
		return PANIC;
	}

	kern_return_t kr = copyoutmap(map, fromdata, toaddr, length);
	return kr;
}

// Exercise vm_map_read_user() (user -> kernel copy).
static kern_return_t
call_vm_map_read_user(MAP_T map, vm_map_address_t src_addr, void * ptr, vm_size_t size)
{
	if (will_copyio_panic_in_copy_validate(ptr, size)) {
		return PANIC;
	}

	kern_return_t kr = vm_map_read_user(map, src_addr, ptr, size);
	return kr;
}

// Exercise vm_map_write_user() (kernel -> user copy).
static kern_return_t
call_vm_map_write_user(MAP_T map, void * ptr, vm_map_address_t dst_addr, vm_size_t size)
{
	if (will_copyio_panic_in_copy_validate(ptr, size)) {
		return PANIC;
	}

	kern_return_t kr = vm_map_write_user(map, ptr, dst_addr, size);
	return kr;
}
624 
// Exercise vm_map_copy_overwrite() with interruptible=TRUE.
static kern_return_t
call_vm_map_copy_overwrite_interruptible(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t dst_addr, mach_vm_size_t copy_size)
{
	kern_return_t kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, copy_size,
	    TRUE);

	// Ranges that start inside the low 48-bit VA space but whose end
	// lands in the all-ones upper region legitimately fail address
	// validation; accept KERN_INVALID_ADDRESS for those trials.
	// NOTE(review): assumes a 48-bit VA boundary — confirm per-arch.
	const mach_vm_size_t va_mask = ((1ULL << 48) - 1);
	if ((dst_addr & ~va_mask) == 0ULL && ((dst_addr + copy_size) & ~va_mask) == ~va_mask) {
		if (kr == KERN_INVALID_ADDRESS) {
			return ACCEPTABLE;
		}
	}
	return kr;
}
639 
640 static kern_return_t
call_mach_vm_protect__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)641 call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
642 {
643 	kern_return_t kr = mach_vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
644 	return kr;
645 }
646 static kern_return_t
call_mach_vm_protect__vm_prot(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)647 call_mach_vm_protect__vm_prot(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
648 {
649 	kern_return_t kr = mach_vm_protect(map, start, size, 0, prot);
650 	return kr;
651 }
652 
653 static kern_return_t
call_vm_protect__start_size(MAP_T map,vm_address_t start,vm_size_t size)654 call_vm_protect__start_size(MAP_T map, vm_address_t start, vm_size_t size)
655 {
656 	kern_return_t kr = vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
657 	return kr;
658 }
659 
660 static kern_return_t
call_vm_protect__vm_prot(MAP_T map,vm_address_t start,vm_size_t size,vm_prot_t prot)661 call_vm_protect__vm_prot(MAP_T map, vm_address_t start, vm_size_t size, vm_prot_t prot)
662 {
663 	kern_return_t kr = vm_protect(map, start, size, 0, prot);
664 	return kr;
665 }
666 
667 /*
668  * VME_OFFSET_SET will panic due to an assertion if passed an address that is not aligned to VME_ALIAS_BITS
669  * VME_OFFSET_SET is called by _vm_map_clip_(start/end)
670  * vm_map_protect -> vm_map_clip_end -> _vm_map_clip_end -> VME_OFFSET_SET
671  */
672 static bool
will_vm_map_protect_panic(mach_vm_address_t start,mach_vm_address_t end)673 will_vm_map_protect_panic(mach_vm_address_t start, mach_vm_address_t end)
674 {
675 	bool start_aligned = start == ((start >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
676 	bool end_aligned = end == ((end >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
677 	return !(start_aligned && end_aligned);
678 }
679 
// Exercise vm_map_protect() with fixed RW protection, set_max=0.
static kern_return_t
call_vm_map_protect__start_size__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	// NOTE(review): start + size may wrap; presumably vm_map_protect
	// rejects such ranges — confirm against the trial inputs.
	mach_vm_address_t end = start + size;
	if (will_vm_map_protect_panic(start, end)) {
		return PANIC;
	}

	kern_return_t kr = vm_map_protect(map, start, end, 0, VM_PROT_READ | VM_PROT_WRITE);
	return kr;
}

// Exercise vm_map_protect() with fixed RW protection, set_max=1.
static kern_return_t
call_vm_map_protect__start_size__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_vm_address_t end = start + size;
	if (will_vm_map_protect_panic(start, end)) {
		return PANIC;
	}

	kern_return_t kr = vm_map_protect(map, start, end, 1, VM_PROT_READ | VM_PROT_WRITE);
	return kr;
}

// Exercise vm_map_protect() with caller-specified protection, set_max=0.
static kern_return_t
call_vm_map_protect__vm_prot__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
{
	mach_vm_address_t end = start + size;
	if (will_vm_map_protect_panic(start, end)) {
		return PANIC;
	}

	kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
	return kr;
}
715 
716 static kern_return_t
call_vm_map_protect__vm_prot__set_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)717 call_vm_map_protect__vm_prot__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
718 {
719 	mach_vm_address_t end = start + size;
720 	if (will_vm_map_protect_panic(start, end)) {
721 		return PANIC;
722 	}
723 
724 	kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
725 	return kr;
726 }
727 
728 // Fwd decl to avoid including bsd headers
729 int     useracc(user_addr_t addr, user_size_t len, int prot);
730 
731 static int
call_useracc__start_size(void * start,size_t size)732 call_useracc__start_size(void * start, size_t size)
733 {
734 	int result = useracc((user_addr_t) start, (user_addr_t) size, VM_PROT_READ);
735 	return result;
736 }
737 
738 static int
call_useracc__vm_prot(void * start,size_t size,int prot)739 call_useracc__vm_prot(void * start, size_t size, int prot)
740 {
741 	return useracc((user_addr_t) start, (user_addr_t) size, prot);
742 }
743 
// Query purgeable state at addr; verify the out-parameter contract.
static int
call_vm_map_purgable_control__address__get(MAP_T map, mach_vm_address_t addr)
{
	// Seed the out parameter with an invalid sentinel so the check can
	// tell whether the call wrote to it.
	int state = INVALID_PURGABLE_STATE;
	int initial_state = state;
	kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
	check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
	return kr;
}

// Exercise the PURGE_ALL control at addr.
static int
call_vm_map_purgable_control__address__purge_all(MAP_T map, mach_vm_address_t addr)
{
	int state = INVALID_PURGABLE_STATE;
	int initial_state = state;
	kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
	check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
	return kr;
}

// Exercise vm_map_purgable_control() with caller-specified control/state.
static int
call_vm_map_purgable_control__purgeable_state(MAP_T map, vm_address_t addr, vm_purgable_t control, int state)
{
	// Work on a copy so the caller's state value is preserved for the check.
	int state_copy = state;
	kern_return_t kr = vm_map_purgable_control(map, addr, control, &state_copy);
	check_mach_vm_purgable_control_outparam_changes(&kr, state_copy, state, control);

	return kr;
}
773 
774 #if XNU_PLATFORM_MacOSX
775 static void
check_vm_region_object_create_outparam_changes(kern_return_t * kr,ipc_port_t handle)776 check_vm_region_object_create_outparam_changes(kern_return_t * kr, ipc_port_t handle)
777 {
778 	if (handle == NULL) {
779 		*kr = OUT_PARAM_BAD;
780 	}
781 }
782 
783 static kern_return_t
call_vm_region_object_create(MAP_T map,vm_size_t size)784 call_vm_region_object_create(MAP_T map, vm_size_t size)
785 {
786 	ipc_port_t handle = NULL;
787 	kern_return_t kr = vm_region_object_create(map, size, &handle);
788 	check_vm_region_object_create_outparam_changes(&kr, handle);
789 
790 	if (kr == KERN_SUCCESS) {
791 		mach_memory_entry_port_release(handle);
792 	}
793 
794 	return kr;
795 }
796 #endif /* #if XNU_PLATFORM_MacOSX */
797 
798 static kern_return_t
call_vm_map_page_info(MAP_T map,mach_vm_address_t addr)799 call_vm_map_page_info(MAP_T map, mach_vm_address_t addr)
800 {
801 	vm_page_info_flavor_t flavor = VM_PAGE_INFO_BASIC;
802 	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
803 	mach_msg_type_number_t saved_count = count;
804 	vm_page_info_basic_data_t info = {0};
805 	info.depth = -1;
806 	vm_page_info_basic_data_t saved_info = info;
807 
808 	/*
809 	 * If this test is invoked from a rosetta process,
810 	 * vm_map_page_range_info_internal doesn't know what
811 	 * effective_page_shift to use and returns KERN_INVALID_ARGUMENT.
812 	 * To fix this, we can set the region_page_shift to the page_shift
813 	 * used for map
814 	 */
815 	int saved_page_shift = thread_self_region_page_shift();
816 	if (PAGE_SIZE == KB16) {
817 		if (VM_MAP_PAGE_SHIFT(current_map()) != VM_MAP_PAGE_SHIFT(map)) {
818 			thread_self_region_page_shift_set(VM_MAP_PAGE_SHIFT(map));
819 		}
820 	}
821 
822 	kern_return_t kr = vm_map_page_info(map, addr, flavor, (vm_page_info_t)&info, &count);
823 
824 	thread_self_region_page_shift_set(saved_page_shift);
825 
826 	check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);
827 
828 	return kr;
829 }
830 
831 #if CONFIG_MAP_RANGES
832 static kern_return_t
call_mach_vm_range_create(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,mach_vm_address_t second_start,mach_vm_size_t second_size)833 call_mach_vm_range_create(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, mach_vm_address_t second_start, mach_vm_size_t second_size)
834 {
835 	mach_vm_range_recipe_v1_t array[2];
836 	array[0] = (mach_vm_range_recipe_v1_t){
837 		.range = { start, start + size }, .range_tag = MACH_VM_RANGE_FIXED,
838 	};
839 	array[1] = (mach_vm_range_recipe_v1_t){
840 		.range = { second_start, second_start + second_size }, .range_tag = MACH_VM_RANGE_FIXED,
841 	};
842 
843 	// mach_vm_range_create requires map == current_map(). Patch it up, do the call, and then restore it.
844 	vm_map_t saved_map = swap_task_map(current_task(), current_thread(), map);
845 
846 	kern_return_t kr = mach_vm_range_create(map, MACH_VM_RANGE_FLAVOR_V1, (mach_vm_range_recipes_raw_t)array, sizeof(array[0]) * 2);
847 
848 	swap_task_map(current_task(), current_thread(), saved_map);
849 
850 	return kr;
851 }
852 #endif /* CONFIG_MAP_RANGES */
853 
854 // Mach memory entry ownership
855 
856 extern kern_return_t
857 mach_memory_entry_ownership(
858 	ipc_port_t      entry_port,
859 	task_t          owner,
860 	int             ledger_tag,
861 	int             ledger_flags);
862 
863 static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused,int ledger_tag)864 call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
865 {
866 	mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
867 	kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, ledger_tag, 0);
868 	mach_memory_entry_port_release(mementry);
869 	return kr;
870 }
871 
872 static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused,int ledger_flag)873 call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
874 {
875 	mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
876 	kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, VM_LEDGER_TAG_DEFAULT, ledger_flag);
877 	mach_memory_entry_port_release(mementry);
878 	return kr;
879 }
880 
881 static inline void
check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr,mach_vm_size_t map_size,mach_vm_size_t invalid_initial_size)882 check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr, mach_vm_size_t map_size,
883     mach_vm_size_t invalid_initial_size)
884 {
885 	if (*kr == KERN_SUCCESS) {
886 		if (map_size == invalid_initial_size) {
887 			*kr = OUT_PARAM_BAD;
888 		}
889 	} else {
890 		if (map_size != 0) {
891 			*kr = OUT_PARAM_BAD;
892 		}
893 	}
894 }
895 
896 static kern_return_t
call_mach_memory_entry_map_size__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)897 call_mach_memory_entry_map_size__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
898 {
899 	mach_port_t mementry;
900 	mach_vm_address_t addr;
901 	memory_object_size_t s = (memory_object_size_t)TEST_ALLOC_SIZE + 1;
902 	/*
903 	 * UNLIKELY_INITIAL_SIZE is guaranteed to never be the correct map_size
904 	 * from the mach_memory_entry_map_size calls we make. map_size should represent the size of the
905 	 * copy that would result, and UNLIKELY_INITIAL_SIZE is completely unrelated to the sizes we pass
906 	 * and not page aligned.
907 	 */
908 	mach_vm_size_t invalid_initial_size = UNLIKELY_INITIAL_SIZE;
909 
910 	mach_vm_size_t map_size = invalid_initial_size;
911 
912 	kern_return_t kr = mach_vm_allocate_kernel(map, &addr, s, FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
913 	assert(kr == 0);
914 	kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)addr, MAP_MEM_VM_SHARE, &mementry, MACH_PORT_NULL);
915 	assert(kr == 0);
916 	kr = mach_memory_entry_map_size(mementry, map, start, size, &map_size);
917 	check_mach_memory_entry_map_size_outparam_changes(&kr, map_size, invalid_initial_size);
918 	mach_memory_entry_port_release(mementry);
919 	(void)mach_vm_deallocate(map, addr, s);
920 	return kr;
921 }
922 
// Bundle of BSD-side objects backing a file descriptor, so kernel tests
// can create file-backed mappings. Produced by get_control_from_fd()
// and torn down with cleanup_control_related_data().
struct file_control_return {
	void * control;  // cast to memory_object_control_t when mapping (see test_kext_unix_with_allocated_vnode_addr)
	void * fp;       // opaque file object — presumably a struct fileproc *; TODO confirm against the BSD-side implementation
	void * vp;       // vnode pointer; passed to vnode_vid()
	int fd;          // the originating file descriptor
};
struct file_control_return get_control_from_fd(int fd);
void cleanup_control_related_data(struct file_control_return info);
uint32_t vnode_vid(void * vp);
932 
// Validate the five out-parameters of task_find_region_details against
// their saved pre-call sentinels and flip *result to OUT_PARAM_BAD on
// any inconsistency. On failure all out-params must be zeroed; on
// success vp/start/len must have been overwritten and vid must match
// the vnode's current vid.
static void
check_task_find_region_details_outparam_changes(int * result,
    uintptr_t vp, uintptr_t saved_vp,
    uint32_t vid,
    bool is_map_shared,
    uint64_t start, uint64_t saved_start,
    uint64_t len, uint64_t saved_len)
{
	// task_find_region_details returns a bool. 0 means failure, 1 success
	if (*result == 0) {
		// Failure: every out-parameter must have been cleared.
		if (vp != 0 || vid != 0 || is_map_shared != 0 || start != 0 || len != 0) {
			*result = OUT_PARAM_BAD;
		}
	} else {
		// Success: the sentinels must all have been replaced.
		if (vp == saved_vp || start == saved_start || len == saved_len) {
			*result = OUT_PARAM_BAD;
		}
		// vid must agree with the vnode identified by vp.
		if (vid != (uint32_t)vnode_vid((void *)vp)) {
			*result = OUT_PARAM_BAD;
		}
		// is_map_shared seems to check if the relevant entry is shadowed by another
		// we don't set up any shadow entries for this test
		if (is_map_shared) {
			// *result = OUT_PARAM_BAD;
		}
	}
}
960 
961 
962 static int
call_task_find_region_details(MAP_T map,mach_vm_address_t addr)963 call_task_find_region_details(MAP_T map, mach_vm_address_t addr)
964 {
965 	(void) map;
966 	uint64_t len = UNLIKELY_INITIAL_SIZE, start = UNLIKELY_INITIAL_ADDRESS;
967 	uint64_t saved_len = len, saved_start = start;
968 	bool is_map_shared = true;
969 	uintptr_t vp = (uintptr_t) INVALID_VNODE_PTR;
970 	uintptr_t saved_vp = vp;
971 	uint32_t vid = UNLIKELY_INITIAL_VID;
972 
973 	/*
974 	 * task_find_region_details operates on task->map. Our setup code does allocations
975 	 * that otherwise could theoretically overwrite existing ones, so we don't want to
976 	 * operate on current_map
977 	 */
978 	vm_map_t saved_map = swap_task_map(current_task(), current_thread(), map);
979 
980 	int kr = task_find_region_details(current_task(), addr, FIND_REGION_DETAILS_AT_OFFSET, &vp, &vid, &is_map_shared, &start, &len);
981 
982 	swap_task_map(current_task(), current_thread(), saved_map);
983 
984 	check_task_find_region_details_outparam_changes(&kr, vp, saved_vp, vid, is_map_shared, start, saved_start, len, saved_len);
985 	return kr;
986 }
987 
// Trial driver: for each candidate address, map the userspace-provided
// file (through its memory_object control) at that address in a fresh
// map, invoke 'func' on the mapping, and record the result. Addresses
// that cannot host a file mapping are recorded as IGNORED.
static results_t * __attribute__((used))
test_kext_unix_with_allocated_vnode_addr(kern_return_t (*func)(MAP_T dst_map, mach_vm_address_t start), const char *testname)
{
	MAP_T map SMART_MAP;
	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);
	results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		mach_vm_address_t addr = (mach_vm_address_t)trials->list[i].addr;

		// file_descriptor is the read/write fd passed in from userspace
		struct file_control_return control_info = get_control_from_fd(file_descriptor);
		vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = true);
		kern_return_t kr = vm_map_enter_mem_object_control(map, &addr, TEST_ALLOC_SIZE, 0, vmk_flags, (memory_object_control_t) control_info.control, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
		if (kr == KERN_INVALID_ARGUMENT) {
			// can't map a file at that address, so we can't pass
			// such a mapping to the function being tested
			append_result(results, IGNORED, trials->list[i].name);
			cleanup_control_related_data(control_info);
			continue;
		}
		assert(kr == KERN_SUCCESS);

		kern_return_t ret = func(map, addr);
		append_result(results, ret, trials->list[i].name);
		// release the fd-derived objects after every trial
		cleanup_control_related_data(control_info);
	}
	return results;
}
1017 
1018 extern uint64_t vm_reclaim_max_threshold;
1019 
1020 #if 0
1021 static kern_return_t
1022 test_mach_vm_deferred_reclamation_buffer_init(MAP_T map __unused, mach_vm_address_t address, mach_vm_size_t size)
1023 {
1024 	uint64_t vm_reclaim_max_threshold_orig = vm_reclaim_max_threshold;
1025 	kern_return_t kr = 0;
1026 
1027 	vm_reclaim_max_threshold = KB16;
1028 	kr = call_mach_vm_deferred_reclamation_buffer_init(current_task(), address, size);
1029 	vm_reclaim_max_threshold = vm_reclaim_max_threshold_orig;
1030 
1031 	return kr;
1032 }
1033 #endif
1034 
1035 
1036 // mach_make_memory_entry and variants
1037 
1038 static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr,mach_vm_size_t size,mach_port_t out_handle)1039 check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_vm_size_t size,
1040     mach_port_t out_handle)
1041 {
1042 	/*
1043 	 * mach_make_memory_entry overwrites *size to be 0 on failure.
1044 	 */
1045 	if (*kr != KERN_SUCCESS) {
1046 		if (size != 0) {
1047 			*kr = OUT_PARAM_BAD;
1048 		}
1049 		if (out_handle != 0) {
1050 			*kr = OUT_PARAM_BAD;
1051 		}
1052 	}
1053 }
1054 
// IMPL(FN, T) generates six call_FN__* wrappers that drive a
// mach_make_memory_entry-style function FN with different permission
// bits (T is the address/size type FN expects):
//   __start_size__memonly      VM_PROT_READ | MAP_MEM_ONLY
//   __start_size__namedcreate  VM_PROT_READ | MAP_MEM_NAMED_CREATE
//   __start_size__copy         VM_PROT_READ | MAP_MEM_VM_COPY
//   __start_size__share        VM_PROT_READ | MAP_MEM_VM_SHARE
//   __start_size__namedreuse   VM_PROT_READ | MAP_MEM_NAMED_REUSE
//   __vm_prot                  caller-supplied protection
// Each wrapper seeds out_handle with a sentinel port value, releases any
// handle created on success, releases the parent memobject, and then
// validates the out-parameters via check_mach_memory_entry_outparam_changes().
#define IMPL(FN, T)                                                               \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size)                      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size)                  \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size)                         \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size)            \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size)       \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot)      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              prot, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}
1157 
IMPL(mach_make_memory_entry_64,mach_vm_address_t)1158 IMPL(mach_make_memory_entry_64, mach_vm_address_t)
1159 IMPL(mach_make_memory_entry, vm_size_t)
1160 static kern_return_t
1161 mach_make_memory_entry_internal_retyped(
1162 	vm_map_t                target_map,
1163 	memory_object_size_t    *size,
1164 	memory_object_offset_t  offset,
1165 	vm_prot_t               permission,
1166 	ipc_port_t              *object_handle,
1167 	ipc_port_t              parent_handle)
1168 {
1169 	vm_named_entry_kernel_flags_t   vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
1170 	if (permission & MAP_MEM_LEDGER_TAGGED) {
1171 		vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
1172 	}
1173 	return mach_make_memory_entry_internal(target_map, size, offset, permission, vmne_kflags, object_handle, parent_handle);
1174 }
1175 IMPL(mach_make_memory_entry_internal_retyped, mach_vm_address_t)
1176 
1177 #undef IMPL
1178 
1179 // mach_vm_map/mach_vm_map_external/mach_vm_map_kernel/vm_map/vm_map_external infra
1180 
// Common signature shared by the mach_vm_map()-style entry points
// so one family of call_map_fn__* drivers can exercise all of them.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
1192 
1193 static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1194 call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1195 {
1196 	mach_vm_address_t out_addr = start;
1197 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1198 	    0, 0, 0, 0, 0, VM_INHERIT_NONE);
1199 	// fixed-overwrite with pre-existing allocation, don't deallocate
1200 	return kr;
1201 }
1202 
1203 static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1204 call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1205 {
1206 	mach_vm_address_t out_addr = start;
1207 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1208 	    0, 0, true, 0, 0, VM_INHERIT_NONE);
1209 	// fixed-overwrite with pre-existing allocation, don't deallocate
1210 	return kr;
1211 }
1212 
1213 static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1214 call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1215 {
1216 	mach_vm_address_t out_addr = start_hint;
1217 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1218 	if (kr == 0) {
1219 		(void)mach_vm_deallocate(map, out_addr, size);
1220 	}
1221 	return kr;
1222 }
1223 
1224 static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1225 call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1226 {
1227 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1228 	mach_vm_address_t out_addr = start;
1229 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1230 	    memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1231 	// fixed-overwrite with pre-existing allocation, don't deallocate
1232 	mach_memory_entry_port_release(memobject);
1233 	return kr;
1234 }
1235 
1236 static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1237 call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1238 {
1239 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1240 	mach_vm_address_t out_addr = start;
1241 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1242 	    memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1243 	// fixed-overwrite with pre-existing allocation, don't deallocate
1244 	mach_memory_entry_port_release(memobject);
1245 	return kr;
1246 }
1247 
1248 static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1249 call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1250 {
1251 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1252 	mach_vm_address_t out_addr = start_hint;
1253 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, memobject,
1254 	    KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1255 	if (kr == 0) {
1256 		(void)mach_vm_deallocate(map, out_addr, size);
1257 	}
1258 	mach_memory_entry_port_release(memobject);
1259 	return kr;
1260 }
1261 
1262 static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1263 helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1264 {
1265 	mach_port_t memobject = make_a_mem_object(obj_size);
1266 	mach_vm_address_t out_addr = start;
1267 	kern_return_t kr = fn(map, &out_addr, size, 0, flags, memobject,
1268 	    offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1269 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1270 	mach_memory_entry_port_release(memobject);
1271 	return kr;
1272 }
1273 
1274 static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1275 call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1276 {
1277 	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
1278 }
1279 
1280 static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1281 call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1282 {
1283 	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
1284 }
1285 
1286 static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1287 call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1288 {
1289 	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
1290 }
1291 
1292 static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1293 help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1294 {
1295 	mach_vm_address_t out_addr = start;
1296 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1297 	    0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1298 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1299 	return kr;
1300 }
1301 
1302 static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1303 call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1304 {
1305 	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1306 }
1307 
1308 static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1309 call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1310 {
1311 	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1312 }
1313 
1314 static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1315 call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1316 {
1317 	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1318 }
1319 
1320 static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1321 help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1322 {
1323 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1324 	mach_vm_address_t out_addr = start;
1325 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1326 	    memobject, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1327 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1328 	mach_memory_entry_port_release(memobject);
1329 	return kr;
1330 }
1331 
1332 static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1333 call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1334 {
1335 	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1336 }
1337 
1338 static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1339 call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1340 {
1341 	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1342 }
1343 
1344 static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1345 call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1346 {
1347 	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1348 }
1349 
1350 static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1351 call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1352 {
1353 	kern_return_t kr = fn(map, start, size, 0, flags,
1354 	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1355 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1356 	return kr;
1357 }
1358 
1359 static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1360 call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1361 {
1362 	kern_return_t kr = fn(map, start, size, 0, flags,
1363 	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1364 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1365 	return kr;
1366 }
1367 
1368 static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1369 call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1370 {
1371 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1372 	kern_return_t kr = fn(map, start, size, 0, flags,
1373 	    memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1374 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1375 	mach_memory_entry_port_release(memobject);
1376 	return kr;
1377 }
1378 
1379 static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1380 call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1381 {
1382 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1383 	kern_return_t kr = fn(map, start, size, 0, flags,
1384 	    memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1385 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1386 	mach_memory_entry_port_release(memobject);
1387 	return kr;
1388 }
1389 
1390 static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1391 help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1392 {
1393 	mach_vm_address_t out_addr = 0;
1394 	kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1395 	    0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1396 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1397 	return kr;
1398 }
1399 
1400 static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1401 call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1402 {
1403 	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1404 }
1405 
1406 static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1407 call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1408 {
1409 	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1410 }
1411 
1412 static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1413 call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1414 {
1415 	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1416 }
1417 
1418 static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1419 help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1420 {
1421 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1422 	mach_vm_address_t out_addr = 0;
1423 	kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1424 	    memobject, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1425 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1426 	mach_memory_entry_port_release(memobject);
1427 	return kr;
1428 }
1429 
1430 static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1431 call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1432 {
1433 	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1434 }
1435 
1436 static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1437 call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1438 {
1439 	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1440 }
1441 
1442 static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1443 call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1444 {
1445 	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1446 }
1447 
1448 // wrappers
1449 
1450 kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1451 mach_vm_map_wrapped(vm_map_t target_task,
1452     mach_vm_address_t *address,
1453     mach_vm_size_t size,
1454     mach_vm_offset_t mask,
1455     int flags,
1456     mem_entry_name_port_t object,
1457     memory_object_offset_t offset,
1458     boolean_t copy,
1459     vm_prot_t cur_protection,
1460     vm_prot_t max_protection,
1461     vm_inherit_t inheritance)
1462 {
1463 	if (dealloc_would_time_out(*address, size, target_task)) {
1464 		return ACCEPTABLE;
1465 	}
1466 
1467 	mach_vm_address_t saved_addr = *address;
1468 	kern_return_t kr = mach_vm_map(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1469 	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1470 	return kr;
1471 }
1472 
1473 // missing forward declaration
1474 kern_return_t
1475 mach_vm_map_external(
1476 	vm_map_t                target_map,
1477 	mach_vm_offset_t        *address,
1478 	mach_vm_size_t          initial_size,
1479 	mach_vm_offset_t        mask,
1480 	int                     flags,
1481 	ipc_port_t              port,
1482 	vm_object_offset_t      offset,
1483 	boolean_t               copy,
1484 	vm_prot_t               cur_protection,
1485 	vm_prot_t               max_protection,
1486 	vm_inherit_t            inheritance);
1487 kern_return_t
mach_vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1488 mach_vm_map_external_wrapped(vm_map_t target_task,
1489     mach_vm_address_t *address,
1490     mach_vm_size_t size,
1491     mach_vm_offset_t mask,
1492     int flags,
1493     mem_entry_name_port_t object,
1494     memory_object_offset_t offset,
1495     boolean_t copy,
1496     vm_prot_t cur_protection,
1497     vm_prot_t max_protection,
1498     vm_inherit_t inheritance)
1499 {
1500 	if (dealloc_would_time_out(*address, size, target_task)) {
1501 		return ACCEPTABLE;
1502 	}
1503 
1504 	mach_vm_address_t saved_addr = *address;
1505 	kern_return_t kr = mach_vm_map_external(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1506 	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1507 	return kr;
1508 }
1509 
1510 kern_return_t
mach_vm_map_kernel_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1511 mach_vm_map_kernel_wrapped(vm_map_t target_task,
1512     mach_vm_address_t *address,
1513     mach_vm_size_t size,
1514     mach_vm_offset_t mask,
1515     int flags,
1516     mem_entry_name_port_t object,
1517     memory_object_offset_t offset,
1518     boolean_t copy,
1519     vm_prot_t cur_protection,
1520     vm_prot_t max_protection,
1521     vm_inherit_t inheritance)
1522 {
1523 	if (dealloc_would_time_out(*address, size, target_task)) {
1524 		return ACCEPTABLE;
1525 	}
1526 
1527 	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1528 
1529 	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1530 	mach_vm_address_t saved_addr = *address;
1531 	kern_return_t kr = mach_vm_map_kernel(target_task, address, size, mask, vmk_flags, object, offset, copy, cur_protection, max_protection, inheritance);
1532 	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1533 	return kr;
1534 }
1535 
1536 static inline void
check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_address_t saved_start,int flags,MAP_T map)1537 check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr, mach_vm_address_t addr,
1538     mach_vm_address_t saved_start, int flags, MAP_T map)
1539 {
1540 	if (*kr == KERN_SUCCESS) {
1541 		if (is_fixed(flags)) {
1542 			if (addr != truncate_vm_map_addr_with_flags(map, saved_start, flags)) {
1543 				*kr = OUT_PARAM_BAD;
1544 			}
1545 		}
1546 	} else {
1547 		if (saved_start != addr) {
1548 			*kr = OUT_PARAM_BAD;
1549 		}
1550 	}
1551 }
1552 
1553 kern_return_t
vm_map_enter_mem_object_control_wrapped(vm_map_t target_map,mach_vm_address_t * address,mach_vm_size_t size,vm_map_offset_t mask,int flags,mem_entry_name_port_t object __unused,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1554 vm_map_enter_mem_object_control_wrapped(
1555 	vm_map_t                target_map,
1556 	mach_vm_address_t      *address,
1557 	mach_vm_size_t          size,
1558 	vm_map_offset_t         mask,
1559 	int                     flags,
1560 	mem_entry_name_port_t   object __unused,
1561 	memory_object_offset_t  offset,
1562 	boolean_t               copy,
1563 	vm_prot_t               cur_protection,
1564 	vm_prot_t               max_protection,
1565 	vm_inherit_t            inheritance)
1566 {
1567 	if (dealloc_would_time_out(*address, size, target_map)) {
1568 		return ACCEPTABLE;
1569 	}
1570 
1571 	vm_map_offset_t vmmaddr = (vm_map_offset_t) *address;
1572 	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1573 
1574 	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1575 	struct file_control_return control_info = get_control_from_fd(file_descriptor);
1576 	kern_return_t kr = vm_map_enter_mem_object_control(target_map, &vmmaddr, size, mask, vmk_flags, (memory_object_control_t) control_info.control, offset, copy, cur_protection, max_protection, inheritance);
1577 	check_vm_map_enter_mem_object_control_outparam_changes(&kr, vmmaddr, *address, flags, target_map);
1578 
1579 	*address = vmmaddr;
1580 
1581 	cleanup_control_related_data(control_info);
1582 
1583 	return kr;
1584 }
1585 
1586 kern_return_t
vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1587 vm_map_wrapped(vm_map_t target_task,
1588     mach_vm_address_t *address,
1589     mach_vm_size_t size,
1590     mach_vm_offset_t mask,
1591     int flags,
1592     mem_entry_name_port_t object,
1593     memory_object_offset_t offset,
1594     boolean_t copy,
1595     vm_prot_t cur_protection,
1596     vm_prot_t max_protection,
1597     vm_inherit_t inheritance)
1598 {
1599 	if (dealloc_would_time_out(*address, size, target_task)) {
1600 		return ACCEPTABLE;
1601 	}
1602 
1603 	vm_address_t addr = (vm_address_t)*address;
1604 	kern_return_t kr = vm_map(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1605 	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1606 	*address = addr;
1607 	return kr;
1608 }
1609 
1610 kern_return_t
1611 vm_map_external(
1612 	vm_map_t                target_map,
1613 	vm_offset_t             *address,
1614 	vm_size_t               size,
1615 	vm_offset_t             mask,
1616 	int                     flags,
1617 	ipc_port_t              port,
1618 	vm_offset_t             offset,
1619 	boolean_t               copy,
1620 	vm_prot_t               cur_protection,
1621 	vm_prot_t               max_protection,
1622 	vm_inherit_t            inheritance);
1623 kern_return_t
vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1624 vm_map_external_wrapped(vm_map_t target_task,
1625     mach_vm_address_t *address,
1626     mach_vm_size_t size,
1627     mach_vm_offset_t mask,
1628     int flags,
1629     mem_entry_name_port_t object,
1630     memory_object_offset_t offset,
1631     boolean_t copy,
1632     vm_prot_t cur_protection,
1633     vm_prot_t max_protection,
1634     vm_inherit_t inheritance)
1635 {
1636 	if (dealloc_would_time_out(*address, size, target_task)) {
1637 		return ACCEPTABLE;
1638 	}
1639 
1640 	vm_address_t addr = (vm_address_t)*address;
1641 	kern_return_t kr = vm_map_external(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1642 	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1643 	*address = addr;
1644 	return kr;
1645 }
1646 
1647 // implementations
1648 
/*
 * Generator macros: for a given map function `map_fn`, emit the thin
 * `call_<map_fn>__<instance>...` wrappers that forward to the matching
 * generic `call_map_fn__<instance>...` helpers defined above. The IMPL()
 * macro at the bottom stamps out the full set for one map function.
 */

/* fixed start/size trial wrapper */
#define IMPL_MAP_FN_START_SIZE(map_fn, instance)                                                \
    static kern_return_t                                                                        \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
    {                                                                                           \
	return call_map_fn__ ## instance(map_fn, map, start, size);                             \
    }

/* anywhere placement: start is only a hint */
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance)                                                      \
    static kern_return_t                                                                             \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
    {                                                                                                \
	return call_map_fn__ ## instance(map_fn, map, start_hint, size);                             \
    }

/* start/size plus memory-object offset and size */
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance)                                                                                                                   \
    static kern_return_t                                                                                                                                                         \
    call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
    {                                                                                                                                                                            \
	return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size);                                                              \
    }

/* start/size plus a vm_inherit_t value under test */
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance)                                                                          \
    static kern_return_t                                                                                                          \
    call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
    {                                                                                                                             \
	return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit);                                         \
    }

/* start/size plus raw vm flags under test */
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance)                                                                 \
    static kern_return_t                                                                                               \
    call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
    {                                                                                                                  \
	return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags);                                  \
    }

/* cur/max protection pair under test */
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance)                                               \
    static kern_return_t                                                                       \
    call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
    {                                                                                          \
	return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max);               \
    }

/* Stamp out every wrapper flavor for one map function. */
#define IMPL(map_fn)                                                       \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere)                   \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere)                  \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed)      \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere)   \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed)             \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy)        \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere)          \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed)            \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy)       \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere)         \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate)                     \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy)                \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject)                    \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere)                  \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere)                 \
1717 
/* Generate the wrapper set for each map-function-under-test. */
IMPL(mach_vm_map_wrapped)
IMPL(mach_vm_map_external_wrapped)
IMPL(mach_vm_map_kernel_wrapped)
IMPL(vm_map_wrapped)
IMPL(vm_map_external_wrapped)
IMPL(vm_map_enter_mem_object_control_wrapped)

#undef IMPL
1726 
1727 static int
1728 vm_parameter_validation_kern_test(int64_t in_value, int64_t *out_value)
1729 {
1730 	// in_value has the userspace address of the fixed-size output buffer and a file descriptor.
1731 	// The address is KB16 aligned, so the bottom bits are used for the fd.
1732 	// fd bit 15 also indicates if we want to generate golden results.
1733 	// in_value is KB16 aligned
1734 	uint64_t fd_mask = KB16 - 1;
1735 	file_descriptor = (int)(((uint64_t) in_value) & fd_mask);
1736 	uint64_t buffer_address = in_value - file_descriptor;
1737 	SYSCTL_OUTPUT_BUF = buffer_address;
1738 	SYSCTL_OUTPUT_END = SYSCTL_OUTPUT_BUF + SYSCTL_OUTPUT_BUFFER_SIZE;
1739 
1740 	// check if running to generate golden result list via boot-arg
1741 	kernel_generate_golden = (file_descriptor & (KB16 >> 1)) > 0;
1742 	if (kernel_generate_golden) {
1743 		file_descriptor &= ~(KB16 >> 1);
1744 	}
1745 
1746 	// Test options:
1747 	// - avoid panics for untagged wired memory (set to true during some tests)
1748 	// - clamp vm addresses before passing to pmap to avoid pmap panics
1749 	thread_test_context_t ctx CLEANUP_THREAD_TEST_CONTEXT = {
1750 		.test_option_vm_prevent_wire_tag_panic = false,
1751 		.test_option_vm_map_clamp_pmap_remove = true,
1752 	};
1753 	thread_set_test_context(&ctx);
1754 
1755 #if !CONFIG_SPTM && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)
1756 	if (kernel_generate_golden) {
1757 		// Some devices skip some trials to avoid timeouts.
1758 		// Golden files cannot be generated on these devices.
1759 		testprintf("Can't generate golden files on this device "
1760 		    "(PPL && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)). "
1761 		    "Try again on a different device.\n");
1762 		*out_value = 0;  // failure
1763 		goto done;
1764 	}
1765 #else
1766 #pragma clang diagnostic ignored "-Wunused-label"
1767 #endif
1768 
1769 	/*
1770 	 * -- memory entry functions --
1771 	 * The memory entry test functions use macros to generate each flavor of memory entry function.
1772 	 * For more context on why, see the matching comment in vm_parameter_validation.c
1773 	 */
1774 
1775 #define RUN_START_SIZE(fn, variant, name) dealloc_results(process_results(test_mach_with_allocated_start_size(call_ ## fn ## __start_size__ ## variant, name " (start/size)")))
1776 #define RUN_PROT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __vm_prot , name " (vm_prot_t)")))
1777 
1778 #define RUN_ALL(fn, name) \
1779 	RUN_START_SIZE(fn, copy, #name " (copy)"); \
1780 	RUN_START_SIZE(fn, memonly, #name " (memonly)"); \
1781 	RUN_START_SIZE(fn, namedcreate, #name " (namedcreate)"); \
1782 	RUN_START_SIZE(fn, share, #name " (share)"); \
1783 	RUN_START_SIZE(fn, namedreuse, #name " (namedreuse)"); \
1784 	RUN_PROT(fn, #name " (vm_prot_t)"); \
1785 
1786 	RUN_ALL(mach_make_memory_entry_64, mach_make_memory_entry_64);
1787 	RUN_ALL(mach_make_memory_entry, mach_make_memory_entry);
1788 	RUN_ALL(mach_make_memory_entry_internal_retyped, mach_make_memory_entry_internal);
1789 #undef RUN_ALL
1790 #undef RUN_START_SIZE
1791 #undef RUN_PROT
1792 
1793 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
1794 	RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
1795 #undef RUN
1796 
1797 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
1798 	RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
1799 #undef RUN
1800 
1801 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
1802 	RUN(call_mach_memory_entry_map_size__start_size, "mach_memory_entry_map_size");
1803 #undef RUN
1804 
1805 	/*
1806 	 * -- allocate/deallocate functions --
1807 	 */
1808 
1809 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
1810 	RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate_external (fixed) (realigned start/size)");
1811 	RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate_external (anywhere) (hint/size)");
1812 	RUN(call_mach_vm_allocate_kernel__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
1813 	RUN(call_mach_vm_allocate_kernel__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
1814 #undef RUN
1815 
1816 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1817 	RUN(call_mach_vm_allocate__flags, "mach_vm_allocate_external");
1818 	RUN(call_mach_vm_allocate_kernel__flags, "mach_vm_allocate_kernel");
1819 #undef RUN
1820 
1821 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
1822 	RUN(call_vm_allocate__start_size_fixed, "vm_allocate (fixed) (realigned start/size)");
1823 	RUN(call_vm_allocate__start_size_anywhere, "vm_allocate (anywhere) (hint/size)");
1824 #undef RUN
1825 
1826 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1827 	RUN(call_vm_allocate__flags, "vm_allocate");
1828 #undef RUN
1829 	dealloc_results(process_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
1830 	dealloc_results(process_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
1831 
1832 	/*
1833 	 * -- map/remap functions --
1834 	 * These functions rely heavily on macros.
1835 	 * For more context on why, see the matching comment in vm_parameter_validation.c
1836 	 */
1837 
1838 	// map tests
1839 
1840 #define RUN_START_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
1841 #define RUN_HINT_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
1842 #define RUN_PROT_PAIR(fn, name) dealloc_results(process_results(test_mach_vm_prot_pair(fn, name " (vm_prot_t pair)")))
1843 #define RUN_INHERIT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
1844 #define RUN_FLAGS(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1845 #define RUN_SSOO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset_object(fn, name " (start/size/offset/object)")))
1846 
1847 #define RUN_ALL(fn, name)     \
1848 	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)");   \
1849 	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)");  \
1850 	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)");  \
1851 	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
1852 	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)");  \
1853 	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)");  \
1854 	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)");  \
1855 	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)");  \
1856 	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)");  \
1857 	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)");  \
1858 	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)");  \
1859 	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)");  \
1860 	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)");  \
1861 	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)");  \
1862 	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)");  \
1863 	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)");  \
1864 	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)");  \
1865 	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)");  \
1866 	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)");  \
1867 	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)");  \
1868 	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)");  \
1869 	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)");  \
1870 	RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)");  \
1871 	RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)");  \
1872 	RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)");  \
1873 
1874 	RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
1875 	RUN_ALL(mach_vm_map_external_wrapped, mach_vm_map_external);
	// Full parameter sweep over the three map entry points, using the
	// RUN_ALL helper macro defined earlier in this file.
	RUN_ALL(mach_vm_map_kernel_wrapped, mach_vm_map_kernel);
	RUN_ALL(vm_map_wrapped, vm_map);
	RUN_ALL(vm_map_external_wrapped, vm_map_external);

// Run one wrapper through the (start/size/offset) sweep harness.
// 'name' is already a string literal here (callers pass #name "...").
#define RUN_SSO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset(fn, name " (start/size/offset)")))

// Full sweep for a memory-object-control map entry point:
// every combination of backing (allocate/memobject), placement
// (fixed overwrite/anywhere), and copy semantics, each driven through the
// start/size, hint/size, prot-pair, inherit, flags, and start/size/offset
// harnesses. Token pasting builds the call_<fn>__<variant> wrapper names.
#define RUN_ALL_CTL(fn, name)     \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)");   \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)");  \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)");  \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)");  \
	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)");  \
	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)");  \
	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)");  \
	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)");  \
	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)");  \
	RUN_SSO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)");  \
	RUN_SSO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)");  \
	RUN_SSO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)");  \

	RUN_ALL_CTL(vm_map_enter_mem_object_control_wrapped, vm_map_enter_mem_object_control);

// Scope the map-sweep helper macros to the tests above: undefine RUN_ALL
// and its per-parameter helpers (defined earlier in this file) plus the
// RUN_SSO/RUN_ALL_CTL helpers defined just above, so the names can be
// reused by the following test groups.
// (A stray "#undef RUN_SSOO" — a typo for RUN_SSO, which is undefined
// below — was removed; #undef of a name that is not defined is a no-op.)
#undef RUN_ALL
#undef RUN_START_SIZE
#undef RUN_HINT_SIZE
#undef RUN_PROT_PAIR
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_ALL_CTL
#undef RUN_SSO
1920 
	// -- remap tests (mach_vm_remap and mach_vm_remap_new) --

// Build the wrapper name call_<fn>__<variant>__<type> via token pasting.
// (Note the double space after the first "__" is inside the ## operands
// and does not affect the pasted identifier.)
#define FN_NAME(fn, variant, type) call_ ## fn ## __  ## variant ## __ ## type
// Run one wrapper through a given harness; the printed test name embeds
// the variant and the sweep-type label.
#define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(process_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
#define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
#define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
#define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
#define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
#define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
#define RUN_SRC_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_allocated_src_unallocated_dst_size, fn, variant, src_dst_size, type_name, name)

// Full remap sweep. 'realigned' is a string-literal prefix ("realigned "
// or empty) that is concatenated into the src/size label only; the
// dst/size labels are hard-coded per variant below.
#define RUN_ALL(fn, realigned, name)                                    \
	RUN_SRC_SIZE(fn, copy, realigned "src/size", name);             \
	RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name);           \
	RUN_DST_SIZE(fn, fixed, "realigned dst/size", name);            \
	RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name);       \
	RUN_DST_SIZE(fn, anywhere, "hint/size", name);                  \
	RUN_INHERIT(fn, fixed, name);                                   \
	RUN_INHERIT(fn, fixed_copy, name);                              \
	RUN_INHERIT(fn, anywhere, name);                                \
	RUN_FLAGS(fn, nocopy, name);                                    \
	RUN_FLAGS(fn, copy, name);                                      \
	RUN_PROT_PAIRS(fn, fixed, name);                                \
	RUN_PROT_PAIRS(fn, fixed_copy, name);                           \
	RUN_PROT_PAIRS(fn, anywhere, name);                             \
	RUN_SRC_DST_SIZE(fn, fixed, "src/dst/size", name);              \
	RUN_SRC_DST_SIZE(fn, fixed_copy, "src/dst/size", name);         \
	RUN_SRC_DST_SIZE(fn, anywhere, "src/dst/size", name);           \

	// mach_vm_remap realigns src; remap_new gets no prefix (empty macro arg).
	RUN_ALL(mach_vm_remap_wrapped_kern, "realigned ", mach_vm_remap);
	RUN_ALL(mach_vm_remap_new_kernel_wrapped, , mach_vm_remap_new_kernel);

// Scope the remap helper macros to this test group.
#undef RUN_ALL
#undef RUN_HELPER
#undef RUN_SRC_SIZE
#undef RUN_DST_SIZE
#undef RUN_PROT_PAIRS
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SRC_DST_SIZE
1961 
1962 	/*
1963 	 * -- wire/unwire functions --
1964 	 * Some wire functions (vm_map_wire_and_extract, vm_map_wire_external, vm_map_wire_kernel)
1965 	 * are implemented with macros to avoid code duplication that would happen otherwise from the multiple
1966 	 * entrypoints, multiple params under test, and user/non user wired paths
1967 	 */
1968 
1969 #define RUN(fn, name) dealloc_results(process_results(test_kext_unix_with_allocated_start_size(fn, name " (start/size)")))
1970 	RUN(call_vslock, "vslock");
1971 	RUN(call_vsunlock_undirtied, "vsunlock (undirtied)");
1972 	RUN(call_vsunlock_dirtied, "vsunlock (dirtied)");
1973 #undef RUN
1974 
// Per-parameter harness runners for the wire entry points. 'wired' (true/
// false) is token-pasted into the wrapper name to select the user-wired or
// non-user-wired call path: call_<fn>__<param>__user_wired_<wired>_.
#define RUN_PROT(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __prot__user_wired_ ## wired ## _, name " (vm_prot_t)")))
#define RUN_START(fn, wired, name) dealloc_results(process_results(test_kext_tagged_with_allocated_addr(call_ ## fn ## __start__user_wired_ ## wired ## _, name " (addr)")))
#define RUN_START_END(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_start_end(call_ ## fn ## __start_end__user_wired_ ## wired ## _, name " (start/end)")))
#define RUN_TAG(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_tag(call_ ## fn ## __tag__user_wired_ ## wired ## _, name " (tag)")))

#if XNU_PLATFORM_MacOSX
// vm_map_wire_and_extract is implemented on macOS only

// Sweep prot and start for both wired paths of vm_map_wire_and_extract.
#define RUN_ALL_WIRE_AND_EXTRACT(fn, name) \
	RUN_PROT(fn, true, #name " (user wired)"); \
	RUN_PROT(fn, false, #name " (non user wired)"); \
	RUN_START(fn, true, #name " (user wired)"); \
	RUN_START(fn, false, #name " (non user wired)");

	RUN_ALL_WIRE_AND_EXTRACT(vm_map_wire_and_extract_retyped, vm_map_wire_and_extract);
#undef RUN_ALL_WIRE_AND_EXTRACT
#endif // XNU_PLATFORM_MacOSX

// Sweep prot and start/end for both wired paths of vm_map_wire_external.
// NOTE(review): the extra ')' in "(non user wired))" below looks like a
// typo, but the string is emitted in test output — confirm against the
// golden result files before changing it.
#define RUN_ALL_WIRE_EXTERNAL(fn, name) \
	RUN_PROT(fn, true, #name " (user wired)"); \
	RUN_PROT(fn, false, #name " (non user wired))"); \
	RUN_START_END(fn, true, #name " (user wired)"); \
	RUN_START_END(fn, false, #name " (non user wired)");

	RUN_ALL_WIRE_EXTERNAL(vm_map_wire_external_retyped, vm_map_wire_external);
#undef RUN_ALL_WIRE_EXTERNAL

// Sweep prot, start/end, and tag for both wired paths of vm_map_wire_kernel.
// NOTE(review): same suspected "))" typo as above; output-affecting.
#define RUN_ALL_WIRE_KERNEL(fn, name) \
	RUN_PROT(fn, false, #name " (non user wired))"); \
	RUN_PROT(fn, true, #name " (user wired)"); \
	RUN_START_END(fn, true, #name " (user wired)"); \
	RUN_START_END(fn, false, #name " (non user wired)"); \
	RUN_TAG(fn, true, #name " (user wired)"); \
	RUN_TAG(fn, false, #name " (non user wired)");

	RUN_ALL_WIRE_KERNEL(vm_map_wire_kernel, vm_map_wire_kernel);
#undef RUN_ALL_WIRE_KERNEL

// Scope the wire helper macros to this test group.
#undef RUN_PROT
#undef RUN_START
#undef RUN_START_END
#undef RUN_TAG
2017 
// vm_map_unwire: (start/end) sweep for both wired paths.
#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_end(fn, name " (start/end)")))
	RUN(call_vm_map_unwire_user_wired, "vm_map_unwire (user_wired)");
	RUN(call_vm_map_unwire_non_user_wired, "vm_map_unwire (non user_wired)");
#undef RUN

// mach_vm_wire_level_monitor takes a single int64 parameter.
#define RUN(fn, name) dealloc_results(process_results(test_with_int64(fn, name " (int64)")))
	RUN(call_mach_vm_wire_level_monitor, "mach_vm_wire_level_monitor");
#undef RUN

	/*
	 * -- copyin/copyout functions --
	 */

// (start/size) sweeps over an allocated source region.
#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
	RUN(call_vm_map_copyin, "vm_map_copyin");
	RUN(call_mach_vm_read, "mach_vm_read");
	// vm_map_copyin_common is covered well by the vm_map_copyin test
	// RUN(call_vm_map_copyin_common, "vm_map_copyin_common");
#undef RUN

// copyoutmap_atomic32 writes exactly one uint32_t; sweep the address only.
#define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr_of_size_n(fn, sizeof(uint32_t), name " (start)")))
	RUN(call_copyoutmap_atomic32, "copyoutmap_atomic32");
#undef RUN

// user source -> kernel destination (src/dst/size) sweeps.
#define RUN(fn, name) dealloc_results(process_results(test_src_kerneldst_size(fn, name " (src/dst/size)")))
	RUN(call_copyinmap, "copyinmap");
	RUN(call_vm_map_read_user, "vm_map_read_user");
#undef RUN

// kernel source -> user destination (src/dst/size) sweeps.
#define RUN(fn, name) dealloc_results(process_results(test_kernelsrc_dst_size(fn, name " (src/dst/size)")))
	RUN(call_vm_map_write_user, "vm_map_write_user");
	RUN(call_copyoutmap, "copyoutmap");
#undef RUN

	// vm_map_copy_overwrite has its own dedicated harness.
	dealloc_results(process_results(test_vm_map_copy_overwrite(call_vm_map_copy_overwrite_interruptible, "vm_map_copy_overwrite (start/size)")));
2053 
2054 	/*
2055 	 * -- protection functions --
2056 	 */
2057 
2058 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2059 	RUN(call_mach_vm_protect__start_size, "mach_vm_protect");
2060 	RUN(call_vm_protect__start_size, "vm_protect");
2061 	RUN(call_vm_map_protect__start_size__no_max, "vm_map_protect (no max)");
2062 	RUN(call_vm_map_protect__start_size__set_max, "vm_map_protect (set max)");
2063 #undef RUN
2064 
2065 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2066 	RUN(call_mach_vm_protect__vm_prot, "mach_vm_protect");
2067 	RUN(call_vm_protect__vm_prot, "vm_protect");
2068 	RUN(call_vm_map_protect__vm_prot__no_max, "vm_map_protect (no max)");
2069 	RUN(call_vm_map_protect__vm_prot__set_max, "vm_map_protect (set max)");
2070 #undef RUN
2071 
2072 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
2073 	RUN(call_useracc__start_size, "useracc");
2074 #undef RUN
2075 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2076 	RUN(call_useracc__vm_prot, "useracc");
2077 #undef RUN
2078 
2079 	/*
2080 	 * -- madvise/behavior functions --
2081 	 */
2082 
2083 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2084 	RUN(call_mach_vm_behavior_set__start_size__default, "mach_vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
2085 	RUN(call_mach_vm_behavior_set__start_size__can_reuse, "mach_vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
2086 #undef RUN
2087 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_behavior_t(fn, name " (vm_behavior_t)")))
2088 	RUN(call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
2089 #undef RUN
2090 
2091 	/*
2092 	 * -- purgability/purgeability functions --
2093 	 */
2094 
2095 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_addr(fn, name " (addr)")))
2096 	RUN(call_vm_map_purgable_control__address__get, "vm_map_purgable_control (get)");
2097 	RUN(call_vm_map_purgable_control__address__purge_all, "vm_map_purgable_control (purge all)");
2098 #undef RUN
2099 
2100 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
2101 	RUN(call_vm_map_purgable_control__purgeable_state, "vm_map_purgable_control");
2102 #undef RUN
2103 
2104 	/*
2105 	 * -- region info functions --
2106 	 */
2107 
2108 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
2109 	RUN(call_mach_vm_region, "mach_vm_region");
2110 	RUN(call_vm_region, "vm_region");
2111 #undef RUN
2112 #if XNU_PLATFORM_MacOSX
2113 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_size(fn, name " (size)")))
2114 	RUN(call_vm_region_object_create, "vm_region_object_create");
2115 #undef RUN
2116 #endif
2117 
2118 	/*
2119 	 * -- page info functions --
2120 	 */
2121 
2122 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
2123 	RUN(call_vm_map_page_info, "vm_map_page_info");
2124 #undef RUN
2125 
2126 	/*
2127 	 * -- miscellaneous functions --
2128 	 */
2129 
2130 #if CONFIG_MAP_RANGES
2131 	dealloc_results(process_results(test_mach_vm_range_create(call_mach_vm_range_create, "mach_vm_range_create (start/size/start2/size2)")));
2132 #endif
2133 
2134 	dealloc_results(process_results(test_kext_unix_with_allocated_vnode_addr(call_task_find_region_details, "task_find_region_details (addr)")));
2135 
2136 	*out_value = 1;  // success
2137 done:
2138 	SYSCTL_OUTPUT_BUF = 0;
2139 	SYSCTL_OUTPUT_END = 0;
2140 	return 0;
2141 }
2142 
// Register the test with the sysctl test harness under the name
// "vm_parameter_validation_kern" so userspace can trigger it.
SYSCTL_TEST_REGISTER(vm_parameter_validation_kern, vm_parameter_validation_kern_test);
2144