xref: /xnu-12377.41.6/osfmk/tests/vm_parameter_validation_kern.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 #include <kern/zalloc.h>
2 #include <kern/thread_test_context.h>
3 
4 #include "vm_parameter_validation.h"
5 
6 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
7 #pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
8 #pragma clang diagnostic ignored "-Wmissing-prototypes"
9 #pragma clang diagnostic ignored "-Wpedantic"
10 #pragma clang diagnostic ignored "-Wgcc-compat"
11 
12 
13 DEFINE_TEST_IDENTITY(test_identity_vm_parameter_validation_kern);
14 
15 // vprintf() to a userspace buffer
16 // output is incremented to point at the new nul terminator
static void
user_vprintf(user_addr_t *output, user_addr_t output_end, const char *format, va_list args) __printflike(3, 0)
{
	// Local prototype avoids pulling in headers that conflict with this file.
	extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
	char linebuf[1024];
	size_t printed;

	// Format into a kernel-side staging buffer first, then copy out.
	printed = vsnprintf(linebuf, sizeof(linebuf), format, args);
	assert(printed < sizeof(linebuf) - 1);
	if (*output + printed + 1 < output_end) {
		// Common case: the text plus its nul terminator fits.
		// Advance *output to the nul so the next call overwrites it.
		copyout(linebuf, *output, printed + 1);
		*output += printed;

		/* *output + 1 == output_end occurs only after the error case below */
		assert(*output + 1 < output_end);
	} else if (*output + 1 < output_end) {
		/*
		 * Not enough space in the output buffer for this text.
		 * Print as much as we can, then rewind and terminate
		 * the buffer with an error message.
		 * The tests will continue to run after this, but they
		 * won't be able to output anything more.
		 */
		static const char err_msg[] =
		    KERN_RESULT_DELIMITER KERN_FAILURE_DELIMITER
		    "kernel output buffer full, output truncated\n";
		size_t err_len = strlen(err_msg);
		// Bytes of this line that still fit, excluding the nul.
		size_t printable = output_end - *output - 1;
		assert(printable <= printed);
		copyout(linebuf, *output, printable + 1);
		// Overwrite the buffer tail with the truncation marker.
		copyout(err_msg, output_end - err_len - 1, err_len + 1);
		// Park *output on the final byte so later calls fall through
		// to the do-nothing branch below.
		*output = output_end - 1;
	} else {
		/*
		 * Not enough space in the output buffer,
		 * and we already inserted the error message.
		 * Do nothing.
		 */
		assert(*output + 1 == output_end);
	}
}
58 
59 void
testprintf(const char * format,...)60 testprintf(const char *format, ...)
61 {
62 	vm_parameter_validation_kern_thread_context_t *globals = get_globals();
63 
64 	va_list args;
65 	va_start(args, format);
66 	user_vprintf(&globals->output_buffer_cur, globals->output_buffer_end, format, args);
67 	va_end(args);
68 }
69 
70 // Utils
71 
72 static mach_port_t
make_a_mem_object(vm_size_t size)73 make_a_mem_object(vm_size_t size)
74 {
75 	ipc_port_t out_handle;
76 	kern_return_t kr = mach_memory_object_memory_entry_64((host_t)1, /*internal=*/ true, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
77 	assert(kr == 0);
78 	return out_handle;
79 }
80 
81 static mach_port_t
make_a_mem_entry(MAP_T map,vm_size_t size)82 make_a_mem_entry(MAP_T map, vm_size_t size)
83 {
84 	mach_port_t port;
85 	memory_object_size_t s = (memory_object_size_t)size;
86 	kern_return_t kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
87 	assert(kr == 0);
88 	return port;
89 }
90 
91 // Test functions
92 
// Run start/size trials against a vm_map_copy_overwrite-style function
// `func`, recording each trial's return code under `testname`.
static results_t *
test_vm_map_copy_overwrite(kern_return_t (*func)(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t start, mach_vm_size_t size), const char * testname)
{
	// source map: has an allocation bigger than our
	// "reasonable" trial sizes, to copy from
	MAP_T src_map SMART_MAP;
	allocation_t src_alloc SMART_ALLOCATE_VM(src_map, TEST_ALLOC_SIZE, VM_PROT_READ);

	// dest map: has an allocation bigger than our
	// "reasonable" trial sizes, to copy-overwrite on
	MAP_T dst_map SMART_MAP;
	allocation_t dst_alloc SMART_ALLOCATE_VM(dst_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);

	// We test dst/size parameters.
	// We don't test the contents of the vm_map_copy_t.
	start_size_trials_t *trials SMART_START_SIZE_TRIALS(dst_alloc.addr);
	results_t *results = alloc_results(testname, eSMART_START_SIZE_TRIALS, dst_alloc.addr, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		start_size_trial_t trial = trials->list[i];

		// Copy from the source.
		// A fresh copy is made per trial because a successful
		// overwrite consumes it.
		vm_map_copy_t copy;
		kern_return_t kr = vm_map_copyin(src_map, src_alloc.addr, src_alloc.size, false, &copy);
		assert(kr == 0);
		assert(copy);  // null copy won't exercise the sanitization path

		// Copy-overwrite to the destination.
		kern_return_t ret = func(dst_map, copy, trial.start, trial.size);

		if (ret != KERN_SUCCESS) {
			// Failure means the copy was not consumed; discard it
			// to avoid leaking it.
			vm_map_copy_discard(copy);
		}
		append_result(results, ret, trial.name);
	}
	return results;
}
130 
131 /*
132  * This function temporarily allocates a writeable allocation in kernel_map, and a read only allocation in a temporary map.
133  * It's used to test a function such as vm_map_read_user which copies in data to a kernel pointer that must be writeable.
134  */
135 static results_t *
test_src_kerneldst_size(kern_return_t (* func)(MAP_T map,vm_map_offset_t src,void * dst,vm_size_t length),const char * testname)136 test_src_kerneldst_size(kern_return_t (*func)(MAP_T map, vm_map_offset_t src, void * dst, vm_size_t length), const char * testname)
137 {
138 	MAP_T map SMART_MAP;
139 	allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_READ);
140 	allocation_t dst_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
141 	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
142 	results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
143 
144 	for (unsigned i = 0; i < trials->count; i++) {
145 		src_dst_size_trial_t trial = trials->list[i];
146 		trial = slide_trial_src(trial, src_base.addr);
147 		trial = slide_trial_dst(trial, dst_base.addr);
148 		int ret = func(map, trial.src, (void *)trial.dst, trial.size);
149 		append_result(results, ret, trial.name);
150 	}
151 	return results;
152 }
153 
154 /*
155  * This function temporarily allocates a read only allocation in kernel_map, and a writeable allocation in a temporary map.
156  * It's used to test a function such as vm_map_write_user which copies data from a kernel pointer to a writeable userspace address.
157  */
158 static results_t *
test_kernelsrc_dst_size(kern_return_t (* func)(MAP_T map,void * src,vm_map_offset_t dst,vm_size_t length),const char * testname)159 test_kernelsrc_dst_size(kern_return_t (*func)(MAP_T map, void *src, vm_map_offset_t dst, vm_size_t length), const char * testname)
160 {
161 	MAP_T map SMART_MAP;
162 	allocation_t src_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_READ);
163 	allocation_t dst_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
164 	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
165 	results_t *results = alloc_results(testname, eSMART_SRC_DST_SIZE_TRIALS, trials->count);
166 
167 	for (unsigned i = 0; i < trials->count; i++) {
168 		src_dst_size_trial_t trial = trials->list[i];
169 		trial = slide_trial_src(trial, src_base.addr);
170 		trial = slide_trial_dst(trial, dst_base.addr);
171 		int ret = func(map, (void *)trial.src, trial.dst, trial.size);
172 		append_result(results, ret, trial.name);
173 	}
174 	return results;
175 }
176 
177 
178 /////////////////////////////////////////////////////
179 // Mach tests
180 
181 
// Trial wrapper for mach_vm_read(). Discards any data returned on
// success so repeated trials don't accumulate copy objects.
static kern_return_t
call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	vm_offset_t out_addr;
	mach_msg_type_number_t out_size;
	kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
	if (kr == 0) {
		// we didn't call through MIG so out_addr is really a vm_map_copy_t
		vm_map_copy_discard((vm_map_copy_t)out_addr);
	}
	return kr;
}
194 
195 static inline void
check_vm_map_copyin_outparam_changes(kern_return_t * kr,vm_map_copy_t copy,vm_map_copy_t saved_copy)196 check_vm_map_copyin_outparam_changes(kern_return_t * kr, vm_map_copy_t copy, vm_map_copy_t saved_copy)
197 {
198 	if (*kr == KERN_SUCCESS) {
199 		if (copy == saved_copy) {
200 			*kr = OUT_PARAM_BAD;
201 		}
202 	} else {
203 		if (copy != saved_copy) {
204 			*kr = OUT_PARAM_BAD;
205 		}
206 	}
207 }
208 
// Trial wrapper for vm_map_copyin(). Seeds the out-param with a
// sentinel so the out-param check can detect whether it was written,
// and discards the copy on success to avoid leaking it.
static kern_return_t
call_vm_map_copyin(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	vm_map_copy_t invalid_initial_value = INVALID_VM_MAP_COPY;
	vm_map_copy_t copy = invalid_initial_value;
	kern_return_t kr = vm_map_copyin(map, start, size, false, &copy);
	if (kr == 0) {
		vm_map_copy_discard(copy);
	}
	check_vm_map_copyin_outparam_changes(&kr, copy, invalid_initial_value);
	return kr;
}
221 
222 static kern_return_t
call_copyoutmap_atomic32(MAP_T map,vm_map_offset_t addr)223 call_copyoutmap_atomic32(MAP_T map, vm_map_offset_t addr)
224 {
225 	uint32_t data = 0;
226 	kern_return_t kr = copyoutmap_atomic32(map, data, addr);
227 	return kr;
228 }
229 
230 
231 static kern_return_t
call_mach_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)232 call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
233 {
234 	mach_vm_address_t saved_start = *start;
235 	kern_return_t kr = mach_vm_allocate_external(map, start, size, flags);
236 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
237 	return kr;
238 }
239 
240 static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)241 call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
242 {
243 	mach_vm_address_t saved_start = *start;
244 	kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_FIXED);
245 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
246 	return kr;
247 }
248 
249 static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)250 call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
251 {
252 	mach_vm_address_t saved_start = *start;
253 	kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_ANYWHERE);
254 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
255 	return kr;
256 }
257 
258 static kern_return_t
call_mach_vm_allocate_kernel__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)259 call_mach_vm_allocate_kernel__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
260 {
261 	mach_vm_address_t saved_start = *start;
262 	kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
263 	    FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK));
264 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
265 	return kr;
266 }
267 
268 static kern_return_t
call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)269 call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
270 {
271 	if (dealloc_would_time_out(*start, size, map)) {
272 		return ACCEPTABLE;
273 	}
274 
275 	mach_vm_address_t saved_start = *start;
276 	kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
277 	    FLAGS_AND_TAG(VM_FLAGS_FIXED, VM_KERN_MEMORY_OSFMK));
278 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
279 	return kr;
280 }
281 
282 static kern_return_t
call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)283 call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
284 {
285 	if (dealloc_would_time_out(*start, size, map)) {
286 		return ACCEPTABLE;
287 	}
288 
289 	mach_vm_address_t saved_start = *start;
290 	kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
291 	    FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
292 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
293 	return kr;
294 }
295 
296 
297 
298 static kern_return_t
call_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)299 call_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
300 {
301 	mach_vm_address_t saved_start = *start;
302 	kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, flags);
303 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
304 	return kr;
305 }
306 
307 static kern_return_t
call_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)308 call_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
309 {
310 	mach_vm_address_t saved_start = *start;
311 	kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_FIXED);
312 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
313 	return kr;
314 }
315 
316 static kern_return_t
call_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)317 call_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
318 {
319 	mach_vm_address_t saved_start = *start;
320 	kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_ANYWHERE);
321 	check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
322 	return kr;
323 }
324 
325 static kern_return_t
call_mach_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)326 call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
327 {
328 	kern_return_t kr = mach_vm_deallocate(map, start, size);
329 	return kr;
330 }
331 
332 static kern_return_t
call_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)333 call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
334 {
335 	kern_return_t kr = vm_deallocate(map, (vm_address_t) start, (vm_size_t) size);
336 	return kr;
337 }
338 
339 // Including sys/systm.h caused things to blow up
340 int     vslock(user_addr_t addr, user_size_t len);
341 int     vsunlock(user_addr_t addr, user_size_t len, int dirtied);
342 static int
call_vslock(void * start,size_t size)343 call_vslock(void * start, size_t size)
344 {
345 	int kr = vslock((user_addr_t) start, (user_size_t) size);
346 	if (kr == KERN_SUCCESS) {
347 		(void) vsunlock((user_addr_t) start, (user_size_t) size, 0);
348 	}
349 
350 	return kr;
351 }
352 
// Trial wrapper for vsunlock() with dirtied=0. The range must be
// vslock()ed first because unlocking non-locked memory panics.
static int
call_vsunlock_undirtied(void * start, size_t size)
{
	int kr = vslock((user_addr_t) start, (user_size_t) size);
	if (kr == EINVAL) {
		// Invalid vslock arguments should also be
		// invalid vsunlock arguments. Test it.
		// (Deliberately empty: fall through to vsunlock below.)
	} else if (kr != KERN_SUCCESS) {
		// vslock failed, and vsunlock of non-locked memory panics
		return PANIC;
	}
	kr = vsunlock((user_addr_t) start, (user_size_t) size, 0);
	return kr;
}
367 
// Trial wrapper for vsunlock() with dirtied=1. The range must be
// vslock()ed first because unlocking non-locked memory panics.
static int
call_vsunlock_dirtied(void * start, size_t size)
{
	int kr = vslock((user_addr_t) start, (user_size_t) size);
	if (kr == EINVAL) {
		// Invalid vslock arguments should also be
		// invalid vsunlock arguments. Test it.
		// (Deliberately empty: fall through to vsunlock below.)
	} else if (kr != KERN_SUCCESS) {
		// vslock failed, and vsunlock of non-locked memory panics
		return PANIC;
	}
	kr = vsunlock((user_addr_t) start, (user_size_t) size, 1);
	return kr;
}
382 
383 extern kern_return_t    vm_map_wire_external(
384 	vm_map_t                map,
385 	vm_map_offset_t         start,
386 	vm_map_offset_t         end,
387 	vm_prot_t               access_type,
388 	boolean_t               user_wire);
389 
390 
391 typedef kern_return_t (*wire_fn_t)(
392 	vm_map_t task,
393 	mach_vm_address_t start,
394 	mach_vm_address_t end,
395 	vm_prot_t prot,
396 	vm_tag_t tag,
397 	boolean_t user_wire);
398 
399 
400 /*
401  * Tell vm_tag_bt() to change its behavior so our calls to
402  * vm_map_wire_external and vm_map_wire_and_extract do not panic.
403  */
static void
prevent_wire_tag_panic(bool prevent)
{
	// Toggle the per-thread test option that vm_tag_bt() consults.
	thread_set_test_option(test_option_vm_prevent_wire_tag_panic, prevent);
}
409 
410 #if XNU_PLATFORM_MacOSX
411 // vm_map_wire_and_extract() implemented on macOS only
412 
413 
414 /*
415  * wire_nested requires a range of exactly one page when passed a physpage pointer.
416  * wire_and_extract is meant to provide that, but as a result of round introduced, unaligned values don't follow that.
417  */
418 static bool
will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map,mach_vm_address_t start)419 will_vm_map_wire_nested_panic_due_to_invalid_range_size(MAP_T map, mach_vm_address_t start)
420 {
421 	mach_vm_address_t end = start + VM_MAP_PAGE_SIZE(map);
422 	if (round_up_map(map, end) - trunc_down_map(map, start) != VM_MAP_PAGE_SIZE(map)) {
423 		return true;
424 	}
425 	return false;
426 }
427 
428 static inline void
check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr,ppnum_t physpage)429 check_vm_map_wire_and_extract_outparam_changes(kern_return_t * kr, ppnum_t physpage)
430 {
431 	if (*kr != KERN_SUCCESS) {
432 		if (physpage != 0) {
433 			*kr = OUT_PARAM_BAD;
434 		}
435 	}
436 }
437 
// Adapter giving vm_map_wire_and_extract() the common wire_fn_t shape.
// end/tag are accepted but unused: the callee derives the range from
// start and tags internally.
static kern_return_t
vm_map_wire_and_extract_retyped(
	vm_map_t                map,
	mach_vm_address_t       start,
	mach_vm_address_t       end __unused,
	vm_prot_t               prot,
	vm_tag_t                tag __unused,
	boolean_t               user_wire)
{
	// Predict the wire_nested assertion failure rather than panicking.
	if (will_vm_map_wire_nested_panic_due_to_invalid_range_size(map, start)) {
		return PANIC;
	}

	// Sentinel lets the out-param check detect an unwanted write.
	ppnum_t physpage = UNLIKELY_INITIAL_PPNUM;
	kern_return_t kr = vm_map_wire_and_extract(map, start, prot, user_wire, &physpage);
	check_vm_map_wire_and_extract_outparam_changes(&kr, physpage);
	return kr;
}
456 #endif // XNU_PLATFORM_MacOSX
457 
458 
// Adapter giving vm_map_wire_external() the common wire_fn_t shape;
// the tag parameter is accepted but unused (the callee derives it).
static kern_return_t
vm_map_wire_external_retyped(
	vm_map_t                map,
	mach_vm_address_t       start,
	mach_vm_address_t       end,
	vm_prot_t               prot,
	vm_tag_t                tag __unused,
	boolean_t               user_wire)
{
	return vm_map_wire_external(map, start, end, prot, user_wire);
}
470 
// Common driver for all wire trials: invokes `fn` with tag-panic
// suppression enabled, and unwires on success so the trial leaves no
// wired memory behind.
static kern_return_t
wire_call_impl(wire_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t end, vm_prot_t prot, vm_tag_t tag, bool user_wire)
{
	// Wiring with VM_KERN_MEMORY_NONE would assert inside the VM.
	if (tag == VM_KERN_MEMORY_NONE) {
		return PANIC;
	}
	prevent_wire_tag_panic(true);
	kern_return_t kr = fn(map, start, end, prot, tag, user_wire);
	prevent_wire_tag_panic(false);
	if (kr == KERN_SUCCESS) {
		(void) vm_map_unwire(map, start, end, user_wire);
	}
	return kr;
}
485 
/*
 * WIRE_IMPL(FN, user_wire) stamps out four trial wrappers around
 * wire_call_impl() for wire function FN:
 *   __start_end  - trial start/end, default prot and OSFMK tag
 *   __prot       - trial start/size/prot; BUSTED if start+size overflows
 *   __tag        - trial start/end/tag
 *   __start      - trial start only, end fixed at 0
 * The wrappers are __attribute__((used)) because they are referenced
 * only via token pasting from the trial tables, not by direct call.
 * (No comments inside the macro: line splicing precedes comment
 * removal, so a comment would break the backslash continuations.)
 */
#define WIRE_IMPL(FN, user_wire)                                                  \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __start_end__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end) \
	{                                                                         \
	        return wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	}                                                                         \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __prot__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot) \
	{                                                                         \
	        mach_vm_address_t end;                                            \
	        if (__builtin_add_overflow(start, size, &end)) {                  \
	                return BUSTED;                                            \
	        }                                                                 \
	        return wire_call_impl(FN, map, start, end, prot, VM_KERN_MEMORY_OSFMK, user_wire); \
	}                                                                         \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __tag__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start, mach_vm_address_t end, vm_tag_t tag) \
	{                                                                         \
	        kern_return_t kr = wire_call_impl(FN, map, start, end, VM_PROT_DEFAULT, tag, user_wire); \
	        return kr;                                                        \
	}                                                                         \
	static kern_return_t                                                      \
	__attribute__((used))                                                     \
	call_ ## FN ## __start__user_wired_ ## user_wire ## _(MAP_T map, mach_vm_address_t start) \
	{                                                                         \
	        return wire_call_impl(FN, map, start, 0, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, user_wire); \
	}                                                                         \

WIRE_IMPL(vm_map_wire_external_retyped, true)
WIRE_IMPL(vm_map_wire_external_retyped, false)
WIRE_IMPL(vm_map_wire_kernel, true)
WIRE_IMPL(vm_map_wire_kernel, false)

#if XNU_PLATFORM_MacOSX
WIRE_IMPL(vm_map_wire_and_extract_retyped, true)
WIRE_IMPL(vm_map_wire_and_extract_retyped, false)
#endif
526 
// Trial wrapper for mach_vm_wire_level_monitor(), normalizing
// resource-shortage results so the golden files stay stable.
static kern_return_t
call_mach_vm_wire_level_monitor(int64_t requested_pages)
{
	kern_return_t kr = mach_vm_wire_level_monitor(requested_pages);
	/*
	 * KERN_RESOURCE_SHORTAGE and KERN_SUCCESS are
	 * equivalent acceptable results for this test.
	 */
	if (kr == KERN_RESOURCE_SHORTAGE) {
#if !defined(XNU_TARGET_OS_BRIDGE)
		kr = KERN_SUCCESS;
#else  /* defined(XNU_TARGET_OS_BRIDGE) */
		/*
		 * ...but the bridgeOS golden file recorded
		 * KERN_RESOURCE_SHORTAGE for some values so
		 * match that to avoid a golden file update.
		 * This code can be removed during any golden file update.
		 */
		if (requested_pages == 1 || requested_pages == 2) {
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_RESOURCE_SHORTAGE;
		}
#endif /* defined(XNU_TARGET_OS_BRIDGE) */
	}
	return kr;
}
554 
555 static kern_return_t
call_vm_map_unwire_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)556 call_vm_map_unwire_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
557 {
558 	kern_return_t kr = vm_map_unwire(map, start, end, TRUE);
559 	return kr;
560 }
561 
562 
563 static kern_return_t
call_vm_map_unwire_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)564 call_vm_map_unwire_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
565 {
566 	kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, FALSE);
567 	if (kr) {
568 		return PANIC;
569 	}
570 	kr = vm_map_unwire(map, start, end, FALSE);
571 	return kr;
572 }
573 
574 #ifndef __x86_64__
575 extern const vm_map_address_t physmap_base;
576 extern const vm_map_address_t physmap_end;
577 #endif
578 
579 /*
580  * This function duplicates the panicking checks done in copy_validate.
581  * size==0 is returned as success earlier in copyin/out than copy_validate is called, so we ignore that case.
582  */
static bool
will_copyio_panic_in_copy_validate(void *kernel_addr, vm_size_t size)
{
	// size==0 returns success before copy_validate runs; never a panic.
	if (size == 0) {
		return false;
	}
	extern const int copysize_limit_panic;
	if (size > copysize_limit_panic) {
		return true;
	}

	/*
	 * copyio is architecture specific and has different checks per arch.
	 */
#ifdef __x86_64__
	if ((vm_offset_t) kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		return true;
	}
#else /* not __x86_64__ */
	// A range that wraps the address space always panics.
	uintptr_t kernel_addr_last;
	if (os_add_overflow((uintptr_t) kernel_addr, size, &kernel_addr_last)) {
		return true;
	}

	// The kernel pointer must lie entirely within either the kernel
	// VA range or the physical aperture (pointer auth bits stripped).
	bool in_kva = (VM_KERNEL_STRIP_PTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
	bool in_physmap = (VM_KERNEL_STRIP_PTR(kernel_addr) >= physmap_base) &&
	    (VM_KERNEL_STRIP_PTR(kernel_addr_last) <= physmap_end);

	if (!(in_kva || in_physmap)) {
		return true;
	}
#endif /* not __x86_64__ */

	return false;
}
619 
620 static kern_return_t
call_copyinmap(MAP_T map,vm_map_offset_t fromaddr,void * todata,vm_size_t length)621 call_copyinmap(MAP_T map, vm_map_offset_t fromaddr, void * todata, vm_size_t length)
622 {
623 	if (will_copyio_panic_in_copy_validate(todata, length)) {
624 		return PANIC;
625 	}
626 
627 	kern_return_t kr = copyinmap(map, fromaddr, todata, length);
628 	return kr;
629 }
630 
631 static kern_return_t
call_copyoutmap(MAP_T map,void * fromdata,vm_map_offset_t toaddr,vm_size_t length)632 call_copyoutmap(MAP_T map, void * fromdata, vm_map_offset_t toaddr, vm_size_t length)
633 {
634 	if (will_copyio_panic_in_copy_validate(fromdata, length)) {
635 		return PANIC;
636 	}
637 
638 	kern_return_t kr = copyoutmap(map, fromdata, toaddr, length);
639 	return kr;
640 }
641 
642 static kern_return_t
call_vm_map_read_user(MAP_T map,vm_map_address_t src_addr,void * ptr,vm_size_t size)643 call_vm_map_read_user(MAP_T map, vm_map_address_t src_addr, void * ptr, vm_size_t size)
644 {
645 	if (will_copyio_panic_in_copy_validate(ptr, size)) {
646 		return PANIC;
647 	}
648 
649 	kern_return_t kr = vm_map_read_user(map, src_addr, ptr, size);
650 	return kr;
651 }
652 
653 static kern_return_t
call_vm_map_write_user(MAP_T map,void * ptr,vm_map_address_t dst_addr,vm_size_t size)654 call_vm_map_write_user(MAP_T map, void * ptr, vm_map_address_t dst_addr, vm_size_t size)
655 {
656 	if (will_copyio_panic_in_copy_validate(ptr, size)) {
657 		return PANIC;
658 	}
659 
660 	kern_return_t kr = vm_map_write_user(map, ptr, dst_addr, size);
661 	return kr;
662 }
663 
// Trial wrapper for vm_map_copy_overwrite() with interruptible=TRUE.
// On MTE hardware the call takes an extra discard_on_success argument.
static kern_return_t
call_vm_map_copy_overwrite_interruptible(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t dst_addr, mach_vm_size_t copy_size)
{
	kern_return_t kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, copy_size,
#if HAS_MTE
	    FALSE,
#endif
	    TRUE);

	return kr;
}
675 
676 static kern_return_t
call_mach_vm_protect__start_size(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)677 call_mach_vm_protect__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
678 {
679 	kern_return_t kr = mach_vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
680 	return kr;
681 }
682 static kern_return_t
call_mach_vm_protect__vm_prot(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)683 call_mach_vm_protect__vm_prot(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
684 {
685 	kern_return_t kr = mach_vm_protect(map, start, size, 0, prot);
686 	return kr;
687 }
688 
689 static kern_return_t
call_vm_protect__start_size(MAP_T map,vm_address_t start,vm_size_t size)690 call_vm_protect__start_size(MAP_T map, vm_address_t start, vm_size_t size)
691 {
692 	kern_return_t kr = vm_protect(map, start, size, 0, VM_PROT_READ | VM_PROT_WRITE);
693 	return kr;
694 }
695 
696 static kern_return_t
call_vm_protect__vm_prot(MAP_T map,vm_address_t start,vm_size_t size,vm_prot_t prot)697 call_vm_protect__vm_prot(MAP_T map, vm_address_t start, vm_size_t size, vm_prot_t prot)
698 {
699 	kern_return_t kr = vm_protect(map, start, size, 0, prot);
700 	return kr;
701 }
702 
703 /*
704  * VME_OFFSET_SET will panic due to an assertion if passed an address that is not aligned to VME_ALIAS_BITS
705  * VME_OFFSET_SET is called by _vm_map_clip_(start/end)
706  * vm_map_protect -> vm_map_clip_end -> _vm_map_clip_end -> VME_OFFSET_SET
707  */
708 static bool
will_vm_map_protect_panic(mach_vm_address_t start,mach_vm_address_t end)709 will_vm_map_protect_panic(mach_vm_address_t start, mach_vm_address_t end)
710 {
711 	bool start_aligned = start == ((start >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
712 	bool end_aligned = end == ((end >> VME_ALIAS_BITS) << VME_ALIAS_BITS);
713 	return !(start_aligned && end_aligned);
714 }
715 
716 static kern_return_t
call_vm_map_protect__start_size__no_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)717 call_vm_map_protect__start_size__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
718 {
719 	mach_vm_address_t end = start + size;
720 	if (will_vm_map_protect_panic(start, end)) {
721 		return PANIC;
722 	}
723 
724 	kern_return_t kr = vm_map_protect(map, start, end, 0, VM_PROT_READ | VM_PROT_WRITE);
725 	return kr;
726 }
727 
728 static kern_return_t
call_vm_map_protect__start_size__set_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)729 call_vm_map_protect__start_size__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
730 {
731 	mach_vm_address_t end = start + size;
732 	if (will_vm_map_protect_panic(start, end)) {
733 		return PANIC;
734 	}
735 
736 	kern_return_t kr = vm_map_protect(map, start, end, 1, VM_PROT_READ | VM_PROT_WRITE);
737 	return kr;
738 }
739 
740 static kern_return_t
call_vm_map_protect__vm_prot__no_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)741 call_vm_map_protect__vm_prot__no_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
742 {
743 	mach_vm_address_t end = start + size;
744 	if (will_vm_map_protect_panic(start, end)) {
745 		return PANIC;
746 	}
747 
748 	kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
749 	return kr;
750 }
751 
752 static kern_return_t
call_vm_map_protect__vm_prot__set_max(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)753 call_vm_map_protect__vm_prot__set_max(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
754 {
755 	mach_vm_address_t end = start + size;
756 	if (will_vm_map_protect_panic(start, end)) {
757 		return PANIC;
758 	}
759 
760 	kern_return_t kr = vm_map_protect(map, start, end, 0, prot);
761 	return kr;
762 }
763 
764 // Fwd decl to avoid including bsd headers
765 int     useracc(user_addr_t addr, user_size_t len, int prot);
766 
767 static int
call_useracc__start_size(void * start,size_t size)768 call_useracc__start_size(void * start, size_t size)
769 {
770 	int result = useracc((user_addr_t) start, (user_addr_t) size, VM_PROT_READ);
771 	return result;
772 }
773 
774 static int
call_useracc__vm_prot(void * start,size_t size,int prot)775 call_useracc__vm_prot(void * start, size_t size, int prot)
776 {
777 	return useracc((user_addr_t) start, (user_addr_t) size, prot);
778 }
779 
780 static int
call_vm_map_purgable_control__address__get(MAP_T map,mach_vm_address_t addr)781 call_vm_map_purgable_control__address__get(MAP_T map, mach_vm_address_t addr)
782 {
783 	int state = INVALID_PURGABLE_STATE;
784 	int initial_state = state;
785 	kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_GET_STATE, &state);
786 	check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_GET_STATE);
787 	return kr;
788 }
789 
790 static int
call_vm_map_purgable_control__address__purge_all(MAP_T map,mach_vm_address_t addr)791 call_vm_map_purgable_control__address__purge_all(MAP_T map, mach_vm_address_t addr)
792 {
793 	int state = INVALID_PURGABLE_STATE;
794 	int initial_state = state;
795 	kern_return_t kr = vm_map_purgable_control(map, addr, VM_PURGABLE_PURGE_ALL, &state);
796 	check_mach_vm_purgable_control_outparam_changes(&kr, state, initial_state, VM_PURGABLE_PURGE_ALL);
797 	return kr;
798 }
799 
800 static int
call_vm_map_purgable_control__purgeable_state(MAP_T map,vm_address_t addr,vm_purgable_t control,int state)801 call_vm_map_purgable_control__purgeable_state(MAP_T map, vm_address_t addr, vm_purgable_t control, int state)
802 {
803 	int state_copy = state;
804 	kern_return_t kr = vm_map_purgable_control(map, addr, control, &state_copy);
805 	check_mach_vm_purgable_control_outparam_changes(&kr, state_copy, state, control);
806 
807 	return kr;
808 }
809 
/*
 * Call vm_map_page_info(VM_PAGE_INFO_BASIC) on addr and verify that the
 * out-parameters (info, count) changed relative to sentinel initial values.
 */
static kern_return_t
call_vm_map_page_info(MAP_T map, mach_vm_address_t addr)
{
	vm_page_info_flavor_t flavor = VM_PAGE_INFO_BASIC;
	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
	mach_msg_type_number_t saved_count = count;
	vm_page_info_basic_data_t info = {0};
	/* sentinel: a real result should not leave depth == -1 */
	info.depth = -1;
	vm_page_info_basic_data_t saved_info = info;

	/*
	 * If this test is invoked from a rosetta process,
	 * vm_map_page_range_info_internal doesn't know what
	 * effective_page_shift to use and returns KERN_INVALID_ARGUMENT.
	 * To fix this, we can set the region_page_shift to the page_shift
	 * used for map
	 */
	int saved_page_shift = thread_self_region_page_shift();
	if (PAGE_SIZE == KB16) {
		if (VM_MAP_PAGE_SHIFT(current_map()) != VM_MAP_PAGE_SHIFT(map)) {
			thread_self_region_page_shift_set(VM_MAP_PAGE_SHIFT(map));
		}
	}

	kern_return_t kr = vm_map_page_info(map, addr, flavor, (vm_page_info_t)&info, &count);

	/* restore the thread's region page shift before running the checks */
	thread_self_region_page_shift_set(saved_page_shift);

	check_mach_vm_page_info_outparam_changes(&kr, info, saved_info, count, saved_count);

	return kr;
}
842 
843 #if CONFIG_MAP_RANGES
/*
 * Register two MACH_VM_RANGE_FIXED ranges, [start, start+size) and
 * [second_start, second_start+second_size), via mach_vm_range_create.
 */
static kern_return_t
call_mach_vm_range_create(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, mach_vm_address_t second_start, mach_vm_size_t second_size)
{
	mach_vm_range_recipe_v1_t array[2];
	array[0] = (mach_vm_range_recipe_v1_t){
		.range = { start, start + size }, .range_tag = MACH_VM_RANGE_FIXED,
	};
	array[1] = (mach_vm_range_recipe_v1_t){
		.range = { second_start, second_start + second_size }, .range_tag = MACH_VM_RANGE_FIXED,
	};

	// mach_vm_range_create requires map == current_map(). Patch it up, do the call, and then restore it.
	vm_map_t saved_map = swap_task_map(current_task(), current_thread(), map);

	kern_return_t kr = mach_vm_range_create(map, MACH_VM_RANGE_FLAVOR_V1, (mach_vm_range_recipes_raw_t)array, sizeof(array[0]) * 2);

	swap_task_map(current_task(), current_thread(), saved_map);

	return kr;
}
864 #endif /* CONFIG_MAP_RANGES */
865 
866 // Mach memory entry ownership
867 
868 extern kern_return_t
869 mach_memory_entry_ownership(
870 	ipc_port_t      entry_port,
871 	task_t          owner,
872 	int             ledger_tag,
873 	int             ledger_flags);
874 
875 static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused,int ledger_tag)876 call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
877 {
878 	mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
879 	kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, ledger_tag, 0);
880 	mach_memory_entry_port_release(mementry);
881 	return kr;
882 }
883 
884 static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused,int ledger_flag)885 call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
886 {
887 	mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
888 	kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, VM_LEDGER_TAG_DEFAULT, ledger_flag);
889 	mach_memory_entry_port_release(mementry);
890 	return kr;
891 }
892 
893 static inline void
check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr,mach_vm_size_t map_size,mach_vm_size_t invalid_initial_size)894 check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr, mach_vm_size_t map_size,
895     mach_vm_size_t invalid_initial_size)
896 {
897 	if (*kr == KERN_SUCCESS) {
898 		if (map_size == invalid_initial_size) {
899 			*kr = OUT_PARAM_BAD;
900 		}
901 	} else {
902 		if (map_size != 0) {
903 			*kr = OUT_PARAM_BAD;
904 		}
905 	}
906 }
907 
/*
 * Make a MAP_MEM_VM_SHARE memory entry backed by a fresh allocation, then
 * probe mach_memory_entry_map_size with the given start/size and verify the
 * map_size out-parameter was overwritten (or zeroed on failure).
 */
static kern_return_t
call_mach_memory_entry_map_size__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_port_t mementry;
	mach_vm_address_t addr;
	memory_object_size_t s = (memory_object_size_t)TEST_ALLOC_SIZE + 1;
	/*
	 * UNLIKELY_INITIAL_SIZE is guaranteed to never be the correct map_size
	 * from the mach_memory_entry_map_size calls we make. map_size should represent the size of the
	 * copy that would result, and UNLIKELY_INITIAL_SIZE is completely unrelated to the sizes we pass
	 * and not page aligned.
	 */
	mach_vm_size_t invalid_initial_size = UNLIKELY_INITIAL_SIZE;

	mach_vm_size_t map_size = invalid_initial_size;

	kern_return_t kr = mach_vm_allocate_kernel(map, &addr, s, FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
	assert(kr == 0);
	kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)addr, MAP_MEM_VM_SHARE, &mementry, MACH_PORT_NULL);
	assert(kr == 0);
	kr = mach_memory_entry_map_size(mementry, map, start, size, &map_size);
	check_mach_memory_entry_map_size_outparam_changes(&kr, map_size, invalid_initial_size);
	/* release the entry and backing allocation regardless of result */
	mach_memory_entry_port_release(mementry);
	(void)mach_vm_deallocate(map, addr, s);
	return kr;
}
934 
// Bundle of BSD-side objects backing an open file descriptor.
// (The forward declarations below avoid including bsd headers here.)
struct file_control_return {
	void * control;         // cast to memory_object_control_t when mapping
	void * fp;              // opaque file handle — presumably struct fileproc *; see bsd side
	void * vp;              // vnode pointer (passed to vnode_vid)
	int fd;                 // the file descriptor the objects came from
};
// Returns the pager control / file / vnode objects for fd (implemented in bsd).
struct file_control_return get_control_from_fd(int fd);
// Releases whatever references get_control_from_fd() took.
void cleanup_control_related_data(struct file_control_return info);
// Returns the vid of a vnode.
uint32_t vnode_vid(void * vp);
944 
/*
 * Validate the out-parameters of task_find_region_details against the
 * sentinel values they were initialized with (saved_*): on failure every
 * out-parameter must be zeroed; on success each must have been overwritten,
 * and vid must match the vnode returned in vp.
 */
static void
check_task_find_region_details_outparam_changes(int * result,
    uintptr_t vp, uintptr_t saved_vp,
    uint32_t vid,
    bool is_map_shared,
    uint64_t start, uint64_t saved_start,
    uint64_t len, uint64_t saved_len)
{
	// task_find_region_details returns a bool. 0 means failure, 1 success
	if (*result == 0) {
		// failure: all out-parameters must have been cleared
		if (vp != 0 || vid != 0 || is_map_shared != 0 || start != 0 || len != 0) {
			*result = OUT_PARAM_BAD;
		}
	} else {
		// success: sentinels must have been overwritten
		if (vp == saved_vp || start == saved_start || len == saved_len) {
			*result = OUT_PARAM_BAD;
		}
		if (vid != (uint32_t)vnode_vid((void *)vp)) {
			*result = OUT_PARAM_BAD;
		}
		// is_map_shared seems to check if the relevant entry is shadowed by another
		// we don't set up any shadow entries for this test
		if (is_map_shared) {
			// *result = OUT_PARAM_BAD;
		}
	}
}
972 
973 
/*
 * Call task_find_region_details(FIND_REGION_DETAILS_AT_OFFSET) at addr on
 * the supplied map (temporarily installed as the task's map) and validate
 * its out-parameters against sentinel initial values.
 */
static int
call_task_find_region_details(MAP_T map, mach_vm_address_t addr)
{
	(void) map;
	// sentinel initial values; the checker verifies they were overwritten
	uint64_t len = UNLIKELY_INITIAL_SIZE, start = UNLIKELY_INITIAL_ADDRESS;
	uint64_t saved_len = len, saved_start = start;
	bool is_map_shared = true;
	uintptr_t vp = (uintptr_t) INVALID_VNODE_PTR;
	uintptr_t saved_vp = vp;
	uint32_t vid = UNLIKELY_INITIAL_VID;

	/*
	 * task_find_region_details operates on task->map. Our setup code does allocations
	 * that otherwise could theoretically overwrite existing ones, so we don't want to
	 * operate on current_map
	 */
	vm_map_t saved_map = swap_task_map(current_task(), current_thread(), map);

	int kr = task_find_region_details(current_task(), addr, FIND_REGION_DETAILS_AT_OFFSET, &vp, &vid, &is_map_shared, &start, &len);

	swap_task_map(current_task(), current_thread(), saved_map);

	check_task_find_region_details_outparam_changes(&kr, vp, saved_vp, vid, is_map_shared, start, saved_start, len, saved_len);
	return kr;
}
999 
/*
 * Run `func` across the standard address trials, first covering each trial
 * address with a file-backed (vnode pager) mapping made from the test file
 * descriptor. Trials whose address cannot take a file mapping at all are
 * recorded as IGNORED rather than failed.
 */
static results_t * __attribute__((used))
test_kext_unix_with_allocated_vnode_addr(kern_return_t (*func)(MAP_T dst_map, mach_vm_address_t start), const char *testname)
{
	MAP_T map SMART_MAP;
	allocation_t base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	addr_trials_t *trials SMART_ADDR_TRIALS(base.addr);
	results_t *results = alloc_results(testname, eSMART_ADDR_TRIALS, base.addr, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		mach_vm_address_t addr = (mach_vm_address_t)trials->list[i].addr;

		// map the test file over the trial address (fixed-overwrite)
		int file_descriptor = get_globals()->file_descriptor;
		struct file_control_return control_info = get_control_from_fd(file_descriptor);
		vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_FIXED(.vmf_overwrite = true);
		kern_return_t kr = vm_map_enter_mem_object_control(map, &addr, TEST_ALLOC_SIZE, 0, vmk_flags, (memory_object_control_t) control_info.control, 0, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
		if (kr == KERN_INVALID_ARGUMENT) {
			// can't map a file at that address, so we can't pass
			// such a mapping to the function being tested
			append_result(results, IGNORED, trials->list[i].name);
			cleanup_control_related_data(control_info);
			continue;
		}
		assert(kr == KERN_SUCCESS);

		kern_return_t ret = func(map, addr);
		append_result(results, ret, trials->list[i].name);
		cleanup_control_related_data(control_info);
	}
	return results;
}
1030 
1031 extern uint64_t vm_reclaim_max_threshold;
1032 
1033 #if 0
1034 static kern_return_t
1035 test_mach_vm_deferred_reclamation_buffer_init(MAP_T map __unused, mach_vm_address_t address, mach_vm_size_t size)
1036 {
1037 	uint64_t vm_reclaim_max_threshold_orig = vm_reclaim_max_threshold;
1038 	kern_return_t kr = 0;
1039 
1040 	vm_reclaim_max_threshold = KB16;
1041 	kr = call_mach_vm_deferred_reclamation_buffer_init(current_task(), address, size);
1042 	vm_reclaim_max_threshold = vm_reclaim_max_threshold_orig;
1043 
1044 	return kr;
1045 }
1046 #endif
1047 
1048 
1049 // mach_make_memory_entry and variants
1050 
1051 static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr,mach_vm_size_t size,mach_port_t out_handle)1052 check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_vm_size_t size,
1053     mach_port_t out_handle)
1054 {
1055 	/*
1056 	 * mach_make_memory_entry overwrites *size to be 0 on failure.
1057 	 */
1058 	if (*kr != KERN_SUCCESS) {
1059 		if (size != 0) {
1060 			*kr = OUT_PARAM_BAD;
1061 		}
1062 		if (out_handle != 0) {
1063 			*kr = OUT_PARAM_BAD;
1064 		}
1065 	}
1066 }
1067 
/*
 * IMPL(FN, T) stamps out test wrappers for a mach_make_memory_entry-style
 * function FN whose size/offset arguments have type T:
 *   call_FN__start_size__{memonly,namedcreate,copy,share,namedreuse}
 *       one wrapper per MAP_MEM_* mode, with fixed VM_PROT_READ
 *   call_FN__vm_prot
 *       caller-supplied permission bits
 * Each wrapper creates a parent memory object, invokes FN, releases the
 * returned handle on success, releases the parent, and then validates the
 * out-parameters (io_size and out_handle must be zeroed on failure).
 */
#define IMPL(FN, T)                                                               \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size)                      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size)                  \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__copy(MAP_T map, T start, T size)                         \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__share(MAP_T map, T start, T size)            \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size)       \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}                                                                         \
                                                                                  \
	static kern_return_t                                                      \
	call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot)      \
	{                                                                         \
	        mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);          \
	        T io_size = size;                                                 \
	        mach_port_t invalid_handle_value = UNLIKELY_INITIAL_MACH_PORT;     \
	        mach_port_t out_handle = invalid_handle_value;                    \
	        kern_return_t kr = FN(map, &io_size, start,                       \
	                              prot, &out_handle, memobject); \
	        if (kr == 0) {                                                    \
	                if (out_handle) mach_memory_entry_port_release(out_handle); \
	        }                                                                 \
	        mach_memory_entry_port_release(memobject);                        \
	        check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle); \
	        return kr;                                                        \
	}
1170 
IMPL(mach_make_memory_entry_64,mach_vm_address_t)1171 IMPL(mach_make_memory_entry_64, mach_vm_address_t)
1172 IMPL(mach_make_memory_entry, vm_size_t)
1173 static kern_return_t
1174 mach_make_memory_entry_internal_retyped(
1175 	vm_map_t                target_map,
1176 	memory_object_size_t    *size,
1177 	memory_object_offset_t  offset,
1178 	vm_prot_t               permission,
1179 	ipc_port_t              *object_handle,
1180 	ipc_port_t              parent_handle)
1181 {
1182 	vm_named_entry_kernel_flags_t   vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
1183 	if (permission & MAP_MEM_LEDGER_TAGGED) {
1184 		vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
1185 	}
1186 	return mach_make_memory_entry_internal(target_map, size, offset, permission, vmne_kflags, object_handle, parent_handle);
1187 }
1188 IMPL(mach_make_memory_entry_internal_retyped, mach_vm_address_t)
1189 
1190 #undef IMPL
1191 
1192 // mach_vm_map/mach_vm_map_external/mach_vm_map_kernel/vm_map/vm_map_external infra
1193 
// Common signature covering mach_vm_map, vm_map, and their _external /
// _kernel variants, so the call_map_fn__* helpers below can drive any of
// them through a single map_fn_t pointer.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance);
1205 
1206 static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1207 call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1208 {
1209 	mach_vm_address_t out_addr = start;
1210 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1211 	    0, 0, 0, 0, 0, VM_INHERIT_NONE);
1212 	// fixed-overwrite with pre-existing allocation, don't deallocate
1213 	return kr;
1214 }
1215 
1216 static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1217 call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1218 {
1219 	mach_vm_address_t out_addr = start;
1220 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1221 	    0, 0, true, 0, 0, VM_INHERIT_NONE);
1222 	// fixed-overwrite with pre-existing allocation, don't deallocate
1223 	return kr;
1224 }
1225 
1226 static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1227 call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1228 {
1229 	mach_vm_address_t out_addr = start_hint;
1230 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1231 	if (kr == 0) {
1232 		(void)mach_vm_deallocate(map, out_addr, size);
1233 	}
1234 	return kr;
1235 }
1236 
1237 static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1238 call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1239 {
1240 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1241 	mach_vm_address_t out_addr = start;
1242 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1243 	    memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1244 	// fixed-overwrite with pre-existing allocation, don't deallocate
1245 	mach_memory_entry_port_release(memobject);
1246 	return kr;
1247 }
1248 
1249 static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1250 call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1251 {
1252 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1253 	mach_vm_address_t out_addr = start;
1254 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1255 	    memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1256 	// fixed-overwrite with pre-existing allocation, don't deallocate
1257 	mach_memory_entry_port_release(memobject);
1258 	return kr;
1259 }
1260 
1261 static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1262 call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1263 {
1264 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1265 	mach_vm_address_t out_addr = start_hint;
1266 	kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, memobject,
1267 	    KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1268 	if (kr == 0) {
1269 		(void)mach_vm_deallocate(map, out_addr, size);
1270 	}
1271 	mach_memory_entry_port_release(memobject);
1272 	return kr;
1273 }
1274 
1275 static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1276 helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1277 {
1278 	mach_port_t memobject = make_a_mem_object(obj_size);
1279 	mach_vm_address_t out_addr = start;
1280 	kern_return_t kr = fn(map, &out_addr, size, 0, flags, memobject,
1281 	    offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1282 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1283 	mach_memory_entry_port_release(memobject);
1284 	return kr;
1285 }
1286 
1287 static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1288 call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1289 {
1290 	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
1291 }
1292 
1293 static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1294 call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1295 {
1296 	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
1297 }
1298 
1299 static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1300 call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1301 {
1302 	return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
1303 }
1304 
1305 static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1306 help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1307 {
1308 	mach_vm_address_t out_addr = start;
1309 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1310 	    0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1311 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1312 	return kr;
1313 }
1314 
1315 static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1316 call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1317 {
1318 	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1319 }
1320 
1321 static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1322 call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1323 {
1324 	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1325 }
1326 
1327 static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1328 call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1329 {
1330 	return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1331 }
1332 
1333 static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1334 help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1335 {
1336 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1337 	mach_vm_address_t out_addr = start;
1338 	kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1339 	    memobject, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1340 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1341 	mach_memory_entry_port_release(memobject);
1342 	return kr;
1343 }
1344 
1345 static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1346 call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1347 {
1348 	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1349 }
1350 
1351 static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1352 call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1353 {
1354 	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1355 }
1356 
1357 static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1358 call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1359 {
1360 	return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1361 }
1362 
1363 static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1364 call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1365 {
1366 	kern_return_t kr = fn(map, start, size, 0, flags,
1367 	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1368 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1369 	return kr;
1370 }
1371 
1372 static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1373 call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1374 {
1375 	kern_return_t kr = fn(map, start, size, 0, flags,
1376 	    0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1377 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1378 	return kr;
1379 }
1380 
1381 static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1382 call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1383 {
1384 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1385 	kern_return_t kr = fn(map, start, size, 0, flags,
1386 	    memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1387 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1388 	mach_memory_entry_port_release(memobject);
1389 	return kr;
1390 }
1391 
1392 static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1393 call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1394 {
1395 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1396 	kern_return_t kr = fn(map, start, size, 0, flags,
1397 	    memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1398 	deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1399 	mach_memory_entry_port_release(memobject);
1400 	return kr;
1401 }
1402 
1403 static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1404 help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1405 {
1406 	mach_vm_address_t out_addr = 0;
1407 	kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1408 	    0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1409 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1410 	return kr;
1411 }
1412 
1413 static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1414 call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1415 {
1416 	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1417 }
1418 
1419 static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1420 call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1421 {
1422 	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1423 }
1424 
1425 static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1426 call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1427 {
1428 	return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1429 }
1430 
1431 static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1432 help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1433 {
1434 	mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1435 	mach_vm_address_t out_addr = 0;
1436 	kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1437 	    memobject, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1438 	deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1439 	mach_memory_entry_port_release(memobject);
1440 	return kr;
1441 }
1442 
1443 static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1444 call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1445 {
1446 	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1447 }
1448 
1449 static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1450 call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1451 {
1452 	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1453 }
1454 
1455 static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1456 call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1457 {
1458 	return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1459 }
1460 
1461 // wrappers
1462 
1463 kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1464 mach_vm_map_wrapped(vm_map_t target_task,
1465     mach_vm_address_t *address,
1466     mach_vm_size_t size,
1467     mach_vm_offset_t mask,
1468     int flags,
1469     mem_entry_name_port_t object,
1470     memory_object_offset_t offset,
1471     boolean_t copy,
1472     vm_prot_t cur_protection,
1473     vm_prot_t max_protection,
1474     vm_inherit_t inheritance)
1475 {
1476 	if (dealloc_would_time_out(*address, size, target_task)) {
1477 		return ACCEPTABLE;
1478 	}
1479 
1480 	mach_vm_address_t saved_addr = *address;
1481 	kern_return_t kr = mach_vm_map(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1482 	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1483 	return kr;
1484 }
1485 
1486 // missing forward declaration
1487 kern_return_t
1488 mach_vm_map_external(
1489 	vm_map_t                target_map,
1490 	mach_vm_offset_t        *address,
1491 	mach_vm_size_t          initial_size,
1492 	mach_vm_offset_t        mask,
1493 	int                     flags,
1494 	ipc_port_t              port,
1495 	vm_object_offset_t      offset,
1496 	boolean_t               copy,
1497 	vm_prot_t               cur_protection,
1498 	vm_prot_t               max_protection,
1499 	vm_inherit_t            inheritance);
1500 kern_return_t
mach_vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1501 mach_vm_map_external_wrapped(vm_map_t target_task,
1502     mach_vm_address_t *address,
1503     mach_vm_size_t size,
1504     mach_vm_offset_t mask,
1505     int flags,
1506     mem_entry_name_port_t object,
1507     memory_object_offset_t offset,
1508     boolean_t copy,
1509     vm_prot_t cur_protection,
1510     vm_prot_t max_protection,
1511     vm_inherit_t inheritance)
1512 {
1513 	if (dealloc_would_time_out(*address, size, target_task)) {
1514 		return ACCEPTABLE;
1515 	}
1516 
1517 	mach_vm_address_t saved_addr = *address;
1518 	kern_return_t kr = mach_vm_map_external(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1519 	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1520 	return kr;
1521 }
1522 
1523 kern_return_t
mach_vm_map_kernel_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1524 mach_vm_map_kernel_wrapped(vm_map_t target_task,
1525     mach_vm_address_t *address,
1526     mach_vm_size_t size,
1527     mach_vm_offset_t mask,
1528     int flags,
1529     mem_entry_name_port_t object,
1530     memory_object_offset_t offset,
1531     boolean_t copy,
1532     vm_prot_t cur_protection,
1533     vm_prot_t max_protection,
1534     vm_inherit_t inheritance)
1535 {
1536 	if (dealloc_would_time_out(*address, size, target_task)) {
1537 		return ACCEPTABLE;
1538 	}
1539 
1540 	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1541 
1542 	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1543 	mach_vm_address_t saved_addr = *address;
1544 	kern_return_t kr = mach_vm_map_kernel(target_task, address, size, mask, vmk_flags, object, offset, copy, cur_protection, max_protection, inheritance);
1545 	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
1546 	return kr;
1547 }
1548 
1549 static inline void
check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_address_t saved_start,int flags,MAP_T map)1550 check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr, mach_vm_address_t addr,
1551     mach_vm_address_t saved_start, int flags, MAP_T map)
1552 {
1553 	if (*kr == KERN_SUCCESS) {
1554 		if (is_fixed(flags)) {
1555 			if (addr != truncate_vm_map_addr_with_flags(map, saved_start, flags)) {
1556 				*kr = OUT_PARAM_BAD;
1557 			}
1558 		}
1559 	} else {
1560 		if (saved_start != addr) {
1561 			*kr = OUT_PARAM_BAD;
1562 		}
1563 	}
1564 }
1565 
1566 kern_return_t
vm_map_enter_mem_object_control_wrapped(vm_map_t target_map,mach_vm_address_t * address,mach_vm_size_t size,vm_map_offset_t mask,int flags,mem_entry_name_port_t object __unused,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1567 vm_map_enter_mem_object_control_wrapped(
1568 	vm_map_t                target_map,
1569 	mach_vm_address_t      *address,
1570 	mach_vm_size_t          size,
1571 	vm_map_offset_t         mask,
1572 	int                     flags,
1573 	mem_entry_name_port_t   object __unused,
1574 	memory_object_offset_t  offset,
1575 	boolean_t               copy,
1576 	vm_prot_t               cur_protection,
1577 	vm_prot_t               max_protection,
1578 	vm_inherit_t            inheritance)
1579 {
1580 	if (dealloc_would_time_out(*address, size, target_map)) {
1581 		return ACCEPTABLE;
1582 	}
1583 
1584 	vm_map_offset_t vmmaddr = (vm_map_offset_t) *address;
1585 	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1586 
1587 	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
1588 	int file_descriptor = get_globals()->file_descriptor;
1589 	struct file_control_return control_info = get_control_from_fd(file_descriptor);
1590 	kern_return_t kr = vm_map_enter_mem_object_control(target_map, &vmmaddr, size, mask, vmk_flags, (memory_object_control_t) control_info.control, offset, copy, cur_protection, max_protection, inheritance);
1591 	check_vm_map_enter_mem_object_control_outparam_changes(&kr, vmmaddr, *address, flags, target_map);
1592 
1593 	*address = vmmaddr;
1594 
1595 	cleanup_control_related_data(control_info);
1596 
1597 	return kr;
1598 }
1599 
1600 kern_return_t
vm_map_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1601 vm_map_wrapped(vm_map_t target_task,
1602     mach_vm_address_t *address,
1603     mach_vm_size_t size,
1604     mach_vm_offset_t mask,
1605     int flags,
1606     mem_entry_name_port_t object,
1607     memory_object_offset_t offset,
1608     boolean_t copy,
1609     vm_prot_t cur_protection,
1610     vm_prot_t max_protection,
1611     vm_inherit_t inheritance)
1612 {
1613 	if (dealloc_would_time_out(*address, size, target_task)) {
1614 		return ACCEPTABLE;
1615 	}
1616 
1617 	vm_address_t addr = (vm_address_t)*address;
1618 	kern_return_t kr = vm_map(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1619 	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1620 	*address = addr;
1621 	return kr;
1622 }
1623 
1624 kern_return_t
1625 vm_map_external(
1626 	vm_map_t                target_map,
1627 	vm_offset_t             *address,
1628 	vm_size_t               size,
1629 	vm_offset_t             mask,
1630 	int                     flags,
1631 	ipc_port_t              port,
1632 	vm_offset_t             offset,
1633 	boolean_t               copy,
1634 	vm_prot_t               cur_protection,
1635 	vm_prot_t               max_protection,
1636 	vm_inherit_t            inheritance);
1637 kern_return_t
vm_map_external_wrapped(vm_map_t target_task,mach_vm_address_t * address,mach_vm_size_t size,mach_vm_offset_t mask,int flags,mem_entry_name_port_t object,memory_object_offset_t offset,boolean_t copy,vm_prot_t cur_protection,vm_prot_t max_protection,vm_inherit_t inheritance)1638 vm_map_external_wrapped(vm_map_t target_task,
1639     mach_vm_address_t *address,
1640     mach_vm_size_t size,
1641     mach_vm_offset_t mask,
1642     int flags,
1643     mem_entry_name_port_t object,
1644     memory_object_offset_t offset,
1645     boolean_t copy,
1646     vm_prot_t cur_protection,
1647     vm_prot_t max_protection,
1648     vm_inherit_t inheritance)
1649 {
1650 	if (dealloc_would_time_out(*address, size, target_task)) {
1651 		return ACCEPTABLE;
1652 	}
1653 
1654 	vm_address_t addr = (vm_address_t)*address;
1655 	kern_return_t kr = vm_map_external(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
1656 	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
1657 	*address = addr;
1658 	return kr;
1659 }
1660 
1661 // implementations
1662 
/*
 * Adapter generators.  Each IMPL_* macro stamps out a thin
 * call_<map_fn>__<instance> wrapper that binds one concrete map function
 * (e.g. mach_vm_map_wrapped) to a call_map_fn__<instance> driver defined
 * above, giving the trial tables a uniform entry-point naming scheme.
 */

/* trial over an explicit (fixed start, size) pair */
#define IMPL_MAP_FN_START_SIZE(map_fn, instance)                                                \
    static kern_return_t                                                                        \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
    {                                                                                           \
	return call_map_fn__ ## instance(map_fn, map, start, size);                             \
    }

/* trial over (hint address, size) for "anywhere" placements */
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance)                                                      \
    static kern_return_t                                                                             \
    call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
    {                                                                                                \
	return call_map_fn__ ## instance(map_fn, map, start_hint, size);                             \
    }

/* trial over (start, size, object offset, object size) */
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance)                                                                                                                   \
    static kern_return_t                                                                                                                                                         \
    call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
    {                                                                                                                                                                            \
	return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size);                                                              \
    }

/* trial over (start, size, vm_inherit_t) */
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance)                                                                          \
    static kern_return_t                                                                                                          \
    call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
    {                                                                                                                             \
	return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit);                                         \
    }

/* trial over (start, size, raw vm flags) */
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance)                                                                 \
    static kern_return_t                                                                                               \
    call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
    {                                                                                                                  \
	return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags);                                  \
    }

/* trial over (cur, max) protection pairs */
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance)                                               \
    static kern_return_t                                                                       \
    call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
    {                                                                                          \
	return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max);               \
    }

/* full adapter set for one map function: every instance x trial shape */
#define IMPL(map_fn)                                                       \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere)                   \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere)                  \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed)      \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere)   \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed)             \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy)        \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere)          \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed)            \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy)       \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere)         \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate)                     \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy)                \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject)                    \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed)                     \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy)                \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere)                  \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed)                    \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy)               \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere)                 \

/* instantiate the adapters for every wrapped map flavor under test */
IMPL(mach_vm_map_wrapped)
IMPL(mach_vm_map_external_wrapped)
IMPL(mach_vm_map_kernel_wrapped)
IMPL(vm_map_wrapped)
IMPL(vm_map_external_wrapped)
IMPL(vm_map_enter_mem_object_control_wrapped)

#undef IMPL
1740 
/*
 * __attribute__((cleanup)) handler for the per-test thread context:
 * tears down the embedded thread test context when the variable in
 * vm_parameter_validation_kern_test() goes out of scope.
 */
static void
cleanup_context(vm_parameter_validation_kern_thread_context_t *ctx)
{
	thread_cleanup_test_context(&ctx->ttc);
}
1746 
1747 static results_t *
process_results(results_t * results)1748 process_results(results_t *results)
1749 {
1750 	if (get_globals()->generate_golden) {
1751 		return dump_golden_results(results);
1752 	} else {
1753 		return __dump_results(results);
1754 	}
1755 }
1756 
1757 static int
vm_parameter_validation_kern_test(int64_t in_value,int64_t * out_value)1758 vm_parameter_validation_kern_test(int64_t in_value, int64_t *out_value)
1759 {
1760 	// Copyin the arguments from userspace.
1761 	// Fail if the structure sizes don't match.
1762 	vm_parameter_validation_kern_args_t args;
1763 	if (copyin(in_value, &args, sizeof(args)) != 0 ||
1764 	    args.sizeof_args != sizeof(args)) {
1765 		*out_value = KERN_TEST_BAD_ARGS;
1766 		return 0;
1767 	}
1768 
1769 	// Use the thread test context to store our "global" variables.
1770 	vm_parameter_validation_kern_thread_context_t ctx
1771 	__attribute__((cleanup(cleanup_context))) = {
1772 		.ttc = {
1773 			.ttc_identity = test_identity_vm_parameter_validation_kern,
1774 			// - avoid panics for untagged wired memory (set to true during some tests)
1775 			// - clamp vm addresses before passing to pmap to avoid pmap panics
1776 			.test_option_vm_prevent_wire_tag_panic = false,
1777 			.test_option_vm_map_clamp_pmap_remove = true,
1778 		},
1779 		.output_buffer_start = args.output_buffer_address,
1780 		.output_buffer_cur = args.output_buffer_address,
1781 		.output_buffer_end = args.output_buffer_address + args.output_buffer_size,
1782 		.file_descriptor = (int)args.file_descriptor,
1783 		.generate_golden = args.generate_golden,
1784 	};
1785 	thread_set_test_context(&ctx.ttc);
1786 
1787 #if !CONFIG_SPTM && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)
1788 	if (get_globals()->generate_golden) {
1789 		// Some devices skip some trials to avoid timeouts.
1790 		// Golden files cannot be generated on these devices.
1791 		testprintf("Can't generate golden files on this device "
1792 		    "(PPL && (__ARM_42BIT_PA_SPACE__ || ARM_LARGE_MEMORY)). "
1793 		    "Try again on a different device.\n");
1794 		*out_value = KERN_TEST_FAILED;
1795 		return 0;
1796 	}
1797 #else
1798 #pragma clang diagnostic ignored "-Wunused-label"
1799 #endif
1800 
1801 	/*
1802 	 * -- memory entry functions --
1803 	 * The memory entry test functions use macros to generate each flavor of memory entry function.
1804 	 * For more context on why, see the matching comment in vm_parameter_validation.c
1805 	 */
1806 
1807 #define RUN_START_SIZE(fn, variant, name) dealloc_results(process_results(test_mach_with_allocated_start_size(call_ ## fn ## __start_size__ ## variant, name " (start/size)")))
1808 #define RUN_PROT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __vm_prot , name " (vm_prot_t)")))
1809 
1810 #define RUN_ALL(fn, name) \
1811 	RUN_START_SIZE(fn, copy, #name " (copy)"); \
1812 	RUN_START_SIZE(fn, memonly, #name " (memonly)"); \
1813 	RUN_START_SIZE(fn, namedcreate, #name " (namedcreate)"); \
1814 	RUN_START_SIZE(fn, share, #name " (share)"); \
1815 	RUN_START_SIZE(fn, namedreuse, #name " (namedreuse)"); \
1816 	RUN_PROT(fn, #name " (vm_prot_t)"); \
1817 
1818 	RUN_ALL(mach_make_memory_entry_64, mach_make_memory_entry_64);
1819 	RUN_ALL(mach_make_memory_entry, mach_make_memory_entry);
1820 	RUN_ALL(mach_make_memory_entry_internal_retyped, mach_make_memory_entry_internal);
1821 #undef RUN_ALL
1822 #undef RUN_START_SIZE
1823 #undef RUN_PROT
1824 
1825 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
1826 	RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
1827 #undef RUN
1828 
1829 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
1830 	RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
1831 #undef RUN
1832 
1833 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
1834 	RUN(call_mach_memory_entry_map_size__start_size, "mach_memory_entry_map_size");
1835 #undef RUN
1836 
1837 	/*
1838 	 * -- allocate/deallocate functions --
1839 	 */
1840 
1841 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
1842 	RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate_external (fixed) (realigned start/size)");
1843 	RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate_external (anywhere) (hint/size)");
1844 	RUN(call_mach_vm_allocate_kernel__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
1845 	RUN(call_mach_vm_allocate_kernel__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
1846 #undef RUN
1847 
1848 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1849 	RUN(call_mach_vm_allocate__flags, "mach_vm_allocate_external");
1850 	RUN(call_mach_vm_allocate_kernel__flags, "mach_vm_allocate_kernel");
1851 #undef RUN
1852 
1853 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_start_size(fn, name)))
1854 	RUN(call_vm_allocate__start_size_fixed, "vm_allocate (fixed) (realigned start/size)");
1855 	RUN(call_vm_allocate__start_size_anywhere, "vm_allocate (anywhere) (hint/size)");
1856 #undef RUN
1857 
1858 #define RUN(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1859 	RUN(call_vm_allocate__flags, "vm_allocate");
1860 #undef RUN
1861 	dealloc_results(process_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
1862 	dealloc_results(process_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
1863 
1864 	/*
1865 	 * -- map/remap functions --
1866 	 * These functions rely heavily on macros.
1867 	 * For more context on why, see the matching comment in vm_parameter_validation.c
1868 	 */
1869 
1870 	// map tests
1871 
1872 #define RUN_START_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
1873 #define RUN_HINT_SIZE(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
1874 #define RUN_PROT_PAIR(fn, name) dealloc_results(process_results(test_mach_vm_prot_pair(fn, name " (vm_prot_t pair)")))
1875 #define RUN_INHERIT(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
1876 #define RUN_FLAGS(fn, name) dealloc_results(process_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1877 #define RUN_SSOO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset_object(fn, name " (start/size/offset/object)")))
1878 
1879 #define RUN_ALL(fn, name)     \
1880 	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)");   \
1881 	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)");  \
1882 	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)");  \
1883 	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
1884 	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)");  \
1885 	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)");  \
1886 	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)");  \
1887 	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)");  \
1888 	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)");  \
1889 	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)");  \
1890 	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)");  \
1891 	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)");  \
1892 	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)");  \
1893 	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)");  \
1894 	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)");  \
1895 	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)");  \
1896 	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)");  \
1897 	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)");  \
1898 	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)");  \
1899 	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)");  \
1900 	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)");  \
1901 	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)");  \
1902 	RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)");  \
1903 	RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)");  \
1904 	RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)");  \
1905 
1906 	RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
1907 	RUN_ALL(mach_vm_map_external_wrapped, mach_vm_map_external);
1908 	RUN_ALL(mach_vm_map_kernel_wrapped, mach_vm_map_kernel);
1909 	RUN_ALL(vm_map_wrapped, vm_map);
1910 	RUN_ALL(vm_map_external_wrapped, vm_map_external);
1911 
// Like RUN_SSOO but without a memory-object port parameter sweep:
// only start/size/offset are varied.
#define RUN_SSO(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size_offset(fn, name " (start/size/offset)")))

// RUN_ALL_CTL: same variant fan-out as RUN_ALL above, but for the
// vm_map_enter_mem_object_control path, which takes a memory object
// control rather than a port (hence RUN_SSO instead of RUN_SSOO).
// NOTE: no inline comments inside the macro body -- a "//" comment would
// swallow the backslash-newline continuations.
#define RUN_ALL_CTL(fn, name)     \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)");   \
	RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)");  \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)");  \
	RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
	RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)");  \
	RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)");  \
	RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)");  \
	RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)");  \
	RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)");  \
	RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)");  \
	RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)");  \
	RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)");  \
	RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)");  \
	RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)");  \
	RUN_SSO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)");  \
	RUN_SSO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)");  \
	RUN_SSO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)");  \

	RUN_ALL_CTL(vm_map_enter_mem_object_control_wrapped, vm_map_enter_mem_object_control);

// Done with the map/enter tests; drop the helper macro names so the
// following sections can redefine them.
#undef RUN_ALL
#undef RUN_START_SIZE
#undef RUN_HINT_SIZE
#undef RUN_PROT_PAIR
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SSOO
#undef RUN_ALL_CTL
#undef RUN_SSO
1952 
	// remap tests

// FN_NAME pastes together the "call_<fn>__<variant>__<type>" wrapper name.
#define FN_NAME(fn, variant, type) call_ ## fn ## __  ## variant ## __ ## type
// RUN_HELPER runs one wrapper through the given harness and labels the
// result "<name> (<variant>) (<type_name>)".
#define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(process_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
#define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
#define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
#define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
#define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
#define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
#define RUN_SRC_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_allocated_src_unallocated_dst_size, fn, variant, src_dst_size, type_name, name)

// RUN_ALL: run one remap entry point through every variant/parameter
// combination.  `realigned` is a string-literal prefix that is spliced
// (by literal concatenation) into the src/size labels only; the
// fixed/fixed_copy dst/size labels are always "realigned dst/size".
#define RUN_ALL(fn, realigned, name)                                    \
	RUN_SRC_SIZE(fn, copy, realigned "src/size", name);             \
	RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name);           \
	RUN_DST_SIZE(fn, fixed, "realigned dst/size", name);            \
	RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name);       \
	RUN_DST_SIZE(fn, anywhere, "hint/size", name);                  \
	RUN_INHERIT(fn, fixed, name);                                   \
	RUN_INHERIT(fn, fixed_copy, name);                              \
	RUN_INHERIT(fn, anywhere, name);                                \
	RUN_FLAGS(fn, nocopy, name);                                    \
	RUN_FLAGS(fn, copy, name);                                      \
	RUN_PROT_PAIRS(fn, fixed, name);                                \
	RUN_PROT_PAIRS(fn, fixed_copy, name);                           \
	RUN_PROT_PAIRS(fn, anywhere, name);                             \
	RUN_SRC_DST_SIZE(fn, fixed, "src/dst/size", name);              \
	RUN_SRC_DST_SIZE(fn, fixed_copy, "src/dst/size", name);         \
	RUN_SRC_DST_SIZE(fn, anywhere, "src/dst/size", name);           \

	// mach_vm_remap labels its src sweep "realigned src/size";
	// mach_vm_remap_new_kernel passes an empty prefix argument.
	RUN_ALL(mach_vm_remap_wrapped_kern, "realigned ", mach_vm_remap);
	RUN_ALL(mach_vm_remap_new_kernel_wrapped, , mach_vm_remap_new_kernel);

#undef RUN_ALL
#undef RUN_HELPER
#undef RUN_SRC_SIZE
#undef RUN_DST_SIZE
#undef RUN_PROT_PAIRS
#undef RUN_INHERIT
#undef RUN_FLAGS
#undef RUN_SRC_DST_SIZE
1993 
1994 	/*
1995 	 * -- wire/unwire functions --
1996 	 * Some wire functions (vm_map_wire_and_extract, vm_map_wire_external, vm_map_wire_kernel)
1997 	 * are implemented with macros to avoid code duplication that would happen otherwise from the multiple
1998 	 * entrypoints, multiple params under test, and user/non user wired paths
1999 	 */
2000 
2001 #define RUN(fn, name) dealloc_results(process_results(test_kext_unix_with_allocated_start_size(fn, name " (start/size)")))
2002 	RUN(call_vslock, "vslock");
2003 	RUN(call_vsunlock_undirtied, "vsunlock (undirtied)");
2004 	RUN(call_vsunlock_dirtied, "vsunlock (dirtied)");
2005 #undef RUN
2006 
2007 #define RUN_PROT(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __prot__user_wired_ ## wired ## _, name " (vm_prot_t)")))
2008 #define RUN_START(fn, wired, name) dealloc_results(process_results(test_kext_tagged_with_allocated_addr(call_ ## fn ## __start__user_wired_ ## wired ## _, name " (addr)")))
2009 #define RUN_START_END(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_start_end(call_ ## fn ## __start_end__user_wired_ ## wired ## _, name " (start/end)")))
2010 #define RUN_TAG(fn, wired, name) dealloc_results(process_results(test_mach_with_allocated_tag(call_ ## fn ## __tag__user_wired_ ## wired ## _, name " (tag)")))
2011 
2012 #if XNU_PLATFORM_MacOSX
2013 // vm_map_wire_and_extract is implemented on macOS only
2014 
2015 #define RUN_ALL_WIRE_AND_EXTRACT(fn, name) \
2016 	RUN_PROT(fn, true, #name " (user wired)"); \
2017 	RUN_PROT(fn, false, #name " (non user wired)"); \
2018 	RUN_START(fn, true, #name " (user wired)"); \
2019 	RUN_START(fn, false, #name " (non user wired)");
2020 
2021 	RUN_ALL_WIRE_AND_EXTRACT(vm_map_wire_and_extract_retyped, vm_map_wire_and_extract);
2022 #undef RUN_ALL_WIRE_AND_EXTRACT
2023 #endif // XNU_PLATFORM_MacOSX
2024 
2025 #define RUN_ALL_WIRE_EXTERNAL(fn, name) \
2026 	RUN_PROT(fn, true, #name " (user wired)"); \
2027 	RUN_PROT(fn, false, #name " (non user wired))"); \
2028 	RUN_START_END(fn, true, #name " (user wired)"); \
2029 	RUN_START_END(fn, false, #name " (non user wired)");
2030 
2031 	RUN_ALL_WIRE_EXTERNAL(vm_map_wire_external_retyped, vm_map_wire_external);
2032 #undef RUN_ALL_WIRE_EXTERNAL
2033 
2034 #define RUN_ALL_WIRE_KERNEL(fn, name) \
2035 	RUN_PROT(fn, false, #name " (non user wired))"); \
2036 	RUN_PROT(fn, true, #name " (user wired)"); \
2037 	RUN_START_END(fn, true, #name " (user wired)"); \
2038 	RUN_START_END(fn, false, #name " (non user wired)"); \
2039 	RUN_TAG(fn, true, #name " (user wired)"); \
2040 	RUN_TAG(fn, false, #name " (non user wired)");
2041 
2042 	RUN_ALL_WIRE_KERNEL(vm_map_wire_kernel, vm_map_wire_kernel);
2043 #undef RUN_ALL_WIRE_KERNEL
2044 
2045 #undef RUN_PROT
2046 #undef RUN_START
2047 #undef RUN_START_END
2048 #undef RUN_TAG
2049 
2050 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_end(fn, name " (start/end)")))
2051 	RUN(call_vm_map_unwire_user_wired, "vm_map_unwire (user_wired)");
2052 	RUN(call_vm_map_unwire_non_user_wired, "vm_map_unwire (non user_wired)");
2053 #undef RUN
2054 
2055 #define RUN(fn, name) dealloc_results(process_results(test_with_int64(fn, name " (int64)")))
2056 	RUN(call_mach_vm_wire_level_monitor, "mach_vm_wire_level_monitor");
2057 #undef RUN
2058 
2059 	/*
2060 	 * -- copyin/copyout functions --
2061 	 */
2062 
2063 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2064 	RUN(call_vm_map_copyin, "vm_map_copyin");
2065 	RUN(call_mach_vm_read, "mach_vm_read");
2066 	// vm_map_copyin_common is covered well by the vm_map_copyin test
2067 	// RUN(call_vm_map_copyin_common, "vm_map_copyin_common");
2068 #undef RUN
2069 
2070 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr_of_size_n(fn, sizeof(uint32_t), name " (start)")))
2071 	RUN(call_copyoutmap_atomic32, "copyoutmap_atomic32");
2072 #undef RUN
2073 
2074 #define RUN(fn, name) dealloc_results(process_results(test_src_kerneldst_size(fn, name " (src/dst/size)")))
2075 	RUN(call_copyinmap, "copyinmap");
2076 	RUN(call_vm_map_read_user, "vm_map_read_user");
2077 #undef RUN
2078 
2079 #define RUN(fn, name) dealloc_results(process_results(test_kernelsrc_dst_size(fn, name " (src/dst/size)")))
2080 	RUN(call_vm_map_write_user, "vm_map_write_user");
2081 	RUN(call_copyoutmap, "copyoutmap");
2082 #undef RUN
2083 
2084 	dealloc_results(process_results(test_vm_map_copy_overwrite(call_vm_map_copy_overwrite_interruptible, "vm_map_copy_overwrite (start/size)")));
2085 
2086 	/*
2087 	 * -- protection functions --
2088 	 */
2089 
2090 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2091 	RUN(call_mach_vm_protect__start_size, "mach_vm_protect");
2092 	RUN(call_vm_protect__start_size, "vm_protect");
2093 	RUN(call_vm_map_protect__start_size__no_max, "vm_map_protect (no max)");
2094 	RUN(call_vm_map_protect__start_size__set_max, "vm_map_protect (set max)");
2095 #undef RUN
2096 
2097 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2098 	RUN(call_mach_vm_protect__vm_prot, "mach_vm_protect");
2099 	RUN(call_vm_protect__vm_prot, "vm_protect");
2100 	RUN(call_vm_map_protect__vm_prot__no_max, "vm_map_protect (no max)");
2101 	RUN(call_vm_map_protect__vm_prot__set_max, "vm_map_protect (set max)");
2102 #undef RUN
2103 
2104 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_start_size(fn, name " (start/size)")))
2105 	RUN(call_useracc__start_size, "useracc");
2106 #undef RUN
2107 #define RUN(fn, name) dealloc_results(process_results(test_unix_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
2108 	RUN(call_useracc__vm_prot, "useracc");
2109 #undef RUN
2110 
2111 	/*
2112 	 * -- madvise/behavior functions --
2113 	 */
2114 
2115 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
2116 	RUN(call_mach_vm_behavior_set__start_size__default, "mach_vm_behavior_set (VM_BEHAVIOR_DEFAULT)");
2117 	RUN(call_mach_vm_behavior_set__start_size__can_reuse, "mach_vm_behavior_set (VM_BEHAVIOR_CAN_REUSE)");
2118 #undef RUN
2119 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_vm_behavior_t(fn, name " (vm_behavior_t)")))
2120 	RUN(call_mach_vm_behavior_set__vm_behavior, "mach_vm_behavior_set");
2121 #undef RUN
2122 
2123 	/*
2124 	 * -- purgability/purgeability functions --
2125 	 */
2126 
2127 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_addr(fn, name " (addr)")))
2128 	RUN(call_vm_map_purgable_control__address__get, "vm_map_purgable_control (get)");
2129 	RUN(call_vm_map_purgable_control__address__purge_all, "vm_map_purgable_control (purge all)");
2130 #undef RUN
2131 
2132 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_purgeable_and_state(fn, name " (purgeable and state)")))
2133 	RUN(call_vm_map_purgable_control__purgeable_state, "vm_map_purgable_control");
2134 #undef RUN
2135 
2136 	/*
2137 	 * -- region info functions --
2138 	 */
2139 
2140 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
2141 	RUN(call_mach_vm_region, "mach_vm_region");
2142 	RUN(call_vm_region, "vm_region");
2143 #undef RUN
2144 
2145 	/*
2146 	 * -- page info functions --
2147 	 */
2148 
2149 #define RUN(fn, name) dealloc_results(process_results(test_mach_with_allocated_addr(fn, name " (addr)")))
2150 	RUN(call_vm_map_page_info, "vm_map_page_info");
2151 #undef RUN
2152 
2153 	/*
2154 	 * -- miscellaneous functions --
2155 	 */
2156 
2157 #if CONFIG_MAP_RANGES
2158 	dealloc_results(process_results(test_mach_vm_range_create(call_mach_vm_range_create, "mach_vm_range_create (start/size/start2/size2)")));
2159 #endif
2160 
2161 	dealloc_results(process_results(test_kext_unix_with_allocated_vnode_addr(call_task_find_region_details, "task_find_region_details (addr)")));
2162 
2163 	*out_value = KERN_TEST_SUCCESS;
2164 	return 0;
2165 }
2166 
// Register the kernel-side VM parameter validation test as a sysctl test.
// The "_v2" suffix is here because sysctl "vm_parameter_validation_kern" was an
// older version of this test that used incompatibly different sysctl parameters.
SYSCTL_TEST_REGISTER(vm_parameter_validation_kern_v2, vm_parameter_validation_kern_test);
2170