1 #include <sys/cdefs.h>
2 #include <kern/zalloc.h>
3
4 #include "vm_parameter_validation.h"
5
6 #pragma clang diagnostic ignored "-Wdeclaration-after-statement"
7 #pragma clang diagnostic ignored "-Wincompatible-function-pointer-types"
8 #pragma clang diagnostic ignored "-Wmissing-prototypes"
9 #pragma clang diagnostic ignored "-Wpedantic"
10 #pragma clang diagnostic ignored "-Wgcc-compat"
11
12 #pragma clang diagnostic ignored "-Wunused-variable"
13
14
// Kernel sysctl test prints its output into a userspace buffer.
// fixme these global variables prevent test concurrency

// Cursor into the userspace output buffer; advanced past each printed line.
static user_addr_t SYSCTL_OUTPUT_BUF;
// Exclusive end of the userspace output buffer (used as a bound check).
static user_addr_t SYSCTL_OUTPUT_END;

// This is a read/write fd passed from userspace.
// It's passed to make it easier for kernel tests to interact with a file.
static int file_descriptor;

// Output to create a golden test result in kern test, controlled by
// vm_parameter_validation_kern_golden=1
bool kernel_generate_golden = FALSE;
28
// vprintf() to a userspace buffer
// output is incremented to point at the new nul terminator
static void
user_vprintf(user_addr_t *output, user_addr_t output_end, const char *format, va_list args) __printflike(3, 0)
{
	extern int vsnprintf(char *, size_t, const char *, va_list) __printflike(3, 0);
	char linebuf[1024];
	size_t printed;

	// NOTE(review): vsnprintf returns int; a negative error return would
	// convert to a huge size_t and trip the assert below — assumed the
	// format strings used here are always valid.
	printed = vsnprintf(linebuf, sizeof(linebuf), format, args);
	// Reject truncated output instead of silently emitting it.
	assert(printed < sizeof(linebuf) - 1);
	// Line plus its nul terminator must fit in the remaining buffer.
	assert(*output + printed + 1 < output_end);
	copyout(linebuf, *output, printed + 1);
	// Advance past the text but not the nul, so the next line overwrites it.
	*output += printed;
}
44
// printf() into the userspace buffer at SYSCTL_OUTPUT_BUF (bounded by
// SYSCTL_OUTPUT_END), forwarding the varargs to user_vprintf().
void
testprintf(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	user_vprintf(&SYSCTL_OUTPUT_BUF, SYSCTL_OUTPUT_END, format, args);
	va_end(args);
}
53
54 // Utils
55
56 static mach_port_t
make_a_mem_object(vm_size_t size)57 make_a_mem_object(vm_size_t size)
58 {
59 ipc_port_t out_handle;
60 kern_return_t kr = mach_memory_object_memory_entry_64((host_t)1, /*internal=*/ true, size, VM_PROT_READ | VM_PROT_WRITE, 0, &out_handle);
61 assert(kr == 0);
62 return out_handle;
63 }
64
65 static mach_port_t
make_a_mem_entry(MAP_T map,vm_size_t size)66 make_a_mem_entry(MAP_T map, vm_size_t size)
67 {
68 mach_port_t port;
69 memory_object_size_t s = (memory_object_size_t)size;
70 kern_return_t kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)0, MAP_MEM_NAMED_CREATE | MAP_MEM_LEDGER_TAGGED, &port, MACH_PORT_NULL);
71 assert(kr == 0);
72 return port;
73 }
74
75 // Test functions
76
77 static results_t *
test_vm_map_copy_overwrite(kern_return_t (* func)(MAP_T dst_map,vm_map_copy_t copy,mach_vm_address_t start,mach_vm_size_t size),const char * testname)78 test_vm_map_copy_overwrite(kern_return_t (*func)(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t start, mach_vm_size_t size), const char * testname)
79 {
80 // source map: has an allocation bigger than our
81 // "reasonable" trial sizes, to copy from
82 MAP_T src_map SMART_MAP;
83 allocation_t src_alloc SMART_ALLOCATE_VM(src_map, TEST_ALLOC_SIZE, VM_PROT_READ);
84
85 // dest map: has an allocation bigger than our
86 // "reasonable" trial sizes, to copy-overwrite on
87 MAP_T dst_map SMART_MAP;
88 allocation_t dst_alloc SMART_ALLOCATE_VM(dst_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
89
90 // We test dst/size parameters.
91 // We don't test the contents of the vm_map_copy_t.
92 start_size_trials_t *trials SMART_START_SIZE_TRIALS(dst_alloc.addr);
93 results_t *results = alloc_results(testname, trials->count);
94
95 for (unsigned i = 0; i < trials->count; i++) {
96 start_size_trial_t trial = trials->list[i];
97
98 // Copy from the source.
99 vm_map_copy_t copy;
100 kern_return_t kr = vm_map_copyin(src_map, src_alloc.addr, src_alloc.size, false, ©);
101 assert(kr == 0);
102 assert(copy); // null copy won't exercise the sanitization path
103
104 // Copy-overwrite to the destination.
105 kern_return_t ret = func(dst_map, copy, trial.start, trial.size);
106
107 if (ret != KERN_SUCCESS) {
108 vm_map_copy_discard(copy);
109 }
110 append_result(results, ret, trial.name);
111 }
112 return results;
113 }
114
/*
 * This function temporarily allocates a writeable allocation in kernel_map, and a read only allocation in a temporary map.
 * It's used to test a function such as vm_map_read_user which copies in data to a kernel pointer that must be writeable.
 */
static results_t *
test_src_kerneldst_size(kern_return_t (*func)(MAP_T map, vm_map_offset_t src, void * dst, vm_size_t length), const char * testname)
{
	// SMART_ macros attach cleanup, so both maps/allocations are torn
	// down automatically when this function returns.
	MAP_T map SMART_MAP;
	// src: read-only allocation in the temporary map
	allocation_t src_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_READ);
	// dst: writeable allocation in kernel_map
	allocation_t dst_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
	results_t *results = alloc_results(testname, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		src_dst_size_trial_t trial = trials->list[i];
		// Rebase the trial's relative src/dst onto the real allocations.
		trial = slide_trial_src(trial, src_base.addr);
		trial = slide_trial_dst(trial, dst_base.addr);
		int ret = func(map, trial.src, (void *)trial.dst, trial.size);
		append_result(results, ret, trial.name);
	}
	return results;
}
137
/*
 * This function temporarily allocates a read only allocation in kernel_map, and a writeable allocation in a temporary map.
 * It's used to test a function such as vm_map_write_user which copies data from a kernel pointer to a writeable userspace address.
 */
static results_t *
test_kernelsrc_dst_size(kern_return_t (*func)(MAP_T map, void *src, vm_map_offset_t dst, vm_size_t length), const char * testname)
{
	// SMART_ macros attach cleanup, so both maps/allocations are torn
	// down automatically when this function returns.
	MAP_T map SMART_MAP;
	// src: read-only allocation in kernel_map
	allocation_t src_base SMART_ALLOCATE_VM(kernel_map, TEST_ALLOC_SIZE, VM_PROT_READ);
	// dst: writeable allocation in the temporary map
	allocation_t dst_base SMART_ALLOCATE_VM(map, TEST_ALLOC_SIZE, VM_PROT_DEFAULT);
	src_dst_size_trials_t * trials SMART_SRC_DST_SIZE_TRIALS();
	results_t *results = alloc_results(testname, trials->count);

	for (unsigned i = 0; i < trials->count; i++) {
		src_dst_size_trial_t trial = trials->list[i];
		// Rebase the trial's relative src/dst onto the real allocations.
		trial = slide_trial_src(trial, src_base.addr);
		trial = slide_trial_dst(trial, dst_base.addr);
		int ret = func(map, (void *)trial.src, trial.dst, trial.size);
		append_result(results, ret, trial.name);
	}
	return results;
}
160
161
162 /////////////////////////////////////////////////////
163 // Mach tests
164
165
166 static kern_return_t
call_mach_vm_read(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)167 call_mach_vm_read(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
168 {
169 vm_offset_t out_addr;
170 mach_msg_type_number_t out_size;
171 kern_return_t kr = mach_vm_read(map, start, size, &out_addr, &out_size);
172 if (kr == 0) {
173 // we didn't call through MIG so out_addr is really a vm_map_copy_t
174 vm_map_copy_discard((vm_map_copy_t)out_addr);
175 }
176 return kr;
177 }
178
179 static inline void
check_vm_map_copyin_outparam_changes(kern_return_t * kr,vm_map_copy_t copy,vm_map_copy_t saved_copy)180 check_vm_map_copyin_outparam_changes(kern_return_t * kr, vm_map_copy_t copy, vm_map_copy_t saved_copy)
181 {
182 if (*kr == KERN_SUCCESS) {
183 if (copy == saved_copy) {
184 *kr = OUT_PARAM_BAD;
185 }
186 } else {
187 if (copy != saved_copy) {
188 *kr = OUT_PARAM_BAD;
189 }
190 }
191 }
192
193 static kern_return_t
call_vm_map_copyin(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)194 call_vm_map_copyin(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
195 {
196 vm_map_copy_t invalid_initial_value = INVALID_INITIAL_COPY;
197 vm_map_copy_t copy = invalid_initial_value;
198 kern_return_t kr = vm_map_copyin(map, start, size, false, ©);
199 if (kr == 0) {
200 vm_map_copy_discard(copy);
201 }
202 check_vm_map_copyin_outparam_changes(&kr, copy, invalid_initial_value);
203 return kr;
204 }
205
206 static kern_return_t
call_copyoutmap_atomic32(MAP_T map,vm_map_offset_t addr)207 call_copyoutmap_atomic32(MAP_T map, vm_map_offset_t addr)
208 {
209 uint32_t data = 0;
210 kern_return_t kr = copyoutmap_atomic32(map, data, addr);
211 return kr;
212 }
213
214
215 static kern_return_t
call_mach_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)216 call_mach_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
217 {
218 mach_vm_address_t saved_start = *start;
219 kern_return_t kr = mach_vm_allocate_external(map, start, size, flags);
220 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
221 return kr;
222 }
223
224 static kern_return_t
call_mach_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)225 call_mach_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
226 {
227 mach_vm_address_t saved_start = *start;
228 kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_FIXED);
229 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
230 return kr;
231 }
232
233 static kern_return_t
call_mach_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)234 call_mach_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
235 {
236 mach_vm_address_t saved_start = *start;
237 kern_return_t kr = mach_vm_allocate_external(map, start, size, VM_FLAGS_ANYWHERE);
238 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
239 return kr;
240 }
241
242 static kern_return_t
call_mach_vm_allocate_kernel__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)243 call_mach_vm_allocate_kernel__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
244 {
245 mach_vm_address_t saved_start = *start;
246 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
247 FLAGS_AND_TAG(flags, VM_KERN_MEMORY_OSFMK));
248 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
249 return kr;
250 }
251
252 static kern_return_t
call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)253 call_mach_vm_allocate_kernel__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
254 {
255 mach_vm_address_t saved_start = *start;
256 mach_vm_address_t minus_two_kb16 = -2 * KB16;
257
258 if (*start + size >= minus_two_kb16) {
259 // Allocation actually works fine here. Deallocation does not.
260 // It triggers a end < start assertion in pmap. Seems like some offset is added to the end of the region, which is -KB16 in these cases which overflows.
261 return PANIC;
262 }
263 mach_vm_address_t before = *start;
264
265 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
266 FLAGS_AND_TAG(VM_FLAGS_FIXED, VM_KERN_MEMORY_OSFMK));
267 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
268
269
270 return kr;
271 }
272
273 static kern_return_t
call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)274 call_mach_vm_allocate_kernel__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
275 {
276 mach_vm_address_t saved_start = *start;
277 mach_vm_address_t minus_two_kb16 = -2 * KB16;
278 if (*start + size >= minus_two_kb16) {
279 // Allocation actually works fine here. Deallocation does not.
280 // It triggers a end < start assertion in pmap. Seems like some offset is added to the end of the region, which is -KB16 in these cases which overflows.
281 return PANIC;
282 }
283 kern_return_t kr = mach_vm_allocate_kernel(map, start, size,
284 FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
285 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
286 return kr;
287 }
288
289
290
291 static kern_return_t
call_vm_allocate__flags(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)292 call_vm_allocate__flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
293 {
294 mach_vm_address_t saved_start = *start;
295 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, flags);
296 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, flags, map);
297 return kr;
298 }
299
300 static kern_return_t
call_vm_allocate__start_size_fixed(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)301 call_vm_allocate__start_size_fixed(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
302 {
303 mach_vm_address_t saved_start = *start;
304 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_FIXED);
305 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_FIXED, map);
306 return kr;
307 }
308
309 static kern_return_t
call_vm_allocate__start_size_anywhere(MAP_T map,mach_vm_address_t * start,mach_vm_size_t size)310 call_vm_allocate__start_size_anywhere(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size)
311 {
312 mach_vm_address_t saved_start = *start;
313 kern_return_t kr = vm_allocate(map, (vm_address_t *) start, (vm_size_t) size, VM_FLAGS_ANYWHERE);
314 check_mach_vm_allocate_outparam_changes(&kr, *start, size, saved_start, VM_FLAGS_ANYWHERE, map);
315 return kr;
316 }
317
318 static kern_return_t
call_mach_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)319 call_mach_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
320 {
321 kern_return_t kr = mach_vm_deallocate(map, start, size);
322 return kr;
323 }
324
325 static kern_return_t
call_vm_deallocate(MAP_T map,mach_vm_address_t start,mach_vm_size_t size)326 call_vm_deallocate(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
327 {
328 kern_return_t kr = vm_deallocate(map, (vm_address_t) start, (vm_size_t) size);
329 return kr;
330 }
331
332 // Including sys/systm.h caused things to blow up
333 int vslock(user_addr_t addr, user_size_t len);
334 int vsunlock(user_addr_t addr, user_size_t len, int dirtied);
335 static int
call_vslock(void * start,size_t size)336 call_vslock(void * start, size_t size)
337 {
338 int kr = vslock((user_addr_t) start, (user_size_t) size);
339 if (kr == KERN_SUCCESS) {
340 (void) vsunlock((user_addr_t) start, (user_size_t) size, 0);
341 }
342
343 return kr;
344 }
345
346 static int
call_vsunlock_undirtied(void * start,size_t size)347 call_vsunlock_undirtied(void * start, size_t size)
348 {
349 int kr = vslock((user_addr_t) start, (user_size_t) size);
350 if (kr == EINVAL) {
351 // Invalid vslock arguments should also be
352 // invalid vsunlock arguments. Test it.
353 } else if (kr != KERN_SUCCESS) {
354 // vslock failed, and vsunlock of non-locked memory panics
355 return PANIC;
356 }
357 kr = vsunlock((user_addr_t) start, (user_size_t) size, 0);
358 return kr;
359 }
360
361 static int
call_vsunlock_dirtied(void * start,size_t size)362 call_vsunlock_dirtied(void * start, size_t size)
363 {
364 int kr = vslock((user_addr_t) start, (user_size_t) size);
365 if (kr == EINVAL) {
366 // Invalid vslock arguments should also be
367 // invalid vsunlock arguments. Test it.
368 } else if (kr != KERN_SUCCESS) {
369 // vslock failed, and vsunlock of non-locked memory panics
370 return PANIC;
371 }
372 kr = vsunlock((user_addr_t) start, (user_size_t) size, 1);
373 return kr;
374 }
375
376 #if XNU_PLATFORM_MacOSX
377 // vm_map_wire_and_extract() implemented on macOS only
378
379 static inline void
check_vm_map_wire_and_extract_out_params_changes(kern_return_t * kr,ppnum_t physpage)380 check_vm_map_wire_and_extract_out_params_changes(kern_return_t * kr, ppnum_t physpage)
381 {
382 if (*kr != KERN_SUCCESS) {
383 if (physpage != 0) {
384 *kr = OUT_PARAM_BAD;
385 }
386 }
387 }
388
389
390 static kern_return_t
call_vm_map_wire_and_extract_user_wired(MAP_T map,mach_vm_address_t start)391 call_vm_map_wire_and_extract_user_wired(MAP_T map, mach_vm_address_t start)
392 {
393 if (will_wire_function_panic_due_to_alignment(start, start + VM_MAP_PAGE_SIZE(map))) {
394 return PANIC;
395 }
396 if (will_wire_function_panic_due_to_vm_tag(start)) {
397 return BUSTED;
398 }
399
400 ppnum_t physpage = INVALID_INITIAL_PPNUM;
401 kern_return_t kr = vm_map_wire_and_extract(map, start, VM_PROT_DEFAULT, TRUE, &physpage);
402 check_vm_map_wire_and_extract_out_params_changes(&kr, physpage);
403 return kr;
404 }
405
406 static kern_return_t
call_vm_map_wire_and_extract_non_user_wired(MAP_T map,mach_vm_address_t start)407 call_vm_map_wire_and_extract_non_user_wired(MAP_T map, mach_vm_address_t start)
408 {
409 if (will_wire_function_panic_due_to_alignment(start, start + VM_MAP_PAGE_SIZE(map))) {
410 return PANIC;
411 }
412 if (will_wire_function_panic_due_to_vm_tag(start)) {
413 return BUSTED;
414 }
415 ppnum_t physpage = INVALID_INITIAL_PPNUM;
416 kern_return_t kr = vm_map_wire_and_extract(map, start, VM_PROT_DEFAULT, FALSE, &physpage);
417 check_vm_map_wire_and_extract_out_params_changes(&kr, physpage);
418 return kr;
419 }
420
421 static kern_return_t
call_vm_map_wire_and_extract_vm_prot_t_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)422 call_vm_map_wire_and_extract_vm_prot_t_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
423 {
424 (void) size;
425 if (will_wire_function_panic_due_to_alignment(start, start + VM_MAP_PAGE_SIZE(map))) {
426 return PANIC;
427 }
428 if (will_wire_function_panic_due_to_vm_tag(start)) {
429 return BUSTED;
430 }
431
432 ppnum_t physpage = INVALID_INITIAL_PPNUM;
433 kern_return_t kr = vm_map_wire_and_extract(map, start, prot, TRUE, &physpage);
434 check_vm_map_wire_and_extract_out_params_changes(&kr, physpage);
435 return kr;
436 }
437
438 static kern_return_t
call_vm_map_wire_and_extract_vm_prot_t_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)439 call_vm_map_wire_and_extract_vm_prot_t_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
440 {
441 (void) size;
442 if (will_wire_function_panic_due_to_alignment(start, start + VM_MAP_PAGE_SIZE(map))) {
443 return PANIC;
444 }
445 if (will_wire_function_panic_due_to_vm_tag(start)) {
446 return BUSTED;
447 }
448
449 ppnum_t physpage = INVALID_INITIAL_PPNUM;
450 kern_return_t kr = vm_map_wire_and_extract(map, start, prot, FALSE, &physpage);
451 check_vm_map_wire_and_extract_out_params_changes(&kr, physpage);
452 return kr;
453 }
454
455 #endif // XNU_PLATFORM_MacOSX
456
457 extern kern_return_t vm_map_wire_external(
458 vm_map_t map,
459 vm_map_offset_t start,
460 vm_map_offset_t end,
461 vm_prot_t access_type,
462 boolean_t user_wire);
463
464 static kern_return_t
call_vm_map_wire_external_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)465 call_vm_map_wire_external_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
466 {
467 if (will_wire_function_panic_due_to_alignment(start, end)) {
468 return PANIC;
469 }
470 if (will_wire_function_panic_due_to_vm_tag(start)) {
471 return BUSTED;
472 }
473
474 kern_return_t kr = vm_map_wire_external(map, start, end, VM_PROT_DEFAULT, TRUE);
475 return kr;
476 }
477
478 static kern_return_t
call_vm_map_wire_external_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)479 call_vm_map_wire_external_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
480 {
481 if (will_wire_function_panic_due_to_alignment(start, end)) {
482 return PANIC;
483 }
484 if (will_wire_function_panic_due_to_vm_tag(start)) {
485 return BUSTED;
486 }
487
488 kern_return_t kr = vm_map_wire_external(map, start, end, VM_PROT_DEFAULT, FALSE);
489 if (kr == KERN_SUCCESS) {
490 (void) vm_map_unwire(map, start, end, FALSE);
491 }
492 return kr;
493 }
494
495 static kern_return_t
call_vm_map_wire_kernel_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)496 call_vm_map_wire_kernel_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
497 {
498 if (will_wire_function_panic_due_to_alignment(start, end)) {
499 return PANIC;
500 }
501 kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, TRUE);
502 return kr;
503 }
504
505 static kern_return_t
call_vm_map_wire_kernel_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)506 call_vm_map_wire_kernel_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
507 {
508 if (will_wire_function_panic_due_to_alignment(start, end)) {
509 return PANIC;
510 }
511 kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, FALSE);
512 if (kr == KERN_SUCCESS) {
513 (void) vm_map_unwire(map, start, end, FALSE);
514 }
515 return kr;
516 }
517
518 static kern_return_t
call_vm_map_wire_external_vm_prot_t_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)519 call_vm_map_wire_external_vm_prot_t_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
520 {
521 mach_vm_address_t end;
522 if (__builtin_add_overflow(start, size, &end)) {
523 return BUSTED;
524 }
525
526 if (will_wire_function_panic_due_to_alignment(start, end)) {
527 return PANIC;
528 }
529 if (will_wire_function_panic_due_to_vm_tag(start)) {
530 return BUSTED;
531 }
532
533
534 ppnum_t physpage;
535 kern_return_t kr = vm_map_wire_external(map, start, end, prot, TRUE);
536 return kr;
537 }
538
539 static kern_return_t
call_vm_map_wire_external_vm_prot_t_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)540 call_vm_map_wire_external_vm_prot_t_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
541 {
542 mach_vm_address_t end;
543 if (__builtin_add_overflow(start, size, &end)) {
544 return BUSTED;
545 }
546 if (will_wire_function_panic_due_to_alignment(start, end)) {
547 return PANIC;
548 }
549 if (will_wire_function_panic_due_to_vm_tag(start)) {
550 return BUSTED;
551 }
552
553
554 ppnum_t physpage;
555 kern_return_t kr = vm_map_wire_external(map, start, end, prot, FALSE);
556 if (kr == KERN_SUCCESS) {
557 (void) vm_map_unwire(map, start, end, FALSE);
558 }
559 return kr;
560 }
561
562 static kern_return_t
call_vm_map_wire_kernel_vm_prot_t_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)563 call_vm_map_wire_kernel_vm_prot_t_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
564 {
565 mach_vm_address_t end;
566 if (__builtin_add_overflow(start, size, &end)) {
567 return BUSTED;
568 }
569 if (will_wire_function_panic_due_to_alignment(start, end)) {
570 return PANIC;
571 }
572
573 ppnum_t physpage;
574 kern_return_t kr = vm_map_wire_kernel(map, start, end, prot, VM_KERN_MEMORY_OSFMK, TRUE);
575 return kr;
576 }
577
578 static kern_return_t
call_vm_map_wire_kernel_vm_prot_t_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_prot_t prot)579 call_vm_map_wire_kernel_vm_prot_t_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_prot_t prot)
580 {
581 mach_vm_address_t end;
582 if (__builtin_add_overflow(start, size, &end)) {
583 return BUSTED;
584 }
585 if (will_wire_function_panic_due_to_alignment(start, end)) {
586 return PANIC;
587 }
588
589 ppnum_t physpage;
590 kern_return_t kr = vm_map_wire_kernel(map, start, end, prot, VM_KERN_MEMORY_OSFMK, FALSE);
591 if (kr == KERN_SUCCESS) {
592 (void) vm_map_unwire(map, start, end, FALSE);
593 }
594 return kr;
595 }
596
597
598 static kern_return_t
call_vm_map_kernel_tag_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end,vm_tag_t tag)599 call_vm_map_kernel_tag_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end, vm_tag_t tag)
600 {
601 if (will_wire_function_panic_due_to_alignment(start, end)) {
602 return PANIC;
603 }
604 if (tag == VM_KERN_MEMORY_NONE) {
605 return PANIC;
606 }
607 kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, tag, TRUE);
608 if (kr == KERN_SUCCESS) {
609 (void) vm_map_unwire(map, start, end, TRUE);
610 }
611 return kr;
612 }
613
614 static kern_return_t
call_vm_map_kernel_tag_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end,vm_tag_t tag)615 call_vm_map_kernel_tag_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end, vm_tag_t tag)
616 {
617 if (will_wire_function_panic_due_to_alignment(start, end)) {
618 return PANIC;
619 }
620 if (tag == VM_KERN_MEMORY_NONE) {
621 return PANIC;
622 }
623 kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, tag, FALSE);
624 if (kr == KERN_SUCCESS) {
625 (void) vm_map_unwire(map, start, end, FALSE);
626 }
627 return kr;
628 }
629
630
631 static kern_return_t
call_mach_vm_wire_level_monitor(int64_t requested_pages)632 call_mach_vm_wire_level_monitor(int64_t requested_pages)
633 {
634 kern_return_t kr = mach_vm_wire_level_monitor(requested_pages);
635 return kr;
636 }
637
638 static kern_return_t
call_vm_map_unwire_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)639 call_vm_map_unwire_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
640 {
641 if (will_wire_function_panic_due_to_alignment(start, end)) {
642 return PANIC;
643 }
644
645 kern_return_t kr = vm_map_unwire(map, start, end, TRUE);
646 return kr;
647 }
648
649
650 static kern_return_t
call_vm_map_unwire_non_user_wired(MAP_T map,mach_vm_address_t start,mach_vm_address_t end)651 call_vm_map_unwire_non_user_wired(MAP_T map, mach_vm_address_t start, mach_vm_address_t end)
652 {
653 if (will_wire_function_panic_due_to_alignment(start, end)) {
654 return PANIC;
655 }
656
657 kern_return_t kr = vm_map_wire_kernel(map, start, end, VM_PROT_DEFAULT, VM_KERN_MEMORY_OSFMK, FALSE);
658 if (kr) {
659 return PANIC;
660 }
661 kr = vm_map_unwire(map, start, end, FALSE);
662 return kr;
663 }
664
665 #ifndef __x86_64__
666 extern const vm_map_address_t physmap_base;
667 extern const vm_map_address_t physmap_end;
668 #endif
669
/*
 * This function duplicates the panicking checks done in copy_validate.
 * size==0 is returned as success earlier in copyin/out than copy_validate is called, so we ignore that case.
 */
static bool
will_copyio_panic_in_copy_validate(void *kernel_addr, vm_size_t size)
{
	if (size == 0) {
		return false;
	}
	// Copies larger than the panic limit always panic.
	// NOTE(review): copysize_limit_panic is declared int while size is
	// unsigned; the comparison promotes the int — assumed non-negative.
	extern const int copysize_limit_panic;
	if (size > copysize_limit_panic) {
		return true;
	}

	/*
	 * copyio is architecture specific and has different checks per arch.
	 */
#ifdef __x86_64__
	// x86: kernel pointer must be at or above the kernel/kext base.
	if ((vm_offset_t) kernel_addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		return true;
	}
#else /* not __x86_64__ */
	// Other archs: the whole [addr, addr+size] span must sit inside the
	// kernel VA range or the physmap, with no address-space wraparound.
	uintptr_t kernel_addr_last;
	if (os_add_overflow((uintptr_t) kernel_addr, size, &kernel_addr_last)) {
		return true;
	}

	bool in_kva = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= VM_MIN_KERNEL_ADDRESS) &&
	    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= VM_MAX_KERNEL_ADDRESS);
	bool in_physmap = (VM_KERNEL_STRIP_UPTR(kernel_addr) >= physmap_base) &&
	    (VM_KERNEL_STRIP_UPTR(kernel_addr_last) <= physmap_end);

	if (!(in_kva || in_physmap)) {
		return true;
	}
#endif /* not __x86_64__ */

	return false;
}
710
711 static kern_return_t
call_copyinmap(MAP_T map,vm_map_offset_t fromaddr,void * todata,vm_size_t length)712 call_copyinmap(MAP_T map, vm_map_offset_t fromaddr, void * todata, vm_size_t length)
713 {
714 if (will_copyio_panic_in_copy_validate(todata, length)) {
715 return PANIC;
716 }
717
718 kern_return_t kr = copyinmap(map, fromaddr, todata, length);
719 return kr;
720 }
721
722 static kern_return_t
call_copyoutmap(MAP_T map,void * fromdata,vm_map_offset_t toaddr,vm_size_t length)723 call_copyoutmap(MAP_T map, void * fromdata, vm_map_offset_t toaddr, vm_size_t length)
724 {
725 if (will_copyio_panic_in_copy_validate(fromdata, length)) {
726 return PANIC;
727 }
728
729 kern_return_t kr = copyoutmap(map, fromdata, toaddr, length);
730 return kr;
731 }
732
733 static kern_return_t
call_vm_map_read_user(MAP_T map,vm_map_address_t src_addr,void * ptr,vm_size_t size)734 call_vm_map_read_user(MAP_T map, vm_map_address_t src_addr, void * ptr, vm_size_t size)
735 {
736 if (will_copyio_panic_in_copy_validate(ptr, size)) {
737 return PANIC;
738 }
739
740 kern_return_t kr = vm_map_read_user(map, src_addr, ptr, size);
741 return kr;
742 }
743
744 static kern_return_t
call_vm_map_write_user(MAP_T map,void * ptr,vm_map_address_t dst_addr,vm_size_t size)745 call_vm_map_write_user(MAP_T map, void * ptr, vm_map_address_t dst_addr, vm_size_t size)
746 {
747 if (will_copyio_panic_in_copy_validate(ptr, size)) {
748 return PANIC;
749 }
750
751 kern_return_t kr = vm_map_write_user(map, ptr, dst_addr, size);
752 return kr;
753 }
754
755 static kern_return_t
call_vm_map_copyout(MAP_T dst_map,vm_map_copy_t copy)756 call_vm_map_copyout(MAP_T dst_map, vm_map_copy_t copy)
757 {
758 // save this value because `copy` is destroyed by vm_map_copyout_size()
759 mach_vm_size_t copy_size = copy ? copy->size : 0;
760 vm_map_address_t dst_addr;
761 kern_return_t kr = vm_map_copyout(dst_map, &dst_addr, copy);
762 if (kr == KERN_SUCCESS) {
763 if (copy != NULL) {
764 (void) mach_vm_deallocate(dst_map, dst_addr, copy_size);
765 }
766 }
767 return kr;
768 }
769
770 static kern_return_t
call_vm_map_copyout_size(MAP_T dst_map,vm_map_copy_t copy,mach_vm_size_t size)771 call_vm_map_copyout_size(MAP_T dst_map, vm_map_copy_t copy, mach_vm_size_t size)
772 {
773 // save this value because `copy` is destroyed by vm_map_copyout_size()
774 mach_vm_size_t copy_size = copy ? copy->size : 0;
775 vm_map_address_t dst_addr;
776 kern_return_t kr = vm_map_copyout_size(dst_map, &dst_addr, copy, size);
777 if (kr == KERN_SUCCESS) {
778 if (copy != NULL) {
779 (void) mach_vm_deallocate(dst_map, dst_addr, copy_size);
780 }
781 }
782 return kr;
783 }
784
785 static kern_return_t
call_vm_map_copy_overwrite_interruptible(MAP_T dst_map,vm_map_copy_t copy,mach_vm_address_t dst_addr,mach_vm_size_t copy_size)786 call_vm_map_copy_overwrite_interruptible(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t dst_addr, mach_vm_size_t copy_size)
787 {
788 kern_return_t kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, copy_size, TRUE);
789 return kr;
790 }
791
792 static kern_return_t
call_vm_map_copy_overwrite_non_interruptible(MAP_T dst_map,vm_map_copy_t copy,mach_vm_address_t dst_addr,mach_vm_size_t copy_size)793 call_vm_map_copy_overwrite_non_interruptible(MAP_T dst_map, vm_map_copy_t copy, mach_vm_address_t dst_addr, mach_vm_size_t copy_size)
794 {
795 kern_return_t kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, copy_size, FALSE);
796 return kr;
797 }
798
799 // Mach memory entry ownership
800
801 extern kern_return_t
802 mach_memory_entry_ownership(
803 ipc_port_t entry_port,
804 task_t owner,
805 int ledger_tag,
806 int ledger_flags);
807
808 static kern_return_t
call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused,int ledger_tag)809 call_mach_memory_entry_ownership__ledger_tag(MAP_T map __unused, int ledger_tag)
810 {
811 mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
812 kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, ledger_tag, 0);
813 mach_memory_entry_port_release(mementry);
814 return kr;
815 }
816
817 static kern_return_t
call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused,int ledger_flag)818 call_mach_memory_entry_ownership__ledger_flag(MAP_T map __unused, int ledger_flag)
819 {
820 mach_port_t mementry = make_a_mem_entry(map, TEST_ALLOC_SIZE + 1);
821 kern_return_t kr = mach_memory_entry_ownership(mementry, TASK_NULL, VM_LEDGER_TAG_DEFAULT, ledger_flag);
822 mach_memory_entry_port_release(mementry);
823 return kr;
824 }
825
826 static inline void
check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr,mach_vm_size_t map_size,mach_vm_size_t invalid_initial_size)827 check_mach_memory_entry_map_size_outparam_changes(kern_return_t * kr, mach_vm_size_t map_size,
828 mach_vm_size_t invalid_initial_size)
829 {
830 if (*kr == KERN_SUCCESS) {
831 if (map_size == invalid_initial_size) {
832 *kr = OUT_PARAM_BAD;
833 }
834 } else {
835 if (map_size != invalid_initial_size) {
836 *kr = OUT_PARAM_BAD;
837 }
838 }
839 }
840
// Exercise mach_memory_entry_map_size with caller-chosen start/size against a
// shared memory entry backed by a real allocation, and verify that the
// map_size out-parameter follows the success/failure contract.
static kern_return_t
call_mach_memory_entry_map_size__start_size(MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
{
	mach_port_t mementry;
	mach_vm_address_t addr;
	memory_object_size_t s = (memory_object_size_t)TEST_ALLOC_SIZE + 1;
	/*
	 * INVALID_INITIAL_SIZE is guaranteed to never be the correct map_size
	 * from the mach_memory_entry_map_size calls we make. map_size should represent the size of the
	 * copy that would result, and INVALID_INITIAL_SIZE is completely unrelated to the sizes we pass
	 * and not page aligned.
	 */
	mach_vm_size_t invalid_initial_size = INVALID_INITIAL_SIZE;

	mach_vm_size_t map_size = invalid_initial_size;

	// Back the entry with a real allocation so only the start/size under
	// test can make the call fail; setup failures are asserted fatal.
	kern_return_t kr = mach_vm_allocate_kernel(map, &addr, s, FLAGS_AND_TAG(VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK));
	assert(kr == 0);
	kr = mach_make_memory_entry_64(map, &s, (memory_object_offset_t)addr, MAP_MEM_VM_SHARE, &mementry, MACH_PORT_NULL);
	assert(kr == 0);
	kr = mach_memory_entry_map_size(mementry, map, start, size, &map_size);
	// Rewrites kr to OUT_PARAM_BAD if map_size was updated inconsistently
	// with success/failure.
	check_mach_memory_entry_map_size_outparam_changes(&kr, map_size, invalid_initial_size);
	mach_memory_entry_port_release(mementry);
	(void)mach_vm_deallocate(map, addr, s);
	return kr;
}
867
868 static inline void
check_mach_memory_entry_outparam_changes(kern_return_t * kr,mach_vm_size_t size,mach_port_t out_handle,mach_port_t saved_handle)869 check_mach_memory_entry_outparam_changes(kern_return_t * kr, mach_vm_size_t size,
870 mach_port_t out_handle, mach_port_t saved_handle)
871 {
872 /*
873 * mach_make_memory_entry overwrites *size to be 0 on failure.
874 */
875 if (*kr != KERN_SUCCESS) {
876 if (size != 0) {
877 *kr = OUT_PARAM_BAD;
878 }
879 if (out_handle != saved_handle) {
880 *kr = OUT_PARAM_BAD;
881 }
882 }
883 }
884 // mach_make_memory_entry and variants
885
886 #define IMPL(FN, T) \
887 static kern_return_t \
888 call_ ## FN ## __start_size__memonly(MAP_T map, T start, T size) \
889 { \
890 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
891 T io_size = size; \
892 mach_port_t invalid_handle_value = INVALID_INITIAL_MACH_PORT; \
893 mach_port_t out_handle = invalid_handle_value; \
894 kern_return_t kr = FN(map, &io_size, start, \
895 VM_PROT_READ | MAP_MEM_ONLY, &out_handle, memobject); \
896 if (kr == 0) { \
897 if (out_handle) mach_memory_entry_port_release(out_handle); \
898 } \
899 mach_memory_entry_port_release(memobject); \
900 check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle,\
901 invalid_handle_value); \
902 return kr; \
903 } \
904 \
905 static kern_return_t \
906 call_ ## FN ## __start_size__namedcreate(MAP_T map, T start, T size) \
907 { \
908 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
909 T io_size = size; \
910 mach_port_t invalid_handle_value = INVALID_INITIAL_MACH_PORT; \
911 mach_port_t out_handle = invalid_handle_value; \
912 kern_return_t kr = FN(map, &io_size, start, \
913 VM_PROT_READ | MAP_MEM_NAMED_CREATE, &out_handle, memobject); \
914 if (kr == 0) { \
915 if (out_handle) mach_memory_entry_port_release(out_handle); \
916 } \
917 mach_memory_entry_port_release(memobject); \
918 check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle,\
919 invalid_handle_value); \
920 return kr; \
921 } \
922 \
923 static kern_return_t \
924 call_ ## FN ## __start_size__copy(MAP_T map, T start, T size) \
925 { \
926 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
927 T io_size = size; \
928 mach_port_t invalid_handle_value = INVALID_INITIAL_MACH_PORT; \
929 mach_port_t out_handle = invalid_handle_value; \
930 kern_return_t kr = FN(map, &io_size, start, \
931 VM_PROT_READ | MAP_MEM_VM_COPY, &out_handle, memobject); \
932 if (kr == 0) { \
933 if (out_handle) mach_memory_entry_port_release(out_handle); \
934 } \
935 mach_memory_entry_port_release(memobject); \
936 check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle,\
937 invalid_handle_value); \
938 return kr; \
939 } \
940 \
941 static kern_return_t \
942 call_ ## FN ## __start_size__share(MAP_T map, T start, T size) \
943 { \
944 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
945 T io_size = size; \
946 mach_port_t invalid_handle_value = INVALID_INITIAL_MACH_PORT; \
947 mach_port_t out_handle = invalid_handle_value; \
948 kern_return_t kr = FN(map, &io_size, start, \
949 VM_PROT_READ | MAP_MEM_VM_SHARE, &out_handle, memobject); \
950 if (kr == 0) { \
951 if (out_handle) mach_memory_entry_port_release(out_handle); \
952 } \
953 mach_memory_entry_port_release(memobject); \
954 check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle,\
955 invalid_handle_value); \
956 return kr; \
957 } \
958 \
959 static kern_return_t \
960 call_ ## FN ## __start_size__namedreuse(MAP_T map, T start, T size) \
961 { \
962 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
963 T io_size = size; \
964 mach_port_t invalid_handle_value = INVALID_INITIAL_MACH_PORT; \
965 mach_port_t out_handle = invalid_handle_value; \
966 kern_return_t kr = FN(map, &io_size, start, \
967 VM_PROT_READ | MAP_MEM_NAMED_REUSE, &out_handle, memobject); \
968 if (kr == 0) { \
969 if (out_handle) mach_memory_entry_port_release(out_handle); \
970 } \
971 mach_memory_entry_port_release(memobject); \
972 check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle,\
973 invalid_handle_value); \
974 return kr; \
975 } \
976 \
977 static kern_return_t \
978 call_ ## FN ## __vm_prot(MAP_T map, T start, T size, vm_prot_t prot) \
979 { \
980 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1); \
981 T io_size = size; \
982 mach_port_t invalid_handle_value = INVALID_INITIAL_MACH_PORT; \
983 mach_port_t out_handle = invalid_handle_value; \
984 kern_return_t kr = FN(map, &io_size, start, \
985 prot, &out_handle, memobject); \
986 if (kr == 0) { \
987 if (out_handle) mach_memory_entry_port_release(out_handle); \
988 } \
989 mach_memory_entry_port_release(memobject); \
990 check_mach_memory_entry_outparam_changes(&kr, io_size, out_handle,\
991 invalid_handle_value); \
992 return kr; \
993 }
994
IMPL(mach_make_memory_entry_64,mach_vm_address_t)995 IMPL(mach_make_memory_entry_64, mach_vm_address_t)
996 IMPL(mach_make_memory_entry, vm_size_t)
997 static kern_return_t
998 mach_make_memory_entry_internal_retyped(
999 vm_map_t target_map,
1000 memory_object_size_t *size,
1001 memory_object_offset_t offset,
1002 vm_prot_t permission,
1003 ipc_port_t *object_handle,
1004 ipc_port_t parent_handle)
1005 {
1006 vm_named_entry_kernel_flags_t vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
1007 if (permission & MAP_MEM_LEDGER_TAGGED) {
1008 vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT;
1009 }
1010 return mach_make_memory_entry_internal(target_map, size, offset, permission, vmne_kflags, object_handle, parent_handle);
1011 }
1012 IMPL(mach_make_memory_entry_internal_retyped, mach_vm_address_t)
1013
1014 #undef IMPL
1015
1016 // mach_vm_map/mach_vm_map_external/mach_vm_map_kernel/vm_map/vm_map_external infra
1017
// Common signature shared by the mach_vm_map-style entry points
// (mach_vm_map, mach_vm_map_external, mach_vm_map_kernel, vm_map, ...).
// The call_map_fn__* drivers below are parameterized over this type so each
// variant reuses the same test scaffolding.
typedef kern_return_t (*map_fn_t)(vm_map_t target_task,
	mach_vm_address_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	mem_entry_name_port_t object,
	memory_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance);
1029
1030 static kern_return_t
call_map_fn__allocate_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1031 call_map_fn__allocate_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1032 {
1033 mach_vm_address_t out_addr = start;
1034 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1035 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1036 // fixed-overwrite with pre-existing allocation, don't deallocate
1037 return kr;
1038 }
1039
1040 static kern_return_t
call_map_fn__allocate_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1041 call_map_fn__allocate_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1042 {
1043 mach_vm_address_t out_addr = start;
1044 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1045 0, 0, true, 0, 0, VM_INHERIT_NONE);
1046 // fixed-overwrite with pre-existing allocation, don't deallocate
1047 return kr;
1048 }
1049
1050 static kern_return_t
call_map_fn__allocate_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1051 call_map_fn__allocate_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1052 {
1053 mach_vm_address_t out_addr = start_hint;
1054 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, 0, 0, 0, 0, 0, VM_INHERIT_NONE);
1055 if (kr == 0) {
1056 (void)mach_vm_deallocate(map, out_addr, size);
1057 }
1058 return kr;
1059 }
1060
1061 static kern_return_t
call_map_fn__memobject_fixed(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1062 call_map_fn__memobject_fixed(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1063 {
1064 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1065 mach_vm_address_t out_addr = start;
1066 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1067 memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1068 // fixed-overwrite with pre-existing allocation, don't deallocate
1069 mach_memory_entry_port_release(memobject);
1070 return kr;
1071 }
1072
1073 static kern_return_t
call_map_fn__memobject_fixed_copy(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size)1074 call_map_fn__memobject_fixed_copy(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size)
1075 {
1076 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1077 mach_vm_address_t out_addr = start;
1078 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1079 memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1080 // fixed-overwrite with pre-existing allocation, don't deallocate
1081 mach_memory_entry_port_release(memobject);
1082 return kr;
1083 }
1084
1085 static kern_return_t
call_map_fn__memobject_anywhere(map_fn_t fn,MAP_T map,mach_vm_address_t start_hint,mach_vm_size_t size)1086 call_map_fn__memobject_anywhere(map_fn_t fn, MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size)
1087 {
1088 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1089 mach_vm_address_t out_addr = start_hint;
1090 kern_return_t kr = fn(map, &out_addr, size, 0, VM_FLAGS_ANYWHERE, memobject,
1091 KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1092 if (kr == 0) {
1093 (void)mach_vm_deallocate(map, out_addr, size);
1094 }
1095 mach_memory_entry_port_release(memobject);
1096 return kr;
1097 }
1098
1099 static kern_return_t
helper_call_map_fn__memobject__ssoo(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1100 helper_call_map_fn__memobject__ssoo(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1101 {
1102 mach_port_t memobject = make_a_mem_object(obj_size);
1103 mach_vm_address_t out_addr = start;
1104 kern_return_t kr = fn(map, &out_addr, size, 0, flags, memobject,
1105 offset, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1106 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1107 mach_memory_entry_port_release(memobject);
1108 return kr;
1109 }
1110
1111 static kern_return_t
call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1112 call_map_fn__memobject_fixed__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1113 {
1114 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, offset, obj_size);
1115 }
1116
1117 static kern_return_t
call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1118 call_map_fn__memobject_fixed_copy__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1119 {
1120 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, offset, obj_size);
1121 }
1122
1123 static kern_return_t
call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_object_offset_t offset,mach_vm_size_t obj_size)1124 call_map_fn__memobject_anywhere__start_size_offset_object(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size)
1125 {
1126 return helper_call_map_fn__memobject__ssoo(fn, map, VM_FLAGS_ANYWHERE, false, start, size, offset, obj_size);
1127 }
1128
1129 static kern_return_t
help_call_map_fn__allocate__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1130 help_call_map_fn__allocate__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1131 {
1132 mach_vm_address_t out_addr = start;
1133 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1134 0, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1135 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1136 return kr;
1137 }
1138
1139 static kern_return_t
call_map_fn__allocate_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1140 call_map_fn__allocate_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1141 {
1142 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1143 }
1144
1145 static kern_return_t
call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1146 call_map_fn__allocate_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1147 {
1148 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1149 }
1150
1151 static kern_return_t
call_map_fn__allocate_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1152 call_map_fn__allocate_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1153 {
1154 return help_call_map_fn__allocate__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1155 }
1156
1157 static kern_return_t
help_call_map_fn__memobject__inherit(map_fn_t fn,MAP_T map,int flags,bool copy,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1158 help_call_map_fn__memobject__inherit(map_fn_t fn, MAP_T map, int flags, bool copy, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1159 {
1160 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1161 mach_vm_address_t out_addr = start;
1162 kern_return_t kr = fn(map, &out_addr, size, 0, flags,
1163 memobject, KB16, copy, VM_PROT_DEFAULT, VM_PROT_DEFAULT, inherit);
1164 deallocate_if_not_fixed_overwrite(kr, map, out_addr, size, flags);
1165 mach_memory_entry_port_release(memobject);
1166 return kr;
1167 }
1168
1169 static kern_return_t
call_map_fn__memobject_fixed__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1170 call_map_fn__memobject_fixed__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1171 {
1172 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, start, size, inherit);
1173 }
1174
1175 static kern_return_t
call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1176 call_map_fn__memobject_fixed_copy__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1177 {
1178 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, start, size, inherit);
1179 }
1180
1181 static kern_return_t
call_map_fn__memobject_anywhere__inherit(map_fn_t fn,MAP_T map,mach_vm_address_t start,mach_vm_size_t size,vm_inherit_t inherit)1182 call_map_fn__memobject_anywhere__inherit(map_fn_t fn, MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit)
1183 {
1184 return help_call_map_fn__memobject__inherit(fn, map, VM_FLAGS_ANYWHERE, false, start, size, inherit);
1185 }
1186
1187 static kern_return_t
call_map_fn__allocate__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1188 call_map_fn__allocate__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1189 {
1190 kern_return_t kr = fn(map, start, size, 0, flags,
1191 0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1192 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1193 return kr;
1194 }
1195
1196 static kern_return_t
call_map_fn__allocate_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1197 call_map_fn__allocate_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1198 {
1199 kern_return_t kr = fn(map, start, size, 0, flags,
1200 0, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1201 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1202 return kr;
1203 }
1204
1205 static kern_return_t
call_map_fn__memobject__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1206 call_map_fn__memobject__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1207 {
1208 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1209 kern_return_t kr = fn(map, start, size, 0, flags,
1210 memobject, KB16, false, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1211 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1212 mach_memory_entry_port_release(memobject);
1213 return kr;
1214 }
1215
1216 static kern_return_t
call_map_fn__memobject_copy__flags(map_fn_t fn,MAP_T map,mach_vm_address_t * start,mach_vm_size_t size,int flags)1217 call_map_fn__memobject_copy__flags(map_fn_t fn, MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags)
1218 {
1219 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1220 kern_return_t kr = fn(map, start, size, 0, flags,
1221 memobject, KB16, true, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1222 deallocate_if_not_fixed_overwrite(kr, map, *start, size, flags);
1223 mach_memory_entry_port_release(memobject);
1224 return kr;
1225 }
1226
1227 static kern_return_t
help_call_map_fn__allocate__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1228 help_call_map_fn__allocate__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1229 {
1230 mach_vm_address_t out_addr = 0;
1231 kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1232 0, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1233 deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1234 return kr;
1235 }
1236
1237 static kern_return_t
call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1238 call_map_fn__allocate_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1239 {
1240 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1241 }
1242
1243 static kern_return_t
call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1244 call_map_fn__allocate_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1245 {
1246 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1247 }
1248
1249 static kern_return_t
call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1250 call_map_fn__allocate_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1251 {
1252 return help_call_map_fn__allocate__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1253 }
1254
1255 static kern_return_t
help_call_map_fn__memobject__prot_pairs(map_fn_t fn,MAP_T map,int flags,bool copy,vm_prot_t cur,vm_prot_t max)1256 help_call_map_fn__memobject__prot_pairs(map_fn_t fn, MAP_T map, int flags, bool copy, vm_prot_t cur, vm_prot_t max)
1257 {
1258 mach_port_t memobject = make_a_mem_object(TEST_ALLOC_SIZE + 1);
1259 mach_vm_address_t out_addr = 0;
1260 kern_return_t kr = fn(map, &out_addr, KB16, 0, flags,
1261 memobject, KB16, copy, cur, max, VM_INHERIT_DEFAULT);
1262 deallocate_if_not_fixed_overwrite(kr, map, out_addr, KB16, flags);
1263 mach_memory_entry_port_release(memobject);
1264 return kr;
1265 }
1266
1267 static kern_return_t
call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1268 call_map_fn__memobject_fixed__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1269 {
1270 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, false, cur, max);
1271 }
1272
1273 static kern_return_t
call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1274 call_map_fn__memobject_fixed_copy__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1275 {
1276 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, true, cur, max);
1277 }
1278
1279 static kern_return_t
call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn,MAP_T map,vm_prot_t cur,vm_prot_t max)1280 call_map_fn__memobject_anywhere__prot_pairs(map_fn_t fn, MAP_T map, vm_prot_t cur, vm_prot_t max)
1281 {
1282 return help_call_map_fn__memobject__prot_pairs(fn, map, VM_FLAGS_ANYWHERE, false, cur, max);
1283 }
1284
1285 // wrappers
1286
// Screen out start/size combinations whose cleanup (mach_vm_deallocate)
// would panic the kernel; the *_wrapped functions below return PANIC instead
// of performing such a call.
// NOTE(review): the thresholds are magic values — an address within ~16KB of
// the top of the 64-bit space and a size above 0x8000000000 (512 GB).
// Presumably tied to observed vm_map panic behavior; confirm before changing.
static bool
dealloc_would_panic(mach_vm_address_t start, mach_vm_size_t size)
{
	return (start > 0xffffffffffffbffd) ||
	       (size > 0x8000000000);
}
1293
// Wrapper for mach_vm_map: skips inputs whose cleanup would panic, then
// validates that the address out-parameter was updated consistently with the
// return code and flags.
kern_return_t
mach_vm_map_wrapped(vm_map_t target_task,
	mach_vm_address_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	mem_entry_name_port_t object,
	memory_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	if (dealloc_would_panic(*address, size)) {
		return PANIC;
	}
	// Remember the requested address so the out-parameter check can tell
	// whether mach_vm_map changed it appropriately.
	mach_vm_address_t saved_addr = *address;
	kern_return_t kr = mach_vm_map(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
	return kr;
}
1315
1316 // missing forward declaration
1317 kern_return_t
1318 mach_vm_map_external(
1319 vm_map_t target_map,
1320 mach_vm_offset_t *address,
1321 mach_vm_size_t initial_size,
1322 mach_vm_offset_t mask,
1323 int flags,
1324 ipc_port_t port,
1325 vm_object_offset_t offset,
1326 boolean_t copy,
1327 vm_prot_t cur_protection,
1328 vm_prot_t max_protection,
1329 vm_inherit_t inheritance);
// Wrapper for mach_vm_map_external: same panic screen and out-parameter
// validation as mach_vm_map_wrapped, targeting the _external entry point.
kern_return_t
mach_vm_map_external_wrapped(vm_map_t target_task,
	mach_vm_address_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	mem_entry_name_port_t object,
	memory_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	if (dealloc_would_panic(*address, size)) {
		return PANIC;
	}
	// Remember the requested address for the out-parameter check below.
	mach_vm_address_t saved_addr = *address;
	kern_return_t kr = mach_vm_map_external(target_task, address, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
	return kr;
}
1351
// Wrapper for mach_vm_map_kernel: converts the plain vm flags into the
// vm_map_kernel_flags_t the kernel-internal entry point takes, with the same
// panic screen and out-parameter validation as the other map wrappers.
kern_return_t
mach_vm_map_kernel_wrapped(vm_map_t target_task,
	mach_vm_address_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	mem_entry_name_port_t object,
	memory_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	if (dealloc_would_panic(*address, size)) {
		return PANIC;
	}
	// mach_vm_map_kernel takes structured kernel flags; derive them from
	// the caller's plain flags.
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
	mach_vm_address_t saved_addr = *address;
	kern_return_t kr = mach_vm_map_kernel(target_task, address, size, mask, vmk_flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, *address, saved_addr, flags, target_task);
	return kr;
}
1376
// Bundle returned by get_control_from_fd() and consumed by
// cleanup_control_related_data(): the memory object control for a file plus
// the references that must be released afterwards.
struct file_control_return {
	void * control; // cast to memory_object_control_t at the call site
	void * fp;      // NOTE(review): opaque here — presumably the file pointer; confirm
	void * vp;      // NOTE(review): opaque here — presumably the vnode pointer; confirm
	int fd;
};
1383
1384 static inline void
check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr,mach_vm_address_t addr,mach_vm_address_t saved_start,int flags,MAP_T map)1385 check_vm_map_enter_mem_object_control_outparam_changes(kern_return_t * kr, mach_vm_address_t addr,
1386 mach_vm_address_t saved_start, int flags, MAP_T map)
1387 {
1388 if (*kr == KERN_SUCCESS) {
1389 if (is_fixed(flags)) {
1390 if (addr != truncate_vm_map_addr_with_flags(map, saved_start, flags)) {
1391 *kr = OUT_PARAM_BAD;
1392 }
1393 }
1394 } else {
1395 if (saved_start != addr) {
1396 *kr = OUT_PARAM_BAD;
1397 }
1398 }
1399 }
1400
1401 struct file_control_return get_control_from_fd(int fd);
1402 void cleanup_control_related_data(struct file_control_return info);
// Wrapper for vm_map_enter_mem_object_control: maps a memory object control
// obtained from the userspace-supplied file descriptor, screening out inputs
// whose offset arithmetic would overflow or whose cleanup would panic, then
// validates the address out-parameter contract.
kern_return_t
vm_map_enter_mem_object_control_wrapped(
	vm_map_t target_map,
	mach_vm_address_t *address,
	mach_vm_size_t size,
	vm_map_offset_t mask,
	int flags,
	mem_entry_name_port_t object __unused,
	memory_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	// Reject inputs where (rounded extent + offset) overflows.
	// NOTE(review): start is truncated with the target map's page mask but
	// end is rounded with the global PAGE_SIZE — looks asymmetric; confirm
	// this mirrors the callee's own arithmetic.
	mach_vm_address_t start = vm_map_trunc_page(*address, VM_MAP_PAGE_MASK(target_map));
	mach_vm_address_t end = round_up_page(*address + size, PAGE_SIZE);
	mach_vm_address_t end_offset;
	if (__builtin_add_overflow(end - start, offset, &end_offset)) {
		return PANIC;
	}

	// The callee takes a vm_map_offset_t in/out address.
	vm_map_offset_t vmmaddr;
	vmmaddr = (vm_map_offset_t) *address;

	if (dealloc_would_panic(*address, size)) {
		return PANIC;
	}
	vm_map_kernel_flags_t vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;

	vm_map_kernel_flags_set_vmflags(&vmk_flags, flags);
	// file_descriptor is the read/write fd passed in from userspace.
	struct file_control_return control_info = get_control_from_fd(file_descriptor);
	kern_return_t kr = vm_map_enter_mem_object_control(target_map, &vmmaddr, size, mask, vmk_flags, (memory_object_control_t) control_info.control, offset, copy, cur_protection, max_protection, inheritance);
	check_vm_map_enter_mem_object_control_outparam_changes(&kr, vmmaddr, *address, flags, target_map);

	*address = vmmaddr;

	cleanup_control_related_data(control_info);

	return kr;
}
1443
// Wrapper for vm_map: same panic screen and out-parameter validation as
// mach_vm_map_wrapped, but vm_map takes a vm_address_t so the 64-bit address
// is round-tripped through a local of that type.
kern_return_t
vm_map_wrapped(vm_map_t target_task,
	mach_vm_address_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	mem_entry_name_port_t object,
	memory_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	if (dealloc_would_panic(*address, size)) {
		return PANIC;
	}
	// NOTE(review): the cast narrows if vm_address_t is 32-bit for this
	// build; presumably intended, since vm_map is the narrow-address API.
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
1466
1467 kern_return_t
1468 vm_map_external(
1469 vm_map_t target_map,
1470 vm_offset_t *address,
1471 vm_size_t size,
1472 vm_offset_t mask,
1473 int flags,
1474 ipc_port_t port,
1475 vm_offset_t offset,
1476 boolean_t copy,
1477 vm_prot_t cur_protection,
1478 vm_prot_t max_protection,
1479 vm_inherit_t inheritance);
// Wrapper for vm_map_external: same shape as vm_map_wrapped (narrow
// vm_address_t round-trip, panic screen, out-parameter validation) for the
// _external entry point.
kern_return_t
vm_map_external_wrapped(vm_map_t target_task,
	mach_vm_address_t *address,
	mach_vm_size_t size,
	mach_vm_offset_t mask,
	int flags,
	mem_entry_name_port_t object,
	memory_object_offset_t offset,
	boolean_t copy,
	vm_prot_t cur_protection,
	vm_prot_t max_protection,
	vm_inherit_t inheritance)
{
	if (dealloc_would_panic(*address, size)) {
		return PANIC;
	}
	// NOTE(review): narrowing cast, as in vm_map_wrapped.
	vm_address_t addr = (vm_address_t)*address;
	kern_return_t kr = vm_map_external(target_task, &addr, size, mask, flags, object, offset, copy, cur_protection, max_protection, inheritance);
	check_mach_vm_map_outparam_changes(&kr, addr, (vm_address_t)*address, flags, target_task);
	*address = addr;
	return kr;
}
1502
1503 // implementations
1504
// Trampoline generators. Each IMPL_MAP_FN_* macro emits a uniquely named
// thin wrapper (call_<map_fn>__<instance>...) that forwards to the generic
// call_map_fn__<instance>... helper, binding one mapping function to one
// setup variant. The harness below needs a distinct function pointer per
// (map function, variant, parameter shape) combination so each run can be
// labeled and dumped separately.

// start/size shape: map at an explicitly chosen (fixed) address.
#define IMPL_MAP_FN_START_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start, mach_vm_size_t size) \
	{ \
	        return call_map_fn__ ## instance(map_fn, map, start, size); \
	}

// hint/size shape: "anywhere" placement, start is only a hint.
#define IMPL_MAP_FN_HINT_SIZE(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance (MAP_T map, mach_vm_address_t start_hint, mach_vm_size_t size) \
	{ \
	        return call_map_fn__ ## instance(map_fn, map, start_hint, size); \
	}

// start/size/offset/object shape: exercises memory-object offset and size.
#define IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __start_size_offset_object(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_object_offset_t offset, mach_vm_size_t obj_size) \
	{ \
	        return call_map_fn__ ## instance ## __start_size_offset_object(map_fn, map, start, size, offset, obj_size); \
	}

// start/size/inherit shape: exercises the vm_inherit_t parameter.
#define IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __inherit(MAP_T map, mach_vm_address_t start, mach_vm_size_t size, vm_inherit_t inherit) \
	{ \
	        return call_map_fn__ ## instance ## __inherit(map_fn, map, start, size, inherit); \
	}

// start/size/flags shape: exercises the flags parameter (start by pointer).
#define IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __flags(MAP_T map, mach_vm_address_t * start, mach_vm_size_t size, int flags) \
	{ \
	        return call_map_fn__ ## instance ## __flags(map_fn, map, start, size, flags); \
	}

// cur/max protection pair shape: exercises vm_prot_t combinations.
#define IMPL_MAP_FN_PROT_PAIRS(map_fn, instance) \
	static kern_return_t \
	call_ ## map_fn ## __ ## instance ## __prot_pairs(MAP_T map, vm_prot_t cur, vm_prot_t max) \
	{ \
	        return call_map_fn__ ## instance ## __prot_pairs(map_fn, map, cur, max); \
	}
1546
// IMPL(map_fn) stamps out the full trampoline set for one mapping function:
// fixed vs. anywhere placement, anonymous-allocation vs. memory-object
// backing, copy vs. no-copy, plus the inherit / flags / prot-pair /
// start-size-offset-object parameter shapes exercised by the harness below.
#define IMPL(map_fn) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_HINT_SIZE(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_OFFSET_OBJECT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_START_SIZE_INHERIT(map_fn, memobject_anywhere) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, allocate_copy) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject) \
	IMPL_MAP_FN_START_SIZE_FLAGS(map_fn, memobject_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, allocate_anywhere) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_fixed_copy) \
	IMPL_MAP_FN_PROT_PAIRS(map_fn, memobject_anywhere) \

// Instantiate trampolines for each mapping API under test.
IMPL(mach_vm_map_wrapped)
IMPL(mach_vm_map_external_wrapped)
IMPL(mach_vm_map_kernel_wrapped)
IMPL(vm_map_wrapped)
IMPL(vm_map_external_wrapped)
IMPL(vm_map_enter_mem_object_control_wrapped)

#undef IMPL
1582
1583 static int
1584 vm_parameter_validation_kern_test(int64_t in_value, int64_t *out_value)
1585 {
1586 // in_value has the userspace address of the fixed-size output buffer and a file descriptor.
1587 // The address is KB16 aligned, so the bottom bits are used for the fd.
1588 // fd bit 15 also indicates if we want to generate golden results.
1589 // in_value is KB16 aligned
1590 uint64_t fd_mask = KB16 - 1;
1591 file_descriptor = (int)(((uint64_t) in_value) & fd_mask);
1592 uint64_t buffer_address = in_value - file_descriptor;
1593 SYSCTL_OUTPUT_BUF = buffer_address;
1594 SYSCTL_OUTPUT_END = SYSCTL_OUTPUT_BUF + SYSCTL_OUTPUT_BUFFER_SIZE;
1595
1596 // check if running to generate golden result list via boot-arg
1597 kernel_generate_golden = (file_descriptor & (KB16 >> 1)) > 0;
1598 if (kernel_generate_golden) {
1599 file_descriptor &= ~(KB16 >> 1);
1600 } else {
1601 init_kernel_generate_golden();
1602 }
1603
1604 /*
1605 * Group 1: memory entry
1606 */
1607
1608 #define RUN_START_SIZE(fn, variant, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(call_ ## fn ## __start_size__ ## variant, name " (start/size)")))
1609 #define RUN_PROT(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_prot_t(call_ ## fn ## __vm_prot , name " (vm_prot_t)")))
1610
1611 #define RUN_ALL(fn, name) \
1612 RUN_START_SIZE(fn, copy, #name " (copy)"); \
1613 RUN_START_SIZE(fn, memonly, #name " (memonly)"); \
1614 RUN_START_SIZE(fn, namedcreate, #name " (namedcreate)"); \
1615 RUN_START_SIZE(fn, share, #name " (share)"); \
1616 RUN_START_SIZE(fn, namedreuse, #name " (namedreuse)"); \
1617 RUN_PROT(fn, #name " (vm_prot_t)"); \
1618
1619 RUN_ALL(mach_make_memory_entry_64, mach_make_memory_entry_64);
1620 RUN_ALL(mach_make_memory_entry, mach_make_memory_entry);
1621 RUN_ALL(mach_make_memory_entry_internal_retyped, mach_make_memory_entry_internal);
1622 #undef RUN_ALL
1623 #undef RUN_START_SIZE
1624 #undef RUN_PROT
1625
1626 #define RUN(fn, name) dealloc_results(dump_results(test_mach_with_ledger_tag(fn, name " (ledger tag)")))
1627 RUN(call_mach_memory_entry_ownership__ledger_tag, "mach_memory_entry_ownership");
1628 #undef RUN
1629
1630 #define RUN(fn, name) dealloc_results(dump_results(test_mach_with_ledger_flag(fn, name " (ledger flag)")))
1631 RUN(call_mach_memory_entry_ownership__ledger_flag, "mach_memory_entry_ownership");
1632 #undef RUN
1633
1634 #define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
1635 RUN(call_mach_memory_entry_map_size__start_size, "mach_memory_entry_map_size");
1636 #undef RUN
1637
1638 /*
1639 * Group 2: allocate/deallocate
1640 */
1641
1642 #define RUN(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_start_size(fn, name)))
1643 RUN(call_mach_vm_allocate__start_size_fixed, "mach_vm_allocate_external (fixed) (realigned start/size)");
1644 RUN(call_mach_vm_allocate__start_size_anywhere, "mach_vm_allocate_external (anywhere) (hint/size)");
1645 RUN(call_mach_vm_allocate_kernel__start_size_fixed, "mach_vm_allocate (fixed) (realigned start/size)");
1646 RUN(call_mach_vm_allocate_kernel__start_size_anywhere, "mach_vm_allocate (anywhere) (hint/size)");
1647 #undef RUN
1648
1649 #define RUN(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1650 RUN(call_mach_vm_allocate__flags, "mach_vm_allocate_external");
1651 RUN(call_mach_vm_allocate_kernel__flags, "mach_vm_allocate_kernel");
1652 #undef RUN
1653
1654 #define RUN(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_start_size(fn, name)))
1655 RUN(call_vm_allocate__start_size_fixed, "vm_allocate (fixed) (realigned start/size)");
1656 RUN(call_vm_allocate__start_size_anywhere, "vm_allocate (anywhere) (hint/size)");
1657 #undef RUN
1658
1659 #define RUN(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1660 RUN(call_vm_allocate__flags, "vm_allocate");
1661 #undef RUN
1662 dealloc_results(dump_results(test_deallocator(call_mach_vm_deallocate, "mach_vm_deallocate (start/size)")));
1663 dealloc_results(dump_results(test_deallocator(call_vm_deallocate, "vm_deallocate (start/size)")));
1664
1665 /*
1666 * Group 3: map/remap
1667 */
1668
1669 // map tests
1670
1671 #define RUN_START_SIZE(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (realigned start/size)")))
1672 #define RUN_HINT_SIZE(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (hint/size)")))
1673 #define RUN_PROT_PAIR(fn, name) dealloc_results(dump_results(test_mach_vm_prot_pair(fn, name " (vm_prot_t pair)")))
1674 #define RUN_INHERIT(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_inherit_t(fn, name " (vm_inherit_t)")))
1675 #define RUN_FLAGS(fn, name) dealloc_results(dump_results(test_mach_allocation_func_with_vm_map_kernel_flags_t(fn, name " (vm_map_kernel_flags_t)")))
1676 #define RUN_SSOO(fn, name) dealloc_results(dump_results(test_mach_with_start_size_offset_object(fn, name " (start/size/offset/object)")))
1677
1678 #define RUN_ALL(fn, name) \
1679 RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
1680 RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
1681 RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
1682 RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
1683 RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
1684 RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
1685 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
1686 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
1687 RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
1688 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
1689 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
1690 RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
1691 RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
1692 RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
1693 RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
1694 RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
1695 RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
1696 RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
1697 RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
1698 RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
1699 RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
1700 RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
1701 RUN_SSOO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
1702 RUN_SSOO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
1703 RUN_SSOO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \
1704
1705 RUN_ALL(mach_vm_map_wrapped, mach_vm_map);
1706 RUN_ALL(mach_vm_map_external_wrapped, mach_vm_map_external);
1707 RUN_ALL(mach_vm_map_kernel_wrapped, mach_vm_map_kernel);
1708 RUN_ALL(vm_map_wrapped, vm_map);
1709 RUN_ALL(vm_map_external_wrapped, vm_map_external);
1710
1711 #define RUN_SSO(fn, name) dealloc_results(dump_results(test_mach_with_start_size_offset(fn, name " (start/size/offset)")))
1712
1713 #define RUN_ALL_CTL(fn, name) \
1714 RUN_START_SIZE(call_ ## fn ## __allocate_fixed, #name " (allocate fixed overwrite)"); \
1715 RUN_START_SIZE(call_ ## fn ## __allocate_fixed_copy, #name " (allocate fixed overwrite copy)"); \
1716 RUN_START_SIZE(call_ ## fn ## __memobject_fixed, #name " (memobject fixed overwrite)"); \
1717 RUN_START_SIZE(call_ ## fn ## __memobject_fixed_copy, #name " (memobject fixed overwrite copy)"); \
1718 RUN_HINT_SIZE(call_ ## fn ## __allocate_anywhere, #name " (allocate anywhere)"); \
1719 RUN_HINT_SIZE(call_ ## fn ## __memobject_anywhere, #name " (memobject anywhere)"); \
1720 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed__prot_pairs, #name " (allocate fixed overwrite)"); \
1721 RUN_PROT_PAIR(call_ ## fn ## __allocate_fixed_copy__prot_pairs, #name " (allocate fixed overwrite copy)"); \
1722 RUN_PROT_PAIR(call_ ## fn ## __allocate_anywhere__prot_pairs, #name " (allocate anywhere)"); \
1723 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed__prot_pairs, #name " (memobject fixed overwrite)"); \
1724 RUN_PROT_PAIR(call_ ## fn ## __memobject_fixed_copy__prot_pairs, #name " (memobject fixed overwrite copy)"); \
1725 RUN_PROT_PAIR(call_ ## fn ## __memobject_anywhere__prot_pairs, #name " (memobject anywhere)"); \
1726 RUN_INHERIT(call_ ## fn ## __allocate_fixed__inherit, #name " (allocate fixed overwrite)"); \
1727 RUN_INHERIT(call_ ## fn ## __allocate_fixed_copy__inherit, #name " (allocate fixed overwrite copy)"); \
1728 RUN_INHERIT(call_ ## fn ## __allocate_anywhere__inherit, #name " (allocate anywhere)"); \
1729 RUN_INHERIT(call_ ## fn ## __memobject_fixed__inherit, #name " (memobject fixed overwrite)"); \
1730 RUN_INHERIT(call_ ## fn ## __memobject_fixed_copy__inherit, #name " (memobject fixed overwrite copy)"); \
1731 RUN_INHERIT(call_ ## fn ## __memobject_anywhere__inherit, #name " (memobject anywhere)"); \
1732 RUN_FLAGS(call_ ## fn ## __allocate__flags, #name " (allocate)"); \
1733 RUN_FLAGS(call_ ## fn ## __allocate_copy__flags, #name " (allocate copy)"); \
1734 RUN_FLAGS(call_ ## fn ## __memobject__flags, #name " (memobject)"); \
1735 RUN_FLAGS(call_ ## fn ## __memobject_copy__flags, #name " (memobject copy)"); \
1736 RUN_SSO(call_ ## fn ## __memobject_fixed__start_size_offset_object, #name " (memobject fixed overwrite)"); \
1737 RUN_SSO(call_ ## fn ## __memobject_fixed_copy__start_size_offset_object, #name " (memobject fixed overwrite copy)"); \
1738 RUN_SSO(call_ ## fn ## __memobject_anywhere__start_size_offset_object, #name " (memobject anywhere)"); \
1739
1740 RUN_ALL_CTL(vm_map_enter_mem_object_control_wrapped, vm_map_enter_mem_object_control);
1741
1742 #undef RUN_ALL
1743 #undef RUN_START_SIZE
1744 #undef RUN_HINT_SIZE
1745 #undef RUN_PROT_PAIR
1746 #undef RUN_INHERIT
1747 #undef RUN_FLAGS
1748 #undef RUN_SSOO
1749 #undef RUN_ALL_CTL
1750 #undef RUN_SSO
1751
1752 // remap tests
1753
1754 #define FN_NAME(fn, variant, type) call_ ## fn ## __ ## variant ## __ ## type
1755 #define RUN_HELPER(harness, fn, variant, type, type_name, name) dealloc_results(dump_results(harness(FN_NAME(fn, variant, type), #name " (" #variant ") (" type_name ")")))
1756 #define RUN_SRC_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, src_size, type_name, name)
1757 #define RUN_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_mach_with_allocated_start_size, fn, variant, dst_size, type_name, name)
1758 #define RUN_PROT_PAIRS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_prot_pair, fn, variant, prot_pairs, "prot_pairs", name)
1759 #define RUN_INHERIT(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_inherit_t, fn, variant, inherit, "inherit", name)
1760 #define RUN_FLAGS(fn, variant, name) RUN_HELPER(test_mach_with_allocated_vm_map_kernel_flags_t, fn, variant, flags, "flags", name)
1761 #define RUN_SRC_DST_SIZE(fn, variant, type_name, name) RUN_HELPER(test_allocated_src_unallocated_dst_size, fn, variant, src_dst_size, type_name, name)
1762
1763 #define RUN_ALL(fn, realigned, name) \
1764 RUN_SRC_SIZE(fn, copy, realigned "src/size", name); \
1765 RUN_SRC_SIZE(fn, nocopy, realigned "src/size", name); \
1766 RUN_DST_SIZE(fn, fixed, "realigned dst/size", name); \
1767 RUN_DST_SIZE(fn, fixed_copy, "realigned dst/size", name); \
1768 RUN_DST_SIZE(fn, anywhere, "hint/size", name); \
1769 RUN_INHERIT(fn, fixed, name); \
1770 RUN_INHERIT(fn, fixed_copy, name); \
1771 RUN_INHERIT(fn, anywhere, name); \
1772 RUN_FLAGS(fn, nocopy, name); \
1773 RUN_FLAGS(fn, copy, name); \
1774 RUN_PROT_PAIRS(fn, fixed, name); \
1775 RUN_PROT_PAIRS(fn, fixed_copy, name); \
1776 RUN_PROT_PAIRS(fn, anywhere, name); \
1777 RUN_SRC_DST_SIZE(fn, fixed, "src/dst/size", name); \
1778 RUN_SRC_DST_SIZE(fn, fixed_copy, "src/dst/size", name); \
1779 RUN_SRC_DST_SIZE(fn, anywhere, "src/dst/size", name); \
1780
1781 RUN_ALL(mach_vm_remap_wrapped_kern, "realigned ", mach_vm_remap);
1782 RUN_ALL(mach_vm_remap_new_kernel_wrapped, , mach_vm_remap_new_kernel);
1783
1784 #undef RUN_ALL
1785 #undef RUN_HELPER
1786 #undef RUN_SRC_SIZE
1787 #undef RUN_DST_SIZE
1788 #undef RUN_PROT_PAIRS
1789 #undef RUN_INHERIT
1790 #undef RUN_FLAGS
1791 #undef RUN_SRC_DST_SIZE
1792
1793 /*
1794 * Group 4: wire/unwire
1795 */
1796
1797 #define RUN(fn, name) dealloc_results(dump_results(test_kext_unix_with_allocated_start_size(fn, name " (start/size)")))
1798 RUN(call_vslock, "vslock");
1799 RUN(call_vsunlock_undirtied, "vsunlock (undirtied)");
1800 RUN(call_vsunlock_dirtied, "vsunlock (dirtied)");
1801 #undef RUN
1802
1803 #if XNU_PLATFORM_MacOSX
1804 // vm_map_wire_and_extract is implemented on macOS only
1805 #define RUN(fn, name) dealloc_results(dump_results(test_kext_tagged_with_allocated_addr(fn, name " (addr)")))
1806 RUN(call_vm_map_wire_and_extract_user_wired, "vm_map_wire_and_extract (user wired)");
1807 RUN(call_vm_map_wire_and_extract_non_user_wired, "vm_map_wire_and_extract (user wired)");
1808 #undef RUN
1809
1810 #define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
1811 RUN(call_vm_map_wire_and_extract_vm_prot_t_user_wired, "vm_map_wire_and_extract_external (user wired)");
1812 RUN(call_vm_map_wire_and_extract_vm_prot_t_non_user_wired, "vm_map_wire_and_extract_external (non user wired)");
1813 #undef RUN
1814 #endif // XNU_PLATFORM_MacOSX
1815
1816 #define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_vm_prot_t(fn, name " (vm_prot_t)")))
1817 RUN(call_vm_map_wire_external_vm_prot_t_user_wired, "vm_map_wire_external (user wired)");
1818 RUN(call_vm_map_wire_external_vm_prot_t_non_user_wired, "vm_map_wire_external (non user wired))");
1819 RUN(call_vm_map_wire_kernel_vm_prot_t_user_wired, "vm_map_wire_kernel (user wired)");
1820 RUN(call_vm_map_wire_kernel_vm_prot_t_non_user_wired, "vm_map_wire_kernel (non user wired))");
1821 #undef RUN
1822
1823 #define RUN(fn, name) dealloc_results(dump_results(test_with_start_end(fn, name " (start/end)")))
1824 RUN(call_vm_map_wire_external_user_wired, "vm_map_wire_external (user wired)");
1825 RUN(call_vm_map_wire_external_non_user_wired, "vm_map_wire_external (non user wired)");
1826 RUN(call_vm_map_wire_kernel_user_wired, "vm_map_wire_kernel (user wired)");
1827 RUN(call_vm_map_wire_kernel_non_user_wired, "vm_map_wire_kernel (non user wired)");
1828 RUN(call_vm_map_unwire_user_wired, "vm_map_unwire (user_wired)");
1829 RUN(call_vm_map_unwire_non_user_wired, "vm_map_unwire (non user_wired)");
1830 #undef RUN
1831
1832 #define RUN(fn, name) dealloc_results(dump_results(test_with_tag(fn, name " (tag)")))
1833 RUN(call_vm_map_kernel_tag_user_wired, "vm_map_wire_kernel (user wired)");
1834 RUN(call_vm_map_kernel_tag_non_user_wired, "vm_map_wire_kernel (non user wired)");
1835 #undef RUN
1836
1837 #define RUN(fn, name) dealloc_results(dump_results(test_with_int64(fn, name " (int64)")))
1838 RUN(call_mach_vm_wire_level_monitor, "mach_vm_wire_level_monitor");
1839 #undef RUN
1840
1841 /*
1842 * Group 5: copyin/copyout
1843 */
1844
1845 #define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_start_size(fn, name " (start/size)")))
1846 RUN(call_vm_map_copyin, "vm_map_copyin");
1847 // vm_map_copyin_common is covered well by the vm_map_copyin test
1848 // RUN(call_vm_map_copyin_common, "vm_map_copyin_common");
1849 #undef RUN
1850
1851 #define RUN(fn, name) dealloc_results(dump_results(test_mach_with_allocated_addr_of_size_n(fn, sizeof(uint32_t), name " (start)")))
1852 RUN(call_copyoutmap_atomic32, "copyoutmap_atomic32");
1853 #undef RUN
1854
1855 #define RUN(fn, name) dealloc_results(dump_results(test_src_kerneldst_size(fn, name " (src/dst/size)")))
1856 RUN(call_copyinmap, "copyinmap");
1857 RUN(call_vm_map_read_user, "vm_map_read_user");
1858 #undef RUN
1859
1860 #define RUN(fn, name) dealloc_results(dump_results(test_kernelsrc_dst_size(fn, name " (src/dst/size)")))
1861 RUN(call_vm_map_write_user, "vm_map_write_user");
1862 RUN(call_copyoutmap, "copyoutmap");
1863 #undef RUN
1864
1865 dealloc_results(dump_results(test_vm_map_copy_overwrite(call_vm_map_copy_overwrite_interruptible, "vm_map_copy_overwrite (start/size)")));
1866
1867 SYSCTL_OUTPUT_BUF = 0;
1868 SYSCTL_OUTPUT_END = 0;
1869 *out_value = 1; // success
1870 return 0;
1871 }
1872
// Register the test with the kernel sysctl test harness; userspace triggers
// it through the vm_parameter_validation_kern debug sysctl.
SYSCTL_TEST_REGISTER(vm_parameter_validation_kern, vm_parameter_validation_kern_test);
1874