xref: /xnu-12377.81.4/tests/vm/upl.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  *
28  */
29 
30 #include <darwintest.h>
31 #include <darwintest_utils.h>
32 
33 #include <os/thread_self_restrict.h>
34 
35 #include <stdlib.h>
36 #include <sys/mman.h>
37 
38 #include <mach/mach.h>
39 #include <mach/mach_vm.h>
40 
41 #include <System/machine/cpu_capabilities.h>
42 
43 #include "exc_guard_helper.h"
44 #include "test_utils.h"
45 
46 T_GLOBAL_META(
47 	T_META_NAMESPACE("xnu.vm"),
48 	T_META_RADAR_COMPONENT_NAME("xnu"),
49 	T_META_RADAR_COMPONENT_VERSION("VM"),
50 	T_META_OWNER("jharmening"),
51 	T_META_CHECK_LEAKS(false),
52 	T_META_RUN_CONCURRENTLY(true),
53 	T_META_ALL_VALID_ARCHS(true));
54 
/*
 * Argument blob passed by address to the debug.test.vm_upl sysctl.
 * Field layout must match the kernel-side definition of the test hook
 * exactly — do not reorder or repack.  (Kernel side not visible here;
 * field meanings below are inferred from the callers in this file.)
 */
typedef struct {
	uint64_t ptr;       /* user VA at which to start the UPL request */
	uint32_t size;      /* length in bytes of the UPL request */
	char test_pattern;  /* seed byte; presumably used by the kernel to validate/fill pages — confirm against hook */
	bool copy_expected; /* whether UPL creation is expected to produce a copy of the pages */
	bool should_fail;   /* whether UPL creation is expected to fail */
	bool upl_rw;        /* request a writable (RW) UPL rather than read-only */
} upl_test_args;
63 
T_DECL(vm_upl_ro_on_rw,
    "Generate RO UPL against RW memory region")
{
	/* Back the UPL with a plain anonymous read/write mapping. */
	const size_t region_len = 10 * PAGE_SIZE;
	unsigned int *region = mmap(NULL, region_len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(region, MAP_FAILED, "map buffer");

	/* Seed a recognizable pattern for the kernel-side hook to check. */
	const size_t nwords = region_len / sizeof(*region);
	for (size_t w = 0; w < nwords; w++) {
		region[w] = (unsigned int)'a' + (unsigned int)w;
	}

	upl_test_args args = {
		.ptr = (uint64_t)region,
		.size = region_len,
		.test_pattern = 'a',
		.copy_expected = false,
		.should_fail = false,
		.upl_rw = false,
	};

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);

	/* Case 1: base and size fully aligned. */
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Case 2: base aligned to neither 4K nor 16K. */
	args.ptr = (uint64_t)region + 0x800;
	args.size -= 0x1000;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Case 3: base 4K-aligned but not necessarily 16K-aligned. */
	args.ptr = (uint64_t)region + 0x1000;
	args.size -= 0x1000;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	munmap(region, region_len);
}
98 
T_DECL(vm_upl_ro_on_ro,
    "Generate RO UPL against RO memory region")
{
	const size_t region_len = 10 * PAGE_SIZE;
	unsigned int *region = mmap(NULL, region_len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(region, MAP_FAILED, "map buffer");

	/* Fill while the region is still writable. */
	const size_t nwords = region_len / sizeof(*region);
	for (size_t w = 0; w < nwords; w++) {
		region[w] = (unsigned int)'a' + (unsigned int)w;
	}

	/* Drop write permission before requesting the RO UPL. */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(mprotect(region, region_len, PROT_READ), "mprotect");

	upl_test_args args = {
		.ptr = (uint64_t)region,
		.size = region_len,
		.test_pattern = 'a',
		.copy_expected = false,
		.should_fail = false,
		.upl_rw = false,
	};

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);

	/* Case 1: aligned base and size. */
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Case 2: base aligned to neither 4K nor 16K. */
	args.ptr = (uint64_t)region + 0x800;
	args.size -= 0x1000;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Case 3: base 4K-aligned but not necessarily 16K-aligned. */
	args.ptr = (uint64_t)region + 0x1000;
	args.size -= 0x1000;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	munmap(region, region_len);
}
135 
T_DECL(vm_upl_rw_on_rw,
    "Generate RW UPL against RW memory region")
{
	const size_t buf_size = 10 * PAGE_SIZE;
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	/* Seed the buffer with an 'a'-based pattern. */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		buf[i] = (unsigned int)'a' + i;
	}

	/*
	 * Request a writable UPL; test_pattern 'b' is what we expect the kernel
	 * side to write through the UPL (verified by the loops below).
	 */
	upl_test_args args = { .ptr = (uint64_t)buf, .size = buf_size, .test_pattern = 'b',
		               .copy_expected = false, .should_fail = false, .upl_rw = true };

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Whole buffer was covered by the UPL: every word should now carry the 'b' pattern. */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		T_QUIET; T_ASSERT_EQ(buf[i], (unsigned int)'b' + i,
		    "buf[%u]='%u' == '%u'",
		    i, buf[i], (unsigned int)'b' + i);
	}
	bzero(buf, buf_size);
	/* Second pass: unaligned base (+0x800), size trimmed by one 4K page. */
	args.ptr = (uint64_t)buf + 0x800;
	args.size -= 0x1000;

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/*
	 * Words outside [0x800, 0x800+size) must still be zero (untouched by the UPL);
	 * words inside must carry the 'b' pattern rebased to the UPL's start offset.
	 */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		if ((i < (0x800 / sizeof(*buf))) || (i >= ((0x800 + args.size) / sizeof(*buf)))) {
			T_QUIET; T_ASSERT_EQ(buf[i], 0,
			    "buf[%u]='%u' == 0", i, buf[i]);
		} else {
			T_QUIET; T_ASSERT_EQ(buf[i], (unsigned int)'b' + i - (unsigned int)(0x800 / sizeof(*buf)),
			    "buf[%u]='%u' == '%u'",
			    i, buf[i], (unsigned int)'b' + i - (unsigned int)(0x800 / sizeof(*buf)));
		}
	}

	bzero(buf, buf_size);
	/* Third pass: base 4K-aligned (+0x1000) but not necessarily 16K-aligned. */
	args.ptr = (uint64_t)buf + 0x1000;
	args.size -= 0x1000;

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Same inside/outside check, rebased to the 0x1000 offset. */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		if ((i < (0x1000 / sizeof(*buf))) || (i >= ((0x1000 + args.size) / sizeof(*buf)))) {
			T_QUIET; T_ASSERT_EQ(buf[i], 0,
			    "buf[%u]='%u' == 0", i, buf[i]);
		} else {
			T_QUIET; T_ASSERT_EQ(buf[i], (unsigned int)'b' + i - (unsigned int)(0x1000 / sizeof(*buf)),
			    "buf[%u]='%u' == '%u'",
			    i, buf[i], (unsigned int)'b' + i - (unsigned int)(0x1000 / sizeof(*buf)));
		}
	}

	munmap(buf, buf_size);
}
199 
T_DECL(vm_upl_rw_on_ro,
    "Generate RW UPL against RO memory region")
{
	const size_t region_len = 10 * PAGE_SIZE;
	unsigned int *region = mmap(NULL, region_len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(region, MAP_FAILED, "map buffer");

	/* Make the region read-only: a writable UPL against it must fail. */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(mprotect(region, region_len, PROT_READ), "mprotect");

	upl_test_args args = {
		.ptr = (uint64_t)region,
		.size = region_len,
		.test_pattern = 'b',
		.copy_expected = false,
		.should_fail = true,
		.upl_rw = true,
	};

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);

	/* Case 1: aligned base and size. */
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Case 2: base aligned to neither 4K nor 16K. */
	args.ptr = (uint64_t)region + 0x800;
	args.size -= 0x1000;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Case 3: base 4K-aligned but not necessarily 16K-aligned. */
	args.ptr = (uint64_t)region + 0x1000;
	args.size -= 0x1000;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	munmap(region, region_len);
}
232 
T_DECL(vm_upl_ro_on_rx,
    "Generate RO UPL against RX memory region")
{
	bool copy_expected = true;
#if TARGET_OS_OSX
	/**
	 * For embedded targets, UPL creation against RX mappings should always produce a copy due to codesigning.
	 * For MacOS, a copy should only be produced if the SPTM is enabled, due to the SPTM's stricter requirements
	 * for DMA mappings of executable frame types.
	 */
	if (!is_sptm_enabled()) {
		copy_expected = false;
	}
#endif /* TARGET_OS_OSX */

	/*
	 * __builtin_return_address(0) points into this test's own executable text,
	 * giving an RX-mapped target without needing MAP_JIT.  Note the address
	 * is not page-aligned; the kernel hook is presumably expected to cope
	 * with (or round) the unaligned base — confirm against the hook.
	 */
	upl_test_args args = { .ptr = (uint64_t)__builtin_return_address(0), .size = PAGE_SIZE, .test_pattern = 'a',
		               .copy_expected = copy_expected, .should_fail = false, .upl_rw = false };

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Repeat with a base/size shifted so neither end is aligned. */
	args.ptr += 0x100;
	args.size -= 0x200;

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");
}
263 
T_DECL(vm_upl_rw_on_rx,
    "Generate RW UPL against RX memory region")
{
	/*
	 * Target this test's own executable text via the return address; a
	 * writable UPL against executable memory is expected to fail.
	 */
	upl_test_args args = {
		.ptr = (uint64_t)__builtin_return_address(0),
		.size = PAGE_SIZE,
		.test_pattern = 'a',
		.copy_expected = true,
		.should_fail = true,
		.upl_rw = true,
	};

	int64_t request = (int64_t)&args;
	int64_t outcome = 0;
	size_t outcome_len = sizeof(outcome);

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &outcome, &outcome_len, &request, sizeof(request)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Shift and shrink so the request is unaligned at both ends. */
	args.ptr += 0x100;
	args.size -= 0x200;

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &outcome, &outcome_len, &request, sizeof(request)),
	    "sysctlbyname(debug.test.vm_upl)");
}
282 
T_DECL(vm_upl_ro_on_jit,
    "Generate RO UPL against JIT memory region")
{
	if (!is_map_jit_allowed()) {
		T_SKIP("MAP_JIT not allowed for this system configuration");
	}
	/**
	 * Direct RO UPLs against JIT pages should be allowed for non-SPTM targets.
	 * For SPTM targets, a copy is expected due to the SPTM's stricter requirements for DMA
	 * mappings of executable frame types.
	 */
	bool copy_expected = is_sptm_enabled();
	const size_t buf_size = 10 * PAGE_SIZE;
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	/* On RWX-restricted hardware, switch the thread to the RW view so we can fill the JIT region. */
	if (os_thread_self_restrict_rwx_is_supported()) {
		os_thread_self_restrict_rwx_to_rw();
	}

	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		buf[i] = (unsigned int)'a' + i;
	}

	/*
	 * NOTE(review): buffer is filled with the 'a' pattern but test_pattern is 'b'
	 * even though upl_rw is false; other RO tests in this file pass 'a'.  The
	 * kernel hook presumably ignores test_pattern for RO requests — confirm.
	 */
	upl_test_args args = { .ptr = (uint64_t)buf, .size = buf_size, .test_pattern = 'b',
		               .copy_expected = copy_expected, .should_fail = false, .upl_rw = false };

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Second pass: base aligned to neither 4K nor 16K. */
	args.ptr = (uint64_t)buf + 0x800;
	args.size -= 0x1000;

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/* Third pass: base 4K-aligned but not necessarily 16K-aligned. */
	args.ptr = (uint64_t)buf + 0x1000;
	args.size -= 0x1000;

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	munmap(buf, buf_size);
}
330 
T_DECL(vm_upl_rw_on_jit,
    "Generate RW UPL against JIT memory region")
{
	if (process_is_translated()) {
		/* TODO: Remove this once rdar://142438840 is fixed. */
		T_SKIP("Guard exception handling does not work correctly with Rosetta (rdar://142438840), skipping...");
	}
	if (!is_map_jit_allowed()) {
		T_SKIP("MAP_JIT not allowed for this system configuration");
	}
	const size_t buf_size = 10 * PAGE_SIZE;
	/**
	 * Direct RW UPLs against JIT pages should be allowed for non-SPTM targets.
	 * For SPTM targets, UPL creation should fail due to the SPTM's stricter requirements for DMA
	 * mappings of executable frame types.
	 */
	bool should_fail = is_sptm_enabled();
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	upl_test_args args = { .ptr = (uint64_t)buf, .size = buf_size, .test_pattern = 'b',
		               .copy_expected = false, .should_fail = should_fail, .upl_rw = true };

	__block int64_t addr = (int64_t)&args;
	__block int64_t result = 0;
	__block size_t s = sizeof(result);

	/* Ensure that guard exceptions will not be fatal to the test process. */
	enable_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY);

	/**
	 * Iterate 3 times to guarantee buffer offsets that are neither 4K nor 16K aligned,
	 * and 4K but not necessarily 16K aligned.
	 *
	 * Fixed: the loop previously ran only 2 iterations (i < 2), which covered
	 * offsets 0 and +0x800 but never the +0x1000 (4K- but not 16K-aligned)
	 * case described above and exercised by the sibling tests in this file.
	 */
	for (int i = 0; i < 3; i++) {
		exc_guard_helper_info_t exc_info;
		bool caught_exception =
		    block_raised_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY, &exc_info, ^{
			T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
			"sysctlbyname(debug.test.vm_upl)");
		});
		if (args.should_fail) {
			T_ASSERT_TRUE(caught_exception, "Failing test should also throw guard exception");
			T_ASSERT_EQ(exc_info.guard_flavor, kGUARD_EXC_SEC_UPL_WRITE_ON_EXEC_REGION,
			    "Failing test throws the expected guard exception flavor");
			T_ASSERT_EQ(exc_info.catch_count, 1, "Failing test should throw exactly one guard exception");
		} else {
			T_ASSERT_FALSE(caught_exception, "Passing test should not throw guard exception");
		}

		/* Advance to the next offset/size combination. */
		args.ptr += 0x800;
		args.size -= 0x1000;
	}

	munmap(buf, buf_size);
}
387 
T_DECL(vm_upl_ro_on_commpage,
    "Generate RO UPL against comm page")
{
#if !TARGET_OS_OSX
	T_SKIP("Comm page only guaranteed to be within user address range on MacOS, skipping...");
#else
#ifndef __arm64__
	T_SKIP("Comm page only has UPL-incompatible mapping on arm64, skipping...");
#else
	/*
	 * The comm page is a kernel-owned, specially-mapped page; even a read-only
	 * UPL against it is expected to fail (should_fail = true).
	 */
	upl_test_args args = { .ptr = (uint64_t)_COMM_PAGE_START_ADDRESS, .size = 0x1000, .test_pattern = 'b',
		               .copy_expected = false, .should_fail = true, .upl_rw = false };

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");
#endif /* !defined(__arm64__) */
#endif /* !TARGET_OS_OSX */
}
408 
T_DECL(vm_upl_partial_cow,
    "Generate a UPL that requires CoW setup for part of an object")
{
	const size_t buf_size = 10 * PAGE_SIZE;
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	/* Seed the 'a' pattern before any protection changes. */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		buf[i] = (unsigned int)'a' + i;
	}

	/*
	 * Mark a portion of the buffer RO, which will split off a separate vm_map_entry backed by the same
	 * vm_object.  This will produce an internal COPY_SYMMETRIC object with refcount > 1, which is the
	 * baseline requirement for partial CoW setup by vm_map_create_upl().
	 */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(mprotect((char*)buf + (8 * PAGE_SIZE), 2 * PAGE_SIZE, PROT_READ), "mprotect");

	/*
	 * Request a non-page-aligned UPL against the RW region of the buffer, to ensure that partial CoW
	 * setup still ultimately uses a page-aligned buffer as required for vm_map_entry clipping.
	 */
	upl_test_args args = { .ptr = (uint64_t)buf + 0x800, .size = 2 * PAGE_SIZE, .test_pattern = 'b',
		               .copy_expected = false, .should_fail = false, .upl_rw = true };

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl)");

	/*
	 * Words outside the UPL's [0x800, 0x800+size) window must retain the original
	 * 'a' pattern; words inside must carry the kernel-written 'b' pattern rebased
	 * to the UPL's start offset.
	 */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		if ((i < (0x800 / sizeof(*buf))) || (i >= ((0x800 + args.size) / sizeof(*buf)))) {
			T_QUIET; T_ASSERT_EQ(buf[i], (unsigned int)'a' + i,
			    "buf[%u]='%u' == '%u'", i, buf[i], (unsigned int)'a' + i);
		} else {
			T_QUIET; T_ASSERT_EQ(buf[i], (unsigned int)'b' + i - (unsigned int)(0x800 / sizeof(*buf)),
			    "buf[%u]='%u' == '%u'",
			    i, buf[i], (unsigned int)'b' + i - (unsigned int)(0x800 / sizeof(*buf)));
		}
	}

	munmap(buf, buf_size);
}
453 
/*
 * Argument blob passed by address to the debug.test.vm_upl_object sysctl.
 * Field layout must match the kernel-side definition of the test hook
 * exactly — do not reorder or repack.
 */
typedef struct {
	uint64_t ptr;       /* user VA of the target region */
	uint32_t size;      /* length in bytes of the region */
	bool upl_rw;        /* request a writable (RW) UPL rather than read-only */
	bool should_fail;   /* whether UPL creation is expected to fail */
	uint8_t fault_prot; /* VM_PROT_* fault for the kernel to simulate while the UPL is in flight; VM_PROT_NONE = no fault */
} upl_object_test_args;
461 
T_DECL(vm_upl_rw_on_exec_object,
    "Attempt to create a writable UPL against an object containing executable pages")
{
	/**
	 * This test is meant to exercise functionality that is currently SPTM-specific.
	 * It also relies on the assumption that JIT regions are faulted in an all-or-nothing
	 * manner, so that the write faults generated by our buffer fill below will also
	 * produce executable mappings of the underlying JIT pages.  This happens to hold
	 * true on SPTM-enabled devices because all of them use xPRR, but may not hold true
	 * in general.
	 */
	if (!is_sptm_enabled()) {
		T_SKIP("Exec object test only supported on SPTM-enabled devices, skipping...");
	}
	if (process_is_translated()) {
		/* TODO: Remove this once rdar://142438840 is fixed. */
		T_SKIP("Guard exception handling does not work correctly with Rosetta (rdar://142438840), skipping...");
	}
	if (!is_map_jit_allowed()) {
		T_SKIP("MAP_JIT not allowed for this system configuration");
	}

	const size_t buf_size = 10 * PAGE_SIZE;
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	/* Switch to the RW view on RWX-restricted hardware so the fill below can write. */
	if (os_thread_self_restrict_rwx_is_supported()) {
		os_thread_self_restrict_rwx_to_rw();
	}

	/* Fault in every page of the JIT region via write faults. */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		buf[i] = (unsigned int)'a' + i;
	}

	/* Ensure that guard exceptions will not be fatal to the test process. */
	enable_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY);

	/* RW UPL against an exec-bearing object: expected to fail (no fault simulation requested). */
	upl_object_test_args args = { .ptr = (uint64_t)buf, .size = buf_size, .upl_rw = true, .should_fail = true, .fault_prot = VM_PROT_NONE };

	__block int64_t addr = (int64_t)&args;
	__block int64_t result = 0;
	__block size_t s = sizeof(result);
	exc_guard_helper_info_t exc_info;
	bool caught_exception =
	    block_raised_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY, &exc_info, ^{
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl_object", &result, &s, &addr, sizeof(addr)),
		"sysctlbyname(debug.test.vm_upl_object)");
	});
	if (args.should_fail) {
		T_ASSERT_TRUE(caught_exception, "Failing test should also throw guard exception");
		T_ASSERT_EQ(exc_info.guard_flavor, kGUARD_EXC_SEC_IOPL_ON_EXEC_PAGE,
		    "Failing test throws the expected guard exception flavor");
		T_ASSERT_EQ(exc_info.catch_count, 1, "Failing test should throw exactly one guard exception");
	} else {
		T_ASSERT_FALSE(caught_exception, "Passing test should not throw guard exception");
	}

	munmap(buf, buf_size);
}
521 
T_DECL(vm_upl_ro_with_exec_fault,
    "Attempt to exec-fault a region while a UPL is in-flight for that region")
{
	/**
	 * This test is meant to exercise functionality that is currently SPTM-specific.
	 */
	if (!is_sptm_enabled()) {
		T_SKIP("Exec-fault test only supported on SPTM-enabled devices, skipping...");
	}
	if (process_is_translated()) {
		/* TODO: Remove this once rdar://142438840 is fixed. */
		T_SKIP("Guard exception handling does not work correctly with Rosetta (rdar://142438840), skipping...");
	}
	if (!is_map_jit_allowed()) {
		T_SKIP("MAP_JIT not allowed for this system configuration");
	}

	const size_t buf_size = 10 * PAGE_SIZE;
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANON | MAP_PRIVATE | MAP_JIT, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	/* Fill under the RW view, then flip back to RX before the exec-fault simulation. */
	if (os_thread_self_restrict_rwx_is_supported()) {
		os_thread_self_restrict_rwx_to_rw();
	}
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		buf[i] = (unsigned int)'a' + i;
	}
	if (os_thread_self_restrict_rwx_is_supported()) {
		os_thread_self_restrict_rwx_to_rx();
	}

	/* Ensure that guard exceptions will not be fatal to the test process. */
	enable_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY);

	/*
	 * The RO UPL itself should succeed; the guard exception is expected from
	 * the RX fault the kernel simulates while the UPL is still in flight.
	 */
	upl_object_test_args args = { .ptr = (uint64_t)buf, .size = buf_size, .upl_rw = false, .should_fail = false,
		                      .fault_prot = VM_PROT_EXECUTE | VM_PROT_READ };

	__block int64_t addr = (int64_t)&args;
	__block int64_t result = 0;
	__block size_t s = sizeof(result);
	exc_guard_helper_info_t exc_info;
	bool caught_exception =
	    block_raised_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY, &exc_info, ^{
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl_object", &result, &s, &addr, sizeof(addr)),
		"sysctlbyname(debug.test.vm_upl_object)");
	});
	T_ASSERT_TRUE(caught_exception, "Exec fault should throw guard exception");
	T_ASSERT_EQ(exc_info.guard_flavor, kGUARD_EXC_SEC_EXEC_ON_IOPL_PAGE,
	    "Attempted exec fault throws the expected guard exception flavor");
	T_ASSERT_EQ(exc_info.catch_count, 1, "Attempted exec fault should throw exactly one guard exception");

	munmap(buf, buf_size);
}
575 
T_DECL(vm_upl_ro_with_write_fault_on_exec_file,
    "Attempt to write-fault a file-backed executable region while a UPL is in-flight for that region")
{
#if TARGET_OS_OSX
	/**
	 * Test the bug uncovered in rdar://158063220.  This requires an on-the-fly retype to an
	 * executable frame type in conjunction with a write fault on a file-backed page that is also
	 * being cleaned in place.  This implies a "legacy JIT" mapping, since modern MAP_JIT mappings
	 * aren't allowed for file-backed memory.  The existing vm_upl_object test hook can already
	 * simulate the concurrent retype and in-place clean, so this test is mostly a matter of
	 * setting up the file-backed memory correctly.
	 */
	/**
	 * This test is meant to exercise functionality that is currently SPTM-specific.
	 */
	if (!is_sptm_enabled()) {
		T_SKIP("Write-fault-on-exec test only supported on SPTM-enabled devices, skipping...");
	}

	if (process_is_translated()) {
		/* TODO: Remove this once rdar://142438840 is fixed. */
		T_SKIP("Guard exception handling does not work correctly with Rosetta (rdar://142438840), skipping...");
	}

	/* First, generate our backing file. */
	const size_t buf_size = 10 * PAGE_SIZE;
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		buf[i] = (unsigned int)'a' + i;
	}

	ssize_t nbytes;
	int fd;
	char tmp_file_name[PATH_MAX] = "/tmp/vm_upl_data.XXXXXXXX";
	/*
	 * Fixed: use mkstemp() instead of mktemp()+open().  mktemp() is racy, and the
	 * previous open(O_CREAT | O_RDWR) call omitted the mandatory mode argument,
	 * leaving the new file's permission bits unspecified.
	 */
	T_WITH_ERRNO; T_QUIET; T_ASSERT_GE(fd = mkstemp(tmp_file_name),
	    0, "create temp file");
	T_WITH_ERRNO; T_QUIET; T_ASSERT_EQ(nbytes = write(fd, buf, buf_size),
	    (ssize_t)buf_size, "write %zu bytes", buf_size);
	munmap(buf, buf_size);

	/* Map the backing file. */
	buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_FILE | MAP_SHARED, fd, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	/**
	 * Twiddle the mapping permissions between RX and RW.  This will mark the mapping as "user debug",
	 * which will induce a retype when the backing pages are faulted in.
	 */
	kern_return_t kr = mach_vm_protect(mach_task_self(), (mach_vm_address_t)buf, buf_size, FALSE, VM_PROT_READ | VM_PROT_EXECUTE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_protect(RX)");

	kr = mach_vm_protect(mach_task_self(), (mach_vm_address_t)buf, buf_size, FALSE, VM_PROT_READ | VM_PROT_WRITE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_protect(RW)");

	/* Ensure that guard exceptions will not be fatal to the test process. */
	enable_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY);

	/* The RO UPL should succeed; the simulated write fault is expected to raise the guard. */
	upl_object_test_args args = { .ptr = (uint64_t)buf, .size = buf_size, .upl_rw = false, .should_fail = false,
		                      .fault_prot = VM_PROT_WRITE | VM_PROT_READ };

	__block int64_t addr = (int64_t)&args;
	__block int64_t result = 0;
	__block size_t s = sizeof(result);
	exc_guard_helper_info_t exc_info;
	bool caught_exception =
	    block_raised_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY, &exc_info, ^{
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl_object", &result, &s, &addr, sizeof(addr)),
		"sysctlbyname(debug.test.vm_upl_object)");
	});
	/*
	 * Fixed: translated processes T_SKIP out of this test above, so the previous
	 * `if (!process_is_translated())` guard around these assertions was dead code;
	 * assert unconditionally, matching vm_upl_ro_with_exec_fault.
	 */
	T_ASSERT_TRUE(caught_exception, "Exec fault should throw guard exception");
	T_ASSERT_EQ(exc_info.guard_flavor, kGUARD_EXC_SEC_EXEC_ON_IOPL_PAGE,
	    "Attempted exec fault throws the expected guard exception flavor");
	T_ASSERT_EQ(exc_info.catch_count, 1, "Attempted exec fault should throw exactly one guard exception");

	munmap(buf, buf_size);
	close(fd);
	/* Clean up the backing file (previously leaked). */
	unlink(tmp_file_name);
#else
	T_SKIP("Write-fault on exec file requires non-MAP_JIT RWX support");
#endif /* TARGET_OS_OSX */
}
661 
/*
 * Argument blob passed by address to the debug.test.vm_upl_submap sysctl.
 * Field layout must match the kernel-side definition of the test hook
 * exactly — do not reorder or repack.
 */
typedef struct {
	uint64_t ptr;      /* user VA of the source buffer backing the submap */
	uint64_t upl_base; /* address within the submap region at which to request the UPL */
	uint32_t size;     /* length in bytes of the source buffer */
	uint32_t upl_size; /* length in bytes of the UPL request */
	bool upl_rw;       /* request a writable (RW) UPL rather than read-only */
} upl_submap_test_args;
669 
T_DECL(vm_upl_ro_on_submap,
    "Generate RO UPL against a submap region")
{
	const size_t region_len = 10 * PAGE_SIZE;
	unsigned int *region = mmap(NULL, region_len, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(region, MAP_FAILED, "map buffer");

	/* Seed a recognizable pattern for the kernel-side hook to check. */
	const size_t nwords = region_len / sizeof(*region);
	for (size_t w = 0; w < nwords; w++) {
		region[w] = (unsigned int)'a' + (unsigned int)w;
	}

	upl_submap_test_args args = {
		.ptr = (uint64_t)region,
		.size = region_len,
		.upl_base = 0x180000000ULL,
		.upl_size = region_len,
		.upl_rw = false,
	};

	int64_t request = (int64_t)&args;
	int64_t outcome = 0;
	size_t outcome_len = sizeof(outcome);

	/*
	 * Three passes: aligned base, then two successive 0x800 shifts with the
	 * request shrinking by one 4K page each time.
	 */
	for (int pass = 0; pass < 3; pass++) {
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl_submap", &outcome, &outcome_len, &request, sizeof(request)),
		    "sysctlbyname(debug.test.vm_upl_submap)");
		args.upl_base += 0x800;
		args.upl_size -= 0x1000;
	}

	munmap(region, region_len);
}
704 
T_DECL(vm_upl_rw_on_submap,
    "Generate RW UPL against a submap region")
{
	const size_t buf_size = 10 * PAGE_SIZE;
	unsigned int *buf = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
	T_QUIET; T_ASSERT_NE_PTR(buf, MAP_FAILED, "map buffer");

	/* Seed a recognizable pattern for the kernel-side hook to check. */
	for (unsigned int i = 0; i < (buf_size / sizeof(*buf)); i++) {
		buf[i] = (unsigned int)'a' + i;
	}

	upl_submap_test_args args = { .ptr = (uint64_t)buf, .size = buf_size, .upl_base = 0x180000000ULL,
		                      .upl_size = buf_size, .upl_rw = true };

	int64_t addr = (int64_t)&args;
	int64_t result = 0;
	size_t s = sizeof(result);
	/* Pass 1: fully aligned base and size. */
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl_submap", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl_submap)");

	/* Pass 2: base aligned to neither 4K nor 16K. */
	args.upl_base += 0x800;
	args.upl_size -= 0x1000;

	/* Fixed: this assertion message previously lacked its closing parenthesis. */
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl_submap", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl_submap)");

	/* Pass 3: base 4K-aligned but not necessarily 16K-aligned. */
	args.upl_base += 0x800;
	args.upl_size -= 0x1000;

	T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.test.vm_upl_submap", &result, &s, &addr, sizeof(addr)),
	    "sysctlbyname(debug.test.vm_upl_submap)");

	munmap(buf, buf_size);
}
739