xref: /xnu-12377.61.12/tests/exc_guard_helper_test.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * exc_guard_helper_test.c
31  *
32  * Test the testing helper functions in exc_guard_helper.h.
33  */
34 
35 #include "exc_guard_helper.h"
36 
37 #include <darwintest.h>
38 #include <sys/types.h>
39 #include <sys/sysctl.h>
40 #include <mach/mach.h>
41 #include <mach/mach_vm.h>
42 #include <mach/task_info.h>
43 #include "test_utils.h"
44 
45 T_GLOBAL_META(
46 	T_META_NAMESPACE("xnu"),
47 	T_META_RADAR_COMPONENT_NAME("xnu"),
48 	T_META_RADAR_COMPONENT_VERSION("vm"),
49 	T_META_RUN_CONCURRENTLY(true),
50 	T_META_ALL_VALID_ARCHS(true)
51 	);
52 
/*
 * Convenience macro for compile-time array size (element count).
 * The pragmas temporarily promote -Wsizeof-pointer-div to an error so
 * that accidentally passing a pointer (rather than a true array),
 * where sizeof(ptr)/sizeof(ptr[0]) would silently compute nonsense,
 * fails to compile instead.
 */
#define countof(array)                                                  \
	_Pragma("clang diagnostic push")                                \
	_Pragma("clang diagnostic error \"-Wsizeof-pointer-div\"")      \
	(sizeof(array)/sizeof((array)[0]))                              \
	_Pragma("clang diagnostic pop")
59 
60 /*
61  * Return true if [query_start, query_start + query_size) is unallocated memory.
62  */
63 static bool
is_hole(mach_vm_address_t query_start,mach_vm_size_t query_size)64 is_hole(mach_vm_address_t query_start, mach_vm_size_t query_size)
65 {
66 	mach_vm_address_t entry_start = query_start;
67 	mach_vm_size_t entry_size;
68 	vm_region_submap_info_data_64_t info;
69 	uint32_t depth = 0;
70 	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
71 	kern_return_t kr = mach_vm_region_recurse(mach_task_self(),
72 	    &entry_start, &entry_size, &depth,
73 	    (vm_region_recurse_info_t)&info, &count);
74 
75 	if (kr == KERN_INVALID_ADDRESS) {
76 		/*
77 		 * query_start is unmapped, and so is everything after it,
78 		 * therefore the query range is a hole
79 		 */
80 		return true;
81 	}
82 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_region");
83 
84 	/* this code does not handle submaps */
85 	T_QUIET; T_ASSERT_EQ(depth, 0, "submaps unimplemented");
86 
87 	/*
88 	 * entry_start is mapped memory, and either
89 	 * (1) entry_start's mapping contains query_start, OR
90 	 * (2) query_start is unmapped and entry_start is the next mapped memory
91 	 */
92 
93 	if (entry_start >= query_start + query_size) {
94 		/*
95 		 * entry_start's mapping does not contain query_start,
96 		 * and entry_start's mapping begins after the query range,
97 		 * therefore the query range is a hole
98 		 */
99 		return true;
100 	} else {
101 		return false;
102 	}
103 }
104 
105 /* Call enable_exc_guard_of_type(), and test its behavior. */
106 static void
enable_exc_guard_of_type_and_verify(unsigned int guard_type)107 enable_exc_guard_of_type_and_verify(unsigned int guard_type)
108 {
109 	struct {
110 		const char *name;
111 		task_exc_guard_behavior_t all_mask;
112 		task_exc_guard_behavior_t deliver_mask;
113 		task_exc_guard_behavior_t fatal_mask;
114 	} guards[] = {
115 		[GUARD_TYPE_VIRT_MEMORY] = {
116 			.name = "VM",
117 			.all_mask = TASK_EXC_GUARD_VM_ALL,
118 			.deliver_mask = TASK_EXC_GUARD_VM_DELIVER,
119 			.fatal_mask = TASK_EXC_GUARD_VM_FATAL
120 		},
121 		[GUARD_TYPE_MACH_PORT] = {
122 			.name = "Mach port",
123 			.all_mask = TASK_EXC_GUARD_MP_ALL,
124 			.deliver_mask = TASK_EXC_GUARD_MP_DELIVER,
125 			.fatal_mask = TASK_EXC_GUARD_MP_FATAL
126 		}
127 	};
128 
129 	kern_return_t kr;
130 	task_exc_guard_behavior_t disabling_behavior, old_behavior, new_behavior;
131 
132 	T_QUIET; T_ASSERT_TRUE(guard_type < countof(guards) && guards[guard_type].name != NULL,
133 	    "guard type in enable_exc_guard_of_type_and_verify");
134 
135 	/* disable guard exceptions of this type, then verify that enable_exc_guard_of_type enables them */
136 
137 	kr = task_get_exc_guard_behavior(mach_task_self(), &disabling_behavior);
138 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "get old behavior");
139 	disabling_behavior &= ~guards[guard_type].all_mask;
140 	kr = task_set_exc_guard_behavior(mach_task_self(), disabling_behavior);
141 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "set empty behavior");
142 
143 	old_behavior = enable_exc_guard_of_type(guard_type);
144 	T_QUIET; T_ASSERT_EQ(old_behavior, disabling_behavior, "enable_exc_guard_of_type return value");
145 	T_QUIET; T_ASSERT_FALSE(old_behavior & guards[guard_type].deliver_mask,
146 	    "%s guard exceptions must not be enabled", guards[guard_type].name);
147 
148 	kr = task_get_exc_guard_behavior(mach_task_self(), &new_behavior);
149 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "get new behavior");
150 	T_ASSERT_TRUE(new_behavior & guards[guard_type].deliver_mask,
151 	    "enable_exc_guard_of_type enabled %s guard exceptions", guards[guard_type].name);
152 	T_ASSERT_FALSE(new_behavior & guards[guard_type].fatal_mask,
153 	    "enable_exc_guard_of_type set %s guard exceptions to non-fatal", guards[guard_type].name);
154 }
155 
156 
T_DECL(exc_guard_helper_test_vm,
    "test the test helper function block_raised_exc_guard_of_type with VM guard exceptions")
{
	if (process_is_translated()) {
		T_SKIP("VM guard exceptions not supported on Rosetta (rdar://142438840)");
	}

	kern_return_t kr;
	exc_guard_helper_info_t exc_info;

	exc_guard_helper_init();
	enable_exc_guard_of_type_and_verify(GUARD_TYPE_VIRT_MEMORY);

	/*
	 * Test guard exceptions by deallocating unallocated VM space.
	 * Problem: Rosetta asynchronously allocates memory in the process
	 * to store translated instructions. These allocations can land
	 * inside our unallocated space, disrupting our test and crashing
	 * after we call vm_deallocate() on space that we thought was empty.
	 * Solution:
	 * - use VM_FLAGS_RANDOM_ADDR in the hope of moving our allocation
	 *   away from VM's ordinary next allocation space
	 * - try to verify that the unallocated space is empty before
	 *   calling vm_deallocate, and retry several times if it is not empty
	 */

/* give up after this many attempts to find an undisturbed hole */
#define LAST_RETRY 10
	for (int retry_count = 0; retry_count <= LAST_RETRY; retry_count++) {
		/* allocate three pages */
		mach_vm_address_t allocated = 0;
		kr = mach_vm_allocate(mach_task_self(), &allocated, PAGE_SIZE * 3,
		    VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "allocate space");

		/* deallocate the page in the middle; no EXC_GUARD from successful deallocation */
		if (block_raised_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY, &exc_info, ^{
			kern_return_t kr;
			kr = mach_vm_deallocate(mach_task_self(), allocated + PAGE_SIZE, PAGE_SIZE);
			T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "create hole");
		})) {
			T_FAIL("unexpected guard exception");
		} else {
			T_ASSERT_EQ(exc_info.catch_count, 0, "block_raised_exc_guard_of_type(VM) with no exceptions");
		}

		/* try to deallocate the hole, twice, and detect the guard exceptions */
		/* __block so the block below can tell this loop to retry */
		__block bool retry = false;
		bool caught_exception = block_raised_exc_guard_of_type(GUARD_TYPE_VIRT_MEMORY, &exc_info, ^{
			kern_return_t kr;

			/* deallocate page-hole-page; EXC_GUARD expected from deallocating a hole */
			if (!is_hole(allocated + PAGE_SIZE, PAGE_SIZE)) {
			        retry = true;  /* somebody allocated inside our unallocated space; retry */
			        return;
			}
			kr = mach_vm_deallocate(mach_task_self(), allocated, PAGE_SIZE * 3);
			T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate a hole");

			/* deallocate again, now all holes; EXC_GUARD expected from deallocating a hole */
			if (!is_hole(allocated, PAGE_SIZE * 3)) {
			        retry = true;  /* somebody allocated inside our unallocated space; retry */
			        return;
			}
			kr = mach_vm_deallocate(mach_task_self(), allocated, PAGE_SIZE * 3);
			T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate a hole again");

			if (!is_hole(allocated, PAGE_SIZE * 3)) {
			        retry = true;  /* somebody allocated inside our unallocated space; retry */
			        return;
			}
		});

		if (retry) {
			if (retry_count < LAST_RETRY) {
				T_LOG("unallocated space was found to be allocated, retrying");
			} else {
				T_FAIL("intended unallocated space was repeatedly found to be allocated, giving up");
			}
		} else if (caught_exception) {
			/* caught an exception as expected: verify what we caught */
			T_ASSERT_EQ(exc_info.catch_count, 2, "block_raised_exc_guard_of_type(VM) with 2 exceptions");
			T_ASSERT_EQ(exc_info.guard_type, GUARD_TYPE_VIRT_MEMORY, "caught exception's type");
			T_ASSERT_EQ(exc_info.guard_flavor, kGUARD_EXC_DEALLOC_GAP, "caught exception's flavor");
			/* the payload is the start address of the deallocated gap */
			T_ASSERT_EQ(exc_info.guard_payload, allocated + PAGE_SIZE, "caught exception's payload");
			break;  /* done retrying */
		} else {
			/* where's the beef? */
			T_FAIL("no VM guard exception caught");
			break;  /* done retrying */
		}
	}
}
249 
250 
251 T_DECL(exc_guard_helper_test_mach_port,
252     "test the test helper function block_raised_exc_guard_of_type with Mach port guard exceptions")
253 {
254 	kern_return_t kr;
255 	exc_guard_helper_info_t exc_info;
256 	mach_port_t port;
257 
258 	exc_guard_helper_init();
259 	enable_exc_guard_of_type_and_verify(GUARD_TYPE_MACH_PORT);
260 
261 	/*
262 	 * Test guard exceptions by overflowing the send right count for a port.
263 	 */
264 
265 	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
266 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "new port");
267 	kr = mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND);
268 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "make send");
269 
270 	/* add and remove one send right, should succeed */
271 	if (block_raised_exc_guard_of_type(GUARD_TYPE_MACH_PORT, &exc_info, ^{
272 		kern_return_t kr;
273 		kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, +1);
274 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "add one send right");
275 		kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, -1);
276 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "remove one send right");
277 	})) {
278 		T_FAIL("unexpected guard exception");
279 	} else {
280 		T_ASSERT_EQ(exc_info.catch_count, 0, "block_raised_exc_guard_of_type(MACH_PORT) with no exceptions");
281 	}
282 
283 	/* try to overflow the port's send right count, twice, and catch the exceptions */
284 	bool caught_exception = block_raised_exc_guard_of_type(GUARD_TYPE_MACH_PORT, &exc_info, ^{
285 		kern_return_t kr;
286 		unsigned expected_error;
287 		if (process_is_translated()) {
288 		        expected_error = 0x1000013;  /* KERN_UREFS_OVERFLOW plus another bit? */
289 		} else {
290 		        expected_error = KERN_INVALID_VALUE;
291 		}
292 		kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, INT32_MAX);
293 		T_QUIET; T_ASSERT_MACH_ERROR(kr, expected_error, "add too many send rights");
294 		kr = mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, INT32_MAX);
295 		T_QUIET; T_ASSERT_MACH_ERROR(kr, expected_error, "add too many send rights, again");
296 	});
297 	if (caught_exception) {
298 		/* caught an exception as expected: verify what we caught */
299 		T_ASSERT_EQ(exc_info.catch_count, 2, "block_raised_exc_guard_of_type(MACH_PORT) with 2 exceptions");
300 		T_ASSERT_EQ(exc_info.guard_type, GUARD_TYPE_MACH_PORT, "caught exception's type");
301 		T_ASSERT_EQ(exc_info.guard_flavor, kGUARD_EXC_INVALID_VALUE, "caught exception's flavor");
302 		T_ASSERT_EQ(exc_info.guard_target, port, "caught exception's target");
303 	} else {
304 		/* where's the beef? */
305 		T_FAIL("no Mach port guard exception caught");
306 	}
307 }
308