xref: /xnu-12377.41.6/tests/arm_mte_mach_msg.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2024 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <arm_acle.h>
30 #include <darwintest.h>
31 #include <darwintest_multiprocess.h>
32 #include <launch.h>
33 #include <mach/mach_vm.h>
34 #include <mach/message.h>
35 #include <mach-o/dyld.h>
36 #include <servers/bootstrap.h>
37 
38 #include "arm_mte_utilities.h"
39 #include "test_utils.h"
40 
/*
 * Suite-wide metadata: run serially (tests share one bootstrap service name
 * and fork children), and ignore the crash reports produced by children that
 * are intentionally killed for MTE policy violations.
 */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.arm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("arm"),
	T_META_RUN_CONCURRENTLY(false),
	T_META_IGNORECRASHES(".*mte_mach_msg.*"),
	T_META_CHECK_LEAKS(false));
48 
49 #define SERVICE_NAME "com.apple.xnu.test.arm_mte_sharing"
50 #define VIRTUAL_COPY_SZ ((32 * 1024) + 15)
51 
52 static const mach_vm_size_t sz_rounded = (VIRTUAL_COPY_SZ + (MTE_GRANULE_SIZE - 1)) & (unsigned)~((signed)(MTE_GRANULE_SIZE - 1));
53 static const mach_msg_size_t memory_size_options[] = { MTE_GRANULE_SIZE, MTE_GRANULE_SIZE*4, KERNEL_BUFFER_COPY_THRESHOLD, VIRTUAL_COPY_SZ };
54 #define count_of(x) (sizeof(x) / sizeof(x[0]))
55 
/* Message layout for the out-of-line memory tests: one OOL descriptor. */
typedef struct {
	mach_msg_header_t header;
	mach_msg_body_t body;
	mach_msg_ool_descriptor_t dsc;
	mach_msg_trailer_t trailer; /* receive side only; excluded from msgh_size on send */
} ipc_complex_ool_message;
62 
/*
 * Message layout for the memory-entry tests: a port descriptor carrying the
 * memory entry right, plus inline metadata describing how it was created so
 * the receiver knows what to validate.
 */
typedef struct {
	mach_msg_header_t header;
	mach_msg_body_t body;
	mach_msg_port_descriptor_t dsc;
	bool is_share;              /* true when the entry was made without MAP_MEM_VM_COPY */
	memory_object_size_t size;  /* size requested when the entry was created */
	mach_msg_trailer_t trailer; /* receive side only */
} ipc_complex_port_message;
71 
/* Pattern written to every 64-bit word of test memory; receivers verify it */
static const uint64_t DATA = 0xFEDBCA;
/* Helpers */
74 static mach_port_t
server_checkin(void)75 server_checkin(void)
76 {
77 	mach_port_t mp;
78 
79 	kern_return_t kr = bootstrap_check_in(bootstrap_port, SERVICE_NAME, &mp);
80 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "bootstrap_check_in");
81 	return mp;
82 }
83 
84 static mach_port_t
server_lookup(void)85 server_lookup(void)
86 {
87 	mach_port_t mp;
88 
89 	kern_return_t kr = bootstrap_look_up(bootstrap_port, SERVICE_NAME, &mp);
90 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "bootstrap_look_up");
91 	return mp;
92 }
93 
94 static unsigned short
get_shadow_depth(void * ptr)95 get_shadow_depth(void* ptr)
96 {
97 	vm_address_t address = (vm_address_t) ptr;
98 	unsigned int depth = 1;
99 	vm_size_t size;
100 	struct vm_region_submap_info_64 info;
101 	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
102 	kern_return_t kr = vm_region_recurse_64(mach_task_self(), &address, &size,
103 	    &depth, (vm_region_info_t) &info, &count);
104 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "get_shadow_depth: vm_region_recurse_64");
105 	return info.shadow_depth;
106 }
107 
108 /* Client/server code for out-of-line memory tests */
109 static void
send_ool_memory(mach_port_t svc_port,void * addr,bool deallocate,mach_msg_copy_options_t copy,mach_msg_size_t size)110 send_ool_memory(
111 	mach_port_t svc_port,
112 	void *addr,
113 	bool deallocate,
114 	mach_msg_copy_options_t copy,
115 	mach_msg_size_t size)
116 {
117 	ipc_complex_ool_message msg;
118 	bzero(&msg, sizeof(ipc_complex_ool_message));
119 	mach_msg_header_t hdr = {
120 		.msgh_remote_port = svc_port,
121 		.msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, MACH_MSGH_BITS_COMPLEX),
122 		.msgh_id = 1,
123 		.msgh_size = offsetof(ipc_complex_ool_message, trailer),
124 	};
125 	mach_msg_ool_descriptor_t dsc = {
126 		.address = addr,
127 		.deallocate = deallocate,
128 		.copy = copy,
129 		.size = size,
130 		.type = MACH_MSG_OOL_DESCRIPTOR,
131 	};
132 	msg.header = hdr;
133 	msg.body.msgh_descriptor_count = 1;
134 	msg.dsc = dsc;
135 
136 	T_LOG("sending message, size: %u, deallocate: %d, copy option: %s",
137 	    size,
138 	    deallocate,
139 	    (copy == MACH_MSG_VIRTUAL_COPY) ? "virtual copy" : "physical copy");
140 	kern_return_t kr = mach_msg(&msg.header, MACH_SEND_MSG, msg.header.msgh_size,
141 	    0, MACH_PORT_NULL, 10000, 0);
142 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_msg send");
143 }
144 
145 static uint64_t *
tag_pointer(uint64_t ** untagged_ptr)146 tag_pointer(uint64_t **untagged_ptr)
147 {
148 	uint64_t mask;
149 
150 	/* Tag the memory */
151 	uint64_t *tagged_ptr = __arm_mte_create_random_tag(*untagged_ptr, mask);
152 	T_QUIET; T_EXPECT_NE_PTR(*untagged_ptr, tagged_ptr,
153 	    "Random tag was not taken from excluded tag set");
154 
155 	for (uint64_t i = 0; i < sz_rounded / MTE_GRANULE_SIZE; ++i) {
156 		uintptr_t cur_ptr = (uintptr_t)tagged_ptr + i * MTE_GRANULE_SIZE;
157 		__arm_mte_set_tag((void*) cur_ptr);
158 	}
159 
160 	/* Write to the tagged memory */
161 	for (uint64_t i = 0; i < sz_rounded / sizeof(*tagged_ptr); ++i) {
162 		tagged_ptr[i] = DATA;
163 	}
164 	return tagged_ptr;
165 }
166 
167 static void
send_ool_memory_helper(void * addr,boolean_t deallocate,mach_msg_copy_options_t copy_option,mach_msg_size_t size,boolean_t is_memory_tagged,boolean_t expect_pass)168 send_ool_memory_helper(
169 	void *addr,
170 	boolean_t deallocate,
171 	mach_msg_copy_options_t copy_option,
172 	mach_msg_size_t size,
173 	boolean_t is_memory_tagged,
174 	boolean_t expect_pass)
175 {
176 	if (expect_pass) {
177 		mach_port_t port = server_lookup();
178 		send_ool_memory(port, addr, deallocate, copy_option, size);
179 	} else {
180 		char description[100];
181 		snprintf(description, sizeof(description),
182 		    "(copy_options = %d) (is memory tagged = %d) (size = %d) mach_msg(deallocate=%d)",
183 		    copy_option, is_memory_tagged, size, deallocate);
184 		expect_sigkill(^{
185 			/* expect_sigkill forks, and the child does not gain the parent's port rights */
186 			mach_port_t port = server_lookup();
187 			send_ool_memory(port, addr, deallocate, copy_option, size);
188 		}, description);
189 	}
190 }
191 
192 static void
reset_tagged_pointer(uint64_t * ptr)193 reset_tagged_pointer(uint64_t *ptr)
194 {
195 	uint64_t mask;
196 	/* We want to allocate the max amount of memory we'll need for the test */
197 	uint64_t *untagged_ptr;
198 	untagged_ptr = allocate_tagged_memory(sz_rounded, &mask);
199 	ptr = tag_pointer(&untagged_ptr);
200 
201 	/* Write to the memory */
202 	for (uint64_t i = 0; i < sz_rounded / sizeof(*ptr); ++i) {
203 		ptr[i] = DATA;
204 	}
205 }
206 
/*
 * Drive every OOL-send combination with tagged memory. Sends without
 * deallocation are allowed; sends with deallocate=true on a tagged pointer
 * are policy violations and must kill the (forked) sender.
 */
static void
ool_memory_assertions(uint64_t *ptr)
{
	/*
	 * The last parameter of send_ool_memory_helper
	 * denotes whether the case is expected to complete normally or not.
	 */
	send_ool_memory_helper(ptr, FALSE, MACH_MSG_VIRTUAL_COPY, MTE_GRANULE_SIZE, true, true);
	send_ool_memory_helper(ptr, FALSE, MACH_MSG_VIRTUAL_COPY, MTE_GRANULE_SIZE * 4, true, true);
	send_ool_memory_helper(ptr, FALSE, MACH_MSG_VIRTUAL_COPY, KERNEL_BUFFER_COPY_THRESHOLD, true, true);

	/*
	 * Sending >32k bytes of tagged memory as a virtual copy (deallocate == false)
	 * should always succeed under contemporary VM policy.
	 */
	send_ool_memory_helper(ptr, FALSE, MACH_MSG_VIRTUAL_COPY, VIRTUAL_COPY_SZ, true, true);

	send_ool_memory_helper(ptr, FALSE, MACH_MSG_PHYSICAL_COPY, MTE_GRANULE_SIZE, true, true);
	send_ool_memory_helper(ptr, FALSE, MACH_MSG_PHYSICAL_COPY, MTE_GRANULE_SIZE * 4, true, true);
	send_ool_memory_helper(ptr, FALSE, MACH_MSG_PHYSICAL_COPY, KERNEL_BUFFER_COPY_THRESHOLD, true, true);
	send_ool_memory_helper(ptr, FALSE, MACH_MSG_PHYSICAL_COPY, VIRTUAL_COPY_SZ, true, true);

	/*
	 * mach_msg(deallocate=true) on a tagged pointer is an illegal operation, as
	 * this is functionally equivalent to vm_deallocate() on that same tagged
	 * pointer.
	 */
	send_ool_memory_helper(ptr, TRUE, MACH_MSG_VIRTUAL_COPY, MTE_GRANULE_SIZE, true, false);
	send_ool_memory_helper(ptr, TRUE, MACH_MSG_VIRTUAL_COPY, MTE_GRANULE_SIZE * 4, true, false);
	send_ool_memory_helper(ptr, TRUE, MACH_MSG_VIRTUAL_COPY, KERNEL_BUFFER_COPY_THRESHOLD, true, false);
	/* rdar://152970401: We take the kernel buffer path even above the virtual copy threshold for local MTE movement */
	send_ool_memory_helper(ptr, TRUE, MACH_MSG_VIRTUAL_COPY, VIRTUAL_COPY_SZ, true, false);
	reset_tagged_pointer(ptr);

	send_ool_memory_helper(ptr, TRUE, MACH_MSG_PHYSICAL_COPY, MTE_GRANULE_SIZE, true, false);
	send_ool_memory_helper(ptr, TRUE, MACH_MSG_PHYSICAL_COPY, MTE_GRANULE_SIZE * 4, true, false);
	send_ool_memory_helper(ptr, TRUE, MACH_MSG_PHYSICAL_COPY, KERNEL_BUFFER_COPY_THRESHOLD, true, false);
	/* rdar://152970401: We take the kernel buffer path even above the virtual copy threshold for local MTE movement */
	send_ool_memory_helper(ptr, TRUE, MACH_MSG_PHYSICAL_COPY, VIRTUAL_COPY_SZ, true, false);
	reset_tagged_pointer(ptr);
}
248 
249 static uint64_t *
untagged_ool_memory_assertions(uint64_t * untagged_ptr)250 untagged_ool_memory_assertions(uint64_t *untagged_ptr)
251 {
252 	const mach_msg_copy_options_t copy_options[] = { MACH_MSG_VIRTUAL_COPY, MACH_MSG_PHYSICAL_COPY};
253 	const int copy_options_size = *(&copy_options + 1) - copy_options;
254 
255 	for (int mem_size_index = 0; mem_size_index < count_of(memory_size_options); mem_size_index++) {
256 		for (int copy_index = 0; copy_index < copy_options_size; copy_index++) {
257 			mach_msg_size_t mem_size = memory_size_options[mem_size_index];
258 			mach_msg_copy_options_t copy_option = copy_options[copy_index];
259 			/*
260 			 * The last parameter of send_memory_entry_helper
261 			 * denotes whether the case is expected to complete normally or not.
262 			 * We expect the process to complete normally for all combinations
263 			 * of untagged memory
264 			 */
265 			send_ool_memory_helper(untagged_ptr, FALSE, copy_option, mem_size, false, true);
266 			send_ool_memory_helper(untagged_ptr, TRUE, copy_option, mem_size, false, true);
267 			/* Reallocate the untagged memory for the next invocation, since we used DEALLOCATE=TRUE above */
268 			untagged_ptr = allocate_untagged_memory(sz_rounded);
269 			for (uint64_t i = 0; i < sz_rounded / sizeof(uint64_t); ++i) {
270 				untagged_ptr[i] = DATA;
271 			}
272 		}
273 	}
274 	return untagged_ptr;
275 }
276 
/*
 * Shared client body for the OOL tests: allocate (and optionally tag) a
 * sz_rounded buffer, fill it with DATA, then run the tagged or untagged
 * assertion set. Runs inside assert_normal_exit so the helper exits cleanly
 * even when a forked child is SIGKILLed.
 */
static void
ool_memory_client_template(bool is_tagged)
{
	assert_normal_exit(^{
		T_SETUPBEGIN;
		/* The client must itself have been spawned with MTE enabled */
		validate_proc_pidinfo_mte_status(getpid(), true);
		if (T_STATE == T_STATE_SETUPFAIL) {
		        T_FAIL("client was not spawned under MTE");
		        return;
		}

		uint64_t mask;
		/* We want to allocate the max amount of memory we'll need for the test */
		uint64_t *untagged_ptr;
		if (is_tagged) {
		        untagged_ptr = allocate_tagged_memory(sz_rounded, &mask);
		} else {
		        untagged_ptr = allocate_untagged_memory(sz_rounded);
		}

		/* Tag the memory */
		uint64_t *tagged_ptr = NULL;
		if (is_tagged) {
		        tagged_ptr = tag_pointer(&untagged_ptr);
		}

		/* Write to the memory */
		for (uint64_t i = 0; i < sz_rounded / sizeof(uint64_t); ++i) {
		        if (is_tagged) {
		                tagged_ptr[i] = DATA;
			} else {
		                untagged_ptr[i] = DATA;
			}
		}
		T_SETUPEND;

		if (!is_tagged) {
		        /* mach_msg_send should ALWAYS succeed on all untagged memory entry sizes */
		        untagged_ptr = untagged_ool_memory_assertions(untagged_ptr);
		} else {
		        ool_memory_assertions(tagged_ptr);
		}

		/* Deallocate via the untagged address, which covers both cases */
		T_EXPECT_MACH_SUCCESS(vm_deallocate(mach_task_self(), (vm_address_t)untagged_ptr, sz_rounded), "vm_deallocate");
	}, "ool_memory_client_template");
}
323 
/* Client helper: run the tagged-memory OOL assertions in an MTE process. */
T_HELPER_DECL(ool_memory_client_tagged, "ool_memory_client_tagged")
{
	ool_memory_client_template(true);
}

/* Client helper: run the untagged-memory OOL assertions in an MTE process. */
T_HELPER_DECL(ool_memory_client_untagged, "ool_memory_client_untagged") {
	ool_memory_client_template(false);
}
332 
333 static void
receive_ool_memory(mach_port_t rcv_port,bool is_relaxed)334 receive_ool_memory(mach_port_t rcv_port, bool is_relaxed)
335 {
336 	ipc_complex_ool_message msg;
337 
338 	kern_return_t kr = mach_msg(&msg.header, MACH_RCV_MSG, 0, sizeof(msg),
339 	    rcv_port, 0, 0);
340 	T_ASSERT_MACH_SUCCESS(kr, "received msg");
341 
342 	switch (msg.dsc.copy) {
343 	case MACH_MSG_VIRTUAL_COPY:
344 	{
345 		/* No validations to perform right now */
346 	}
347 	case MACH_MSG_PHYSICAL_COPY:
348 	{
349 		/* Verify that the received data is correct */
350 		uint64_t *received_data = (uint64_t*) msg.dsc.address;
351 		for (uint i = 0; i < msg.dsc.size / sizeof(uint64_t); ++i) {
352 			T_QUIET; T_ASSERT_EQ_ULLONG(DATA, received_data[i],
353 			    "received_data[%u] == expected data", i);
354 		}
355 		T_LOG("Successfully read and verified received %u bytes of data", msg.dsc.size);
356 		break;
357 	}
358 	default:
359 	{
360 		/* We're not expecting the other cases for this test for now */
361 		T_FAIL("Unexpected copy option: %d", msg.dsc.copy);
362 	}
363 	}
364 }
365 
366 static void
ool_memory_server(bool is_relaxed,bool has_mte)367 ool_memory_server(bool is_relaxed, bool has_mte)
368 {
369 	validate_proc_pidinfo_mte_status(getpid(), has_mte);
370 	/* Get the server's receive right */
371 	mach_port_t svc_port = server_checkin();
372 
373 	while (true) {
374 		receive_ool_memory(svc_port, is_relaxed);
375 	}
376 }
377 
/* OOL server: MTE-enabled, default (strict) receive-side expectations. */
T_HELPER_DECL(ool_memory_server_with_mte, "ool_memory_server_with_mte")
{
	ool_memory_server(false, true);
}

/* OOL server: MTE-enabled, relaxed receive-side expectations. */
T_HELPER_DECL(ool_memory_server_with_mte_relaxed, "ool_memory_server_with_mte_relaxed")
{
	ool_memory_server(true, true);
}

/* OOL server: spawned without MTE, relaxed receive-side expectations. */
T_HELPER_DECL(ool_memory_server_without_mte_relaxed, "ool_memory_server_without_mte_relaxed")
{
	ool_memory_server(true, false);
}
392 
393 /* Client/server code for memory descriptor tests */
394 static void
send_memory_entry(mach_port_t svc_port,void * ptr,vm_prot_t flags,mach_msg_size_t size)395 send_memory_entry(
396 	mach_port_t svc_port,
397 	void *ptr,
398 	vm_prot_t flags,
399 	mach_msg_size_t size)
400 {
401 	mach_port_t memory_entry;
402 	memory_object_size_t memory_entry_size = size;
403 	bool is_share = !(flags & MAP_MEM_VM_COPY); /* flags = 0 is also a true-share case */
404 	vm_offset_t mask = is_share ? ~MTE_TAG_MASK : ~0ULL; /* copy cases need tags to do copyin */
405 	vm_offset_t addr = (vm_offset_t) ptr & mask;
406 	kern_return_t kr = mach_make_memory_entry_64(mach_task_self(), &memory_entry_size,
407 	    addr, flags | VM_PROT_DEFAULT, &memory_entry, MACH_PORT_NULL);
408 	T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "make memory entry, size=%llu, flags=%#x",
409 	    memory_entry_size, flags);
410 	if (kr != KERN_SUCCESS) {
411 		return;
412 	}
413 
414 	ipc_complex_port_message msg;
415 	bzero(&msg, sizeof(ipc_complex_port_message));
416 	mach_msg_header_t hdr = {
417 		.msgh_remote_port = svc_port,
418 		.msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, MACH_MSGH_BITS_COMPLEX),
419 		.msgh_id = 1,
420 		.msgh_size = offsetof(ipc_complex_port_message, trailer),
421 	};
422 	mach_msg_port_descriptor_t dsc = {
423 		.name = memory_entry,
424 		.disposition = MACH_MSG_TYPE_COPY_SEND,
425 		.type = MACH_MSG_PORT_DESCRIPTOR
426 	};
427 	msg.header = hdr;
428 	msg.body.msgh_descriptor_count = 1;
429 	msg.dsc = dsc;
430 	msg.is_share = is_share;
431 	msg.size = size;
432 
433 	T_LOG("sending message, size: %u, flags: %#x", size, flags);
434 	kr = mach_msg(&msg.header, MACH_SEND_MSG, msg.header.msgh_size, 0,
435 	    MACH_PORT_NULL, 10000, 0);
436 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_msg send");
437 }
438 
439 static void
send_memory_entry_helper(void * addr,vm_prot_t flag,mach_msg_size_t size,boolean_t is_memory_tagged,boolean_t expect_pass)440 send_memory_entry_helper(
441 	void *addr,
442 	vm_prot_t flag,
443 	mach_msg_size_t size,
444 	boolean_t is_memory_tagged,
445 	boolean_t expect_pass)
446 {
447 	if (expect_pass) {
448 		mach_port_t port = server_lookup();
449 		send_memory_entry(port, addr, flag, size);
450 	} else {
451 		char description[100];
452 		snprintf(description, sizeof(description),
453 		    "(flags = %d) (is memory tagged = %d) (size = %d)", flag, is_memory_tagged, size);
454 		expect_sigkill(^{
455 			/* expect_sigkill forks, and the child does not gain the parent's port rights */
456 			mach_port_t port = server_lookup();
457 			send_memory_entry(port, addr, flag, size);
458 		}, description);
459 	}
460 }
461 
/*
 * Drive every memory-entry combination with tagged memory under default
 * (strict) policy: share-style entries must kill the (forked) sender, while
 * MAP_MEM_VM_COPY entries are allowed.
 */
static void
default_tagged_memory_entry_assertions(uint64_t *tagged_ptr)
{
	/*
	 * Creating a shared memory entry of tagged memory is a violation of
	 * security policy. The last parameter of send_memory_entry_helper
	 * denotes whether the case is expected to complete normally or not.
	 * Most of these cases expect the process to be killed.
	 */
	send_memory_entry_helper(tagged_ptr, 0, MTE_GRANULE_SIZE, true, false);
	send_memory_entry_helper(tagged_ptr, 0, MTE_GRANULE_SIZE * 4, true, false);
	send_memory_entry_helper(tagged_ptr, 0, KERNEL_BUFFER_COPY_THRESHOLD, true, false);
	send_memory_entry_helper(tagged_ptr, 0, VIRTUAL_COPY_SZ, true, false);

	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_SHARE, MTE_GRANULE_SIZE, true, false);
	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_SHARE, MTE_GRANULE_SIZE * 4, true, false);
	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_SHARE, KERNEL_BUFFER_COPY_THRESHOLD, true, false);
	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_SHARE, VIRTUAL_COPY_SZ, true, false);

	/* These next cases are the only ones in which the process is not expected to terminate */
	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_COPY, MTE_GRANULE_SIZE, true, true);
	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_COPY, MTE_GRANULE_SIZE * 4, true, true);
	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_COPY, KERNEL_BUFFER_COPY_THRESHOLD, true, true);
	/* Copies above 32k are also allowed under VM policies v3 */
	send_memory_entry_helper(tagged_ptr, MAP_MEM_VM_COPY, VIRTUAL_COPY_SZ, true, true);
}
488 
489 static void
relaxed_memory_entry_assertions(uint64_t * ptr)490 relaxed_memory_entry_assertions(uint64_t *ptr)
491 {
492 	const int vm_flags[] = { 0, MAP_MEM_VM_SHARE, MAP_MEM_VM_COPY };
493 	for (int mem_size_index = 0; mem_size_index < count_of(memory_size_options); mem_size_index++) {
494 		for (int vm_flags_index = 0; vm_flags_index < count_of(vm_flags); vm_flags_index++) {
495 			mach_msg_size_t mem_size = memory_size_options[mem_size_index];
496 			int vm_flag = vm_flags[vm_flags_index];
497 			send_memory_entry_helper(ptr, vm_flag, mem_size, false, true);
498 		}
499 	}
500 }
501 
502 /*
503  *  memory_entry_client_template(bool is_tagged)
504  *       [is_tagged]: is the memory being send tagged
505  */
506 static void
memory_entry_client_template(bool is_tagged)507 memory_entry_client_template(bool is_tagged)
508 {
509 	T_SETUPBEGIN;
510 	validate_proc_pidinfo_mte_status(getpid(), true);
511 	if (T_STATE == T_STATE_SETUPFAIL) {
512 		T_FAIL("client was not spawned under MTE");
513 		return;
514 	}
515 
516 	uint64_t mask;
517 	/* We want to allocate the max amount of memory we'll need for the test */
518 	uint64_t *untagged_ptr;
519 	if (is_tagged) {
520 		untagged_ptr = allocate_tagged_memory(sz_rounded, &mask);
521 	} else {
522 		untagged_ptr = allocate_untagged_memory(sz_rounded);
523 	}
524 
525 	/* Tag the memory */
526 	uint64_t *tagged_ptr = NULL;
527 	if (is_tagged) {
528 		tagged_ptr = tag_pointer(&untagged_ptr);
529 	}
530 
531 	/* Write to the memory */
532 	for (uint64_t i = 0; i < sz_rounded / sizeof(uint64_t); ++i) {
533 		if (is_tagged) {
534 			tagged_ptr[i] = DATA;
535 		} else {
536 			untagged_ptr[i] = DATA;
537 		}
538 	}
539 	T_SETUPEND;
540 
541 	/*
542 	 * libdarwintest will automatically end the test when one of the helpers
543 	 * terminates. The server never terminates in this test setup, so this
544 	 * only happens if the client terminates, but it also doesn't trigger if
545 	 * the client is killed rather than ending normally, resulting in a hang.
546 	 *
547 	 * Therefore, the helper launches a child process to run the actual test,
548 	 * so that the helper process can exit normally even on a SIGKILL.
549 	 */
550 	assert_normal_exit(^{
551 		if (!is_tagged) {
552 		        /* mach_msg_send should ALWAYS succeed on all tagged memory entry sizes */
553 		        relaxed_memory_entry_assertions(untagged_ptr);
554 		} else {
555 		        /*
556 		         * Creating a shared memory entry of tagged memory is a violation of
557 		         * security policy.
558 		         */
559 		        default_tagged_memory_entry_assertions(tagged_ptr);
560 		}
561 
562 		T_EXPECT_MACH_SUCCESS(vm_deallocate(mach_task_self(), (vm_address_t) untagged_ptr, sz_rounded), "vm_deallocate");
563 	}, "memory_entry_client_template");
564 }
565 
/* NOTE(review): both client helpers share the "memory_entry_client" description string — confirm intentional */
T_HELPER_DECL(memory_entry_client_tagged, "memory_entry_client")
{
	/* VM security policies should be observed on tagged memory */
	memory_entry_client_template(true);
}

T_HELPER_DECL(memory_entry_client_untagged, "memory_entry_client")
{
	/* VM security policies should be relaxed on untagged memory */
	memory_entry_client_template(false);
}
577 
578 static void
receive_memory_entry(mach_port_t rcv_port,bool is_relaxed)579 receive_memory_entry(mach_port_t rcv_port, bool is_relaxed)
580 {
581 	ipc_complex_port_message msg;
582 
583 	kern_return_t kr = mach_msg(&msg.header, MACH_RCV_MSG, 0, sizeof(msg), rcv_port, 0, 0);
584 	T_ASSERT_MACH_SUCCESS(kr, "received msg");
585 	if (!is_relaxed) {
586 		T_EXPECT_FALSE(msg.is_share, "it should not be possible to create + send a tagged share memory entry");
587 	}
588 
589 	if (!msg.is_share && msg.size <= KERNEL_BUFFER_COPY_THRESHOLD) {
590 		mach_vm_address_t addr = 0;
591 		kr = mach_vm_map(mach_task_self(), &addr, msg.size, /* mask = */ 0,
592 		    VM_FLAGS_ANYWHERE, msg.dsc.name, /* offset = */ 0, /* copy = */ false,
593 		    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
594 		T_EXPECT_MACH_SUCCESS(kr, "map copy memory entry, copy = false, size = %llu", addr);
595 		if (kr == KERN_SUCCESS) {
596 			kr = mach_vm_deallocate(mach_task_self(), addr, msg.size);
597 			T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "cleanup mach_vm_map(copy = false)");
598 		}
599 		kr = mach_vm_map(mach_task_self(), &addr, msg.size, /* mask = */ 0,
600 		    VM_FLAGS_ANYWHERE, msg.dsc.name, /* offset = */ 0, /* copy = */ true,
601 		    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
602 		T_EXPECT_MACH_SUCCESS(kr, "map copy memory entry, copy = true, size = %llu", addr);
603 		if (kr == KERN_SUCCESS) {
604 			kr = mach_vm_deallocate(mach_task_self(), addr, msg.size);
605 			T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "cleanup mach_vm_map(copy = true)");
606 		}
607 	}
608 }
609 
610 static void
memory_entry_server(bool is_relaxed,bool has_mte)611 memory_entry_server(bool is_relaxed, bool has_mte)
612 {
613 	validate_proc_pidinfo_mte_status(getpid(), has_mte);
614 	/* Get the server's receive right */
615 	mach_port_t svc_port = server_checkin();
616 
617 	while (true) {
618 		receive_memory_entry(svc_port, is_relaxed);
619 	}
620 }
621 
/* Memory-entry server: MTE-enabled, default (strict) expectations. */
T_HELPER_DECL(memory_entry_server_with_mte, "memory_entry_server_with_mte")
{
	memory_entry_server(false, true);
}

/* Memory-entry server: MTE-enabled, relaxed expectations. */
T_HELPER_DECL(memory_entry_server_with_mte_relaxed, "memory_entry_server_with_mte_relaxed")
{
	memory_entry_server(true, true);
}

/* Memory-entry server: spawned without MTE, relaxed expectations. */
T_HELPER_DECL(memory_entry_server_without_mte_relaxed, "memory_entry_server_without_mte_relaxed")
{
	memory_entry_server(true, false);
}
636 
637 static void
spawn_helper_with_flags(char * helper_name,posix_spawn_secflag_options flags)638 spawn_helper_with_flags(char *helper_name, posix_spawn_secflag_options flags)
639 {
640 	char path[PATH_MAX];
641 	uint32_t path_size = sizeof(path);
642 	T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
643 	char *args[] = { path, "-n", helper_name, NULL};
644 	posix_spawn_with_flags_and_assert_successful_exit(args, flags, true, false);
645 }
646 
/* Wrappers that respawn each client helper so it runs with MTE enabled. */
T_HELPER_DECL(memory_entry_client_tagged_with_mte, "memory_entry_client_with_mte")
{
	spawn_helper_with_flags("memory_entry_client_tagged", 0);
}

T_HELPER_DECL(memory_entry_client_untagged_with_mte, "memory_entry_client_with_mte")
{
	spawn_helper_with_flags("memory_entry_client_untagged", 0);
}

T_HELPER_DECL(ool_memory_client_tagged_with_mte, "ool_memory_client_tagged_with_mte")
{
	spawn_helper_with_flags("ool_memory_client_tagged", 0);
}

T_HELPER_DECL(ool_memory_client_untagged_with_mte, "ool_memory_client_untagged_with_mte")
{
	spawn_helper_with_flags("ool_memory_client_untagged", 0);
}
666 
/*
 * Run a launchd-hosted server helper together with a forked client helper.
 * On non-arm64 targets the body compiles to a no-op, so the corresponding
 * tests pass vacuously.
 */
static void
client_server_template(char *launchd_plist, char *server_helper, char *client_helper)
{
	#if __arm64__
	dt_helper_t helpers[] = {
		dt_launchd_helper_domain(launchd_plist,
	    server_helper, NULL, LAUNCH_SYSTEM_DOMAIN),
		dt_fork_helper(client_helper)
	};
	/* 600s overall deadline for both helpers */
	dt_run_helpers(helpers, 2, 600);
	#endif /* __arm64__ */
}
679 
/* Actual test definitions */

/* Tagged OOL between two MTE processes; disabled pending rdar://153934699. */
T_DECL(mte_mach_msg_send_ool_tagged_entitled,
    "Send tagged memory OOL in a mach msg from MTE-enabled -> MTE-enabled process",
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC,
    T_META_ASROOT(true),
    // T_META_ENABLED(__arm64__)
    T_META_ENABLED(false) /* rdar://153934699 */
    )
{
	client_server_template("com.apple.xnu.test.arm_mte_sharing_hardened.plist",
	    "ool_memory_server_with_mte", "ool_memory_client_tagged_with_mte");
}

/* Untagged OOL between two MTE processes; relaxed server expectations. */
T_DECL(mte_mach_msg_send_ool_untagged_entitled,
    "Send untagged memory OOL in a mach msg from MTE-enabled -> MTE-enabled process",
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC,
    T_META_ASROOT(true),
    T_META_ENABLED(__arm64__))
{
	client_server_template("com.apple.xnu.test.arm_mte_sharing_hardened.plist",
	    "ool_memory_server_with_mte_relaxed", "ool_memory_client_untagged_with_mte");
}

/* Tagged OOL from an MTE process to a non-MTE server. */
T_DECL(mte_mach_msg_send_ool_tagged_entitled_to_unentitled,
    "Send tagged memory OOL in a mach msg from MTE-enabled -> non MTE-enabled process",
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC,
    T_META_ASROOT(true),
    T_META_ENABLED(__arm64__))
{
	client_server_template("com.apple.xnu.test.arm_mte_sharing_unhardened.plist",
	    "ool_memory_server_without_mte_relaxed", "ool_memory_client_tagged_with_mte");
}

/* Tagged memory entries between two MTE processes; disabled pending rdar://153934699. */
T_DECL(mte_mach_msg_send_memory_entry_tagged_entitled,
    "Send tagged memory entries in a mach msg from MTE-enabled -> MTE-enabled process",
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC,
    T_META_ASROOT(true),
    // T_META_ENABLED(__arm64__)
    T_META_ENABLED(false) /* rdar://153934699 */
    )
{
	client_server_template("com.apple.xnu.test.arm_mte_sharing_hardened.plist",
	    "memory_entry_server_with_mte", "memory_entry_client_tagged_with_mte");
}

/* Untagged memory entries between two MTE processes; relaxed server expectations. */
T_DECL(mte_mach_msg_send_memory_entry_untagged_entitled,
    "Send untagged memory entries in a mach msg from MTE-enabled -> MTE-enabled process",
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC,
    T_META_ASROOT(true),
    T_META_ENABLED(__arm64__))
{
	client_server_template("com.apple.xnu.test.arm_mte_sharing_hardened.plist",
	    "memory_entry_server_with_mte_relaxed", "memory_entry_client_untagged_with_mte");
}

/* Tagged memory entries from an MTE process to a non-MTE server. */
T_DECL(mte_mach_msg_send_memory_entry_tagged_entitled_to_unentitled,
    "Send tagged memory entries in a mach msg from MTE-enabled -> non MTE-enabled process",
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC,
    T_META_ASROOT(true),
    T_META_ENABLED(__arm64__))
{
	client_server_template("com.apple.xnu.test.arm_mte_sharing_unhardened.plist",
	    "memory_entry_server_without_mte_relaxed", "memory_entry_client_tagged_with_mte");
}
750