/*
 * Copyright (c) 2023 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28*4d495c6eSApple OSS Distributions
#include <arm_acle.h>
#include <darwintest.h>
#include <fcntl.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach-o/dyld.h>
#include <signal.h>
#include <spawn_private.h>
#include <stdlib.h>
#include <sys/aio.h>
#include <sys/mman.h>
#include <sys/spawn_internal.h>
#include <sys/sysctl.h>
#include <sys/wait.h>
#include <unistd.h>

#include "arm_mte_utilities.h"
#include "test_utils.h"
47*4d495c6eSApple OSS Distributions
/*
 * TARGET_SUPPORTS_MTE_EMULATION marks platforms where MTE may be emulated
 * rather than implemented in hardware.
 */
#if (TARGET_OS_OSX || TARGET_OS_IOS) && defined(__arm64__)
// TODO(PT): It'd be nice to have this as an allow list rather than the inverse,
// but I wasn't able to restrict based on TARGET_OS_[IPHONE|IOS] as this is sometimes set even for XR_OS.
// For now, to keep things moving, just restrict this from being set on platforms where
// we know it's not the case.
#if !(TARGET_OS_XR || TARGET_OS_TV || TARGET_OS_WATCH || TARGET_OS_BRIDGE)
#define TARGET_SUPPORTS_MTE_EMULATION 1
#endif
#endif

/*
 * Metadata shared by every test in this file. Crashes are expected and
 * ignored (MTE tag-check faults deliberately kill child processes), and
 * leak checking is disabled since several tests abandon memory on purpose.
 */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.arm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("arm"),
	T_META_OWNER("ghackmann"),
	T_META_RUN_CONCURRENTLY(true),
	T_META_IGNORECRASHES(".*arm_mte.*"),
	T_META_CHECK_LEAKS(false));
66*4d495c6eSApple OSS Distributions
67*4d495c6eSApple OSS Distributions static uint64_t
task_footprint(void)68*4d495c6eSApple OSS Distributions task_footprint(void)
69*4d495c6eSApple OSS Distributions {
70*4d495c6eSApple OSS Distributions task_vm_info_data_t ti;
71*4d495c6eSApple OSS Distributions kern_return_t kr;
72*4d495c6eSApple OSS Distributions mach_msg_type_number_t count;
73*4d495c6eSApple OSS Distributions
74*4d495c6eSApple OSS Distributions count = TASK_VM_INFO_COUNT;
75*4d495c6eSApple OSS Distributions kr = task_info(mach_task_self(),
76*4d495c6eSApple OSS Distributions TASK_VM_INFO,
77*4d495c6eSApple OSS Distributions (task_info_t) &ti,
78*4d495c6eSApple OSS Distributions &count);
79*4d495c6eSApple OSS Distributions T_QUIET;
80*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "task_info()");
81*4d495c6eSApple OSS Distributions #if defined(__arm64__)
82*4d495c6eSApple OSS Distributions T_QUIET;
83*4d495c6eSApple OSS Distributions T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
84*4d495c6eSApple OSS Distributions count, TASK_VM_INFO_COUNT);
85*4d495c6eSApple OSS Distributions #endif /* defined(__arm64__) */
86*4d495c6eSApple OSS Distributions return ti.phys_footprint;
87*4d495c6eSApple OSS Distributions }
88*4d495c6eSApple OSS Distributions
/*
 * Core MTE2 tag-check exercise: allocate a tagged region, verify the
 * default tag, generate and commit a random non-default tag, then confirm
 * that out-of-bounds and use-after-free style accesses through mismatched
 * tags are fatal. Used both directly and from a forked child.
 */
static void
do_mte_tag_check(void)
{
	/* Two granules: one we retag, one left at the default tag for OOB checks. */
	static const size_t ALLOC_SIZE = MTE_GRANULE_SIZE * 2;

	vm_address_t address = 0;
	kern_return_t kr = vm_allocate(mach_task_self(), &address, ALLOC_SIZE, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
	T_ASSERT_MACH_SUCCESS(kr, "allocate tagged memory");
	char *untagged_ptr = (char *)address;

	/* Freshly-allocated tagged memory should carry the zero tag. */
	char *orig_tagged_ptr = __arm_mte_get_tag(untagged_ptr);
	unsigned int orig_tag = extract_mte_tag(orig_tagged_ptr);
	T_ASSERT_EQ_UINT(orig_tag, 0U, "originally assigned tag is zero");

	/* Exclude the original (zero) tag from random-tag generation. */
	uint64_t mask = __arm_mte_exclude_tag(orig_tagged_ptr, 0);
	T_EXPECT_EQ_LLONG(mask, (1LL << 0), "zero tag is excluded");

	char *random_tagged_ptr = NULL;
	/*
	 * Generate the random tag. We've excluded the original tag, so it should never
	 * reappear no matter how many times we regenerate a new tag.
	 */
	for (unsigned int i = 0; i < NUM_MTE_TAGS * 4; i++) {
		random_tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, mask);
		T_QUIET; T_EXPECT_NE_PTR(orig_tagged_ptr, random_tagged_ptr,
		    "random tag was not taken from excluded tag set");

		/* The tag lives in the top byte; address bits must be untouched. */
		ptrdiff_t diff = __arm_mte_ptrdiff(untagged_ptr, random_tagged_ptr);
		T_QUIET; T_EXPECT_EQ_ULONG(diff, (ptrdiff_t)0, "untagged %p and tagged %p have identical address bits",
		    untagged_ptr, random_tagged_ptr);
	}

	/* Time to make things real, commit the tag to memory */
	__arm_mte_set_tag(random_tagged_ptr);

	/* Ensure that we can read back the tag */
	char *read_back = __arm_mte_get_tag(untagged_ptr);
	T_EXPECT_EQ_PTR(read_back, random_tagged_ptr, "tag was committed to memory correctly");

	/* Verify that accessing memory actually works */
	random_tagged_ptr[0] = 't';
	random_tagged_ptr[1] = 'e';
	random_tagged_ptr[2] = 's';
	random_tagged_ptr[3] = 't';
	T_EXPECT_EQ_STR(random_tagged_ptr, "test", "read/write from tagged memory");

	/*
	 * Confirm that the next MTE granule still has the default tag, and then
	 * simulate an out-of-bounds access into that granule.
	 */
	void *next_granule_ptr = orig_tagged_ptr + MTE_GRANULE_SIZE;
	unsigned int next_granule_tag = extract_mte_tag(next_granule_ptr);
	T_QUIET; T_ASSERT_EQ_UINT(next_granule_tag, 0U,
	    "next MTE granule still has its originally assigned tag");

	T_LOG("attempting out-of-bounds access to tagged memory");
	expect_sigkill(^{
		random_tagged_ptr[MTE_GRANULE_SIZE] = '!';
	}, "out-of-bounds access to tagged memory raises uncatchable exception");

	/*
	 * Simulate a use-after-free by accessing orig_tagged_ptr, which has an
	 * out-of-date tag.
	 */
	T_LOG("attempting use-after-free access to tagged memory");
	expect_sigkill(^{
		orig_tagged_ptr[0] = 'T';
	}, "use-after-free access to tagged memory raises uncatchable exception");

	/* Restore the zero tag on both granules before handing memory back. */
	__arm_mte_set_tag(orig_tagged_ptr);
	__arm_mte_set_tag(orig_tagged_ptr + MTE_GRANULE_SIZE);
	vm_deallocate(mach_task_self(), address, ALLOC_SIZE);
}
162*4d495c6eSApple OSS Distributions
163*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check,
164*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault handling",
165*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
166*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
167*4d495c6eSApple OSS Distributions {
168*4d495c6eSApple OSS Distributions #if !__arm64__
169*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
170*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
171*4d495c6eSApple OSS Distributions do_mte_tag_check();
172*4d495c6eSApple OSS Distributions #endif
173*4d495c6eSApple OSS Distributions }
174*4d495c6eSApple OSS Distributions
175*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_child,
176*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process",
177*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
178*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
179*4d495c6eSApple OSS Distributions {
180*4d495c6eSApple OSS Distributions #if !__arm64__
181*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
182*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
183*4d495c6eSApple OSS Distributions pid_t pid = fork();
184*4d495c6eSApple OSS Distributions if (pid == 0) {
185*4d495c6eSApple OSS Distributions /*
186*4d495c6eSApple OSS Distributions * Make sure the child process also has tag checks enabled.
187*4d495c6eSApple OSS Distributions */
188*4d495c6eSApple OSS Distributions do_mte_tag_check();
189*4d495c6eSApple OSS Distributions } else {
190*4d495c6eSApple OSS Distributions T_ASSERT_TRUE(pid != -1, "Checking fork success in parent");
191*4d495c6eSApple OSS Distributions
192*4d495c6eSApple OSS Distributions int status = 0;
193*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid");
194*4d495c6eSApple OSS Distributions }
195*4d495c6eSApple OSS Distributions #endif
196*4d495c6eSApple OSS Distributions }
197*4d495c6eSApple OSS Distributions
/*
 * MTE4 canonical-tag test: memory allocated WITHOUT VM_FLAGS_MTE is
 * canonically tagged; both setting a tag on it and accessing it through a
 * tagged address must fault.
 */
T_DECL(mte_canonical_tag_check,
    "Test MTE4 Canonical Tag Check fault handling",
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC)
{
#if !__arm64__
	T_SKIP("Running on non-arm64 target, skipping...");
#else /* !__arm64__ */
	/* Note: no VM_FLAGS_MTE, so the page is canonically tagged. */
	vm_address_t address = 0;
	kern_return_t kr = vm_allocate(mach_task_self(), &address, MTE_GRANULE_SIZE, VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "allocate a canonically-tagged page");
	char *ptr = (char *)address;

	/* STG on a non-MTE page is a (catchable) permission fault, SIGBUS. */
	T_LOG("attempting to set tag on canonically-tagged memory");
	char *tagged_ptr = __arm_mte_increment_tag(ptr, 1);
	expect_signal(SIGBUS, ^{
		__arm_mte_set_tag(tagged_ptr);
	}, "setting tag on canonically-tagged memory raises a canonical memory permission fault");

	/* A data access with a non-canonical tag is an uncatchable kill. */
	T_LOG("attempting to access canonically-tagged memory with a tagged address");
	expect_sigkill(^{
		tagged_ptr[0] = '!';
	}, "accessing canonically-tagged memory with a tagged address raises a canonical tag check fault");

	vm_deallocate(mach_task_self(), address, MTE_GRANULE_SIZE);
#endif
}
225*4d495c6eSApple OSS Distributions
/*
 * Exercise kernel copyin/copyout against tagged user memory via the
 * kern.threadname sysctl (which copies an unterminated string both ways).
 *
 * tag_check_faults_enabled: when true, mismatched tags are expected to be
 * fatal (expect_sigkill); when false (bypass mode), the same operations
 * are expected to succeed.
 */
static void
run_mte_copyio_tests(bool tag_check_faults_enabled)
{
	static_assert(MAXTHREADNAMESIZE >= MTE_GRANULE_SIZE * 2, "kern.threadname parameter can span multiple MTE granules");

	const size_t buf_size = MAXTHREADNAMESIZE;
	/* Span two granules so copyio crosses a tag boundary. */
	const size_t threadname_len = MTE_GRANULE_SIZE * 2;
	vm_address_t address = 0;
	kern_return_t kr = vm_allocate(mach_task_self(), &address, buf_size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "allocate tagged memory");

	char *untagged_ptr = (char *)address;
	/* n.b.: kern.threadname uses unterminated strings */
	memset(untagged_ptr, 'A', threadname_len);

	/* Give both granules the same random (non-zero) tag. */
	char *tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, 0);
	__arm_mte_set_tag(tagged_ptr);
	char *next_granule_ptr = tagged_ptr + MTE_GRANULE_SIZE;
	__arm_mte_set_tag(next_granule_ptr);

	/* Correctly-tagged copyin must always work. */
	int err = sysctlbyname("kern.threadname", NULL, NULL, tagged_ptr, threadname_len);
	T_ASSERT_POSIX_SUCCESS(err, "copyin using tagged pointer succeeds");

	/* Simulate use-after-free by passing in obsolete tag */
	if (tag_check_faults_enabled) {
		expect_sigkill(^{
			sysctlbyname("kern.threadname", NULL, NULL, untagged_ptr, threadname_len);
		}, "copyin using incorrectly-tagged pointer");
	} else {
		err = sysctlbyname("kern.threadname", NULL, NULL, untagged_ptr, threadname_len);
		T_ASSERT_POSIX_SUCCESS(err, "bypass: copyin using incorrectly-tagged pointer succeeds");
	}

	/* Simulate out-of-bounds access by giving the second MTE granule a different tag */
	char *different_tag_next_granule_ptr = __arm_mte_increment_tag(next_granule_ptr, 1);
	T_QUIET; T_ASSERT_NE(different_tag_next_granule_ptr, next_granule_ptr, "__arm_mte_increment_tag()");
	__arm_mte_set_tag(different_tag_next_granule_ptr);
	if (tag_check_faults_enabled) {
		expect_sigkill(^{
			sysctlbyname("kern.threadname", NULL, NULL, tagged_ptr, threadname_len);
		}, "copyin using inconsistently-tagged buffer");
	} else {
		err = sysctlbyname("kern.threadname", NULL, NULL, tagged_ptr, threadname_len);
		T_ASSERT_POSIX_SUCCESS(err, "bypass: copyin using inconsistently-tagged buffer succeeds");
	}
	/* Restore a consistent tag across both granules for the copyout phase. */
	__arm_mte_set_tag(next_granule_ptr);

	/* Correctly-tagged copyout must always work. */
	size_t oldlen = buf_size;
	err = sysctlbyname("kern.threadname", tagged_ptr, &oldlen, NULL, 0);
	T_EXPECT_POSIX_SUCCESS(err, "copyout using tagged pointer succeeds");

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wshadow"

	if (tag_check_faults_enabled) {
		expect_sigkill(^{
			/* We need to repopulate kern.threadname since it isn't inherited across fork() */
			int err = sysctlbyname("kern.threadname", NULL, NULL, tagged_ptr, threadname_len);
			T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(kern.threadname)");

			size_t oldlen = buf_size;
			sysctlbyname("kern.threadname", untagged_ptr, &oldlen, NULL, 0);
		}, "copyout using incorrectly-tagged pointer");
	} else {
		size_t oldlen = buf_size;
		int err = sysctlbyname("kern.threadname", untagged_ptr, &oldlen, NULL, 0);
		T_EXPECT_POSIX_SUCCESS(err, "bypass: copyout using incorrectly-tagged pointer succeeds");
	}

	/* Re-create the inconsistent second granule for the copyout OOB case. */
	__arm_mte_set_tag(different_tag_next_granule_ptr);
	if (tag_check_faults_enabled) {
		expect_sigkill(^{
			int err = sysctlbyname("kern.threadname", NULL, NULL, tagged_ptr, threadname_len);
			T_QUIET; T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(kern.threadname)");

			size_t oldlen = buf_size;
			sysctlbyname("kern.threadname", tagged_ptr, &oldlen, NULL, 0);
		}, "copyout using inconsistently-tagged buffer");
	} else {
		size_t oldlen = buf_size;
		int err = sysctlbyname("kern.threadname", tagged_ptr, &oldlen, NULL, 0);
		T_EXPECT_POSIX_SUCCESS(err, "bypass: copyout using inconsistently-tagged buffer succeeds");
	}
	__arm_mte_set_tag(next_granule_ptr);

#pragma clang diagnostic pop

	vm_deallocate(mach_task_self(), address, buf_size);
}
315*4d495c6eSApple OSS Distributions
/* Copyin/copyout tag checking with faults enabled (the default mode). */
T_DECL(mte_copyio,
    "Test MTE tag handling during copyin/copyout operations",
    T_META_ENABLED(TARGET_CPU_ARM64),
    T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
    XNU_T_META_SOC_SPECIFIC)
{
	run_mte_copyio_tests(true);
}
324*4d495c6eSApple OSS Distributions
325*4d495c6eSApple OSS Distributions T_DECL(mte_malloc_footprint_test,
326*4d495c6eSApple OSS Distributions "Test footprint across malloc() and free()",
327*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
328*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
329*4d495c6eSApple OSS Distributions T_META_ENABLED(false) /* rdar://131390446 */)
330*4d495c6eSApple OSS Distributions {
331*4d495c6eSApple OSS Distributions #if !__arm64__
332*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
333*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
334*4d495c6eSApple OSS Distributions uint64_t count = 1024;
335*4d495c6eSApple OSS Distributions uint64_t margin = 4;
336*4d495c6eSApple OSS Distributions char* address[count];
337*4d495c6eSApple OSS Distributions uint64_t size = PAGE_SIZE;
338*4d495c6eSApple OSS Distributions
339*4d495c6eSApple OSS Distributions for (unsigned int i = 0; i < count; i++) {
340*4d495c6eSApple OSS Distributions address[i] = (char *) malloc(size);
341*4d495c6eSApple OSS Distributions
342*4d495c6eSApple OSS Distributions char *cp;
343*4d495c6eSApple OSS Distributions for (cp = (char *) (address[i]); cp < (char *) (address[i] + size); cp += PAGE_SIZE) {
344*4d495c6eSApple OSS Distributions *cp = 'x';
345*4d495c6eSApple OSS Distributions }
346*4d495c6eSApple OSS Distributions }
347*4d495c6eSApple OSS Distributions
348*4d495c6eSApple OSS Distributions uint64_t fp1 = task_footprint();
349*4d495c6eSApple OSS Distributions T_LOG("Footprint after malloc(): %llu bytes", fp1);
350*4d495c6eSApple OSS Distributions
351*4d495c6eSApple OSS Distributions for (unsigned int i = 0; i < count; i++) {
352*4d495c6eSApple OSS Distributions free(address[i]);
353*4d495c6eSApple OSS Distributions }
354*4d495c6eSApple OSS Distributions uint64_t fp2 = task_footprint();
355*4d495c6eSApple OSS Distributions T_LOG("Footprint after free(): %llu bytes", fp2);
356*4d495c6eSApple OSS Distributions
357*4d495c6eSApple OSS Distributions T_EXPECT_TRUE(((fp2 + PAGE_SIZE * (count - margin)) <= fp1), "Footprint after free() is higher than expected.");
358*4d495c6eSApple OSS Distributions #endif
359*4d495c6eSApple OSS Distributions }
360*4d495c6eSApple OSS Distributions
361*4d495c6eSApple OSS Distributions T_DECL(mte_tagged_memory_direct_io,
362*4d495c6eSApple OSS Distributions "Test direct I/O on tagged memory",
363*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
364*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
365*4d495c6eSApple OSS Distributions {
366*4d495c6eSApple OSS Distributions #if !__arm64__
367*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
368*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
369*4d495c6eSApple OSS Distributions
370*4d495c6eSApple OSS Distributions uint64_t size = PAGE_SIZE;
371*4d495c6eSApple OSS Distributions char* address = (char*) malloc(size);
372*4d495c6eSApple OSS Distributions
373*4d495c6eSApple OSS Distributions char *cp;
374*4d495c6eSApple OSS Distributions for (cp = (char *) (address); cp < (char *) (address + size); cp += PAGE_SIZE) {
375*4d495c6eSApple OSS Distributions *cp = 'x';
376*4d495c6eSApple OSS Distributions }
377*4d495c6eSApple OSS Distributions
378*4d495c6eSApple OSS Distributions int fd = open("/tmp/file1", O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0644);
379*4d495c6eSApple OSS Distributions T_ASSERT_TRUE(fd > 0, "File open successful");
380*4d495c6eSApple OSS Distributions T_ASSERT_TRUE(((fcntl(fd, F_NOCACHE, 1)) != -1), "Setting F_NOCACHE");
381*4d495c6eSApple OSS Distributions ssize_t ret = pwrite(fd, address, size, 0);
382*4d495c6eSApple OSS Distributions T_ASSERT_TRUE((uint64_t) ret == size, "pwrite() on tagged memory");
383*4d495c6eSApple OSS Distributions
384*4d495c6eSApple OSS Distributions char *incorrectly_tagged = __arm_mte_increment_tag(address, 1);
385*4d495c6eSApple OSS Distributions ret = pwrite(fd, incorrectly_tagged, size, 0);
386*4d495c6eSApple OSS Distributions T_ASSERT_TRUE((uint64_t) ret == size, "pwrite() on incorrectly tagged memory passes with direct I/O");
387*4d495c6eSApple OSS Distributions
388*4d495c6eSApple OSS Distributions free(address);
389*4d495c6eSApple OSS Distributions #endif
390*4d495c6eSApple OSS Distributions }
391*4d495c6eSApple OSS Distributions
392*4d495c6eSApple OSS Distributions T_DECL(mte_tagged_memory_copy_io,
393*4d495c6eSApple OSS Distributions "Test direct I/O on tagged memory",
394*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
395*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
396*4d495c6eSApple OSS Distributions {
397*4d495c6eSApple OSS Distributions #if !__arm64__
398*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
399*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
400*4d495c6eSApple OSS Distributions
401*4d495c6eSApple OSS Distributions uint64_t size = PAGE_SIZE;
402*4d495c6eSApple OSS Distributions char* address = (char*) malloc(size);
403*4d495c6eSApple OSS Distributions
404*4d495c6eSApple OSS Distributions char *cp;
405*4d495c6eSApple OSS Distributions for (cp = (char *) (address); cp < (char *) (address + size); cp += PAGE_SIZE) {
406*4d495c6eSApple OSS Distributions *cp = 'x';
407*4d495c6eSApple OSS Distributions }
408*4d495c6eSApple OSS Distributions
409*4d495c6eSApple OSS Distributions int fd = open("/tmp/file1", O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0644);
410*4d495c6eSApple OSS Distributions T_ASSERT_TRUE(fd > 0, "File open successful");
411*4d495c6eSApple OSS Distributions ssize_t ret = pwrite(fd, address, size, 0);
412*4d495c6eSApple OSS Distributions T_ASSERT_TRUE((uint64_t) ret == size, "pwrite() on tagged memory");
413*4d495c6eSApple OSS Distributions
414*4d495c6eSApple OSS Distributions char *incorrectly_tagged = __arm_mte_increment_tag(address, 1);
415*4d495c6eSApple OSS Distributions expect_sigkill(^{
416*4d495c6eSApple OSS Distributions (void)pwrite(fd, incorrectly_tagged, size, 0);
417*4d495c6eSApple OSS Distributions }, "copy I/O on wrongly tagged memory");
418*4d495c6eSApple OSS Distributions
419*4d495c6eSApple OSS Distributions free(address);
420*4d495c6eSApple OSS Distributions #endif
421*4d495c6eSApple OSS Distributions }
422*4d495c6eSApple OSS Distributions
423*4d495c6eSApple OSS Distributions
/* Flags for do_fork_test(): child writes to tagged memory before the parent does. */
static int FORK_TEST_CHILD_WRITES_FIRST = 0x1;
/* Child forks a grandchild as part of the test. */
static int FORK_TEST_CHILD_FORKS = 0x2;
/* Child re-tags the shared allocation after fork(). */
static int FORK_TEST_CHILD_RETAGS = 0x4;
427*4d495c6eSApple OSS Distributions static void
do_fork_test(vm_size_t vm_alloc_sz, int flags)
{
#if !__arm64__
	T_SKIP("Running on non-arm64 target, skipping...");
#else /* !__arm64__ */
	/*
	 * Core fork/MTE test: allocate a tagged region, tag every granule,
	 * fork, and verify that tag-check enforcement survives fork() in the
	 * child (and optionally grandchild/great-grandchild), both for
	 * correctly tagged and untagged accesses. The `flags` bitmask
	 * (FORK_TEST_CHILD_WRITES_FIRST / _RETAGS / _FORKS) selects which
	 * CoW-resolution paths get exercised.
	 */
	vm_address_t address = 0;
	kern_return_t kr = vm_allocate(mach_task_self(), &address, vm_alloc_sz, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);

	T_ASSERT_MACH_SUCCESS(kr, "allocate tagged memory");

	char *untagged_ptr = (char *)address;
	char *orig_tagged_ptr = __arm_mte_get_tag(untagged_ptr);
	/* Exclude the currently-assigned tag so the random tags below differ from it. */
	uint64_t mask = __arm_mte_exclude_tag(orig_tagged_ptr, 0);

	size_t count;
	size_t offset;
	const vm_size_t NUM_GRANULES = vm_alloc_sz / MTE_GRANULE_SIZE;
	char *tagged_ptrs[NUM_GRANULES];

	/*
	 * Tag the entire page, one MTE granule at a time.
	 */
	for (count = 0; count < NUM_GRANULES; count++) {
		offset = count * MTE_GRANULE_SIZE;
		tagged_ptrs[count] = __arm_mte_create_random_tag(untagged_ptr + offset, mask);
		__arm_mte_set_tag(tagged_ptrs[count]);
	}

	/*
	 * Unless the child is meant to be the first writer (so that the child
	 * resolves the CoW fault), fault the memory in from the parent now.
	 */
	if (!(flags & FORK_TEST_CHILD_WRITES_FIRST)) {
		for (count = 0; count < NUM_GRANULES; count++) {
			*(tagged_ptrs[count]) = 'a';
		}
	}

	pid_t pid = fork();
	if (pid == 0) {
		T_LOG("Child forked");

		if (flags & FORK_TEST_CHILD_RETAGS) {
			T_LOG("Child editing tags");
			/* re-tag the entire page (STG resolves CoW without a data write) */
			for (count = 0; count < NUM_GRANULES; count++) {
				tagged_ptrs[count] = __arm_mte_increment_tag(tagged_ptrs[count], 1);
				__arm_mte_set_tag(tagged_ptrs[count]);
			}
		}

		T_LOG("Accessing parent tagged memory");
		/*
		 * Make sure the child process also has tag checks enabled.
		 */
		for (count = 0; count < NUM_GRANULES; count++) {
			*(tagged_ptrs[count]) = 'a';
		}

		T_LOG("Child access to tagged memory success");

		/* An access with the canonical (zero) tag must be fatal in the child too. */
		expect_sigkill(^{
			*untagged_ptr = 'b';
		}, "Child access through untagged ptr");

		if (flags & FORK_TEST_CHILD_FORKS) {
			pid_t pid2 = fork();

			if (pid2 == 0) {
				T_LOG("Grandchild forked");

				T_LOG("Accessing grandparent's tagged memory");

				for (count = 0; count < NUM_GRANULES; count++) {
					*(tagged_ptrs[count]) = 'a';
				}

				T_LOG("Grandchild access to tagged memory success");

				pid_t pid3 = fork();

				if (pid3 == 0) {
					T_LOG("Great grandchild forked");

					T_LOG("Accessing great grandparent's tagged memory");

					for (count = 0; count < NUM_GRANULES; count++) {
						*(tagged_ptrs[count]) = 'a';
					}

					T_LOG("Great grandchild access to tagged memory success");

					kr = vm_deallocate(mach_task_self(), address, vm_alloc_sz);
					T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "Great grandchild vm_deallocate");
					exit(0);
				} else {
					T_ASSERT_TRUE(pid3 != -1, "Checking fork success in grandchild");
					int status2 = 0;

					T_ASSERT_POSIX_SUCCESS(waitpid(pid3, &status2, 0), "waitpid");
					T_ASSERT_TRUE(WIFEXITED(status2) > 0, "Great grandchild exited normally");
				}

				kr = vm_deallocate(mach_task_self(), address, vm_alloc_sz);
				T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "Grandchild vm_deallocate");
				exit(0);
			} else {
				T_ASSERT_TRUE(pid2 != -1, "Checking fork success in child");
				int status2 = 0;
				T_ASSERT_POSIX_SUCCESS(waitpid(pid2, &status2, 0), "waitpid");
				T_ASSERT_TRUE(WIFEXITED(status2) > 0, "Grandchild exited normally");
			}
		}

		kr = vm_deallocate(mach_task_self(), address, vm_alloc_sz);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "Child vm_deallocate");
		exit(0);
	} else {
		T_ASSERT_TRUE(pid != -1, "Checking fork success in parent");

		int status = 0;
		T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid");

		T_ASSERT_TRUE(WIFEXITED(status) > 0, "Child exited normally");

		/* Verify that accessing memory actually works */
		for (count = 0; count < NUM_GRANULES; count++) {
			*(tagged_ptrs[count]) = 'a';
		}

		/* (fixed typo: was "sucessfull") */
		T_LOG("Parent access to tagged memory successful");

		expect_sigkill(^{
			*untagged_ptr = 'b';
		}, "Parent access through untagged ptr");
	}

	kr = vm_deallocate(mach_task_self(), address, vm_alloc_sz);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "Parent vm_deallocate");
#endif
}
565*4d495c6eSApple OSS Distributions
566*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_fork_after_alloc_less_page_sz,
567*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process after vm_allocate(ALLOC_SIZE, MTE)",
568*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
569*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
570*4d495c6eSApple OSS Distributions {
571*4d495c6eSApple OSS Distributions static const size_t ALLOC_SIZE = MTE_GRANULE_SIZE * 2;
572*4d495c6eSApple OSS Distributions do_fork_test(ALLOC_SIZE, 0);
573*4d495c6eSApple OSS Distributions }
574*4d495c6eSApple OSS Distributions
575*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_fork_after_alloc_page_sz,
576*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process after vm_allocate(PAGE_SIZE, MTE)",
577*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
578*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
579*4d495c6eSApple OSS Distributions {
580*4d495c6eSApple OSS Distributions do_fork_test(PAGE_SIZE, 0);
581*4d495c6eSApple OSS Distributions }
582*4d495c6eSApple OSS Distributions
583*4d495c6eSApple OSS Distributions /* NOTE: These following tests matter for when we switch to MEMORY_OBJECT_COPY_DELAY_FORK */
584*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_fork_child_fault_write,
585*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process after vm_allocate(MTE) and child writes to tagged memory first",
586*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
587*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
588*4d495c6eSApple OSS Distributions {
589*4d495c6eSApple OSS Distributions do_fork_test(PAGE_SIZE, FORK_TEST_CHILD_WRITES_FIRST);
590*4d495c6eSApple OSS Distributions }
591*4d495c6eSApple OSS Distributions
592*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_fork_child_double_fork,
593*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process after vm_allocate(MTE) and child writes to tagged memory first and then forks again",
594*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
595*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
596*4d495c6eSApple OSS Distributions {
597*4d495c6eSApple OSS Distributions do_fork_test(PAGE_SIZE, FORK_TEST_CHILD_WRITES_FIRST | FORK_TEST_CHILD_FORKS);
598*4d495c6eSApple OSS Distributions }
599*4d495c6eSApple OSS Distributions
600*4d495c6eSApple OSS Distributions /*
601*4d495c6eSApple OSS Distributions * These cases specifically test that tag setting instructions (STG) resolve CoW
602*4d495c6eSApple OSS Distributions * on fork correctly, since the child doesn't fault in the mapping by writing first.
603*4d495c6eSApple OSS Distributions */
604*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_fork_child_retag,
605*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process after vm_allocate(MTE) and child changes tags",
606*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
607*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
608*4d495c6eSApple OSS Distributions {
609*4d495c6eSApple OSS Distributions do_fork_test(PAGE_SIZE, FORK_TEST_CHILD_RETAGS);
610*4d495c6eSApple OSS Distributions }
611*4d495c6eSApple OSS Distributions
612*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_fork_child_fault_write_retag,
613*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process after vm_allocate(MTE) and child changes tags and writes to tagged memory first",
614*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
615*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
616*4d495c6eSApple OSS Distributions {
617*4d495c6eSApple OSS Distributions do_fork_test(PAGE_SIZE, FORK_TEST_CHILD_WRITES_FIRST | FORK_TEST_CHILD_RETAGS);
618*4d495c6eSApple OSS Distributions }
619*4d495c6eSApple OSS Distributions
620*4d495c6eSApple OSS Distributions T_DECL(mte_tag_check_fork_child_fault_write_retag_double_fork,
621*4d495c6eSApple OSS Distributions "Test MTE2 tag check fault in a child process after vm_allocate(MTE) and child changes tags, writes to tagged memory first, and then forks again",
622*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
623*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
624*4d495c6eSApple OSS Distributions {
625*4d495c6eSApple OSS Distributions do_fork_test(PAGE_SIZE, FORK_TEST_CHILD_WRITES_FIRST | FORK_TEST_CHILD_RETAGS | FORK_TEST_CHILD_FORKS);
626*4d495c6eSApple OSS Distributions }
627*4d495c6eSApple OSS Distributions
628*4d495c6eSApple OSS Distributions
629*4d495c6eSApple OSS Distributions T_DECL(mte_userland_uses_fake_kernel_pointer,
630*4d495c6eSApple OSS Distributions "Test that VM correctly rejects kernel-looking pointer from userspace",
631*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
632*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
633*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
634*4d495c6eSApple OSS Distributions {
635*4d495c6eSApple OSS Distributions #if __arm64__
636*4d495c6eSApple OSS Distributions /*
637*4d495c6eSApple OSS Distributions * When the VM is given a user address that looks like a kernel pointer,
638*4d495c6eSApple OSS Distributions * we want to make sure that it still gets canonicalized as a user address
639*4d495c6eSApple OSS Distributions * (rather than a valid kernel pointer).
640*4d495c6eSApple OSS Distributions * This should result in a nonsensical pointer that shouldn't exist in any
641*4d495c6eSApple OSS Distributions * VM map, so the memory access should fail.
642*4d495c6eSApple OSS Distributions */
643*4d495c6eSApple OSS Distributions vm_address_t addr = 0;
644*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(
645*4d495c6eSApple OSS Distributions mach_task_self(),
646*4d495c6eSApple OSS Distributions &addr,
647*4d495c6eSApple OSS Distributions MTE_GRANULE_SIZE,
648*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE);
649*4d495c6eSApple OSS Distributions T_QUIET;
650*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "allocate an untagged page");
651*4d495c6eSApple OSS Distributions T_LOG("Allocated untagged page at addr: 0x%lx", addr);
652*4d495c6eSApple OSS Distributions
653*4d495c6eSApple OSS Distributions /* Create a kernel-like pointer in userspace */
654*4d495c6eSApple OSS Distributions char *tampered_ptr = (char *)(addr | VM_MIN_KERNEL_ADDRESS);
655*4d495c6eSApple OSS Distributions T_LOG("Tampered ptr: %p", tampered_ptr);
656*4d495c6eSApple OSS Distributions
657*4d495c6eSApple OSS Distributions /* segfault is expected, since the pointer is not valid in the userspace map */
658*4d495c6eSApple OSS Distributions expect_signal(SIGSEGV, ^{
659*4d495c6eSApple OSS Distributions *tampered_ptr = 'a';
660*4d495c6eSApple OSS Distributions }, "Accessing kernel-like pointer from userspace");
661*4d495c6eSApple OSS Distributions vm_deallocate(mach_task_self(), addr, MTE_GRANULE_SIZE);
662*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
663*4d495c6eSApple OSS Distributions }
664*4d495c6eSApple OSS Distributions
665*4d495c6eSApple OSS Distributions /*
666*4d495c6eSApple OSS Distributions * Allocates tagged memory, assigns the memory a tag, and attempts to
667*4d495c6eSApple OSS Distributions * read the memory into its own address space via mach_vm_read().
668*4d495c6eSApple OSS Distributions *
669*4d495c6eSApple OSS Distributions * Also attempts to read the memory into its own address space with an untagged
670*4d495c6eSApple OSS Distributions * pointer, which we expect to fail.
671*4d495c6eSApple OSS Distributions */
672*4d495c6eSApple OSS Distributions static void
mte_mach_vm_read(mach_vm_size_t sz)673*4d495c6eSApple OSS Distributions mte_mach_vm_read(mach_vm_size_t sz)
674*4d495c6eSApple OSS Distributions {
675*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
676*4d495c6eSApple OSS Distributions __block mach_vm_address_t addr = 0;
677*4d495c6eSApple OSS Distributions __block vm_offset_t read_addr = 0;
678*4d495c6eSApple OSS Distributions __block mach_msg_type_number_t read_size = 0;
679*4d495c6eSApple OSS Distributions
680*4d495c6eSApple OSS Distributions mach_vm_size_t sz_rounded = (sz + (MTE_GRANULE_SIZE - 1)) & (unsigned)~((signed)(MTE_GRANULE_SIZE - 1));
681*4d495c6eSApple OSS Distributions T_LOG("sz rounded: %llu", sz_rounded);
682*4d495c6eSApple OSS Distributions /* Allocate some tagged memory */
683*4d495c6eSApple OSS Distributions T_LOG("Allocate tagged memory");
684*4d495c6eSApple OSS Distributions kern_return_t kr = mach_vm_allocate(
685*4d495c6eSApple OSS Distributions mach_task_self(),
686*4d495c6eSApple OSS Distributions &addr,
687*4d495c6eSApple OSS Distributions sz_rounded,
688*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
689*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "Allocated tagged page");
690*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_NE_ULLONG(0ULL, addr, "Allocated address is not null");
691*4d495c6eSApple OSS Distributions
692*4d495c6eSApple OSS Distributions uint64_t *untagged_ptr = (uint64_t *)addr;
693*4d495c6eSApple OSS Distributions
694*4d495c6eSApple OSS Distributions uint64_t *orig_tagged_ptr = __arm_mte_get_tag(untagged_ptr);
695*4d495c6eSApple OSS Distributions unsigned int orig_tag = extract_mte_tag(orig_tagged_ptr);
696*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_EQ_UINT(orig_tag, 0U, "Originally assigned tag is zero");
697*4d495c6eSApple OSS Distributions
698*4d495c6eSApple OSS Distributions uint64_t mask = __arm_mte_exclude_tag(orig_tagged_ptr, 0);
699*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_EQ_LLONG(mask, (1ULL << 0), "Zero tag is excluded");
700*4d495c6eSApple OSS Distributions
701*4d495c6eSApple OSS Distributions /* Generate random tag */
702*4d495c6eSApple OSS Distributions uint64_t *tagged_ptr = NULL;
703*4d495c6eSApple OSS Distributions tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, mask);
704*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_NE_PTR(orig_tagged_ptr, tagged_ptr,
705*4d495c6eSApple OSS Distributions "Random tag was not taken from excluded tag set");
706*4d495c6eSApple OSS Distributions
707*4d495c6eSApple OSS Distributions /* Time to make things real, commit the tag to memory */
708*4d495c6eSApple OSS Distributions for (uintptr_t cur_ptr = (uintptr_t)tagged_ptr;
709*4d495c6eSApple OSS Distributions cur_ptr < (uintptr_t)tagged_ptr + sz_rounded;
710*4d495c6eSApple OSS Distributions cur_ptr += MTE_GRANULE_SIZE) {
711*4d495c6eSApple OSS Distributions __arm_mte_set_tag((void *)cur_ptr);
712*4d495c6eSApple OSS Distributions }
713*4d495c6eSApple OSS Distributions T_LOG("Commited tagged pointer to memory: %p", tagged_ptr);
714*4d495c6eSApple OSS Distributions
715*4d495c6eSApple OSS Distributions /* Write to the memory */
716*4d495c6eSApple OSS Distributions for (uint i = 0; i < sz_rounded / sizeof(uint64_t); ++i) {
717*4d495c6eSApple OSS Distributions tagged_ptr[i] = addr;
718*4d495c6eSApple OSS Distributions }
719*4d495c6eSApple OSS Distributions T_LOG("Wrote to memory");
720*4d495c6eSApple OSS Distributions T_SETUPEND;
721*4d495c6eSApple OSS Distributions
722*4d495c6eSApple OSS Distributions T_LOG("Reading %llu bytes from %p", sz, tagged_ptr);
723*4d495c6eSApple OSS Distributions kr = mach_vm_read(
724*4d495c6eSApple OSS Distributions mach_task_self(),
725*4d495c6eSApple OSS Distributions (mach_vm_address_t)tagged_ptr,
726*4d495c6eSApple OSS Distributions sz,
727*4d495c6eSApple OSS Distributions &read_addr,
728*4d495c6eSApple OSS Distributions &read_size);
729*4d495c6eSApple OSS Distributions T_ASSERT_EQ(kr, KERN_SUCCESS,
730*4d495c6eSApple OSS Distributions "mach_vm_read %llu bytes from tagged ptr", sz);
731*4d495c6eSApple OSS Distributions
732*4d495c6eSApple OSS Distributions /* Make sure we get the same thing back */
733*4d495c6eSApple OSS Distributions T_ASSERT_EQ_UINT((unsigned int)sz, read_size,
734*4d495c6eSApple OSS Distributions "sz:%llu == read_size:%d", sz, read_size);
735*4d495c6eSApple OSS Distributions int result = memcmp(tagged_ptr, (void *)read_addr, sz);
736*4d495c6eSApple OSS Distributions T_ASSERT_EQ(result, 0, "mach_vm_read back the same info");
737*4d495c6eSApple OSS Distributions
738*4d495c6eSApple OSS Distributions /* Now try with incorrectly tagged pointer (aka, no tag) */
739*4d495c6eSApple OSS Distributions uint64_t *random_tagged_ptr = NULL;
740*4d495c6eSApple OSS Distributions /* Exclude the previous tag */
741*4d495c6eSApple OSS Distributions unsigned int previous_tag = extract_mte_tag(tagged_ptr);
742*4d495c6eSApple OSS Distributions mask = __arm_mte_exclude_tag(tagged_ptr, previous_tag);
743*4d495c6eSApple OSS Distributions random_tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, mask);
744*4d495c6eSApple OSS Distributions T_LOG("random tagged ptr: %p", random_tagged_ptr);
745*4d495c6eSApple OSS Distributions T_EXPECT_NE_PTR(tagged_ptr, random_tagged_ptr,
746*4d495c6eSApple OSS Distributions "Random tag was not taken from excluded tag set");
747*4d495c6eSApple OSS Distributions
748*4d495c6eSApple OSS Distributions T_LOG("Reading %llu bytes from %p", sz, random_tagged_ptr);
749*4d495c6eSApple OSS Distributions expect_sigkill(^{
750*4d495c6eSApple OSS Distributions T_LOG("tagged_ptr[0]: %llu", random_tagged_ptr[0]);
751*4d495c6eSApple OSS Distributions }, "Accessing memory with the wrong tag, should fail");
752*4d495c6eSApple OSS Distributions
753*4d495c6eSApple OSS Distributions expect_sigkill(^{
754*4d495c6eSApple OSS Distributions (void)mach_vm_read(
755*4d495c6eSApple OSS Distributions mach_task_self(),
756*4d495c6eSApple OSS Distributions (mach_vm_address_t)random_tagged_ptr,
757*4d495c6eSApple OSS Distributions KERNEL_BUFFER_COPY_THRESHOLD,
758*4d495c6eSApple OSS Distributions &read_addr,
759*4d495c6eSApple OSS Distributions &read_size);
760*4d495c6eSApple OSS Distributions }, "Untagged pointer access leads to tag check fault");
761*4d495c6eSApple OSS Distributions
762*4d495c6eSApple OSS Distributions /* Reset tags to 0 before freeing */
763*4d495c6eSApple OSS Distributions for (uintptr_t cur_ptr = (uintptr_t)orig_tagged_ptr;
764*4d495c6eSApple OSS Distributions cur_ptr < (uintptr_t)orig_tagged_ptr + sz_rounded;
765*4d495c6eSApple OSS Distributions cur_ptr += MTE_GRANULE_SIZE) {
766*4d495c6eSApple OSS Distributions __arm_mte_set_tag((void *)cur_ptr);
767*4d495c6eSApple OSS Distributions }
768*4d495c6eSApple OSS Distributions vm_deallocate(mach_task_self(), addr, sz_rounded);
769*4d495c6eSApple OSS Distributions }
770*4d495c6eSApple OSS Distributions
771*4d495c6eSApple OSS Distributions T_DECL(mte_mach_vm_read_16b,
772*4d495c6eSApple OSS Distributions "mach_vm_read 16 bytes of tagged memory",
773*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
774*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
775*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
776*4d495c6eSApple OSS Distributions {
777*4d495c6eSApple OSS Distributions #if __arm64__
778*4d495c6eSApple OSS Distributions mte_mach_vm_read(MTE_GRANULE_SIZE);
779*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
780*4d495c6eSApple OSS Distributions }
781*4d495c6eSApple OSS Distributions
782*4d495c6eSApple OSS Distributions T_DECL(mte_mach_vm_read_32k,
783*4d495c6eSApple OSS Distributions "mach_vm_read 32k bytes of tagged memory",
784*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
785*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
786*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
787*4d495c6eSApple OSS Distributions {
788*4d495c6eSApple OSS Distributions #if __arm64__
789*4d495c6eSApple OSS Distributions mte_mach_vm_read(KERNEL_BUFFER_COPY_THRESHOLD);
790*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
791*4d495c6eSApple OSS Distributions }
792*4d495c6eSApple OSS Distributions
793*4d495c6eSApple OSS Distributions T_DECL(mte_mach_vm_read_over_32k,
794*4d495c6eSApple OSS Distributions "mach_vm_read 32k + 1 bytes of tagged memory",
795*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
796*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
797*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
798*4d495c6eSApple OSS Distributions {
799*4d495c6eSApple OSS Distributions #if __arm64__
800*4d495c6eSApple OSS Distributions /* This will actually get rounded to 32K + 16 */
801*4d495c6eSApple OSS Distributions mte_mach_vm_read(KERNEL_BUFFER_COPY_THRESHOLD + 1);
802*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
803*4d495c6eSApple OSS Distributions }
804*4d495c6eSApple OSS Distributions
805*4d495c6eSApple OSS Distributions T_DECL(mte_vm_map_copyinout_in_kernel,
806*4d495c6eSApple OSS Distributions "Test that the VM handles vm_map_copyin correctly for kernel-to-kernel tagged memory",
807*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
808*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
809*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
810*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
811*4d495c6eSApple OSS Distributions {
812*4d495c6eSApple OSS Distributions #if __arm64__
813*4d495c6eSApple OSS Distributions T_SKIP("This test is expected to panic; comment this line to be able to run it at desk.");
814*4d495c6eSApple OSS Distributions (void) run_sysctl_test("vm_map_copyio", 0);
815*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
816*4d495c6eSApple OSS Distributions }
817*4d495c6eSApple OSS Distributions
818*4d495c6eSApple OSS Distributions #if __arm64__
819*4d495c6eSApple OSS Distributions static void
do_remap_test(bool own_memory)820*4d495c6eSApple OSS Distributions do_remap_test(bool own_memory)
821*4d495c6eSApple OSS Distributions {
822*4d495c6eSApple OSS Distributions mach_vm_address_t tagged_addr, untagged_addr;
823*4d495c6eSApple OSS Distributions mach_vm_size_t size = PAGE_SIZE;
824*4d495c6eSApple OSS Distributions
825*4d495c6eSApple OSS Distributions T_LOG("Allocate tagged memory");
826*4d495c6eSApple OSS Distributions tagged_addr = allocate_and_tag_range(size, TAG_RANDOM);
827*4d495c6eSApple OSS Distributions char *tagged_ptr = (char*) tagged_addr;
828*4d495c6eSApple OSS Distributions untagged_addr = tagged_addr & ~MTE_TAG_MASK;
829*4d495c6eSApple OSS Distributions
830*4d495c6eSApple OSS Distributions /* Write to the memory */
831*4d495c6eSApple OSS Distributions for (unsigned int i = 0; i < size; i++) {
832*4d495c6eSApple OSS Distributions tagged_ptr[i] = 'a';
833*4d495c6eSApple OSS Distributions }
834*4d495c6eSApple OSS Distributions
835*4d495c6eSApple OSS Distributions T_LOG("Wrote to memory");
836*4d495c6eSApple OSS Distributions
837*4d495c6eSApple OSS Distributions expect_normal_exit(^{
838*4d495c6eSApple OSS Distributions kern_return_t kr;
839*4d495c6eSApple OSS Distributions mach_port_t port;
840*4d495c6eSApple OSS Distributions if (own_memory) {
841*4d495c6eSApple OSS Distributions port = mach_task_self();
842*4d495c6eSApple OSS Distributions } else {
843*4d495c6eSApple OSS Distributions /* note: expect_normal_exit forks, so the parent has the allocation as well */
844*4d495c6eSApple OSS Distributions kr = task_for_pid(mach_task_self(), getppid(), &port);
845*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "task_for_pid");
846*4d495c6eSApple OSS Distributions }
847*4d495c6eSApple OSS Distributions
848*4d495c6eSApple OSS Distributions mach_vm_address_t remap_addr = 0;
849*4d495c6eSApple OSS Distributions vm_prot_t curprot = VM_PROT_WRITE | VM_PROT_READ;
850*4d495c6eSApple OSS Distributions vm_prot_t maxprot = VM_PROT_WRITE | VM_PROT_READ;
851*4d495c6eSApple OSS Distributions kr = mach_vm_remap_new(mach_task_self(), &remap_addr, size,
852*4d495c6eSApple OSS Distributions /* mask = */ 0, VM_FLAGS_ANYWHERE, port, untagged_addr,
853*4d495c6eSApple OSS Distributions /* copy = */ FALSE, &curprot, &maxprot, VM_INHERIT_DEFAULT);
854*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "successfully remapped tagged memory");
855*4d495c6eSApple OSS Distributions
856*4d495c6eSApple OSS Distributions T_ASSERT_EQ(remap_addr & MTE_TAG_MASK, 0ULL, "vm_remap returns an untagged pointer");
857*4d495c6eSApple OSS Distributions
858*4d495c6eSApple OSS Distributions char *untagged_remap_ptr = (char*) remap_addr;
859*4d495c6eSApple OSS Distributions char *tagged_remap_ptr = __arm_mte_get_tag(untagged_remap_ptr);
860*4d495c6eSApple OSS Distributions char *incorrectly_tagged_remap_ptr = __arm_mte_increment_tag(tagged_remap_ptr, 1);
861*4d495c6eSApple OSS Distributions
862*4d495c6eSApple OSS Distributions /* verify the data is correct; check every granule for speed */
863*4d495c6eSApple OSS Distributions for (unsigned int i = 0; i < size; i += MTE_GRANULE_SIZE) {
864*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_EQ(tagged_remap_ptr[i], 'a', "read value %u from array", i);
865*4d495c6eSApple OSS Distributions }
866*4d495c6eSApple OSS Distributions T_LOG("Verified data from child");
867*4d495c6eSApple OSS Distributions
868*4d495c6eSApple OSS Distributions /* make sure the new mapping is also tagged */
869*4d495c6eSApple OSS Distributions expect_sigkill(^{
870*4d495c6eSApple OSS Distributions *untagged_remap_ptr = 'b';
871*4d495c6eSApple OSS Distributions }, "remapped MTE memory sends SIGKILL when accessed with canonical tag");
872*4d495c6eSApple OSS Distributions expect_sigkill(^{
873*4d495c6eSApple OSS Distributions *incorrectly_tagged_remap_ptr = 'b';
874*4d495c6eSApple OSS Distributions }, "remapped MTE memory sends SIGKILL when accessed with incorrect tag");
875*4d495c6eSApple OSS Distributions expect_normal_exit(^{
876*4d495c6eSApple OSS Distributions *tagged_remap_ptr = 'b';
877*4d495c6eSApple OSS Distributions }, "remapped MTE memory can be accessed with correct tag");
878*4d495c6eSApple OSS Distributions
879*4d495c6eSApple OSS Distributions if (!own_memory) {
880*4d495c6eSApple OSS Distributions kr = mach_port_deallocate(mach_task_self(), port);
881*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate parent port");
882*4d495c6eSApple OSS Distributions }
883*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), remap_addr, size);
884*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate remapped memory");
885*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), untagged_addr, size);
886*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate original memory from child");
887*4d495c6eSApple OSS Distributions }, "remap tagged memory");
888*4d495c6eSApple OSS Distributions kern_return_t kr = mach_vm_deallocate(mach_task_self(), untagged_addr, size);
889*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate original memory");
890*4d495c6eSApple OSS Distributions }
891*4d495c6eSApple OSS Distributions
892*4d495c6eSApple OSS Distributions T_DECL(mte_vm_map_remap_self,
893*4d495c6eSApple OSS Distributions "mach_vm_remap_new() on a tagged memory of the same process",
894*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
895*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
896*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
897*4d495c6eSApple OSS Distributions {
898*4d495c6eSApple OSS Distributions do_remap_test(true);
899*4d495c6eSApple OSS Distributions }
900*4d495c6eSApple OSS Distributions
901*4d495c6eSApple OSS Distributions T_DECL(mte_vm_map_remap_other,
902*4d495c6eSApple OSS Distributions "mach_vm_remap_new() on a tagged memory of a different process",
903*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
904*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
905*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
906*4d495c6eSApple OSS Distributions {
907*4d495c6eSApple OSS Distributions do_remap_test(false);
908*4d495c6eSApple OSS Distributions }
909*4d495c6eSApple OSS Distributions
910*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
911*4d495c6eSApple OSS Distributions
912*4d495c6eSApple OSS Distributions T_DECL(vm_allocate_zero_tags,
913*4d495c6eSApple OSS Distributions "Ensure tags are zeroed when tagged memory is allocated from userspace",
914*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
915*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
916*4d495c6eSApple OSS Distributions {
917*4d495c6eSApple OSS Distributions #if !__arm64__
918*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
919*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
920*4d495c6eSApple OSS Distributions /*
921*4d495c6eSApple OSS Distributions * Do a bunch of allocations and check that the returned tags are zeroed.
922*4d495c6eSApple OSS Distributions * We do NUM_ALLOCATIONS_PER_ITERATION allocations, check the tags,
923*4d495c6eSApple OSS Distributions * deallocate them, and then do it again for a total of NUM_ITERATIONS
924*4d495c6eSApple OSS Distributions * iterations.
925*4d495c6eSApple OSS Distributions * NUM_ALLOCATIONS_PER_ITERATION is equal to the array bound.
926*4d495c6eSApple OSS Distributions */
927*4d495c6eSApple OSS Distributions vm_address_t addresses[1000];
928*4d495c6eSApple OSS Distributions const unsigned int NUM_ALLOCATIONS_PER_ITERATION = sizeof(addresses) / sizeof(addresses[0]);
929*4d495c6eSApple OSS Distributions const unsigned int NUM_ITERATIONS = 3;
930*4d495c6eSApple OSS Distributions
931*4d495c6eSApple OSS Distributions kern_return_t kr;
932*4d495c6eSApple OSS Distributions for (size_t i = 0; i < NUM_ITERATIONS; i++) {
933*4d495c6eSApple OSS Distributions unsigned int failures = 0;
934*4d495c6eSApple OSS Distributions for (size_t j = 0; j < NUM_ALLOCATIONS_PER_ITERATION; j++) {
935*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), &addresses[j], MTE_GRANULE_SIZE, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
936*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "allocate tagged memory (%zu, %zu)", i, j);
937*4d495c6eSApple OSS Distributions
938*4d495c6eSApple OSS Distributions /*
939*4d495c6eSApple OSS Distributions * This is the actual test - we get the correctly tagged pointer and
940*4d495c6eSApple OSS Distributions * verify that it is zero.
941*4d495c6eSApple OSS Distributions */
942*4d495c6eSApple OSS Distributions char *tagged_ptr = __arm_mte_get_tag((char*) addresses[j]);
943*4d495c6eSApple OSS Distributions unsigned int orig_tag = extract_mte_tag(tagged_ptr);
944*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_EQ(orig_tag, 0, "vm_allocate returns memory with zeroed tags (%zu, %zu)", i, j);
945*4d495c6eSApple OSS Distributions failures += (orig_tag != 0);
946*4d495c6eSApple OSS Distributions
947*4d495c6eSApple OSS Distributions /* Assign an arbitrary nonzero tag and commit it to memory */
948*4d495c6eSApple OSS Distributions tagged_ptr = __arm_mte_create_random_tag(tagged_ptr, 1);
949*4d495c6eSApple OSS Distributions __arm_mte_set_tag(tagged_ptr);
950*4d495c6eSApple OSS Distributions
951*4d495c6eSApple OSS Distributions /* Fail early if a zero tag was somehow assigned */
952*4d495c6eSApple OSS Distributions unsigned int new_tag = extract_mte_tag(tagged_ptr);
953*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_NE(new_tag, 0, "random tag is nonzero (%zu, %zu)", i, j);
954*4d495c6eSApple OSS Distributions }
955*4d495c6eSApple OSS Distributions
956*4d495c6eSApple OSS Distributions for (size_t j = 0; j < NUM_ALLOCATIONS_PER_ITERATION; j++) {
957*4d495c6eSApple OSS Distributions kr = vm_deallocate(mach_task_self(), addresses[j], MTE_GRANULE_SIZE);
958*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate tagged memory (%zu, %zu)", i, j);
959*4d495c6eSApple OSS Distributions }
960*4d495c6eSApple OSS Distributions /* Aggregate results per iteration to avoid too much noise */
961*4d495c6eSApple OSS Distributions T_EXPECT_EQ(failures, 0, "Iteration %zu success", i);
962*4d495c6eSApple OSS Distributions }
963*4d495c6eSApple OSS Distributions #endif /* !__arm64__ */
964*4d495c6eSApple OSS Distributions }
965*4d495c6eSApple OSS Distributions
966*4d495c6eSApple OSS Distributions /*
967*4d495c6eSApple OSS Distributions * Policy (MTE_VMSEC_13): VM performed range-checks must be done with
968*4d495c6eSApple OSS Distributions * canonicalized pointers, regardless of whether MTE is enabled
969*4d495c6eSApple OSS Distributions *
970*4d495c6eSApple OSS Distributions * Note that this specifically tests vm_map_copyin, vm_map_copy_overwrite,
971*4d495c6eSApple OSS Distributions * since those kernel functions are intended to take tagged pointers.
972*4d495c6eSApple OSS Distributions */
973*4d495c6eSApple OSS Distributions T_DECL(mte_copy_range_checks,
974*4d495c6eSApple OSS Distributions "Test that VM range checks operate on canonicalized pointers",
975*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
976*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
977*4d495c6eSApple OSS Distributions {
978*4d495c6eSApple OSS Distributions #if !__arm64__
979*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
980*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
981*4d495c6eSApple OSS Distributions vm_address_t tagged_addr, incorrectly_tagged_addr;
982*4d495c6eSApple OSS Distributions /*
983*4d495c6eSApple OSS Distributions * Test setup
984*4d495c6eSApple OSS Distributions */
985*4d495c6eSApple OSS Distributions const mach_vm_size_t alloc_size = PAGE_SIZE;
986*4d495c6eSApple OSS Distributions tagged_addr = allocate_and_tag_range(alloc_size, 1);
987*4d495c6eSApple OSS Distributions incorrectly_tagged_addr = (tagged_addr & ~MTE_TAG_MASK) | (2LLU << MTE_TAG_SHIFT);
988*4d495c6eSApple OSS Distributions
989*4d495c6eSApple OSS Distributions /*
990*4d495c6eSApple OSS Distributions * mach_vm_copyin test:
991*4d495c6eSApple OSS Distributions * If mach_vm_copyin canonicalizes the tagged pointer for its range checks
992*4d495c6eSApple OSS Distributions * like it should, the range check will succeed and the actual "copy-in"
993*4d495c6eSApple OSS Distributions * operation will be allowed to go through. This will result in a tag check
994*4d495c6eSApple OSS Distributions * fault and the process being killed since the tag is incorrect.
995*4d495c6eSApple OSS Distributions *
996*4d495c6eSApple OSS Distributions * If, erroneously, the range check is done on tagged pointers, we expect
997*4d495c6eSApple OSS Distributions * to see a failure since the "incorrect" tag is larger than the "correct"
998*4d495c6eSApple OSS Distributions * one so it would be treated as out-of-bounds for the map.
999*4d495c6eSApple OSS Distributions */
1000*4d495c6eSApple OSS Distributions
1001*4d495c6eSApple OSS Distributions expect_sigkill(^{
1002*4d495c6eSApple OSS Distributions pointer_t read_address;
1003*4d495c6eSApple OSS Distributions mach_msg_type_number_t read_size;
1004*4d495c6eSApple OSS Distributions kern_return_t kr = mach_vm_read(mach_task_self(), incorrectly_tagged_addr,
1005*4d495c6eSApple OSS Distributions alloc_size, &read_address, &read_size);
1006*4d495c6eSApple OSS Distributions T_LOG("SIGKILL not received, kr was %d", kr);
1007*4d495c6eSApple OSS Distributions }, "mach_vm_read with incorrectly tagged pointer should cause a tag check fault");
1008*4d495c6eSApple OSS Distributions
1009*4d495c6eSApple OSS Distributions /*
1010*4d495c6eSApple OSS Distributions * mach_vm_copy_overwrite test:
1011*4d495c6eSApple OSS Distributions * Essentially the same logic using mach_vm_write instead of mach_vm_read.
1012*4d495c6eSApple OSS Distributions * To be able to do a vm_map_write, we need to first set up a vm_map_copy_t,
1013*4d495c6eSApple OSS Distributions * which we can get from a correctly-executed vm_map_read.
1014*4d495c6eSApple OSS Distributions */
1015*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1016*4d495c6eSApple OSS Distributions pointer_t copy_address;
1017*4d495c6eSApple OSS Distributions mach_msg_type_number_t copy_size;
1018*4d495c6eSApple OSS Distributions kern_return_t kr = mach_vm_read(mach_task_self(), tagged_addr,
1019*4d495c6eSApple OSS Distributions alloc_size, ©_address, ©_size);
1020*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "set up vm_map_copy_t for mach_vm_write test");
1021*4d495c6eSApple OSS Distributions T_SETUPEND;
1022*4d495c6eSApple OSS Distributions expect_sigkill(^{
1023*4d495c6eSApple OSS Distributions kern_return_t kr2 = mach_vm_write(mach_task_self(), incorrectly_tagged_addr,
1024*4d495c6eSApple OSS Distributions copy_address, copy_size);
1025*4d495c6eSApple OSS Distributions T_LOG("SIGKILL not received, kr was %d", kr2);
1026*4d495c6eSApple OSS Distributions }, "mach_vm_write with incorrectly tagged pointer should cause a tag check fault");
1027*4d495c6eSApple OSS Distributions #endif /* !__arm64__ */
1028*4d495c6eSApple OSS Distributions }
1029*4d495c6eSApple OSS Distributions
1030*4d495c6eSApple OSS Distributions /*
1031*4d495c6eSApple OSS Distributions * Policy (MTE_VMSEC_14): VM performed range math must be done using canonical
1032*4d495c6eSApple OSS Distributions * pointers, regardless of whether MTE is enabled.
1033*4d495c6eSApple OSS Distributions *
1034*4d495c6eSApple OSS Distributions * Note that this specifically tests vm_map_copyin, vm_map_copy_overwrite,
1035*4d495c6eSApple OSS Distributions * since those kernel functions are intended to take tagged pointers.
1036*4d495c6eSApple OSS Distributions */
1037*4d495c6eSApple OSS Distributions T_DECL(mte_copy_range_math,
1038*4d495c6eSApple OSS Distributions "Test that pointer values are not canonicalized after range math",
1039*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1040*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
1041*4d495c6eSApple OSS Distributions {
1042*4d495c6eSApple OSS Distributions #if !__arm64__
1043*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
1044*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
1045*4d495c6eSApple OSS Distributions vm_address_t tagged_addr;
1046*4d495c6eSApple OSS Distributions kern_return_t kr;
1047*4d495c6eSApple OSS Distributions
1048*4d495c6eSApple OSS Distributions /*
1049*4d495c6eSApple OSS Distributions * Test setup
1050*4d495c6eSApple OSS Distributions */
1051*4d495c6eSApple OSS Distributions const mach_vm_size_t alloc_size = MTE_GRANULE_SIZE;
1052*4d495c6eSApple OSS Distributions tagged_addr = allocate_and_tag_range(alloc_size, TAG_RANDOM);
1053*4d495c6eSApple OSS Distributions
1054*4d495c6eSApple OSS Distributions vm_offset_t read_address;
1055*4d495c6eSApple OSS Distributions mach_msg_type_number_t read_size;
1056*4d495c6eSApple OSS Distributions mach_vm_size_t malformed_size;
1057*4d495c6eSApple OSS Distributions
1058*4d495c6eSApple OSS Distributions /*
1059*4d495c6eSApple OSS Distributions * A size which extends into the MTE tag bits is too large to fit in
1060*4d495c6eSApple OSS Distributions * memory and should be rejected. If range math is operating on tagged
1061*4d495c6eSApple OSS Distributions * pointers (and the tag bits get stripped later), then this would
1062*4d495c6eSApple OSS Distributions * be accepted.
1063*4d495c6eSApple OSS Distributions */
1064*4d495c6eSApple OSS Distributions // Test vm_map_copyin using mach_vm_read
1065*4d495c6eSApple OSS Distributions malformed_size = (mach_vm_size_t) alloc_size | (7LLU << MTE_TAG_SHIFT);
1066*4d495c6eSApple OSS Distributions kr = mach_vm_read(mach_task_self(), tagged_addr, malformed_size,
1067*4d495c6eSApple OSS Distributions &read_address, &read_size);
1068*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR_(kr, KERN_INVALID_ARGUMENT, "mach_vm_read should reject size which extends into tag bits");
1069*4d495c6eSApple OSS Distributions
1070*4d495c6eSApple OSS Distributions /*
1071*4d495c6eSApple OSS Distributions * Cannot test vm_map_copy_overwrite from userspace. The only entry point
1072*4d495c6eSApple OSS Distributions * that hits this function without first hitting mach_vm_read is
1073*4d495c6eSApple OSS Distributions * mach_vm_write, which takes its size as a 32-bit mach_msg_type_number_t.
1074*4d495c6eSApple OSS Distributions */
1075*4d495c6eSApple OSS Distributions #endif /* !__arm64__ */
1076*4d495c6eSApple OSS Distributions }
1077*4d495c6eSApple OSS Distributions
1078*4d495c6eSApple OSS Distributions /*
1079*4d495c6eSApple OSS Distributions * Policy (MTE_VMSEC_16): if the parameter/target of a VM API is a range of
1080*4d495c6eSApple OSS Distributions * memory, VM APIs must ensure that the address is not tagged
1081*4d495c6eSApple OSS Distributions *
1082*4d495c6eSApple OSS Distributions * Corollary: to ease adoption in cases in which pointers obtained from
1083*4d495c6eSApple OSS Distributions * the memory allocator are directly passed to some of these functions,
1084*4d495c6eSApple OSS Distributions * we implement stripping at the kernel API entrypoint for APIs that do
1085*4d495c6eSApple OSS Distributions * not affect the VM state or that are safe and common enough to strip.
1086*4d495c6eSApple OSS Distributions * This helps also clearing/making deterministic
1087*4d495c6eSApple OSS Distributions * cases where addresses were passed along the VM subsystem just waiting
1088*4d495c6eSApple OSS Distributions * to eventually be rejected.
1089*4d495c6eSApple OSS Distributions *
1090*4d495c6eSApple OSS Distributions * note: this does not apply to APIs which lead to vm_map_copy{in,out}, since
1091*4d495c6eSApple OSS Distributions * these need tags to be able to read tagged memory.
1092*4d495c6eSApple OSS Distributions */
1093*4d495c6eSApple OSS Distributions T_DECL(mte_vm_reject_tagged_pointers,
1094*4d495c6eSApple OSS Distributions "Test that most VM APIs reject tagged pointers",
1095*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1096*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
1097*4d495c6eSApple OSS Distributions T_META_ASROOT(true) /* to be able to get host_priv port for mach_vm_wire */)
1098*4d495c6eSApple OSS Distributions {
1099*4d495c6eSApple OSS Distributions #if !__arm64__
1100*4d495c6eSApple OSS Distributions T_SKIP("Running on non-arm64 target, skipping...");
1101*4d495c6eSApple OSS Distributions #else /* !__arm64__ */
1102*4d495c6eSApple OSS Distributions vm_address_t untagged_addr, tagged_addr, tagged_addr_mprotect;
1103*4d495c6eSApple OSS Distributions void *untagged_ptr, *tagged_ptr, *tagged_ptr_mprotect;
1104*4d495c6eSApple OSS Distributions kern_return_t kr;
1105*4d495c6eSApple OSS Distributions int ret;
1106*4d495c6eSApple OSS Distributions
1107*4d495c6eSApple OSS Distributions /*
1108*4d495c6eSApple OSS Distributions * Test setup
1109*4d495c6eSApple OSS Distributions */
1110*4d495c6eSApple OSS Distributions const size_t alloc_size = PAGE_SIZE;
1111*4d495c6eSApple OSS Distributions tagged_addr = allocate_and_tag_range(alloc_size, TAG_RANDOM);
1112*4d495c6eSApple OSS Distributions tagged_addr_mprotect = allocate_and_tag_range(alloc_size, TAG_RANDOM);
1113*4d495c6eSApple OSS Distributions untagged_addr = tagged_addr & ~MTE_TAG_MASK;
1114*4d495c6eSApple OSS Distributions untagged_ptr = (void*) untagged_addr;
1115*4d495c6eSApple OSS Distributions tagged_ptr = (void*) tagged_addr;
1116*4d495c6eSApple OSS Distributions tagged_ptr_mprotect = (void *)tagged_addr_mprotect;
1117*4d495c6eSApple OSS Distributions
1118*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_NE(tagged_addr & MTE_TAG_MASK, 0ULL, "validate tagged_addr");
1119*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_EQ(untagged_addr & MTE_TAG_MASK, 0ULL, "validate untagged_addr");
1120*4d495c6eSApple OSS Distributions
1121*4d495c6eSApple OSS Distributions __block struct vm_region_submap_info_64 region_info;
1122*4d495c6eSApple OSS Distributions void (^get_region_info)(void) = ^{
1123*4d495c6eSApple OSS Distributions vm_address_t address = untagged_addr;
1124*4d495c6eSApple OSS Distributions unsigned int depth = 1;
1125*4d495c6eSApple OSS Distributions vm_size_t size;
1126*4d495c6eSApple OSS Distributions mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
1127*4d495c6eSApple OSS Distributions kern_return_t region_kr = vm_region_recurse_64(mach_task_self(), &address, &size,
1128*4d495c6eSApple OSS Distributions &depth, (vm_region_info_t) ®ion_info, &count);
1129*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(region_kr, "get allocation region info");
1130*4d495c6eSApple OSS Distributions };
1131*4d495c6eSApple OSS Distributions
1132*4d495c6eSApple OSS Distributions /*
1133*4d495c6eSApple OSS Distributions * Test various APIs with tagged pointers
1134*4d495c6eSApple OSS Distributions */
1135*4d495c6eSApple OSS Distributions /* mprotect, mach_vm_protect are common enough, we strip implicitly. */
1136*4d495c6eSApple OSS Distributions ret = mprotect(tagged_ptr_mprotect, alloc_size, PROT_NONE);
1137*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "mprotect");
1138*4d495c6eSApple OSS Distributions kr = mach_vm_protect(mach_task_self(), tagged_addr_mprotect, alloc_size, false, PROT_NONE);
1139*4d495c6eSApple OSS Distributions T_EXPECT_MACH_SUCCESS(kr, "mach_vm_protect");
1140*4d495c6eSApple OSS Distributions
1141*4d495c6eSApple OSS Distributions /*
1142*4d495c6eSApple OSS Distributions * mincore: SUCCESS
1143*4d495c6eSApple OSS Distributions */
1144*4d495c6eSApple OSS Distributions char vec[100] = {0};
1145*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_LE(alloc_size, sizeof(vec) * PAGE_SIZE, "vec is large enough to fit mincore result");
1146*4d495c6eSApple OSS Distributions ret = mincore(tagged_ptr, alloc_size, vec);
1147*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "mincore: return value");
1148*4d495c6eSApple OSS Distributions
1149*4d495c6eSApple OSS Distributions /* msync, mach_vm_msync */
1150*4d495c6eSApple OSS Distributions ret = msync(tagged_ptr, alloc_size, MS_SYNC);
1151*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "msync");
1152*4d495c6eSApple OSS Distributions kr = mach_vm_msync(mach_task_self(), tagged_addr, alloc_size, VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
1153*4d495c6eSApple OSS Distributions T_EXPECT_MACH_SUCCESS(kr, "mach_vm_msync");
1154*4d495c6eSApple OSS Distributions
1155*4d495c6eSApple OSS Distributions /* madvise, mach_vm_behavior_set strip tagged addresses */
1156*4d495c6eSApple OSS Distributions ret = madvise(tagged_ptr, alloc_size, MADV_NORMAL);
1157*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "madvise");
1158*4d495c6eSApple OSS Distributions kr = mach_vm_behavior_set(mach_task_self(), tagged_addr, alloc_size,
1159*4d495c6eSApple OSS Distributions VM_BEHAVIOR_DEFAULT);
1160*4d495c6eSApple OSS Distributions T_EXPECT_MACH_SUCCESS(kr, "mach_vm_behavior_set");
1161*4d495c6eSApple OSS Distributions
1162*4d495c6eSApple OSS Distributions /*
1163*4d495c6eSApple OSS Distributions * minherit, mach_vm_inherit:
1164*4d495c6eSApple OSS Distributions * mach_vm_inherit would just silently succeed and do nothing if the range was tagged, so
1165*4d495c6eSApple OSS Distributions * we strip addresses to have consistent behavior.
1166*4d495c6eSApple OSS Distributions */
1167*4d495c6eSApple OSS Distributions const vm_inherit_t NEW_INHERIT = VM_INHERIT_NONE;
1168*4d495c6eSApple OSS Distributions ret = minherit(tagged_ptr, alloc_size, NEW_INHERIT);
1169*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "minherit");
1170*4d495c6eSApple OSS Distributions kr = mach_vm_inherit(mach_task_self(), tagged_addr, alloc_size, NEW_INHERIT);
1171*4d495c6eSApple OSS Distributions T_EXPECT_MACH_SUCCESS(kr, "mach_vm_inherit");
1172*4d495c6eSApple OSS Distributions
1173*4d495c6eSApple OSS Distributions /*
1174*4d495c6eSApple OSS Distributions * mlock, mach_vm_wire(prot != VM_PROT_NONE):
1175*4d495c6eSApple OSS Distributions * Allow implicitly stripping to avoid no-op success that might confuse third parties.
1176*4d495c6eSApple OSS Distributions */
1177*4d495c6eSApple OSS Distributions mach_port_t host_priv = HOST_PRIV_NULL;
1178*4d495c6eSApple OSS Distributions kr = host_get_host_priv_port(mach_host_self(), &host_priv); \
1179*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "get host_priv port");
1180*4d495c6eSApple OSS Distributions
1181*4d495c6eSApple OSS Distributions ret = mlock(tagged_ptr, alloc_size);
1182*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "mlock");
1183*4d495c6eSApple OSS Distributions get_region_info();
1184*4d495c6eSApple OSS Distributions T_EXPECT_EQ(region_info.user_wired_count, (unsigned short) 1, "mlock on tagged pointer should wire memory");
1185*4d495c6eSApple OSS Distributions ret = munlock(tagged_ptr, alloc_size);
1186*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "munlock");
1187*4d495c6eSApple OSS Distributions get_region_info();
1188*4d495c6eSApple OSS Distributions T_EXPECT_EQ(region_info.user_wired_count, (unsigned short) 0, "munlock on tagged pointer should unwire memory");
1189*4d495c6eSApple OSS Distributions
1190*4d495c6eSApple OSS Distributions kr = mach_vm_wire(host_priv, mach_task_self(), tagged_addr,
1191*4d495c6eSApple OSS Distributions alloc_size, VM_PROT_DEFAULT);
1192*4d495c6eSApple OSS Distributions T_EXPECT_MACH_SUCCESS(kr, "mach_vm_wire (wire)");
1193*4d495c6eSApple OSS Distributions get_region_info();
1194*4d495c6eSApple OSS Distributions T_EXPECT_EQ(region_info.user_wired_count, (unsigned short) 1, "mach_vm_wire on tagged address should wire memory");
1195*4d495c6eSApple OSS Distributions ret = munlock(tagged_ptr, alloc_size);
1196*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(ret, "munlock");
1197*4d495c6eSApple OSS Distributions
1198*4d495c6eSApple OSS Distributions /* List of flags used to test vm_allocate, vm_map and vm_remap */
1199*4d495c6eSApple OSS Distributions const int ALLOCATE_FLAGS[] = {
1200*4d495c6eSApple OSS Distributions VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
1201*4d495c6eSApple OSS Distributions VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_FLAGS_MTE,
1202*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE,
1203*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE | VM_FLAGS_MTE
1204*4d495c6eSApple OSS Distributions };
1205*4d495c6eSApple OSS Distributions const size_t NUM_ALLOCATE_FLAGS = sizeof(ALLOCATE_FLAGS) / sizeof(*ALLOCATE_FLAGS);
1206*4d495c6eSApple OSS Distributions
1207*4d495c6eSApple OSS Distributions /* vm_allocate tests: */
1208*4d495c6eSApple OSS Distributions for (size_t i = 0; i < NUM_ALLOCATE_FLAGS; i++) {
1209*4d495c6eSApple OSS Distributions mach_vm_address_t new_addr = tagged_addr;
1210*4d495c6eSApple OSS Distributions kr = mach_vm_allocate(mach_task_self(), &new_addr, alloc_size, ALLOCATE_FLAGS[i]);
1211*4d495c6eSApple OSS Distributions if (ALLOCATE_FLAGS[i] & VM_FLAGS_ANYWHERE) {
1212*4d495c6eSApple OSS Distributions T_EXPECT_MACH_SUCCESS(kr, "mach_vm_allocate %zu (%#x)", i, ALLOCATE_FLAGS[i]);
1213*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_EQ(new_addr & MTE_TAG_MASK, 0ull, "mach_vm_allocate should return untagged pointer");
1214*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_NE((vm_address_t) new_addr, untagged_addr, "allocate anywhere should return a new range");
1215*4d495c6eSApple OSS Distributions
1216*4d495c6eSApple OSS Distributions /* clean up new allocation */
1217*4d495c6eSApple OSS Distributions if (kr == KERN_SUCCESS) {
1218*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), new_addr, alloc_size);
1219*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "cleanup mach_vm_map");
1220*4d495c6eSApple OSS Distributions }
1221*4d495c6eSApple OSS Distributions } else {
1222*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "mach_vm_allocate %zu (%#x)", i, ALLOCATE_FLAGS[i]);
1223*4d495c6eSApple OSS Distributions }
1224*4d495c6eSApple OSS Distributions }
1225*4d495c6eSApple OSS Distributions
1226*4d495c6eSApple OSS Distributions /* mach_vm_machine_attribute: allow tagged addresses */
1227*4d495c6eSApple OSS Distributions vm_machine_attribute_val_t machine_attribute_val = MATTR_VAL_CACHE_FLUSH;
1228*4d495c6eSApple OSS Distributions kr = mach_vm_machine_attribute(mach_task_self(), tagged_addr, alloc_size,
1229*4d495c6eSApple OSS Distributions MATTR_CACHE, &machine_attribute_val);
1230*4d495c6eSApple OSS Distributions T_EXPECT_MACH_SUCCESS(kr, "mach_vm_machine_attribute");
1231*4d495c6eSApple OSS Distributions
1232*4d495c6eSApple OSS Distributions /* mach_make_memory_entry_64: DO NOT allow tagged addresses */
1233*4d495c6eSApple OSS Distributions mach_port_t object_handle;
1234*4d495c6eSApple OSS Distributions memory_object_size_t object_size = alloc_size;
1235*4d495c6eSApple OSS Distributions kr = mach_make_memory_entry_64(mach_task_self(), &object_size, tagged_addr,
1236*4d495c6eSApple OSS Distributions VM_PROT_DEFAULT, &object_handle, MACH_PORT_NULL);
1237*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "mach_make_memory_entry_64");
1238*4d495c6eSApple OSS Distributions
1239*4d495c6eSApple OSS Distributions /* mach_vm_map: DO NOT allow tagged addresses */
1240*4d495c6eSApple OSS Distributions /* setup: get a memory entry to map in */
1241*4d495c6eSApple OSS Distributions kr = mach_make_memory_entry_64(mach_task_self(), &object_size, untagged_addr,
1242*4d495c6eSApple OSS Distributions VM_PROT_DEFAULT | MAP_MEM_NAMED_CREATE, &object_handle, MACH_PORT_NULL);
1243*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "create memory entry for mach_vm_map");
1244*4d495c6eSApple OSS Distributions
1245*4d495c6eSApple OSS Distributions for (size_t i = 0; i < NUM_ALLOCATE_FLAGS; i++) {
1246*4d495c6eSApple OSS Distributions mach_vm_address_t new_addr = tagged_addr;
1247*4d495c6eSApple OSS Distributions kr = mach_vm_map(mach_task_self(), &new_addr, alloc_size, /* mask = */ 0,
1248*4d495c6eSApple OSS Distributions ALLOCATE_FLAGS[i], object_handle, /* offset = */ 0, /* copy = */ true,
1249*4d495c6eSApple OSS Distributions VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
1250*4d495c6eSApple OSS Distributions if (ALLOCATE_FLAGS[i] & VM_FLAGS_ANYWHERE) {
1251*4d495c6eSApple OSS Distributions /*
1252*4d495c6eSApple OSS Distributions * VM_FLAGS_ANYWHERE uses the provided address as a location to start
1253*4d495c6eSApple OSS Distributions * searching from. Since a tagged address is outside the map bounds,
1254*4d495c6eSApple OSS Distributions * it won't be able to find any space for the allocation.
1255*4d495c6eSApple OSS Distributions */
1256*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_NO_SPACE, "mach_vm_map %zu (%#x)", i, ALLOCATE_FLAGS[i]);
1257*4d495c6eSApple OSS Distributions } else {
1258*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "mach_vm_map %zu (%#x)", i, ALLOCATE_FLAGS[i]);
1259*4d495c6eSApple OSS Distributions }
1260*4d495c6eSApple OSS Distributions }
1261*4d495c6eSApple OSS Distributions
1262*4d495c6eSApple OSS Distributions /* clean up memory entry object handle */
1263*4d495c6eSApple OSS Distributions kr = mach_port_deallocate(mach_task_self(), object_handle);
1264*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_map tests: clean up memory entry object handle");
1265*4d495c6eSApple OSS Distributions
1266*4d495c6eSApple OSS Distributions /* mach_vm_purgable_control */
1267*4d495c6eSApple OSS Distributions int purgable_state;
1268*4d495c6eSApple OSS Distributions kr = mach_vm_purgable_control(mach_task_self(), tagged_addr, VM_PURGABLE_GET_STATE, &purgable_state);
1269*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "mach_vm_purgable_control");
1270*4d495c6eSApple OSS Distributions
1271*4d495c6eSApple OSS Distributions /* mach_vm_region: reject tagged addresses */
1272*4d495c6eSApple OSS Distributions mach_vm_address_t region_addr = tagged_addr;
1273*4d495c6eSApple OSS Distributions mach_vm_size_t region_size;
1274*4d495c6eSApple OSS Distributions vm_region_basic_info_data_64_t region_info_64;
1275*4d495c6eSApple OSS Distributions mach_msg_type_number_t region_info_cnt = VM_REGION_BASIC_INFO_COUNT_64;
1276*4d495c6eSApple OSS Distributions mach_port_t unused;
1277*4d495c6eSApple OSS Distributions
1278*4d495c6eSApple OSS Distributions kr = mach_vm_region(mach_task_self(), ®ion_addr, ®ion_size,
1279*4d495c6eSApple OSS Distributions VM_REGION_BASIC_INFO_64, (vm_region_info_t) ®ion_info_64,
1280*4d495c6eSApple OSS Distributions ®ion_info_cnt, &unused);
1281*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "mach_vm_region");
1282*4d495c6eSApple OSS Distributions
1283*4d495c6eSApple OSS Distributions /* mach_vm_remap_new */
1284*4d495c6eSApple OSS Distributions mach_vm_address_t untagged_addr2, tagged_addr2;
1285*4d495c6eSApple OSS Distributions tagged_addr2 = allocate_and_tag_range(alloc_size, TAG_RANDOM);
1286*4d495c6eSApple OSS Distributions untagged_addr2 = tagged_addr2 & ~MTE_TAG_MASK;
1287*4d495c6eSApple OSS Distributions
1288*4d495c6eSApple OSS Distributions /* Test each flag value twice, once with source tagged and once with destination tagged */
1289*4d495c6eSApple OSS Distributions for (size_t i = 0; i < 2 * NUM_ALLOCATE_FLAGS; i++) {
1290*4d495c6eSApple OSS Distributions int flags = ALLOCATE_FLAGS[i % NUM_ALLOCATE_FLAGS];
1291*4d495c6eSApple OSS Distributions bool source_tagged = i < NUM_ALLOCATE_FLAGS;
1292*4d495c6eSApple OSS Distributions char *msg = source_tagged ? "source tagged" : "dest tagged";
1293*4d495c6eSApple OSS Distributions mach_vm_address_t src_addr = source_tagged ? tagged_addr : untagged_addr;
1294*4d495c6eSApple OSS Distributions mach_vm_address_t dest_addr = source_tagged ? untagged_addr2 : tagged_addr2;
1295*4d495c6eSApple OSS Distributions
1296*4d495c6eSApple OSS Distributions vm_prot_t cur_prot = VM_PROT_DEFAULT, max_prot = VM_PROT_DEFAULT;
1297*4d495c6eSApple OSS Distributions kr = mach_vm_remap_new(mach_task_self(), &dest_addr, alloc_size, /* mask = */ 0,
1298*4d495c6eSApple OSS Distributions flags, mach_task_self(), src_addr, true, &cur_prot, &max_prot,
1299*4d495c6eSApple OSS Distributions VM_INHERIT_DEFAULT);
1300*4d495c6eSApple OSS Distributions
1301*4d495c6eSApple OSS Distributions if (flags & VM_FLAGS_MTE) {
1302*4d495c6eSApple OSS Distributions /* VM_FLAGS_USER_REMAP does not include VM_FLAGS_MTE */
1303*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "mach_vm_remap_new %zu (%s, %#x)", i, msg, flags);
1304*4d495c6eSApple OSS Distributions } else if (!source_tagged && flags & VM_FLAGS_ANYWHERE) {
1305*4d495c6eSApple OSS Distributions /*
1306*4d495c6eSApple OSS Distributions * In this case, we pass vm_map_remap_extract since the source
1307*4d495c6eSApple OSS Distributions * address is untagged. When we try to find a space to insert it
1308*4d495c6eSApple OSS Distributions * into the map, we fail since VM_FLAGS_ANYWHERE uses the destination
1309*4d495c6eSApple OSS Distributions * passed in as a location to start searching from.
1310*4d495c6eSApple OSS Distributions */
1311*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_NO_SPACE, "mach_vm_remap_new %zu (%s, %#x)", i, msg, flags);
1312*4d495c6eSApple OSS Distributions } else {
1313*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "mach_vm_remap_new %zu (%s, %#x)", i, msg, flags);
1314*4d495c6eSApple OSS Distributions }
1315*4d495c6eSApple OSS Distributions
1316*4d495c6eSApple OSS Distributions if (kr == KERN_SUCCESS && (flags & VM_FLAGS_ANYWHERE)) {
1317*4d495c6eSApple OSS Distributions /* clean up the new allocation if we mistakenly suceeded */
1318*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), dest_addr, alloc_size);
1319*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "cleanup mach_vm_remap_new %zu (%s, %#x)", i, msg, flags);
1320*4d495c6eSApple OSS Distributions }
1321*4d495c6eSApple OSS Distributions }
1322*4d495c6eSApple OSS Distributions
1323*4d495c6eSApple OSS Distributions /* clean up our second allocation */
1324*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1325*4d495c6eSApple OSS Distributions kr = vm_deallocate(mach_task_self(), untagged_addr2, alloc_size);
1326*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "clean up allocation for mach_vm_remap_new tests");
1327*4d495c6eSApple OSS Distributions T_SETUPEND;
1328*4d495c6eSApple OSS Distributions
1329*4d495c6eSApple OSS Distributions /* vm_deallocate: vm_allocate() will return a canonical address, so we mandate a canonical address here */
1330*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1331*4d495c6eSApple OSS Distributions kr = vm_deallocate(mach_task_self(), tagged_addr, alloc_size);
1332*4d495c6eSApple OSS Distributions T_EXPECT_MACH_ERROR(kr, KERN_INVALID_ARGUMENT, "vm_deallocate denies a non-canonical addresses");
1333*4d495c6eSApple OSS Distributions T_SETUPEND;
1334*4d495c6eSApple OSS Distributions
1335*4d495c6eSApple OSS Distributions /* test cleanup */
1336*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1337*4d495c6eSApple OSS Distributions kr = vm_deallocate(mach_task_self(), untagged_addr, alloc_size);
1338*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "test region cleanup");
1339*4d495c6eSApple OSS Distributions T_SETUPEND;
1340*4d495c6eSApple OSS Distributions #endif /* !__arm64__ */
1341*4d495c6eSApple OSS Distributions }
1342*4d495c6eSApple OSS Distributions
1343*4d495c6eSApple OSS Distributions T_DECL(mte_tagged_page_relocation,
1344*4d495c6eSApple OSS Distributions "Test that VM copies tags on page relocation for tagged memory",
1345*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
1346*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1347*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
1348*4d495c6eSApple OSS Distributions T_META_ENABLED(__arm64__))
1349*4d495c6eSApple OSS Distributions {
1350*4d495c6eSApple OSS Distributions #if __arm64__
1351*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1352*4d495c6eSApple OSS Distributions mach_vm_address_t addr = 0;
1353*4d495c6eSApple OSS Distributions kern_return_t kr = mach_vm_allocate(
1354*4d495c6eSApple OSS Distributions mach_task_self(),
1355*4d495c6eSApple OSS Distributions &addr,
1356*4d495c6eSApple OSS Distributions PAGE_SIZE,
1357*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE | VM_FLAGS_MTE
1358*4d495c6eSApple OSS Distributions );
1359*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr,
1360*4d495c6eSApple OSS Distributions "allocate 32 bytes of tagged memory at 0x%llx", addr);
1361*4d495c6eSApple OSS Distributions
1362*4d495c6eSApple OSS Distributions /* Verify originally assigned tags are zero */
1363*4d495c6eSApple OSS Distributions for (uint i = 0; i < PAGE_SIZE / MTE_GRANULE_SIZE; ++i) {
1364*4d495c6eSApple OSS Distributions char *untagged_ptr = (char *)((uintptr_t)addr + i * MTE_GRANULE_SIZE);
1365*4d495c6eSApple OSS Distributions char *orig_tagged_ptr = __arm_mte_get_tag(untagged_ptr);
1366*4d495c6eSApple OSS Distributions unsigned int orig_tag = extract_mte_tag(orig_tagged_ptr);
1367*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_EQ_UINT(orig_tag, 0U, "originally assigned tag is zero");
1368*4d495c6eSApple OSS Distributions }
1369*4d495c6eSApple OSS Distributions
1370*4d495c6eSApple OSS Distributions /*
1371*4d495c6eSApple OSS Distributions * Tag the first 16 bytes with non-zero tag, and
1372*4d495c6eSApple OSS Distributions * leave the second 16 bytes as is
1373*4d495c6eSApple OSS Distributions */
1374*4d495c6eSApple OSS Distributions char *untagged_ptr = (char *)addr;
1375*4d495c6eSApple OSS Distributions char *orig_tagged_ptr = __arm_mte_get_tag(untagged_ptr);
1376*4d495c6eSApple OSS Distributions uint64_t mask = __arm_mte_exclude_tag(orig_tagged_ptr, 0);
1377*4d495c6eSApple OSS Distributions T_EXPECT_EQ_LLONG(mask, (1LL << 0), "zero tag is excluded");
1378*4d495c6eSApple OSS Distributions
1379*4d495c6eSApple OSS Distributions char *random_tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, mask);
1380*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_NE_PTR(orig_tagged_ptr, random_tagged_ptr,
1381*4d495c6eSApple OSS Distributions "random tag was not taken from excluded tag set");
1382*4d495c6eSApple OSS Distributions
1383*4d495c6eSApple OSS Distributions ptrdiff_t diff = __arm_mte_ptrdiff(untagged_ptr, random_tagged_ptr);
1384*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_EQ_ULONG(diff, (ptrdiff_t)0, "untagged %p and tagged %p have identical address bits",
1385*4d495c6eSApple OSS Distributions untagged_ptr, random_tagged_ptr);
1386*4d495c6eSApple OSS Distributions
1387*4d495c6eSApple OSS Distributions /* Time to make things real, commit the tag to memory */
1388*4d495c6eSApple OSS Distributions __arm_mte_set_tag(random_tagged_ptr);
1389*4d495c6eSApple OSS Distributions
1390*4d495c6eSApple OSS Distributions /* Ensure that we can read back the tag */
1391*4d495c6eSApple OSS Distributions char *read_back = __arm_mte_get_tag(untagged_ptr);
1392*4d495c6eSApple OSS Distributions T_EXPECT_EQ_PTR(read_back, random_tagged_ptr, "tag was committed to memory correctly");
1393*4d495c6eSApple OSS Distributions
1394*4d495c6eSApple OSS Distributions T_LOG("tagged pointer: %p", random_tagged_ptr);
1395*4d495c6eSApple OSS Distributions random_tagged_ptr[0] = 'a';
1396*4d495c6eSApple OSS Distributions untagged_ptr[MTE_GRANULE_SIZE] = 'b';
1397*4d495c6eSApple OSS Distributions T_SETUPEND;
1398*4d495c6eSApple OSS Distributions
1399*4d495c6eSApple OSS Distributions /*
1400*4d495c6eSApple OSS Distributions * Relocate the page.
1401*4d495c6eSApple OSS Distributions * The kernel will also write 'b' and 'c' to the memory.
1402*4d495c6eSApple OSS Distributions */
1403*4d495c6eSApple OSS Distributions int64_t ret = run_sysctl_test("vm_page_relocate", (int64_t)random_tagged_ptr);
1404*4d495c6eSApple OSS Distributions T_EXPECT_EQ_LLONG(ret, 1LL, "sysctl: relocate page");
1405*4d495c6eSApple OSS Distributions
1406*4d495c6eSApple OSS Distributions T_EXPECT_EQ_CHAR(random_tagged_ptr[0], 'b',
1407*4d495c6eSApple OSS Distributions "reading from tagged ptr after relocation");
1408*4d495c6eSApple OSS Distributions T_EXPECT_EQ_CHAR(untagged_ptr[MTE_GRANULE_SIZE], 'c',
1409*4d495c6eSApple OSS Distributions "reading from untagged ptr after relocation");
1410*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
1411*4d495c6eSApple OSS Distributions }
1412*4d495c6eSApple OSS Distributions
1413*4d495c6eSApple OSS Distributions T_HELPER_DECL(mte_tag_violate, "child process to trigger an MTE violation")
1414*4d495c6eSApple OSS Distributions {
1415*4d495c6eSApple OSS Distributions static const size_t ALLOC_SIZE = MTE_GRANULE_SIZE * 2;
1416*4d495c6eSApple OSS Distributions
1417*4d495c6eSApple OSS Distributions vm_address_t address = 0;
1418*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &address, ALLOC_SIZE, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
1419*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "allocate tagged memory");
1420*4d495c6eSApple OSS Distributions char *untagged_ptr = (char *) address;
1421*4d495c6eSApple OSS Distributions
1422*4d495c6eSApple OSS Distributions char *orig_tagged_ptr = __arm_mte_get_tag(untagged_ptr);
1423*4d495c6eSApple OSS Distributions unsigned int orig_tag = extract_mte_tag(orig_tagged_ptr);
1424*4d495c6eSApple OSS Distributions T_ASSERT_EQ_UINT(orig_tag, 0U, "originally assigned tag is zero");
1425*4d495c6eSApple OSS Distributions
1426*4d495c6eSApple OSS Distributions uint64_t mask = __arm_mte_exclude_tag(orig_tagged_ptr, 0);
1427*4d495c6eSApple OSS Distributions T_EXPECT_EQ_LLONG(mask, (1LL << 0), "zero tag is excluded");
1428*4d495c6eSApple OSS Distributions
1429*4d495c6eSApple OSS Distributions char *random_tagged_ptr = NULL;
1430*4d495c6eSApple OSS Distributions for (unsigned int i = 0; i < NUM_MTE_TAGS * 4; i++) {
1431*4d495c6eSApple OSS Distributions random_tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, mask);
1432*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_NE_PTR(orig_tagged_ptr, random_tagged_ptr,
1433*4d495c6eSApple OSS Distributions "random tag was not taken from excluded tag set");
1434*4d495c6eSApple OSS Distributions
1435*4d495c6eSApple OSS Distributions ptrdiff_t diff = __arm_mte_ptrdiff(untagged_ptr, random_tagged_ptr);
1436*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_EQ_ULONG(diff, (ptrdiff_t)0, "untagged %p and tagged %p have identical address bits",
1437*4d495c6eSApple OSS Distributions untagged_ptr, random_tagged_ptr);
1438*4d495c6eSApple OSS Distributions }
1439*4d495c6eSApple OSS Distributions
1440*4d495c6eSApple OSS Distributions __arm_mte_set_tag(random_tagged_ptr);
1441*4d495c6eSApple OSS Distributions
1442*4d495c6eSApple OSS Distributions char *read_back = __arm_mte_get_tag(untagged_ptr);
1443*4d495c6eSApple OSS Distributions T_EXPECT_EQ_PTR(read_back, random_tagged_ptr, "tag was committed to memory correctly");
1444*4d495c6eSApple OSS Distributions
1445*4d495c6eSApple OSS Distributions random_tagged_ptr[0] = 't';
1446*4d495c6eSApple OSS Distributions random_tagged_ptr[1] = 'e';
1447*4d495c6eSApple OSS Distributions random_tagged_ptr[2] = 's';
1448*4d495c6eSApple OSS Distributions random_tagged_ptr[3] = 't';
1449*4d495c6eSApple OSS Distributions T_EXPECT_EQ_STR(random_tagged_ptr, "test", "read/write from tagged memory");
1450*4d495c6eSApple OSS Distributions
1451*4d495c6eSApple OSS Distributions void *next_granule_ptr = orig_tagged_ptr + MTE_GRANULE_SIZE;
1452*4d495c6eSApple OSS Distributions unsigned int next_granule_tag = extract_mte_tag(next_granule_ptr);
1453*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_EQ_UINT(next_granule_tag, 0U,
1454*4d495c6eSApple OSS Distributions "next MTE granule still has its originally assigned tag");
1455*4d495c6eSApple OSS Distributions
1456*4d495c6eSApple OSS Distributions T_LOG("attempting out-of-bounds access to tagged memory");
1457*4d495c6eSApple OSS Distributions random_tagged_ptr[MTE_GRANULE_SIZE] = '!';
1458*4d495c6eSApple OSS Distributions T_LOG("bypass: survived OOB access");
1459*4d495c6eSApple OSS Distributions
1460*4d495c6eSApple OSS Distributions __arm_mte_set_tag(orig_tagged_ptr);
1461*4d495c6eSApple OSS Distributions __arm_mte_set_tag(orig_tagged_ptr + MTE_GRANULE_SIZE);
1462*4d495c6eSApple OSS Distributions vm_deallocate(mach_task_self(), address, ALLOC_SIZE);
1463*4d495c6eSApple OSS Distributions exit(0);
1464*4d495c6eSApple OSS Distributions }
1465*4d495c6eSApple OSS Distributions
1466*4d495c6eSApple OSS Distributions T_HELPER_DECL(mte_copyio_bypass_helper, "child process to test copyio in MTE tag check bypass mode")
1467*4d495c6eSApple OSS Distributions {
1468*4d495c6eSApple OSS Distributions run_mte_copyio_tests(false);
1469*4d495c6eSApple OSS Distributions }
1470*4d495c6eSApple OSS Distributions
1471*4d495c6eSApple OSS Distributions static void
run_helper_with_sec_bypass(char * helper_name)1472*4d495c6eSApple OSS Distributions run_helper_with_sec_bypass(char *helper_name)
1473*4d495c6eSApple OSS Distributions {
1474*4d495c6eSApple OSS Distributions char path[PATH_MAX];
1475*4d495c6eSApple OSS Distributions uint32_t path_size = sizeof(path);
1476*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
1477*4d495c6eSApple OSS Distributions char *args[] = { path, "-n", helper_name, NULL };
1478*4d495c6eSApple OSS Distributions
1479*4d495c6eSApple OSS Distributions pid_t child_pid = 0;
1480*4d495c6eSApple OSS Distributions posix_spawnattr_t attr;
1481*4d495c6eSApple OSS Distributions errno_t ret = posix_spawnattr_init(&attr);
1482*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_init");
1483*4d495c6eSApple OSS Distributions
1484*4d495c6eSApple OSS Distributions ret = posix_spawnattr_set_use_sec_transition_shims_np(&attr, POSIX_SPAWN_SECFLAG_EXPLICIT_ENABLE | POSIX_SPAWN_SECFLAG_EXPLICIT_CHECK_BYPASS);
1485*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_set_use_sec_transition_shims_np");
1486*4d495c6eSApple OSS Distributions
1487*4d495c6eSApple OSS Distributions ret = posix_spawn(&child_pid, path, NULL, &attr, args, NULL);
1488*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawn");
1489*4d495c6eSApple OSS Distributions T_ASSERT_NE(child_pid, 0, "posix_spawn");
1490*4d495c6eSApple OSS Distributions
1491*4d495c6eSApple OSS Distributions ret = posix_spawnattr_destroy(&attr);
1492*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_destroy");
1493*4d495c6eSApple OSS Distributions
1494*4d495c6eSApple OSS Distributions int status = 0;
1495*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(waitpid(child_pid, &status, 0), "waitpid");
1496*4d495c6eSApple OSS Distributions T_EXPECT_TRUE(WIFEXITED(status), "exited successfully");
1497*4d495c6eSApple OSS Distributions T_EXPECT_TRUE(WEXITSTATUS(status) == 0, "exited with status %d", WEXITSTATUS(status));
1498*4d495c6eSApple OSS Distributions }
1499*4d495c6eSApple OSS Distributions
1500*4d495c6eSApple OSS Distributions T_DECL(mte_tag_bypass,
1501*4d495c6eSApple OSS Distributions "Test MTE2 tag check bypass works with posix_spawnattr",
1502*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1503*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
1504*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
1505*4d495c6eSApple OSS Distributions {
1506*4d495c6eSApple OSS Distributions run_helper_with_sec_bypass("mte_tag_violate");
1507*4d495c6eSApple OSS Distributions }
1508*4d495c6eSApple OSS Distributions
1509*4d495c6eSApple OSS Distributions T_DECL(mte_copyio_bypass,
1510*4d495c6eSApple OSS Distributions "Test MTE2 tag check bypass with copyio operations",
1511*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1512*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE4", 1),
1513*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
1514*4d495c6eSApple OSS Distributions {
1515*4d495c6eSApple OSS Distributions run_helper_with_sec_bypass("mte_copyio_bypass_helper");
1516*4d495c6eSApple OSS Distributions }
1517*4d495c6eSApple OSS Distributions
1518*4d495c6eSApple OSS Distributions #ifdef __arm64__
1519*4d495c6eSApple OSS Distributions T_DECL(mte_read_only,
1520*4d495c6eSApple OSS Distributions "Verify that setting tags on a read-only mapping results in SIGBUS",
1521*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1522*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
1523*4d495c6eSApple OSS Distributions {
1524*4d495c6eSApple OSS Distributions uint64_t mask;
1525*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1526*4d495c6eSApple OSS Distributions void* untagged_ptr = allocate_tagged_memory(MTE_GRANULE_SIZE, &mask);
1527*4d495c6eSApple OSS Distributions void *tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, mask);
1528*4d495c6eSApple OSS Distributions T_SETUPEND;
1529*4d495c6eSApple OSS Distributions
1530*4d495c6eSApple OSS Distributions assert_normal_exit(^{
1531*4d495c6eSApple OSS Distributions __arm_mte_set_tag(tagged_ptr);
1532*4d495c6eSApple OSS Distributions }, "can set tags on writable memory");
1533*4d495c6eSApple OSS Distributions
1534*4d495c6eSApple OSS Distributions int ret = mprotect(untagged_ptr, MTE_GRANULE_SIZE, PROT_READ);
1535*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(ret, "mprotect");
1536*4d495c6eSApple OSS Distributions
1537*4d495c6eSApple OSS Distributions tagged_ptr = __arm_mte_increment_tag(tagged_ptr, 1);
1538*4d495c6eSApple OSS Distributions
1539*4d495c6eSApple OSS Distributions expect_signal(SIGBUS, ^{
1540*4d495c6eSApple OSS Distributions __arm_mte_set_tag(tagged_ptr);
1541*4d495c6eSApple OSS Distributions }, "set tag on read-only memory");
1542*4d495c6eSApple OSS Distributions
1543*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1544*4d495c6eSApple OSS Distributions kern_return_t kr = vm_deallocate(mach_task_self(), (vm_address_t) untagged_ptr, MTE_GRANULE_SIZE);
1545*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "clean up tagged allocation");
1546*4d495c6eSApple OSS Distributions T_SETUPEND;
1547*4d495c6eSApple OSS Distributions }
1548*4d495c6eSApple OSS Distributions
1549*4d495c6eSApple OSS Distributions T_DECL(mte_inherit_share,
1550*4d495c6eSApple OSS Distributions "Verify that you can't set VM_INHERIT_SHARE on tagged memory",
1551*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1552*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
1553*4d495c6eSApple OSS Distributions {
1554*4d495c6eSApple OSS Distributions const mach_vm_size_t ALLOC_SIZE = PAGE_SIZE;
1555*4d495c6eSApple OSS Distributions __block kern_return_t kr;
1556*4d495c6eSApple OSS Distributions
1557*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1558*4d495c6eSApple OSS Distributions vm_address_t tagged_addr = allocate_and_tag_range(ALLOC_SIZE, TAG_RANDOM);
1559*4d495c6eSApple OSS Distributions vm_address_t untagged_addr = tagged_addr & ~MTE_TAG_MASK;
1560*4d495c6eSApple OSS Distributions T_SETUPEND;
1561*4d495c6eSApple OSS Distributions
1562*4d495c6eSApple OSS Distributions expect_sigkill(^{
1563*4d495c6eSApple OSS Distributions int ret = minherit((void*) untagged_addr, ALLOC_SIZE, VM_INHERIT_SHARE);
1564*4d495c6eSApple OSS Distributions T_LOG("minherit: was not killed and returned %d", ret);
1565*4d495c6eSApple OSS Distributions }, "minherit(VM_INHERIT_SHARE) on tagged memory");
1566*4d495c6eSApple OSS Distributions
1567*4d495c6eSApple OSS Distributions expect_sigkill(^{
1568*4d495c6eSApple OSS Distributions kr = mach_vm_inherit(mach_task_self(), untagged_addr,
1569*4d495c6eSApple OSS Distributions ALLOC_SIZE, VM_INHERIT_SHARE);
1570*4d495c6eSApple OSS Distributions T_LOG("mach_vm_inherit: was not killed and returned %d", kr);
1571*4d495c6eSApple OSS Distributions }, "mach_vm_inherit(VM_INHERIT_SHARE) on tagged memory");
1572*4d495c6eSApple OSS Distributions
1573*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1574*4d495c6eSApple OSS Distributions kr = vm_deallocate(mach_task_self(), untagged_addr, ALLOC_SIZE);
1575*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "clean up tagged allocation");
1576*4d495c6eSApple OSS Distributions T_SETUPEND;
1577*4d495c6eSApple OSS Distributions
1578*4d495c6eSApple OSS Distributions expect_sigkill(^{
1579*4d495c6eSApple OSS Distributions mach_vm_address_t addr = 0;
1580*4d495c6eSApple OSS Distributions kr = mach_vm_map(mach_task_self(), &addr, ALLOC_SIZE, /* mask = */ 0,
1581*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE | VM_FLAGS_MTE, MACH_PORT_NULL, /* offset = */ 0,
1582*4d495c6eSApple OSS Distributions /* copy = */ false, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_SHARE);
1583*4d495c6eSApple OSS Distributions T_LOG("mach_vm_map: was not killed and returned %d", kr);
1584*4d495c6eSApple OSS Distributions
1585*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1586*4d495c6eSApple OSS Distributions kr = vm_deallocate(mach_task_self(), addr, ALLOC_SIZE);
1587*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_MACH_SUCCESS(kr, "clean up mach_vm_map allocation");
1588*4d495c6eSApple OSS Distributions T_SETUPEND;
1589*4d495c6eSApple OSS Distributions }, "mach_vm_map(VM_INHERIT_SHARE) to create new tagged memory");
1590*4d495c6eSApple OSS Distributions }
1591*4d495c6eSApple OSS Distributions
1592*4d495c6eSApple OSS Distributions static vm_object_id_t
get_object_id(mach_port_t task,vm_address_t addr)1593*4d495c6eSApple OSS Distributions get_object_id(mach_port_t task, vm_address_t addr)
1594*4d495c6eSApple OSS Distributions {
1595*4d495c6eSApple OSS Distributions unsigned int depth = 1;
1596*4d495c6eSApple OSS Distributions vm_size_t size;
1597*4d495c6eSApple OSS Distributions struct vm_region_submap_info_64 info;
1598*4d495c6eSApple OSS Distributions mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
1599*4d495c6eSApple OSS Distributions kern_return_t kr = vm_region_recurse_64(task, &addr, &size, &depth,
1600*4d495c6eSApple OSS Distributions (vm_region_info_t) &info, &count);
1601*4d495c6eSApple OSS Distributions /*
1602*4d495c6eSApple OSS Distributions * I'm not sure why it returns KERN_INVALID_ADDRESS in this case, but this
1603*4d495c6eSApple OSS Distributions * can happen if the corpse task goes away. That happens if a jetsam event
1604*4d495c6eSApple OSS Distributions * occurs (even on an unrelated process) while the test is running.
1605*4d495c6eSApple OSS Distributions */
1606*4d495c6eSApple OSS Distributions if (task != mach_task_self() && kr == KERN_INVALID_ADDRESS) {
1607*4d495c6eSApple OSS Distributions T_SKIP("corpse port disappeared, bailing...");
1608*4d495c6eSApple OSS Distributions }
1609*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "get_object_id: vm_region_recurse_64");
1610*4d495c6eSApple OSS Distributions return info.object_id_full;
1611*4d495c6eSApple OSS Distributions }
1612*4d495c6eSApple OSS Distributions
1613*4d495c6eSApple OSS Distributions T_DECL(mte_corpse_fork,
1614*4d495c6eSApple OSS Distributions "Verify that corpse-fork sharing paths work normally on tagged memory",
1615*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1616*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
1617*4d495c6eSApple OSS Distributions /* rdar://138528295 (Provide a mechanism to guarantee availability of corpse slots for tests) */
1618*4d495c6eSApple OSS Distributions T_META_RUN_CONCURRENTLY(false))
1619*4d495c6eSApple OSS Distributions {
1620*4d495c6eSApple OSS Distributions /*
1621*4d495c6eSApple OSS Distributions * The corpse-fork path shares memory in two additional cases:
1622*4d495c6eSApple OSS Distributions * (1) if the entry has INHERIT_NONE, and
1623*4d495c6eSApple OSS Distributions * (2) if the memory is "owned" by the process for accounting purposes. This
1624*4d495c6eSApple OSS Distributions * essentially means that it is purgeable & volatile.
1625*4d495c6eSApple OSS Distributions * We want to ensure that these cases are unaffected by MTE restrictions on
1626*4d495c6eSApple OSS Distributions * VM_INHERIT_SHARE.
1627*4d495c6eSApple OSS Distributions */
1628*4d495c6eSApple OSS Distributions kern_return_t kr;
1629*4d495c6eSApple OSS Distributions mach_vm_size_t alloc_size = PAGE_SIZE;
1630*4d495c6eSApple OSS Distributions mach_vm_address_t inherit_none_addr, owned_addr, regular_addr;
1631*4d495c6eSApple OSS Distributions
1632*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1633*4d495c6eSApple OSS Distributions
1634*4d495c6eSApple OSS Distributions /* First up, expand the system's corpse pool size.
1635*4d495c6eSApple OSS Distributions * Otherwise, this test sporadically can't secure the corpse slots it needs.
1636*4d495c6eSApple OSS Distributions */
1637*4d495c6eSApple OSS Distributions int original_total_corpses_allowed;
1638*4d495c6eSApple OSS Distributions size_t original_total_corpses_allowed_sizeof = sizeof(original_total_corpses_allowed);
1639*4d495c6eSApple OSS Distributions int total_corpses_allowed = 20;
1640*4d495c6eSApple OSS Distributions int ret = sysctlbyname("kern.total_corpses_allowed",
1641*4d495c6eSApple OSS Distributions &original_total_corpses_allowed, &original_total_corpses_allowed_sizeof,
1642*4d495c6eSApple OSS Distributions &total_corpses_allowed, sizeof(total_corpses_allowed));
1643*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_POSIX_ZERO(ret, "sysctl kern.total_corpses_allowed");
1644*4d495c6eSApple OSS Distributions
1645*4d495c6eSApple OSS Distributions /* set up regular MTE-tagged region */
1646*4d495c6eSApple OSS Distributions kr = mach_vm_allocate(mach_task_self(), ®ular_addr, alloc_size,
1647*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
1648*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate regular region");
1649*4d495c6eSApple OSS Distributions
1650*4d495c6eSApple OSS Distributions /* set up region for testing INHERIT_NONE */
1651*4d495c6eSApple OSS Distributions kr = mach_vm_allocate(mach_task_self(), &inherit_none_addr, alloc_size,
1652*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
1653*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate INHERIT_NONE region");
1654*4d495c6eSApple OSS Distributions
1655*4d495c6eSApple OSS Distributions kr = mach_vm_inherit(mach_task_self(), inherit_none_addr, alloc_size,
1656*4d495c6eSApple OSS Distributions VM_INHERIT_NONE);
1657*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_inherit(INHERIT_NONE)");
1658*4d495c6eSApple OSS Distributions
1659*4d495c6eSApple OSS Distributions /* set up region for testing "owned" memory */
1660*4d495c6eSApple OSS Distributions kr = mach_vm_allocate(mach_task_self(), &owned_addr, alloc_size,
1661*4d495c6eSApple OSS Distributions VM_FLAGS_ANYWHERE | VM_FLAGS_MTE | VM_FLAGS_PURGABLE);
1662*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate owned region");
1663*4d495c6eSApple OSS Distributions
1664*4d495c6eSApple OSS Distributions int purgable_state = VM_PURGABLE_VOLATILE;
1665*4d495c6eSApple OSS Distributions kr = mach_vm_purgable_control(mach_task_self(), owned_addr, VM_PURGABLE_SET_STATE,
1666*4d495c6eSApple OSS Distributions &purgable_state);
1667*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VM_PURGABLE_VOLATILE)");
1668*4d495c6eSApple OSS Distributions T_SETUPEND;
1669*4d495c6eSApple OSS Distributions
1670*4d495c6eSApple OSS Distributions /* Write in some data and tags */
1671*4d495c6eSApple OSS Distributions char *regular_ptr = __arm_mte_increment_tag((char*) regular_addr, 1);
1672*4d495c6eSApple OSS Distributions char *inherit_none_ptr = __arm_mte_increment_tag((char*) inherit_none_addr, 2);
1673*4d495c6eSApple OSS Distributions char *owned_ptr = __arm_mte_increment_tag((char*) owned_addr, 3);
1674*4d495c6eSApple OSS Distributions for (size_t i = 0; i < alloc_size; i++) {
1675*4d495c6eSApple OSS Distributions if (i % MTE_GRANULE_SIZE == 0) {
1676*4d495c6eSApple OSS Distributions __arm_mte_set_tag(®ular_ptr[i]);
1677*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&inherit_none_ptr[i]);
1678*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&owned_ptr[i]);
1679*4d495c6eSApple OSS Distributions }
1680*4d495c6eSApple OSS Distributions regular_ptr[i] = 'a';
1681*4d495c6eSApple OSS Distributions inherit_none_ptr[i] = 'b';
1682*4d495c6eSApple OSS Distributions owned_ptr[i] = 'c';
1683*4d495c6eSApple OSS Distributions }
1684*4d495c6eSApple OSS Distributions T_LOG("wrote data and tags");
1685*4d495c6eSApple OSS Distributions
1686*4d495c6eSApple OSS Distributions mach_port_t corpse_port;
1687*4d495c6eSApple OSS Distributions size_t NUM_RETRIES = 5;
1688*4d495c6eSApple OSS Distributions for (size_t i = 0;; i++) {
1689*4d495c6eSApple OSS Distributions kr = task_generate_corpse(mach_task_self(), &corpse_port);
1690*4d495c6eSApple OSS Distributions if (kr == KERN_RESOURCE_SHORTAGE) {
1691*4d495c6eSApple OSS Distributions T_LOG("hit system corpse limit");
1692*4d495c6eSApple OSS Distributions if (i == NUM_RETRIES) {
1693*4d495c6eSApple OSS Distributions T_SKIP("retried too many times, bailing...");
1694*4d495c6eSApple OSS Distributions } else {
1695*4d495c6eSApple OSS Distributions /* give ReportCrash some time to finish handling some corpses */
1696*4d495c6eSApple OSS Distributions sleep(2);
1697*4d495c6eSApple OSS Distributions /* ... then retry */
1698*4d495c6eSApple OSS Distributions T_LOG("retrying... (%lu/%lu)", i + 1, NUM_RETRIES);
1699*4d495c6eSApple OSS Distributions continue;
1700*4d495c6eSApple OSS Distributions }
1701*4d495c6eSApple OSS Distributions }
1702*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "task_generate_corpse");
1703*4d495c6eSApple OSS Distributions break;
1704*4d495c6eSApple OSS Distributions }
1705*4d495c6eSApple OSS Distributions
1706*4d495c6eSApple OSS Distributions /*
1707*4d495c6eSApple OSS Distributions * Make sure the "regular" region was not shared.
1708*4d495c6eSApple OSS Distributions * Note: in the case of symmetric CoW, the object IDs may match even if
1709*4d495c6eSApple OSS Distributions * there is no true sharing happening. However, since we only expect delayed
1710*4d495c6eSApple OSS Distributions * CoW or eager copies for MTE objects, this isn't a concern here.
1711*4d495c6eSApple OSS Distributions */
1712*4d495c6eSApple OSS Distributions vm_object_id_t regular_id = get_object_id(mach_task_self(), regular_addr);
1713*4d495c6eSApple OSS Distributions vm_object_id_t regular_corpse_id = get_object_id(corpse_port, regular_addr);
1714*4d495c6eSApple OSS Distributions T_EXPECT_NE(regular_id, regular_corpse_id, "regular region was not shared");
1715*4d495c6eSApple OSS Distributions
1716*4d495c6eSApple OSS Distributions /* Make sure the INHERIT_NONE region was shared */
1717*4d495c6eSApple OSS Distributions vm_object_id_t inherit_none_id = get_object_id(mach_task_self(), inherit_none_addr);
1718*4d495c6eSApple OSS Distributions vm_object_id_t inherit_none_corpse_id = get_object_id(corpse_port, inherit_none_addr);
1719*4d495c6eSApple OSS Distributions T_EXPECT_EQ(inherit_none_id, inherit_none_corpse_id, "INHERIT_NONE region was shared");
1720*4d495c6eSApple OSS Distributions
1721*4d495c6eSApple OSS Distributions /* Make sure the owned region was shared */
1722*4d495c6eSApple OSS Distributions vm_object_id_t owned_id = get_object_id(mach_task_self(), owned_addr);
1723*4d495c6eSApple OSS Distributions vm_object_id_t owned_corpse_id = get_object_id(corpse_port, owned_addr);
1724*4d495c6eSApple OSS Distributions T_EXPECT_EQ(owned_id, owned_corpse_id, "owned region was shared");
1725*4d495c6eSApple OSS Distributions
1726*4d495c6eSApple OSS Distributions /* Cleanup */
1727*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1728*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), regular_addr, alloc_size);
1729*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate regular allocation");
1730*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), inherit_none_addr, alloc_size);
1731*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate INHERIT_NONE allocation");
1732*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), owned_addr, alloc_size);
1733*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate owned allocation");
1734*4d495c6eSApple OSS Distributions kr = mach_port_deallocate(mach_task_self(), corpse_port);
1735*4d495c6eSApple OSS Distributions T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "deallocate corpse port");
1736*4d495c6eSApple OSS Distributions
1737*4d495c6eSApple OSS Distributions /* Reduce the corpse pool size back to its original value */
1738*4d495c6eSApple OSS Distributions ret = sysctlbyname("kern.total_corpses_allowed",
1739*4d495c6eSApple OSS Distributions NULL, 0,
1740*4d495c6eSApple OSS Distributions &original_total_corpses_allowed, sizeof(original_total_corpses_allowed));
1741*4d495c6eSApple OSS Distributions T_QUIET; T_EXPECT_POSIX_ZERO(ret, "sysctl kern.total_corpses_allowed");
1742*4d495c6eSApple OSS Distributions
1743*4d495c6eSApple OSS Distributions T_SETUPEND;
1744*4d495c6eSApple OSS Distributions }
1745*4d495c6eSApple OSS Distributions
1746*4d495c6eSApple OSS Distributions T_DECL(mte_aio,
1747*4d495c6eSApple OSS Distributions "Test MTE asynchronous access faults when the kernel does copyio on behalf of a process",
1748*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1749*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
1750*4d495c6eSApple OSS Distributions T_META_ENABLED(false) /* rdar://154801490 */) {
1751*4d495c6eSApple OSS Distributions const mach_vm_size_t BUF_SIZE = MTE_GRANULE_SIZE;
1752*4d495c6eSApple OSS Distributions uint64_t mask;
1753*4d495c6eSApple OSS Distributions
1754*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1755*4d495c6eSApple OSS Distributions char *buf_untagged = allocate_tagged_memory(BUF_SIZE, &mask);
1756*4d495c6eSApple OSS Distributions char *buf_tagged = __arm_mte_create_random_tag(buf_untagged, mask);
1757*4d495c6eSApple OSS Distributions __arm_mte_set_tag(buf_tagged);
1758*4d495c6eSApple OSS Distributions strncpy(buf_tagged, "ABCDEFG", BUF_SIZE);
1759*4d495c6eSApple OSS Distributions
1760*4d495c6eSApple OSS Distributions char *buf_incorrectly_tagged = __arm_mte_increment_tag(buf_tagged, 1);
1761*4d495c6eSApple OSS Distributions int fd = fileno(tmpfile());
1762*4d495c6eSApple OSS Distributions
1763*4d495c6eSApple OSS Distributions T_SETUPEND;
1764*4d495c6eSApple OSS Distributions
1765*4d495c6eSApple OSS Distributions expect_sigkill(^{
1766*4d495c6eSApple OSS Distributions struct aiocb aiocb = {
1767*4d495c6eSApple OSS Distributions .aio_fildes = fd,
1768*4d495c6eSApple OSS Distributions .aio_offset = 0,
1769*4d495c6eSApple OSS Distributions .aio_buf = buf_incorrectly_tagged,
1770*4d495c6eSApple OSS Distributions .aio_nbytes = strlen(buf_tagged),
1771*4d495c6eSApple OSS Distributions };
1772*4d495c6eSApple OSS Distributions int ret = aio_write(&aiocb);
1773*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(ret, "aio_write");
1774*4d495c6eSApple OSS Distributions
1775*4d495c6eSApple OSS Distributions /* wait for the kernel to handle our async I/O */
1776*4d495c6eSApple OSS Distributions /* we should be killed at some point while this happens */
1777*4d495c6eSApple OSS Distributions const struct aiocb *aio_list[1] = { &aiocb };
1778*4d495c6eSApple OSS Distributions (void)aio_suspend(aio_list, 1, NULL);
1779*4d495c6eSApple OSS Distributions
1780*4d495c6eSApple OSS Distributions /* we were not killed: */
1781*4d495c6eSApple OSS Distributions close(fd);
1782*4d495c6eSApple OSS Distributions T_ASSERT_FAIL("aio write with untagged pointer completed successfully");
1783*4d495c6eSApple OSS Distributions }, "asynchronous I/O write from tagged buffer with incorrect MTE tags");
1784*4d495c6eSApple OSS Distributions
1785*4d495c6eSApple OSS Distributions char read_buf[BUF_SIZE];
1786*4d495c6eSApple OSS Distributions ssize_t bytes_read = read(fd, read_buf, sizeof(read_buf));
1787*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(bytes_read, "read from tmpfile");
1788*4d495c6eSApple OSS Distributions
1789*4d495c6eSApple OSS Distributions T_EXPECT_EQ(bytes_read, 0L, "no bytes sent over tmpfile");
1790*4d495c6eSApple OSS Distributions
1791*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1792*4d495c6eSApple OSS Distributions kern_return_t kr = vm_deallocate(mach_task_self(), (vm_address_t) buf_untagged, BUF_SIZE);
1793*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "deallocate tagged buffer");
1794*4d495c6eSApple OSS Distributions
1795*4d495c6eSApple OSS Distributions close(fd);
1796*4d495c6eSApple OSS Distributions T_SETUPEND;
1797*4d495c6eSApple OSS Distributions }
1798*4d495c6eSApple OSS Distributions
1799*4d495c6eSApple OSS Distributions T_HELPER_DECL(mte_tag_violate_aio, "child process to trigger an asynchronous MTE violation via AIO") {
1800*4d495c6eSApple OSS Distributions const mach_vm_size_t BUF_SIZE = MTE_GRANULE_SIZE;
1801*4d495c6eSApple OSS Distributions uint64_t mask;
1802*4d495c6eSApple OSS Distributions
1803*4d495c6eSApple OSS Distributions char *buf_untagged = allocate_tagged_memory(BUF_SIZE, &mask);
1804*4d495c6eSApple OSS Distributions char *buf_tagged = __arm_mte_create_random_tag(buf_untagged, mask);
1805*4d495c6eSApple OSS Distributions __arm_mte_set_tag(buf_tagged);
1806*4d495c6eSApple OSS Distributions
1807*4d495c6eSApple OSS Distributions strncpy(buf_tagged, "ABCDEFG", BUF_SIZE);
1808*4d495c6eSApple OSS Distributions size_t length = strlen(buf_tagged);
1809*4d495c6eSApple OSS Distributions
1810*4d495c6eSApple OSS Distributions char *buf_incorrectly_tagged = __arm_mte_increment_tag(buf_tagged, 1);
1811*4d495c6eSApple OSS Distributions int fd = fileno(tmpfile());
1812*4d495c6eSApple OSS Distributions
1813*4d495c6eSApple OSS Distributions struct aiocb aiocb = {
1814*4d495c6eSApple OSS Distributions .aio_fildes = fd,
1815*4d495c6eSApple OSS Distributions .aio_offset = 0,
1816*4d495c6eSApple OSS Distributions .aio_buf = buf_incorrectly_tagged,
1817*4d495c6eSApple OSS Distributions .aio_nbytes = length,
1818*4d495c6eSApple OSS Distributions };
1819*4d495c6eSApple OSS Distributions int ret = aio_write(&aiocb);
1820*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(ret, "aio_write");
1821*4d495c6eSApple OSS Distributions
1822*4d495c6eSApple OSS Distributions /* wait for the kernel to handle our async I/O */
1823*4d495c6eSApple OSS Distributions const struct aiocb *aio_list[1] = { &aiocb };
1824*4d495c6eSApple OSS Distributions ret = aio_suspend(aio_list, 1, NULL);
1825*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(ret, "aio_suspend");
1826*4d495c6eSApple OSS Distributions
1827*4d495c6eSApple OSS Distributions char read_buf[BUF_SIZE];
1828*4d495c6eSApple OSS Distributions ssize_t bytes_read = read(fd, read_buf, sizeof(read_buf));
1829*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(bytes_read, "read from tmpfile");
1830*4d495c6eSApple OSS Distributions
1831*4d495c6eSApple OSS Distributions /* these have to be "may fail" instead of "expect fail" due to rdar://136258500 */
1832*4d495c6eSApple OSS Distributions T_MAYFAIL_WITH_RADAR(136300841);
1833*4d495c6eSApple OSS Distributions T_EXPECT_EQ(bytes_read, (ssize_t)length, "bytes sent over tmpfile");
1834*4d495c6eSApple OSS Distributions
1835*4d495c6eSApple OSS Distributions for (size_t i = 0; i < length; i++) {
1836*4d495c6eSApple OSS Distributions T_MAYFAIL_WITH_RADAR(136300841);
1837*4d495c6eSApple OSS Distributions T_EXPECT_EQ(buf_tagged[i], read_buf[i], "character %lu matches", i);
1838*4d495c6eSApple OSS Distributions }
1839*4d495c6eSApple OSS Distributions
1840*4d495c6eSApple OSS Distributions kern_return_t kr = vm_deallocate(mach_task_self(), (vm_address_t) buf_untagged, BUF_SIZE);
1841*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "deallocate tagged buffer");
1842*4d495c6eSApple OSS Distributions
1843*4d495c6eSApple OSS Distributions close(fd);
1844*4d495c6eSApple OSS Distributions }
1845*4d495c6eSApple OSS Distributions
1846*4d495c6eSApple OSS Distributions T_DECL(mte_aio_tag_bypass,
1847*4d495c6eSApple OSS Distributions "Test nonfatal MTE asynchronous access faults with tag check bypass",
1848*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1849*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC) {
1850*4d495c6eSApple OSS Distributions run_helper_with_sec_bypass("mte_tag_violate_aio");
1851*4d495c6eSApple OSS Distributions }
1852*4d495c6eSApple OSS Distributions #endif /* __arm64__ */
1853*4d495c6eSApple OSS Distributions
1854*4d495c6eSApple OSS Distributions static void
run_iokit_sysctl_test(int vector)1855*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(int vector)
1856*4d495c6eSApple OSS Distributions {
1857*4d495c6eSApple OSS Distributions int ret = sysctlbyname("kern.iokittest", NULL, 0, &vector, sizeof(vector));
1858*4d495c6eSApple OSS Distributions T_EXPECT_POSIX_ZERO(ret, "sysctl kern.iokittest(%d)", vector);
1859*4d495c6eSApple OSS Distributions }
1860*4d495c6eSApple OSS Distributions
1861*4d495c6eSApple OSS Distributions T_DECL(mte_iomd_cpu_map,
1862*4d495c6eSApple OSS Distributions "Test that IOMemoryDescriptor::map() of userspace memory is mapped as untagged in the kernel",
1863*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1864*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1865*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1866*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
1867*4d495c6eSApple OSS Distributions {
1868*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(333);
1869*4d495c6eSApple OSS Distributions }
1870*4d495c6eSApple OSS Distributions
1871*4d495c6eSApple OSS Distributions T_DECL(mte_iomd_read_write_bytes,
1872*4d495c6eSApple OSS Distributions "Test that IOMemoryDescriptor::read/writeBytes() of tagged memory works",
1873*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1874*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1875*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1876*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC) {
1877*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(334);
1878*4d495c6eSApple OSS Distributions }
1879*4d495c6eSApple OSS Distributions
1880*4d495c6eSApple OSS Distributions T_DECL(iomd_read_write_bytes_non_mte,
1881*4d495c6eSApple OSS Distributions "Test that IOMemoryDescriptor::read/writeBytes() of untagged memory works",
1882*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1883*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1884*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1885*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC) {
1886*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(335);
1887*4d495c6eSApple OSS Distributions }
1888*4d495c6eSApple OSS Distributions
1889*4d495c6eSApple OSS Distributions T_DECL(iomd_read_bytes_with_tcf,
1890*4d495c6eSApple OSS Distributions "Test that tag mismatches during IOMemoryDescriptor::readBytes() get detected",
1891*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1892*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1893*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1894*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC) {
1895*4d495c6eSApple OSS Distributions /* The iokit test will generate an artificial tag check mismatch midway through the buffer */
1896*4d495c6eSApple OSS Distributions expect_sigkill(^{
1897*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(336);
1898*4d495c6eSApple OSS Distributions T_ASSERT_FAIL("Expected this process to get killed");
1899*4d495c6eSApple OSS Distributions }, "asynchronous TCF in readBytes()");
1900*4d495c6eSApple OSS Distributions }
1901*4d495c6eSApple OSS Distributions
1902*4d495c6eSApple OSS Distributions T_DECL(iomd_write_bytes_with_tcf,
1903*4d495c6eSApple OSS Distributions "Test that tag mismatches during IOMemoryDescriptor::writeBytes() continue to work out of the box",
1904*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1905*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1906*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1907*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC) {
1908*4d495c6eSApple OSS Distributions /* The iokit test will generate an artificial tag check mismatch midway through the buffer */
1909*4d495c6eSApple OSS Distributions expect_sigkill(^{
1910*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(337);
1911*4d495c6eSApple OSS Distributions T_ASSERT_FAIL("Expected this process to get killed");
1912*4d495c6eSApple OSS Distributions }, "asynchronous TCF in writeBytes()");
1913*4d495c6eSApple OSS Distributions }
1914*4d495c6eSApple OSS Distributions
1915*4d495c6eSApple OSS Distributions T_DECL(iomd_create_alias_mapping_in_this_map,
1916*4d495c6eSApple OSS Distributions "Test that IOMemoryDescriptor::createMappingInTask() of tagged memory in the current task works",
1917*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1918*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1919*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1920*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC) {
1921*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(340);
1922*4d495c6eSApple OSS Distributions }
1923*4d495c6eSApple OSS Distributions
1924*4d495c6eSApple OSS Distributions T_DECL(iomd_create_alias_mapping_in_kernel_map,
1925*4d495c6eSApple OSS Distributions "Test that IOMemoryDescriptor::createMappingInTask() of tagged memory in the kernel is allowed",
1926*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1927*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1928*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1929*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC) {
1930*4d495c6eSApple OSS Distributions run_iokit_sysctl_test(342);
1931*4d495c6eSApple OSS Distributions }
1932*4d495c6eSApple OSS Distributions
1933*4d495c6eSApple OSS Distributions T_DECL(mte_cpu_map_pageout,
1934*4d495c6eSApple OSS Distributions "Test correct behavior of kernel CPU mapping after userspace mapping is paged out",
1935*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1936*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1937*4d495c6eSApple OSS Distributions T_META_ASROOT(true),
1938*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC)
1939*4d495c6eSApple OSS Distributions {
1940*4d495c6eSApple OSS Distributions mach_vm_size_t alloc_size = PAGE_SIZE;
1941*4d495c6eSApple OSS Distributions char *ptr = (char*)(allocate_and_tag_range(alloc_size, TAG_RANDOM_EXCLUDE(0xF)));
1942*4d495c6eSApple OSS Distributions char value = 'A';
1943*4d495c6eSApple OSS Distributions memset(ptr, value, alloc_size);
1944*4d495c6eSApple OSS Distributions
1945*4d495c6eSApple OSS Distributions struct {
1946*4d495c6eSApple OSS Distributions mach_vm_size_t size;
1947*4d495c6eSApple OSS Distributions char *ptr;
1948*4d495c6eSApple OSS Distributions char value;
1949*4d495c6eSApple OSS Distributions } args = { alloc_size, ptr, value };
1950*4d495c6eSApple OSS Distributions run_sysctl_test("vm_cpu_map_pageout", (int64_t)(&args));
1951*4d495c6eSApple OSS Distributions }
1952*4d495c6eSApple OSS Distributions
1953*4d495c6eSApple OSS Distributions T_DECL(vm_region_recurse_mte_info,
1954*4d495c6eSApple OSS Distributions "Ensure metadata returned by vm_region_recurse correct reflects MTE status",
1955*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
1956*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
1957*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
1958*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
1959*4d495c6eSApple OSS Distributions {
1960*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
1961*4d495c6eSApple OSS Distributions
1962*4d495c6eSApple OSS Distributions /* Given an MTE-enabled region */
1963*4d495c6eSApple OSS Distributions const mach_vm_size_t alloc_size = PAGE_SIZE;
1964*4d495c6eSApple OSS Distributions vm_address_t tagged_buffer_addr = allocate_and_tag_range(alloc_size, 0xa);
1965*4d495c6eSApple OSS Distributions vm_address_t untagged_handle_to_tagged_address = tagged_buffer_addr & ~MTE_TAG_MASK;
1966*4d495c6eSApple OSS Distributions
1967*4d495c6eSApple OSS Distributions /* And a non-MTE-enabled region */
1968*4d495c6eSApple OSS Distributions /* (Manually select an address to be sure we're placed in a new region from the tagged region) */
1969*4d495c6eSApple OSS Distributions mach_vm_address_t untagged_buffer_addr = untagged_handle_to_tagged_address + (32 * 1024);
1970*4d495c6eSApple OSS Distributions kern_return_t kr = mach_vm_allocate(
1971*4d495c6eSApple OSS Distributions mach_task_self(),
1972*4d495c6eSApple OSS Distributions &untagged_buffer_addr,
1973*4d495c6eSApple OSS Distributions alloc_size,
1974*4d495c6eSApple OSS Distributions VM_FLAGS_FIXED );
1975*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "Allocated untagged page");
1976*4d495c6eSApple OSS Distributions /* (And write to it to be sure we populate a VM object) */
1977*4d495c6eSApple OSS Distributions memset((uint8_t*)untagged_buffer_addr, 0, alloc_size);
1978*4d495c6eSApple OSS Distributions
1979*4d495c6eSApple OSS Distributions T_SETUPEND;
1980*4d495c6eSApple OSS Distributions
1981*4d495c6eSApple OSS Distributions /* When we query the attributes of the region covering the MTE-enabled buffer */
1982*4d495c6eSApple OSS Distributions mach_vm_address_t addr = untagged_handle_to_tagged_address;
1983*4d495c6eSApple OSS Distributions mach_vm_size_t addr_size = alloc_size;
1984*4d495c6eSApple OSS Distributions uint32_t nesting_depth = UINT_MAX;
1985*4d495c6eSApple OSS Distributions mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_V2_COUNT_64;
1986*4d495c6eSApple OSS Distributions vm_region_submap_info_data_64_t region_info;
1987*4d495c6eSApple OSS Distributions kr = vm_region_recurse_64(mach_task_self(), (vm_address_t*)&addr, (vm_size_t*)&addr_size, &nesting_depth, (vm_region_recurse_info_t)®ion_info, &count);
1988*4d495c6eSApple OSS Distributions
1989*4d495c6eSApple OSS Distributions /* Then our metadata confirms that the region contains an MTE-mappable object */
1990*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "Query MTE-enabled region");
1991*4d495c6eSApple OSS Distributions T_ASSERT_TRUE(region_info.flags & VM_REGION_FLAG_MTE_ENABLED, "Expected metadata to reflect an MTE mappable object");
1992*4d495c6eSApple OSS Distributions
1993*4d495c6eSApple OSS Distributions /* And when we query the same thing via the 'short' info */
1994*4d495c6eSApple OSS Distributions addr = untagged_handle_to_tagged_address;
1995*4d495c6eSApple OSS Distributions addr_size = alloc_size;
1996*4d495c6eSApple OSS Distributions nesting_depth = UINT_MAX;
1997*4d495c6eSApple OSS Distributions count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
1998*4d495c6eSApple OSS Distributions vm_region_submap_short_info_data_64_t short_info;
1999*4d495c6eSApple OSS Distributions kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&short_info, &count);
2000*4d495c6eSApple OSS Distributions
2001*4d495c6eSApple OSS Distributions /* Then the short metadata also confirms that the region contains an MTE-mappable object */
2002*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "Query MTE-enabled region");
2003*4d495c6eSApple OSS Distributions T_ASSERT_TRUE(short_info.flags & VM_REGION_FLAG_MTE_ENABLED, "Expected metadata to reflect an MTE mappable object");
2004*4d495c6eSApple OSS Distributions
2005*4d495c6eSApple OSS Distributions /* And when we query the attributes of the region covering the non-MTE-enabled buffer */
2006*4d495c6eSApple OSS Distributions addr = untagged_buffer_addr;
2007*4d495c6eSApple OSS Distributions addr_size = alloc_size;
2008*4d495c6eSApple OSS Distributions nesting_depth = UINT_MAX;
2009*4d495c6eSApple OSS Distributions count = VM_REGION_SUBMAP_INFO_V2_COUNT_64;
2010*4d495c6eSApple OSS Distributions memset(®ion_info, 0, sizeof(region_info));
2011*4d495c6eSApple OSS Distributions kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)®ion_info, &count);
2012*4d495c6eSApple OSS Distributions
2013*4d495c6eSApple OSS Distributions /* Then our metadata confirm that the region does not contain an MTE-mappable object */
2014*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "Query MTE-disabled region");
2015*4d495c6eSApple OSS Distributions T_ASSERT_FALSE(region_info.flags & VM_REGION_FLAG_MTE_ENABLED, "Expected metadata to reflect no MTE mappable object");
2016*4d495c6eSApple OSS Distributions
2017*4d495c6eSApple OSS Distributions /* And when we query the same thing via the 'short' info */
2018*4d495c6eSApple OSS Distributions addr = untagged_buffer_addr;
2019*4d495c6eSApple OSS Distributions addr_size = alloc_size;
2020*4d495c6eSApple OSS Distributions nesting_depth = UINT_MAX;
2021*4d495c6eSApple OSS Distributions count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
2022*4d495c6eSApple OSS Distributions memset(&short_info, 0, sizeof(short_info));
2023*4d495c6eSApple OSS Distributions kr = mach_vm_region_recurse(mach_task_self(), (mach_vm_address_t*)&addr, (mach_vm_size_t*)&addr_size, &nesting_depth, (vm_region_info_t)&short_info, &count);
2024*4d495c6eSApple OSS Distributions
2025*4d495c6eSApple OSS Distributions /* Then the short metadata also confirms that the region does not contain an MTE-mappable object */
2026*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "Query MTE-disabled region");
2027*4d495c6eSApple OSS Distributions T_ASSERT_FALSE(short_info.flags & VM_REGION_FLAG_MTE_ENABLED, "Expected metadata to reflect no MTE mappable object");
2028*4d495c6eSApple OSS Distributions
2029*4d495c6eSApple OSS Distributions /* Cleanup */
2030*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), untagged_handle_to_tagged_address, alloc_size);
2031*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "deallocate tagged memory");
2032*4d495c6eSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), untagged_buffer_addr, alloc_size);
2033*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "deallocate untagged memory");
2034*4d495c6eSApple OSS Distributions }
2035*4d495c6eSApple OSS Distributions
2036*4d495c6eSApple OSS Distributions T_DECL(mach_vm_read_of_remote_proc,
2037*4d495c6eSApple OSS Distributions "Verify that mach_vm_read of a remote MTE-enabled process works",
2038*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2039*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2040*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2041*4d495c6eSApple OSS Distributions /* rdar://151142487: gcore won't work on iOS without unrestricting task_read_for_pid */
2042*4d495c6eSApple OSS Distributions T_META_BOOTARGS_SET("amfi_unrestrict_task_for_pid=1"),
2043*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2044*4d495c6eSApple OSS Distributions {
2045*4d495c6eSApple OSS Distributions /* Given a process that is launched as MTE-enabled */
2046*4d495c6eSApple OSS Distributions char* sleep_args[] = { "/bin/sleep", "5000", NULL};
2047*4d495c6eSApple OSS Distributions posix_spawnattr_t attr;
2048*4d495c6eSApple OSS Distributions errno_t ret = posix_spawnattr_init(&attr);
2049*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_init");
2050*4d495c6eSApple OSS Distributions ret = posix_spawnattr_set_use_sec_transition_shims_np(&attr, POSIX_SPAWN_SECFLAG_EXPLICIT_ENABLE);
2051*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_set_use_sec_transition_shims_np");
2052*4d495c6eSApple OSS Distributions pid_t child_pid = 0;
2053*4d495c6eSApple OSS Distributions ret = posix_spawn(&child_pid, sleep_args[0], NULL, &attr, sleep_args, NULL);
2054*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawn");
2055*4d495c6eSApple OSS Distributions T_ASSERT_NE(child_pid, 0, "posix_spawn");
2056*4d495c6eSApple OSS Distributions ret = posix_spawnattr_destroy(&attr);
2057*4d495c6eSApple OSS Distributions T_ASSERT_POSIX_ZERO(ret, "posix_spawnattr_destroy");
2058*4d495c6eSApple OSS Distributions
2059*4d495c6eSApple OSS Distributions /* And it's MTE-enabled as expected */
2060*4d495c6eSApple OSS Distributions validate_proc_pidinfo_mte_status(child_pid, true);
2061*4d495c6eSApple OSS Distributions
2062*4d495c6eSApple OSS Distributions /* And gcore attempts to mach_vm_read some of its memory */
2063*4d495c6eSApple OSS Distributions char pid_buf[64];
2064*4d495c6eSApple OSS Distributions snprintf(pid_buf, sizeof(pid_buf), "%d", child_pid);
2065*4d495c6eSApple OSS Distributions char* gcore_args[] = { "/usr/bin/gcore", pid_buf, NULL};
2066*4d495c6eSApple OSS Distributions /* Then gcore (and its implicit mach_vm_read()) succeeds */
2067*4d495c6eSApple OSS Distributions posix_spawn_with_flags_and_assert_successful_exit(gcore_args, POSIX_SPAWN_SECFLAG_EXPLICIT_DISABLE, false, false);
2068*4d495c6eSApple OSS Distributions
2069*4d495c6eSApple OSS Distributions kill_child(child_pid);
2070*4d495c6eSApple OSS Distributions }
2071*4d495c6eSApple OSS Distributions
2072*4d495c6eSApple OSS Distributions void
do_local_vm_copyin_with_invalid_tag_test(vm_size_t size)2073*4d495c6eSApple OSS Distributions do_local_vm_copyin_with_invalid_tag_test(vm_size_t size)
2074*4d495c6eSApple OSS Distributions {
2075*4d495c6eSApple OSS Distributions T_SETUPBEGIN;
2076*4d495c6eSApple OSS Distributions
2077*4d495c6eSApple OSS Distributions /* Given an MTE-enabled region */
2078*4d495c6eSApple OSS Distributions vm_address_t mte_region = 0;
2079*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &mte_region, size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
2080*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2081*4d495c6eSApple OSS Distributions memset((void *)mte_region, 0, size);
2082*4d495c6eSApple OSS Distributions
2083*4d495c6eSApple OSS Distributions /* And an MTE-disabled region */
2084*4d495c6eSApple OSS Distributions vm_address_t non_mte_region = 0;
2085*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), &non_mte_region, size, VM_FLAGS_ANYWHERE);
2086*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(non-MTE)");
2087*4d495c6eSApple OSS Distributions
2088*4d495c6eSApple OSS Distributions /* And the MTE region has tag 0x4, but our pointer is incorrectly tagged 0x5 */
2089*4d495c6eSApple OSS Distributions mte_region |= 0x0400000000000000;
2090*4d495c6eSApple OSS Distributions __arm_mte_set_tag((void *)mte_region);
2091*4d495c6eSApple OSS Distributions mte_region |= 0x0500000000000000;
2092*4d495c6eSApple OSS Distributions
2093*4d495c6eSApple OSS Distributions T_SETUPEND;
2094*4d495c6eSApple OSS Distributions
2095*4d495c6eSApple OSS Distributions /* When we use `vm_read_overwrite` */
2096*4d495c6eSApple OSS Distributions /* Then the system terminates us due to our incorrectly tagged request */
2097*4d495c6eSApple OSS Distributions vm_size_t out_size;
2098*4d495c6eSApple OSS Distributions vm_read_overwrite(mach_task_self(), mte_region, size, non_mte_region, &out_size);
2099*4d495c6eSApple OSS Distributions T_FAIL("Expected to be SIGKILLED");
2100*4d495c6eSApple OSS Distributions }
2101*4d495c6eSApple OSS Distributions
2102*4d495c6eSApple OSS Distributions T_DECL(local_vm_copyin_with_invalid_tag,
2103*4d495c6eSApple OSS Distributions "Verify that copyin of local memory with an invalid tag is denied",
2104*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2105*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2106*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2107*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2108*4d495c6eSApple OSS Distributions {
2109*4d495c6eSApple OSS Distributions /*
2110*4d495c6eSApple OSS Distributions * We go down different code paths depending on the size,
2111*4d495c6eSApple OSS Distributions * so test both and ensure they're handled consistently.
2112*4d495c6eSApple OSS Distributions */
2113*4d495c6eSApple OSS Distributions expect_sigkill(^{
2114*4d495c6eSApple OSS Distributions do_local_vm_copyin_with_invalid_tag_test(PAGE_SIZE);
2115*4d495c6eSApple OSS Distributions }, "local_vm_copyin(PAGE_SIZE)");
2116*4d495c6eSApple OSS Distributions expect_sigkill(^{
2117*4d495c6eSApple OSS Distributions do_local_vm_copyin_with_invalid_tag_test(PAGE_SIZE * 10);
2118*4d495c6eSApple OSS Distributions }, "local_vm_copyin(PAGE_SIZE * 10)");
2119*4d495c6eSApple OSS Distributions }
2120*4d495c6eSApple OSS Distributions
2121*4d495c6eSApple OSS Distributions T_DECL(local_vm_copyin_with_large_non_mte_object_with_adjacent_mte_object,
2122*4d495c6eSApple OSS Distributions "Ensure a large copyin with a non-MTE object and adjacent MTE object fails",
2123*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2124*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2125*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2126*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2127*4d495c6eSApple OSS Distributions {
2128*4d495c6eSApple OSS Distributions expect_sigkill(^{
2129*4d495c6eSApple OSS Distributions /* Given a non-MTE-enabled object */
2130*4d495c6eSApple OSS Distributions vm_address_t non_mte_object_address = 0;
2131*4d495c6eSApple OSS Distributions vm_size_t non_mte_object_size = PAGE_SIZE;
2132*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &non_mte_object_address, non_mte_object_size, VM_FLAGS_ANYWHERE);
2133*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(non-MTE)");
2134*4d495c6eSApple OSS Distributions /* And ensure it's present */
2135*4d495c6eSApple OSS Distributions memset((void *)non_mte_object_address, 0, non_mte_object_size);
2136*4d495c6eSApple OSS Distributions
2137*4d495c6eSApple OSS Distributions /* And an adjacent MTE object (which is large enough that the total region will definitely be above `msg_ool_size_small`) */
2138*4d495c6eSApple OSS Distributions vm_address_t mte_object_address = non_mte_object_address + non_mte_object_size;
2139*4d495c6eSApple OSS Distributions vm_size_t mte_object_size = PAGE_SIZE * 2;
2140*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), &mte_object_address, mte_object_size, VM_FLAGS_FIXED | VM_FLAGS_MTE);
2141*4d495c6eSApple OSS Distributions if (kr == KERN_NO_SPACE) {
2142*4d495c6eSApple OSS Distributions /*
2143*4d495c6eSApple OSS Distributions * Skip gracefully if we fail to grab the VA space we need.
2144*4d495c6eSApple OSS Distributions * Note that we send ourselves a SIGKILL so the expect_sigkill() wrapper
2145*4d495c6eSApple OSS Distributions * is happy. We can't use T_SKIP or the like because that would elide the
2146*4d495c6eSApple OSS Distributions * SIGKILL.
2147*4d495c6eSApple OSS Distributions */
2148*4d495c6eSApple OSS Distributions T_LOG("Cannot grab required VA space, skipping...");
2149*4d495c6eSApple OSS Distributions kill(getpid(), SIGKILL);
2150*4d495c6eSApple OSS Distributions return;
2151*4d495c6eSApple OSS Distributions }
2152*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(adjacent MTE)");
2153*4d495c6eSApple OSS Distributions /* And ensure it's present */
2154*4d495c6eSApple OSS Distributions memset((void *)mte_object_address, 0, mte_object_size);
2155*4d495c6eSApple OSS Distributions /* And the MTE object has a non-zero tag (so we TCF when crossing it) */
2156*4d495c6eSApple OSS Distributions mte_object_address |= 0x0400000000000000;
2157*4d495c6eSApple OSS Distributions for (mach_vm_size_t offset = 0; offset < mte_object_size; offset += MTE_GRANULE_SIZE) {
2158*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&((uint8_t*)mte_object_address)[offset]);
2159*4d495c6eSApple OSS Distributions }
2160*4d495c6eSApple OSS Distributions
2161*4d495c6eSApple OSS Distributions /* When we try to copyin the entire region, spanning both objects */
2162*4d495c6eSApple OSS Distributions vm_size_t total_region_size = mte_object_size + non_mte_object_size;
2163*4d495c6eSApple OSS Distributions vm_address_t region_to_overwrite = 0;
2164*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), ®ion_to_overwrite, total_region_size, VM_FLAGS_ANYWHERE);
2165*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(scribble region)");
2166*4d495c6eSApple OSS Distributions
2167*4d495c6eSApple OSS Distributions vm_size_t out_size;
2168*4d495c6eSApple OSS Distributions /* Then we take a TCF during the copyin */
2169*4d495c6eSApple OSS Distributions vm_read_overwrite(mach_task_self(), non_mte_object_address, total_region_size, region_to_overwrite, &out_size);
2170*4d495c6eSApple OSS Distributions }, "Trigger a TCF during copyin");
2171*4d495c6eSApple OSS Distributions }
2172*4d495c6eSApple OSS Distributions
2173*4d495c6eSApple OSS Distributions T_DECL(local_vm_copyin_with_large_mte_object_with_invalid_size,
2174*4d495c6eSApple OSS Distributions "Ensure a large copyin with a non-MTE object but an invalid size fails",
2175*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2176*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2177*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2178*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2179*4d495c6eSApple OSS Distributions {
2180*4d495c6eSApple OSS Distributions /* Given an MTE-enabled object (which is large enough that it exceeds `msg_ool_size_small`) */
2181*4d495c6eSApple OSS Distributions vm_address_t mte_object_address = 0;
2182*4d495c6eSApple OSS Distributions vm_size_t mte_object_size = PAGE_SIZE * 3;
2183*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &mte_object_address, mte_object_size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
2184*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2185*4d495c6eSApple OSS Distributions /* And ensure it's present */
2186*4d495c6eSApple OSS Distributions memset((void *)mte_object_address, 0, mte_object_size);
2187*4d495c6eSApple OSS Distributions
2188*4d495c6eSApple OSS Distributions /* When we try to copyin the region, but specify a size that's too large */
2189*4d495c6eSApple OSS Distributions /* And we ensure this object is not coalesced with the above object */
2190*4d495c6eSApple OSS Distributions vm_size_t invalid_size = mte_object_size + PAGE_SIZE * 16;
2191*4d495c6eSApple OSS Distributions vm_address_t region_to_overwrite = mte_object_address + (PAGE_SIZE * 8);
2192*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), ®ion_to_overwrite, invalid_size, VM_FLAGS_FIXED);
2193*4d495c6eSApple OSS Distributions if (kr == KERN_NO_SPACE) {
2194*4d495c6eSApple OSS Distributions /* Skip gracefully if we fail to grab the VA space we need */
2195*4d495c6eSApple OSS Distributions T_SKIP("Cannot grab required VA space, skipping...");
2196*4d495c6eSApple OSS Distributions return;
2197*4d495c6eSApple OSS Distributions }
2198*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(scribble region)");
2199*4d495c6eSApple OSS Distributions
2200*4d495c6eSApple OSS Distributions vm_size_t out_size;
2201*4d495c6eSApple OSS Distributions kr = vm_read_overwrite(mach_task_self(), mte_object_address, invalid_size, region_to_overwrite, &out_size);
2202*4d495c6eSApple OSS Distributions /* Then it fails */
2203*4d495c6eSApple OSS Distributions T_ASSERT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "copyin fails");
2204*4d495c6eSApple OSS Distributions }
2205*4d495c6eSApple OSS Distributions
2206*4d495c6eSApple OSS Distributions T_DECL(local_vm_copyin_with_large_mte_object_with_hole_in_region,
2207*4d495c6eSApple OSS Distributions "Ensure a large copyin with an MTE object, but with a hole in the middle, is rejected",
2208*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2209*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2210*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2211*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2212*4d495c6eSApple OSS Distributions {
2213*4d495c6eSApple OSS Distributions /* Given an MTE-enabled object (which is large enough that it exceeds `msg_ool_size_small`) */
2214*4d495c6eSApple OSS Distributions vm_address_t mte_object_address = 0;
2215*4d495c6eSApple OSS Distributions vm_size_t mte_object_size = PAGE_SIZE * 3;
2216*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &mte_object_address, mte_object_size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
2217*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2218*4d495c6eSApple OSS Distributions /* And ensure it's present */
2219*4d495c6eSApple OSS Distributions memset((void *)mte_object_address, 0, mte_object_size);
2220*4d495c6eSApple OSS Distributions
2221*4d495c6eSApple OSS Distributions /* And a nearby non-MTE object, but we leave a hole in the middle */
2222*4d495c6eSApple OSS Distributions vm_size_t padding = PAGE_SIZE;
2223*4d495c6eSApple OSS Distributions vm_address_t non_mte_object_address = mte_object_address + mte_object_size + padding;
2224*4d495c6eSApple OSS Distributions vm_size_t non_mte_object_size = PAGE_SIZE;
2225*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), &non_mte_object_address, non_mte_object_size, VM_FLAGS_FIXED);
2226*4d495c6eSApple OSS Distributions if (kr == KERN_NO_SPACE) {
2227*4d495c6eSApple OSS Distributions /* Skip gracefully if we fail to grab the VA space we need */
2228*4d495c6eSApple OSS Distributions T_SKIP("Cannot grab required VA space, skipping...");
2229*4d495c6eSApple OSS Distributions return;
2230*4d495c6eSApple OSS Distributions }
2231*4d495c6eSApple OSS Distributions
2232*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(nearby non-MTE)");
2233*4d495c6eSApple OSS Distributions /* And ensure it's present */
2234*4d495c6eSApple OSS Distributions memset((void *)non_mte_object_address, 0, non_mte_object_size);
2235*4d495c6eSApple OSS Distributions
2236*4d495c6eSApple OSS Distributions /* When we try to copyin the whole region, including the hole */
2237*4d495c6eSApple OSS Distributions vm_size_t region_size = mte_object_size + padding + non_mte_object_size;
2238*4d495c6eSApple OSS Distributions vm_address_t region_to_overwrite = 0;
2239*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), ®ion_to_overwrite, region_size, VM_FLAGS_ANYWHERE);
2240*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(scribble region)");
2241*4d495c6eSApple OSS Distributions
2242*4d495c6eSApple OSS Distributions vm_size_t out_size;
2243*4d495c6eSApple OSS Distributions kr = vm_read_overwrite(mach_task_self(), mte_object_address, region_size, region_to_overwrite, &out_size);
2244*4d495c6eSApple OSS Distributions /* Then it fails */
2245*4d495c6eSApple OSS Distributions T_ASSERT_MACH_ERROR(kr, KERN_INVALID_ADDRESS, "copyin fails");
2246*4d495c6eSApple OSS Distributions }
2247*4d495c6eSApple OSS Distributions
2248*4d495c6eSApple OSS Distributions T_DECL(local_vm_copyin_with_large_mte_object_with_adjacent_large_mte_object_same_tags,
2249*4d495c6eSApple OSS Distributions "Ensure a large copyin with two MTE objects with the same tag succeeds",
2250*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2251*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2252*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2253*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2254*4d495c6eSApple OSS Distributions {
2255*4d495c6eSApple OSS Distributions /* Given an MTE-enabled object */
2256*4d495c6eSApple OSS Distributions vm_address_t mte_object1_address = 0;
2257*4d495c6eSApple OSS Distributions vm_size_t mte_object1_size = PAGE_SIZE;
2258*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &mte_object1_address, mte_object1_size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
2259*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2260*4d495c6eSApple OSS Distributions /* And ensure it's present */
2261*4d495c6eSApple OSS Distributions memset((void *)mte_object1_address, 0, mte_object1_size);
2262*4d495c6eSApple OSS Distributions
2263*4d495c6eSApple OSS Distributions /* And an adjacent MTE object (which is large enough that the total region will definitely be above `msg_ool_size_small`) */
2264*4d495c6eSApple OSS Distributions vm_address_t mte_object2_address = mte_object1_address + mte_object1_size;
2265*4d495c6eSApple OSS Distributions vm_size_t mte_object2_size = PAGE_SIZE * 2;
2266*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), &mte_object2_address, mte_object2_size, VM_FLAGS_FIXED | VM_FLAGS_MTE);
2267*4d495c6eSApple OSS Distributions if (kr == KERN_NO_SPACE) {
2268*4d495c6eSApple OSS Distributions /* Skip gracefully if we fail to grab the VA space we need */
2269*4d495c6eSApple OSS Distributions T_SKIP("Cannot grab required VA space, skipping...");
2270*4d495c6eSApple OSS Distributions return;
2271*4d495c6eSApple OSS Distributions }
2272*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2273*4d495c6eSApple OSS Distributions /* And ensure it's present */
2274*4d495c6eSApple OSS Distributions memset((void *)mte_object2_address, 0, mte_object2_size);
2275*4d495c6eSApple OSS Distributions
2276*4d495c6eSApple OSS Distributions /* And both objects share the same tag */
2277*4d495c6eSApple OSS Distributions vm_size_t total_region_size = mte_object1_size + mte_object2_size;
2278*4d495c6eSApple OSS Distributions mte_object1_address |= 0x0400000000000000;
2279*4d495c6eSApple OSS Distributions for (mach_vm_size_t offset = 0; offset < total_region_size; offset += MTE_GRANULE_SIZE) {
2280*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&((uint8_t*)mte_object1_address)[offset]);
2281*4d495c6eSApple OSS Distributions }
2282*4d495c6eSApple OSS Distributions
2283*4d495c6eSApple OSS Distributions /* When we try to copyin the entire region, spanning both objects */
2284*4d495c6eSApple OSS Distributions vm_address_t region_to_overwrite = 0;
2285*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), ®ion_to_overwrite, total_region_size, VM_FLAGS_ANYWHERE);
2286*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(scribble region)");
2287*4d495c6eSApple OSS Distributions
2288*4d495c6eSApple OSS Distributions vm_size_t out_size;
2289*4d495c6eSApple OSS Distributions kr = vm_read_overwrite(mach_task_self(), mte_object1_address, total_region_size, region_to_overwrite, &out_size);
2290*4d495c6eSApple OSS Distributions /* Then it succeeds */
2291*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "copyin");
2292*4d495c6eSApple OSS Distributions }
2293*4d495c6eSApple OSS Distributions
2294*4d495c6eSApple OSS Distributions T_DECL(local_vm_copyin_with_large_mte_object_with_adjacent_large_mte_object_different_tags,
2295*4d495c6eSApple OSS Distributions "Ensure a large copyin with two MTE objects with a different tag in the second object fails",
2296*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2297*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2298*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2299*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2300*4d495c6eSApple OSS Distributions {
2301*4d495c6eSApple OSS Distributions expect_sigkill(^{
2302*4d495c6eSApple OSS Distributions /* Given an MTE-enabled object */
2303*4d495c6eSApple OSS Distributions vm_address_t mte_object1_address = 0;
2304*4d495c6eSApple OSS Distributions vm_size_t mte_object1_size = PAGE_SIZE;
2305*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &mte_object1_address, mte_object1_size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
2306*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2307*4d495c6eSApple OSS Distributions /* And ensure it's present */
2308*4d495c6eSApple OSS Distributions memset((void *)mte_object1_address, 0, mte_object1_size);
2309*4d495c6eSApple OSS Distributions
2310*4d495c6eSApple OSS Distributions /* And an adjacent MTE object (which is large enough that the total region will definitely be above `msg_ool_size_small`) */
2311*4d495c6eSApple OSS Distributions vm_address_t mte_object2_address = mte_object1_address + mte_object1_size;
2312*4d495c6eSApple OSS Distributions vm_size_t mte_object2_size = PAGE_SIZE * 2;
2313*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), &mte_object2_address, mte_object2_size, VM_FLAGS_FIXED | VM_FLAGS_MTE);
2314*4d495c6eSApple OSS Distributions if (kr == KERN_NO_SPACE) {
2315*4d495c6eSApple OSS Distributions /*
2316*4d495c6eSApple OSS Distributions * Skip gracefully if we fail to grab the VA space we need.
2317*4d495c6eSApple OSS Distributions * Note that we send ourselves a SIGKILL so the expect_sigkill() wrapper
2318*4d495c6eSApple OSS Distributions * is happy. We can't use T_SKIP or the like because that would elide the
2319*4d495c6eSApple OSS Distributions * SIGKILL.
2320*4d495c6eSApple OSS Distributions */
2321*4d495c6eSApple OSS Distributions T_LOG("Cannot grab required VA space, skipping...");
2322*4d495c6eSApple OSS Distributions kill(getpid(), SIGKILL);
2323*4d495c6eSApple OSS Distributions return;
2324*4d495c6eSApple OSS Distributions }
2325*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(adjacent MTE)");
2326*4d495c6eSApple OSS Distributions /* And ensure it's present */
2327*4d495c6eSApple OSS Distributions memset((void *)mte_object2_address, 0, mte_object2_size);
2328*4d495c6eSApple OSS Distributions
2329*4d495c6eSApple OSS Distributions /* And the objects have different tags */
2330*4d495c6eSApple OSS Distributions mte_object1_address |= 0x0400000000000000;
2331*4d495c6eSApple OSS Distributions for (mach_vm_size_t offset = 0; offset < mte_object1_size; offset += MTE_GRANULE_SIZE) {
2332*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&((uint8_t*)mte_object1_address)[offset]);
2333*4d495c6eSApple OSS Distributions }
2334*4d495c6eSApple OSS Distributions mte_object2_address |= 0x0500000000000000;
2335*4d495c6eSApple OSS Distributions for (mach_vm_size_t offset = 0; offset < mte_object2_size; offset += MTE_GRANULE_SIZE) {
2336*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&((uint8_t*)mte_object2_address)[offset]);
2337*4d495c6eSApple OSS Distributions }
2338*4d495c6eSApple OSS Distributions
2339*4d495c6eSApple OSS Distributions /* When we try to copyin the entire region, spanning both objects */
2340*4d495c6eSApple OSS Distributions vm_address_t region_to_overwrite = 0;
2341*4d495c6eSApple OSS Distributions vm_size_t total_region_size = mte_object1_size + mte_object2_size;
2342*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), ®ion_to_overwrite, total_region_size, VM_FLAGS_ANYWHERE);
2343*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(scribble region)");
2344*4d495c6eSApple OSS Distributions
2345*4d495c6eSApple OSS Distributions /* And we use a pointer that only has a valid tag for the first object */
2346*4d495c6eSApple OSS Distributions /* Then we get a SIGKILL (because we take a TCF) */
2347*4d495c6eSApple OSS Distributions vm_size_t out_size;
2348*4d495c6eSApple OSS Distributions vm_read_overwrite(mach_task_self(), mte_object1_address, total_region_size, region_to_overwrite, &out_size);
2349*4d495c6eSApple OSS Distributions }, "Trigger a TCF during copyin");
2350*4d495c6eSApple OSS Distributions }
2351*4d495c6eSApple OSS Distributions
2352*4d495c6eSApple OSS Distributions T_DECL(local_vm_copyin_with_large_mte_object_with_adjacent_non_mte_object,
2353*4d495c6eSApple OSS Distributions "Ensure a large copyin with an MTE object and adjacent non-MTE object fails",
2354*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2355*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2356*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2357*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2358*4d495c6eSApple OSS Distributions {
2359*4d495c6eSApple OSS Distributions expect_sigkill(^{
2360*4d495c6eSApple OSS Distributions /* Given an MTE-enabled object */
2361*4d495c6eSApple OSS Distributions vm_address_t mte_object_address = 0;
2362*4d495c6eSApple OSS Distributions vm_size_t mte_object_size = PAGE_SIZE;
2363*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &mte_object_address, mte_object_size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
2364*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2365*4d495c6eSApple OSS Distributions /* And ensure it's present */
2366*4d495c6eSApple OSS Distributions memset((void *)mte_object_address, 0, mte_object_size);
2367*4d495c6eSApple OSS Distributions /* And the MTE object has a non-zero tag (so we CTCF when crossing to an untagged region) */
2368*4d495c6eSApple OSS Distributions vm_address_t tagged_mte_object_address = mte_object_address | 0x0400000000000000;
2369*4d495c6eSApple OSS Distributions for (mach_vm_size_t offset = 0; offset < mte_object_size; offset += MTE_GRANULE_SIZE) {
2370*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&((uint8_t*)tagged_mte_object_address)[offset]);
2371*4d495c6eSApple OSS Distributions }
2372*4d495c6eSApple OSS Distributions
2373*4d495c6eSApple OSS Distributions /* And an adjacent non-MTE object (which is large enough that the total region will definitely be above `msg_ool_size_small`) */
2374*4d495c6eSApple OSS Distributions vm_address_t non_mte_object_address = mte_object_address + mte_object_size;
2375*4d495c6eSApple OSS Distributions vm_size_t non_mte_object_size = PAGE_SIZE * 2;
2376*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), &non_mte_object_address, non_mte_object_size, VM_FLAGS_FIXED);
2377*4d495c6eSApple OSS Distributions if (kr == KERN_NO_SPACE) {
2378*4d495c6eSApple OSS Distributions /*
2379*4d495c6eSApple OSS Distributions * Skip gracefully if we fail to grab the VA space we need.
2380*4d495c6eSApple OSS Distributions * Note that we send ourselves a SIGKILL so the expect_sigkill() wrapper
2381*4d495c6eSApple OSS Distributions * is happy. We can't use T_SKIP or the like because that would elide the
2382*4d495c6eSApple OSS Distributions * SIGKILL.
2383*4d495c6eSApple OSS Distributions */
2384*4d495c6eSApple OSS Distributions T_LOG("Cannot grab required VA space, skipping...");
2385*4d495c6eSApple OSS Distributions kill(getpid(), SIGKILL);
2386*4d495c6eSApple OSS Distributions return;
2387*4d495c6eSApple OSS Distributions }
2388*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(adjacent non-MTE)");
2389*4d495c6eSApple OSS Distributions /* And ensure it's present */
2390*4d495c6eSApple OSS Distributions memset((void *)non_mte_object_address, 0, non_mte_object_size);
2391*4d495c6eSApple OSS Distributions
2392*4d495c6eSApple OSS Distributions /* When we try to copyin the entire region, spanning both objects */
2393*4d495c6eSApple OSS Distributions vm_size_t total_region_size = mte_object_size + non_mte_object_size;
2394*4d495c6eSApple OSS Distributions vm_address_t region_to_overwrite = 0;
2395*4d495c6eSApple OSS Distributions kr = vm_allocate(mach_task_self(), ®ion_to_overwrite, total_region_size, VM_FLAGS_ANYWHERE);
2396*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(scribble region)");
2397*4d495c6eSApple OSS Distributions
2398*4d495c6eSApple OSS Distributions vm_size_t out_size;
2399*4d495c6eSApple OSS Distributions vm_read_overwrite(mach_task_self(), mte_object_address, total_region_size, region_to_overwrite, &out_size);
2400*4d495c6eSApple OSS Distributions /* Then we're killed due to a CTCF */
2401*4d495c6eSApple OSS Distributions }, "Trigger a CTCF during copyin");
2402*4d495c6eSApple OSS Distributions }
2403*4d495c6eSApple OSS Distributions
2404*4d495c6eSApple OSS Distributions T_DECL(make_memory_entry_handles_kernel_buffers,
2405*4d495c6eSApple OSS Distributions "Ensure mach_make_memory_entry does not panic when handed an MTE copy",
2406*4d495c6eSApple OSS Distributions T_META_ENABLED(TARGET_CPU_ARM64),
2407*4d495c6eSApple OSS Distributions T_META_REQUIRES_SYSCTL_EQ("hw.optional.arm.FEAT_MTE2", 1),
2408*4d495c6eSApple OSS Distributions XNU_T_META_SOC_SPECIFIC,
2409*4d495c6eSApple OSS Distributions T_META_ASROOT(true))
2410*4d495c6eSApple OSS Distributions {
2411*4d495c6eSApple OSS Distributions /* Given an MTE-enabled object */
2412*4d495c6eSApple OSS Distributions vm_address_t mte_object_address = 0;
2413*4d495c6eSApple OSS Distributions vm_size_t mte_object_size = PAGE_SIZE;
2414*4d495c6eSApple OSS Distributions kern_return_t kr = vm_allocate(mach_task_self(), &mte_object_address, mte_object_size, VM_FLAGS_ANYWHERE | VM_FLAGS_MTE);
2415*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "vm_allocate(MTE)");
2416*4d495c6eSApple OSS Distributions /* And ensure it's present */
2417*4d495c6eSApple OSS Distributions memset((void *)mte_object_address, 0, mte_object_size);
2418*4d495c6eSApple OSS Distributions /* And assign a non-zero tag just for authenticity */
2419*4d495c6eSApple OSS Distributions vm_address_t tagged_mte_object_address = mte_object_address | 0x0400000000000000;
2420*4d495c6eSApple OSS Distributions for (mach_vm_size_t offset = 0; offset < mte_object_size; offset += MTE_GRANULE_SIZE) {
2421*4d495c6eSApple OSS Distributions __arm_mte_set_tag(&((uint8_t*)tagged_mte_object_address)[offset]);
2422*4d495c6eSApple OSS Distributions }
2423*4d495c6eSApple OSS Distributions
2424*4d495c6eSApple OSS Distributions /* When I use mach_make_memory_entry_64(MAP_MEM_VM_COPY) */
2425*4d495c6eSApple OSS Distributions mach_vm_size_t size = mte_object_size;
2426*4d495c6eSApple OSS Distributions mach_port_t memory_entry_port;
2427*4d495c6eSApple OSS Distributions kr = mach_make_memory_entry_64(mach_task_self(),
2428*4d495c6eSApple OSS Distributions &size,
2429*4d495c6eSApple OSS Distributions tagged_mte_object_address,
2430*4d495c6eSApple OSS Distributions VM_PROT_DEFAULT | MAP_MEM_VM_COPY | MAP_MEM_USE_DATA_ADDR,
2431*4d495c6eSApple OSS Distributions &memory_entry_port, MEMORY_OBJECT_NULL);
2432*4d495c6eSApple OSS Distributions /* Then the system does not panic... */
2433*4d495c6eSApple OSS Distributions T_ASSERT_MACH_SUCCESS(kr, "mach_make_memory_entry_64(MTE object)");
2434*4d495c6eSApple OSS Distributions }
2435