xref: /xnu-11215.61.5/tests/vm/vm_unaligned_copy_protection_race.c (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3 
4 #include <mach/mach_init.h>
5 #include <mach/vm_map.h>
6 
/* darwintest metadata: this test belongs to the xnu.vm namespace / VM component */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.vm"),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("VM"));
11 
/*
 * State shared between the main test thread and the protection-toggling
 * racer thread (protect_thread).
 */
struct context1 {
	vm_size_t obj_size;               /* size of each test mapping, in bytes */
	vm_address_t e0;                  /* 2nd target mapping; racer toggles its protection */
	dispatch_semaphore_t running_sem; /* signaled once the racer thread is running */
	pthread_mutex_t mtx;              /* gates each round of protection toggling */
	bool done;                        /* set by main thread to stop the racer */
};
19 
/*
 * Racer thread: repeatedly flips the protection of the shared mapping
 * (ctx->e0) between read-only and read-write, to race against the main
 * thread's unaligned vm_read_overwrite().  Each toggle round is bracketed
 * by ctx->mtx so the main thread can pause/resume the racer.
 *
 * arg: a struct context1 * (cast below); returns NULL.
 */
static void *
protect_thread(__unused void *arg)
{
	kern_return_t kr;
	struct context1 *ctx;

	ctx = (struct context1 *)arg;
	/* tell main thread we're ready to run */
	dispatch_semaphore_signal(ctx->running_sem);
	/*
	 * NOTE(review): this reads ctx->done without holding ctx->mtx; it is
	 * re-checked under the lock below before any work is done, but the
	 * unlocked read is technically a data race on a plain bool — confirm
	 * this is intentionally best-effort.
	 */
	while (!ctx->done) {
		/* wait for main thread to be done setting things up */
		pthread_mutex_lock(&ctx->mtx);
		if (ctx->done) {
			break;
		}
		/* make 2nd target mapping (e0) read-only */
		kr = vm_protect(mach_task_self(),
		    ctx->e0,
		    ctx->obj_size,
		    FALSE,             /* set_maximum */
		    VM_PROT_READ);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_protect() RO");
		/* wait a little bit */
		usleep(100);
		/* make it read-write again */
		kr = vm_protect(mach_task_self(),
		    ctx->e0,
		    ctx->obj_size,
		    FALSE,             /* set_maximum */
		    VM_PROT_READ | VM_PROT_WRITE);
		T_QUIET; T_EXPECT_MACH_SUCCESS(kr, " vm_protect() RW");
		/* tell main thread we're done changing protections */
		pthread_mutex_unlock(&ctx->mtx);
		usleep(100);
	}
	return NULL;
}
57 
58 T_DECL(unaligned_write_to_cow_bypass,
59     "Test that unaligned copy respects COW", T_META_TAG_VM_PREFERRED)
60 {
61 	pthread_t th = NULL;
62 	int ret;
63 	kern_return_t kr;
64 	time_t start, duration;
65 	mach_msg_type_number_t cow_read_size;
66 	vm_size_t copied_size;
67 	int loops;
68 	vm_address_t e1, e2, e5;
69 	struct context1 context1, *ctx;
70 	int kern_success = 0, kern_protection_failure = 0, kern_other = 0;
71 
72 	ctx = &context1;
73 	ctx->obj_size = 256 * 1024;
74 	ctx->e0 = 0;
75 	ctx->running_sem = dispatch_semaphore_create(0);
76 	T_QUIET; T_ASSERT_NE(ctx->running_sem, NULL, "dispatch_semaphore_create");
77 	ret = pthread_mutex_init(&ctx->mtx, NULL);
78 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pthread_mutex_init");
79 	ctx->done = false;
80 
81 	pthread_mutex_lock(&ctx->mtx);
82 
83 	/* start racing thread */
84 	ret = pthread_create(&th, NULL, protect_thread, (void *)ctx);
85 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "pthread_create");
86 
87 	/* wait for racing thread to be ready to run */
88 	dispatch_semaphore_wait(ctx->running_sem, DISPATCH_TIME_FOREVER);
89 
90 	duration = 10; /* 10 seconds */
91 	T_LOG("Testing for %ld seconds...", duration);
92 	for (start = time(NULL), loops = 0;
93 	    time(NULL) < start + duration;
94 	    loops++) {
95 		/* reserve space for our 2 contiguous allocations */
96 		e2 = 0;
97 		kr = vm_allocate(mach_task_self(),
98 		    &e2,
99 		    2 * ctx->obj_size,
100 		    VM_FLAGS_ANYWHERE);
101 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate to reserve e2+e0");
102 
103 		/* make 1st allocation in our reserved space */
104 		kr = vm_allocate(mach_task_self(),
105 		    &e2,
106 		    ctx->obj_size,
107 		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(240));
108 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate e2");
109 		/* initialize to 'B' */
110 		memset((char *)e2, 'B', ctx->obj_size);
111 
112 		/* make 2nd allocation in our reserved space */
113 		ctx->e0 = e2 + ctx->obj_size;
114 		kr = vm_allocate(mach_task_self(),
115 		    &ctx->e0,
116 		    ctx->obj_size,
117 		    VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE | VM_MAKE_TAG(241));
118 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate e0");
119 		memset((char *)ctx->e0, 'A', ctx->obj_size);
120 		/* initialize to 'A' */
121 
122 		/* make a COW copy of e0 */
123 		e1 = 0;
124 		kr = vm_read(mach_task_self(),
125 		    ctx->e0,
126 		    ctx->obj_size,
127 		    &e1,
128 		    &cow_read_size);
129 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_read e0->e1");
130 
131 		/* allocate a source buffer */
132 		kr = vm_allocate(mach_task_self(),
133 		    &e5,
134 		    ctx->obj_size,
135 		    VM_FLAGS_ANYWHERE);
136 		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "vm_allocate e5");
137 		/* initialize to 'C' */
138 		memset((char *)e5, 'C', ctx->obj_size);
139 
140 		/* let the racing thread go */
141 		pthread_mutex_unlock(&ctx->mtx);
142 
143 		/* trigger copy_unaligned while racing with other thread */
144 		kr = vm_read_overwrite(mach_task_self(),
145 		    e5,
146 		    ctx->obj_size,
147 		    e2 + 1,
148 		    &copied_size);
149 		T_QUIET; T_ASSERT_TRUE(kr == KERN_SUCCESS || kr == KERN_PROTECTION_FAILURE,
150 		    "vm_read_overwrite kr %d", kr);
151 		switch (kr) {
152 		case KERN_SUCCESS:
153 			/* the target as RW */
154 			kern_success++;
155 			break;
156 		case KERN_PROTECTION_FAILURE:
157 			/* the target was RO */
158 			kern_protection_failure++;
159 			break;
160 		default:
161 			/* should not happen */
162 			kern_other++;
163 			break;
164 		}
165 
166 		/* check that the COW copy of e0 (at e1) was not modified */
167 		T_QUIET; T_ASSERT_EQ(*(char *)e1, 'A', "COW mapping was modified");
168 
169 		/* tell racing thread to stop toggling protections */
170 		pthread_mutex_lock(&ctx->mtx);
171 
172 		/* clean up before next loop */
173 		vm_deallocate(mach_task_self(), ctx->e0, ctx->obj_size);
174 		ctx->e0 = 0;
175 		vm_deallocate(mach_task_self(), e1, ctx->obj_size);
176 		e1 = 0;
177 		vm_deallocate(mach_task_self(), e2, ctx->obj_size);
178 		e2 = 0;
179 		vm_deallocate(mach_task_self(), e5, ctx->obj_size);
180 		e5 = 0;
181 	}
182 
183 	ctx->done = true;
184 	pthread_mutex_unlock(&ctx->mtx);
185 	pthread_join(th, NULL);
186 
187 	T_LOG("vm_read_overwrite: KERN_SUCCESS:%d KERN_PROTECTION_FAILURE:%d other:%d",
188 	    kern_success, kern_protection_failure, kern_other);
189 	T_PASS("Ran %d times in %ld seconds with no failure", loops, duration);
190 }
191