#include <darwintest.h>
#include <darwintest_perf.h>

T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"));

#include <machine/cpu_capabilities.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/wait.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <libkern/OSCacheControl.h>

#include <mach/vm_param.h>
#include <pthread.h>

#include <os/thread_self_restrict.h>

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/task.h>

T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
31*e3723e1fSApple OSS Distributions
32*e3723e1fSApple OSS Distributions #if defined(__arm64__)
33*e3723e1fSApple OSS Distributions /* PAGE_SIZE on ARM64 is an expression derived from a non-const global variable */
34*e3723e1fSApple OSS Distributions #define PAD_SIZE PAGE_MAX_SIZE
35*e3723e1fSApple OSS Distributions #else
36*e3723e1fSApple OSS Distributions #define PAD_SIZE PAGE_MIN_SIZE
37*e3723e1fSApple OSS Distributions #endif
38*e3723e1fSApple OSS Distributions
/* Enumerations */

/* Kind of memory access does_access_fault() should attempt. */
typedef enum _access_type {
	ACCESS_READ,
	ACCESS_WRITE,
} access_type_t;
44*e3723e1fSApple OSS Distributions
/* How access_failed_handler() should recover from an expected SIGBUS. */
typedef enum _fault_strategy {
	FAULT_STRAT_NONE,    /* No recovery; any fault fails the test. */
	FAULT_STRAT_RW_TPRO, /* Toggle this thread's TPRO to RW so the access can retry. */
} fault_strategy_t;
49*e3723e1fSApple OSS Distributions
/* Structures */

/* Per-thread fault bookkeeping, stored in TSD under fault_state_key. */
typedef struct {
	uint64_t fault_count;            /* Number of SIGBUS faults handled so far. */
	fault_strategy_t fault_strategy; /* Recovery action armed for the next fault. */
	bool fault_expected;             /* Set immediately before an access that may fault. */
} fault_state_t;

/* Globals */
static bool key_created = false;      /* True once fault_state_key has been created. */
static pthread_key_t fault_state_key; /* TSD key holding the thread's fault_state_t *. */
60*e3723e1fSApple OSS Distributions
/*
 * The pager will only map entries with TPRO if we need to perform fixups.
 * Otherwise it really is const. Ensure we forge a struct that will require
 * dynamic rebasing.
 */
typedef struct {
	void *reloc;               /* Pointer needing dyld rebase, forcing fixups. */
	uint32_t magic;            /* Sentinel word the tests read and overwrite. */
	char bytes[PAD_SIZE - 12]; /* Pad to exactly one page (12 == sizeof(reloc) + sizeof(magic) on LP64). */
} const_page_t;

/* Layout of the test data: two fixup pages plus one plain read-only page. */
typedef struct {
	const_page_t one;
	const_page_t two;
	char ro[PAD_SIZE];
} const_state_t;

/* Address of a const_page_t's magic word, usable as a read/write target. */
#define MAGIC(state) (void *)&state->magic
79*e3723e1fSApple OSS Distributions
/*
 * Force known data into our __DATA_CONST segment. The pager will be responsible
 * for handling the mapping of this.
 */
__attribute__((section("__DATA_CONST,__pager")))
__attribute__((aligned(PAD_SIZE)))
static const_state_t pager_state = {
	/* Self-referential pointers require rebasing, so these pages need fixups. */
	.one.reloc = &pager_state,
	.two.reloc = &pager_state,
	.one.magic = 0x41414141,
	.two.magic = 0x41414141,
	.ro = "CCCC"
};
93*e3723e1fSApple OSS Distributions
94*e3723e1fSApple OSS Distributions /* Allocate a fault_state_t, and associate it with the current thread. */
95*e3723e1fSApple OSS Distributions static fault_state_t *
fault_state_create(void)96*e3723e1fSApple OSS Distributions fault_state_create(void)
97*e3723e1fSApple OSS Distributions {
98*e3723e1fSApple OSS Distributions fault_state_t * fault_state = malloc(sizeof(fault_state_t));
99*e3723e1fSApple OSS Distributions
100*e3723e1fSApple OSS Distributions if (fault_state) {
101*e3723e1fSApple OSS Distributions fault_state->fault_count = 0;
102*e3723e1fSApple OSS Distributions fault_state->fault_strategy = FAULT_STRAT_NONE;
103*e3723e1fSApple OSS Distributions fault_state->fault_expected = false;
104*e3723e1fSApple OSS Distributions
105*e3723e1fSApple OSS Distributions if (pthread_setspecific(fault_state_key, fault_state)) {
106*e3723e1fSApple OSS Distributions free(fault_state);
107*e3723e1fSApple OSS Distributions fault_state = NULL;
108*e3723e1fSApple OSS Distributions }
109*e3723e1fSApple OSS Distributions }
110*e3723e1fSApple OSS Distributions
111*e3723e1fSApple OSS Distributions return fault_state;
112*e3723e1fSApple OSS Distributions }
113*e3723e1fSApple OSS Distributions
/*
 * Release a fault_state_t previously created by fault_state_create().
 * Passing NULL is a test bug and fails the test immediately.
 */
static void
fault_state_destroy(void * fault_state)
{
	if (!fault_state) {
		T_ASSERT_FAIL("Attempted to fault_state_destroy NULL");
	}

	free(fault_state);
}
124*e3723e1fSApple OSS Distributions
125*e3723e1fSApple OSS Distributions /*
126*e3723e1fSApple OSS Distributions * A signal handler that attempts to resolve anticipated faults through use of
127*e3723e1fSApple OSS Distributions * the os_thread_self_restrict_rwx functions.
128*e3723e1fSApple OSS Distributions */
129*e3723e1fSApple OSS Distributions static void
access_failed_handler(int signum)130*e3723e1fSApple OSS Distributions access_failed_handler(int signum)
131*e3723e1fSApple OSS Distributions {
132*e3723e1fSApple OSS Distributions fault_state_t * fault_state;
133*e3723e1fSApple OSS Distributions
134*e3723e1fSApple OSS Distributions /* This handler should ONLY handle SIGBUS. */
135*e3723e1fSApple OSS Distributions if (signum != SIGBUS) {
136*e3723e1fSApple OSS Distributions T_ASSERT_FAIL("Unexpected signal sent to handler");
137*e3723e1fSApple OSS Distributions }
138*e3723e1fSApple OSS Distributions
139*e3723e1fSApple OSS Distributions if (!(fault_state = pthread_getspecific(fault_state_key))) {
140*e3723e1fSApple OSS Distributions T_ASSERT_FAIL("Failed to retrieve fault state");
141*e3723e1fSApple OSS Distributions }
142*e3723e1fSApple OSS Distributions
143*e3723e1fSApple OSS Distributions if (!(fault_state->fault_expected)) {
144*e3723e1fSApple OSS Distributions T_ASSERT_FAIL("Unexpected fault taken");
145*e3723e1fSApple OSS Distributions }
146*e3723e1fSApple OSS Distributions
147*e3723e1fSApple OSS Distributions /* We should not see a second fault. */
148*e3723e1fSApple OSS Distributions fault_state->fault_expected = false;
149*e3723e1fSApple OSS Distributions
150*e3723e1fSApple OSS Distributions switch (fault_state->fault_strategy) {
151*e3723e1fSApple OSS Distributions case FAULT_STRAT_NONE:
152*e3723e1fSApple OSS Distributions T_ASSERT_FAIL("No fault strategy");
153*e3723e1fSApple OSS Distributions
154*e3723e1fSApple OSS Distributions /* Just in case we try to do something different. */
155*e3723e1fSApple OSS Distributions break;
156*e3723e1fSApple OSS Distributions case FAULT_STRAT_RW_TPRO:
157*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
158*e3723e1fSApple OSS Distributions break;
159*e3723e1fSApple OSS Distributions }
160*e3723e1fSApple OSS Distributions
161*e3723e1fSApple OSS Distributions fault_state->fault_count++;
162*e3723e1fSApple OSS Distributions }
163*e3723e1fSApple OSS Distributions
164*e3723e1fSApple OSS Distributions /*
165*e3723e1fSApple OSS Distributions * Attempt the specified access; if the access faults, this will return true;
166*e3723e1fSApple OSS Distributions * otherwise, it will return false.
167*e3723e1fSApple OSS Distributions */
168*e3723e1fSApple OSS Distributions static bool
does_access_fault(access_type_t access_type,void * addr,uint32_t value)169*e3723e1fSApple OSS Distributions does_access_fault(access_type_t access_type, void * addr, uint32_t value)
170*e3723e1fSApple OSS Distributions {
171*e3723e1fSApple OSS Distributions uint64_t old_fault_count;
172*e3723e1fSApple OSS Distributions uint64_t new_fault_count;
173*e3723e1fSApple OSS Distributions
174*e3723e1fSApple OSS Distributions fault_state_t * fault_state;
175*e3723e1fSApple OSS Distributions
176*e3723e1fSApple OSS Distributions struct sigaction old_action; /* Save area for any existing action. */
177*e3723e1fSApple OSS Distributions struct sigaction new_action; /* The action we wish to install for SIGBUS. */
178*e3723e1fSApple OSS Distributions
179*e3723e1fSApple OSS Distributions bool retval = false;
180*e3723e1fSApple OSS Distributions
181*e3723e1fSApple OSS Distributions new_action.sa_handler = access_failed_handler; /* A handler for write failures. */
182*e3723e1fSApple OSS Distributions new_action.sa_mask = 0; /* Don't modify the mask. */
183*e3723e1fSApple OSS Distributions new_action.sa_flags = 0; /* Flags? Who needs those? */
184*e3723e1fSApple OSS Distributions
185*e3723e1fSApple OSS Distributions if (addr == NULL) {
186*e3723e1fSApple OSS Distributions T_ASSERT_FAIL("Access attempted against NULL");
187*e3723e1fSApple OSS Distributions }
188*e3723e1fSApple OSS Distributions
189*e3723e1fSApple OSS Distributions if (!(fault_state = pthread_getspecific(fault_state_key))) {
190*e3723e1fSApple OSS Distributions T_ASSERT_FAIL("Failed to retrieve fault state");
191*e3723e1fSApple OSS Distributions }
192*e3723e1fSApple OSS Distributions
193*e3723e1fSApple OSS Distributions old_fault_count = fault_state->fault_count;
194*e3723e1fSApple OSS Distributions
195*e3723e1fSApple OSS Distributions /* Install a handler so that we can catch SIGBUS. */
196*e3723e1fSApple OSS Distributions sigaction(SIGBUS, &new_action, &old_action);
197*e3723e1fSApple OSS Distributions
198*e3723e1fSApple OSS Distributions /* Perform the requested operation. */
199*e3723e1fSApple OSS Distributions switch (access_type) {
200*e3723e1fSApple OSS Distributions case ACCESS_READ:
201*e3723e1fSApple OSS Distributions fault_state->fault_strategy = FAULT_STRAT_RW_TPRO;
202*e3723e1fSApple OSS Distributions fault_state->fault_expected = true;
203*e3723e1fSApple OSS Distributions
204*e3723e1fSApple OSS Distributions __sync_synchronize();
205*e3723e1fSApple OSS Distributions
206*e3723e1fSApple OSS Distributions #if defined(__arm64__)
207*e3723e1fSApple OSS Distributions uint8_t a = *((volatile uint8_t *)addr);
208*e3723e1fSApple OSS Distributions #endif
209*e3723e1fSApple OSS Distributions __sync_synchronize();
210*e3723e1fSApple OSS Distributions
211*e3723e1fSApple OSS Distributions fault_state->fault_expected = false;
212*e3723e1fSApple OSS Distributions fault_state->fault_strategy = FAULT_STRAT_NONE;
213*e3723e1fSApple OSS Distributions
214*e3723e1fSApple OSS Distributions break;
215*e3723e1fSApple OSS Distributions
216*e3723e1fSApple OSS Distributions case ACCESS_WRITE:
217*e3723e1fSApple OSS Distributions fault_state->fault_strategy = FAULT_STRAT_RW_TPRO;
218*e3723e1fSApple OSS Distributions fault_state->fault_expected = true;
219*e3723e1fSApple OSS Distributions
220*e3723e1fSApple OSS Distributions __sync_synchronize();
221*e3723e1fSApple OSS Distributions
222*e3723e1fSApple OSS Distributions *((volatile uint32_t *)addr) = value;
223*e3723e1fSApple OSS Distributions
224*e3723e1fSApple OSS Distributions __sync_synchronize();
225*e3723e1fSApple OSS Distributions
226*e3723e1fSApple OSS Distributions fault_state->fault_expected = false;
227*e3723e1fSApple OSS Distributions fault_state->fault_strategy = FAULT_STRAT_NONE;
228*e3723e1fSApple OSS Distributions
229*e3723e1fSApple OSS Distributions break;
230*e3723e1fSApple OSS Distributions }
231*e3723e1fSApple OSS Distributions
232*e3723e1fSApple OSS Distributions /* Restore the old SIGBUS handler. */
233*e3723e1fSApple OSS Distributions sigaction(SIGBUS, &old_action, NULL);
234*e3723e1fSApple OSS Distributions
235*e3723e1fSApple OSS Distributions new_fault_count = fault_state->fault_count;
236*e3723e1fSApple OSS Distributions
237*e3723e1fSApple OSS Distributions if (new_fault_count > old_fault_count) {
238*e3723e1fSApple OSS Distributions /* Indicate that we took a fault. */
239*e3723e1fSApple OSS Distributions retval = true;
240*e3723e1fSApple OSS Distributions }
241*e3723e1fSApple OSS Distributions
242*e3723e1fSApple OSS Distributions return retval;
243*e3723e1fSApple OSS Distributions }
244*e3723e1fSApple OSS Distributions
245*e3723e1fSApple OSS Distributions static bool
does_read_fault(void * addr)246*e3723e1fSApple OSS Distributions does_read_fault(void * addr)
247*e3723e1fSApple OSS Distributions {
248*e3723e1fSApple OSS Distributions return does_access_fault(ACCESS_READ, addr, 0);
249*e3723e1fSApple OSS Distributions }
250*e3723e1fSApple OSS Distributions
251*e3723e1fSApple OSS Distributions static bool
does_write_fault(void * addr,uint32_t value)252*e3723e1fSApple OSS Distributions does_write_fault(void * addr, uint32_t value)
253*e3723e1fSApple OSS Distributions {
254*e3723e1fSApple OSS Distributions return does_access_fault(ACCESS_WRITE, addr, value);
255*e3723e1fSApple OSS Distributions }
256*e3723e1fSApple OSS Distributions
/*
 * Query vm.pmap_tpro_pagers to learn whether this kernel maps pager-backed
 * TPRO regions. A failing sysctl leaves `supported` at 0, which reads as
 * "unsupported".
 */
static bool
has_pager_support(void)
{
	uint32_t supported = 0;
	size_t len = sizeof(supported);

	(void)sysctlbyname("vm.pmap_tpro_pagers", &supported, &len, NULL, 0);

	return supported != 0;
}
267*e3723e1fSApple OSS Distributions
268*e3723e1fSApple OSS Distributions static void
cleanup(void)269*e3723e1fSApple OSS Distributions cleanup(void)
270*e3723e1fSApple OSS Distributions {
271*e3723e1fSApple OSS Distributions fault_state_t * fault_state;
272*e3723e1fSApple OSS Distributions
273*e3723e1fSApple OSS Distributions if (!(fault_state = pthread_getspecific(fault_state_key))) {
274*e3723e1fSApple OSS Distributions T_ASSERT_FAIL("Failed to retrieve fault state");
275*e3723e1fSApple OSS Distributions
276*e3723e1fSApple OSS Distributions T_ASSERT_POSIX_ZERO(pthread_setspecific(fault_state_key, NULL), "Remove fault_state");
277*e3723e1fSApple OSS Distributions fault_state_destroy(fault_state);
278*e3723e1fSApple OSS Distributions }
279*e3723e1fSApple OSS Distributions
280*e3723e1fSApple OSS Distributions if (key_created) {
281*e3723e1fSApple OSS Distributions T_ASSERT_POSIX_ZERO(pthread_key_delete(fault_state_key), "Delete fault state key");
282*e3723e1fSApple OSS Distributions }
283*e3723e1fSApple OSS Distributions
284*e3723e1fSApple OSS Distributions return;
285*e3723e1fSApple OSS Distributions }
286*e3723e1fSApple OSS Distributions
287*e3723e1fSApple OSS Distributions static void
288*e3723e1fSApple OSS Distributions thread_self_restrict_test(void (^test)(void))
289*e3723e1fSApple OSS Distributions {
290*e3723e1fSApple OSS Distributions int err = 0;
291*e3723e1fSApple OSS Distributions
292*e3723e1fSApple OSS Distributions T_SETUPBEGIN;
293*e3723e1fSApple OSS Distributions T_ATEND(cleanup);
294*e3723e1fSApple OSS Distributions
295*e3723e1fSApple OSS Distributions /* Set up the necessary state for the test. */
296*e3723e1fSApple OSS Distributions err = pthread_key_create(&fault_state_key, fault_state_destroy);
297*e3723e1fSApple OSS Distributions T_ASSERT_POSIX_ZERO(err, 0, "Create pthread key");
298*e3723e1fSApple OSS Distributions key_created = true;
299*e3723e1fSApple OSS Distributions
300*e3723e1fSApple OSS Distributions T_ASSERT_NOTNULL(fault_state_create(), "Create fault state");
301*e3723e1fSApple OSS Distributions T_SETUPEND;
302*e3723e1fSApple OSS Distributions
303*e3723e1fSApple OSS Distributions test();
304*e3723e1fSApple OSS Distributions }
305*e3723e1fSApple OSS Distributions
306*e3723e1fSApple OSS Distributions static void
fork_child_test(const_page_t * state)307*e3723e1fSApple OSS Distributions fork_child_test(const_page_t *state)
308*e3723e1fSApple OSS Distributions {
309*e3723e1fSApple OSS Distributions pid_t pid;
310*e3723e1fSApple OSS Distributions int statloc;
311*e3723e1fSApple OSS Distributions
312*e3723e1fSApple OSS Distributions pid = fork();
313*e3723e1fSApple OSS Distributions if (pid == 0) {
314*e3723e1fSApple OSS Distributions T_EXPECT_EQ(state->magic, 0x45454545, "Expected magic on fork");
315*e3723e1fSApple OSS Distributions
316*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
317*e3723e1fSApple OSS Distributions T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), true, "TPRO region configured as read-write in child");
318*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault((void *)&state->bytes, 0x47474747), 0, "write to pager backed memory in child (no fault)");
319*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x46464646), 0, "write to pager backed memory in child (no fault)");
320*e3723e1fSApple OSS Distributions exit(0);
321*e3723e1fSApple OSS Distributions }
322*e3723e1fSApple OSS Distributions
323*e3723e1fSApple OSS Distributions if (pid < 0) {
324*e3723e1fSApple OSS Distributions T_ASSERT_POSIX_SUCCESS(pid, "fork");
325*e3723e1fSApple OSS Distributions }
326*e3723e1fSApple OSS Distributions
327*e3723e1fSApple OSS Distributions waitpid(pid, &statloc, 0);
328*e3723e1fSApple OSS Distributions }
329*e3723e1fSApple OSS Distributions
330*e3723e1fSApple OSS Distributions static void
pager_test(const_page_t * state)331*e3723e1fSApple OSS Distributions pager_test(const_page_t *state)
332*e3723e1fSApple OSS Distributions {
333*e3723e1fSApple OSS Distributions kern_return_t kr;
334*e3723e1fSApple OSS Distributions uint32_t pre;
335*e3723e1fSApple OSS Distributions vm_prot_t curprot, maxprot;
336*e3723e1fSApple OSS Distributions mach_vm_address_t addr = 0;
337*e3723e1fSApple OSS Distributions const_page_t *copy_state = NULL;
338*e3723e1fSApple OSS Distributions mach_port_t cow_port = MACH_PORT_NULL;
339*e3723e1fSApple OSS Distributions memory_object_size_t me_size = PAGE_SIZE;
340*e3723e1fSApple OSS Distributions
341*e3723e1fSApple OSS Distributions /*
342*e3723e1fSApple OSS Distributions * Validate our initial status quo. TPRO permissions should be RO,
343*e3723e1fSApple OSS Distributions * so we should be able to read from our pager backed mapping but
344*e3723e1fSApple OSS Distributions * should fault when trying to write to it.
345*e3723e1fSApple OSS Distributions */
346*e3723e1fSApple OSS Distributions T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO region starts read-only");
347*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_read_fault(MAGIC(state)), 0, "read from pager backed memory");
348*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x43434343), 1, "write to pager backed memory (detect fault)");
349*e3723e1fSApple OSS Distributions
350*e3723e1fSApple OSS Distributions /*
351*e3723e1fSApple OSS Distributions * Toggle permissions to RW and attempt a write. We should succeed.
352*e3723e1fSApple OSS Distributions */
353*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
354*e3723e1fSApple OSS Distributions T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), true, "TPRO region configured as read-write");
355*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x44444444), 0, "write to pager backed memory (no fault)");
356*e3723e1fSApple OSS Distributions
357*e3723e1fSApple OSS Distributions /*
358*e3723e1fSApple OSS Distributions * Toggle permissions to RO and attempt a write. We should detect
359*e3723e1fSApple OSS Distributions * the fault
360*e3723e1fSApple OSS Distributions */
361*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
362*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x45454545), 1, "write to pager backed memory (detect fault)");
363*e3723e1fSApple OSS Distributions
364*e3723e1fSApple OSS Distributions /*
365*e3723e1fSApple OSS Distributions * Fork a child process and ensure that writes into the pager backed
366*e3723e1fSApple OSS Distributions * regions are not observed by the parent. They should now be COW.
367*e3723e1fSApple OSS Distributions */
368*e3723e1fSApple OSS Distributions pre = state->magic;
369*e3723e1fSApple OSS Distributions fork_child_test(state);
370*e3723e1fSApple OSS Distributions T_EXPECT_EQ(pre, state->magic, "write from child should not be observed");
371*e3723e1fSApple OSS Distributions
372*e3723e1fSApple OSS Distributions /*
373*e3723e1fSApple OSS Distributions * Ensure that if we remap the target region in a shared manner that we
374*e3723e1fSApple OSS Distributions * inherit TPRO. Remapping should be successful but we still rely on
375*e3723e1fSApple OSS Distributions * TPRO permissions to toggle r--/rw-
376*e3723e1fSApple OSS Distributions */
377*e3723e1fSApple OSS Distributions kr = mach_vm_remap(mach_task_self(),
378*e3723e1fSApple OSS Distributions &addr,
379*e3723e1fSApple OSS Distributions PAGE_SIZE,
380*e3723e1fSApple OSS Distributions 0, /* mask */
381*e3723e1fSApple OSS Distributions VM_FLAGS_ANYWHERE,
382*e3723e1fSApple OSS Distributions mach_task_self(),
383*e3723e1fSApple OSS Distributions (mach_vm_address_t)state,
384*e3723e1fSApple OSS Distributions FALSE, /* copy */
385*e3723e1fSApple OSS Distributions &curprot,
386*e3723e1fSApple OSS Distributions &maxprot,
387*e3723e1fSApple OSS Distributions VM_INHERIT_DEFAULT);
388*e3723e1fSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(kr, "mach_vm_remap(SHARED)");
389*e3723e1fSApple OSS Distributions copy_state = (const_page_t *)addr;
390*e3723e1fSApple OSS Distributions
391*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
392*e3723e1fSApple OSS Distributions T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO configured as read-only");
393*e3723e1fSApple OSS Distributions T_EXPECT_EQ(curprot, VM_PROT_READ, "TPRO region should be VM_PROT_READ");
394*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x46464646), 1, "write to remapped region (detect fault)");
395*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
396*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x46464646), 0, "write to remapped region (no fault)");
397*e3723e1fSApple OSS Distributions T_EXPECT_EQ(0x46464646, state->magic, "write into copied region should be observed");
398*e3723e1fSApple OSS Distributions
399*e3723e1fSApple OSS Distributions /*
400*e3723e1fSApple OSS Distributions * Ensure that if we remap the region that we do not observe writes to
401*e3723e1fSApple OSS Distributions * the new copy in __DATA_CONST itself.
402*e3723e1fSApple OSS Distributions */
403*e3723e1fSApple OSS Distributions kr = mach_vm_remap(mach_task_self(),
404*e3723e1fSApple OSS Distributions (mach_vm_address_t *)©_state,
405*e3723e1fSApple OSS Distributions PAGE_SIZE,
406*e3723e1fSApple OSS Distributions 0, /* mask */
407*e3723e1fSApple OSS Distributions VM_FLAGS_ANYWHERE,
408*e3723e1fSApple OSS Distributions mach_task_self(),
409*e3723e1fSApple OSS Distributions (mach_vm_address_t)state,
410*e3723e1fSApple OSS Distributions TRUE, /* copy */
411*e3723e1fSApple OSS Distributions &curprot,
412*e3723e1fSApple OSS Distributions &maxprot,
413*e3723e1fSApple OSS Distributions VM_INHERIT_DEFAULT);
414*e3723e1fSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(kr, "mach_vm_remap(COPY)");
415*e3723e1fSApple OSS Distributions
416*e3723e1fSApple OSS Distributions /*
417*e3723e1fSApple OSS Distributions * Toggle TPRO RW and write to the new copied region
418*e3723e1fSApple OSS Distributions */
419*e3723e1fSApple OSS Distributions pre = state->magic;
420*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
421*e3723e1fSApple OSS Distributions T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), true, "TPRO region configured as read-write");
422*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x46464646), 0, "write to pager backed memory (no fault)");
423*e3723e1fSApple OSS Distributions T_EXPECT_EQ(pre, state->magic, "write into copied region should not be observed");
424*e3723e1fSApple OSS Distributions
425*e3723e1fSApple OSS Distributions /*
426*e3723e1fSApple OSS Distributions * Make a memory entry for our target region and attempt to map it in
427*e3723e1fSApple OSS Distributions * in a shared fashion. We should succeed but it should transparently
428*e3723e1fSApple OSS Distributions * copy the target VM object as extracting TPRO VM entries will fail.
429*e3723e1fSApple OSS Distributions * Writes to the new region should therefore not be observed.
430*e3723e1fSApple OSS Distributions */
431*e3723e1fSApple OSS Distributions me_size = PAGE_SIZE;
432*e3723e1fSApple OSS Distributions kr = mach_make_memory_entry_64(mach_task_self(),
433*e3723e1fSApple OSS Distributions &me_size,
434*e3723e1fSApple OSS Distributions (mach_vm_address_t)state,
435*e3723e1fSApple OSS Distributions MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
436*e3723e1fSApple OSS Distributions &cow_port,
437*e3723e1fSApple OSS Distributions MACH_PORT_NULL);
438*e3723e1fSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(kr, "mach_make_memory_entry_64(MAP_MEM_VM_SHARE)");
439*e3723e1fSApple OSS Distributions
440*e3723e1fSApple OSS Distributions pre = state->magic;
441*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x48484849), 0, "write to mapped copy region (no fault)");
442*e3723e1fSApple OSS Distributions T_EXPECT_EQ(pre, state->magic, "write into copied region should not be observed");
443*e3723e1fSApple OSS Distributions
444*e3723e1fSApple OSS Distributions copy_state = NULL;
445*e3723e1fSApple OSS Distributions kr = mach_vm_map(mach_task_self(),
446*e3723e1fSApple OSS Distributions (mach_vm_address_t *)©_state,
447*e3723e1fSApple OSS Distributions PAGE_SIZE,
448*e3723e1fSApple OSS Distributions 0, /* mask */
449*e3723e1fSApple OSS Distributions VM_FLAGS_ANYWHERE,
450*e3723e1fSApple OSS Distributions cow_port,
451*e3723e1fSApple OSS Distributions 0, /* offset */
452*e3723e1fSApple OSS Distributions TRUE, /* copy */
453*e3723e1fSApple OSS Distributions VM_PROT_READ | VM_PROT_WRITE,
454*e3723e1fSApple OSS Distributions VM_PROT_READ | VM_PROT_WRITE,
455*e3723e1fSApple OSS Distributions VM_INHERIT_DEFAULT);
456*e3723e1fSApple OSS Distributions T_EXPECT_POSIX_SUCCESS(kr, "mach_vm_map(cow_port)");
457*e3723e1fSApple OSS Distributions
458*e3723e1fSApple OSS Distributions /*
459*e3723e1fSApple OSS Distributions * Pages of the copy will no longer be mapped in as TPRO. Both
460*e3723e1fSApple OSS Distributions * read/writes should work even with TPRO toggled RO.
461*e3723e1fSApple OSS Distributions */
462*e3723e1fSApple OSS Distributions pre = state->magic;
463*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
464*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x48484848), 0, "write to mapped copy region (no fault)");
465*e3723e1fSApple OSS Distributions T_EXPECT_EQ(pre, state->magic, "write into copied region should not be observed");
466*e3723e1fSApple OSS Distributions
467*e3723e1fSApple OSS Distributions /*
468*e3723e1fSApple OSS Distributions * We've explored a number of ways to perform copies on the target
469*e3723e1fSApple OSS Distributions * objects in __DATA_CONST. Our first target page (&pager_state.one)
470*e3723e1fSApple OSS Distributions * should now be marked RO without TPRO permissions to handle any
471*e3723e1fSApple OSS Distributions * incoming write faults. Write to it directly again to ensure we
472*e3723e1fSApple OSS Distributions * fault back in with TPRO permissions.
473*e3723e1fSApple OSS Distributions */
474*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
475*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x49494949), 1, "write to pager backed memory (detect fault)");
476*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
477*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x4a4a4a4a), 0, "write to pager backed memory (no fault)");
478*e3723e1fSApple OSS Distributions
479*e3723e1fSApple OSS Distributions /*
480*e3723e1fSApple OSS Distributions * Now we attempt to have the page paged out. On systems which support the
481*e3723e1fSApple OSS Distributions * compressor, we'll get paged out/compressed. On fault we should
482*e3723e1fSApple OSS Distributions * be pmapped back in with TPRO permissions.
483*e3723e1fSApple OSS Distributions */
484*e3723e1fSApple OSS Distributions mach_vm_behavior_set(mach_task_self(), (mach_vm_address_t)state, PAGE_SIZE, VM_BEHAVIOR_PAGEOUT);
485*e3723e1fSApple OSS Distributions
486*e3723e1fSApple OSS Distributions /*
487*e3723e1fSApple OSS Distributions * Can verify in debugger at this point that page(s) have been
488*e3723e1fSApple OSS Distributions * paged out. If compressor pager is available the page should
489*e3723e1fSApple OSS Distributions * not be resident and compressor pager should be tied to the
490*e3723e1fSApple OSS Distributions * top level VM object.
491*e3723e1fSApple OSS Distributions */
492*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
493*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x49494949), 1, "write to pager backed memory after pageout (detect fault)");
494*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
495*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x4a4a4a4a), 0, "write to pager backed memory after pageout (no fault)");
496*e3723e1fSApple OSS Distributions
497*e3723e1fSApple OSS Distributions /*
498*e3723e1fSApple OSS Distributions * Try and reprotect the region. We should fail
499*e3723e1fSApple OSS Distributions */
500*e3723e1fSApple OSS Distributions kr = vm_protect(mach_task_self(), (mach_vm_address_t)state, PAGE_SIZE, FALSE, VM_PROT_DEFAULT);
501*e3723e1fSApple OSS Distributions T_EXPECT_POSIX_ERROR(kr, KERN_PROTECTION_FAILURE, "vm_protect(RW) should fail");
502*e3723e1fSApple OSS Distributions
503*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
504*e3723e1fSApple OSS Distributions }
505*e3723e1fSApple OSS Distributions
506*e3723e1fSApple OSS Distributions static void
mmap_test(const_page_t * state)507*e3723e1fSApple OSS Distributions mmap_test(const_page_t *state)
508*e3723e1fSApple OSS Distributions {
509*e3723e1fSApple OSS Distributions void *mapping;
510*e3723e1fSApple OSS Distributions
511*e3723e1fSApple OSS Distributions /*
512*e3723e1fSApple OSS Distributions * Validate our initial status quo. TPRO permissions should be RO,
513*e3723e1fSApple OSS Distributions * so we should be able to read from our pager backed mapping but
514*e3723e1fSApple OSS Distributions * should fault when trying to write to it.
515*e3723e1fSApple OSS Distributions */
516*e3723e1fSApple OSS Distributions T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO region starts read-only");
517*e3723e1fSApple OSS Distributions
518*e3723e1fSApple OSS Distributions /*
519*e3723e1fSApple OSS Distributions * Attempt to mmap a fixed allocation over our TPRO region.
520*e3723e1fSApple OSS Distributions * TPRO region should be permanent and should disallow being
521*e3723e1fSApple OSS Distributions * overwritten.
522*e3723e1fSApple OSS Distributions */
523*e3723e1fSApple OSS Distributions mapping = mmap(state, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
524*e3723e1fSApple OSS Distributions T_ASSERT_EQ(mapping, MAP_FAILED, "Map over TPRO range should fail");
525*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
526*e3723e1fSApple OSS Distributions }
527*e3723e1fSApple OSS Distributions
528*e3723e1fSApple OSS Distributions static void
vm_allocate_test(const_page_t * state)529*e3723e1fSApple OSS Distributions vm_allocate_test(const_page_t *state)
530*e3723e1fSApple OSS Distributions {
531*e3723e1fSApple OSS Distributions kern_return_t kr;
532*e3723e1fSApple OSS Distributions mach_vm_address_t addr = (mach_vm_address_t)state;
533*e3723e1fSApple OSS Distributions vm_region_basic_info_data_64_t info;
534*e3723e1fSApple OSS Distributions mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
535*e3723e1fSApple OSS Distributions mach_vm_size_t size = PAGE_SIZE;
536*e3723e1fSApple OSS Distributions mach_port_t unused = MACH_PORT_NULL;
537*e3723e1fSApple OSS Distributions
538*e3723e1fSApple OSS Distributions /*
539*e3723e1fSApple OSS Distributions * Validate our initial status quo. TPRO permissions should be RO,
540*e3723e1fSApple OSS Distributions * so we should be able to read from our pager backed mapping but
541*e3723e1fSApple OSS Distributions * should fault when trying to write to it.
542*e3723e1fSApple OSS Distributions */
543*e3723e1fSApple OSS Distributions T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO region starts read-only");
544*e3723e1fSApple OSS Distributions
545*e3723e1fSApple OSS Distributions /*
546*e3723e1fSApple OSS Distributions * Deallocate the TPRO region. This should succeed but leave the region
547*e3723e1fSApple OSS Distributions * intact with no permissions. Further allocations should not be able to
548*e3723e1fSApple OSS Distributions * obtain the same address.
549*e3723e1fSApple OSS Distributions */
550*e3723e1fSApple OSS Distributions kr = mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)state, PAGE_SIZE);
551*e3723e1fSApple OSS Distributions T_EXPECT_POSIX_ERROR(kr, KERN_SUCCESS, "vm_deallocate should succeed");
552*e3723e1fSApple OSS Distributions
553*e3723e1fSApple OSS Distributions kr = mach_vm_allocate(mach_task_self(), (mach_vm_address_t *)&addr, PAGE_SIZE, VM_FLAGS_FIXED);
554*e3723e1fSApple OSS Distributions T_EXPECT_POSIX_ERROR(kr, KERN_NO_SPACE, "vm_allocate should fail with KERN_NO_SPACE");
555*e3723e1fSApple OSS Distributions
556*e3723e1fSApple OSS Distributions /*
557*e3723e1fSApple OSS Distributions * Lookup the target region and confirm that all permissions have been
558*e3723e1fSApple OSS Distributions * removed.
559*e3723e1fSApple OSS Distributions */
560*e3723e1fSApple OSS Distributions kr = mach_vm_region(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused);
561*e3723e1fSApple OSS Distributions T_QUIET; T_EXPECT_POSIX_ERROR(kr, KERN_SUCCESS, "mach_vm_region should succeed");
562*e3723e1fSApple OSS Distributions
563*e3723e1fSApple OSS Distributions T_ASSERT_EQ(info.protection, VM_PROT_NONE, "Entry should have no permissions");
564*e3723e1fSApple OSS Distributions T_ASSERT_EQ(info.max_protection, VM_PROT_NONE, "Entry should have no permissions");
565*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
566*e3723e1fSApple OSS Distributions }
567*e3723e1fSApple OSS Distributions
568*e3723e1fSApple OSS Distributions T_DECL(thread_self_restrict_pagers,
569*e3723e1fSApple OSS Distributions "Verify that the TPRO pager interfaces work correctly", T_META_TAG_VM_PREFERRED)
570*e3723e1fSApple OSS Distributions {
571*e3723e1fSApple OSS Distributions #if __arm64__
572*e3723e1fSApple OSS Distributions /* Check to see that we support the necessary hardware features. */
573*e3723e1fSApple OSS Distributions if (!os_thread_self_restrict_tpro_is_supported() || !has_pager_support()) {
574*e3723e1fSApple OSS Distributions T_SKIP("no hardware TPRO support enabled on this system");
575*e3723e1fSApple OSS Distributions }
576*e3723e1fSApple OSS Distributions
577*e3723e1fSApple OSS Distributions thread_self_restrict_test(^{
578*e3723e1fSApple OSS Distributions pager_test(&pager_state.one);
579*e3723e1fSApple OSS Distributions
580*e3723e1fSApple OSS Distributions /*
581*e3723e1fSApple OSS Distributions * Ensure that touching the second pager supported page exhibits
582*e3723e1fSApple OSS Distributions * identical behaviour in order to validate the transitions between
583*e3723e1fSApple OSS Distributions * VM entry & copy object chains.
584*e3723e1fSApple OSS Distributions */
585*e3723e1fSApple OSS Distributions pager_test(&pager_state.two);
586*e3723e1fSApple OSS Distributions
587*e3723e1fSApple OSS Distributions /*
588*e3723e1fSApple OSS Distributions * Try and write to a normal __DATA_CONST page that isn't backed by
589*e3723e1fSApple OSS Distributions * the dyld pager. The kernel will have mapped this directly but
590*e3723e1fSApple OSS Distributions * should still maintain TPRO protection.
591*e3723e1fSApple OSS Distributions */
592*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_ro();
593*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(&pager_state.ro[0], 0x41414141), 1, "write to __DATA_CONST should succeed (no fault)");
594*e3723e1fSApple OSS Distributions os_thread_self_restrict_tpro_to_rw();
595*e3723e1fSApple OSS Distributions T_EXPECT_EQ(does_write_fault(&pager_state.ro[0], 0x41414141), 0, "write to __DATA_CONST should fail (detect fault)");
596*e3723e1fSApple OSS Distributions });
597*e3723e1fSApple OSS Distributions #else
598*e3723e1fSApple OSS Distributions T_SKIP("thread_self_restrict_pagers not supported on this system");
599*e3723e1fSApple OSS Distributions #endif /* __arm64__ */
600*e3723e1fSApple OSS Distributions }
601*e3723e1fSApple OSS Distributions
602*e3723e1fSApple OSS Distributions T_DECL(thread_self_restrict_tpro_permanent,
603*e3723e1fSApple OSS Distributions "Verify that TPRO VM entries are permanent")
604*e3723e1fSApple OSS Distributions {
605*e3723e1fSApple OSS Distributions #if __arm64__
606*e3723e1fSApple OSS Distributions /* Check to see that we support the necessary hardware features. */
607*e3723e1fSApple OSS Distributions if (!os_thread_self_restrict_tpro_is_supported() || !has_pager_support()) {
608*e3723e1fSApple OSS Distributions T_SKIP("no hardware TPRO support enabled on this system");
609*e3723e1fSApple OSS Distributions }
610*e3723e1fSApple OSS Distributions
611*e3723e1fSApple OSS Distributions thread_self_restrict_test(^{
612*e3723e1fSApple OSS Distributions mmap_test(&pager_state.one);
613*e3723e1fSApple OSS Distributions vm_allocate_test(&pager_state.two);
614*e3723e1fSApple OSS Distributions });
615*e3723e1fSApple OSS Distributions #else
616*e3723e1fSApple OSS Distributions T_SKIP("thread_self_restrict_tpro_permanent not supported on this system");
617*e3723e1fSApple OSS Distributions #endif /* __arm64__ */
618*e3723e1fSApple OSS Distributions }
619