xref: /xnu-12377.41.6/tests/os_thread_self_restrict_pagers.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1*bbb1b6f9SApple OSS Distributions #include <darwintest.h>
2*bbb1b6f9SApple OSS Distributions #include <darwintest_perf.h>
3*bbb1b6f9SApple OSS Distributions 
4*bbb1b6f9SApple OSS Distributions T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"));
5*bbb1b6f9SApple OSS Distributions 
6*bbb1b6f9SApple OSS Distributions #include <machine/cpu_capabilities.h>
7*bbb1b6f9SApple OSS Distributions #include <sys/mman.h>
8*bbb1b6f9SApple OSS Distributions #include <errno.h>
9*bbb1b6f9SApple OSS Distributions #include <fcntl.h>
10*bbb1b6f9SApple OSS Distributions #include <stdint.h>
11*bbb1b6f9SApple OSS Distributions #include <libkern/OSCacheControl.h>
12*bbb1b6f9SApple OSS Distributions #include <unistd.h>
13*bbb1b6f9SApple OSS Distributions #include <signal.h>
14*bbb1b6f9SApple OSS Distributions #include <stdlib.h>
15*bbb1b6f9SApple OSS Distributions #include <sys/sysctl.h>
16*bbb1b6f9SApple OSS Distributions 
17*bbb1b6f9SApple OSS Distributions #include <mach/vm_param.h>
18*bbb1b6f9SApple OSS Distributions #include <pthread.h>
19*bbb1b6f9SApple OSS Distributions 
20*bbb1b6f9SApple OSS Distributions #include <os/thread_self_restrict.h>
21*bbb1b6f9SApple OSS Distributions 
22*bbb1b6f9SApple OSS Distributions #include <mach/mach.h>
23*bbb1b6f9SApple OSS Distributions #include <mach/mach_error.h>
24*bbb1b6f9SApple OSS Distributions #include <mach/mach_init.h>
25*bbb1b6f9SApple OSS Distributions #include <mach/mach_port.h>
26*bbb1b6f9SApple OSS Distributions #include <mach/mach_vm.h>
27*bbb1b6f9SApple OSS Distributions #include <mach/vm_map.h>
28*bbb1b6f9SApple OSS Distributions #include <mach/task.h>
29*bbb1b6f9SApple OSS Distributions 
30*bbb1b6f9SApple OSS Distributions T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
31*bbb1b6f9SApple OSS Distributions 
#if defined(__arm64__)
/* PAGE_SIZE on ARM64 is an expression derived from a non-const global variable */
/* Use the compile-time maximum page size so PAD_SIZE can size static arrays. */
#define PAD_SIZE        PAGE_MAX_SIZE
#else
#define PAD_SIZE        PAGE_MIN_SIZE
#endif
38*bbb1b6f9SApple OSS Distributions 
/* Enumerations */

/* Which kind of memory access does_access_fault() should attempt. */
typedef enum _access_type {
	ACCESS_READ,
	ACCESS_WRITE,
} access_type_t;

/* How access_failed_handler() should resolve an anticipated fault. */
typedef enum _fault_strategy {
	FAULT_STRAT_NONE,       /* No recovery strategy — a fault here is fatal. */
	FAULT_STRAT_RW_TPRO,    /* Toggle the thread's TPRO state to read-write. */
} fault_strategy_t;
49*bbb1b6f9SApple OSS Distributions 
/* Structures */

/*
 * Per-thread bookkeeping for anticipated SIGBUS faults. One instance is
 * associated with the current thread via fault_state_key.
 */
typedef struct {
	uint64_t fault_count;            /* Number of faults resolved by the handler. */
	fault_strategy_t fault_strategy; /* How the handler should resolve the next fault. */
	bool fault_expected;             /* Whether a fault is currently anticipated. */
} fault_state_t;

/* Globals */
static bool key_created = false;      /* True once fault_state_key has been created. */
static pthread_key_t fault_state_key; /* TLS key holding this thread's fault_state_t. */
60*bbb1b6f9SApple OSS Distributions 
61*bbb1b6f9SApple OSS Distributions /*
62*bbb1b6f9SApple OSS Distributions  * The pager will only map entries with TPRO if we need to perform fixups.
63*bbb1b6f9SApple OSS Distributions  * Otherwise it really is const. Ensure we forge a struct that will require
64*bbb1b6f9SApple OSS Distributions  * dynamic rebasing.
65*bbb1b6f9SApple OSS Distributions  */
66*bbb1b6f9SApple OSS Distributions typedef struct {
67*bbb1b6f9SApple OSS Distributions 	void *reloc;
68*bbb1b6f9SApple OSS Distributions 	uint32_t magic;
69*bbb1b6f9SApple OSS Distributions 	char bytes[PAD_SIZE - 12];
70*bbb1b6f9SApple OSS Distributions } const_page_t;
71*bbb1b6f9SApple OSS Distributions 
72*bbb1b6f9SApple OSS Distributions typedef struct {
73*bbb1b6f9SApple OSS Distributions 	const_page_t one;
74*bbb1b6f9SApple OSS Distributions 	const_page_t two;
75*bbb1b6f9SApple OSS Distributions 	char ro[PAD_SIZE];
76*bbb1b6f9SApple OSS Distributions } const_state_t;
77*bbb1b6f9SApple OSS Distributions 
78*bbb1b6f9SApple OSS Distributions #define MAGIC(state) (void *)&state->magic
79*bbb1b6f9SApple OSS Distributions 
80*bbb1b6f9SApple OSS Distributions /*
81*bbb1b6f9SApple OSS Distributions  * Force known data into our __DATA_CONST segment. The pager will be responsible
82*bbb1b6f9SApple OSS Distributions  * for handling the mapping of this.
83*bbb1b6f9SApple OSS Distributions  */
__attribute__((section("__DATA_CONST,__pager")))
__attribute__((aligned(PAD_SIZE)))
static const_state_t pager_state = {
	/*
	 * Self-referential pointers force dynamic rebasing, which makes the
	 * pager map these pages with TPRO (see comment above const_page_t).
	 */
	.one.reloc = &pager_state,
	.two.reloc = &pager_state,
	.one.magic = 0x41414141,
	.two.magic = 0x41414141,
	.ro = "CCCC"
};
93*bbb1b6f9SApple OSS Distributions 
94*bbb1b6f9SApple OSS Distributions /* Allocate a fault_state_t, and associate it with the current thread. */
95*bbb1b6f9SApple OSS Distributions static fault_state_t *
fault_state_create(void)96*bbb1b6f9SApple OSS Distributions fault_state_create(void)
97*bbb1b6f9SApple OSS Distributions {
98*bbb1b6f9SApple OSS Distributions 	fault_state_t * fault_state = malloc(sizeof(fault_state_t));
99*bbb1b6f9SApple OSS Distributions 
100*bbb1b6f9SApple OSS Distributions 	if (fault_state) {
101*bbb1b6f9SApple OSS Distributions 		fault_state->fault_count = 0;
102*bbb1b6f9SApple OSS Distributions 		fault_state->fault_strategy = FAULT_STRAT_NONE;
103*bbb1b6f9SApple OSS Distributions 		fault_state->fault_expected = false;
104*bbb1b6f9SApple OSS Distributions 
105*bbb1b6f9SApple OSS Distributions 		if (pthread_setspecific(fault_state_key, fault_state)) {
106*bbb1b6f9SApple OSS Distributions 			free(fault_state);
107*bbb1b6f9SApple OSS Distributions 			fault_state = NULL;
108*bbb1b6f9SApple OSS Distributions 		}
109*bbb1b6f9SApple OSS Distributions 	}
110*bbb1b6f9SApple OSS Distributions 
111*bbb1b6f9SApple OSS Distributions 	return fault_state;
112*bbb1b6f9SApple OSS Distributions }
113*bbb1b6f9SApple OSS Distributions 
/*
 * Destroy a fault state previously created by fault_state_create().
 * Passing NULL is a test failure. Used both directly and as the
 * destructor for fault_state_key.
 */
static void
fault_state_destroy(void * fault_state)
{
	if (fault_state == NULL) {
		T_ASSERT_FAIL("Attempted to fault_state_destroy NULL");
	}

	free(fault_state);
}
124*bbb1b6f9SApple OSS Distributions 
/*
 * A signal handler that attempts to resolve anticipated faults through use of
 * the os_thread_self_restrict_rwx functions.
 *
 * Only SIGBUS is handled; the fault must have been armed by setting
 * fault_expected on this thread's fault_state_t, and the handler disarms it
 * so a second fault for the same access is treated as fatal. On return the
 * faulting instruction is retried with the new TPRO permissions.
 *
 * NOTE(review): T_ASSERT_FAIL / pthread_getspecific are not guaranteed to be
 * async-signal-safe; presumably acceptable in this controlled test — confirm.
 */
static void
access_failed_handler(int signum)
{
	fault_state_t * fault_state;

	/* This handler should ONLY handle SIGBUS. */
	if (signum != SIGBUS) {
		T_ASSERT_FAIL("Unexpected signal sent to handler");
	}

	if (!(fault_state = pthread_getspecific(fault_state_key))) {
		T_ASSERT_FAIL("Failed to retrieve fault state");
	}

	if (!(fault_state->fault_expected)) {
		T_ASSERT_FAIL("Unexpected fault taken");
	}

	/* We should not see a second fault. */
	fault_state->fault_expected = false;

	switch (fault_state->fault_strategy) {
	case FAULT_STRAT_NONE:
		T_ASSERT_FAIL("No fault strategy");

		/* Just in case we try to do something different. */
		break;
	case FAULT_STRAT_RW_TPRO:
		/* Grant this thread TPRO write access so the retried access succeeds. */
		os_thread_self_restrict_tpro_to_rw();
		break;
	}

	/* Record that a fault was taken; does_access_fault() compares counts. */
	fault_state->fault_count++;
}
163*bbb1b6f9SApple OSS Distributions 
164*bbb1b6f9SApple OSS Distributions /*
165*bbb1b6f9SApple OSS Distributions  * Attempt the specified access; if the access faults, this will return true;
166*bbb1b6f9SApple OSS Distributions  * otherwise, it will return false.
167*bbb1b6f9SApple OSS Distributions  */
168*bbb1b6f9SApple OSS Distributions static bool
does_access_fault(access_type_t access_type,void * addr,uint32_t value)169*bbb1b6f9SApple OSS Distributions does_access_fault(access_type_t access_type, void * addr, uint32_t value)
170*bbb1b6f9SApple OSS Distributions {
171*bbb1b6f9SApple OSS Distributions 	uint64_t old_fault_count;
172*bbb1b6f9SApple OSS Distributions 	uint64_t new_fault_count;
173*bbb1b6f9SApple OSS Distributions 
174*bbb1b6f9SApple OSS Distributions 	fault_state_t * fault_state;
175*bbb1b6f9SApple OSS Distributions 
176*bbb1b6f9SApple OSS Distributions 	struct sigaction old_action; /* Save area for any existing action. */
177*bbb1b6f9SApple OSS Distributions 	struct sigaction new_action; /* The action we wish to install for SIGBUS. */
178*bbb1b6f9SApple OSS Distributions 
179*bbb1b6f9SApple OSS Distributions 	bool retval = false;
180*bbb1b6f9SApple OSS Distributions 
181*bbb1b6f9SApple OSS Distributions 	new_action.sa_handler = access_failed_handler; /* A handler for write failures. */
182*bbb1b6f9SApple OSS Distributions 	new_action.sa_mask    = 0;                     /* Don't modify the mask. */
183*bbb1b6f9SApple OSS Distributions 	new_action.sa_flags   = 0;                     /* Flags?  Who needs those? */
184*bbb1b6f9SApple OSS Distributions 
185*bbb1b6f9SApple OSS Distributions 	if (addr == NULL) {
186*bbb1b6f9SApple OSS Distributions 		T_ASSERT_FAIL("Access attempted against NULL");
187*bbb1b6f9SApple OSS Distributions 	}
188*bbb1b6f9SApple OSS Distributions 
189*bbb1b6f9SApple OSS Distributions 	if (!(fault_state = pthread_getspecific(fault_state_key))) {
190*bbb1b6f9SApple OSS Distributions 		T_ASSERT_FAIL("Failed to retrieve fault state");
191*bbb1b6f9SApple OSS Distributions 	}
192*bbb1b6f9SApple OSS Distributions 
193*bbb1b6f9SApple OSS Distributions 	old_fault_count = fault_state->fault_count;
194*bbb1b6f9SApple OSS Distributions 
195*bbb1b6f9SApple OSS Distributions 	/* Install a handler so that we can catch SIGBUS. */
196*bbb1b6f9SApple OSS Distributions 	sigaction(SIGBUS, &new_action, &old_action);
197*bbb1b6f9SApple OSS Distributions 
198*bbb1b6f9SApple OSS Distributions 	/* Perform the requested operation. */
199*bbb1b6f9SApple OSS Distributions 	switch (access_type) {
200*bbb1b6f9SApple OSS Distributions 	case ACCESS_READ:
201*bbb1b6f9SApple OSS Distributions 		fault_state->fault_strategy = FAULT_STRAT_RW_TPRO;
202*bbb1b6f9SApple OSS Distributions 		fault_state->fault_expected = true;
203*bbb1b6f9SApple OSS Distributions 
204*bbb1b6f9SApple OSS Distributions 		__sync_synchronize();
205*bbb1b6f9SApple OSS Distributions 
206*bbb1b6f9SApple OSS Distributions #if defined(__arm64__)
207*bbb1b6f9SApple OSS Distributions 		uint8_t a = *((volatile uint8_t *)addr);
208*bbb1b6f9SApple OSS Distributions #endif
209*bbb1b6f9SApple OSS Distributions 		__sync_synchronize();
210*bbb1b6f9SApple OSS Distributions 
211*bbb1b6f9SApple OSS Distributions 		fault_state->fault_expected = false;
212*bbb1b6f9SApple OSS Distributions 		fault_state->fault_strategy = FAULT_STRAT_NONE;
213*bbb1b6f9SApple OSS Distributions 
214*bbb1b6f9SApple OSS Distributions 		break;
215*bbb1b6f9SApple OSS Distributions 
216*bbb1b6f9SApple OSS Distributions 	case ACCESS_WRITE:
217*bbb1b6f9SApple OSS Distributions 		fault_state->fault_strategy = FAULT_STRAT_RW_TPRO;
218*bbb1b6f9SApple OSS Distributions 		fault_state->fault_expected = true;
219*bbb1b6f9SApple OSS Distributions 
220*bbb1b6f9SApple OSS Distributions 		__sync_synchronize();
221*bbb1b6f9SApple OSS Distributions 
222*bbb1b6f9SApple OSS Distributions 		*((volatile uint32_t *)addr) = value;
223*bbb1b6f9SApple OSS Distributions 
224*bbb1b6f9SApple OSS Distributions 		__sync_synchronize();
225*bbb1b6f9SApple OSS Distributions 
226*bbb1b6f9SApple OSS Distributions 		fault_state->fault_expected = false;
227*bbb1b6f9SApple OSS Distributions 		fault_state->fault_strategy = FAULT_STRAT_NONE;
228*bbb1b6f9SApple OSS Distributions 
229*bbb1b6f9SApple OSS Distributions 		break;
230*bbb1b6f9SApple OSS Distributions 	}
231*bbb1b6f9SApple OSS Distributions 
232*bbb1b6f9SApple OSS Distributions 	/* Restore the old SIGBUS handler. */
233*bbb1b6f9SApple OSS Distributions 	sigaction(SIGBUS, &old_action, NULL);
234*bbb1b6f9SApple OSS Distributions 
235*bbb1b6f9SApple OSS Distributions 	new_fault_count = fault_state->fault_count;
236*bbb1b6f9SApple OSS Distributions 
237*bbb1b6f9SApple OSS Distributions 	if (new_fault_count > old_fault_count) {
238*bbb1b6f9SApple OSS Distributions 		/* Indicate that we took a fault. */
239*bbb1b6f9SApple OSS Distributions 		retval = true;
240*bbb1b6f9SApple OSS Distributions 	}
241*bbb1b6f9SApple OSS Distributions 
242*bbb1b6f9SApple OSS Distributions 	return retval;
243*bbb1b6f9SApple OSS Distributions }
244*bbb1b6f9SApple OSS Distributions 
245*bbb1b6f9SApple OSS Distributions static bool
does_read_fault(void * addr)246*bbb1b6f9SApple OSS Distributions does_read_fault(void * addr)
247*bbb1b6f9SApple OSS Distributions {
248*bbb1b6f9SApple OSS Distributions 	return does_access_fault(ACCESS_READ, addr, 0);
249*bbb1b6f9SApple OSS Distributions }
250*bbb1b6f9SApple OSS Distributions 
251*bbb1b6f9SApple OSS Distributions static bool
does_write_fault(void * addr,uint32_t value)252*bbb1b6f9SApple OSS Distributions does_write_fault(void * addr, uint32_t value)
253*bbb1b6f9SApple OSS Distributions {
254*bbb1b6f9SApple OSS Distributions 	return does_access_fault(ACCESS_WRITE, addr, value);
255*bbb1b6f9SApple OSS Distributions }
256*bbb1b6f9SApple OSS Distributions 
/*
 * Query whether the kernel's pmap supports TPRO-backed pagers
 * (vm.pmap_tpro_pagers sysctl). Returns false if the sysctl is
 * unavailable or reports the feature disabled.
 */
static bool
has_pager_support(void)
{
	uint32_t tpro_pagers = 0;
	size_t len = sizeof(tpro_pagers);

	if (sysctlbyname("vm.pmap_tpro_pagers", &tpro_pagers, &len, NULL, 0) != 0) {
		return false;
	}

	return tpro_pagers != 0;
}
267*bbb1b6f9SApple OSS Distributions 
268*bbb1b6f9SApple OSS Distributions static void
cleanup(void)269*bbb1b6f9SApple OSS Distributions cleanup(void)
270*bbb1b6f9SApple OSS Distributions {
271*bbb1b6f9SApple OSS Distributions 	fault_state_t * fault_state;
272*bbb1b6f9SApple OSS Distributions 
273*bbb1b6f9SApple OSS Distributions 	if (!(fault_state = pthread_getspecific(fault_state_key))) {
274*bbb1b6f9SApple OSS Distributions 		T_ASSERT_FAIL("Failed to retrieve fault state");
275*bbb1b6f9SApple OSS Distributions 
276*bbb1b6f9SApple OSS Distributions 		T_ASSERT_POSIX_ZERO(pthread_setspecific(fault_state_key, NULL), "Remove fault_state");
277*bbb1b6f9SApple OSS Distributions 		fault_state_destroy(fault_state);
278*bbb1b6f9SApple OSS Distributions 	}
279*bbb1b6f9SApple OSS Distributions 
280*bbb1b6f9SApple OSS Distributions 	if (key_created) {
281*bbb1b6f9SApple OSS Distributions 		T_ASSERT_POSIX_ZERO(pthread_key_delete(fault_state_key), "Delete fault state key");
282*bbb1b6f9SApple OSS Distributions 	}
283*bbb1b6f9SApple OSS Distributions 
284*bbb1b6f9SApple OSS Distributions 	return;
285*bbb1b6f9SApple OSS Distributions }
286*bbb1b6f9SApple OSS Distributions 
287*bbb1b6f9SApple OSS Distributions static void
288*bbb1b6f9SApple OSS Distributions thread_self_restrict_test(void (^test)(void))
289*bbb1b6f9SApple OSS Distributions {
290*bbb1b6f9SApple OSS Distributions 	int err = 0;
291*bbb1b6f9SApple OSS Distributions 
292*bbb1b6f9SApple OSS Distributions 	T_SETUPBEGIN;
293*bbb1b6f9SApple OSS Distributions 	T_ATEND(cleanup);
294*bbb1b6f9SApple OSS Distributions 
295*bbb1b6f9SApple OSS Distributions 	/* Set up the necessary state for the test. */
296*bbb1b6f9SApple OSS Distributions 	err = pthread_key_create(&fault_state_key, fault_state_destroy);
297*bbb1b6f9SApple OSS Distributions 	T_ASSERT_POSIX_ZERO(err, 0, "Create pthread key");
298*bbb1b6f9SApple OSS Distributions 	key_created = true;
299*bbb1b6f9SApple OSS Distributions 
300*bbb1b6f9SApple OSS Distributions 	T_ASSERT_NOTNULL(fault_state_create(), "Create fault state");
301*bbb1b6f9SApple OSS Distributions 	T_SETUPEND;
302*bbb1b6f9SApple OSS Distributions 
303*bbb1b6f9SApple OSS Distributions 	test();
304*bbb1b6f9SApple OSS Distributions }
305*bbb1b6f9SApple OSS Distributions 
/*
 * Fork a child and verify TPRO behavior across fork: the child should see
 * the parent's last committed magic value (0x45454545, written by
 * pager_test() just before calling us), and after toggling its own TPRO
 * state to RW it should be able to write to the pager-backed pages without
 * faulting. The parent then verifies (back in pager_test) that the child's
 * writes landed only in the child's COW copy.
 */
static void
fork_child_test(const_page_t *state)
{
	pid_t pid;
	int statloc;

	pid = fork();
	if (pid == 0) {
		/* Child: inherited mapping should still show the parent's value. */
		T_EXPECT_EQ(state->magic, 0x45454545, "Expected magic on fork");

		os_thread_self_restrict_tpro_to_rw();
		T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), true, "TPRO region configured as read-write in child");
		T_EXPECT_EQ(does_write_fault((void *)&state->bytes, 0x47474747), 0, "write to pager backed memory in child (no fault)");
		T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x46464646), 0, "write to pager backed memory in child (no fault)");
		exit(0);
	}

	if (pid < 0) {
		T_ASSERT_POSIX_SUCCESS(pid, "fork");
	}

	/* NOTE(review): waitpid result/exit status is ignored — child-side
	 * T_EXPECT failures presumably surface via darwintest; confirm. */
	waitpid(pid, &statloc, 0);
}
329*bbb1b6f9SApple OSS Distributions 
/*
 * Exercise TPRO semantics for pager-backed (__DATA_CONST) memory against
 * `state` (a page of pager_state): per-thread RO/RW toggling, fork/COW
 * behavior, shared and copying remaps, memory-entry extraction, pageout
 * and re-fault, and vm_protect denial. The sequence is strictly
 * order-dependent: each step's expected magic value follows from the
 * writes performed by earlier steps.
 */
static void
pager_test(const_page_t *state)
{
	kern_return_t kr;
	uint32_t pre;                  /* Snapshot of state->magic around COW checks. */
	vm_prot_t curprot, maxprot;
	mach_vm_address_t addr = 0;
	const_page_t *copy_state = NULL;
	mach_port_t cow_port = MACH_PORT_NULL;
	memory_object_size_t me_size = PAGE_SIZE;

	/*
	 * Validate our initial status quo. TPRO permissions should be RO,
	 * so we should be able to read from our pager backed mapping but
	 * should fault when trying to write to it.
	 */
	T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO region starts read-only");
	T_EXPECT_EQ(does_read_fault(MAGIC(state)), 0, "read from pager backed memory");
	T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x43434343), 1, "write to pager backed memory (detect fault)");

	/*
	 * Toggle permissions to RW and attempt a write. We should succeed.
	 */
	os_thread_self_restrict_tpro_to_rw();
	T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), true, "TPRO region configured as read-write");
	T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x44444444), 0, "write to pager backed memory (no fault)");

	/*
	 * Toggle permissions to RO and attempt a write. We should detect
	 * the fault (the handler resolves it by toggling RW, so the write
	 * itself still lands — magic is now 0x45454545).
	 */
	os_thread_self_restrict_tpro_to_ro();
	T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x45454545), 1, "write to pager backed memory (detect fault)");

	/*
	 * Fork a child process and ensure that writes into the pager backed
	 * regions are not observed by the parent. They should now be COW.
	 */
	pre = state->magic;
	fork_child_test(state);
	T_EXPECT_EQ(pre, state->magic, "write from child should not be observed");

	/*
	 * Ensure that if we remap the target region in a shared manner that we
	 * inherit TPRO. Remapping should be successful but we still rely on
	 * TPRO permissions to toggle r--/rw-
	 */
	kr = mach_vm_remap(mach_task_self(),
	    &addr,
	    PAGE_SIZE,
	    0,                /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    (mach_vm_address_t)state,
	    FALSE,                /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_POSIX_SUCCESS(kr, "mach_vm_remap(SHARED)");
	copy_state = (const_page_t *)addr;

	os_thread_self_restrict_tpro_to_ro();
	T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO configured as read-only");
	T_EXPECT_EQ(curprot, VM_PROT_READ, "TPRO region should be VM_PROT_READ");
	T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x46464646), 1, "write to remapped region (detect fault)");
	os_thread_self_restrict_tpro_to_rw();
	T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x46464646), 0, "write to remapped region (no fault)");
	/* Shared remap: the write through copy_state must show through state. */
	T_EXPECT_EQ(0x46464646, state->magic, "write into copied region should be observed");

	/*
	 * Ensure that if we remap the region that we do not observe writes to
	 * the new copy in __DATA_CONST itself.
	 */
	kr = mach_vm_remap(mach_task_self(),
	    (mach_vm_address_t *)&copy_state,
	    PAGE_SIZE,
	    0,                /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    (mach_vm_address_t)state,
	    TRUE,                /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_POSIX_SUCCESS(kr, "mach_vm_remap(COPY)");

	/*
	 * Toggle TPRO RW and write to the new copied region
	 */
	pre = state->magic;
	os_thread_self_restrict_tpro_to_rw();
	T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), true, "TPRO region configured as read-write");
	T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x46464646), 0, "write to pager backed memory (no fault)");
	T_EXPECT_EQ(pre, state->magic, "write into copied region should not be observed");

	/*
	 * Make a memory entry for our target region and attempt to map it in
	 * in a shared fashion. We should succeed but it should transparently
	 * copy the target VM object as extracting TPRO VM entries will fail.
	 * Writes to the new region should therefore not be observed.
	 */
	me_size = PAGE_SIZE;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &me_size,
	    (mach_vm_address_t)state,
	    MAP_MEM_VM_SHARE | VM_PROT_READ | VM_PROT_WRITE,
	    &cow_port,
	    MACH_PORT_NULL);
	T_EXPECT_POSIX_SUCCESS(kr, "mach_make_memory_entry_64(MAP_MEM_VM_SHARE)");

	pre = state->magic;
	T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x48484849), 0, "write to mapped copy region (no fault)");
	T_EXPECT_EQ(pre, state->magic, "write into copied region should not be observed");

	copy_state = NULL;
	kr = mach_vm_map(mach_task_self(),
	    (mach_vm_address_t *)&copy_state,
	    PAGE_SIZE,
	    0,              /* mask */
	    VM_FLAGS_ANYWHERE,
	    cow_port,
	    0,              /* offset */
	    TRUE,           /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_EXPECT_POSIX_SUCCESS(kr, "mach_vm_map(cow_port)");

	/*
	 * Pages of the copy will no longer be mapped in as TPRO. Both
	 * read/writes should work even with TPRO toggled RO.
	 */
	pre = state->magic;
	os_thread_self_restrict_tpro_to_ro();
	T_EXPECT_EQ(does_write_fault(MAGIC(copy_state), 0x48484848), 0, "write to mapped copy region (no fault)");
	T_EXPECT_EQ(pre, state->magic, "write into copied region should not be observed");

	/*
	 * We've explored a number of ways to perform copies on the target
	 * objects in __DATA_CONST. Our first target page (&pager_state.one)
	 * should now be marked RO without TPRO permissions to handle any
	 * incoming write faults. Write to it directly again to ensure we
	 * fault back in with TPRO permissions.
	 */
	os_thread_self_restrict_tpro_to_ro();
	T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x49494949), 1, "write to pager backed memory (detect fault)");
	os_thread_self_restrict_tpro_to_rw();
	T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x4a4a4a4a), 0, "write to pager backed memory (no fault)");

	/*
	 * Now we attempt to have the page paged out. On systems which support the
	 * compressor, we'll get paged out/compressed. On fault we should
	 * be pmapped back in with TPRO permissions.
	 */
	mach_vm_behavior_set(mach_task_self(), (mach_vm_address_t)state, PAGE_SIZE, VM_BEHAVIOR_PAGEOUT);

	/*
	 * Can verify in debugger at this point that page(s) have been
	 * paged out. If compressor pager is available the page should
	 * not be resident and compressor pager should be tied to the
	 * top level VM object.
	 */
	os_thread_self_restrict_tpro_to_ro();
	T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x49494949), 1, "write to pager backed memory after pageout (detect fault)");
	os_thread_self_restrict_tpro_to_rw();
	T_EXPECT_EQ(does_write_fault(MAGIC(state), 0x4a4a4a4a), 0, "write to pager backed memory after pageout (no fault)");

	/*
	 * Try and reprotect the region. We should fail
	 */
	kr = vm_protect(mach_task_self(), (mach_vm_address_t)state, PAGE_SIZE, FALSE, VM_PROT_DEFAULT);
	T_EXPECT_POSIX_ERROR(kr, KERN_PROTECTION_FAILURE, "vm_protect(RW) should fail");

	/* Leave the thread in the default RO state for subsequent tests. */
	os_thread_self_restrict_tpro_to_ro();
}
505*bbb1b6f9SApple OSS Distributions 
506*bbb1b6f9SApple OSS Distributions static void
mmap_test(const_page_t * state)507*bbb1b6f9SApple OSS Distributions mmap_test(const_page_t *state)
508*bbb1b6f9SApple OSS Distributions {
509*bbb1b6f9SApple OSS Distributions 	void *mapping;
510*bbb1b6f9SApple OSS Distributions 
511*bbb1b6f9SApple OSS Distributions 	/*
512*bbb1b6f9SApple OSS Distributions 	 * Validate our initial status quo. TPRO permissions should be RO,
513*bbb1b6f9SApple OSS Distributions 	 * so we should be able to read from our pager backed mapping but
514*bbb1b6f9SApple OSS Distributions 	 * should fault when trying to write to it.
515*bbb1b6f9SApple OSS Distributions 	 */
516*bbb1b6f9SApple OSS Distributions 	T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO region starts read-only");
517*bbb1b6f9SApple OSS Distributions 
518*bbb1b6f9SApple OSS Distributions 	/*
519*bbb1b6f9SApple OSS Distributions 	 * Attempt to mmap a fixed allocation over our TPRO region.
520*bbb1b6f9SApple OSS Distributions 	 * TPRO region should be permanent and should disallow being
521*bbb1b6f9SApple OSS Distributions 	 * overwritten.
522*bbb1b6f9SApple OSS Distributions 	 */
523*bbb1b6f9SApple OSS Distributions 	mapping = mmap(state, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_PRIVATE | MAP_ANON, -1, 0);
524*bbb1b6f9SApple OSS Distributions 	T_ASSERT_EQ(mapping, MAP_FAILED, "Map over TPRO range should fail");
525*bbb1b6f9SApple OSS Distributions 	os_thread_self_restrict_tpro_to_ro();
526*bbb1b6f9SApple OSS Distributions }
527*bbb1b6f9SApple OSS Distributions 
528*bbb1b6f9SApple OSS Distributions static void
vm_allocate_test(const_page_t * state)529*bbb1b6f9SApple OSS Distributions vm_allocate_test(const_page_t *state)
530*bbb1b6f9SApple OSS Distributions {
531*bbb1b6f9SApple OSS Distributions 	kern_return_t kr;
532*bbb1b6f9SApple OSS Distributions 	mach_vm_address_t addr = (mach_vm_address_t)state;
533*bbb1b6f9SApple OSS Distributions 	vm_region_basic_info_data_64_t info;
534*bbb1b6f9SApple OSS Distributions 	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
535*bbb1b6f9SApple OSS Distributions 	mach_vm_size_t size = PAGE_SIZE;
536*bbb1b6f9SApple OSS Distributions 	mach_port_t unused = MACH_PORT_NULL;
537*bbb1b6f9SApple OSS Distributions 
538*bbb1b6f9SApple OSS Distributions 	/*
539*bbb1b6f9SApple OSS Distributions 	 * Validate our initial status quo. TPRO permissions should be RO,
540*bbb1b6f9SApple OSS Distributions 	 * so we should be able to read from our pager backed mapping but
541*bbb1b6f9SApple OSS Distributions 	 * should fault when trying to write to it.
542*bbb1b6f9SApple OSS Distributions 	 */
543*bbb1b6f9SApple OSS Distributions 	T_EXPECT_EQ(os_thread_self_restrict_tpro_is_writable(), false, "TPRO region starts read-only");
544*bbb1b6f9SApple OSS Distributions 
545*bbb1b6f9SApple OSS Distributions 	/*
546*bbb1b6f9SApple OSS Distributions 	 * Deallocate the TPRO region. This should succeed but leave the region
547*bbb1b6f9SApple OSS Distributions 	 * intact with no permissions. Further allocations should not be able to
548*bbb1b6f9SApple OSS Distributions 	 * obtain the same address.
549*bbb1b6f9SApple OSS Distributions 	 */
550*bbb1b6f9SApple OSS Distributions 	kr = mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)state, PAGE_SIZE);
551*bbb1b6f9SApple OSS Distributions 	T_EXPECT_POSIX_ERROR(kr, KERN_SUCCESS, "vm_deallocate should succeed");
552*bbb1b6f9SApple OSS Distributions 
553*bbb1b6f9SApple OSS Distributions 	kr = mach_vm_allocate(mach_task_self(), (mach_vm_address_t *)&addr, PAGE_SIZE, VM_FLAGS_FIXED);
554*bbb1b6f9SApple OSS Distributions 	T_EXPECT_POSIX_ERROR(kr, KERN_NO_SPACE, "vm_allocate should fail with KERN_NO_SPACE");
555*bbb1b6f9SApple OSS Distributions 
556*bbb1b6f9SApple OSS Distributions 	/*
557*bbb1b6f9SApple OSS Distributions 	 * Lookup the target region and confirm that all permissions have been
558*bbb1b6f9SApple OSS Distributions 	 * removed.
559*bbb1b6f9SApple OSS Distributions 	 */
560*bbb1b6f9SApple OSS Distributions 	kr = mach_vm_region(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused);
561*bbb1b6f9SApple OSS Distributions 	T_QUIET; T_EXPECT_POSIX_ERROR(kr, KERN_SUCCESS, "mach_vm_region should succeed");
562*bbb1b6f9SApple OSS Distributions 
563*bbb1b6f9SApple OSS Distributions 	T_ASSERT_EQ(info.protection, VM_PROT_NONE, "Entry should have no permissions");
564*bbb1b6f9SApple OSS Distributions 	T_ASSERT_EQ(info.max_protection, VM_PROT_NONE, "Entry should have no permissions");
565*bbb1b6f9SApple OSS Distributions 	os_thread_self_restrict_tpro_to_ro();
566*bbb1b6f9SApple OSS Distributions }
567*bbb1b6f9SApple OSS Distributions 
568*bbb1b6f9SApple OSS Distributions T_DECL(thread_self_restrict_pagers,
569*bbb1b6f9SApple OSS Distributions     "Verify that the TPRO pager interfaces work correctly", T_META_TAG_VM_PREFERRED)
570*bbb1b6f9SApple OSS Distributions {
571*bbb1b6f9SApple OSS Distributions #if __arm64__
572*bbb1b6f9SApple OSS Distributions 	/* Check to see that we support the necessary hardware features. */
573*bbb1b6f9SApple OSS Distributions 	if (!os_thread_self_restrict_tpro_is_supported() || !has_pager_support()) {
574*bbb1b6f9SApple OSS Distributions 		T_SKIP("no hardware TPRO support enabled on this system");
575*bbb1b6f9SApple OSS Distributions 	}
576*bbb1b6f9SApple OSS Distributions 
577*bbb1b6f9SApple OSS Distributions 	thread_self_restrict_test(^{
578*bbb1b6f9SApple OSS Distributions 		pager_test(&pager_state.one);
579*bbb1b6f9SApple OSS Distributions 
580*bbb1b6f9SApple OSS Distributions 		/*
581*bbb1b6f9SApple OSS Distributions 		 * Ensure that touching the second pager supported page exhibits
582*bbb1b6f9SApple OSS Distributions 		 * identical behaviour in order to validate the transitions between
583*bbb1b6f9SApple OSS Distributions 		 * VM entry & copy object chains.
584*bbb1b6f9SApple OSS Distributions 		 */
585*bbb1b6f9SApple OSS Distributions 		pager_test(&pager_state.two);
586*bbb1b6f9SApple OSS Distributions 
587*bbb1b6f9SApple OSS Distributions 		/*
588*bbb1b6f9SApple OSS Distributions 		 * Try and write to a normal __DATA_CONST page that isn't backed by
589*bbb1b6f9SApple OSS Distributions 		 * the dyld pager. The kernel will have mapped this directly but
590*bbb1b6f9SApple OSS Distributions 		 * should still maintain TPRO protection.
591*bbb1b6f9SApple OSS Distributions 		 */
592*bbb1b6f9SApple OSS Distributions 		os_thread_self_restrict_tpro_to_ro();
593*bbb1b6f9SApple OSS Distributions 		T_EXPECT_EQ(does_write_fault(&pager_state.ro[0], 0x41414141), 1, "write to __DATA_CONST should succeed (no fault)");
594*bbb1b6f9SApple OSS Distributions 		os_thread_self_restrict_tpro_to_rw();
595*bbb1b6f9SApple OSS Distributions 		T_EXPECT_EQ(does_write_fault(&pager_state.ro[0], 0x41414141), 0, "write to __DATA_CONST should fail (detect fault)");
596*bbb1b6f9SApple OSS Distributions 	});
597*bbb1b6f9SApple OSS Distributions #else
598*bbb1b6f9SApple OSS Distributions 	T_SKIP("thread_self_restrict_pagers not supported on this system");
599*bbb1b6f9SApple OSS Distributions #endif /* __arm64__ */
600*bbb1b6f9SApple OSS Distributions }
601*bbb1b6f9SApple OSS Distributions 
602*bbb1b6f9SApple OSS Distributions T_DECL(thread_self_restrict_tpro_permanent,
603*bbb1b6f9SApple OSS Distributions     "Verify that TPRO VM entries are permanent")
604*bbb1b6f9SApple OSS Distributions {
605*bbb1b6f9SApple OSS Distributions #if __arm64__
606*bbb1b6f9SApple OSS Distributions 	/* Check to see that we support the necessary hardware features. */
607*bbb1b6f9SApple OSS Distributions 	if (!os_thread_self_restrict_tpro_is_supported() || !has_pager_support()) {
608*bbb1b6f9SApple OSS Distributions 		T_SKIP("no hardware TPRO support enabled on this system");
609*bbb1b6f9SApple OSS Distributions 	}
610*bbb1b6f9SApple OSS Distributions 
611*bbb1b6f9SApple OSS Distributions 	thread_self_restrict_test(^{
612*bbb1b6f9SApple OSS Distributions 		mmap_test(&pager_state.one);
613*bbb1b6f9SApple OSS Distributions 		vm_allocate_test(&pager_state.two);
614*bbb1b6f9SApple OSS Distributions 	});
615*bbb1b6f9SApple OSS Distributions #else
616*bbb1b6f9SApple OSS Distributions 	T_SKIP("thread_self_restrict_tpro_permanent not supported on this system");
617*bbb1b6f9SApple OSS Distributions #endif /* __arm64__ */
618*bbb1b6f9SApple OSS Distributions }
619