1*1b191cb5SApple OSS Distributions // Copyright (c) 2016-2020 Apple Computer, Inc. All rights reserved.
2*1b191cb5SApple OSS Distributions
3*1b191cb5SApple OSS Distributions #include <CoreSymbolication/CoreSymbolication.h>
4*1b191cb5SApple OSS Distributions #include <darwintest.h>
5*1b191cb5SApple OSS Distributions #include <dispatch/dispatch.h>
6*1b191cb5SApple OSS Distributions #include <execinfo.h>
7*1b191cb5SApple OSS Distributions #include <pthread.h>
8*1b191cb5SApple OSS Distributions #include <ptrauth.h>
9*1b191cb5SApple OSS Distributions #include <mach/mach.h>
10*1b191cb5SApple OSS Distributions #include <stdalign.h>
11*1b191cb5SApple OSS Distributions #include <sys/mman.h>
12*1b191cb5SApple OSS Distributions #include <sys/sysctl.h>
13*1b191cb5SApple OSS Distributions
// Allow the individual T_DECLs in this file to run concurrently.
T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));

// Scenario selector for the kern.backtrace.user test sysctl (passed through
// the sysctl's last argument; see backtrace_current_thread_wrapper).
enum test_scenario {
	USER_SCENARIO = 0,
	RESUME_SCENARIO = 1,
};

// Scenario selector for the kern.backtrace.kernel_tests sysctl.
enum kernel_test_scenario {
	PACK_UNPACK_SCENARIO = 0,
	PACKED_SCENARIO = 1,
};

// Number of controlled frames built by backtrace_thread's recursion;
// matches the length of user_bt below.
#define USER_FRAMES (12)
// Extra frames Libc/syscall entry may add above the controlled stack.
#define MAX_SYSCALL_SETUP_FRAMES (3)
// Controlled frames that are not recursion levels
// (backtrace_thread and expect_callstack).
#define NON_RECURSE_FRAMES (2)
// Expected frames for the fake async stack: two fake async contexts plus
// the non-recursing frames.
#define ASYNC_FRAMES (2 + NON_RECURSE_FRAMES)
30*1b191cb5SApple OSS Distributions
// Symbol names expected in the controlled portion of the user stack, listed
// outermost-first. The kernel reports frames innermost-first, so expect_frame
// walks this array from the end (expect_callstack is matched first).
static const char *user_bt[USER_FRAMES] = {
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "expect_callstack",
};

// State threaded through expect_frame while checking a single backtrace.
struct callstack_exp {
	// True until the innermost expected symbol is seen; earlier frames are
	// treated as syscall/Libc setup and only logged, not strictly checked.
	bool in_syscall_setup;
	// Backtrace index where the controlled stack was found to start.
	unsigned int syscall_frames;
	// Expected symbol names (outermost-first) and their count.
	const char **callstack;
	size_t callstack_len;
	// Number of frames checked against `callstack` so far.
	unsigned int nchecked;
};
45*1b191cb5SApple OSS Distributions
// Pointer-authentication qualifiers mirroring the discriminators the Swift
// concurrency runtime signs async context fields with; no-ops when the
// target has no ptrauth support.
#if __has_feature(ptrauth_calls)
#define __ptrauth_swift_async_context_parent \
	__ptrauth(ptrauth_key_process_independent_data, 1, 0xbda2)
#define __ptrauth_swift_async_context_resume \
	__ptrauth(ptrauth_key_function_pointer, 1, 0xd707)
#else
#define __ptrauth_swift_async_context_parent
#define __ptrauth_swift_async_context_resume
#endif

// This struct fakes the Swift AsyncContext struct which is used by
// the Swift concurrency runtime. We only care about the first 2 fields:
// the parent-context link and the resume function pointer, which is what
// the kernel's async backtracer reads.
struct fake_async_context {
	struct fake_async_context* __ptrauth_swift_async_context_parent next;
	void(*__ptrauth_swift_async_context_resume resume_pc)(void);
};
62*1b191cb5SApple OSS Distributions
// Empty functions whose addresses serve as the fake async "resume" pointers
// for the fabricated context chain below; their names are what the
// symbolicated async backtrace is expected to contain.
//
// Declared with (void): an empty parameter list () is an obsolescent
// (pre-C23) unprototyped declaration; (void) also matches the
// void (*)(void) type of fake_async_context.resume_pc exactly.
static void
level1_func(void)
{
}
static void
level2_func(void)
{
}
71*1b191cb5SApple OSS Distributions
// Create a chain of fake async contexts: level2 -> level1 -> end.
// 16-byte alignment mimics real AsyncContext allocations.
static alignas(16) struct fake_async_context level1 = { 0, level1_func };
static alignas(16) struct fake_async_context level2 = { &level1, level2_func };

// Symbols expected in the async backtrace, outermost-first: the two fake
// resume functions followed by the real frames that set the chain up.
static const char *async_bt[ASYNC_FRAMES] = {
	"level1_func", "level2_func", "backtrace_thread_async",
	"expect_async_callstack",
};
80*1b191cb5SApple OSS Distributions
81*1b191cb5SApple OSS Distributions static void
expect_frame(struct callstack_exp * cs,CSSymbolRef symbol,unsigned long addr,unsigned int bt_idx)82*1b191cb5SApple OSS Distributions expect_frame(struct callstack_exp *cs, CSSymbolRef symbol,
83*1b191cb5SApple OSS Distributions unsigned long addr, unsigned int bt_idx)
84*1b191cb5SApple OSS Distributions {
85*1b191cb5SApple OSS Distributions if (CSIsNull(symbol)) {
86*1b191cb5SApple OSS Distributions if (!cs->in_syscall_setup) {
87*1b191cb5SApple OSS Distributions T_FAIL("invalid symbol for address %#lx at frame %d", addr,
88*1b191cb5SApple OSS Distributions bt_idx);
89*1b191cb5SApple OSS Distributions }
90*1b191cb5SApple OSS Distributions return;
91*1b191cb5SApple OSS Distributions }
92*1b191cb5SApple OSS Distributions
93*1b191cb5SApple OSS Distributions const char *name = CSSymbolGetName(symbol);
94*1b191cb5SApple OSS Distributions if (name) {
95*1b191cb5SApple OSS Distributions if (cs->in_syscall_setup) {
96*1b191cb5SApple OSS Distributions if (strcmp(name, cs->callstack[cs->callstack_len - 1]) == 0) {
97*1b191cb5SApple OSS Distributions cs->in_syscall_setup = false;
98*1b191cb5SApple OSS Distributions cs->syscall_frames = bt_idx;
99*1b191cb5SApple OSS Distributions T_LOG("found start of controlled stack at frame %u, expected "
100*1b191cb5SApple OSS Distributions "index %zu", cs->syscall_frames, cs->callstack_len - 1);
101*1b191cb5SApple OSS Distributions } else {
102*1b191cb5SApple OSS Distributions T_LOG("found syscall setup symbol %s at frame %u", name,
103*1b191cb5SApple OSS Distributions bt_idx);
104*1b191cb5SApple OSS Distributions }
105*1b191cb5SApple OSS Distributions }
106*1b191cb5SApple OSS Distributions if (!cs->in_syscall_setup) {
107*1b191cb5SApple OSS Distributions if (cs->nchecked >= cs->callstack_len) {
108*1b191cb5SApple OSS Distributions T_LOG("frame %2u: skipping system frame %s", bt_idx, name);
109*1b191cb5SApple OSS Distributions } else {
110*1b191cb5SApple OSS Distributions size_t frame_idx = cs->callstack_len - cs->nchecked - 1;
111*1b191cb5SApple OSS Distributions T_EXPECT_EQ_STR(name, cs->callstack[frame_idx],
112*1b191cb5SApple OSS Distributions "frame %2zu: saw '%s', expected '%s'",
113*1b191cb5SApple OSS Distributions frame_idx, name, cs->callstack[frame_idx]);
114*1b191cb5SApple OSS Distributions }
115*1b191cb5SApple OSS Distributions cs->nchecked++;
116*1b191cb5SApple OSS Distributions }
117*1b191cb5SApple OSS Distributions } else {
118*1b191cb5SApple OSS Distributions if (!cs->in_syscall_setup) {
119*1b191cb5SApple OSS Distributions T_ASSERT_NOTNULL(name, NULL, "symbol should not be NULL");
120*1b191cb5SApple OSS Distributions }
121*1b191cb5SApple OSS Distributions }
122*1b191cb5SApple OSS Distributions }
123*1b191cb5SApple OSS Distributions
124*1b191cb5SApple OSS Distributions static bool
is_kernel_64_bit(void)125*1b191cb5SApple OSS Distributions is_kernel_64_bit(void)
126*1b191cb5SApple OSS Distributions {
127*1b191cb5SApple OSS Distributions static dispatch_once_t k64_once;
128*1b191cb5SApple OSS Distributions static bool k64 = false;
129*1b191cb5SApple OSS Distributions dispatch_once(&k64_once, ^{
130*1b191cb5SApple OSS Distributions int errb;
131*1b191cb5SApple OSS Distributions int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ };
132*1b191cb5SApple OSS Distributions
133*1b191cb5SApple OSS Distributions struct kinfo_proc kp;
134*1b191cb5SApple OSS Distributions size_t len = sizeof(kp);
135*1b191cb5SApple OSS Distributions
136*1b191cb5SApple OSS Distributions errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len, NULL, 0);
137*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_POSIX_SUCCESS(errb,
138*1b191cb5SApple OSS Distributions "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");
139*1b191cb5SApple OSS Distributions
140*1b191cb5SApple OSS Distributions k64 = kp.kp_proc.p_flag & P_LP64;
141*1b191cb5SApple OSS Distributions T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32");
142*1b191cb5SApple OSS Distributions });
143*1b191cb5SApple OSS Distributions return k64;
144*1b191cb5SApple OSS Distributions }
145*1b191cb5SApple OSS Distributions
// Use an extra, non-inlineable function so that any frames after expect_stack
// can be safely ignored. This insulates the test from changes in how syscalls
// are called by Libc and the kernel.
//
// On return, *bt_filled holds the number of frames the kernel wrote into bt.
// NOTE(review): the scenario is passed through the sysctl's newlen argument;
// presumably the kern.backtrace.user handler decodes it from there — confirm
// against the kernel-side test sysctl.
static void __attribute__((noinline, not_tail_called))
backtrace_current_thread_wrapper(enum test_scenario scenario, uint64_t *bt,
    size_t *bt_filled)
{
	int ret = sysctlbyname("kern.backtrace.user", bt, bt_filled, NULL,
	    scenario);
	getpid(); // Really prevent tail calls.
	// Release kernels don't provide this test sysctl at all.
	if (ret == -1 && errno == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
	}
	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")");
	T_LOG("kernel returned %zu frame backtrace", *bt_filled);
}
162*1b191cb5SApple OSS Distributions
163*1b191cb5SApple OSS Distributions static CSSymbolicatorRef
get_symbolicator(void)164*1b191cb5SApple OSS Distributions get_symbolicator(void)
165*1b191cb5SApple OSS Distributions {
166*1b191cb5SApple OSS Distributions static CSSymbolicatorRef user_symb;
167*1b191cb5SApple OSS Distributions static dispatch_once_t expect_stack_once;
168*1b191cb5SApple OSS Distributions dispatch_once(&expect_stack_once, ^{
169*1b191cb5SApple OSS Distributions user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
170*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
171*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
172*1b191cb5SApple OSS Distributions });
173*1b191cb5SApple OSS Distributions return user_symb;
174*1b191cb5SApple OSS Distributions }
175*1b191cb5SApple OSS Distributions
// Ask the kernel for a backtrace of the current thread and check it frame-by-
// frame against user_bt. Must remain noinline/not_tail_called: its name is
// part of the expected callstack and it must occupy its own frame.
static void __attribute__((noinline, not_tail_called))
expect_callstack(enum test_scenario scenario)
{
	uint64_t bt[USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 };

	CSSymbolicatorRef user_symb = get_symbolicator();
	size_t bt_filled = USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES;
	backtrace_current_thread_wrapper(scenario, bt, &bt_filled);

	// The frame count must be within [USER_FRAMES, USER_FRAMES + setup].
	unsigned int bt_len = (unsigned int)bt_filled;
	T_EXPECT_GE(bt_len, (unsigned int)USER_FRAMES,
	    "at least %u frames should be present in backtrace", USER_FRAMES);
	T_EXPECT_LE(bt_len, (unsigned int)USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES,
	    "at most %u frames should be present in backtrace",
	    USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES);

	struct callstack_exp callstack = {
		.in_syscall_setup = true,
		.syscall_frames = 0,
		.callstack = user_bt,
		.callstack_len = USER_FRAMES,
		.nchecked = 0,
	};
	for (unsigned int i = 0; i < bt_len; i++) {
		uintptr_t addr;
#if !defined(__LP64__)
		// Backtrace frames come out as kernel words; convert them back to user
		// uintptr_t for 32-bit processes.
		if (is_kernel_64_bit()) {
			addr = (uintptr_t)(bt[i]);
		} else {
			addr = (uintptr_t)(((uint32_t *)bt)[i]);
		}
#else // defined(__LP64__)
		addr = (uintptr_t)bt[i];
#endif // defined(__LP64__)

		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
		    user_symb, addr, kCSNow);
		expect_frame(&callstack, symbol, addr, i);
	}

	// Every expected frame should have been found and compared.
	T_EXPECT_GE(callstack.nchecked, USER_FRAMES,
	    "checked enough frames for correct symbols");
}
221*1b191cb5SApple OSS Distributions
// Mutually-recursive helpers that build a call stack of a known depth with
// known, alternating symbol names. noinline/not_tail_called ensure each
// level occupies a distinct stack frame that the kernel backtracer will see.
static int __attribute__((noinline, not_tail_called))
recurse_a(enum test_scenario, unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(enum test_scenario, unsigned int frames);

static int __attribute__((noinline, not_tail_called))
recurse_a(enum test_scenario scenario, unsigned int frames)
{
	// At the deepest level, perform the backtrace check from this frame.
	if (frames == 1) {
		expect_callstack(scenario);
		getpid(); // Really prevent tail calls.
		return 0;
	}

	return recurse_b(scenario, frames - 1) + 1;
}
238*1b191cb5SApple OSS Distributions
// Mirror of recurse_a; the two alternate so the expected symbol names in
// user_bt alternate as well.
static int __attribute__((noinline, not_tail_called))
recurse_b(enum test_scenario scenario, unsigned int frames)
{
	// At the deepest level, perform the backtrace check from this frame.
	if (frames == 1) {
		expect_callstack(scenario);
		getpid(); // Really prevent tail calls.
		return 0;
	}

	return recurse_a(scenario, frames - 1) + 1;
}
250*1b191cb5SApple OSS Distributions
// Like expect_callstack, but for the fabricated async stack: checks the
// kernel backtrace against async_bt. Must remain noinline/not_tail_called
// since its name is part of the expected callstack.
static void __attribute__((noinline, not_tail_called))
expect_async_callstack(void)
{
	uint64_t bt[ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 };

	CSSymbolicatorRef user_symb = get_symbolicator();
	size_t bt_filled = ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES;
	backtrace_current_thread_wrapper(USER_SCENARIO, bt, &bt_filled);

	// The frame count must be within [ASYNC_FRAMES, ASYNC_FRAMES + setup].
	unsigned int bt_len = (unsigned int)bt_filled;
	T_EXPECT_GE(bt_len, (unsigned int)ASYNC_FRAMES,
	    "at least %u frames should be present in backtrace", ASYNC_FRAMES);
	T_EXPECT_LE(bt_len, (unsigned int)ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES,
	    "at most %u frames should be present in backtrace",
	    ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES);

	struct callstack_exp callstack = {
		.in_syscall_setup = true,
		.syscall_frames = 0,
		.callstack = async_bt,
		.callstack_len = ASYNC_FRAMES,
		.nchecked = 0,
	};
	for (unsigned int i = 0; i < bt_len; i++) {
		uintptr_t addr;
#if !defined(__LP64__)
		// Backtrace frames come out as kernel words; convert them back to user
		// uintptr_t for 32-bit processes.
		if (is_kernel_64_bit()) {
			addr = (uintptr_t)(bt[i]);
		} else {
			addr = (uintptr_t)(((uint32_t *)bt)[i]);
		}
#else // defined(__LP64__)
		addr = (uintptr_t)bt[i];
#endif // defined(__LP64__)

		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
		    user_symb, addr, kCSNow);
		expect_frame(&callstack, symbol, addr, i);
	}

	// Every expected frame should have been found and compared.
	T_EXPECT_GE(callstack.nchecked, ASYNC_FRAMES,
	    "checked enough frames for correct symbols");
}
296*1b191cb5SApple OSS Distributions
// Thread body that fabricates a Swift-style async frame on its own stack and
// then runs the async backtrace check. The exact stack layout of this
// function is load-bearing; do not add locals or asserts here.
static void *
backtrace_thread_async(void * __unused arg)
{
	uint64_t *fp = __builtin_frame_address(0);
	// We cannot use a variable of pointer type, because this ABI is valid
	// on arm64_32 where pointers are 32bits, but the context pointer will
	// still be stored in a 64bits slot on the stack.
#if __has_feature(ptrauth_calls)
	// NOTE: this #define is inside a function body but, as with any macro,
	// it remains in effect for the rest of the file (spin_backtrace_async
	// below relies on it).
#define __stack_context_auth __ptrauth(ptrauth_key_process_dependent_data, 1, \
	    0xc31a)
	struct fake_async_context * __stack_context_auth ctx = &level2;
#else // __has_feature(ptrauth_calls)
	/* struct fake_async_context * */uint64_t ctx = (uintptr_t)&level2;
#endif // !__has_feature(ptrauth_calls)

	// The signature of an async frame on the OS stack is:
	// [ <AsyncContext address>, <Saved FP | (1<<60)>, <return address> ]
	// The Async context must be right before the saved FP on the stack. This
	// should happen naturally in an optimized build as it is the only
	// variable on the stack.
	// This function cannot use T_ASSERT_* because it changes the stack
	// layout.
	assert((uintptr_t)fp - (uintptr_t)&ctx == 8);

	// Modify the saved FP on the stack to include the async frame marker
	*fp |= (0x1ULL << 60);
	expect_async_callstack();
	return NULL;
}
326*1b191cb5SApple OSS Distributions
327*1b191cb5SApple OSS Distributions static void *
backtrace_thread(void * arg)328*1b191cb5SApple OSS Distributions backtrace_thread(void *arg)
329*1b191cb5SApple OSS Distributions {
330*1b191cb5SApple OSS Distributions unsigned int calls;
331*1b191cb5SApple OSS Distributions enum test_scenario scenario = (enum test_scenario)arg;
332*1b191cb5SApple OSS Distributions
333*1b191cb5SApple OSS Distributions // backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
334*1b191cb5SApple OSS Distributions //
335*1b191cb5SApple OSS Distributions // Always make one less call for this frame (backtrace_thread).
336*1b191cb5SApple OSS Distributions calls = USER_FRAMES - NON_RECURSE_FRAMES;
337*1b191cb5SApple OSS Distributions
338*1b191cb5SApple OSS Distributions T_LOG("backtrace thread calling into %d frames (already at %d frames)",
339*1b191cb5SApple OSS Distributions calls, NON_RECURSE_FRAMES);
340*1b191cb5SApple OSS Distributions (void)recurse_a(scenario, calls);
341*1b191cb5SApple OSS Distributions return NULL;
342*1b191cb5SApple OSS Distributions }
343*1b191cb5SApple OSS Distributions
344*1b191cb5SApple OSS Distributions T_DECL(backtrace_user, "test that the kernel can backtrace user stacks",
345*1b191cb5SApple OSS Distributions T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true))
346*1b191cb5SApple OSS Distributions {
347*1b191cb5SApple OSS Distributions pthread_t thread;
348*1b191cb5SApple OSS Distributions
349*1b191cb5SApple OSS Distributions // Run the test from a different thread to insulate it from libdarwintest
350*1b191cb5SApple OSS Distributions // setup.
351*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
352*1b191cb5SApple OSS Distributions (void *)USER_SCENARIO), "create additional thread to backtrace");
353*1b191cb5SApple OSS Distributions
354*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
355*1b191cb5SApple OSS Distributions }
356*1b191cb5SApple OSS Distributions
T_DECL(backtrace_user_bounds,
    "test that the kernel doesn't write frames out of expected bounds")
{
	uint64_t bt_init[USER_FRAMES] = {};
	size_t bt_filled = USER_FRAMES, bt_filled_after = 0;
	int error = 0;
	kern_return_t kr = KERN_FAILURE;
	void *bt_page = NULL;
	void *guard_page = NULL;
	void *bt_start = NULL;

	// The backtrace addresses come back as kernel words.
	size_t kword_size = is_kernel_64_bit() ? 8 : 4;

	// Get an idea of how many frames to expect.
	int ret = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL, 0);
	if (ret == -1 && errno == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user missing");
	}
	// Fix: assert on the sysctl's actual return value. The previous code
	// asserted on `error`, which was still its 0 initializer here, so a
	// failing sysctl was never detected.
	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")");

	// Allocate two pages -- a first one that's valid and a second that
	// will be non-writeable to catch a copyout that's too large.
	bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	T_WITH_ERRNO;
	T_ASSERT_NE(bt_page, MAP_FAILED, "allocated backtrace pages");
	guard_page = (char *)bt_page + vm_page_size;

	error = mprotect(guard_page, vm_page_size, PROT_READ);
	T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page");

	// Ensure the pages are set up as expected: writable first page,
	// non-writable guard page.
	kr = vm_write(mach_task_self(), (vm_address_t)bt_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_MACH_SUCCESS(kr,
	    "should succeed in writing to backtrace page");
	kr = vm_write(mach_task_self(), (vm_address_t)guard_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page");

	// Ask the kernel to write the backtrace just before the guard page.
	bt_start = (char *)guard_page - (kword_size * bt_filled);
	bt_filled_after = bt_filled;

	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_SUCCESS(error,
	    "sysctlbyname(\"kern.backtrace.user\") just before guard page");
	T_EXPECT_EQ(bt_filled, bt_filled_after,
	    "both calls to backtrace should have filled in the same number of "
	    "frames");

	// Expect the kernel to fault when writing too far: shift the buffer one
	// byte so the last frame straddles the guard page.
	// (USER_SCENARIO is 0, so the newp argument below is equivalent to NULL.)
	bt_start = (char *)bt_start + 1;
	bt_filled_after = bt_filled;
	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    (void *)USER_SCENARIO, 0);
	T_EXPECT_POSIX_FAILURE(error, EFAULT,
	    "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "
	    "guard page");
}
419*1b191cb5SApple OSS Distributions
420*1b191cb5SApple OSS Distributions T_DECL(backtrace_user_async,
421*1b191cb5SApple OSS Distributions "test that the kernel can backtrace user async stacks",
422*1b191cb5SApple OSS Distributions T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false))
423*1b191cb5SApple OSS Distributions {
424*1b191cb5SApple OSS Distributions #if !defined(__LP64__)
425*1b191cb5SApple OSS Distributions T_SKIP("unsupported on LP32");
426*1b191cb5SApple OSS Distributions #else // __LP32__
427*1b191cb5SApple OSS Distributions pthread_t thread;
428*1b191cb5SApple OSS Distributions // Run the test from a different thread to insulate it from libdarwintest
429*1b191cb5SApple OSS Distributions // setup.
430*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL,
431*1b191cb5SApple OSS Distributions backtrace_thread_async, NULL),
432*1b191cb5SApple OSS Distributions "create additional thread to backtrace");
433*1b191cb5SApple OSS Distributions
434*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
435*1b191cb5SApple OSS Distributions #endif // !__LP32__
436*1b191cb5SApple OSS Distributions }
437*1b191cb5SApple OSS Distributions
438*1b191cb5SApple OSS Distributions T_DECL(backtrace_user_resume,
439*1b191cb5SApple OSS Distributions "test that the kernel can resume a backtrace into a smaller buffer",
440*1b191cb5SApple OSS Distributions T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false))
441*1b191cb5SApple OSS Distributions {
442*1b191cb5SApple OSS Distributions pthread_t thread;
443*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
444*1b191cb5SApple OSS Distributions (void *)RESUME_SCENARIO), "create additional thread to backtrace");
445*1b191cb5SApple OSS Distributions T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
446*1b191cb5SApple OSS Distributions }
447*1b191cb5SApple OSS Distributions
T_DECL(backtrace_kernel_pack_unpack,
    "test that a kernel backtrace can be packed and unpacked losslessly",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false))
{
	// The real checks run inside the kernel; this just selects the
	// pack/unpack scenario and expects the sysctl to report success.
	int rc = sysctlbyname("kern.backtrace.kernel_tests", NULL, NULL,
	    (void *)PACK_UNPACK_SCENARIO, 0);
	T_EXPECT_POSIX_SUCCESS(rc,
	    "sysctlbyname(\"kern.backtrace.kernel_tests\", PACK_UNPACK)");
}
457*1b191cb5SApple OSS Distributions
T_DECL(backtrace_kernel_packed,
    "test that a kernel backtrace can be recorded as packed losslessly",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false))
{
	// The real checks run inside the kernel; this just selects the
	// packed-recording scenario and expects the sysctl to report success.
	int rc = sysctlbyname("kern.backtrace.kernel_tests", NULL, NULL,
	    (void *)PACKED_SCENARIO, 0);
	T_EXPECT_POSIX_SUCCESS(rc,
	    "sysctlbyname(\"kern.backtrace.kernel_tests\", PACKED)");
}
467*1b191cb5SApple OSS Distributions
468*1b191cb5SApple OSS Distributions #pragma mark - utilities
469*1b191cb5SApple OSS Distributions
// Busy-loop indefinitely so an external tool can sample this thread's stack;
// never returns.
static void __attribute__((noinline, not_tail_called))
spin_forever(void)
{
	for (;;) {
		continue;
	}
}
477*1b191cb5SApple OSS Distributions
// Verify that the async context slot (`ctx`) sits exactly 8 bytes below the
// frame pointer (`fp`), the layout the kernel's async backtracer expects.
// Exits the process with status 1 on mismatch; deliberately avoids
// darwintest asserts, which would perturb the caller's stack layout.
static void
check_stack(uintptr_t fp, uintptr_t ctx)
{
	uintptr_t gap = fp - ctx;
	if (gap != 0x8) {
		fprintf(stderr, "stack frame is not set up properly: "
		    "%#lx, %#lx is %lx bytes away\n", fp, ctx, fp - ctx);
		exit(1);
	}
}
487*1b191cb5SApple OSS Distributions
// Set up a fake async frame (same layout as backtrace_thread_async) and then
// spin forever, for manual sampling of the async backtrace. The stack layout
// here is load-bearing; do not add locals.
static void __attribute__((noinline, not_tail_called))
spin_backtrace_async(void)
{
	uint64_t *fp = __builtin_frame_address(0);
#if __has_feature(ptrauth_calls)
	// __stack_context_auth was #defined inside backtrace_thread_async above
	// and, as a macro, is still in effect here.
	struct fake_async_context * __stack_context_auth ctx = &level2;
#else // __has_feature(ptrauth_calls)
	/* struct fake_async_context * */uint64_t ctx = (uintptr_t)&level2;
#endif // !__has_feature(ptrauth_calls)
	// Abort (via exit) if the context slot is not right below the saved FP.
	check_stack((uintptr_t)fp, (uintptr_t)&ctx);
	// Tag the saved FP with the async frame marker bit.
	*fp |= (0x1ULL << 60);

	spin_forever();
}
502*1b191cb5SApple OSS Distributions
503*1b191cb5SApple OSS Distributions T_DECL(backtrace_user_async_spin_forever,
504*1b191cb5SApple OSS Distributions "try spinning forever with an async call stack set up",
505*1b191cb5SApple OSS Distributions T_META_ENABLED(false), T_META_CHECK_LEAKS(false),
506*1b191cb5SApple OSS Distributions T_META_ALL_VALID_ARCHS(false))
507*1b191cb5SApple OSS Distributions {
508*1b191cb5SApple OSS Distributions #if !defined(__LP64__)
509*1b191cb5SApple OSS Distributions T_SKIP("unsupported on LP32");
510*1b191cb5SApple OSS Distributions #else // __LP32__
511*1b191cb5SApple OSS Distributions spin_backtrace_async();
512*1b191cb5SApple OSS Distributions #endif // !__LP32__
513*1b191cb5SApple OSS Distributions }
514