xref: /xnu-11417.121.6/tests/backtracing_tests.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1*a1e26a70SApple OSS Distributions // Copyright (c) 2016-2020 Apple Computer, Inc.  All rights reserved.
2*a1e26a70SApple OSS Distributions 
3*a1e26a70SApple OSS Distributions #include <CoreSymbolication/CoreSymbolication.h>
4*a1e26a70SApple OSS Distributions #include <darwintest.h>
5*a1e26a70SApple OSS Distributions #include <dispatch/dispatch.h>
6*a1e26a70SApple OSS Distributions #include <execinfo.h>
7*a1e26a70SApple OSS Distributions #include <pthread.h>
8*a1e26a70SApple OSS Distributions #include <ptrauth.h>
9*a1e26a70SApple OSS Distributions #include <mach/mach.h>
10*a1e26a70SApple OSS Distributions #include <stdalign.h>
11*a1e26a70SApple OSS Distributions #include <sys/mman.h>
12*a1e26a70SApple OSS Distributions #include <sys/sysctl.h>
13*a1e26a70SApple OSS Distributions 
14*a1e26a70SApple OSS Distributions T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
15*a1e26a70SApple OSS Distributions 
// Scenario selectors passed to the kern.backtrace.user sysctl.
enum test_scenario {
	USER_SCENARIO = 0,   // backtrace the calling thread into the buffer
	RESUME_SCENARIO = 1, // resume a backtrace into a smaller buffer
};
20*a1e26a70SApple OSS Distributions 
// Scenario selectors passed to the kern.backtrace.kernel_tests sysctl; the
// actual checks for these run inside the kernel.
enum kernel_test_scenario {
	PACK_UNPACK_SCENARIO = 0, // pack a kernel backtrace and unpack it
	PACKED_SCENARIO = 1,      // record a kernel backtrace as packed
};
25*a1e26a70SApple OSS Distributions 
// Number of stack frames this test controls: backtrace_thread, 10 alternating
// recurse_a/recurse_b frames, and expect_callstack (see user_bt below).
#define USER_FRAMES (12)
// Upper bound on uncontrolled libc/syscall frames between expect_callstack
// and the kernel's stack walk.
#define MAX_SYSCALL_SETUP_FRAMES (3)
// Controlled frames that are not part of the recursion
// (backtrace_thread and expect_callstack).
#define NON_RECURSE_FRAMES (2)
// Frames expected in the fake async backtrace: the two fake async contexts'
// resume functions plus the non-recursive frames.
#define ASYNC_FRAMES (2 + NON_RECURSE_FRAMES)
30*a1e26a70SApple OSS Distributions 
// Expected symbol names for the controlled portion of the stack, ordered
// outermost frame first; expect_frame walks this from the end (innermost).
static const char *user_bt[USER_FRAMES] = {
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "expect_callstack",
};
37*a1e26a70SApple OSS Distributions 
// State carried across expect_frame() calls while walking one backtrace.
struct callstack_exp {
	bool in_syscall_setup;       // still skipping syscall/libc setup frames
	unsigned int syscall_frames; // frame index where the controlled stack began
	const char **callstack;      // expected symbol names, outermost first
	size_t callstack_len;        // number of entries in callstack
	unsigned int nchecked;       // controlled frames verified so far
};
45*a1e26a70SApple OSS Distributions 
// Sign the fake async context fields on arm64e so the kernel unwinder accepts
// them.  NOTE(review): the key/discriminator pairs are presumed to mirror the
// Swift concurrency runtime's AsyncContext signing — verify against the Swift
// runtime if these ever change.
#if __has_feature(ptrauth_calls)
#define __ptrauth_swift_async_context_parent \
  __ptrauth(ptrauth_key_process_independent_data, 1, 0xbda2)
#define __ptrauth_swift_async_context_resume \
  __ptrauth(ptrauth_key_function_pointer, 1, 0xd707)
#else
#define __ptrauth_swift_async_context_parent
#define __ptrauth_swift_async_context_resume
#endif
55*a1e26a70SApple OSS Distributions 
// This struct fakes the Swift AsyncContext struct which is used by
// the Swift concurrency runtime. We only care about the first 2 fields.
struct fake_async_context {
	// Parent (caller) context in the async chain; NULL terminates the chain.
	struct fake_async_context* __ptrauth_swift_async_context_parent next;
	// Resume function; its symbol is what shows up in the async backtrace.
	void(*__ptrauth_swift_async_context_resume resume_pc)(void);
};
62*a1e26a70SApple OSS Distributions 
// Placeholder resume function whose symbol is expected in the async
// backtrace.  Use a proper (void) prototype instead of the obsolescent
// empty parameter list.
static void
level1_func(void)
{
}
// Placeholder resume function whose symbol is expected in the async
// backtrace.  Use a proper (void) prototype instead of the obsolescent
// empty parameter list.
static void
level2_func(void)
{
}
71*a1e26a70SApple OSS Distributions 
// Create a chain of fake async contexts: level2's parent is level1, and
// level1 terminates the chain.
static alignas(16) struct fake_async_context level1 = { 0, level1_func };
static alignas(16) struct fake_async_context level2 = { &level1, level2_func };
75*a1e26a70SApple OSS Distributions 
// Expected symbols for the async backtrace: the fake contexts' resume
// functions followed by the thread's real frames, outermost first.
static const char *async_bt[ASYNC_FRAMES] = {
	"level1_func", "level2_func", "backtrace_thread_async",
	"expect_async_callstack",
};
80*a1e26a70SApple OSS Distributions 
81*a1e26a70SApple OSS Distributions static void
expect_frame(struct callstack_exp * cs,CSSymbolRef symbol,unsigned long addr,unsigned int bt_idx)82*a1e26a70SApple OSS Distributions expect_frame(struct callstack_exp *cs, CSSymbolRef symbol,
83*a1e26a70SApple OSS Distributions     unsigned long addr, unsigned int bt_idx)
84*a1e26a70SApple OSS Distributions {
85*a1e26a70SApple OSS Distributions 	if (CSIsNull(symbol)) {
86*a1e26a70SApple OSS Distributions 		if (!cs->in_syscall_setup) {
87*a1e26a70SApple OSS Distributions 			T_FAIL("invalid symbol for address %#lx at frame %d", addr,
88*a1e26a70SApple OSS Distributions 			    bt_idx);
89*a1e26a70SApple OSS Distributions 		}
90*a1e26a70SApple OSS Distributions 		return;
91*a1e26a70SApple OSS Distributions 	}
92*a1e26a70SApple OSS Distributions 
93*a1e26a70SApple OSS Distributions 	const char *name = CSSymbolGetName(symbol);
94*a1e26a70SApple OSS Distributions 	if (name) {
95*a1e26a70SApple OSS Distributions 		if (cs->in_syscall_setup) {
96*a1e26a70SApple OSS Distributions 			if (strcmp(name, cs->callstack[cs->callstack_len - 1]) == 0) {
97*a1e26a70SApple OSS Distributions 				cs->in_syscall_setup = false;
98*a1e26a70SApple OSS Distributions 				cs->syscall_frames = bt_idx;
99*a1e26a70SApple OSS Distributions 				T_LOG("found start of controlled stack at frame %u, expected "
100*a1e26a70SApple OSS Distributions 				    "index %zu", cs->syscall_frames, cs->callstack_len - 1);
101*a1e26a70SApple OSS Distributions 			} else {
102*a1e26a70SApple OSS Distributions 				T_LOG("found syscall setup symbol %s at frame %u", name,
103*a1e26a70SApple OSS Distributions 				    bt_idx);
104*a1e26a70SApple OSS Distributions 			}
105*a1e26a70SApple OSS Distributions 		}
106*a1e26a70SApple OSS Distributions 		if (!cs->in_syscall_setup) {
107*a1e26a70SApple OSS Distributions 			if (cs->nchecked >= cs->callstack_len) {
108*a1e26a70SApple OSS Distributions 				T_LOG("frame %2u: skipping system frame %s", bt_idx, name);
109*a1e26a70SApple OSS Distributions 			} else {
110*a1e26a70SApple OSS Distributions 				size_t frame_idx = cs->callstack_len - cs->nchecked - 1;
111*a1e26a70SApple OSS Distributions 				T_EXPECT_EQ_STR(name, cs->callstack[frame_idx],
112*a1e26a70SApple OSS Distributions 				    "frame %2zu: saw '%s', expected '%s'",
113*a1e26a70SApple OSS Distributions 				    frame_idx, name, cs->callstack[frame_idx]);
114*a1e26a70SApple OSS Distributions 			}
115*a1e26a70SApple OSS Distributions 			cs->nchecked++;
116*a1e26a70SApple OSS Distributions 		}
117*a1e26a70SApple OSS Distributions 	} else {
118*a1e26a70SApple OSS Distributions 		if (!cs->in_syscall_setup) {
119*a1e26a70SApple OSS Distributions 			T_ASSERT_NOTNULL(name, NULL, "symbol should not be NULL");
120*a1e26a70SApple OSS Distributions 		}
121*a1e26a70SApple OSS Distributions 	}
122*a1e26a70SApple OSS Distributions }
123*a1e26a70SApple OSS Distributions 
124*a1e26a70SApple OSS Distributions static bool
is_kernel_64_bit(void)125*a1e26a70SApple OSS Distributions is_kernel_64_bit(void)
126*a1e26a70SApple OSS Distributions {
127*a1e26a70SApple OSS Distributions 	static dispatch_once_t k64_once;
128*a1e26a70SApple OSS Distributions 	static bool k64 = false;
129*a1e26a70SApple OSS Distributions 	dispatch_once(&k64_once, ^{
130*a1e26a70SApple OSS Distributions 		int errb;
131*a1e26a70SApple OSS Distributions 		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ };
132*a1e26a70SApple OSS Distributions 
133*a1e26a70SApple OSS Distributions 		struct kinfo_proc kp;
134*a1e26a70SApple OSS Distributions 		size_t len = sizeof(kp);
135*a1e26a70SApple OSS Distributions 
136*a1e26a70SApple OSS Distributions 		errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len, NULL, 0);
137*a1e26a70SApple OSS Distributions 		T_QUIET; T_ASSERT_POSIX_SUCCESS(errb,
138*a1e26a70SApple OSS Distributions 		"sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");
139*a1e26a70SApple OSS Distributions 
140*a1e26a70SApple OSS Distributions 		k64 = kp.kp_proc.p_flag & P_LP64;
141*a1e26a70SApple OSS Distributions 		T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32");
142*a1e26a70SApple OSS Distributions 	});
143*a1e26a70SApple OSS Distributions 	return k64;
144*a1e26a70SApple OSS Distributions }
145*a1e26a70SApple OSS Distributions 
// Use an extra, non-inlineable function so that any frames after expect_stack
// can be safely ignored.  This insulates the test from changes in how syscalls
// are called by Libc and the kernel.
static void __attribute__((noinline, not_tail_called))
backtrace_current_thread_wrapper(enum test_scenario scenario, uint64_t *bt,
    size_t *bt_filled)
{
	// NOTE(review): the scenario is passed through the sysctl's `newlen`
	// argument with a NULL `newp`; the kernel handler presumably decodes it
	// from there — confirm against the kernel side before changing this.
	int ret = sysctlbyname("kern.backtrace.user", bt, bt_filled, NULL,
	    scenario);
	getpid(); // Really prevent tail calls.
	// The sysctl only exists on development kernels.
	if (ret == -1 && errno == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
	}
	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")");
	T_LOG("kernel returned %zu frame backtrace", *bt_filled);
}
162*a1e26a70SApple OSS Distributions 
163*a1e26a70SApple OSS Distributions static CSSymbolicatorRef
get_symbolicator(void)164*a1e26a70SApple OSS Distributions get_symbolicator(void)
165*a1e26a70SApple OSS Distributions {
166*a1e26a70SApple OSS Distributions 	static CSSymbolicatorRef user_symb;
167*a1e26a70SApple OSS Distributions 	static dispatch_once_t expect_stack_once;
168*a1e26a70SApple OSS Distributions 	dispatch_once(&expect_stack_once, ^{
169*a1e26a70SApple OSS Distributions 		user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
170*a1e26a70SApple OSS Distributions 		T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
171*a1e26a70SApple OSS Distributions 		T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
172*a1e26a70SApple OSS Distributions 	});
173*a1e26a70SApple OSS Distributions 	return user_symb;
174*a1e26a70SApple OSS Distributions }
175*a1e26a70SApple OSS Distributions 
176*a1e26a70SApple OSS Distributions static void __attribute__((noinline, not_tail_called))
expect_callstack(enum test_scenario scenario)177*a1e26a70SApple OSS Distributions expect_callstack(enum test_scenario scenario)
178*a1e26a70SApple OSS Distributions {
179*a1e26a70SApple OSS Distributions 	uint64_t bt[USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 };
180*a1e26a70SApple OSS Distributions 
181*a1e26a70SApple OSS Distributions 	CSSymbolicatorRef user_symb = get_symbolicator();
182*a1e26a70SApple OSS Distributions 	size_t bt_filled = USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES;
183*a1e26a70SApple OSS Distributions 	backtrace_current_thread_wrapper(scenario, bt, &bt_filled);
184*a1e26a70SApple OSS Distributions 
185*a1e26a70SApple OSS Distributions 	unsigned int bt_len = (unsigned int)bt_filled;
186*a1e26a70SApple OSS Distributions 	T_EXPECT_GE(bt_len, (unsigned int)USER_FRAMES,
187*a1e26a70SApple OSS Distributions 	    "at least %u frames should be present in backtrace", USER_FRAMES);
188*a1e26a70SApple OSS Distributions 	T_EXPECT_LE(bt_len, (unsigned int)USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES,
189*a1e26a70SApple OSS Distributions 	    "at most %u frames should be present in backtrace",
190*a1e26a70SApple OSS Distributions 	    USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES);
191*a1e26a70SApple OSS Distributions 
192*a1e26a70SApple OSS Distributions 	struct callstack_exp callstack = {
193*a1e26a70SApple OSS Distributions 		.in_syscall_setup = true,
194*a1e26a70SApple OSS Distributions 		.syscall_frames = 0,
195*a1e26a70SApple OSS Distributions 		.callstack = user_bt,
196*a1e26a70SApple OSS Distributions 		.callstack_len = USER_FRAMES,
197*a1e26a70SApple OSS Distributions 		.nchecked = 0,
198*a1e26a70SApple OSS Distributions 	};
199*a1e26a70SApple OSS Distributions 	for (unsigned int i = 0; i < bt_len; i++) {
200*a1e26a70SApple OSS Distributions 		uintptr_t addr;
201*a1e26a70SApple OSS Distributions #if !defined(__LP64__)
202*a1e26a70SApple OSS Distributions 		// Backtrace frames come out as kernel words; convert them back to user
203*a1e26a70SApple OSS Distributions 		// uintptr_t for 32-bit processes.
204*a1e26a70SApple OSS Distributions 		if (is_kernel_64_bit()) {
205*a1e26a70SApple OSS Distributions 			addr = (uintptr_t)(bt[i]);
206*a1e26a70SApple OSS Distributions 		} else {
207*a1e26a70SApple OSS Distributions 			addr = (uintptr_t)(((uint32_t *)bt)[i]);
208*a1e26a70SApple OSS Distributions 		}
209*a1e26a70SApple OSS Distributions #else // defined(__LP32__)
210*a1e26a70SApple OSS Distributions 		addr = (uintptr_t)bt[i];
211*a1e26a70SApple OSS Distributions #endif // defined(__LP32__)
212*a1e26a70SApple OSS Distributions 
213*a1e26a70SApple OSS Distributions 		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
214*a1e26a70SApple OSS Distributions 			user_symb, addr, kCSNow);
215*a1e26a70SApple OSS Distributions 		expect_frame(&callstack, symbol, addr, i);
216*a1e26a70SApple OSS Distributions 	}
217*a1e26a70SApple OSS Distributions 
218*a1e26a70SApple OSS Distributions 	T_EXPECT_GE(callstack.nchecked, USER_FRAMES,
219*a1e26a70SApple OSS Distributions 	    "checked enough frames for correct symbols");
220*a1e26a70SApple OSS Distributions }
221*a1e26a70SApple OSS Distributions 
// Mutually recursive helpers that build a recognizable stack of alternating
// frames before taking a backtrace.  Keep parameter names in the prototypes
// so they document themselves.
static int __attribute__((noinline, not_tail_called))
recurse_a(enum test_scenario scenario, unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(enum test_scenario scenario, unsigned int frames);
226*a1e26a70SApple OSS Distributions 
227*a1e26a70SApple OSS Distributions static int __attribute__((noinline, not_tail_called))
recurse_a(enum test_scenario scenario,unsigned int frames)228*a1e26a70SApple OSS Distributions recurse_a(enum test_scenario scenario, unsigned int frames)
229*a1e26a70SApple OSS Distributions {
230*a1e26a70SApple OSS Distributions 	if (frames == 1) {
231*a1e26a70SApple OSS Distributions 		expect_callstack(scenario);
232*a1e26a70SApple OSS Distributions 		getpid(); // Really prevent tail calls.
233*a1e26a70SApple OSS Distributions 		return 0;
234*a1e26a70SApple OSS Distributions 	}
235*a1e26a70SApple OSS Distributions 
236*a1e26a70SApple OSS Distributions 	return recurse_b(scenario, frames - 1) + 1;
237*a1e26a70SApple OSS Distributions }
238*a1e26a70SApple OSS Distributions 
239*a1e26a70SApple OSS Distributions static int __attribute__((noinline, not_tail_called))
recurse_b(enum test_scenario scenario,unsigned int frames)240*a1e26a70SApple OSS Distributions recurse_b(enum test_scenario scenario, unsigned int frames)
241*a1e26a70SApple OSS Distributions {
242*a1e26a70SApple OSS Distributions 	if (frames == 1) {
243*a1e26a70SApple OSS Distributions 		expect_callstack(scenario);
244*a1e26a70SApple OSS Distributions 		getpid(); // Really prevent tail calls.
245*a1e26a70SApple OSS Distributions 		return 0;
246*a1e26a70SApple OSS Distributions 	}
247*a1e26a70SApple OSS Distributions 
248*a1e26a70SApple OSS Distributions 	return recurse_a(scenario, frames - 1) + 1;
249*a1e26a70SApple OSS Distributions }
250*a1e26a70SApple OSS Distributions 
251*a1e26a70SApple OSS Distributions static void __attribute__((noinline, not_tail_called))
expect_async_callstack(void)252*a1e26a70SApple OSS Distributions expect_async_callstack(void)
253*a1e26a70SApple OSS Distributions {
254*a1e26a70SApple OSS Distributions 	uint64_t bt[ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 };
255*a1e26a70SApple OSS Distributions 
256*a1e26a70SApple OSS Distributions 	CSSymbolicatorRef user_symb = get_symbolicator();
257*a1e26a70SApple OSS Distributions 	size_t bt_filled = ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES;
258*a1e26a70SApple OSS Distributions 	backtrace_current_thread_wrapper(USER_SCENARIO, bt, &bt_filled);
259*a1e26a70SApple OSS Distributions 
260*a1e26a70SApple OSS Distributions 	unsigned int bt_len = (unsigned int)bt_filled;
261*a1e26a70SApple OSS Distributions 	T_EXPECT_GE(bt_len, (unsigned int)ASYNC_FRAMES,
262*a1e26a70SApple OSS Distributions 	    "at least %u frames should be present in backtrace", ASYNC_FRAMES);
263*a1e26a70SApple OSS Distributions 	T_EXPECT_LE(bt_len, (unsigned int)ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES,
264*a1e26a70SApple OSS Distributions 	    "at most %u frames should be present in backtrace",
265*a1e26a70SApple OSS Distributions 	    ASYNC_FRAMES + MAX_SYSCALL_SETUP_FRAMES);
266*a1e26a70SApple OSS Distributions 
267*a1e26a70SApple OSS Distributions 	struct callstack_exp callstack = {
268*a1e26a70SApple OSS Distributions 		.in_syscall_setup = true,
269*a1e26a70SApple OSS Distributions 		.syscall_frames = 0,
270*a1e26a70SApple OSS Distributions 		.callstack = async_bt,
271*a1e26a70SApple OSS Distributions 		.callstack_len = ASYNC_FRAMES,
272*a1e26a70SApple OSS Distributions 		.nchecked = 0,
273*a1e26a70SApple OSS Distributions 	};
274*a1e26a70SApple OSS Distributions 	for (unsigned int i = 0; i < bt_len; i++) {
275*a1e26a70SApple OSS Distributions 		uintptr_t addr;
276*a1e26a70SApple OSS Distributions #if !defined(__LP64__)
277*a1e26a70SApple OSS Distributions 		// Backtrace frames come out as kernel words; convert them back to user
278*a1e26a70SApple OSS Distributions 		// uintptr_t for 32-bit processes.
279*a1e26a70SApple OSS Distributions 		if (is_kernel_64_bit()) {
280*a1e26a70SApple OSS Distributions 			addr = (uintptr_t)(bt[i]);
281*a1e26a70SApple OSS Distributions 		} else {
282*a1e26a70SApple OSS Distributions 			addr = (uintptr_t)(((uint32_t *)bt)[i]);
283*a1e26a70SApple OSS Distributions 		}
284*a1e26a70SApple OSS Distributions #else // defined(__LP32__)
285*a1e26a70SApple OSS Distributions 		addr = (uintptr_t)bt[i];
286*a1e26a70SApple OSS Distributions #endif // defined(__LP32__)
287*a1e26a70SApple OSS Distributions 
288*a1e26a70SApple OSS Distributions 		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
289*a1e26a70SApple OSS Distributions 			user_symb, addr, kCSNow);
290*a1e26a70SApple OSS Distributions 		expect_frame(&callstack, symbol, addr, i);
291*a1e26a70SApple OSS Distributions 	}
292*a1e26a70SApple OSS Distributions 
293*a1e26a70SApple OSS Distributions 	T_EXPECT_GE(callstack.nchecked, ASYNC_FRAMES,
294*a1e26a70SApple OSS Distributions 	    "checked enough frames for correct symbols");
295*a1e26a70SApple OSS Distributions }
296*a1e26a70SApple OSS Distributions 
// Set up a fake Swift async frame on this thread's stack, then ask the kernel
// for a backtrace that should follow the async context chain
// (level2 -> level1).  The body is extremely layout-sensitive: do not add
// locals or darwintest macros before the marker is set.
static void *
backtrace_thread_async(void * __unused arg)
{
	uint64_t *fp = __builtin_frame_address(0);
	// We cannot use a variable of pointer type, because this ABI is valid
	// on arm64_32 where pointers are 32bits, but the context pointer will
	// still be stored in a 64bits slot on the stack.
#if __has_feature(ptrauth_calls)
#define __stack_context_auth __ptrauth(ptrauth_key_process_dependent_data, 1, \
	        0xc31a)
	struct fake_async_context * __stack_context_auth ctx = &level2;
#else // __has_feature(ptrauth_calls)
	/* struct fake_async_context * */uint64_t ctx  = (uintptr_t)&level2;
#endif // !__has_feature(ptrauth_calls)

	// The signature of an async frame on the OS stack is:
	// [ <AsyncContext address>, <Saved FP | (1<<60)>, <return address> ]
	// The Async context must be right before the saved FP on the stack. This
	// should happen naturally in an optimized build as it is the only
	// variable on the stack.
	// This function cannot use T_ASSERT_* because it changes the stack
	// layout.
	assert((uintptr_t)fp - (uintptr_t)&ctx == 8);

	// Modify the saved FP on the stack to include the async frame marker
	*fp |= (0x1ULL << 60);
	expect_async_callstack();
	return NULL;
}
326*a1e26a70SApple OSS Distributions 
327*a1e26a70SApple OSS Distributions static void *
backtrace_thread(void * arg)328*a1e26a70SApple OSS Distributions backtrace_thread(void *arg)
329*a1e26a70SApple OSS Distributions {
330*a1e26a70SApple OSS Distributions 	unsigned int calls;
331*a1e26a70SApple OSS Distributions 	enum test_scenario scenario = (enum test_scenario)arg;
332*a1e26a70SApple OSS Distributions 
333*a1e26a70SApple OSS Distributions 	// backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
334*a1e26a70SApple OSS Distributions 	//
335*a1e26a70SApple OSS Distributions 	// Always make one less call for this frame (backtrace_thread).
336*a1e26a70SApple OSS Distributions 	calls = USER_FRAMES - NON_RECURSE_FRAMES;
337*a1e26a70SApple OSS Distributions 
338*a1e26a70SApple OSS Distributions 	T_LOG("backtrace thread calling into %d frames (already at %d frames)",
339*a1e26a70SApple OSS Distributions 	    calls, NON_RECURSE_FRAMES);
340*a1e26a70SApple OSS Distributions 	(void)recurse_a(scenario, calls);
341*a1e26a70SApple OSS Distributions 	return NULL;
342*a1e26a70SApple OSS Distributions }
343*a1e26a70SApple OSS Distributions 
// Basic test: the kernel should be able to walk this process's user stack
// from a live thread with a known, controlled callstack.
T_DECL(backtrace_user, "test that the kernel can backtrace user stacks",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true), T_META_TAG_VM_PREFERRED)
{
	pthread_t thread;

	// Run the test from a different thread to insulate it from libdarwintest
	// setup.
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
	    (void *)USER_SCENARIO), "create additional thread to backtrace");

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
}
356*a1e26a70SApple OSS Distributions 
357*a1e26a70SApple OSS Distributions T_DECL(backtrace_user_bounds,
358*a1e26a70SApple OSS Distributions     "test that the kernel doesn't write frames out of expected bounds", T_META_TAG_VM_PREFERRED)
359*a1e26a70SApple OSS Distributions {
360*a1e26a70SApple OSS Distributions 	uint64_t bt_init[USER_FRAMES] = {};
361*a1e26a70SApple OSS Distributions 	size_t bt_filled = USER_FRAMES, bt_filled_after = 0;
362*a1e26a70SApple OSS Distributions 	int error = 0;
363*a1e26a70SApple OSS Distributions 	kern_return_t kr = KERN_FAILURE;
364*a1e26a70SApple OSS Distributions 	void *bt_page = NULL;
365*a1e26a70SApple OSS Distributions 	void *guard_page = NULL;
366*a1e26a70SApple OSS Distributions 	void *bt_start = NULL;
367*a1e26a70SApple OSS Distributions 
368*a1e26a70SApple OSS Distributions 	// The backtrace addresses come back as kernel words.
369*a1e26a70SApple OSS Distributions 	size_t kword_size = is_kernel_64_bit() ? 8 : 4;
370*a1e26a70SApple OSS Distributions 
371*a1e26a70SApple OSS Distributions 	// Get an idea of how many frames to expect.
372*a1e26a70SApple OSS Distributions 	int ret = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL, 0);
373*a1e26a70SApple OSS Distributions 	if (ret == -1 && errno == ENOENT) {
374*a1e26a70SApple OSS Distributions 		T_SKIP("release kernel: kern.backtrace.user missing");
375*a1e26a70SApple OSS Distributions 	}
376*a1e26a70SApple OSS Distributions 	T_ASSERT_POSIX_SUCCESS(error, "sysctlbyname(\"kern.backtrace.user\")");
377*a1e26a70SApple OSS Distributions 
378*a1e26a70SApple OSS Distributions 	// Allocate two pages -- a first one that's valid and a second that
379*a1e26a70SApple OSS Distributions 	// will be non-writeable to catch a copyout that's too large.
380*a1e26a70SApple OSS Distributions 	bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE,
381*a1e26a70SApple OSS Distributions 	    MAP_ANON | MAP_PRIVATE, -1, 0);
382*a1e26a70SApple OSS Distributions 	T_WITH_ERRNO;
383*a1e26a70SApple OSS Distributions 	T_ASSERT_NE(bt_page, MAP_FAILED, "allocated backtrace pages");
384*a1e26a70SApple OSS Distributions 	guard_page = (char *)bt_page + vm_page_size;
385*a1e26a70SApple OSS Distributions 
386*a1e26a70SApple OSS Distributions 	error = mprotect(guard_page, vm_page_size, PROT_READ);
387*a1e26a70SApple OSS Distributions 	T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page");
388*a1e26a70SApple OSS Distributions 
389*a1e26a70SApple OSS Distributions 	// Ensure the pages are set up as expected.
390*a1e26a70SApple OSS Distributions 	kr = vm_write(mach_task_self(), (vm_address_t)bt_page,
391*a1e26a70SApple OSS Distributions 	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
392*a1e26a70SApple OSS Distributions 	T_ASSERT_MACH_SUCCESS(kr,
393*a1e26a70SApple OSS Distributions 	    "should succeed in writing to backtrace page");
394*a1e26a70SApple OSS Distributions 	kr = vm_write(mach_task_self(), (vm_address_t)guard_page,
395*a1e26a70SApple OSS Distributions 	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
396*a1e26a70SApple OSS Distributions 	T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page");
397*a1e26a70SApple OSS Distributions 
398*a1e26a70SApple OSS Distributions 	// Ask the kernel to write the backtrace just before the guard page.
399*a1e26a70SApple OSS Distributions 	bt_start = (char *)guard_page - (kword_size * bt_filled);
400*a1e26a70SApple OSS Distributions 	bt_filled_after = bt_filled;
401*a1e26a70SApple OSS Distributions 
402*a1e26a70SApple OSS Distributions 	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
403*a1e26a70SApple OSS Distributions 	    NULL, 0);
404*a1e26a70SApple OSS Distributions 	T_EXPECT_POSIX_SUCCESS(error,
405*a1e26a70SApple OSS Distributions 	    "sysctlbyname(\"kern.backtrace.user\") just before guard page");
406*a1e26a70SApple OSS Distributions 	T_EXPECT_EQ(bt_filled, bt_filled_after,
407*a1e26a70SApple OSS Distributions 	    "both calls to backtrace should have filled in the same number of "
408*a1e26a70SApple OSS Distributions 	    "frames");
409*a1e26a70SApple OSS Distributions 
410*a1e26a70SApple OSS Distributions 	// Expect the kernel to fault when writing too far.
411*a1e26a70SApple OSS Distributions 	bt_start = (char *)bt_start + 1;
412*a1e26a70SApple OSS Distributions 	bt_filled_after = bt_filled;
413*a1e26a70SApple OSS Distributions 	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
414*a1e26a70SApple OSS Distributions 	    (void *)USER_SCENARIO, 0);
415*a1e26a70SApple OSS Distributions 	T_EXPECT_POSIX_FAILURE(error, EFAULT,
416*a1e26a70SApple OSS Distributions 	    "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "
417*a1e26a70SApple OSS Distributions 	    "guard page");
418*a1e26a70SApple OSS Distributions }
419*a1e26a70SApple OSS Distributions 
// Check that the kernel can follow a (fake) Swift async context chain while
// backtracing a user thread; only supported on 64-bit processes.
T_DECL(backtrace_user_async,
    "test that the kernel can backtrace user async stacks",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false), T_META_TAG_VM_PREFERRED)
{
#if !defined(__LP64__)
	T_SKIP("unsupported on LP32");
#else // !defined(__LP64__)
	pthread_t thread;
	// Run the test from a different thread to insulate it from libdarwintest
	// setup.
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL,
	    backtrace_thread_async, NULL),
	    "create additional thread to backtrace");

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
#endif // defined(__LP64__)
}
437*a1e26a70SApple OSS Distributions 
// Exercise the RESUME_SCENARIO path: the kernel backtraces into a buffer,
// then resumes the walk into a smaller one.
T_DECL(backtrace_user_resume,
    "test that the kernel can resume a backtrace into a smaller buffer",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false), T_META_TAG_VM_PREFERRED)
{
	// Run from a separate thread so the stack below the test is controlled.
	pthread_t thread;
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
	    (void *)RESUME_SCENARIO), "create additional thread to backtrace");
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
}
447*a1e26a70SApple OSS Distributions 
// Drive the in-kernel pack/unpack round-trip test; the actual checks run
// kernel-side, so only the sysctl's success is asserted here.
T_DECL(backtrace_kernel_pack_unpack,
    "test that a kernel backtrace can be packed and unpacked losslessly",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false), T_META_TAG_VM_PREFERRED)
{
	int error = sysctlbyname("kern.backtrace.kernel_tests", NULL, NULL,
	    (void *)PACK_UNPACK_SCENARIO, 0);
	T_EXPECT_POSIX_SUCCESS(error,
	    "sysctlbyname(\"kern.backtrace.kernel_tests\", PACK_UNPACK)");
}
457*a1e26a70SApple OSS Distributions 
// Drive the in-kernel packed-recording test; the actual checks run
// kernel-side, so only the sysctl's success is asserted here.
T_DECL(backtrace_kernel_packed,
    "test that a kernel backtrace can be recorded as packed losslessly",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(false), T_META_TAG_VM_PREFERRED)
{
	int error = sysctlbyname("kern.backtrace.kernel_tests", NULL, NULL,
	    (void *)PACKED_SCENARIO, 0);
	T_EXPECT_POSIX_SUCCESS(error,
	    "sysctlbyname(\"kern.backtrace.kernel_tests\", PACKED)");
}
467*a1e26a70SApple OSS Distributions 
468*a1e26a70SApple OSS Distributions #pragma mark - utilities
469*a1e26a70SApple OSS Distributions 
// Busy-loop forever so an external tool can sample this thread's stack at
// leisure.  Never returns.
static void __attribute__((noinline, not_tail_called))
spin_forever(void)
{
	for (;;) {
	}
}
477*a1e26a70SApple OSS Distributions 
// Bail out of the process (without darwintest macros, which would perturb the
// caller's stack layout) unless the async context slot sits exactly 8 bytes
// below the frame pointer.
static void
check_stack(uintptr_t fp, uintptr_t ctx)
{
	uintptr_t gap = fp - ctx;
	if (gap != 0x8) {
		fprintf(stderr, "stack frame is not set up properly: "
		    "%#lx, %#lx is %lx bytes away\n", fp, ctx, fp - ctx);
		exit(1);
	}
}
487*a1e26a70SApple OSS Distributions 
// Like backtrace_thread_async: set up a fake async frame, but then spin
// forever so the stack can be sampled externally.  Never returns.  Relies on
// __stack_context_auth being defined earlier in this translation unit, and on
// ctx being the only local so it lands right below the saved FP.
static void __attribute__((noinline, not_tail_called))
spin_backtrace_async(void)
{
	uint64_t *fp = __builtin_frame_address(0);
#if __has_feature(ptrauth_calls)
	struct fake_async_context * __stack_context_auth ctx = &level2;
#else // __has_feature(ptrauth_calls)
	/* struct fake_async_context * */uint64_t ctx  = (uintptr_t)&level2;
#endif // !__has_feature(ptrauth_calls)
	// check_stack exits the process on a bad layout instead of using
	// T_ASSERT_*, which would itself change the stack layout.
	check_stack((uintptr_t)fp, (uintptr_t)&ctx);
	// Tag the saved FP with the async frame marker (bit 60).
	*fp |= (0x1ULL << 60);

	spin_forever();
}
502*a1e26a70SApple OSS Distributions 
// Manual/diagnostic test: spin forever with an async frame set up so the
// stack can be inspected externally.  Disabled by default via
// T_META_ENABLED(false); 64-bit only.
T_DECL(backtrace_user_async_spin_forever,
    "try spinning forever with an async call stack set up",
    T_META_ENABLED(false), T_META_CHECK_LEAKS(false),
    T_META_ALL_VALID_ARCHS(false), T_META_TAG_VM_PREFERRED)
{
#if !defined(__LP64__)
	T_SKIP("unsupported on LP32");
#else // !defined(__LP64__)
	spin_backtrace_async();
#endif // defined(__LP64__)
}
514