/* xref: /xnu-10002.81.5/tests/stackshot_idle_25570396.m (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587) */
/* This program tests that kThreadIdleWorker is being set properly, so
 * that idle and active threads can be appropriately identified.
 */

#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <kdd.h>
#include <kern/kcdata.h>
#include <kern/debug.h>
#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <pthread.h>
#include <sys/stackshot.h>
#include <stdlib.h>
#include <unistd.h>

#include <Foundation/Foundation.h>

#define NUMRETRIES  5  // number of times to retry a stackshot
#define NUMENQUEUES 16 // number of blocking jobs to enqueue
#define NUMTHREADS  (NUMENQUEUES + 2) // total number of threads (including numenqueues)

T_GLOBAL_META(
        T_META_NAMESPACE("xnu.stackshot"),
        T_META_RADAR_COMPONENT_NAME("xnu"),
        T_META_RADAR_COMPONENT_VERSION("stackshot"),
        T_META_OWNER("jonathan_w_adams"),
        T_META_ASROOT(true)
);

/* Cleared by the main test body to tell every spinning thread/job to exit. */
static volatile int spin_threads = 1;

/* Take a kcdata-format stackshot of the current process, optionally as a
 * delta snapshot against since_timestamp (0 means a full snapshot).
 * Retries up to NUMRETRIES times when the kernel reports EBUSY/ETIMEDOUT.
 * Returns the stackshot config; caller frees it with stackshot_config_dealloc().
 */
static void *
take_stackshot(uint32_t extra_flags, uint64_t since_timestamp)
{
	void * stackshot;
	int ret, retries;
	uint32_t stackshot_flags = STACKSHOT_SAVE_LOADINFO |
					STACKSHOT_GET_GLOBAL_MEM_STATS |
					STACKSHOT_SAVE_IMP_DONATION_PIDS |
					STACKSHOT_KCDATA_FORMAT;

	if (since_timestamp != 0)
		stackshot_flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;

	stackshot_flags |= extra_flags;

	stackshot = stackshot_config_create();
	T_ASSERT_NOTNULL(stackshot, "Allocating stackshot config");

	ret = stackshot_config_set_flags(stackshot, stackshot_flags);
	T_ASSERT_POSIX_ZERO(ret, "Setting flags on stackshot config");

	ret = stackshot_config_set_pid(stackshot, getpid());
	T_ASSERT_POSIX_ZERO(ret, "Setting target pid on stackshot config");

	if (since_timestamp != 0) {
		ret = stackshot_config_set_delta_timestamp(stackshot, since_timestamp);
		T_ASSERT_POSIX_ZERO(ret, "Setting prev snapshot time on stackshot config");
	}

	for (retries = NUMRETRIES; retries > 0; retries--) {
		ret = stackshot_capture_with_config(stackshot);
		T_ASSERT_TRUE(ret == 0 || ret == EBUSY || ret == ETIMEDOUT, "Attempting to take stackshot (error %d)...", ret);
		if (ret == 0)
			break;
		/* BUG FIX: this used to test `retries == 0`, which can never be
		 * true inside a loop guarded by `retries > 0`, so exhausting the
		 * retries silently returned a failed capture.  Fail on the final
		 * attempt instead, and pass the retry count (not errno) to %d. */
		if (retries == 1 && (ret == EBUSY || ret == ETIMEDOUT))
			T_ASSERT_FAIL("Failed to take stackshot after %d retries: %s", NUMRETRIES, strerror(ret));
	}
	return stackshot;
}
/* Return the KCDATA_TYPE_MACH_ABSOLUTE_TIME value recorded in a captured
 * stackshot; used later as the "since" timestamp for delta snapshots. */
static uint64_t get_stackshot_timestamp(void * stackshot)
{
	void *ssbuf;
	uint32_t sslen, begin_type;
	kcdata_iter_t iter;

	ssbuf = stackshot_config_get_stackshot_buffer(stackshot);
	T_ASSERT_NOTNULL(ssbuf, "Getting stackshot buffer");
	sslen = stackshot_config_get_stackshot_size(stackshot);

	iter = kcdata_iter(ssbuf, sslen);
	begin_type = kcdata_iter_type(iter);
	T_ASSERT_TRUE(begin_type == KCDATA_BUFFER_BEGIN_STACKSHOT || begin_type == KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
		"Making sure stackshot data begins with \"begin\" flag");

	iter = kcdata_iter_find_type(iter, KCDATA_TYPE_MACH_ABSOLUTE_TIME);
	T_ASSERT_TRUE(kcdata_iter_valid(iter), "Getting stackshot timestamp");

	return *(uint64_t *)kcdata_iter_payload(iter);
}
/* Walk each task container in a stackshot and count, over all of its thread
 * snapshots, how many threads carry kThreadIdleWorker (*num_idles) versus
 * not (*num_nonidles). */
static void
get_thread_statuses(void * stackshot, int * num_idles, int * num_nonidles)
{
	void *ssbuf;
	uint32_t sslen;
	NSError *error = nil;
	NSMutableDictionary *container, *threads;

	*num_idles = 0;
	*num_nonidles = 0;

	ssbuf = stackshot_config_get_stackshot_buffer(stackshot);
	T_ASSERT_NOTNULL(ssbuf, "Getting stackshot buffer");
	sslen = stackshot_config_get_stackshot_size(stackshot);

	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_TRUE(kcdata_iter_type(iter) == KCDATA_BUFFER_BEGIN_STACKSHOT ||
			kcdata_iter_type(iter) == KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
			"Checking start of stackshot buffer");

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter)
	{
		/* Only task containers hold thread snapshots. */
		if (kcdata_iter_type(iter) != KCDATA_TYPE_CONTAINER_BEGIN ||
				kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_TASK) {
			continue;
		}

		container = parseKCDataContainer(&iter, &error);
		T_ASSERT_TRUE(container && !error, "Parsing container");

		threads = container[@"task_snapshots"][@"thread_snapshots"];
		for (id th_key in threads) {
			/* classify this thread by its idle-worker flag */
			uint64_t ss_flags = [threads[th_key][@"thread_snapshot"][@"ths_ss_flags"] unsignedLongLongValue];
			if (ss_flags & kThreadIdleWorker) {
				(*num_idles)++;
			} else {
				(*num_nonidles)++;
			}
		}
		[container release];
	}
}

/* Dispatch NUMENQUEUES jobs to a concurrent queue that immediately wait on a
 * shared semaphore. This should spin up plenty of threads! */
static void
warm_up_threadpool(dispatch_queue_t q)
{
	dispatch_semaphore_t thread_wait = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(thread_wait, "Initializing work queue semaphore");
	dispatch_semaphore_t main_wait = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(main_wait, "Initializing main thread semaphore");

	/* Park NUMENQUEUES workers on the shared semaphore. */
	for (int job = 0; job < NUMENQUEUES; job++) {
		dispatch_async(q, ^{
			dispatch_semaphore_wait(thread_wait, DISPATCH_TIME_FOREVER);
			dispatch_semaphore_signal(main_wait);
		});
	}

	sleep(1); // give worker threads enough time to block

	/* Wake each worker and wait for it to acknowledge. */
	for (int job = 0; job < NUMENQUEUES; job++) {
		dispatch_semaphore_signal(thread_wait);
		dispatch_semaphore_wait(main_wait, DISPATCH_TIME_FOREVER);
	}

	dispatch_release(thread_wait);
	dispatch_release(main_wait);

	// Give enough time for worker threads to go idle again
	sleep(1);
}

/* Dispatch NUMENQUEUES jobs to a concurrent queue that spin in a tight loop.
 * Isn't guaranteed to occupy every worker thread, but it's enough so
 * that a thread will go from idle to nonidle.
 */
static void
fill_threadpool_with_spinning(dispatch_queue_t q)
{
	for (int job = 0; job < NUMENQUEUES; job++) {
		dispatch_async(q, ^{
			while (spin_threads) {
				; // should now appear as non-idle in delta shot
			}
		});
	}
	sleep(1); // wait for jobs to enqueue
}

/* Take stackshot, count the number of idle and nonidle threads the stackshot records.
 * Where this is called, there should be NUMENQUEUES idle threads (thanks to warm_up_threadpool)
 * and 2 nonidle threads (the main thread, and the spinning pthread).
 */
static void
take_and_verify_initial_stackshot(uint64_t * since_time)
{
	int idles, nonidles;
	void *ss = take_stackshot(0, 0);

	*since_time = get_stackshot_timestamp(ss);
	get_thread_statuses(ss, &idles, &nonidles);

	T_EXPECT_EQ(idles, NUMENQUEUES,
			"Idle count of %d should match expected value of %d...",
			idles, NUMENQUEUES);
	T_EXPECT_EQ(nonidles, NUMTHREADS - NUMENQUEUES,
			"Non-idle count of %d should match expected value of %d...",
			nonidles, NUMTHREADS - NUMENQUEUES);

	stackshot_config_dealloc(ss);
}

/* Take a stackshot and a delta stackshot, measuring what changed since the previous
 * stackshot. Where this is called, the blocking jobs have been cleared from the work queue,
 * and the work queue has NUMENQUEUES tight-spinning jobs on it. Make sure that
 * no new idle threads appear in the delta, and make sure that the delta shot isn't
 * ignoring the worker threads that have become active.
 */
static void
take_and_verify_delta_stackshot(uint64_t since_time)
{
	int delta_idles, delta_nonidles, curr_idles, curr_nonidles;

	void *full_shot = take_stackshot(0, 0);
	/* Threads should appear in delta stackshot as non-idle */
	void *delta_shot = take_stackshot(0, since_time);

	get_thread_statuses(full_shot, &curr_idles, &curr_nonidles);
	get_thread_statuses(delta_shot, &delta_idles, &delta_nonidles);

	T_EXPECT_EQ(delta_idles, 0, "Making sure there are no idles in delta shot");
	T_EXPECT_EQ(delta_nonidles + curr_idles, NUMTHREADS,
			"Making sure delta shot isn't ignoring newly active threads");

	stackshot_config_dealloc(full_shot);
	stackshot_config_dealloc(delta_shot);
}

/* pthread entry point: burn CPU until the main test clears spin_threads, so
 * the test always has one non-workqueue, non-idle thread. */
static void *
spinning_non_work_queue_thread(void * ignored)
{
	(void)ignored;
	while (spin_threads) {
		;
	}
	return NULL;
}

T_DECL(stackshot_idle_25570396, "Tests that stackshot can properly recognize idle and non-idle threads")
{
	pthread_t spinner;
	uint64_t initial_stackshot_time;
	dispatch_queue_t q;
	int err;

	/* One non-workqueue thread that stays busy for the whole test. */
	err = pthread_create(&spinner, NULL, spinning_non_work_queue_thread, NULL);
	T_ASSERT_POSIX_ZERO(err, "Spinning up non-work-queue thread");

	q = dispatch_queue_create("com.apple.kernel.test.waiting_semaphores", DISPATCH_QUEUE_CONCURRENT);

	/* Phase 1: park NUMENQUEUES workqueue threads, then check that a full
	 * stackshot reports them as idle workers. */
	warm_up_threadpool(q);
	take_and_verify_initial_stackshot(&initial_stackshot_time);

	/* Phase 2: make those workers spin, then check that a delta stackshot
	 * reports them as newly non-idle. */
	fill_threadpool_with_spinning(q);
	take_and_verify_delta_stackshot(initial_stackshot_time);

	spin_threads = 0; /* pthread-made thread should now exit */
	err = pthread_join(spinner, NULL);
	T_ASSERT_POSIX_ZERO(err, "Joining on non-work-queue thread");
}
