1 // Copyright (c) 2018-2019 Apple Inc. All rights reserved.
2
3 #include <CoreSymbolication/CoreSymbolication.h>
4 #include <darwintest.h>
5 #include <dispatch/dispatch.h>
6 #include <kperf/kperf.h>
7 #include <ktrace/session.h>
8 #include <ktrace/private.h>
9 #include <System/sys/kdebug.h>
10 #include <pthread.h>
11
12 #include "kperf_helpers.h"
13 #include "ktrace_helpers.h"
14 #include "ktrace_meta.h"
15
16 #define CALLSTACK_VALID 0x1
17 #define CALLSTACK_TRUNCATED 0x10
18
19 static void
expect_frame(const char ** bt,unsigned int bt_len,CSSymbolRef symbol,uint64_t addr,unsigned int bt_idx,unsigned int max_frames)20 expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
21 uint64_t addr, unsigned int bt_idx, unsigned int max_frames)
22 {
23 const char *name;
24 unsigned int frame_idx = max_frames - bt_idx - 1;
25
26 if (!bt[frame_idx]) {
27 T_LOG("frame %2u: skipping system frame '%s'", frame_idx,
28 CSSymbolGetName(symbol));
29 return;
30 }
31
32 T_LOG("checking frame %d: %llx", bt_idx, addr);
33 if (CSIsNull(symbol)) {
34 T_FAIL("invalid return address symbol");
35 return;
36 }
37
38 if (frame_idx >= bt_len) {
39 T_FAIL("unexpected frame '%s' (%#" PRIx64 ") at index %u",
40 CSSymbolGetName(symbol), addr, frame_idx);
41 return;
42 }
43
44 name = CSSymbolGetName(symbol);
45 T_QUIET; T_ASSERT_NOTNULL(name, NULL);
46 T_EXPECT_EQ_STR(name, bt[frame_idx],
47 "frame %2u: saw '%s', expected '%s'",
48 frame_idx, name, bt[frame_idx]);
49 }
50
51 /*
52 * Expect to see either user or kernel stacks on thread with ID `tid` with a
53 * signature of `bt` of length `bt_len`. Updates `stacks_seen` when stack
54 * is found.
55 *
56 * Can also allow stacks to be larger than the signature -- additional frames
57 * near the current PC will be ignored. This allows stacks to potentially be
58 * in the middle of a signalling system call (which signals that it is safe to
59 * start sampling).
60 */
/*
 * Expect to see either user or kernel stacks on thread with ID `tid` with a
 * signature of `bt` of length `bt_len`. Updates `stacks_seen` when stack
 * is found.
 *
 * Can also allow stacks to be larger than the signature -- additional frames
 * near the current PC will be ignored. This allows stacks to potentially be
 * in the middle of a signalling system call (which signals that it is safe to
 * start sampling).
 */
static void
expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen,
    bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by)
{
	CSSymbolicatorRef symb;
	uint32_t hdr_debugid;
	uint32_t data_debugid;
	/* state shared between the header and data event handlers below */
	__block unsigned int stacks = 0;
	__block unsigned int frames = 0;
	__block unsigned int hdr_frames = 0;
	__block unsigned int allow_larger = allow_larger_by;

	T_SETUPBEGIN;

	if (kern) {
		/*
		 * Symbolicator creation is expensive -- build the kernel
		 * symbolicator once and reuse it across calls.
		 */
		static CSSymbolicatorRef kern_symb;
		static dispatch_once_t kern_symb_once;

		hdr_debugid = PERF_STK_KHDR;
		data_debugid = PERF_STK_KDATA;

		dispatch_once(&kern_symb_once, ^(void) {
			kern_symb = CSSymbolicatorCreateWithMachKernel();
			T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL);
		});
		symb = kern_symb;
	} else {
		/* likewise, a single symbolicator for the current task */
		static CSSymbolicatorRef user_symb;
		static dispatch_once_t user_symb_once;

		hdr_debugid = PERF_STK_UHDR;
		data_debugid = PERF_STK_UDATA;

		dispatch_once(&user_symb_once, ^(void) {
			user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
			T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
			T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
		});
		symb = user_symb;
	}

	/*
	 * Stack header event: arg1 carries the validity flags (bit 0 is
	 * CALLSTACK_VALID) and arg2 the total frame count.
	 */
	ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		T_LOG("found %s stack from thread %#" PRIx64, kern ? "kernel" : "user",
		    tp->threadid);
		stacks++;
		if (!(tp->arg1 & 1)) {
			T_FAIL("invalid %s stack on thread %#" PRIx64,
			    kern ? "kernel" : "user", tp->threadid);
			return;
		}

		hdr_frames = (unsigned int)tp->arg2;
		/* ignore extra link register or value pointed to by stack pointer */
		hdr_frames -= 1;

		T_QUIET; T_EXPECT_GE(hdr_frames, bt_len,
		    "at least %u frames in header", bt_len);
		T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger,
		    "at most %u + %u frames in header", bt_len, allow_larger);
		if (hdr_frames > bt_len && allow_larger > 0) {
			/*
			 * Record how many extra frames near the PC to skip,
			 * and only compare `bt_len` frames to the signature.
			 */
			allow_larger = hdr_frames - bt_len;
			hdr_frames = bt_len;
		}

		T_LOG("%s stack seen", kern ? "kernel" : "user");
		frames = 0;
	});

	/*
	 * Stack data events: each carries up to 4 return addresses in
	 * arg1..arg4 (read contiguously via `&tp->arg1` below), ordered from
	 * the current PC outwards.
	 */
	ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		int i = 0;

		if (frames == 0 && hdr_frames > bt_len) {
			/* skip frames near the PC */
			i = (int)allow_larger;
			allow_larger -= 4;
		}

		for (; i < 4 && frames < hdr_frames; i++, frames++) {
			uint64_t addr = (&tp->arg1)[i];
			CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
				symb, addr, kCSNow);

			expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames);
		}

		/* saw the end of the user stack */
		if (hdr_frames == frames) {
			*stacks_seen += 1;
			if (!kern) {
				/* the user stack is sampled last; stop tracing */
				ktrace_end(s, 1);
			}
		}
	});

	T_SETUPEND;
}
165
166 #define TRIGGERING_DEBUGID (0xfeff0f00)
167
168 /*
169 * These functions must return an int to avoid the function prologue being
170 * hoisted out of the path to the spin (breaking being able to get a good
171 * backtrace).
172 */
173 static int __attribute__((noinline, not_tail_called))
174 recurse_a(dispatch_semaphore_t spinning, unsigned int frames);
175 static int __attribute__((noinline, not_tail_called))
176 recurse_b(dispatch_semaphore_t spinning, unsigned int frames);
177
/*
 * One half of a mutually-recursive pair (with recurse_b) used to build a
 * backtrace with a known, alternating signature.  At the base case, either
 * spin forever after signalling `spinning` (so a timer can sample the
 * thread at a stable frame), or emit the triggering kdebug event to cause a
 * sample directly.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			/* signal readiness, then spin at a known frame */
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			/* trigger the kdebug-filter-driven sample from here */
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	/* the `+ 1` keeps work in this frame, reinforcing not_tail_called */
	return recurse_b(spinning, frames - 1) + 1;
}
195
/*
 * Mirror of recurse_a; see its comment.  The two functions alternate so the
 * expected backtrace signature alternates "recurse_a"/"recurse_b".
 */
static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			/* signal readiness, then spin at a known frame */
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			/* trigger the kdebug-filter-driven sample from here */
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	/* the `+ 1` keeps work in this frame, reinforcing not_tail_called */
	return recurse_a(spinning, frames - 1) + 1;
}
213
214 #define USER_FRAMES (12)
215
216 #if defined(__x86_64__)
217
218 #define RECURSE_START_OFFSET (3)
219
220 #else /* defined(__x86_64__) */
221
222 #define RECURSE_START_OFFSET (2)
223
224 #endif /* !defined(__x86_64__) */
225
/*
 * Expected user stack signature, ordered from the outermost frame (thread
 * start) to the innermost.  NULL entries are system frames whose names vary
 * across OS versions and are skipped by expect_frame.
 */
static const char *user_bt[USER_FRAMES] = {
#if defined(__x86_64__)
	/*
	 * x86_64 has an extra "thread_start" frame here.
	 */
	NULL,
#endif /* defined(__x86_64__) */
	NULL, NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a",
#if !defined(__x86_64__)
	/*
	 * Pick up the slack to make the number of frames constant.
	 */
	"recurse_b",
#endif /* !defined(__x86_64__) */
	NULL,
};
245
/*
 * Expected kernel frames leading to the kdebug_trace64 system call.  The
 * count and names differ per architecture because the exception-vector and
 * syscall entry paths differ.
 */
#if defined(__arm__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall", "kdebug_trace64"
};

#elif defined(__arm64__)

/* arm64 includes the synchronous exception handlers in the trace */
#define KERNEL_FRAMES (4)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64",
};

#elif defined(__x86_64__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall64", "kdebug_trace64"
};

#else
#error "architecture unsupported"
#endif /* defined(__arm__) */
270
271 static dispatch_once_t backtrace_once;
272 static dispatch_semaphore_t backtrace_started;
273 static dispatch_semaphore_t backtrace_go;
274
275 /*
276 * Another thread to run with a known backtrace.
277 *
278 * Take a semaphore that will be signalled when the thread is spinning at the
279 * correct frame. If the semaphore is NULL, don't spin and instead make a
280 * kdebug_trace system call, which can trigger a deterministic backtrace itself.
281 */
282 static void *
backtrace_thread(void * arg)283 backtrace_thread(void *arg)
284 {
285 dispatch_semaphore_t notify_spinning;
286 unsigned int calls;
287
288 notify_spinning = (dispatch_semaphore_t)arg;
289
290 dispatch_semaphore_signal(backtrace_started);
291 if (!notify_spinning) {
292 dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER);
293 }
294
295 /*
296 * _pthread_start, backtrace_thread, recurse_a, recurse_b,
297 * ...[, __kdebug_trace64]
298 *
299 * Always make two fewer calls for this frame (backtrace_thread and
300 * _pthread_start).
301 */
302 calls = USER_FRAMES - RECURSE_START_OFFSET - 2;
303 if (notify_spinning) {
304 /*
305 * Spinning doesn't end up calling __kdebug_trace64.
306 */
307 calls -= 1;
308 }
309
310 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
311 calls, RECURSE_START_OFFSET);
312 (void)recurse_a(notify_spinning, calls);
313 return NULL;
314 }
315
/*
 * Spawn a thread running `thread_fn` with `notify_spinning` as its argument
 * and return its thread ID once it has signalled `backtrace_started`.
 *
 * NOTE(review): the semaphores are created under dispatch_once, so they are
 * only set up on the FIRST call -- in particular `backtrace_go` is only
 * created if that first call passes a NULL `notify_spinning`.  Each T_DECL
 * runs in its own process, so this appears intentional; confirm if reusing.
 */
static uint64_t
create_backtrace_thread(void *(*thread_fn)(void *),
    dispatch_semaphore_t notify_spinning)
{
	pthread_t thread = NULL;
	uint64_t tid;

	dispatch_once(&backtrace_once, ^{
		backtrace_started = dispatch_semaphore_create(0);
		T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL);

		if (!notify_spinning) {
			/* only needed to release a kdebug-trigger thread */
			backtrace_go = dispatch_semaphore_create(0);
			T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL);
		}
	});

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, thread_fn,
	    (void *)notify_spinning), NULL);
	T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created");
	/* wait for the thread to actually be running before querying its ID */
	dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER);

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL);
	T_QUIET; T_ASSERT_NE(tid, UINT64_C(0),
	    "backtrace thread created does not have ID 0");

	T_LOG("starting thread with ID 0x%" PRIx64, tid);

	return tid;
}
346
/*
 * Release a kdebug-trigger backtrace thread (created with a NULL
 * notify_spinning) so it recurses and emits the triggering event.
 */
static void
start_backtrace_thread(void)
{
	T_QUIET; T_ASSERT_NOTNULL(backtrace_go,
	    "thread to backtrace created before starting it");
	dispatch_semaphore_signal(backtrace_go);
}
354
355 #if TARGET_OS_WATCH
356 #define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
357 #else /* TARGET_OS_WATCH */
358 #define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
359 #endif /* !TARGET_OS_WATCH */
360
T_DECL(kperf_stacks_kdebug_trig,
    "test that backtraces from kdebug trigger are correct",
    T_META_ASROOT(true))
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;
	uint64_t tid;

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	/* only trace events from this process */
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	/* NULL semaphore: the thread will wait for backtrace_go, then
	 * recurse and emit TRIGGERING_DEBUGID itself */
	tid = create_backtrace_thread(backtrace_thread, NULL);
	/* expect both a user and a kernel stack from the triggered sample;
	 * the kernel stack may have up to 4 extra frames near the PC */
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0);
	expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 4);

	/*
	 * The triggering event must be traced (and thus registered with libktrace)
	 * to get backtraces.
	 */
	ktrace_events_single(s, TRIGGERING_DEBUGID,
	    ^(__unused struct trace_point *tp){ });

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	/* configure kperf: sample user + kernel stacks (action 1) whenever
	 * TRIGGERING_DEBUGID is emitted */
	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created");

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter,
	    TRIGGERING_DEBUGID), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	(void)kperf_action_count_set(1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* tracing is live -- release the thread to emit the trigger */
	start_backtrace_thread();

	/* safety net: end tracing even if the expected stacks never arrive */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
424
T_DECL(kperf_ustack_timer,
    "test that user backtraces on a timer are correct",
    T_META_ASROOT(true))
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	uint64_t tid;
	dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_set_collection_interval(s, 100);

	ktrace_filter_pid(s, getpid());

	/* sample this process's user stacks every 10ms */
	configure_kperf_stacks_timer(getpid(), 10, false);

	/* the thread signals wait_for_spinning once it is spinning at the
	 * innermost expected frame */
	tid = create_backtrace_thread(backtrace_thread, wait_for_spinning);
	/* potentially calling dispatch function and system call */
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	/* wait until the thread that will be backtraced is spinning */
	dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* safety net: end tracing even if no matching stack is ever seen */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
472
473 static volatile bool spin = true;
474
475 __attribute__((noinline, not_tail_called))
476 static void
recurse_spin(dispatch_semaphore_t notify_sema,int depth)477 recurse_spin(dispatch_semaphore_t notify_sema, int depth)
478 {
479 if (depth > 0) {
480 recurse_spin(notify_sema, depth - 1);
481 } else {
482 dispatch_semaphore_signal(notify_sema);
483 while (spin);
484 }
485 }
486
487 static void *
spin_thread(void * arg)488 spin_thread(void *arg)
489 {
490 dispatch_semaphore_t notify_sema = arg;
491 dispatch_semaphore_signal(backtrace_started);
492 recurse_spin(notify_sema, 257);
493 return NULL;
494 }
495
T_DECL(kperf_ustack_trunc, "ensure stacks are marked as truncated")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	/* sample this process's user stacks every 10ms */
	configure_kperf_stacks_timer(getpid(), 10, false);

	__block bool saw_stack = false;
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(saw_stack, "saw the user stack");
		T_END;
	});

	/* spin_thread recurses 257 deep, exceeding the capture limit, so the
	 * sampled stack must be flagged as truncated */
	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		if (tp->threadid != tid) {
			return;
		}
		T_LOG("found %llu frame stack", tp->arg2);
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
		    "found valid callstack");
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_TRUNCATED,
		    "found truncated callstack");
		saw_stack = true;
		/* one matching header is enough; stop tracing */
		ktrace_end(s, 1);
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	/* safety net: end tracing even if no matching stack is ever seen */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
546
547 T_DECL(kperf_ustack_maxlen, "ensure stacks up to 256 frames can be captured")
548 {
549 start_controlling_ktrace();
550
551 ktrace_session_t s = ktrace_session_create();
552 T_QUIET; T_ASSERT_NOTNULL(s, "ktrace session was created");
553
554 ktrace_set_collection_interval(s, 100);
555
556 T_QUIET;
557 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);
558
559 configure_kperf_stacks_timer(getpid(), 10, false);
560
561 __block bool saw_stack = false;
562 __block bool saw_stack_data = false;
563 __block uint64_t nevents = 0;
564 ktrace_set_completion_handler(s, ^{
565 T_EXPECT_TRUE(saw_stack, "saw the user stack");
566 T_LOG("saw %" PRIu64 " stack data events", nevents);
567 T_EXPECT_TRUE(saw_stack_data, "saw all frames of the user stack");
568 T_END;
569 });
570
571 dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
572 uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);
573
574 ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
575 if (tp->threadid != tid) {
576 return;
577 }
578 T_LOG("found %llu frame stack", tp->arg2);
579 T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
580 "found valid callstack");
581 T_EXPECT_EQ(tp->arg2, UINT64_C(256),
582 "found the correct number of frames");
583 saw_stack = true;
584 });
585
586 ktrace_events_single(s, PERF_STK_UDATA, ^(struct trace_point *tp) {
587 if (tp->threadid != tid && !saw_stack) {
588 return;
589 }
590 nevents++;
591 if (nevents == 256 / 4) {
592 ktrace_end(s, 1);
593 }
594 saw_stack_data = true;
595 });
596
597 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
598
599 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
600 "start tracing");
601
602 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
603 dispatch_get_main_queue(), ^(void)
604 {
605 T_LOG("ending test after timeout");
606 ktrace_end(s, 0);
607 });
608
609 dispatch_main();
610 }
611
T_DECL(kperf_ustack_mostly_valid,
    "ensure most stacks captured by kperf are valid")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);
	/* pid -1: sample every process on a 10ms timer */
	configure_kperf_stacks_timer(-1, 10, false);
	/* enable lightweight PET so all running threads get sampled */
	int set = 1;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);

	__block uint64_t stacks_seen = 0;
	__block uint64_t valid_stacks_seen = 0;

	ktrace_set_completion_handler(s, ^{
		/* NOTE(review): if no headers were seen, this divides 0/0
		 * (NaN) and the comparison below fails, which still reports
		 * the problem -- confirm this is the intended failure mode */
		double valid_density = (double)valid_stacks_seen / (double)stacks_seen;
		T_LOG("saw %" PRIu64 " stack header events, %" PRIu64 " valid (%g%%)",
		    stacks_seen, valid_stacks_seen, valid_density * 100.);
		T_EXPECT_GT(valid_density, 0.98, "more than 98%% of stacks are valid");
		T_END;
	});

	/* keep one thread busy with a deep, stable stack to sample */
	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	(void)create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		stacks_seen += 1;
		/* a valid, non-empty stack counts toward the density */
		if (tp->arg1 & CALLSTACK_VALID && tp->arg2 > 0) {
			valid_stacks_seen += 1;
		}
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	/* this test always runs for the full timeout, then checks density */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
661
662 /* TODO test kernel stacks in all modes */
663 /* TODO legacy PET mode backtracing */
664