1 // Copyright (c) 2018-2019 Apple Inc. All rights reserved.
2
3 #include <CoreSymbolication/CoreSymbolication.h>
4 #include <darwintest.h>
5 #include <dispatch/dispatch.h>
6 #include <kperf/kperf.h>
7 #include <ktrace/session.h>
8 #include <ktrace/private.h>
9 #include <System/sys/kdebug.h>
10 #include <pthread.h>
11
12 #include "kperf_helpers.h"
13 #include "ktrace_helpers.h"
14 #include "ktrace_meta.h"
15
16 #define CALLSTACK_VALID 0x1
17 #define CALLSTACK_TRUNCATED 0x10
18
19 T_GLOBAL_META(T_META_TAG_VM_PREFERRED);
20
21 static void
expect_frame(const char ** bt,unsigned int bt_len,CSSymbolRef symbol,uint64_t addr,unsigned int bt_idx,unsigned int max_frames)22 expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
23 uint64_t addr, unsigned int bt_idx, unsigned int max_frames)
24 {
25 const char *name;
26 unsigned int frame_idx = max_frames - bt_idx - 1;
27
28 if (!bt[frame_idx]) {
29 T_LOG("frame %2u: skipping system frame '%s'", frame_idx,
30 CSSymbolGetName(symbol));
31 return;
32 }
33
34 T_LOG("checking frame %d: %llx", bt_idx, addr);
35 if (CSIsNull(symbol)) {
36 T_FAIL("invalid return address symbol");
37 return;
38 }
39
40 if (frame_idx >= bt_len) {
41 T_FAIL("unexpected frame '%s' (%#" PRIx64 ") at index %u",
42 CSSymbolGetName(symbol), addr, frame_idx);
43 return;
44 }
45
46 name = CSSymbolGetName(symbol);
47 T_QUIET; T_ASSERT_NOTNULL(name, NULL);
48 T_EXPECT_EQ_STR(name, bt[frame_idx],
49 "frame %2u: saw '%s', expected '%s'",
50 frame_idx, name, bt[frame_idx]);
51 }
52
53 /*
54 * Expect to see either user or kernel stacks on thread with ID `tid` with a
55 * signature of `bt` of length `bt_len`. Updates `stacks_seen` when stack
56 * is found.
57 *
58 * Can also allow stacks to be larger than the signature -- additional frames
59 * near the current PC will be ignored. This allows stacks to potentially be
60 * in the middle of a signalling system call (which signals that it is safe to
61 * start sampling).
62 */
static void
expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen,
    bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by)
{
	CSSymbolicatorRef symb;
	uint32_t hdr_debugid;
	uint32_t data_debugid;
	/* state shared between the header and data callbacks below */
	__block unsigned int stacks = 0;       /* stack headers seen so far */
	__block unsigned int frames = 0;       /* frames consumed from the current stack */
	__block unsigned int hdr_frames = 0;   /* frame count promised by the last header */
	__block unsigned int allow_larger = allow_larger_by;

	T_SETUPBEGIN;

	if (kern) {
		/* symbolicators are expensive to create -- cache them per-kind */
		static CSSymbolicatorRef kern_symb;
		static dispatch_once_t kern_symb_once;

		hdr_debugid = PERF_STK_KHDR;
		data_debugid = PERF_STK_KDATA;

		dispatch_once(&kern_symb_once, ^(void) {
			kern_symb = CSSymbolicatorCreateWithMachKernel();
			T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL);
		});
		symb = kern_symb;
	} else {
		static CSSymbolicatorRef user_symb;
		static dispatch_once_t user_symb_once;

		hdr_debugid = PERF_STK_UHDR;
		data_debugid = PERF_STK_UDATA;

		dispatch_once(&user_symb_once, ^(void) {
			user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
			T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
			T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
		});
		symb = user_symb;
	}

	/* header event: carries flags in arg1 and the frame count in arg2 */
	ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) {
		/* tid == 0 means match stacks from any thread */
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		T_LOG("found %s stack from thread %#" PRIx64, kern ? "kernel" : "user",
		    tp->threadid);
		stacks++;
		/* low bit of arg1 is the CALLSTACK_VALID flag */
		if (!(tp->arg1 & 1)) {
			T_FAIL("invalid %s stack on thread %#" PRIx64,
			    kern ? "kernel" : "user", tp->threadid);
			return;
		}

		hdr_frames = (unsigned int)tp->arg2;
		/* ignore extra link register or value pointed to by stack pointer */
		hdr_frames -= 1;

		T_QUIET; T_EXPECT_GE(hdr_frames, bt_len,
		    "at least %u frames in header", bt_len);
		T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger,
		    "at most %u + %u frames in header", bt_len, allow_larger);
		/*
		 * If the stack is deeper than the signature (by an allowed
		 * margin), remember how many extra frames near the PC the data
		 * callback should skip before comparing.
		 */
		if (hdr_frames > bt_len && allow_larger > 0) {
			allow_larger = hdr_frames - bt_len;
			hdr_frames = bt_len;
		}

		T_LOG("%s stack seen", kern ? "kernel" : "user");
		frames = 0;
	});

	/* data event: four return addresses packed into arg1..arg4 */
	ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		int i = 0;

		if (frames == 0 && hdr_frames > bt_len) {
			/* skip frames near the PC */
			i = (int)allow_larger;
			allow_larger -= 4;
		}

		for (; i < 4 && frames < hdr_frames; i++, frames++) {
			/* args are contiguous in the trace point, so index off arg1 */
			uint64_t addr = (&tp->arg1)[i];
			CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
				symb, addr, kCSNow);

			expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames);
		}

		/* saw the end of the user stack */
		if (hdr_frames == frames) {
			*stacks_seen += 1;
			if (!kern) {
				/* user stack is traced last, so the test can end */
				ktrace_end(s, 1);
			}
		}
	});

	T_SETUPEND;
}
167
/* debug ID emitted by the backtraced thread to fire the kdebug-filter sample */
#define TRIGGERING_DEBUGID (0xfeff0f00)

/*
 * These functions must return an int to avoid the function prologue being
 * hoisted out of the path to the spin (breaking being able to get a good
 * backtrace).
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames);
179
/*
 * Build a deterministic stack of alternating recurse_a/recurse_b frames.
 * At depth 0: if `spinning` is non-NULL, signal it and spin forever (for
 * timer-triggered samples); otherwise emit TRIGGERING_DEBUGID so a
 * kdebug-triggered sample captures this exact stack.
 *
 * noinline/not_tail_called keep every call as a real frame so the sampled
 * backtrace matches the expected signature.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	/* alternate with recurse_b so adjacent frames are distinguishable */
	return recurse_b(spinning, frames - 1) + 1;
}
197
/*
 * Mutual-recursion partner of recurse_a; see its comment.  Identical
 * behavior, separate symbol so the backtrace alternates a/b frames.
 */
static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	return recurse_a(spinning, frames - 1) + 1;
}
215
/* total frames in the expected user stack signature, on every architecture */
#define USER_FRAMES (12)

#if defined(__x86_64__)

/* frames already on the stack when recurse_a is first entered */
#define RECURSE_START_OFFSET (3)

#else /* defined(__x86_64__) */

#define RECURSE_START_OFFSET (2)

#endif /* !defined(__x86_64__) */

/*
 * Expected user backtrace, base of the stack first.  NULL entries are
 * system frames whose names are not checked (see expect_frame).
 */
static const char *user_bt[USER_FRAMES] = {
#if defined(__x86_64__)
	/*
	 * x86_64 has an extra "thread_start" frame here.
	 */
	NULL,
#endif /* defined(__x86_64__) */
	NULL, NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a",
#if !defined(__x86_64__)
	/*
	 * Pick up the slack to make the number of frames constant.
	 */
	"recurse_b",
#endif /* !defined(__x86_64__) */
	NULL,
};
247
#if defined(__arm64__)

/* expected kernel stack for the kdebug_trace64 syscall path on arm64 */
#define KERNEL_FRAMES (4)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64",
};

#elif defined(__x86_64__)

/* x86_64 enters the syscall handler more directly, so fewer frames */
#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall64", "kdebug_trace64"
};

#else
#error "architecture unsupported"
#endif /* defined(__arm64__) */
265
/* one-time initialization guard for the semaphores below */
static dispatch_once_t backtrace_once;
/* signalled by the backtraced thread once it is up and running */
static dispatch_semaphore_t backtrace_started;
/* signalled by the test to release the backtraced thread (non-spinning mode) */
static dispatch_semaphore_t backtrace_go;
269
270 /*
271 * Another thread to run with a known backtrace.
272 *
273 * Take a semaphore that will be signalled when the thread is spinning at the
274 * correct frame. If the semaphore is NULL, don't spin and instead make a
275 * kdebug_trace system call, which can trigger a deterministic backtrace itself.
276 */
277 static void *
backtrace_thread(void * arg)278 backtrace_thread(void *arg)
279 {
280 dispatch_semaphore_t notify_spinning;
281 unsigned int calls;
282
283 notify_spinning = (dispatch_semaphore_t)arg;
284
285 dispatch_semaphore_signal(backtrace_started);
286 if (!notify_spinning) {
287 dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER);
288 }
289
290 /*
291 * _pthread_start, backtrace_thread, recurse_a, recurse_b,
292 * ...[, __kdebug_trace64]
293 *
294 * Always make two fewer calls for this frame (backtrace_thread and
295 * _pthread_start).
296 */
297 calls = USER_FRAMES - RECURSE_START_OFFSET - 2;
298 if (notify_spinning) {
299 /*
300 * Spinning doesn't end up calling __kdebug_trace64.
301 */
302 calls -= 1;
303 }
304
305 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
306 calls, RECURSE_START_OFFSET);
307 (void)recurse_a(notify_spinning, calls);
308 return NULL;
309 }
310
311 static uint64_t
create_backtrace_thread(void * (* thread_fn)(void *),dispatch_semaphore_t notify_spinning)312 create_backtrace_thread(void *(*thread_fn)(void *),
313 dispatch_semaphore_t notify_spinning)
314 {
315 pthread_t thread = NULL;
316 uint64_t tid;
317
318 dispatch_once(&backtrace_once, ^{
319 backtrace_started = dispatch_semaphore_create(0);
320 T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL);
321
322 if (!notify_spinning) {
323 backtrace_go = dispatch_semaphore_create(0);
324 T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL);
325 }
326 });
327
328 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, thread_fn,
329 (void *)notify_spinning), NULL);
330 T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created");
331 dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER);
332
333 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL);
334 T_QUIET; T_ASSERT_NE(tid, UINT64_C(0),
335 "backtrace thread created does not have ID 0");
336
337 T_LOG("starting thread with ID 0x%" PRIx64, tid);
338
339 return tid;
340 }
341
/*
 * Release a non-spinning backtrace thread so it makes its triggering
 * kdebug_trace call.  Only valid after create_backtrace_thread() was called
 * with a NULL notify_spinning (which creates backtrace_go).
 */
static void
start_backtrace_thread(void)
{
	T_QUIET; T_ASSERT_NOTNULL(backtrace_go,
	    "thread to backtrace created before starting it");
	dispatch_semaphore_signal(backtrace_go);
}
349
/* watches are slower, so give their tests more time before ending tracing */
#if TARGET_OS_WATCH
#define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
#else /* TARGET_OS_WATCH */
#define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
#endif /* !TARGET_OS_WATCH */
355
T_DECL(kperf_stacks_kdebug_trig,
    "test that backtraces from kdebug trigger are correct",
    T_META_ASROOT(true))
{
	/* static so the completion-handler block can update it */
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;
	uint64_t tid;

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	/* NULL semaphore: the thread triggers via kdebug_trace, not spinning */
	tid = create_backtrace_thread(backtrace_thread, NULL);
	/* expect an exact user stack, and a kernel stack up to 4 frames deeper */
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0);
	expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 4);

	/*
	 * The triggering event must be traced (and thus registered with libktrace)
	 * to get backtraces.
	 */
	ktrace_events_single(s, TRIGGERING_DEBUGID,
	    ^(__unused struct trace_point *tp){ });

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	/* configure kperf: sample user+kernel stacks when the debugid fires */
	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created");

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter,
	    TRIGGERING_DEBUGID), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	(void)kperf_action_count_set(1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* tracing is live -- release the thread so it fires the trigger */
	start_backtrace_thread();

	/* safety net: end tracing even if the expected stacks never appear */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
419
T_DECL(kperf_ustack_timer,
    "test that user backtraces on a timer are correct",
    T_META_ASROOT(true))
{
	/* static so the completion-handler block can update it */
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	uint64_t tid;
	dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_set_collection_interval(s, 100);

	ktrace_filter_pid(s, getpid());

	configure_kperf_stacks_timer(getpid(), 10, false);

	/* the thread spins at a known frame instead of making a syscall */
	tid = create_backtrace_thread(backtrace_thread, wait_for_spinning);
	/* potentially calling dispatch function and system call */
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	/* wait until the thread that will be backtraced is spinning */
	dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* safety net: end tracing even if no matching stack is ever seen */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
467
/* volatile so the spin loop below cannot be optimized away */
static volatile bool spin = true;

/*
 * Recurse `depth` more times to build a deep stack, then signal
 * `notify_sema` and spin until `spin` is cleared.  noinline/not_tail_called
 * keep every level as a distinct frame for the sampler to capture.
 */
__attribute__((noinline, not_tail_called))
static void
recurse_spin(dispatch_semaphore_t notify_sema, int depth)
{
	if (depth > 0) {
		recurse_spin(notify_sema, depth - 1);
	} else {
		dispatch_semaphore_signal(notify_sema);
		while (spin);
	}
}
481
/*
 * Thread entry point: announce startup, then build a 258-deep stack
 * (257 recursions plus this frame) and spin so deep/truncated stacks
 * can be sampled.  `arg` is the semaphore signalled once spinning.
 */
static void *
spin_thread(void *arg)
{
	dispatch_semaphore_t notify_sema = arg;
	dispatch_semaphore_signal(backtrace_started);
	recurse_spin(notify_sema, 257);
	return NULL;
}
490
T_DECL(kperf_ustack_trunc, "ensure stacks are marked as truncated")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	configure_kperf_stacks_timer(getpid(), 10, false);

	__block bool saw_stack = false;
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(saw_stack, "saw the user stack");
		T_END;
	});

	/* spin_thread builds a stack deeper than kperf will record */
	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		if (tp->threadid != tid) {
			return;
		}
		T_LOG("found %llu frame stack", tp->arg2);
		/* arg1 carries the callstack flags */
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
		    "found valid callstack");
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_TRUNCATED,
		    "found truncated callstack");
		saw_stack = true;
		/* one truncated stack is enough -- stop tracing */
		ktrace_end(s, 1);
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	/* safety net: end tracing even if no matching stack is ever seen */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
541
542 T_DECL(kperf_ustack_maxlen, "ensure stacks up to 256 frames can be captured")
543 {
544 start_controlling_ktrace();
545
546 ktrace_session_t s = ktrace_session_create();
547 T_QUIET; T_ASSERT_NOTNULL(s, "ktrace session was created");
548
549 ktrace_set_collection_interval(s, 100);
550
551 T_QUIET;
552 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);
553
554 configure_kperf_stacks_timer(getpid(), 10, false);
555
556 __block bool saw_stack = false;
557 __block bool saw_stack_data = false;
558 __block uint64_t nevents = 0;
559 ktrace_set_completion_handler(s, ^{
560 T_EXPECT_TRUE(saw_stack, "saw the user stack");
561 T_LOG("saw %" PRIu64 " stack data events", nevents);
562 T_EXPECT_TRUE(saw_stack_data, "saw all frames of the user stack");
563 T_END;
564 });
565
566 dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
567 uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);
568
569 ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
570 if (tp->threadid != tid) {
571 return;
572 }
573 T_LOG("found %llu frame stack", tp->arg2);
574 T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
575 "found valid callstack");
576 T_EXPECT_EQ(tp->arg2, UINT64_C(256),
577 "found the correct number of frames");
578 saw_stack = true;
579 });
580
581 ktrace_events_single(s, PERF_STK_UDATA, ^(struct trace_point *tp) {
582 if (tp->threadid != tid && !saw_stack) {
583 return;
584 }
585 nevents++;
586 if (nevents == 256 / 4) {
587 ktrace_end(s, 1);
588 }
589 saw_stack_data = true;
590 });
591
592 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
593
594 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
595 "start tracing");
596
597 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
598 dispatch_get_main_queue(), ^(void)
599 {
600 T_LOG("ending test after timeout");
601 ktrace_end(s, 0);
602 });
603
604 dispatch_main();
605 }
606
607 T_DECL(kperf_ustack_mostly_valid,
608 "ensure most stacks captured by kperf are valid")
609 {
610 start_controlling_ktrace();
611
612 ktrace_session_t s = ktrace_session_create();
613 T_QUIET; T_ASSERT_NOTNULL(s, "ktrace session was created");
614
615 ktrace_set_collection_interval(s, 100);
616 configure_kperf_stacks_timer(-1, 10, false);
617 int set = 1;
618 T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
619 &set, sizeof(set)), NULL);
620
621 __block uint64_t stacks_seen = 0;
622 __block uint64_t valid_stacks_seen = 0;
623
624 ktrace_set_completion_handler(s, ^{
625 double valid_density = (double)valid_stacks_seen / (double)stacks_seen;
626 T_LOG("saw %" PRIu64 " stack header events, %" PRIu64 " valid (%g%%)",
627 stacks_seen, valid_stacks_seen, valid_density * 100.);
628 T_EXPECT_GT(valid_density, 0.98, "more than 98%% of stacks are valid");
629 T_END;
630 });
631
632 dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
633 (void)create_backtrace_thread(spin_thread, notify_sema);
634
635 ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
636 stacks_seen += 1;
637 if (tp->arg1 & CALLSTACK_VALID && tp->arg2 > 0) {
638 valid_stacks_seen += 1;
639 }
640 });
641
642 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
643
644 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
645 "start tracing");
646
647 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
648 dispatch_get_main_queue(), ^(void)
649 {
650 T_LOG("ending test after timeout");
651 ktrace_end(s, 0);
652 });
653
654 dispatch_main();
655 }
656
657 /* TODO test kernel stacks in all modes */
658 /* TODO legacy PET mode backtracing */
659