// Copyright (c) 2018-2019 Apple Inc. All rights reserved.

#include <CoreSymbolication/CoreSymbolication.h>
#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <kperf/kperf.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
#include <pthread.h>
#include <sys/sysctl.h>
#include <TargetConditionals.h>
#include <unistd.h>

#include "kperf_helpers.h"
#include "ktrace_helpers.h"
#include "ktrace_meta.h"

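/*
 * Flags set in the first argument of the stack header events emitted by
 * kperf, checked by the tests below.
 */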
#define CALLSTACK_VALID 0x1
#define CALLSTACK_TRUNCATED 0x10

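/*
 * Check a single frame against the expected signature.  `bt` lists symbol
 * names from the outermost frame to the innermost, but `bt_idx` counts
 * frames in the order the kernel emits them, innermost-first, so the index
 * into `bt` is reversed.  NULL entries in the signature mark system frames
 * that are skipped rather than matched by name.
 */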
static void
expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
    uint64_t addr, unsigned int bt_idx, unsigned int max_frames)
{
	const char *name;
	unsigned int frame_idx = max_frames - bt_idx - 1;

	if (!bt[frame_idx]) {
		T_LOG("frame %2u: skipping system frame '%s'", frame_idx,
		    CSSymbolGetName(symbol));
		return;
	}

	T_LOG("checking frame %u: %#" PRIx64, bt_idx, addr);
	if (CSIsNull(symbol)) {
		T_FAIL("invalid return address symbol");
		return;
	}

	if (frame_idx >= bt_len) {
		T_FAIL("unexpected frame '%s' (%#" PRIx64 ") at index %u",
		    CSSymbolGetName(symbol), addr, frame_idx);
		return;
	}

	name = CSSymbolGetName(symbol);
	T_QUIET; T_ASSERT_NOTNULL(name, NULL);
	T_EXPECT_EQ_STR(name, bt[frame_idx],
	    "frame %2u: saw '%s', expected '%s'",
	    frame_idx, name, bt[frame_idx]);
}

/*
 * Expect to see either a user or kernel stack on the thread with ID `tid`,
 * with a signature of `bt` of length `bt_len`.  Updates `stacks_seen` when a
 * stack is found.
 *
 * Can also allow stacks to be larger than the signature -- additional frames
 * near the current PC will be ignored.  This allows stacks to potentially be
 * in the middle of a signalling system call (which signals that it is safe to
 * start sampling).
 */
static void
expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen,
    bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by)
{
	CSSymbolicatorRef symb;
	uint32_t hdr_debugid;
	uint32_t data_debugid;
	__block unsigned int stacks = 0;
	__block unsigned int frames = 0;
	__block unsigned int hdr_frames = 0;
	__block unsigned int allow_larger = allow_larger_by;

	T_SETUPBEGIN;

	if (kern) {
		static CSSymbolicatorRef kern_symb;
		static dispatch_once_t kern_symb_once;

		hdr_debugid = PERF_STK_KHDR;
		data_debugid = PERF_STK_KDATA;

		dispatch_once(&kern_symb_once, ^(void) {
			kern_symb = CSSymbolicatorCreateWithMachKernel();
			T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL);
		});
		symb = kern_symb;
	} else {
		static CSSymbolicatorRef user_symb;
		static dispatch_once_t user_symb_once;

		hdr_debugid = PERF_STK_UHDR;
		data_debugid = PERF_STK_UDATA;

		dispatch_once(&user_symb_once, ^(void) {
			user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
			T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
			T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
		});
		symb = user_symb;
	}

	ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		T_LOG("found %s stack from thread %#" PRIx64, kern ? "kernel" : "user",
		    tp->threadid);
		stacks++;
		if (!(tp->arg1 & CALLSTACK_VALID)) {
			T_FAIL("invalid %s stack on thread %#" PRIx64,
			    kern ? "kernel" : "user", tp->threadid);
			return;
		}

		hdr_frames = (unsigned int)tp->arg2;
		/* ignore extra link register or value pointed to by stack pointer */
		hdr_frames -= 1;

		T_QUIET; T_EXPECT_GE(hdr_frames, bt_len,
		    "at least %u frames in header", bt_len);
		T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger,
		    "at most %u + %u frames in header", bt_len, allow_larger);
		if (hdr_frames > bt_len && allow_larger > 0) {
			allow_larger = hdr_frames - bt_len;
			hdr_frames = bt_len;
		}

		T_LOG("%s stack seen", kern ? "kernel" : "user");
		frames = 0;
	});

	ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		int i = 0;

		if (frames == 0 && hdr_frames > bt_len) {
			/* skip frames near the PC */
			i = (int)allow_larger;
			allow_larger -= 4;
		}

		for (; i < 4 && frames < hdr_frames; i++, frames++) {
			uint64_t addr = (&tp->arg1)[i];
			CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
				symb, addr, kCSNow);

			expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames);
		}

		/* saw the end of the user stack */
		if (hdr_frames == frames) {
			*stacks_seen += 1;
			if (!kern) {
				ktrace_end(s, 1);
			}
		}
	});

	T_SETUPEND;
}

#define TRIGGERING_DEBUGID (0xfeff0f00)

/*
 * These functions must return an int to prevent the compiler from hoisting
 * the function prologue out of the path to the spin (which would make it
 * impossible to get a good backtrace).
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames);

static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	return recurse_b(spinning, frames - 1) + 1;
}

static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	return recurse_a(spinning, frames - 1) + 1;
}

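/*
 * The number of frames in the expected user stack signature, including the
 * RECURSE_START_OFFSET frames already on the stack when backtrace_thread
 * starts recursing (x86_64 has an extra thread start frame).
 */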
#define USER_FRAMES (12)

#if defined(__x86_64__)

#define RECURSE_START_OFFSET (3)

#else /* defined(__x86_64__) */

#define RECURSE_START_OFFSET (2)

#endif /* !defined(__x86_64__) */

static const char *user_bt[USER_FRAMES] = {
#if defined(__x86_64__)
	/*
	 * x86_64 has an extra "thread_start" frame here.
	 */
	NULL,
#endif /* defined(__x86_64__) */
	NULL, NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a",
#if !defined(__x86_64__)
	/*
	 * Pick up the slack to make the number of frames constant.
	 */
	"recurse_b",
#endif /* !defined(__x86_64__) */
	NULL,
};

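/*
 * The kernel-side frames for the kdebug_trace64 system call differ by
 * architecture.
 */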
#if defined(__arm64__)

#define KERNEL_FRAMES (4)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64",
};

#elif defined(__x86_64__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall64", "kdebug_trace64"
};

#else
#error "architecture unsupported"
#endif /* defined(__arm64__) */

static dispatch_once_t backtrace_once;
static dispatch_semaphore_t backtrace_started;
static dispatch_semaphore_t backtrace_go;

/*
 * Another thread to run with a known backtrace.
 *
 * Take a semaphore that will be signalled when the thread is spinning at the
 * correct frame.  If the semaphore is NULL, don't spin and instead make a
 * kdebug_trace system call, which can trigger a deterministic backtrace itself.
 */
static void *
backtrace_thread(void *arg)
{
	dispatch_semaphore_t notify_spinning;
	unsigned int calls;

	notify_spinning = (dispatch_semaphore_t)arg;

	dispatch_semaphore_signal(backtrace_started);
	if (!notify_spinning) {
		dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER);
	}

	/*
	 * _pthread_start, backtrace_thread, recurse_a, recurse_b,
	 * ...[, __kdebug_trace64]
	 *
	 * Always make two fewer calls to account for the frames already on the
	 * stack (backtrace_thread and _pthread_start).
	 */
	calls = USER_FRAMES - RECURSE_START_OFFSET - 2;
	if (notify_spinning) {
		/*
		 * Spinning doesn't end up calling __kdebug_trace64.
		 */
		calls -= 1;
	}

	T_LOG("backtrace thread calling into %u frames (already at %d frames)",
	    calls, RECURSE_START_OFFSET);
	(void)recurse_a(notify_spinning, calls);
	return NULL;
}

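/*
 * Create a thread running `thread_fn`, passing `notify_spinning` through as
 * its argument, and wait for it to signal that it has started.  Returns the
 * new thread's ID.
 */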
static uint64_t
create_backtrace_thread(void *(*thread_fn)(void *),
    dispatch_semaphore_t notify_spinning)
{
	pthread_t thread = NULL;
	uint64_t tid;

	dispatch_once(&backtrace_once, ^{
		backtrace_started = dispatch_semaphore_create(0);
		T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL);

		if (!notify_spinning) {
			backtrace_go = dispatch_semaphore_create(0);
			T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL);
		}
	});

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, thread_fn,
	    (void *)notify_spinning), NULL);
	T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created");
	dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER);

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL);
	T_QUIET; T_ASSERT_NE(tid, UINT64_C(0),
	    "backtrace thread created does not have ID 0");

	T_LOG("starting thread with ID 0x%" PRIx64, tid);

	return tid;
}

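/*
 * Unblock the non-spinning backtrace thread so it recurses and emits the
 * triggering kdebug event.
 */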
static void
start_backtrace_thread(void)
{
	T_QUIET; T_ASSERT_NOTNULL(backtrace_go,
	    "thread to backtrace created before starting it");
	dispatch_semaphore_signal(backtrace_go);
}

#if TARGET_OS_WATCH
#define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
#else /* TARGET_OS_WATCH */
#define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
#endif /* !TARGET_OS_WATCH */

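/*
 * Trigger both user and kernel stack samples from a kdebug filter action on
 * TRIGGERING_DEBUGID and check the resulting backtraces against the expected
 * signatures.
 */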
T_DECL(kperf_stacks_kdebug_trig,
    "test that backtraces from kdebug trigger are correct",
    T_META_ASROOT(true))
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;
	uint64_t tid;

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	tid = create_backtrace_thread(backtrace_thread, NULL);
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0);
	expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 4);

	/*
	 * The triggering event must be traced (and thus registered with libktrace)
	 * to get backtraces.
	 */
	ktrace_events_single(s, TRIGGERING_DEBUGID,
	    ^(__unused struct trace_point *tp){ });

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created");

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter,
	    TRIGGERING_DEBUGID), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	(void)kperf_action_count_set(1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	start_backtrace_thread();

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

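/*
 * The spinning thread never calls __kdebug_trace64, so the signature is one
 * frame shorter, and up to two extra frames are allowed in case the sample
 * lands while the thread is still inside a dispatch or system call.
 */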
T_DECL(kperf_ustack_timer,
    "test that user backtraces on a timer are correct",
    T_META_ASROOT(true))
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	uint64_t tid;
	dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_set_collection_interval(s, 100);

	ktrace_filter_pid(s, getpid());

	configure_kperf_stacks_timer(getpid(), 10, false);

	tid = create_backtrace_thread(backtrace_thread, wait_for_spinning);
	/* potentially calling dispatch function and system call */
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	/* wait until the thread that will be backtraced is spinning */
	dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

static volatile bool spin = true;

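/*
 * Recurse `depth` times before signalling `notify_sema` and spinning forever.
 * spin_thread recurses 257 frames deep, deeper than the 256 frames kperf can
 * report, so the captured stack must be truncated.
 */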
__attribute__((noinline, not_tail_called))
static void
recurse_spin(dispatch_semaphore_t notify_sema, int depth)
{
	if (depth > 0) {
		recurse_spin(notify_sema, depth - 1);
	} else {
		dispatch_semaphore_signal(notify_sema);
		while (spin);
	}
}

static void *
spin_thread(void *arg)
{
	dispatch_semaphore_t notify_sema = arg;
	dispatch_semaphore_signal(backtrace_started);
	recurse_spin(notify_sema, 257);
	return NULL;
}

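/*
 * Sample the spinning thread's over-deep stack on a timer and expect the
 * stack header to be marked as both valid and truncated.
 */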
T_DECL(kperf_ustack_trunc, "ensure stacks are marked as truncated")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	configure_kperf_stacks_timer(getpid(), 10, false);

	__block bool saw_stack = false;
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(saw_stack, "saw the user stack");
		T_END;
	});

	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		if (tp->threadid != tid) {
			return;
		}
		T_LOG("found %llu frame stack", tp->arg2);
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
		    "found valid callstack");
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_TRUNCATED,
		    "found truncated callstack");
		saw_stack = true;
		ktrace_end(s, 1);
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

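/*
 * Expect the header event to report exactly 256 frames and count the data
 * events (4 frames each) to check that every frame was emitted.
 */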
T_DECL(kperf_ustack_maxlen, "ensure stacks up to 256 frames can be captured")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	configure_kperf_stacks_timer(getpid(), 10, false);

	__block bool saw_stack = false;
	__block bool saw_stack_data = false;
	__block uint64_t nevents = 0;
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(saw_stack, "saw the user stack");
		T_LOG("saw %" PRIu64 " stack data events", nevents);
		T_EXPECT_TRUE(saw_stack_data, "saw all frames of the user stack");
		T_END;
	});

	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		if (tp->threadid != tid) {
			return;
		}
		T_LOG("found %llu frame stack", tp->arg2);
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
		    "found valid callstack");
		T_EXPECT_EQ(tp->arg2, UINT64_C(256),
		    "found the correct number of frames");
		saw_stack = true;
	});

	ktrace_events_single(s, PERF_STK_UDATA, ^(struct trace_point *tp) {
		if (tp->threadid != tid && !saw_stack) {
			return;
		}
		nevents++;
		if (nevents == 256 / 4) {
			ktrace_end(s, 1);
		}
		saw_stack_data = true;
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

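/*
 * Enable lightweight PET to sample all threads on a timer, expecting more
 * than 98% of the user stack headers seen to be marked valid.
 */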
T_DECL(kperf_ustack_mostly_valid,
    "ensure most stacks captured by kperf are valid")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);
	configure_kperf_stacks_timer(-1, 10, false);
	int set = 1;
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);

	__block uint64_t stacks_seen = 0;
	__block uint64_t valid_stacks_seen = 0;

	ktrace_set_completion_handler(s, ^{
		double valid_density = (double)valid_stacks_seen / (double)stacks_seen;
		T_LOG("saw %" PRIu64 " stack header events, %" PRIu64 " valid (%g%%)",
		    stacks_seen, valid_stacks_seen, valid_density * 100.);
		T_EXPECT_GT(valid_density, 0.98, "more than 98%% of stacks are valid");
		T_END;
	});

	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	(void)create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		stacks_seen += 1;
		if (tp->arg1 & CALLSTACK_VALID && tp->arg2 > 0) {
			valid_stacks_seen += 1;
		}
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

/* TODO test kernel stacks in all modes */
/* TODO legacy PET mode backtracing */