1 // Copyright (c) 2020-2021 Apple Inc. All rights reserved.
2
3 #include <darwintest.h>
4 #include <darwintest_utils.h>
5 #include <dispatch/dispatch.h>
6 #include <inttypes.h>
7 #include <ktrace/session.h>
8 #include <ktrace/private.h>
9 #include <kperf/kperf.h>
10 #include <mach/clock_types.h>
11 #include <mach/dyld_kernel.h>
12 #include <mach/host_info.h>
13 #include <mach/mach.h>
14 #include <mach/mach_init.h>
15 #include <mach/task.h>
16 #include <os/assumes.h>
17 #include <stdlib.h>
18 #include <sys/kdebug.h>
19 #include <sys/kdebug_signpost.h>
20 #include <sys/resource_private.h>
21 #include <sys/sysctl.h>
22 #include <stdint.h>
23 #include <TargetConditionals.h>
24
25 #include "ktrace_helpers.h"
26 #include "test_utils.h"
27 #include "ktrace_meta.h"
28
29 #define KDBG_TEST_MACROS 1
30 #define KDBG_TEST_OLD_TIMES 2
31 #define KDBG_TEST_FUTURE_TIMES 3
32 #define KDBG_TEST_IOP_SYNC_FLUSH 4
33
34 #pragma mark kdebug syscalls
35
36 #define TRACE_DEBUGID (0xfedfed00U)
37
38 T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events")
39 {
40 start_controlling_ktrace();
41
42 ktrace_session_t s = ktrace_session_create();
43 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
44 ktrace_set_collection_interval(s, 200);
45
46 __block int events_seen = 0;
47 ktrace_events_single(s, TRACE_DEBUGID, ^void (struct trace_point *tp) {
48 events_seen++;
49 T_PASS("saw traced event");
50
51 if (ktrace_is_kernel_64_bit(s)) {
52 T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedfacefeedface),
53 "argument 1 of traced event is correct");
54 } else {
55 T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedface),
56 "argument 1 of traced event is correct");
57 }
58 T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of traced event is correct");
59 T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of traced event is correct");
60 T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of traced event is correct");
61
62 ktrace_end(s, 1);
63 });
64
65 ktrace_set_completion_handler(s, ^{
66 T_EXPECT_GE(events_seen, 1, NULL);
67 ktrace_session_destroy(s);
68 T_END;
69 });
70
71 ktrace_filter_pid(s, getpid());
72
73 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
74 T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 0xfeedfacefeedface, 2,
75 3, 4), NULL);
76 ktrace_end(s, 0);
77
78 dispatch_main();
79 }
80
81 #if __LP64__
82 #define IS_64BIT true
83 #else // __LP64__
84 #define IS_64BIT false
85 #endif // !__LP64__
86
87 #define STRING_SIZE (1024)
88
89 T_DECL(kdebug_trace_string_syscall,
90 "test that kdebug_trace_string(2) emits correct events",
91 T_META_ENABLED(IS_64BIT))
92 {
93 start_controlling_ktrace();
94
95 ktrace_session_t s = ktrace_session_create();
96 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
97 ktrace_set_collection_interval(s, 200);
98 ktrace_filter_pid(s, getpid());
99
100 char *traced_string = calloc(1, STRING_SIZE);
101 T_QUIET; T_WITH_ERRNO;
102 T_ASSERT_NOTNULL(traced_string, "allocated memory for string");
103 for (size_t i = 0; i < sizeof(traced_string); i++) {
104 traced_string[i] = 'a' + (i % 26);
105 }
106 traced_string[sizeof(traced_string) - 1] = '\0';
107 size_t traced_len = strlen(traced_string);
108 T_QUIET; T_ASSERT_EQ(traced_len, sizeof(traced_string) - 1,
109 "traced string should be filled");
110
111 ktrace_events_single(s, TRACE_DEBUGID,
112 ^void (struct trace_point * __unused tp) {
113 // Do nothing -- just ensure the event is filtered in.
114 });
115
116 __block unsigned int string_cpu = 0;
117 __block bool tracing_string = false;
118 char *observed_string = calloc(1, PATH_MAX);
119 T_QUIET; T_WITH_ERRNO;
120 T_ASSERT_NOTNULL(observed_string, "allocated memory for observed string");
121 __block size_t string_offset = 0;
122 ktrace_events_single(s, TRACE_STRING_GLOBAL, ^(struct trace_point *tp){
123 if (tp->debugid & DBG_FUNC_START && tp->arg1 == TRACE_DEBUGID) {
124 tracing_string = true;
125 string_cpu = tp->cpuid;
126 memcpy(observed_string + string_offset, &tp->arg3,
127 sizeof(uint64_t) * 2);
128 string_offset += sizeof(uint64_t) * 2;
129 } else if (tracing_string && string_cpu == tp->cpuid) {
130 memcpy(observed_string + string_offset, &tp->arg1,
131 sizeof(uint64_t) * 4);
132 string_offset += sizeof(uint64_t) * 4;
133 if (tp->debugid & DBG_FUNC_END) {
134 ktrace_end(s, 1);
135 }
136 }
137 });
138
139 ktrace_set_completion_handler(s, ^{
140 T_EXPECT_TRUE(tracing_string, "found string in trace");
141 size_t observed_len = strlen(observed_string);
142 T_EXPECT_EQ(traced_len, observed_len, "string lengths should be equal");
143 if (traced_len == observed_len) {
144 T_EXPECT_EQ_STR(traced_string, observed_string,
145 "observed correct string");
146 }
147 ktrace_session_destroy(s);
148 T_END;
149 });
150
151 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
152 uint64_t str_id = kdebug_trace_string(TRACE_DEBUGID, 0, traced_string);
153 T_WITH_ERRNO; T_ASSERT_NE(str_id, (uint64_t)0, "kdebug_trace_string(2)");
154 ktrace_end(s, 0);
155
156 dispatch_main();
157 }
158
159 #define SIGNPOST_SINGLE_CODE (0x10U)
160 #define SIGNPOST_PAIRED_CODE (0x20U)
161
162 T_DECL(kdebug_signpost_syscall,
163 "test that kdebug_signpost(2) emits correct events")
164 {
165 start_controlling_ktrace();
166
167 ktrace_session_t s = ktrace_session_create();
168 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
169
170 __block int single_seen = 0;
171 __block int paired_seen = 0;
172
173 /* make sure to get enough events for the KDBUFWAIT to trigger */
174 // ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
175 ktrace_events_single(s,
176 APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
177 ^(struct trace_point *tp) {
178 single_seen++;
179 T_PASS("single signpost is traced");
180
181 T_EXPECT_EQ(tp->arg1, 1ULL, "argument 1 of single signpost is correct");
182 T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of single signpost is correct");
183 T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of single signpost is correct");
184 T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of single signpost is correct");
185 });
186
187 ktrace_events_single_paired(s,
188 APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
189 ^(struct trace_point *start, struct trace_point *end) {
190 paired_seen++;
191 T_PASS("paired signposts are traced");
192
193 T_EXPECT_EQ(start->arg1, 5ULL, "argument 1 of start signpost is correct");
194 T_EXPECT_EQ(start->arg2, 6ULL, "argument 2 of start signpost is correct");
195 T_EXPECT_EQ(start->arg3, 7ULL, "argument 3 of start signpost is correct");
196 T_EXPECT_EQ(start->arg4, 8ULL, "argument 4 of start signpost is correct");
197
198 T_EXPECT_EQ(end->arg1, 9ULL, "argument 1 of end signpost is correct");
199 T_EXPECT_EQ(end->arg2, 10ULL, "argument 2 of end signpost is correct");
200 T_EXPECT_EQ(end->arg3, 11ULL, "argument 3 of end signpost is correct");
201 T_EXPECT_EQ(end->arg4, 12ULL, "argument 4 of end signpost is correct");
202
203 T_EXPECT_EQ(single_seen, 1, "signposts are traced in the correct order");
204
205 ktrace_end(s, 1);
206 });
207
208 ktrace_set_completion_handler(s, ^(void) {
209 T_QUIET; T_EXPECT_NE(single_seen, 0,
210 "did not see single tracepoint before timeout");
211 T_QUIET; T_EXPECT_NE(paired_seen, 0,
212 "did not see single tracepoint before timeout");
213 ktrace_session_destroy(s);
214 T_END;
215 });
216
217 ktrace_filter_pid(s, getpid());
218
219 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
220 "started tracing");
221
222 #pragma clang diagnostic push
223 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
224 T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4),
225 "emitted single signpost");
226 T_EXPECT_POSIX_SUCCESS(
227 kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8),
228 "emitted start signpost");
229 T_EXPECT_POSIX_SUCCESS(
230 kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12),
231 "emitted end signpost");
232 #pragma clang diagnostic pop
233 ktrace_end(s, 0);
234
235 dispatch_main();
236 }
237
238 T_DECL(syscall_tracing,
239 "ensure that syscall arguments are traced propertly")
240 {
241 ktrace_session_t s = ktrace_session_create();
242 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
243
244 __block bool seen = 0;
245
246 ktrace_filter_pid(s, getpid());
247
248 static const int telemetry_syscall_no = 451;
249 static const uint64_t arg1 = 0xfeedfacefeedface;
250
251 ktrace_events_single(s, BSDDBG_CODE(DBG_BSD_EXCP_SC, telemetry_syscall_no),
252 ^(struct trace_point *evt){
253 if (KDBG_EXTRACT_CODE(evt->debugid) != telemetry_syscall_no || seen) {
254 return;
255 }
256
257 seen = true;
258 if (ktrace_is_kernel_64_bit(s)) {
259 T_EXPECT_EQ(evt->arg1, arg1,
260 "argument 1 of syscall event is correct");
261 } else {
262 T_EXPECT_EQ(evt->arg1, (uint64_t)(uint32_t)(arg1),
263 "argument 1 of syscall event is correct");
264 }
265
266 ktrace_end(s, 1);
267 });
268
269 ktrace_set_completion_handler(s, ^{
270 T_ASSERT_TRUE(seen,
271 "should have seen a syscall event for kevent_id(2)");
272 ktrace_session_destroy(s);
273 T_END;
274 });
275
276 int error = ktrace_start(s, dispatch_get_main_queue());
277 T_ASSERT_POSIX_ZERO(error, "started tracing");
278
279 /*
280 * telemetry(2) has a 64-bit argument that will definitely be traced, and
281 * is unlikely to be used elsewhere by this process.
282 */
283 extern int __telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval,
284 uint64_t leeway, uint64_t arg4, uint64_t arg5);
285 (void)__telemetry(arg1, 0, 0, 0, 0, 0);
286
287 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
288 dispatch_get_main_queue(), ^{
289 T_LOG("ending test due to timeout");
290 ktrace_end(s, 0);
291 });
292
293 dispatch_main();
294 }
295
296 #pragma mark kdebug behaviors
297
298 #define WRAPPING_EVENTS_COUNT (150000)
299 #define TRACE_ITERATIONS (5000)
300 #define WRAPPING_EVENTS_THRESHOLD (100)
301
T_DECL(wrapping,
    "ensure that wrapping traces lost events and no events prior to the wrap",
    T_META_CHECK_LEAKS(false))
{
	kbufinfo_t buf_info;
	/* Budget roughly one second per batch of emitted events, plus slack. */
	int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
	int current_secs = wait_wrapping_secs;

	start_controlling_ktrace();

	/* use sysctls manually to bypass libktrace assumptions */

	int mib[4] = { CTL_KERN, KERN_KDEBUG };
	/* Size the kernel buffer so it is feasible to fill and wrap it. */
	mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");

	mib[2] = KERN_KDSETUP; mib[3] = 0;
	size_t needed = 0;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
	    "KERN_KDSETUP");

	mib[2] = KERN_KDENABLE; mib[3] = 1;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");

	/* wrapping is on by default */

	/* wait until wrapped */
	T_LOG("waiting for trace to wrap");
	mib[2] = KERN_KDGETBUF;
	needed = sizeof(buf_info);
	do {
		sleep(1);
		/* Emit a batch of events to push the buffer toward wrapping. */
		for (int i = 0; i < TRACE_ITERATIONS; i++) {
			T_QUIET;
			T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
		}
		/* Poll the buffer flags for KDBG_WRAPPED. */
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
		    NULL);
	} while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);

	T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
	    "trace wrapped (after %d seconds within %d second timeout)",
	    wait_wrapping_secs - current_secs, wait_wrapping_secs);

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
	/* Attach to the kernel buffers configured by the sysctls above. */
	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);

	__block int events = 0;

	ktrace_events_all(s, ^(struct trace_point *tp) {
		if (events == 0) {
			/* After a wrap, the very first event must announce the loss. */
			T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
			    "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
			    tp->debugid,
			    ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
		} else {
			/* No further lost-events markers should appear in the prefix. */
			T_QUIET;
			T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
			    "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
			    tp->debugid,
			    ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
		}

		events++;
		/* Checking a prefix of the trace is sufficient. */
		if (events > WRAPPING_EVENTS_THRESHOLD) {
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "started tracing");

	dispatch_main();
}
383
384 static void
_assert_tracing_state(bool enable,const char * msg)385 _assert_tracing_state(bool enable, const char *msg)
386 {
387 kbufinfo_t bufinfo = { 0 };
388 T_QUIET;
389 T_ASSERT_POSIX_SUCCESS(sysctl(
390 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3,
391 &bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0),
392 "get kdebug buffer info");
393 T_QUIET;
394 T_ASSERT_NE(bufinfo.nkdbufs, 0, "tracing should be configured");
395 T_ASSERT_NE(bufinfo.nolog, enable, "%s: tracing should%s be enabled",
396 msg, enable ? "" : "n't");
397 }
398
399 #define DRAIN_TIMEOUT_NS (1 * NSEC_PER_SEC)
400
401 static void
_drain_until_event(uint32_t debugid)402 _drain_until_event(uint32_t debugid)
403 {
404 static kd_buf events[256] = { 0 };
405 size_t events_size = sizeof(events);
406 uint64_t start_time_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
407 while (true) {
408 T_QUIET;
409 T_ASSERT_POSIX_SUCCESS(sysctl(
410 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREADTR, }, 3,
411 events, &events_size, NULL, 0), "reading trace data");
412 size_t events_count = events_size;
413 for (size_t i = 0; i < events_count; i++) {
414 if (events[i].debugid == debugid) {
415 T_LOG("draining found event 0x%x", debugid);
416 return;
417 }
418 }
419 uint64_t cur_time_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
420 if (cur_time_ns - start_time_ns < DRAIN_TIMEOUT_NS) {
421 T_ASSERT_FAIL("timed out while waiting for event 0x%x", debugid);
422 }
423 }
424 }
425
T_DECL(disabling_event_match,
    "ensure that ktrace is disabled when an event disable matcher fires")
{
	start_controlling_ktrace();

	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
	/* Register interest so the session gets configured to capture events. */
	ktrace_events_single(s, TRACE_DEBUGID,
	    ^(struct trace_point *tp __unused) {});
	int error = ktrace_configure(s);
	T_QUIET; T_ASSERT_POSIX_ZERO(error, "configured session");
	/*
	 * First matcher: disable tracing when TRACE_DEBUGID arrives with
	 * arg1 == 0xff.  The second matcher uses UINT32_MAX as a debugid that
	 * should never match.
	 */
	kd_event_matcher matchers[2] = { {
		.kem_debugid = TRACE_DEBUGID,
		.kem_args[0] = 0xff,
	}, {
		.kem_debugid = UINT32_MAX,
		.kem_args[0] = 0xfff,
	} };
	size_t matchers_size = sizeof(matchers);
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSET_EDM, }, 3,
		&matchers, &matchers_size, NULL, 0), "set event disable matcher");
	size_t size = 0;
	/* Turn on match-based disabling before enabling tracing itself. */
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDEFLAGS, KDBG_MATCH_DISABLE, }, 4,
		NULL, &size, NULL, 0), "enabled event disable matching");
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDENABLE, 1, }, 4,
		NULL, NULL, NULL, 0), "enabled tracing");
	_assert_tracing_state(true, "after enabling trace");
	T_SETUPEND;

	/* A non-matching debugid must not trip the matcher. */
	kdebug_trace(TRACE_DEBUGID + 8, 0xff, 0, 0, 0);
	_drain_until_event(TRACE_DEBUGID + 8);
	_assert_tracing_state(true, "with wrong debugid");
	/* The matching debugid with a non-matching argument must not either. */
	kdebug_trace(TRACE_DEBUGID, 0, 0, 0, 0);
	_drain_until_event(TRACE_DEBUGID);
	_assert_tracing_state(true, "with wrong argument");
	/* Matching debugid and argument should disable tracing. */
	kdebug_trace(TRACE_DEBUGID, 0xff, 0, 0, 0);
	_drain_until_event(TRACE_DEBUGID);
	_assert_tracing_state(false, "after disabling event");
}
469
470 T_DECL(reject_old_events,
471 "ensure that kdebug rejects events from before tracing began",
472 T_META_CHECK_LEAKS(false))
473 {
474 __block uint64_t event_horizon_ts;
475
476 start_controlling_ktrace();
477
478 ktrace_session_t s = ktrace_session_create();
479 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
480
481 __block int events = 0;
482 ktrace_events_single(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 1),
483 ^(struct trace_point *tp) {
484 events++;
485 T_EXPECT_GT(tp->timestamp, event_horizon_ts,
486 "events in trace should be from after tracing began");
487 });
488
489 ktrace_set_completion_handler(s, ^{
490 T_EXPECT_EQ(events, 2, "should see only two events");
491 ktrace_session_destroy(s);
492 T_END;
493 });
494
495 event_horizon_ts = mach_absolute_time();
496
497 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
498 /* first, try an old event at the beginning of trace */
499 assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event at beginning");
500 /* after a good event has been traced, old events should be rejected */
501 assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event to be rejected");
502 ktrace_end(s, 0);
503
504 dispatch_main();
505 }
506
507 #define ORDERING_TIMEOUT_SEC 5
508
509 T_DECL(ascending_time_order,
510 "ensure that kdebug events are in ascending order based on time",
511 T_META_CHECK_LEAKS(false))
512 {
513 __block uint64_t prev_ts = 0;
514 __block uint32_t prev_debugid = 0;
515 __block unsigned int prev_cpu = 0;
516 __block bool in_order = true;
517
518 start_controlling_ktrace();
519
520 ktrace_session_t s = ktrace_session_create();
521 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
522
523 ktrace_events_all(s, ^(struct trace_point *tp) {
524 if (tp->timestamp < prev_ts) {
525 in_order = false;
526 T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
527 prev_ts, prev_debugid, prev_cpu);
528 T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
529 tp->timestamp, tp->debugid, tp->cpuid);
530 ktrace_end(s, 1);
531 }
532 });
533
534 ktrace_set_completion_handler(s, ^{
535 ktrace_session_destroy(s);
536 T_EXPECT_TRUE(in_order, "event timestamps were in-order");
537 T_END;
538 });
539
540 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
541 "started tracing");
542
543 /* try to inject old timestamps into trace */
544 assert_kdebug_test(KDBG_TEST_OLD_TIMES, "inject old time");
545
546 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
547 dispatch_get_main_queue(), ^{
548 T_LOG("ending test after timeout");
549 ktrace_end(s, 1);
550 });
551
552 dispatch_main();
553 }
554
555 #pragma mark dyld tracing
556
557 __attribute__((aligned(8)))
558 static const char map_uuid[16] = "map UUID";
559
560 __attribute__((aligned(8)))
561 static const char unmap_uuid[16] = "unmap UUID";
562
563 __attribute__((aligned(8)))
564 static const char sc_uuid[16] = "shared UUID";
565
566 static fsid_t map_fsid = { .val = { 42, 43 } };
567 static fsid_t unmap_fsid = { .val = { 44, 45 } };
568 static fsid_t sc_fsid = { .val = { 46, 47 } };
569
570 static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
571 static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
572 static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };
573
574 #define MAP_LOAD_ADDR 0xabadcafe
575 #define UNMAP_LOAD_ADDR 0xfeedface
576 #define SC_LOAD_ADDR 0xfedfaced
577
/*
 * Check one dyld image-info tracepoint against the expected UUID, load
 * address, fsid, and fsobjid.  `order` selects which event of the sequence
 * this is, since the payload is split across multiple events whose packing
 * differs between 64-bit and 32-bit tracing.
 */
__unused
static void
expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
    uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
    int order)
{
#if defined(__LP64__) || defined(__arm64__)
	/* 64-bit: event A carries UUID, load address, and fsid; event B
	 * carries the fsobjid packed into a single argument. */
	if (order == 0) {
		uint64_t uuid[2];
		uint64_t load_addr;
		fsid_t fsid;

		uuid[0] = (uint64_t)tp->arg1;
		uuid[1] = (uint64_t)tp->arg2;
		load_addr = (uint64_t)tp->arg3;
		/* fsid's two 32-bit values are packed into arg4. */
		fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
		fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);

		T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
		T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
	} else if (order == 1) {
		fsobj_id_t fsobjid;

		/* fsobjid's objno and generation are packed into arg1. */
		fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
		fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);

		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
		    exp_fsobjid->fid_generation, NULL);
	} else {
		T_ASSERT_FAIL("unrecognized order of events %d", order);
	}
#else /* defined(__LP64__) */
	/* 32-bit: the payload spans three events with one value per arg. */
	if (order == 0) {
		uint32_t uuid[4];

		uuid[0] = (uint32_t)tp->arg1;
		uuid[1] = (uint32_t)tp->arg2;
		uuid[2] = (uint32_t)tp->arg3;
		uuid[3] = (uint32_t)tp->arg4;

		/* Compare against the 64-bit expected UUID halves. */
		T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
		T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
	} else if (order == 1) {
		uint32_t load_addr;
		fsid_t fsid;
		fsobj_id_t fsobjid;

		load_addr = (uint32_t)tp->arg1;
		fsid.val[0] = (int32_t)tp->arg2;
		fsid.val[1] = (int32_t)tp->arg3;
		fsobjid.fid_objno = (uint32_t)tp->arg4;

		T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
	} else if (order == 2) {
		fsobj_id_t fsobjid;

		/* The generation spills into a third event. */
		fsobjid.fid_generation = tp->arg1;

		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
		    exp_fsobjid->fid_generation, NULL);
	} else {
		T_ASSERT_FAIL("unrecognized order of events %d", order);
	}
#endif /* defined(__LP64__) */
}
652
653 #if defined(__LP64__) || defined(__arm64__)
654 #define DYLD_CODE_OFFSET (0)
655 #define DYLD_EVENTS (2)
656 #else
657 #define DYLD_CODE_OFFSET (2)
658 #define DYLD_EVENTS (3)
659 #endif
660
/*
 * Register handlers for the DYLD_EVENTS consecutive event codes starting at
 * `base_code` that together describe one dyld image.  Each handler checks
 * its event via expect_dyld_image_info and records that it fired by setting
 * bit `i` in *saw_events.
 */
static void
expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
    const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
    fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
{
	for (int i = 0; i < DYLD_EVENTS; i++) {
		/* Each block captures this iteration's `i` by value. */
		ktrace_events_single(s, KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
		    base_code + DYLD_CODE_OFFSET + (unsigned int)i),
		    ^(struct trace_point *tp) {
			T_LOG("checking %s event %c", name, 'A' + i);
			expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
			    exp_fsid, exp_fsobjid, i);
			*saw_events |= (1U << i);
		});
	}
}
677
T_DECL(dyld_events, "test that dyld registering libraries emits events")
{
	dyld_kernel_image_info_t info;

	/*
	 * Use pointers instead of __block variables in order to use these variables
	 * in the completion block below _and_ pass pointers to them to the
	 * expect_dyld_events function.
	 */
	uint8_t saw_events[3] = { 0 };
	uint8_t *saw_mapping = &(saw_events[0]);
	uint8_t *saw_unmapping = &(saw_events[1]);
	uint8_t *saw_shared_cache = &(saw_events[2]);

	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
	    "filtered to current process");

	/* Arrange to check each group of dyld events against its fixture. */
	expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
	    MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
	expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
	    UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
	expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
	    sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		/* Each bitmask should have one bit set per expected event. */
		T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* Register a fake image to generate the mapping events. */
	info.load_addr = MAP_LOAD_ADDR;
	memcpy(info.uuid, map_uuid, sizeof(info.uuid));
	info.fsid = map_fsid;
	info.fsobjid = map_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
	    &info, 1), "registered dyld image info");

	/* Unregister a fake image to generate the unmapping events. */
	info.load_addr = UNMAP_LOAD_ADDR;
	memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
	info.fsid = unmap_fsid;
	info.fsobjid = unmap_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
	    &info, 1), "unregistered dyld image info");

	/* Register a fake shared cache to generate the shared-cache events. */
	info.load_addr = SC_LOAD_ADDR;
	memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
	info.fsid = sc_fsid;
	info.fsobjid = sc_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
	    mach_task_self(), info, FALSE, FALSE),
	    "registered dyld shared cache image info");

	ktrace_end(s, 0);

	dispatch_main();
}
745
746 #pragma mark kdebug kernel macros
747
748 #define EXP_KERNEL_EVENTS 5U
749
750 static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
751 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
752 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
753 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
754 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
755 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
756 };
757
758 static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
759 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
760 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
761 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
762 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
763 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
764 };
765
766 static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
767 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
768 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
769 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
770 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
771 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
772 };
773
774 static const uint32_t noprocfilt_evts[EXP_KERNEL_EVENTS] = {
775 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 15),
776 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 16),
777 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 17),
778 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 18),
779 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 19),
780 };
781
/*
 * If this tracepoint's event ID is in `event_ids`, count it in *events and
 * check its arguments: the kernel test emits the Nth matching event with its
 * first N arguments set to 1..N and the rest zeroed.
 */
static void
expect_event(struct trace_point *tp, const char *name, unsigned int *events,
    const uint32_t *event_ids, size_t event_ids_len)
{
	/* Number of matching events seen so far, before this one. */
	unsigned int event_idx = *events;
	bool event_found = false;
	size_t i;
	for (i = 0; i < event_ids_len; i++) {
		if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
			T_LOG("found %s event 0x%x", name, tp->debugid);
			event_found = true;
		}
	}

	if (!event_found) {
		return;
	}

	*events += 1;
	/*
	 * NOTE(review): treats arg1..arg4 as a contiguous uint64_t array --
	 * assumes struct trace_point lays them out adjacently; confirm against
	 * the ktrace headers.
	 */
	for (i = 0; i < event_idx; i++) {
		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)i + 1,
		    NULL);
	}
	/* Remaining arguments should be zero. */
	for (; i < 4; i++) {
		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)0, NULL);
	}
}
809
810 static void
expect_release_event(struct trace_point * tp,unsigned int * events)811 expect_release_event(struct trace_point *tp, unsigned int *events)
812 {
813 expect_event(tp, "release", events, rel_evts,
814 sizeof(rel_evts) / sizeof(rel_evts[0]));
815 }
816
817 static void
expect_development_event(struct trace_point * tp,unsigned int * events)818 expect_development_event(struct trace_point *tp, unsigned int *events)
819 {
820 expect_event(tp, "dev", events, dev_evts, sizeof(dev_evts) / sizeof(dev_evts[0]));
821 }
822
823 static void
expect_filtered_event(struct trace_point * tp,unsigned int * events)824 expect_filtered_event(struct trace_point *tp, unsigned int *events)
825 {
826 expect_event(tp, "filtered", events, filt_evts,
827 sizeof(filt_evts) / sizeof(filt_evts[0]));
828 }
829
830 static void
expect_noprocfilt_event(struct trace_point * tp,unsigned int * events)831 expect_noprocfilt_event(struct trace_point *tp, unsigned int *events)
832 {
833 expect_event(tp, "noprocfilt", events, noprocfilt_evts,
834 sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]));
835 }
836
/*
 * Register callbacks that count the test events emitted by the kernel's
 * KDBG_TEST_MACROS sysctl, trigger that test, and hand the per-category
 * counts to `cb` from the session's completion handler.  When
 * `use_all_callback` is set, all events are inspected rather than just the
 * DBG_BSD_KDEBUG_TEST range.
 */
static void
expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback,
    void (^cb)(unsigned int dev_seen, unsigned int rel_seen,
    unsigned int filt_seen, unsigned int noprocfilt_seen))
{
	__block unsigned int dev_seen = 0;
	__block unsigned int rel_seen = 0;
	__block unsigned int filt_seen = 0;
	__block unsigned int noprocfilt_seen = 0;

	/* Each event is checked against all four categories. */
	void (^evtcb)(struct trace_point *tp) = ^(struct trace_point *tp) {
		expect_development_event(tp, &dev_seen);
		expect_release_event(tp, &rel_seen);
		expect_filtered_event(tp, &filt_seen);
		expect_noprocfilt_event(tp, &noprocfilt_seen);
	};

	if (use_all_callback) {
		ktrace_events_all(s, evtcb);
	} else {
		/* Restrict to the DBG_BSD_KDEBUG_TEST event ID range. */
		ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
		    KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb);
	}

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		cb(dev_seen, rel_seen, filt_seen, noprocfilt_seen);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	/* Ask the kernel to emit its test events. */
	assert_kdebug_test(KDBG_TEST_MACROS, "check test macros");

	ktrace_end(s, 0);
}
872
873 T_DECL(kernel_events, "ensure kernel macros work")
874 {
875 start_controlling_ktrace();
876
877 ktrace_session_t s = ktrace_session_create();
878 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
879
880 T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
881 "filtered events to current process");
882
883 expect_kdbg_test_events(s, false,
884 ^(unsigned int dev_seen, unsigned int rel_seen,
885 unsigned int filt_seen, unsigned int noprocfilt_seen) {
886 /*
887 * Development-only events are only filtered if running on an embedded
888 * OS.
889 */
890 unsigned int dev_exp;
891 #if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
892 dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
893 #else
894 dev_exp = EXP_KERNEL_EVENTS;
895 #endif
896
897 T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
898 "release and development events seen");
899 T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen",
900 dev_exp ? "" : "not ");
901 T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
902 T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
903 "process filter-agnostic events seen");
904 });
905
906 dispatch_main();
907 }
908
909 T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work")
910 {
911 start_controlling_ktrace();
912
913 ktrace_session_t s = ktrace_session_create();
914 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
915
916 T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
917 "filtered events to current process");
918
919 expect_kdbg_test_events(s, true,
920 ^(unsigned int dev_seen, unsigned int rel_seen,
921 unsigned int filt_seen, unsigned int noprocfilt_seen) {
922 T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
923 #if defined(__arm__) || defined(__arm64__)
924 T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
925 NULL);
926 #else
927 T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS,
928 "development-only events seen");
929 #endif /* defined(__arm__) || defined(__arm64__) */
930 T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen");
931 T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
932 "process filter-agnostic events seen");
933 });
934
935 dispatch_main();
936 }
937
938 T_DECL(kernel_events_noprocfilt,
939 "ensure that the no process filter kernel macros work")
940 {
941 start_controlling_ktrace();
942
943 ktrace_session_t s = ktrace_session_create();
944 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
945
946 /*
947 * Only allow launchd events through.
948 */
949 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, 1), "filtered events to launchd");
950 for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) {
951 T_QUIET;
952 T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s,
953 noprocfilt_evts[i]),
954 "ignored process filter for noprocfilt event");
955 }
956
957 expect_kdbg_test_events(s, false,
958 ^(unsigned int dev_seen, unsigned int rel_seen,
959 unsigned int filt_seen, unsigned int noprocfilt_seen) {
960 T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen");
961 T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen");
962 T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen");
963
964 T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
965 "process filter-agnostic events seen");
966 });
967
968 dispatch_main();
969 }
970
/*
 * Cleared by the stress test's timer block to stop the abuser threads.
 * NOTE(review): volatile is used here as a best-effort stop flag, not as a
 * synchronization primitive; exact stop timing is not important to the test.
 */
static volatile bool continue_abuse = true;

/* Arbitrary debugid emitted by the abuser threads. */
#define STRESS_DEBUGID (0xfeedfac0)
/* How long to let the abuser threads hammer kdebug_trace(2). */
#define ABUSE_SECS (2)
/* kperf timer period used by the (currently disabled) sampling setup. */
#define TIMER_NS (100 * NSEC_PER_USEC)
/*
 * Use the quantum as the gap threshold.
 */
#define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)
980
981 static void *
kdebug_abuser_thread(void * ctx)982 kdebug_abuser_thread(void *ctx)
983 {
984 unsigned int id = (unsigned int)ctx;
985 uint64_t i = 0;
986 while (continue_abuse) {
987 kdebug_trace(STRESS_DEBUGID, id, i, 0, 0);
988 i++;
989 }
990
991 return NULL;
992 }
993
/*
 * Stress test: run abuser threads on all but one CPU with a tiny (10 MB)
 * trace buffer, write the trace to a file, then re-read the file and verify
 * that any large gap in the event stream is explained by a lost-events
 * marker or a known CPU-idle/interrupt event.
 */
T_DECL(stress, "emit events on all but one CPU with a small buffer",
    T_META_CHECK_LEAKS(false))
{
	start_controlling_ktrace();

	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/* Let's not waste any time with pleasantries. */
	ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED);

	/* Ouch. */
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {});
	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
	/* Make sure kperf is reset even if the test exits abnormally. */
	(void)atexit_b(^{ kperf_reset(); });
	(void)kperf_action_count_set(1);
	(void)kperf_timer_count_set(1);
	int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns",
	    TIMER_NS);
	kperror = kperf_timer_action_set(0, 1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set");
	/* Enable every sampler to maximize the event volume per sample. */
	kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO |
	    KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK |
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO |
	    KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH |
	    KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM |
	    KPERF_SAMPLER_TH_INSTRS_CYCLES);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set");
	/* You monster... */

	/* The coup-de-grace. */
	ktrace_set_buffer_size(s, 10);

	char filepath_arr[MAXPATHLEN] = "";
	strlcpy(filepath_arr, dt_tmpdir(), sizeof(filepath_arr));
	strlcat(filepath_arr, "/stress.ktrace", sizeof(filepath_arr));
	char *filepath = filepath_arr;

	int ncpus = 0;
	size_t ncpus_size = sizeof(ncpus);
	int ret = sysctlbyname("hw.logicalcpu_max", &ncpus, &ncpus_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.logicalcpu_max\"");
	T_QUIET; T_ASSERT_GT(ncpus, 0, "realistic number of CPUs");

	/* One abuser thread per CPU, minus one CPU left for everything else. */
	pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t));
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
	    ncpus - 1);

	/* Phase two: once the live trace ends, re-read the file and analyze. */
	ktrace_set_completion_handler(s, ^{
		T_SETUPBEGIN;
		ktrace_session_destroy(s);

		T_LOG("trace ended, searching for gaps");

		ktrace_session_t sread = ktrace_session_create();
		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(sread, "ktrace_session_create");

		int error = ktrace_set_file(sread, filepath);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_set_file %s", filepath);

		ktrace_file_t f = ktrace_file_open(filepath, false);
		T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s",
		    filepath);
		uint64_t first_timestamp = 0;
		error = ktrace_file_earliest_timestamp(f, &first_timestamp);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp");

		uint64_t last_timestamp = 0;
		(void)ktrace_file_latest_timestamp(f, &last_timestamp);

		__block uint64_t prev_timestamp = 0;
		__block uint64_t nevents = 0;
		ktrace_events_all(sread, ^(struct trace_point *tp) {
			nevents++;
			uint64_t delta_ns = 0;
			T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp,
			    "timestamps are monotonically increasing");
			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			    tp->timestamp - prev_timestamp, &delta_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
			/* Only inspect gaps once there is a previous event. */
			if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) {
				if (tp->debugname) {
					T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
					    (double)delta_ns / 1e9, prev_timestamp,
					    tp->timestamp, tp->cpuid, tp->debugname, tp->debugid);
				} else {
					T_LOG("gap: %gs at %llu - %llu on %d: %#x",
					    (double)delta_ns / 1e9, prev_timestamp,
					    tp->timestamp, tp->cpuid, tp->debugid);
				}

				/*
				 * These gaps are ok -- they appear after CPUs are brought back
				 * up.
				 */
#define INTERRUPT (0x1050000)
#define PERF_CPU_IDLE (0x27001000)
#define INTC_HANDLER (0x5000004)
#define DECR_TRAP (0x1090000)
				uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK;
				if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE &&
				    eventid != INTC_HANDLER && eventid != DECR_TRAP) {
					unsigned int lost_events = TRACE_LOST_EVENTS;
					T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events,
					    "gaps should end with lost events");
				}
			}

			prev_timestamp = tp->timestamp;
		});
		ktrace_events_single(sread, TRACE_LOST_EVENTS, ^(struct trace_point *tp){
			T_LOG("lost: %llu on %d (%llu)", tp->timestamp, tp->cpuid, tp->arg1);
		});

		/* Log each buffer-flush window and how long the write took. */
		__block uint64_t last_write = 0;
		ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS,
		    ^(struct trace_point *start, struct trace_point *end) {
			uint64_t delta_ns;
			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			    start->timestamp - last_write, &delta_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

			uint64_t dur_ns;
			converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			    end->timestamp - start->timestamp, &dur_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

			T_LOG("write: %llu (+%gs): %gus on %d: %llu events", start->timestamp,
			    (double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1);
			last_write = end->timestamp;
		});
		ktrace_set_completion_handler(sread, ^{
			uint64_t duration_ns = 0;
			if (last_timestamp) {
				int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
				    last_timestamp - first_timestamp, &duration_ns);
				T_QUIET; T_ASSERT_POSIX_ZERO(converror,
				    "convert timestamp to ns");
				T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
				    (double)duration_ns / 1e9, nevents,
				    (double)nevents / ((double)duration_ns / 1e6) / ncpus);
			}
			(void)unlink(filepath);
			ktrace_session_destroy(sread);
			T_END;
		});

		int starterror = ktrace_start(sread, dispatch_get_main_queue());
		T_QUIET; T_ASSERT_POSIX_ZERO(starterror, "ktrace_start read session");

		T_SETUPEND;
	});

/* Just kidding... for now. */
#if 0
	kperror = kperf_sample_set(1);
	T_ASSERT_POSIX_SUCCESS(kperror,
	    "started kperf timer sampling every %llu ns", TIMER_NS);
#endif

	for (int i = 0; i < (ncpus - 1); i++) {
		int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread,
		    (void *)(uintptr_t)i);
		T_QUIET; T_ASSERT_POSIX_ZERO(error,
		    "pthread_create abuser thread %d", i);
	}

	int error = ktrace_start_writing_file(s, filepath,
	    ktrace_compression_none, NULL, NULL);
	T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath);

	T_SETUPEND;

	/* Let the abuse run for a fixed interval, then stop and join. */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^{
		T_LOG("ending trace");
		ktrace_end(s, 1);

		continue_abuse = false;
		for (int i = 0; i < (ncpus - 1); i++) {
			int joinerror = pthread_join(threads[i], NULL);
			T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d",
			    i);
		}
	});

	dispatch_main();
}
1184
/* Emit a marker event once per this many observed events. */
#define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
/* End the test after seeing this many marker events come back. */
#define ROUND_TRIPS_THRESHOLD UINT64_C(25)
/* Give up (and fail the expectations) after this long. */
#define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
/* Also used by the event_coverage test below. */
#define COLLECTION_INTERVAL_MS 100

/*
 * Test a sustained tracing session, involving multiple round-trips to the
 * kernel.
 *
 * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
 * unlikely to be emitted elsewhere. Look for this event, too, and make sure we
 * see as many of them as we emitted.
 *
 * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
 * In the failure mode, we won't see any of these, so set a timeout of
 * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
 * never see.
 */
T_DECL(round_trips,
    "test sustained tracing with multiple round-trips through the kernel")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/*
	 * Set a small buffer and collection interval to increase the number of
	 * round-trips.
	 */
	ktrace_set_buffer_size(s, 50);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	__block uint64_t events = 0;
	__block uint64_t emitted = 0;
	__block uint64_t seen = 0;
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {
		events++;
		if (events % ROUND_TRIP_PERIOD == 0) {
			T_LOG("emitting round-trip event %" PRIu64, emitted);
			kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
			emitted++;
		}
	});

	/* Each marker that makes it back through the kernel counts as seen. */
	ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
		T_LOG("saw round-trip event after %" PRIu64 " events", events);
		seen++;
		if (seen >= ROUND_TRIPS_THRESHOLD) {
			T_LOG("ending trace after seeing %" PRIu64 " events, "
			    "emitting %" PRIu64, seen, emitted);
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
		    "emitted %" PRIu64 " round-trip events", emitted);
		T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
		    "saw %" PRIu64 " round-trip events", seen);
		ktrace_session_destroy(s);
		T_END;
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	/* Failsafe: end the trace even if no markers ever come back. */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
	    ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
	    ^{
		T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
		ktrace_end(s, 0);
	});

	dispatch_main();
}
1261
#define HEARTBEAT_INTERVAL_SECS 1
#define HEARTBEAT_COUNT 10

/*
 * Ensure we see events periodically, checking for recent events on a
 * heart-beat.
 */
T_DECL(event_coverage, "ensure events appear up to the end of tracing")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/* Track the most recent event time and total count seen so far. */
	__block uint64_t current_timestamp = 0;
	__block uint64_t events = 0;
	ktrace_events_all(s, ^(struct trace_point *tp) {
		current_timestamp = tp->timestamp;
		events++;
	});

	/* Small buffer forces frequent collections between heartbeats. */
	ktrace_set_buffer_size(s, 20);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	__block uint64_t last_timestamp = 0;
	__block uint64_t last_events = 0;
	__block unsigned int heartbeats = 0;

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_QUIET; T_EXPECT_GT(events, 0ULL, "should have seen some events");
		T_END;
	});

	dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
	    0, 0, dispatch_get_main_queue());
	dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC),
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0);
	dispatch_source_set_cancel_handler(timer, ^{
		dispatch_release(timer);
	});

	/* On each heartbeat, verify progress since the previous heartbeat. */
	dispatch_source_set_event_handler(timer, ^{
		heartbeats++;

		T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, "
		    "current event time %lld", heartbeats, mach_absolute_time(),
		    events, current_timestamp);

		if (current_timestamp > 0) {
			T_EXPECT_GT(current_timestamp, last_timestamp,
			    "event timestamps should be increasing");
			T_QUIET; T_EXPECT_GT(events, last_events,
			    "number of events should be increasing");
		}

		last_timestamp = current_timestamp;
		last_events = events;

		/* Guarantee at least one fresh event before the next beat. */
		kdebug_trace(TRACE_DEBUGID, 0, 0, 0, 0);

		if (heartbeats >= HEARTBEAT_COUNT) {
			T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT);
			ktrace_end(s, 0);
		}
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	dispatch_activate(timer);

	dispatch_main();
}
1337
1338 static unsigned int
set_nevents(unsigned int nevents)1339 set_nevents(unsigned int nevents)
1340 {
1341 T_QUIET;
1342 T_ASSERT_POSIX_SUCCESS(sysctl(
1343 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)nevents }, 4,
1344 NULL, 0, NULL, 0), "set kdebug buffer size");
1345
1346 T_QUIET;
1347 T_ASSERT_POSIX_SUCCESS(sysctl(
1348 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETUP, (int)nevents }, 4,
1349 NULL, 0, NULL, 0), "setup kdebug buffers");
1350
1351 kbufinfo_t bufinfo = { 0 };
1352 T_QUIET;
1353 T_ASSERT_POSIX_SUCCESS(sysctl(
1354 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3,
1355 &bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0),
1356 "get kdebug buffer size");
1357
1358 T_QUIET;
1359 T_ASSERT_POSIX_SUCCESS(sysctl(
1360 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE }, 3,
1361 NULL, 0, NULL, 0),
1362 "remove kdebug buffers");
1363
1364 return (unsigned int)bufinfo.nkdbufs;
1365 }
1366
/*
 * Step through increasingly large buffer-size requests (up to one eighth of
 * physical memory) and check the kernel honors each request to within the
 * minimum buffer count.
 */
T_DECL(set_buffer_size, "ensure large buffer sizes can be set")
{
	T_SETUPBEGIN;
	uint64_t memsize = 0;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &memsize,
	    &(size_t){ sizeof(memsize) }, NULL, 0), "sysctl hw.memsize");
	T_SETUPEND;

#if TARGET_OS_IPHONE
	if (memsize >= (8ULL << 30)) {
		T_SKIP("skipping on iOS device with memory >= 8GB, rdar://79403304");
	}
#endif // TARGET_OS_IPHONE

	start_controlling_ktrace();

	/*
	 * Try to allocate up to one-eighth of available memory towards
	 * tracing.
	 */
	uint64_t maxevents_u64 = memsize / 8 / sizeof(kd_buf);
	if (maxevents_u64 > UINT32_MAX) {
		maxevents_u64 = UINT32_MAX;
	}
	unsigned int maxevents = (unsigned int)maxevents_u64;

	/* Requesting 0 events reveals the kernel's minimum buffer count. */
	unsigned int minevents = set_nevents(0);
	T_ASSERT_GT(minevents, 0, "saw non-zero minimum event count of %u",
	    minevents);

	/*
	 * NOTE(review): unsigned subtraction assumes maxevents > minevents + 1;
	 * on a machine with tiny memory this would wrap — the step assertion
	 * below would not catch the wrap, only a zero step.
	 */
	unsigned int step = ((maxevents - minevents - 1) / 4);
	T_ASSERT_GT(step, 0, "stepping by %u events", step);

	for (unsigned int i = minevents + step; i < maxevents; i += step) {
		unsigned int actualevents = set_nevents(i);
		T_ASSERT_GE(actualevents, i - minevents,
		    "%u events in kernel when %u requested", actualevents, i);
	}
}
1406
/* Minimal thread entry point, used only to trigger thread-creation events. */
static void *
donothing(__unused void *arg)
{
	return NULL;
}
1412
1413 T_DECL(long_names, "ensure long command names are reported")
1414 {
1415 start_controlling_ktrace();
1416
1417 char longname[] = "thisisaverylongprocessname!";
1418 char *longname_ptr = longname;
1419 static_assert(sizeof(longname) > 16,
1420 "the name should be longer than MAXCOMLEN");
1421
1422 int ret = sysctlbyname("kern.procname", NULL, NULL, longname,
1423 sizeof(longname));
1424 T_ASSERT_POSIX_SUCCESS(ret,
1425 "use sysctl kern.procname to lengthen the name");
1426
1427 ktrace_session_t ktsess = ktrace_session_create();
1428
1429 /*
1430 * 32-bit kernels can only trace 16 bytes of the string in their event
1431 * arguments.
1432 */
1433 if (!ktrace_is_kernel_64_bit(ktsess)) {
1434 longname[16] = '\0';
1435 }
1436
1437 ktrace_filter_pid(ktsess, getpid());
1438
1439 __block bool saw_newthread = false;
1440 ktrace_events_single(ktsess, TRACE_STRING_NEWTHREAD,
1441 ^(struct trace_point *tp) {
1442 if (ktrace_get_pid_for_thread(ktsess, tp->threadid) ==
1443 getpid()) {
1444 saw_newthread = true;
1445
1446 char argname[32] = {};
1447 strncat(argname, (char *)&tp->arg1, sizeof(tp->arg1));
1448 strncat(argname, (char *)&tp->arg2, sizeof(tp->arg2));
1449 strncat(argname, (char *)&tp->arg3, sizeof(tp->arg3));
1450 strncat(argname, (char *)&tp->arg4, sizeof(tp->arg4));
1451
1452 T_EXPECT_EQ_STR((char *)argname, longname_ptr,
1453 "process name of new thread should be long");
1454
1455 ktrace_end(ktsess, 1);
1456 }
1457 });
1458
1459 ktrace_set_completion_handler(ktsess, ^{
1460 ktrace_session_destroy(ktsess);
1461 T_EXPECT_TRUE(saw_newthread,
1462 "should have seen the new thread");
1463 T_END;
1464 });
1465
1466 int error = ktrace_start(ktsess, dispatch_get_main_queue());
1467 T_ASSERT_POSIX_ZERO(error, "started tracing");
1468
1469 pthread_t thread = NULL;
1470 error = pthread_create(&thread, NULL, donothing, NULL);
1471 T_ASSERT_POSIX_ZERO(error, "create new thread");
1472
1473 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
1474 dispatch_get_main_queue(), ^{
1475 ktrace_end(ktsess, 0);
1476 });
1477
1478 error = pthread_join(thread, NULL);
1479 T_ASSERT_POSIX_ZERO(error, "join to thread");
1480
1481 dispatch_main();
1482 }
1483
1484 T_DECL(continuous_time, "make sure continuous time status can be queried",
1485 T_META_RUN_CONCURRENTLY(true))
1486 {
1487 bool cont_time = kdebug_using_continuous_time();
1488 T_ASSERT_FALSE(cont_time, "should not be using continuous time yet");
1489 }
1490
1491 T_DECL(lookup_long_paths, "lookup long path names")
1492 {
1493 start_controlling_ktrace();
1494
1495 int ret = chdir("/tmp");
1496 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to /tmp");
1497 const char *dir = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/";
1498 int i = 0;
1499 do {
1500 ret = mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR);
1501 if (ret >= 0 || errno != EEXIST) {
1502 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mkdir of %d nested directory",
1503 i);
1504 }
1505 ret = chdir(dir);
1506 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to %d nested directory", i);
1507 } while (i++ < 40);
1508
1509 ktrace_session_t s = ktrace_session_create();
1510 ktrace_set_collection_interval(s, 250);
1511 ktrace_filter_pid(s, getpid());
1512 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1513 ktrace_events_single(s, VFS_LOOKUP, ^(struct trace_point *tp __unused){});
1514 ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
1515
1516 dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", 0);
1517
1518 ktrace_set_completion_handler(s, ^{
1519 dispatch_release(q);
1520 T_END;
1521 });
1522
1523 int error = ktrace_start(s, q);
1524 T_ASSERT_POSIX_ZERO(error, "started tracing");
1525
1526 int fd = open("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", O_RDWR | O_CREAT);
1527 T_ASSERT_POSIX_SUCCESS(fd, "opened file at %d directories deep", i);
1528
1529 sleep(5);
1530
1531 T_LOG("ending tracing");
1532 ktrace_end(s, 0);
1533 dispatch_main();
1534 }
1535
1536 #pragma mark - boot tracing
1537
1538 static const char *expected_subsystems[] = {
1539 "tunables", "locks_early", "kprintf", "pmap_steal", "vm_kernel",
1540 "kmem", "zalloc",
1541 /* "percpu", only has a startup phase on Intel */
1542 "locks", "codesigning", "oslog", "early_boot",
1543 };
1544 #define EXPECTED_SUBSYSTEMS_LEN \
1545 (sizeof(expected_subsystems) / sizeof(expected_subsystems[0]))
1546
/*
 * Boot with a trace buffer (trace=100000 boot-arg) and replay the existing
 * buffer, checking that the very first string event is the kernel bootstrap
 * message and that every expected startup subsystem logged a string.
 */
T_DECL(early_boot_tracing, "ensure early boot strings are present",
    T_META_BOOTARGS_SET("trace=100000"))
{
	T_ATEND(reset_ktrace);

	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	ktrace_set_collection_interval(s, 250);
	int error = ktrace_set_use_existing(s);
	T_ASSERT_POSIX_ZERO(error, "use existing trace buffer");

#if defined(__x86_64__)
#define FIRST_EVENT_STRING "i386_init"
#else /* defined(__x86_64__) */
#define FIRST_EVENT_STRING "kernel_startup_bootstrap"
#endif /* !defined(__x86_64__) */

	__block bool seen_event = false;
	__block unsigned int cur_subsystem = 0;
	ktrace_events_single(s, TRACE_INFO_STRING, ^(struct trace_point *tp) {
		/*
		 * Reassemble the string from the four event arguments: 8 bytes
		 * per argument on 64-bit kernels, 4 on 32-bit; at most 32 bytes
		 * plus the NUL already in early_str.
		 */
		char early_str[33] = "";
		size_t argsize = ktrace_is_kernel_64_bit(s) ? 8 : 4;
		memcpy(early_str, &tp->arg1, argsize);
		memcpy(early_str + argsize, &tp->arg2, argsize);
		memcpy(early_str + argsize * 2, &tp->arg3, argsize);
		memcpy(early_str + argsize * 3, &tp->arg4, argsize);

		if (!seen_event) {
			T_LOG("found first string event with args: "
			    "0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64,
			    tp->arg1, tp->arg2, tp->arg3, tp->arg4);
			char expect_str[33] = FIRST_EVENT_STRING;
			if (!ktrace_is_kernel_64_bit(s)) {
				// Only the first 16 bytes of the string will be traced.
				expect_str[16] = '\0';
			}

			T_EXPECT_EQ_STR(early_str, expect_str,
			    "first event in boot trace should be the bootstrap message");
		}
		seen_event = true;

		/* Subsystem strings must appear in the expected order. */
		if (strcmp(early_str, expected_subsystems[cur_subsystem]) == 0) {
			T_LOG("found log for subsystem %s",
			    expected_subsystems[cur_subsystem]);
			cur_subsystem++;
		}

		if (cur_subsystem == EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("ending after seeing all expected logs");
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(seen_event, "should see an early boot string event");
		T_EXPECT_TRUE(cur_subsystem == EXPECTED_SUBSYSTEMS_LEN,
		    "should see logs from all subsystems");
		if (cur_subsystem != EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("missing log for %s", expected_subsystems[cur_subsystem]);
		}
		T_END;
	});

	error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	T_SETUPEND;

	dispatch_main();
}
1618
/*
 * Boot with a typefilter boot-arg and verify the running configuration has
 * the requested subclass (S0x0c00) and class (C0xfe) bits set.
 */
T_DECL(typefilter_boot_arg, "ensure typefilter is set up correctly at boot",
    T_META_BOOTARGS_SET("trace=100000 trace_typefilter=S0x0c00,C0xfe"))
{
	T_ATEND(reset_ktrace);

	T_SETUPBEGIN;
	ktrace_config_t config = ktrace_config_create_current();
	T_QUIET; T_WITH_ERRNO;
	T_ASSERT_NOTNULL(config, "create config from current system");
	T_SETUPEND;

	T_LOG("ktrace configuration:");
	ktrace_config_print_description(config, stdout);

	uint8_t *typefilt = ktrace_config_kdebug_get_typefilter(config);
	T_ASSERT_NOTNULL(typefilt, "typefilter is active");
	/* One bit per subclass; 0x0c00 lands on a byte boundary (bit 0). */
	T_EXPECT_TRUE(typefilt[0x0c00 / 8],
	    "specified subclass is set in typefilter");
	T_MAYFAIL; // rdar://63625062 (UTD converts commas in boot-args to spaces)
	/* 0xfeed is a representative subclass within class 0xfe. */
	T_EXPECT_TRUE(typefilt[0xfeed / 8],
	    "specified class is set in typefilter");

	ktrace_config_destroy(config);
}
1643
#pragma mark - events present

/*
 * Set from the SIGCHLD handler and polled by the proc-exit test's wait loop.
 * Declared volatile sig_atomic_t so the handler's store is async-signal-safe
 * and the polling read cannot be hoisted out of the loop by the compiler.
 */
static volatile sig_atomic_t recvd_sigchild = 0;

/* SIGCHLD handler: record that the spawned child has exited. */
static void
sighandler(int sig)
{
	if (sig != SIGCHLD) {
		T_ASSERT_FAIL("unexpected signal: %d", sig);
	}
	recvd_sigchild = 1;
}
1655
/*
 * Spawn a short-lived child, collect its rusage at exit, and verify the
 * process-exit instructions/cycles trace event reports counts at least as
 * large as rusage (the event fires after rusage is sampled).
 */
T_DECL(instrs_and_cycles_on_proc_exit,
    "instructions and cycles should be traced on thread exit",
    T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
{
	T_SETUPBEGIN;
	start_controlling_ktrace();
	int error;
	struct rusage_info_v4 *rusage = calloc(1, sizeof(*rusage));
	char *args[] = { "ls", "-l", NULL, };
	int status;
	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test",
	    DISPATCH_QUEUE_SERIAL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(signal(SIGCHLD, sighandler),
	    "register signal handler");

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/*
	 * NOTE(review): `pid` is assigned by posix_spawnp after tracing starts;
	 * this assumes the exit event cannot arrive before the spawn call
	 * returns — confirm against the event-delivery ordering.
	 */
	__block pid_t pid;
	__block bool seen_event = false;
	__block uint64_t proc_instrs = 0;
	__block uint64_t proc_cycles = 0;
	__block uint64_t proc_sys_time = 0;
	__block uint64_t proc_usr_time = 0;
	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_PROC_EXIT,
	    ^(ktrace_event_t tp){
		if (tp->pid == pid) {
			seen_event = true;
			proc_instrs = tp->arg1;
			proc_cycles = tp->arg2;
			proc_sys_time = tp->arg3;
			proc_usr_time = tp->arg4;
			ktrace_end(s, 1);
		}
	});
	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");
	ktrace_set_completion_handler(s, ^{
		// TODO Check for equality once rdar://61948669 is fixed.
		T_ASSERT_GE(proc_instrs, rusage->ri_instructions,
		    "trace event instrs are >= to rusage instrs");
		T_ASSERT_GE(proc_cycles, rusage->ri_cycles,
		    "trace event cycles are >= to rusage cycles");
		T_ASSERT_GE(proc_sys_time, rusage->ri_system_time,
		    "trace event sys time is >= rusage sys time");
		T_ASSERT_GE(proc_usr_time, rusage->ri_user_time,
		    "trace event usr time >= rusage usr time");
		T_EXPECT_TRUE(seen_event, "should see the proc exit trace event");

		free(rusage);
		ktrace_session_destroy(s);
		dispatch_release(q);
		T_END;
	});
	error = ktrace_start(s, q);
	T_ASSERT_POSIX_ZERO(error, "start tracing");
	T_SETUPEND;

	extern char **environ;
	status = posix_spawnp(&pid, args[0], NULL, NULL, args, environ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(status, "spawn process");
	if (status == 0) {
		/* Wait for SIGCHLD, then sample rusage before reaping. */
		while (!recvd_sigchild) {
			pause();
		}
		error = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t)rusage);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "rusage");
		error = waitpid(pid, &status, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "waitpid");
	}
	dispatch_main();
}
1727
/* Number of worker threads spawned by the thread-exit counters test. */
#define NO_OF_THREADS 2

/* Per-thread counter snapshot taken by the thread itself before exiting. */
struct thread_counters_info {
	struct thsc_cpi counts;  /* from thread_selfcounts_cpi() */
	uint64_t cpu_time;       /* from __thread_selfusage() */
	uint64_t thread_id;      /* from __thread_selfid() */
};
typedef struct thread_counters_info *tc_info_t;
1736
1737 static void*
get_thread_counters(void * ptr)1738 get_thread_counters(void* ptr)
1739 {
1740 extern uint64_t __thread_selfusage(void);
1741 extern uint64_t __thread_selfid(void);
1742 tc_info_t tc_info = (tc_info_t) ptr;
1743 tc_info->thread_id = __thread_selfid();
1744 // Just to increase the instr, cycle count
1745 T_LOG("printing %llu\n", tc_info->thread_id);
1746 tc_info->cpu_time = __thread_selfusage();
1747 (void)thread_selfcounts_cpi(&tc_info->counts);
1748 return NULL;
1749 }
1750
1751 T_DECL(instrs_and_cycles_on_thread_exit,
1752 "instructions and cycles should be traced on thread exit",
1753 T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
1754 {
1755 T_SETUPBEGIN;
1756 start_controlling_ktrace();
1757
1758 int error;
1759 pthread_t *threads = calloc((unsigned int)(NO_OF_THREADS),
1760 sizeof(pthread_t));
1761 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
1762 NO_OF_THREADS);
1763 tc_info_t tc_infos = calloc((unsigned int) (NO_OF_THREADS),
1764 sizeof(struct thread_counters_info));
1765 T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(tc_infos,
1766 "calloc(%d thread counters)", NO_OF_THREADS);
1767
1768 ktrace_session_t s = ktrace_session_create();
1769 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
1770 ktrace_filter_pid(s, getpid());
1771
1772 __block int nevents = 0;
1773 error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_THR_EXIT,
1774 ^(ktrace_event_t tp) {
1775 for (int i = 0; i < NO_OF_THREADS; i++) {
1776 if (tp->threadid == tc_infos[i].thread_id) {
1777 nevents++;
1778 uint64_t cpu_time = tp->arg3 + tp->arg4;
1779 /*
1780 * as we are getting counts before thread exit,
1781 * the counts at thread exit should be greater than
1782 * thread_selfcounts
1783 */
1784 T_ASSERT_GE(tp->arg1, tc_infos[i].counts.tcpi_instructions,
1785 "trace event instrs are >= to thread's instrs");
1786 T_ASSERT_GE(tp->arg2, tc_infos[i].counts.tcpi_cycles,
1787 "trace event cycles are >= to thread's cycles");
1788 T_ASSERT_GE(cpu_time, tc_infos[i].cpu_time,
1789 "trace event cpu time is >= thread's cpu time");
1790 }
1791 if (nevents == NO_OF_THREADS) {
1792 ktrace_end(s, 1);
1793 }
1794 }
1795 });
1796 T_QUIET; T_ASSERT_POSIX_ZERO(error, "trace single event");
1797 ktrace_set_completion_handler(s, ^{
1798 T_EXPECT_EQ(NO_OF_THREADS, nevents, "seen %d thread exit trace events",
1799 NO_OF_THREADS);
1800 free(tc_infos);
1801 ktrace_session_destroy(s);
1802 T_END;
1803 });
1804 error = ktrace_start(s, dispatch_get_main_queue());
1805 T_ASSERT_POSIX_ZERO(error, "start tracing");
1806
1807 for (int i = 0; i < NO_OF_THREADS; i++) {
1808 error = pthread_create(&threads[i], NULL, get_thread_counters,
1809 (void *)&tc_infos[i]);
1810 T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create thread %d", i);
1811 }
1812 T_SETUPEND;
1813 for (int i = 0; i < NO_OF_THREADS; i++) {
1814 error = pthread_join(threads[i], NULL);
1815 T_QUIET; T_EXPECT_POSIX_ZERO(error, "pthread_join thread %d", i);
1816 }
1817
1818 dispatch_main();
1819 }
1820