xref: /xnu-8796.101.5/tests/ktrace/kdebug_tests.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 // Copyright (c) 2020-2022 Apple Inc.  All rights reserved.
2 
3 #include <darwintest.h>
4 #include <darwintest_utils.h>
5 #include <dispatch/dispatch.h>
6 #include <inttypes.h>
7 #include <ktrace/session.h>
8 #include <ktrace/private.h>
9 #include <kperf/kperf.h>
10 #include <mach/clock_types.h>
11 #include <mach/dyld_kernel.h>
12 #include <mach/host_info.h>
13 #include <mach/mach.h>
14 #include <mach/mach_init.h>
15 #include <mach/task.h>
16 #include <os/assumes.h>
17 #include <stdlib.h>
18 #include <sys/kdebug.h>
19 #include <sys/kdebug_signpost.h>
20 #include <sys/resource_private.h>
21 #include <sys/sysctl.h>
22 #include <stdint.h>
23 #include <TargetConditionals.h>
24 
25 #include "ktrace_helpers.h"
26 #include "test_utils.h"
27 #include "ktrace_meta.h"
28 
29 #define KDBG_TEST_MACROS         1
30 #define KDBG_TEST_OLD_TIMES      2
31 #define KDBG_TEST_FUTURE_TIMES   3
32 #define KDBG_TEST_IOP_SYNC_FLUSH 4
33 
34 #pragma mark kdebug syscalls
35 
36 #define TRACE_DEBUGID (0xfedfed00U)
37 
T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events")
{
	// Take ownership of the ktrace subsystem before configuring anything.
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
	ktrace_set_collection_interval(s, 200);

	__block int events_seen = 0;
	ktrace_events_single(s, TRACE_DEBUGID, ^void (struct trace_point *tp) {
		events_seen++;
		T_PASS("saw traced event");

		// On 32-bit kernels, traced argument values are truncated to
		// 32 bits, so only the low half of the pattern is expected.
		if (ktrace_is_kernel_64_bit(s)) {
			T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedfacefeedface),
					"argument 1 of traced event is correct");
		} else {
			T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedface),
					"argument 1 of traced event is correct");
		}
		T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of traced event is correct");
		T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of traced event is correct");
		T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of traced event is correct");

		// One matching event is enough -- tear the session down.
		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_GE(events_seen, 1, NULL);
		ktrace_session_destroy(s);
		T_END;
	});

	// Only consider events emitted by this process.
	ktrace_filter_pid(s, getpid());

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 0xfeedfacefeedface, 2,
			3, 4), NULL);
	ktrace_end(s, 0);

	dispatch_main();
}
80 
81 #if __LP64__
82 #define IS_64BIT true
83 #else // __LP64__
84 #define IS_64BIT false
85 #endif // !__LP64__
86 
87 #define STRING_SIZE (1024)
88 
89 T_DECL(kdebug_trace_string_syscall,
90 		"test that kdebug_trace_string(2) emits correct events",
91 		T_META_ENABLED(IS_64BIT))
92 {
93 	start_controlling_ktrace();
94 
95 	ktrace_session_t s = ktrace_session_create();
96 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
97 	ktrace_set_collection_interval(s, 200);
98 	ktrace_filter_pid(s, getpid());
99 
100 	char *traced_string = calloc(1, STRING_SIZE);
101 	T_QUIET; T_WITH_ERRNO;
102 	T_ASSERT_NOTNULL(traced_string, "allocated memory for string");
103 	for (size_t i = 0; i < sizeof(traced_string); i++) {
104 		traced_string[i] = 'a' + (i % 26);
105 	}
106 	traced_string[sizeof(traced_string) - 1] = '\0';
107 	size_t traced_len = strlen(traced_string);
108 	T_QUIET; T_ASSERT_EQ(traced_len, sizeof(traced_string) - 1,
109 			"traced string should be filled");
110 
111 	ktrace_events_single(s, TRACE_DEBUGID,
112 			^void (struct trace_point * __unused tp) {
113 		// Do nothing -- just ensure the event is filtered in.
114 	});
115 
116 	__block unsigned int string_cpu = 0;
117 	__block bool tracing_string = false;
118 	char *observed_string = calloc(1, PATH_MAX);
119 	T_QUIET; T_WITH_ERRNO;
120 	T_ASSERT_NOTNULL(observed_string, "allocated memory for observed string");
121 	__block size_t string_offset = 0;
122 	ktrace_events_single(s, TRACE_STRING_GLOBAL, ^(struct trace_point *tp){
123 		if (tp->debugid & DBG_FUNC_START && tp->arg1 == TRACE_DEBUGID) {
124 			tracing_string = true;
125 			string_cpu = tp->cpuid;
126 			memcpy(observed_string + string_offset, &tp->arg3,
127 					sizeof(uint64_t) * 2);
128 			string_offset += sizeof(uint64_t) * 2;
129 		} else if (tracing_string && string_cpu == tp->cpuid) {
130 			memcpy(observed_string + string_offset, &tp->arg1,
131 					sizeof(uint64_t) * 4);
132 			string_offset += sizeof(uint64_t) * 4;
133 			if (tp->debugid & DBG_FUNC_END) {
134 				ktrace_end(s, 1);
135 			}
136 		}
137 	});
138 
139 	ktrace_set_completion_handler(s, ^{
140 		T_EXPECT_TRUE(tracing_string, "found string in trace");
141 		size_t observed_len = strlen(observed_string);
142 		T_EXPECT_EQ(traced_len, observed_len, "string lengths should be equal");
143 		if (traced_len == observed_len) {
144 			T_EXPECT_EQ_STR(traced_string, observed_string,
145 					"observed correct string");
146 		}
147 		ktrace_session_destroy(s);
148 		T_END;
149 	});
150 
151 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
152 	uint64_t str_id = kdebug_trace_string(TRACE_DEBUGID, 0, traced_string);
153 	T_WITH_ERRNO; T_ASSERT_NE(str_id, (uint64_t)0, "kdebug_trace_string(2)");
154 	ktrace_end(s, 0);
155 
156 	dispatch_main();
157 }
158 
159 #define SIGNPOST_SINGLE_CODE (0x10U)
160 #define SIGNPOST_PAIRED_CODE (0x20U)
161 
162 T_DECL(kdebug_signpost_syscall,
163 		"test that kdebug_signpost(2) emits correct events")
164 {
165 	start_controlling_ktrace();
166 
167 	ktrace_session_t s = ktrace_session_create();
168 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
169 
170 	__block int single_seen = 0;
171 	__block int paired_seen = 0;
172 
173 	/* make sure to get enough events for the KDBUFWAIT to trigger */
174 	// ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
175 	ktrace_events_single(s,
176 	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
177 	    ^(struct trace_point *tp) {
178 		single_seen++;
179 		T_PASS("single signpost is traced");
180 
181 		T_EXPECT_EQ(tp->arg1, 1ULL, "argument 1 of single signpost is correct");
182 		T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of single signpost is correct");
183 		T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of single signpost is correct");
184 		T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of single signpost is correct");
185 	});
186 
187 	ktrace_events_single_paired(s,
188 	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
189 	    ^(struct trace_point *start, struct trace_point *end) {
190 		paired_seen++;
191 		T_PASS("paired signposts are traced");
192 
193 		T_EXPECT_EQ(start->arg1, 5ULL, "argument 1 of start signpost is correct");
194 		T_EXPECT_EQ(start->arg2, 6ULL, "argument 2 of start signpost is correct");
195 		T_EXPECT_EQ(start->arg3, 7ULL, "argument 3 of start signpost is correct");
196 		T_EXPECT_EQ(start->arg4, 8ULL, "argument 4 of start signpost is correct");
197 
198 		T_EXPECT_EQ(end->arg1, 9ULL, "argument 1 of end signpost is correct");
199 		T_EXPECT_EQ(end->arg2, 10ULL, "argument 2 of end signpost is correct");
200 		T_EXPECT_EQ(end->arg3, 11ULL, "argument 3 of end signpost is correct");
201 		T_EXPECT_EQ(end->arg4, 12ULL, "argument 4 of end signpost is correct");
202 
203 		T_EXPECT_EQ(single_seen, 1, "signposts are traced in the correct order");
204 
205 		ktrace_end(s, 1);
206 	});
207 
208 	ktrace_set_completion_handler(s, ^(void) {
209 		T_QUIET; T_EXPECT_NE(single_seen, 0,
210 		"did not see single tracepoint before timeout");
211 		T_QUIET; T_EXPECT_NE(paired_seen, 0,
212 		"did not see single tracepoint before timeout");
213 		ktrace_session_destroy(s);
214 		T_END;
215 	});
216 
217 	ktrace_filter_pid(s, getpid());
218 
219 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
220 	    "started tracing");
221 
222 #pragma clang diagnostic push
223 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
224 	T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4),
225 	    "emitted single signpost");
226 	T_EXPECT_POSIX_SUCCESS(
227 		kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8),
228 		"emitted start signpost");
229 	T_EXPECT_POSIX_SUCCESS(
230 		kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12),
231 		"emitted end signpost");
232 #pragma clang diagnostic pop
233 	ktrace_end(s, 0);
234 
235 	dispatch_main();
236 }
237 
238 T_DECL(syscall_tracing,
239 		"ensure that syscall arguments are traced propertly")
240 {
241 	ktrace_session_t s = ktrace_session_create();
242 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
243 
244 	__block bool seen = 0;
245 
246 	ktrace_filter_pid(s, getpid());
247 
248 	static const int telemetry_syscall_no = 451;
249 	static const uint64_t arg1 = 0xfeedfacefeedface;
250 
251 	ktrace_events_single(s, BSDDBG_CODE(DBG_BSD_EXCP_SC, telemetry_syscall_no),
252 			^(struct trace_point *evt){
253 		if (KDBG_EXTRACT_CODE(evt->debugid) != telemetry_syscall_no || seen) {
254 			return;
255 		}
256 
257 		seen = true;
258 		if (ktrace_is_kernel_64_bit(s)) {
259 			T_EXPECT_EQ(evt->arg1, arg1,
260 					"argument 1 of syscall event is correct");
261 		} else {
262 			T_EXPECT_EQ(evt->arg1, (uint64_t)(uint32_t)(arg1),
263 					"argument 1 of syscall event is correct");
264 		}
265 
266 		ktrace_end(s, 1);
267 	});
268 
269 	ktrace_set_completion_handler(s, ^{
270 		T_ASSERT_TRUE(seen,
271 				"should have seen a syscall event for kevent_id(2)");
272 		ktrace_session_destroy(s);
273 		T_END;
274 	});
275 
276 	int error = ktrace_start(s, dispatch_get_main_queue());
277 	T_ASSERT_POSIX_ZERO(error, "started tracing");
278 
279 	/*
280 	 * telemetry(2) has a 64-bit argument that will definitely be traced, and
281 	 * is unlikely to be used elsewhere by this process.
282 	 */
283 	extern int __telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval,
284 			uint64_t leeway, uint64_t arg4, uint64_t arg5);
285 	(void)__telemetry(arg1, 0, 0, 0, 0, 0);
286 
287 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
288 			dispatch_get_main_queue(), ^{
289 		T_LOG("ending test due to timeout");
290 		ktrace_end(s, 0);
291 	});
292 
293 	dispatch_main();
294 }
295 
296 #pragma mark kdebug behaviors
297 
298 #define WRAPPING_EVENTS_COUNT     (150000)
299 #define TRACE_ITERATIONS          (5000)
300 #define WRAPPING_EVENTS_THRESHOLD (100)
301 
T_DECL(wrapping,
    "ensure that wrapping traces lost events and no events prior to the wrap",
    T_META_CHECK_LEAKS(false))
{
	kbufinfo_t buf_info;
	// Upper bound (seconds) on how long to wait for the buffer to wrap.
	int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
	int current_secs = wait_wrapping_secs;

	start_controlling_ktrace();

	/* use sysctls manually to bypass libktrace assumptions */

	// mib[0..1] select kdebug; mib[2] is the op and mib[3] its argument.
	int mib[4] = { CTL_KERN, KERN_KDEBUG };
	mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");

	mib[2] = KERN_KDSETUP; mib[3] = 0;
	size_t needed = 0;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
	    "KERN_KDSETUP");

	mib[2] = KERN_KDENABLE; mib[3] = 1;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");

	/* wrapping is on by default */

	/* wait until wrapped */
	T_LOG("waiting for trace to wrap");
	mib[2] = KERN_KDGETBUF;
	needed = sizeof(buf_info);
	do {
		sleep(1);
		// Emit a burst of events to fill the buffer faster.
		for (int i = 0; i < TRACE_ITERATIONS; i++) {
			T_QUIET;
			T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
		}
		// Poll the buffer flags for KDBG_WRAPPED.
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
		    NULL);
	} while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);

	T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
	    "trace wrapped (after %d seconds within %d second timeout)",
	    wait_wrapping_secs - current_secs, wait_wrapping_secs);

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
	// Read the buffer configured above rather than setting up a new one.
	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);

	__block int events = 0;

	// The first event delivered must be TRACE_LOST_EVENTS (marking the
	// wrap); no subsequent event should repeat it.
	ktrace_events_all(s, ^(struct trace_point *tp) {
		if (events == 0) {
		        T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
		        "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
		        tp->debugid,
		        ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
		} else {
		        T_QUIET;
		        T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
		        "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
		        tp->debugid,
		        ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
		}

		events++;
		// Checking a sample of events past the wrap is sufficient.
		if (events > WRAPPING_EVENTS_THRESHOLD) {
		        ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "started tracing");

	dispatch_main();
}
383 
/*
 * Assert that kdebug is configured and that its enabled state matches
 * `enable`, logging `msg` for context.  Queries KERN_KDGETBUF directly so
 * it works regardless of how tracing was set up.
 */
static void
_assert_tracing_state(bool enable, const char *msg)
{
	kbufinfo_t bufinfo = { 0 };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(
		    (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3,
		    &bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0),
	    "get kdebug buffer info");
	T_QUIET;
	T_ASSERT_NE(bufinfo.nkdbufs, 0, "tracing should be configured");
	// nolog is the inverse of "enabled", so the two values must differ.
	T_ASSERT_NE(bufinfo.nolog, enable, "%s: tracing should%s be enabled",
			msg, enable ? "" : "n't");
}
398 
399 #define DRAIN_TIMEOUT_NS (1 * NSEC_PER_SEC)
400 
/*
 * Repeatedly read trace data with KERN_KDREADTR until an event with
 * `debugid` is observed, failing the test if DRAIN_TIMEOUT_NS elapses
 * before it shows up.
 */
static void
_drain_until_event(uint32_t debugid)
{
	static kd_buf events[256] = { 0 };
	size_t events_size = sizeof(events);
	uint64_t start_time_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
	unsigned int reads = 0;
	while (true) {
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctl(
				(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREADTR, }, 3,
				events, &events_size, NULL, 0), "reading trace data");
		reads += 1;
		// KERN_KDREADTR reports the number of events read back through
		// the size parameter.  NOTE(review): events_size is not reset
		// to sizeof(events) between iterations, so later reads appear
		// to pass the previous count as the capacity -- confirm this
		// is intended.
		size_t events_count = events_size;
		for (size_t i = 0; i < events_count; i++) {
			if (events[i].debugid == debugid) {
				T_LOG("draining found event 0x%x", debugid);
				return;
			}
		}
		uint64_t cur_time_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
		if (cur_time_ns - start_time_ns > DRAIN_TIMEOUT_NS) {
			T_ASSERT_FAIL("timed out after %f seconds waiting for 0x%x,"
					" after %u reads",
					(double)(cur_time_ns - start_time_ns) / 1e9, debugid,
					reads);
		}
	}
}
430 
T_DECL(disabling_event_match,
    "ensure that ktrace is disabled when an event disable matcher fires")
{
	start_controlling_ktrace();

	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
	ktrace_events_single(s, TRACE_DEBUGID,
			^(struct trace_point *tp __unused) {});
	// Configure (but do not start) the session; enabling happens manually
	// via sysctl below so the disable matcher can be installed first.
	int error = ktrace_configure(s);
	T_QUIET; T_ASSERT_POSIX_ZERO(error, "configured session");
	// First matcher should fire on TRACE_DEBUGID with arg1 == 0xff; the
	// second entry uses an unmatchable debugid.
	kd_event_matcher matchers[2] = { {
		.kem_debugid = TRACE_DEBUGID,
		.kem_args[0] = 0xff,
	}, {
		.kem_debugid = UINT32_MAX,
		.kem_args[0] = 0xfff,
	} };
	size_t matchers_size = sizeof(matchers);
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSET_EDM, }, 3,
		&matchers, &matchers_size, NULL, 0), "set event disable matcher");
	size_t size = 0;
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDEFLAGS, KDBG_MATCH_DISABLE, }, 4,
		NULL, &size, NULL, 0), "enabled event disable matching");
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDENABLE, 1, }, 4,
		NULL, NULL, NULL, 0), "enabled tracing");
	_assert_tracing_state(true, "after enabling trace");
	T_SETUPEND;

	// Wrong debugid: the matcher must not fire.
	kdebug_trace(TRACE_DEBUGID + 8, 0xff, 0, 0, 0);
	_drain_until_event(TRACE_DEBUGID + 8);
	_assert_tracing_state(true, "with wrong debugid");
	// Right debugid but wrong first argument: still no match.
	kdebug_trace(TRACE_DEBUGID, 0, 0, 0, 0);
	_drain_until_event(TRACE_DEBUGID);
	_assert_tracing_state(true, "with wrong argument");
	// Exact match: the kernel must disable tracing.
	kdebug_trace(TRACE_DEBUGID, 0xff, 0, 0, 0);
	_drain_until_event(TRACE_DEBUGID);
	_assert_tracing_state(false, "after disabling event");
}
474 
T_DECL(reject_old_events,
    "ensure that kdebug rejects events from before tracing began",
    T_META_CHECK_LEAKS(false))
{
	// Timestamp horizon captured just before tracing starts; every event
	// observed in the trace must be newer than this.
	__block uint64_t event_horizon_ts;

	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
	ktrace_set_collection_interval(s, 100);

	__block int events = 0;
	ktrace_events_single(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 1),
	    ^(struct trace_point *tp) {
		events++;
		T_EXPECT_GT(tp->timestamp, event_horizon_ts,
		"events in trace should be from after tracing began");
	});

	ktrace_set_completion_handler(s, ^{
		// Each KDBG_TEST_OLD_TIMES invocation should contribute one
		// accepted event.
		T_EXPECT_EQ(events, 2, "should see only two events");
		ktrace_session_destroy(s);
		T_END;
	});

	event_horizon_ts = mach_absolute_time();

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	/* first, try an old event at the beginning of trace */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event at beginning");
	/* after a good event has been traced, old events should be rejected */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event to be rejected");
	ktrace_end(s, 0);

	dispatch_main();
}
512 
513 #define ORDERING_TIMEOUT_SEC 5
514 
515 T_DECL(ascending_time_order,
516     "ensure that kdebug events are in ascending order based on time",
517     T_META_CHECK_LEAKS(false), XNU_T_META_SOC_SPECIFIC)
518 {
519 	__block uint64_t prev_ts = 0;
520 	__block uint32_t prev_debugid = 0;
521 	__block unsigned int prev_cpu = 0;
522 	__block bool in_order = true;
523 
524 	start_controlling_ktrace();
525 
526 	ktrace_session_t s = ktrace_session_create();
527 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
528 
529 	ktrace_events_all(s, ^(struct trace_point *tp) {
530 		if (tp->timestamp < prev_ts) {
531 		        in_order = false;
532 		        T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
533 		        prev_ts, prev_debugid, prev_cpu);
534 		        T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
535 		        tp->timestamp, tp->debugid, tp->cpuid);
536 		        ktrace_end(s, 1);
537 		}
538 	});
539 
540 	ktrace_set_completion_handler(s, ^{
541 		ktrace_session_destroy(s);
542 		T_EXPECT_TRUE(in_order, "event timestamps were in-order");
543 		T_END;
544 	});
545 
546 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
547 	    "started tracing");
548 
549 	/* try to inject old timestamps into trace */
550 	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "inject old time");
551 
552 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
553 	    dispatch_get_main_queue(), ^{
554 		T_LOG("ending test after timeout");
555 		ktrace_end(s, 1);
556 	});
557 
558 	dispatch_main();
559 }
560 
561 #pragma mark dyld tracing
562 
/*
 * Fixture data for the dyld tracing tests: distinct UUIDs, fsids,
 * fsobjids, and load addresses for the map, unmap, and shared-cache
 * registration paths.  The UUID buffers are 8-byte aligned so they can be
 * read as pairs of uint64_ts by expect_dyld_image_info.
 */
__attribute__((aligned(8)))
static const char map_uuid[16] = "map UUID";

__attribute__((aligned(8)))
static const char unmap_uuid[16] = "unmap UUID";

__attribute__((aligned(8)))
static const char sc_uuid[16] = "shared UUID";

static fsid_t map_fsid = { .val = { 42, 43 } };
static fsid_t unmap_fsid = { .val = { 44, 45 } };
static fsid_t sc_fsid = { .val = { 46, 47 } };

static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };

// Arbitrary load addresses for each registration path.
#define MAP_LOAD_ADDR   0xabadcafe
#define UNMAP_LOAD_ADDR 0xfeedface
#define SC_LOAD_ADDR    0xfedfaced
583 
/*
 * Validate one dyld image-info trace event against the expected UUID, load
 * address, fsid, and fsobjid.  The info is packed differently by pointer
 * size: on 64-bit, event 0 carries the UUID (arg1/arg2), load address
 * (arg3), and fsid (packed into arg4), and event 1 carries the fsobjid
 * (packed into arg1).  On 32-bit, the fields span three events.  `order`
 * selects which event of the sequence `tp` is.
 */
__unused
static void
expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
    uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
    int order)
{
#if defined(__LP64__) || defined(__arm64__)
	if (order == 0) {
		uint64_t uuid[2];
		uint64_t load_addr;
		fsid_t fsid;

		uuid[0] = (uint64_t)tp->arg1;
		uuid[1] = (uint64_t)tp->arg2;
		load_addr = (uint64_t)tp->arg3;
		// fsid's two 32-bit values are packed into a single argument.
		fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
		fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);

		T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
		T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
	} else if (order == 1) {
		fsobj_id_t fsobjid;

		// objno and generation are packed into arg1.
		fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
		fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);

		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
		    exp_fsobjid->fid_generation, NULL);
	} else {
		T_ASSERT_FAIL("unrecognized order of events %d", order);
	}
#else /* defined(__LP64__) */
	if (order == 0) {
		// 32-bit: the UUID fills all four args of the first event.
		uint32_t uuid[4];

		uuid[0] = (uint32_t)tp->arg1;
		uuid[1] = (uint32_t)tp->arg2;
		uuid[2] = (uint32_t)tp->arg3;
		uuid[3] = (uint32_t)tp->arg4;

		T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
		T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
	} else if (order == 1) {
		uint32_t load_addr;
		fsid_t fsid;
		fsobj_id_t fsobjid;

		load_addr = (uint32_t)tp->arg1;
		fsid.val[0] = (int32_t)tp->arg2;
		fsid.val[1] = (int32_t)tp->arg3;
		fsobjid.fid_objno = (uint32_t)tp->arg4;

		T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
	} else if (order == 2) {
		fsobj_id_t fsobjid;

		// Final 32-bit event carries only the generation.
		fsobjid.fid_generation = tp->arg1;

		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
		    exp_fsobjid->fid_generation, NULL);
	} else {
		T_ASSERT_FAIL("unrecognized order of events %d", order);
	}
#endif /* defined(__LP64__) */
}
658 
/*
 * The dyld image info spans a different number of events depending on
 * pointer size: 2 events on 64-bit, 3 on 32-bit (whose event codes are
 * offset by 2 from the base code).
 */
#if defined(__LP64__) || defined(__arm64__)
#define DYLD_CODE_OFFSET (0)
#define DYLD_EVENTS      (2)
#else
#define DYLD_CODE_OFFSET (2)
#define DYLD_EVENTS      (3)
#endif

/*
 * Register a handler on `s` for each of the DYLD_EVENTS events starting at
 * `base_code`, validating the traced image info against the expected
 * values and recording progress as a bitmask in `*saw_events`.
 */
static void
expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
    const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
    fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
{
	for (int i = 0; i < DYLD_EVENTS; i++) {
		// Each block copies `i` by value, so it checks its own event.
		ktrace_events_single(s, KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
		    base_code + DYLD_CODE_OFFSET + (unsigned int)i),
		    ^(struct trace_point *tp) {
			T_LOG("checking %s event %c", name, 'A' + i);
			expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
			exp_fsid, exp_fsobjid, i);
			*saw_events |= (1U << i);
		});
	}
}
683 
T_DECL(dyld_events, "test that dyld registering libraries emits events")
{
	dyld_kernel_image_info_t info;

	/*
	 * Use pointers instead of __block variables in order to use these variables
	 * in the completion block below _and_ pass pointers to them to the
	 * expect_dyld_events function.
	 */
	uint8_t saw_events[3] = { 0 };
	uint8_t *saw_mapping = &(saw_events[0]);
	uint8_t *saw_unmapping = &(saw_events[1]);
	uint8_t *saw_shared_cache = &(saw_events[2]);

	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
	    "filtered to current process");

	// Install handlers for the map, unmap, and shared-cache event groups.
	expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
	    MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
	expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
	    UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
	expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
	    sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		// Each set bit corresponds to one expected event observed.
		T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	// Trigger the events by (un)registering image infos with the kernel.
	info.load_addr = MAP_LOAD_ADDR;
	memcpy(info.uuid, map_uuid, sizeof(info.uuid));
	info.fsid = map_fsid;
	info.fsobjid = map_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
	    &info, 1), "registered dyld image info");

	info.load_addr = UNMAP_LOAD_ADDR;
	memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
	info.fsid = unmap_fsid;
	info.fsobjid = unmap_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
	    &info, 1), "unregistered dyld image info");

	info.load_addr = SC_LOAD_ADDR;
	memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
	info.fsid = sc_fsid;
	info.fsobjid = sc_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
		    mach_task_self(), info, FALSE, FALSE),
	    "registered dyld shared cache image info");

	ktrace_end(s, 0);

	dispatch_main();
}
751 
752 #pragma mark kdebug kernel macros
753 
// Number of events each category of the in-kernel KDBG_TEST_MACROS test
// emits.
#define EXP_KERNEL_EVENTS 5U

// Events emitted by development-kernel-only tracing macros.
static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
};

// Events emitted by release (always-on) tracing macros.
static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
};

// Events emitted by filter-only tracing macros.
static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
};

// Events emitted by macros that bypass the process filter.
static const uint32_t noprocfilt_evts[EXP_KERNEL_EVENTS] = {
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 15),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 16),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 17),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 18),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 19),
};
787 
/*
 * If `tp` is one of the `event_ids`, count it in `*events` and validate its
 * arguments: the Nth matching event seen so far is expected to carry
 * argument values 1..N with the remaining argument slots zeroed.
 */
static void
expect_event(struct trace_point *tp, const char *name, unsigned int *events,
    const uint32_t *event_ids, size_t event_ids_len)
{
	// Number of matching events seen before this one; doubles as the
	// count of non-zero arguments expected on this event.
	unsigned int event_idx = *events;
	bool event_found = false;
	size_t i;
	for (i = 0; i < event_ids_len; i++) {
		if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
			T_LOG("found %s event 0x%x", name, tp->debugid);
			event_found = true;
		}
	}

	if (!event_found) {
		return;
	}

	*events += 1;
	// NOTE(review): treats arg1..arg4 as a contiguous uint64_t array,
	// which assumes struct trace_point lays those fields out adjacently
	// -- confirm against the ktrace headers.
	for (i = 0; i < event_idx; i++) {
		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)i + 1,
		    NULL);
	}
	for (; i < 4; i++) {
		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)0, NULL);
	}
}
815 
816 static void
expect_release_event(struct trace_point * tp,unsigned int * events)817 expect_release_event(struct trace_point *tp, unsigned int *events)
818 {
819 	expect_event(tp, "release", events, rel_evts,
820 	    sizeof(rel_evts) / sizeof(rel_evts[0]));
821 }
822 
823 static void
expect_development_event(struct trace_point * tp,unsigned int * events)824 expect_development_event(struct trace_point *tp, unsigned int *events)
825 {
826 	expect_event(tp, "dev", events, dev_evts, sizeof(dev_evts) / sizeof(dev_evts[0]));
827 }
828 
829 static void
expect_filtered_event(struct trace_point * tp,unsigned int * events)830 expect_filtered_event(struct trace_point *tp, unsigned int *events)
831 {
832 	expect_event(tp, "filtered", events, filt_evts,
833 	    sizeof(filt_evts) / sizeof(filt_evts[0]));
834 }
835 
836 static void
expect_noprocfilt_event(struct trace_point * tp,unsigned int * events)837 expect_noprocfilt_event(struct trace_point *tp, unsigned int *events)
838 {
839 	expect_event(tp, "noprocfilt", events, noprocfilt_evts,
840 	    sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]));
841 }
842 
/*
 * Run the in-kernel KDBG_TEST_MACROS test and tally how many events of each
 * category (development, release, filtered, no-process-filter) make it into
 * the trace, reporting the counts to `cb` when the session ends.  If
 * `use_all_callback` is set, observe every event; otherwise only the
 * DBG_BSD_KDEBUG_TEST range is observed.
 */
static void
expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback,
    void (^cb)(unsigned int dev_seen, unsigned int rel_seen,
    unsigned int filt_seen, unsigned int noprocfilt_seen))
{
	__block unsigned int dev_seen = 0;
	__block unsigned int rel_seen = 0;
	__block unsigned int filt_seen = 0;
	__block unsigned int noprocfilt_seen = 0;

	// A single callback classifies each event into all four categories.
	void (^evtcb)(struct trace_point *tp) = ^(struct trace_point *tp) {
		expect_development_event(tp, &dev_seen);
		expect_release_event(tp, &rel_seen);
		expect_filtered_event(tp, &filt_seen);
		expect_noprocfilt_event(tp, &noprocfilt_seen);
	};

	if (use_all_callback) {
		ktrace_events_all(s, evtcb);
	} else {
		ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
		    KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb);
	}

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		cb(dev_seen, rel_seen, filt_seen, noprocfilt_seen);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	assert_kdebug_test(KDBG_TEST_MACROS, "check test macros");

	ktrace_end(s, 0);
}
878 
T_DECL(kernel_events, "ensure kernel macros work")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
	    "filtered events to current process");

	expect_kdbg_test_events(s, false,
	    ^(unsigned int dev_seen, unsigned int rel_seen,
	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
		/*
		 * Development-only events are only filtered if running on an embedded
		 * OS.
		 */
		unsigned int dev_exp;
#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
		dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
#else
		dev_exp = EXP_KERNEL_EVENTS;
#endif

		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
		"release and development events seen");
		T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen",
		dev_exp ? "" : "not ");
		// Filter-only events track the development-only expectation.
		T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
		"process filter-agnostic events seen");
	});

	dispatch_main();
}
914 
/*
 * Same as kernel_events, but observe *all* events (not just the test range)
 * so that filter-only events would be seen if they leaked through -- they
 * should not, so expect zero of them.
 */
T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
	    "filtered events to current process");

	// `true` => use the catch-all events callback.
	expect_kdbg_test_events(s, true,
	    ^(unsigned int dev_seen, unsigned int rel_seen,
	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
#if defined(__arm64__)
		// On arm64, development-only events require a development kernel.
		T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
		NULL);
#else
		T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS,
		"development-only events seen");
#endif /* defined(__arm64__) */
		T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen");
		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
		"process filter-agnostic events seen");
	});

	dispatch_main();
}
943 
/*
 * Filter to a different process (launchd), but explicitly exempt the
 * no-process-filter events -- only those should be observed.
 */
T_DECL(kernel_events_noprocfilt,
    "ensure that the no process filter kernel macros work")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/*
	 * Only allow launchd events through.
	 */
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, 1), "filtered events to launchd");
	// Exempt each noprocfilt event ID from the process filter.
	for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) {
		T_QUIET;
		T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s,
		    noprocfilt_evts[i]),
		    "ignored process filter for noprocfilt event");
	}

	expect_kdbg_test_events(s, false,
	    ^(unsigned int dev_seen, unsigned int rel_seen,
	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
		T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen");
		T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen");
		T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen");

		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
		"process filter-agnostic events seen");
	});

	dispatch_main();
}
976 
/* Cleared by the stress test to stop the abuser threads' emit loops. */
static volatile bool continue_abuse = true;

/* Debugid emitted in a tight loop by each abuser thread. */
#define STRESS_DEBUGID (0xfeedfac0)
/* How long to let the abuser threads run, in seconds. */
#define ABUSE_SECS (2)
/* kperf timer period used while abusing kdebug. */
#define TIMER_NS (100 * NSEC_PER_USEC)
/*
 * Use the quantum as the gap threshold.
 */
#define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)
986 
987 static void *
kdebug_abuser_thread(void * ctx)988 kdebug_abuser_thread(void *ctx)
989 {
990 	unsigned int id = (unsigned int)ctx;
991 	uint64_t i = 0;
992 	while (continue_abuse) {
993 		kdebug_trace(STRESS_DEBUGID, id, i, 0, 0);
994 		i++;
995 	}
996 
997 	return NULL;
998 }
999 
/*
 * Stress kdebug: spin abuser threads on all but one CPU emitting events into
 * a deliberately tiny trace buffer (while kperf is configured with every
 * sampler), write the trace to a file, then read the file back and verify
 * that timestamps are monotonic and that any large gap in the event stream
 * ends with a TRACE_LOST_EVENTS marker (or is explained by interrupt/idle
 * activity).
 */
T_DECL(stress, "emit events on all but one CPU with a small buffer",
    T_META_CHECK_LEAKS(false))
{
	start_controlling_ktrace();

	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/* Let's not waste any time with pleasantries. */
	ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED);

	/* Ouch. */
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {});
	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
	// Make sure kperf is torn down even if the test aborts.
	(void)atexit_b(^{ kperf_reset(); });
	(void)kperf_action_count_set(1);
	(void)kperf_timer_count_set(1);
	int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns",
	    TIMER_NS);
	kperror = kperf_timer_action_set(0, 1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set");
	// Take every kind of kperf sample on each timer fire.
	kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO |
	    KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK |
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO |
	    KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH |
	    KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM |
	    KPERF_SAMPLER_TH_INSTRS_CYCLES);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set");
	/* You monster... */

	/* The coup-de-grace. */
	ktrace_set_buffer_size(s, 10);

	char filepath_arr[MAXPATHLEN] = "";
	strlcpy(filepath_arr, dt_tmpdir(), sizeof(filepath_arr));
	strlcat(filepath_arr, "/stress.ktrace", sizeof(filepath_arr));
	char *filepath = filepath_arr;

	int ncpus = 0;
	size_t ncpus_size = sizeof(ncpus);
	int ret = sysctlbyname("hw.logicalcpu_max", &ncpus, &ncpus_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.logicalcpu_max\"");
	T_QUIET; T_ASSERT_GT(ncpus, 0, "realistic number of CPUs");

	// One abuser thread per CPU, minus one to keep the system responsive.
	pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t));
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
	    ncpus - 1);

	// Once writing finishes, re-read the file and check it for gaps.
	ktrace_set_completion_handler(s, ^{
		T_SETUPBEGIN;
		ktrace_session_destroy(s);

		T_LOG("trace ended, searching for gaps");

		ktrace_session_t sread = ktrace_session_create();
		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(sread, "ktrace_session_create");

		int error = ktrace_set_file(sread, filepath);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_set_file %s", filepath);

		ktrace_file_t f = ktrace_file_open(filepath, false);
		T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s",
		filepath);
		uint64_t first_timestamp = 0;
		error = ktrace_file_earliest_timestamp(f, &first_timestamp);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp");

		uint64_t last_timestamp = 0;
		(void)ktrace_file_latest_timestamp(f, &last_timestamp);

		__block uint64_t prev_timestamp = 0;
		__block uint64_t nevents = 0;
		// Walk every event, checking for monotonic timestamps and
		// explaining any gap larger than GAP_THRESHOLD_NS.
		ktrace_events_all(sread, ^(struct trace_point *tp) {
			nevents++;
			uint64_t delta_ns = 0;
			T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp,
			"timestamps are monotonically increasing");
			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			tp->timestamp - prev_timestamp, &delta_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
			if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) {
			        if (tp->debugname) {
			                T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
			                (double)delta_ns / 1e9, prev_timestamp,
			                tp->timestamp, tp->cpuid, tp->debugname, tp->debugid);
				} else {
			                T_LOG("gap: %gs at %llu - %llu on %d: %#x",
			                (double)delta_ns / 1e9, prev_timestamp,
			                tp->timestamp, tp->cpuid, tp->debugid);
				}

			        /*
			         * These gaps are ok -- they appear after CPUs are brought back
			         * up.
			         */
#define INTERRUPT (0x1050000)
#define PERF_CPU_IDLE (0x27001000)
#define INTC_HANDLER (0x5000004)
#define DECR_TRAP (0x1090000)
			        uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK;
			        if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE &&
			        eventid != INTC_HANDLER && eventid != DECR_TRAP) {
			                unsigned int lost_events = TRACE_LOST_EVENTS;
			                T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events,
			                "gaps should end with lost events");
				}
			}

			prev_timestamp = tp->timestamp;
		});
		// Log each lost-events marker for debugging.
		ktrace_events_single(sread, TRACE_LOST_EVENTS, ^(struct trace_point *tp){
			T_LOG("lost: %llu on %d (%llu)", tp->timestamp, tp->cpuid, tp->arg1);
		});

		// Log each file-write window (start/end pair) and its duration.
		__block uint64_t last_write = 0;
		ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS,
		^(struct trace_point *start, struct trace_point *end) {
			uint64_t delta_ns;
			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			start->timestamp - last_write, &delta_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

			uint64_t dur_ns;
			converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			end->timestamp - start->timestamp, &dur_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

			T_LOG("write: %llu (+%gs): %gus on %d: %llu events", start->timestamp,
			(double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1);
			last_write = end->timestamp;
		});
		ktrace_set_completion_handler(sread, ^{
			// Summarize the trace file's span and event rate.
			uint64_t duration_ns = 0;
			if (last_timestamp) {
			        int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			        last_timestamp - first_timestamp, &duration_ns);
			        T_QUIET; T_ASSERT_POSIX_ZERO(converror,
			        "convert timestamp to ns");
			        T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
			        (double)duration_ns / 1e9, nevents,
			        (double)nevents / ((double)duration_ns / 1e6) / ncpus);
			}
			(void)unlink(filepath);
			ktrace_session_destroy(sread);
			T_END;
		});

		int starterror = ktrace_start(sread, dispatch_get_main_queue());
		T_QUIET; T_ASSERT_POSIX_ZERO(starterror, "ktrace_start read session");

		T_SETUPEND;
	});

/* Just kidding... for now. */
#if 0
	kperror = kperf_sample_set(1);
	T_ASSERT_POSIX_SUCCESS(kperror,
	    "started kperf timer sampling every %llu ns", TIMER_NS);
#endif

	for (int i = 0; i < (ncpus - 1); i++) {
		int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread,
		    (void *)(uintptr_t)i);
		T_QUIET; T_ASSERT_POSIX_ZERO(error,
		    "pthread_create abuser thread %d", i);
	}

	int error = ktrace_start_writing_file(s, filepath,
	    ktrace_compression_none, NULL, NULL);
	T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath);

	T_SETUPEND;

	// After ABUSE_SECS, stop tracing and join the abuser threads.
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^{
		T_LOG("ending trace");
		ktrace_end(s, 1);

		continue_abuse = false;
		for (int i = 0; i < (ncpus - 1); i++) {
		        int joinerror = pthread_join(threads[i], NULL);
		        T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d",
		        i);
		}
	});

	dispatch_main();
}
1190 
/* Emit a round-trip event once per this many observed events. */
#define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
/* End tracing after this many round-trip events are seen. */
#define ROUND_TRIPS_THRESHOLD UINT64_C(25)
/* Give up waiting for round-trip events after this long. */
#define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
/* Kernel buffer collection interval, in milliseconds. */
#define COLLECTION_INTERVAL_MS 100

/*
 * Test a sustained tracing session, involving multiple round-trips to the
 * kernel.
 *
 * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
 * unlikely to be emitted elsewhere.  Look for this event, too, and make sure we
 * see as many of them as we emitted.
 *
 * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
 * In the failure mode, we won't see any of these, so set a timeout of
 * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
 * never see.
 */
T_DECL(round_trips,
    "test sustained tracing with multiple round-trips through the kernel")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/*
	 * Set a small buffer and collection interval to increase the number of
	 * round-trips.
	 */
	ktrace_set_buffer_size(s, 50);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	__block uint64_t events = 0;    // total events observed
	__block uint64_t emitted = 0;   // round-trip events we emitted
	__block uint64_t seen = 0;      // round-trip events we saw come back
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {
		events++;
		// Periodically emit a marker event that should round-trip back.
		if (events % ROUND_TRIP_PERIOD == 0) {
		        T_LOG("emitting round-trip event %" PRIu64, emitted);
		        kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
		        emitted++;
		}
	});

	ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
		T_LOG("saw round-trip event after %" PRIu64 " events", events);
		seen++;
		if (seen >= ROUND_TRIPS_THRESHOLD) {
		        T_LOG("ending trace after seeing %" PRIu64 " events, "
		        "emitting %" PRIu64, seen, emitted);
		        ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
		"emitted %" PRIu64 " round-trip events", emitted);
		T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
		"saw %" PRIu64 " round-trip events", seen);
		ktrace_session_destroy(s);
		T_END;
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	// Timeout guard: end tracing even if round-trip events never appear.
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
	    ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
	    ^{
		T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
		ktrace_end(s, 0);
	});

	dispatch_main();
}
1267 
/* Check for event progress once per second, ten times. */
#define HEARTBEAT_INTERVAL_SECS 1
#define HEARTBEAT_COUNT 10

/*
 * Ensure we see events periodically, checking for recent events on a
 * heart-beat.
 */
T_DECL(event_coverage, "ensure events appear up to the end of tracing")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	// Track the most recent event's timestamp and a running event count.
	__block uint64_t current_timestamp = 0;
	__block uint64_t events = 0;
	ktrace_events_all(s, ^(struct trace_point *tp) {
		current_timestamp = tp->timestamp;
		events++;
	});

	// Small buffer and frequent collection, to force regular deliveries.
	ktrace_set_buffer_size(s, 20);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	__block uint64_t last_timestamp = 0;
	__block uint64_t last_events = 0;
	__block unsigned int heartbeats = 0;

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_QUIET; T_EXPECT_GT(events, 0ULL, "should have seen some events");
		T_END;
	});

	dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
	    0, 0, dispatch_get_main_queue());
	dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC),
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0);
	dispatch_source_set_cancel_handler(timer, ^{
		dispatch_release(timer);
	});

	// On each heartbeat, the timestamp and count must have advanced since
	// the previous heartbeat (once events have started flowing).
	dispatch_source_set_event_handler(timer, ^{
		heartbeats++;

		T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, "
		"current event time %lld", heartbeats, mach_absolute_time(),
		events, current_timestamp);

		if (current_timestamp > 0) {
		        T_EXPECT_GT(current_timestamp, last_timestamp,
		        "event timestamps should be increasing");
		        T_QUIET; T_EXPECT_GT(events, last_events,
		        "number of events should be increasing");
		}

		last_timestamp = current_timestamp;
		last_events = events;

		// Guarantee at least one event occurs between heartbeats.
		kdebug_trace(TRACE_DEBUGID, 0, 0, 0, 0);

		if (heartbeats >= HEARTBEAT_COUNT) {
		        T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT);
		        ktrace_end(s, 0);
		}
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	dispatch_activate(timer);

	dispatch_main();
}
1343 
/*
 * Ask the kernel how many kdebug event buffers are currently allocated,
 * via the KERN_KDGETBUF sysctl.
 */
static unsigned int
get_nevents(void)
{
	int mib[3] = { CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF };
	kbufinfo_t info = { 0 };
	size_t info_size = sizeof(info);

	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &info, &info_size, NULL, 0),
	    "get kdebug buffer size");

	return (unsigned int)info.nkdbufs;
}
1356 
/*
 * Request a kdebug buffer of `nevents` events, allocate it, read back how
 * many events the kernel actually reserved, and tear the buffers down again.
 * Returns the allocated event count.
 */
static unsigned int
set_nevents(unsigned int nevents)
{
	int setbuf_mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)nevents };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(setbuf_mib, 4, NULL, 0, NULL, 0),
	    "set kdebug buffer size");

	/* KDSETUP performs the actual allocation. */
	int setup_mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETUP, (int)nevents };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(setup_mib, 4, NULL, 0, NULL, 0),
	    "setup kdebug buffers");

	unsigned int nevents_allocated = get_nevents();

	/* Clean up so subsequent calls start from scratch. */
	int remove_mib[3] = { CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(remove_mib, 3, NULL, 0, NULL, 0),
	    "remove kdebug buffers");

	return nevents_allocated;
}
1380 
/*
 * Step the requested kdebug buffer size from the kernel's minimum up towards
 * one-eighth of physical memory, checking that the kernel allocates at least
 * (roughly) what was asked for at each step.
 */
T_DECL(set_buffer_size, "ensure large buffer sizes can be set",
		XNU_T_META_SOC_SPECIFIC)
{
	T_SETUPBEGIN;
	uint64_t memsize = 0;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &memsize,
	    &(size_t){ sizeof(memsize) }, NULL, 0), "sysctl hw.memsize");
	T_SETUPEND;

#if TARGET_OS_IPHONE
	if (memsize >= (8ULL << 30)) {
		T_SKIP("skipping on iOS device with memory >= 8GB, rdar://79403304");
	}
#endif // TARGET_OS_IPHONE

	start_controlling_ktrace();

	/*
	 * Try to allocate up to one-eighth of available memory towards
	 * tracing.
	 */
	uint64_t maxevents_u64 = memsize / 8 / sizeof(kd_buf);
	if (maxevents_u64 > UINT32_MAX) {
		maxevents_u64 = UINT32_MAX;
	}
	unsigned int maxevents = (unsigned int)maxevents_u64;

	// Requesting 0 events reveals the kernel's minimum allocation.
	unsigned int minevents = set_nevents(0);
	T_ASSERT_GT(minevents, 0, "saw non-zero minimum event count of %u",
	    minevents);

	unsigned int step = ((maxevents - minevents - 1) / 4);
	T_ASSERT_GT(step, 0, "stepping by %u events", step);

	for (unsigned int i = minevents + step; i < maxevents; i += step) {
		unsigned int actualevents = set_nevents(i);
		// Allow the allocation to come up short by up to `minevents`.
		T_ASSERT_GE(actualevents, i - minevents,
		    "%u events in kernel when %u requested", actualevents, i);
	}
}
1421 
/* Minimal pthread entry point: ignore the argument and return no result. */
static void *
donothing(void *arg)
{
	(void)arg;
	return NULL;
}
1427 
/*
 * Lengthen this process's name past MAXCOMLEN, create a thread, and check
 * that the TRACE_STRING_NEWTHREAD event carries the full long name in its
 * four arguments.
 */
T_DECL(long_names, "ensure long command names are reported")
{
	start_controlling_ktrace();

	char longname[] = "thisisaverylongprocessname!";
	char *longname_ptr = longname;
	static_assert(sizeof(longname) > 16,
	    "the name should be longer than MAXCOMLEN");

	int ret = sysctlbyname("kern.procname", NULL, NULL, longname,
	    sizeof(longname));
	T_ASSERT_POSIX_SUCCESS(ret,
	    "use sysctl kern.procname to lengthen the name");

	// NOTE(review): ktsess isn't NULL-checked before the first use below --
	// consider asserting non-NULL here, as the other tests do.
	ktrace_session_t ktsess = ktrace_session_create();

	/*
	 * 32-bit kernels can only trace 16 bytes of the string in their event
	 * arguments.
	 */
	if (!ktrace_is_kernel_64_bit(ktsess)) {
		longname[16] = '\0';
	}

	ktrace_filter_pid(ktsess, getpid());

	__block bool saw_newthread = false;
	ktrace_events_single(ktsess, TRACE_STRING_NEWTHREAD,
	    ^(struct trace_point *tp) {
		if (ktrace_get_pid_for_thread(ktsess, tp->threadid) ==
		    getpid()) {
			saw_newthread = true;

			// Reassemble the name from the four event arguments,
			// each holding up to sizeof(arg) bytes of the string.
			char argname[32] = {};
			strncat(argname, (char *)&tp->arg1, sizeof(tp->arg1));
			strncat(argname, (char *)&tp->arg2, sizeof(tp->arg2));
			strncat(argname, (char *)&tp->arg3, sizeof(tp->arg3));
			strncat(argname, (char *)&tp->arg4, sizeof(tp->arg4));

			T_EXPECT_EQ_STR((char *)argname, longname_ptr,
			    "process name of new thread should be long");

			ktrace_end(ktsess, 1);
		}
	});

	ktrace_set_completion_handler(ktsess, ^{
		ktrace_session_destroy(ktsess);
		T_EXPECT_TRUE(saw_newthread,
		    "should have seen the new thread");
		T_END;
	});

	int error = ktrace_start(ktsess, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	// Creating a thread emits the TRACE_STRING_NEWTHREAD event.
	pthread_t thread = NULL;
	error = pthread_create(&thread, NULL, donothing, NULL);
	T_ASSERT_POSIX_ZERO(error, "create new thread");

	// Timeout guard in case the event never appears.
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^{
		ktrace_end(ktsess, 0);
	});

	error = pthread_join(thread, NULL);
	T_ASSERT_POSIX_ZERO(error, "join to thread");

	dispatch_main();
}
1498 
1499 T_DECL(continuous_time, "make sure continuous time status can be queried",
1500 	T_META_RUN_CONCURRENTLY(true))
1501 {
1502 	bool cont_time = kdebug_using_continuous_time();
1503 	T_ASSERT_FALSE(cont_time, "should not be using continuous time yet");
1504 }
1505 
1506 T_DECL(lookup_long_paths, "lookup long path names")
1507 {
1508 	start_controlling_ktrace();
1509 
1510 	int ret = chdir("/tmp");
1511 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to /tmp");
1512 	const char *dir = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/";
1513 	int i = 0;
1514 	do {
1515 		ret = mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR);
1516 		if (ret >= 0 || errno != EEXIST) {
1517 			T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mkdir of %d nested directory",
1518 			    i);
1519 		}
1520 		ret = chdir(dir);
1521 		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to %d nested directory", i);
1522 	} while (i++ < 40);
1523 
1524 	ktrace_session_t s = ktrace_session_create();
1525 	ktrace_set_collection_interval(s, 250);
1526 	ktrace_filter_pid(s, getpid());
1527 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1528 	ktrace_events_single(s, VFS_LOOKUP, ^(struct trace_point *tp __unused){});
1529 	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
1530 
1531 	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", 0);
1532 
1533 	ktrace_set_completion_handler(s, ^{
1534 		dispatch_release(q);
1535 		T_END;
1536 	});
1537 
1538 	int error = ktrace_start(s, q);
1539 	T_ASSERT_POSIX_ZERO(error, "started tracing");
1540 
1541 	int fd = open("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", O_RDWR | O_CREAT);
1542 	T_ASSERT_POSIX_SUCCESS(fd, "opened file at %d directories deep", i);
1543 
1544 	sleep(5);
1545 
1546 	T_LOG("ending tracing");
1547 	ktrace_end(s, 0);
1548 	dispatch_main();
1549 }
1550 
1551 #pragma mark - boot tracing
1552 
/*
 * Verify, via sysctl, that ktrace is active in the foreground state and that
 * the session was configured by the kernel itself (i.e. boot-time tracing).
 */
static void
expect_kernel_task_tracing(void)
{
	unsigned int state = 0;
	size_t state_size = sizeof(state);
	int ret = sysctlbyname("ktrace.state", &state, &state_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl(ktrace.state)");
	// State 1 corresponds to a foreground ktrace session.
	T_ASSERT_EQ(state, 1, "state is foreground");

	char configured_by[1024] = "";
	size_t configured_by_size = sizeof(configured_by);
	ret = sysctlbyname("ktrace.configured_by", &configured_by,
	    &configured_by_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl(ktrace.configured_by)");
	T_ASSERT_EQ_STR(configured_by, "kernel_task", "configured by kernel_task");
}
1569 
/*
 * Startup subsystems expected to emit a string event during early boot, in
 * the order they should appear in the trace.
 */
static const char *expected_subsystems[] = {
	"tunables", "locks", "kprintf", "pmap_steal", "kmem", "zalloc",
	/* "percpu", only has a startup phase on Intel */
	"codesigning", "oslog", "early_boot",
};
#define EXPECTED_SUBSYSTEMS_LEN \
		(sizeof(expected_subsystems) / sizeof(expected_subsystems[0]))
1577 
/*
 * Boot with trace=1000000 so the kernel configures tracing itself, then read
 * the existing trace buffer and check that the very first string event is
 * the kernel bootstrap message and that each expected startup subsystem
 * logged a string, in order.
 */
T_DECL(early_boot_tracing, "ensure early boot strings are present",
	T_META_BOOTARGS_SET("trace=1000000"), XNU_T_META_SOC_SPECIFIC)
{
	T_ATEND(reset_ktrace);

	expect_kernel_task_tracing();

	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	ktrace_set_collection_interval(s, 250);
	// Attach to the boot-time trace buffers instead of creating new ones.
	int error = ktrace_set_use_existing(s);
	T_ASSERT_POSIX_ZERO(error, "use existing trace buffer");

#if defined(__x86_64__)
#define FIRST_EVENT_STRING "i386_init"
#else /* defined(__x86_64__) */
#define FIRST_EVENT_STRING "kernel_startup_bootstrap"
#endif /* !defined(__x86_64__) */

	__block bool seen_event = false;
	__block size_t cur_subsystem = 0;
	ktrace_events_single(s, TRACE_INFO_STRING, ^(struct trace_point *tp) {
		// Reassemble the string from the four event arguments; 33 bytes
		// leaves a guaranteed NUL after 4 * 8 bytes of payload.
		char early_str[33] = "";
		size_t argsize = ktrace_is_kernel_64_bit(s) ? 8 : 4;
		memcpy(early_str, &tp->arg1, argsize);
		memcpy(early_str + argsize, &tp->arg2, argsize);
		memcpy(early_str + argsize * 2, &tp->arg3, argsize);
		memcpy(early_str + argsize * 3, &tp->arg4, argsize);

		if (!seen_event) {
			T_LOG("found first string event with args: "
			    "0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64,
			    tp->arg1, tp->arg2, tp->arg3, tp->arg4);
			char expect_str[33] = FIRST_EVENT_STRING;
			if (!ktrace_is_kernel_64_bit(s)) {
				// Only the first 16 bytes of the string will be traced.
				expect_str[16] = '\0';
			}

			T_EXPECT_EQ_STR(early_str, expect_str,
			    "first event in boot trace should be the bootstrap message");
		}
		seen_event = true;

		// Match subsystem strings strictly in the expected order.
		if (strcmp(early_str, expected_subsystems[cur_subsystem]) == 0) {
			T_LOG("found log for subsystem `%s'",
					expected_subsystems[cur_subsystem]);
			cur_subsystem++;
		} else {
			T_LOG("saw extra log for subsystem `%s'", early_str);
		}

		if (cur_subsystem == EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("ending after seeing all expected logs");
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(seen_event, "should see an early boot string event");
		T_EXPECT_EQ(cur_subsystem, EXPECTED_SUBSYSTEMS_LEN,
				"should see logs from all subsystems");
		if (cur_subsystem != EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("missing log for %s", expected_subsystems[cur_subsystem]);
		}
		T_END;
	});

	error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	T_SETUPEND;

	dispatch_main();
}
1653 
1654 // Allocating ~4TB should be clamped to some lower number.
1655 T_DECL(early_boot_tracing_too_large,
1656     "ensure early boot tracing can allocate up to a clamped size",
1657 	T_META_BOOTARGS_SET("trace=64000000000"), XNU_T_META_SOC_SPECIFIC)
1658 {
1659 	expect_kernel_task_tracing();
1660 	T_EXPECT_NE(get_nevents(), 0, "allocated some events");
1661 }
1662 
// Not SoC-specific because the typefilter parsing logic is generic.
T_DECL(typefilter_boot_arg, "ensure typefilter is set up correctly at boot",
	T_META_BOOTARGS_SET("trace=100000 trace_typefilter=S0x0c00,C0xfe"))
{
	T_ATEND(reset_ktrace);

	T_SETUPBEGIN;
	ktrace_config_t config = ktrace_config_create_current();
	T_QUIET; T_WITH_ERRNO;
	T_ASSERT_NOTNULL(config, "create config from current system");
	T_SETUPEND;

	T_LOG("ktrace configuration:");
	ktrace_config_print_description(config, stdout);

	uint8_t *typefilt = ktrace_config_kdebug_get_typefilter(config);
	T_ASSERT_NOTNULL(typefilt, "typefilter is active");
	// Byte-granularity check: the byte containing subclass 0x0c00's bit
	// should be non-zero after `S0x0c00` from the boot-arg.
	T_EXPECT_TRUE(typefilt[0x0c00 / 8],
			"specified subclass is set in typefilter");
	T_MAYFAIL; // rdar://63625062 (UTD converts commas in boot-args to spaces)
	// 0xfeed lies within class 0xfe, which `C0xfe` should have set.
	T_EXPECT_TRUE(typefilt[0xfeed / 8],
			"specified class is set in typefilter");

	ktrace_config_destroy(config);
}
1688 
1689 #pragma mark - events present
1690 
/*
 * Set from the SIGCHLD handler and spun on by the test's
 * `while (!recvd_sigchild) pause();` loop.  Must be volatile sig_atomic_t
 * (C11 7.14.1.1): a plain int gives the compiler license to hoist the load
 * out of the loop, never observing the handler's store.
 */
static volatile sig_atomic_t recvd_sigchild = 0;
static void
sighandler(int sig)
{
	if (sig != SIGCHLD) {
		T_ASSERT_FAIL("unexpected signal: %d", sig);
	}
	recvd_sigchild = 1;
}
1700 
/* Sentinel event emitted by the test to end its trace session. */
#define END_EVENT (0xfeedfac0)
1702 
1703 T_DECL(instrs_and_cycles_on_proc_exit,
1704 		"instructions and cycles should be traced on thread exit",
1705 		T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
1706 {
1707 	T_SETUPBEGIN;
1708 	start_controlling_ktrace();
1709 	int error;
1710 	struct rusage_info_v4 *rusage = calloc(1, sizeof(*rusage));
1711 	char *args[] = { "ls", "-l", NULL, };
1712 	int status;
1713 	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test",
1714 			DISPATCH_QUEUE_SERIAL);
1715 	T_QUIET; T_ASSERT_POSIX_SUCCESS(signal(SIGCHLD, sighandler),
1716 			"register signal handler");
1717 
1718 	ktrace_session_t s = ktrace_session_create();
1719 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
1720 	ktrace_set_collection_interval(s, 100);
1721 
1722 	__block pid_t pid;
1723 	__block bool seen_event = false;
1724 	__block uint64_t proc_instrs = 0;
1725 	__block uint64_t proc_cycles = 0;
1726 	__block uint64_t proc_sys_time = 0;
1727 	__block uint64_t proc_usr_time = 0;
1728 	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_PROC_EXIT,
1729 			^(ktrace_event_t tp){
1730 		if (tp->pid == pid) {
1731 			seen_event = true;
1732 			proc_instrs = tp->arg1;
1733 			proc_cycles = tp->arg2;
1734 			proc_sys_time = tp->arg3;
1735 			proc_usr_time = tp->arg4;
1736 			ktrace_end(s, 1);
1737 		}
1738 	});
1739 	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");
1740 	error = ktrace_events_single(s, END_EVENT, ^(ktrace_event_t __unused tp){
1741 		T_LOG("saw ending event, stopping trace session");
1742 		ktrace_end(s, 0);
1743 	});
1744 	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");
1745 	ktrace_set_completion_handler(s, ^{
1746 		// TODO Check for equality once rdar://61948669 is fixed.
1747 		T_ASSERT_GE(proc_instrs, rusage->ri_instructions,
1748 				"trace event instrs are >= to rusage instrs");
1749 		T_ASSERT_GE(proc_cycles, rusage->ri_cycles,
1750 				"trace event cycles are >= to rusage cycles");
1751 		T_ASSERT_GE(proc_sys_time, rusage->ri_system_time,
1752 				"trace event sys time is >= rusage sys time");
1753 		T_ASSERT_GE(proc_usr_time, rusage->ri_user_time,
1754 				"trace event usr time >= rusage usr time");
1755 		T_EXPECT_TRUE(seen_event, "should see the proc exit trace event");
1756 
1757 		free(rusage);
1758 		ktrace_session_destroy(s);
1759 		dispatch_release(q);
1760 		T_END;
1761 	});
1762 	error = ktrace_start(s, q);
1763 	T_ASSERT_POSIX_ZERO(error, "start tracing");
1764 	T_SETUPEND;
1765 
1766 	extern char **environ;
1767 	status = posix_spawnp(&pid, args[0], NULL, NULL, args, environ);
1768 	T_QUIET; T_ASSERT_POSIX_SUCCESS(status, "spawn process");
1769 	if (status == 0) {
1770 		while (!recvd_sigchild) {
1771 			pause();
1772 		}
1773 		error = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t)rusage);
1774 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "rusage");
1775 		error = waitpid(pid, &status, 0);
1776 		T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "waitpid");
1777 		kdebug_trace(END_EVENT, 0, 0, 0, 0);
1778 	}
1779 	dispatch_main();
1780 }
1781 
#define NO_OF_THREADS 2

/*
 * Sample of a thread's own counters, recorded by the thread itself shortly
 * before it exits.  Compared against the values the kernel emits in the
 * DBG_MT_INSTRS_CYCLES_THR_EXIT trace event for that thread.
 */
struct thread_counters_info {
	struct thsc_cpi counts; // instructions/cycles from thread_selfcounts(THSC_CPI, ...)
	uint64_t cpu_time;      // from __thread_selfusage()
	uint64_t thread_id;     // from __thread_selfid(); matched against tp->threadid
};
typedef struct thread_counters_info *tc_info_t;
1790 
1791 static void*
get_thread_counters(void * ptr)1792 get_thread_counters(void* ptr)
1793 {
1794 	extern uint64_t __thread_selfusage(void);
1795 	extern uint64_t __thread_selfid(void);
1796 	tc_info_t tc_info = (tc_info_t) ptr;
1797 	tc_info->thread_id = __thread_selfid();
1798 	// Just to increase the instr, cycle count
1799 	T_LOG("printing %llu\n", tc_info->thread_id);
1800 	tc_info->cpu_time = __thread_selfusage();
1801 	(void)thread_selfcounts(THSC_CPI, &tc_info->counts, sizeof(tc_info->counts));
1802 	return NULL;
1803 }
1804 
T_DECL(instrs_and_cycles_on_thread_exit,
		"instructions and cycles should be traced on thread exit",
		T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
{
	T_SETUPBEGIN;
	start_controlling_ktrace();

	int error;
	pthread_t *threads = calloc(NO_OF_THREADS, sizeof(pthread_t));
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
	    NO_OF_THREADS);
	tc_info_t tc_infos = calloc(NO_OF_THREADS,
			sizeof(struct thread_counters_info));
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(tc_infos,
			"calloc(%d thread counters)", NO_OF_THREADS);

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
	// Only events from this process are interesting.
	ktrace_filter_pid(s, getpid());

	__block int nevents = 0;
	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_THR_EXIT,
			^(ktrace_event_t tp) {
		// Match the exit event against the thread that recorded its own
		// counters in tc_infos before exiting.
		for (int i = 0; i < NO_OF_THREADS; i++) {
			if (tp->threadid != tc_infos[i].thread_id) {
				continue;
			}
			nevents++;
			uint64_t cpu_time = tp->arg3 + tp->arg4;
			/*
			 * as we are getting counts before thread exit,
			 * the counts at thread exit should be greater than
			 * thread_selfcounts
			 */
			T_ASSERT_GE(tp->arg1, tc_infos[i].counts.tcpi_instructions,
				"trace event instrs are >= to thread's instrs");
			T_ASSERT_GE(tp->arg2, tc_infos[i].counts.tcpi_cycles,
				"trace event cycles are >= to thread's cycles");
			T_ASSERT_GE(cpu_time, tc_infos[i].cpu_time,
				"trace event cpu time is >= thread's cpu time");
		}
		// Stop tracing once every spawned thread's exit event was seen;
		// checked after the loop so ktrace_end(3) runs at most once per event.
		if (nevents == NO_OF_THREADS) {
			ktrace_end(s, 1);
		}
	});
	T_QUIET; T_ASSERT_POSIX_ZERO(error, "trace single event");
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_EQ(NO_OF_THREADS, nevents, "seen %d thread exit trace events",
				NO_OF_THREADS);
		// Release both allocations made during setup (tc_infos was freed
		// here before, but threads was leaked).
		free(threads);
		free(tc_infos);
		ktrace_session_destroy(s);
		T_END;
	});
	error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "start tracing");

	for (int i = 0; i < NO_OF_THREADS; i++) {
		error = pthread_create(&threads[i], NULL, get_thread_counters,
				(void *)&tc_infos[i]);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create thread %d", i);
	}
	T_SETUPEND;
	// Wait for the threads to exit so their exit events are emitted.
	for (int i = 0; i < NO_OF_THREADS; i++) {
		error = pthread_join(threads[i], NULL);
		T_QUIET; T_EXPECT_POSIX_ZERO(error, "pthread_join thread %d", i);
	}

	dispatch_main();
}
1874