xref: /xnu-8019.80.24/tests/ktrace/kdebug_tests.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 // Copyright (c) 2020 Apple Inc.  All rights reserved.
2 
3 #include <darwintest.h>
4 #include <darwintest_utils.h>
5 #include <dispatch/dispatch.h>
6 #include <inttypes.h>
7 #include <ktrace/session.h>
8 #include <ktrace/private.h>
9 #include <kperf/kperf.h>
10 #include <mach/clock_types.h>
11 #include <mach/dyld_kernel.h>
12 #include <mach/host_info.h>
13 #include <mach/mach.h>
14 #include <mach/mach_init.h>
15 #include <mach/task.h>
16 #include <os/assumes.h>
17 #include <stdlib.h>
18 #include <sys/kdebug.h>
19 #include <sys/kdebug_signpost.h>
20 #include <sys/sysctl.h>
21 #include <stdint.h>
22 #include <TargetConditionals.h>
23 
24 #include "ktrace_helpers.h"
25 #include "test_utils.h"
26 #include "ktrace_meta.h"
27 
/*
 * Sub-test selectors passed to the in-kernel kdebug test facility via
 * assert_kdebug_test().
 */
#define KDBG_TEST_MACROS         1
#define KDBG_TEST_OLD_TIMES      2
#define KDBG_TEST_FUTURE_TIMES   3
#define KDBG_TEST_IOP_SYNC_FLUSH 4

#pragma mark kdebug syscalls

/* Arbitrary debugid emitted and matched by the kdebug_trace(2) test. */
#define TRACE_DEBUGID (0xfedfed00U)
36 
37 T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events")
38 {
39 	start_controlling_ktrace();
40 
41 	ktrace_session_t s = ktrace_session_create();
42 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
43 
44 	ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
45 
46 	__block int events_seen = 0;
47 	ktrace_events_single(s, TRACE_DEBUGID, ^void (struct trace_point *tp) {
48 		events_seen++;
49 		T_PASS("saw traced event");
50 
51 		if (ktrace_is_kernel_64_bit(s)) {
52 			T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedfacefeedface),
53 					"argument 1 of traced event is correct");
54 		} else {
55 			T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedface),
56 					"argument 1 of traced event is correct");
57 		}
58 		T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of traced event is correct");
59 		T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of traced event is correct");
60 		T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of traced event is correct");
61 
62 		ktrace_end(s, 1);
63 	});
64 
65 	ktrace_set_completion_handler(s, ^{
66 		T_EXPECT_GE(events_seen, 1, NULL);
67 		ktrace_session_destroy(s);
68 		T_END;
69 	});
70 
71 	ktrace_filter_pid(s, getpid());
72 
73 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
74 	T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 0xfeedfacefeedface, 2,
75 			3, 4), NULL);
76 	ktrace_end(s, 0);
77 
78 	dispatch_main();
79 }
80 
81 #define SIGNPOST_SINGLE_CODE (0x10U)
82 #define SIGNPOST_PAIRED_CODE (0x20U)
83 
84 T_DECL(kdebug_signpost_syscall,
85     "test that kdebug_signpost(2) emits correct events")
86 {
87 	start_controlling_ktrace();
88 
89 	ktrace_session_t s = ktrace_session_create();
90 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
91 
92 	__block int single_seen = 0;
93 	__block int paired_seen = 0;
94 
95 	/* make sure to get enough events for the KDBUFWAIT to trigger */
96 	// ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
97 	ktrace_events_single(s,
98 	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
99 	    ^(struct trace_point *tp) {
100 		single_seen++;
101 		T_PASS("single signpost is traced");
102 
103 		T_EXPECT_EQ(tp->arg1, 1ULL, "argument 1 of single signpost is correct");
104 		T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of single signpost is correct");
105 		T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of single signpost is correct");
106 		T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of single signpost is correct");
107 	});
108 
109 	ktrace_events_single_paired(s,
110 	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
111 	    ^(struct trace_point *start, struct trace_point *end) {
112 		paired_seen++;
113 		T_PASS("paired signposts are traced");
114 
115 		T_EXPECT_EQ(start->arg1, 5ULL, "argument 1 of start signpost is correct");
116 		T_EXPECT_EQ(start->arg2, 6ULL, "argument 2 of start signpost is correct");
117 		T_EXPECT_EQ(start->arg3, 7ULL, "argument 3 of start signpost is correct");
118 		T_EXPECT_EQ(start->arg4, 8ULL, "argument 4 of start signpost is correct");
119 
120 		T_EXPECT_EQ(end->arg1, 9ULL, "argument 1 of end signpost is correct");
121 		T_EXPECT_EQ(end->arg2, 10ULL, "argument 2 of end signpost is correct");
122 		T_EXPECT_EQ(end->arg3, 11ULL, "argument 3 of end signpost is correct");
123 		T_EXPECT_EQ(end->arg4, 12ULL, "argument 4 of end signpost is correct");
124 
125 		T_EXPECT_EQ(single_seen, 1, "signposts are traced in the correct order");
126 
127 		ktrace_end(s, 1);
128 	});
129 
130 	ktrace_set_completion_handler(s, ^(void) {
131 		T_QUIET; T_EXPECT_NE(single_seen, 0,
132 		"did not see single tracepoint before timeout");
133 		T_QUIET; T_EXPECT_NE(paired_seen, 0,
134 		"did not see single tracepoint before timeout");
135 		ktrace_session_destroy(s);
136 		T_END;
137 	});
138 
139 	ktrace_filter_pid(s, getpid());
140 
141 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
142 	    "started tracing");
143 
144 #pragma clang diagnostic push
145 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
146 	T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4),
147 	    "emitted single signpost");
148 	T_EXPECT_POSIX_SUCCESS(
149 		kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8),
150 		"emitted start signpost");
151 	T_EXPECT_POSIX_SUCCESS(
152 		kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12),
153 		"emitted end signpost");
154 #pragma clang diagnostic pop
155 	ktrace_end(s, 0);
156 
157 	dispatch_main();
158 }
159 
160 T_DECL(syscall_tracing,
161 		"ensure that syscall arguments are traced propertly")
162 {
163 	ktrace_session_t s = ktrace_session_create();
164 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
165 
166 	__block bool seen = 0;
167 
168 	ktrace_filter_pid(s, getpid());
169 
170 	static const int telemetry_syscall_no = 451;
171 	static const uint64_t arg1 = 0xfeedfacefeedface;
172 
173 	ktrace_events_single(s, BSDDBG_CODE(DBG_BSD_EXCP_SC, telemetry_syscall_no),
174 			^(struct trace_point *evt){
175 		if (KDBG_EXTRACT_CODE(evt->debugid) != telemetry_syscall_no || seen) {
176 			return;
177 		}
178 
179 		seen = true;
180 		if (ktrace_is_kernel_64_bit(s)) {
181 			T_EXPECT_EQ(evt->arg1, arg1,
182 					"argument 1 of syscall event is correct");
183 		} else {
184 			T_EXPECT_EQ(evt->arg1, (uint64_t)(uint32_t)(arg1),
185 					"argument 1 of syscall event is correct");
186 		}
187 
188 		ktrace_end(s, 1);
189 	});
190 
191 	ktrace_set_completion_handler(s, ^{
192 		T_ASSERT_TRUE(seen,
193 				"should have seen a syscall event for kevent_id(2)");
194 		ktrace_session_destroy(s);
195 		T_END;
196 	});
197 
198 	int error = ktrace_start(s, dispatch_get_main_queue());
199 	T_ASSERT_POSIX_ZERO(error, "started tracing");
200 
201 	/*
202 	 * telemetry(2) has a 64-bit argument that will definitely be traced, and
203 	 * is unlikely to be used elsewhere by this process.
204 	 */
205 	extern int __telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval,
206 			uint64_t leeway, uint64_t arg4, uint64_t arg5);
207 	(void)__telemetry(arg1, 0, 0, 0, 0, 0);
208 
209 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
210 			dispatch_get_main_queue(), ^{
211 		T_LOG("ending test due to timeout");
212 		ktrace_end(s, 0);
213 	});
214 
215 	dispatch_main();
216 }
217 
218 #pragma mark kdebug behaviors
219 
220 #define WRAPPING_EVENTS_COUNT     (150000)
221 #define TRACE_ITERATIONS          (5000)
222 #define WRAPPING_EVENTS_THRESHOLD (100)
223 
224 T_DECL(wrapping,
225     "ensure that wrapping traces lost events and no events prior to the wrap",
226     T_META_CHECK_LEAKS(false))
227 {
228 	kbufinfo_t buf_info;
229 	int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
230 	int current_secs = wait_wrapping_secs;
231 
232 	start_controlling_ktrace();
233 
234 	/* use sysctls manually to bypass libktrace assumptions */
235 
236 	int mib[4] = { CTL_KERN, KERN_KDEBUG };
237 	mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
238 	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");
239 
240 	mib[2] = KERN_KDSETUP; mib[3] = 0;
241 	size_t needed = 0;
242 	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
243 	    "KERN_KDSETUP");
244 
245 	mib[2] = KERN_KDENABLE; mib[3] = 1;
246 	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");
247 
248 	/* wrapping is on by default */
249 
250 	/* wait until wrapped */
251 	T_LOG("waiting for trace to wrap");
252 	mib[2] = KERN_KDGETBUF;
253 	needed = sizeof(buf_info);
254 	do {
255 		sleep(1);
256 		for (int i = 0; i < TRACE_ITERATIONS; i++) {
257 			T_QUIET;
258 			T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
259 		}
260 		T_QUIET;
261 		T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
262 		    NULL);
263 	} while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);
264 
265 	T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
266 	    "trace wrapped (after %d seconds within %d second timeout)",
267 	    wait_wrapping_secs - current_secs, wait_wrapping_secs);
268 
269 	ktrace_session_t s = ktrace_session_create();
270 	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
271 	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);
272 
273 	__block int events = 0;
274 
275 	ktrace_events_all(s, ^(struct trace_point *tp) {
276 		if (events == 0) {
277 		        T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
278 		        "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
279 		        tp->debugid,
280 		        ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
281 		} else {
282 		        T_QUIET;
283 		        T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
284 		        "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
285 		        tp->debugid,
286 		        ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
287 		}
288 
289 		events++;
290 		if (events > WRAPPING_EVENTS_THRESHOLD) {
291 		        ktrace_end(s, 1);
292 		}
293 	});
294 
295 	ktrace_set_completion_handler(s, ^{
296 		ktrace_session_destroy(s);
297 		T_END;
298 	});
299 
300 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
301 	    "started tracing");
302 
303 	dispatch_main();
304 }
305 
306 T_DECL(reject_old_events,
307     "ensure that kdebug rejects events from before tracing began",
308     T_META_CHECK_LEAKS(false))
309 {
310 	__block uint64_t event_horizon_ts;
311 
312 	start_controlling_ktrace();
313 
314 	ktrace_session_t s = ktrace_session_create();
315 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
316 
317 	__block int events = 0;
318 	ktrace_events_single(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 1),
319 	    ^(struct trace_point *tp) {
320 		events++;
321 		T_EXPECT_GT(tp->timestamp, event_horizon_ts,
322 		"events in trace should be from after tracing began");
323 	});
324 
325 	ktrace_set_completion_handler(s, ^{
326 		T_EXPECT_EQ(events, 2, "should see only two events");
327 		ktrace_session_destroy(s);
328 		T_END;
329 	});
330 
331 	event_horizon_ts = mach_absolute_time();
332 
333 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
334 	/* first, try an old event at the beginning of trace */
335 	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event at beginning");
336 	/* after a good event has been traced, old events should be rejected */
337 	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "induce old event to be rejected");
338 	ktrace_end(s, 0);
339 
340 	dispatch_main();
341 }
342 
343 #define ORDERING_TIMEOUT_SEC 5
344 
345 T_DECL(ascending_time_order,
346     "ensure that kdebug events are in ascending order based on time",
347     T_META_CHECK_LEAKS(false))
348 {
349 	__block uint64_t prev_ts = 0;
350 	__block uint32_t prev_debugid = 0;
351 	__block unsigned int prev_cpu = 0;
352 	__block bool in_order = true;
353 
354 	start_controlling_ktrace();
355 
356 	ktrace_session_t s = ktrace_session_create();
357 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
358 
359 	ktrace_events_all(s, ^(struct trace_point *tp) {
360 		if (tp->timestamp < prev_ts) {
361 		        in_order = false;
362 		        T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
363 		        prev_ts, prev_debugid, prev_cpu);
364 		        T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
365 		        tp->timestamp, tp->debugid, tp->cpuid);
366 		        ktrace_end(s, 1);
367 		}
368 	});
369 
370 	ktrace_set_completion_handler(s, ^{
371 		ktrace_session_destroy(s);
372 		T_EXPECT_TRUE(in_order, "event timestamps were in-order");
373 		T_END;
374 	});
375 
376 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
377 	    "started tracing");
378 
379 	/* try to inject old timestamps into trace */
380 	assert_kdebug_test(KDBG_TEST_OLD_TIMES, "inject old time");
381 
382 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
383 	    dispatch_get_main_queue(), ^{
384 		T_LOG("ending test after timeout");
385 		ktrace_end(s, 1);
386 	});
387 
388 	dispatch_main();
389 }
390 
391 #pragma mark dyld tracing
392 
393 __attribute__((aligned(8)))
394 static const char map_uuid[16] = "map UUID";
395 
396 __attribute__((aligned(8)))
397 static const char unmap_uuid[16] = "unmap UUID";
398 
399 __attribute__((aligned(8)))
400 static const char sc_uuid[16] = "shared UUID";
401 
402 static fsid_t map_fsid = { .val = { 42, 43 } };
403 static fsid_t unmap_fsid = { .val = { 44, 45 } };
404 static fsid_t sc_fsid = { .val = { 46, 47 } };
405 
406 static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
407 static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
408 static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };
409 
410 #define MAP_LOAD_ADDR   0xabadcafe
411 #define UNMAP_LOAD_ADDR 0xfeedface
412 #define SC_LOAD_ADDR    0xfedfaced
413 
414 __unused
415 static void
expect_dyld_image_info(struct trace_point * tp,const uint64_t * exp_uuid,uint64_t exp_load_addr,fsid_t * exp_fsid,fsobj_id_t * exp_fsobjid,int order)416 expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
417     uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
418     int order)
419 {
420 #if defined(__LP64__) || defined(__arm64__)
421 	if (order == 0) {
422 		uint64_t uuid[2];
423 		uint64_t load_addr;
424 		fsid_t fsid;
425 
426 		uuid[0] = (uint64_t)tp->arg1;
427 		uuid[1] = (uint64_t)tp->arg2;
428 		load_addr = (uint64_t)tp->arg3;
429 		fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
430 		fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);
431 
432 		T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
433 		T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
434 		T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
435 		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
436 		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
437 	} else if (order == 1) {
438 		fsobj_id_t fsobjid;
439 
440 		fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
441 		fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);
442 
443 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
444 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
445 		    exp_fsobjid->fid_generation, NULL);
446 	} else {
447 		T_ASSERT_FAIL("unrecognized order of events %d", order);
448 	}
449 #else /* defined(__LP64__) */
450 	if (order == 0) {
451 		uint32_t uuid[4];
452 
453 		uuid[0] = (uint32_t)tp->arg1;
454 		uuid[1] = (uint32_t)tp->arg2;
455 		uuid[2] = (uint32_t)tp->arg3;
456 		uuid[3] = (uint32_t)tp->arg4;
457 
458 		T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
459 		T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
460 		T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
461 		T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
462 	} else if (order == 1) {
463 		uint32_t load_addr;
464 		fsid_t fsid;
465 		fsobj_id_t fsobjid;
466 
467 		load_addr = (uint32_t)tp->arg1;
468 		fsid.val[0] = (int32_t)tp->arg2;
469 		fsid.val[1] = (int32_t)tp->arg3;
470 		fsobjid.fid_objno = (uint32_t)tp->arg4;
471 
472 		T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
473 		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
474 		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
475 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
476 	} else if (order == 2) {
477 		fsobj_id_t fsobjid;
478 
479 		fsobjid.fid_generation = tp->arg1;
480 
481 		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
482 		    exp_fsobjid->fid_generation, NULL);
483 	} else {
484 		T_ASSERT_FAIL("unrecognized order of events %d", order);
485 	}
486 #endif /* defined(__LP64__) */
487 }
488 
489 #if defined(__LP64__) || defined(__arm64__)
490 #define DYLD_CODE_OFFSET (0)
491 #define DYLD_EVENTS      (2)
492 #else
493 #define DYLD_CODE_OFFSET (2)
494 #define DYLD_EVENTS      (3)
495 #endif
496 
497 static void
expect_dyld_events(ktrace_session_t s,const char * name,uint32_t base_code,const char * exp_uuid,uint64_t exp_load_addr,fsid_t * exp_fsid,fsobj_id_t * exp_fsobjid,uint8_t * saw_events)498 expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
499     const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
500     fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
501 {
502 	for (int i = 0; i < DYLD_EVENTS; i++) {
503 		ktrace_events_single(s, KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
504 		    base_code + DYLD_CODE_OFFSET + (unsigned int)i),
505 		    ^(struct trace_point *tp) {
506 			T_LOG("checking %s event %c", name, 'A' + i);
507 			expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
508 			exp_fsid, exp_fsobjid, i);
509 			*saw_events |= (1U << i);
510 		});
511 	}
512 }
513 
514 T_DECL(dyld_events, "test that dyld registering libraries emits events")
515 {
516 	dyld_kernel_image_info_t info;
517 
518 	/*
519 	 * Use pointers instead of __block variables in order to use these variables
520 	 * in the completion block below _and_ pass pointers to them to the
521 	 * expect_dyld_events function.
522 	 */
523 	uint8_t saw_events[3] = { 0 };
524 	uint8_t *saw_mapping = &(saw_events[0]);
525 	uint8_t *saw_unmapping = &(saw_events[1]);
526 	uint8_t *saw_shared_cache = &(saw_events[2]);
527 
528 	start_controlling_ktrace();
529 
530 	ktrace_session_t s = ktrace_session_create();
531 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
532 
533 	T_QUIET;
534 	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
535 	    "filtered to current process");
536 
537 	expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
538 	    MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
539 	expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
540 	    UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
541 	expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
542 	    sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);
543 
544 	ktrace_set_completion_handler(s, ^{
545 		ktrace_session_destroy(s);
546 
547 		T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
548 		T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
549 		T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
550 		T_END;
551 	});
552 
553 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
554 
555 	info.load_addr = MAP_LOAD_ADDR;
556 	memcpy(info.uuid, map_uuid, sizeof(info.uuid));
557 	info.fsid = map_fsid;
558 	info.fsobjid = map_fsobjid;
559 	T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
560 	    &info, 1), "registered dyld image info");
561 
562 	info.load_addr = UNMAP_LOAD_ADDR;
563 	memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
564 	info.fsid = unmap_fsid;
565 	info.fsobjid = unmap_fsobjid;
566 	T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
567 	    &info, 1), "unregistered dyld image info");
568 
569 	info.load_addr = SC_LOAD_ADDR;
570 	memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
571 	info.fsid = sc_fsid;
572 	info.fsobjid = sc_fsobjid;
573 	T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
574 		    mach_task_self(), info, FALSE, FALSE),
575 	    "registered dyld shared cache image info");
576 
577 	ktrace_end(s, 0);
578 
579 	dispatch_main();
580 }
581 
582 #pragma mark kdebug kernel macros
583 
584 #define EXP_KERNEL_EVENTS 5U
585 
586 static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
587 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
588 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
589 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
590 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
591 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
592 };
593 
594 static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
595 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
596 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
597 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
598 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
599 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
600 };
601 
602 static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
603 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
604 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
605 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
606 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
607 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
608 };
609 
610 static const uint32_t noprocfilt_evts[EXP_KERNEL_EVENTS] = {
611 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 15),
612 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 16),
613 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 17),
614 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 18),
615 	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 19),
616 };
617 
618 static void
expect_event(struct trace_point * tp,const char * name,unsigned int * events,const uint32_t * event_ids,size_t event_ids_len)619 expect_event(struct trace_point *tp, const char *name, unsigned int *events,
620     const uint32_t *event_ids, size_t event_ids_len)
621 {
622 	unsigned int event_idx = *events;
623 	bool event_found = false;
624 	size_t i;
625 	for (i = 0; i < event_ids_len; i++) {
626 		if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
627 			T_LOG("found %s event 0x%x", name, tp->debugid);
628 			event_found = true;
629 		}
630 	}
631 
632 	if (!event_found) {
633 		return;
634 	}
635 
636 	*events += 1;
637 	for (i = 0; i < event_idx; i++) {
638 		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)i + 1,
639 		    NULL);
640 	}
641 	for (; i < 4; i++) {
642 		T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)0, NULL);
643 	}
644 }
645 
646 static void
expect_release_event(struct trace_point * tp,unsigned int * events)647 expect_release_event(struct trace_point *tp, unsigned int *events)
648 {
649 	expect_event(tp, "release", events, rel_evts,
650 	    sizeof(rel_evts) / sizeof(rel_evts[0]));
651 }
652 
653 static void
expect_development_event(struct trace_point * tp,unsigned int * events)654 expect_development_event(struct trace_point *tp, unsigned int *events)
655 {
656 	expect_event(tp, "dev", events, dev_evts, sizeof(dev_evts) / sizeof(dev_evts[0]));
657 }
658 
659 static void
expect_filtered_event(struct trace_point * tp,unsigned int * events)660 expect_filtered_event(struct trace_point *tp, unsigned int *events)
661 {
662 	expect_event(tp, "filtered", events, filt_evts,
663 	    sizeof(filt_evts) / sizeof(filt_evts[0]));
664 }
665 
666 static void
expect_noprocfilt_event(struct trace_point * tp,unsigned int * events)667 expect_noprocfilt_event(struct trace_point *tp, unsigned int *events)
668 {
669 	expect_event(tp, "noprocfilt", events, noprocfilt_evts,
670 	    sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]));
671 }
672 
673 static void
674 expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback,
675     void (^cb)(unsigned int dev_seen, unsigned int rel_seen,
676     unsigned int filt_seen, unsigned int noprocfilt_seen))
677 {
678 	__block unsigned int dev_seen = 0;
679 	__block unsigned int rel_seen = 0;
680 	__block unsigned int filt_seen = 0;
681 	__block unsigned int noprocfilt_seen = 0;
682 
683 	void (^evtcb)(struct trace_point *tp) = ^(struct trace_point *tp) {
684 		expect_development_event(tp, &dev_seen);
685 		expect_release_event(tp, &rel_seen);
686 		expect_filtered_event(tp, &filt_seen);
687 		expect_noprocfilt_event(tp, &noprocfilt_seen);
688 	};
689 
690 	if (use_all_callback) {
691 		ktrace_events_all(s, evtcb);
692 	} else {
693 		ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
694 		    KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb);
695 	}
696 
697 	ktrace_set_completion_handler(s, ^{
698 		ktrace_session_destroy(s);
699 		cb(dev_seen, rel_seen, filt_seen, noprocfilt_seen);
700 		T_END;
701 	});
702 
703 	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
704 	assert_kdebug_test(KDBG_TEST_MACROS, "check test macros");
705 
706 	ktrace_end(s, 0);
707 }
708 
709 T_DECL(kernel_events, "ensure kernel macros work")
710 {
711 	start_controlling_ktrace();
712 
713 	ktrace_session_t s = ktrace_session_create();
714 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
715 
716 	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
717 	    "filtered events to current process");
718 
719 	expect_kdbg_test_events(s, false,
720 	    ^(unsigned int dev_seen, unsigned int rel_seen,
721 	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
722 		/*
723 		 * Development-only events are only filtered if running on an embedded
724 		 * OS.
725 		 */
726 		unsigned int dev_exp;
727 #if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
728 		dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
729 #else
730 		dev_exp = EXP_KERNEL_EVENTS;
731 #endif
732 
733 		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
734 		"release and development events seen");
735 		T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen",
736 		dev_exp ? "" : "not ");
737 		T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
738 		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
739 		"process filter-agnostic events seen");
740 	});
741 
742 	dispatch_main();
743 }
744 
745 T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work")
746 {
747 	start_controlling_ktrace();
748 
749 	ktrace_session_t s = ktrace_session_create();
750 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
751 
752 	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
753 	    "filtered events to current process");
754 
755 	expect_kdbg_test_events(s, true,
756 	    ^(unsigned int dev_seen, unsigned int rel_seen,
757 	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
758 		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
759 #if defined(__arm__) || defined(__arm64__)
760 		T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
761 		NULL);
762 #else
763 		T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS,
764 		"development-only events seen");
765 #endif /* defined(__arm__) || defined(__arm64__) */
766 		T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen");
767 		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
768 		"process filter-agnostic events seen");
769 	});
770 
771 	dispatch_main();
772 }
773 
774 T_DECL(kernel_events_noprocfilt,
775     "ensure that the no process filter kernel macros work")
776 {
777 	start_controlling_ktrace();
778 
779 	ktrace_session_t s = ktrace_session_create();
780 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
781 
782 	/*
783 	 * Only allow launchd events through.
784 	 */
785 	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, 1), "filtered events to launchd");
786 	for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) {
787 		T_QUIET;
788 		T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s,
789 		    noprocfilt_evts[i]),
790 		    "ignored process filter for noprocfilt event");
791 	}
792 
793 	expect_kdbg_test_events(s, false,
794 	    ^(unsigned int dev_seen, unsigned int rel_seen,
795 	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
796 		T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen");
797 		T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen");
798 		T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen");
799 
800 		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
801 		"process filter-agnostic events seen");
802 	});
803 
804 	dispatch_main();
805 }
806 
/* Cleared by the stress test to stop the abuser threads. */
static volatile bool continue_abuse = true;

#define STRESS_DEBUGID (0xfeedfac0)
#define ABUSE_SECS (2)
#define TIMER_NS (100 * NSEC_PER_USEC)
/*
 * Use the quantum as the gap threshold.
 */
#define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)

/*
 * Spin emitting trace events as fast as possible until continue_abuse is
 * cleared.  ctx carries a small thread index smuggled through the pointer.
 */
static void *
kdebug_abuser_thread(void *ctx)
{
	/*
	 * Fix: convert through uintptr_t instead of casting the pointer
	 * directly to unsigned int, which truncates and warns on LP64.
	 */
	unsigned int id = (unsigned int)(uintptr_t)ctx;
	uint64_t i = 0;
	while (continue_abuse) {
		kdebug_trace(STRESS_DEBUGID, id, i, 0, 0);
		i++;
	}

	return NULL;
}
829 
830 T_DECL(stress, "emit events on all but one CPU with a small buffer",
831     T_META_CHECK_LEAKS(false))
832 {
833 	start_controlling_ktrace();
834 
835 	T_SETUPBEGIN;
836 	ktrace_session_t s = ktrace_session_create();
837 	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");
838 
839 	/* Let's not waste any time with pleasantries. */
840 	ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED);
841 
842 	/* Ouch. */
843 	ktrace_events_all(s, ^(__unused struct trace_point *tp) {});
844 	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
845 	(void)atexit_b(^{ kperf_reset(); });
846 	(void)kperf_action_count_set(1);
847 	(void)kperf_timer_count_set(1);
848 	int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS));
849 	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns",
850 	    TIMER_NS);
851 	kperror = kperf_timer_action_set(0, 1);
852 	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set");
853 	kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO |
854 	    KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK |
855 	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO |
856 	    KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH |
857 	    KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM |
858 	    KPERF_SAMPLER_TH_INSTRS_CYCLES);
859 	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set");
860 	/* You monster... */
861 
862 	/* The coup-de-grace. */
863 	ktrace_set_buffer_size(s, 10);
864 
865 	char filepath_arr[MAXPATHLEN] = "";
866 	strlcpy(filepath_arr, dt_tmpdir(), sizeof(filepath_arr));
867 	strlcat(filepath_arr, "/stress.ktrace", sizeof(filepath_arr));
868 	char *filepath = filepath_arr;
869 
870 	int ncpus = 0;
871 	size_t ncpus_size = sizeof(ncpus);
872 	int ret = sysctlbyname("hw.logicalcpu_max", &ncpus, &ncpus_size, NULL, 0);
873 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.logicalcpu_max\"");
874 	T_QUIET; T_ASSERT_GT(ncpus, 0, "realistic number of CPUs");
875 
876 	pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t));
877 	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
878 	    ncpus - 1);
879 
880 	ktrace_set_completion_handler(s, ^{
881 		T_SETUPBEGIN;
882 		ktrace_session_destroy(s);
883 
884 		T_LOG("trace ended, searching for gaps");
885 
886 		ktrace_session_t sread = ktrace_session_create();
887 		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(sread, "ktrace_session_create");
888 
889 		int error = ktrace_set_file(sread, filepath);
890 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_set_file %s", filepath);
891 
892 		ktrace_file_t f = ktrace_file_open(filepath, false);
893 		T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s",
894 		filepath);
895 		uint64_t first_timestamp = 0;
896 		error = ktrace_file_earliest_timestamp(f, &first_timestamp);
897 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp");
898 
899 		uint64_t last_timestamp = 0;
900 		(void)ktrace_file_latest_timestamp(f, &last_timestamp);
901 
902 		__block uint64_t prev_timestamp = 0;
903 		__block uint64_t nevents = 0;
904 		ktrace_events_all(sread, ^(struct trace_point *tp) {
905 			nevents++;
906 			uint64_t delta_ns = 0;
907 			T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp,
908 			"timestamps are monotonically increasing");
909 			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
910 			tp->timestamp - prev_timestamp, &delta_ns);
911 			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
912 			if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) {
913 			        if (tp->debugname) {
914 			                T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
915 			                (double)delta_ns / 1e9, prev_timestamp,
916 			                tp->timestamp, tp->cpuid, tp->debugname, tp->debugid);
917 				} else {
918 			                T_LOG("gap: %gs at %llu - %llu on %d: %#x",
919 			                (double)delta_ns / 1e9, prev_timestamp,
920 			                tp->timestamp, tp->cpuid, tp->debugid);
921 				}
922 
923 			        /*
924 			         * These gaps are ok -- they appear after CPUs are brought back
925 			         * up.
926 			         */
927 #define INTERRUPT (0x1050000)
928 #define PERF_CPU_IDLE (0x27001000)
929 #define INTC_HANDLER (0x5000004)
930 #define DECR_TRAP (0x1090000)
931 			        uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK;
932 			        if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE &&
933 			        eventid != INTC_HANDLER && eventid != DECR_TRAP) {
934 			                unsigned int lost_events = TRACE_LOST_EVENTS;
935 			                T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events,
936 			                "gaps should end with lost events");
937 				}
938 			}
939 
940 			prev_timestamp = tp->timestamp;
941 		});
942 		ktrace_events_single(sread, TRACE_LOST_EVENTS, ^(struct trace_point *tp){
943 			T_LOG("lost: %llu on %d (%llu)", tp->timestamp, tp->cpuid, tp->arg1);
944 		});
945 
946 		__block uint64_t last_write = 0;
947 		ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS,
948 		^(struct trace_point *start, struct trace_point *end) {
949 			uint64_t delta_ns;
950 			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
951 			start->timestamp - last_write, &delta_ns);
952 			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
953 
954 			uint64_t dur_ns;
955 			converror = ktrace_convert_timestamp_to_nanoseconds(sread,
956 			end->timestamp - start->timestamp, &dur_ns);
957 			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
958 
959 			T_LOG("write: %llu (+%gs): %gus on %d: %llu events", start->timestamp,
960 			(double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1);
961 			last_write = end->timestamp;
962 		});
963 		ktrace_set_completion_handler(sread, ^{
964 			uint64_t duration_ns = 0;
965 			if (last_timestamp) {
966 			        int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
967 			        last_timestamp - first_timestamp, &duration_ns);
968 			        T_QUIET; T_ASSERT_POSIX_ZERO(converror,
969 			        "convert timestamp to ns");
970 			        T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
971 			        (double)duration_ns / 1e9, nevents,
972 			        (double)nevents / ((double)duration_ns / 1e6) / ncpus);
973 			}
974 			(void)unlink(filepath);
975 			ktrace_session_destroy(sread);
976 			T_END;
977 		});
978 
979 		int starterror = ktrace_start(sread, dispatch_get_main_queue());
980 		T_QUIET; T_ASSERT_POSIX_ZERO(starterror, "ktrace_start read session");
981 
982 		T_SETUPEND;
983 	});
984 
985 /* Just kidding... for now. */
986 #if 0
987 	kperror = kperf_sample_set(1);
988 	T_ASSERT_POSIX_SUCCESS(kperror,
989 	    "started kperf timer sampling every %llu ns", TIMER_NS);
990 #endif
991 
992 	for (int i = 0; i < (ncpus - 1); i++) {
993 		int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread,
994 		    (void *)(uintptr_t)i);
995 		T_QUIET; T_ASSERT_POSIX_ZERO(error,
996 		    "pthread_create abuser thread %d", i);
997 	}
998 
999 	int error = ktrace_start_writing_file(s, filepath,
1000 	    ktrace_compression_none, NULL, NULL);
1001 	T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath);
1002 
1003 	T_SETUPEND;
1004 
1005 	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC),
1006 	    dispatch_get_main_queue(), ^{
1007 		T_LOG("ending trace");
1008 		ktrace_end(s, 1);
1009 
1010 		continue_abuse = false;
1011 		for (int i = 0; i < (ncpus - 1); i++) {
1012 		        int joinerror = pthread_join(threads[i], NULL);
1013 		        T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d",
1014 		        i);
1015 		}
1016 	});
1017 
1018 	dispatch_main();
1019 }
1020 
/* Emit a sentinel round-trip event after every this-many observed events. */
#define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
/* End the test once this many sentinel events have come back around. */
#define ROUND_TRIPS_THRESHOLD UINT64_C(25)
/* Give up waiting for sentinel events after this long (failure mode). */
#define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
/* Collection interval passed to ktrace_set_collection_interval, in ms. */
#define COLLECTION_INTERVAL_MS 100
1025 
1026 /*
1027  * Test a sustained tracing session, involving multiple round-trips to the
1028  * kernel.
1029  *
1030  * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
1031  * unlikely to be emitted elsewhere.  Look for this event, too, and make sure we
1032  * see as many of them as we emitted.
1033  *
1034  * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
1035  * In the failure mode, we won't see any of these, so set a timeout of
1036  * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
1037  * never see.
1038  */
T_DECL(round_trips,
    "test sustained tracing with multiple round-trips through the kernel")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/*
	 * Set a small buffer and collection interval to increase the number of
	 * round-trips.
	 */
	ktrace_set_buffer_size(s, 50);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	/* Counters shared between the event handlers and completion handler. */
	__block uint64_t events = 0;
	__block uint64_t emitted = 0;
	__block uint64_t seen = 0;
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {
		events++;
		/* Periodically emit a sentinel event that is unlikely elsewhere. */
		if (events % ROUND_TRIP_PERIOD == 0) {
		        T_LOG("emitting round-trip event %" PRIu64, emitted);
		        kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
		        emitted++;
		}
	});

	/* Watch for the sentinel events coming back out of the kernel. */
	ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
		T_LOG("saw round-trip event after %" PRIu64 " events", events);
		seen++;
		if (seen >= ROUND_TRIPS_THRESHOLD) {
		        T_LOG("ending trace after seeing %" PRIu64 " events, "
		        "emitting %" PRIu64, seen, emitted);
		        ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
		"emitted %" PRIu64 " round-trip events", emitted);
		T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
		"saw %" PRIu64 " round-trip events", seen);
		ktrace_session_destroy(s);
		T_END;
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	/* Failure-mode timeout: end tracing even if no sentinels are seen. */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
	    ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
	    ^{
		T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
		ktrace_end(s, 0);
	});

	dispatch_main();
}
1097 
/* Check for trace progress once per interval, HEARTBEAT_COUNT times. */
#define HEARTBEAT_INTERVAL_SECS 1
#define HEARTBEAT_COUNT 10
1100 
1101 /*
1102  * Ensure we see events periodically, checking for recent events on a
1103  * heart-beat.
1104  */
T_DECL(event_coverage, "ensure events appear up to the end of tracing")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/* Track the most recent event timestamp and a running event count. */
	__block uint64_t current_timestamp = 0;
	__block uint64_t events = 0;
	ktrace_events_all(s, ^(struct trace_point *tp) {
		current_timestamp = tp->timestamp;
		events++;
	});

	/* Small buffer and short interval to force frequent collections. */
	ktrace_set_buffer_size(s, 20);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	/* Snapshots from the previous heartbeat, for progress comparisons. */
	__block uint64_t last_timestamp = 0;
	__block uint64_t last_events = 0;
	__block unsigned int heartbeats = 0;

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_QUIET; T_EXPECT_GT(events, 0ULL, "should have seen some events");
		T_END;
	});

	/* Fire a heartbeat on the main queue every HEARTBEAT_INTERVAL_SECS. */
	dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
	    0, 0, dispatch_get_main_queue());
	dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC),
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0);
	dispatch_source_set_cancel_handler(timer, ^{
		dispatch_release(timer);
	});

	dispatch_source_set_event_handler(timer, ^{
		heartbeats++;

		T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, "
		"current event time %lld", heartbeats, mach_absolute_time(),
		events, current_timestamp);

		/* Only compare once at least one event has been observed. */
		if (current_timestamp > 0) {
		        T_EXPECT_GT(current_timestamp, last_timestamp,
		        "event timestamps should be increasing");
		        T_QUIET; T_EXPECT_GT(events, last_events,
		        "number of events should be increasing");
		}

		last_timestamp = current_timestamp;
		last_events = events;

		/* Emit an event so there is always something new to observe. */
		kdebug_trace(TRACE_DEBUGID, 0, 0, 0, 0);

		if (heartbeats >= HEARTBEAT_COUNT) {
		        T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT);
		        ktrace_end(s, 0);
		}
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	dispatch_activate(timer);

	dispatch_main();
}
1173 
/*
 * Round-trip a kdebug buffer-size request through the kernel: request
 * `nevents` events worth of buffers, set them up, read back how many events
 * the kernel actually allocated, then tear the buffers down again.  Returns
 * the kernel's reported event count.
 */
static unsigned int
set_nevents(unsigned int nevents)
{
	int setbuf_mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)nevents };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(setbuf_mib, 4, NULL, 0, NULL, 0),
	    "set kdebug buffer size");

	int setup_mib[4] = { CTL_KERN, KERN_KDEBUG, KERN_KDSETUP, (int)nevents };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(setup_mib, 4, NULL, 0, NULL, 0),
	    "setup kdebug buffers");

	/* Ask the kernel how many events it actually allocated. */
	kbufinfo_t bufinfo = { 0 };
	size_t bufinfo_size = sizeof(bufinfo);
	int getbuf_mib[3] = { CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(getbuf_mib, 3, &bufinfo, &bufinfo_size,
	    NULL, 0), "get kdebug buffer size");

	int remove_mib[3] = { CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(remove_mib, 3, NULL, 0, NULL, 0),
	    "remove kdebug buffers");

	return (unsigned int)bufinfo.nkdbufs;
}
1202 
T_DECL(set_buffer_size, "ensure large buffer sizes can be set")
{
	T_SETUPBEGIN;
	uint64_t memsize = 0;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &memsize,
	    &(size_t){ sizeof(memsize) }, NULL, 0), "sysctl hw.memsize");
	T_SETUPEND;

#if TARGET_OS_IPHONE
	if (memsize >= (8ULL << 30)) {
		T_SKIP("skipping on iOS device with memory >= 8GB, rdar://79403304");
	}
#endif // TARGET_OS_IPHONE

	start_controlling_ktrace();

	/*
	 * Try to allocate up to one-eighth of available memory towards
	 * tracing.
	 */
	uint64_t maxevents_u64 = memsize / 8 / sizeof(kd_buf);
	if (maxevents_u64 > UINT32_MAX) {
		maxevents_u64 = UINT32_MAX;
	}
	unsigned int maxevents = (unsigned int)maxevents_u64;

	/* A request of 0 yields the kernel's count for the smallest request. */
	unsigned int minevents = set_nevents(0);
	T_ASSERT_GT(minevents, 0, "saw non-zero minimum event count of %u",
	    minevents);

	/* Probe four evenly-spaced sizes between the minimum and maximum. */
	unsigned int step = ((maxevents - minevents - 1) / 4);
	T_ASSERT_GT(step, 0, "stepping by %u events", step);

	for (unsigned int i = minevents + step; i < maxevents; i += step) {
		unsigned int actualevents = set_nevents(i);
		/* The kernel may adjust the request, but only within a slack of
		 * `minevents`. */
		T_ASSERT_GE(actualevents, i - minevents,
		    "%u events in kernel when %u requested", actualevents, i);
	}
}
1242 
1243 static void *
donothing(__unused void * arg)1244 donothing(__unused void *arg)
1245 {
1246 	return NULL;
1247 }
1248 
/*
 * Lengthen this process's name past MAXCOMLEN with kern.procname, spawn a
 * thread, and check that the TRACE_STRING_NEWTHREAD event carries the full
 * long name in its four arguments.
 */
T_DECL(long_names, "ensure long command names are reported")
{
	start_controlling_ktrace();

	char longname[] = "thisisaverylongprocessname!";
	char *longname_ptr = longname;
	static_assert(sizeof(longname) > 16,
	    "the name should be longer than MAXCOMLEN");

	int ret = sysctlbyname("kern.procname", NULL, NULL, longname,
	    sizeof(longname));
	T_ASSERT_POSIX_SUCCESS(ret,
	    "use sysctl kern.procname to lengthen the name");

	ktrace_session_t ktsess = ktrace_session_create();
	/*
	 * Check the session before using it below; the other tests in this
	 * file assert this immediately after creation, too.
	 */
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(ktsess, "created session");

	/*
	 * 32-bit kernels can only trace 16 bytes of the string in their event
	 * arguments.
	 */
	if (!ktrace_is_kernel_64_bit(ktsess)) {
		longname[16] = '\0';
	}

	ktrace_filter_pid(ktsess, getpid());

	__block bool saw_newthread = false;
	ktrace_events_single(ktsess, TRACE_STRING_NEWTHREAD,
	    ^(struct trace_point *tp) {
		if (ktrace_get_pid_for_thread(ktsess, tp->threadid) ==
		    getpid()) {
			saw_newthread = true;

			/* Reassemble the name from the four 8-byte arguments. */
			char argname[32] = {};
			strncat(argname, (char *)&tp->arg1, sizeof(tp->arg1));
			strncat(argname, (char *)&tp->arg2, sizeof(tp->arg2));
			strncat(argname, (char *)&tp->arg3, sizeof(tp->arg3));
			strncat(argname, (char *)&tp->arg4, sizeof(tp->arg4));

			T_EXPECT_EQ_STR((char *)argname, longname_ptr,
			    "process name of new thread should be long");

			ktrace_end(ktsess, 1);
		}
	});

	ktrace_set_completion_handler(ktsess, ^{
		ktrace_session_destroy(ktsess);
		T_EXPECT_TRUE(saw_newthread,
		    "should have seen the new thread");
		T_END;
	});

	int error = ktrace_start(ktsess, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	pthread_t thread = NULL;
	error = pthread_create(&thread, NULL, donothing, NULL);
	T_ASSERT_POSIX_ZERO(error, "create new thread");

	/* Failure-mode timeout in case the event never appears. */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^{
		ktrace_end(ktsess, 0);
	});

	error = pthread_join(thread, NULL);
	T_ASSERT_POSIX_ZERO(error, "join to thread");

	dispatch_main();
}
1319 
1320 T_DECL(continuous_time, "make sure continuous time status can be queried",
1321 	T_META_RUN_CONCURRENTLY(true))
1322 {
1323 	bool cont_time = kdebug_using_continuous_time();
1324 	T_ASSERT_FALSE(cont_time, "should not be using continuous time yet");
1325 }
1326 
1327 T_DECL(lookup_long_paths, "lookup long path names")
1328 {
1329 	start_controlling_ktrace();
1330 
1331 	int ret = chdir("/tmp");
1332 	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to /tmp");
1333 	const char *dir = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/";
1334 	int i = 0;
1335 	do {
1336 		ret = mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR);
1337 		if (ret >= 0 || errno != EEXIST) {
1338 			T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mkdir of %d nested directory",
1339 			    i);
1340 		}
1341 		ret = chdir(dir);
1342 		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to %d nested directory", i);
1343 	} while (i++ < 40);
1344 
1345 	ktrace_session_t s = ktrace_session_create();
1346 	ktrace_set_collection_interval(s, 250);
1347 	ktrace_filter_pid(s, getpid());
1348 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1349 	ktrace_events_single(s, VFS_LOOKUP, ^(struct trace_point *tp __unused){});
1350 	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
1351 
1352 	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", 0);
1353 
1354 	ktrace_set_completion_handler(s, ^{
1355 		dispatch_release(q);
1356 		T_END;
1357 	});
1358 
1359 	int error = ktrace_start(s, q);
1360 	T_ASSERT_POSIX_ZERO(error, "started tracing");
1361 
1362 	int fd = open("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", O_RDWR | O_CREAT);
1363 	T_ASSERT_POSIX_SUCCESS(fd, "opened file at %d directories deep", i);
1364 
1365 	sleep(5);
1366 
1367 	T_LOG("ending tracing");
1368 	ktrace_end(s, 0);
1369 	dispatch_main();
1370 }
1371 
1372 #pragma mark - boot tracing
1373 
/*
 * Kernel startup subsystems whose names should appear as TRACE_INFO_STRING
 * events during early boot; early_boot_tracing matches them in this order.
 */
static const char *expected_subsystems[] = {
	"tunables", "locks_early", "kprintf", "pmap_steal", "vm_kernel",
	"kmem", "kmem_alloc", "zalloc",
	/* "percpu", only has a startup phase on Intel */
	"locks", "codesigning", "oslog", "early_boot",
};
#define EXPECTED_SUBSYSTEMS_LEN \
		(sizeof(expected_subsystems) / sizeof(expected_subsystems[0]))
1382 
T_DECL(early_boot_tracing, "ensure early boot strings are present",
	T_META_BOOTARGS_SET("trace=100000"))
{
	T_ATEND(reset_ktrace);

	T_SETUPBEGIN;
	/* Attach to the trace buffer set up by the trace= boot-arg. */
	ktrace_session_t s = ktrace_session_create();
	ktrace_set_collection_interval(s, 250);
	int error = ktrace_set_use_existing(s);
	T_ASSERT_POSIX_ZERO(error, "use existing trace buffer");

#if defined(__x86_64__)
#define FIRST_EVENT_STRING "i386_init"
#else /* defined(__x86_64__) */
#define FIRST_EVENT_STRING "kernel_startup_bootstrap"
#endif /* !defined(__x86_64__) */

	__block bool seen_event = false;
	__block unsigned int cur_subsystem = 0;
	ktrace_events_single(s, TRACE_INFO_STRING, ^(struct trace_point *tp) {
		/*
		 * Reassemble the string from the four event arguments; at most
		 * 32 bytes plus the NUL terminator preserved from initialization.
		 */
		char early_str[33] = "";
		size_t argsize = ktrace_is_kernel_64_bit(s) ? 8 : 4;
		memcpy(early_str, &tp->arg1, argsize);
		memcpy(early_str + argsize, &tp->arg2, argsize);
		memcpy(early_str + argsize * 2, &tp->arg3, argsize);
		memcpy(early_str + argsize * 3, &tp->arg4, argsize);

		if (!seen_event) {
			T_LOG("found first string event with args: "
			    "0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64,
			    tp->arg1, tp->arg2, tp->arg3, tp->arg4);
			char expect_str[33] = FIRST_EVENT_STRING;
			if (!ktrace_is_kernel_64_bit(s)) {
				// Only the first 16 bytes of the string will be traced.
				expect_str[16] = '\0';
			}

			T_EXPECT_EQ_STR(early_str, expect_str,
			    "first event in boot trace should be the bootstrap message");
		}
		seen_event = true;

		/* Match the expected subsystem logs strictly in order. */
		if (strcmp(early_str, expected_subsystems[cur_subsystem]) == 0) {
			T_LOG("found log for subsystem %s",
					expected_subsystems[cur_subsystem]);
			cur_subsystem++;
		}

		if (cur_subsystem == EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("ending after seeing all expected logs");
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(seen_event, "should see an early boot string event");
		T_EXPECT_TRUE(cur_subsystem == EXPECTED_SUBSYSTEMS_LEN,
				"should see logs from all subsystems");
		if (cur_subsystem != EXPECTED_SUBSYSTEMS_LEN) {
			T_LOG("missing log for %s", expected_subsystems[cur_subsystem]);
		}
		T_END;
	});

	error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	T_SETUPEND;

	dispatch_main();
}
1454 
T_DECL(typefilter_boot_arg, "ensure typefilter is set up correctly at boot",
	T_META_BOOTARGS_SET("trace=100000 trace_typefilter=S0x0c00,C0xfe"))
{
	T_ATEND(reset_ktrace);

	T_SETUPBEGIN;
	ktrace_config_t config = ktrace_config_create_current();
	T_QUIET; T_WITH_ERRNO;
	T_ASSERT_NOTNULL(config, "create config from current system");
	T_SETUPEND;

	T_LOG("ktrace configuration:");
	ktrace_config_print_description(config, stdout);

	/* Check that the boot-arg's subclass (S0x0c00) and class (C0xfe)
	 * requests made it into the active typefilter bitmap. */
	uint8_t *typefilt = ktrace_config_kdebug_get_typefilter(config);
	T_ASSERT_NOTNULL(typefilt, "typefilter is active");
	T_EXPECT_TRUE(typefilt[0x0c00 / 8],
			"specified subclass is set in typefilter");
	T_MAYFAIL; // rdar://63625062 (UTD converts commas in boot-args to spaces)
	T_EXPECT_TRUE(typefilt[0xfeed / 8],
			"specified class is set in typefilter");

	ktrace_config_destroy(config);
}
1479 
1480 #pragma mark - events present
1481 
/*
 * Set by sighandler when the spawned child exits, and polled by the
 * `while (!recvd_sigchild) pause();` loop in instrs_and_cycles_on_proc_exit.
 * It must be `volatile sig_atomic_t` so the write in the signal handler is
 * guaranteed visible to that loop (CERT SIG31-C); a plain int load may be
 * hoisted out of the loop by the compiler.
 */
static volatile sig_atomic_t recvd_sigchild = 0;
static void
sighandler(int sig)
{
	if (sig != SIGCHLD) {
		/* NOTE(review): T_ASSERT_FAIL is not async-signal-safe, but this
		 * path only runs on an already-fatal test bug. */
		T_ASSERT_FAIL("unexpected signal: %d", sig);
	}
	recvd_sigchild = 1;
}
1491 
T_DECL(instrs_and_cycles_on_proc_exit,
		"instructions and cycles should be traced on thread exit",
		T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
{
	T_SETUPBEGIN;
	start_controlling_ktrace();
	int error;
	/* Filled in via proc_pid_rusage after the child exits; freed in the
	 * completion handler. */
	struct rusage_info_v4 *rusage = calloc(1, sizeof(*rusage));
	char *args[] = { "ls", "-l", NULL, };
	int status;
	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test",
			DISPATCH_QUEUE_SERIAL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(signal(SIGCHLD, sighandler),
			"register signal handler");

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/* Captured values from the child's proc-exit counters event. */
	__block pid_t pid;
	__block bool seen_event = false;
	__block uint64_t proc_instrs = 0;
	__block uint64_t proc_cycles = 0;
	__block uint64_t proc_sys_time = 0;
	__block uint64_t proc_usr_time = 0;
	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_PROC_EXIT,
			^(ktrace_event_t tp){
		if (tp->pid == pid) {
			seen_event = true;
			proc_instrs = tp->arg1;
			proc_cycles = tp->arg2;
			proc_sys_time = tp->arg3;
			proc_usr_time = tp->arg4;
			ktrace_end(s, 1);
		}
	});
	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");
	ktrace_set_completion_handler(s, ^{
		// TODO Check for equality once rdar://61948669 is fixed.
		T_ASSERT_GE(proc_instrs, rusage->ri_instructions,
				"trace event instrs are >= to rusage instrs");
		T_ASSERT_GE(proc_cycles, rusage->ri_cycles,
				"trace event cycles are >= to rusage cycles");
		T_ASSERT_GE(proc_sys_time, rusage->ri_system_time,
				"trace event sys time is >= rusage sys time");
		T_ASSERT_GE(proc_usr_time, rusage->ri_user_time,
				"trace event usr time >= rusage usr time");
		T_EXPECT_TRUE(seen_event, "should see the proc exit trace event");

		free(rusage);
		ktrace_session_destroy(s);
		dispatch_release(q);
		T_END;
	});
	error = ktrace_start(s, q);
	T_ASSERT_POSIX_ZERO(error, "start tracing");
	T_SETUPEND;

	extern char **environ;
	status = posix_spawnp(&pid, args[0], NULL, NULL, args, environ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(status, "spawn process");
	if (status == 0) {
		/* Wait for SIGCHLD, then snapshot rusage before reaping the child. */
		while (!recvd_sigchild) {
			pause();
		}
		error = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t)rusage);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "rusage");
		error = waitpid(pid, &status, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "waitpid");
	}
	dispatch_main();
}
1563 
/* Number of worker threads spawned by instrs_and_cycles_on_thread_exit. */
#define NO_OF_THREADS 2

/* Per-thread sample filled in by get_thread_counters on the sampled thread. */
struct thread_counters_info {
	uint64_t counts[2]; //cycles and/or instrs
	uint64_t cpu_time;
	uint64_t thread_id;
};
typedef struct thread_counters_info *tc_info_t;
1572 
1573 static void*
get_thread_counters(void * ptr)1574 get_thread_counters(void* ptr)
1575 {
1576 	extern int thread_selfcounts(int type, void *buf, size_t nbytes);
1577 	extern uint64_t __thread_selfusage(void);
1578 	extern uint64_t __thread_selfid(void);
1579 	tc_info_t tc_info = (tc_info_t) ptr;
1580 	tc_info->thread_id = __thread_selfid();
1581 	// Just to increase the instr, cycle count
1582 	T_LOG("printing %llu\n", tc_info->thread_id);
1583 	tc_info->cpu_time = __thread_selfusage();
1584 	thread_selfcounts(1, tc_info->counts, sizeof(tc_info->counts));
1585 	return NULL;
1586 }
1587 
1588 T_DECL(instrs_and_cycles_on_thread_exit,
1589 		"instructions and cycles should be traced on thread exit",
1590 		T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
1591 {
1592 	T_SETUPBEGIN;
1593 	start_controlling_ktrace();
1594 
1595 	int error;
1596 	pthread_t *threads = calloc((unsigned int)(NO_OF_THREADS),
1597 			sizeof(pthread_t));
1598 	 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
1599 	    NO_OF_THREADS);
1600 	tc_info_t tc_infos = calloc((unsigned int) (NO_OF_THREADS),
1601 			sizeof(struct thread_counters_info));
1602 	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(tc_infos,
1603 			"calloc(%d thread counters)", NO_OF_THREADS);
1604 
1605 	ktrace_session_t s = ktrace_session_create();
1606 	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
1607 	ktrace_filter_pid(s, getpid());
1608 
1609 	__block int nevents = 0;
1610 	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_THR_EXIT,
1611 			^(ktrace_event_t tp) {
1612 		for (int i = 0; i < NO_OF_THREADS; i++) {
1613 			if (tp->threadid == tc_infos[i].thread_id) {
1614 				nevents++;
1615 				uint64_t cpu_time = tp->arg3 + tp->arg4;
1616 				/*
1617 				 * as we are getting counts before thread exit,
1618 				 * the counts at thread exit should be greater than
1619 				 * thread_selfcounts
1620 				 */
1621 				T_ASSERT_GE(tp->arg1, tc_infos[i].counts[0],
1622 					"trace event instrs are >= to thread's instrs");
1623 				T_ASSERT_GE(tp->arg2, tc_infos[i].counts[1],
1624 					"trace event cycles are >= to thread's cycles");
1625 				T_ASSERT_GE(cpu_time, tc_infos[i].cpu_time,
1626 					"trace event cpu time is >= thread's cpu time");
1627 			}
1628 			if (nevents == NO_OF_THREADS) {
1629 				ktrace_end(s, 1);
1630 			}
1631 		}
1632 	});
1633 	T_QUIET; T_ASSERT_POSIX_ZERO(error, "trace single event");
1634 	ktrace_set_completion_handler(s, ^{
1635 		T_EXPECT_EQ(NO_OF_THREADS, nevents, "seen %d thread exit trace events",
1636 				NO_OF_THREADS);
1637 		free(tc_infos);
1638 		ktrace_session_destroy(s);
1639 		T_END;
1640 	});
1641 	error = ktrace_start(s, dispatch_get_main_queue());
1642 	T_ASSERT_POSIX_ZERO(error, "start tracing");
1643 
1644 	for (int i = 0; i < NO_OF_THREADS; i++) {
1645 		error = pthread_create(&threads[i], NULL, get_thread_counters,
1646 				(void *)&tc_infos[i]);
1647 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create thread %d", i);
1648 	}
1649 	T_SETUPEND;
1650 	for (int i = 0; i < NO_OF_THREADS; i++) {
1651 		error = pthread_join(threads[i], NULL);
1652 		T_QUIET; T_EXPECT_POSIX_ZERO(error, "pthread_join thread %d", i);
1653 	}
1654 
1655 	dispatch_main();
1656 }
1657