xref: /xnu-8796.101.5/tests/stackshot_tests.m (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1#include <darwintest.h>
2#include <darwintest_utils.h>
3#include <darwintest_multiprocess.h>
4#include <kern/debug.h>
5#include <kern/kern_cdata.h>
6#include <kern/block_hint.h>
7#include <kdd.h>
8#include <libproc.h>
9#include <mach-o/dyld.h>
10#include <mach-o/dyld_images.h>
11#include <mach-o/dyld_priv.h>
12#include <sys/syscall.h>
13#include <sys/stackshot.h>
14#include <uuid/uuid.h>
15#include <servers/bootstrap.h>
16#include <pthread/workqueue_private.h>
17#include <dispatch/private.h>
18#include <stdalign.h>
19#import <zlib.h>
20#import <IOKit/IOKitLib.h>
21#import <IOKit/IOKitLibPrivate.h>
22#import <IOKit/IOKitKeysPrivate.h>
23
/* Test metadata shared by every test in this file: run as root, skip leak checks. */
T_GLOBAL_META(
		T_META_NAMESPACE("xnu.stackshot"),
		T_META_RADAR_COMPONENT_NAME("xnu"),
		T_META_RADAR_COMPONENT_VERSION("stackshot"),
		T_META_OWNER("jonathan_w_adams"),
		T_META_CHECK_LEAKS(false),
		T_META_ASROOT(true)
		);

/* forward declarations for helpers defined later in this file */
static const char *current_process_name(void);
static void verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count);
static void parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, NSDictionary *extra);
static void parse_thread_group_stackshot(void **sbuf, size_t sslen);
static uint64_t stackshot_timestamp(void *ssbuf, size_t sslen);
static void initialize_thread(void);

/*
 * Extra STACKSHOT_* flags OR'd into every capture; take_stackshot() toggles
 * STACKSHOT_DO_COMPRESS here to re-run tests with compression enabled.
 */
static uint64_t global_flags = 0;

/* initial and maximum buffer sizes for the microstackshot growth loop */
#define DEFAULT_STACKSHOT_BUFFER_SIZE (1024 * 1024)
#define MAX_STACKSHOT_BUFFER_SIZE     (6 * 1024 * 1024)

/* bootstrap service name used by the special-reply-port (SRP) tests */
#define SRP_SERVICE_NAME "com.apple.xnu.test.stackshot.special_reply_port"

/* bit flags for parse_stackshot */
#define PARSE_STACKSHOT_DELTA                0x01
#define PARSE_STACKSHOT_ZOMBIE               0x02
#define PARSE_STACKSHOT_SHAREDCACHE_LAYOUT   0x04
#define PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL 0x08
#define PARSE_STACKSHOT_TURNSTILEINFO        0x10
#define PARSE_STACKSHOT_POSTEXEC             0x20
#define PARSE_STACKSHOT_WAITINFO_CSEG        0x40
#define PARSE_STACKSHOT_WAITINFO_SRP         0x80
#define PARSE_STACKSHOT_TRANSLATED           0x100
#define PARSE_STACKSHOT_SHAREDCACHE_FLAGS    0x200
#define PARSE_STACKSHOT_EXEC_INPROGRESS      0x400
#define PARSE_STACKSHOT_TRANSITIONING        0x800
#define PARSE_STACKSHOT_ASYNCSTACK           0x1000
#define PARSE_STACKSHOT_COMPACTINFO          0x2000 /* TODO: rdar://88789261 */
#define PARSE_STACKSHOT_DRIVERKIT            0x4000

/* keys for 'extra' dictionary for parse_stackshot */
static const NSString* zombie_child_pid_key = @"zombie_child_pid"; // -> @(pid), required for PARSE_STACKSHOT_ZOMBIE
static const NSString* postexec_child_unique_pid_key = @"postexec_child_unique_pid";  // -> @(unique_pid), required for PARSE_STACKSHOT_POSTEXEC
static const NSString* cseg_expected_threadid_key = @"cseg_expected_threadid"; // -> @(tid), required for PARSE_STACKSHOT_WAITINFO_CSEG
static const NSString* srp_expected_threadid_key = @"srp_expected_threadid"; // -> @(tid), this or ..._pid required for PARSE_STACKSHOT_WAITINFO_SRP
static const NSString* srp_expected_pid_key = @"srp_expected_pid"; // -> @(pid), this or ..._threadid required for PARSE_STACKSHOT_WAITINFO_SRP
static const NSString* translated_child_pid_key = @"translated_child_pid"; // -> @(pid), required for PARSE_STACKSHOT_TRANSLATED
static const NSString* sharedcache_child_pid_key = @"sharedcache_child_pid"; // @(pid), required for PARSE_STACKSHOT_SHAREDCACHE_FLAGS
static const NSString* sharedcache_child_sameaddr_key = @"sharedcache_child_sameaddr"; // @(0 or 1), required for PARSE_STACKSHOT_SHAREDCACHE_FLAGS
static const NSString* exec_inprogress_pid_key = @"exec_inprogress_pid"; // -> @(pid), required for PARSE_STACKSHOT_EXEC_INPROGRESS
static const NSString* exec_inprogress_found_key = @"exec_inprogress_found";  // callback when inprogress is found
static const NSString* transitioning_pid_key = @"transitioning_task_pid"; // -> @(pid), required for PARSE_STACKSHOT_TRANSITIONING
static const NSString* asyncstack_expected_threadid_key = @"asyncstack_expected_threadid"; // -> @(tid), required for PARSE_STACKSHOT_ASYNCSTACK
static const NSString* asyncstack_expected_stack_key = @"asyncstack_expected_stack"; // -> @[pc...]), expected PCs for asyncstack
static const NSString* driverkit_found_key = @"driverkit_found_key"; // callback when driverkit process is found. argument is the process pid.

/* queue label the dispatch_queue_label test expects to find in the stackshot */
#define TEST_STACKSHOT_QUEUE_LABEL        "houston.we.had.a.problem"
#define TEST_STACKSHOT_QUEUE_LABEL_LENGTH sizeof(TEST_STACKSHOT_QUEUE_LABEL)
82
T_DECL(microstackshots, "test the microstackshot syscall")
{
	/* Grow the buffer geometrically until the kernel's data fits. */
	unsigned int bufsize = DEFAULT_STACKSHOT_BUFFER_SIZE;
	void *ssbuf = NULL;

	for (;;) {
		ssbuf = malloc(bufsize);
		T_QUIET; T_ASSERT_NOTNULL(ssbuf, "allocated stackshot buffer");

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		int len = syscall(SYS_microstackshot, ssbuf, bufsize,
				(uint32_t) STACKSHOT_GET_MICROSTACKSHOT);
#pragma clang diagnostic pop
		if (len == ENOSYS) {
			T_SKIP("microstackshot syscall failed, likely not compiled with CONFIG_TELEMETRY");
		}
		if (len == -1 && errno == ENOSPC) {
			/* syscall failed because buffer wasn't large enough, try again */
			free(ssbuf);
			ssbuf = NULL;
			bufsize *= 2;
			T_ASSERT_LE(bufsize, (unsigned int)MAX_STACKSHOT_BUFFER_SIZE,
					"growing stackshot buffer to sane size");
			continue;
		}
		T_ASSERT_POSIX_SUCCESS(len, "called microstackshot syscall");
		break;
	}

	/* The capture should begin with the microstackshot magic number. */
	T_EXPECT_EQ(*(uint32_t *)ssbuf,
			(uint32_t)STACKSHOT_MICRO_SNAPSHOT_MAGIC,
			"magic value for microstackshot matches");

	free(ssbuf);
}
119
/*
 * Parameters describing a single capture for take_stackshot().
 * Zero-initialized fields give the default behavior.
 */
struct scenario {
	const char *name;         /* basename of the .kcdata result file; NULL = no file */
	uint64_t flags;           /* STACKSHOT_* flags for stackshot_config_set_flags() */
	bool quiet;               /* suppress per-step assertion logging */
	bool should_fail;         /* capture is expected to fail (T_EXPECTFAIL) */
	bool maybe_unsupported;   /* ENOTSUP skips the test instead of failing */
	bool maybe_enomem;        /* ENOMEM skips the test instead of failing */
	bool no_recordfile;       /* don't write the result file even when name is set */
	pid_t target_pid;         /* >0: capture only this process */
	bool target_kernel;       /* capture only the kernel task (pid 0) */
	uint64_t since_timestamp; /* >0: take a delta stackshot since this mach time */
	uint32_t size_hint;       /* >0: pre-size the kernel's buffer */
	dt_stat_time_t timer;     /* non-NULL: accumulate capture durations (perf runs) */
};
134
/* Suppress output for the next assertion when benchmarking or explicitly quiet. */
static void
quiet(struct scenario *scenario)
{
	bool suppress = scenario->timer || scenario->quiet;
	if (suppress) {
		T_QUIET;
	}
}
142
/*
 * Capture a stackshot described by `scenario`, optionally write it to a
 * darwintest result file, and hand the raw kcdata buffer to `cb`.
 * If `compress_ok` is true, the entire capture (including `cb`) is run a
 * second time with STACKSHOT_DO_COMPRESS OR'd in via global_flags, so the
 * caller's parsing is exercised against both plain and compressed kcdata.
 */
static void
take_stackshot(struct scenario *scenario, bool compress_ok, void (^cb)(void *buf, size_t size))
{
start:
	initialize_thread();

	void *config = stackshot_config_create();
	quiet(scenario);
	T_ASSERT_NOTNULL(config, "created stackshot config");

	/* global_flags carries STACKSHOT_DO_COMPRESS on the second pass */
	int ret = stackshot_config_set_flags(config, scenario->flags | global_flags);
	quiet(scenario);
	T_ASSERT_POSIX_ZERO(ret, "set flags %#llx on stackshot config", scenario->flags);

	if (scenario->size_hint > 0) {
		ret = stackshot_config_set_size_hint(config, scenario->size_hint);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set size hint %" PRIu32 " on stackshot config",
				scenario->size_hint);
	}

	if (scenario->target_pid > 0) {
		ret = stackshot_config_set_pid(config, scenario->target_pid);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set target pid %d on stackshot config",
				scenario->target_pid);
	} else if (scenario->target_kernel) {
		/* pid 0 targets the kernel task */
		ret = stackshot_config_set_pid(config, 0);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set kernel target on stackshot config");
	}

	if (scenario->since_timestamp > 0) {
		/* non-zero timestamp requests a delta stackshot since that time */
		ret = stackshot_config_set_delta_timestamp(config, scenario->since_timestamp);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set since timestamp %" PRIu64 " on stackshot config",
				scenario->since_timestamp);
	}

	int retries_remaining = 5;

retry: ;
	/* time the capture itself so perf scenarios can record it below */
	uint64_t start_time = mach_absolute_time();
	ret = stackshot_capture_with_config(config);
	uint64_t end_time = mach_absolute_time();

	if (scenario->should_fail) {
		T_EXPECTFAIL;
		T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
		return;
	}

	/* the kernel can transiently refuse a stackshot; retry a bounded number of times */
	if (ret == EBUSY || ret == ETIMEDOUT) {
		if (retries_remaining > 0) {
			if (!scenario->timer) {
				T_LOG("stackshot_capture_with_config failed with %s (%d), retrying",
						strerror(ret), ret);
			}

			retries_remaining--;
			goto retry;
		} else {
			T_ASSERT_POSIX_ZERO(ret,
					"called stackshot_capture_with_config (no retries remaining)");
		}
	} else if ((ret == ENOTSUP) && scenario->maybe_unsupported) {
		T_SKIP("kernel indicated this stackshot configuration is not supported");
	} else if ((ret == ENOMEM) && scenario->maybe_enomem) {
		T_SKIP("insufficient available memory to run test");
	} else {
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
	}

	if (scenario->timer) {
		dt_stat_mach_time_add(scenario->timer, end_time - start_time);
	}
	void *buf = stackshot_config_get_stackshot_buffer(config);
	size_t size = stackshot_config_get_stackshot_size(config);
	if (scenario->name && !scenario->no_recordfile) {
		/* save the capture as <name>.kcdata next to the test results */
		char sspath[MAXPATHLEN];
		strlcpy(sspath, scenario->name, sizeof(sspath));
		strlcat(sspath, ".kcdata", sizeof(sspath));
		T_QUIET; T_ASSERT_POSIX_ZERO(dt_resultfile(sspath, sizeof(sspath)),
				"create result file path");

		if (!scenario->quiet) {
			T_LOG("writing stackshot to %s", sspath);
		}

		FILE *f = fopen(sspath, "w");
		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(f,
				"open stackshot output file");

		/* NOTE(review): fwrite returns 0 (not -1) on failure, so
		 * T_ASSERT_POSIX_SUCCESS would not catch a short write — confirm */
		size_t written = fwrite(buf, size, 1, f);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(written, "wrote stackshot to file");

		fclose(f);
	}
	cb(buf, size);
	if (compress_ok) {
		if (global_flags == 0) {
			/* second pass: redo everything with compression enabled.
			 * NOTE(review): the first pass's config is not dealloc'd
			 * before the goto — small leak, presumably acceptable in a test */
			T_LOG("Restarting test with compression");
			global_flags |= STACKSHOT_DO_COMPRESS;
			goto start;
		} else {
			global_flags = 0;
		}
	}

	ret = stackshot_config_dealloc(config);
	T_QUIET; T_EXPECT_POSIX_ZERO(ret, "deallocated stackshot config");
}
256
T_DECL(simple_compressed, "take a simple compressed stackshot")
{
	/* A representative flag set, with compression requested up front. */
	uint64_t ss_flags = (STACKSHOT_DO_COMPRESS | STACKSHOT_SAVE_LOADINFO | STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_GLOBAL_MEM_STATS |
			STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT);
	struct scenario scenario = {
		.name = "kcdata_compressed",
		.flags = ss_flags,
	};

	T_LOG("taking compressed kcdata stackshot");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
270
T_DECL(panic_compressed, "take a compressed stackshot with the same flags as a panic stackshot")
{
	/* Mirror the flag set used for panic-time stackshots. */
	struct scenario scenario = {
		.name = "kcdata_panic_compressed",
		.flags = (STACKSHOT_SAVE_KEXT_LOADINFO |
				STACKSHOT_SAVE_LOADINFO |
				STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_ENABLE_BT_FAULTING |
				STACKSHOT_ENABLE_UUID_FAULTING |
				STACKSHOT_DO_COMPRESS |
				STACKSHOT_NO_IO_STATS |
				STACKSHOT_THREAD_WAITINFO |
#if TARGET_OS_MAC
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT |
#endif
				STACKSHOT_DISABLE_LATENCY_INFO),
	};

	T_LOG("taking compressed kcdata stackshot with panic flags");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
296
T_DECL(kcdata, "test that kcdata stackshots can be taken and parsed")
{
	/* The baseline kcdata capture every other scenario builds on. */
	uint64_t ss_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS |
			STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT);
	struct scenario scenario = {
		.name = "kcdata",
		.flags = ss_flags,
	};

	T_LOG("taking kcdata stackshot");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
310
/* Read the kernel's stackshot statistics via sysctl into *out. */
static void
get_stats(stackshot_stats_t *_Nonnull out)
{
	size_t len = sizeof(*out);
	memset(out, 0, len);
	int rc = sysctlbyname("kern.stackshot_stats", out, &len, NULL, 0);
	T_WITH_ERRNO; T_ASSERT_POSIX_SUCCESS(rc, "reading \"kern.stackshot_stats\" sysctl should succeed");
	/* the kernel must have filled the entire structure */
	T_EXPECT_EQ(len, sizeof(*out), "kernel should update full stats structure");
}
320
/* Log one stats snapshot, converting mach time to seconds.nanoseconds. */
static void
log_stats(mach_timebase_info_data_t timebase, uint64_t now, const char *name, stackshot_stats_t stat)
{
	const uint64_t nanosec = 1000000000llu;
	uint64_t ago_ns = (now - stat.ss_last_start) * timebase.numer / timebase.denom;
	uint64_t last_ns = (stat.ss_last_end - stat.ss_last_start) * timebase.numer / timebase.denom;
	uint64_t total_ns = (stat.ss_duration) * timebase.numer / timebase.denom;

	T_LOG("%s: %8lld stackshots, %10lld.%09lld total nsecs, last %lld.%09lld secs ago, %lld.%09lld secs long",
		name, stat.ss_count,
		total_ns / nanosec, total_ns % nanosec,
		ago_ns / nanosec, ago_ns % nanosec,
		last_ns / nanosec, last_ns % nanosec);
}
335
T_DECL(stats, "test that stackshot stats can be read out and change when a stackshot occurs")
{
	mach_timebase_info_data_t timebase = {0, 0};
	stackshot_stats_t before, after;

	mach_timebase_info(&timebase);

	struct scenario scenario = {
		.name = "kcdata",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT),
	};

	/* sample the counters, take a stackshot, then sample again */
	get_stats(&before);

	T_LOG("taking kcdata stackshot");
	take_stackshot(&scenario, true, ^(__unused void *ssbuf, __unused size_t sslen) {
		(void)0;
	});

	get_stats(&after);

	uint64_t now = mach_absolute_time();

	log_stats(timebase, now, "  pre", before);
	log_stats(timebase, now, " post", after);

	int64_t delta_stackshots = (int64_t)(after.ss_count - before.ss_count);
	int64_t delta_duration = (int64_t)(after.ss_duration - before.ss_duration) * (int64_t)timebase.numer / (int64_t)timebase.denom;
	/* normalize the fractional part to be non-negative for printing */
	int64_t delta_nsec = delta_duration % 1000000000ll;
	if (delta_nsec < 0) {
		delta_nsec += 1000000000ll;
	}
	T_LOG("delta: %+8lld stackshots, %+10lld.%09lld total nsecs", delta_stackshots, delta_duration / 1000000000ll, delta_nsec);

	T_EXPECT_LT(before.ss_last_start, before.ss_last_end, "pre: stackshot should take time");
	T_EXPECT_LT(before.ss_count, after.ss_count, "stackshot count should increase when a stackshot is taken");
	T_EXPECT_LT(before.ss_duration, after.ss_duration, "stackshot duration should increase when a stackshot is taken");
	T_EXPECT_LT(before.ss_last_end, after.ss_last_start, "previous end should be less than new start after a stackshot");
	T_EXPECT_LT(after.ss_last_start, after.ss_last_end, "post: stackshot should take time");
}
376
T_DECL(kcdata_faulting, "test that kcdata stackshots while faulting can be taken and parsed")
{
	/* Allow the kernel to fault in pages while walking stacks and UUIDs. */
	uint64_t ss_flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
			| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
			| STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING);
	struct scenario scenario = {
		.name = "faulting",
		.flags = ss_flags,
	};

	T_LOG("taking faulting stackshot");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
391
T_DECL(bad_flags, "test a poorly-formed stackshot syscall")
{
	struct scenario scenario = {
		/* STACKSHOT_SAVE_IN_KERNEL_BUFFER is not allowed from user space */
		.flags = STACKSHOT_SAVE_IN_KERNEL_BUFFER,
		.should_fail = true,
	};

	T_LOG("attempting to take stackshot with kernel-only flag");
	take_stackshot(&scenario, true, ^(__unused void *ssbuf, __unused size_t sslen) {
		/* the capture must fail, so the data callback must never run */
		T_ASSERT_FAIL("stackshot data callback called");
	});
}
404
T_DECL(delta, "test delta stackshots")
{
	struct scenario full = {
		.name = "delta",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("taking full stackshot");
	take_stackshot(&full, false, ^(void *ssbuf, size_t sslen) {
		/* anchor the delta capture at the full capture's timestamp */
		uint64_t stackshot_time = stackshot_timestamp(ssbuf, sslen);

		T_LOG("taking delta stackshot since time %" PRIu64, stackshot_time);

		parse_stackshot(0, ssbuf, sslen, nil);

		struct scenario delta_scenario = {
			.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
					| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
					| STACKSHOT_COLLECT_DELTA_SNAPSHOT),
			.since_timestamp = stackshot_time
		};

		take_stackshot(&delta_scenario, false, ^(void *dssbuf, size_t dsslen) {
			parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, nil);
		});
	});
}
433
T_DECL(shared_cache_layout, "test stackshot inclusion of shared cache layout")
{
	size_t shared_cache_length;
	const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);

	/* bail out early on configurations with no usable shared cache */
	if (cache_header == NULL) {
		T_SKIP("Device not running with shared cache, skipping test...");
	}
	if (shared_cache_length == 0) {
		T_SKIP("dyld reports that currently running shared cache has zero length");
	}

	struct scenario scenario = {
		.name = "shared_cache_layout",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT),
	};

	T_LOG("taking stackshot with STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT set");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_LAYOUT, ssbuf, sslen, nil);
	});
}
458
T_DECL(stress, "test that taking stackshots for 60 seconds doesn't crash the system")
{
	const uint64_t test_duration_ns = 60ULL /* seconds */ * 1000000000ULL;

	struct scenario scenario = {
		.name = "stress",
		.quiet = true,
		.flags = (STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_THREAD_WAITINFO |
				STACKSHOT_SAVE_LOADINFO |
				STACKSHOT_SAVE_KEXT_LOADINFO |
				STACKSHOT_GET_GLOBAL_MEM_STATS |
				STACKSHOT_SAVE_IMP_DONATION_PIDS |
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT |
				STACKSHOT_THREAD_GROUP |
				STACKSHOT_SAVE_JETSAM_COALITIONS |
				STACKSHOT_ASID |
				0),
	};

	/* capture repeatedly until the time budget is exhausted */
	uint64_t begin_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
	while (clock_gettime_nsec_np(CLOCK_MONOTONIC) - begin_ns < test_duration_ns) {
		take_stackshot(&scenario, false, ^(void * __unused ssbuf,
				size_t __unused sslen) {
			printf(".");
			fflush(stdout);
		});

		/*
		 * After the first stackshot, there's no point in continuing to
		 * write them to disk, and it wears down the SSDs.
		 */
		scenario.no_recordfile = true;

		/* Leave some time for the testing infrastructure to catch up */
		usleep(10000);
	}
	printf("\n");
}
500
T_DECL(dispatch_queue_label, "test that kcdata stackshots contain libdispatch queue labels")
{
#if TARGET_OS_WATCH
	T_SKIP("This test is flaky on watches: 51663346");
#endif

	struct scenario scenario = {
		.name = "kcdata",
		.flags = (STACKSHOT_GET_DQ | STACKSHOT_KCDATA_FORMAT),
	};

	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "dqlabel child semaphore");

	dispatch_semaphore_t parent_done_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(parent_done_sem, "dqlabel parent semaphore");

	dispatch_queue_t dq = dispatch_queue_create(TEST_STACKSHOT_QUEUE_LABEL, NULL);
	T_QUIET; T_ASSERT_NOTNULL(dq, "dispatch queue");

	/* start the helper thread; it parks on our labeled queue until we finish */
	dispatch_async(dq, ^{
		dispatch_semaphore_signal(child_ready_sem);

		dispatch_semaphore_wait(parent_done_sem, DISPATCH_TIME_FOREVER);
	});

	/* block behind the child starting up */
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("taking kcdata stackshot with libdispatch queue labels");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL, ssbuf, sslen, nil);
	});

	dispatch_semaphore_signal(parent_done_sem);
}
540
541#define CACHEADDR_ENV "STACKSHOT_TEST_DYLDADDR"
T_HELPER_DECL(spawn_reslide_child, "child process to spawn with alternate slide")
{
	/* parse the parent's shared-cache address out of the environment */
	const char *cacheaddr_env = getenv(CACHEADDR_ENV);
	T_QUIET; T_ASSERT_NOTNULL(cacheaddr_env, "getenv("CACHEADDR_ENV")");

	errno = 0;
	char *endptr = NULL;
	uintmax_t v = strtoumax(cacheaddr_env, &endptr, 16);	/* read hex value */
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NE(v, 0l, "getenv(%s) = \"%s\" should be a non-zero hex number", CACHEADDR_ENV, cacheaddr_env);
	T_QUIET; T_ASSERT_EQ(*endptr, 0, "getenv(%s) = \"%s\" endptr \"%s\" should be empty", CACHEADDR_ENV, cacheaddr_env, endptr);

	const void *prevaddr = (const void *)v;
	size_t shared_cache_len;
	const void *addr = _dyld_get_shared_cache_range(&shared_cache_len);
	T_QUIET; T_ASSERT_NOTNULL(addr, "shared cache address");

	/* SIGUSR2 = our cache landed at the parent's address; SIGUSR1 = it re-slid */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(getppid(), (addr == prevaddr) ? SIGUSR2 : SIGUSR1), "signaled parent to take stackshot");
	for (;;) {
		(void) pause();		/* parent will kill -9 us */
	}
}
566
/*
 * Spawns a child with _POSIX_SPAWN_RESLIDE, tells it (via the environment)
 * where our shared cache is mapped, and waits for a signal reporting whether
 * the child's cache landed at the same address.  Then verifies the stackshot's
 * per-task shared-cache flags agree.
 */
T_DECL(shared_cache_flags, "tests stackshot's task_ss_flags for the shared cache")
{
	posix_spawnattr_t		attr;
	char *env_addr;
	char path[PATH_MAX];
	__block bool child_same_addr = false;

	uint32_t path_size = sizeof(path);
	T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *args[] = { path, "-n", "spawn_reslide_child", NULL };
	pid_t pid;
	size_t shared_cache_len;
	const void *addr;

	dispatch_source_t child_diffsig_src, child_samesig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "shared_cache child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	/* ignore the default dispositions; the dispatch sources observe instead */
	signal(SIGUSR1, SIG_IGN);
	signal(SIGUSR2, SIG_IGN);
	child_samesig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_samesig_src, "dispatch_source_create (child_samesig_src)");
	child_diffsig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR2, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_diffsig_src, "dispatch_source_create (child_diffsig_src)");

	/* child will signal us depending on if their addr is the same or different */
	/* NOTE(review): the names are inverted relative to their meaning — the child
	 * sends SIGUSR2 when the address is the SAME (see spawn_reslide_child), so
	 * "child_samesig_src" (SIGUSR1) actually fires for a DIFFERENT address.
	 * The handlers below set child_same_addr correctly either way. */
	dispatch_source_set_event_handler(child_samesig_src, ^{ child_same_addr = false; dispatch_semaphore_signal(child_ready_sem); });
	dispatch_source_set_event_handler(child_diffsig_src, ^{ child_same_addr = true; dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_samesig_src);
	dispatch_activate(child_diffsig_src);

	addr = _dyld_get_shared_cache_range(&shared_cache_len);
	T_QUIET; T_ASSERT_NOTNULL(addr, "shared cache address");

	/* publish our cache address to the child through the environment */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(asprintf(&env_addr, "%p", addr), "asprintf of env_addr succeeded");
	T_QUIET; T_ASSERT_POSIX_SUCCESS(setenv(CACHEADDR_ENV, env_addr, true), "setting "CACHEADDR_ENV" to %s", env_addr);

	/* request a fresh shared-cache slide for the child */
	T_QUIET; T_ASSERT_POSIX_ZERO(posix_spawnattr_init(&attr), "posix_spawnattr_init");
	T_QUIET; T_ASSERT_POSIX_ZERO(posix_spawnattr_setflags(&attr, _POSIX_SPAWN_RESLIDE), "posix_spawnattr_setflags");
	int sp_ret = posix_spawn(&pid, path, NULL, &attr, args, environ);
	T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);
	T_LOG("received signal from child (%s), capturing stackshot", child_same_addr ? "same shared cache addr" : "different shared cache addr");

	struct scenario scenario = {
		.name = "shared_cache_flags",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
		int status;
		/* First kill the child so we can reap it */
		T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "killing spawned process");
		T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on spawned child");
		T_QUIET; T_ASSERT_EQ(!!WIFSIGNALED(status), 1, "waitpid status should be signalled");
		T_QUIET; T_ASSERT_EQ(WTERMSIG(status), SIGKILL, "waitpid status should be SIGKILLed");

		parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_FLAGS, ssbuf, sslen,
			@{sharedcache_child_pid_key: @(pid), sharedcache_child_sameaddr_key: @(child_same_addr ? 1 : 0)});
	});
}
634
/*
 * Uses the debug.proc_exit_lpexit_spin* sysctls (available with the
 * enable_proc_exit_lpexit_spin=1 boot-arg) to wedge a killed child at each
 * supported spin position inside proc_exit(), then takes a stackshot and
 * verifies the transitioning task is reported.
 */
T_DECL(transitioning_tasks, "test that stackshot contains transitioning task info", T_META_BOOTARGS_SET("enable_proc_exit_lpexit_spin=1"))
{
    int32_t sysctlValue = -1, numAttempts =0;
    char path[PATH_MAX];
    uint32_t path_size = sizeof(path);
    T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
    char *args[] = { path, "-n", "exec_child_preexec", NULL };

    dispatch_source_t child_sig_src;
    dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
    T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore");

    dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
    T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

    pid_t pid;

    /* child signals SIGUSR1 when it is ready (both pre- and post-exec) */
    signal(SIGUSR1, SIG_IGN);
    child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
    T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

    dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
    dispatch_activate(child_sig_src);

    /* pid -1 clears any stale spin target from a previous run */
    T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spin_pid", NULL, NULL, &sysctlValue, sizeof(sysctlValue)), "set debug.proc_exit_lpexit_spin_pid=-1");

    int proc_exit_spin_pos = 0 ;

    /* iterate spin positions until the kernel rejects the next one */
    while (0 == sysctlbyname("debug.proc_exit_lpexit_spin_pos", NULL, NULL, &proc_exit_spin_pos, sizeof(proc_exit_spin_pos))) {

        T_LOG(" ##### Testing while spinning in proc_exit at position %d ##### ", proc_exit_spin_pos);

        int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
        T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

        /* wait for the pre-exec child to report ready */
        dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

        struct proc_uniqidentifierinfo proc_info_data = { };
        int retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
        T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
        T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");

        T_ASSERT_POSIX_SUCCESS(kill(pid, SIGUSR1), "signaled pre-exec child to exec");

        /* wait for the post-exec child to report ready */
        dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

        /* arrange for this pid to spin in proc_exit(), then kill it */
        T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spin_pid", NULL, NULL, &pid, sizeof(pid)), "set debug.proc_exit_lpexit_spin_pid =  %d, ", pid);

        T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "kill post-exec child %d", pid);

        /* poll until the child is observed spinning in proc_exit().
         * NOTE(review): numAttempts is never reset between loop iterations,
         * so failed polls accumulate across spin positions — confirm intended */
        sysctlValue = 0;
        size_t len = sizeof(sysctlValue);
        while (numAttempts < 5) {
            T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spinning", &sysctlValue, &len, NULL, 0), "retrieve debug.proc_exit_lpexit_spinning");
            if (sysctlValue != 1) numAttempts++;
            else break;
            sleep(1);
        }

        T_ASSERT_EQ_UINT(sysctlValue, 1, "find spinning task in proc_exit()");

        struct scenario scenario = {
            .name = "transitioning_tasks",
            .flags = (STACKSHOT_KCDATA_FORMAT)
        };

        take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
            parse_stackshot(PARSE_STACKSHOT_TRANSITIONING, ssbuf, sslen, @{transitioning_pid_key: @(pid)});

            // Kill the child
            int sysctlValueB = -1;
            T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spin_pid", NULL, NULL, &sysctlValueB, sizeof(sysctlValueB)), "set debug.proc_exit_lpexit_spin_pid=-1");
            sleep(1);
            size_t blen = sizeof(sysctlValueB);
            T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spinning", &sysctlValueB, &blen, NULL, 0), "retrieve debug.proc_exit_lpexit_spinning");
            T_ASSERT_EQ_UINT(sysctlValueB, 0, "make sure nothing is spining in proc_exit()");
            int status;
            T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on post-exec child");
        });

        proc_exit_spin_pos++;
    }

}
719
/*
 * Pthread entry point: signals the semaphore passed via `arg`, then wedges
 * itself in the kernel with the kern.wedge_thread sysctl.
 */
static void *stuck_sysctl_thread(void *arg) {
	dispatch_semaphore_t started_sem = *(dispatch_semaphore_t *)arg;
	int val = 1;

	dispatch_semaphore_signal(started_sem);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.wedge_thread", NULL, NULL, &val, sizeof(val)), "wedge child thread");

	return NULL;
}
729
T_HELPER_DECL(zombie_child, "child process to sample as a zombie")
{
	dispatch_semaphore_t child_thread_started = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_thread_started, "zombie child thread semaphore");

	/* spawn another thread to get stuck in the kernel, then call exit() to become a zombie */
	pthread_t wedged_thread;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&wedged_thread, NULL, stuck_sysctl_thread, &child_thread_started), "pthread_create");

	dispatch_semaphore_wait(child_thread_started, DISPATCH_TIME_FOREVER);

	/* sleep for a bit in the hope of ensuring that the other thread has called the sysctl before we signal the parent */
	usleep(100);
	T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take stackshot");

	exit(0);
}
747
/*
 * Spawns zombie_child (which wedges one thread in the kernel and then exits),
 * waits for it to become a zombie, and verifies a stackshot still reports it.
 */
T_DECL(zombie, "tests a stackshot of a zombie task with a thread stuck in the kernel")
{
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *args[] = { path, "-n", "zombie_child", NULL };

	dispatch_source_t child_sig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "zombie child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	pid_t pid;

	T_LOG("spawning a child");

	/* child sends SIGUSR1 once its wedged thread is (probably) in the kernel */
	signal(SIGUSR1, SIG_IGN);
	child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

	dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_sig_src);

	int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("received signal from child, capturing stackshot");

	/* poll until the child either disappears from proc_pidinfo (already a
	 * zombie) or is marked as exiting */
	struct proc_bsdshortinfo bsdshortinfo;
	int retval, iterations_to_wait = 10;

	while (iterations_to_wait > 0) {
		retval = proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 0, &bsdshortinfo, sizeof(bsdshortinfo));
		if ((retval == 0) && errno == ESRCH) {
			T_LOG("unable to find child using proc_pidinfo, assuming zombie");
			break;
		}

		T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(retval, 0, "proc_pidinfo(PROC_PIDT_SHORTBSDINFO) returned a value > 0");
		T_QUIET; T_ASSERT_EQ(retval, (int)sizeof(bsdshortinfo), "proc_pidinfo call for PROC_PIDT_SHORTBSDINFO returned expected size");

		if (bsdshortinfo.pbsi_flags & PROC_FLAG_INEXIT) {
			T_LOG("child proc info marked as in exit");
			break;
		}

		iterations_to_wait--;
		if (iterations_to_wait == 0) {
			/*
			 * This will mark the test as failed but let it continue so we
			 * don't leave a process stuck in the kernel.
			 */
			T_FAIL("unable to discover that child is marked as exiting");
		}

		/* Give the child a few more seconds to make it to exit */
		sleep(5);
	}

	/* Give the child some more time to make it through exit */
	sleep(10);

	struct scenario scenario = {
		.name = "zombie",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
		/* First unwedge the child so we can reap it */
		int val = 1, status;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.unwedge_thread", NULL, NULL, &val, sizeof(val)), "unwedge child");

		T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on zombie child");

		parse_stackshot(PARSE_STACKSHOT_ZOMBIE, ssbuf, sslen, @{zombie_child_pid_key: @(pid)});
	});
}
830
/*
 * Helper: first stage of the exec test.  Signals the parent (which then
 * records a timestamp), and installs a SIGUSR1 handler that execs this
 * binary into the "exec_child_postexec" helper when the parent fires it.
 */
T_HELPER_DECL(exec_child_preexec, "child process pre-exec")
{
	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	signal(SIGUSR1, SIG_IGN);
	dispatch_source_t parent_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(parent_sig_src, "dispatch_source_create (child_sig_src)");
	dispatch_source_set_event_handler(parent_sig_src, ^{

		// Parent took a timestamp then signaled us: exec into the next process

		char path[PATH_MAX];
		uint32_t path_size = sizeof(path);
		T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
		char *args[] = { path, "-n", "exec_child_postexec", NULL };

		T_QUIET; T_ASSERT_POSIX_ZERO(execve(args[0], args, NULL), "execing into exec_child_postexec");
	});
	dispatch_activate(parent_sig_src);

	T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take timestamp");

	/*
	 * The execve() in the signal handler should replace this image long
	 * before the sleep expires.
	 * NOTE(review): reaching the T_FAIL below means the exec never
	 * happened (signal lost or handler never ran), yet the message reads
	 * as if the signal *was* received — confirm the intended wording.
	 */
	sleep(100);
	// Should never get here
	T_FAIL("Received signal to exec from parent");
}
858
/*
 * Helper: second stage of the exec test.  Runs post-exec, signals the
 * parent to take its delta stackshot, then idles until the parent kills
 * us with SIGKILL.
 */
T_HELPER_DECL(exec_child_postexec, "child process post-exec to sample")
{
	T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take stackshot");
	/* Parent SIGKILLs us after its stackshot completes. */
	sleep(100);
	// Should never get here
	T_FAIL("Killed by parent");
}
866
/*
 * Spawns the pre-exec child helper, records a timestamp when it is ready,
 * signals it to exec into "exec_child_postexec", then takes a delta
 * stackshot since that timestamp.  parse_stackshot (with
 * PARSE_STACKSHOT_POSTEXEC | PARSE_STACKSHOT_DELTA) verifies that the
 * execed task appears as a full task snapshot inside the delta stackshot.
 */
T_DECL(exec, "test getting full task snapshots for a task that execs")
{
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *args[] = { path, "-n", "exec_child_preexec", NULL };

	dispatch_source_t child_sig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	pid_t pid;

	T_LOG("spawning a child");

	/* SIGUSR1 from the child drives both rendezvous points below. */
	signal(SIGUSR1, SIG_IGN);
	child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

	dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_sig_src);

	int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

	/* First signal: pre-exec child is set up; timestamp the delta base. */
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);
	uint64_t start_time = mach_absolute_time();

	/*
	 * NOTE(review): this is the *parent's* unique pid; parse_stackshot
	 * presumably uses it as a reference when validating the post-exec
	 * child's new unique pid — confirm against parse_stackshot.
	 */
	struct proc_uniqidentifierinfo proc_info_data = { };
	int retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
	T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");
	uint64_t unique_pid = proc_info_data.p_uniqueid;

	T_LOG("received signal from pre-exec child, unique_pid is %llu, timestamp is %llu", unique_pid, start_time);

	T_ASSERT_POSIX_SUCCESS(kill(pid, SIGUSR1), "signaled pre-exec child to exec");

	/* Second signal: the post-exec image is running. */
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("received signal from post-exec child, capturing stackshot");

	struct scenario scenario = {
		.name = "exec",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				  | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
				  | STACKSHOT_COLLECT_DELTA_SNAPSHOT),
		.since_timestamp = start_time
	};

	take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
		// Kill the child
		int status;
		T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "kill post-exec child %d", pid);
		T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on post-exec child");

		parse_stackshot(PARSE_STACKSHOT_POSTEXEC | PARSE_STACKSHOT_DELTA, ssbuf, sslen, @{postexec_child_unique_pid_key: @(unique_pid)});
	});
}
929
/*
 * Repeatedly stackshots a child that exec()s itself in a loop until one of
 * the shots catches it mid-exec; the parser reports the two container IDs
 * (old and new task) via the found callback, and they must differ.
 */
T_DECL(exec_inprogress, "test stackshots of processes in the middle of exec")
{
	pid_t pid;
	/* a BASH quine which execs itself as long as the parent doesn't exit */
	char *bash_prog = "[[ $PPID -ne 1 ]] && exec /bin/bash -c \"$0\" \"$0\"";
	char *args[] = { "/bin/bash", "-c", bash_prog, bash_prog, NULL };

	posix_spawnattr_t sattr;
	T_ASSERT_POSIX_ZERO(posix_spawnattr_init(&sattr), "posix_spawnattr_init");
	T_ASSERT_POSIX_ZERO(posix_spawn(&pid, args[0], NULL, &sattr, args, NULL), "spawn exec_inprogress_child");

	struct scenario scenario = {
		.name = "exec_inprogress",
		.flags = (STACKSHOT_KCDATA_FORMAT),
		.target_pid = pid,
	};

	__block bool found = false;
	__block uint64_t cid1 = 0, cid2 = 0;
	const int tries_limit = 30;
	int tries;

	/* Keep sampling until the parser reports a mid-exec task or we give up. */
	for (tries = 0; tries < tries_limit && !found; tries++) {
		take_stackshot(&scenario, false,
		    ^( void *ssbuf, size_t sslen) {
			parse_stackshot(PARSE_STACKSHOT_EXEC_INPROGRESS,
			    ssbuf, sslen, @{
				exec_inprogress_pid_key: @(pid),
				exec_inprogress_found_key: ^(uint64_t id1, uint64_t id2) { found = true; cid1 = id1; cid2 = id2; }});
		});
	}
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "killing exec loop");
	T_ASSERT_TRUE(found, "able to find our execing process mid-exec in %d tries", tries);
	T_ASSERT_NE(cid1, cid2, "container IDs for in-progress exec are unique");
	T_PASS("found mid-exec process in %d tries", tries);
}
966
#ifdef _LP64
/*
 * Swift concurrency signs AsyncContext fields with ptrauth on arm64e;
 * mirror the discriminators the Swift runtime uses so our fake contexts
 * authenticate correctly on ptrauth-enabled targets, and compile away to
 * nothing elsewhere.
 */
#if __has_feature(ptrauth_calls)
#define __ptrauth_swift_async_context_parent \
  __ptrauth(ptrauth_key_process_independent_data, 1, 0xbda2)
#define __ptrauth_swift_async_context_resume \
  __ptrauth(ptrauth_key_function_pointer, 1, 0xd707)
#else
#define __ptrauth_swift_async_context_parent
#define __ptrauth_swift_async_context_resume
#endif
// Add 1 to match the symbolication aid added by the stackshot backtracer.
#define asyncstack_frame(x) ((uintptr_t)(void *)ptrauth_strip((void *)(x), ptrauth_key_function_pointer) + 1)

// This struct fakes the Swift AsyncContext struct which is used by
// the Swift concurrency runtime. We only care about the first 2 fields.
struct fake_async_context {
	/* parent (caller) async context in the chain; NULL terminates it */
	struct fake_async_context* __ptrauth_swift_async_context_parent next;
	/* continuation entry point recorded for this async frame */
	void(*__ptrauth_swift_async_context_resume resume_pc)(void);
};
986
/*
 * Leaf functions used as the fake Swift async "resume" entry points.
 * Their (symbolication-adjusted, see asyncstack_frame()) addresses are
 * what the stackshot backtracer is expected to report for the faked async
 * stack, so they must remain distinct symbols; the bodies are
 * intentionally empty.  Declared (void) per C convention — an empty
 * parameter list in a definition means "no parameters" but is the
 * deprecated form.
 */
static void
level1_func(void)
{
}
static void
level2_func(void)
{
}
995
// Create a chain of fake async contexts; sync with asyncstack_expected_stack below
// (level2 is the "current" frame whose parent is level1; 16-byte alignment
// matches what the backtracer expects for AsyncContext pointers).
static alignas(16) struct fake_async_context level1 = { 0, level1_func };
static alignas(16) struct fake_async_context level2 = { &level1, level2_func };

/* Semaphore pair used to rendezvous with the asyncstack worker thread. */
struct async_test_semaphores {
	dispatch_semaphore_t child_ready_sem;	/* signal parent we're ready */
	dispatch_semaphore_t child_exit_sem;	/* parent tells us to go away */
};

/* Thread name the parser uses to locate the worker in the stackshot. */
#define	ASYNCSTACK_THREAD_NAME "asyncstack_thread"
1006
/*
 * Runs on the asyncstack worker thread: names the thread so the stackshot
 * parser can find it, then blocks until the main thread has taken its
 * stackshot.  noinline/not_tail_called keep this frame (and the caller's
 * faked async frame) live on the stack while the stackshot runs.
 */
static void __attribute__((noinline, not_tail_called))
expect_asyncstack(void *arg)
{
	struct async_test_semaphores *async_ts = arg;

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_setname_np(ASYNCSTACK_THREAD_NAME),
	     "set thread name to %s", ASYNCSTACK_THREAD_NAME);

	/* Tell the main thread we're all set up, then wait for permission to exit */
	dispatch_semaphore_signal(async_ts->child_ready_sem);
	dispatch_semaphore_wait(async_ts->child_exit_sem, DISPATCH_TIME_FOREVER);
	usleep(1);	/* make sure we don't tailcall semaphore_wait */
}
1020
/*
 * Worker thread entry point: fakes a Swift async frame by planting the
 * address of the level2 fake AsyncContext in the slot just below the
 * saved FP and setting bit 60 of the saved FP (the async-frame marker the
 * stackshot backtracer looks for), then parks in expect_asyncstack().
 * The body is extremely stack-layout sensitive — see inline comments.
 */
static void *
asyncstack_thread(void *arg)
{
	uint64_t *fp = __builtin_frame_address(0);
	// We cannot use a variable of pointer type, because this ABI is valid
	// on arm64_32 where pointers are 32bits, but the context pointer will
	// still be stored in a 64bits slot on the stack.
#if __has_feature(ptrauth_calls)
#define __stack_context_auth __ptrauth(ptrauth_key_process_dependent_data, 1, \
	        0xc31a)
	struct fake_async_context * __stack_context_auth ctx = &level2;
#else // __has_feature(ptrauth_calls)
	/* struct fake_async_context * */uint64_t ctx  = (uintptr_t)&level2;
#endif // !__has_feature(ptrauth_calls)

	// The signature of an async frame on the OS stack is:
	// [ <AsyncContext address>, <Saved FP | (1<<60)>, <return address> ]
	// The Async context must be right before the saved FP on the stack. This
	// should happen naturally in an optimized build as it is the only
	// variable on the stack.
	// This function cannot use T_ASSERT_* becuse it changes the stack
	// layout.
	assert((uintptr_t)fp - (uintptr_t)&ctx == 8);

	// Modify the saved FP on the stack to include the async frame marker
	*fp |= (0x1ULL << 60);
	expect_asyncstack(arg);
	return NULL;
}
1050
/*
 * Validates that stackshot reports the fake Swift async stack set up by
 * asyncstack_thread(): the captured thread must show level2_func and
 * level1_func as its async frames.
 */
T_DECL(asyncstack, "test swift async stack entries")
{
	struct async_test_semaphores semaphores = {
	    .child_ready_sem = dispatch_semaphore_create(0),
	    .child_exit_sem = dispatch_semaphore_create(0),
	};
	T_QUIET; T_ASSERT_NOTNULL(semaphores.child_ready_sem, "child_ready_sem alloc");
	T_QUIET; T_ASSERT_NOTNULL(semaphores.child_exit_sem, "child_exit_sem alloc");

	__block uint64_t threadid = 0;
	pthread_t worker;
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&worker, NULL, asyncstack_thread, &semaphores), "pthread_create");
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(worker, &threadid), "pthread_threadid_np");

	/* Wait until the worker has its faked async frame chain in place. */
	dispatch_semaphore_wait(semaphores.child_ready_sem, DISPATCH_TIME_FOREVER);

	struct scenario scenario = {
		.name = "asyncstack",
		.flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO,
	};
	take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_ASYNCSTACK, ssbuf, sslen, @{
		    asyncstack_expected_threadid_key: @(threadid),
		       asyncstack_expected_stack_key: @[ @(asyncstack_frame(level2_func)), @(asyncstack_frame(level1_func)) ],
		});
	});

	/* Release the worker and reap it. */
	dispatch_semaphore_signal(semaphores.child_exit_sem);
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(worker, NULL), "wait for thread");
}
1082#endif
1083
1084static uint32_t
1085get_user_promotion_basepri(void)
1086{
1087	mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
1088	struct thread_policy_state thread_policy;
1089	boolean_t get_default = FALSE;
1090	mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
1091
1092	kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
1093	    (thread_policy_t)&thread_policy, &count, &get_default);
1094	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
1095	return thread_policy.thps_user_promotion_basepri;
1096}
1097
1098static int
1099get_pri(thread_t thread_port)
1100{
1101	kern_return_t kr;
1102
1103	thread_extended_info_data_t extended_info;
1104	mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
1105	kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
1106	    (thread_info_t)&extended_info, &count);
1107
1108	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
1109
1110	return extended_info.pth_curpri;
1111}
1112
1113
/*
 * Turnstile single-hop test: the main thread holds lock A; a UTILITY
 * queue (q1) takes lock B then blocks on A; a USER_INITIATED queue (q2)
 * then blocks on B.  The resulting turnstile push should promote the main
 * thread — we poll until the promotion is visible, then take a stackshot
 * with STACKSHOT_THREAD_WAITINFO and parse its turnstile info.
 */
T_DECL(turnstile_singlehop, "turnstile single hop test")
{
	dispatch_queue_t dq1, dq2;
	dispatch_semaphore_t sema_x;
	dispatch_queue_attr_t dq1_attr, dq2_attr;
	__block qos_class_t main_qos = 0;
	__block int main_relpri = 0, main_relpri2 = 0, main_afterpri = 0;
	struct scenario scenario = {
		.name = "turnstile_singlehop",
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};
	dq1_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_UTILITY, 0);
	dq2_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INITIATED, 0);
	pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_t *lockap = &lock_a, *lockbp = &lock_b;

	dq1 = dispatch_queue_create("q1", dq1_attr);
	dq2 = dispatch_queue_create("q2", dq2_attr);
	sema_x = dispatch_semaphore_create(0);

	/* Main takes lock A, so q1's second lock below will block on us. */
	pthread_mutex_lock(lockap);
	dispatch_async(dq1, ^{
		/* q1 owns lock B, then blocks acquiring A (held by main). */
		pthread_mutex_lock(lockbp);
		T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
		T_LOG("The priority of q1 is %d\n", get_pri(mach_thread_self()));
		dispatch_semaphore_signal(sema_x);
		pthread_mutex_lock(lockap);
	});
	dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);

	T_LOG("Async1 completed");

	/* Drop main to UTILITY so a promotion is observable later. */
	pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
	T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
	T_LOG("The priority of main is %d\n", get_pri(mach_thread_self()));
	main_relpri = get_pri(mach_thread_self());

	dispatch_async(dq2, ^{
		/* q2 (higher QoS) blocks on B, pushing on q1 and through it on main. */
		T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri2), "get qos class");
		T_LOG("The priority of q2 is %d\n", get_pri(mach_thread_self()));
		dispatch_semaphore_signal(sema_x);
		pthread_mutex_lock(lockbp);
	});
	dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);

	T_LOG("Async2 completed");

	/* Spin until the turnstile promotion reaches the main thread. */
	while (1) {
		main_afterpri = (int) get_user_promotion_basepri();
		if (main_relpri != main_afterpri) {
			T_LOG("Success with promotion pri is %d", main_afterpri);
			break;
		}

		usleep(100);
	}

	take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_TURNSTILEINFO, ssbuf, sslen, nil);
	});
}
1177
1178
1179static void
1180expect_instrs_cycles_in_stackshot(void *ssbuf, size_t sslen)
1181{
1182	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
1183
1184	bool in_task = false;
1185	bool in_thread = false;
1186	bool saw_instrs_cycles = false;
1187	iter = kcdata_iter_next(iter);
1188
1189	KCDATA_ITER_FOREACH(iter) {
1190		switch (kcdata_iter_type(iter)) {
1191		case KCDATA_TYPE_CONTAINER_BEGIN:
1192			switch (kcdata_iter_container_type(iter)) {
1193			case STACKSHOT_KCCONTAINER_TASK:
1194				in_task = true;
1195				saw_instrs_cycles = false;
1196				break;
1197
1198			case STACKSHOT_KCCONTAINER_THREAD:
1199				in_thread = true;
1200				saw_instrs_cycles = false;
1201				break;
1202
1203			default:
1204				break;
1205			}
1206			break;
1207
1208		case STACKSHOT_KCTYPE_INSTRS_CYCLES:
1209			saw_instrs_cycles = true;
1210			break;
1211
1212		case KCDATA_TYPE_CONTAINER_END:
1213			if (in_thread) {
1214				T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
1215						"saw instructions and cycles in thread");
1216				in_thread = false;
1217			} else if (in_task) {
1218				T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
1219						"saw instructions and cycles in task");
1220				in_task = false;
1221			}
1222
1223		default:
1224			break;
1225		}
1226	}
1227}
1228
1229static void
1230skip_if_monotonic_unsupported(void)
1231{
1232	int supported = 0;
1233	size_t supported_size = sizeof(supported);
1234	int ret = sysctlbyname("kern.monotonic.supported", &supported,
1235			&supported_size, 0, 0);
1236	if (ret < 0 || !supported) {
1237		T_SKIP("monotonic is unsupported");
1238	}
1239}
1240
/*
 * Takes a full stackshot with STACKSHOT_INSTRS_CYCLES and verifies every
 * task and thread container carries an instructions-and-cycles record.
 */
T_DECL(instrs_cycles, "test a getting instructions and cycles in stackshot")
{
	skip_if_monotonic_unsupported();

	struct scenario scenario = {
		.name = "instrs-cycles",
		.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO
				| STACKSHOT_INSTRS_CYCLES),
	};

	T_LOG("attempting to take stackshot with instructions and cycles");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
		expect_instrs_cycles_in_stackshot(ssbuf, sslen);
	});
}
1257
/*
 * Takes a full stackshot with instructions and cycles, then a delta
 * stackshot relative to its timestamp, and validates that both carry
 * per-task and per-thread instruction/cycle counts.
 */
T_DECL(delta_instrs_cycles,
		"test delta stackshots with instructions and cycles")
{
	skip_if_monotonic_unsupported();

	struct scenario scenario = {
		.name = "delta-instrs-cycles",
		.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO
				| STACKSHOT_INSTRS_CYCLES),
	};

	T_LOG("taking full stackshot");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		uint64_t first_timestamp = stackshot_timestamp(ssbuf, sslen);

		T_LOG("taking delta stackshot since time %" PRIu64, first_timestamp);

		parse_stackshot(0, ssbuf, sslen, nil);
		expect_instrs_cycles_in_stackshot(ssbuf, sslen);

		struct scenario next_scenario = {
			.name = "delta-instrs-cycles-next",
			.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO
					| STACKSHOT_INSTRS_CYCLES
					| STACKSHOT_COLLECT_DELTA_SNAPSHOT),
			.since_timestamp = first_timestamp,
		};

		take_stackshot(&next_scenario, false, ^(void *dssbuf, size_t dsslen) {
			parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, nil);
			expect_instrs_cycles_in_stackshot(dssbuf, dsslen);
		});
	});
}
1292
1293static void
1294check_thread_groups_supported()
1295{
1296	int err;
1297	int supported = 0;
1298	size_t supported_size = sizeof(supported);
1299	err = sysctlbyname("kern.thread_groups_supported", &supported, &supported_size, NULL, 0);
1300
1301	if (err || !supported)
1302		T_SKIP("thread groups not supported on this system");
1303}
1304
/*
 * Takes a stackshot with STACKSHOT_THREAD_GROUP and hands the buffer to
 * the thread-group parser for validation.
 */
T_DECL(thread_groups, "test getting thread groups in stackshot")
{
	check_thread_groups_supported();

	struct scenario scenario = {
		.name = "thread-groups",
		.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO
				| STACKSHOT_THREAD_GROUP),
	};

	T_LOG("attempting to take stackshot with thread group flag");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_thread_group_stackshot(ssbuf, sslen);
	});
}
1320
/*
 * Takes a stackshot of the current process with dyld compactinfo capture
 * enabled and verifies the compactinfo record is present.
 */
T_DECL(compactinfo, "test compactinfo inclusion")
{
	struct scenario scenario = {
		.name = "compactinfo",
		.target_pid = getpid(),
		.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO
				| STACKSHOT_SAVE_DYLD_COMPACTINFO),
	};

	T_LOG("attempting to take stackshot with compactinfo flag");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_COMPACTINFO, ssbuf, sslen, nil);
	});
}
1336
/*
 * Recursively walks the IOKit registry (service plane) below `root` and
 * collects the pids of all DriverKit userspace drivers, i.e. every
 * IOUserServer instance, by parsing its IOUserClientCreator property
 * (format: "pid <n>, ...").  Returns the set of pids as NSNumbers.
 */
static NSMutableSet * find_driverkit_pids(io_registry_entry_t root) {
	NSMutableSet * driverkit_pids = [NSMutableSet setWithCapacity:3];
	io_registry_entry_t current = IO_OBJECT_NULL;
	io_iterator_t iter = IO_OBJECT_NULL;

	T_EXPECT_MACH_SUCCESS(IORegistryEntryGetChildIterator(root, kIOServicePlane, &iter), "get registry iterator");

	while ((current = IOIteratorNext(iter)) != IO_OBJECT_NULL) {
		if (_IOObjectConformsTo(current, "IOUserServer", kIOClassNameOverrideNone)) {
			CFMutableDictionaryRef cfProperties = NULL;
			NSMutableDictionary * properties;
			NSString * client_creator_info;
			NSArray<NSString *> *creator_info_array;
			pid_t pid;

			T_QUIET; T_EXPECT_MACH_SUCCESS(IORegistryEntryCreateCFProperties(current, &cfProperties, kCFAllocatorDefault, kNilOptions), "get properties");
			/* CFBridgingRelease transfers ownership to ARC. */
			properties = CFBridgingRelease(cfProperties);
			T_QUIET; T_ASSERT_NOTNULL(properties, "properties is not null");
			client_creator_info = properties[@kIOUserClientCreatorKey];
			creator_info_array = [client_creator_info componentsSeparatedByString:@","];
			/* First comma-separated field should be "pid <n>". */
			if ([creator_info_array[0] hasPrefix:@"pid"]) {
				NSArray<NSString *> *pid_info = [creator_info_array[0] componentsSeparatedByString:@" "];
				T_QUIET; T_ASSERT_EQ(pid_info.count, 2UL, "Get pid info components from %s", creator_info_array[0].UTF8String);
				pid = pid_info[1].intValue;
			} else {
				T_ASSERT_FAIL("No pid info in client creator info: %s", client_creator_info.UTF8String);
			}
			T_LOG("Found driver pid %d", pid);
			[driverkit_pids addObject:[NSNumber numberWithInt:pid]];
		} else {
			/* Not a user server itself — recurse into its children. */
			[driverkit_pids unionSet:find_driverkit_pids(current)];
		}
		IOObjectRelease(current);
	}

	IOObjectRelease(iter);
	return driverkit_pids;
}
1375
/*
 * Takes a kernel-targeted stackshot with driver threads included and
 * checks that every DriverKit process found in the IOKit registry is
 * reported by the stackshot parser.
 */
T_DECL(driverkit, "test driverkit inclusion")
{
	struct scenario scenario = {
		.name = "driverkit",
		.target_kernel = true,
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT
			    | STACKSHOT_INCLUDE_DRIVER_THREADS_IN_KERNEL),
	};

	io_registry_entry_t root = IORegistryGetRootEntry(kIOMainPortDefault);
	NSMutableSet * driverkit_pids = find_driverkit_pids(root);
	IOObjectRelease(root);

	T_LOG("expecting to find %lu driverkit processes", [driverkit_pids count]);
	T_LOG("attempting to take stackshot with STACKSHOT_INCLUDE_DRIVER_THREADS_IN_KERNEL flag");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_DRIVERKIT, ssbuf, sslen, @{
			/* parser invokes this once per driverkit pid it finds */
			driverkit_found_key: ^(pid_t pid) {
				[driverkit_pids removeObject:[NSNumber numberWithInt:pid]];
		}});
	});

	/* Every registry-discovered driver pid should have been seen. */
	T_EXPECT_EQ([driverkit_pids count], (NSUInteger)0, "found expected number of driverkit processes");
}
1400
/*
 * Validates the STACKSHOT_PAGE_TABLES and STACKSHOT_ASID output of a
 * single-task stackshot.  The page table dump is one KCDATA array of
 * uint64_t laid out as repeated segments:
 *   [ phys addr, entry count, start VA, end VA, <entry count> TTEs... ]
 * Each non-zero TTE is sanity-checked against the arm64 TTE/compressed
 * PTE format.
 * NOTE(review): ssbuf is declared void ** but is passed directly to
 * kcdata_iter() as the buffer start (callers pass the buffer itself);
 * presumably the parameter type should be void * — confirm.
 */
static void
parse_page_table_asid_stackshot(void **ssbuf, size_t sslen)
{
	bool seen_asid = false;
	bool seen_page_table_snapshot = false;
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
			"buffer provided is a stackshot");

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_PAGE_TABLES) {
				continue;
			}

			/* Exactly one page table dump is expected per stackshot. */
			T_ASSERT_FALSE(seen_page_table_snapshot, "check that we haven't yet seen a page table snapshot");
			seen_page_table_snapshot = true;

			T_ASSERT_EQ((size_t) kcdata_iter_array_elem_size(iter), sizeof(uint64_t),
				"check that each element of the pagetable dump is the expected size");

			uint64_t *pt_array = kcdata_iter_payload(iter);
			uint32_t elem_count = kcdata_iter_array_elem_count(iter);
			uint32_t j;
			bool nonzero_tte = false;
			/* Walk segment by segment; j advances by 4 header words + entries. */
			for (j = 0; j < elem_count;) {
				T_QUIET; T_ASSERT_LE(j + 4, elem_count, "check for valid page table segment header");
				uint64_t pa = pt_array[j];
				uint64_t num_entries = pt_array[j + 1];
				uint64_t start_va = pt_array[j + 2];
				uint64_t end_va = pt_array[j + 3];

				T_QUIET; T_ASSERT_NE(pa, (uint64_t) 0, "check that the pagetable physical address is non-zero");
				T_QUIET; T_ASSERT_EQ(pa % (num_entries * sizeof(uint64_t)), (uint64_t) 0, "check that the pagetable physical address is correctly aligned");
				T_QUIET; T_ASSERT_NE(num_entries, (uint64_t) 0, "check that a pagetable region has more than 0 entries");
				T_QUIET; T_ASSERT_LE(j + 4 + num_entries, (uint64_t) elem_count, "check for sufficient space in page table array");
				T_QUIET; T_ASSERT_GT(end_va, start_va, "check for valid VA bounds in page table segment header");

				for (uint32_t k = j + 4; k < (j + 4 + num_entries); ++k) {
					if (pt_array[k] != 0) {
						nonzero_tte = true;
						T_QUIET; T_ASSERT_EQ((pt_array[k] >> 48) & 0xf, (uint64_t) 0, "check that bits[48:51] of arm64 TTE are clear");
						// L0-L2 table and non-compressed L3 block entries should always have bit 1 set; assumes L0-L2 blocks will not be used outside the kernel
						bool table = ((pt_array[k] & 0x2) != 0);
						if (table) {
							T_QUIET; T_ASSERT_NE(pt_array[k] & ((1ULL << 48) - 1) & ~((1ULL << 12) - 1), (uint64_t) 0, "check that arm64 TTE physical address is non-zero");
						} else { // should be a compressed PTE
							T_QUIET; T_ASSERT_NE(pt_array[k] & 0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has at least one of bits [63:62] set");
							T_QUIET; T_ASSERT_EQ(pt_array[k] & ~0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has no other bits besides [63:62] set");
						}
					}
				}

				j += (4 + num_entries);
			}
			T_ASSERT_TRUE(nonzero_tte, "check that we saw at least one non-empty TTE");
			T_ASSERT_EQ(j, elem_count, "check that page table dump size matches extent of last header");
			break;
		}
		case STACKSHOT_KCTYPE_ASID: {
			/* Exactly one ASID record is expected as well. */
			T_ASSERT_FALSE(seen_asid, "check that we haven't yet seen an ASID");
			seen_asid = true;
		}
		}
	}
	T_ASSERT_TRUE(seen_page_table_snapshot, "check that we have seen a page table snapshot");
	T_ASSERT_TRUE(seen_asid, "check that we have seen an ASID");
}
1475
/*
 * Takes a stackshot of the current process with ASID and page table
 * capture enabled, then validates the resulting page table dump and ASID
 * records.  Page table dumps are large, hence the explicit size hint; the
 * feature may be unsupported or out of memory on some configurations.
 */
T_DECL(dump_page_tables, "test stackshot page table dumping support")
{
	struct scenario scenario = {
		.name = "asid-page-tables",
		.target_pid = getpid(),
		.size_hint = (9ull << 20), // 9 MB
		.maybe_unsupported = true,
		.maybe_enomem = true,
		.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_ASID | STACKSHOT_PAGE_TABLES),
	};

	T_LOG("attempting to take stackshot with ASID and page table flags");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_page_table_asid_stackshot(ssbuf, sslen);
	});
}
1492
/*
 * Scans a stackshot's load-info arrays (32- and 64-bit variants) for the
 * given process UUID and, once found, asserts the recorded load address
 * matches expected_offset.  Fails the test if the UUID never appears.
 * NOTE(review): ssbuf is declared void ** but is passed straight to
 * kcdata_iter() as the buffer start — presumably should be void *; confirm.
 */
static void stackshot_verify_current_proc_uuid_info(void **ssbuf, size_t sslen, uint64_t expected_offset, const struct proc_uniqidentifierinfo *proc_info_data)
{
	const uuid_t *current_uuid = (const uuid_t *)(&proc_info_data->p_uuid);

	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer provided is a stackshot");

	iter = kcdata_iter_next(iter);

	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
			case KCDATA_TYPE_ARRAY: {
				T_QUIET; T_ASSERT_TRUE(kcdata_iter_array_valid(iter), "checked that array is valid");
				if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO64) {
					/* 64-bit load info: compare UUID, then load address. */
					struct user64_dyld_uuid_info *info = (struct user64_dyld_uuid_info *) kcdata_iter_payload(iter);
					if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
						T_ASSERT_EQ(expected_offset, info->imageLoadAddress, "found matching UUID with matching binary offset");
						return;
					}
				} else if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO) {
					/* 32-bit load info: widen the address before comparing. */
					struct user32_dyld_uuid_info *info = (struct user32_dyld_uuid_info *) kcdata_iter_payload(iter);
					if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
						T_ASSERT_EQ(expected_offset, ((uint64_t) info->imageLoadAddress),  "found matching UUID with matching binary offset");
						return;
					}
				}
				break;
			}
			default:
				break;
		}
	}

	T_FAIL("failed to find matching UUID in stackshot data");
}
1528
/*
 * Spawns the (x86_64) stackshot_translated_child helper under Rosetta,
 * confirms via sysctl that it is P_TRANSLATED, then takes a stackshot and
 * verifies the parser sees the task marked as translated.  Only
 * meaningful on Apple silicon Macs.
 */
T_DECL(translated, "tests translated bit is set correctly")
{
#if !(TARGET_OS_OSX && TARGET_CPU_ARM64)
	T_SKIP("Only valid on Apple silicon Macs")
#endif
	// Get path of stackshot_translated_child helper binary
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char* binary_name = strrchr(path, '/');
	if (binary_name) binary_name++;
	T_QUIET; T_ASSERT_NOTNULL(binary_name, "Find basename in path '%s'", path);
	/* Replace our basename in-place with the helper's binary name. */
	strlcpy(binary_name, "stackshot_translated_child", path_size - (binary_name - path));
	char *args[] = { path, NULL };

	dispatch_source_t child_sig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	/* The child signals SIGUSR1 once it is up and running. */
	signal(SIGUSR1, SIG_IGN);
	child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

	dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_sig_src);

	// Spawn child
	pid_t pid;
	T_LOG("spawning translated child");
	T_QUIET; T_ASSERT_POSIX_ZERO(posix_spawn(&pid, args[0], NULL, NULL, args, NULL), "spawned process '%s' with PID %d", args[0], pid);

	// Wait for the the child to spawn up
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	// Make sure the child is running and is translated
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	struct kinfo_proc process_info;
	size_t bufsize = sizeof(process_info);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &process_info, &bufsize, NULL, 0), "get translated child process info");
	T_QUIET; T_ASSERT_GT(bufsize, (size_t)0, "process info is not empty");
	T_QUIET; T_ASSERT_TRUE((process_info.kp_proc.p_flag & P_TRANSLATED), "KERN_PROC_PID reports child is translated");

	T_LOG("capturing stackshot");

	struct scenario scenario = {
		.name = "translated",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				  | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_TRANSLATED, ssbuf, sslen, @{translated_child_pid_key: @(pid)});
	});

    // Kill the child
    int status;
    T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGTERM), "kill translated child");
    T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on translated child");

}
1592
T_DECL(proc_uuid_info, "tests that the main binary UUID for a proc is always populated")
{
	struct proc_uniqidentifierinfo proc_info_data = { };
	task_dyld_info_data_t dyld_info;
	mach_msg_type_number_t dyld_info_count = TASK_DYLD_INFO_COUNT;
	struct dyld_all_image_infos *all_infos;
	uint64_t expected_mach_header_offset = 0;
	bool image_found = false;

	/* Ask the kernel for our unique-identifier info, which carries the main binary UUID. */
	int ret = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(ret, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
	T_QUIET; T_ASSERT_EQ_INT(ret, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");

	uuid_string_t str = {};
	uuid_unparse(*(uuid_t*)&proc_info_data.p_uuid, str);
	T_LOG("Found current UUID is %s", str);

	/* Locate dyld's all-image-infos structure for this task. */
	kern_return_t kr = task_info(mach_task_self(), TASK_DYLD_INFO, (task_info_t)&dyld_info, &dyld_info_count);
	T_QUIET; T_ASSERT_EQ(kr, KERN_SUCCESS, "retrieve task_info for TASK_DYLD_INFO");

	all_infos = (struct dyld_all_image_infos *)dyld_info.all_image_info_addr;

	/* Scan the whole UUID array; record the load address of the entry matching our UUID. */
	for (uint32_t i = 0; i < all_infos->uuidArrayCount; i++) {
		if (uuid_compare(all_infos->uuidArray[i].imageUUID, *(uuid_t*)&proc_info_data.p_uuid) == 0) {
			expected_mach_header_offset = (uint64_t) all_infos->uuidArray[i].imageLoadAddress;
			image_found = true;
		}
	}

	T_ASSERT_TRUE(image_found, "found binary image in dyld image info list");

	/* Overwrite the dyld image info data so the kernel has to fallback to the UUID stored in the proc structure */
	all_infos->uuidArrayCount = 0;

	struct scenario scenario = {
		.name = "proc_uuid_info",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT),
		.target_pid = getpid(),
	};

	T_LOG("attempting to take stackshot for current PID");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		stackshot_verify_current_proc_uuid_info(ssbuf, sslen, expected_mach_header_offset, &proc_info_data);
	});
}
1644
T_DECL(cseg_waitinfo, "test that threads stuck in the compressor report correct waitinfo")
{
	__block uint64_t wedged_tid = 0;
	struct scenario scenario = {
		.name = "cseg_waitinfo",
		.quiet = false,
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};

	dispatch_queue_t wedge_queue = dispatch_queue_create("com.apple.stackshot.cseg_waitinfo", NULL);
	dispatch_semaphore_t child_ready = dispatch_semaphore_create(0);

	/* Park a helper thread inside the compressor via the debug sysctl. */
	dispatch_async(wedge_queue, ^{
		pthread_threadid_np(NULL, &wedged_tid);
		dispatch_semaphore_signal(child_ready);
		int one = 1;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_wedge_thread", NULL, NULL, &one, sizeof(one)), "wedge child thread");
	});

	dispatch_semaphore_wait(child_ready, DISPATCH_TIME_FOREVER);
	/* give the helper a moment to actually block inside the sysctl */
	sleep(1);

	T_LOG("taking stackshot");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		int one = 1;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_unwedge_thread", NULL, NULL, &one, sizeof(one)), "unwedge child thread");
		parse_stackshot(PARSE_STACKSHOT_WAITINFO_CSEG, ssbuf, sslen, @{cseg_expected_threadid_key: @(wedged_tid)});
	});
}
1674
1675static void
1676srp_send(
1677	mach_port_t send_port,
1678	mach_port_t reply_port,
1679	mach_port_t msg_port)
1680{
1681	kern_return_t ret = 0;
1682
1683	struct test_msg {
1684		mach_msg_header_t header;
1685		mach_msg_body_t body;
1686		mach_msg_port_descriptor_t port_descriptor;
1687	};
1688	struct test_msg send_msg = {
1689		.header = {
1690			.msgh_remote_port = send_port,
1691			.msgh_local_port  = reply_port,
1692			.msgh_bits        = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND,
1693	    reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0,
1694	    MACH_MSG_TYPE_MOVE_SEND,
1695	    MACH_MSGH_BITS_COMPLEX),
1696			.msgh_id          = 0x100,
1697			.msgh_size        = sizeof(send_msg),
1698		},
1699		.body = {
1700			.msgh_descriptor_count = 1,
1701		},
1702		.port_descriptor = {
1703			.name        = msg_port,
1704			.disposition = MACH_MSG_TYPE_MOVE_RECEIVE,
1705			.type        = MACH_MSG_PORT_DESCRIPTOR,
1706		},
1707	};
1708
1709	if (msg_port == MACH_PORT_NULL) {
1710		send_msg.body.msgh_descriptor_count = 0;
1711	}
1712
1713	ret = mach_msg(&(send_msg.header),
1714	    MACH_SEND_MSG |
1715	    MACH_SEND_TIMEOUT |
1716	    MACH_SEND_OVERRIDE,
1717	    send_msg.header.msgh_size,
1718	    0,
1719	    MACH_PORT_NULL,
1720	    10000,
1721	    0);
1722
1723	T_ASSERT_MACH_SUCCESS(ret, "client mach_msg");
1724}
1725
T_HELPER_DECL(srp_client,
    "Client used for the special_reply_port test")
{
	pid_t ppid = getppid();
	/* NOTE(review): can_continue is signaled by the SIGUSR1 source below but is
	 * never waited on in this helper — the client blocks in mach_msg instead. */
	dispatch_semaphore_t can_continue  = dispatch_semaphore_create(0);
	dispatch_queue_t dq = dispatch_queue_create("client_signalqueue", NULL);
	dispatch_source_t sig_src;

	mach_msg_return_t mr;
	mach_port_t service_port;
	mach_port_t conn_port;
	mach_port_t special_reply_port;
	mach_port_options_t opts = {
		.flags = MPO_INSERT_SEND_RIGHT,
	};

	/* route SIGUSR1 through a dispatch source; ignore the default disposition */
	signal(SIGUSR1, SIG_IGN);
	sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq);

	dispatch_source_set_event_handler(sig_src, ^{
			dispatch_semaphore_signal(can_continue);
	});
	dispatch_activate(sig_src);

	/* lookup the mach service port for the parent */
	kern_return_t kr = bootstrap_look_up(bootstrap_port,
	    SRP_SERVICE_NAME, &service_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up");

	/* create the send-once right (special reply port) and message to send to the server */
	kr = mach_port_construct(mach_task_self(), &opts, 0ull, &conn_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_construct");

	special_reply_port = thread_get_special_reply_port();
	T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port");

	/* send the message with the special reply port */
	srp_send(service_port, special_reply_port, conn_port);

	/* signal the parent to continue */
	kill(ppid, SIGUSR1);

	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} rcv_msg = {
		.header =
		{
			.msgh_remote_port = MACH_PORT_NULL,
			.msgh_local_port  = special_reply_port,
			.msgh_size        = sizeof(rcv_msg),
		},
	};

	/* wait on the reply from the parent (that we will never receive) */
	/* MACH_RCV_SYNC_WAIT marks this as a synchronous wait so stackshot waitinfo
	 * can link this thread to the server holding the reply right */
	mr = mach_msg(&(rcv_msg.header),
			(MACH_RCV_MSG | MACH_RCV_SYNC_WAIT),
			0,
			rcv_msg.header.msgh_size,
			special_reply_port,
			MACH_MSG_TIMEOUT_NONE,
			service_port);

	/* not expected to execute as parent will SIGKILL client... */
	/* mr is intentionally unchecked: the receive above should never return */
	T_LOG("client process exiting after sending message to parent (server)");
}
1793
/* Which waiter identity the SRP stackshot waitinfo is expected to report. */
enum srp_test_type {
	SRP_TEST_THREAD,	/* expect waiter on current thread */
	SRP_TEST_PID,		/* expect waiter on current PID */
	SRP_TEST_EITHER,	/* waiter could be on either */
};
1799
/*
 * Take a STACKSHOT_THREAD_WAITINFO stackshot and verify that the special
 * reply port waitinfo identifies the expected waiter.
 *
 * name: scenario name used for the stackshot artifact
 * ty:   which waiter identity to expect (current thread, current PID, or both)
 */
static void
check_srp_test(const char *name, enum srp_test_type ty)
{
	struct scenario scenario = {
		.name = name,
		.quiet = false,
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};
	uint64_t thread_id = 0;
	pthread_threadid_np(NULL, &thread_id);

	/*
	 * Build the expectation dictionary up front instead of duplicating the
	 * take_stackshot/parse_stackshot call once per srp_test_type.
	 */
	NSMutableDictionary *expected = [NSMutableDictionary dictionary];
	if (ty == SRP_TEST_THREAD || ty == SRP_TEST_EITHER) {
		expected[srp_expected_threadid_key] = @(thread_id);
	}
	if (ty == SRP_TEST_PID || ty == SRP_TEST_EITHER) {
		expected[srp_expected_pid_key] = @(getpid());
	}

	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_WAITINFO_SRP, ssbuf, sslen, expected);
	});
}
1828
1829
1830/*
1831 * Tests the stackshot wait info plumbing for synchronous IPC that doesn't use kevent on the server.
1832 *
1833 * (part 1): tests the scenario where a client sends a request that includes a special reply port
1834 *           to a server that doesn't receive the message and doesn't copy the send-once right
1835 *           into its address space as a result. for this case the special reply port is enqueued
1836 *           in a port and we check which task has that receive right and use that info. (rdar://60440338)
1837 * (part 2): tests the scenario where a client sends a request that includes a special reply port
1838 *           to a server that receives the message and copies in the send-once right, but doesn't
1839 *           reply to the client. for this case the special reply port is copied out and the kernel
1840 *           stashes the info about which task copied out the send once right. (rdar://60440592)
1841 * (part 3): tests the same as part 2, but uses kevents, which allow for
1842 *           priority inheritance
1843 */
1844T_DECL(special_reply_port, "test that tasks using special reply ports have correct waitinfo")
1845{
1846	dispatch_semaphore_t can_continue  = dispatch_semaphore_create(0);
1847	dispatch_queue_t dq = dispatch_queue_create("signalqueue", NULL);
1848	dispatch_queue_t machdq = dispatch_queue_create("machqueue", NULL);
1849	dispatch_source_t sig_src;
1850	char path[PATH_MAX];
1851	uint32_t path_size = sizeof(path);
1852	T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
1853	char *client_args[] = { path, "-n", "srp_client", NULL };
1854	pid_t client_pid;
1855	int sp_ret;
1856	kern_return_t kr;
1857	mach_port_t port;
1858
1859	/* setup the signal handler in the parent (server) */
1860	T_LOG("setup sig handlers");
1861	signal(SIGUSR1, SIG_IGN);
1862	sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq);
1863
1864	dispatch_source_set_event_handler(sig_src, ^{
1865			dispatch_semaphore_signal(can_continue);
1866	});
1867	dispatch_activate(sig_src);
1868
1869	/* register with the mach service name so the client can lookup and send a message to the parent (server) */
1870	T_LOG("Server about to check in");
1871	kr = bootstrap_check_in(bootstrap_port, SRP_SERVICE_NAME, &port);
1872	T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in");
1873
1874	T_LOG("Launching client");
1875	sp_ret = posix_spawn(&client_pid, client_args[0], NULL, NULL, client_args, NULL);
1876	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", client_args[0], client_pid);
1877	T_LOG("Spawned client as PID %d", client_pid);
1878
1879	dispatch_semaphore_wait(can_continue, DISPATCH_TIME_FOREVER);
1880	T_LOG("Ready to take stackshot, but waiting 1s for the coast to clear");
1881
1882	/*
1883	 * can_continue indicates the client has signaled us, but we want to make
1884	 * sure they've actually blocked sending their mach message.  It's cheesy, but
1885	 * sleep() works for this.
1886	 */
1887	sleep(1);
1888
1889	/*
1890	 * take the stackshot without calling receive to verify that the stackshot wait
1891	 * info shows our (the server) thread for the scenario where the server has yet to
1892	 * receive the message.
1893	 */
1894	T_LOG("Taking stackshot for part 1 coverage");
1895	check_srp_test("srp", SRP_TEST_THREAD);
1896
1897	/*
1898	 * receive the message from the client (which should copy the send once right into
1899	 * our address space).
1900	 */
1901	struct {
1902		mach_msg_header_t header;
1903		mach_msg_body_t body;
1904		mach_msg_port_descriptor_t port_descriptor;
1905	} rcv_msg = {
1906		.header =
1907		{
1908			.msgh_remote_port = MACH_PORT_NULL,
1909			.msgh_local_port  = port,
1910			.msgh_size        = sizeof(rcv_msg),
1911		},
1912	};
1913
1914	T_LOG("server: starting sync receive\n");
1915
1916	mach_msg_return_t mr;
1917	mr = mach_msg(&(rcv_msg.header),
1918			(MACH_RCV_MSG | MACH_RCV_TIMEOUT),
1919			0,
1920			4096,
1921			port,
1922			10000,
1923			MACH_PORT_NULL);
1924	T_QUIET; T_ASSERT_MACH_SUCCESS(mr, "mach_msg() recieve of message from client");
1925
1926	/*
1927	 * take the stackshot to verify that the stackshot wait info shows our (the server) PID
1928	 * for the scenario where the server has received the message and copied in the send-once right.
1929	 */
1930	T_LOG("Taking stackshot for part 2 coverage");
1931	check_srp_test("srp", SRP_TEST_PID);
1932
1933	/* cleanup - kill the client */
1934	T_ASSERT_POSIX_SUCCESS(kill(client_pid, SIGKILL), "killing client");
1935	T_ASSERT_POSIX_SUCCESS(waitpid(client_pid, NULL, 0), "waiting for the client to exit");
1936
1937	// do it again, but using kevents
1938	T_LOG("Launching client");
1939	sp_ret = posix_spawn(&client_pid, client_args[0], NULL, NULL, client_args, NULL);
1940	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", client_args[0], client_pid);
1941	T_LOG("Spawned client as PID %d", client_pid);
1942
1943	dispatch_semaphore_wait(can_continue, DISPATCH_TIME_FOREVER);
1944	T_LOG("Ready to take stackshot, but waiting 1s for the coast to clear");
1945
1946	/*
1947	 * can_continue indicates the client has signaled us, but we want to make
1948	 * sure they've actually blocked sending their mach message.  It's cheesy, but
1949	 * sleep() works for this.
1950	 */
1951	sleep(1);
1952
1953	dispatch_mach_t dispatch_mach = dispatch_mach_create(SRP_SERVICE_NAME, machdq,
1954	    ^(dispatch_mach_reason_t reason,
1955	      dispatch_mach_msg_t message,
1956	      mach_error_t error __unused) {
1957		switch (reason) {
1958		case DISPATCH_MACH_MESSAGE_RECEIVED: {
1959			size_t size = 0;
1960			mach_msg_header_t *msg __unused = dispatch_mach_msg_get_msg(message, &size);
1961			T_LOG("server: recieved %ld byte message", size);
1962			check_srp_test("turnstile_port_thread", SRP_TEST_THREAD);
1963			T_LOG("server: letting client go");
1964			// drop the message on the ground, we'll kill the client later
1965			dispatch_semaphore_signal(can_continue);
1966			break;
1967		}
1968		default:
1969			break;
1970		}
1971	});
1972
1973	dispatch_mach_connect(dispatch_mach, port, MACH_PORT_NULL, NULL);
1974
1975	dispatch_semaphore_wait(can_continue, DISPATCH_TIME_FOREVER);
1976
1977	/* cleanup - kill the client */
1978	T_ASSERT_POSIX_SUCCESS(kill(client_pid, SIGKILL), "killing client");
1979	T_ASSERT_POSIX_SUCCESS(waitpid(client_pid, NULL, 0), "waiting for the client to exit");
1980}
1981
1982#pragma mark performance tests
1983
1984#define SHOULD_REUSE_SIZE_HINT 0x01
1985#define SHOULD_USE_DELTA       0x02
1986#define SHOULD_TARGET_SELF     0x04
1987
/*
 * Repeatedly take stackshots until the size and duration statistics are
 * stable; `options` selects size-hint reuse, delta snapshots, and/or
 * targeting the current process.
 */
static void
stackshot_perf(unsigned int options)
{
	struct scenario scenario = {
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
			| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	dt_stat_t size_stat = dt_stat_create("bytes", "size");
	dt_stat_time_t time_stat = dt_stat_time_create("duration");
	scenario.timer = time_stat;

	if (options & SHOULD_TARGET_SELF) {
		scenario.target_pid = getpid();
	}

	/* keep sampling until both statistics converge */
	while (!dt_stat_stable(time_stat) || !dt_stat_stable(size_stat)) {
		__block uint64_t snap_time = 0;
		__block uint32_t hint = 0;
		take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
			dt_stat_add(size_stat, (double)sslen);
			snap_time = stackshot_timestamp(ssbuf, sslen);
			hint = (uint32_t)sslen;
		});
		/* feed this iteration's results back into the next one, as requested */
		if (options & SHOULD_USE_DELTA) {
			scenario.since_timestamp = snap_time;
			scenario.flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;
		}
		if (options & SHOULD_REUSE_SIZE_HINT) {
			scenario.size_hint = hint;
		}
	}

	dt_stat_finalize(time_stat);
	dt_stat_finalize(size_stat);
}
2024
/*
 * Measure per-thread stackshot cost and size with exactly the given flags
 * (plus STACKSHOT_KCDATA_FORMAT), without adding STACKSHOT_NO_IO_STATS.
 */
static void
stackshot_flag_perf_noclobber(uint64_t flag, char *flagname)
{
	struct scenario scenario = {
		.quiet = true,
		.flags = (flag | STACKSHOT_KCDATA_FORMAT),
	};

	dt_stat_t duration = dt_stat_create("nanoseconds per thread", "%s_duration", flagname);
	dt_stat_t size = dt_stat_create("bytes per thread", "%s_size", flagname);
	T_LOG("Testing \"%s\" = 0x%" PRIx64, flagname, flag);

	while (!dt_stat_stable(duration) || !dt_stat_stable(size)) {
		take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
			/* bit 0: saw a thread; bit 1: saw the duration; bit 2: saw the timebase */
			int seen = 0;
			unsigned long thread_count = 0;
			uint64_t duration_mt = 0;
			mach_timebase_info_data_t timebase = {0, 0};

			kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
			T_QUIET; T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "stackshot buffer");

			KCDATA_ITER_FOREACH(iter) {
				switch (kcdata_iter_type(iter)) {
				case STACKSHOT_KCTYPE_THREAD_SNAPSHOT:
					seen |= 1;
					thread_count++;
					break;
				case STACKSHOT_KCTYPE_STACKSHOT_DURATION: {
					struct stackshot_duration *ssd = kcdata_iter_payload(iter);
					duration_mt = ssd->stackshot_duration;
					seen |= 2;
					break;
				}
				case KCDATA_TYPE_TIMEBASE: {
					mach_timebase_info_data_t *tb = kcdata_iter_payload(iter);
					memcpy(&timebase, tb, sizeof(timebase));
					seen |= 4;
					break;
				}
				default:
					break;
				}
			}

			T_QUIET; T_ASSERT_EQ(seen, 0x7, "found everything needed");

			/* convert mach ticks to nanoseconds, then normalize per thread */
			uint64_t ns = (duration_mt * timebase.numer) / timebase.denom;
			dt_stat_add(duration, ns / thread_count);
			dt_stat_add(size, sslen / thread_count);
		});
	}

	dt_stat_finalize(duration);
	dt_stat_finalize(size);
}
2082
/*
 * Measure the per-thread cost of `flag` with I/O-stats collection disabled
 * (STACKSHOT_NO_IO_STATS) so the flag under test dominates the measurement.
 */
static void
stackshot_flag_perf(uint64_t flag, char *flagname)
{
	stackshot_flag_perf_noclobber(flag | STACKSHOT_NO_IO_STATS, flagname);
}
2094
2095
T_DECL(flag_perf, "test stackshot performance with different flags set", T_META_TAG_PERF)
{
	/* baseline first (no I/O stats), then the cost of io_stats itself */
	stackshot_flag_perf_noclobber(STACKSHOT_NO_IO_STATS, "baseline");
	stackshot_flag_perf_noclobber(0, "io_stats");

	/* each remaining flag is measured on top of STACKSHOT_NO_IO_STATS */
	struct {
		uint64_t flag;
		char *name;
	} cases[] = {
		{ STACKSHOT_THREAD_WAITINFO, "thread_waitinfo" },
		{ STACKSHOT_GET_DQ, "get_dq" },
		{ STACKSHOT_SAVE_LOADINFO, "save_loadinfo" },
		{ STACKSHOT_GET_GLOBAL_MEM_STATS, "get_global_mem_stats" },
		{ STACKSHOT_SAVE_KEXT_LOADINFO, "save_kext_loadinfo" },
		{ STACKSHOT_SAVE_IMP_DONATION_PIDS, "save_imp_donation_pids" },
		{ STACKSHOT_ENABLE_BT_FAULTING, "enable_bt_faulting" },
		{ STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT, "collect_sharedcache_layout" },
		{ STACKSHOT_ENABLE_UUID_FAULTING, "enable_uuid_faulting" },
		{ STACKSHOT_THREAD_GROUP, "thread_group" },
		{ STACKSHOT_SAVE_JETSAM_COALITIONS, "save_jetsam_coalitions" },
		{ STACKSHOT_INSTRS_CYCLES, "instrs_cycles" },
		{ STACKSHOT_ASID, "asid" },
	};
	for (size_t i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) {
		stackshot_flag_perf(cases[i].flag, cases[i].name);
	}
}
2115
T_DECL(perf_no_size_hint, "test stackshot performance with no size hint",
		T_META_TAG_PERF)
{
	/* baseline: whole-system stackshot, fresh buffer sizing every iteration */
	stackshot_perf(0);
}
2121
T_DECL(perf_size_hint, "test stackshot performance with size hint",
		T_META_TAG_PERF)
{
	/* reuse the previous iteration's stackshot size as the buffer size hint */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT);
}
2127
T_DECL(perf_process, "test stackshot performance targeted at process",
		T_META_TAG_PERF)
{
	/* size-hinted stackshot targeted at this test process only */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_TARGET_SELF);
}
2133
T_DECL(perf_delta, "test delta stackshot performance",
		T_META_TAG_PERF)
{
	/* size-hinted delta stackshots relative to the previous iteration's timestamp */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA);
}
2139
T_DECL(perf_delta_process, "test delta stackshot performance targeted at a process",
		T_META_TAG_PERF)
{
	/* size-hinted delta stackshots targeted at this test process only */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA | SHOULD_TARGET_SELF);
}
2145
T_DECL(stackshot_entitlement_report_test, "test stackshot entitlement report")
{
	/* poke the kernel into flushing its batched entitlement analytics now */
	int sysctlValue = 1;
	T_ASSERT_POSIX_SUCCESS(
	    sysctlbyname("debug.stackshot_entitlement_send_batch", NULL, NULL, &sysctlValue, sizeof(sysctlValue)),
	    "set debug.stackshot_entitlement_send_batch=1");
	// having a way to verify that the coreanalytics event was received would be even better
	// See rdar://74197197
	T_PASS("entitlement test ran");
}
2156
/*
 * Return the KCDATA_TYPE_MACH_ABSOLUTE_TIME timestamp recorded in a
 * stackshot buffer; asserts if the buffer is not a (delta) stackshot or
 * the timestamp is missing.
 */
static uint64_t
stackshot_timestamp(void *ssbuf, size_t sslen)
{
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);

	/* both full and delta stackshot buffers are acceptable */
	uint32_t buf_type = kcdata_iter_type(iter);
	bool is_stackshot = (buf_type == KCDATA_BUFFER_BEGIN_STACKSHOT) ||
	    (buf_type == KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT);
	if (!is_stackshot) {
		T_ASSERT_FAIL("invalid kcdata type %u", buf_type);
	}

	iter = kcdata_iter_find_type(iter, KCDATA_TYPE_MACH_ABSOLUTE_TIME);
	T_QUIET;
	T_ASSERT_TRUE(kcdata_iter_valid(iter), "timestamp found in stackshot");

	return *(uint64_t *)kcdata_iter_payload(iter);
}
2173
2174#define TEST_THREAD_NAME "stackshot_test_thread"
2175
/*
 * Verify thread-group data in a stackshot, in two passes:
 *  1. collect the id of every group in the THREAD_GROUP_SNAPSHOT array(s)
 *  2. check that every thread container's "thread_group" is a collected id
 */
static void
parse_thread_group_stackshot(void **ssbuf, size_t sslen)
{
	bool seen_thread_group_snapshot = false;
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
			"buffer provided is a stackshot");

	NSMutableSet *thread_groups = [[NSMutableSet alloc] init];

	/* pass 1: gather thread group ids */
	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT) {
				continue;
			}

			seen_thread_group_snapshot = true;

			/* element size distinguishes the v3 layout from the original one */
			if (kcdata_iter_array_elem_size(iter) >= sizeof(struct thread_group_snapshot_v3)) {
				struct thread_group_snapshot_v3 *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot_v3 *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}
			}
			else {
				struct thread_group_snapshot *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}
			}
			break;
		}
		}
	}

	/*
	 * Pass 1 leaves the iterator at the end of the buffer, so restart from
	 * the beginning for pass 2.  (Previously the second loop reused the
	 * exhausted iterator and never examined any thread container.)
	 */
	iter = kcdata_iter_next(kcdata_iter(ssbuf, sslen));

	/* pass 2: every thread's group must be one we saw in pass 1 */
	KCDATA_ITER_FOREACH(iter) {
		NSError *error = nil;

		switch (kcdata_iter_type(iter)) {

		case KCDATA_TYPE_CONTAINER_BEGIN: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
					"checked that container is valid");

			if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_THREAD) {
				break;
			}

			NSDictionary *container = parseKCDataContainer(&iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(container, "parsed thread container from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");

			int tg = [container[@"thread_snapshots"][@"thread_group"] intValue];

			T_ASSERT_TRUE([thread_groups containsObject:@(tg)], "check that the thread group the thread is in exists");

			break;
		};

		}
	}
	T_ASSERT_TRUE(seen_thread_group_snapshot, "check that we have seen a thread group snapshot");
}
2247
/*
 * Cross-check the kernel-reported shared cache layout (`uuids`) against
 * dyld's own iteration of the current shared cache's text segments.
 */
static void
verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count)
{
	uuid_t cache_uuid;
	__block uint32_t idx = 0, matched = 0;

	_dyld_get_shared_cache_uuid(cache_uuid);
	/* walk dyld's view of the layout, comparing entry-by-entry in order */
	int result = dyld_shared_cache_iterate_text(cache_uuid, ^(const dyld_shared_cache_dylib_text_info* info) {
			T_QUIET; T_ASSERT_LT(idx, uuid_count, "dyld_shared_cache_iterate_text exceeded number of libraries returned by kernel");

			matched++;
			struct dyld_uuid_info_64 *entry = &uuids[idx];
			T_QUIET; T_ASSERT_EQ(memcmp(info->dylibUuid, entry->imageUUID, sizeof(info->dylibUuid)), 0,
					"dyld returned UUID doesn't match kernel returned UUID");
			T_QUIET; T_ASSERT_EQ(info->loadAddressUnslid, entry->imageLoadAddress,
					"dyld returned load address doesn't match kernel returned load address");
			idx++;
		});

	T_ASSERT_EQ(result, 0, "iterate shared cache layout");
	T_ASSERT_EQ(matched, uuid_count, "dyld iterator returned same number of libraries as kernel");

	T_LOG("verified %d libraries from dyld shared cache", matched);
}
2272
/*
 * Assert that `imageUUID` matches the process's shared cache UUID; the
 * shared cache UUID is looked up once and cached across calls.
 */
static void
check_shared_cache_uuid(uuid_t imageUUID)
{
	static uuid_t cached_uuid;
	static dispatch_once_t once;

	dispatch_once(&once, ^{
		T_QUIET;
		T_ASSERT_TRUE(_dyld_get_shared_cache_uuid(cached_uuid), "retrieve current shared cache UUID");
	});
	T_QUIET; T_ASSERT_EQ(uuid_compare(cached_uuid, imageUUID), 0,
			"dyld returned UUID doesn't match kernel returned UUID for system shared cache");
}
2286
2287/*
2288 * extra dictionary contains data relevant for the given flags:
2289 * PARSE_STACKSHOT_ZOMBIE:   zombie_child_pid_key -> @(pid)
2290 * PARSE_STACKSHOT_POSTEXEC: postexec_child_unique_pid_key -> @(unique_pid)
2291 */
2292static void
2293parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, NSDictionary *extra)
2294{
2295	bool delta = (stackshot_parsing_flags & PARSE_STACKSHOT_DELTA);
2296	bool expect_sharedcache_child = (stackshot_parsing_flags & PARSE_STACKSHOT_SHAREDCACHE_FLAGS);
2297	bool expect_zombie_child = (stackshot_parsing_flags & PARSE_STACKSHOT_ZOMBIE);
2298	bool expect_postexec_child = (stackshot_parsing_flags & PARSE_STACKSHOT_POSTEXEC);
2299	bool expect_cseg_waitinfo = (stackshot_parsing_flags & PARSE_STACKSHOT_WAITINFO_CSEG);
2300	bool expect_translated_child = (stackshot_parsing_flags & PARSE_STACKSHOT_TRANSLATED);
2301	bool expect_shared_cache_layout = false;
2302	bool expect_shared_cache_uuid = !delta;
2303	bool expect_dispatch_queue_label = (stackshot_parsing_flags & PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL);
2304	bool expect_turnstile_lock = (stackshot_parsing_flags & PARSE_STACKSHOT_TURNSTILEINFO);
2305	bool expect_srp_waitinfo = (stackshot_parsing_flags & PARSE_STACKSHOT_WAITINFO_SRP);
2306	bool expect_exec_inprogress = (stackshot_parsing_flags & PARSE_STACKSHOT_EXEC_INPROGRESS);
2307	bool expect_transitioning_task = (stackshot_parsing_flags & PARSE_STACKSHOT_TRANSITIONING);
2308	bool expect_asyncstack = (stackshot_parsing_flags & PARSE_STACKSHOT_ASYNCSTACK);
2309	bool expect_driverkit = (stackshot_parsing_flags & PARSE_STACKSHOT_DRIVERKIT);
2310	bool found_zombie_child = false, found_postexec_child = false, found_shared_cache_layout = false, found_shared_cache_uuid = false;
2311	bool found_translated_child = false, found_transitioning_task = false;
2312	bool found_dispatch_queue_label = false, found_turnstile_lock = false;
2313	bool found_cseg_waitinfo = false, found_srp_waitinfo = false;
2314	bool found_sharedcache_child = false, found_sharedcache_badflags = false, found_sharedcache_self = false;
2315	bool found_asyncstack = false;
2316	uint64_t srp_expected_threadid = 0;
2317	pid_t zombie_child_pid = -1, srp_expected_pid = -1, sharedcache_child_pid = -1;
2318	pid_t translated_child_pid = -1, transistioning_task_pid = -1;
2319	bool sharedcache_child_sameaddr = false;
2320	uint64_t postexec_child_unique_pid = 0, cseg_expected_threadid = 0;
2321	uint64_t sharedcache_child_flags = 0, sharedcache_self_flags = 0;
2322	uint64_t asyncstack_threadid = 0;
2323	NSArray *asyncstack_stack = nil;
2324	char *inflatedBufferBase = NULL;
2325	pid_t exec_inprogress_pid = -1;
2326	void (^exec_inprogress_cb)(uint64_t, uint64_t) = NULL;
2327	int exec_inprogress_found = 0;
2328	uint64_t exec_inprogress_containerid = 0;
2329	void (^driverkit_cb)(pid_t) = NULL;
2330	NSMutableDictionary *sharedCaches = [NSMutableDictionary new];
2331
2332	if (expect_shared_cache_uuid) {
2333		uuid_t shared_cache_uuid;
2334		if (!_dyld_get_shared_cache_uuid(shared_cache_uuid)) {
2335			T_LOG("Skipping verifying shared cache UUID in stackshot data because not running with a shared cache");
2336			expect_shared_cache_uuid = false;
2337		}
2338	}
2339
2340	if (stackshot_parsing_flags & PARSE_STACKSHOT_SHAREDCACHE_LAYOUT) {
2341		size_t shared_cache_length = 0;
2342		const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);
2343		T_QUIET; T_ASSERT_NOTNULL(cache_header, "current process running with shared cache");
2344		T_QUIET; T_ASSERT_GT(shared_cache_length, sizeof(struct _dyld_cache_header), "valid shared cache length populated by _dyld_get_shared_cache_range");
2345
2346		if (_dyld_shared_cache_is_locally_built()) {
2347			T_LOG("device running with locally built shared cache, expect shared cache layout");
2348			expect_shared_cache_layout = true;
2349		} else {
2350			T_LOG("device running with B&I built shared-cache, no shared cache layout expected");
2351		}
2352	}
2353
2354	if (expect_sharedcache_child) {
2355		NSNumber* pid_num = extra[sharedcache_child_pid_key];
2356		NSNumber* sameaddr_num = extra[sharedcache_child_sameaddr_key];
2357		T_QUIET; T_ASSERT_NOTNULL(pid_num, "sharedcache child pid provided");
2358		T_QUIET; T_ASSERT_NOTNULL(sameaddr_num, "sharedcache child addrsame provided");
2359		sharedcache_child_pid = [pid_num intValue];
2360		T_QUIET; T_ASSERT_GT(sharedcache_child_pid, 0, "sharedcache child pid greater than zero");
2361		sharedcache_child_sameaddr = [sameaddr_num intValue];
2362		T_QUIET; T_ASSERT_GE([sameaddr_num intValue], 0, "sharedcache child sameaddr is boolean (0 or 1)");
2363		T_QUIET; T_ASSERT_LE([sameaddr_num intValue], 1, "sharedcache child sameaddr is boolean (0 or 1)");
2364	}
2365
2366    if (expect_transitioning_task) {
2367        NSNumber* pid_num = extra[transitioning_pid_key];
2368        T_ASSERT_NOTNULL(pid_num, "transitioning task pid provided");
2369        transistioning_task_pid = [pid_num intValue];
2370    }
2371
2372	if (expect_zombie_child) {
2373		NSNumber* pid_num = extra[zombie_child_pid_key];
2374		T_QUIET; T_ASSERT_NOTNULL(pid_num, "zombie child pid provided");
2375		zombie_child_pid = [pid_num intValue];
2376		T_QUIET; T_ASSERT_GT(zombie_child_pid, 0, "zombie child pid greater than zero");
2377	}
2378
2379	if (expect_postexec_child) {
2380		NSNumber* unique_pid_num = extra[postexec_child_unique_pid_key];
2381		T_QUIET; T_ASSERT_NOTNULL(unique_pid_num, "postexec child unique pid provided");
2382		postexec_child_unique_pid = [unique_pid_num unsignedLongLongValue];
2383		T_QUIET; T_ASSERT_GT(postexec_child_unique_pid, 0ull, "postexec child unique pid greater than zero");
2384	}
2385
2386	if (expect_cseg_waitinfo) {
2387		NSNumber* tid_num = extra[cseg_expected_threadid_key];
2388		T_QUIET; T_ASSERT_NOTNULL(tid_num, "cseg's expected thread id provided");
2389		cseg_expected_threadid = tid_num.unsignedLongValue;
2390		T_QUIET; T_ASSERT_GT(cseg_expected_threadid, UINT64_C(0), "compressor segment thread is present");
2391	}
2392
2393	if (expect_srp_waitinfo) {
2394		NSNumber* threadid_num = extra[srp_expected_threadid_key];
2395		NSNumber* pid_num = extra[srp_expected_pid_key];
2396		T_QUIET; T_ASSERT_TRUE(threadid_num != nil || pid_num != nil, "expected SRP threadid or pid");
2397		if (threadid_num != nil) {
2398			srp_expected_threadid = [threadid_num unsignedLongLongValue];
2399			T_QUIET; T_ASSERT_GT(srp_expected_threadid, 0ull, "srp_expected_threadid greater than zero");
2400		}
2401		if (pid_num != nil) {
2402			srp_expected_pid = [pid_num intValue];
2403			T_QUIET; T_ASSERT_GT(srp_expected_pid, 0, "srp_expected_pid greater than zero");
2404		}
2405		T_LOG("looking for SRP pid: %d threadid: %llu", srp_expected_pid, srp_expected_threadid);
2406	}
2407
2408	if (expect_translated_child) {
2409		NSNumber* pid_num = extra[translated_child_pid_key];
2410		T_QUIET; T_ASSERT_NOTNULL(pid_num, "translated child pid provided");
2411		translated_child_pid = [pid_num intValue];
2412		T_QUIET; T_ASSERT_GT(translated_child_pid, 0, "translated child pid greater than zero");
2413	}
2414	if (expect_exec_inprogress) {
2415		NSNumber* pid_num = extra[exec_inprogress_pid_key];
2416		T_QUIET; T_ASSERT_NOTNULL(pid_num, "exec inprogress pid provided");
2417		exec_inprogress_pid = [pid_num intValue];
2418		T_QUIET; T_ASSERT_GT(exec_inprogress_pid, 0, "exec inprogress pid greater than zero");
2419
2420		exec_inprogress_cb = extra[exec_inprogress_found_key];
2421		T_QUIET; T_ASSERT_NOTNULL(exec_inprogress_cb, "exec inprogress found callback provided");
2422	}
2423	if (expect_driverkit) {
2424		driverkit_cb = extra[driverkit_found_key];
2425		T_QUIET; T_ASSERT_NOTNULL(driverkit_cb, "driverkit found callback provided");
2426	}
2427
2428	if (expect_asyncstack) {
2429		NSNumber* threadid_id = extra[asyncstack_expected_threadid_key];
2430		T_QUIET; T_ASSERT_NOTNULL(threadid_id, "asyncstack threadid provided");
2431		asyncstack_threadid = [threadid_id unsignedLongLongValue];
2432		asyncstack_stack = extra[asyncstack_expected_stack_key];
2433		T_QUIET; T_ASSERT_NOTNULL(asyncstack_stack, "asyncstack expected stack provided");
2434	}
2435
2436	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
2437	if (delta) {
2438		T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
2439				"buffer provided is a delta stackshot");
2440
2441			iter = kcdata_iter_next(iter);
2442	} else {
2443		if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_COMPRESSED) {
2444			T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
2445					"buffer provided is a stackshot");
2446
2447			iter = kcdata_iter_next(iter);
2448		} else {
2449			/* we are dealing with a compressed buffer */
2450			iter = kcdata_iter_next(iter);
2451			uint64_t compression_type = 0, totalout = 0, totalin = 0;
2452
2453			uint64_t *data;
2454			char *desc;
2455			for (int i = 0; i < 3; i ++) {
2456				kcdata_iter_get_data_with_desc(iter, &desc, (void **)&data, NULL);
2457				if (strcmp(desc, "kcd_c_type") == 0) {
2458					compression_type = *data;
2459				} else if (strcmp(desc, "kcd_c_totalout") == 0){
2460					totalout = *data;
2461				} else if (strcmp(desc, "kcd_c_totalin") == 0){
2462					totalin = *data;
2463				}
2464
2465				iter = kcdata_iter_next(iter);
2466			}
2467
2468			T_ASSERT_EQ(compression_type, UINT64_C(1), "zlib compression is used");
2469			T_ASSERT_GT(totalout, UINT64_C(0), "successfully gathered how long the compressed buffer is");
2470			T_ASSERT_GT(totalin, UINT64_C(0), "successfully gathered how long the uncompressed buffer will be at least");
2471
2472			/* progress to the next kcdata item */
2473			T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "compressed stackshot found");
2474
2475			char *bufferBase = kcdata_iter_payload(iter);
2476
2477			/*
2478			 * zlib is used, allocate a buffer based on the metadata, plus
2479			 * extra scratch space (+12.5%) in case totalin was inconsistent
2480			 */
2481			size_t inflatedBufferSize = totalin + (totalin >> 3);
2482			inflatedBufferBase = malloc(inflatedBufferSize);
2483			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(inflatedBufferBase, "allocated temporary output buffer");
2484
2485			z_stream zs;
2486			memset(&zs, 0, sizeof(zs));
2487			T_QUIET; T_ASSERT_EQ(inflateInit(&zs), Z_OK, "inflateInit OK");
2488			zs.next_in = (unsigned char *)bufferBase;
2489			T_QUIET; T_ASSERT_LE(totalout, (uint64_t)UINT_MAX, "stackshot is not too large");
2490			zs.avail_in = (uInt)totalout;
2491			zs.next_out = (unsigned char *)inflatedBufferBase;
2492			T_QUIET; T_ASSERT_LE(inflatedBufferSize, (size_t)UINT_MAX, "output region is not too large");
2493			zs.avail_out = (uInt)inflatedBufferSize;
2494			T_ASSERT_EQ(inflate(&zs, Z_FINISH), Z_STREAM_END, "inflated buffer");
2495			inflateEnd(&zs);
2496
2497			T_ASSERT_EQ((uint64_t)zs.total_out, totalin, "expected number of bytes inflated");
2498
2499			/* copy the data after the compressed area */
2500			T_QUIET; T_ASSERT_GE((void *)bufferBase, ssbuf,
2501					"base of compressed stackshot is after the returned stackshot buffer");
2502			size_t header_size = (size_t)(bufferBase - (char *)ssbuf);
2503			size_t data_after_compressed_size = sslen - totalout - header_size;
2504			T_QUIET; T_ASSERT_LE(data_after_compressed_size,
2505					inflatedBufferSize - zs.total_out,
2506					"footer fits in the buffer");
2507			memcpy(inflatedBufferBase + zs.total_out,
2508					bufferBase + totalout,
2509					data_after_compressed_size);
2510
2511			iter = kcdata_iter(inflatedBufferBase, inflatedBufferSize);
2512		}
2513	}
2514
2515	KCDATA_ITER_FOREACH(iter) {
2516		NSError *error = nil;
2517
2518		switch (kcdata_iter_type(iter)) {
2519		case KCDATA_TYPE_ARRAY: {
2520			T_QUIET;
2521			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
2522					"checked that array is valid");
2523
2524			NSMutableDictionary *array = parseKCDataArray(iter, &error);
2525			T_QUIET; T_ASSERT_NOTNULL(array, "parsed array from stackshot");
2526			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing array");
2527
2528			if (kcdata_iter_array_elem_type(iter) == STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT) {
2529				struct dyld_uuid_info_64 *shared_cache_uuids = kcdata_iter_payload(iter);
2530				uint32_t uuid_count = kcdata_iter_array_elem_count(iter);
2531				T_ASSERT_NOTNULL(shared_cache_uuids, "parsed shared cache layout array");
2532				T_ASSERT_GT(uuid_count, 0, "returned valid number of UUIDs from shared cache");
2533				verify_stackshot_sharedcache_layout(shared_cache_uuids, uuid_count);
2534				found_shared_cache_layout = true;
2535			}
2536
2537			break;
2538		}
2539
2540		case KCDATA_TYPE_CONTAINER_BEGIN: {
2541			T_QUIET;
2542			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
2543					"checked that container is valid");
2544
2545			uint64_t containerid = kcdata_iter_container_id(iter);
2546			uint32_t container_type = kcdata_iter_container_type(iter);
2547
2548			if (container_type == STACKSHOT_KCCONTAINER_SHAREDCACHE) {
2549				NSDictionary *container = parseKCDataContainer(&iter, &error);
2550				T_QUIET; T_ASSERT_NOTNULL(container, "parsed sharedcache container from stackshot");
2551				T_QUIET; T_ASSERT_NULL(error, "error unset after parsing sharedcache container");
2552				T_QUIET; T_EXPECT_EQ(sharedCaches[@(containerid)], nil, "sharedcache containerid %lld should be unique", containerid);
2553				sharedCaches[@(containerid)] = container;
2554				break;
2555			}
2556
2557			/*
2558			 * treat containers other than tasks/transitioning_tasks
2559			 * as expanded in-line.
2560			 */
2561			if (container_type != STACKSHOT_KCCONTAINER_TASK &&
2562			    container_type != STACKSHOT_KCCONTAINER_TRANSITIONING_TASK) {
2563				break;
2564			}
2565			NSDictionary *container = parseKCDataContainer(&iter, &error);
2566			T_QUIET; T_ASSERT_NOTNULL(container, "parsed task/transitioning_task container from stackshot");
2567			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");
2568
2569			NSDictionary* task_snapshot = container[@"task_snapshots"][@"task_snapshot"];
2570			NSDictionary* task_delta_snapshot = container[@"task_snapshots"][@"task_delta_snapshot"];
2571			NSDictionary* transitioning_task_snapshot = container[@"transitioning_task_snapshots"][@"transitioning_task_snapshot"];
2572
2573			/*
2574			 * Having processed the container, we now only check it
2575			 * if it's the correct type.
2576			 */
2577			if ((!expect_transitioning_task && (container_type != STACKSHOT_KCCONTAINER_TASK)) ||
2578			    (expect_transitioning_task && (container_type != STACKSHOT_KCCONTAINER_TRANSITIONING_TASK))) {
2579				break;
2580			}
2581			if (!expect_transitioning_task) {
2582			    	T_QUIET; T_ASSERT_TRUE(!!task_snapshot != !!task_delta_snapshot, "Either task_snapshot xor task_delta_snapshot provided");
2583			}
2584
2585			if (expect_dispatch_queue_label && !found_dispatch_queue_label) {
2586				for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
2587					NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
2588					NSString *dql = thread[@"dispatch_queue_label"];
2589
2590					if ([dql isEqualToString:@TEST_STACKSHOT_QUEUE_LABEL]) {
2591						found_dispatch_queue_label = true;
2592						break;
2593					}
2594				}
2595			}
2596
2597			if (expect_transitioning_task && !found_transitioning_task) {
2598				if (transitioning_task_snapshot) {
2599					uint64_t the_pid = [transitioning_task_snapshot[@"tts_pid"] unsignedLongLongValue];
2600					if (the_pid == (uint64_t)transistioning_task_pid) {
2601					    found_transitioning_task = true;
2602					    T_PASS("FOUND Transitioning task %llu has a transitioning task snapshot", (uint64_t) transistioning_task_pid);
2603					    break;
2604					}
2605				}
2606			}
2607
2608			if (expect_postexec_child && !found_postexec_child) {
2609				if (task_snapshot) {
2610					uint64_t unique_pid = [task_snapshot[@"ts_unique_pid"] unsignedLongLongValue];
2611					if (unique_pid == postexec_child_unique_pid) {
2612						found_postexec_child = true;
2613
2614						T_PASS("post-exec child %llu has a task snapshot", postexec_child_unique_pid);
2615
2616						break;
2617					}
2618				}
2619
2620				if (task_delta_snapshot) {
2621					uint64_t unique_pid = [task_delta_snapshot[@"tds_unique_pid"] unsignedLongLongValue];
2622					if (unique_pid == postexec_child_unique_pid) {
2623						found_postexec_child = true;
2624
2625						T_FAIL("post-exec child %llu shouldn't have a delta task snapshot", postexec_child_unique_pid);
2626
2627						break;
2628					}
2629				}
2630			}
2631
2632			if (!task_snapshot) {
2633				break;
2634			}
2635
2636			int pid = [task_snapshot[@"ts_pid"] intValue];
2637
2638			if (pid && expect_shared_cache_uuid && !found_shared_cache_uuid) {
2639				id ptr = container[@"task_snapshots"][@"shared_cache_dyld_load_info"];
2640				if (ptr) {
2641					id uuid = ptr[@"imageUUID"];
2642
2643					uint8_t uuid_p[16];
2644					for (unsigned int i = 0; i < 16; i ++) {
2645						NSNumber *uuidByte = uuid[i];
2646						uuid_p[i] = (uint8_t)uuidByte.charValue;
2647					}
2648
2649					check_shared_cache_uuid(uuid_p);
2650
2651					uint64_t baseAddress = (uint64_t)((NSNumber *)ptr[@"imageSlidBaseAddress"]).longLongValue;
2652					uint64_t firstMapping = (uint64_t)((NSNumber *)ptr[@"sharedCacheSlidFirstMapping"]).longLongValue;
2653
2654					T_EXPECT_LE(baseAddress, firstMapping,
2655						"in per-task shared_cache_dyld_load_info, "
2656						"baseAddress <= firstMapping");
2657					T_EXPECT_GE(baseAddress + (7ull << 32) + (1ull << 29),
2658						firstMapping,
2659						"in per-task shared_cache_dyld_load_info, "
2660						"baseAddress + 28.5gig >= firstMapping");
2661
2662					size_t shared_cache_len;
2663					const void *addr = _dyld_get_shared_cache_range(&shared_cache_len);
2664					T_EXPECT_EQ((uint64_t)addr, firstMapping,
2665							"SlidFirstMapping should match shared_cache_range");
2666
2667					/*
2668					 * check_shared_cache_uuid() will assert on failure, so if
2669					 * we get here, then we have found the shared cache UUID
2670					 * and it's correct
2671					 */
2672					found_shared_cache_uuid = true;
2673				}
2674			}
2675
2676			if (expect_sharedcache_child) {
2677				uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2678				uint64_t sharedregion_flags = (task_flags & (kTaskSharedRegionNone | kTaskSharedRegionSystem | kTaskSharedRegionOther));
2679				id sharedregion_info = container[@"task_snapshots"][@"shared_cache_dyld_load_info"];
2680				id sharedcache_id = container[@"task_snapshots"][@"sharedCacheID"];
2681				if (!found_sharedcache_badflags) {
2682					T_QUIET; T_EXPECT_NE(sharedregion_flags, 0ll, "one of the kTaskSharedRegion flags should be set on all tasks");
2683					bool multiple = (sharedregion_flags & (sharedregion_flags - 1)) != 0;
2684					T_QUIET; T_EXPECT_FALSE(multiple, "only one kTaskSharedRegion flag should be set on each task");
2685					found_sharedcache_badflags = (sharedregion_flags == 0 || multiple);
2686				}
2687				if (pid == 0) {
2688					T_ASSERT_EQ(sharedregion_flags, (uint64_t)kTaskSharedRegionNone, "Kernel proc (pid 0) should have no shared region");
2689				} else if (pid == sharedcache_child_pid) {
2690					found_sharedcache_child = true;
2691					sharedcache_child_flags = sharedregion_flags;
2692				} else if (pid == getpid()) {
2693					found_sharedcache_self = true;
2694					sharedcache_self_flags = sharedregion_flags;
2695				}
2696				if (sharedregion_flags == kTaskSharedRegionOther && !(task_flags & kTaskSharedRegionInfoUnavailable)) {
2697					T_QUIET; T_EXPECT_NOTNULL(sharedregion_info, "kTaskSharedRegionOther should have a shared_cache_dyld_load_info struct");
2698					T_QUIET; T_EXPECT_NOTNULL(sharedcache_id, "kTaskSharedRegionOther should have a sharedCacheID");
2699					if (sharedcache_id != nil) {
2700						T_QUIET; T_EXPECT_NOTNULL(sharedCaches[sharedcache_id], "sharedCacheID %d should exist", [sharedcache_id intValue]);
2701					}
2702				} else {
2703					T_QUIET; T_EXPECT_NULL(sharedregion_info, "non-kTaskSharedRegionOther should have no shared_cache_dyld_load_info struct");
2704					T_QUIET; T_EXPECT_NULL(sharedcache_id, "non-kTaskSharedRegionOther should have no sharedCacheID");
2705				}
2706			}
2707
2708			if (expect_zombie_child && (pid == zombie_child_pid)) {
2709				found_zombie_child = true;
2710
2711				uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2712				T_ASSERT_TRUE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "child zombie marked as terminated");
2713
2714				continue;
2715			}
2716
2717			if (expect_translated_child && (pid == translated_child_pid)) {
2718				found_translated_child = true;
2719
2720				uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2721				T_EXPECT_BITS_SET(task_flags, kTaskIsTranslated, "child marked as translated");
2722
2723				continue;
2724			}
2725			if (expect_exec_inprogress && (pid == exec_inprogress_pid || pid == -exec_inprogress_pid)) {
2726				exec_inprogress_found++;
2727				T_LOG("found exec task with pid %d, instance %d", pid, exec_inprogress_found);
2728				T_QUIET; T_ASSERT_LE(exec_inprogress_found, 2, "no more than two with the expected pid");
2729				if (exec_inprogress_found == 2) {
2730					T_LOG("found 2 tasks with pid %d", exec_inprogress_pid);
2731					exec_inprogress_cb(containerid, exec_inprogress_containerid);
2732				} else {
2733					exec_inprogress_containerid = containerid;
2734				}
2735			}
2736			if (expect_driverkit && driverkit_cb != NULL) {
2737				driverkit_cb(pid);
2738			}
2739			if (expect_cseg_waitinfo) {
2740				NSArray *winfos = container[@"task_snapshots"][@"thread_waitinfo"];
2741
2742				for (id i in winfos) {
2743					NSNumber *waitType = i[@"wait_type"];
2744					NSNumber *owner = i[@"owner"];
2745					if (waitType.intValue == kThreadWaitCompressor &&
2746							owner.unsignedLongValue == cseg_expected_threadid) {
2747						found_cseg_waitinfo = true;
2748						break;
2749					}
2750				}
2751			}
2752
2753			if (expect_srp_waitinfo) {
2754				NSArray *tinfos = container[@"task_snapshots"][@"thread_turnstileinfo"];
2755				NSArray *winfos = container[@"task_snapshots"][@"thread_waitinfo"];
2756				for (id i in tinfos) {
2757					if (!found_srp_waitinfo) {
2758						bool found_thread = false;
2759						bool found_pid = false;
2760						if (([i[@"turnstile_flags"] intValue] & STACKSHOT_TURNSTILE_STATUS_THREAD) &&
2761						    [i[@"turnstile_context"] unsignedLongLongValue] == srp_expected_threadid &&
2762						    srp_expected_threadid != 0) {
2763							found_thread = true;
2764						}
2765						if (([i[@"turnstile_flags"] intValue] & STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK) &&
2766						    [i[@"turnstile_context"] intValue] == srp_expected_pid &&
2767						    srp_expected_pid != -1) {
2768							found_pid = true;
2769						}
2770						if (found_pid || found_thread) {
2771							T_LOG("found SRP %s %lld waiter: %d", (found_thread ? "thread" : "pid"),
2772							    [i[@"turnstile_context"] unsignedLongLongValue], [i[@"waiter"] intValue]);
2773							/* we found something that is blocking the correct threadid */
2774							for (id j in winfos) {
2775								if ([j[@"waiter"] intValue] == [i[@"waiter"] intValue] &&
2776								    [j[@"wait_type"] intValue] == kThreadWaitPortReceive) {
2777									found_srp_waitinfo = true;
2778									T_EXPECT_EQ([j[@"wait_flags"] intValue], STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY,
2779									    "SRP waitinfo should be marked as a special reply");
2780									break;
2781								}
2782							}
2783
2784							if (found_srp_waitinfo) {
2785								break;
2786							}
2787						}
2788					}
2789				}
2790			}
2791
2792			if (pid != getpid()) {
2793				break;
2794			}
2795
2796			T_EXPECT_EQ_STR(current_process_name(),
2797					[task_snapshot[@"ts_p_comm"] UTF8String],
2798					"current process name matches in stackshot");
2799
2800			uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2801			T_ASSERT_BITS_NOTSET(task_flags, kTerminatedSnapshot, "current process not marked as terminated");
2802			T_ASSERT_BITS_NOTSET(task_flags, kTaskIsTranslated, "current process not marked as translated");
2803
2804			T_QUIET;
2805			T_EXPECT_LE(pid, [task_snapshot[@"ts_unique_pid"] intValue],
2806					"unique pid is greater than pid");
2807
2808			NSDictionary* task_cpu_architecture = container[@"task_snapshots"][@"task_cpu_architecture"];
2809			T_QUIET; T_ASSERT_NOTNULL(task_cpu_architecture[@"cputype"], "have cputype");
2810			T_QUIET; T_ASSERT_NOTNULL(task_cpu_architecture[@"cpusubtype"], "have cputype");
2811			int cputype = [task_cpu_architecture[@"cputype"] intValue];
2812			int cpusubtype = [task_cpu_architecture[@"cpusubtype"] intValue];
2813
2814			struct proc_archinfo archinfo;
2815			int retval = proc_pidinfo(pid, PROC_PIDARCHINFO, 0, &archinfo, sizeof(archinfo));
2816			T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(retval, 0, "proc_pidinfo(PROC_PIDARCHINFO) returned a value > 0");
2817			T_QUIET; T_ASSERT_EQ(retval, (int)sizeof(struct proc_archinfo), "proc_pidinfo call for PROC_PIDARCHINFO returned expected size");
2818			T_QUIET; T_EXPECT_EQ(cputype, archinfo.p_cputype, "cpu type is correct");
2819			T_QUIET; T_EXPECT_EQ(cpusubtype, archinfo.p_cpusubtype, "cpu subtype is correct");
2820
2821			NSDictionary * codesigning_info = container[@"task_snapshots"][@"stackshot_task_codesigning_info"];
2822			T_QUIET; T_ASSERT_NOTNULL(codesigning_info[@"csflags"], "have csflags");
2823			uint64_t flags = [codesigning_info[@"csflags"] unsignedLongLongValue];
2824			T_QUIET; T_EXPECT_GT(flags, 0, "nonzero csflags");
2825
2826			T_QUIET; T_ASSERT_NOTNULL(container[@"task_snapshots"][@"jetsam_coalition"], "have jetsam coalition");
2827			uint64_t jetsam_coalition = [container[@"task_snapshots"][@"jetsam_coalition"] unsignedLongLongValue];
2828			T_QUIET; T_EXPECT_GT(jetsam_coalition, 0, "nonzero jetsam coalition");
2829
2830			bool found_main_thread = false;
2831			uint64_t main_thread_id = -1ULL;
2832			bool found_null_kernel_frame = false;
2833			for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
2834				NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
2835				NSDictionary *thread_snap = thread[@"thread_snapshot"];
2836
2837				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_thread_id"] intValue], 0,
2838						"thread ID of thread in current task is valid");
2839				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_base_priority"] intValue], 0,
2840						"base priority of thread in current task is valid");
2841				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_sched_priority"] intValue], 0,
2842						"scheduling priority of thread in current task is valid");
2843
2844				NSString *pth_name = thread[@"pth_name"];
2845				if (pth_name != nil && [pth_name isEqualToString:@TEST_THREAD_NAME]) {
2846					found_main_thread = true;
2847					main_thread_id = [thread_snap[@"ths_thread_id"] unsignedLongLongValue];
2848
2849					T_QUIET; T_EXPECT_GT([thread_snap[@"ths_total_syscalls"] intValue], 0,
2850							"total syscalls of current thread is valid");
2851
2852					NSDictionary *cpu_times = thread[@"cpu_times"];
2853					T_EXPECT_GE([cpu_times[@"runnable_time"] intValue],
2854							[cpu_times[@"system_time"] intValue] +
2855							[cpu_times[@"user_time"] intValue],
2856							"runnable time of current thread is valid");
2857				}
2858				if (!found_null_kernel_frame) {
2859					for (NSNumber *frame in thread[@"kernel_frames"]) {
2860						if (frame.unsignedLongValue == 0) {
2861							found_null_kernel_frame = true;
2862							break;
2863						}
2864					}
2865				}
2866				if (expect_asyncstack && !found_asyncstack &&
2867				    asyncstack_threadid == [thread_snap[@"ths_thread_id"] unsignedLongLongValue]) {
2868					found_asyncstack = true;
2869					NSArray* async_stack = thread[@"user_async_stack_frames"];
2870					NSNumber* start_idx = thread[@"user_async_start_index"];
2871					NSArray* user_stack = thread[@"user_stack_frames"];
2872					T_QUIET; T_ASSERT_NOTNULL(async_stack, "async thread %#llx has user_async_stack_frames", asyncstack_threadid);
2873					T_QUIET; T_ASSERT_NOTNULL(start_idx, "async thread %#llx has user_async_start_index", asyncstack_threadid);
2874					T_QUIET; T_ASSERT_NOTNULL(user_stack, "async thread %#llx has user_stack_frames", asyncstack_threadid);
2875					T_QUIET; T_ASSERT_EQ(async_stack.count, asyncstack_stack.count,
2876						"actual async_stack count == expected async_stack count");
2877					for (size_t i = 0; i < async_stack.count; i++) {
2878						T_EXPECT_EQ([async_stack[i][@"lr"] unsignedLongLongValue],
2879							[asyncstack_stack[i] unsignedLongLongValue], "frame %zu matches", i);
2880					}
2881				}
2882			}
2883			T_EXPECT_TRUE(found_main_thread, "found main thread for current task in stackshot");
2884			T_EXPECT_FALSE(found_null_kernel_frame, "should not see any NULL kernel frames");
2885
2886			if (expect_turnstile_lock && !found_turnstile_lock) {
2887				NSArray *tsinfos = container[@"task_snapshots"][@"thread_turnstileinfo"];
2888
2889				for (id i in tsinfos) {
2890					if ([i[@"turnstile_context"] unsignedLongLongValue] == main_thread_id) {
2891						found_turnstile_lock = true;
2892						break;
2893					}
2894				}
2895			}
2896			break;
2897		}
2898		case STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO: {
2899			// Legacy shared cache info
2900			struct dyld_shared_cache_loadinfo *payload = kcdata_iter_payload(iter);
2901			T_ASSERT_EQ((size_t)kcdata_iter_size(iter), sizeof(*payload), "valid dyld_shared_cache_loadinfo struct");
2902
2903			check_shared_cache_uuid(payload->sharedCacheUUID);
2904
2905			T_EXPECT_LE(payload->sharedCacheUnreliableSlidBaseAddress,
2906				payload->sharedCacheSlidFirstMapping,
2907				"SlidBaseAddress <= SlidFirstMapping");
2908			T_EXPECT_GE(payload->sharedCacheUnreliableSlidBaseAddress + (7ull << 32) + (1ull << 29),
2909				payload->sharedCacheSlidFirstMapping,
2910				"SlidFirstMapping should be within 28.5gigs of SlidBaseAddress");
2911
2912			size_t shared_cache_len;
2913			const void *addr = _dyld_get_shared_cache_range(&shared_cache_len);
2914			T_EXPECT_EQ((uint64_t)addr, payload->sharedCacheSlidFirstMapping,
2915			    "SlidFirstMapping should match shared_cache_range");
2916
2917			/*
2918			 * check_shared_cache_uuid() asserts on failure, so we must have
2919			 * found the shared cache UUID to be correct.
2920			 */
2921			found_shared_cache_uuid = true;
2922			break;
2923		}
2924		}
2925	}
2926
2927	if (expect_sharedcache_child) {
2928		T_QUIET; T_ASSERT_TRUE(found_sharedcache_child, "found sharedcache child in kcdata");
2929		T_QUIET; T_ASSERT_TRUE(found_sharedcache_self, "found self in kcdata");
2930		if (found_sharedcache_child && found_sharedcache_self) {
2931			T_QUIET; T_ASSERT_NE(sharedcache_child_flags, (uint64_t)kTaskSharedRegionNone, "sharedcache child should have shared region");
2932			T_QUIET; T_ASSERT_NE(sharedcache_self_flags, (uint64_t)kTaskSharedRegionNone, "sharedcache: self should have shared region");
2933			if (sharedcache_self_flags == kTaskSharedRegionSystem && !sharedcache_child_sameaddr) {
2934				/* If we're in the system shared region, and the child has a different address, child must have an Other shared region */
2935				T_ASSERT_EQ(sharedcache_child_flags, (uint64_t)kTaskSharedRegionOther,
2936				    "sharedcache child should have Other shared region");
2937			}
2938		}
2939	}
2940
2941	if (expect_transitioning_task) {
2942		T_QUIET; T_ASSERT_TRUE(found_transitioning_task, "found transitioning_task child in kcdata");
2943	}
2944
2945	if (expect_exec_inprogress) {
2946		T_QUIET; T_ASSERT_GT(exec_inprogress_found, 0, "found at least 1 task for execing process");
2947	}
2948
2949	if (expect_zombie_child) {
2950		T_QUIET; T_ASSERT_TRUE(found_zombie_child, "found zombie child in kcdata");
2951	}
2952
2953	if (expect_postexec_child) {
2954		T_QUIET; T_ASSERT_TRUE(found_postexec_child, "found post-exec child in kcdata");
2955	}
2956
2957	if (expect_translated_child) {
2958		T_QUIET; T_ASSERT_TRUE(found_translated_child, "found translated child in kcdata");
2959	}
2960
2961	if (expect_shared_cache_layout) {
2962		T_QUIET; T_ASSERT_TRUE(found_shared_cache_layout, "shared cache layout found in kcdata");
2963	}
2964
2965	if (expect_shared_cache_uuid) {
2966		T_QUIET; T_ASSERT_TRUE(found_shared_cache_uuid, "shared cache UUID found in kcdata");
2967	}
2968
2969	if (expect_dispatch_queue_label) {
2970		T_QUIET; T_ASSERT_TRUE(found_dispatch_queue_label, "dispatch queue label found in kcdata");
2971	}
2972
2973	if (expect_turnstile_lock) {
2974		T_QUIET; T_ASSERT_TRUE(found_turnstile_lock, "found expected deadlock");
2975	}
2976
2977	if (expect_cseg_waitinfo) {
2978		T_QUIET; T_ASSERT_TRUE(found_cseg_waitinfo, "found c_seg waitinfo");
2979	}
2980
2981	if (expect_srp_waitinfo) {
2982		T_QUIET; T_ASSERT_TRUE(found_srp_waitinfo, "found special reply port waitinfo");
2983	}
2984
2985	if (expect_asyncstack) {
2986		T_QUIET; T_ASSERT_TRUE(found_asyncstack, "found async stack threadid");
2987	}
2988
2989	T_ASSERT_FALSE(KCDATA_ITER_FOREACH_FAILED(iter), "successfully iterated kcdata");
2990
2991	free(inflatedBufferBase);
2992}
2993
2994static const char *
2995current_process_name(void)
2996{
2997	static char name[64];
2998
2999	if (!name[0]) {
3000		int ret = proc_name(getpid(), name, sizeof(name));
3001		T_QUIET;
3002		T_ASSERT_POSIX_SUCCESS(ret, "proc_name failed for current process");
3003	}
3004
3005	return name;
3006}
3007
3008static void
3009initialize_thread(void)
3010{
3011	int ret = pthread_setname_np(TEST_THREAD_NAME);
3012	T_QUIET;
3013	T_ASSERT_POSIX_ZERO(ret, "set thread name to %s", TEST_THREAD_NAME);
3014}
3015