xref: /xnu-8020.140.41/tests/stackshot_tests.m (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1#include <darwintest.h>
2#include <darwintest_utils.h>
3#include <darwintest_multiprocess.h>
4#include <kern/debug.h>
5#include <kern/kern_cdata.h>
6#include <kern/block_hint.h>
7#include <kdd.h>
8#include <libproc.h>
9#include <mach-o/dyld.h>
10#include <mach-o/dyld_images.h>
11#include <mach-o/dyld_priv.h>
12#include <sys/syscall.h>
13#include <sys/stackshot.h>
14#include <uuid/uuid.h>
15#include <servers/bootstrap.h>
16#include <pthread/workqueue_private.h>
17#include <dispatch/private.h>
18#include <stdalign.h>
19#import <zlib.h>
20
21T_GLOBAL_META(
22		T_META_NAMESPACE("xnu.stackshot"),
23		T_META_RADAR_COMPONENT_NAME("xnu"),
24		T_META_RADAR_COMPONENT_VERSION("stackshot"),
25		T_META_OWNER("jonathan_w_adams"),
26		T_META_CHECK_LEAKS(false),
27		T_META_ASROOT(true)
28		);
29
30static const char *current_process_name(void);
31static void verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count);
32static void parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, NSDictionary *extra);
33static void parse_thread_group_stackshot(void **sbuf, size_t sslen);
34static uint64_t stackshot_timestamp(void *ssbuf, size_t sslen);
35static void initialize_thread(void);
36
37static uint64_t global_flags = 0;
38
39#define DEFAULT_STACKSHOT_BUFFER_SIZE (1024 * 1024)
40#define MAX_STACKSHOT_BUFFER_SIZE     (6 * 1024 * 1024)
41
42#define SRP_SERVICE_NAME "com.apple.xnu.test.stackshot.special_reply_port"
43
44/* bit flags for parse_stackshot */
45#define PARSE_STACKSHOT_DELTA                0x01
46#define PARSE_STACKSHOT_ZOMBIE               0x02
47#define PARSE_STACKSHOT_SHAREDCACHE_LAYOUT   0x04
48#define PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL 0x08
49#define PARSE_STACKSHOT_TURNSTILEINFO        0x10
50#define PARSE_STACKSHOT_POSTEXEC             0x20
51#define PARSE_STACKSHOT_WAITINFO_CSEG        0x40
52#define PARSE_STACKSHOT_WAITINFO_SRP         0x80
53#define PARSE_STACKSHOT_TRANSLATED           0x100
54#define PARSE_STACKSHOT_SHAREDCACHE_FLAGS    0x200
55#define PARSE_STACKSHOT_EXEC_INPROGRESS      0x400
56#define PARSE_STACKSHOT_TRANSITIONING        0x800
57#define PARSE_STACKSHOT_ASYNCSTACK           0x1000
58
59/* keys for 'extra' dictionary for parse_stackshot */
60static const NSString* zombie_child_pid_key = @"zombie_child_pid"; // -> @(pid), required for PARSE_STACKSHOT_ZOMBIE
61static const NSString* postexec_child_unique_pid_key = @"postexec_child_unique_pid";  // -> @(unique_pid), required for PARSE_STACKSHOT_POSTEXEC
62static const NSString* cseg_expected_threadid_key = @"cseg_expected_threadid"; // -> @(tid), required for PARSE_STACKSHOT_WAITINFO_CSEG
63static const NSString* srp_expected_threadid_key = @"srp_expected_threadid"; // -> @(tid), this or ..._pid required for PARSE_STACKSHOT_WAITINFO_SRP
64static const NSString* srp_expected_pid_key = @"srp_expected_pid"; // -> @(pid), this or ..._threadid required for PARSE_STACKSHOT_WAITINFO_SRP
65static const NSString* translated_child_pid_key = @"translated_child_pid"; // -> @(pid), required for PARSE_STACKSHOT_TRANSLATED
66static const NSString* sharedcache_child_pid_key = @"sharedcache_child_pid"; // @(pid), required for PARSE_STACKSHOT_SHAREDCACHE_FLAGS
67static const NSString* sharedcache_child_sameaddr_key = @"sharedcache_child_sameaddr"; // @(0 or 1), required for PARSE_STACKSHOT_SHAREDCACHE_FLAGS
68static const NSString* exec_inprogress_pid_key = @"exec_inprogress_pid";
69static const NSString* exec_inprogress_found_key = @"exec_inprogress_found";  // callback when inprogress is found
70static const NSString* transitioning_pid_key = @"transitioning_task_pid"; // -> @(pid), required for PARSE_STACKSHOT_TRANSITIONING
71static const NSString* asyncstack_expected_threadid_key = @"asyncstack_expected_threadid"; // -> @(tid), required for PARSE_STACKSHOT_ASYNCSTACK
72static const NSString* asyncstack_expected_stack_key = @"asyncstack_expected_stack"; // -> @[pc...]), expected PCs for asyncstack
73
74#define TEST_STACKSHOT_QUEUE_LABEL        "houston.we.had.a.problem"
75#define TEST_STACKSHOT_QUEUE_LABEL_LENGTH sizeof(TEST_STACKSHOT_QUEUE_LABEL)
76
/*
 * Exercise the microstackshot syscall: grow the buffer until the sample
 * fits, then verify the buffer starts with the microstackshot magic value.
 */
T_DECL(microstackshots, "test the microstackshot syscall")
{
	void *buf = NULL;
	unsigned int size = DEFAULT_STACKSHOT_BUFFER_SIZE;

	while (1) {
		buf = malloc(size);
		T_QUIET; T_ASSERT_NOTNULL(buf, "allocated stackshot buffer");

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		int len = syscall(SYS_microstackshot, buf, size,
				(uint32_t) STACKSHOT_GET_MICROSTACKSHOT);
#pragma clang diagnostic pop
		/*
		 * syscall(2) reports failure by returning -1 and setting errno;
		 * check errno for ENOSYS rather than the return value (matches
		 * the ENOSPC check below).
		 */
		if (len == -1 && errno == ENOSYS) {
			T_SKIP("microstackshot syscall failed, likely not compiled with CONFIG_TELEMETRY");
		}
		if (len == -1 && errno == ENOSPC) {
			/* syscall failed because buffer wasn't large enough, try again */
			free(buf);
			buf = NULL;
			size *= 2;
			T_ASSERT_LE(size, (unsigned int)MAX_STACKSHOT_BUFFER_SIZE,
					"growing stackshot buffer to sane size");
			continue;
		}
		T_ASSERT_POSIX_SUCCESS(len, "called microstackshot syscall");
		break;
	}

	T_EXPECT_EQ(*(uint32_t *)buf,
			(uint32_t)STACKSHOT_MICRO_SNAPSHOT_MAGIC,
			"magic value for microstackshot matches");

	free(buf);
}
113
/*
 * Describes one stackshot capture for take_stackshot().
 */
struct scenario {
	const char *name;	/* basename for the ".kcdata" result file; NULL skips writing */
	uint64_t flags;		/* STACKSHOT_* flags passed to stackshot_config_set_flags() */
	bool quiet;		/* suppress per-step logging (see quiet()) */
	bool should_fail;	/* expect stackshot_capture_with_config() to fail */
	bool maybe_unsupported;	/* ENOTSUP from the capture skips the test instead of failing */
	bool maybe_enomem;	/* ENOMEM from the capture skips the test instead of failing */
	bool no_recordfile;	/* don't write the result file even if name is set */
	pid_t target_pid;	/* if > 0, restrict the stackshot to this pid */
	uint64_t since_timestamp;	/* if > 0, take a delta stackshot since this timestamp */
	uint32_t size_hint;	/* if > 0, initial buffer size hint for the config */
	dt_stat_time_t timer;	/* if set, accumulate capture latency (perf runs; also quiets logs) */
};
127
/*
 * Emit T_QUIET ahead of the caller's next darwintest check when this
 * scenario is being timed (perf run) or explicitly marked quiet.
 */
static void
quiet(struct scenario *scenario)
{
	if (scenario->quiet || scenario->timer) {
		T_QUIET;
	}
}
135
/*
 * Capture a stackshot as described by `scenario` and hand the kcdata buffer
 * to `cb` for validation.
 *
 * - Retries up to 5 times on EBUSY/ETIMEDOUT.
 * - Honors should_fail / maybe_unsupported / maybe_enomem expectations.
 * - Writes the buffer to "<name>.kcdata" as a test result file unless
 *   no_recordfile is set or name is NULL.
 * - If `compress_ok` is true, the entire capture (and callback) is repeated
 *   once more with STACKSHOT_DO_COMPRESS set via `global_flags`, so callers
 *   exercise both the uncompressed and compressed paths.
 */
static void
take_stackshot(struct scenario *scenario, bool compress_ok, void (^cb)(void *buf, size_t size))
{
start:
	initialize_thread();

	void *config = stackshot_config_create();
	quiet(scenario);
	T_ASSERT_NOTNULL(config, "created stackshot config");

	int ret = stackshot_config_set_flags(config, scenario->flags | global_flags);
	quiet(scenario);
	T_ASSERT_POSIX_ZERO(ret, "set flags %#llx on stackshot config", scenario->flags);

	if (scenario->size_hint > 0) {
		ret = stackshot_config_set_size_hint(config, scenario->size_hint);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set size hint %" PRIu32 " on stackshot config",
				scenario->size_hint);
	}

	if (scenario->target_pid > 0) {
		ret = stackshot_config_set_pid(config, scenario->target_pid);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set target pid %d on stackshot config",
				scenario->target_pid);
	}

	if (scenario->since_timestamp > 0) {
		ret = stackshot_config_set_delta_timestamp(config, scenario->since_timestamp);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set since timestamp %" PRIu64 " on stackshot config",
				scenario->since_timestamp);
	}

	int retries_remaining = 5;

retry: ;
	uint64_t start_time = mach_absolute_time();
	ret = stackshot_capture_with_config(config);
	uint64_t end_time = mach_absolute_time();

	if (scenario->should_fail) {
		T_EXPECTFAIL;
		T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
		/* don't leak the config on this early-return path */
		(void)stackshot_config_dealloc(config);
		return;
	}

	if (ret == EBUSY || ret == ETIMEDOUT) {
		if (retries_remaining > 0) {
			if (!scenario->timer) {
				T_LOG("stackshot_capture_with_config failed with %s (%d), retrying",
						strerror(ret), ret);
			}

			retries_remaining--;
			goto retry;
		} else {
			T_ASSERT_POSIX_ZERO(ret,
					"called stackshot_capture_with_config (no retries remaining)");
		}
	} else if ((ret == ENOTSUP) && scenario->maybe_unsupported) {
		T_SKIP("kernel indicated this stackshot configuration is not supported");
	} else if ((ret == ENOMEM) && scenario->maybe_enomem) {
		T_SKIP("insufficient available memory to run test");
	} else {
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
	}

	if (scenario->timer) {
		dt_stat_mach_time_add(scenario->timer, end_time - start_time);
	}
	void *buf = stackshot_config_get_stackshot_buffer(config);
	size_t size = stackshot_config_get_stackshot_size(config);
	if (scenario->name && !scenario->no_recordfile) {
		char sspath[MAXPATHLEN];
		strlcpy(sspath, scenario->name, sizeof(sspath));
		strlcat(sspath, ".kcdata", sizeof(sspath));
		T_QUIET; T_ASSERT_POSIX_ZERO(dt_resultfile(sspath, sizeof(sspath)),
				"create result file path");

		if (!scenario->quiet) {
			T_LOG("writing stackshot to %s", sspath);
		}

		FILE *f = fopen(sspath, "w");
		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(f,
				"open stackshot output file");

		/*
		 * NOTE(review): fwrite returns the item count (0 on failure), so
		 * T_ASSERT_POSIX_SUCCESS can't actually detect a short write here;
		 * asserting written == 1 would be stricter.
		 */
		size_t written = fwrite(buf, size, 1, f);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(written, "wrote stackshot to file");

		fclose(f);
	}
	cb(buf, size);
	if (compress_ok) {
		if (global_flags == 0) {
			T_LOG("Restarting test with compression");
			global_flags |= STACKSHOT_DO_COMPRESS;
			/* release this config before `start` creates a fresh one */
			T_QUIET; T_EXPECT_POSIX_ZERO(stackshot_config_dealloc(config),
					"deallocated stackshot config");
			goto start;
		} else {
			global_flags = 0;
		}
	}

	ret = stackshot_config_dealloc(config);
	T_QUIET; T_EXPECT_POSIX_ZERO(ret, "deallocated stackshot config");
}
245
T_DECL(simple_compressed, "take a simple compressed stackshot")
{
	/* same flag set as the `kcdata` test, plus compression and waitinfo */
	const uint64_t flags = STACKSHOT_DO_COMPRESS | STACKSHOT_SAVE_LOADINFO |
			STACKSHOT_THREAD_WAITINFO | STACKSHOT_GET_GLOBAL_MEM_STATS |
			STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT;
	struct scenario scenario = {
		.name = "kcdata_compressed",
		.flags = flags,
	};

	T_LOG("taking compressed kcdata stackshot");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
259
T_DECL(panic_compressed, "take a compressed stackshot with the same flags as a panic stackshot")
{
	/* mirror the flag set the kernel uses when capturing a panic stackshot */
	struct scenario scenario = {
		.name = "kcdata_panic_compressed",
		.flags = (STACKSHOT_SAVE_KEXT_LOADINFO |
				STACKSHOT_SAVE_LOADINFO |
				STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_ENABLE_BT_FAULTING |
				STACKSHOT_ENABLE_UUID_FAULTING |
				STACKSHOT_DO_COMPRESS |
				STACKSHOT_NO_IO_STATS |
				STACKSHOT_THREAD_WAITINFO |
#if TARGET_OS_MAC
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT |
#endif
				STACKSHOT_DISABLE_LATENCY_INFO),
	};

	T_LOG("taking compressed kcdata stackshot with panic flags");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
285
T_DECL(kcdata, "test that kcdata stackshots can be taken and parsed")
{
	const uint64_t flags = STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS |
			STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT;
	struct scenario scenario = {
		.name = "kcdata",
		.flags = flags,
	};

	T_LOG("taking kcdata stackshot");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
299
T_DECL(kcdata_faulting, "test that kcdata stackshots while faulting can be taken and parsed")
{
	/* same as `kcdata`, with backtrace/UUID faulting enabled */
	const uint64_t flags = STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS |
			STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT |
			STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING;
	struct scenario scenario = {
		.name = "faulting",
		.flags = flags,
	};

	T_LOG("taking faulting stackshot");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
	});
}
314
T_DECL(bad_flags, "test a poorly-formed stackshot syscall")
{
	/* STACKSHOT_SAVE_IN_KERNEL_BUFFER is not allowed from user space */
	struct scenario scenario = {
		.flags = STACKSHOT_SAVE_IN_KERNEL_BUFFER,
		.should_fail = true,
	};

	T_LOG("attempting to take stackshot with kernel-only flag");
	take_stackshot(&scenario, true, ^(__unused void *ssbuf, __unused size_t sslen) {
		T_ASSERT_FAIL("stackshot data callback called");
	});
}
327
T_DECL(delta, "test delta stackshots")
{
	/* take a full stackshot, then a delta stackshot relative to its timestamp */
	struct scenario scenario = {
		.name = "delta",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("taking full stackshot");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		uint64_t full_time = stackshot_timestamp(ssbuf, sslen);

		T_LOG("taking delta stackshot since time %" PRIu64, full_time);

		parse_stackshot(0, ssbuf, sslen, nil);

		struct scenario delta_scenario = {
			.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
					| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
					| STACKSHOT_COLLECT_DELTA_SNAPSHOT),
			.since_timestamp = full_time
		};

		take_stackshot(&delta_scenario, false, ^(void *delta_buf, size_t delta_len) {
			parse_stackshot(PARSE_STACKSHOT_DELTA, delta_buf, delta_len, nil);
		});
	});
}
356
T_DECL(shared_cache_layout, "test stackshot inclusion of shared cache layout")
{
	/* bail out early on configurations without a usable shared cache */
	size_t shared_cache_length;
	const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);
	if (cache_header == NULL) {
		T_SKIP("Device not running with shared cache, skipping test...");
	}
	if (shared_cache_length == 0) {
		T_SKIP("dyld reports that currently running shared cache has zero length");
	}

	struct scenario scenario = {
		.name = "shared_cache_layout",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT),
	};

	T_LOG("taking stackshot with STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT set");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_LAYOUT, ssbuf, sslen, nil);
	});
}
381
T_DECL(stress, "test that taking stackshots for 60 seconds doesn't crash the system")
{
	const uint64_t duration_ns = 60ULL /* seconds */ * 1000000000ULL;

	struct scenario scenario = {
		.name = "stress",
		.quiet = true,
		.flags = (STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_THREAD_WAITINFO |
				STACKSHOT_SAVE_LOADINFO |
				STACKSHOT_SAVE_KEXT_LOADINFO |
				STACKSHOT_GET_GLOBAL_MEM_STATS |
				STACKSHOT_SAVE_IMP_DONATION_PIDS |
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT |
				STACKSHOT_THREAD_GROUP |
				STACKSHOT_SAVE_JETSAM_COALITIONS |
				STACKSHOT_ASID |
				0),
	};

	/* take stackshots back-to-back until the deadline passes */
	const uint64_t deadline = clock_gettime_nsec_np(CLOCK_MONOTONIC) + duration_ns;
	while (clock_gettime_nsec_np(CLOCK_MONOTONIC) < deadline) {
		take_stackshot(&scenario, false, ^(void * __unused ssbuf,
				size_t __unused sslen) {
			printf(".");
			fflush(stdout);
		});

		/*
		 * After the first stackshot, there's no point in continuing to
		 * write them to disk, and it wears down the SSDs.
		 */
		scenario.no_recordfile = true;

		/* Leave some time for the testing infrastructure to catch up */
		usleep(10000);
	}
	printf("\n");
}
423
T_DECL(dispatch_queue_label, "test that kcdata stackshots contain libdispatch queue labels")
{
#if TARGET_OS_WATCH
	T_SKIP("This test is flaky on watches: 51663346");
#endif

	struct scenario scenario = {
		.name = "kcdata",
		.flags = (STACKSHOT_GET_DQ | STACKSHOT_KCDATA_FORMAT),
	};

	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "dqlabel child semaphore");

	dispatch_semaphore_t parent_done_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(parent_done_sem, "dqlabel parent semaphore");

	dispatch_queue_t dq = dispatch_queue_create(TEST_STACKSHOT_QUEUE_LABEL, NULL);
	T_QUIET; T_ASSERT_NOTNULL(dq, "dispatch queue");

	/* start the helper thread */
	dispatch_async(dq, ^{
		dispatch_semaphore_signal(child_ready_sem);
		dispatch_semaphore_wait(parent_done_sem, DISPATCH_TIME_FOREVER);
	});

	/* block behind the child starting up */
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("taking kcdata stackshot with libdispatch queue labels");
	take_stackshot(&scenario, true, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL, ssbuf, sslen, nil);
	});

	dispatch_semaphore_signal(parent_done_sem);
}
463
464#define CACHEADDR_ENV "STACKSHOT_TEST_DYLDADDR"
/*
 * Helper: read the parent's shared-cache base address from the environment,
 * compare it against our own, signal the parent (SIGUSR2 = same address,
 * SIGUSR1 = different address), then wait to be killed.
 */
T_HELPER_DECL(spawn_reslide_child, "child process to spawn with alternate slide")
{
	size_t cache_len;
	const void *cur_addr;
	uintmax_t parsed;
	char *parse_end = NULL;

	const char *cacheaddr_env = getenv(CACHEADDR_ENV);
	T_QUIET; T_ASSERT_NOTNULL(cacheaddr_env, "getenv("CACHEADDR_ENV")");
	errno = 0;
	parsed = strtoumax(cacheaddr_env, &parse_end, 16);	/* read hex value */
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NE(parsed, 0l, "getenv(%s) = \"%s\" should be a non-zero hex number", CACHEADDR_ENV, cacheaddr_env);
	T_QUIET; T_ASSERT_EQ(*parse_end, 0, "getenv(%s) = \"%s\" endptr \"%s\" should be empty", CACHEADDR_ENV, cacheaddr_env, parse_end);

	const void *parent_addr = (const void *)parsed;
	cur_addr = _dyld_get_shared_cache_range(&cache_len);
	T_QUIET; T_ASSERT_NOTNULL(cur_addr, "shared cache address");

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(getppid(), (cur_addr == parent_addr) ? SIGUSR2 : SIGUSR1), "signaled parent to take stackshot");
	for (;;) {
		(void) pause();		/* parent will kill -9 us */
	}
}
489
/*
 * Spawn a child with _POSIX_SPAWN_RESLIDE, let it report (via signal)
 * whether its shared cache landed at the same address as ours, then verify
 * the shared-cache task_ss_flags in a stackshot of the child.
 */
T_DECL(shared_cache_flags, "tests stackshot's task_ss_flags for the shared cache")
{
	posix_spawnattr_t		attr;
	char *env_addr;
	char path[PATH_MAX];
	__block bool child_same_addr = false;

	uint32_t path_size = sizeof(path);
	T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *args[] = { path, "-n", "spawn_reslide_child", NULL };
	pid_t pid;
	size_t shared_cache_len;
	const void *addr;

	dispatch_source_t child_diffsig_src, child_samesig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "shared_cache child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	/*
	 * Protocol (see spawn_reslide_child): the child sends SIGUSR2 when its
	 * shared-cache address matches ours, SIGUSR1 when it differs.
	 *
	 * NOTE(review): the source names look swapped relative to that protocol
	 * ("samesig" listens for SIGUSR1, the *different*-address signal), but
	 * the handlers below set child_same_addr to the correct value for each
	 * signal, so the behavior is right — only the names are misleading.
	 */
	signal(SIGUSR1, SIG_IGN);
	signal(SIGUSR2, SIG_IGN);
	child_samesig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_samesig_src, "dispatch_source_create (child_samesig_src)");
	child_diffsig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR2, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_diffsig_src, "dispatch_source_create (child_diffsig_src)");

	/* child will signal us depending on if their addr is the same or different */
	dispatch_source_set_event_handler(child_samesig_src, ^{ child_same_addr = false; dispatch_semaphore_signal(child_ready_sem); });
	dispatch_source_set_event_handler(child_diffsig_src, ^{ child_same_addr = true; dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_samesig_src);
	dispatch_activate(child_diffsig_src);

	/* publish our shared-cache base address for the child to compare against */
	addr = _dyld_get_shared_cache_range(&shared_cache_len);
	T_QUIET; T_ASSERT_NOTNULL(addr, "shared cache address");

	T_QUIET; T_ASSERT_POSIX_SUCCESS(asprintf(&env_addr, "%p", addr), "asprintf of env_addr succeeded");
	T_QUIET; T_ASSERT_POSIX_SUCCESS(setenv(CACHEADDR_ENV, env_addr, true), "setting "CACHEADDR_ENV" to %s", env_addr);

	/* _POSIX_SPAWN_RESLIDE asks for a freshly slid shared cache in the child */
	T_QUIET; T_ASSERT_POSIX_ZERO(posix_spawnattr_init(&attr), "posix_spawnattr_init");
	T_QUIET; T_ASSERT_POSIX_ZERO(posix_spawnattr_setflags(&attr, _POSIX_SPAWN_RESLIDE), "posix_spawnattr_setflags");
	int sp_ret = posix_spawn(&pid, path, NULL, &attr, args, environ);
	T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);
	T_LOG("received signal from child (%s), capturing stackshot", child_same_addr ? "same shared cache addr" : "different shared cache addr");

	struct scenario scenario = {
		.name = "shared_cache_flags",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
		int status;
		/* First kill the child so we can reap it */
		T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "killing spawned process");
		T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on spawned child");
		T_QUIET; T_ASSERT_EQ(!!WIFSIGNALED(status), 1, "waitpid status should be signalled");
		T_QUIET; T_ASSERT_EQ(WTERMSIG(status), SIGKILL, "waitpid status should be SIGKILLed");

		parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_FLAGS, ssbuf, sslen,
			@{sharedcache_child_pid_key: @(pid), sharedcache_child_sameaddr_key: @(child_same_addr ? 1 : 0)});
	});
}
557
/*
 * For each proc_exit() spin position supported by the kernel (walked via the
 * debug.proc_exit_lpexit_spin_pos sysctl), park a killed child spinning in
 * proc_exit() and verify a stackshot reports it as a transitioning task.
 * Requires the enable_proc_exit_lpexit_spin boot-arg (see T_META above).
 */
T_DECL(transitioning_tasks, "test that stackshot contains transitioning task info", T_META_BOOTARGS_SET("enable_proc_exit_lpexit_spin=1"))
{
    int32_t sysctlValue = -1, numAttempts = 0;
    char path[PATH_MAX];
    uint32_t path_size = sizeof(path);
    T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
    char *args[] = { path, "-n", "exec_child_preexec", NULL };

    dispatch_source_t child_sig_src;
    dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
    T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore");

    dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
    T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

    pid_t pid;

    signal(SIGUSR1, SIG_IGN);
    child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
    T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

    dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
    dispatch_activate(child_sig_src);

    /* clear any stale spin pid before starting */
    T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spin_pid", NULL, NULL, &sysctlValue, sizeof(sysctlValue)), "set debug.proc_exit_lpexit_spin_pid=-1");

    int proc_exit_spin_pos = 0;

    /* iterate until the kernel rejects the next spin position */
    while (0 == sysctlbyname("debug.proc_exit_lpexit_spin_pos", NULL, NULL, &proc_exit_spin_pos, sizeof(proc_exit_spin_pos))) {

        T_LOG(" ##### Testing while spinning in proc_exit at position %d ##### ", proc_exit_spin_pos);

        int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
        T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

        dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

        struct proc_uniqidentifierinfo proc_info_data = { };
        int retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
        T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
        T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");

        T_ASSERT_POSIX_SUCCESS(kill(pid, SIGUSR1), "signaled pre-exec child to exec");

        dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

        /* arrange for the post-exec child to spin in proc_exit(), then kill it */
        T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spin_pid", NULL, NULL, &pid, sizeof(pid)), "set debug.proc_exit_lpexit_spin_pid =  %d, ", pid);

        T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "kill post-exec child %d", pid);

        /*
         * Poll for the child to reach the spin point.  Reset the retry
         * budget every iteration: previously `numAttempts` carried over
         * between spin positions, leaving later positions with fewer (or
         * zero) retries.
         */
        sysctlValue = 0;
        numAttempts = 0;
        size_t len = sizeof(sysctlValue);
        while (numAttempts < 5) {
            T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spinning", &sysctlValue, &len, NULL, 0), "retrieve debug.proc_exit_lpexit_spinning");
            if (sysctlValue != 1) numAttempts++;
            else break;
            sleep(1);
        }

        T_ASSERT_EQ_UINT(sysctlValue, 1, "find spinning task in proc_exit()");

        struct scenario scenario = {
            .name = "transitioning_tasks",
            .flags = (STACKSHOT_KCDATA_FORMAT)
        };

        take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
            parse_stackshot(PARSE_STACKSHOT_TRANSITIONING, ssbuf, sslen, @{transitioning_pid_key: @(pid)});

            // Kill the child
            int sysctlValueB = -1;
            T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spin_pid", NULL, NULL, &sysctlValueB, sizeof(sysctlValueB)), "set debug.proc_exit_lpexit_spin_pid=-1");
            sleep(1);
            size_t blen = sizeof(sysctlValueB);
            T_ASSERT_POSIX_SUCCESS(sysctlbyname("debug.proc_exit_lpexit_spinning", &sysctlValueB, &blen, NULL, 0), "retrieve debug.proc_exit_lpexit_spinning");
            T_ASSERT_EQ_UINT(sysctlValueB, 0, "make sure nothing is spining in proc_exit()");
            int status;
            T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on post-exec child");
        });

        proc_exit_spin_pos++;
    }

}
642
/*
 * Thread body: signal the semaphore passed via `arg`, then block inside the
 * kern.wedge_thread sysctl so this thread is stuck in the kernel.
 */
static void *
stuck_sysctl_thread(void *arg)
{
	dispatch_semaphore_t started_sem = *(dispatch_semaphore_t *)arg;
	int one = 1;

	dispatch_semaphore_signal(started_sem);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.wedge_thread", NULL, NULL, &one, sizeof(one)), "wedge child thread");

	return NULL;
}
652
/*
 * Helper: wedge a secondary thread in the kernel, tell the parent we're
 * ready, then exit so this process becomes a zombie with a kernel-stuck
 * thread for the parent to stackshot.
 */
T_HELPER_DECL(zombie_child, "child process to sample as a zombie")
{
	pthread_t wedged_thread;
	dispatch_semaphore_t child_thread_started = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_thread_started, "zombie child thread semaphore");

	/* spawn another thread to get stuck in the kernel, then call exit() to become a zombie */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&wedged_thread, NULL, stuck_sysctl_thread, &child_thread_started), "pthread_create");

	dispatch_semaphore_wait(child_thread_started, DISPATCH_TIME_FOREVER);

	/* sleep for a bit in the hope of ensuring that the other thread has called the sysctl before we signal the parent */
	usleep(100);
	T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take stackshot");

	exit(0);
}
670
/*
 * Spawn the zombie_child helper, wait for it to enter exit() with a thread
 * wedged in the kernel, take a stackshot, then unwedge and reap the child
 * and verify the zombie appears in the stackshot.
 */
T_DECL(zombie, "tests a stackshot of a zombie task with a thread stuck in the kernel")
{
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *args[] = { path, "-n", "zombie_child", NULL };

	dispatch_source_t child_sig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "zombie child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	pid_t pid;

	T_LOG("spawning a child");

	/* the child raises SIGUSR1 once its sysctl-wedged thread is in place */
	signal(SIGUSR1, SIG_IGN);
	child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

	dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_sig_src);

	int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("received signal from child, capturing stackshot");

	struct proc_bsdshortinfo bsdshortinfo;
	int retval, iterations_to_wait = 10;

	/*
	 * Poll until the child is visibly exiting: either proc_pidinfo can no
	 * longer find it (ESRCH => already a zombie) or its proc flags show
	 * PROC_FLAG_INEXIT.
	 */
	while (iterations_to_wait > 0) {
		retval = proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 0, &bsdshortinfo, sizeof(bsdshortinfo));
		if ((retval == 0) && errno == ESRCH) {
			T_LOG("unable to find child using proc_pidinfo, assuming zombie");
			break;
		}

		T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(retval, 0, "proc_pidinfo(PROC_PIDT_SHORTBSDINFO) returned a value > 0");
		T_QUIET; T_ASSERT_EQ(retval, (int)sizeof(bsdshortinfo), "proc_pidinfo call for PROC_PIDT_SHORTBSDINFO returned expected size");

		if (bsdshortinfo.pbsi_flags & PROC_FLAG_INEXIT) {
			T_LOG("child proc info marked as in exit");
			break;
		}

		iterations_to_wait--;
		if (iterations_to_wait == 0) {
			/*
			 * This will mark the test as failed but let it continue so we
			 * don't leave a process stuck in the kernel.
			 */
			T_FAIL("unable to discover that child is marked as exiting");
		}

		/* Give the child a few more seconds to make it to exit */
		sleep(5);
	}

	/* Give the child some more time to make it through exit */
	sleep(10);

	struct scenario scenario = {
		.name = "zombie",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
		/* First unwedge the child so we can reap it */
		int val = 1, status;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.unwedge_thread", NULL, NULL, &val, sizeof(val)), "unwedge child");

		T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on zombie child");

		parse_stackshot(PARSE_STACKSHOT_ZOMBIE, ssbuf, sslen, @{zombie_child_pid_key: @(pid)});
	});
}
753
/*
 * Helper: signal the parent, then wait for SIGUSR1; on receipt, exec into
 * the exec_child_postexec helper (same binary, different helper name).
 */
T_HELPER_DECL(exec_child_preexec, "child process pre-exec")
{
	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	signal(SIGUSR1, SIG_IGN);
	dispatch_source_t parent_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(parent_sig_src, "dispatch_source_create (child_sig_src)");
	dispatch_source_set_event_handler(parent_sig_src, ^{
		/* Parent took a timestamp then signaled us: exec into the next process */
		char exec_path[PATH_MAX];
		uint32_t exec_path_size = sizeof(exec_path);
		T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(exec_path, &exec_path_size), "_NSGetExecutablePath");
		char *exec_args[] = { exec_path, "-n", "exec_child_postexec", NULL };

		T_QUIET; T_ASSERT_POSIX_ZERO(execve(exec_args[0], exec_args, NULL), "execing into exec_child_postexec");
	});
	dispatch_activate(parent_sig_src);

	T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take timestamp");

	sleep(100);
	/* unreachable: the handler above execs away (or the parent kills us) */
	T_FAIL("Received signal to exec from parent");
}
781
/*
 * Helper: the post-exec image — tell the parent we're running so it can
 * take a stackshot, then wait to be killed.
 */
T_HELPER_DECL(exec_child_postexec, "child process post-exec to sample")
{
	T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take stackshot");
	sleep(100);
	/* unreachable: the parent is expected to kill us first */
	T_FAIL("Killed by parent");
}
789
/*
 * Exec test: spawn exec_child_preexec, timestamp after it is ready, tell it
 * to exec into exec_child_postexec, then take a delta stackshot since the
 * timestamp and verify the exec'd task appears (identified by the pre-exec
 * child's unique pid, which survives exec).
 */
T_DECL(exec, "test getting full task snapshots for a task that execs")
{
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *args[] = { path, "-n", "exec_child_preexec", NULL };

	dispatch_source_t child_sig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	pid_t pid;

	T_LOG("spawning a child");

	/* SIGUSR1 from the child is delivered via the dispatch source below. */
	signal(SIGUSR1, SIG_IGN);
	child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

	dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_sig_src);

	int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

	/* First signal: pre-exec child is set up.  Timestamp for the delta shot. */
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);
	uint64_t start_time = mach_absolute_time();

	/*
	 * The child's unique pid (p_uniqueid) is stable across exec; the parser
	 * uses it to find the post-exec task in the stackshot.
	 */
	struct proc_uniqidentifierinfo proc_info_data = { };
	int retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
	T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");
	uint64_t unique_pid = proc_info_data.p_uniqueid;

	T_LOG("received signal from pre-exec child, unique_pid is %llu, timestamp is %llu", unique_pid, start_time);

	T_ASSERT_POSIX_SUCCESS(kill(pid, SIGUSR1), "signaled pre-exec child to exec");

	/* Second signal: the post-exec image is running. */
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("received signal from post-exec child, capturing stackshot");

	struct scenario scenario = {
		.name = "exec",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				  | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
				  | STACKSHOT_COLLECT_DELTA_SNAPSHOT),
		.since_timestamp = start_time
	};

	take_stackshot(&scenario, false, ^( void *ssbuf, size_t sslen) {
		// Kill the child
		int status;
		T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "kill post-exec child %d", pid);
		T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on post-exec child");

		parse_stackshot(PARSE_STACKSHOT_POSTEXEC | PARSE_STACKSHOT_DELTA, ssbuf, sslen, @{postexec_child_unique_pid_key: @(unique_pid)});
	});
}
852
/*
 * Repeatedly stackshot a child that execs itself in a loop until one of the
 * shots catches it mid-exec.  parse_stackshot reports success through the
 * exec_inprogress_found_key callback with two kcdata container IDs, which
 * must be distinct.
 */
T_DECL(exec_inprogress, "test stackshots of processes in the middle of exec")
{
	pid_t pid;
	/* a BASH quine which execs itself as long as the parent doesn't exit */
	char *bash_prog = "[[ $PPID -ne 1 ]] && exec /bin/bash -c \"$0\" \"$0\"";
	char *args[] = { "/bin/bash", "-c", bash_prog, bash_prog, NULL };

	posix_spawnattr_t sattr;
	T_ASSERT_POSIX_ZERO(posix_spawnattr_init(&sattr), "posix_spawnattr_init");
	T_ASSERT_POSIX_ZERO(posix_spawn(&pid, args[0], NULL, &sattr, args, NULL), "spawn exec_inprogress_child");
	/* release the spawnattr now that the child is running */
	T_ASSERT_POSIX_ZERO(posix_spawnattr_destroy(&sattr), "posix_spawnattr_destroy");

	struct scenario scenario = {
		.name = "exec_inprogress",
		.flags = (STACKSHOT_KCDATA_FORMAT),
		.target_pid = pid,
	};

	int tries = 0;
	int tries_limit = 30;
	__block bool found = false;
	__block uint64_t cid1 = 0, cid2 = 0;

	for (tries = 0; !found && tries < tries_limit; tries++) {
		take_stackshot(&scenario, false,
		    ^( void *ssbuf, size_t sslen) {
			parse_stackshot(PARSE_STACKSHOT_EXEC_INPROGRESS,
			    ssbuf, sslen, @{
				exec_inprogress_pid_key: @(pid),
				exec_inprogress_found_key: ^(uint64_t id1, uint64_t id2) { found = true; cid1 = id1; cid2 = id2; }});
		});
	}
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGKILL), "killing exec loop");
	T_ASSERT_TRUE(found, "able to find our execing process mid-exec in %d tries", tries);
	T_ASSERT_NE(cid1, cid2, "container IDs for in-progress exec are unique");
	T_PASS("found mid-exec process in %d tries", tries);
}
889
#ifdef _LP64
#if __has_feature(ptrauth_calls)
/*
 * Discriminators used to sign the fake AsyncContext fields below —
 * presumably chosen to match the Swift concurrency runtime's signing of the
 * real AsyncContext (TODO confirm against the Swift runtime headers).
 */
#define __ptrauth_swift_async_context_parent \
  __ptrauth(ptrauth_key_process_independent_data, 1, 0xbda2)
#define __ptrauth_swift_async_context_resume \
  __ptrauth(ptrauth_key_function_pointer, 1, 0xd707)
#else
#define __ptrauth_swift_async_context_parent
#define __ptrauth_swift_async_context_resume
#endif
// Add 1 to match the symbolication aid added by the stackshot backtracer.
#define asyncstack_frame(x) ((uintptr_t)(void *)ptrauth_strip((void *)(x), ptrauth_key_function_pointer) + 1)

// This struct fakes the Swift AsyncContext struct which is used by
// the Swift concurrency runtime. We only care about the first 2 fields.
struct fake_async_context {
	struct fake_async_context* __ptrauth_swift_async_context_parent next;	// parent (caller) context, or 0 at the root
	void(*__ptrauth_swift_async_context_resume resume_pc)(void);	// PC reported for this async frame
};
909
/*
 * Leaf functions that are never called; their addresses serve as the fake
 * async "resume" PCs below, which the stackshot backtracer records.
 * `(void)` makes these proper prototypes (an empty `()` parameter list
 * declares an unprototyped function in C).
 */
static void
level1_func(void)
{
}
static void
level2_func(void)
{
}
918
// Create a chain of fake async contexts; sync with asyncstack_expected_stack below
// (level2 is the "youngest" frame, so its resume PC is expected first).
static alignas(16) struct fake_async_context level1 = { 0, level1_func };
static alignas(16) struct fake_async_context level2 = { &level1, level2_func };

/* Semaphore pair used to rendezvous with the asyncstack worker thread. */
struct async_test_semaphores {
	dispatch_semaphore_t child_ready_sem;	/* signal parent we're ready */
	dispatch_semaphore_t child_exit_sem;	/* parent tells us to go away */
};

/* Name assigned to the worker thread in expect_asyncstack(). */
#define	ASYNCSTACK_THREAD_NAME "asyncstack_thread"
929
/*
 * Parked body of the asyncstack worker: names the thread, signals the main
 * thread that the fake async frame is in place, then blocks until told to
 * exit.  noinline/not_tail_called keeps this call frame present beneath
 * asyncstack_thread's carefully constructed frame.
 */
static void __attribute__((noinline, not_tail_called))
expect_asyncstack(void *arg)
{
	struct async_test_semaphores *async_ts = arg;

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_setname_np(ASYNCSTACK_THREAD_NAME),
	     "set thread name to %s", ASYNCSTACK_THREAD_NAME);

	/* Tell the main thread we're all set up, then wait for permission to exit */
	dispatch_semaphore_signal(async_ts->child_ready_sem);
	dispatch_semaphore_wait(async_ts->child_exit_sem, DISPATCH_TIME_FOREVER);
	usleep(1);	/* make sure we don't tailcall semaphore_wait */
}
943
/*
 * Worker thread entry: plants a fake Swift async frame signature on its own
 * stack (the async context pointer directly below the saved FP, and bit 60
 * set in the saved FP), then parks in expect_asyncstack() so the main thread
 * can stackshot it.  The exact stack layout here is load-bearing.
 */
static void *
asyncstack_thread(void *arg)
{
	uint64_t *fp = __builtin_frame_address(0);
	// We cannot use a variable of pointer type, because this ABI is valid
	// on arm64_32 where pointers are 32bits, but the context pointer will
	// still be stored in a 64bits slot on the stack.
#if __has_feature(ptrauth_calls)
#define __stack_context_auth __ptrauth(ptrauth_key_process_dependent_data, 1, \
	        0xc31a)
	struct fake_async_context * __stack_context_auth ctx = &level2;
#else // __has_feature(ptrauth_calls)
	/* struct fake_async_context * */uint64_t ctx  = (uintptr_t)&level2;
#endif // !__has_feature(ptrauth_calls)

	// The signature of an async frame on the OS stack is:
	// [ <AsyncContext address>, <Saved FP | (1<<60)>, <return address> ]
	// The Async context must be right before the saved FP on the stack. This
	// should happen naturally in an optimized build as it is the only
	// variable on the stack.
	// This function cannot use T_ASSERT_* becuse it changes the stack
	// layout.
	assert((uintptr_t)fp - (uintptr_t)&ctx == 8);

	// Modify the saved FP on the stack to include the async frame marker
	*fp |= (0x1ULL << 60);
	expect_asyncstack(arg);
	return NULL;
}
973
/*
 * Verify that the fake Swift async frames planted by asyncstack_thread show
 * up in a stackshot of that thread, youngest (level2) first.
 */
T_DECL(asyncstack, "test swift async stack entries")
{
	struct async_test_semaphores sems = {
	    .child_ready_sem = dispatch_semaphore_create(0),
	    .child_exit_sem = dispatch_semaphore_create(0),
	};
	T_QUIET; T_ASSERT_NOTNULL(sems.child_ready_sem, "child_ready_sem alloc");
	T_QUIET; T_ASSERT_NOTNULL(sems.child_exit_sem, "child_exit_sem alloc");

	struct scenario scenario = {
		.name = "asyncstack",
		.flags = STACKSHOT_KCDATA_FORMAT | STACKSHOT_SAVE_LOADINFO,
	};

	pthread_t worker;
	__block uint64_t worker_tid = 0;
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&worker, NULL, asyncstack_thread, &sems), "pthread_create");
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(worker, &worker_tid), "pthread_threadid_np");

	/* Don't snapshot until the worker has built its fake async frame. */
	dispatch_semaphore_wait(sems.child_ready_sem, DISPATCH_TIME_FOREVER);

	take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_ASYNCSTACK, ssbuf, sslen, @{
		    asyncstack_expected_threadid_key: @(worker_tid),
		       asyncstack_expected_stack_key: @[ @(asyncstack_frame(level2_func)), @(asyncstack_frame(level1_func)) ],
		});
	});

	/* Release the worker and reap it. */
	dispatch_semaphore_signal(sems.child_exit_sem);
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(worker, NULL), "wait for thread");

}
1005#endif
1006
/*
 * Return the calling thread's user promotion base priority, as reported by
 * THREAD_POLICY_STATE.
 */
static uint32_t
get_user_promotion_basepri(void)
{
	struct thread_policy_state policy_state;
	mach_msg_type_number_t state_count = THREAD_POLICY_STATE_COUNT;
	boolean_t get_default = FALSE;
	mach_port_t self_thread = pthread_mach_thread_np(pthread_self());

	kern_return_t kr = thread_policy_get(self_thread, THREAD_POLICY_STATE,
	    (thread_policy_t)&policy_state, &state_count, &get_default);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");

	return policy_state.thps_user_promotion_basepri;
}
1020
/* Return the current scheduled priority of the given thread port. */
static int
get_pri(thread_t thread_port)
{
	thread_extended_info_data_t info;
	mach_msg_type_number_t info_count = THREAD_EXTENDED_INFO_COUNT;

	kern_return_t kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
	    (thread_info_t)&info, &info_count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

	return info.pth_curpri;
}
1035
1036
/*
 * Turnstile single-hop test: build a two-queue blocking chain through a pair
 * of mutexes so the main thread receives a turnstile priority promotion, wait
 * until the promotion is visible, then take a stackshot with
 * STACKSHOT_THREAD_WAITINFO and check the turnstile info parses.
 */
T_DECL(turnstile_singlehop, "turnstile single hop test")
{
	dispatch_queue_t dq1, dq2;
	dispatch_semaphore_t sema_x;
	dispatch_queue_attr_t dq1_attr, dq2_attr;
	__block qos_class_t main_qos = 0;
	__block int main_relpri = 0, main_relpri2 = 0, main_afterpri = 0;
	struct scenario scenario = {
		.name = "turnstile_singlehop",
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};
	dq1_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_UTILITY, 0);
	dq2_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INITIATED, 0);
	pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_t *lockap = &lock_a, *lockbp = &lock_b;

	dq1 = dispatch_queue_create("q1", dq1_attr);
	dq2 = dispatch_queue_create("q2", dq2_attr);
	sema_x = dispatch_semaphore_create(0);

	/* Main holds lock_a; q1 takes lock_b and then blocks on lock_a. */
	pthread_mutex_lock(lockap);
	dispatch_async(dq1, ^{
		pthread_mutex_lock(lockbp);
		T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
		T_LOG("The priority of q1 is %d\n", get_pri(mach_thread_self()));
		dispatch_semaphore_signal(sema_x);
		pthread_mutex_lock(lockap);
	});
	dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);

	T_LOG("Async1 completed");

	/* Drop main to UTILITY so the later promotion is observable. */
	pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
	T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
	T_LOG("The priority of main is %d\n", get_pri(mach_thread_self()));
	// NOTE(review): main_relpri is reused here to hold the base priority
	// that main_afterpri is compared against below.
	main_relpri = get_pri(mach_thread_self());

	/* q2 (higher QoS) blocks on lock_b, pushing on q1 and, through the chain, on main. */
	dispatch_async(dq2, ^{
		T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri2), "get qos class");
		T_LOG("The priority of q2 is %d\n", get_pri(mach_thread_self()));
		dispatch_semaphore_signal(sema_x);
		pthread_mutex_lock(lockbp);
	});
	dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);

	T_LOG("Async2 completed");

	/* Poll until the turnstile promotion lands on the main thread. */
	while (1) {
		main_afterpri = (int) get_user_promotion_basepri();
		if (main_relpri != main_afterpri) {
			T_LOG("Success with promotion pri is %d", main_afterpri);
			break;
		}

		usleep(100);
	}

	take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_TURNSTILEINFO, ssbuf, sslen, nil);
	});
}
1100
1101
/*
 * Walk a kcdata stackshot and expect every task and thread container to
 * contain a STACKSHOT_KCTYPE_INSTRS_CYCLES entry.  Thread containers nest
 * inside task containers, so at CONTAINER_END the thread flag is checked
 * before the task flag.
 */
static void
expect_instrs_cycles_in_stackshot(void *ssbuf, size_t sslen)
{
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);

	bool in_task = false;
	bool in_thread = false;
	bool saw_instrs_cycles = false;
	iter = kcdata_iter_next(iter);

	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_CONTAINER_BEGIN:
			switch (kcdata_iter_container_type(iter)) {
			case STACKSHOT_KCCONTAINER_TASK:
				in_task = true;
				saw_instrs_cycles = false;
				break;

			case STACKSHOT_KCCONTAINER_THREAD:
				in_thread = true;
				saw_instrs_cycles = false;
				break;

			default:
				break;
			}
			break;

		case STACKSHOT_KCTYPE_INSTRS_CYCLES:
			saw_instrs_cycles = true;
			break;

		case KCDATA_TYPE_CONTAINER_END:
			if (in_thread) {
				T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
						"saw instructions and cycles in thread");
				in_thread = false;
			} else if (in_task) {
				T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
						"saw instructions and cycles in task");
				in_task = false;
			}
			break;	/* was an implicit fallthrough into default */

		default:
			break;
		}
	}
}
1151
/*
 * Skip the calling test unless the kernel's monotonic counters are available
 * (kern.monotonic.supported exists and reports non-zero).
 */
static void
skip_if_monotonic_unsupported(void)
{
	int supported = 0;
	size_t supported_size = sizeof(supported);
	/* Pass NULL (not 0) for the unused new-value pointer. */
	int ret = sysctlbyname("kern.monotonic.supported", &supported,
			&supported_size, NULL, 0);
	if (ret < 0 || !supported) {
		T_SKIP("monotonic is unsupported");
	}
}
1163
/*
 * Take a stackshot with STACKSHOT_INSTRS_CYCLES and verify every task and
 * thread reports instruction/cycle counts.
 */
T_DECL(instrs_cycles, "test a getting instructions and cycles in stackshot")
{
	skip_if_monotonic_unsupported();

	struct scenario ic_scenario = {
		.name = "instrs-cycles",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
				| STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("attempting to take stackshot with instructions and cycles");
	take_stackshot(&ic_scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, nil);
		expect_instrs_cycles_in_stackshot(ssbuf, sslen);
	});
}
1180
/*
 * Take a full stackshot with instruction/cycle counts, then a delta stackshot
 * against its timestamp, and expect counters in both.
 */
T_DECL(delta_instrs_cycles,
		"test delta stackshots with instructions and cycles")
{
	skip_if_monotonic_unsupported();

	struct scenario full_scenario = {
		.name = "delta-instrs-cycles",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
				| STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("taking full stackshot");
	take_stackshot(&full_scenario, false, ^(void *ssbuf, size_t sslen) {
		uint64_t full_time = stackshot_timestamp(ssbuf, sslen);

		T_LOG("taking delta stackshot since time %" PRIu64, full_time);

		parse_stackshot(0, ssbuf, sslen, nil);
		expect_instrs_cycles_in_stackshot(ssbuf, sslen);

		struct scenario delta_scenario = {
			.name = "delta-instrs-cycles-next",
			.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
					| STACKSHOT_KCDATA_FORMAT
					| STACKSHOT_COLLECT_DELTA_SNAPSHOT),
			.since_timestamp = full_time,
		};

		take_stackshot(&delta_scenario, false, ^(void *dssbuf, size_t dsslen) {
			parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, nil);
			expect_instrs_cycles_in_stackshot(dssbuf, dsslen);
		});
	});
}
1215
/*
 * Skip the calling test if the kernel does not support thread groups
 * (kern.thread_groups_supported absent or zero).
 */
static void
check_thread_groups_supported(void)
{
	int supported = 0;
	size_t supported_size = sizeof(supported);
	int err = sysctlbyname("kern.thread_groups_supported", &supported, &supported_size, NULL, 0);

	if (err || !supported) {
		T_SKIP("thread groups not supported on this system");
	}
}
1227
/* Capture a stackshot with STACKSHOT_THREAD_GROUP and validate its thread-group data. */
T_DECL(thread_groups, "test getting thread groups in stackshot")
{
	check_thread_groups_supported();

	struct scenario tg_scenario = {
		.name = "thread-groups",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_THREAD_GROUP
				| STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("attempting to take stackshot with thread group flag");
	take_stackshot(&tg_scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_thread_group_stackshot(ssbuf, sslen);
	});
}
1243
/*
 * Validate the ASID and page-table data in a stackshot buffer: expect exactly
 * one STACKSHOT_KCTYPE_ASID entry and one STACKSHOT_KCTYPE_PAGE_TABLES array
 * whose segments ([pa, num_entries, start_va, end_va, entries...]) are
 * well-formed arm64 TTEs/compressed PTEs.
 *
 * Note: the parameter was previously declared `void **ssbuf` although callers
 * pass the stackshot buffer itself; it is now `void *` to match.
 */
static void
parse_page_table_asid_stackshot(void *ssbuf, size_t sslen)
{
	bool seen_asid = false;
	bool seen_page_table_snapshot = false;
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
			"buffer provided is a stackshot");

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_PAGE_TABLES) {
				continue;
			}

			T_ASSERT_FALSE(seen_page_table_snapshot, "check that we haven't yet seen a page table snapshot");
			seen_page_table_snapshot = true;

			T_ASSERT_EQ((size_t) kcdata_iter_array_elem_size(iter), sizeof(uint64_t),
				"check that each element of the pagetable dump is the expected size");

			uint64_t *pt_array = kcdata_iter_payload(iter);
			uint32_t elem_count = kcdata_iter_array_elem_count(iter);
			uint32_t j;
			bool nonzero_tte = false;
			/* Walk each segment: 4-word header followed by num_entries TTEs. */
			for (j = 0; j < elem_count;) {
				T_QUIET; T_ASSERT_LE(j + 4, elem_count, "check for valid page table segment header");
				uint64_t pa = pt_array[j];
				uint64_t num_entries = pt_array[j + 1];
				uint64_t start_va = pt_array[j + 2];
				uint64_t end_va = pt_array[j + 3];

				T_QUIET; T_ASSERT_NE(pa, (uint64_t) 0, "check that the pagetable physical address is non-zero");
				T_QUIET; T_ASSERT_EQ(pa % (num_entries * sizeof(uint64_t)), (uint64_t) 0, "check that the pagetable physical address is correctly aligned");
				T_QUIET; T_ASSERT_NE(num_entries, (uint64_t) 0, "check that a pagetable region has more than 0 entries");
				T_QUIET; T_ASSERT_LE(j + 4 + num_entries, (uint64_t) elem_count, "check for sufficient space in page table array");
				T_QUIET; T_ASSERT_GT(end_va, start_va, "check for valid VA bounds in page table segment header");

				for (uint32_t k = j + 4; k < (j + 4 + num_entries); ++k) {
					if (pt_array[k] != 0) {
						nonzero_tte = true;
						T_QUIET; T_ASSERT_EQ((pt_array[k] >> 48) & 0xf, (uint64_t) 0, "check that bits[48:51] of arm64 TTE are clear");
						// L0-L2 table and non-compressed L3 block entries should always have bit 1 set; assumes L0-L2 blocks will not be used outside the kernel
						bool table = ((pt_array[k] & 0x2) != 0);
						if (table) {
							T_QUIET; T_ASSERT_NE(pt_array[k] & ((1ULL << 48) - 1) & ~((1ULL << 12) - 1), (uint64_t) 0, "check that arm64 TTE physical address is non-zero");
						} else { // should be a compressed PTE
							T_QUIET; T_ASSERT_NE(pt_array[k] & 0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has at least one of bits [63:62] set");
							T_QUIET; T_ASSERT_EQ(pt_array[k] & ~0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has no other bits besides [63:62] set");
						}
					}
				}

				j += (4 + num_entries);
			}
			T_ASSERT_TRUE(nonzero_tte, "check that we saw at least one non-empty TTE");
			T_ASSERT_EQ(j, elem_count, "check that page table dump size matches extent of last header");
			break;
		}
		case STACKSHOT_KCTYPE_ASID: {
			T_ASSERT_FALSE(seen_asid, "check that we haven't yet seen an ASID");
			seen_asid = true;
			break;
		}
		default:
			break;
		}
	}
	T_ASSERT_TRUE(seen_page_table_snapshot, "check that we have seen a page table snapshot");
	T_ASSERT_TRUE(seen_asid, "check that we have seen an ASID");
}
1318
/* Request ASID + page-table data for our own task and validate the dump. */
T_DECL(dump_page_tables, "test stackshot page table dumping support")
{
	struct scenario pt_scenario = {
		.name = "asid-page-tables",
		.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_ASID | STACKSHOT_PAGE_TABLES),
		.size_hint = (9ull << 20), // 9 MB
		.target_pid = getpid(),
		.maybe_unsupported = true,
		.maybe_enomem = true,
	};

	T_LOG("attempting to take stackshot with ASID and page table flags");
	take_stackshot(&pt_scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_page_table_asid_stackshot(ssbuf, sslen);
	});
}
1335
/*
 * Scan the stackshot's loadinfo arrays (32- and 64-bit) for the entry whose
 * UUID matches proc_info_data->p_uuid, and assert its load address equals
 * expected_offset.  T_FAILs if no matching UUID is found.
 *
 * Note: the parameter was previously declared `void **ssbuf` although callers
 * pass the stackshot buffer itself; it is now `void *` to match.
 */
static void stackshot_verify_current_proc_uuid_info(void *ssbuf, size_t sslen, uint64_t expected_offset, const struct proc_uniqidentifierinfo *proc_info_data)
{
	const uuid_t *current_uuid = (const uuid_t *)(&proc_info_data->p_uuid);

	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer provided is a stackshot");

	iter = kcdata_iter_next(iter);

	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
			case KCDATA_TYPE_ARRAY: {
				T_QUIET; T_ASSERT_TRUE(kcdata_iter_array_valid(iter), "checked that array is valid");
				if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO64) {
					struct user64_dyld_uuid_info *info = (struct user64_dyld_uuid_info *) kcdata_iter_payload(iter);
					if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
						T_ASSERT_EQ(expected_offset, info->imageLoadAddress, "found matching UUID with matching binary offset");
						return;
					}
				} else if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO) {
					struct user32_dyld_uuid_info *info = (struct user32_dyld_uuid_info *) kcdata_iter_payload(iter);
					if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
						T_ASSERT_EQ(expected_offset, ((uint64_t) info->imageLoadAddress),  "found matching UUID with matching binary offset");
						return;
					}
				}
				break;
			}
			default:
				break;
		}
	}

	T_FAIL("failed to find matching UUID in stackshot data");
}
1371
/*
 * Spawn the (Rosetta-translated) stackshot_translated_child helper, confirm
 * the kernel reports it as P_TRANSLATED, then verify the stackshot marks the
 * task as translated.
 */
T_DECL(translated, "tests translated bit is set correctly")
{
#if !(TARGET_OS_OSX && TARGET_CPU_ARM64)
	T_SKIP("Only valid on Apple silicon Macs");
#endif
	// Get path of stackshot_translated_child helper binary
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_QUIET; T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char* binary_name = strrchr(path, '/');
	if (binary_name) {
		binary_name++;
	}
	T_QUIET; T_ASSERT_NOTNULL(binary_name, "Find basename in path '%s'", path);
	strlcpy(binary_name, "stackshot_translated_child", path_size - (binary_name - path));
	char *args[] = { path, NULL };

	dispatch_source_t child_sig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "exec child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	signal(SIGUSR1, SIG_IGN);
	child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

	dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_sig_src);

	// Spawn child
	pid_t pid;
	T_LOG("spawning translated child");
	T_QUIET; T_ASSERT_POSIX_ZERO(posix_spawn(&pid, args[0], NULL, NULL, args, NULL), "spawned process '%s' with PID %d", args[0], pid);

	// Wait for the child to spawn up
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	// Make sure the child is running and is translated
	int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, pid };
	struct kinfo_proc process_info;
	size_t bufsize = sizeof(process_info);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctl(mib, (unsigned)(sizeof(mib)/sizeof(int)), &process_info, &bufsize, NULL, 0), "get translated child process info");
	T_QUIET; T_ASSERT_GT(bufsize, (size_t)0, "process info is not empty");
	T_QUIET; T_ASSERT_TRUE((process_info.kp_proc.p_flag & P_TRANSLATED), "KERN_PROC_PID reports child is translated");

	T_LOG("capturing stackshot");

	struct scenario scenario = {
		.name = "translated",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				  | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	take_stackshot(&scenario, true, ^( void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_TRANSLATED, ssbuf, sslen, @{translated_child_pid_key: @(pid)});
	});

	// Kill the child
	int status;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kill(pid, SIGTERM), "kill translated child");
	T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on translated child");
}
1435
/*
 * Verify the kernel can supply our main binary's UUID and load address even
 * when the in-process dyld image info is unavailable: find our UUID/load
 * address via dyld's all_image_infos, then zero out uuidArrayCount so the
 * kernel must fall back to the UUID cached in the proc structure, and check
 * the stackshot loadinfo still matches.
 */
T_DECL(proc_uuid_info, "tests that the main binary UUID for a proc is always populated")
{
	struct proc_uniqidentifierinfo proc_info_data = { };
	mach_msg_type_number_t      count;
	kern_return_t               kernel_status;
	task_dyld_info_data_t       task_dyld_info;
	struct dyld_all_image_infos *target_infos;
	int retval;
	bool found_image_in_image_infos = false;
	uint64_t expected_mach_header_offset = 0;

	/* Find the UUID of our main binary */
	retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
	T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");

	uuid_string_t str = {};
	uuid_unparse(*(uuid_t*)&proc_info_data.p_uuid, str);
	T_LOG("Found current UUID is %s", str);

	/* Find the location of the dyld image info metadata */
	count = TASK_DYLD_INFO_COUNT;
	kernel_status = task_info(mach_task_self(), TASK_DYLD_INFO, (task_info_t)&task_dyld_info, &count);
	T_QUIET; T_ASSERT_EQ(kernel_status, KERN_SUCCESS, "retrieve task_info for TASK_DYLD_INFO");

	target_infos = (struct dyld_all_image_infos *)task_dyld_info.all_image_info_addr;

	/* Find our binary in the dyld image info array */
	for (int i = 0; i < (int) target_infos->uuidArrayCount; i++) {
		if (uuid_compare(target_infos->uuidArray[i].imageUUID, *(uuid_t*)&proc_info_data.p_uuid) == 0) {
			expected_mach_header_offset = (uint64_t) target_infos->uuidArray[i].imageLoadAddress;
			found_image_in_image_infos = true;
		}
	}

	T_ASSERT_TRUE(found_image_in_image_infos, "found binary image in dyld image info list");

	/* Overwrite the dyld image info data so the kernel has to fallback to the UUID stored in the proc structure */
	target_infos->uuidArrayCount = 0;

	struct scenario scenario = {
		.name = "proc_uuid_info",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT),
		.target_pid = getpid(),
	};

	T_LOG("attempting to take stackshot for current PID");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		stackshot_verify_current_proc_uuid_info(ssbuf, sslen, expected_mach_header_offset, &proc_info_data);
	});
}
1487
/*
 * Wedge a worker thread inside the compressor via kern.cseg_wedge_thread,
 * take a stackshot, unwedge it, and check the waitinfo names that thread.
 */
T_DECL(cseg_waitinfo, "test that threads stuck in the compressor report correct waitinfo")
{
	__block uint64_t wedged_tid = 0;
	struct scenario scenario = {
		.name = "cseg_waitinfo",
		.quiet = false,
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};

	dispatch_semaphore_t child_ok = dispatch_semaphore_create(0);
	dispatch_queue_t dq = dispatch_queue_create("com.apple.stackshot.cseg_waitinfo", NULL);

	dispatch_async(dq, ^{
		pthread_threadid_np(NULL, &wedged_tid);
		dispatch_semaphore_signal(child_ok);
		int val = 1;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_wedge_thread", NULL, NULL, &val, sizeof(val)), "wedge child thread");
	});

	dispatch_semaphore_wait(child_ok, DISPATCH_TIME_FOREVER);
	/* give the worker a moment to actually block inside the sysctl */
	sleep(1);

	T_LOG("taking stackshot");
	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		int val = 1;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_unwedge_thread", NULL, NULL, &val, sizeof(val)), "unwedge child thread");
		parse_stackshot(PARSE_STACKSHOT_WAITINFO_CSEG, ssbuf, sslen, @{cseg_expected_threadid_key: @(wedged_tid)});
	});
}
1517
/*
 * Send a Mach message to send_port, optionally transferring a receive right
 * (msg_port) and requesting a send-once reply right on reply_port.  Used by
 * the SRP tests to park a client waiting on its special reply port.
 */
static void
srp_send(
	mach_port_t send_port,
	mach_port_t reply_port,
	mach_port_t msg_port)
{
	kern_return_t ret = 0;

	struct test_msg {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	};
	struct test_msg send_msg = {
		.header = {
			.msgh_remote_port = send_port,
			.msgh_local_port  = reply_port,
			.msgh_bits        = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND,
	    reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0,
	    MACH_MSG_TYPE_MOVE_SEND,
	    MACH_MSGH_BITS_COMPLEX),
			.msgh_id          = 0x100,
			.msgh_size        = sizeof(send_msg),
		},
		.body = {
			.msgh_descriptor_count = 1,
		},
		.port_descriptor = {
			.name        = msg_port,
			.disposition = MACH_MSG_TYPE_MOVE_RECEIVE,
			.type        = MACH_MSG_PORT_DESCRIPTOR,
		},
	};

	/* With no port to transfer, send without the descriptor. */
	if (msg_port == MACH_PORT_NULL) {
		send_msg.body.msgh_descriptor_count = 0;
	}

	ret = mach_msg(&(send_msg.header),
	    MACH_SEND_MSG |
	    MACH_SEND_TIMEOUT |
	    MACH_SEND_OVERRIDE,
	    send_msg.header.msgh_size,
	    0,
	    MACH_PORT_NULL,
	    10000,	/* send timeout, in milliseconds */
	    0);

	T_ASSERT_MACH_SUCCESS(ret, "client mach_msg");
}
1568
/*
 * SRP test client: look up the parent's bootstrap service, send it a message
 * carrying our thread's special reply port, then block in a sync-wait receive
 * on that port.  The parent stackshots us while we are blocked (the reply
 * never comes); the parent is expected to SIGKILL us afterwards.
 */
T_HELPER_DECL(srp_client,
    "Client used for the special_reply_port test")
{
	pid_t ppid = getppid();
	dispatch_semaphore_t can_continue  = dispatch_semaphore_create(0);
	dispatch_queue_t dq = dispatch_queue_create("client_signalqueue", NULL);
	dispatch_source_t sig_src;

	mach_msg_return_t mr;
	mach_port_t service_port;
	mach_port_t conn_port;
	mach_port_t special_reply_port;
	mach_port_options_t opts = {
		.flags = MPO_INSERT_SEND_RIGHT,
	};

	/* SIGUSR1 from the parent is delivered via the dispatch source. */
	signal(SIGUSR1, SIG_IGN);
	sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq);

	dispatch_source_set_event_handler(sig_src, ^{
			dispatch_semaphore_signal(can_continue);
	});
	dispatch_activate(sig_src);

	/* lookup the mach service port for the parent */
	kern_return_t kr = bootstrap_look_up(bootstrap_port,
	    SRP_SERVICE_NAME, &service_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up");

	/* create the send-once right (special reply port) and message to send to the server */
	kr = mach_port_construct(mach_task_self(), &opts, 0ull, &conn_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_construct");

	special_reply_port = thread_get_special_reply_port();
	T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port");

	/* send the message with the special reply port */
	srp_send(service_port, special_reply_port, conn_port);

	/* signal the parent to continue */
	kill(ppid, SIGUSR1);

	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} rcv_msg = {
		.header =
		{
			.msgh_remote_port = MACH_PORT_NULL,
			.msgh_local_port  = special_reply_port,
			.msgh_size        = sizeof(rcv_msg),
		},
	};

	/* wait on the reply from the parent (that we will never receive) */
	mr = mach_msg(&(rcv_msg.header),
			(MACH_RCV_MSG | MACH_RCV_SYNC_WAIT),
			0,
			rcv_msg.header.msgh_size,
			special_reply_port,
			MACH_MSG_TIMEOUT_NONE,
			service_port);

	/* not expected to execute as parent will SIGKILL client... */
	/* (mr is intentionally never checked for the same reason) */
	T_LOG("client process exiting after sending message to parent (server)");
}
1636
/*
 * Which owner check_srp_test() should expect the special-reply-port
 * waitinfo to point at in the stackshot.
 */
enum srp_test_type {
	SRP_TEST_THREAD,	/* expect waiter on current thread */
	SRP_TEST_PID,		/* expect waiter on current PID */
	SRP_TEST_EITHER,	/* waiter could be on either */
};
1642
/*
 * Take a waitinfo stackshot named `name` and verify the special-reply-port
 * waiter entry, expecting it to be attributed per `ty` (current thread,
 * current PID, or either).
 */
static void
check_srp_test(const char *name, enum srp_test_type ty)
{
	struct scenario scenario = {
		.name = name,
		.quiet = false,
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};
	uint64_t thread_id = 0;
	pthread_threadid_np(NULL, &thread_id);

	/* build the expectations dictionary handed to parse_stackshot */
	NSMutableDictionary *expected = [NSMutableDictionary dictionary];
	if (ty == SRP_TEST_THREAD || ty == SRP_TEST_EITHER) {
		expected[srp_expected_threadid_key] = @(thread_id);
	}
	if (ty == SRP_TEST_PID || ty == SRP_TEST_EITHER) {
		expected[srp_expected_pid_key] = @(getpid());
	}

	take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_WAITINFO_SRP, ssbuf, sslen, expected);
	});
}
1671
1672
1673/*
1674 * Tests the stackshot wait info plumbing for synchronous IPC that doesn't use kevent on the server.
1675 *
1676 * (part 1): tests the scenario where a client sends a request that includes a special reply port
1677 *           to a server that doesn't receive the message and doesn't copy the send-once right
1678 *           into its address space as a result. for this case the special reply port is enqueued
1679 *           in a port and we check which task has that receive right and use that info. (rdar://60440338)
1680 * (part 2): tests the scenario where a client sends a request that includes a special reply port
1681 *           to a server that receives the message and copies in the send-once right, but doesn't
1682 *           reply to the client. for this case the special reply port is copied out and the kernel
1683 *           stashes the info about which task copied out the send once right. (rdar://60440592)
1684 * (part 3): tests the same as part 2, but uses kevents, which allow for
1685 *           priority inheritance
1686 */
T_DECL(special_reply_port, "test that tasks using special reply ports have correct waitinfo")
{
	dispatch_semaphore_t can_continue  = dispatch_semaphore_create(0);
	dispatch_queue_t dq = dispatch_queue_create("signalqueue", NULL);
	dispatch_queue_t machdq = dispatch_queue_create("machqueue", NULL);
	dispatch_source_t sig_src;
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *client_args[] = { path, "-n", "srp_client", NULL };
	pid_t client_pid;
	int sp_ret;
	kern_return_t kr;
	mach_port_t port;

	/* setup the signal handler in the parent (server) */
	T_LOG("setup sig handlers");
	signal(SIGUSR1, SIG_IGN);
	sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq);

	dispatch_source_set_event_handler(sig_src, ^{
			dispatch_semaphore_signal(can_continue);
	});
	dispatch_activate(sig_src);

	/* register with the mach service name so the client can lookup and send a message to the parent (server) */
	T_LOG("Server about to check in");
	kr = bootstrap_check_in(bootstrap_port, SRP_SERVICE_NAME, &port);
	T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in");

	T_LOG("Launching client");
	sp_ret = posix_spawn(&client_pid, client_args[0], NULL, NULL, client_args, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", client_args[0], client_pid);
	T_LOG("Spawned client as PID %d", client_pid);

	dispatch_semaphore_wait(can_continue, DISPATCH_TIME_FOREVER);
	T_LOG("Ready to take stackshot, but waiting 1s for the coast to clear");

	/*
	 * can_continue indicates the client has signaled us, but we want to make
	 * sure they've actually blocked sending their mach message.  It's cheesy, but
	 * sleep() works for this.
	 */
	sleep(1);

	/*
	 * take the stackshot without calling receive to verify that the stackshot wait
	 * info shows our (the server) thread for the scenario where the server has yet to
	 * receive the message.
	 */
	T_LOG("Taking stackshot for part 1 coverage");
	check_srp_test("srp", SRP_TEST_THREAD);

	/*
	 * receive the message from the client (which should copy the send once right into
	 * our address space).
	 */
	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} rcv_msg = {
		.header =
		{
			.msgh_remote_port = MACH_PORT_NULL,
			.msgh_local_port  = port,
			.msgh_size        = sizeof(rcv_msg),
		},
	};

	T_LOG("server: starting sync receive\n");

	/*
	 * NOTE(review): the receive size limit passed here (4096) is larger than
	 * sizeof(rcv_msg) — presumably to leave room for the message trailer;
	 * confirm the kernel cannot write past the rcv_msg stack buffer.
	 */
	mach_msg_return_t mr;
	mr = mach_msg(&(rcv_msg.header),
			(MACH_RCV_MSG | MACH_RCV_TIMEOUT),
			0,
			4096,
			port,
			10000,
			MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(mr, "mach_msg() receive of message from client");

	/*
	 * take the stackshot to verify that the stackshot wait info shows our (the server) PID
	 * for the scenario where the server has received the message and copied in the send-once right.
	 */
	T_LOG("Taking stackshot for part 2 coverage");
	check_srp_test("srp", SRP_TEST_PID);

	/* cleanup - kill the client */
	T_ASSERT_POSIX_SUCCESS(kill(client_pid, SIGKILL), "killing client");
	T_ASSERT_POSIX_SUCCESS(waitpid(client_pid, NULL, 0), "waiting for the client to exit");

	// do it again, but using kevents
	T_LOG("Launching client");
	sp_ret = posix_spawn(&client_pid, client_args[0], NULL, NULL, client_args, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", client_args[0], client_pid);
	T_LOG("Spawned client as PID %d", client_pid);

	dispatch_semaphore_wait(can_continue, DISPATCH_TIME_FOREVER);
	T_LOG("Ready to take stackshot, but waiting 1s for the coast to clear");

	/*
	 * can_continue indicates the client has signaled us, but we want to make
	 * sure they've actually blocked sending their mach message.  It's cheesy, but
	 * sleep() works for this.
	 */
	sleep(1);

	/* part 3: receive via dispatch_mach (kevents), which allows priority inheritance */
	dispatch_mach_t dispatch_mach = dispatch_mach_create(SRP_SERVICE_NAME, machdq,
	    ^(dispatch_mach_reason_t reason,
	      dispatch_mach_msg_t message,
	      mach_error_t error __unused) {
		switch (reason) {
		case DISPATCH_MACH_MESSAGE_RECEIVED: {
			size_t size = 0;
			mach_msg_header_t *msg __unused = dispatch_mach_msg_get_msg(message, &size);
			T_LOG("server: received %ld byte message", size);
			check_srp_test("turnstile_port_thread", SRP_TEST_THREAD);
			T_LOG("server: letting client go");
			// drop the message on the ground, we'll kill the client later
			dispatch_semaphore_signal(can_continue);
			break;
		}
		default:
			break;
		}
	});

	dispatch_mach_connect(dispatch_mach, port, MACH_PORT_NULL, NULL);

	dispatch_semaphore_wait(can_continue, DISPATCH_TIME_FOREVER);

	/* cleanup - kill the client */
	T_ASSERT_POSIX_SUCCESS(kill(client_pid, SIGKILL), "killing client");
	T_ASSERT_POSIX_SUCCESS(waitpid(client_pid, NULL, 0), "waiting for the client to exit");
}
1823}
1824
1825#pragma mark performance tests
1826
1827#define SHOULD_REUSE_SIZE_HINT 0x01
1828#define SHOULD_USE_DELTA       0x02
1829#define SHOULD_TARGET_SELF     0x04
1830
/*
 * Repeatedly take stackshots until both the duration and size statistics
 * are stable.  `options` selects whether each iteration reuses the prior
 * size hint, switches to delta snapshots, and/or targets only this process.
 */
static void
stackshot_perf(unsigned int options)
{
	struct scenario scenario = {
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
			| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	dt_stat_t size_stat = dt_stat_create("bytes", "size");
	dt_stat_time_t time_stat = dt_stat_time_create("duration");
	scenario.timer = time_stat;

	if (options & SHOULD_TARGET_SELF) {
		scenario.target_pid = getpid();
	}

	while (!dt_stat_stable(time_stat) || !dt_stat_stable(size_stat)) {
		__block uint64_t snapshot_time = 0;
		__block uint32_t observed_size = 0;
		take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
			dt_stat_add(size_stat, (double)sslen);
			snapshot_time = stackshot_timestamp(ssbuf, sslen);
			observed_size = (uint32_t)sslen;
		});
		/* feed this iteration's results into the next one, as requested */
		if (options & SHOULD_REUSE_SIZE_HINT) {
			scenario.size_hint = observed_size;
		}
		if (options & SHOULD_USE_DELTA) {
			scenario.since_timestamp = snapshot_time;
			scenario.flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;
		}
	}

	dt_stat_finalize(time_stat);
	dt_stat_finalize(size_stat);
}
1867
/*
 * Take stackshots with exactly `flag` (plus the kcdata format flag) set,
 * and record per-thread duration and size statistics under `flagname`
 * until both measurements are stable.  The per-thread numbers come from
 * the thread snapshot count, the in-stackshot duration record, and the
 * timebase record, all of which must be present.
 */
static void
stackshot_flag_perf_noclobber(uint64_t flag, char *flagname)
{
	struct scenario scenario = {
		.quiet = true,
		.flags = (flag | STACKSHOT_KCDATA_FORMAT),
	};

	dt_stat_t duration = dt_stat_create("nanoseconds per thread", "%s_duration", flagname);
	dt_stat_t size = dt_stat_create("bytes per thread", "%s_size", flagname);
	T_LOG("Testing \"%s\" = 0x%" PRIx64, flagname, flag);

	while (!dt_stat_stable(duration) || !dt_stat_stable(size)) {
		take_stackshot(&scenario, false, ^(void *ssbuf, size_t sslen) {
			kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
			unsigned long no_threads = 0;
			mach_timebase_info_data_t timebase = {0, 0};
			uint64_t stackshot_duration = 0;
			int found = 0;	/* bitmask: 1 = thread, 2 = duration, 4 = timebase */
			T_QUIET; T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "stackshot buffer");

			KCDATA_ITER_FOREACH(iter) {
				switch(kcdata_iter_type(iter)) {
					case STACKSHOT_KCTYPE_THREAD_SNAPSHOT: {
						found |= 1;
						no_threads ++;
						break;
					}
					case STACKSHOT_KCTYPE_STACKSHOT_DURATION: {
						struct stackshot_duration *ssd = kcdata_iter_payload(iter);
						stackshot_duration = ssd->stackshot_duration;
						found |= 2;
						break;
					}
					case KCDATA_TYPE_TIMEBASE: {
						found |= 4;
						mach_timebase_info_data_t *tb = kcdata_iter_payload(iter);
						memcpy(&timebase, tb, sizeof(timebase));
						break;
					}
					default:
						/* other kcdata item types are irrelevant here */
						break;
				}
			}

			/* all three records required; bit 1 also guarantees no_threads > 0 */
			T_QUIET; T_ASSERT_EQ(found, 0x7, "found everything needed");

			uint64_t ns = (stackshot_duration * timebase.numer) / timebase.denom;
			uint64_t per_thread_ns = ns / no_threads;
			uint64_t per_thread_size = sslen / no_threads;

			dt_stat_add(duration, per_thread_ns);
			dt_stat_add(size, per_thread_size);
		});
	}

	dt_stat_finalize(duration);
	dt_stat_finalize(size);
}
1925
/*
 * Wrapper around stackshot_flag_perf_noclobber() that also sets
 * STACKSHOT_NO_IO_STATS: disabling I/O statistics collection keeps the
 * measurement focused on the flag under test.
 */
static void
stackshot_flag_perf(uint64_t flag, char *flagname)
{
	stackshot_flag_perf_noclobber(flag | STACKSHOT_NO_IO_STATS, flagname);
}
1937
1938
T_DECL(flag_perf, "test stackshot performance with different flags set", T_META_TAG_PERF)
{
	/* reference measurements: without, then with, I/O statistics */
	stackshot_flag_perf_noclobber(STACKSHOT_NO_IO_STATS, "baseline");
	stackshot_flag_perf_noclobber(0, "io_stats");

	/* measure each optional flag in isolation, in a fixed order */
	static const struct {
		uint64_t flag;
		char *name;
	} flag_cases[] = {
		{ STACKSHOT_THREAD_WAITINFO, "thread_waitinfo" },
		{ STACKSHOT_GET_DQ, "get_dq" },
		{ STACKSHOT_SAVE_LOADINFO, "save_loadinfo" },
		{ STACKSHOT_GET_GLOBAL_MEM_STATS, "get_global_mem_stats" },
		{ STACKSHOT_SAVE_KEXT_LOADINFO, "save_kext_loadinfo" },
		{ STACKSHOT_SAVE_IMP_DONATION_PIDS, "save_imp_donation_pids" },
		{ STACKSHOT_ENABLE_BT_FAULTING, "enable_bt_faulting" },
		{ STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT, "collect_sharedcache_layout" },
		{ STACKSHOT_ENABLE_UUID_FAULTING, "enable_uuid_faulting" },
		{ STACKSHOT_THREAD_GROUP, "thread_group" },
		{ STACKSHOT_SAVE_JETSAM_COALITIONS, "save_jetsam_coalitions" },
		{ STACKSHOT_INSTRS_CYCLES, "instrs_cycles" },
		{ STACKSHOT_ASID, "asid" },
	};
	for (size_t i = 0; i < sizeof(flag_cases) / sizeof(flag_cases[0]); i++) {
		stackshot_flag_perf(flag_cases[i].flag, flag_cases[i].name);
	}
}
1958
T_DECL(perf_no_size_hint, "test stackshot performance with no size hint",
		T_META_TAG_PERF)
{
	/* baseline: every option off, each stackshot starts from scratch */
	stackshot_perf(0);
}
1964
T_DECL(perf_size_hint, "test stackshot performance with size hint",
		T_META_TAG_PERF)
{
	/* reuse the previous stackshot's size as the next buffer-size hint */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT);
}
1970
T_DECL(perf_process, "test stackshot performance targeted at process",
		T_META_TAG_PERF)
{
	/* size hint plus targeting only the current process */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_TARGET_SELF);
}
1976
T_DECL(perf_delta, "test delta stackshot performance",
		T_META_TAG_PERF)
{
	/* size hint plus delta snapshots relative to the previous timestamp */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA);
}
1982
T_DECL(perf_delta_process, "test delta stackshot performance targeted at a process",
		T_META_TAG_PERF)
{
	/* all options combined: size hint, delta snapshots, and self-targeting */
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA | SHOULD_TARGET_SELF);
}
1988
/*
 * Extract the mach-absolute-time timestamp from a (full or delta)
 * stackshot kcdata buffer.  Fails the test if the buffer is neither kind
 * of stackshot or contains no timestamp record.
 */
static uint64_t
stackshot_timestamp(void *ssbuf, size_t sslen)
{
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	uint32_t buffer_type = kcdata_iter_type(iter);

	if (buffer_type != KCDATA_BUFFER_BEGIN_STACKSHOT &&
	    buffer_type != KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT) {
		T_ASSERT_FAIL("invalid kcdata type %u", kcdata_iter_type(iter));
	}

	iter = kcdata_iter_find_type(iter, KCDATA_TYPE_MACH_ABSOLUTE_TIME);
	T_QUIET;
	T_ASSERT_TRUE(kcdata_iter_valid(iter), "timestamp found in stackshot");

	return *(uint64_t *)kcdata_iter_payload(iter);
}
2005
2006#define TEST_THREAD_NAME "stackshot_test_thread"
2007
/*
 * Verify thread-group data in a stackshot: collect the IDs of every thread
 * group reported in STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT arrays, then
 * check that each thread container references a known group, and finally
 * assert that at least one thread group snapshot was present.
 */
static void
parse_thread_group_stackshot(void **ssbuf, size_t sslen)
{
	bool seen_thread_group_snapshot = false;
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
			"buffer provided is a stackshot");

	/* set of tgs_id values collected from the thread group snapshot arrays */
	NSMutableSet *thread_groups = [[NSMutableSet alloc] init];

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT) {
				continue;
			}

			seen_thread_group_snapshot = true;

			/* element size distinguishes the v3 layout from the legacy one */
			if (kcdata_iter_array_elem_size(iter) >= sizeof(struct thread_group_snapshot_v3)) {
				struct thread_group_snapshot_v3 *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot_v3 *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}
			}
			else {
				struct thread_group_snapshot *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}
			}
			break;
		}
		}
	}
	/*
	 * NOTE(review): `iter` was exhausted by the loop above and is not reset
	 * before this second pass, so the per-thread membership checks below may
	 * never execute — confirm whether the iterator should be re-created here.
	 */
	KCDATA_ITER_FOREACH(iter) {
		NSError *error = nil;

		switch (kcdata_iter_type(iter)) {

		case KCDATA_TYPE_CONTAINER_BEGIN: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
					"checked that container is valid");

			if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_THREAD) {
				break;
			}

			/* parseKCDataContainer advances iter past the whole container */
			NSDictionary *container = parseKCDataContainer(&iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(container, "parsed thread container from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");

			int tg = [container[@"thread_snapshots"][@"thread_group"] intValue];

			T_ASSERT_TRUE([thread_groups containsObject:@(tg)], "check that the thread group the thread is in exists");

			break;
		};

		}
	}
	T_ASSERT_TRUE(seen_thread_group_snapshot, "check that we have seen a thread group snapshot");
}
2079
/*
 * Cross-check the shared cache layout reported by the kernel in a
 * stackshot (`uuids`, `uuid_count`) against what dyld reports for the
 * current process's shared cache: same UUIDs, same unslid load addresses,
 * same library count, in the same order.
 */
static void
verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count)
{
	uuid_t cur_shared_cache_uuid;
	/* __block: the iteration callback below updates these counters */
	__block uint32_t lib_index = 0, libs_found = 0;

	_dyld_get_shared_cache_uuid(cur_shared_cache_uuid);
	int result = dyld_shared_cache_iterate_text(cur_shared_cache_uuid, ^(const dyld_shared_cache_dylib_text_info* info) {
			T_QUIET; T_ASSERT_LT(lib_index, uuid_count, "dyld_shared_cache_iterate_text exceeded number of libraries returned by kernel");

			libs_found++;
			struct dyld_uuid_info_64 *cur_stackshot_uuid_entry = &uuids[lib_index];
			T_QUIET; T_ASSERT_EQ(memcmp(info->dylibUuid, cur_stackshot_uuid_entry->imageUUID, sizeof(info->dylibUuid)), 0,
					"dyld returned UUID doesn't match kernel returned UUID");
			T_QUIET; T_ASSERT_EQ(info->loadAddressUnslid, cur_stackshot_uuid_entry->imageLoadAddress,
					"dyld returned load address doesn't match kernel returned load address");
			lib_index++;
		});

	T_ASSERT_EQ(result, 0, "iterate shared cache layout");
	T_ASSERT_EQ(libs_found, uuid_count, "dyld iterator returned same number of libraries as kernel");

	T_LOG("verified %d libraries from dyld shared cache", libs_found);
}
2104
/*
 * Assert that `imageUUID` matches the current process's shared cache UUID.
 * The dyld-reported UUID is fetched once and cached for all later calls.
 */
static void
check_shared_cache_uuid(uuid_t imageUUID)
{
	static uuid_t shared_cache_uuid;
	static dispatch_once_t read_shared_cache_uuid;

	dispatch_once(&read_shared_cache_uuid, ^{
		T_QUIET;
		T_ASSERT_TRUE(_dyld_get_shared_cache_uuid(shared_cache_uuid), "retrieve current shared cache UUID");
	});
	T_QUIET; T_ASSERT_EQ(uuid_compare(shared_cache_uuid, imageUUID), 0,
			"dyld returned UUID doesn't match kernel returned UUID for system shared cache");
}
2118
2119/*
2120 * extra dictionary contains data relevant for the given flags:
2121 * PARSE_STACKSHOT_ZOMBIE:   zombie_child_pid_key -> @(pid)
2122 * PARSE_STACKSHOT_POSTEXEC: postexec_child_unique_pid_key -> @(unique_pid)
2123 */
2124static void
2125parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, NSDictionary *extra)
2126{
2127	bool delta = (stackshot_parsing_flags & PARSE_STACKSHOT_DELTA);
2128	bool expect_sharedcache_child = (stackshot_parsing_flags & PARSE_STACKSHOT_SHAREDCACHE_FLAGS);
2129	bool expect_zombie_child = (stackshot_parsing_flags & PARSE_STACKSHOT_ZOMBIE);
2130	bool expect_postexec_child = (stackshot_parsing_flags & PARSE_STACKSHOT_POSTEXEC);
2131	bool expect_cseg_waitinfo = (stackshot_parsing_flags & PARSE_STACKSHOT_WAITINFO_CSEG);
2132	bool expect_translated_child = (stackshot_parsing_flags & PARSE_STACKSHOT_TRANSLATED);
2133	bool expect_shared_cache_layout = false;
2134	bool expect_shared_cache_uuid = !delta;
2135	bool expect_dispatch_queue_label = (stackshot_parsing_flags & PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL);
2136	bool expect_turnstile_lock = (stackshot_parsing_flags & PARSE_STACKSHOT_TURNSTILEINFO);
2137	bool expect_srp_waitinfo = (stackshot_parsing_flags & PARSE_STACKSHOT_WAITINFO_SRP);
2138	bool expect_exec_inprogress = (stackshot_parsing_flags & PARSE_STACKSHOT_EXEC_INPROGRESS);
2139	bool expect_transitioning_task = (stackshot_parsing_flags & PARSE_STACKSHOT_TRANSITIONING);
2140	bool expect_asyncstack = (stackshot_parsing_flags & PARSE_STACKSHOT_ASYNCSTACK);
2141	bool found_zombie_child = false, found_postexec_child = false, found_shared_cache_layout = false, found_shared_cache_uuid = false;
2142	bool found_translated_child = false, found_transitioning_task = false;
2143	bool found_dispatch_queue_label = false, found_turnstile_lock = false;
2144	bool found_cseg_waitinfo = false, found_srp_waitinfo = false;
2145	bool found_sharedcache_child = false, found_sharedcache_badflags = false, found_sharedcache_self = false;
2146	bool found_asyncstack = false;
2147	uint64_t srp_expected_threadid = 0;
2148	pid_t zombie_child_pid = -1, srp_expected_pid = -1, sharedcache_child_pid = -1;
2149	pid_t translated_child_pid = -1, transistioning_task_pid = -1;
2150	bool sharedcache_child_sameaddr = false;
2151	uint64_t postexec_child_unique_pid = 0, cseg_expected_threadid = 0;
2152	uint64_t sharedcache_child_flags = 0, sharedcache_self_flags = 0;
2153	uint64_t asyncstack_threadid = 0;
2154	NSArray *asyncstack_stack = nil;
2155	char *inflatedBufferBase = NULL;
2156	pid_t exec_inprogress_pid = -1;
2157	void (^exec_inprogress_cb)(uint64_t, uint64_t) = NULL;
2158	int exec_inprogress_found = 0;
2159	uint64_t exec_inprogress_containerid = 0;
2160
2161	if (expect_shared_cache_uuid) {
2162		uuid_t shared_cache_uuid;
2163		if (!_dyld_get_shared_cache_uuid(shared_cache_uuid)) {
2164			T_LOG("Skipping verifying shared cache UUID in stackshot data because not running with a shared cache");
2165			expect_shared_cache_uuid = false;
2166		}
2167	}
2168
2169	if (stackshot_parsing_flags & PARSE_STACKSHOT_SHAREDCACHE_LAYOUT) {
2170		size_t shared_cache_length = 0;
2171		const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);
2172		T_QUIET; T_ASSERT_NOTNULL(cache_header, "current process running with shared cache");
2173		T_QUIET; T_ASSERT_GT(shared_cache_length, sizeof(struct _dyld_cache_header), "valid shared cache length populated by _dyld_get_shared_cache_range");
2174
2175		if (_dyld_shared_cache_is_locally_built()) {
2176			T_LOG("device running with locally built shared cache, expect shared cache layout");
2177			expect_shared_cache_layout = true;
2178		} else {
2179			T_LOG("device running with B&I built shared-cache, no shared cache layout expected");
2180		}
2181	}
2182
2183	if (expect_sharedcache_child) {
2184		NSNumber* pid_num = extra[sharedcache_child_pid_key];
2185		NSNumber* sameaddr_num = extra[sharedcache_child_sameaddr_key];
2186		T_QUIET; T_ASSERT_NOTNULL(pid_num, "sharedcache child pid provided");
2187		T_QUIET; T_ASSERT_NOTNULL(sameaddr_num, "sharedcache child addrsame provided");
2188		sharedcache_child_pid = [pid_num intValue];
2189		T_QUIET; T_ASSERT_GT(sharedcache_child_pid, 0, "sharedcache child pid greater than zero");
2190		sharedcache_child_sameaddr = [sameaddr_num intValue];
2191		T_QUIET; T_ASSERT_GE([sameaddr_num intValue], 0, "sharedcache child sameaddr is boolean (0 or 1)");
2192		T_QUIET; T_ASSERT_LE([sameaddr_num intValue], 1, "sharedcache child sameaddr is boolean (0 or 1)");
2193	}
2194
2195    if (expect_transitioning_task) {
2196        NSNumber* pid_num = extra[transitioning_pid_key];
2197        T_ASSERT_NOTNULL(pid_num, "transitioning task pid provided");
2198        transistioning_task_pid = [pid_num intValue];
2199    }
2200
2201	if (expect_zombie_child) {
2202		NSNumber* pid_num = extra[zombie_child_pid_key];
2203		T_QUIET; T_ASSERT_NOTNULL(pid_num, "zombie child pid provided");
2204		zombie_child_pid = [pid_num intValue];
2205		T_QUIET; T_ASSERT_GT(zombie_child_pid, 0, "zombie child pid greater than zero");
2206	}
2207
2208	if (expect_postexec_child) {
2209		NSNumber* unique_pid_num = extra[postexec_child_unique_pid_key];
2210		T_QUIET; T_ASSERT_NOTNULL(unique_pid_num, "postexec child unique pid provided");
2211		postexec_child_unique_pid = [unique_pid_num unsignedLongLongValue];
2212		T_QUIET; T_ASSERT_GT(postexec_child_unique_pid, 0ull, "postexec child unique pid greater than zero");
2213	}
2214
2215	if (expect_cseg_waitinfo) {
2216		NSNumber* tid_num = extra[cseg_expected_threadid_key];
2217		T_QUIET; T_ASSERT_NOTNULL(tid_num, "cseg's expected thread id provided");
2218		cseg_expected_threadid = tid_num.unsignedLongValue;
2219		T_QUIET; T_ASSERT_GT(cseg_expected_threadid, UINT64_C(0), "compressor segment thread is present");
2220	}
2221
2222	if (expect_srp_waitinfo) {
2223		NSNumber* threadid_num = extra[srp_expected_threadid_key];
2224		NSNumber* pid_num = extra[srp_expected_pid_key];
2225		T_QUIET; T_ASSERT_TRUE(threadid_num != nil || pid_num != nil, "expected SRP threadid or pid");
2226		if (threadid_num != nil) {
2227			srp_expected_threadid = [threadid_num unsignedLongLongValue];
2228			T_QUIET; T_ASSERT_GT(srp_expected_threadid, 0ull, "srp_expected_threadid greater than zero");
2229		}
2230		if (pid_num != nil) {
2231			srp_expected_pid = [pid_num intValue];
2232			T_QUIET; T_ASSERT_GT(srp_expected_pid, 0, "srp_expected_pid greater than zero");
2233		}
2234		T_LOG("looking for SRP pid: %d threadid: %llu", srp_expected_pid, srp_expected_threadid);
2235	}
2236
2237	if (expect_translated_child) {
2238		NSNumber* pid_num = extra[translated_child_pid_key];
2239		T_QUIET; T_ASSERT_NOTNULL(pid_num, "translated child pid provided");
2240		translated_child_pid = [pid_num intValue];
2241		T_QUIET; T_ASSERT_GT(translated_child_pid, 0, "translated child pid greater than zero");
2242	}
2243	if (expect_exec_inprogress) {
2244		NSNumber* pid_num = extra[exec_inprogress_pid_key];
2245		T_QUIET; T_ASSERT_NOTNULL(pid_num, "exec inprogress pid provided");
2246		exec_inprogress_pid = [pid_num intValue];
2247		T_QUIET; T_ASSERT_GT(exec_inprogress_pid, 0, "exec inprogress pid greater than zero");
2248
2249		exec_inprogress_cb = extra[exec_inprogress_found_key];
2250		T_QUIET; T_ASSERT_NOTNULL(exec_inprogress_cb, "exec inprogress found callback provided");
2251	}
2252
2253	if (expect_asyncstack) {
2254		NSNumber* threadid_id = extra[asyncstack_expected_threadid_key];
2255		T_QUIET; T_ASSERT_NOTNULL(threadid_id, "asyncstack threadid provided");
2256		asyncstack_threadid = [threadid_id unsignedLongLongValue];
2257		asyncstack_stack = extra[asyncstack_expected_stack_key];
2258		T_QUIET; T_ASSERT_NOTNULL(asyncstack_stack, "asyncstack expected stack provided");
2259	}
2260
2261	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
2262	if (delta) {
2263		T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
2264				"buffer provided is a delta stackshot");
2265
2266			iter = kcdata_iter_next(iter);
2267	} else {
2268		if (kcdata_iter_type(iter) != KCDATA_BUFFER_BEGIN_COMPRESSED) {
2269			T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
2270					"buffer provided is a stackshot");
2271
2272			iter = kcdata_iter_next(iter);
2273		} else {
2274			/* we are dealing with a compressed buffer */
2275			iter = kcdata_iter_next(iter);
2276			uint64_t compression_type = 0, totalout = 0, totalin = 0;
2277
2278			uint64_t *data;
2279			char *desc;
2280			for (int i = 0; i < 3; i ++) {
2281				kcdata_iter_get_data_with_desc(iter, &desc, (void **)&data, NULL);
2282				if (strcmp(desc, "kcd_c_type") == 0) {
2283					compression_type = *data;
2284				} else if (strcmp(desc, "kcd_c_totalout") == 0){
2285					totalout = *data;
2286				} else if (strcmp(desc, "kcd_c_totalin") == 0){
2287					totalin = *data;
2288				}
2289
2290				iter = kcdata_iter_next(iter);
2291			}
2292
2293			T_ASSERT_EQ(compression_type, UINT64_C(1), "zlib compression is used");
2294			T_ASSERT_GT(totalout, UINT64_C(0), "successfully gathered how long the compressed buffer is");
2295			T_ASSERT_GT(totalin, UINT64_C(0), "successfully gathered how long the uncompressed buffer will be at least");
2296
2297			/* progress to the next kcdata item */
2298			T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "compressed stackshot found");
2299
2300			char *bufferBase = kcdata_iter_payload(iter);
2301
2302			/*
2303			 * zlib is used, allocate a buffer based on the metadata, plus
2304			 * extra scratch space (+12.5%) in case totalin was inconsistent
2305			 */
2306			size_t inflatedBufferSize = totalin + (totalin >> 3);
2307			inflatedBufferBase = malloc(inflatedBufferSize);
2308			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(inflatedBufferBase, "allocated temporary output buffer");
2309
2310			z_stream zs;
2311			memset(&zs, 0, sizeof(zs));
2312			T_QUIET; T_ASSERT_EQ(inflateInit(&zs), Z_OK, "inflateInit OK");
2313			zs.next_in = (unsigned char *)bufferBase;
2314			T_QUIET; T_ASSERT_LE(totalout, (uint64_t)UINT_MAX, "stackshot is not too large");
2315			zs.avail_in = (uInt)totalout;
2316			zs.next_out = (unsigned char *)inflatedBufferBase;
2317			T_QUIET; T_ASSERT_LE(inflatedBufferSize, (size_t)UINT_MAX, "output region is not too large");
2318			zs.avail_out = (uInt)inflatedBufferSize;
2319			T_ASSERT_EQ(inflate(&zs, Z_FINISH), Z_STREAM_END, "inflated buffer");
2320			inflateEnd(&zs);
2321
2322			T_ASSERT_EQ((uint64_t)zs.total_out, totalin, "expected number of bytes inflated");
2323
2324			/* copy the data after the compressed area */
2325			T_QUIET; T_ASSERT_GE((void *)bufferBase, ssbuf,
2326					"base of compressed stackshot is after the returned stackshot buffer");
2327			size_t header_size = (size_t)(bufferBase - (char *)ssbuf);
2328			size_t data_after_compressed_size = sslen - totalout - header_size;
2329			T_QUIET; T_ASSERT_LE(data_after_compressed_size,
2330					inflatedBufferSize - zs.total_out,
2331					"footer fits in the buffer");
2332			memcpy(inflatedBufferBase + zs.total_out,
2333					bufferBase + totalout,
2334					data_after_compressed_size);
2335
2336			iter = kcdata_iter(inflatedBufferBase, inflatedBufferSize);
2337		}
2338	}
2339
2340	KCDATA_ITER_FOREACH(iter) {
2341		NSError *error = nil;
2342
2343		switch (kcdata_iter_type(iter)) {
2344		case KCDATA_TYPE_ARRAY: {
2345			T_QUIET;
2346			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
2347					"checked that array is valid");
2348
2349			NSMutableDictionary *array = parseKCDataArray(iter, &error);
2350			T_QUIET; T_ASSERT_NOTNULL(array, "parsed array from stackshot");
2351			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing array");
2352
2353			if (kcdata_iter_array_elem_type(iter) == STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT) {
2354				struct dyld_uuid_info_64 *shared_cache_uuids = kcdata_iter_payload(iter);
2355				uint32_t uuid_count = kcdata_iter_array_elem_count(iter);
2356				T_ASSERT_NOTNULL(shared_cache_uuids, "parsed shared cache layout array");
2357				T_ASSERT_GT(uuid_count, 0, "returned valid number of UUIDs from shared cache");
2358				verify_stackshot_sharedcache_layout(shared_cache_uuids, uuid_count);
2359				found_shared_cache_layout = true;
2360			}
2361
2362			break;
2363		}
2364
2365		case KCDATA_TYPE_CONTAINER_BEGIN: {
2366			T_QUIET;
2367			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
2368					"checked that container is valid");
2369
2370			uint64_t containerid = kcdata_iter_container_id(iter);
2371			uint32_t container_type = kcdata_iter_container_type(iter) ;
2372
2373			/*
2374			 * treat containers other than tasks/transitioning_tasks
2375			 * as expanded in-line.
2376			 */
2377			if (container_type != STACKSHOT_KCCONTAINER_TASK &&
2378			    container_type != STACKSHOT_KCCONTAINER_TRANSITIONING_TASK) {
2379				break;
2380			}
2381			NSDictionary *container = parseKCDataContainer(&iter, &error);
2382			T_QUIET; T_ASSERT_NOTNULL(container, "parsed task/transitioning_task container from stackshot");
2383			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");
2384
2385			NSDictionary* task_snapshot = container[@"task_snapshots"][@"task_snapshot"];
2386			NSDictionary* task_delta_snapshot = container[@"task_snapshots"][@"task_delta_snapshot"];
2387			NSDictionary* transitioning_task_snapshot = container[@"transitioning_task_snapshots"][@"transitioning_task_snapshot"];
2388
2389			/*
2390			 * Having processed the container, we now only check it
2391			 * if it's the correct type.
2392			 */
2393			if ((!expect_transitioning_task && (container_type != STACKSHOT_KCCONTAINER_TASK)) ||
2394			    (expect_transitioning_task && (container_type != STACKSHOT_KCCONTAINER_TRANSITIONING_TASK))) {
2395				break;
2396			}
2397			if (!expect_transitioning_task) {
2398			    	T_QUIET; T_ASSERT_TRUE(!!task_snapshot != !!task_delta_snapshot, "Either task_snapshot xor task_delta_snapshot provided");
2399			}
2400
2401			if (expect_dispatch_queue_label && !found_dispatch_queue_label) {
2402				for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
2403					NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
2404					NSString *dql = thread[@"dispatch_queue_label"];
2405
2406					if ([dql isEqualToString:@TEST_STACKSHOT_QUEUE_LABEL]) {
2407						found_dispatch_queue_label = true;
2408						break;
2409					}
2410				}
2411			}
2412
2413			if (expect_transitioning_task && !found_transitioning_task) {
2414				if (transitioning_task_snapshot) {
2415					uint64_t the_pid = [transitioning_task_snapshot[@"tts_pid"] unsignedLongLongValue];
2416					if (the_pid == (uint64_t)transistioning_task_pid) {
2417					    found_transitioning_task = true;
2418					    T_PASS("FOUND Transitioning task %llu has a transitioning task snapshot", (uint64_t) transistioning_task_pid);
2419					    break;
2420					}
2421				}
2422			}
2423
2424			if (expect_postexec_child && !found_postexec_child) {
2425				if (task_snapshot) {
2426					uint64_t unique_pid = [task_snapshot[@"ts_unique_pid"] unsignedLongLongValue];
2427					if (unique_pid == postexec_child_unique_pid) {
2428						found_postexec_child = true;
2429
2430						T_PASS("post-exec child %llu has a task snapshot", postexec_child_unique_pid);
2431
2432						break;
2433					}
2434				}
2435
2436				if (task_delta_snapshot) {
2437					uint64_t unique_pid = [task_delta_snapshot[@"tds_unique_pid"] unsignedLongLongValue];
2438					if (unique_pid == postexec_child_unique_pid) {
2439						found_postexec_child = true;
2440
2441						T_FAIL("post-exec child %llu shouldn't have a delta task snapshot", postexec_child_unique_pid);
2442
2443						break;
2444					}
2445				}
2446			}
2447
2448			if (!task_snapshot) {
2449				break;
2450			}
2451
2452			int pid = [task_snapshot[@"ts_pid"] intValue];
2453
2454			if (pid && expect_shared_cache_uuid && !found_shared_cache_uuid) {
2455				id ptr = container[@"task_snapshots"][@"shared_cache_dyld_load_info"];
2456				if (ptr) {
2457					id uuid = ptr[@"imageUUID"];
2458
2459					uint8_t uuid_p[16];
2460					for (unsigned int i = 0; i < 16; i ++) {
2461						NSNumber *uuidByte = uuid[i];
2462						uuid_p[i] = (uint8_t)uuidByte.charValue;
2463					}
2464
2465					check_shared_cache_uuid(uuid_p);
2466
2467					uint64_t baseAddress = (uint64_t)((NSNumber *)ptr[@"imageSlidBaseAddress"]).longLongValue;
2468					uint64_t firstMapping = (uint64_t)((NSNumber *)ptr[@"sharedCacheSlidFirstMapping"]).longLongValue;
2469
2470					T_EXPECT_LE(baseAddress, firstMapping,
2471						"in per-task shared_cache_dyld_load_info, "
2472						"baseAddress <= firstMapping");
2473					T_EXPECT_GE(baseAddress + (7ull << 32) + (1ull << 29),
2474						firstMapping,
2475						"in per-task shared_cache_dyld_load_info, "
2476						"baseAddress + 28.5gig >= firstMapping");
2477
2478					size_t shared_cache_len;
2479					const void *addr = _dyld_get_shared_cache_range(&shared_cache_len);
2480					T_EXPECT_EQ((uint64_t)addr, firstMapping,
2481							"SlidFirstMapping should match shared_cache_range");
2482
2483					/*
2484					 * check_shared_cache_uuid() will assert on failure, so if
2485					 * we get here, then we have found the shared cache UUID
2486					 * and it's correct
2487					 */
2488					found_shared_cache_uuid = true;
2489				}
2490			}
2491
2492			if (expect_sharedcache_child) {
2493				uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2494				uint64_t sharedregion_flags = (task_flags & (kTaskSharedRegionNone | kTaskSharedRegionSystem | kTaskSharedRegionOther));
2495				id sharedregion_info = container[@"task_snapshots"][@"shared_cache_dyld_load_info"];
2496				if (!found_sharedcache_badflags) {
2497					T_QUIET; T_ASSERT_NE(sharedregion_flags, 0ll, "one of the kTaskSharedRegion flags should be set on all tasks");
2498					bool multiple = (sharedregion_flags & (sharedregion_flags - 1)) != 0;
2499					T_QUIET; T_ASSERT_FALSE(multiple, "only one kTaskSharedRegion flag should be set on each task");
2500					found_sharedcache_badflags = (sharedregion_flags == 0 || multiple);
2501				}
2502				if (pid == 0) {
2503					T_ASSERT_EQ(sharedregion_flags, (uint64_t)kTaskSharedRegionNone, "Kernel proc (pid 0) should have no shared region");
2504				} else if (pid == sharedcache_child_pid) {
2505					found_sharedcache_child = true;
2506					sharedcache_child_flags = sharedregion_flags;
2507				} else if (pid == getpid()) {
2508					found_sharedcache_self = true;
2509					sharedcache_self_flags = sharedregion_flags;
2510				}
2511				if (sharedregion_flags == kTaskSharedRegionOther && !(task_flags & kTaskSharedRegionInfoUnavailable)) {
2512					T_QUIET; T_ASSERT_NOTNULL(sharedregion_info, "kTaskSharedRegionOther should have a shared_cache_dyld_load_info struct");
2513				} else {
2514					T_QUIET; T_ASSERT_NULL(sharedregion_info, "expect no shared_cache_dyld_load_info struct");
2515				}
2516			}
2517
2518			if (expect_zombie_child && (pid == zombie_child_pid)) {
2519				found_zombie_child = true;
2520
2521				uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2522				T_ASSERT_TRUE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "child zombie marked as terminated");
2523
2524				continue;
2525			}
2526
2527			if (expect_translated_child && (pid == translated_child_pid)) {
2528				found_translated_child = true;
2529
2530				uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2531				T_EXPECT_BITS_SET(task_flags, kTaskIsTranslated, "child marked as translated");
2532
2533				continue;
2534			}
2535			if (expect_exec_inprogress && (pid == exec_inprogress_pid || pid == -exec_inprogress_pid)) {
2536				exec_inprogress_found++;
2537				T_LOG("found exec task with pid %d, instance %d", pid, exec_inprogress_found);
2538				T_QUIET; T_ASSERT_LE(exec_inprogress_found, 2, "no more than two with the expected pid");
2539				if (exec_inprogress_found == 2) {
2540					T_LOG("found 2 tasks with pid %d", exec_inprogress_pid);
2541					exec_inprogress_cb(containerid, exec_inprogress_containerid);
2542				} else {
2543					exec_inprogress_containerid = containerid;
2544				}
2545			}
2546			if (expect_cseg_waitinfo) {
2547				NSArray *winfos = container[@"task_snapshots"][@"thread_waitinfo"];
2548
2549				for (id i in winfos) {
2550					NSNumber *waitType = i[@"wait_type"];
2551					NSNumber *owner = i[@"owner"];
2552					if (waitType.intValue == kThreadWaitCompressor &&
2553							owner.unsignedLongValue == cseg_expected_threadid) {
2554						found_cseg_waitinfo = true;
2555						break;
2556					}
2557				}
2558			}
2559
2560			if (expect_srp_waitinfo) {
2561				NSArray *tinfos = container[@"task_snapshots"][@"thread_turnstileinfo"];
2562				NSArray *winfos = container[@"task_snapshots"][@"thread_waitinfo"];
2563				for (id i in tinfos) {
2564					if (!found_srp_waitinfo) {
2565						bool found_thread = false;
2566						bool found_pid = false;
2567						if (([i[@"turnstile_flags"] intValue] & STACKSHOT_TURNSTILE_STATUS_THREAD) &&
2568						    [i[@"turnstile_context"] unsignedLongLongValue] == srp_expected_threadid &&
2569						    srp_expected_threadid != 0) {
2570							found_thread = true;
2571						}
2572						if (([i[@"turnstile_flags"] intValue] & STACKSHOT_TURNSTILE_STATUS_BLOCKED_ON_TASK) &&
2573						    [i[@"turnstile_context"] intValue] == srp_expected_pid &&
2574						    srp_expected_pid != -1) {
2575							found_pid = true;
2576						}
2577						if (found_pid || found_thread) {
2578							T_LOG("found SRP %s %lld waiter: %d", (found_thread ? "thread" : "pid"),
2579							    [i[@"turnstile_context"] unsignedLongLongValue], [i[@"waiter"] intValue]);
2580							/* we found something that is blocking the correct threadid */
2581							for (id j in winfos) {
2582								if ([j[@"waiter"] intValue] == [i[@"waiter"] intValue] &&
2583								    [j[@"wait_type"] intValue] == kThreadWaitPortReceive) {
2584									found_srp_waitinfo = true;
2585									T_EXPECT_EQ([j[@"wait_flags"] intValue], STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY,
2586									    "SRP waitinfo should be marked as a special reply");
2587									break;
2588								}
2589							}
2590
2591							if (found_srp_waitinfo) {
2592								break;
2593							}
2594						}
2595					}
2596				}
2597			}
2598
2599			if (pid != getpid()) {
2600				break;
2601			}
2602
2603			T_EXPECT_EQ_STR(current_process_name(),
2604					[task_snapshot[@"ts_p_comm"] UTF8String],
2605					"current process name matches in stackshot");
2606
2607			uint64_t task_flags = [task_snapshot[@"ts_ss_flags"] unsignedLongLongValue];
2608			T_ASSERT_BITS_NOTSET(task_flags, kTerminatedSnapshot, "current process not marked as terminated");
2609			T_ASSERT_BITS_NOTSET(task_flags, kTaskIsTranslated, "current process not marked as translated");
2610
2611			T_QUIET;
2612			T_EXPECT_LE(pid, [task_snapshot[@"ts_unique_pid"] intValue],
2613					"unique pid is greater than pid");
2614
2615			NSDictionary* task_cpu_architecture = container[@"task_snapshots"][@"task_cpu_architecture"];
2616			T_QUIET; T_ASSERT_NOTNULL(task_cpu_architecture[@"cputype"], "have cputype");
2617			T_QUIET; T_ASSERT_NOTNULL(task_cpu_architecture[@"cpusubtype"], "have cputype");
2618			int cputype = [task_cpu_architecture[@"cputype"] intValue];
2619			int cpusubtype = [task_cpu_architecture[@"cpusubtype"] intValue];
2620
2621			struct proc_archinfo archinfo;
2622			int retval = proc_pidinfo(pid, PROC_PIDARCHINFO, 0, &archinfo, sizeof(archinfo));
2623			T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(retval, 0, "proc_pidinfo(PROC_PIDARCHINFO) returned a value > 0");
2624			T_QUIET; T_ASSERT_EQ(retval, (int)sizeof(struct proc_archinfo), "proc_pidinfo call for PROC_PIDARCHINFO returned expected size");
2625			T_QUIET; T_EXPECT_EQ(cputype, archinfo.p_cputype, "cpu type is correct");
2626			T_QUIET; T_EXPECT_EQ(cpusubtype, archinfo.p_cpusubtype, "cpu subtype is correct");
2627
2628			bool found_main_thread = false;
2629			uint64_t main_thread_id = -1ULL;
2630			bool found_null_kernel_frame = false;
2631			for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
2632				NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
2633				NSDictionary *thread_snap = thread[@"thread_snapshot"];
2634
2635				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_thread_id"] intValue], 0,
2636						"thread ID of thread in current task is valid");
2637				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_base_priority"] intValue], 0,
2638						"base priority of thread in current task is valid");
2639				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_sched_priority"] intValue], 0,
2640						"scheduling priority of thread in current task is valid");
2641
2642				NSString *pth_name = thread[@"pth_name"];
2643				if (pth_name != nil && [pth_name isEqualToString:@TEST_THREAD_NAME]) {
2644					found_main_thread = true;
2645					main_thread_id = [thread_snap[@"ths_thread_id"] unsignedLongLongValue];
2646
2647					T_QUIET; T_EXPECT_GT([thread_snap[@"ths_total_syscalls"] intValue], 0,
2648							"total syscalls of current thread is valid");
2649
2650					NSDictionary *cpu_times = thread[@"cpu_times"];
2651					T_EXPECT_GE([cpu_times[@"runnable_time"] intValue],
2652							[cpu_times[@"system_time"] intValue] +
2653							[cpu_times[@"user_time"] intValue],
2654							"runnable time of current thread is valid");
2655				}
2656				if (!found_null_kernel_frame) {
2657					for (NSNumber *frame in thread[@"kernel_frames"]) {
2658						if (frame.unsignedLongValue == 0) {
2659							found_null_kernel_frame = true;
2660							break;
2661						}
2662					}
2663				}
2664				if (expect_asyncstack && !found_asyncstack &&
2665				    asyncstack_threadid == [thread_snap[@"ths_thread_id"] unsignedLongLongValue]) {
2666					found_asyncstack = true;
2667					NSArray* async_stack = thread[@"user_async_stack_frames"];
2668					NSNumber* start_idx = thread[@"user_async_start_index"];
2669					NSArray* user_stack = thread[@"user_stack_frames"];
2670					T_QUIET; T_ASSERT_NOTNULL(async_stack, "async thread %#llx has user_async_stack_frames", asyncstack_threadid);
2671					T_QUIET; T_ASSERT_NOTNULL(start_idx, "async thread %#llx has user_async_start_index", asyncstack_threadid);
2672					T_QUIET; T_ASSERT_NOTNULL(user_stack, "async thread %#llx has user_stack_frames", asyncstack_threadid);
2673					T_QUIET; T_ASSERT_EQ(async_stack.count, asyncstack_stack.count,
2674						"actual async_stack count == expected async_stack count");
2675					for (size_t i = 0; i < async_stack.count; i++) {
2676						T_EXPECT_EQ([async_stack[i][@"lr"] unsignedLongLongValue],
2677							[asyncstack_stack[i] unsignedLongLongValue], "frame %zu matches", i);
2678					}
2679				}
2680			}
2681			T_EXPECT_TRUE(found_main_thread, "found main thread for current task in stackshot");
2682			T_EXPECT_FALSE(found_null_kernel_frame, "should not see any NULL kernel frames");
2683
2684			if (expect_turnstile_lock && !found_turnstile_lock) {
2685				NSArray *tsinfos = container[@"task_snapshots"][@"thread_turnstileinfo"];
2686
2687				for (id i in tsinfos) {
2688					if ([i[@"turnstile_context"] unsignedLongLongValue] == main_thread_id) {
2689						found_turnstile_lock = true;
2690						break;
2691					}
2692				}
2693			}
2694			break;
2695		}
2696		case STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO: {
2697			struct dyld_shared_cache_loadinfo *payload = kcdata_iter_payload(iter);
2698			T_ASSERT_EQ((size_t)kcdata_iter_size(iter), sizeof(*payload), "valid dyld_shared_cache_loadinfo struct");
2699
2700			check_shared_cache_uuid(payload->sharedCacheUUID);
2701
2702			T_EXPECT_LE(payload->sharedCacheUnreliableSlidBaseAddress,
2703				payload->sharedCacheSlidFirstMapping,
2704				"SlidBaseAddress <= SlidFirstMapping");
2705			T_EXPECT_GE(payload->sharedCacheUnreliableSlidBaseAddress + (7ull << 32) + (1ull << 29),
2706				payload->sharedCacheSlidFirstMapping,
2707				"SlidFirstMapping should be within 28.5gigs of SlidBaseAddress");
2708
2709			size_t shared_cache_len;
2710			const void *addr = _dyld_get_shared_cache_range(&shared_cache_len);
2711			T_EXPECT_EQ((uint64_t)addr, payload->sharedCacheSlidFirstMapping,
2712			    "SlidFirstMapping should match shared_cache_range");
2713
2714			/*
2715			 * check_shared_cache_uuid() asserts on failure, so we must have
2716			 * found the shared cache UUID to be correct.
2717			 */
2718			found_shared_cache_uuid = true;
2719			break;
2720		}
2721		}
2722	}
2723
2724	if (expect_sharedcache_child) {
2725		T_QUIET; T_ASSERT_TRUE(found_sharedcache_child, "found sharedcache child in kcdata");
2726		T_QUIET; T_ASSERT_TRUE(found_sharedcache_self, "found self in kcdata");
2727		if (found_sharedcache_child && found_sharedcache_self) {
2728			T_QUIET; T_ASSERT_NE(sharedcache_child_flags, (uint64_t)kTaskSharedRegionNone, "sharedcache child should have shared region");
2729			T_QUIET; T_ASSERT_NE(sharedcache_self_flags, (uint64_t)kTaskSharedRegionNone, "sharedcache: self should have shared region");
2730			if (sharedcache_self_flags == kTaskSharedRegionSystem && !sharedcache_child_sameaddr) {
2731				/* If we're in the system shared region, and the child has a different address, child must have an Other shared region */
2732				T_ASSERT_EQ(sharedcache_child_flags, (uint64_t)kTaskSharedRegionOther,
2733				    "sharedcache child should have Other shared region");
2734			}
2735		}
2736	}
2737
2738	if (expect_transitioning_task) {
2739		T_QUIET; T_ASSERT_TRUE(found_transitioning_task, "found transitioning_task child in kcdata");
2740	}
2741
2742	if (expect_exec_inprogress) {
2743		T_QUIET; T_ASSERT_GT(exec_inprogress_found, 0, "found at least 1 task for execing process");
2744	}
2745
2746	if (expect_zombie_child) {
2747		T_QUIET; T_ASSERT_TRUE(found_zombie_child, "found zombie child in kcdata");
2748	}
2749
2750	if (expect_postexec_child) {
2751		T_QUIET; T_ASSERT_TRUE(found_postexec_child, "found post-exec child in kcdata");
2752	}
2753
2754	if (expect_translated_child) {
2755		T_QUIET; T_ASSERT_TRUE(found_translated_child, "found translated child in kcdata");
2756	}
2757
2758	if (expect_shared_cache_layout) {
2759		T_QUIET; T_ASSERT_TRUE(found_shared_cache_layout, "shared cache layout found in kcdata");
2760	}
2761
2762	if (expect_shared_cache_uuid) {
2763		T_QUIET; T_ASSERT_TRUE(found_shared_cache_uuid, "shared cache UUID found in kcdata");
2764	}
2765
2766	if (expect_dispatch_queue_label) {
2767		T_QUIET; T_ASSERT_TRUE(found_dispatch_queue_label, "dispatch queue label found in kcdata");
2768	}
2769
2770	if (expect_turnstile_lock) {
2771		T_QUIET; T_ASSERT_TRUE(found_turnstile_lock, "found expected deadlock");
2772	}
2773
2774	if (expect_cseg_waitinfo) {
2775		T_QUIET; T_ASSERT_TRUE(found_cseg_waitinfo, "found c_seg waitinfo");
2776	}
2777
2778	if (expect_srp_waitinfo) {
2779		T_QUIET; T_ASSERT_TRUE(found_srp_waitinfo, "found special reply port waitinfo");
2780	}
2781
2782	if (expect_asyncstack) {
2783		T_QUIET; T_ASSERT_TRUE(found_asyncstack, "found async stack threadid");
2784	}
2785
2786	T_ASSERT_FALSE(KCDATA_ITER_FOREACH_FAILED(iter), "successfully iterated kcdata");
2787
2788	free(inflatedBufferBase);
2789}
2790
/*
 * Return the current process's name as reported by proc_name().
 * The name is fetched once and memoized in a static buffer; later calls
 * return the cached copy.  (First call is not thread-safe, which is fine
 * for these single-threaded test setups.)
 */
static const char *
current_process_name(void)
{
	static char cached_name[64];

	if (cached_name[0] != '\0') {
		return cached_name;
	}

	int ret = proc_name(getpid(), cached_name, sizeof(cached_name));
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(ret, "proc_name failed for current process");

	return cached_name;
}
2804
/*
 * Label the calling thread with TEST_THREAD_NAME so the stackshot
 * parser can pick it out of the task's thread snapshots by pth_name.
 */
static void
initialize_thread(void)
{
	int err = pthread_setname_np(TEST_THREAD_NAME);
	T_QUIET;
	T_ASSERT_POSIX_ZERO(err, "set thread name to %s", TEST_THREAD_NAME);
}
2812