1 #include <darwintest.h>
2
3 #include <assert.h>
4 #include <mach/clock_types.h>
5 #include <unistd.h>
6 #include <stdlib.h>
7 #include <stdio.h>
8 #include <errno.h>
9 #include <err.h>
10 #include <sys/time.h>
11 #include <mach/mach.h>
12 #include <mach/mach_time.h>
13 #include <pthread.h>
14 #include <sys/sysctl.h>
15 #include <sys/stat.h>
16 #include <sys/mount.h>
17 #include <stdbool.h>
18 #include <signal.h>
19 #include <sys/resource.h>
20 #include <sys/resource_private.h>
21 #include <os/atomic_private.h>
22 #include <libproc.h>
23 #include <TargetConditionals.h>
24
25 #if __has_include(<mach/mach_time_private.h>)
26 #include <mach/mach_time_private.h>
27 #else
28 kern_return_t mach_get_times(uint64_t* absolute_time,
29 uint64_t* continuous_time,
30 struct timespec *tp);
31 #endif
32
33 /*
34 * This test program creates up to 8 worker threads performing
35 * mixed workloads of system calls (which contribute to both
36 * user and system time), as well as spins in userspace (which
37 * only contribute to user time).
38 *
39 * setitimer(2) is used to program timers that fire signals
40 * after various thresholds. The signal handler detects
41 * which thread the signal was delivered on by matching the
42 * stack pointer to ranges for each thread.
43 *
44 * After the test scenario is complete, the distribution of
45 * threads which received interrupts is evaluated to match
46 * expected heuristics.
47 */
48
/*
 * Test metadata: timing-sensitive, so run serially with leak checking
 * disabled; only enabled on macOS targets.
 */
T_GLOBAL_META(
	T_META_RUN_CONCURRENTLY(false),
	T_META_CHECK_LEAKS(false),
	T_META_ALL_VALID_ARCHS(true),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("scheduler"),
	T_META_OWNER("chimene"),
	T_META_ENABLED(TARGET_OS_OSX)
	);
58
/* Worker bodies: stat_thread is syscall-heavy; statfs_thread adds userspace spins. */
static void *stat_thread(void *arg);
static void *statfs_thread(void *arg);

/* Shared handler for SIGALRM / SIGVTALRM / SIGPROF / SIGXCPU. */
static void alrm_handler(int, struct __siginfo *, void *);

/* Workers post to this as they check in; main waits for all of them. */
static semaphore_t gMainWaitForWorkers;
/* Main signals this to release all workers at once. */
static semaphore_t gWorkersStart;

/* Mutex/condition pair main sleeps on for the duration of the scenario. */
static pthread_mutex_t gShouldExitMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gShouldExitCondition = PTHREAD_COND_INITIALIZER;

/* Set by main when the scenario ends; polled by the workers' loops. */
static _Atomic bool gShouldExit = false;

/* Upper bound on gThreadList slots: up to 8 workers plus the main thread. */
static const uint32_t max_threads = 9;

/* Per-thread bookkeeping; 128-byte aligned to keep entries on separate cache lines. */
static struct threadentry {
	pthread_t thread;
	uint64_t tid;
	void* stack_addr;          /* stack base (highest address) from pthread_get_stackaddr_np */
	size_t stack_size;
	bool expect_cpu_usage;     /* true for threads that spin in userspace (statfs workers) */
	uint32_t alrm_count;       /* signal tallies, incremented from the signal handler */
	uint32_t vtalrm_count;
	uint32_t prof_count;
	uint32_t xcpu_count;
	struct thsc_time_cpi self_stats;  /* thread_selfcounts() snapshot taken at exit */
} __attribute__((aligned(128))) gThreadList[max_threads];

static uint32_t nworkers;
static uint32_t nthreads;   /* nworkers + 1 (the main thread) */

/* A worker off-core more than this % of the test duration indicates CPU starvation. */
static double offcore_time_percent_threshold = 75.0;

static bool is_rosetta = false;

static mach_timebase_info_data_t timebase_info;
95
/* Convert mach absolute time to nanoseconds via the cached timebase. */
static uint64_t
abs_to_nanos(uint64_t abs)
{
	uint64_t scaled = abs * timebase_info.numer;
	return scaled / timebase_info.denom;
}
101
102 /* Some statistics APIs return host abstime instead of Rosetta-translated abstime */
103 static uint64_t
abs_to_nanos_host(uint64_t abstime)104 abs_to_nanos_host(uint64_t abstime)
105 {
106 if (is_rosetta) {
107 return abstime * 125 / 3;
108 } else {
109 return abs_to_nanos(abstime);
110 }
111 }
112
/*
 * Returns 1 when running under Rosetta translation, 0 when native,
 * and -1 on an unexpected sysctl failure.
 */
static int
processIsTranslated(void)
{
	int translated = 0;
	size_t len = sizeof(translated);

	if (sysctlbyname("sysctl.proc_translated", &translated, &len, NULL, 0) != -1) {
		return translated;
	}

	/* The sysctl does not exist on OS versions without translation support. */
	return (errno == ENOENT) ? 0 : -1;
}
127
/*
 * Snapshot the calling thread's time/CPI counters into gThreadList[i].
 * thread_selfcounts() reports only on the calling thread, so each worker
 * invokes this for its own slot just before exiting.
 */
static void
fill_thread_stats(uint32_t i)
{
	struct threadentry *entry = &gThreadList[i];

	int rv = thread_selfcounts(THSC_TIME_CPI, &entry->self_stats, sizeof(entry->self_stats));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "thread_selfcounts(THSC_TIME_CPI)");
}
136
T_DECL(setitimer,
    "Test various setitimer delivered signals to CPU-burning threads")
{
	int rv;
	kern_return_t kr;
	uint32_t ncpu;
	size_t ncpu_size = sizeof(ncpu);

	/* Raise the main thread's priority so it reliably fields SIGALRM on time. */
	struct sched_param self_param = {.sched_priority = 47};

	rv = pthread_setschedparam(pthread_self(), SCHED_FIFO, &self_param);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_setschedparam");

	kr = mach_timebase_info(&timebase_info);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_timebase_info");

	is_rosetta = processIsTranslated();

	rv = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sysctlbyname(hw.ncpu)");

	if (ncpu < 2) {
		T_SKIP("%d CPUs not supported for test, returning success", ncpu);
	}

	/* One worker per CPU, capped so the main thread keeps a gThreadList slot. */
	nworkers = MIN(max_threads - 1, ncpu);
	nthreads = nworkers + 1;

	T_LOG("rosetta = %d\n", is_rosetta);
	T_LOG("hw.ncpu = %d\n", ncpu);
	T_LOG("nworkers = %d\n", nworkers);
	T_LOG("nthreads = %d\n", nthreads);

	kr = semaphore_create(mach_task_self(), &gMainWaitForWorkers, SYNC_POLICY_FIFO, 0);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create()");

	kr = semaphore_create(mach_task_self(), &gWorkersStart, SYNC_POLICY_FIFO, 0);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create()");

	pthread_attr_t attr;

	rv = pthread_attr_init(&attr);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_init");

	/* Workers run below the main thread (47) so it wins SIGALRM delivery. */
	struct sched_param child_param = {.sched_priority = 37};

	rv = pthread_attr_setschedparam(&attr, &child_param);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_setschedparam");

	/* Slot 0 is the main thread; odd slots run stat(2), even slots statfs(2)+spin. */
	for (uint32_t i = 0; i < nthreads; i++) {
		if (i == 0) {
			gThreadList[i].thread = pthread_self();
		} else {
			rv = pthread_create(&gThreadList[i].thread, &attr,
			    i % 2 ? stat_thread : statfs_thread,
			    (void *)(uintptr_t)i);
			T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_create");
			gThreadList[i].expect_cpu_usage = i % 2 == 0 ? true : false;
		}

		rv = pthread_threadid_np(gThreadList[i].thread, &gThreadList[i].tid);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_threadid_np");

		/* Record each thread's stack range so the signal handler can map SP -> thread. */
		gThreadList[i].stack_addr = pthread_get_stackaddr_np(gThreadList[i].thread);
		gThreadList[i].stack_size = pthread_get_stacksize_np(gThreadList[i].thread);
	}

	rv = pthread_attr_destroy(&attr);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_destroy");

	/* Wait for every worker to check in before arming any timers. */
	for (uint32_t i = 1; i < nthreads; i++) {
		kr = semaphore_wait(gMainWaitForWorkers);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_wait()");
	}

	for (uint32_t i = 0; i < nthreads; i++) {
		T_LOG("Thread %p (0x%llx) checked in, stack %p/%p\n",
		    (void*)gThreadList[i].thread,
		    gThreadList[i].tid,
		    gThreadList[i].stack_addr,
		    (void *)gThreadList[i].stack_size);
	}

	sigset_t sigmk;
	sigemptyset(&sigmk);

	struct sigaction sigact = {
		.sa_sigaction = alrm_handler,
		.sa_mask = sigmk,
		.sa_flags = SA_SIGINFO,
	};

	rv = sigaction(SIGALRM, &sigact, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sigaction(SIGALRM)");

	rv = sigaction(SIGVTALRM, &sigact, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sigaction(SIGVTALRM)");

	rv = sigaction(SIGPROF, &sigact, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sigaction(SIGPROF)");

	rv = sigaction(SIGXCPU, &sigact, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sigaction(SIGXCPU)");

	/* Fire each timer class every 10ms, starting (nearly) immediately. */
	struct itimerval itime = {
		.it_interval.tv_sec = 0,
		.it_interval.tv_usec = 10000,
		.it_value.tv_sec = 0,
		.it_value.tv_usec = 10, /* immediately */
	};

	rv = setitimer(ITIMER_REAL, &itime, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "setitimer(ITIMER_REAL)");

	rv = setitimer(ITIMER_VIRTUAL, &itime, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "setitimer(ITIMER_VIRTUAL)");

	rv = setitimer(ITIMER_PROF, &itime, NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "setitimer(ITIMER_PROF)");

	/* A 1-second soft RLIMIT_CPU triggers SIGXCPU partway through the run. */
	struct rlimit rlim = {};

	rv = getrlimit(RLIMIT_CPU, &rlim);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "getrlimit(RLIMIT_CPU)");

	rlim.rlim_cur = 1;
	rv = setrlimit(RLIMIT_CPU, &rlim);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "setrlimit(RLIMIT_CPU)");

	rv = pthread_mutex_lock(&gShouldExitMutex);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_mutex_lock(&gShouldExitMutex)");

	/* Release all workers now that the timers and limits are armed. */
	kr = semaphore_signal_all(gWorkersStart);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_signal_all()");

	struct timespec timenow = {};
	uint64_t time_start;

	kr = mach_get_times(&time_start, NULL, &timenow);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_get_times()");

	/* Run the scenario for 10 seconds of wall time. */
	struct timespec timeout = {
		.tv_sec = timenow.tv_sec + 10,
		.tv_nsec = timenow.tv_nsec,
	};

	uint64_t time_end = 0;

	do {
		assert(os_atomic_load(&gShouldExit, relaxed) == false);

		/*
		 * Nothing signals the condition; we expect ETIMEDOUT after 10s.
		 * The loop absorbs spurious wakeups from signal delivery.
		 */
		rv = pthread_cond_timedwait(&gShouldExitCondition, &gShouldExitMutex, &timeout);
		if (rv == ETIMEDOUT) {
			os_atomic_store(&gShouldExit, true, relaxed);

			time_end = mach_absolute_time();

			struct itimerval itime_stop = {
				.it_interval.tv_sec = 0,
				.it_interval.tv_usec = 0,
				.it_value.tv_sec = 0,
				.it_value.tv_usec = 0, /* stop immediately */
			};

			rv = setitimer(ITIMER_REAL, &itime_stop, NULL);
			T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "setitimer(ITIMER_REAL)");

			rv = setitimer(ITIMER_VIRTUAL, &itime_stop, NULL);
			T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "setitimer(ITIMER_VIRTUAL)");

			rv = setitimer(ITIMER_PROF, &itime_stop, NULL);
			T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "setitimer(ITIMER_PROF)");

			break;
		} else {
			T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_cond_timedwait(&gShouldExitCondition, ...)");
		}
	} while (true);

	rv = pthread_mutex_unlock(&gShouldExitMutex);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_mutex_unlock(&gShouldExitMutex)");

	for (uint32_t i = 1; i < nthreads; i++) {
		rv = pthread_join(gThreadList[i].thread, NULL);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_join");
	}

	uint64_t test_duration = time_end - time_start;
	uint64_t test_duration_ns = abs_to_nanos(test_duration);

	double elapsed_secs = (double) test_duration_ns / (uint64_t)NSEC_PER_SEC;

	T_LOG("test duration %3.3f seconds\n", elapsed_secs);

	/* Workers filled their own slots before exiting; record the main thread's now. */
	fill_thread_stats(0);

	struct rusage_info_v6 ru = {};
	rv = proc_pid_rusage(getpid(), RUSAGE_INFO_V6, (rusage_info_t *)&ru);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "proc_pid_rusage");

	uint64_t total_user_time_ns = abs_to_nanos_host(ru.ri_user_time);
	double total_user_time_s = (double)total_user_time_ns / (uint64_t)NSEC_PER_SEC;

	uint64_t total_system_time_ns = abs_to_nanos_host(ru.ri_system_time);
	double total_system_time_s = (double)total_system_time_ns / (uint64_t)NSEC_PER_SEC;

	uint64_t total_time_ns = (total_user_time_ns + total_system_time_ns);
	double total_time_s = (double)total_time_ns / (uint64_t)NSEC_PER_SEC;

	uint64_t total_runnable_time_ns = abs_to_nanos_host(ru.ri_runnable_time);
	double total_runnable_time_s = (double)total_runnable_time_ns / (uint64_t)NSEC_PER_SEC;

	/* Pending = runnable but not on-core, i.e. waiting for a CPU. */
	uint64_t total_pending_time_ns = total_runnable_time_ns - (total_time_ns);
	double total_pending_time_s = (double)total_pending_time_ns / (uint64_t)NSEC_PER_SEC;

	uint64_t total_p_time_ns = abs_to_nanos_host(ru.ri_user_ptime + ru.ri_system_ptime);
	double total_p_time_s = (double)total_p_time_ns / (uint64_t)NSEC_PER_SEC;

	T_LOG("total usage: time: %3.3f user: %3.3f kernel: %3.3f runnable: %3.3f pending: %3.3f pcore: %3.3f\n",
	    total_time_s, total_user_time_s, total_system_time_s,
	    total_runnable_time_s, total_pending_time_s,
	    total_p_time_s);

	/*
	 * "Good" data looks like:
	 *
	 * total usage: time: 77.696 user: 16.570 kernel: 61.126 runnable: 79.951 pending: 2.255 pcore: 72.719
	 * Thread      ALRM   VTALRM PROF   XCPU   inst         cycle        user                  kernel                  offcore type
	 * 0x16f78f000      0    251    811      0  27680301973  28913501188   3706622958 ( 38.14%)   6012631083 ( 61.86%)   2.81% statfs
	 * 0x16f81b000      0      2    889      0  27962710058  28780576123    439297291 (  4.53%)   9259942583 ( 95.47%)   3.01% stat
	 * 0x16f8a7000      0    251    836      0  27558331077  28889228535   3699010000 ( 38.08%)   6016015083 ( 61.92%)   2.85% statfs
	 * 0x16f933000      0      0    939      0  28078084696  28880195679    443067500 (  4.56%)   9269807666 ( 95.44%)   2.87% stat
	 * 0x16f9bf000      0    283    874      0  27691851016  28969873070   3710916750 ( 38.16%)   6012783541 ( 61.84%)   2.76% statfs
	 * 0x16fa4b000      0      2    908      1  27945063330  28769971396    438583000 (  4.53%)   9252694291 ( 95.47%)   3.09% stat
	 * 0x16fad7000      0    262    889      0  27328496429  28772748055   3689245375 ( 38.03%)   6011061458 ( 61.97%)   3.00% statfs
	 * 0x16fb63000      0      0    914      0  27942195343  28757254100    439690166 (  4.53%)   9256659500 ( 95.47%)   3.04% stat
	 * 0x1fe2bb400   1001      0      3      0     72144372    102339334      3532125 (  9.35%)     34249208 ( 90.65%)  99.62% main
	 */
	uint32_t total_alrm = 0;
	uint32_t total_vtalrm = 0;
	uint32_t total_prof = 0;
	uint32_t total_xcpu = 0;
	uint32_t total_vtalrm_in_cpubound = 0;

	uint32_t total_threads_not_finding_cpus = 0;

	T_LOG("Thread      ALRM   VTALRM PROF   XCPU   "
	    "inst         cycle        user                  kernel                  "
	    "offcore type\n");

	for (uint32_t i = 0; i < nthreads; i++) {
		uint64_t user_time = abs_to_nanos_host(gThreadList[i].self_stats.ttci_user_time_mach);
		uint64_t system_time = abs_to_nanos_host(gThreadList[i].self_stats.ttci_system_time_mach);


		uint64_t total_time = user_time + system_time;

		double percentage_user = (double)user_time / (double) total_time * 100;
		double percentage_system = (double)system_time / (double) total_time * 100;
		double percentage_not_running = (double)(test_duration_ns - total_time) / (double) test_duration_ns * 100;

		char* thread_type_str = "";
		char* warning_str = "";

		if (i == 0) {
			thread_type_str = "main  ";
		} else {
			thread_type_str = i % 2 ? "stat  " : "statfs ";

			/* Worker threads are expected to stay on-core nearly the whole test. */
			if (percentage_not_running > offcore_time_percent_threshold) {
				total_threads_not_finding_cpus++;
				warning_str = "** too much offcore time **";
			}
		}

		T_LOG("0x%010llx %6d %6d %6d %6d %12lld %12lld %12lld (%7.2f%%) %12lld (%7.2f%%) %7.2f%% %s%s\n",
		    gThreadList[i].tid,
		    gThreadList[i].alrm_count,
		    gThreadList[i].vtalrm_count,
		    gThreadList[i].prof_count,
		    gThreadList[i].xcpu_count,
		    gThreadList[i].self_stats.ttci_instructions,
		    gThreadList[i].self_stats.ttci_cycles,
		    user_time, percentage_user,
		    system_time, percentage_system,
		    percentage_not_running,
		    thread_type_str, warning_str);

		total_alrm += gThreadList[i].alrm_count;
		total_vtalrm += gThreadList[i].vtalrm_count;
		total_prof += gThreadList[i].prof_count;
		total_xcpu += gThreadList[i].xcpu_count;

		if (gThreadList[i].expect_cpu_usage) {
			total_vtalrm_in_cpubound += gThreadList[i].vtalrm_count;
		}
	}

	/*
	 * We expect all SIGALRM to go to the main thread, because it is the
	 * first thread in the process with the signal unmasked, and we
	 * never expect the signal handler itself to take >10ms
	 *
	 * This can happen if the main thread is preempted for the entire 10ms duration, though.
	 * Being high priority, it shouldn't be delayed for more than 10ms too often.
	 * Allow up to 10% to deliver to other threads.
	 *
	 * (total_alrm == 0 guards the division and is itself a failure:
	 * ITIMER_REAL should always fire during a 10s run.)
	 */
	if (total_alrm == 0 ||
	    (double)gThreadList[0].alrm_count * 100 / total_alrm < 90.0) {
		T_FAIL("SIGALRM delivered to non-main thread more than 10%% of the time (%d of %d)",
		    gThreadList[0].alrm_count,
		    total_alrm);
	}

	/* We expect all worker threads to find CPUs of their own for most of the test */
	if (total_threads_not_finding_cpus != 0) {
		T_FAIL("%d worker threads spent more than %2.0f%% of time off-core",
		    total_threads_not_finding_cpus, offcore_time_percent_threshold);
	}

	/*
	 * SIGVTALRM is delivered based on user time, and we expect the busy
	 * threads to have an advantage and account for 80% (non-scientific) of events,
	 * since the other threads will spend more time in kernel mode.
	 *
	 * (total_vtalrm == 0 guards the division and is itself a failure.)
	 */
	if (total_vtalrm == 0 ||
	    total_vtalrm_in_cpubound * 100 / total_vtalrm < 80) {
		T_FAIL("SIGVTALRM delivered to threads without extra userspace spin (only %d of %d)",
		    total_vtalrm_in_cpubound, total_vtalrm);
	}

	/*
	 * SIGPROF is delivered based on user+system time, and we expect it to be distributed
	 * among non-blocked threads (so not the main thread, which only handles SIGALRM).
	 *
	 * (total_prof > 0 guards the division; with zero events the main
	 * thread trivially did not exceed its share.)
	 */
	if (total_prof > 0 && gThreadList[0].prof_count * 100 / total_prof > 1) {
		T_FAIL("SIGPROF delivered to main thread more than 1%% (%d of %d)",
		    gThreadList[0].prof_count,
		    total_prof);
	}

	/*
	 * SIGXCPU should be delivered at least once (the 1-second soft
	 * RLIMIT_CPU is exceeded well within the 10-second run).
	 */
	if (total_xcpu == 0) {
		T_FAIL("SIGXCPU delivered %d times (expected at least once)", total_xcpu);
	}
}
483
/*
 * Worker body: issues stat(2) in a tight loop until told to exit,
 * spending most of its time in kernel mode.
 */
static void *
stat_thread(void *arg)
{
	uint32_t slot = (uint32_t)(uintptr_t)arg;
	kern_return_t kr;
	int rv;

	/* Check in with main; this wait may be aborted by signal delivery. */
	kr = semaphore_wait_signal(gWorkersStart, gMainWaitForWorkers);
	if (kr != KERN_ABORTED) {
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_wait_signal()");
	}

	/* Block until main releases gShouldExitMutex, i.e. the timers are armed. */
	rv = pthread_mutex_lock(&gShouldExitMutex);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_mutex_lock(&gShouldExitMutex)");
	rv = pthread_mutex_unlock(&gShouldExitMutex);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_mutex_unlock(&gShouldExitMutex)");

	do {
		struct stat sb;

		rv = stat("/", &sb);
		if (rv != 0) {
			T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "stat");
		}
	} while (os_atomic_load(&gShouldExit, relaxed) == false);

	/* Record this thread's own counters before it exits. */
	fill_thread_stats(slot);

	return NULL;
}
514
/*
 * Worker body: mixed workload of statfs(2) system calls plus periodic
 * userspace spins, so the thread accrues both user and system time.
 */
static void *
statfs_thread(void *arg)
{
	uint32_t slot = (uint32_t)(uintptr_t)arg;
	uint64_t last_spin_end;
	int syscall_count = 0;
	kern_return_t kr;
	int rv;

	/* Check in with main; this wait may be aborted by signal delivery. */
	kr = semaphore_wait_signal(gWorkersStart, gMainWaitForWorkers);
	if (kr != KERN_ABORTED) {
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_wait_signal()");
	}

	/* Block until main releases gShouldExitMutex, i.e. the timers are armed. */
	rv = pthread_mutex_lock(&gShouldExitMutex);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_mutex_lock(&gShouldExitMutex)");
	rv = pthread_mutex_unlock(&gShouldExitMutex);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_mutex_unlock(&gShouldExitMutex)");

	last_spin_end = mach_absolute_time();

	do {
		struct statfs sf;

		/*
		 * Every SYSCALL_ITERATIONS_BETWEEN_SPINS system calls, burn
		 * userspace CPU for half the wall time the preceding batch
		 * took, keeping user time proportional to system time.
		 */
#define SYSCALL_ITERATIONS_BETWEEN_SPINS (10000)
		syscall_count++;
		if (syscall_count % SYSCALL_ITERATIONS_BETWEEN_SPINS == 0) {
			uint64_t batch_end = mach_absolute_time();
			uint64_t spin_deadline = batch_end + (batch_end - last_spin_end) / 2;

			while (mach_absolute_time() < spin_deadline) {
				;
			}

			last_spin_end = mach_absolute_time();
		}

		rv = statfs("/", &sf);
		if (rv != 0) {
			T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "statfs");
		}
	} while (os_atomic_load(&gShouldExit, relaxed) == false);

	/* Record this thread's own counters before it exits. */
	fill_thread_stats(slot);

	return NULL;
}
566
567 static void
alrm_handler(int signum,struct __siginfo * info __unused,void * uap)568 alrm_handler(int signum, struct __siginfo *info __unused, void *uap)
569 {
570 ucontext_t *context = (ucontext_t *)uap;
571 struct threadentry *entry = NULL;
572 void *sp;
573
574 #if defined(__arm64__)
575 sp = (void *)__darwin_arm_thread_state64_get_sp((context->uc_mcontext)->__ss);
576 #elif defined(__i386__)
577 sp = (void *)(context->uc_mcontext)->__ss.__esp;
578 #elif defined(__x86_64__)
579 sp = (void *)(context->uc_mcontext)->__ss.__rsp;
580 #else
581 #error Unrecognized architecture
582 #endif
583
584 for (uint32_t i = 0; i < nworkers + 1; i++) {
585 struct threadentry *t = &gThreadList[i];
586 if (((uintptr_t)sp >= ((uintptr_t)t->stack_addr - t->stack_size) &&
587 ((uintptr_t)sp < (uintptr_t)t->stack_addr))) {
588 entry = t;
589 break;
590 }
591 }
592
593 if (entry == NULL) {
594 T_ASSERT_FAIL("Signal %d delivered to unknown thread, SP=%p", signum, sp);
595 }
596
597 switch (signum) {
598 case SIGALRM:
599 os_atomic_inc(&entry->alrm_count, relaxed);
600 break;
601 case SIGVTALRM:
602 os_atomic_inc(&entry->vtalrm_count, relaxed);
603 break;
604 case SIGPROF:
605 os_atomic_inc(&entry->prof_count, relaxed);
606 break;
607 case SIGXCPU:
608 os_atomic_inc(&entry->xcpu_count, relaxed);
609 break;
610 }
611 }
612