/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <unistd.h>
#include <stdio.h>
#include <math.h>
#include <sys/kdebug.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
#include <string.h>
#include <assert.h>
#include <sysexits.h>
#include <sys/sysctl.h>
#include <getopt.h>
#include <libproc.h>

#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
#include <mach-o/dyld.h>

#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach/semaphore.h>

#include <pthread/qos_private.h>

#include <sys/resource.h>

#include <stdatomic.h>

#include <os/tsd.h>
#include <os/lock.h>
#include <TargetConditionals.h>

typedef enum wake_type { WAKE_BROADCAST_ONESEM, WAKE_BROADCAST_PERTHREAD, WAKE_CHAIN, WAKE_HOP } wake_type_t;
typedef enum my_policy_type { MY_POLICY_REALTIME, MY_POLICY_TIMESHARE, MY_POLICY_TIMESHARE_NO_SMT, MY_POLICY_FIXEDPRI } my_policy_type_t;

#define mach_assert_zero(error)        do { if ((error) != 0) { fprintf(stderr, "[FAIL] error %d (%s) ", (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define mach_assert_zero_t(tid, error) do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d (%s) ", (tid), (error), mach_error_string(error)); assert(error == 0); } } while (0)
#define assert_zero_t(tid, error)      do { if ((error) != 0) { fprintf(stderr, "[FAIL] Thread %d error %d ", (tid), (error)); assert(error == 0); } } while (0)

#define CONSTRAINT_NANOS        (20000000ll)    /* 20 ms */
#define COMPUTATION_NANOS       (10000000ll)    /* 10 ms */
#define LL_CONSTRAINT_NANOS     ( 2000000ll)    /*  2 ms */
#define LL_COMPUTATION_NANOS    ( 1000000ll)    /*  1 ms */
#define RT_CHURN_COMP_NANOS     ( 1000000ll)    /*  1 ms */
#define TRACEWORTHY_NANOS       (10000000ll)    /* 10 ms */
#define TRACEWORTHY_NANOS_TEST  ( 1000000ll)    /*  1 ms */
#define TRACEWORTHY_NANOS_LL    (  500000ll)    /* 500 us */
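
/*
 * Note on the constants above: COMPUTATION_NANOS and CONSTRAINT_NANOS feed
 * THREAD_TIME_CONSTRAINT_POLICY in thread_setup(); computation is the CPU
 * time the thread claims to need per period, and constraint is the window
 * within which that computation must complete. The TRACEWORTHY_* values are
 * wakeup-latency thresholds beyond which an iteration is counted as a bad
 * run and a kdebug tracepoint is emitted.
 */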

#if DEBUG
#define debug_log(args ...) printf(args)
#else
#define debug_log(args ...) do { } while(0)
#endif

/* Declarations */
static void* worker_thread(void *arg);
static void usage();
static int thread_setup(uint32_t my_id);
static my_policy_type_t parse_thread_policy(const char *str);
static void selfexec_with_apptype(int argc, char *argv[]);
static void parse_args(int argc, char *argv[]);

static __attribute__((aligned(128))) _Atomic uint32_t g_done_threads;
static __attribute__((aligned(128))) _Atomic boolean_t g_churn_stop = FALSE;
static __attribute__((aligned(128))) _Atomic uint64_t g_churn_stopped_at = 0;

/* Global variables (general) */
static uint32_t g_maxcpus;
static uint32_t g_numcpus;
static uint32_t g_nphysicalcpu;
static uint32_t g_nlogicalcpu;
static uint32_t g_numthreads;
static wake_type_t g_waketype;
static policy_t g_policy;
static uint32_t g_iterations;
static struct mach_timebase_info g_mti;
static semaphore_t g_main_sem;
static uint64_t *g_thread_endtimes_abs;
static boolean_t g_verbose = FALSE;
static boolean_t g_do_affinity = FALSE;
static uint64_t g_starttime_abs;
static uint32_t g_iteration_sleeptime_us = 0;
static uint32_t g_priority = 0;
static uint32_t g_churn_pri = 0;
static uint32_t g_churn_count = 0;
static boolean_t g_churn_random = FALSE; /* churn threads randomly sleep and wake */
static uint32_t g_rt_churn_count = 0;
static uint32_t g_traceworthy_count = 0;

/*
 * If the number of threads requested on the command line is 0, meaning ncpus,
 * this signed number is added to the thread count, making it possible to
 * specify ncpus-3 threads, ncpus+1 threads, etc.
 */
static int32_t g_extra_thread_count = 0;

static pthread_t* g_churn_threads = NULL;
static pthread_t* g_rt_churn_threads = NULL;

/* Should we skip the test if run on non-Intel hardware? */
static boolean_t g_run_on_intel_only = FALSE;

/* Threshold for dropping a 'bad run' tracepoint */
static uint64_t g_traceworthy_latency_ns = TRACEWORTHY_NANOS;

/* Have we re-execed to set apptype? */
static boolean_t g_seen_apptype = FALSE;

/* usleep in between iterations */
static boolean_t g_do_sleep = TRUE;

/* Every thread spins until all threads have checked in */
static boolean_t g_do_all_spin = FALSE;

/* Every thread backgrounds temporarily before parking */
static boolean_t g_drop_priority = FALSE;

/* Use low-latency (sub 4ms deadline) realtime threads */
static boolean_t g_rt_ll = FALSE;

/* Test whether realtime threads are scheduled on separate CPUs */
static boolean_t g_test_rt = FALSE;

static boolean_t g_rt_churn = FALSE;

/* On SMT machines, test whether realtime threads are scheduled on the correct CPUs */
static boolean_t g_test_rt_smt = FALSE;

/* Test whether realtime threads are successfully avoiding CPU 0 on Intel */
static boolean_t g_test_rt_avoid0 = FALSE;

/* Fail the test if any iteration fails */
static boolean_t g_test_strict_fail = FALSE;

/* Print a histogram showing how many threads ran on each CPU */
static boolean_t g_histogram = FALSE;

/* One randomly chosen thread holds up the train for a certain duration. */
static boolean_t g_do_one_long_spin = FALSE;
static uint32_t g_one_long_spin_id = 0;
static uint64_t g_one_long_spin_length_abs = 0;
static uint64_t g_one_long_spin_length_ns = 0;

/* Each thread spins for a certain duration after waking up before blocking again. */
static boolean_t g_do_each_spin = FALSE;
static uint64_t g_each_spin_duration_abs = 0;
static uint64_t g_each_spin_duration_ns = 0;

/* Global variables (broadcast) */
static semaphore_t g_broadcastsem;
static semaphore_t g_leadersem;
static semaphore_t g_readysem;
static semaphore_t g_donesem;
static semaphore_t g_rt_churn_sem;
static semaphore_t g_rt_churn_start_sem;

/* Global variables (chain) */
static semaphore_t *g_semarr;

typedef struct {
	__attribute__((aligned(128))) uint32_t current;
	uint32_t accum;
} histogram_t;
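
/*
 * The aligned(128) on 'current' (like the one on the hot _Atomic globals
 * above) pads each frequently-written word out to its own cache-line-sized
 * block, so worker threads updating one CPU's histogram slot don't
 * false-share with threads updating a neighboring slot.
 */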

static histogram_t *g_cpu_histogram;
static _Atomic uint64_t *g_cpu_map;

static uint64_t
abs_to_nanos(uint64_t abstime)
{
	return (uint64_t)(abstime * (((double)g_mti.numer) / ((double)g_mti.denom)));
}

static uint64_t
nanos_to_abs(uint64_t ns)
{
	return (uint64_t)(ns * (((double)g_mti.denom) / ((double)g_mti.numer)));
}
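
/*
 * Illustrative example (timebase values vary by machine): if
 * mach_timebase_info reported numer=125, denom=3, one abstime tick would be
 * 125/3 ns, so abs_to_nanos(24) == 1000 and nanos_to_abs(1000) == 24. On
 * machines with a 1:1 timebase, both functions are the identity.
 */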

inline static void
yield(void)
{
#if defined(__arm64__)
	asm volatile ("yield");
#elif defined(__x86_64__) || defined(__i386__)
	asm volatile ("pause");
#else
#error Unrecognized architecture
#endif
}

#define BIT(b)      (1ULL << (b))
#define mask(width) (width >= 64 ? -1ULL : (BIT(width) - 1))
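
/*
 * mask(width) yields the low 'width' bits set, e.g. mask(4) == 0xf; the
 * explicit width >= 64 case returns all-ones while avoiding the undefined
 * behavior BIT(64) would invoke by shifting a 64-bit value by 64.
 */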

static void *
churn_thread(__unused void *arg)
{
	uint64_t spin_count = 0;

	/*
	 * As a safety measure to avoid wedging, we will bail on the spin if
	 * it's been more than 1s after the most recent run start
	 */

	uint64_t sleep_us = 1000;
	uint64_t ctime = mach_absolute_time();
	uint64_t sleep_at_time = ctime + nanos_to_abs(arc4random_uniform(sleep_us * NSEC_PER_USEC) + 1);
	while ((g_churn_stop == FALSE) && (ctime < (g_starttime_abs + NSEC_PER_SEC))) {
		spin_count++;
		yield();
		ctime = mach_absolute_time();
		if (g_churn_random && (ctime > sleep_at_time)) {
			usleep(arc4random_uniform(sleep_us) + 1);
			ctime = mach_absolute_time();
			sleep_at_time = ctime + nanos_to_abs(arc4random_uniform(sleep_us * NSEC_PER_USEC) + 1);
		}
	}

	/* This is totally racy, but only here to detect if anyone stops early */
	atomic_fetch_add_explicit(&g_churn_stopped_at, spin_count, memory_order_relaxed);

	return NULL;
}

static void
create_churn_threads()
{
	if (g_churn_count == 0) {
		g_churn_count = g_test_rt_smt ? g_numcpus : g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_churn_count);
	assert(g_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_churn_count; i++) {
		pthread_t new_thread;

		err = pthread_create(&new_thread, &attr, churn_thread, NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}
}

static void
join_churn_threads(void)
{
	uint64_t stopped_at = atomic_load_explicit(&g_churn_stopped_at, memory_order_seq_cst);
	if (stopped_at != 0) {
		printf("Warning: Some of the churn threads may have stopped early: %llu\n",
		    stopped_at);
	}

	atomic_store_explicit(&g_churn_stop, TRUE, memory_order_seq_cst);

	/* Rejoin churn threads */
	for (uint32_t i = 0; i < g_churn_count; i++) {
		errno_t err = pthread_join(g_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}

/*
 * Set realtime policy on the calling (rt churn) thread
 */
static int
rt_churn_thread_setup(void)
{
	kern_return_t kr;
	thread_time_constraint_policy_data_t pol;

	/* Hard-coded realtime parameters (similar to what Digi uses) */
	pol.period = 100000;
	pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS * 2);
	pol.computation = (uint32_t) nanos_to_abs(RT_CHURN_COMP_NANOS * 2);
	pol.preemptible = 0; /* Ignored by OS */

	kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
	    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
	mach_assert_zero_t(0, kr);

	return 0;
}

static void *
rt_churn_thread(__unused void *arg)
{
	rt_churn_thread_setup();

	for (uint32_t i = 0; i < g_iterations; i++) {
		kern_return_t kr = semaphore_wait_signal(g_rt_churn_start_sem, g_rt_churn_sem);
		mach_assert_zero_t(0, kr);

		volatile double x = 0.0;
		volatile double y = 0.0;

		uint64_t endspin = mach_absolute_time() + nanos_to_abs(RT_CHURN_COMP_NANOS);
		while (mach_absolute_time() < endspin) {
			y = y + 1.5 + x;
			x = sqrt(y);
		}
	}

	kern_return_t kr = semaphore_signal(g_rt_churn_sem);
	mach_assert_zero_t(0, kr);

	return NULL;
}

static void
wait_for_rt_churn_threads(void)
{
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		kern_return_t kr = semaphore_wait(g_rt_churn_sem);
		mach_assert_zero_t(0, kr);
	}
}

static void
start_rt_churn_threads(void)
{
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		kern_return_t kr = semaphore_signal(g_rt_churn_start_sem);
		mach_assert_zero_t(0, kr);
	}
}

static void
create_rt_churn_threads(void)
{
	if (g_rt_churn_count == 0) {
		/* Leave 1 CPU to ensure that the main thread can make progress */
		g_rt_churn_count = g_numcpus - 1;
	}

	errno_t err;

	struct sched_param param = { .sched_priority = (int)g_churn_pri };
	pthread_attr_t attr;

	/* Array for churn threads */
	g_rt_churn_threads = (pthread_t*) valloc(sizeof(pthread_t) * g_rt_churn_count);
	assert(g_rt_churn_threads);

	if ((err = pthread_attr_init(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_init");
	}

	if ((err = pthread_attr_setschedparam(&attr, &param))) {
		errc(EX_OSERR, err, "pthread_attr_setschedparam");
	}

	if ((err = pthread_attr_setschedpolicy(&attr, SCHED_RR))) {
		errc(EX_OSERR, err, "pthread_attr_setschedpolicy");
	}

	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		pthread_t new_thread;

		err = pthread_create(&new_thread, &attr, rt_churn_thread, NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_create");
		}
		g_rt_churn_threads[i] = new_thread;
	}

	if ((err = pthread_attr_destroy(&attr))) {
		errc(EX_OSERR, err, "pthread_attr_destroy");
	}

	/* Wait until all threads have checked in */
	wait_for_rt_churn_threads();
}

static void
join_rt_churn_threads(void)
{
	/* Rejoin rt churn threads */
	for (uint32_t i = 0; i < g_rt_churn_count; i++) {
		errno_t err = pthread_join(g_rt_churn_threads[i], NULL);
		if (err) {
			errc(EX_OSERR, err, "pthread_join %d", i);
		}
	}
}

/*
 * Figure out what thread policy to use
 */
static my_policy_type_t
parse_thread_policy(const char *str)
{
	if (strcmp(str, "timeshare") == 0) {
		return MY_POLICY_TIMESHARE;
	} else if (strcmp(str, "timeshare_no_smt") == 0) {
		return MY_POLICY_TIMESHARE_NO_SMT;
	} else if (strcmp(str, "realtime") == 0) {
		return MY_POLICY_REALTIME;
	} else if (strcmp(str, "fixed") == 0) {
		return MY_POLICY_FIXEDPRI;
	} else {
		errx(EX_USAGE, "Invalid thread policy \"%s\"", str);
	}
}

/*
 * Figure out what wakeup pattern to use
 */
static wake_type_t
parse_wakeup_pattern(const char *str)
{
	if (strcmp(str, "chain") == 0) {
		return WAKE_CHAIN;
	} else if (strcmp(str, "hop") == 0) {
		return WAKE_HOP;
	} else if (strcmp(str, "broadcast-single-sem") == 0) {
		return WAKE_BROADCAST_ONESEM;
	} else if (strcmp(str, "broadcast-per-thread") == 0) {
		return WAKE_BROADCAST_PERTHREAD;
	} else {
		errx(EX_USAGE, "Invalid wakeup pattern \"%s\"", str);
	}
}

/*
 * Set policy
 */
static int
thread_setup(uint32_t my_id)
{
	kern_return_t kr;
	errno_t ret;
	thread_time_constraint_policy_data_t pol;

	if (g_priority) {
		int policy = SCHED_OTHER;
		if (g_policy == MY_POLICY_FIXEDPRI) {
			policy = SCHED_RR;
		}

		struct sched_param param = { .sched_priority = (int)g_priority };
		if ((ret = pthread_setschedparam(pthread_self(), policy, &param))) {
			errc(EX_OSERR, ret, "pthread_setschedparam: %d", my_id);
		}
	}

	switch (g_policy) {
	case MY_POLICY_TIMESHARE:
		break;
	case MY_POLICY_TIMESHARE_NO_SMT:
		proc_setthread_no_smt();
		break;
	case MY_POLICY_REALTIME:
		/* Hard-coded realtime parameters (similar to what Digi uses) */
		pol.period = 100000;
		if (g_rt_ll) {
			pol.constraint = (uint32_t) nanos_to_abs(LL_CONSTRAINT_NANOS);
			pol.computation = (uint32_t) nanos_to_abs(LL_COMPUTATION_NANOS);
		} else {
			pol.constraint = (uint32_t) nanos_to_abs(CONSTRAINT_NANOS);
			pol.computation = (uint32_t) nanos_to_abs(COMPUTATION_NANOS);
		}
		pol.preemptible = 0; /* Ignored by OS */

		kr = thread_policy_set(mach_thread_self(), THREAD_TIME_CONSTRAINT_POLICY,
		    (thread_policy_t) &pol, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
		break;
	case MY_POLICY_FIXEDPRI:
		ret = pthread_set_fixedpriority_self();
		if (ret) {
			errc(EX_OSERR, ret, "pthread_set_fixedpriority_self");
		}
		break;
	default:
		errx(EX_USAGE, "invalid policy type %d", g_policy);
	}

	if (g_do_affinity) {
		thread_affinity_policy_data_t affinity;

		affinity.affinity_tag = my_id % 2;

		kr = thread_policy_set(mach_thread_self(), THREAD_AFFINITY_POLICY,
		    (thread_policy_t)&affinity, THREAD_AFFINITY_POLICY_COUNT);
		mach_assert_zero_t(my_id, kr);
	}

	return 0;
}

time_value_t
get_thread_runtime(void)
{
	thread_basic_info_data_t info;
	mach_msg_type_number_t info_count = THREAD_BASIC_INFO_COUNT;
	thread_info(pthread_mach_thread_np(pthread_self()), THREAD_BASIC_INFO, (thread_info_t)&info, &info_count);

	time_value_add(&info.user_time, &info.system_time);

	return info.user_time;
}
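
/*
 * Note: time_value_add() accumulates its second argument into its first, so
 * after the call above info.user_time holds user + system time for the
 * calling thread; that combined total is what this function returns.
 */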

time_value_t worker_threads_total_runtime = {};

/*
 * Wait for a wakeup, potentially wake up another of the "0-N" threads,
 * and notify the main thread when done.
 */
static void*
worker_thread(void *arg)
{
	static os_unfair_lock runtime_lock = OS_UNFAIR_LOCK_INIT;

	uint32_t my_id = (uint32_t)(uintptr_t)arg;
	kern_return_t kr;

	volatile double x = 0.0;
	volatile double y = 0.0;

	/* Set policy and so forth */
	thread_setup(my_id);

	for (uint32_t i = 0; i < g_iterations; i++) {
		if (my_id == 0) {
			/*
			 * Leader thread either wakes everyone up or starts the chain going.
			 */

			/* Give the worker threads undisturbed time to finish before waiting on them */
			if (g_do_sleep) {
				usleep(g_iteration_sleeptime_us);
			}

			debug_log("%d Leader thread wait for ready\n", i);

			/*
			 * Wait for everyone else to declare ready
			 * Is there a better way to do this that won't interfere with the rest of the chain?
			 * TODO: Invent 'semaphore wait for N signals'
			 */

			for (uint32_t j = 0; j < g_numthreads - 1; j++) {
				kr = semaphore_wait(g_readysem);
				mach_assert_zero_t(my_id, kr);
			}

			debug_log("%d Leader thread wait\n", i);

			if (i > 0) {
				for (int cpuid = 0; cpuid < g_maxcpus; cpuid++) {
					if (g_cpu_histogram[cpuid].current == 1) {
						atomic_fetch_or_explicit(&g_cpu_map[i - 1], (1UL << cpuid), memory_order_relaxed);
						g_cpu_histogram[cpuid].current = 0;
					}
				}
			}

			/* Signal main thread and wait for start of iteration */

			kr = semaphore_wait_signal(g_leadersem, g_main_sem);
			mach_assert_zero_t(my_id, kr);

			g_thread_endtimes_abs[my_id] = mach_absolute_time();

			debug_log("%d Leader thread go\n", i);

			assert_zero_t(my_id, atomic_load_explicit(&g_done_threads, memory_order_relaxed));

			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_signal_all(g_broadcastsem);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_BROADCAST_PERTHREAD:
				for (uint32_t j = 1; j < g_numthreads; j++) {
					kr = semaphore_signal(g_semarr[j]);
					mach_assert_zero_t(my_id, kr);
				}
				break;
			case WAKE_CHAIN:
				kr = semaphore_signal(g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			case WAKE_HOP:
				kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
				mach_assert_zero_t(my_id, kr);
				break;
			}
		} else {
			/*
			 * Everyone else waits to be woken up,
			 * records when she wakes up, and possibly
			 * wakes up a friend.
			 */
			switch (g_waketype) {
			case WAKE_BROADCAST_ONESEM:
				kr = semaphore_wait_signal(g_broadcastsem, g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_BROADCAST_PERTHREAD:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				g_thread_endtimes_abs[my_id] = mach_absolute_time();
				break;

			case WAKE_CHAIN:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_signal(g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				}

				break;

			case WAKE_HOP:
				kr = semaphore_wait_signal(g_semarr[my_id], g_readysem);
				mach_assert_zero_t(my_id, kr);

				/* Signal the next thread *after* recording wake time */

				g_thread_endtimes_abs[my_id] = mach_absolute_time();

				if (my_id < (g_numthreads - 1)) {
					kr = semaphore_wait_signal(g_donesem, g_semarr[my_id + 1]);
					mach_assert_zero_t(my_id, kr);
				} else {
					kr = semaphore_signal_all(g_donesem);
					mach_assert_zero_t(my_id, kr);
				}

				break;
			}
		}

		unsigned int cpuid = _os_cpu_number();
		assert(cpuid < g_maxcpus);
		debug_log("Thread %p woke up on CPU %d for iteration %d.\n", pthread_self(), cpuid, i);
		g_cpu_histogram[cpuid].current = 1;
		g_cpu_histogram[cpuid].accum++;

		if (g_do_one_long_spin && g_one_long_spin_id == my_id) {
			/* One randomly chosen thread holds up the train for a while. */

			uint64_t endspin = g_starttime_abs + g_one_long_spin_length_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_do_each_spin) {
			/* Each thread spins for a certain duration after waking up before blocking again. */

			uint64_t endspin = mach_absolute_time() + g_each_spin_duration_abs;
			while (mach_absolute_time() < endspin) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		uint32_t done_threads;
		done_threads = atomic_fetch_add_explicit(&g_done_threads, 1, memory_order_relaxed) + 1;

		debug_log("Thread %p new value is %d, iteration %d\n", pthread_self(), done_threads, i);

		if (g_drop_priority) {
			/* Drop priority to BG momentarily */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority PRIO_DARWIN_BG");
			}
		}

		if (g_do_all_spin) {
			/* Everyone spins until the last thread checks in. */

			while (atomic_load_explicit(&g_done_threads, memory_order_relaxed) < g_numthreads) {
				y = y + 1.5 + x;
				x = sqrt(y);
			}
		}

		if (g_drop_priority) {
			/* Restore normal priority */
			errno_t ret = setpriority(PRIO_DARWIN_THREAD, 0, 0);
			if (ret) {
				errc(EX_OSERR, ret, "setpriority 0");
			}
		}

		debug_log("Thread %p done spinning, iteration %d\n", pthread_self(), i);
	}

	if (my_id == 0) {
		/* Give the worker threads undisturbed time to finish before waiting on them */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}

		/* Wait for the worker threads to finish */
		for (uint32_t i = 0; i < g_numthreads - 1; i++) {
			kr = semaphore_wait(g_readysem);
			mach_assert_zero_t(my_id, kr);
		}

		/* Tell everyone and the main thread that the last iteration is done */
		debug_log("%d Leader thread done\n", g_iterations - 1);

		for (int cpuid = 0; cpuid < g_maxcpus; cpuid++) {
			if (g_cpu_histogram[cpuid].current == 1) {
				atomic_fetch_or_explicit(&g_cpu_map[g_iterations - 1], (1UL << cpuid), memory_order_relaxed);
				g_cpu_histogram[cpuid].current = 0;
			}
		}

		kr = semaphore_signal_all(g_main_sem);
		mach_assert_zero_t(my_id, kr);
	} else {
		/* Hold up thread teardown so it doesn't affect the last iteration */
		kr = semaphore_wait_signal(g_main_sem, g_readysem);
		mach_assert_zero_t(my_id, kr);
	}

	time_value_t runtime = get_thread_runtime();
	os_unfair_lock_lock(&runtime_lock);
	time_value_add(&worker_threads_total_runtime, &runtime);
	os_unfair_lock_unlock(&runtime_lock);

	return 0;
}

/*
 * Given an array of uint64_t values, compute average, max, min, and standard deviation
 */
static void
compute_stats(uint64_t *values, uint64_t count, float *averagep, uint64_t *maxp, uint64_t *minp, float *stddevp)
{
	uint32_t i;
	uint64_t _sum = 0;
	uint64_t _max = 0;
	uint64_t _min = UINT64_MAX;
	float _avg = 0;
	float _dev = 0;

	for (i = 0; i < count; i++) {
		_sum += values[i];
		_max = values[i] > _max ? values[i] : _max;
		_min = values[i] < _min ? values[i] : _min;
	}

	_avg = ((float)_sum) / ((float)count);

	_dev = 0;
	for (i = 0; i < count; i++) {
		_dev += powf((((float)values[i]) - _avg), 2);
	}

	_dev /= count;
	_dev = sqrtf(_dev);

	*averagep = _avg;
	*maxp = _max;
	*minp = _min;
	*stddevp = _dev;
}
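
/*
 * Note: this is the population standard deviation (divide by count, not
 * count - 1), which is appropriate here since every iteration's worst-case
 * latency is recorded, not a sample of them.
 */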

typedef struct {
	natural_t sys;
	natural_t user;
	natural_t idle;
} cpu_time_t;

void
record_cpu_time(cpu_time_t *cpu_time)
{
	host_cpu_load_info_data_t load;
	mach_msg_type_number_t count = HOST_CPU_LOAD_INFO_COUNT;
	kern_return_t kr = host_statistics(mach_host_self(), HOST_CPU_LOAD_INFO, (int *)&load, &count);
	mach_assert_zero_t(0, kr);

	natural_t total_system_time = load.cpu_ticks[CPU_STATE_SYSTEM];
	natural_t total_user_time = load.cpu_ticks[CPU_STATE_USER] + load.cpu_ticks[CPU_STATE_NICE];
	natural_t total_idle_time = load.cpu_ticks[CPU_STATE_IDLE];

	cpu_time->sys = total_system_time;
	cpu_time->user = total_user_time;
	cpu_time->idle = total_idle_time;
}
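
/*
 * The cpu_ticks counters from HOST_CPU_LOAD_INFO advance at the scheduler
 * tick rate (nominally 100 Hz on Darwin, i.e. 10 ms per tick); the
 * "(finish - start) * 10" computation in main() assumes that rate when
 * converting idle-tick deltas to milliseconds.
 */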

static int
set_recommended_cluster(char cluster_char)
{
	char buff[4];
	buff[0] = cluster_char;
	buff[1] = '\0';

	int ret = sysctlbyname("kern.sched_task_set_cluster_type", NULL, NULL, buff, 1);
	if (ret != 0) {
		perror("kern.sched_task_set_cluster_type");
	}

	return ret;
}

int
main(int argc, char **argv)
{
	errno_t ret;
	kern_return_t kr;

	pthread_t *threads;
	uint64_t *worst_latencies_ns;
	uint64_t *worst_latencies_from_first_ns;
	uint64_t *worst_latencies_from_previous_ns;
	uint64_t max, min;
	float avg, stddev;

	bool test_fail = false;
	bool test_warn = false;

	for (int i = 0; i < argc; i++) {
		if (strcmp(argv[i], "--switched_apptype") == 0) {
			g_seen_apptype = TRUE;
		}
	}

	if (!g_seen_apptype) {
		selfexec_with_apptype(argc, argv);
	}

	parse_args(argc, argv);

	srand((unsigned int)time(NULL));

	mach_timebase_info(&g_mti);

#if TARGET_OS_OSX
	/* SKIP the test if running on an arm platform */
	if (g_run_on_intel_only) {
		int is_arm = 0;
		size_t is_arm_size = sizeof(is_arm);
		ret = sysctlbyname("hw.optional.arm64", &is_arm, &is_arm_size, NULL, 0);
		if (ret == 0 && is_arm) {
			printf("Unsupported platform. Skipping test.\n");
			printf("TEST SKIPPED\n");
			exit(0);
		}
	}
#endif /* TARGET_OS_OSX */

	size_t maxcpu_size = sizeof(g_maxcpus);
	ret = sysctlbyname("hw.ncpu", &g_maxcpus, &maxcpu_size, NULL, 0);
	if (ret) {
		err(EX_OSERR, "Failed sysctlbyname(hw.ncpu)");
	}
	assert(g_maxcpus <= 64); /* g_cpu_map needs to be extended for > 64 cpus */

	size_t numcpu_size = sizeof(g_numcpus);
	ret = sysctlbyname("hw.perflevel0.logicalcpu", &g_numcpus, &numcpu_size, NULL, 0);
	if (ret) {
		/* hw.perflevel0.logicalcpu failed, so fall back to hw.ncpu */
		g_numcpus = g_maxcpus;
	} else {
		/* Test for multiple perf levels */
		uint32_t result = 0;
		size_t result_size = sizeof(result);
		ret = sysctlbyname("hw.perflevel1.logicalcpu", &result, &result_size, NULL, 0);
		if ((ret == 0) && (result > 0)) {
			/* Multiple perf levels detected, so bind this task to the highest perf node */
			ret = set_recommended_cluster('p');
			if (ret && g_test_rt) {
				printf("set_recommended_cluster('p') failed. Skipping test\n");
				printf("TEST SKIPPED\n");
				exit(0);
			}
		}
	}

	size_t physicalcpu_size = sizeof(g_nphysicalcpu);
	ret = sysctlbyname("hw.perflevel0.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0);
	if (ret) {
		/* hw.perflevel0.physicalcpu failed, so fall back to hw.physicalcpu */
		ret = sysctlbyname("hw.physicalcpu", &g_nphysicalcpu, &physicalcpu_size, NULL, 0);
		if (ret) {
			err(EX_OSERR, "Failed sysctlbyname(hw.physicalcpu)");
		}
	}

	size_t logicalcpu_size = sizeof(g_nlogicalcpu);
	ret = sysctlbyname("hw.perflevel0.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0);
	if (ret) {
		/* hw.perflevel0.logicalcpu failed, so fall back to hw.logicalcpu */
		ret = sysctlbyname("hw.logicalcpu", &g_nlogicalcpu, &logicalcpu_size, NULL, 0);
		if (ret) {
			err(EX_OSERR, "Failed sysctlbyname(hw.logicalcpu)");
		}
	}

	if (g_test_rt) {
		if (g_numthreads == 0) {
			g_numthreads = g_numcpus + g_extra_thread_count;
			if ((int32_t)g_numthreads < 1) {
				g_numthreads = 1;
			}
			if ((g_numthreads == 1) && ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP))) {
				g_numthreads = 2;
			}
		}
		g_policy = MY_POLICY_REALTIME;
		g_histogram = true;
		/* Don't change g_traceworthy_latency_ns if it's explicitly been set to something other than the default */
		if (g_traceworthy_latency_ns == TRACEWORTHY_NANOS) {
			g_traceworthy_latency_ns = g_rt_ll ? TRACEWORTHY_NANOS_LL : TRACEWORTHY_NANOS_TEST;
		}
	} else if (g_test_rt_smt) {
		if (g_nlogicalcpu != 2 * g_nphysicalcpu) {
			/* Not SMT */
			printf("Attempt to run --test-rt-smt on a non-SMT device\n");
			printf("TEST SKIPPED\n");
			exit(0);
		}

		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu + g_extra_thread_count;
		}
		if ((int32_t)g_numthreads < 1) {
			g_numthreads = 1;
		}
		if ((g_numthreads == 1) && ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP))) {
			g_numthreads = 2;
		}
		g_policy = MY_POLICY_REALTIME;
		g_histogram = true;
	} else if (g_test_rt_avoid0) {
#if defined(__x86_64__) || defined(__i386__)
		if (g_nphysicalcpu == 1) {
			printf("Attempt to run --test-rt-avoid0 on a uniprocessor\n");
			printf("TEST SKIPPED\n");
			exit(0);
		}
		if (g_numthreads == 0) {
			g_numthreads = g_nphysicalcpu - 1 + g_extra_thread_count;
		}
		if ((int32_t)g_numthreads < 1) {
			g_numthreads = 1;
		}
		if ((g_numthreads == 1) && ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP))) {
			g_numthreads = 2;
		}
		g_policy = MY_POLICY_REALTIME;
		g_histogram = true;
#else
		printf("Attempt to run --test-rt-avoid0 on a non-Intel device\n");
		printf("TEST SKIPPED\n");
		exit(0);
#endif
	} else if (g_numthreads == 0) {
		g_numthreads = g_numcpus + g_extra_thread_count;
		if ((int32_t)g_numthreads < 1) {
			g_numthreads = 1;
		}
		if ((g_numthreads == 1) && ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP))) {
			g_numthreads = 2;
		}
	}

	if (g_do_each_spin) {
		g_each_spin_duration_abs = nanos_to_abs(g_each_spin_duration_ns);
	}

	/* Configure the long-spin thread to take up half of its computation */
	if (g_do_one_long_spin) {
		g_one_long_spin_length_ns = COMPUTATION_NANOS / 2;
		g_one_long_spin_length_abs = nanos_to_abs(g_one_long_spin_length_ns);
	}

	/* Estimate the amount of time the cleanup phase needs to back off */
	g_iteration_sleeptime_us = g_numthreads * 20;

	uint32_t threads_per_core = (g_numthreads / g_numcpus) + 1;
	if (g_do_each_spin) {
		g_iteration_sleeptime_us += threads_per_core * (g_each_spin_duration_ns / NSEC_PER_USEC);
	}
	if (g_do_one_long_spin) {
		g_iteration_sleeptime_us += g_one_long_spin_length_ns / NSEC_PER_USEC;
	}

	/* Arrays for threads and their wakeup times */
	threads = (pthread_t*) valloc(sizeof(pthread_t) * g_numthreads);
	assert(threads);

	size_t endtimes_size = sizeof(uint64_t) * g_numthreads;

	g_thread_endtimes_abs = (uint64_t*) valloc(endtimes_size);
	assert(g_thread_endtimes_abs);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_thread_endtimes_abs, endtimes_size, 0, endtimes_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s endtimes");
	}

	size_t latencies_size = sizeof(uint64_t) * g_iterations;

	worst_latencies_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies");
	}

	worst_latencies_from_first_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_first_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_first_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies_from_first");
	}

	worst_latencies_from_previous_ns = (uint64_t*) valloc(latencies_size);
	assert(worst_latencies_from_previous_ns);

	/* Ensure the allocation is pre-faulted */
	ret = memset_s(worst_latencies_from_previous_ns, latencies_size, 0, latencies_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s latencies_from_previous");
	}

	size_t histogram_size = sizeof(histogram_t) * g_maxcpus;
	g_cpu_histogram = (histogram_t *)valloc(histogram_size);
	assert(g_cpu_histogram);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_histogram, histogram_size, 0, histogram_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_histogram");
	}

	size_t map_size = sizeof(uint64_t) * g_iterations;
	g_cpu_map = (_Atomic uint64_t *)valloc(map_size);
	assert(g_cpu_map);
	/* Ensure the allocation is pre-faulted */
	ret = memset_s(g_cpu_map, map_size, 0, map_size);
	if (ret) {
		errc(EX_OSERR, ret, "memset_s g_cpu_map");
	}
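
	/*
	 * g_cpu_map[i] accumulates a bitmask of the CPUs that ran a worker
	 * thread during iteration i (bit n set == some thread woke on CPU n).
	 * One 64-bit word per iteration is why g_maxcpus is asserted to be
	 * <= 64 above.
	 */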

	kr = semaphore_create(mach_task_self(), &g_main_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	/* Either one big semaphore or one per thread */
	if (g_waketype == WAKE_CHAIN ||
	    g_waketype == WAKE_BROADCAST_PERTHREAD ||
	    g_waketype == WAKE_HOP) {
		g_semarr = valloc(sizeof(semaphore_t) * g_numthreads);
		assert(g_semarr);

		for (uint32_t i = 0; i < g_numthreads; i++) {
			kr = semaphore_create(mach_task_self(), &g_semarr[i], SYNC_POLICY_FIFO, 0);
			mach_assert_zero(kr);
		}

		g_leadersem = g_semarr[0];
	} else {
		kr = semaphore_create(mach_task_self(), &g_broadcastsem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
		kr = semaphore_create(mach_task_self(), &g_leadersem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	if (g_waketype == WAKE_HOP) {
		kr = semaphore_create(mach_task_self(), &g_donesem, SYNC_POLICY_FIFO, 0);
		mach_assert_zero(kr);
	}

	kr = semaphore_create(mach_task_self(), &g_readysem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	kr = semaphore_create(mach_task_self(), &g_rt_churn_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	kr = semaphore_create(mach_task_self(), &g_rt_churn_start_sem, SYNC_POLICY_FIFO, 0);
	mach_assert_zero(kr);

	atomic_store_explicit(&g_done_threads, 0, memory_order_relaxed);

	/* Create the threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_create(&threads[i], NULL, worker_thread, (void*)(uintptr_t)i);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_create %d", i);
		}
	}

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret) {
		errc(EX_OSERR, ret, "setpriority");
	}

	bool recommended_cores_warning = false;

	thread_setup(0);

	g_starttime_abs = mach_absolute_time();

	if (g_churn_pri) {
		create_churn_threads();
	}
	if (g_rt_churn) {
		create_rt_churn_threads();
	}

	/* Let everyone get settled */
	kr = semaphore_wait(g_main_sem);
	mach_assert_zero(kr);

	/* Give the system a bit more time to settle */
	if (g_do_sleep) {
		usleep(g_iteration_sleeptime_us);
	}

	cpu_time_t start_time;
	cpu_time_t finish_time;

	record_cpu_time(&start_time);

	/* Go! */
	for (uint32_t i = 0; i < g_iterations; i++) {
		uint32_t j;
		uint64_t worst_abs = 0, best_abs = UINT64_MAX;

		if (g_do_one_long_spin) {
			g_one_long_spin_id = (uint32_t)rand() % g_numthreads;
		}

		if (g_rt_churn) {
			start_rt_churn_threads();
			usleep(100);
		}

		debug_log("%d Main thread reset\n", i);

		atomic_store_explicit(&g_done_threads, 0, memory_order_seq_cst);

		g_starttime_abs = mach_absolute_time();

		/* Fire them off and wait for worker threads to finish */
		kr = semaphore_wait_signal(g_main_sem, g_leadersem);
		mach_assert_zero(kr);

		debug_log("%d Main thread return\n", i);

		assert(atomic_load_explicit(&g_done_threads, memory_order_relaxed) == g_numthreads);

		if (g_rt_churn) {
			wait_for_rt_churn_threads();
		}

		uint64_t recommended_cores_map;
		size_t map_size = sizeof(recommended_cores_map);
		ret = sysctlbyname("kern.sched_recommended_cores", &recommended_cores_map, &map_size, NULL, 0);
		if ((ret == 0) && (recommended_cores_map & mask(g_maxcpus)) != mask(g_maxcpus)) {
			if (g_test_rt) {
				/* Cores have been derecommended, which invalidates the test */
				printf("Recommended cores 0x%llx != all cores 0x%llx\n", recommended_cores_map, mask(g_maxcpus));
				printf("TEST SKIPPED\n");
				exit(0);
			} else if (!recommended_cores_warning) {
				printf("WARNING: Recommended cores 0x%llx != all cores 0x%llx\n", recommended_cores_map, mask(g_maxcpus));
				recommended_cores_warning = true;
			}
		}

		/*
		 * We report the worst latencies relative to start time
		 * and relative to the lead worker thread
		 * and (where relevant) relative to the previous thread
		 */
		for (j = 0; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_starttime_abs;
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
		}

		worst_latencies_ns[i] = abs_to_nanos(worst_abs);

		worst_abs = 0;
		for (j = 1; j < g_numthreads; j++) {
			uint64_t latency_abs;

			latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[0];
			worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
			best_abs = best_abs > latency_abs ? latency_abs : best_abs;
		}

		worst_latencies_from_first_ns[i] = abs_to_nanos(worst_abs);

		if ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP)) {
			worst_abs = 0;
			for (j = 1; j < g_numthreads; j++) {
				uint64_t latency_abs;

				latency_abs = g_thread_endtimes_abs[j] - g_thread_endtimes_abs[j - 1];
				worst_abs = worst_abs < latency_abs ? latency_abs : worst_abs;
				best_abs = best_abs > latency_abs ? latency_abs : best_abs;
			}

			worst_latencies_from_previous_ns[i] = abs_to_nanos(worst_abs);
		}

		/*
		 * In the event of a bad run, cut a trace point.
		 */
		uint64_t worst_latency_ns = ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP)) ? worst_latencies_from_previous_ns[i] : worst_latencies_ns[i];
		if (worst_latency_ns > g_traceworthy_latency_ns) {
			g_traceworthy_count++;
			/* Ariadne's ad-hoc test signpost */
			kdebug_trace(ARIADNEDBG_CODE(0, 0), worst_latency_ns, g_traceworthy_latency_ns, 0, 0);

			if (g_verbose) {
				printf("Worst on this round was %.2f us.\n", ((float)worst_latency_ns) / 1000.0);
			}
		}

		/* Give the system a bit more time to settle */
		if (g_do_sleep) {
			usleep(g_iteration_sleeptime_us);
		}
	}

	record_cpu_time(&finish_time);

	/* Rejoin threads */
	for (uint32_t i = 0; i < g_numthreads; i++) {
		ret = pthread_join(threads[i], NULL);
		if (ret) {
			errc(EX_OSERR, ret, "pthread_join %d", i);
		}
	}

	if (g_rt_churn) {
		join_rt_churn_threads();
	}

	if (g_churn_pri) {
		join_churn_threads();
	}

	uint32_t cpu_idle_time = (finish_time.idle - start_time.idle) * 10;
	uint32_t worker_threads_runtime = worker_threads_total_runtime.seconds * 1000 + worker_threads_total_runtime.microseconds / 1000;

	compute_stats(worst_latencies_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (from a stop):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	putchar('\n');

	compute_stats(worst_latencies_from_first_ns, g_iterations, &avg, &max, &min, &stddev);
	printf("Results (relative to first thread):\n");
	printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
	printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
	printf("Avg:\t\t%.2f us\n", avg / 1000.0);
	printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);

	if ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP)) {
		putchar('\n');

		compute_stats(worst_latencies_from_previous_ns, g_iterations, &avg, &max, &min, &stddev);
		printf("Results (relative to previous thread):\n");
		printf("Max:\t\t%.2f us\n", ((float)max) / 1000.0);
		printf("Min:\t\t%.2f us\n", ((float)min) / 1000.0);
		printf("Avg:\t\t%.2f us\n", avg / 1000.0);
		printf("Stddev:\t\t%.2f us\n", stddev / 1000.0);
	}

	if (g_test_rt) {
		putchar('\n');
		printf("Count of trace-worthy latencies (>%.2f us): %d\n", ((float)g_traceworthy_latency_ns) / 1000.0, g_traceworthy_count);
	}

#if 0
	for (uint32_t i = 0; i < g_iterations; i++) {
		printf("Iteration %d: %.2f us\n", i, worst_latencies_ns[i] / 1000.0);
	}
#endif

	if (g_histogram) {
		putchar('\n');

		for (uint32_t i = 0; i < g_maxcpus; i++) {
			printf("%d\t%d\n", i, g_cpu_histogram[i].accum);
		}
	}

	if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
#define PRIMARY   0x5555555555555555ULL
#define SECONDARY 0xaaaaaaaaaaaaaaaaULL
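		/*
		 * These masks encode the test's assumption about Intel SMT
		 * numbering: even logical CPU ids are primary hyperthreads and
		 * odd ids their secondaries, so PRIMARY/SECONDARY select the
		 * alternating bits of an iteration's CPU map, and the ">> 1"
		 * below lines each secondary up with its primary.
		 */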

		int fail_count = 0;
		uint64_t *sched_latencies_ns = ((g_waketype == WAKE_CHAIN) || (g_waketype == WAKE_HOP)) ? worst_latencies_from_previous_ns : worst_latencies_ns;

		for (uint32_t i = 0; i < g_iterations; i++) {
			bool secondary = false;
			bool fail = false;
			bool warn = false;
			uint64_t map = g_cpu_map[i];
			if (g_test_rt_smt) {
				/* Test for one or more threads running on secondary cores unexpectedly (WARNING) */
				secondary = (map & SECONDARY);
				/* Test for threads running on both primary and secondary cpus of the same core (FAIL) */
				fail = ((map & PRIMARY) & ((map & SECONDARY) >> 1));
			} else if (g_test_rt) {
				/* Test that each thread runs on its own core (WARNING for now) */
				warn = (__builtin_popcountll(map) != g_numthreads);
				/* Test for latency problems (FAIL) */
				fail = (sched_latencies_ns[i] > g_traceworthy_latency_ns);
			} else if (g_test_rt_avoid0) {
				fail = ((map & 0x1) == 0x1);
			}
			if (warn || secondary || fail) {
				printf("Iteration %d: 0x%llx worst latency %.2fus%s%s%s\n", i, map,
				    sched_latencies_ns[i] / 1000.0,
				    warn ? " WARNING" : "",
				    secondary ? " SECONDARY" : "",
				    fail ? " FAIL" : "");
			}
			test_warn |= (warn || secondary || fail);
			test_fail |= fail;
			fail_count += fail;
		}

		if (test_fail && !g_test_strict_fail && (g_iterations >= 100) && (fail_count <= g_iterations / 100)) {
			printf("99%% or better success rate\n");
			test_fail = 0;
		}
	}

	if (g_test_rt_smt && (g_each_spin_duration_ns >= 200000) && !test_warn) {
		printf("cpu_idle_time=%dms worker_threads_runtime=%dms\n", cpu_idle_time, worker_threads_runtime);
		if (cpu_idle_time < worker_threads_runtime / 4) {
			printf("FAIL cpu_idle_time unexpectedly small\n");
			test_fail = 1;
		} else if (cpu_idle_time > worker_threads_runtime * 2) {
			printf("FAIL cpu_idle_time unexpectedly large\n");
			test_fail = 1;
		}
	}

	if (g_test_rt || g_test_rt_smt || g_test_rt_avoid0) {
		if (test_fail) {
			printf("TEST FAILED\n");
		} else {
			printf("TEST PASSED\n");
		}
	}

	free(threads);
	free(g_thread_endtimes_abs);
	free(worst_latencies_ns);
	free(worst_latencies_from_first_ns);
	free(worst_latencies_from_previous_ns);
	free(g_cpu_histogram);
	free(g_cpu_map);

	return test_fail;
}

/*
 * WARNING: This is SPI specifically intended for use by launchd to start UI
 * apps. We use it here for a test tool only to opt into QoS using the same
 * policies. Do not use this outside xnu or libxpc/launchd.
 */
static void
selfexec_with_apptype(int argc, char *argv[])
{
	int ret;
	posix_spawnattr_t attr;
	extern char **environ;
	char *new_argv[argc + 1 + 1 /* NULL */];
	int i;
	char prog[PATH_MAX];
	uint32_t prog_size = PATH_MAX;

	ret = _NSGetExecutablePath(prog, &prog_size);
	if (ret) {
		err(EX_OSERR, "_NSGetExecutablePath");
	}

	for (i = 0; i < argc; i++) {
		new_argv[i] = argv[i];
	}

	new_argv[i] = "--switched_apptype";
	new_argv[i + 1] = NULL;

	ret = posix_spawnattr_init(&attr);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_init");
	}

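	/*
	 * POSIX_SPAWN_SETEXEC makes posix_spawn() behave like execve(2): it
	 * replaces the current process image instead of creating a child, so
	 * on success the posix_spawn() call below never returns.
	 */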
	ret = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETEXEC);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setflags");
	}

	ret = posix_spawnattr_setprocesstype_np(&attr, POSIX_SPAWN_PROC_TYPE_APP_DEFAULT);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawnattr_setprocesstype_np");
	}

	ret = posix_spawn(NULL, prog, NULL, &attr, new_argv, environ);
	if (ret) {
		errc(EX_OSERR, ret, "posix_spawn");
	}
}

/*
 * Admittedly not very attractive.
 */
static void __attribute__((noreturn))
usage()
{
	errx(EX_USAGE, "Usage: %s <threads> <chain | hop | broadcast-single-sem | broadcast-per-thread> "
	    "<realtime | timeshare | timeshare_no_smt | fixed> <iterations>\n\t\t"
	    "[--trace <traceworthy latency in ns>] "
	    "[--verbose] [--spin-one] [--spin-all] [--spin-time <nanos>] [--affinity]\n\t\t"
	    "[--no-sleep] [--drop-priority] [--churn-pri <pri>] [--churn-count <n>] [--churn-random]\n\t\t"
	    "[--extra-thread-count <signed int>]\n\t\t"
	    "[--rt-churn] [--rt-churn-count <n>] [--rt-ll]\n\t\t"
	    "[--test-rt] [--test-rt-smt] [--test-rt-avoid0] [--test-strict-fail]",
	    getprogname());
}
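
/*
 * Example invocation (the binary name may differ in your build):
 *   ./zn 4 broadcast-single-sem realtime 1000 --spin-all --churn-pri 31
 * wakes 4 realtime threads from a single semaphore for 1000 iterations,
 * with each thread spinning until all have checked in, while background
 * churn threads run at priority 31.
 */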

static struct option* g_longopts;
static int option_index;

static uint32_t
read_dec_arg()
{
	char *cp;
	/* char* optarg is a magic global */

	uint32_t arg_val = (uint32_t)strtoull(optarg, &cp, 10);

	if (cp == optarg || *cp) {
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		    g_longopts[option_index].name, optarg);
	}

	return arg_val;
}

static int32_t
read_signed_dec_arg()
{
	char *cp;
	/* char* optarg is a magic global */

	int32_t arg_val = (int32_t)strtoull(optarg, &cp, 10);

	if (cp == optarg || *cp) {
		errx(EX_USAGE, "arg --%s requires a decimal number, found \"%s\"",
		    g_longopts[option_index].name, optarg);
	}

	return arg_val;
}

static void
parse_args(int argc, char *argv[])
{
	enum {
		OPT_GETOPT = 0,
		OPT_SPIN_TIME,
		OPT_TRACE,
		OPT_PRIORITY,
		OPT_CHURN_PRI,
		OPT_CHURN_COUNT,
		OPT_RT_CHURN_COUNT,
		OPT_EXTRA_THREAD_COUNT,
	};

	static struct option longopts[] = {
		/* BEGIN IGNORE CODESTYLE */
		{ "spin-time",          required_argument, NULL,                       OPT_SPIN_TIME },
		{ "trace",              required_argument, NULL,                       OPT_TRACE },
		{ "priority",           required_argument, NULL,                       OPT_PRIORITY },
		{ "churn-pri",          required_argument, NULL,                       OPT_CHURN_PRI },
		{ "churn-count",        required_argument, NULL,                       OPT_CHURN_COUNT },
		{ "rt-churn-count",     required_argument, NULL,                       OPT_RT_CHURN_COUNT },
		{ "extra-thread-count", required_argument, NULL,                       OPT_EXTRA_THREAD_COUNT },
		{ "churn-random",       no_argument,       (int*)&g_churn_random,      TRUE },
		{ "switched_apptype",   no_argument,       (int*)&g_seen_apptype,      TRUE },
		{ "spin-one",           no_argument,       (int*)&g_do_one_long_spin,  TRUE },
		{ "intel-only",         no_argument,       (int*)&g_run_on_intel_only, TRUE },
		{ "spin-all",           no_argument,       (int*)&g_do_all_spin,       TRUE },
		{ "affinity",           no_argument,       (int*)&g_do_affinity,       TRUE },
		{ "no-sleep",           no_argument,       (int*)&g_do_sleep,          FALSE },
		{ "drop-priority",      no_argument,       (int*)&g_drop_priority,     TRUE },
		{ "test-rt",            no_argument,       (int*)&g_test_rt,           TRUE },
		{ "test-rt-smt",        no_argument,       (int*)&g_test_rt_smt,       TRUE },
		{ "test-rt-avoid0",     no_argument,       (int*)&g_test_rt_avoid0,    TRUE },
		{ "test-strict-fail",   no_argument,       (int*)&g_test_strict_fail,  TRUE },
		{ "rt-churn",           no_argument,       (int*)&g_rt_churn,          TRUE },
		{ "rt-ll",              no_argument,       (int*)&g_rt_ll,             TRUE },
		{ "histogram",          no_argument,       (int*)&g_histogram,         TRUE },
		{ "verbose",            no_argument,       (int*)&g_verbose,           TRUE },
		{ "help",               no_argument,       NULL,                       'h' },
		{ NULL,                 0,                 NULL,                       0 }
		/* END IGNORE CODESTYLE */
	};

	g_longopts = longopts;
	int ch = 0;

	while ((ch = getopt_long(argc, argv, "h", longopts, &option_index)) != -1) {
		switch (ch) {
		case OPT_GETOPT:
			/* getopt_long set a variable */
			break;
		case OPT_SPIN_TIME:
			g_do_each_spin = TRUE;
			g_each_spin_duration_ns = read_dec_arg();
			break;
		case OPT_TRACE:
			g_traceworthy_latency_ns = read_dec_arg();
			break;
		case OPT_PRIORITY:
			g_priority = read_dec_arg();
			break;
		case OPT_CHURN_PRI:
			g_churn_pri = read_dec_arg();
			break;
		case OPT_CHURN_COUNT:
			g_churn_count = read_dec_arg();
			break;
		case OPT_RT_CHURN_COUNT:
			g_rt_churn_count = read_dec_arg();
			break;
		case OPT_EXTRA_THREAD_COUNT:
			g_extra_thread_count = read_signed_dec_arg();
			break;
		case '?':
		case 'h':
		default:
			usage();
			/* NORETURN */
		}
	}

	/*
	 * getopt_long reorders all the options to the beginning of the argv array.
	 * Jump past them to the non-option arguments.
	 */

	argc -= optind;
	argv += optind;

	if (argc > 4) {
		warnx("Too many non-option arguments passed");
		usage();
	}

	if (argc != 4) {
		warnx("Missing required <threads> <waketype> <policy> <iterations> arguments");
		usage();
	}

	char *cp;

	/* How many threads? */
	g_numthreads = (uint32_t)strtoull(argv[0], &cp, 10);

	if (cp == argv[0] || *cp) {
		errx(EX_USAGE, "numthreads requires a decimal number, found \"%s\"", argv[0]);
	}

	/* What wakeup pattern? */
	g_waketype = parse_wakeup_pattern(argv[1]);

	/* Policy */
	g_policy = parse_thread_policy(argv[2]);

	/* Iterations */
	g_iterations = (uint32_t)strtoull(argv[3], &cp, 10);

	if (cp == argv[3] || *cp) {
		errx(EX_USAGE, "iterations requires a decimal number, found \"%s\"", argv[3]);
	}

	if (g_iterations < 1) {
		errx(EX_USAGE, "Must have at least one iteration");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_CHAIN) {
		errx(EX_USAGE, "chain mode requires more than one thread");
	}

	if (g_numthreads == 1 && g_waketype == WAKE_HOP) {
		errx(EX_USAGE, "hop mode requires more than one thread");
	}
}