#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <errno.h>
#include <err.h>
#include <string.h>
#include <assert.h>
#include <sysexits.h>
#include <getopt.h>
#include <spawn.h>
#include <stdbool.h>
#include <sys/sysctl.h>
#include <sys/wait.h>
#include <mach/mach_time.h>
#include <mach/mach.h>
#include <mach/semaphore.h>
#include <TargetConditionals.h>

#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif

#include <darwintest.h>
#include <stdatomic.h>
25
26 T_GLOBAL_META(T_META_RADAR_COMPONENT_NAME("xnu"),
27 T_META_RADAR_COMPONENT_VERSION("scheduler"));
28
#define MAX_THREADS 32          /* upper bound on spinner + switcher threads */
#define SPIN_SECS 6             /* how long spinner threads busy-wait */
#define THR_SPINNER_PRI 63      /* priority of the CPU-saturating spinners */
#define THR_MANAGER_PRI 62      /* priority of the two context-switching threads */
#define WARMUP_ITERATIONS 100   /* thread_switch() calls before measuring */
#define POWERCTRL_SUCCESS_STR "Factor1: 1.000000"

static mach_timebase_info_data_t timebase_info;  /* for nanos <-> mach abs-time conversion */
static semaphore_t semaphore;     /* released by main to start all workers at once */
static semaphore_t worker_sem;    /* signaled by each worker once it is parked in wait */
static uint32_t g_numcpus;        /* hw.ncpu */
/* Cleared by the measuring thread when its measurement loop completes. */
static _Atomic uint32_t keep_going = 1;
static dt_stat_time_t s;          /* darwintest stat collecting per-switch latency */

static struct {
	pthread_t thread;
	bool measure_thread;      /* true for the one thread that drives T_STAT_MEASURE_LOOP */
} threads[MAX_THREADS];
47
/* Convert a duration in nanoseconds to mach absolute-time units. */
static uint64_t
nanos_to_abs(uint64_t nanos)
{
	uint64_t abs_units = (nanos * timebase_info.denom) / timebase_info.numer;
	return abs_units;
}
53
54 extern char **environ;
55
56 static void
csw_perf_test_init(void)57 csw_perf_test_init(void)
58 {
59 int spawn_ret, pid;
60 char *const clpcctrl_args[] = {"/usr/local/bin/clpcctrl", "-f", "5000", NULL};
61 spawn_ret = posix_spawn(&pid, clpcctrl_args[0], NULL, NULL, clpcctrl_args, environ);
62 waitpid(pid, &spawn_ret, 0);
63 }
64
65 static void
csw_perf_test_cleanup(void)66 csw_perf_test_cleanup(void)
67 {
68 int spawn_ret, pid;
69 char *const clpcctrl_args[] = {"/usr/local/bin/clpcctrl", "-d", NULL};
70 spawn_ret = posix_spawn(&pid, clpcctrl_args[0], NULL, NULL, clpcctrl_args, environ);
71 waitpid(pid, &spawn_ret, 0);
72 }
73
74 static pthread_t
create_thread(uint32_t thread_id,uint32_t priority,bool fixpri,void * (* start_routine)(void *))75 create_thread(uint32_t thread_id, uint32_t priority, bool fixpri,
76 void *(*start_routine)(void *))
77 {
78 int rv;
79 pthread_t new_thread;
80 struct sched_param param = { .sched_priority = (int)priority };
81 pthread_attr_t attr;
82
83 T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), "pthread_attr_init");
84
85 T_ASSERT_POSIX_ZERO(pthread_attr_setschedparam(&attr, ¶m),
86 "pthread_attr_setschedparam");
87
88 if (fixpri) {
89 T_ASSERT_POSIX_ZERO(pthread_attr_setschedpolicy(&attr, SCHED_RR),
90 "pthread_attr_setschedpolicy");
91 }
92
93 T_ASSERT_POSIX_ZERO(pthread_create(&new_thread, &attr, start_routine,
94 (void*)(uintptr_t)thread_id), "pthread_create");
95
96 T_ASSERT_POSIX_ZERO(pthread_attr_destroy(&attr), "pthread_attr_destroy");
97
98 threads[thread_id].thread = new_thread;
99
100 return new_thread;
101 }
102
/*
 * Busy-wait until `seconds` have elapsed or another thread clears the
 * keep_going flag, whichever comes first.
 */
static void
spin_for_duration(uint32_t seconds)
{
	uint64_t interval = nanos_to_abs((uint64_t)seconds * NSEC_PER_SEC);
	uint64_t deadline = mach_absolute_time() + interval;
	uint64_t iterations = 0;

	while (mach_absolute_time() < deadline &&
	    atomic_load_explicit(&keep_going, memory_order_relaxed)) {
		iterations++;
	}
}
118
/*
 * Spinner thread body: name itself, check in on worker_sem while blocking on
 * the start semaphore, then saturate a CPU for SPIN_SECS. The thread_id
 * arrives encoded in the void* argument.
 */
static void *
spin_thread(void *arg)
{
	/* Round-trip through uintptr_t: a direct pointer->uint32_t cast is invalid. */
	uint32_t thread_id = (uint32_t)(uintptr_t)arg;
	char name[30] = "";

	snprintf(name, sizeof(name), "spin thread %2d", thread_id);
	pthread_setname_np(name);
	/* Park on `semaphore` and atomically signal main we are ready. */
	T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem),
	    "semaphore_wait_signal");
	spin_for_duration(SPIN_SECS);
	return NULL;
}
132
/*
 * Context-switch thread body. The designated measure thread warms up, then
 * times thread_switch() calls via T_STAT_MEASURE_LOOP and clears keep_going
 * when done; its partner simply yields back until told to stop.
 */
static void *
thread(void *arg)
{
	/* Round-trip through uintptr_t: a direct pointer->uint32_t cast is invalid. */
	uint32_t thread_id = (uint32_t)(uintptr_t)arg;
	char name[30] = "";

	snprintf(name, sizeof(name), "thread %2d", thread_id);
	pthread_setname_np(name);
	/* Park on `semaphore` and atomically signal main we are ready. */
	T_ASSERT_MACH_SUCCESS(semaphore_wait_signal(semaphore, worker_sem), "semaphore_wait");

	if (threads[thread_id].measure_thread) {
		/* Un-timed switches to settle caches/scheduler state first. */
		for (int i = 0; i < WARMUP_ITERATIONS; i++) {
			thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0);
		}
		T_STAT_MEASURE_LOOP(s) {
			if (thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0)) {
				T_ASSERT_FAIL("thread_switch");
			}
		}
		/* Tell the partner thread and the spinners to wind down. */
		atomic_store_explicit(&keep_going, 0, memory_order_relaxed);
	} else {
		while (atomic_load_explicit(&keep_going, memory_order_relaxed)) {
			if (thread_switch(THREAD_NULL, SWITCH_OPTION_NONE, 0)) {
				T_ASSERT_FAIL("thread_switch");
			}
		}
	}
	return NULL;
}
162
163 void
check_device_temperature(void)164 check_device_temperature(void)
165 {
166 char buffer[256];
167 FILE *pipe = popen("powerctrl Factor1", "r");
168
169 if (pipe == NULL) {
170 T_FAIL("Failed to check device temperature");
171 T_END;
172 }
173
174 fgets(buffer, sizeof(buffer), pipe);
175
176 if (strncmp(POWERCTRL_SUCCESS_STR, buffer, strlen(POWERCTRL_SUCCESS_STR))) {
177 T_PERF("temperature", 0.0, "factor", "device temperature");
178 } else {
179 T_PASS("Device temperature check pass");
180 T_PERF("temperature", 1.0, "factor", "device temperature");
181 }
182 pclose(pipe);
183 }
184
/*
 * Read one integer perfcontrol-callout counter from the kernel and publish
 * it as a T_PERF metric labeled `info` with the given `units`.
 */
void
record_perfcontrol_stats(const char *sysctlname, const char *units, const char *info)
{
	int value = 0;
	size_t value_size = sizeof(value);

	T_ASSERT_POSIX_ZERO(sysctlbyname(sysctlname,
	    &value, &value_size, NULL, 0),
	    "%s", sysctlname);
	T_PERF(info, value, units, info);
}
195
196
197 T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler"));
198
/* Disable the test on MacOS for now */
T_DECL(perf_csw, "context switch performance", T_META_TAG_PERF, T_META_CHECK_LEAKS(false), T_META_ASROOT(true))
{
#if !defined (__arm__) && !defined(__arm64__)
	T_SKIP("Not supported on Intel platforms");
	return;
#endif /* !defined (__arm__) && !defined(__arm64__) */
	/* Record thermal state before the run so throttled results are identifiable. */
	check_device_temperature();

	/* Restore CPU frequency management even if the test body fails. */
	T_ATEND(csw_perf_test_cleanup);

	csw_perf_test_init();
	pthread_setname_np("main thread");

	T_ASSERT_MACH_SUCCESS(mach_timebase_info(&timebase_info), "mach_timebase_info");

	/* Keep main below the workers (62/63) so it doesn't perturb the measurement. */
	struct sched_param param = {.sched_priority = 48};

	T_ASSERT_POSIX_ZERO(pthread_setschedparam(pthread_self(), SCHED_FIFO, &param),
	    "pthread_setschedparam");

	T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &semaphore,
	    SYNC_POLICY_FIFO, 0), "semaphore_create");

	T_ASSERT_MACH_SUCCESS(semaphore_create(mach_task_self(), &worker_sem,
	    SYNC_POLICY_FIFO, 0), "semaphore_create");

	size_t ncpu_size = sizeof(g_numcpus);
	T_ASSERT_POSIX_ZERO(sysctlbyname("hw.ncpu", &g_numcpus, &ncpu_size, NULL, 0),
	    "sysctlbyname hw.ncpu");

	printf("hw.ncpu: %d\n", g_numcpus);
	/* One spinner per CPU minus one, leaving a core for the two switcher threads. */
	uint32_t n_spinners = g_numcpus - 1;

	int mt_supported = 0;
	size_t mt_supported_size = sizeof(mt_supported);
	T_ASSERT_POSIX_ZERO(sysctlbyname("kern.monotonic.supported", &mt_supported,
	    &mt_supported_size, NULL, 0), "sysctlbyname kern.monotonic.supported");

	for (uint32_t thread_id = 0; thread_id < n_spinners; thread_id++) {
		threads[thread_id].thread = create_thread(thread_id, THR_SPINNER_PRI,
		    true, &spin_thread);
	}

	s = dt_stat_time_create("context switch time");

	/* Two switcher threads at equal priority; only the first one measures. */
	create_thread(n_spinners, THR_MANAGER_PRI, true, &thread);
	threads[n_spinners].measure_thread = true;
	create_thread(n_spinners + 1, THR_MANAGER_PRI, true, &thread);

	/* Allow the context switch threads to get into sem_wait() */
	for (uint32_t thread_id = 0; thread_id < n_spinners + 2; thread_id++) {
		T_ASSERT_MACH_SUCCESS(semaphore_wait(worker_sem), "semaphore_wait");
	}

	int enable_callout_stats = 1;
	size_t enable_size = sizeof(enable_callout_stats);

	if (mt_supported) {
		/* Enable callout stat collection */
		T_ASSERT_POSIX_ZERO(sysctlbyname("kern.perfcontrol_callout.stats_enabled",
		    NULL, 0, &enable_callout_stats, enable_size),
		    "sysctlbyname kern.perfcontrol_callout.stats_enabled");
	}

	/* Release every worker at once to start the measurement. */
	T_ASSERT_MACH_SUCCESS(semaphore_signal_all(semaphore), "semaphore_signal");


	for (uint32_t thread_id = 0; thread_id < n_spinners + 2; thread_id++) {
		T_ASSERT_POSIX_ZERO(pthread_join(threads[thread_id].thread, NULL),
		    "pthread_join %d", thread_id);
	}

	if (mt_supported) {
		record_perfcontrol_stats("kern.perfcontrol_callout.oncore_instr",
		    "instructions", "oncore.instructions");
		record_perfcontrol_stats("kern.perfcontrol_callout.offcore_instr",
		    "instructions", "offcore.instructions");
		record_perfcontrol_stats("kern.perfcontrol_callout.oncore_cycles",
		    "cycles", "oncore.cycles");
		record_perfcontrol_stats("kern.perfcontrol_callout.offcore_cycles",
		    "cycles", "offcore.cycles");

		/* Disable callout stat collection */
		enable_callout_stats = 0;
		T_ASSERT_POSIX_ZERO(sysctlbyname("kern.perfcontrol_callout.stats_enabled",
		    NULL, 0, &enable_callout_stats, enable_size),
		    "sysctlbyname kern.perfcontrol_callout.stats_enabled");
	}

	/* Re-check thermals so a run that heated the device is flagged in results. */
	check_device_temperature();
	dt_stat_finalize(s);
}
292