1 // Copyright (c) 2021-2022 Apple Inc. All rights reserved.
2
#include <darwintest.h>
#include <darwintest_utils.h>
#include <dispatch/dispatch.h>
#include <errno.h>
#include <mach/semaphore.h>
#include <mach/mach.h>
#include <mach/task.h>
#include <mach-o/dyld.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/sysctl.h>
#include <unistd.h>

#include "recount_test_utils.h"
15
bool
has_user_system_times(void)
{
	// Whether the kernel maintains precise, separate user/system CPU times
	// (kern.precise_user_kernel_time).  Queried once and cached.
	static dispatch_once_t user_system_once;
	static bool precise_times = false;
	dispatch_once(&user_system_once, ^{
		int value = 0;
		size_t value_size = sizeof(value);
		T_SETUPBEGIN;
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.precise_user_kernel_time",
		    &value, &value_size, NULL, 0),
		    "sysctl kern.precise_user_kernel_time");
		T_SETUPEND;
		precise_times = (value != 0);
	});
	return precise_times;
}
34
bool
has_cpi(void)
{
	// Whether cycles-and-instructions counters are supported
	// (kern.monotonic.supported).  Queried once and cached.
	//
	// Fix: this function reads `errno`, but the file never included
	// <errno.h> directly and relied on a transitive include — now included
	// explicitly at the top of the file.
	static dispatch_once_t cpi_once;
	static int cpi = 0;
	dispatch_once(&cpi_once, ^{
		size_t cpi_size = sizeof(cpi);
		T_SETUPBEGIN;
		int ret = sysctlbyname("kern.monotonic.supported", &cpi, &cpi_size,
		    NULL, 0);
		// ENOENT also means that CPI is unavailable: leave `cpi` as 0
		// instead of failing the test.
		if (ret != 0 && errno != ENOENT) {
			T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.monotonic.supported");
		}
		T_SETUPEND;
	});
	return cpi != 0;
}
53
bool
has_energy(void)
{
	// Whether pervasive energy metering is available
	// (kern.pervasive_energy).  Queried once and cached.
	static dispatch_once_t energy_once;
	static int energy = 0;
	dispatch_once(&energy_once, ^{
		size_t energy_size = sizeof(energy);
		T_SETUPBEGIN;
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.pervasive_energy",
		    &energy, &energy_size, NULL, 0),
		    "sysctl kern.pervasive_energy");
		T_SETUPEND;
	});
	return energy != 0;
}
70
unsigned int
perf_level_count(void)
{
	// Number of performance levels reported by hw.nperflevels (e.g. 2 on
	// asymmetric systems with P- and E-cores).  Queried once and cached.
	static dispatch_once_t count_once;
	static unsigned int count = 0;
	dispatch_once(&count_once, ^{
		size_t count_size = sizeof(count);
		T_SETUPBEGIN;
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.nperflevels", &count,
		    &count_size, NULL, 0),
		    "sysctl hw.nperflevels");
		T_SETUPEND;
	});
	return count;
}
86
const char *
perf_level_name(unsigned int perf_level)
{
	// Human-readable name of a perf level, from hw.perflevel<N>.name.
	// Names are fetched once and cached; callers may only ask for levels
	// 0 and 1.
	static dispatch_once_t names_once;
	static char names[2][32] = { 0 };
	dispatch_once(&names_once, ^{
		T_SETUPBEGIN;
		unsigned int count = perf_level_count();
		// Fix: clamp to the capacity of `names` — previously a system
		// reporting more than two perf levels would write past the end
		// of the array in the loop below.
		unsigned int capacity =
		    (unsigned int)(sizeof(names) / sizeof(names[0]));
		if (count > capacity) {
			count = capacity;
		}
		for (unsigned int i = 0; i < count; i++) {
			char sysctl_name[64] = { 0 };
			snprintf(sysctl_name, sizeof(sysctl_name), "hw.perflevel%d.name",
			    i);
			T_QUIET;
			T_ASSERT_POSIX_SUCCESS(sysctlbyname(sysctl_name, &names[i],
			    &(size_t){ sizeof(names[i]) }, NULL, 0),
			    "sysctl %s", sysctl_name);
		}
		T_SETUPEND;
	});
	T_QUIET; T_ASSERT_LT(perf_level, 2, "no more than two perf levels");
	return names[perf_level];
}
109
static void
bind_to_cluster(char type)
{
	// Bind the calling thread to a cluster of the given type ('P' or 'E')
	// via the kern.sched_thread_bind_cluster_type sysctl.
	int error = sysctlbyname("kern.sched_thread_bind_cluster_type", NULL,
	    NULL, &type, sizeof(type));
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(error,
	    "sysctl kern.sched_thread_bind_cluster_type");
}
118
119 void
run_on_all_perf_levels(void)120 run_on_all_perf_levels(void)
121 {
122 if (perf_level_count() == 1) {
123 return;
124 }
125
126 T_SETUPBEGIN;
127 bind_to_cluster('P');
128 bind_to_cluster('E');
129 // Return to the kernel to synchronize timings with the scheduler.
130 (void)getppid();
131 T_SETUPEND;
132 }
133
uint64_t
ns_from_mach(uint64_t mach_time)
{
	// Convert Mach absolute time units to nanoseconds using the timebase.
	mach_timebase_info_data_t timebase = { 0 };
	mach_timebase_info(&timebase);
	return mach_time * timebase.numer / timebase.denom;
}
141
uint64_t
ns_from_timeval(struct timeval tv)
{
	// Total nanoseconds represented by a timeval (microsecond precision).
	uint64_t sec_ns = (uint64_t)tv.tv_sec * NSEC_PER_SEC;
	uint64_t usec_ns = (uint64_t)tv.tv_usec * 1000;
	return sec_ns + usec_ns;
}
147
struct timeval
timeval_from_ns(uint64_t ns)
{
	// Split nanoseconds into whole seconds plus remaining microseconds.
	struct timeval tv = {
		.tv_sec = ns / NSEC_PER_SEC,
		.tv_usec = (ns % NSEC_PER_SEC) / 1000,
	};
	return tv;
}
156
uint64_t
ns_from_time_value(struct time_value tv)
{
	// Total nanoseconds represented by a Mach time_value (microsecond
	// precision).
	uint64_t sec_ns = (uint64_t)tv.seconds * NSEC_PER_SEC;
	uint64_t usec_ns = (uint64_t)tv.microseconds * 1000;
	return sec_ns + usec_ns;
}
163
struct time_value
time_value_from_ns(uint64_t ns)
{
	// Split nanoseconds into whole seconds plus remaining microseconds,
	// as a Mach time_value.
	struct time_value tv = {
		.seconds = (integer_t)(ns / NSEC_PER_SEC),
		.microseconds = (ns % NSEC_PER_SEC) / 1000,
	};
	return tv;
}
172
static void *
spin_role(void *arg)
{
	// Busy-wait until the scene clears the flag pointed to by `arg`
	// (scn_spin_sync), burning CPU time for the test to observe.
	volatile uintptr_t *flag = arg;
	for (;;) {
		if (*flag == 0) {
			break;
		}
	}
	return NULL;
}
182
// Semaphore pair handed to each wait_role() thread: the thread blocks on
// ws_wait and signals ws_start so scene_start() knows it has parked.
struct wait_start {
	semaphore_t ws_wait;
	semaphore_t ws_start;
};
187
static void *
wait_role(void *arg)
{
	// Atomically signal readiness on ws_start and block on ws_wait until
	// scene_end() wakes this thread.
	struct wait_start *start = arg;
	semaphore_wait_signal(start->ws_wait, start->ws_start);
	return NULL;
}
195
// Spawn `n` threads acting out the NONE-terminated `roles` array (cycled if
// shorter than `n`).  Blocks until every ROLE_WAIT thread has parked on its
// semaphore.  Returns a heap-allocated scene to pass to scene_end(), or NULL
// when n == 0.
struct scene *
scene_start(unsigned int n, role_t *roles)
{
	if (n == 0) {
		return NULL;
	}

	T_SETUPBEGIN;

	// One extra actor slot stays zeroed (ROLE_NONE) as the sentinel that
	// scene_end() uses to stop joining threads.
	size_t scene_size = sizeof(struct scene) + (n + 1) * sizeof(struct actor);
	struct scene *scene = malloc(scene_size);
	T_QUIET; T_WITH_ERRNO;
	T_ASSERT_NOTNULL(scene, "scene = malloc(%zu)", scene_size);

	bzero(scene, scene_size);
	// Assign roles round-robin: hitting the ROLE_NONE terminator restarts
	// the cycle at roles[0].
	unsigned int role_i = 0;
	unsigned int wait_count = 0;
	for (unsigned int i = 0; i < n; i++) {
		role_t role = roles[role_i];
		if (role == ROLE_NONE) {
			role_i = 0;
			role = roles[role_i];
		}
		if (role == ROLE_WAIT) {
			wait_count++;
		}
		scene->scn_actors[i].act_role = role;
		role_i++;
	}

	// ws_wait parks the waiter threads; ws_start counts them in as they
	// arrive (see wait_role()).
	struct wait_start ws = { 0 };
	kern_return_t kr = semaphore_create(mach_task_self(), &ws.ws_wait,
	    SYNC_POLICY_FIFO, 0);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create(... wait ...)");
	kr = semaphore_create(mach_task_self(), &ws.ws_start,
	    SYNC_POLICY_FIFO, 0);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create(... start ...)");

	for (unsigned int i = 0; i < n; i++) {
		struct actor *act = &scene->scn_actors[i];
		void *(*action)(void *) = NULL;
		void *sync = NULL;
		switch (act->act_role) {
		case ROLE_SPIN:
			// Spinners poll scn_spin_sync until scene_end() clears it.
			sync = &scene->scn_spin_sync;
			action = spin_role;
			break;
		case ROLE_WAIT:
			// Safe to pass stack-allocated `ws`: all waiters check in
			// on ws_start below, before this function returns.
			sync = &ws;
			action = wait_role;
			break;
		default:
			T_ASSERT_FAIL("unexpected role: %d", act->act_role);
		}
		int error = pthread_create(&act->act_thread, NULL, action, sync);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create");
	}

	T_SETUPEND;
	// Wait for every ROLE_WAIT thread to park before returning, so the
	// caller can measure from a quiescent state.
	for (unsigned int i = 0; i < wait_count; i++) {
		semaphore_wait(ws.ws_start);
	}
	semaphore_destroy(mach_task_self(), ws.ws_start);
	// Stash the wait semaphore for scene_end() to signal and destroy.
	scene->scn_wait_sync = (void *)(uintptr_t)ws.ws_wait;
	return scene;
}
262
void
scene_end(struct scene *scene)
{
	// Tear down a scene built by scene_start(); NULL is a no-op (matching
	// scene_start()'s n == 0 return).
	if (scene == NULL) {
		return;
	}

	// Release the spinners, then wake every waiter and drop the semaphore.
	scene->scn_spin_sync = 0;
	semaphore_signal_all((semaphore_t)scene->scn_wait_sync);
	semaphore_destroy(mach_task_self(), (semaphore_t)scene->scn_wait_sync);
	// Join actors up to the zeroed sentinel slot.
	for (struct actor *act = scene->scn_actors; act->act_role != ROLE_NONE;
	    act++) {
		int error = pthread_join(act->act_thread, NULL);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_join");
	}
	free(scene);
}
281
pid_t
launch_helper(char *name)
{
	// Re-exec the current test binary with `name` as its only argument and
	// return the child's pid.
	char bin_path[MAXPATHLEN];
	uint32_t bin_path_size = sizeof(bin_path);

	T_SETUPBEGIN;
	int ret = _NSGetExecutablePath(bin_path, &bin_path_size);
	T_QUIET;
	T_ASSERT_EQ(ret, 0, "_NSGetExecutablePath()");
	pid_t child = 0;
	char *argv[] = { bin_path, name, NULL };
	ret = dt_launch_tool(&child, argv, false, NULL, NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(ret, "dt_launch_tool(... %s, %s ...)", bin_path,
	    name);
	T_SETUPEND;

	return child;
}
302