xref: /xnu-10002.81.5/tests/recount/recount_test_utils.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 // Copyright (c) 2021-2022 Apple Inc.  All rights reserved.
2 
3 #include <darwintest.h>
4 #include <darwintest_utils.h>
5 #include <dispatch/dispatch.h>
6 #include <mach/semaphore.h>
7 #include <mach/mach.h>
8 #include <mach/task.h>
9 #include <mach-o/dyld.h>
10 #include <stdbool.h>
11 #include <stdlib.h>
12 #include <sys/sysctl.h>
13 #include <unistd.h>
14 
15 #include "recount_test_utils.h"
16 
17 bool
has_user_system_times(void)18 has_user_system_times(void)
19 {
20 	static dispatch_once_t user_system_once;
21 	static bool precise_times = false;
22 	dispatch_once(&user_system_once, ^{
23 		int precise_times_int = 0;
24 		size_t precise_times_size = sizeof(precise_times_int);
25 		T_SETUPBEGIN;
26 		T_QUIET;
27 		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.precise_user_kernel_time",
28 				&precise_times_int, &precise_times_size, NULL, 0),
29 				"sysctl kern.precise_user_kernel_time");
30 		T_SETUPEND;
31 		precise_times = precise_times_int != 0;
32 	});
33 	return precise_times;
34 }
35 
36 bool
has_cpi(void)37 has_cpi(void)
38 {
39 	static dispatch_once_t cpi_once;
40 	static int cpi = 0;
41 	dispatch_once(&cpi_once, ^{
42 		size_t cpi_size = sizeof(cpi);
43 		T_SETUPBEGIN;
44 		int ret = sysctlbyname("kern.monotonic.supported", &cpi, &cpi_size,
45 				NULL, 0);
46 		// ENOENT also means that CPI is unavailable.
47 		if (ret != 0 && errno != ENOENT) {
48 			T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.monotonic.supported");
49 		}
50 		T_SETUPEND;
51 	});
52 	return cpi != 0;
53 }
54 
55 bool
has_energy(void)56 has_energy(void)
57 {
58 	static dispatch_once_t energy_once;
59 	static int energy = false;
60 	dispatch_once(&energy_once, ^{
61 		size_t energy_size = sizeof(energy);
62 		T_SETUPBEGIN;
63 		T_QUIET;
64 		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.pervasive_energy",
65 				&energy, &energy_size, NULL, 0),
66 				"sysctl kern.pervasive_energy");
67 		T_SETUPEND;
68 	});
69 	return energy != 0;
70 }
71 
72 unsigned int
perf_level_count(void)73 perf_level_count(void)
74 {
75 	static dispatch_once_t count_once;
76 	static unsigned int count = 0;
77 	dispatch_once(&count_once, ^{
78 		T_SETUPBEGIN;
79 		T_QUIET;
80 		T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.nperflevels", &count,
81 				&(size_t){ sizeof(count) }, NULL, 0),
82 				"sysctl hw.nperflevels");
83 		T_SETUPEND;
84 	});
85 	return count;
86 }
87 
88 static const char **
_perf_level_names(void)89 _perf_level_names(void)
90 {
91 	static char names[2][32] = { 0 };
92 	static dispatch_once_t names_once;
93 	dispatch_once(&names_once, ^{
94 		T_SETUPBEGIN;
95 		unsigned int count = perf_level_count();
96 		for (unsigned int i = 0; i < count; i++) {
97 			char sysctl_name[64] = { 0 };
98 			snprintf(sysctl_name, sizeof(sysctl_name), "hw.perflevel%d.name",
99 					i);
100 			T_QUIET;
101 			T_ASSERT_POSIX_SUCCESS(sysctlbyname(sysctl_name, &names[i],
102 					&(size_t){ sizeof(names[i]) }, NULL, 0),
103 					"sysctl %s", sysctl_name);
104 		}
105 		T_SETUPEND;
106 	});
107 	static const char *ret_names[] = {
108 		(char *)&names[0],
109 		(char *)&names[1],
110 	};
111 	return ret_names;
112 }
113 
114 const char *
perf_level_name(unsigned int perf_level)115 perf_level_name(unsigned int perf_level)
116 {
117 	return _perf_level_names()[perf_level];
118 }
119 
unsigned int
perf_level_index(const char *name)
{
	// Map a perf level name (as returned by perf_level_name()) back to its
	// index; fails the test outright if the name is unknown.
	const char **names = _perf_level_names();
	unsigned int count = perf_level_count();
	for (unsigned int i = 0; i < count; i++) {
		if (strcmp(names[i], name) == 0) {
			return i;
		}
	}
	T_ASSERT_FAIL("cannot find perf level named %s", name);
}
132 
133 void
bind_to_cluster(char type)134 bind_to_cluster(char type)
135 {
136 	int ret = sysctlbyname("kern.sched_thread_bind_cluster_type", NULL, NULL,
137 			&type, sizeof(type));
138 	T_QUIET;
139 	T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.sched_thread_bind_cluster_type");
140 	// Ensure the thread has seen a context switch while bound.
141 	usleep(10000);
142 }
143 
144 void
run_on_all_perf_levels(void)145 run_on_all_perf_levels(void)
146 {
147 	if (perf_level_count() == 1) {
148 		return;
149 	}
150 
151 	T_SETUPBEGIN;
152 	bind_to_cluster('P');
153 	bind_to_cluster('E');
154 	// Return to the kernel to synchronize timings with the scheduler.
155 	(void)getppid();
156 	T_SETUPEND;
157 }
158 
159 uint64_t
ns_from_mach(uint64_t mach_time)160 ns_from_mach(uint64_t mach_time)
161 {
162 	mach_timebase_info_data_t tbi = { 0 };
163 	mach_timebase_info(&tbi);
164 	return mach_time * tbi.numer / tbi.denom;
165 }
166 
167 uint64_t
ns_from_timeval(struct timeval tv)168 ns_from_timeval(struct timeval tv)
169 {
170 	return (uint64_t)tv.tv_sec * NSEC_PER_SEC + (uint64_t)tv.tv_usec * 1000;
171 }
172 
173 struct timeval
timeval_from_ns(uint64_t ns)174 timeval_from_ns(uint64_t ns)
175 {
176 	return (struct timeval){
177 		.tv_sec = ns / NSEC_PER_SEC,
178 		.tv_usec = (ns % NSEC_PER_SEC) / 1000,
179 	};
180 }
181 
182 uint64_t
ns_from_time_value(struct time_value tv)183 ns_from_time_value(struct time_value tv)
184 {
185 	return (uint64_t)tv.seconds * NSEC_PER_SEC +
186 			(uint64_t)tv.microseconds * 1000;
187 }
188 
189 struct time_value
time_value_from_ns(uint64_t ns)190 time_value_from_ns(uint64_t ns)
191 {
192 	return (struct time_value){
193 		.seconds = (integer_t)(ns / NSEC_PER_SEC),
194 		.microseconds = (ns % NSEC_PER_SEC) / 1000,
195 	};
196 }
197 
// Thread body for ROLE_SPIN actors: busy-wait until the owner clears the
// flag at *arg (see scene_end()).
static void *
spin_role(void *arg)
{
	volatile uintptr_t *flag = arg;
	while (*flag != 0) {
		// spin
	}
	return NULL;
}
207 
// Semaphore pair used to park ROLE_WAIT threads: each waiter blocks on
// ws_wait while atomically signaling ws_start, so scene_start() can tell
// when every waiter is parked.
struct wait_start {
	semaphore_t ws_wait;
	semaphore_t ws_start;
};
212 
213 static void *
wait_role(void * arg)214 wait_role(void *arg)
215 {
216 	struct wait_start *ws = arg;
217 	semaphore_wait_signal(ws->ws_wait, ws->ws_start);
218 	return NULL;
219 }
220 
221 struct scene *
scene_start(unsigned int n,role_t * roles)222 scene_start(unsigned int n, role_t *roles)
223 {
224 	if (n == 0) {
225 		return NULL;
226 	}
227 
228 	T_SETUPBEGIN;
229 
230 	size_t scene_size = sizeof(struct scene) + (n + 1) * sizeof(struct actor);
231 	struct scene *scene = malloc(scene_size);
232 	T_QUIET; T_WITH_ERRNO;
233 	T_ASSERT_NOTNULL(scene, "scene = malloc(%zu)", scene_size);
234 
235 	bzero(scene, scene_size);
236 	unsigned int role_i = 0;
237 	unsigned int wait_count = 0;
238 	for (unsigned int i = 0; i < n; i++) {
239 		role_t role = roles[role_i];
240 		if (role == ROLE_NONE) {
241 			role_i = 0;
242 			role = roles[role_i];
243 		}
244 		if (role == ROLE_WAIT) {
245 			wait_count++;
246 		}
247 		scene->scn_actors[i].act_role = role;
248 		role_i++;
249 	}
250 
251 	struct wait_start ws = { 0 };
252 	kern_return_t kr = semaphore_create(mach_task_self(), &ws.ws_wait,
253 			SYNC_POLICY_FIFO, 0);
254 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create(... wait ...)");
255 	kr = semaphore_create(mach_task_self(), &ws.ws_start,
256 			SYNC_POLICY_FIFO, 0);
257 	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create(... start ...)");
258 
259 	for (unsigned int i = 0; i < n; i++) {
260 		struct actor *act = &scene->scn_actors[i];
261 		void *(*action)(void *) = NULL;
262 		void *sync = NULL;
263 		switch (act->act_role) {
264 		case ROLE_SPIN:
265 			sync = &scene->scn_spin_sync;
266 			action = spin_role;
267 			break;
268 		case ROLE_WAIT:
269 			sync = &ws;
270 			action = wait_role;
271 			break;
272 		default:
273 			T_ASSERT_FAIL("unexpected role: %d", act->act_role);
274 		}
275 		int error = pthread_create(&act->act_thread, NULL, action, sync);
276 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create");
277 	}
278 
279 	T_SETUPEND;
280 	for (unsigned int i = 0; i < wait_count; i++) {
281 		semaphore_wait(ws.ws_start);
282 	}
283 	semaphore_destroy(mach_task_self(), ws.ws_start);
284 	scene->scn_wait_sync = (void *)(uintptr_t)ws.ws_wait;
285 	return scene;
286 }
287 
288 void
scene_end(struct scene * scene)289 scene_end(struct scene *scene)
290 {
291 	if (!scene) {
292 		return;
293 	}
294 
295 	scene->scn_spin_sync = 0;
296 	semaphore_signal_all((semaphore_t)scene->scn_wait_sync);
297 	semaphore_destroy(mach_task_self(), (semaphore_t)scene->scn_wait_sync);
298 	struct actor *act = scene->scn_actors;
299 	while (act->act_role != ROLE_NONE) {
300 		int error = pthread_join(act->act_thread, NULL);
301 		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_join");
302 		act++;
303 	}
304 	free(scene);
305 }
306 
307 pid_t
launch_helper(char * name)308 launch_helper(char *name)
309 {
310 	char bin_path[MAXPATHLEN];
311 	uint32_t path_size = sizeof(bin_path);
312 
313 	T_SETUPBEGIN;
314 	int ret = _NSGetExecutablePath(bin_path, &path_size);
315 	T_QUIET;
316 	T_ASSERT_EQ(ret, 0, "_NSGetExecutablePath()");
317 	pid_t pid = 0;
318 	ret = dt_launch_tool(&pid, (char *[]){ bin_path, name, NULL}, false, NULL,
319 	    NULL);
320 	T_QUIET;
321 	T_ASSERT_POSIX_SUCCESS(ret, "dt_launch_tool(... %s, %s ...)", bin_path,
322 	    name);
323 	T_SETUPEND;
324 
325 	return pid;
326 }
327