xref: /xnu-11215.1.10/tests/recount/recount_test_utils.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 // Copyright (c) 2021-2022 Apple Inc.  All rights reserved.
2 
3 #include <darwintest.h>
4 #include <darwintest_utils.h>
5 #include <dispatch/dispatch.h>
6 #include <mach/semaphore.h>
7 #include <mach/mach.h>
8 #include <mach/task.h>
9 #include <mach-o/dyld.h>
10 #include <stdbool.h>
11 #include <stdlib.h>
12 #include <sys/sysctl.h>
13 #include <unistd.h>
14 
15 #include "recount_test_utils.h"
16 
bool
has_user_system_times(void)
{
	// Cached answer to whether the kernel tracks user vs. system time
	// precisely (kern.precise_user_kernel_time); queried once per process.
	static dispatch_once_t once_token;
	static bool precise = false;
	dispatch_once(&once_token, ^{
		int value = 0;
		size_t value_size = sizeof(value);
		T_SETUPBEGIN;
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.precise_user_kernel_time",
				&value, &value_size, NULL, 0),
				"sysctl kern.precise_user_kernel_time");
		T_SETUPEND;
		precise = (value != 0);
	});
	return precise;
}
35 
bool
has_cpi(void)
{
	// Cached answer to whether cycle/instruction counters are supported
	// (kern.monotonic.supported); queried once per process.
	static dispatch_once_t once_token;
	static int supported = 0;
	dispatch_once(&once_token, ^{
		size_t size = sizeof(supported);
		T_SETUPBEGIN;
		int ret = sysctlbyname("kern.monotonic.supported", &supported, &size,
				NULL, 0);
		// A missing sysctl node (ENOENT) also means CPI is unavailable,
		// so only fail the test on other errors.
		if (ret != 0 && errno != ENOENT) {
			T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.monotonic.supported");
		}
		T_SETUPEND;
	});
	return supported != 0;
}
54 
bool
has_energy(void)
{
	// Cached answer to whether pervasive energy accounting is available
	// (kern.pervasive_energy); queried once per process.
	static dispatch_once_t once_token;
	static int pervasive_energy = 0;
	dispatch_once(&once_token, ^{
		size_t size = sizeof(pervasive_energy);
		T_SETUPBEGIN;
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.pervasive_energy",
				&pervasive_energy, &size, NULL, 0),
				"sysctl kern.pervasive_energy");
		T_SETUPEND;
	});
	return pervasive_energy != 0;
}
71 
unsigned int
perf_level_count(void)
{
	// Cached number of CPU performance levels (hw.nperflevels); queried
	// once per process.
	static dispatch_once_t once_token;
	static unsigned int nlevels = 0;
	dispatch_once(&once_token, ^{
		size_t size = sizeof(nlevels);
		T_SETUPBEGIN;
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.nperflevels", &nlevels,
				&size, NULL, 0),
				"sysctl hw.nperflevels");
		T_SETUPEND;
	});
	return nlevels;
}
87 
// Return a cached array of perf level name strings (hw.perflevel<N>.name),
// indexed by perf level.  Storage exists for at most two levels; entries
// beyond what the hardware reports remain empty strings.
static const char **
_perf_level_names(void)
{
	static char names[2][32] = { 0 };
	static dispatch_once_t names_once;
	dispatch_once(&names_once, ^{
		T_SETUPBEGIN;
		unsigned int count = perf_level_count();
		// Clamp to the available storage so a machine reporting more than
		// two perf levels cannot write past the end of names[].
		if (count > 2) {
			count = 2;
		}
		for (unsigned int i = 0; i < count; i++) {
			char sysctl_name[64] = { 0 };
			snprintf(sysctl_name, sizeof(sysctl_name), "hw.perflevel%d.name",
					i);
			T_QUIET;
			T_ASSERT_POSIX_SUCCESS(sysctlbyname(sysctl_name, &names[i],
					&(size_t){ sizeof(names[i]) }, NULL, 0),
					"sysctl %s", sysctl_name);
		}
		T_SETUPEND;
	});
	// Pointers to static storage, safe to return to callers.
	static const char *ret_names[] = {
		(char *)&names[0],
		(char *)&names[1],
	};
	return ret_names;
}
113 
// Return the cached name of the given perf level index.  No bounds
// checking is performed; callers pass indices below perf_level_count().
const char *
perf_level_name(unsigned int perf_level)
{
	const char **names = _perf_level_names();
	return names[perf_level];
}
119 
// Map a perf level name back to its index; fails the test if the name is
// not one of the levels reported by the hardware.
unsigned int
perf_level_index(const char *name)
{
	const char **level_names = _perf_level_names();
	unsigned int nlevels = perf_level_count();
	for (unsigned int i = 0; i < nlevels; i++) {
		if (strcmp(level_names[i], name) == 0) {
			return i;
		}
	}
	// Terminates the test; control does not continue past this point.
	T_ASSERT_FAIL("cannot find perf level named %s", name);
}
132 
// Bind the calling thread to a cluster of the given type ('P' or 'E')
// via the kern.sched_thread_bind_cluster_type sysctl.
void
bind_to_cluster(char type)
{
	int ret = sysctlbyname("kern.sched_thread_bind_cluster_type", NULL, NULL,
			&type, sizeof(type));
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.sched_thread_bind_cluster_type");
	// Sleep briefly so the thread is guaranteed to have context switched
	// at least once while bound.
	usleep(10000);
}
143 
// Clear the calling thread's cluster binding.  Best effort: any failure
// is deliberately ignored.
static void
_unbind_from_cluster(void)
{
	(void)sysctlbyname("kern.sched_thread_bind_cluster_type", NULL, NULL,
			NULL, 0);
}
151 
152 void
run_on_all_perf_levels(void)153 run_on_all_perf_levels(void)
154 {
155 	if (perf_level_count() == 1) {
156 		return;
157 	}
158 
159 	T_SETUPBEGIN;
160 	bind_to_cluster('P');
161 	bind_to_cluster('E');
162 	// Return to the kernel to synchronize timings with the scheduler.
163 	(void)getppid();
164 	_unbind_from_cluster();
165 	T_SETUPEND;
166 }
167 
// Enter an exclave on the current CPU by poking the
// debug.test.exclaves_hello_exclave_test sysctl.
static void
_run_on_exclaves(void)
{
	int64_t output = 0;
	size_t output_size = sizeof(output);
	int64_t input = 0;
	int ret = sysctlbyname("debug.test.exclaves_hello_exclave_test", &output,
			&output_size, &input, sizeof(input));
	T_QUIET;
	// Fixed typo in the failure message ("systcl" -> "sysctl").
	T_ASSERT_POSIX_SUCCESS(ret, "sysctl debug.test.exclaves_hello_exclave_test");
}
179 
180 void
run_in_exclaves_on_all_perf_levels(void)181 run_in_exclaves_on_all_perf_levels(void)
182 {
183 	if (perf_level_count() == 1) {
184 		_run_on_exclaves();
185 	}
186 
187 	T_SETUPBEGIN;
188 	bind_to_cluster('P');
189 	_run_on_exclaves();
190 	bind_to_cluster('E');
191 	_run_on_exclaves();
192 	_unbind_from_cluster();
193 	T_SETUPEND;
194 }
195 
196 uint64_t
ns_from_mach(uint64_t mach_time)197 ns_from_mach(uint64_t mach_time)
198 {
199 	mach_timebase_info_data_t tbi = { 0 };
200 	mach_timebase_info(&tbi);
201 	return mach_time * tbi.numer / tbi.denom;
202 }
203 
204 uint64_t
ns_from_timeval(struct timeval tv)205 ns_from_timeval(struct timeval tv)
206 {
207 	return (uint64_t)tv.tv_sec * NSEC_PER_SEC + (uint64_t)tv.tv_usec * 1000;
208 }
209 
210 struct timeval
timeval_from_ns(uint64_t ns)211 timeval_from_ns(uint64_t ns)
212 {
213 	return (struct timeval){
214 		.tv_sec = ns / NSEC_PER_SEC,
215 		.tv_usec = (ns % NSEC_PER_SEC) / 1000,
216 	};
217 }
218 
219 uint64_t
ns_from_time_value(struct time_value tv)220 ns_from_time_value(struct time_value tv)
221 {
222 	return (uint64_t)tv.seconds * NSEC_PER_SEC +
223 			(uint64_t)tv.microseconds * 1000;
224 }
225 
226 struct time_value
time_value_from_ns(uint64_t ns)227 time_value_from_ns(uint64_t ns)
228 {
229 	return (struct time_value){
230 		.seconds = (integer_t)(ns / NSEC_PER_SEC),
231 		.microseconds = (ns % NSEC_PER_SEC) / 1000,
232 	};
233 }
234 
235 static void *
spin_role(void * arg)236 spin_role(void *arg)
237 {
238 	volatile uintptr_t *keep_spinning = arg;
239 	while (*keep_spinning) {
240 		;
241 	}
242 	return NULL;
243 }
244 
// Semaphore pair used to hand off with ROLE_WAIT threads: each waiter
// blocks on ws_wait and simultaneously signals ws_start so the starter
// knows the waiter has actually begun waiting.
struct wait_start {
	semaphore_t ws_wait;
	semaphore_t ws_start;
};
249 
250 static void *
wait_role(void * arg)251 wait_role(void *arg)
252 {
253 	struct wait_start *ws = arg;
254 	semaphore_wait_signal(ws->ws_wait, ws->ws_start);
255 	return NULL;
256 }
257 
// Start `n` actor threads with the given roles and return a heap-allocated
// scene describing them.  The roles array cycles: when ROLE_NONE is hit,
// assignment restarts from roles[0].  Returns NULL when n == 0.  The caller
// owns the returned scene and must release it with scene_end().
struct scene *
scene_start(unsigned int n, role_t *roles)
{
	if (n == 0) {
		return NULL;
	}

	T_SETUPBEGIN;

	// n + 1 actors: the extra zeroed entry acts as a ROLE_NONE sentinel
	// that scene_end() uses to find the end of the actor list.
	size_t scene_size = sizeof(struct scene) + (n + 1) * sizeof(struct actor);
	struct scene *scene = malloc(scene_size);
	T_QUIET; T_WITH_ERRNO;
	T_ASSERT_NOTNULL(scene, "scene = malloc(%zu)", scene_size);

	bzero(scene, scene_size);
	// Assign roles to actors, cycling through the roles array and counting
	// how many waiters we must synchronize with below.
	unsigned int role_i = 0;
	unsigned int wait_count = 0;
	for (unsigned int i = 0; i < n; i++) {
		role_t role = roles[role_i];
		if (role == ROLE_NONE) {
			// Reached the end of the roles list -- wrap around.
			role_i = 0;
			role = roles[role_i];
		}
		if (role == ROLE_WAIT) {
			wait_count++;
		}
		scene->scn_actors[i].act_role = role;
		role_i++;
	}

	// Semaphores must exist before any waiter thread is created.
	struct wait_start ws = { 0 };
	kern_return_t kr = semaphore_create(mach_task_self(), &ws.ws_wait,
			SYNC_POLICY_FIFO, 0);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create(... wait ...)");
	kr = semaphore_create(mach_task_self(), &ws.ws_start,
			SYNC_POLICY_FIFO, 0);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "semaphore_create(... start ...)");

	// Launch one thread per actor with the entry point and sync object
	// appropriate to its role.
	for (unsigned int i = 0; i < n; i++) {
		struct actor *act = &scene->scn_actors[i];
		void *(*action)(void *) = NULL;
		void *sync = NULL;
		switch (act->act_role) {
		case ROLE_SPIN:
			sync = &scene->scn_spin_sync;
			action = spin_role;
			break;
		case ROLE_WAIT:
			sync = &ws;
			action = wait_role;
			break;
		default:
			T_ASSERT_FAIL("unexpected role: %d", act->act_role);
		}
		int error = pthread_create(&act->act_thread, NULL, action, sync);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create");
	}

	T_SETUPEND;
	// Block until every waiter has signaled that it is actually waiting,
	// then destroy the start semaphore -- it is no longer needed.
	for (unsigned int i = 0; i < wait_count; i++) {
		semaphore_wait(ws.ws_start);
	}
	semaphore_destroy(mach_task_self(), ws.ws_start);
	// Stash the wait semaphore in the scene so scene_end() can wake the
	// waiters.
	scene->scn_wait_sync = (void *)(uintptr_t)ws.ws_wait;
	return scene;
}
324 
// Tear down a scene started by scene_start(): release all actor threads,
// join them, and free the scene.  Safe to call with NULL.
void
scene_end(struct scene *scene)
{
	if (!scene) {
		return;
	}

	// Release spinners first (they poll this flag), then wake all waiters
	// blocked on the wait semaphore, before joining any thread.
	scene->scn_spin_sync = 0;
	semaphore_signal_all((semaphore_t)scene->scn_wait_sync);
	semaphore_destroy(mach_task_self(), (semaphore_t)scene->scn_wait_sync);
	// Walk actors up to the zeroed ROLE_NONE sentinel appended by
	// scene_start().
	struct actor *act = scene->scn_actors;
	while (act->act_role != ROLE_NONE) {
		int error = pthread_join(act->act_thread, NULL);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_join");
		act++;
	}
	free(scene);
}
343 
344 pid_t
launch_helper(char * name)345 launch_helper(char *name)
346 {
347 	char bin_path[MAXPATHLEN];
348 	uint32_t path_size = sizeof(bin_path);
349 
350 	T_SETUPBEGIN;
351 	int ret = _NSGetExecutablePath(bin_path, &path_size);
352 	T_QUIET;
353 	T_ASSERT_EQ(ret, 0, "_NSGetExecutablePath()");
354 	pid_t pid = 0;
355 	ret = dt_launch_tool(&pid, (char *[]){ bin_path, name, NULL}, false, NULL,
356 	    NULL);
357 	T_QUIET;
358 	T_ASSERT_POSIX_SUCCESS(ret, "dt_launch_tool(... %s, %s ...)", bin_path,
359 	    name);
360 	T_SETUPEND;
361 
362 	return pid;
363 }
364