// Copyright (c) 2023 Apple Inc.  All rights reserved.

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>

#include <darwintest.h>
#include <darwintest_utils.h>

#include "sched_runqueue_harness.h"
#include "sched_harness_impl.h"

FILE *_log = NULL;

static test_hw_topology_t current_hw_topology = {0};
static const int default_cpu = 0;

/* Mocking mach_absolute_time() */

static mach_timebase_info_data_t _timebase_info;
static uint64_t _curr_time = 0;

uint64_t
mock_absolute_time(void)
{
	return _curr_time;
}

void
set_mock_time(uint64_t timestamp)
{
	fprintf(_log, "\tnew mock time: %llu (%lluus)\n", timestamp,
	    timestamp * _timebase_info.numer / _timebase_info.denom / NSEC_PER_USEC);
	_curr_time = timestamp;
}

void
increment_mock_time(uint64_t added_time)
{
	set_mock_time(_curr_time + added_time);
}

void
increment_mock_time_us(uint64_t us)
{
	fprintf(_log, "\tadding mock microseconds: %lluus\n", us);
	increment_mock_time((us * NSEC_PER_USEC) * _timebase_info.denom / _timebase_info.numer);
}
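
/*
 * Illustrative sketch (not part of the harness): tests drive the mocked
 * clock directly, so time-dependent scheduler behavior can be exercised
 * deterministically. The durations here are hypothetical example values:
 *
 *	set_mock_time(0);
 *	increment_mock_time_us(10000);        // advance the mock clock by 10ms
 *	uint64_t now = mock_absolute_time();  // returns the mocked timestamp
 */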

/* Test harness utilities */

static void
cleanup_runqueue_harness(void)
{
	fclose(_log);
}

void
set_hw_topology(test_hw_topology_t hw_topology)
{
	current_hw_topology = hw_topology;
}

test_hw_topology_t
get_hw_topology(void)
{
	return current_hw_topology;
}

int
cpu_id_to_cluster_id(int cpu_id)
{
	test_hw_topology_t topo = get_hw_topology();
	int cpu_index = 0;
	for (int p = 0; p < topo.num_psets; p++) {
		for (int c = 0; c < topo.psets[p].num_cpus; c++) {
			if (cpu_index == cpu_id) {
				return p;
			}
			cpu_index++;
		}
	}
	T_QUIET; T_ASSERT_FAIL("cpu id %d never found out of %d cpus", cpu_id, cpu_index);
}

int
cluster_id_to_cpu_id(int cluster_id)
{
	test_hw_topology_t topo = get_hw_topology();
	int cpu_index = 0;
	for (int p = 0; p < topo.num_psets; p++) {
		if (p == cluster_id) {
			return cpu_index;
		}
		cpu_index += topo.psets[p].num_cpus;
	}
	T_QUIET; T_ASSERT_FAIL("pset id %d never found out of %d psets", cluster_id, topo.num_psets);
}
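
/*
 * Illustrative sketch: cpu ids are assigned in pset order, so for a
 * hypothetical topology with two psets of 2 and 4 CPUs, cpu 3 lives in
 * cluster 1 and cluster 1's first cpu is cpu 2:
 *
 *	cpu_id_to_cluster_id(3);   // -> 1
 *	cluster_id_to_cpu_id(1);   // -> 2
 */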

static char _log_filepath[MAXPATHLEN];
static bool auto_current_thread_disabled = false;

void
init_harness_logging(char *test_name)
{
	kern_return_t kr;
	kr = mach_timebase_info(&_timebase_info);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_timebase_info");
	auto_current_thread_disabled = false;

	/* Set up debugging log of harness events */
	strcpy(_log_filepath, test_name);
	strcat(_log_filepath, "_test_log.txt");
	dt_resultfile(_log_filepath, sizeof(_log_filepath));
	_log = fopen(_log_filepath, "w+");
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NE(_log, NULL, "fopen");
	T_LOG("For debugging, see log of harness events in \"%s\"", _log_filepath);

	T_ATEND(cleanup_runqueue_harness);
}

void
init_runqueue_harness(void)
{
	init_harness_logging(T_NAME);
	set_hw_topology(single_core);
	impl_init_runqueue();
}
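
/*
 * Illustrative sketch: a test that wants something other than the
 * single_core default can perform the same prologue itself, overriding
 * the topology before the impl initializes per-CPU state (multi_core is
 * a hypothetical topology name):
 *
 *	init_harness_logging(T_NAME);
 *	set_hw_topology(multi_core);
 *	impl_init_runqueue();
 */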

struct thread_group *
create_tg(int interactivity_score)
{
	struct thread_group *tg = impl_create_tg(interactivity_score);
	fprintf(_log, "\tcreated TG %p w/ interactivity_score %d\n", (void *)tg, interactivity_score);
	return tg;
}

test_thread_t
create_thread(int th_sched_bucket, struct thread_group *tg, int pri)
{
	test_thread_t thread = impl_create_thread(th_sched_bucket, tg, pri);
	fprintf(_log, "\tcreated thread %p w/ bucket %d, tg %p, pri %d\n",
	    (void *)thread, th_sched_bucket, (void *)tg, pri);
	return thread;
}
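
/*
 * Illustrative sketch: test threads hang off a thread group whose
 * interactivity score feeds the scheduling policy under test. The bucket
 * constant and priority below are hypothetical example values:
 *
 *	struct thread_group *tg = create_tg(0);
 *	test_thread_t t1 = create_thread(TH_BUCKET_SHARE_DF, tg, 31);
 */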

void
set_thread_sched_mode(test_thread_t thread, int mode)
{
	fprintf(_log, "\tset thread %p sched_mode to %d\n", (void *)thread, mode);
	impl_set_thread_sched_mode(thread, mode);
}

void
set_thread_processor_bound(test_thread_t thread, int cpu_id)
{
	fprintf(_log, "\tset thread %p processor-bound to cpu %d\n", (void *)thread, cpu_id);
	impl_set_thread_processor_bound(thread, cpu_id);
}

void
cpu_set_thread_current(int cpu_id, test_thread_t thread)
{
	impl_cpu_set_thread_current(cpu_id, thread);
	fprintf(_log, "\tset %p as current thread on cpu %d\n", (void *)thread, cpu_id);
}

bool
runqueue_empty(test_runq_target_t runq_target)
{
	return dequeue_thread_expect(runq_target, NULL);
}

static int
runq_target_to_cpu_id(test_runq_target_t runq_target)
{
	switch (runq_target.target_type) {
	case TEST_RUNQ_TARGET_TYPE_CPU:
		return runq_target.target_id;
	case TEST_RUNQ_TARGET_TYPE_CLUSTER:
		return cluster_id_to_cpu_id(runq_target.target_id);
	default:
		T_ASSERT_FAIL("unexpected type %d", runq_target.target_type);
	}
}

int
get_default_cpu(void)
{
	return default_cpu;
}

test_runq_target_t default_target = {
	.target_type = TEST_RUNQ_TARGET_TYPE_CPU,
	.target_id = default_cpu,
};

static void
cpu_enqueue_thread(int cpu_id, test_thread_t thread)
{
	fprintf(_log, "\tenqueued %p to cpu %d\n", (void *)thread, cpu_id);
	impl_cpu_enqueue_thread(cpu_id, thread);
}

test_runq_target_t
cluster_target(int cluster_id)
{
	test_runq_target_t target = {
		.target_type = TEST_RUNQ_TARGET_TYPE_CLUSTER,
		.target_id = cluster_id,
	};
	return target;
}

test_runq_target_t
cpu_target(int cpu_id)
{
	test_runq_target_t target = {
		.target_type = TEST_RUNQ_TARGET_TYPE_CPU,
		.target_id = cpu_id,
	};
	return target;
}
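
/*
 * Illustrative sketch: a runqueue target names either one CPU or a whole
 * cluster; cluster targets resolve to the cluster's first CPU whenever an
 * operation needs a concrete cpu id (see runq_target_to_cpu_id above):
 *
 *	enqueue_thread(cpu_target(2), t1);      // enqueue directly on cpu 2
 *	enqueue_thread(cluster_target(1), t2);  // resolves via cluster_id_to_cpu_id()
 */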

void
enqueue_thread(test_runq_target_t runq_target, test_thread_t thread)
{
	int cpu_id = runq_target_to_cpu_id(runq_target);
	cpu_enqueue_thread(cpu_id, thread);
}

void
enqueue_threads(test_runq_target_t runq_target, int num_threads, ...)
{
	va_list args;
	va_start(args, num_threads);
	for (int i = 0; i < num_threads; i++) {
		test_thread_t thread = va_arg(args, test_thread_t);
		enqueue_thread(runq_target, thread);
	}
	va_end(args);
}

void
enqueue_threads_arr(test_runq_target_t runq_target, int num_threads, test_thread_t *threads)
{
	for (int i = 0; i < num_threads; i++) {
		enqueue_thread(runq_target, threads[i]);
	}
}

void
enqueue_threads_rand_order(test_runq_target_t runq_target, unsigned int random_seed, int num_threads, ...)
{
	va_list args;
	va_start(args, num_threads);
	test_thread_t *tmp = (test_thread_t *)malloc(sizeof(test_thread_t) * (size_t)num_threads);
	for (int i = 0; i < num_threads; i++) {
		test_thread_t thread = va_arg(args, test_thread_t);
		tmp[i] = thread;
	}
	enqueue_threads_arr_rand_order(runq_target, random_seed, num_threads, tmp);
	free(tmp);
	va_end(args);
}

void
enqueue_threads_arr_rand_order(test_runq_target_t runq_target, unsigned int random_seed, int num_threads, test_thread_t *threads)
{
	test_thread_t scratch_space[num_threads];
	for (int i = 0; i < num_threads; i++) {
		scratch_space[i] = threads[i];
	}
	/* Fisher-Yates shuffle, seeded so a failing order can be reproduced */
	srand(random_seed);
	for (int i = 0; i < num_threads; i++) {
		int rand_ind = (rand() % (num_threads - i)) + i;
		test_thread_t tmp = scratch_space[i];
		scratch_space[i] = scratch_space[rand_ind];
		scratch_space[rand_ind] = tmp;
	}
	enqueue_threads_arr(runq_target, num_threads, scratch_space);
}
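
/*
 * Illustrative sketch: enqueueing in seeded-random order checks that
 * dequeue order is determined by scheduling policy rather than insertion
 * order, while remaining reproducible from the seed (values hypothetical):
 *
 *	enqueue_threads_rand_order(default_target, 0xABCD, 3, t1, t2, t3);
 */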

bool
dequeue_thread_expect(test_runq_target_t runq_target, test_thread_t expected_thread)
{
	int cpu_id = runq_target_to_cpu_id(runq_target);
	test_thread_t chosen_thread = impl_cpu_dequeue_thread(cpu_id);
	fprintf(_log, "%s: dequeued %p from cpu %d, expecting %p\n", chosen_thread == expected_thread ?
	    "PASS" : "FAIL", (void *)chosen_thread, cpu_id, (void *)expected_thread);
	if (chosen_thread != expected_thread) {
		return false;
	}
	if (expected_thread != NULL && auto_current_thread_disabled == false) {
		/*
		 * Additionally verify that chosen_thread still gets returned as the highest
		 * thread, even when compared against the remaining runqueue as the currently
		 * running thread.
		 */
		cpu_set_thread_current(cpu_id, expected_thread);
		bool pass = cpu_dequeue_thread_expect_compare_current(cpu_id, expected_thread);
		if (pass) {
			pass = cpu_check_preempt_current(cpu_id, false);
		}
		impl_cpu_clear_thread_current(cpu_id);
		fprintf(_log, "\tcleared current thread\n");
		return pass;
	}
	return true;
}
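
/*
 * Illustrative sketch: unless auto-current is disabled, each expectation
 * also re-checks the chosen thread against the remaining runqueue as the
 * running thread, so one call validates both the dequeue choice and the
 * no-preemption decision:
 *
 *	T_EXPECT_TRUE(dequeue_thread_expect(default_target, t1), "highest first");
 *	T_EXPECT_TRUE(runqueue_empty(default_target), "runqueue drained");
 */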

int
dequeue_threads_expect_ordered(test_runq_target_t runq_target, int num_threads, ...)
{
	va_list args;
	va_start(args, num_threads);
	int first_bad_index = -1;
	for (int i = 0; i < num_threads; i++) {
		test_thread_t thread = va_arg(args, test_thread_t);
		bool result = dequeue_thread_expect(runq_target, thread);
		if ((result == false) && (first_bad_index == -1)) {
			first_bad_index = i;
			/* Instead of early-returning, keep dequeueing threads so we can log the information */
		}
	}
	va_end(args);
	return first_bad_index;
}

int
dequeue_threads_expect_ordered_arr(test_runq_target_t runq_target, int num_threads, test_thread_t *threads)
{
	int first_bad_index = -1;
	for (int i = 0; i < num_threads; i++) {
		bool result = dequeue_thread_expect(runq_target, threads[i]);
		if ((result == false) && (first_bad_index == -1)) {
			first_bad_index = i;
			/* Instead of early-returning, keep dequeueing threads so we can log the information */
		}
	}
	return first_bad_index;
}
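
/*
 * Illustrative sketch: the ordered variants return -1 on success or the
 * index of the first mismatch, which a test typically asserts on:
 *
 *	int bad = dequeue_threads_expect_ordered(default_target, 3, t1, t2, t3);
 *	T_EXPECT_EQ(bad, -1, "threads dequeued in expected priority order");
 */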

bool
cpu_dequeue_thread_expect_compare_current(int cpu_id, test_thread_t expected_thread)
{
	test_thread_t chosen_thread = impl_cpu_dequeue_thread_compare_current(cpu_id);
	fprintf(_log, "%s: dequeued %p from cpu %d, expecting current %p\n", chosen_thread == expected_thread ?
	    "PASS" : "FAIL", (void *)chosen_thread, cpu_id, (void *)expected_thread);
	return chosen_thread == expected_thread;
}

bool
cpu_check_preempt_current(int cpu_id, bool preemption_expected)
{
	bool preempting = impl_processor_csw_check(cpu_id);
	fprintf(_log, "%s: would preempt on cpu %d? %d, expecting to preempt? %d\n", preempting == preemption_expected ?
	    "PASS" : "FAIL", cpu_id, preempting, preemption_expected);
	return preempting == preemption_expected;
}

bool
tracepoint_expect(uint64_t trace_code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4)
{
	uint64_t popped_trace_code, popped_arg1, popped_arg2, popped_arg3, popped_arg4;
	impl_pop_tracepoint(&popped_trace_code, &popped_arg1, &popped_arg2, &popped_arg3, &popped_arg4);
	bool pass = (trace_code == popped_trace_code) && (arg1 == popped_arg1) &&
	    (arg2 == popped_arg2) && (arg3 == popped_arg3) && (arg4 == popped_arg4);
	fprintf(_log, "%s: expected code %llx arg1 %llx arg2 %llx arg3 %llx arg4 %llx\n", pass ? "PASS" : "FAIL",
	    trace_code, arg1, arg2, arg3, arg4);
	if (pass == false) {
		fprintf(_log, "\tfound code %llx arg1 %llx arg2 %llx arg3 %llx arg4 %llx\n",
		    popped_trace_code, popped_arg1, popped_arg2, popped_arg3, popped_arg4);
	}
	return pass;
}
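
/*
 * Illustrative sketch: tracepoint_expect() pops one recorded tracepoint
 * per call and compares all five fields. The code and args below are
 * hypothetical placeholders:
 *
 *	T_EXPECT_TRUE(tracepoint_expect(TEST_TRACE_CODE, 1, 2, 3, 4), "tracepoint emitted");
 */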

void
disable_auto_current_thread(void)
{
	auto_current_thread_disabled = true;
}

void
reenable_auto_current_thread(void)
{
	auto_current_thread_disabled = false;
}
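
/*
 * Illustrative sketch: a test that manages the current thread itself, e.g.
 * to check preemption against a specific running thread, disables the
 * automatic compare-against-current step and restores it afterwards:
 *
 *	disable_auto_current_thread();
 *	cpu_set_thread_current(get_default_cpu(), t1);
 *	T_EXPECT_TRUE(cpu_check_preempt_current(get_default_cpu(), true), "preempts t1");
 *	reenable_auto_current_thread();
 */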