1 // Copyright (c) 2023 Apple Inc. All rights reserved.
2
3 #include <stdlib.h>
4 #include <stdio.h>
5
6 #include <darwintest.h>
7 #include <darwintest_utils.h>
8
9 #include "sched_runqueue_harness.h"
10 #include "sched_harness_impl.h"
11
/* Debug log for harness events; opened by init_harness_logging(). */
FILE *_log = NULL;

/* Hardware topology currently simulated by the mocked scheduler (see set_hw_topology()). */
static test_hw_topology_t current_hw_topology = {0};
/* CPU id used when a test does not target a specific CPU or pset. */
static const int default_cpu = 0;
16
17 /* Mocking mach_absolute_time() */
18
/* Timebase used to convert mock absolute-time values to microseconds for logging. */
static mach_timebase_info_data_t _timebase_info;
/* Current mocked mach_absolute_time() value; advanced by set_mock_time()/increment_mock_time(). */
static uint64_t _curr_time = 0;
21
22 uint64_t
mock_absolute_time(void)23 mock_absolute_time(void)
24 {
25 return _curr_time;
26 }
27
28 void
set_mock_time(uint64_t timestamp)29 set_mock_time(uint64_t timestamp)
30 {
31 fprintf(_log, "\tnew mock time: %llu (%lluus)\n", timestamp,
32 timestamp * _timebase_info.numer / _timebase_info.denom / NSEC_PER_USEC);
33 _curr_time = timestamp;
34 }
35
36 void
increment_mock_time(uint64_t added_time)37 increment_mock_time(uint64_t added_time)
38 {
39 set_mock_time(_curr_time + added_time);
40 }
41
42 void
increment_mock_time_us(uint64_t us)43 increment_mock_time_us(uint64_t us)
44 {
45 fprintf(_log, "\tadding mock microseconds: %lluus\n", us);
46 increment_mock_time((us * NSEC_PER_USEC) * _timebase_info.denom / _timebase_info.numer);
47 }
48
49 /* Test harness utilities */
50
51 static void
cleanup_runqueue_harness(void)52 cleanup_runqueue_harness(void)
53 {
54 fclose(_log);
55 }
56
57 void
set_hw_topology(test_hw_topology_t hw_topology)58 set_hw_topology(test_hw_topology_t hw_topology)
59 {
60 current_hw_topology = hw_topology;
61 }
62
63 test_hw_topology_t
get_hw_topology(void)64 get_hw_topology(void)
65 {
66 return current_hw_topology;
67 }
68
69 int
pset_id_to_cpu_id(int pset_id)70 pset_id_to_cpu_id(int pset_id)
71 {
72 test_hw_topology_t topo = get_hw_topology();
73 int cpu_index = 0;
74 for (int p = 0; p < topo.num_psets; p++) {
75 if (p == pset_id) {
76 return cpu_index;
77 }
78 cpu_index += topo.psets[p].num_cpus;
79 }
80 T_QUIET; T_ASSERT_FAIL("pset id %d never found out of %d psets", pset_id, topo.num_psets);
81 }
82
83 int
cpu_id_to_pset_id(int cpu_id)84 cpu_id_to_pset_id(int cpu_id)
85 {
86 test_hw_topology_t topo = get_hw_topology();
87 T_QUIET; T_ASSERT_LT(cpu_id, topo.total_cpus, "cpu id out of bounds");
88 int cpu_count = 0;
89 for (int p = 0; p < topo.num_psets; p++) {
90 cpu_count += topo.psets[p].num_cpus;
91 if (cpu_id < cpu_count) {
92 return p;
93 }
94 }
95 T_QUIET; T_ASSERT_FAIL("failed to find pset for cpu %d somehow", cpu_id);
96 }
97
/* Path of the harness debug log file; filled in by init_harness_logging(). */
static char _log_filepath[MAXPATHLEN];
/* When true, dequeue_thread_expect() skips the compare-against-current re-check. */
static bool auto_current_thread_disabled = false;
100
101 void
init_harness_logging(char * test_name)102 init_harness_logging(char *test_name)
103 {
104 kern_return_t kr;
105 kr = mach_timebase_info(&_timebase_info);
106 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_timebase_info");
107 auto_current_thread_disabled = false;
108
109 /* Set up debugging log of harness events */
110 strcpy(_log_filepath, test_name);
111 strcat(_log_filepath, "_test_log.txt");
112 dt_resultfile(_log_filepath, sizeof(_log_filepath));
113 _log = fopen(_log_filepath, "w+");
114 T_QUIET; T_WITH_ERRNO; T_ASSERT_NE(_log, NULL, "fopen");
115 T_LOG("For debugging, see log of harness events in \"%s\"", _log_filepath);
116
117 T_ATEND(cleanup_runqueue_harness);
118 }
119
120 void
init_runqueue_harness(void)121 init_runqueue_harness(void)
122 {
123 init_harness_logging(T_NAME);
124 set_hw_topology(single_core);
125 impl_init_runqueue();
126 }
127
128 struct thread_group *
create_tg(int interactivity_score)129 create_tg(int interactivity_score)
130 {
131 struct thread_group *tg = impl_create_tg(interactivity_score);
132 fprintf(_log, "\tcreated TG %p w/ interactivity_score %d\n", (void *)tg, interactivity_score);
133 return tg;
134 }
135
136 test_thread_t
create_thread(int th_sched_bucket,struct thread_group * tg,int pri)137 create_thread(int th_sched_bucket, struct thread_group *tg, int pri)
138 {
139 test_thread_t thread = impl_create_thread(th_sched_bucket, tg, pri);
140 fprintf(_log, "\tcreated thread %p w/ bucket %d, tg %p, pri %d\n",
141 (void *)thread, th_sched_bucket, (void *)tg, pri);
142 return thread;
143 }
144
145 void
set_thread_sched_mode(test_thread_t thread,int mode)146 set_thread_sched_mode(test_thread_t thread, int mode)
147 {
148 fprintf(_log, "\tset thread %p sched_mode to %d\n", (void *)thread, mode);
149 impl_set_thread_sched_mode(thread, mode);
150 }
151
152 void
set_thread_processor_bound(test_thread_t thread,int cpu_id)153 set_thread_processor_bound(test_thread_t thread, int cpu_id)
154 {
155 fprintf(_log, "\tset thread %p processor-bound to cpu %d\n", (void *)thread, cpu_id);
156 impl_set_thread_processor_bound(thread, cpu_id);
157 }
158
159 void
cpu_set_thread_current(int cpu_id,test_thread_t thread)160 cpu_set_thread_current(int cpu_id, test_thread_t thread)
161 {
162 impl_cpu_set_thread_current(cpu_id, thread);
163 fprintf(_log, "\tset %p as current thread on cpu %d\n", thread, cpu_id);
164 }
165
166 test_thread_t
cpu_clear_thread_current(int cpu_id)167 cpu_clear_thread_current(int cpu_id)
168 {
169 fprintf(_log, "\tclearing the current thread from cpu %d\n", cpu_id);
170 return impl_cpu_clear_thread_current(cpu_id);
171 }
172
173 bool
runqueue_empty(test_runq_target_t runq_target)174 runqueue_empty(test_runq_target_t runq_target)
175 {
176 return dequeue_thread_expect(runq_target, NULL);
177 }
178
179 static int
runq_target_to_cpu_id(test_runq_target_t runq_target)180 runq_target_to_cpu_id(test_runq_target_t runq_target)
181 {
182 switch (runq_target.target_type) {
183 case TEST_RUNQ_TARGET_TYPE_CPU:
184 return runq_target.target_id;
185 case TEST_RUNQ_TARGET_TYPE_PSET:
186 return pset_id_to_cpu_id(runq_target.target_id);
187 default:
188 T_ASSERT_FAIL("unexpected type %d", runq_target.target_type);
189 }
190 }
191
192 int
get_default_cpu(void)193 get_default_cpu(void)
194 {
195 return default_cpu;
196 }
197
/* Runqueue target for tests that don't care about placement: the default CPU. */
test_runq_target_t default_target = {
	.target_type = TEST_RUNQ_TARGET_TYPE_CPU,
	.target_id = default_cpu,
};
202
203 static void
cpu_enqueue_thread(int cpu_id,test_thread_t thread)204 cpu_enqueue_thread(int cpu_id, test_thread_t thread)
205 {
206 fprintf(_log, "\tenqueued %p to cpu %d\n", (void *)thread, cpu_id);
207 impl_cpu_enqueue_thread(cpu_id, thread);
208 }
209
210 test_runq_target_t
pset_target(int pset_id)211 pset_target(int pset_id)
212 {
213 test_runq_target_t target = {
214 .target_type = TEST_RUNQ_TARGET_TYPE_PSET,
215 .target_id = pset_id,
216 };
217 return target;
218 }
219
220 test_runq_target_t
cpu_target(int cpu_id)221 cpu_target(int cpu_id)
222 {
223 test_runq_target_t target = {
224 .target_type = TEST_RUNQ_TARGET_TYPE_CPU,
225 .target_id = cpu_id,
226 };
227 return target;
228 }
229
230 void
enqueue_thread(test_runq_target_t runq_target,test_thread_t thread)231 enqueue_thread(test_runq_target_t runq_target, test_thread_t thread)
232 {
233 int cpu_id = runq_target_to_cpu_id(runq_target);
234 cpu_enqueue_thread(cpu_id, thread);
235 }
236
237 void
enqueue_threads(test_runq_target_t runq_target,int num_threads,...)238 enqueue_threads(test_runq_target_t runq_target, int num_threads, ...)
239 {
240 va_list args;
241 va_start(args, num_threads);
242 for (int i = 0; i < num_threads; i++) {
243 test_thread_t thread = va_arg(args, test_thread_t);
244 enqueue_thread(runq_target, thread);
245 }
246 va_end(args);
247 }
248
249 void
enqueue_threads_arr(test_runq_target_t runq_target,int num_threads,test_thread_t * threads)250 enqueue_threads_arr(test_runq_target_t runq_target, int num_threads, test_thread_t *threads)
251 {
252 for (int i = 0; i < num_threads; i++) {
253 enqueue_thread(runq_target, threads[i]);
254 }
255 }
256
257 void
enqueue_threads_rand_order(test_runq_target_t runq_target,unsigned int random_seed,int num_threads,...)258 enqueue_threads_rand_order(test_runq_target_t runq_target, unsigned int random_seed, int num_threads, ...)
259 {
260 va_list args;
261 va_start(args, num_threads);
262 test_thread_t *tmp = (test_thread_t *)calloc(num_threads, sizeof(test_thread_t));
263 for (int i = 0; i < num_threads; i++) {
264 test_thread_t thread = va_arg(args, test_thread_t);
265 tmp[i] = thread;
266 }
267 enqueue_threads_arr_rand_order(runq_target, random_seed, num_threads, tmp);
268 free(tmp);
269 va_end(args);
270 }
271
272 void
enqueue_threads_arr_rand_order(test_runq_target_t runq_target,unsigned int random_seed,int num_threads,test_thread_t * threads)273 enqueue_threads_arr_rand_order(test_runq_target_t runq_target, unsigned int random_seed, int num_threads, test_thread_t *threads)
274 {
275 test_thread_t scratch_space[num_threads];
276 for (int i = 0; i < num_threads; i++) {
277 scratch_space[i] = threads[i];
278 }
279 srand(random_seed);
280 for (int i = 0; i < num_threads; i++) {
281 int rand_ind = (rand() % (num_threads - i)) + i;
282 test_thread_t tmp = scratch_space[i];
283 scratch_space[i] = scratch_space[rand_ind];
284 scratch_space[rand_ind] = tmp;
285 }
286 enqueue_threads_arr(runq_target, num_threads, scratch_space);
287 }
288
/*
 * Dequeue the next thread from the targeted runqueue and check that it is
 * expected_thread (NULL means "expect the runqueue to be empty"). Returns
 * true on a match, false otherwise; the outcome is also recorded in the
 * harness log.
 *
 * When a real (non-NULL, non-realtime) thread is dequeued and the
 * auto-current-thread re-check is enabled, additionally verify that the
 * same thread would still be chosen when it is installed as the CPU's
 * current thread and compared against the remaining runqueue, and that no
 * preemption would be signaled against it.
 */
bool
dequeue_thread_expect(test_runq_target_t runq_target, test_thread_t expected_thread)
{
	int cpu_id = runq_target_to_cpu_id(runq_target);
	test_thread_t chosen_thread = impl_cpu_dequeue_thread(cpu_id);
	fprintf(_log, "%s: dequeued %p from cpu %d, expecting %p\n", chosen_thread == expected_thread ?
	    "PASS" : "FAIL", (void *)chosen_thread, cpu_id, (void *)expected_thread);
	if (chosen_thread != expected_thread) {
		return false;
	}
	if (expected_thread != NULL && auto_current_thread_disabled == false && impl_get_thread_is_realtime(chosen_thread) == false) {
		/*
		 * Additionally verify that chosen_thread still gets returned as the highest
		 * thread, even when compared against the remaining runqueue as the currently
		 * running thread.
		 */
		cpu_set_thread_current(cpu_id, expected_thread);
		bool pass = cpu_dequeue_thread_expect_compare_current(cpu_id, expected_thread);
		if (pass) {
			/* The chosen thread should also not be preempted by anything left queued. */
			pass = cpu_check_preempt_current(cpu_id, false);
		}
		/* Restore the CPU to idle so subsequent dequeues start from a clean state. */
		impl_cpu_clear_thread_current(cpu_id);
		fprintf(_log, "\tcleared current thread\n");
		return pass;
	}
	return true;
}
316
317 int
dequeue_threads_expect_ordered(test_runq_target_t runq_target,int num_threads,...)318 dequeue_threads_expect_ordered(test_runq_target_t runq_target, int num_threads, ...)
319 {
320 va_list args;
321 va_start(args, num_threads);
322 int first_bad_index = -1;
323 for (int i = 0; i < num_threads; i++) {
324 test_thread_t thread = va_arg(args, test_thread_t);
325 bool result = dequeue_thread_expect(runq_target, thread);
326 if ((result == false) && (first_bad_index == -1)) {
327 first_bad_index = i;
328 /* Instead of early-returning, keep dequeueing threads so we can log the information */
329 }
330 }
331 va_end(args);
332 return first_bad_index;
333 }
334
335 int
dequeue_threads_expect_ordered_arr(test_runq_target_t runq_target,int num_threads,test_thread_t * threads)336 dequeue_threads_expect_ordered_arr(test_runq_target_t runq_target, int num_threads, test_thread_t *threads)
337 {
338 int first_bad_index = -1;
339 for (int i = 0; i < num_threads; i++) {
340 bool result = dequeue_thread_expect(runq_target, threads[i]);
341 if ((result == false) && (first_bad_index == -1)) {
342 first_bad_index = i;
343 /* Instead of early-returning, keep dequeueing threads so we can log the information */
344 }
345 }
346 return first_bad_index;
347 }
348
349 bool
cpu_dequeue_thread_expect_compare_current(int cpu_id,test_thread_t expected_thread)350 cpu_dequeue_thread_expect_compare_current(int cpu_id, test_thread_t expected_thread)
351 {
352 test_thread_t chosen_thread = impl_cpu_dequeue_thread_compare_current(cpu_id);
353 fprintf(_log, "%s: dequeued %p from cpu %d, expecting current %p\n", chosen_thread == expected_thread ?
354 "PASS" : "FAIL", (void *)chosen_thread, cpu_id, (void *)expected_thread);
355 return chosen_thread == expected_thread;
356 }
357
358 bool
cpu_check_preempt_current(int cpu_id,bool preemption_expected)359 cpu_check_preempt_current(int cpu_id, bool preemption_expected)
360 {
361 bool preempting = impl_processor_csw_check(cpu_id);
362 fprintf(_log, "%s: would preempt on cpu %d? %d, expecting to preempt? %d\n", preempting == preemption_expected ?
363 "PASS" : "FAIL", cpu_id, preempting, preemption_expected);
364 return preempting == preemption_expected;
365 }
366
367 bool
tracepoint_expect(uint64_t trace_code,uint64_t arg1,uint64_t arg2,uint64_t arg3,uint64_t arg4)368 tracepoint_expect(uint64_t trace_code, uint64_t arg1, uint64_t arg2, uint64_t arg3, uint64_t arg4)
369 {
370 uint64_t popped_trace_code, popped_arg1, popped_arg2, popped_arg3, popped_arg4;
371 impl_pop_tracepoint(&popped_trace_code, &popped_arg1, &popped_arg2, &popped_arg3, &popped_arg4);
372 bool pass = (trace_code == popped_trace_code) && (arg1 == popped_arg1) &&
373 (arg2 == popped_arg2) && (arg3 == popped_arg3) && (arg4 == popped_arg4);
374 fprintf(_log, "%s: expected code %llx arg1 %llx arg2 %llx arg3 %llx arg4 %llx\n", pass ? "PASS" : "FAIL",
375 trace_code, arg1, arg2, arg3, arg4);
376 if (pass == false) {
377 fprintf(_log, "\tfound code %llx arg1 %llx arg2 %llx arg3 %llx arg4 %llx\n",
378 popped_trace_code, popped_arg1, popped_arg2, popped_arg3, popped_arg4);
379 }
380 return pass;
381 }
382
383 void
disable_auto_current_thread(void)384 disable_auto_current_thread(void)
385 {
386 auto_current_thread_disabled = true;
387 }
388
389 void
reenable_auto_current_thread(void)390 reenable_auto_current_thread(void)
391 {
392 auto_current_thread_disabled = false;
393 }
394
395 #pragma mark - Realtime
396
397 void
set_thread_realtime(test_thread_t thread,uint32_t period,uint32_t computation,uint32_t constraint,bool preemptible,uint8_t priority_offset,uint64_t deadline)398 set_thread_realtime(test_thread_t thread, uint32_t period, uint32_t computation, uint32_t constraint, bool preemptible, uint8_t priority_offset, uint64_t deadline)
399 {
400 fprintf(_log, "\tsetting realtime deadline on thread %p: period=0x%x, computation=0x%x, constraint=0x%x,"
401 " preemptible=%s, priority_offset=%x, deadline=%llx\n", (void *) thread, period, computation, constraint,
402 preemptible ? "true" : "false", priority_offset, deadline);
403 impl_set_thread_realtime(thread, period, computation, constraint, preemptible, priority_offset, deadline);
404 }
405