// Copyright (c) 2023 Apple Inc.  All rights reserved.

#include <stdint.h>
#include <stdio.h>
#include "../../../bsd/sys/kdebug.h" // Want tracecodes from source without searching BSD headers

/* Harness interface */
#include "sched_clutch_harness.h"

/* Include kernel header dependencies */
11 #include "shadow_headers/misc_needed_defines.h"
12 #include <kern/sched_common.h>
13 
14 /* Header for Clutch policy code under-test */
15 #include <kern/sched_clutch.h>
16 
17 /* Include non-header dependencies */
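/*
 * Shim kdebug emission into the harness: drop the kdebug type/filter
 * argument (a0) and the final argument (a6), and forward the trace code
 * (a1) plus four payload arguments to clutch_impl_log_tracepoint() so
 * tests can assert on emitted tracepoints.
 */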
#define KERNEL_DEBUG_CONSTANT_IST(a0, a1, a2, a3, a4, a5, a6) clutch_impl_log_tracepoint(a1, a2, a3, a4, a5)
#include "shadow_headers/misc_needed_deps.c"
#include "shadow_headers/sched_prim.c"

static test_hw_topology_t curr_hw_topo = {
	.psets = NULL,
	.num_psets = 0,
	.total_cpus = 0,
};
static int _curr_cpu = 0;

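/*
 * Mock of the kernel's current_processor(). Tests drive which CPU appears
 * to be executing by setting _curr_cpu (0 selects the boot processor).
 */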
processor_t
current_processor(void)
{
	if (_curr_cpu == 0) {
		/* Assumes boot CPU of id 0 */
		return master_processor;
	} else {
		return processor_array[_curr_cpu];
	}
}

unsigned int
ml_get_cluster_count(void)
{
	return (unsigned int)curr_hw_topo.num_psets;
}

unsigned int
ml_get_cpu_count(void)
{
	return (unsigned int)curr_hw_topo.total_cpus;
}

/* Mocked-out Clutch functions */
static boolean_t
sched_thread_sched_pri_promoted(thread_t thread)
{
	(void)thread;
	return FALSE;
}

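/* Shorthand so harness code can address processors by CPU id */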
#define cpus processor_array

/* Clutch policy code under-test, safe to include now after satisfying its dependencies */
#include <kern/sched_clutch.c>
#include <kern/sched_common.c>
#include <kern/processor.c>

/* Realtime policy code under-test */
#include <kern/sched_rt.c>

/* Implementation of sched_clutch_harness.h interface */

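/*
 * Highest priority covered by each root bucket, indexed by sched_bucket_t
 * (FIXPRI, then the timeshare buckets FG/IN/DF/UT/BG).
 */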
int root_bucket_to_highest_pri[TH_BUCKET_SCHED_MAX] = {
	MAXPRI_USER,
	BASEPRI_FOREGROUND,
	BASEPRI_USER_INITIATED,
	BASEPRI_DEFAULT,
	BASEPRI_UTILITY,
	MAXPRI_THROTTLE
};

int clutch_interactivity_score_max = -1;
uint64_t clutch_root_bucket_wcel_us[TH_BUCKET_SCHED_MAX];
uint64_t clutch_root_bucket_warp_us[TH_BUCKET_SCHED_MAX];
const unsigned int CLUTCH_THREAD_SELECT = MACH_SCHED_CLUTCH_THREAD_SELECT;

/* Implementation of sched_runqueue_harness.h interface */

static test_pset_t single_pset = {
	.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
	.num_cpus = 1,
	.cluster_id = 0,
	.die_id = 0,
};
test_hw_topology_t single_core = {
	.psets = &single_pset,
	.num_psets = 1,
	.total_cpus = 1,
};

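/*
 * Hypothetical usage sketch (illustrative only, using entry points defined
 * in this file):
 *
 *   clutch_impl_init_topology(single_core);
 *   clutch_impl_init_params();
 *   clutch_impl_init_tracepoints();
 *   struct thread_group *tg = clutch_impl_create_tg(INITIAL_INTERACTIVITY_SCORE);
 *   test_thread_t t = clutch_impl_create_thread(TH_BUCKET_SHARE_DF, tg, BASEPRI_DEFAULT);
 */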
char
test_cpu_type_to_char(test_cpu_type_t cpu_type)
{
	switch (cpu_type) {
	case TEST_CPU_TYPE_PERFORMANCE:
		return 'P';
	case TEST_CPU_TYPE_EFFICIENCY:
		return 'E';
	default:
		assert(false);
	}
}

static cluster_type_t
test_cpu_type_to_cluster_type(test_cpu_type_t cpu_type)
{
	switch (cpu_type) {
	case TEST_CPU_TYPE_PERFORMANCE:
		return CLUSTER_TYPE_P;
	case TEST_CPU_TYPE_EFFICIENCY:
		return CLUSTER_TYPE_E;
	default:
		return CLUSTER_TYPE_SMP;
	}
}

static uint64_t unique_tg_id = 0;
static uint64_t unique_thread_id = 0;
static bool first_boot = true;

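/*
 * Boot a mock HW topology exactly once: pset 0 is brought up through the
 * kernel's boot path (processor_bootstrap() and the SCHED() init hooks),
 * later psets are created with pset_create(). Every CPU gets a mock idle
 * thread and starts out PROCESSOR_IDLE.
 */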
void
clutch_impl_init_topology(test_hw_topology_t hw_topology)
{
	printf("Mock HW Topology: %d psets {", hw_topology.num_psets);
	assert(first_boot); // Not supported to initialize more than one topology
	first_boot = false;
	assert(hw_topology.num_psets <= MAX_PSETS);
	int total_cpus = 0;
	for (int i = 0; i < hw_topology.num_psets; i++) {
		assert((total_cpus + hw_topology.psets[i].num_cpus) <= MAX_CPUS);
		printf(" (%d: %d %c CPUs)", i, hw_topology.psets[i].num_cpus, test_cpu_type_to_char(hw_topology.psets[i].cpu_type));
		cluster_type_t cluster_type = test_cpu_type_to_cluster_type(hw_topology.psets[i].cpu_type);
		processor_set_t pset;
		if (i == 0) {
#if __AMP__
			ml_topology_cluster_t boot_cluster;
			boot_cluster.cluster_type = cluster_type;
			mock_topology_info.boot_cluster = &boot_cluster;
#endif /* __AMP__ */
			processor_bootstrap();
			SCHED(init)();
			SCHED(pset_init)(sched_boot_pset);
			SCHED(rt_init_pset)(sched_boot_pset);
			SCHED(processor_init)(master_processor);
			pset = sched_boot_pset;
		} else {
			pset = pset_create(cluster_type, i, i);
		}
		for (int c = total_cpus; c < total_cpus + hw_topology.psets[i].num_cpus; c++) {
			if (c > 0) {
				processor_t processor = (processor_t)malloc(sizeof(struct processor));
				processor_init(processor, c, pset);
			}
			struct thread_group *not_real_idle_tg = create_tg(0);
			thread_t idle_thread = clutch_impl_create_thread(TH_BUCKET_SHARE_BG, not_real_idle_tg, IDLEPRI);
			idle_thread->bound_processor = cpus[c];
			idle_thread->state = (TH_RUN | TH_IDLE);
			cpus[c]->idle_thread = idle_thread;
			cpus[c]->active_thread = cpus[c]->idle_thread;
			pset_update_processor_state(pset, cpus[c], PROCESSOR_IDLE);
		}
		total_cpus += hw_topology.psets[i].num_cpus;
	}
	processor_avail_count = total_cpus;
	printf(" }\n");
	/* After mock idle thread creation, reset thread/TG start IDs, as the idle threads shouldn't count! */
	unique_tg_id = 0;
	unique_thread_id = 0;
	if (SCHED(cpu_init_completed) != NULL) {
		SCHED(cpu_init_completed)();
	}
	SCHED(rt_init_completed)();
}

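/*
 * Fixed-size tracepoint log: one flat array per registered trace code,
 * storing NUM_TRACEPOINT_FIELDS words per event (code + 4 args).
 * curr_tracepoint_inds is the producer cursor; expect_tracepoint_inds is
 * the consumer cursor used by clutch_impl_pop_tracepoint().
 */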
#define MAX_LOGGED_TRACE_CODES 10
#define NUM_TRACEPOINT_FIELDS 5
static uint64_t logged_trace_codes[MAX_LOGGED_TRACE_CODES];
static uint32_t logged_trace_codes_ind = 0;
#define MAX_LOGGED_TRACEPOINTS 10000
static uint64_t *logged_tracepoints[MAX_LOGGED_TRACE_CODES];
static uint32_t curr_tracepoint_inds[MAX_LOGGED_TRACE_CODES];
static uint32_t expect_tracepoint_inds[MAX_LOGGED_TRACE_CODES];

void
clutch_impl_init_params(void)
{
	/* Read out Clutch-internal fields for use by the test harness */
	clutch_interactivity_score_max = 2 * sched_clutch_bucket_group_interactive_pri;
	for (int b = TH_BUCKET_FIXPRI; b < TH_BUCKET_SCHED_MAX; b++) {
		clutch_root_bucket_wcel_us[b] = sched_clutch_root_bucket_wcel_us[b] == SCHED_CLUTCH_INVALID_TIME_32 ? 0 : sched_clutch_root_bucket_wcel_us[b];
		clutch_root_bucket_warp_us[b] = sched_clutch_root_bucket_warp_us[b] == SCHED_CLUTCH_INVALID_TIME_32 ? 0 : sched_clutch_root_bucket_warp_us[b];
	}
}

void
clutch_impl_add_logged_trace_code(uint64_t tracepoint)
{
	assert(logged_trace_codes_ind < MAX_LOGGED_TRACE_CODES);
	logged_trace_codes[logged_trace_codes_ind++] = tracepoint;
}

void
clutch_impl_init_tracepoints(void)
{
	/* All filter-included tracepoints */
	clutch_impl_add_logged_trace_code(CLUTCH_THREAD_SELECT);
	/* Init harness-internal allocators */
	for (int i = 0; i < MAX_LOGGED_TRACE_CODES; i++) {
		logged_tracepoints[i] = malloc(MAX_LOGGED_TRACEPOINTS * NUM_TRACEPOINT_FIELDS * sizeof(uint64_t));
		curr_tracepoint_inds[i] = 0;
		expect_tracepoint_inds[i] = 0;
	}
}

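/*
 * Create a mock thread group. Unless interactivity_score is
 * INITIAL_INTERACTIVITY_SCORE, pre-seed each timeshare bucket group's
 * interactivity counter so the TG starts at a known interactivity score.
 */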
struct thread_group *
clutch_impl_create_tg(int interactivity_score)
{
	struct thread_group *tg = malloc(sizeof(struct thread_group));
	sched_clutch_init_with_thread_group(&tg->tg_sched_clutch, tg);
	if (interactivity_score != INITIAL_INTERACTIVITY_SCORE) {
		for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
			tg->tg_sched_clutch.sc_clutch_groups[bucket].scbg_interactivity_data.scct_count = interactivity_score;
			tg->tg_sched_clutch.sc_clutch_groups[bucket].scbg_interactivity_data.scct_timestamp = mach_absolute_time();
		}
	}
	tg->tg_id = unique_tg_id++;
	return tg;
}

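/*
 * Create a mock thread with just enough state initialized for the Clutch
 * and realtime policy code under test; the thread starts runnable (TH_RUN)
 * but is not yet enqueued on any runqueue.
 */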
test_thread_t
clutch_impl_create_thread(int root_bucket, struct thread_group *tg, int pri)
{
	assert((sched_bucket_t)root_bucket == sched_convert_pri_to_bucket(pri) || (sched_bucket_t)root_bucket == TH_BUCKET_FIXPRI);
	assert(tg != NULL);
	thread_t thread = malloc(sizeof(struct thread));
	thread->base_pri = pri;
	thread->sched_pri = pri;
	thread->sched_flags = 0;
	thread->thread_group = tg;
	thread->th_sched_bucket = root_bucket;
	thread->bound_processor = NULL;
	thread->__runq.runq = PROCESSOR_NULL;
	queue_chain_init(thread->runq_links);
	thread->thread_id = unique_thread_id++;
#if CONFIG_SCHED_EDGE
	thread->th_bound_cluster_enqueued = false;
	thread->th_expired_quantum_on_lower_core = false;
	thread->th_expired_quantum_on_higher_core = false;
	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
		thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
		thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
		thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
	}
#endif /* CONFIG_SCHED_EDGE */
	thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
	thread->reason = AST_NONE;
	thread->sched_mode = TH_MODE_TIMESHARE;
	bzero(&thread->realtime, sizeof(thread->realtime));
	thread->last_made_runnable_time = 0;
	thread->state = TH_RUN;
	return thread;
}

void
impl_set_thread_sched_mode(test_thread_t thread, int mode)
{
	((thread_t)thread)->sched_mode = (sched_mode_t)mode;
}

bool
impl_get_thread_is_realtime(test_thread_t thread)
{
	return ((thread_t)thread)->sched_pri >= BASEPRI_RTQUEUES;
}

void
clutch_impl_set_thread_processor_bound(test_thread_t thread, int cpu_id)
{
	((thread_t)thread)->bound_processor = cpus[cpu_id];
}

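/*
 * Install a thread as the running thread on cpu_id, updating pset
 * bookkeeping (processor state, realtime_map, per-CPU deadline);
 * clutch_impl_cpu_clear_thread_current() reverses this back to idle.
 */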
void
clutch_impl_cpu_set_thread_current(int cpu_id, test_thread_t thread)
{
	cpus[cpu_id]->active_thread = thread;
	cpus[cpu_id]->first_timeslice = TRUE;
	/* Equivalent logic of pset_commit_processor_to_new_thread() */
	pset_update_processor_state(cpus[cpu_id]->processor_set, cpus[cpu_id], PROCESSOR_RUNNING);
	processor_state_update_from_thread(cpus[cpu_id], thread, true);
	if (((thread_t) thread)->sched_pri >= BASEPRI_RTQUEUES) {
		bit_set(cpus[cpu_id]->processor_set->realtime_map, cpu_id);
		cpus[cpu_id]->deadline = ((thread_t) thread)->realtime.deadline;
	} else {
		bit_clear(cpus[cpu_id]->processor_set->realtime_map, cpu_id);
		cpus[cpu_id]->deadline = UINT64_MAX;
	}
}

test_thread_t
clutch_impl_cpu_clear_thread_current(int cpu_id)
{
	test_thread_t thread = cpus[cpu_id]->active_thread;
	cpus[cpu_id]->active_thread = cpus[cpu_id]->idle_thread;
	bit_clear(cpus[cpu_id]->processor_set->realtime_map, cpu_id);
	pset_update_processor_state(cpus[cpu_id]->processor_set, cpus[cpu_id], PROCESSOR_IDLE);
	processor_state_update_idle(cpus[cpu_id]);
	return thread;
}

static bool
is_logged_clutch_trace_code(uint64_t clutch_trace_code)
{
	for (int i = 0; i < logged_trace_codes_ind; i++) {
		if (logged_trace_codes[i] == clutch_trace_code) {
			return true;
		}
	}
	return false;
}

static bool
is_logged_trace_code(uint64_t trace_code)
{
	if (KDBG_EXTRACT_CLASS(trace_code) == DBG_MACH && KDBG_EXTRACT_SUBCLASS(trace_code) == DBG_MACH_SCHED_CLUTCH) {
		if (is_logged_clutch_trace_code(KDBG_EXTRACT_CODE(trace_code))) {
			return true;
		}
	}
	return false;
}

static int
trace_code_to_ind(uint64_t trace_code)
{
	for (int i = 0; i < logged_trace_codes_ind; i++) {
		if (trace_code == logged_trace_codes[i]) {
			return i;
		}
	}
	return -1;
}

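/*
 * Called via the KERNEL_DEBUG_CONSTANT_IST() shim above. Events for
 * unregistered trace codes are silently dropped; once a code's buffer
 * fills, further events are counted but no longer stored.
 */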
void
clutch_impl_log_tracepoint(uint64_t trace_code, uint64_t a1, uint64_t a2, uint64_t a3, uint64_t a4)
{
	if (is_logged_trace_code(trace_code)) {
		int ind = trace_code_to_ind(KDBG_EXTRACT_CODE(trace_code));
		assert(ind >= 0);
		if (curr_tracepoint_inds[ind] < MAX_LOGGED_TRACEPOINTS) {
			logged_tracepoints[ind][curr_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 0] = KDBG_EXTRACT_CODE(trace_code);
			logged_tracepoints[ind][curr_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 1] = a1;
			logged_tracepoints[ind][curr_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 2] = a2;
			logged_tracepoints[ind][curr_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 3] = a3;
			logged_tracepoints[ind][curr_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 4] = a4;
		} else if (curr_tracepoint_inds[ind] == MAX_LOGGED_TRACEPOINTS) {
			printf("Ran out of pre-allocated memory to log tracepoints (%d points)...will no longer log tracepoints\n",
			    MAX_LOGGED_TRACEPOINTS);
		}
		curr_tracepoint_inds[ind]++;
	}
}

void
clutch_impl_pop_tracepoint(uint64_t clutch_trace_code, uint64_t *arg1, uint64_t *arg2, uint64_t *arg3, uint64_t *arg4)
{
	int ind = trace_code_to_ind(clutch_trace_code);
	assert(ind >= 0);
	if (expect_tracepoint_inds[ind] >= curr_tracepoint_inds[ind]) {
		/* Indicate that no matching logged tracepoint remains to consume */
		*arg1 = -1;
		*arg2 = -1;
		*arg3 = -1;
		*arg4 = -1;
		return;
	}
	assert(logged_tracepoints[ind][expect_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 0] == clutch_trace_code);
	*arg1 = logged_tracepoints[ind][expect_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 1];
	*arg2 = logged_tracepoints[ind][expect_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 2];
	*arg3 = logged_tracepoints[ind][expect_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 3];
	*arg4 = logged_tracepoints[ind][expect_tracepoint_inds[ind] * NUM_TRACEPOINT_FIELDS + 4];
	expect_tracepoint_inds[ind]++;
}

uint64_t
impl_get_thread_tid(test_thread_t thread)
{
	return ((thread_t)thread)->thread_id;
}

#pragma mark - Realtime

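/*
 * Dequeue the next realtime thread from the pset's RT runqueue and refresh
 * the pset's stealable-work accounting.
 */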
static test_thread_t
impl_dequeue_realtime_thread(processor_set_t pset)
{
	thread_t thread = rt_runq_dequeue(&pset->rt_runq);
	pset_update_rt_stealable_state(pset);
	return thread;
}

void
impl_set_thread_realtime(test_thread_t thread, uint32_t period, uint32_t computation, uint32_t constraint, bool preemptible, uint8_t priority_offset, uint64_t deadline)
{
	thread_t t = (thread_t) thread;
	t->realtime.period = period;
	t->realtime.computation = computation;
	t->realtime.constraint = constraint;
	t->realtime.preemptible = preemptible;
	t->realtime.priority_offset = priority_offset;
	t->realtime.deadline = deadline;
}

void
impl_sched_rt_spill_policy_set(unsigned policy)
{
	sched_rt_spill_policy = policy;
}

void
impl_sched_rt_steal_policy_set(unsigned policy)
{
	sched_rt_steal_policy = policy;
}

#pragma mark - IPI Subsystem

sched_ipi_type_t
sched_ipi_action(processor_t dst, thread_t thread, sched_ipi_event_t event)
{
	/* Forward to the policy-specific implementation */
	return SCHED(ipi_policy)(dst, thread, (dst->active_thread == dst->idle_thread), event);
}

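/*
 * Append-only log of sent IPIs, analogous to the tracepoint log:
 * curr_ipi_ind is the producer cursor and expect_ipi_ind the consumer
 * cursor for test-side verification.
 */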
#define MAX_LOGGED_IPIS 10000
typedef struct {
	int cpu_id;
	sched_ipi_type_t ipi_type;
} logged_ipi_t;
static logged_ipi_t logged_ipis[MAX_LOGGED_IPIS];
static uint32_t curr_ipi_ind = 0;
static uint32_t expect_ipi_ind = 0;

void
sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
{
	/* Record the IPI type and where we sent it */
	assert(curr_ipi_ind < MAX_LOGGED_IPIS);
	logged_ipis[curr_ipi_ind].cpu_id = dst->cpu_id;
	logged_ipis[curr_ipi_ind].ipi_type = ipi;
	curr_ipi_ind++;
}

sched_ipi_type_t
sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event)
{
	(void)dst;
	(void)thread;
	(void)dst_idle;
	if (event == SCHED_IPI_EVENT_REBALANCE) {
		return SCHED_IPI_IMMEDIATE;
	}
	/* For now, default to deferred IPI */
	return SCHED_IPI_DEFERRED;
}

sched_ipi_type_t
sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, thread_t thread, sched_ipi_event_t event)
{
	(void)pset;
	(void)dst;
	(void)thread;
	(void)event;
	return SCHED_IPI_DEFERRED;
}
483