xref: /xnu-12377.41.6/tests/sched/sched_test_harness/sched_clutch_harness_impl.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
// Copyright (c) 2023 Apple Inc.  All rights reserved.

#include <stdint.h>
#include <stdio.h>
#include <sys/kdebug.h>

/* Harness interface */
#include "sched_clutch_harness.h"

/* Include kernel header dependencies */
#include "shadow_headers/misc_needed_defines.h"

/* Header for Clutch policy code under-test */
#include <kern/sched_clutch.h>

/* Include non-header dependencies */
#define KERNEL_DEBUG_CONSTANT_IST(a0, a1, a2, a3, a4, a5, a6) clutch_impl_log_tracepoint(a1, a2, a3, a4, a5)
#include "shadow_headers/misc_needed_deps.c"
#include "shadow_headers/sched_prim.c"

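/*
 * Mocked hardware topology state, consumed by the ml_get_cluster_count()
 * and ml_get_cpu_count() stubs below.
 */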
static test_hw_topology_t curr_hw_topo = {
	.psets = NULL,
	.num_psets = 0,
	.total_cpus = 0,
};
static int _curr_cpu = 0;

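/* Machine-layer topology queries, answered from the mocked topology */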
unsigned int
ml_get_cluster_count(void)
{
	return (unsigned int)curr_hw_topo.num_psets;
}

unsigned int
ml_get_cpu_count(void)
{
	return (unsigned int)curr_hw_topo.total_cpus;
}

/*
 * Mocked HW details
 * The default topology (single_core, below) is 1 pset comprised of 1 CPU,
 * but clutch_impl_init_topology() can mock any topology up to MAX_PSETS/MAX_CPUS.
 */
uint32_t processor_avail_count = 0;

static struct processor_set *psets[MAX_PSETS];
static struct processor *cpus[MAX_CPUS];

/* Boot pset and CPU */
struct processor_set pset0;
struct processor cpu0;

/* pset_nodes indexed by CPU type */
pset_node_t pset_node_by_cpu_type[TEST_CPU_TYPE_MAX];

/* Mocked-out Clutch functions */
static boolean_t
sched_thread_sched_pri_promoted(thread_t thread)
{
	(void)thread;
	return FALSE;
}

/* Clutch policy code under-test, safe to include now after satisfying its dependencies */
#include <kern/sched_clutch.c>
#include <kern/sched_common.c>

/* Realtime policy code under-test */
#include <kern/sched_rt.c>

/* Implementation of sched_clutch_harness.h interface */

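/*
 * Priority associated with each root bucket, indexed by sched_bucket_t;
 * part of the sched_clutch_harness.h interface.
 */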
int root_bucket_to_highest_pri[TH_BUCKET_SCHED_MAX] = {
	MAXPRI_USER,
	BASEPRI_FOREGROUND,
	BASEPRI_USER_INITIATED,
	BASEPRI_DEFAULT,
	BASEPRI_UTILITY,
	MAXPRI_THROTTLE
};

int clutch_interactivity_score_max = -1;
uint64_t clutch_root_bucket_wcel_us[TH_BUCKET_SCHED_MAX];
uint64_t clutch_root_bucket_warp_us[TH_BUCKET_SCHED_MAX];
unsigned int CLUTCH_THREAD_SELECT = -1;

/* Implementation of sched_runqueue_harness.h interface */

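/* Default topology: a single performance pset containing one CPU */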
static test_pset_t single_pset = {
	.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
	.num_cpus = 1,
	.cluster_id = 0,
	.die_id = 0,
};
test_hw_topology_t single_core = {
	.psets = &single_pset,
	.num_psets = 1,
	.total_cpus = 1,
};

static char
test_cpu_type_to_char(test_cpu_type_t cpu_type)
{
	switch (cpu_type) {
	case TEST_CPU_TYPE_PERFORMANCE:
		return 'P';
	case TEST_CPU_TYPE_EFFICIENCY:
		return 'E';
	default:
		return '?';
	}
}

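/*
 * Monotonic IDs handed out to mock thread groups and threads. Reset at the
 * end of clutch_impl_init_topology() so the per-CPU idle threads don't
 * consume IDs.
 */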
static uint64_t unique_tg_id = 0;
static uint64_t unique_thread_id = 0;

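/*
 * Build the mocked psets and CPUs described by hw_topology: one pset_node per
 * distinct CPU type, one idle thread per CPU, and every CPU recommended,
 * available, and initially idle.
 */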
void
clutch_impl_init_topology(test_hw_topology_t hw_topology)
{
	printf("Mock HW Topology: %d psets {", hw_topology.num_psets);
	assert(hw_topology.num_psets <= MAX_PSETS);

	/* Initialize pset nodes for each distinct CPU type. */
	for (int i = 0; i < hw_topology.num_psets; i++) {
		if (pset_node_by_cpu_type[hw_topology.psets[i].cpu_type] == PSET_NODE_NULL) {
			pset_node_by_cpu_type[hw_topology.psets[i].cpu_type] = (pset_node_t) malloc(sizeof(struct pset_node));
			pset_node_t node = pset_node_by_cpu_type[hw_topology.psets[i].cpu_type];
			bzero(&node->pset_map, sizeof(node->pset_map));
			node->psets = PROCESSOR_SET_NULL;
		}
	}

	int total_cpus = 0;
	for (int i = 0; i < hw_topology.num_psets; i++) {
		assert((total_cpus + hw_topology.psets[i].num_cpus) <= MAX_CPUS);
		if (i == 0) {
			psets[0] = &pset0;
		} else {
			psets[i] = (struct processor_set *)malloc(sizeof(struct processor_set));
		}
		psets[i]->pset_cluster_id = i;
		psets[i]->pset_id = i;
		psets[i]->cpu_set_low = total_cpus;
		psets[i]->cpu_set_count = hw_topology.psets[i].num_cpus;
		psets[i]->cpu_bitmask = 0;

		pset_node_t node = pset_node_by_cpu_type[hw_topology.psets[i].cpu_type];
		psets[i]->node = node;
		psets[i]->pset_list = node->psets;
		node->psets = psets[i];
		node->pset_map |= BIT(i);

		printf(" (%d: %d %c CPUs)", i, hw_topology.psets[i].num_cpus, test_cpu_type_to_char(hw_topology.psets[i].cpu_type));
		for (int c = total_cpus; c < total_cpus + hw_topology.psets[i].num_cpus; c++) {
			if (c == 0) {
				cpus[0] = &cpu0;
			} else {
				cpus[c] = (struct processor *)malloc(sizeof(struct processor));
			}
			cpus[c]->cpu_id = c;
			cpus[c]->processor_set = psets[i];
			bit_set(psets[i]->cpu_bitmask, c);
			struct thread_group *not_real_idle_tg = create_tg(0);
			thread_t idle_thread = clutch_impl_create_thread(TH_BUCKET_SHARE_BG, not_real_idle_tg, IDLEPRI);
			idle_thread->bound_processor = cpus[c];
			idle_thread->state = (TH_RUN | TH_IDLE);
			cpus[c]->idle_thread = idle_thread;
			cpus[c]->active_thread = cpus[c]->idle_thread;
			cpus[c]->state = PROCESSOR_IDLE;
		}
		psets[i]->recommended_bitmask = psets[i]->cpu_bitmask;
		psets[i]->cpu_available_map = psets[i]->cpu_bitmask;
		bzero(&psets[i]->realtime_map, sizeof(psets[i]->realtime_map));
		total_cpus += hw_topology.psets[i].num_cpus;
	}
	processor_avail_count = total_cpus;
	printf(" }\n");
	/* After mock idle thread creation, reset thread/TG start IDs, as the idle threads shouldn't count! */
	unique_tg_id = 0;
	unique_thread_id = 0;
}

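/*
 * Tracepoint capture: KERNEL_DEBUG_CONSTANT_IST() is redirected (see the
 * #define above) to clutch_impl_log_tracepoint(), which appends filtered
 * tracepoints here for tests to drain via clutch_impl_pop_tracepoint().
 */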
#define NUM_LOGGED_TRACE_CODES 1
#define NUM_TRACEPOINT_FIELDS 5
static uint64_t logged_trace_codes[NUM_LOGGED_TRACE_CODES];
#define MAX_LOGGED_TRACEPOINTS 10000
static uint64_t *logged_tracepoints = NULL;
static uint32_t curr_tracepoint_ind = 0;
static uint32_t expect_tracepoint_ind = 0;

void
clutch_impl_init_params(void)
{
	/* Read out Clutch-internal fields for use by the test harness */
	clutch_interactivity_score_max = 2 * sched_clutch_bucket_group_interactive_pri;
	for (int b = TH_BUCKET_FIXPRI; b < TH_BUCKET_SCHED_MAX; b++) {
		clutch_root_bucket_wcel_us[b] = sched_clutch_root_bucket_wcel_us[b] == SCHED_CLUTCH_INVALID_TIME_32 ? 0 : sched_clutch_root_bucket_wcel_us[b];
		clutch_root_bucket_warp_us[b] = sched_clutch_root_bucket_warp_us[b] == SCHED_CLUTCH_INVALID_TIME_32 ? 0 : sched_clutch_root_bucket_warp_us[b];
	}
	CLUTCH_THREAD_SELECT = MACH_SCHED_CLUTCH_THREAD_SELECT;
}

void
clutch_impl_init_tracepoints(void)
{
	/* All filter-included tracepoints */
	logged_trace_codes[0] = MACH_SCHED_CLUTCH_THREAD_SELECT;
	/* Init harness-internal allocators */
	logged_tracepoints = malloc(MAX_LOGGED_TRACEPOINTS * NUM_TRACEPOINT_FIELDS * sizeof(uint64_t));
}

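/*
 * Allocate a mock thread group with an initialized clutch. A non-default
 * interactivity_score is seeded into every timeshare clutch bucket group.
 */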
struct thread_group *
clutch_impl_create_tg(int interactivity_score)
{
	struct thread_group *tg = malloc(sizeof(struct thread_group));
	sched_clutch_init_with_thread_group(&tg->tg_sched_clutch, tg);
	if (interactivity_score != INITIAL_INTERACTIVITY_SCORE) {
		for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
			tg->tg_sched_clutch.sc_clutch_groups[bucket].scbg_interactivity_data.scct_count = interactivity_score;
			tg->tg_sched_clutch.sc_clutch_groups[bucket].scbg_interactivity_data.scct_timestamp = mach_absolute_time();
		}
	}
	tg->tg_id = unique_tg_id++;
	return tg;
}

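/*
 * Allocate a mock thread in the given root bucket and thread group at
 * priority pri, initializing only the fields read by the policy code under test.
 */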
test_thread_t
clutch_impl_create_thread(int root_bucket, struct thread_group *tg, int pri)
{
	assert((sched_bucket_t)root_bucket == sched_convert_pri_to_bucket(pri) || (sched_bucket_t)root_bucket == TH_BUCKET_FIXPRI);
	assert(tg != NULL);
	thread_t thread = malloc(sizeof(struct thread));
	thread->base_pri = pri;
	thread->sched_pri = pri;
	thread->sched_flags = 0;
	thread->thread_group = tg;
	thread->th_sched_bucket = root_bucket;
	thread->bound_processor = NULL;
	thread->__runq.runq = PROCESSOR_NULL;
	queue_chain_init(thread->runq_links);
	thread->thread_id = unique_thread_id++;
#if CONFIG_SCHED_EDGE
	thread->th_bound_cluster_enqueued = false;
	for (cluster_shared_rsrc_type_t shared_rsrc_type = CLUSTER_SHARED_RSRC_TYPE_MIN; shared_rsrc_type < CLUSTER_SHARED_RSRC_TYPE_COUNT; shared_rsrc_type++) {
		thread->th_shared_rsrc_enqueued[shared_rsrc_type] = false;
		thread->th_shared_rsrc_heavy_user[shared_rsrc_type] = false;
		thread->th_shared_rsrc_heavy_perf_control[shared_rsrc_type] = false;
		thread->th_expired_quantum_on_lower_core = false;
		thread->th_expired_quantum_on_higher_core = false;
	}
#endif /* CONFIG_SCHED_EDGE */
	thread->th_bound_cluster_id = THREAD_BOUND_CLUSTER_NONE;
	thread->reason = AST_NONE;
	thread->sched_mode = TH_MODE_TIMESHARE;
	bzero(&thread->realtime, sizeof(thread->realtime));
	thread->last_made_runnable_time = 0;
	thread->state = TH_RUN;
	return thread;
}

void
impl_set_thread_sched_mode(test_thread_t thread, int mode)
{
	((thread_t)thread)->sched_mode = (sched_mode_t)mode;
}

bool
impl_get_thread_is_realtime(test_thread_t thread)
{
	return ((thread_t)thread)->sched_pri >= BASEPRI_RTQUEUES;
}

void
clutch_impl_set_thread_processor_bound(test_thread_t thread, int cpu_id)
{
	((thread_t)thread)->bound_processor = cpus[cpu_id];
}

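/*
 * Install the given thread as the running thread on cpu_id, updating the
 * CPU's cached scheduling state, its deadline, and the pset's realtime_map.
 */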
void
clutch_impl_cpu_set_thread_current(int cpu_id, test_thread_t thread)
{
	cpus[cpu_id]->active_thread = thread;
	cpus[cpu_id]->first_timeslice = TRUE;
	/* Equivalent logic of processor_state_update_from_thread() */
	cpus[cpu_id]->current_pri = ((thread_t)thread)->sched_pri;
	cpus[cpu_id]->current_thread_group = ((thread_t)thread)->thread_group;
	cpus[cpu_id]->current_is_bound = ((thread_t)thread)->bound_processor != PROCESSOR_NULL;

	if (((thread_t) thread)->sched_pri >= BASEPRI_RTQUEUES) {
		bit_set(cpus[cpu_id]->processor_set->realtime_map, cpu_id);
		cpus[cpu_id]->deadline = ((thread_t) thread)->realtime.deadline;
	} else {
		bit_clear(cpus[cpu_id]->processor_set->realtime_map, cpu_id);
		cpus[cpu_id]->deadline = UINT64_MAX;
	}
}

test_thread_t
clutch_impl_cpu_clear_thread_current(int cpu_id)
{
	test_thread_t thread = cpus[cpu_id]->active_thread;
	cpus[cpu_id]->active_thread = cpus[cpu_id]->idle_thread;
	bit_clear(cpus[cpu_id]->processor_set->realtime_map, cpu_id);
	return thread;
}

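/*
 * Tracepoint filter: only DBG_MACH / DBG_MACH_SCHED_CLUTCH events whose code
 * appears in logged_trace_codes[] are recorded.
 */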
static bool
is_logged_clutch_trace_code(uint64_t clutch_trace_code)
{
	for (int i = 0; i < NUM_LOGGED_TRACE_CODES; i++) {
		if (logged_trace_codes[i] == clutch_trace_code) {
			return true;
		}
	}
	return false;
}

static bool
is_logged_trace_code(uint64_t trace_code)
{
	if (KDBG_EXTRACT_CLASS(trace_code) == DBG_MACH && KDBG_EXTRACT_SUBCLASS(trace_code) == DBG_MACH_SCHED_CLUTCH) {
		if (is_logged_clutch_trace_code(KDBG_EXTRACT_CODE(trace_code))) {
			return true;
		}
	}
	return false;
}

void
clutch_impl_log_tracepoint(uint64_t trace_code, uint64_t a1, uint64_t a2, uint64_t a3, uint64_t a4)
{
	if (is_logged_trace_code(trace_code)) {
		if (curr_tracepoint_ind < MAX_LOGGED_TRACEPOINTS) {
			logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 0] = KDBG_EXTRACT_CODE(trace_code);
			logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 1] = a1;
			logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 2] = a2;
			logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 3] = a3;
			logged_tracepoints[curr_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 4] = a4;
		} else if (curr_tracepoint_ind == MAX_LOGGED_TRACEPOINTS) {
			printf("Ran out of pre-allocated memory to log tracepoints (%d points)...will no longer log tracepoints\n",
			    MAX_LOGGED_TRACEPOINTS);
		}
		curr_tracepoint_ind++;
	}
}

void
clutch_impl_pop_tracepoint(uint64_t *clutch_trace_code, uint64_t *arg1, uint64_t *arg2, uint64_t *arg3, uint64_t *arg4)
{
	assert(expect_tracepoint_ind < curr_tracepoint_ind);
	*clutch_trace_code = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 0];
	*arg1 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 1];
	*arg2 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 2];
	*arg3 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 3];
	*arg4 = logged_tracepoints[expect_tracepoint_ind * NUM_TRACEPOINT_FIELDS + 4];
	expect_tracepoint_ind++;
}

#pragma mark - Realtime

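/* Pop the next realtime thread from the pset's RT runq and refresh its stealable state */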
static test_thread_t
impl_dequeue_realtime_thread(processor_set_t pset)
{
	thread_t thread = rt_runq_dequeue(&pset->rt_runq);
	pset_update_rt_stealable_state(pset);
	return thread;
}

void
impl_set_thread_realtime(test_thread_t thread, uint32_t period, uint32_t computation, uint32_t constraint, bool preemptible, uint8_t priority_offset, uint64_t deadline)
{
	thread_t t = (thread_t) thread;
	t->realtime.period = period;
	t->realtime.computation = computation;
	t->realtime.constraint = constraint;
	t->realtime.preemptible = preemptible;
	t->realtime.priority_offset = priority_offset;
	t->realtime.deadline = deadline;
}

void
impl_sched_rt_spill_policy_set(unsigned policy)
{
	sched_rt_spill_policy = policy;
}

void
impl_sched_rt_steal_policy_set(unsigned policy)
{
	sched_rt_steal_policy = policy;
}

void
impl_sched_rt_init_completed()
{
	sched_rt_init_completed();
}

#pragma mark -- IPI Subsystem

sched_ipi_type_t
sched_ipi_action(processor_t dst, thread_t thread, sched_ipi_event_t event)
{
	/* Forward to the policy-specific implementation */
	return SCHED(ipi_policy)(dst, thread, (dst->active_thread == dst->idle_thread), event);
}

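/*
 * IPIs are not actually delivered in the harness; sched_ipi_perform() just
 * records the destination CPU and IPI type so tests can assert on them.
 */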
#define MAX_LOGGED_IPIS 10000
typedef struct {
	int cpu_id;
	sched_ipi_type_t ipi_type;
} logged_ipi_t;
static logged_ipi_t logged_ipis[MAX_LOGGED_IPIS];
static uint32_t curr_ipi_ind = 0;
static uint32_t expect_ipi_ind = 0;

void
sched_ipi_perform(processor_t dst, sched_ipi_type_t ipi)
{
	/* Record the IPI type and where we sent it */
	logged_ipis[curr_ipi_ind].cpu_id = dst->cpu_id;
	logged_ipis[curr_ipi_ind].ipi_type = ipi;
	curr_ipi_ind++;
}

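/*
 * Simplified IPI policy for the harness: rebalance events get an immediate
 * IPI, everything else is deferred.
 */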
sched_ipi_type_t
sched_ipi_policy(processor_t dst, thread_t thread,
    boolean_t dst_idle, sched_ipi_event_t event)
{
	(void)dst;
	(void)thread;
	(void)dst_idle;
	(void)event;
	if (event == SCHED_IPI_EVENT_REBALANCE) {
		return SCHED_IPI_IMMEDIATE;
	}
	/* For now, default to deferred IPI */
	return SCHED_IPI_DEFERRED;
}

sched_ipi_type_t
sched_ipi_deferred_policy(processor_set_t pset,
    processor_t dst, thread_t thread, sched_ipi_event_t event)
{
	(void)pset;
	(void)dst;
	(void)thread;
	(void)event;
	return SCHED_IPI_DEFERRED;
}