// Copyright (c) 2024 Apple Inc. All rights reserved.

#include <stdint.h>
#include <stdbool.h>

/* Edge shares some of its implementation with the Clutch scheduler */
#include "sched_clutch_harness_impl.c"

/* Machine-layer mocking */

unsigned int
ml_get_die_id(unsigned int cluster_id)
{
	return curr_hw_topo.psets[cluster_id].die_id;
}

uint64_t
ml_cpu_signal_deferred_get_timer(void)
{
	/* Matching deferred_ipi_timer_ns */
	return 64 * NSEC_PER_USEC;
}

static test_cpu_type_t
cluster_type_to_test_cpu_type(cluster_type_t cluster_type)
{
	switch (cluster_type) {
	case CLUSTER_TYPE_E:
		return TEST_CPU_TYPE_EFFICIENCY;
	case CLUSTER_TYPE_P:
		return TEST_CPU_TYPE_PERFORMANCE;
	default:
		assert(false);
	}
}

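/*
 * Per-type CPU counts derived from the mock topology; the "recommended" count is
 * adjusted as psets are recommended or derecommended by the harness.
 */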
static unsigned int cpu_count_for_type[TEST_CPU_TYPE_MAX] = { 0 };
static unsigned int recommended_cpu_count_for_type[TEST_CPU_TYPE_MAX] = { 0 };

unsigned int
ml_get_cpu_number_type(cluster_type_t cluster_type, bool logical, bool available)
{
	(void)logical;
	if (available) {
		return recommended_cpu_count_for_type[cluster_type_to_test_cpu_type(cluster_type)];
	} else {
		return cpu_count_for_type[cluster_type_to_test_cpu_type(cluster_type)];
	}
}

static unsigned int cluster_count_for_type[TEST_CPU_TYPE_MAX] = { 0 };

unsigned int
ml_get_cluster_number_type(cluster_type_t cluster_type)
{
	return cluster_count_for_type[cluster_type_to_test_cpu_type(cluster_type)];
}

int sched_amp_spill_deferred_ipi = 1;
int sched_amp_pcores_preempt_immediate_ipi = 1;

/* Implementation of sched_runqueue_harness.h interface */

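/* Single-die AMP topology: one 2-CPU P-cluster and one 4-CPU E-cluster (6 CPUs total). */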
static test_pset_t basic_amp_psets[2] = {
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 2,
		.cluster_id = 0,
		.die_id = 0,
	},
	{
		.cpu_type = TEST_CPU_TYPE_EFFICIENCY,
		.num_cpus = 4,
		.cluster_id = 1,
		.die_id = 0,
	},
};
test_hw_topology_t basic_amp = {
	.psets = &basic_amp_psets[0],
	.num_psets = 2,
	.total_cpus = 6,
};

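/* Dual-die topology: each die has one 2-CPU E-cluster and two 4-CPU P-clusters (20 CPUs total). */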
static test_pset_t dual_die_psets[6] = {
	{
		.cpu_type = TEST_CPU_TYPE_EFFICIENCY,
		.num_cpus = 2,
		.cluster_id = 0,
		.die_id = 0,
	},
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 4,
		.cluster_id = 1,
		.die_id = 0,
	},
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 4,
		.cluster_id = 2,
		.die_id = 0,
	},
	{
		.cpu_type = TEST_CPU_TYPE_EFFICIENCY,
		.num_cpus = 2,
		.cluster_id = 3,
		.die_id = 1,
	},
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 4,
		.cluster_id = 4,
		.die_id = 1,
	},
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 4,
		.cluster_id = 5,
		.die_id = 1,
	},
};
test_hw_topology_t dual_die = {
	.psets = &dual_die_psets[0],
	.num_psets = 6,
	.total_cpus = 20,
};

#define MAX_NODES 2

struct ml_topology_info mock_topology_info;

const unsigned int EDGE_REBAL_RUNNABLE = MACH_SCHED_EDGE_REBAL_RUNNABLE;
const unsigned int EDGE_REBAL_RUNNING = MACH_SCHED_EDGE_REBAL_RUNNING;
const unsigned int EDGE_STEAL = MACH_SCHED_EDGE_STEAL;
const unsigned int EDGE_SHOULD_YIELD = MACH_SCHED_EDGE_SHOULD_YIELD;

void
edge_impl_init_tracepoints(void)
{
	clutch_impl_add_logged_trace_code(EDGE_REBAL_RUNNABLE);
	clutch_impl_add_logged_trace_code(EDGE_REBAL_RUNNING);
	clutch_impl_add_logged_trace_code(EDGE_STEAL);
	clutch_impl_add_logged_trace_code(EDGE_SHOULD_YIELD);
}

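/*
 * Common initialization for both harness entry points below: publishes the mock
 * topology, accumulates the per-type CPU/cluster counts, and reuses the Clutch
 * harness for runqueue, parameter and tracepoint setup.
 */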
static void
edge_impl_init_runqueues(void)
{
	assert(curr_hw_topo.num_psets != 0);
	sched_num_psets = curr_hw_topo.num_psets;
	mock_topology_info.num_cpus = curr_hw_topo.total_cpus;
	mock_topology_info.num_clusters = curr_hw_topo.num_psets;
	clutch_impl_init_topology(curr_hw_topo);
	for (int i = 0; i < curr_hw_topo.num_psets; i++) {
		cluster_count_for_type[curr_hw_topo.psets[i].cpu_type]++;
		cpu_count_for_type[curr_hw_topo.psets[i].cpu_type] += curr_hw_topo.psets[i].num_cpus;
		recommended_cpu_count_for_type[curr_hw_topo.psets[i].cpu_type] +=
		    curr_hw_topo.psets[i].num_cpus;
	}
	increment_mock_time(100);
	clutch_impl_init_params();
	clutch_impl_init_tracepoints();
	edge_impl_init_tracepoints();
}

void
impl_init_runqueue(void)
{
	assert(curr_hw_topo.num_psets == 0);
	curr_hw_topo = single_core;
	edge_impl_init_runqueues();
}

void
impl_init_migration_harness(test_hw_topology_t hw_topology)
{
	assert(curr_hw_topo.num_psets == 0);
	curr_hw_topo = hw_topology;
	edge_impl_init_runqueues();
}

struct thread_group *
impl_create_tg(int interactivity_score)
{
	return clutch_impl_create_tg(interactivity_score);
}

test_thread_t
impl_create_thread(int root_bucket, struct thread_group *tg, int pri)
{
	return clutch_impl_create_thread(root_bucket, tg, pri);
}

void
impl_set_thread_processor_bound(test_thread_t thread, int cpu_id)
{
	_curr_cpu = cpu_id;
	clutch_impl_set_thread_processor_bound(thread, cpu_id);
}

void
impl_set_thread_cluster_bound(test_thread_t thread, int cluster_id)
{
	/* Thread should not already be enqueued on a runqueue */
	assert(thread_get_runq_locked((thread_t)thread) == NULL);
	((thread_t)thread)->th_bound_cluster_id = cluster_id;
}

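/*
 * Make `thread` the running thread on `cpu_id` via the Clutch harness, then mimic
 * the context-switch path by issuing any pending realtime spill or follow-up IPIs
 * for the pset.
 */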
void
impl_cpu_set_thread_current(int cpu_id, test_thread_t thread)
{
	_curr_cpu = cpu_id;
	processor_set_t pset = cpus[cpu_id]->processor_set;
	clutch_impl_cpu_set_thread_current(cpu_id, thread);

	/* Send followup IPIs for realtime, as needed */
	bit_clear(pset->rt_pending_spill_cpu_mask, cpu_id);
	processor_t next_rt_processor = PROCESSOR_NULL;
	sched_ipi_type_t next_rt_ipi_type = SCHED_IPI_NONE;
	if (rt_pset_has_stealable_threads(pset)) {
		rt_choose_next_processor_for_spill_IPI(pset, cpus[cpu_id], &next_rt_processor, &next_rt_ipi_type);
	} else if (rt_pset_needs_a_followup_IPI(pset)) {
		rt_choose_next_processor_for_followup_IPI(pset, cpus[cpu_id], &next_rt_processor, &next_rt_ipi_type);
	}
	if (next_rt_processor != PROCESSOR_NULL) {
		sched_ipi_perform(next_rt_processor, next_rt_ipi_type);
	}
}

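/*
 * Remove the running thread from `cpu_id` via the Clutch harness, mark the processor
 * idle, reset its running bucket and clear the stir-the-pot registry entry.
 */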
test_thread_t
impl_cpu_clear_thread_current(int cpu_id)
{
	_curr_cpu = cpu_id;
	test_thread_t thread = clutch_impl_cpu_clear_thread_current(cpu_id);
	pset_update_processor_state(cpus[cpu_id]->processor_set, cpus[cpu_id], PROCESSOR_IDLE);
	os_atomic_store(&cpus[cpu_id]->processor_set->cpu_running_buckets[cpu_id], TH_BUCKET_SCHED_MAX, relaxed);
	sched_edge_stir_the_pot_clear_registry_entry();
	return thread;
}

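/*
 * Enqueue `thread` for `cpu_id`: realtime threads go to the pset's RT runqueue,
 * everything else goes to the Clutch runqueue; then refresh the pset load average.
 */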
void
impl_cpu_enqueue_thread(int cpu_id, test_thread_t thread)
{
	_curr_cpu = cpu_id;
	if (((thread_t) thread)->sched_pri >= BASEPRI_RTQUEUES) {
		rt_runq_insert(cpus[cpu_id], cpus[cpu_id]->processor_set, (thread_t) thread);
	} else {
		sched_clutch_processor_enqueue(cpus[cpu_id], (thread_t) thread, SCHED_TAILQ);
	}
	SCHED(update_pset_load_average)(cpus[cpu_id]->processor_set, 0);
}

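/* Dequeue the highest-priority runnable thread for `cpu_id`, preferring realtime threads. */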
test_thread_t
impl_cpu_dequeue_thread(int cpu_id)
{
	_curr_cpu = cpu_id;
	test_thread_t chosen_thread = sched_rt_choose_thread(cpus[cpu_id]);
	if (chosen_thread != THREAD_NULL) {
		return chosen_thread;
	}
	/* No realtime threads. */
	return sched_clutch_choose_thread(cpus[cpu_id], MINPRI, NULL, 0);
}

test_thread_t
impl_cpu_dequeue_thread_compare_current(int cpu_id)
{
	_curr_cpu = cpu_id;
	assert(cpus[cpu_id]->active_thread != NULL);
	processor_set_t pset = cpus[cpu_id]->processor_set;
	if (rt_runq_count(pset) > 0) {
		return impl_dequeue_realtime_thread(pset);
	} else {
		return sched_clutch_choose_thread(cpus[cpu_id], MINPRI, cpus[cpu_id]->active_thread, 0);
	}
}

bool
impl_processor_csw_check(int cpu_id)
{
	_curr_cpu = cpu_id;
	assert(cpus[cpu_id]->active_thread != NULL);
	ast_t preempt_ast = sched_clutch_processor_csw_check(cpus[cpu_id]);
	return preempt_ast & AST_PREEMPT;
}

void
impl_pop_tracepoint(uint64_t clutch_trace_code, uint64_t *arg1, uint64_t *arg2,
    uint64_t *arg3, uint64_t *arg4)
{
	clutch_impl_pop_tracepoint(clutch_trace_code, arg1, arg2, arg3, arg4);
}

int
impl_choose_pset_for_thread(test_thread_t thread)
{
	/* Begin the search from the current pset */
	sched_options_t options = SCHED_NONE;
	processor_t chosen_processor = sched_edge_choose_processor(
	    current_processor()->processor_set, current_processor(), (thread_t)thread, &options);
	return chosen_processor->processor_set->pset_id;
}

bool
impl_thread_avoid_processor(test_thread_t thread, int cpu_id, bool quantum_expired)
{
	_curr_cpu = cpu_id;
	return sched_edge_thread_avoid_processor(cpus[cpu_id], (thread_t)thread, quantum_expired ? AST_QUANTUM : AST_NONE);
}

void
impl_cpu_expire_quantum(int cpu_id)
{
	_curr_cpu = cpu_id;
	sched_edge_quantum_expire(cpus[cpu_id]->active_thread);
	cpus[cpu_id]->first_timeslice = FALSE;
}

test_thread_t
impl_steal_thread(int cpu_id)
{
	_curr_cpu = cpu_id;
	return sched_edge_processor_idle(pset_array[cpu_id_to_pset_id(cpu_id)]);
}

bool
impl_processor_balance(int cpu_id)
{
	_curr_cpu = cpu_id;
	return sched_edge_balance(cpus[cpu_id], pset_array[cpu_id_to_pset_id(cpu_id)]);
}

void
impl_set_current_processor(int cpu_id)
{
	_curr_cpu = cpu_id;
}

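/*
 * Update the preferred cluster for a single scheduling bucket of `tg` by building a
 * one-bit modify bitmap and passing it to sched_edge_update_preferred_cluster().
 */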
void
impl_set_tg_sched_bucket_preferred_pset(struct thread_group *tg, int sched_bucket, int cluster_id)
{
	assert(sched_bucket < TH_BUCKET_SCHED_MAX);
	sched_clutch_t clutch = sched_clutch_for_thread_group(tg);
	bitmap_t modify_bitmap[BITMAP_LEN(TH_BUCKET_SCHED_MAX)] = {0};
	bitmap_set(modify_bitmap, sched_bucket);
	uint32_t tg_bucket_preferred_cluster[TH_BUCKET_SCHED_MAX] = {0};
	tg_bucket_preferred_cluster[sched_bucket] = cluster_id;
	sched_edge_update_preferred_cluster(clutch, modify_bitmap, tg_bucket_preferred_cluster);
}

void
impl_set_pset_load_avg(int cluster_id, int QoS, uint64_t load_avg)
{
	assert(QoS >= 0 && QoS < TH_BUCKET_SCHED_MAX);
	pset_array[cluster_id]->pset_load_average[QoS] = load_avg;
}

void
edge_set_thread_shared_rsrc(test_thread_t thread, bool native_first)
{
	int shared_rsrc_type = native_first ? CLUSTER_SHARED_RSRC_TYPE_NATIVE_FIRST :
	    CLUSTER_SHARED_RSRC_TYPE_RR;
	((thread_t)thread)->th_shared_rsrc_heavy_user[shared_rsrc_type] = true;
}

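/*
 * Toggle the recommendation state of a pset and keep the per-type recommended CPU
 * counts (reported by ml_get_cpu_number_type()) in sync.
 */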
void
impl_set_pset_derecommended(int cluster_id)
{
	processor_set_t pset = pset_array[cluster_id];
	pset->recommended_bitmask = 0;
	atomic_bit_clear(&pset->node->pset_recommended_map, cluster_id, memory_order_relaxed);
	recommended_cpu_count_for_type[cluster_type_to_test_cpu_type(pset->pset_type)] -=
	    bit_count(pset->cpu_bitmask);
}

void
impl_set_pset_recommended(int cluster_id)
{
	processor_set_t pset = pset_array[cluster_id];
	pset->recommended_bitmask = pset->cpu_bitmask;
	atomic_bit_set(&pset->node->pset_recommended_map, cluster_id, memory_order_relaxed);
	recommended_cpu_count_for_type[cluster_type_to_test_cpu_type(pset->pset_type)] +=
	    bit_count(pset->cpu_bitmask);
}

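/* Pop the next logged IPI (in send order) and report its target CPU and IPI type. */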
void
impl_pop_ipi(int *cpu_id, test_ipi_type_t *ipi_type)
{
	assert(expect_ipi_ind < curr_ipi_ind);
	*cpu_id = logged_ipis[expect_ipi_ind].cpu_id;
	*ipi_type = (test_ipi_type_t)logged_ipis[expect_ipi_ind].ipi_type;
	expect_ipi_ind++;
}

bool
impl_thread_should_yield(int cpu_id)
{
	_curr_cpu = cpu_id;
	assert(cpus[cpu_id]->active_thread != NULL);
	return sched_edge_thread_should_yield(cpus[cpu_id], cpus[cpu_id]->active_thread);
}

void
impl_send_ipi(int cpu_id, test_thread_t thread, test_ipi_event_t event)
{
	sched_ipi_type_t triggered_ipi = sched_ipi_action(cpus[cpu_id],
	    (thread_t)thread, (sched_ipi_event_t)event);
	sched_ipi_perform(cpus[cpu_id], triggered_ipi);
}

int
rt_pset_spill_search_order_at_offset(int src_pset_id, int offset)
{
	return pset_array[src_pset_id]->sched_rt_spill_search_order.spso_search_order[offset];
}

void
rt_pset_recompute_spill_order(int src_pset_id)
{
	sched_rt_config_pset_push(pset_array[src_pset_id]);
}

uint32_t
impl_qos_max_parallelism(int qos, uint64_t options)
{
	return sched_edge_qos_max_parallelism(qos, options);
}

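/*
 * Walk psets in spill search order starting from `src_pset_id`, restricted to
 * `candidate_map`. Returns a malloc()ed array of visited pset ids, padded with -1;
 * the caller owns the allocation.
 */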
int *
impl_iterate_pset_search_order(int src_pset_id, uint64_t candidate_map, int sched_bucket)
{
	int *psets = (int *)malloc(sizeof(int) * curr_hw_topo.num_psets);
	for (int i = 0; i < curr_hw_topo.num_psets; i++) {
		psets[i] = -1;
	}
	sched_pset_iterate_state_t istate = SCHED_PSET_ITERATE_STATE_INIT;
	int ind = 0;
	processor_set_t starting_pset = pset_array[src_pset_id];
	while (sched_iterate_psets_ordered(starting_pset,
	    &starting_pset->spill_search_order[sched_bucket], candidate_map, &istate)) {
		psets[ind++] = istate.spis_pset_id;
	}
	return psets;
}

test_thread_t
impl_rt_choose_thread(int cpu_id)
{
	return sched_rt_choose_thread(cpus[cpu_id]);
}

void
sched_rt_spill_policy_set(unsigned policy)
{
	impl_sched_rt_spill_policy_set(policy);
}

void
sched_rt_steal_policy_set(unsigned policy)
{
	impl_sched_rt_steal_policy_set(policy);
}