// Copyright (c) 2024 Apple Inc. All rights reserved.

#include "sched_test_harness/sched_policy_darwintest.h"
#include "sched_test_harness/sched_edge_harness.h"

T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler"),
    T_META_RADAR_COMPONENT_NAME("xnu"),
    T_META_RADAR_COMPONENT_VERSION("scheduler"),
    T_META_RUN_CONCURRENTLY(true),
    T_META_OWNER("emily_peterson"));

SCHED_POLICY_T_DECL(migration_cluster_bound,
    "Verify that cluster-bound threads always choose the bound "
    "cluster except when it's derecommended")
{
	int ret;
	init_migration_harness(dual_die);
	struct thread_group *tg = create_tg(0);
	test_thread_t threads[dual_die.num_psets];
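	/* Pset load-average values used to model idle, lightly loaded, and heavily loaded clusters */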
	int idle_load = 0;
	int low_load = 100000;
	int high_load = 10000000;
	for (int i = 0; i < dual_die.num_psets; i++) {
		threads[i] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		set_thread_cluster_bound(threads[i], i);
		set_pset_load_avg(i, TH_BUCKET_SHARE_DF, low_load);
	}
	for (int i = 0; i < dual_die.num_psets; i++) {
		set_current_processor(pset_id_to_cpu_id(i));
		for (int j = 0; j < dual_die.num_psets; j++) {
			/* Add extra load to the bound cluster, so we're definitely not just idle short-circuiting */
			set_pset_load_avg(j, TH_BUCKET_SHARE_DF, high_load);
			ret = choose_pset_for_thread_expect(threads[j], j);
			T_QUIET; T_EXPECT_TRUE(ret, "Expecting the bound cluster");
			set_pset_load_avg(j, TH_BUCKET_SHARE_DF, low_load);
		}
	}
	SCHED_POLICY_PASS("Cluster bound chooses bound cluster");
	/* Derecommend the bound cluster */
	for (int i = 0; i < dual_die.num_psets; i++) {
		set_pset_derecommended(i);
		int replacement_pset = -1;
		for (int j = 0; j < dual_die.num_psets; j++) {
			/* Find the first homogeneous cluster and mark it as idle so we choose it */
			if ((i != j) && (dual_die.psets[i].cpu_type == dual_die.psets[j].cpu_type)) {
				replacement_pset = j;
				set_pset_load_avg(replacement_pset, TH_BUCKET_SHARE_DF, idle_load);
				break;
			}
		}
		ret = choose_pset_for_thread_expect(threads[i], replacement_pset);
		T_QUIET; T_EXPECT_TRUE(ret, "Expecting the idle pset when the bound cluster is derecommended");
		/* Restore pset conditions */
		set_pset_recommended(i);
		set_pset_load_avg(replacement_pset, TH_BUCKET_SHARE_DF, low_load);
	}
	SCHED_POLICY_PASS("Cluster binding is soft");
}

SCHED_POLICY_T_DECL(migration_should_yield,
    "Verify that we only yield if there's a \"good enough\" thread elsewhere "
    "to switch to")
{
	int ret;
	init_migration_harness(basic_amp);
	struct thread_group *tg = create_tg(0);
	test_thread_t yielder = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	int p_pset = 0;
	int p_cpu = pset_id_to_cpu_id(p_pset);
	cpu_set_thread_current(p_cpu, yielder);
	ret = cpu_check_should_yield(p_cpu, false);
	T_QUIET; T_EXPECT_TRUE(ret, "No thread present to yield to");
	ret = tracepoint_expect(EDGE_SHOULD_YIELD, get_thread_tid(yielder), p_pset, 0, 4);
	T_QUIET; T_EXPECT_TRUE(ret, "SCHED_EDGE_YIELD_DISALLOW");

	test_thread_t background = create_thread(TH_BUCKET_SHARE_BG, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_BG]);
	enqueue_thread(pset_target(p_pset), background);
	ret = cpu_check_should_yield(p_cpu, true);
	T_QUIET; T_EXPECT_TRUE(ret, "Should yield to a low priority thread on the current runqueue");
	ret = tracepoint_expect(EDGE_SHOULD_YIELD, get_thread_tid(yielder), p_pset, 0, 0);
	T_QUIET; T_EXPECT_TRUE(ret, "SCHED_EDGE_YIELD_RUNQ_NONEMPTY");
	SCHED_POLICY_PASS("Basic yield behavior on single pset");

	int e_pset = 1;
	int e_cpu = pset_id_to_cpu_id(e_pset);
	ret = dequeue_thread_expect(pset_target(p_pset), background);
	T_QUIET; T_EXPECT_TRUE(ret, "Only background thread in runqueue");
	set_tg_sched_bucket_preferred_pset(tg, TH_BUCKET_SHARE_BG, e_pset);
	enqueue_thread(pset_target(e_pset), background);
	ret = cpu_check_should_yield(p_cpu, false);
	T_QUIET; T_EXPECT_TRUE(ret, "Should not yield in order to running-rebalance a native thread");
	ret = tracepoint_expect(EDGE_SHOULD_YIELD, get_thread_tid(yielder), p_cpu, 0, 4);
	T_QUIET; T_EXPECT_TRUE(ret, "SCHED_EDGE_YIELD_DISALLOW");

	ret = dequeue_thread_expect(pset_target(e_pset), background);
	T_QUIET; T_EXPECT_TRUE(ret, "Only background thread in runqueue");
	set_tg_sched_bucket_preferred_pset(tg, TH_BUCKET_SHARE_BG, p_pset);
	cpu_set_thread_current(e_cpu, background);
	ret = cpu_check_should_yield(p_cpu, true);
	T_QUIET; T_EXPECT_TRUE(ret, "Should yield in order to running-rebalance a foreign thread");
	ret = tracepoint_expect(EDGE_SHOULD_YIELD, get_thread_tid(yielder), p_cpu, 0, 2);
	T_QUIET; T_EXPECT_TRUE(ret, "SCHED_EDGE_YIELD_FOREIGN_RUNNING");

	enqueue_thread(pset_target(p_pset), background);
	cpu_set_thread_current(e_cpu, yielder);
	ret = cpu_check_should_yield(e_cpu, true);
	T_QUIET; T_EXPECT_TRUE(ret, "Should yield in order to steal thread");
	ret = tracepoint_expect(EDGE_SHOULD_YIELD, get_thread_tid(yielder), e_pset, 0, 3);
	T_QUIET; T_EXPECT_TRUE(ret, "SCHED_EDGE_YIELD_STEAL_POSSIBLE");
	SCHED_POLICY_PASS("Thread yields in order to steal from other psets");
}

SCHED_POLICY_T_DECL(migration_stir_the_pot_basic,
    "Verify stir-the-pot successfully rotates threads across P- and E-cores after "
    "their respective quanta have expired")
{
	int ret;
	init_migration_harness(basic_amp);

	struct thread_group *tg = create_tg(0);
	test_thread_t starts_p = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	test_thread_t starts_e = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	test_thread_t other_p_thread = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	int p_cpu = 0;
	int e_cpu = 2;
	int other_e_cpu = 3;
	int other_p_cpu = 1;
	cpu_set_thread_current(p_cpu, starts_p);
	cpu_set_thread_current(e_cpu, starts_e);
	cpu_set_thread_current(other_p_cpu, other_p_thread);
	int p_pset = 0;
	int e_pset = 1;

	/* Thread on low core type "pays its dues" */
	cpu_expire_quantum(e_cpu);

	/* Thread on high core type should locate swap candidate */
	cpu_expire_quantum(p_cpu);
	ret = ipi_expect(e_cpu, TEST_IPI_IMMEDIATE);
	T_QUIET; T_EXPECT_TRUE(ret, "Should have found stir-the-pot candidate with expired quantum");

	/* Thread on low core type should respond to IPI by preempting... */
	ret = thread_avoid_processor_expect(starts_e, e_cpu, false, true);
	T_QUIET; T_EXPECT_TRUE(ret, "Thread should preempt to get on P-core");

	/* (Simulate as if we are switching to another quantum-expired thread) */
	test_thread_t other_expired_thread = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	cpu_set_thread_current(other_e_cpu, other_expired_thread);
	cpu_expire_quantum(other_e_cpu);
	cpu_clear_thread_current(other_e_cpu);
	cpu_set_thread_current(e_cpu, other_expired_thread);

	/* ...and choosing the corresponding P-core for swap */
	ret = choose_pset_for_thread_expect(starts_e, p_pset);
	T_QUIET; T_EXPECT_TRUE(ret, "Should choose P-cores despite no idle cores there");

	/* Upon arrival, thread swapping in should preempt its predecessor */
	enqueue_thread(pset_target(p_pset), starts_e);
	ret = cpu_check_preempt_current(p_cpu, true);
	T_QUIET; T_EXPECT_TRUE(ret, "P-core should preempt quantum expired thread");

	/* ...and preempted thread on P-core should spill down to E, completing the swap */
	ret = dequeue_thread_expect(pset_target(p_pset), starts_e);
	T_QUIET; T_ASSERT_TRUE(ret, "starts_e was enqueued on P");
	cpu_set_thread_current(p_cpu, starts_e);
	ret = choose_pset_for_thread_expect(starts_p, e_pset);
	T_QUIET; T_EXPECT_TRUE(ret, "starts_p spilled to E, completing swap");

	/*
	 * And a second swap should be initiated for the other E-expired thread
	 * that switched on-core afterwards.
	 */
	cpu_expire_quantum(other_p_cpu);
	ret = ipi_expect(e_cpu, TEST_IPI_IMMEDIATE);
	T_QUIET; T_EXPECT_TRUE(ret, "Should have found stir-the-pot candidate with expired quantum");

	SCHED_POLICY_PASS("Stir-the-pot successfully initiated by P-core and completed");

	/* Clean-up and reset to initial conditions */
	cpu_set_thread_current(p_cpu, starts_p);
	cpu_set_thread_current(e_cpu, starts_e);
	cpu_set_thread_current(other_p_cpu, other_p_thread);
	cpu_set_thread_current(other_e_cpu, other_expired_thread);

	/* Now P-core expires quantum first */
	cpu_expire_quantum(p_cpu);

	/* Thread on E-core "pays its dues" and responds to self-message by preempting */
	cpu_expire_quantum(e_cpu);
	ret = thread_avoid_processor_expect(starts_e, e_cpu, false, true);
	T_QUIET; T_EXPECT_TRUE(ret, "Thread should preempt to get on P-core");

	/* ...and choosing the corresponding P-core for swap */
	cpu_clear_thread_current(e_cpu);
	ret = choose_pset_for_thread_expect(starts_e, p_pset);
	T_QUIET; T_EXPECT_TRUE(ret, "Should choose P-cores despite no idle cores there");

	/* Upon arrival, thread swapping in should preempt its predecessor */
	enqueue_thread(pset_target(p_pset), starts_e);
	ret = cpu_check_preempt_current(p_cpu, true);
	T_QUIET; T_EXPECT_TRUE(ret, "P-core should preempt quantum expired thread");

	/* ...and preempted thread on P-core should spill down to E, completing the swap */
	ret = dequeue_thread_expect(pset_target(p_pset), starts_e);
	T_QUIET; T_ASSERT_TRUE(ret, "starts_e was enqueued on P");
	cpu_set_thread_current(p_cpu, starts_e);
	ret = choose_pset_for_thread_expect(starts_p, e_pset);
	T_QUIET; T_EXPECT_TRUE(ret, "starts_p spilled to E, completing swap");

	SCHED_POLICY_PASS("Stir-the-pot successfully initiated by E-core and completed");
}

SCHED_POLICY_T_DECL(migration_ipi_policy,
    "Verify we send the right type of IPI in different cross-core preemption scenarios")
{
	int ret;
	init_migration_harness(dual_die);
	struct thread_group *tg = create_tg(0);
	thread_t thread = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	int dst_pcore = 3;
	int src_pcore = 0;

	set_current_processor(src_pcore);
	cpu_send_ipi_for_thread(dst_pcore, thread, TEST_IPI_EVENT_PREEMPT);
	ret = ipi_expect(dst_pcore, TEST_IPI_IDLE);
	T_QUIET; T_EXPECT_TRUE(ret, "Idle CPU");

	thread_t core_busy = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	cpu_set_thread_current(dst_pcore, core_busy);
	set_current_processor(src_pcore);
	cpu_send_ipi_for_thread(dst_pcore, thread, TEST_IPI_EVENT_PREEMPT);
	ret = ipi_expect(dst_pcore, TEST_IPI_IMMEDIATE);
	T_QUIET; T_EXPECT_TRUE(ret, "Should send an immediate IPI to preempt on P-core");
	SCHED_POLICY_PASS("Immediate IPIs to preempt P-cores");

	int dst_ecore = 13;
	int ecluster_id = 5;
	set_tg_sched_bucket_preferred_pset(tg, TH_BUCKET_SHARE_DF, ecluster_id);
	set_current_processor(src_pcore);
	cpu_send_ipi_for_thread(dst_ecore, thread, TEST_IPI_EVENT_PREEMPT);
	ret = ipi_expect(dst_ecore, TEST_IPI_IDLE);
	T_QUIET; T_EXPECT_TRUE(ret, "Idle CPU");

	cpu_set_thread_current(dst_ecore, core_busy);
	set_current_processor(src_pcore);
	cpu_send_ipi_for_thread(dst_ecore, thread, TEST_IPI_EVENT_PREEMPT);
	ret = ipi_expect(dst_ecore, TEST_IPI_IMMEDIATE);
	T_QUIET; T_EXPECT_TRUE(ret, "Should send an immediate IPI to preempt for E->E");
	SCHED_POLICY_PASS("Immediate IPIs to cluster homogeneous with preferred");
}

SCHED_POLICY_T_DECL(migration_max_parallelism,
    "Verify we report expected values for recommended width of parallel workloads")
{
	int ret;
	init_migration_harness(dual_die);
	uint32_t num_pclusters = 4;
	uint32_t num_pcores = 4 * num_pclusters;
	uint32_t num_eclusters = 2;
	uint32_t num_ecores = 2 * num_eclusters;
	for (thread_qos_t qos = THREAD_QOS_UNSPECIFIED; qos < THREAD_QOS_LAST; qos++) {
		for (int shared_rsrc = 0; shared_rsrc < 2; shared_rsrc++) {
			for (int rt = 0; rt < 2; rt++) {
				uint64_t options = 0;
				uint32_t expected_width = 0;
				if (shared_rsrc) {
					options |= QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE;
				}
				if (rt) {
					options |= QOS_PARALLELISM_REALTIME;
					/* Recommend P-width */
					expected_width = shared_rsrc ? num_pclusters : num_pcores;
				} else if (qos == THREAD_QOS_BACKGROUND || qos == THREAD_QOS_MAINTENANCE) {
					/* Recommend E-width */
					expected_width = shared_rsrc ? num_eclusters : num_ecores;
				} else {
					/* Recommend full width */
					expected_width = shared_rsrc ? (num_eclusters + num_pclusters) : (num_pcores + num_ecores);
				}
				ret = max_parallelism_expect(qos, options, expected_width);
				T_QUIET; T_EXPECT_TRUE(ret, "Unexpected width for QoS %d shared_rsrc %d RT %d",
				    qos, shared_rsrc, rt);
			}
		}
	}
	SCHED_POLICY_PASS("Correct recommended parallel width for all configurations");
}

SCHED_POLICY_T_DECL(migration_rebalance_basic, "Verify that basic rebalance steal and "
    "running rebalance mechanisms kick in")
{
	int ret;
	test_hw_topology_t topo = SCHED_POLICY_DEFAULT_TOPO;
	init_migration_harness(topo);
	int sched_bucket = TH_BUCKET_SHARE_DF;
	struct thread_group *tg = create_tg(0);
	thread_t thread = create_thread(sched_bucket, tg, root_bucket_to_highest_pri[sched_bucket]);

	for (int preferred_pset_id = 0; preferred_pset_id < topo.num_psets; preferred_pset_id++) {
		set_tg_sched_bucket_preferred_pset(tg, sched_bucket, preferred_pset_id);
		sched_policy_push_metadata("preferred_pset_id", preferred_pset_id);
		for (int running_on_pset_id = 0; running_on_pset_id < topo.num_psets; running_on_pset_id++) {
			/* Running rebalance */
			int running_on_cpu = pset_id_to_cpu_id(running_on_pset_id);
			cpu_set_thread_current(running_on_cpu, thread);
			sched_policy_push_metadata("running_on_pset_id", running_on_pset_id);
			for (int c = 0; c < topo.total_cpus; c++) {
				sched_policy_push_metadata("evaluate_cpu", c);
				int evaluate_pset = cpu_id_to_pset_id(c);
				bool want_rebalance = cpu_processor_balance(c);
				if (evaluate_pset == running_on_pset_id) {
					T_QUIET; T_EXPECT_FALSE(want_rebalance, "should be no thread available for rebalance %s",
					    sched_policy_dump_metadata());
					sched_policy_pop_metadata();
					continue;
				}
				bool should_rebalance = (topo.psets[evaluate_pset].cpu_type == topo.psets[preferred_pset_id].cpu_type) &&
				    (topo.psets[running_on_pset_id].cpu_type != topo.psets[preferred_pset_id].cpu_type);
				T_QUIET; T_EXPECT_EQ(want_rebalance, should_rebalance, "should rebalance to move thread to preferred type "
				    "if not there already %s", sched_policy_dump_metadata());
				if (should_rebalance) {
					ret = tracepoint_expect(EDGE_REBAL_RUNNING, 0, c, running_on_cpu, 0);
					T_QUIET; T_EXPECT_TRUE(ret, "EDGE_REBAL_RUNNING tracepoint");
					ret = thread_avoid_processor_expect(thread, running_on_cpu, false, true);
					T_QUIET; T_EXPECT_TRUE(ret, "thread will preempt in response to running rebalance IPI %s",
					    sched_policy_dump_metadata());
					/* Try loading all other cores of the preferred type, forcing this decision to find the idle one */
					for (int p = 0; p < topo.num_psets; p++) {
						if ((topo.psets[p].cpu_type == topo.psets[preferred_pset_id].cpu_type) &&
						    (p != evaluate_pset)) {
							set_pset_load_avg(p, sched_bucket, 10000000);
						}
					}
					ret = thread_avoid_processor_expect(thread, running_on_cpu, false, true);
					T_QUIET; T_EXPECT_TRUE(ret, "...even if all other cores (except rebalancer) are full %s",
					    sched_policy_dump_metadata());
					/* Unload cores for clean-up */
					for (int p = 0; p < topo.num_psets; p++) {
						if ((topo.psets[p].cpu_type == topo.psets[preferred_pset_id].cpu_type) &&
						    (p != evaluate_pset)) {
							set_pset_load_avg(p, sched_bucket, 0);
						}
					}
				}
				sched_policy_pop_metadata();
			}
			cpu_clear_thread_current(running_on_cpu);
			sched_policy_pop_metadata();

			/* Rebalance steal */
			int enqueued_pset = running_on_pset_id;
			enqueue_thread(pset_target(enqueued_pset), thread);
			sched_policy_push_metadata("enqueued_pset", enqueued_pset);
			for (int c = 0; c < topo.total_cpus; c++) {
				sched_policy_push_metadata("evaluate_cpu", c);
				int evaluate_pset = cpu_id_to_pset_id(c);
				if ((topo.psets[evaluate_pset].cpu_type != topo.psets[enqueued_pset].cpu_type) &&
				    ((topo.psets[enqueued_pset].cpu_type != TEST_CPU_TYPE_PERFORMANCE) ||
				    (topo.psets[preferred_pset_id].cpu_type != TEST_CPU_TYPE_PERFORMANCE))) {
					/* Only evaluate steal between mismatching cluster types and where spill is not allowed */
					thread_t stolen_thread = cpu_steal_thread(c);
					bool should_rebalance_steal = (topo.psets[evaluate_pset].cpu_type == topo.psets[preferred_pset_id].cpu_type) &&
					    (topo.psets[enqueued_pset].cpu_type != topo.psets[preferred_pset_id].cpu_type);
					bool did_rebalance_steal = (stolen_thread == thread);
					if (stolen_thread != NULL) {
						T_QUIET; T_EXPECT_EQ(stolen_thread, thread, "should only be one thread to steal?");
					}
					T_QUIET; T_EXPECT_EQ(did_rebalance_steal, should_rebalance_steal, "should rebalance steal to move "
					    "thread to preferred type if not already there %s", sched_policy_dump_metadata());
					if (did_rebalance_steal) {
						ret = tracepoint_expect(EDGE_REBAL_RUNNABLE, 0, evaluate_pset, enqueued_pset, 0);
						T_QUIET; T_EXPECT_TRUE(ret, "EDGE_REBAL_RUNNABLE tracepoint");
						/* Put back stolen thread */
						enqueue_thread(pset_target(enqueued_pset), thread);
					}
				}
				sched_policy_pop_metadata();
			}

			ret = dequeue_thread_expect(pset_target(enqueued_pset), thread);
			T_QUIET; T_EXPECT_TRUE(ret, "thread correctly where we left it");
			sched_policy_pop_metadata();
		}
		sched_policy_pop_metadata();
	}
	SCHED_POLICY_PASS("Rebalance mechanisms kicking in!");
}

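/*
 * Symmetric AMP topology for the steal-order test: one E and one P cluster
 * per die, two CPUs each, across two dies.
 */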
static test_pset_t two_of_each_psets[4] = {
	{
		.cpu_type = TEST_CPU_TYPE_EFFICIENCY,
		.num_cpus = 2,
		.cluster_id = 0,
		.die_id = 0,
	},
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 2,
		.cluster_id = 1,
		.die_id = 0,
	},
	{
		.cpu_type = TEST_CPU_TYPE_EFFICIENCY,
		.num_cpus = 2,
		.cluster_id = 2,
		.die_id = 1,
	},
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 2,
		.cluster_id = 3,
		.die_id = 1,
	},
};
test_hw_topology_t two_of_each = {
	.psets = &two_of_each_psets[0],
	.num_psets = 4,
	.total_cpus = 8,
};

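/*
 * Reset the harness topology between configurations: drain every pset
 * runqueue (the 0xc0ffee "expected thread" is just a placeholder, since each
 * dequeue result is discarded), zero the per-bucket load averages, and clear
 * any threads left running on-core.
 */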
static void
clear_threads_from_topo(void)
{
	test_hw_topology_t topo = get_hw_topology();
	int pset_first_cpu = 0;
	for (int p = 0; p < topo.num_psets; p++) {
		while (!runqueue_empty(pset_target(p))) {
			(void)dequeue_thread_expect(pset_target(p), (test_thread_t)0xc0ffee);
		}
		for (int b = 0; b < TH_BUCKET_SCHED_MAX; b++) {
			set_pset_load_avg(p, b, 0);
		}
		for (int c = pset_first_cpu; c < pset_first_cpu + topo.psets[p].num_cpus; c++) {
			cpu_clear_thread_current(c);
		}
		pset_first_cpu += topo.psets[p].num_cpus;
	}
}

typedef enum {
	enqueued = 0,
	running = 1,
	thread_type_max = 2,
} thread_type_t;

typedef enum {
	e_recc = 0,
	p_recc = 1,
	recc_type_max = 2,
} recc_type_t;
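/*
 * The steal-order test below builds matrices of test threads indexed as
 * threads[enqueued|running][pset id][e_recc|p_recc], i.e. one enqueued and
 * one running thread of each recommendation type per pset.
 */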

static char *
thread_recc_to_core_type_char(recc_type_t recc)
{
	switch (recc) {
	case e_recc:
		return "E";
	case p_recc:
		return "P";
	default:
		assert(false);
	}
}

static char
pset_id_to_core_type_char(int pset_id)
{
	return test_cpu_type_to_char(get_hw_topology().psets[pset_id].cpu_type);
}

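/*
 * Expect that a steal attempt from a CPU in stealing_pset finds no eligible
 * thread; explanation documents why we expect that.
 */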
static void
no_steal_expect(int stealing_pset, char *explanation)
{
	test_thread_t no_steal = cpu_steal_thread(pset_id_to_cpu_id(stealing_pset));
	T_EXPECT_NULL(no_steal, "No thread stolen because: %s (%p)", explanation, no_steal);
}

/*
 * For convenience when handling arrays with one test thread for each
 * possible recommendation type, map the recommendation type to an
 * index in such an array.
 */
static int
recc_type_to_ind(recc_type_t recc)
{
	return (int)recc;
}

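/*
 * Expect a steal attempt from stealing_pset to return the enqueued thread of
 * the given recommendation type from stolen_from_pset, and check the matching
 * EDGE_REBAL_RUNNABLE tracepoint.
 */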
static void
foreign_steal_expect(int stealing_pset, int stolen_from_pset,
    test_thread_t thread_candidates_matrix[thread_type_max][4][recc_type_max],
    recc_type_t thread_recommendation)
{
	int ret;
	test_thread_t thread = cpu_steal_thread(pset_id_to_cpu_id(stealing_pset));
	char stealing_type = pset_id_to_core_type_char(stealing_pset);
	char stolen_type = pset_id_to_core_type_char(stolen_from_pset);
	char *recc_type = thread_recc_to_core_type_char(thread_recommendation);
	T_EXPECT_EQ(thread, thread_candidates_matrix[enqueued][stolen_from_pset][recc_type_to_ind(thread_recommendation)],
	    "%c (%d) rebalance-steals %s-recommended from %c (%d)", stealing_type, stealing_pset,
	    recc_type, stolen_type, stolen_from_pset);
	ret = tracepoint_expect(EDGE_REBAL_RUNNABLE,
	    get_thread_tid(thread_candidates_matrix[enqueued][stolen_from_pset][recc_type_to_ind(thread_recommendation)]),
	    stealing_pset, stolen_from_pset, 0);
	T_QUIET; T_EXPECT_TRUE(ret, "EDGE_REBAL_RUNNABLE %c->%c %s-recommended tracepoint",
	    stolen_type, stealing_type, recc_type);
}

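/*
 * Like foreign_steal_expect(), but for an ordinary work-steal: the EDGE_STEAL
 * tracepoint is checked instead.
 */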
static void
work_steal_expect(int stealing_pset, int stolen_from_pset,
    test_thread_t thread_candidates_matrix[thread_type_max][4][recc_type_max],
    recc_type_t thread_recommendation)
{
	int ret;
	test_thread_t thread = cpu_steal_thread(pset_id_to_cpu_id(stealing_pset));
	char stealing_type = pset_id_to_core_type_char(stealing_pset);
	char stolen_type = pset_id_to_core_type_char(stolen_from_pset);
	char *recc_type = thread_recc_to_core_type_char(thread_recommendation);
	T_EXPECT_EQ(thread, thread_candidates_matrix[enqueued][stolen_from_pset][recc_type_to_ind(thread_recommendation)],
	    "%c (%d) work-steals %s-recommended from %c (%d)", stealing_type, stealing_pset,
	    recc_type, stolen_type, stolen_from_pset);
	ret = tracepoint_expect(EDGE_STEAL,
	    get_thread_tid(thread_candidates_matrix[enqueued][stolen_from_pset][recc_type_to_ind(thread_recommendation)]),
	    stealing_pset, stolen_from_pset, 0);
	T_QUIET; T_EXPECT_TRUE(ret, "EDGE_STEAL %c->%c %s-recommended tracepoint",
	    stolen_type, stealing_type, recc_type);
}

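/*
 * Expect that the first CPU of rebalancing_pset decides to perform a running
 * rebalance, emitting an EDGE_REBAL_RUNNING tracepoint for each of the given
 * target CPUs.
 */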
static void
running_rebalance_expect(int rebalancing_pset, char *target_name,
    int num_target_cpus, int *target_cpus)
{
	int ret;
	char rebalancing_type = pset_id_to_core_type_char(rebalancing_pset);
	bool want_rebalance = cpu_processor_balance(pset_id_to_cpu_id(rebalancing_pset));
	T_EXPECT_TRUE(want_rebalance, "Send running rebalance %s->%c IPIs",
	    target_name, rebalancing_type);
	for (int i = 0; i < num_target_cpus; i++) {
		ret = tracepoint_expect(EDGE_REBAL_RUNNING, 0, pset_id_to_cpu_id(rebalancing_pset),
		    target_cpus[i], 0);
		T_QUIET; T_EXPECT_TRUE(ret, "EDGE_REBAL_RUNNING %s->%c IPI tracepoint %d",
		    target_name, rebalancing_type, i);
	}
}

SCHED_POLICY_T_DECL(migration_steal_order, "Verify that steal policy steps "
    "happen in the right order")
{
	int sched_bucket = TH_BUCKET_SHARE_DF;
	init_migration_harness(two_of_each);
	for (int config = 0; config < 2; config++) {
		/*
		 * Enqueue one thread of each recommendation type on each pset,
		 * and set one thread of each recommendation type on each pset
		 * running on a core.
		 */
		struct thread_group *p_tg = create_tg(0);
		int p_pset = 1;
		set_tg_sched_bucket_preferred_pset(p_tg, sched_bucket, p_pset);
		struct thread_group *e_tg = create_tg(0);
		int e_pset = 0;
		set_tg_sched_bucket_preferred_pset(e_tg, sched_bucket, e_pset);
		test_thread_t threads[thread_type_max][4][recc_type_max];
		for (int p = 0; p < two_of_each.num_psets; p++) {
			for (recc_type_t r = 0; r < recc_type_max; r++) {
				threads[enqueued][p][r] = create_thread(sched_bucket, (r == e_recc) ? e_tg : p_tg,
				    root_bucket_to_highest_pri[sched_bucket]);
				enqueue_thread(pset_target(p), threads[enqueued][p][r]);
				T_LOG("Enqueued thread %p on pset %d, recc %d", threads[enqueued][p][r], p, r);
				threads[running][p][r] = create_thread(sched_bucket, (r == e_recc) ? e_tg : p_tg,
				    root_bucket_to_highest_pri[sched_bucket]);
				int run_cpu_id = pset_id_to_cpu_id(p) + r;
				cpu_set_thread_current(run_cpu_id, threads[running][p][r]);
			}
		}
		int other_p_pset = 3;
		int other_e_pset = 2;
		if (config == 0) {
			/* ~~~~~ P-core steal/idle path ~~~~~ */
			/* 1. Foreign rebalance steal */
			foreign_steal_expect(other_p_pset, e_pset, threads, p_recc);
			foreign_steal_expect(other_p_pset, other_e_pset, threads, p_recc);
			/* 2. Native work-steal */
			work_steal_expect(other_p_pset, p_pset, threads, p_recc);
			/* 3. Running rebalance */
			no_steal_expect(other_p_pset, "Want to perform running rebalance");
			running_rebalance_expect(other_p_pset, "E", 2,
			    (int[]){pset_id_to_cpu_id(e_pset) + p_recc, pset_id_to_cpu_id(other_e_pset) + p_recc});
			cpu_clear_thread_current(pset_id_to_cpu_id(e_pset) + p_recc);
			cpu_clear_thread_current(pset_id_to_cpu_id(other_e_pset) + p_recc);
			/* 4. Work-steal from anywhere allowed */
			no_steal_expect(other_p_pset, "Nothing left a P-core wants to steal");
			SCHED_POLICY_PASS("Verified steal order steps for stealing P-core");
		} else {
			/* ~~~~~ E-core steal/idle path ~~~~~ */
			/* 1. Foreign rebalance steal */
			/* Foreign pset search starts with highest id */
			foreign_steal_expect(other_e_pset, p_pset, threads, e_recc);
			foreign_steal_expect(other_e_pset, other_p_pset, threads, e_recc);
			/* 2. Native work-steal */
			work_steal_expect(other_e_pset, e_pset, threads, e_recc);
			work_steal_expect(other_e_pset, e_pset, threads, p_recc);
			/* 3. Running rebalance */
			no_steal_expect(other_e_pset, "Want to perform running rebalance");
			running_rebalance_expect(other_e_pset, "P", 2,
			    (int[]){pset_id_to_cpu_id(p_pset) + e_recc, pset_id_to_cpu_id(other_p_pset) + e_recc});
			cpu_clear_thread_current(pset_id_to_cpu_id(p_pset) + e_recc);
			cpu_clear_thread_current(pset_id_to_cpu_id(other_p_pset) + e_recc);
			/* 4. Work-steal from anywhere allowed */
			for (int i = 0; i < 2; i++) {
				int src_pset = (i == 0) ? other_p_pset : p_pset;
				no_steal_expect(other_e_pset, "Non-zero edge (P->E) steal requires excess "
				    "threads in the runqueue");
				cpu_set_thread_current(pset_id_to_cpu_id(src_pset) + e_recc,
				    create_thread(sched_bucket, p_tg, root_bucket_to_highest_pri[sched_bucket]));
				work_steal_expect(other_e_pset, src_pset, threads, p_recc);
			}
			no_steal_expect(other_e_pset, "Nothing left of interest to steal");
			SCHED_POLICY_PASS("Verified steal order steps for stealing E-core");
		}
		clear_threads_from_topo();
	}
}

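/* When set, downgrade the per-steal T_EXPECT in work_steal_expect_simple() to T_QUIET */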
static bool shush = false;

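/*
 * Expect a steal from stealing_pset to return exactly stolen_thread, along
 * with the matching EDGE_STEAL tracepoint.
 */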
static void
work_steal_expect_simple(int stealing_pset, int stolen_from_pset,
    test_thread_t stolen_thread, char *msg)
{
	int ret;
	test_thread_t found_thread = cpu_steal_thread(pset_id_to_cpu_id(stealing_pset));
	if (shush) {
		T_QUIET;
	}
	T_EXPECT_EQ(found_thread, stolen_thread, msg);
	ret = tracepoint_expect(EDGE_STEAL, get_thread_tid(stolen_thread), stealing_pset, stolen_from_pset, 0);
	T_QUIET; T_EXPECT_TRUE(ret, "EDGE_STEAL tracepoint for %s", msg);
}

SCHED_POLICY_T_DECL(migration_steal_only_excess_by_qos, "Verify that steal logic "
    "only steals across heterogeneous psets when there are excess threads at that QoS")
{
	init_migration_harness(dual_die);
	int p_pset = 1;
	int p_pset_cpus = get_hw_topology().psets[p_pset].num_cpus;
	int e_pset = 0;
	int other_p_pset = 2;

	/* Load P-pset core-by-core until there's an excess thread for E-pset to steal */
	test_thread_t default_threads[p_pset_cpus + 1];
	struct thread_group *tg = create_tg(0);
	set_tg_sched_bucket_preferred_pset(tg, TH_BUCKET_SHARE_DF, p_pset);
	for (int i = 0; i < p_pset_cpus + 1; i++) {
		default_threads[i] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	}
	for (int i = 0; i < p_pset_cpus; i++) {
		enqueue_thread(pset_target(p_pset), default_threads[i]);
		increment_mock_time_us(5); // Get FIFO order out
		no_steal_expect(e_pset, "No excess threads yet");
	}
	enqueue_thread(pset_target(p_pset), default_threads[p_pset_cpus]);
	work_steal_expect_simple(e_pset, p_pset, default_threads[0], "P->E Excess thread stolen");
	no_steal_expect(e_pset, "Back to no excess threads");
	/* Allow P-pset to swipe up non-excess threads */
	for (int i = 1; i < p_pset_cpus + 1; i++) {
		work_steal_expect_simple(other_p_pset, p_pset, default_threads[i],
		    "Homogeneous (P->P) can steal non-excess threads");
	}
	no_steal_expect(other_p_pset, "All threads stolen already");
	SCHED_POLICY_PASS("Heterogeneous psets only steal excess threads, while homogeneous steal any");
	clear_threads_from_topo();

	/* Enqueue "pyramid" of threads at different QoSes */
	test_thread_t per_qos_threads[TH_BUCKET_SCHED_MAX];
	for (int bucket = 0; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		set_tg_sched_bucket_preferred_pset(tg, bucket, p_pset);
		per_qos_threads[bucket] = create_thread(bucket, tg, root_bucket_to_highest_pri[bucket]);
		if (bucket == 0) {
			set_thread_sched_mode(per_qos_threads[bucket], TH_MODE_FIXED);
		}
	}
	for (int bucket = 0; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		enqueue_thread(pset_target(p_pset), per_qos_threads[bucket]);
		if (bucket < p_pset_cpus) {
			no_steal_expect(e_pset, "No excess threads yet");
		}
	}
	for (int qos_with_excess = p_pset_cpus; qos_with_excess < TH_BUCKET_SCHED_MAX; qos_with_excess++) {
		work_steal_expect_simple(e_pset, p_pset, per_qos_threads[qos_with_excess],
		    "Steal from highest QoS with non-idle load");
	}
	SCHED_POLICY_PASS("Heterogeneous psets only steal from excess QoSes");
}

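/*
 * Minimal homogeneous topology: two single-CPU P-clusters on one die, used by
 * the cluster-bound and highest-priority steal tests below.
 */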
static test_pset_t pair_p_psets[2] = {
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 1,
		.cluster_id = 0,
		.die_id = 0,
	},
	{
		.cpu_type = TEST_CPU_TYPE_PERFORMANCE,
		.num_cpus = 1,
		.cluster_id = 1,
		.die_id = 0,
	},
};
test_hw_topology_t pair_p = {
	.psets = &pair_p_psets[0],
	.num_psets = 2,
	.total_cpus = 2,
};
715*043036a2SApple OSS Distributions
716*043036a2SApple OSS Distributions SCHED_POLICY_T_DECL(migration_steal_no_cluster_bound,
717*043036a2SApple OSS Distributions "Verify that cluster-bound threads do not get stolen to a different pset")
718*043036a2SApple OSS Distributions {
719*043036a2SApple OSS Distributions init_migration_harness(pair_p);
720*043036a2SApple OSS Distributions int load_multiplier = 10;
721*043036a2SApple OSS Distributions int loaded_pset = 0;
722*043036a2SApple OSS Distributions int idle_pset = 1;
723*043036a2SApple OSS Distributions int num_bound_threads = pair_p.psets[loaded_pset].num_cpus * load_multiplier;
724*043036a2SApple OSS Distributions enum { eBound = 0, eNativeFirst = 1, eRoundRobin = 2, eMax = 3 } bound_type;
	test_thread_t bound_threads[eMax][num_bound_threads];
	struct thread_group *tg = create_tg(0);
	for (bound_type = 0; bound_type < eMax; bound_type++) {
		for (int i = 0; i < num_bound_threads; i++) {
			bound_threads[bound_type][i] = create_thread(TH_BUCKET_SHARE_DF, tg,
			    root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
			switch (bound_type) {
			case eBound:
				set_thread_cluster_bound(bound_threads[bound_type][i], loaded_pset);
				break;
			case eNativeFirst:
				edge_set_thread_shared_rsrc(bound_threads[bound_type][i], true);
				break;
			case eRoundRobin:
				edge_set_thread_shared_rsrc(bound_threads[bound_type][i], false);
				break;
			default:
				T_QUIET; T_ASSERT_FAIL("Invalid bound case");
			}
			increment_mock_time_us(5); // Advance mock time so threads enqueue in FIFO order
			enqueue_thread(pset_target(loaded_pset), bound_threads[bound_type][i]);
		}
		no_steal_expect(idle_pset, "Refuse to steal cluster bound threads");
	}
	test_thread_t unbound_thread = create_thread(TH_BUCKET_SHARE_DF, tg,
	    root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	increment_mock_time_us(5);
	enqueue_thread(pset_target(loaded_pset), unbound_thread);
	work_steal_expect_simple(idle_pset, loaded_pset, unbound_thread,
	    "Pluck out the unbound thread to steal");
	no_steal_expect(idle_pset, "Still refuse to steal cluster bound threads");
	SCHED_POLICY_PASS("Cluster bound threads cannot be stolen");
}

SCHED_POLICY_T_DECL(migration_steal_highest_pri,
    "Verify that higher priority threads are stolen first, across silos")
{
	init_migration_harness(pair_p);
	int idle_pset = 0;
	int loaded_pset = 1;
	int max_pri_to_subtract = 4;
	int high_bucket = TH_BUCKET_SHARE_FG;
	int low_bucket = TH_BUCKET_SHARE_BG;
	int num_buckets = low_bucket - high_bucket + 1;
	int num_silos = 2;
	int num_threads = num_silos * num_buckets * (max_pri_to_subtract + 1);
	test_thread_t threads[num_threads];
#define silo_bucket_pri_to_ind(silo, bucket, sub_pri) \
	(silo * (num_buckets * (max_pri_to_subtract + 1)) + \
	(bucket - high_bucket) * ((max_pri_to_subtract + 1)) + sub_pri)
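	/*
	 * Flattens (silo, bucket, sub_pri) into an index into threads[]: threads are
	 * grouped by silo, then by bucket from high_bucket through low_bucket, then
	 * by how far below the bucket's highest priority the thread was created.
	 * E.g. (silo 1, high_bucket, sub_pri 2) maps to
	 * 1 * num_buckets * (max_pri_to_subtract + 1) + 0 + 2.
	 */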
	/* Create a bunch of threads for the different silos, buckets, and priority values */
	for (int s = 0; s < num_silos; s++) {
		struct thread_group *silo_tg = create_tg(0);
		for (int b = high_bucket; b <= low_bucket; b++) {
			set_tg_sched_bucket_preferred_pset(silo_tg, b, s);
			for (int p = 0; p <= max_pri_to_subtract; p++) {
				threads[silo_bucket_pri_to_ind(s, b, p)] =
				    create_thread(b, silo_tg, root_bucket_to_highest_pri[b] - p);
			}
		}
	}
	/* Despite enqueueing in a random order, the threads should be stolen out in priority order */
	int rand_seed = 777777;
	enqueue_threads_arr_rand_order(pset_target(loaded_pset), rand_seed, num_threads, threads);
	shush = true; // Quiet work_steal_expect_simple()'s expects
	for (int b = high_bucket; b <= low_bucket; b++) {
		for (int p = 0; p <= max_pri_to_subtract; p++) {
			for (int s = 0; s < num_silos; s++) {
				T_QUIET; work_steal_expect_simple(idle_pset, loaded_pset,
				    threads[silo_bucket_pri_to_ind(s, b, p)], "Higher pri threads stolen first");
			}
		}
	}
	shush = false;
	no_steal_expect(idle_pset, "Already stole all the threads");
	SCHED_POLICY_PASS("Higher priority threads stolen first across silos");
}

SCHED_POLICY_T_DECL(migration_harmonious_chosen_pset,
    "Verify that different migration mechanisms agree about where a thread "
    "should be, given current system conditions")
{
	int ret;
	test_hw_topology_t topo = SCHED_POLICY_DEFAULT_TOPO;
	init_migration_harness(topo);
	int sched_bucket = TH_BUCKET_SHARE_DF;
	struct thread_group *tg = create_tg(0);
	thread_t thread = create_thread(sched_bucket, tg, root_bucket_to_highest_pri[sched_bucket]);
	int max_load_threads = 20;
	test_thread_t load_threads[max_load_threads];
	for (int i = 0; i < max_load_threads; i++) {
		load_threads[i] = create_thread(sched_bucket, tg, root_bucket_to_highest_pri[sched_bucket]);
	}

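	/*
	 * Each scenario below checks that four mechanisms agree on the thread's
	 * placement: choose_pset_for_thread(), thread_avoid_processor_expect(),
	 * cpu_steal_thread(), and cpu_processor_balance().
	 */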
	/* Iterate conditions with different preferred psets and pset loads */
	for (int preferred_pset_id = 0; preferred_pset_id < topo.num_psets; preferred_pset_id++) {
		set_tg_sched_bucket_preferred_pset(tg, sched_bucket, preferred_pset_id);
		sched_policy_push_metadata("preferred_pset_id", preferred_pset_id);
		for (int loaded_pset_id = 0; loaded_pset_id < topo.num_psets; loaded_pset_id++) {
			/* Load the loaded_pset */
			enqueue_threads_arr(pset_target(loaded_pset_id), max_load_threads, load_threads);
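			/* All of the load went to loaded_pset, so the preferred pset is idle iff it is a different pset */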
			bool preferred_is_idle = preferred_pset_id != loaded_pset_id;
			sched_policy_push_metadata("loaded_pset_id", loaded_pset_id);

			/* Where the thread proactively wants to go */
			int chosen_pset = choose_pset_for_thread(thread);
			bool chose_the_preferred_pset = chosen_pset == preferred_pset_id;
			if (preferred_is_idle) {
				T_QUIET; T_EXPECT_TRUE(chose_the_preferred_pset, "Should always choose the preferred pset when it is idle %s",
				    sched_policy_dump_metadata());
			}

			/* Thread generally should not avoid a processor in its chosen pset */
			for (int c = 0; c < topo.psets[chosen_pset].num_cpus; c++) {
				int avoid_cpu_id = pset_id_to_cpu_id(chosen_pset) + c;
				sched_policy_push_metadata("avoid_cpu_id", avoid_cpu_id);
				ret = thread_avoid_processor_expect(thread, avoid_cpu_id, false, false);
				T_QUIET; T_EXPECT_TRUE(ret, "Thread should not want to leave a processor in its just-chosen pset %s",
				    sched_policy_dump_metadata());
				sched_policy_pop_metadata();
			}

			/* Extra assertions we can make based on the preferred pset being idle */
			if (preferred_is_idle) {
				/* Thread should avoid processor in non-preferred pset to get to the idle preferred pset */
				for (int c = 0; c < topo.total_cpus; c++) {
					if (cpu_id_to_pset_id(c) != preferred_pset_id) {
						sched_policy_push_metadata("avoid_non_preferred_cpu_id", c);
						ret = thread_avoid_processor_expect(thread, c, false, true);
						T_QUIET; T_EXPECT_TRUE(ret, "Thread should avoid processor in non-preferred pset to get to idle "
						    "preferred pset %s", sched_policy_dump_metadata());
						sched_policy_pop_metadata();
					}
				}
			}

			/* Other cores should not want to rebalance the running thread away from its chosen pset */
			int chosen_cpu = pset_id_to_cpu_id(chosen_pset);
			cpu_set_thread_current(chosen_cpu, thread);
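			/*
			 * With the thread now running on its chosen pset, no CPU outside the
			 * chosen and loaded psets should steal it or decide a rebalance is warranted.
			 */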
			for (int c = 0; c < topo.total_cpus; c++) {
				if ((cpu_id_to_pset_id(c) != chosen_pset) && (cpu_id_to_pset_id(c) != loaded_pset_id)) {
					sched_policy_push_metadata("stealing_cpu_id", c);
					thread_t stolen_thread = cpu_steal_thread(c);
					if (stolen_thread != NULL) {
						T_QUIET; T_EXPECT_NE(stolen_thread, thread, "Should not steal the thread back from its chosen_pset %s",
						    sched_policy_dump_metadata());
						if (stolen_thread != thread) {
							/* Put back the stolen load thread */
							enqueue_thread(pset_target(loaded_pset_id), stolen_thread);
						}
					}
					bool want_rebalance = cpu_processor_balance(c);
					T_QUIET; T_EXPECT_FALSE(want_rebalance, "Should not rebalance thread away from its chosen_pset %s",
					    sched_policy_dump_metadata());
					sched_policy_pop_metadata();
				}
			}

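			/* Drain and verify the runqueues so the next (preferred, loaded) combination starts clean */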
			(void)dequeue_threads_expect_ordered_arr(pset_target(loaded_pset_id), max_load_threads, load_threads);
			clear_threads_from_topo();
			for (int pset = 0; pset < topo.num_psets; pset++) {
				T_QUIET; T_EXPECT_TRUE(runqueue_empty(pset_target(pset)), "pset %d wasn't cleared at the end of test "
				    "scenario %s", pset, sched_policy_dump_metadata());
			}
			sched_policy_pop_metadata();
		}
		sched_policy_pop_metadata();
	}
	SCHED_POLICY_PASS("Policy is harmonious on the subject of a thread's chosen pset");
}

SCHED_POLICY_T_DECL(migration_search_order,
    "Verify that we iterate psets for spill and steal in the expected order")
{
	int ret;
	init_migration_harness(dual_die);
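	/*
	 * expected_orders[src][k] is the k-th pset visited when searching from pset
	 * src with all psets as candidates; per the pass condition below, the order
	 * follows migration weight, then locality, then pset id.
	 */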
	int expected_orders[6][6] = {
		{0, 3, 1, 2, 4, 5},
		{1, 2, 4, 5, 0, 3},
		{2, 1, 4, 5, 0, 3},
		{3, 0, 4, 5, 1, 2},
		{4, 5, 1, 2, 3, 0},
		{5, 4, 1, 2, 3, 0},
	};
	for (int src_pset_id = 0; src_pset_id < dual_die.num_psets; src_pset_id++) {
		ret = iterate_pset_search_order_expect(src_pset_id, UINT64_MAX, 0, expected_orders[src_pset_id], dual_die.num_psets);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Mismatched search order at ind %d for src_pset_id %d",
		    ret, src_pset_id);
	}
	SCHED_POLICY_PASS("Search order sorts on migration weight, then locality, then pset id");
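	/*
	 * Repeat with candidate masks: bit i of the mask admits pset i, so p_mask
	 * covers psets 1, 2, 4, 5 and e_mask covers psets 0 and 3. The -1 entries
	 * pad the slots that should never be visited.
	 */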
	uint64_t p_mask = 0b110110;
	int expected_p_orders[6][6] = {
		{1, 2, 4, 5, -1, -1},
		{1, 2, 4, 5, -1, -1},
		{2, 1, 4, 5, -1, -1},
		{4, 5, 1, 2, -1, -1},
		{4, 5, 1, 2, -1, -1},
		{5, 4, 1, 2, -1, -1},
	};
	uint64_t e_mask = 0b001001;
	int expected_e_orders[6][6] = {
		{0, 3, -1, -1, -1, -1},
		{0, 3, -1, -1, -1, -1},
		{0, 3, -1, -1, -1, -1},
		{3, 0, -1, -1, -1, -1},
		{3, 0, -1, -1, -1, -1},
		{3, 0, -1, -1, -1, -1},
	};
	for (int i = 0; i < 2; i++) {
		for (int src_pset_id = 0; src_pset_id < dual_die.num_psets; src_pset_id++) {
			uint64_t mask = (i == 0) ? p_mask : e_mask;
			int *expected_order_masked = (i == 0) ? expected_p_orders[src_pset_id] : expected_e_orders[src_pset_id];
			ret = iterate_pset_search_order_expect(src_pset_id, mask, 0, expected_order_masked, dual_die.num_psets);
			T_QUIET; T_EXPECT_EQ(ret, -1, "Mismatched masked search order at ind %d for src_pset_id %d",
			    ret, src_pset_id);
		}
	}
	SCHED_POLICY_PASS("Search order traversal respects candidate mask");
}