xref: /xnu-12377.41.6/tests/sched/clutch_runqueue.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 // Copyright (c) 2023 Apple Inc.  All rights reserved.
2 
3 #include <string.h>
4 
5 #include "sched_test_harness/sched_policy_darwintest.h"
6 #include "sched_test_harness/sched_clutch_harness.h"
7 
8 T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler"),
9     T_META_RADAR_COMPONENT_NAME("xnu"),
10     T_META_RADAR_COMPONENT_VERSION("scheduler"),
11     T_META_RUN_CONCURRENTLY(true),
12     T_META_OWNER("emily_peterson"));
13 
/* Number of distinct randomized enqueue orderings exercised per scenario */
#define NUM_RAND_SEEDS 5
/* Fixed, arbitrary seeds so the shuffled enqueue orders are reproducible across runs */
static unsigned int rand_seeds[NUM_RAND_SEEDS] = {377111, 2738572, 1717171, 4990221, 777777};
16 
/*
 * Validates how processor-bound threads are ordered against regular
 * (unbound) timeshare threads in the Clutch runqueue: bound threads win
 * priority tie-breaks, and otherwise the Clutch hierarchy is represented
 * against them by the highest-pri thread of its highest-pri Clutch bucket.
 */
SCHED_POLICY_T_DECL(runq_processor_bound,
    "Processor-bound threads vs. Regular threads")
{
	int ret;
	init_runqueue_harness();

	/* Thread groups at both extremes of the interactivity-score range */
	struct thread_group *high_tg = create_tg(clutch_interactivity_score_max);
	struct thread_group *low_tg = create_tg(0);

	/* Bound/unbound pairs with matching bucket and priority, so only boundness differs */
	test_thread_t lowest_bound = create_thread(TH_BUCKET_SHARE_BG, low_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_BG]);
	set_thread_processor_bound(lowest_bound, get_default_cpu());
	test_thread_t highest_bound = create_thread(TH_BUCKET_SHARE_IN, high_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_IN]);
	set_thread_processor_bound(highest_bound, get_default_cpu());
	test_thread_t lowest_unbound = create_thread(TH_BUCKET_SHARE_BG, low_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_BG]);
	test_thread_t highest_unbound = create_thread(TH_BUCKET_SHARE_IN, high_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_IN]);

	/*
	 * Regardless of enqueue order, each bound thread should dequeue ahead
	 * of its equal-priority unbound counterpart.
	 * dequeue_threads_expect_ordered returns -1 when the full expected
	 * order was observed (otherwise the index of the first mismatch).
	 */
	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
		enqueue_threads_rand_order(default_target, rand_seeds[i], 4, lowest_bound, highest_bound, lowest_unbound, highest_unbound);
		ret = dequeue_threads_expect_ordered(default_target, 4, highest_bound, highest_unbound, lowest_bound, lowest_unbound);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Processor-bound failed to win tie-break");
		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Processor-bound threads win priority tie-breaker");

	/*
	 * Mixed priorities and interactivity scores: the bound thread (DF pri-1)
	 * competes against unbound threads whose Clutch root is represented by
	 * the highest-pri thread in its highest-pri (most interactive) bucket.
	 */
	test_thread_t bound = create_thread(TH_BUCKET_SHARE_DF, low_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF] - 1);
	set_thread_processor_bound(bound, get_default_cpu());
	test_thread_t higherpri_unbound = create_thread(TH_BUCKET_SHARE_DF, low_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	test_thread_t interactive_higherpri_unbound = create_thread(TH_BUCKET_SHARE_DF, high_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	test_thread_t interactive_lowerpri_unbound = create_thread(TH_BUCKET_SHARE_DF, high_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF] - 2);
	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
		enqueue_threads_rand_order(default_target, rand_seeds[i], 4, bound, higherpri_unbound, interactive_higherpri_unbound, interactive_lowerpri_unbound);
		ret = dequeue_threads_expect_ordered(default_target, 4, interactive_higherpri_unbound, bound, interactive_lowerpri_unbound, higherpri_unbound);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Priority and Clutch interactivity score not factored correctly against processor-bound thread");
		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Clutch root represented against processor-bound threads by highest pri thread in the highest pri Clutch bucket");
}
54 
55 SCHED_POLICY_T_DECL(runq_aboveui,
56     "Above UI vs. timeshare FG root buckets")
57 {
58 	int ret;
59 	init_runqueue_harness();
60 
61 	struct thread_group *same_tg = create_tg(clutch_interactivity_score_max);
62 	test_thread_t aboveui = create_thread(TH_BUCKET_FIXPRI, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
63 	set_thread_sched_mode(aboveui, TH_MODE_FIXED);
64 	test_thread_t low_fg = create_thread(TH_BUCKET_SHARE_FG, same_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_FG]);
65 	test_thread_t high_fg = create_thread(TH_BUCKET_SHARE_FG, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI] + 1);
66 
67 	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
68 		enqueue_threads_rand_order(default_target, rand_seeds[i], 3, aboveui, low_fg, high_fg);
69 		ret = dequeue_threads_expect_ordered(default_target, 3, high_fg, aboveui, low_fg);
70 		T_QUIET; T_EXPECT_EQ(ret, -1, "Aboveui vs. foreground threads dequeued out of order");
71 		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
72 	}
73 	SCHED_POLICY_PASS("Aboveui vs. foreground ordered according to priority");
74 }
75 
/*
 * Exercises root-bucket selection across the timeshare buckets
 * (FG..BG): plain EDF order, warping (a high bucket jumping ahead of an
 * earlier-deadline low bucket within its warp budget), and Starvation
 * Avoidance Mode once warps are exhausted.
 */
SCHED_POLICY_T_DECL(runq_diff_root_bucket,
    "Different root buckets (EDF, Starvation Avoidance Mode, and Warp)")
{
	int ret;
	init_runqueue_harness();

	struct thread_group *same_tg = create_tg(0);
	/* One thread per timeshare root bucket (TH_BUCKET_SHARE_FG..BG) */
	int num_threads = TH_BUCKET_SCHED_MAX - 1;
	test_thread_t threads[num_threads];
	/* Same threads as `threads`, in reverse bucket order (BG first) */
	test_thread_t rev_threads[num_threads];
	/* A second thread per bucket used to trigger warps */
	test_thread_t warper_threads[num_threads];
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		threads[bucket - 1] = create_thread(bucket, same_tg, root_bucket_to_highest_pri[bucket]);
		rev_threads[num_threads - bucket] = threads[bucket - 1];
		warper_threads[bucket - 1] = create_thread(bucket, same_tg, root_bucket_to_highest_pri[bucket]);
	}

	/* Validate natural EDF between root buckets */
	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
		enqueue_threads_arr_rand_order(default_target, rand_seeds[i], num_threads, threads);
		ret = dequeue_threads_expect_ordered_arr(default_target, num_threads, threads);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Root buckets dequeued out of EDF order, after the first %d threads dequeued were correct", ret);
		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Basic EDF root bucket order respected");

	/* Warp lets high root buckets win despite reverse ordering of root bucket deadlines */
	/* Enqueue BG first and advance mock time between buckets so that lower
	 * buckets end up with strictly earlier deadlines than higher ones. */
	for (int bucket = TH_BUCKET_SHARE_BG; bucket >= TH_BUCKET_SHARE_FG; bucket--) {
		if (bucket < TH_BUCKET_SHARE_BG) {
			increment_mock_time_us(clutch_root_bucket_wcel_us[bucket + 1] - clutch_root_bucket_wcel_us[bucket] + 1);
		}
		enqueue_thread(default_target, warper_threads[bucket - 1]);
		enqueue_thread(default_target, threads[bucket - 1]);
	}
	/* Each bucket should warp ahead, and its warp window should remain open
	 * for a second dequeue halfway through the warp budget. */
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		ret = dequeue_thread_expect(default_target, warper_threads[bucket - 1]);
		T_QUIET; T_EXPECT_TRUE(ret, "Root bucket %d failed to warp ahead", bucket);
		increment_mock_time_us(clutch_root_bucket_warp_us[bucket] / 2);
		ret = dequeue_thread_expect(default_target, threads[bucket - 1]);
		T_QUIET; T_EXPECT_TRUE(ret, "Root bucket %d's warp window failed to stay open", bucket);
		/* Exhaust the remainder of this bucket's warp window */
		increment_mock_time_us(clutch_root_bucket_warp_us[bucket] / 2 + 1);
	}
	T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Warping and Warp Windows respected");

	/* After Warp is exhausted, Starvation Avoidance Mode kicks in to choose the buckets in EDF order */
	for (int bucket = TH_BUCKET_SHARE_BG; bucket >= TH_BUCKET_SHARE_FG; bucket--) {
		if (bucket < TH_BUCKET_SHARE_BG) {
			increment_mock_time_us(clutch_root_bucket_wcel_us[bucket + 1] - clutch_root_bucket_wcel_us[bucket] + 1);
		}
		enqueue_thread(default_target, threads[bucket - 1]);
	}
	/* With no warp budget left, dequeues follow deadline order (BG first here) */
	ret = dequeue_threads_expect_ordered_arr(default_target, num_threads, rev_threads);
	T_QUIET; T_EXPECT_EQ(ret, -1, "Starvation avoidance failed to kick in, after the first %d threads dequeued were correct", ret);
	T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Starvation Avoidance Mode respected");
}
133 
/*
 * Validates ordering of Clutch buckets within one root bucket: higher
 * interactivity score wins, interactivity can outweigh a small sched_pri
 * difference, and equal-priority Clutch buckets are served FIFO.
 */
SCHED_POLICY_T_DECL(runq_diff_clutch_bucket,
    "Same root bucket, different TGs")
{
	int ret;
	init_runqueue_harness();

	/* One TG per possible interactivity score (0..max) */
	int num_tgs = clutch_interactivity_score_max + 1;
	struct thread_group *tgs[num_tgs];
	for (int i = 0; i < num_tgs; i++) {
		tgs[i] = create_tg(i);
	}

	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		/* threads[] is ordered from most to least interactive TG, which is
		 * also the expected dequeue order */
		test_thread_t threads[num_tgs];
		for (int i = 0; i < num_tgs; i++) {
			threads[i] = create_thread(bucket, tgs[clutch_interactivity_score_max - i], root_bucket_to_highest_pri[bucket]);
		}

		for (int i = 0; i < NUM_RAND_SEEDS; i++) {
			enqueue_threads_arr_rand_order(default_target, rand_seeds[i], num_tgs, threads);
			ret = dequeue_threads_expect_ordered_arr(default_target, num_tgs, threads);
			T_QUIET; T_EXPECT_EQ(ret, -1, "Unique interactivity scores dequeued out-of-order, after the first %d threads dequeued were correct", ret);
			T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
		}
	}
	SCHED_POLICY_PASS("Interactivity scores between Clutch buckets respected");

	/* A 2-point interactivity advantage should outweigh a 1-point sched_pri deficit */
	struct thread_group *low_tg = create_tg(clutch_interactivity_score_max / 2);
	struct thread_group *high_tg = create_tg((clutch_interactivity_score_max / 2) + 2);
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		test_thread_t lowpri_but_interactive = create_thread(bucket, high_tg, root_bucket_to_highest_pri[bucket] - 1);
		test_thread_t highpri = create_thread(bucket, low_tg, root_bucket_to_highest_pri[bucket]);

		/* Try both enqueue orders to rule out FIFO effects */
		for (int order = 0; order < 2; order++) {
			enqueue_threads(default_target, 2, (order == 0 ? lowpri_but_interactive : highpri), (order == 0 ? highpri : lowpri_but_interactive));
			ret = dequeue_threads_expect_ordered(default_target, 2, lowpri_but_interactive, highpri);
			T_QUIET; T_EXPECT_EQ(ret, -1, "Pri %d and i-score %d dequeued before pri %d and i-score %d, enqueue-order %d", root_bucket_to_highest_pri[bucket] - 1, (clutch_interactivity_score_max / 2) + 2, root_bucket_to_highest_pri[bucket], clutch_interactivity_score_max / 2, order);
		}

		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Priority correctly combined with interactivity scores to order Clutch buckets");

	/* Two TGs with identical scores: equal-priority Clutch buckets dequeue FIFO */
	struct thread_group *first_tg = create_tg(clutch_interactivity_score_max / 2);
	struct thread_group *second_tg = create_tg(clutch_interactivity_score_max / 2);
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		test_thread_t first = create_thread(bucket, first_tg, root_bucket_to_highest_pri[bucket]);
		test_thread_t second = create_thread(bucket, second_tg, root_bucket_to_highest_pri[bucket]);
		enqueue_threads(default_target, 2, first, second);

		ret = dequeue_threads_expect_ordered(default_target, 2, first, second);
		T_QUIET; T_EXPECT_EQ(ret, -1, "FIFO order disrespected for threads in two Clutch buckets of equal priority");

		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Clutch bucket FIFO order respected, for Clutch buckets with the same priority");
}
191 
/*
 * Validates thread ordering within a single Clutch bucket: sched_pri
 * decides between unequal threads, and equal-priority threads are
 * served FIFO by enqueue time.
 */
SCHED_POLICY_T_DECL(runq_diff_priority,
    "Same root bucket, same TG, different priorities")
{
	int ret;
	init_runqueue_harness();

	struct thread_group *same_tg = create_tg(0);

	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		test_thread_t lowpri = create_thread(bucket, same_tg, root_bucket_to_highest_pri[bucket] - 1);
		test_thread_t highpri = create_thread(bucket, same_tg, root_bucket_to_highest_pri[bucket]);

		/* Try both enqueue orders to rule out FIFO effects */
		for (int order = 0; order < 2; order++) {
			enqueue_threads(default_target, 2, (order == 0 ? lowpri : highpri), (order == 0 ? highpri : lowpri));
			ret = dequeue_threads_expect_ordered(default_target, 2, highpri, lowpri);
			T_QUIET; T_EXPECT_EQ(ret, -1, "Pri %d dequeued before pri %d, enqueue-order %d", root_bucket_to_highest_pri[bucket] - 1, root_bucket_to_highest_pri[bucket], order);
		}

		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("sched_pri order respected, for threads in the same Clutch bucket");

	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		int num_threads = 10;
		test_thread_t threads[num_threads];
		/* Advance mock time between enqueues so each thread has a distinct
		 * enqueue timestamp; expect dequeue in the same (FIFO) order. */
		for (int i = 0; i < num_threads; i++) {
			threads[i] = create_thread(bucket, same_tg, root_bucket_to_highest_pri[bucket]);
			increment_mock_time_us(5);
			enqueue_thread(default_target, threads[i]);
		}
		ret = dequeue_threads_expect_ordered_arr(default_target, num_threads, threads);
		T_QUIET; T_EXPECT_EQ(ret, -1, "FIFO order disrespected for %d threads at pri %d", num_threads, root_bucket_to_highest_pri[bucket]);
		T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Thread FIFO order respected, for threads in the same Clutch bucket with the same sched_pri");
}
228 
229 /*
230  * 64 bits of fourth argument to CLUTCH_THREAD_SELECT expected to
231  * match the following layout, ordered from most to least significant bit:
232  *
233  * (reserved 23)                 (selection_opened_starvation_avoidance_window 1)
234  *        |      (starvation_avoidance_window_close 12)   | (selection_was_edf 1)
235  *        |                                  |            | |   (traverse mode 3)
236  *        v                                  v            v v      v
237  *        r----------------------wc----------sc----------wsbec-----t--v---
238  *                               ^                       ^ ^ ^        ^
239  *                               |                       | | |       (version 4)
240  *                  (warp_window_close 12)               | | (cluster_id 6)
241  *                                                       | (selection_was_cluster_bound 1)
242  *                                   (selection_opened_warp_window 1)
243  */
/* Version field (bits 0-3) of the CLUTCH_THREAD_SELECT trace argument */
#define CTS_VERSION 1ULL
/* Traverse-mode field (bits 4-6) */
#define TRAVERSE_MODE_REMOVE_CONSIDER_CURRENT (1ULL << 4)
#define TRAVERSE_MODE_CHECK_PREEMPT (2ULL << 4)
/* Cluster id field (bits 7-12); cast before shifting so a plain-int id is
 * widened rather than shifted as a (possibly signed) int, and parenthesize
 * the argument so expression arguments expand safely */
#define CLUSTER_ID(id) ((uint64_t)(id) << 7)
#define SELECTION_WAS_EDF (1ULL << 13)
#define SELECTION_WAS_CLUSTER_BOUND (1ULL << 14)
/* Whole expansion parenthesized so the macro composes safely with any
 * surrounding operator; opening a starvation avoidance window implies the
 * selection was EDF */
#define SELECTION_OPENED_STARVATION_AVOIDANCE_WINDOW ((1ULL << 15) | SELECTION_WAS_EDF)
#define SELECTION_OPENED_WARP_WINDOW (1ULL << 16)
/* One bit per (bucket, cluster-bound) window; cluster-bound windows occupy
 * the upper TH_BUCKET_SCHED_MAX bits of each 12-bit close field */
#define WINDOW_MASK(bucket, cluster_bound) (1ULL << ((bucket) + (cluster_bound) * TH_BUCKET_SCHED_MAX))
/* starvation_avoidance_window_close field (bits 17-28) */
#define STARVATION_AVOIDANCE_WINDOW_CLOSE(bucket, cluster_bound) (WINDOW_MASK((bucket), (cluster_bound)) << 17)
/* warp_window_close field (bits 29-40) */
#define WARP_WINDOW_CLOSE(bucket, cluster_bound) (WINDOW_MASK((bucket), (cluster_bound)) << 29)
255 
256 /*
257  * We test the selection_was_cluster_bound bit and cluster_id field gated
258  * on the Edge version of this test case.
259  */
260 
/*
 * Validates the packed fourth argument of MACH_SCHED_CLUTCH_THREAD_SELECT
 * tracepoints (layout documented above) across EDF selections, warp
 * windows, starvation avoidance windows, runq traverse modes, and — on
 * Edge — the cluster-bound bit and cluster id field.
 */
SCHED_POLICY_T_DECL(runq_tracepoint_thread_select,
    "Validate emitted MACH_SCHED_CLUTCH_THREAD_SELECT tracepoints")
{
	int ret;
	uint64_t root_bucket_arg;
	/* Whether root buckets are the cluster-bound variants (Edge only) */
	uint64_t bucket_is_bound = false;
#if CONFIG_SCHED_EDGE
	init_migration_harness(basic_amp);
	bucket_is_bound = true;
#else /* !CONFIG_SCHED_EDGE */
	init_runqueue_harness();
#endif /* CONFIG_SCHED_EDGE */
	disable_auto_current_thread();

	struct thread_group *same_tg = create_tg(0);
	/* One thread per timeshare root bucket, plus a warper thread each */
	int num_threads = TH_BUCKET_SCHED_MAX - 1;
	test_thread_t threads[num_threads];
	test_thread_t rev_threads[num_threads];
	test_thread_t warper_threads[num_threads];
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		threads[bucket - 1] = create_thread(bucket, same_tg, root_bucket_to_highest_pri[bucket]);
		rev_threads[num_threads - bucket] = threads[bucket - 1];
		warper_threads[bucket - 1] = create_thread(bucket, same_tg, root_bucket_to_highest_pri[bucket]);
#if CONFIG_SCHED_EDGE
		set_thread_cluster_bound(threads[bucket - 1], 0);
		set_thread_cluster_bound(warper_threads[bucket - 1], 0);
#endif /* CONFIG_SCHED_EDGE */
	}

	/* Natural EDF */
	enqueue_threads_arr(default_target, num_threads, threads);
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		ret = dequeue_thread_expect(default_target, threads[bucket - 1]);
		T_QUIET; T_EXPECT_TRUE(ret, "Root bucket %d failed to warp ahead", bucket);
		root_bucket_arg = SELECTION_WAS_EDF | CTS_VERSION;
#if CONFIG_SCHED_EDGE
		root_bucket_arg |= SELECTION_WAS_CLUSTER_BOUND;
#endif /* CONFIG_SCHED_EDGE */
		ret = tracepoint_expect(CLUTCH_THREAD_SELECT, (bucket - 1) * 2, 0, bucket, root_bucket_arg);
		T_QUIET; T_EXPECT_TRUE(ret, "EDF CLUTCH_THREAD_SELECT tracepoint");
	}
	T_QUIET; T_ASSERT_EQ(runqueue_empty(default_target), true, "runqueue_empty");
	SCHED_POLICY_PASS("Correct CLUTCH_THREAD_SELECT tracepoint info for EDF selections");

	/* Warp windows */
	/* Enqueue BG first with staggered mock time so deadlines are in reverse
	 * bucket order, forcing the higher buckets to warp */
	for (int bucket = TH_BUCKET_SHARE_BG; bucket >= TH_BUCKET_SHARE_FG; bucket--) {
		if (bucket < TH_BUCKET_SHARE_BG) {
			increment_mock_time_us(clutch_root_bucket_wcel_us[bucket + 1] - clutch_root_bucket_wcel_us[bucket] + 1);
		}
		enqueue_thread(default_target, warper_threads[bucket - 1]);
		enqueue_thread(default_target, threads[bucket - 1]);
	}
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		/* Opens a new warp window */
		ret = dequeue_thread_expect(default_target, warper_threads[bucket - 1]);
		T_QUIET; T_EXPECT_TRUE(ret, "Root bucket %d failed to warp ahead", bucket);
		/* BG is the last bucket and cannot warp past anything: it is selected by EDF */
		root_bucket_arg = (bucket < TH_BUCKET_SHARE_BG ? SELECTION_OPENED_WARP_WINDOW : SELECTION_WAS_EDF) | CTS_VERSION;
#if CONFIG_SCHED_EDGE
		root_bucket_arg |= SELECTION_WAS_CLUSTER_BOUND;
#endif /* CONFIG_SCHED_EDGE */
		ret = tracepoint_expect(CLUTCH_THREAD_SELECT, bucket * 2 - 1, 0, bucket, root_bucket_arg);
		T_QUIET; T_EXPECT_TRUE(ret, "Open warp window CLUTCH_THREAD_SELECT tracepoint");

		/* Makes use of the opened warp window */
		increment_mock_time_us(clutch_root_bucket_warp_us[bucket] / 2);
		ret = dequeue_thread_expect(default_target, threads[bucket - 1]);
		T_QUIET; T_EXPECT_TRUE(ret, "Root bucket %d's warp window failed to stay open", bucket);
		root_bucket_arg = (bucket < TH_BUCKET_SHARE_BG ? 0 : SELECTION_WAS_EDF) | CTS_VERSION;
#if CONFIG_SCHED_EDGE
		root_bucket_arg |= SELECTION_WAS_CLUSTER_BOUND;
#endif /* CONFIG_SCHED_EDGE */
		ret = tracepoint_expect(CLUTCH_THREAD_SELECT, bucket * 2 - 2, 0, bucket, root_bucket_arg);
		T_QUIET; T_EXPECT_TRUE(ret, "Active warp window CLUTCH_THREAD_SELECT tracepoint");

		/* Exhaust the remainder of this bucket's warp window */
		increment_mock_time_us(clutch_root_bucket_warp_us[bucket] / 2 + 1);
	}
	T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Correct CLUTCH_THREAD_SELECT tracepoint info for warp windows");

	/* Starvation avoidance windows */
	for (int bucket = TH_BUCKET_SHARE_BG; bucket >= TH_BUCKET_SHARE_FG; bucket--) {
		if (bucket < TH_BUCKET_SHARE_BG) {
			increment_mock_time_us(clutch_root_bucket_wcel_us[bucket + 1] - clutch_root_bucket_wcel_us[bucket] + 1);
		}
		enqueue_thread(default_target, threads[bucket - 1]);
	}
	/* Warps are spent, so dequeues proceed in deadline (reverse bucket) order */
	for (int bucket = TH_BUCKET_SHARE_BG; bucket >= TH_BUCKET_SHARE_FG; bucket--) {
		ret = dequeue_thread_expect(default_target, threads[bucket - 1]);
		T_QUIET; T_EXPECT_TRUE(ret, "Starvation avoidance failed to kick in for bucket %d", bucket);
		root_bucket_arg = SELECTION_WAS_EDF | CTS_VERSION;
#if CONFIG_SCHED_EDGE
		root_bucket_arg |= SELECTION_WAS_CLUSTER_BOUND;
#endif /* CONFIG_SCHED_EDGE */
		if (bucket == TH_BUCKET_SHARE_BG) {
			/* Enough time has passed for the warp windows opened in the last phase to be closed in one go */
			for (int warping_bucket = TH_BUCKET_SHARE_FG; warping_bucket < TH_BUCKET_SHARE_BG; warping_bucket++) {
				root_bucket_arg |= WARP_WINDOW_CLOSE(warping_bucket, bucket_is_bound);
			}
		}
		if (bucket > TH_BUCKET_SHARE_FG) {
			/* FG has the latest deadline here, so every bucket above it opens
			 * a starvation avoidance window when selected out of turn */
			root_bucket_arg |= SELECTION_OPENED_STARVATION_AVOIDANCE_WINDOW;
		}
		ret = tracepoint_expect(CLUTCH_THREAD_SELECT, (bucket - 1) * 2, 0, bucket, root_bucket_arg);
		T_QUIET; T_EXPECT_TRUE(ret, "Open starvation avoidance window CLUTCH_THREAD_SELECT tracepoint");
	}
	/* Advance far enough that all starvation avoidance windows expire */
	increment_mock_time_us(clutch_root_bucket_wcel_us[TH_BUCKET_SHARE_BG]);
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		enqueue_thread(default_target, threads[bucket - 1]);
	}
	for (int bucket = TH_BUCKET_SHARE_FG; bucket < TH_BUCKET_SCHED_MAX; bucket++) {
		ret = dequeue_thread_expect(default_target, threads[bucket - 1]);
		T_QUIET; T_EXPECT_TRUE(ret, "EDF dequeue for bucket %d", bucket);
		root_bucket_arg = SELECTION_WAS_EDF | CTS_VERSION;
#if CONFIG_SCHED_EDGE
		root_bucket_arg |= SELECTION_WAS_CLUSTER_BOUND;
#endif /* CONFIG_SCHED_EDGE */
		if (bucket == TH_BUCKET_SHARE_FG) {
			/* Enough time has passed for the starvation avoidance windows opened in the last phase to be closed in one go */
			for (int starved_bucket = TH_BUCKET_SHARE_BG; starved_bucket > TH_BUCKET_SHARE_FG; starved_bucket--) {
				root_bucket_arg |= STARVATION_AVOIDANCE_WINDOW_CLOSE(starved_bucket, bucket_is_bound);
			}
		}
		ret = tracepoint_expect(CLUTCH_THREAD_SELECT, (bucket - 1) * 2, 0, bucket, root_bucket_arg);
		T_QUIET; T_EXPECT_TRUE(ret, "Closing starvation avoidance window or EDF CLUTCH_THREAD_SELECT tracepoint");
	}
	T_QUIET; T_ASSERT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Correct CLUTCH_THREAD_SELECT tracepoint info for starvation avoidance windows");

	/* Different runq traverse modes */
	cpu_set_thread_current(get_default_cpu(), threads[0]);
	enqueue_thread(default_target, threads[1]);
	/* REMOVE_CONSIDER_CURRENT: the on-core thread competes with the runq */
	ret = cpu_dequeue_thread_expect_compare_current(get_default_cpu(), threads[0]);
	T_QUIET; T_EXPECT_TRUE(ret, "EDF dequeue current thread for bucket");
	root_bucket_arg = TRAVERSE_MODE_REMOVE_CONSIDER_CURRENT | SELECTION_WAS_EDF | CTS_VERSION;
#if CONFIG_SCHED_EDGE
	root_bucket_arg |= SELECTION_WAS_CLUSTER_BOUND;
#endif /* CONFIG_SCHED_EDGE */
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 0, 0, TH_BUCKET_SHARE_FG, root_bucket_arg);
	T_QUIET; T_EXPECT_TRUE(ret, "Current thread EDF CLUTCH_THREAD_SELECT tracepoint");
	/* CHECK_PREEMPT: ask whether the runq should preempt the current thread */
	ret = cpu_check_preempt_current(get_default_cpu(), false);
	T_QUIET; T_EXPECT_TRUE(ret, "Current thread check preempt");
	root_bucket_arg = TRAVERSE_MODE_CHECK_PREEMPT | SELECTION_WAS_EDF | CTS_VERSION;
#if CONFIG_SCHED_EDGE
	root_bucket_arg |= SELECTION_WAS_CLUSTER_BOUND;
#endif /* CONFIG_SCHED_EDGE */
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 0, 0, TH_BUCKET_SHARE_FG, root_bucket_arg);
	T_QUIET; T_EXPECT_TRUE(ret, "Current thread check preempt CLUTCH_THREAD_SELECT tracepoint");
	SCHED_POLICY_PASS("Correct CLUTCH_THREAD_SELECT tracepoint info for current thread (traverse modes)");
#if CONFIG_SCHED_EDGE
	/* Test the cluster_id field */
	test_thread_t bound_thread = create_thread(TH_BUCKET_SHARE_DF, same_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	set_thread_cluster_bound(bound_thread, 1);
	enqueue_thread(pset_target(1), bound_thread);
	ret = dequeue_thread_expect(pset_target(1), bound_thread);
	T_QUIET; T_ASSERT_TRUE(ret, "Dequeue single thread on cluster 1");
	root_bucket_arg = SELECTION_WAS_EDF | CTS_VERSION | SELECTION_WAS_CLUSTER_BOUND | CLUSTER_ID(1);
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 10, 0, TH_BUCKET_SHARE_DF, root_bucket_arg);
	T_QUIET; T_EXPECT_TRUE(ret, "Cluster-bound CLUTCH_THREAD_SELECT tracepoint");
	SCHED_POLICY_PASS("CLUTCH_THREAD_SELECT tracepoint handles non-zero cluster id");
#endif /* CONFIG_SCHED_EDGE */
}
422 
/*
 * Walks a DF thread and higher buckets (IN, FG) through alternating
 * warp / starvation-avoidance cycles, checking via CLUTCH_THREAD_SELECT
 * tracepoints that each window opens and closes at exactly the right
 * mock-time boundary.
 */
SCHED_POLICY_T_DECL(runq_root_bucket_expired_windows,
    "Root bucket warp and starvation avoidance windows should expire at the right time")
{
	int ret;
	uint64_t root_bucket_arg;
	init_runqueue_harness();
	disable_auto_current_thread();

	struct thread_group *same_tg = create_tg(0);
	test_thread_t def_thread = create_thread(TH_BUCKET_SHARE_DF, same_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	test_thread_t in_thread = create_thread(TH_BUCKET_SHARE_IN, same_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_IN]);

	/* Expect user_initiated bucket to warp ahead of starved default bucket */
	enqueue_thread(default_target, def_thread);
	/* Push DF past its worst-case execution latency so it is starved */
	increment_mock_time_us(clutch_root_bucket_wcel_us[TH_BUCKET_SHARE_DF] + 1);
	enqueue_thread(default_target, in_thread);
	ret = dequeue_thread_expect(default_target, in_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = SELECTION_OPENED_WARP_WINDOW | CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 1, 0, TH_BUCKET_SHARE_IN, root_bucket_arg);
	T_EXPECT_TRUE(ret, "IN warped ahead, tracepoint");

	/* Expect warp window to close and default starvation avoidance window to begin */
	enqueue_thread(default_target, in_thread);
	increment_mock_time_us(clutch_root_bucket_warp_us[TH_BUCKET_SHARE_IN] + 1);
	ret = dequeue_thread_expect(default_target, def_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = WARP_WINDOW_CLOSE(TH_BUCKET_SHARE_IN, false) | SELECTION_OPENED_STARVATION_AVOIDANCE_WINDOW | CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 0, 0, TH_BUCKET_SHARE_DF, root_bucket_arg);
	T_EXPECT_TRUE(ret, "IN closed warp and DEF opened starvation avoidance, tracepoint");

	/* Expect default starvation avoidance window to close and refresh warp for user_initiated with natural EDF */
	enqueue_thread(default_target, def_thread);
	increment_mock_time_us(clutch_root_bucket_wcel_us[TH_BUCKET_SHARE_DF] + 1);
	ret = dequeue_thread_expect(default_target, in_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = STARVATION_AVOIDANCE_WINDOW_CLOSE(TH_BUCKET_SHARE_DF, false) | SELECTION_WAS_EDF | CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 1, 0, TH_BUCKET_SHARE_IN, root_bucket_arg);
	T_EXPECT_TRUE(ret, "DEF closed starvation avoidance window and IN refreshed warp, tracepoint");

	/* Expect foreground to warp ahead of starved default bucket */
	increment_mock_time_us(clutch_root_bucket_wcel_us[TH_BUCKET_SHARE_DF] + 1);
	test_thread_t fg_thread = create_thread(TH_BUCKET_SHARE_FG, same_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_FG]);
	enqueue_thread(default_target, fg_thread);
	ret = dequeue_thread_expect(default_target, fg_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = SELECTION_OPENED_WARP_WINDOW | CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 2, 0, TH_BUCKET_SHARE_FG, root_bucket_arg);
	T_EXPECT_TRUE(ret, "FG opened warp window, tracepoint");

	/* Expect foreground to close warp window and default to open starvation avoidance window */
	increment_mock_time_us(clutch_root_bucket_warp_us[TH_BUCKET_SHARE_FG] + 1);
	enqueue_thread(default_target, fg_thread);
	ret = dequeue_thread_expect(default_target, def_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = WARP_WINDOW_CLOSE(TH_BUCKET_SHARE_FG, false) | SELECTION_OPENED_STARVATION_AVOIDANCE_WINDOW | CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 0, 0, TH_BUCKET_SHARE_DF, root_bucket_arg);
	T_EXPECT_TRUE(ret, "FG closed warp window and DEF opened starvation avoidance window, tracepoint");

	/* Expect default to close starvation avoidance window */
	increment_mock_time_us(clutch_root_bucket_wcel_us[TH_BUCKET_SHARE_DF] + 1);
	enqueue_thread(default_target, def_thread);
	ret = dequeue_thread_expect(default_target, fg_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = STARVATION_AVOIDANCE_WINDOW_CLOSE(TH_BUCKET_SHARE_DF, false) | SELECTION_WAS_EDF | CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 2, 0, TH_BUCKET_SHARE_FG, root_bucket_arg);
	T_EXPECT_TRUE(ret, "DEF closed starvation avoidance window and FG refreshed warp, tracepoint");

	/*
	 * Expect user_initiated to experience a full-length warp window
	 * (none spent on expired default starvation avoidance window rdar://120562509)
	 */
	increment_mock_time_us(clutch_root_bucket_wcel_us[TH_BUCKET_SHARE_DF] + 1);
	enqueue_thread(default_target, in_thread);
	ret = dequeue_thread_expect(default_target, in_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = SELECTION_OPENED_WARP_WINDOW | CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 1, 0, TH_BUCKET_SHARE_IN, root_bucket_arg);
	T_EXPECT_TRUE(ret, "IN opened warp window, tracepoint");
	enqueue_thread(default_target, in_thread);
	/* Just inside the warp budget: the window must still be open */
	increment_mock_time_us(clutch_root_bucket_warp_us[TH_BUCKET_SHARE_IN] - 1);
	ret = dequeue_thread_expect(default_target, in_thread);
	T_QUIET; T_EXPECT_TRUE(ret, "unexpected bucket");
	root_bucket_arg = CTS_VERSION;
	ret = tracepoint_expect(CLUTCH_THREAD_SELECT, 1, 0, TH_BUCKET_SHARE_IN, root_bucket_arg);
	T_EXPECT_TRUE(ret, "IN had full-length warp window, tracepoint");

	SCHED_POLICY_PASS("Correct warp/starvation avoidance window expiration");
}
512 
513 SCHED_POLICY_T_DECL(runq_interactivity_starts_maxed,
514     "A new Clutch bucket group should start with max interactivity score")
515 {
516 	int ret;
517 	init_runqueue_harness();
518 
519 	struct thread_group *non_interactive_tg = create_tg(clutch_interactivity_score_max - 1);
520 	test_thread_t non_interactive_tg_thread = create_thread(TH_BUCKET_SHARE_DF, non_interactive_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
521 	enqueue_thread(default_target, non_interactive_tg_thread);
522 
523 	struct thread_group *new_tg = create_tg(INITIAL_INTERACTIVITY_SCORE);
524 	test_thread_t new_tg_thread = create_thread(TH_BUCKET_SHARE_DF, new_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
525 	enqueue_thread(default_target, new_tg_thread);
526 
527 	ret = dequeue_thread_expect(default_target, new_tg_thread);
528 	T_EXPECT_TRUE(ret, "New TG Clutch bucket is interactive");
529 
530 	ret = dequeue_thread_expect(default_target, non_interactive_tg_thread);
531 	T_EXPECT_TRUE(ret, "Non-interactive thread comes second");
532 
533 	SCHED_POLICY_PASS("Interactivity score initialized correctly");
534 }
535