xref: /xnu-12377.61.12/tests/sched/edge_runqueue.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1*4d495c6eSApple OSS Distributions // Copyright (c) 2024 Apple Inc.  All rights reserved.
2*4d495c6eSApple OSS Distributions 
3*4d495c6eSApple OSS Distributions /*
4*4d495c6eSApple OSS Distributions  * Since the Edge scheduler depends on the Clutch scheduler as most of its
5*4d495c6eSApple OSS Distributions  * timesharing policy, the Edge scheduler should also pass all of the Clutch
6*4d495c6eSApple OSS Distributions  * unit tests.
7*4d495c6eSApple OSS Distributions  */
8*4d495c6eSApple OSS Distributions #include "clutch_runqueue.c"
9*4d495c6eSApple OSS Distributions 
10*4d495c6eSApple OSS Distributions #include "sched_test_harness/sched_edge_harness.h"
11*4d495c6eSApple OSS Distributions 
12*4d495c6eSApple OSS Distributions SCHED_POLICY_T_DECL(runq_shared_rsrc_bound,
13*4d495c6eSApple OSS Distributions     "Shared resource threads should be enqueued into bound root buckets")
14*4d495c6eSApple OSS Distributions {
15*4d495c6eSApple OSS Distributions 	int ret;
16*4d495c6eSApple OSS Distributions 	init_migration_harness(single_core);
17*4d495c6eSApple OSS Distributions 	struct thread_group *tg = create_tg(0);
18*4d495c6eSApple OSS Distributions 	/* Test both shared resource types */
19*4d495c6eSApple OSS Distributions 	for (int i = 0; i < CLUSTER_SHARED_RSRC_TYPE_COUNT; i++) {
20*4d495c6eSApple OSS Distributions 		thread_t thread = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
21*4d495c6eSApple OSS Distributions 		edge_set_thread_shared_rsrc(thread, i);
22*4d495c6eSApple OSS Distributions 		enqueue_thread(default_target, thread);
23*4d495c6eSApple OSS Distributions 		ret = dequeue_thread_expect(default_target, thread);
24*4d495c6eSApple OSS Distributions 		T_QUIET; T_EXPECT_TRUE(ret, "Single shared rsrc thread");
25*4d495c6eSApple OSS Distributions 		uint64_t bound_arg = SELECTION_WAS_CLUSTER_BOUND | SELECTION_WAS_EDF | CTS_VERSION;
26*4d495c6eSApple OSS Distributions 		ret = tracepoint_expect(CLUTCH_THREAD_SELECT, i, 0, TH_BUCKET_SHARE_DF, bound_arg);
27*4d495c6eSApple OSS Distributions 		T_EXPECT_TRUE(ret, "CLUTCH_THREAD_SELECT tracepoint confirms shared resource "
28*4d495c6eSApple OSS Distributions 		    "(%s) thread was enqueued as bound", i == 0 ? "native first" : "round robin");
29*4d495c6eSApple OSS Distributions 	}
30*4d495c6eSApple OSS Distributions 	SCHED_POLICY_PASS("Shared resource threads enqueued as bound");
31*4d495c6eSApple OSS Distributions }
32*4d495c6eSApple OSS Distributions 
SCHED_POLICY_T_DECL(runq_aboveui_bound_tiebreaks,
    "Tiebreaking Above UI vs. timeshare FG and bound vs. unbound root buckets")
{
	int ret;
	init_migration_harness(single_core);

	/* Create a thread for each permutation (4 total), all at matching priority 63 */
	struct thread_group *same_tg = create_tg(clutch_interactivity_score_max);
	/* Above UI threads: fixed sched mode in the FIXPRI root bucket */
	test_thread_t unbound_aboveui = create_thread(TH_BUCKET_FIXPRI, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	set_thread_sched_mode(unbound_aboveui, TH_MODE_FIXED);
	test_thread_t bound_aboveui = create_thread(TH_BUCKET_FIXPRI, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	set_thread_sched_mode(bound_aboveui, TH_MODE_FIXED);
	set_thread_cluster_bound(bound_aboveui, 0);
	/* Timeshare FG threads, created at the same priority as the aboveui pair */
	test_thread_t unbound_timeshare_fg = create_thread(TH_BUCKET_SHARE_FG, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	test_thread_t bound_timeshare_fg = create_thread(TH_BUCKET_SHARE_FG, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	set_thread_cluster_bound(bound_timeshare_fg, 0);

	/* Repeat under several random enqueue orders to rule out order-dependence */
	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
		enqueue_threads_rand_order(default_target, rand_seeds[i], 4, unbound_aboveui, bound_aboveui, unbound_timeshare_fg, bound_timeshare_fg);
		/* Aboveui must beat timeshare FG; bound must win the bound/unbound tie */
		ret = dequeue_threads_expect_ordered(default_target, 2, bound_aboveui, unbound_aboveui);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Aboveui buckets didn't come out first and correctly ordered in iteration %d", i);
		/* Needed because bound/unbound root buckets alternate picks, as demonstrated below */
		disable_auto_current_thread();
		ret = dequeue_threads_expect_ordered(default_target, 2, bound_timeshare_fg, unbound_timeshare_fg);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Timeshare buckets didn't come out second and correctly ordered in iteration %d", i);
		T_QUIET; T_ASSERT_EQ(runqueue_empty(default_target), true, "runqueue_empty");
		reenable_auto_current_thread();
	}
	SCHED_POLICY_PASS("Correct tiebreaking for aboveui vs. foreground and unbound vs. bound root buckets");
}
63*4d495c6eSApple OSS Distributions 
SCHED_POLICY_T_DECL(runq_cluster_bound,
    "Cluster-bound threads vs. regular threads")
{
	int ret;
	init_migration_harness(basic_amp);
	struct thread_group *tg = create_tg(0);

	/*
	 * Phase 1: interleave bound and unbound threads across four QoS root
	 * buckets and verify dequeue strictly follows QoS order, i.e. binding a
	 * thread to a cluster does not let it jump ahead of higher-QoS work.
	 */
	int num_threads = 4;
	test_thread_t threads[num_threads];
	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
		/* High root bucket unbound */
		threads[0] = create_thread(TH_BUCKET_SHARE_IN, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_IN]);
		/* Middle root bucket bound */
		threads[1] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		set_thread_cluster_bound(threads[1], 0);
		/* Low root bucket unbound */
		threads[2] = create_thread(TH_BUCKET_SHARE_UT, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_UT]);
		/* Lowest root bucket bound */
		threads[3] = create_thread(TH_BUCKET_SHARE_BG, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_BG]);
		set_thread_cluster_bound(threads[3], 0);
		enqueue_threads_arr_rand_order(default_target, rand_seeds[i], num_threads, threads);
		/* Bound comes out first due to bound/unbound root bucket tie break in favor of bound */
		ret = dequeue_threads_expect_ordered_arr(default_target, num_threads, threads);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Threads dequeued without respect to QoS");
		T_QUIET; T_EXPECT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Cluster bound respects QoS level");

	/*
	 * Phase 2: alternate bound (even slots) and unbound (odd slots) threads
	 * in the same DF bucket, each enqueued 5us apart.  As mock time advances
	 * across repeated dequeues, picks should alternate bound/unbound in
	 * enqueue-time order (index order of tie_break_threads).
	 */
	int num_tie_break_threads = 10;
	test_thread_t tie_break_threads[num_tie_break_threads];
	for (int k = 0; k < num_tie_break_threads / 2; k++) {
		tie_break_threads[k * 2] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		set_thread_cluster_bound(tie_break_threads[k * 2], 0);
		increment_mock_time_us(5);
		enqueue_thread(default_target, tie_break_threads[k * 2]);
	}
	for (int k = 0; k < num_tie_break_threads / 2; k++) {
		tie_break_threads[k * 2 + 1] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		increment_mock_time_us(5);
		enqueue_thread(default_target, tie_break_threads[k * 2 + 1]);
	}
	/* Disable current thread check because bound and unbound alternate without time passing */
	disable_auto_current_thread();
	for (int k = 0; k < num_tie_break_threads; k++) {
		/* Simulates repeatedly dequeing threads over time */
		increment_mock_time_us(5);
		ret = dequeue_thread_expect(default_target, tie_break_threads[k]);
		T_QUIET; T_EXPECT_TRUE(ret, "Out-of-order thread\n");
	}
	T_QUIET; T_EXPECT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Unbound vs. bound tie-break");

	/*
	 * Phase 3: two bound threads from TGs with different interactivity
	 * scores should come out in FIFO (enqueue) order, showing that the
	 * bound root bucket path ignores the TG interactivity score.
	 */
	struct thread_group *low_iscore_tg = create_tg(0);
	test_thread_t low_iscore_bound = create_thread(TH_BUCKET_SHARE_DF, low_iscore_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	struct thread_group *high_iscore_tg = create_tg(clutch_interactivity_score_max);
	test_thread_t high_iscore_bound = create_thread(TH_BUCKET_SHARE_DF, high_iscore_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	set_thread_cluster_bound(low_iscore_bound, 0);
	set_thread_cluster_bound(high_iscore_bound, 0);
	enqueue_threads(default_target, 2, low_iscore_bound, high_iscore_bound);
	ret = dequeue_threads_expect_ordered(default_target, 2, low_iscore_bound, high_iscore_bound);
	T_QUIET; T_EXPECT_EQ(ret, -1, "Threads dequeued in non-FIFO order");
	T_QUIET; T_EXPECT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Cluster bound threads don't use interactivity score");
}
127