// Copyright (c) 2024 Apple Inc.  All rights reserved.

/*
 * Since the Edge scheduler relies on the Clutch scheduler for most of its
 * timesharing policy, the Edge scheduler should also pass all of the Clutch
 * unit tests.
 */
#include "clutch_runqueue.c"

#include "sched_test_harness/sched_edge_harness.h"

SCHED_POLICY_T_DECL(runq_shared_rsrc_bound,
    "Shared resource threads should be enqueued into bound root buckets")
{
	int ret;
	init_migration_harness(single_core);
	struct thread_group *tg = create_tg(0);
	/* Test both shared resource types */
	for (int i = 0; i < 2; i++) {
		thread_t thread = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		edge_set_thread_shared_rsrc(thread, i);
		enqueue_thread(default_target, thread);
		ret = dequeue_thread_expect(default_target, thread);
		T_QUIET; T_EXPECT_TRUE(ret, "Single shared rsrc thread");
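		/*
		 * The selection tracepoint payload should carry the cluster-bound
		 * and EDF selection flags, confirming the thread was serviced out
		 * of the bound root bucket rather than the unbound one.
		 */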
		uint64_t bound_arg = SELECTION_WAS_CLUSTER_BOUND | SELECTION_WAS_EDF | CTS_VERSION;
		ret = tracepoint_expect(CLUTCH_THREAD_SELECT, i, 0, TH_BUCKET_SHARE_DF, bound_arg);
		T_EXPECT_TRUE(ret, "CLUTCH_THREAD_SELECT tracepoint confirms shared resource "
		    "(%s) thread was enqueued as bound", i == 0 ? "native first" : "round robin");
	}
	SCHED_POLICY_PASS("Shared resource threads enqueued as bound");
}

SCHED_POLICY_T_DECL(runq_aboveui_bound_tiebreaks,
    "Tiebreaking Above UI vs. timeshare FG and bound vs. unbound root buckets")
{
	int ret;
	init_migration_harness(single_core);

	/* Create a thread for each permutation (4 total), all at matching priority 63 */
	struct thread_group *same_tg = create_tg(clutch_interactivity_score_max);
	test_thread_t unbound_aboveui = create_thread(TH_BUCKET_FIXPRI, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	set_thread_sched_mode(unbound_aboveui, TH_MODE_FIXED);
	test_thread_t bound_aboveui = create_thread(TH_BUCKET_FIXPRI, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	set_thread_sched_mode(bound_aboveui, TH_MODE_FIXED);
	set_thread_cluster_bound(bound_aboveui, 0);
	test_thread_t unbound_timeshare_fg = create_thread(TH_BUCKET_SHARE_FG, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	test_thread_t bound_timeshare_fg = create_thread(TH_BUCKET_SHARE_FG, same_tg, root_bucket_to_highest_pri[TH_BUCKET_FIXPRI]);
	set_thread_cluster_bound(bound_timeshare_fg, 0);

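	/*
	 * Regardless of enqueue order, the above-UI (fixed-priority) threads
	 * should dequeue ahead of the timeshare FG threads, and within each pair
	 * the cluster-bound thread should win the priority tie over the unbound one.
	 */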
	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
		enqueue_threads_rand_order(default_target, rand_seeds[i], 4, unbound_aboveui, bound_aboveui, unbound_timeshare_fg, bound_timeshare_fg);
		ret = dequeue_threads_expect_ordered(default_target, 2, bound_aboveui, unbound_aboveui);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Aboveui buckets didn't come out first and correctly ordered in iteration %d", i);
		/* Needed because bound/unbound root buckets alternate picks, as demonstrated below */
		disable_auto_current_thread();
		ret = dequeue_threads_expect_ordered(default_target, 2, bound_timeshare_fg, unbound_timeshare_fg);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Timeshare buckets didn't come out second and correctly ordered in iteration %d", i);
		T_QUIET; T_ASSERT_EQ(runqueue_empty(default_target), true, "runqueue_empty");
		reenable_auto_current_thread();
	}
	SCHED_POLICY_PASS("Correct tiebreaking for aboveui vs. foreground and unbound vs. bound root buckets");
}

SCHED_POLICY_T_DECL(runq_cluster_bound,
    "Cluster-bound threads vs. regular threads")
{
	int ret;
	init_migration_harness(basic_amp);
	struct thread_group *tg = create_tg(0);
	int num_threads = 4;
	test_thread_t threads[num_threads];
	for (int i = 0; i < NUM_RAND_SEEDS; i++) {
		/* High root bucket unbound */
		threads[0] = create_thread(TH_BUCKET_SHARE_IN, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_IN]);
		/* Middle root bucket bound */
		threads[1] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		set_thread_cluster_bound(threads[1], 0);
		/* Low root bucket unbound */
		threads[2] = create_thread(TH_BUCKET_SHARE_UT, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_UT]);
		/* Lowest root bucket bound */
		threads[3] = create_thread(TH_BUCKET_SHARE_BG, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_BG]);
		set_thread_cluster_bound(threads[3], 0);
		enqueue_threads_arr_rand_order(default_target, rand_seeds[i], num_threads, threads);
		/* Threads should dequeue in descending QoS order, regardless of bound vs. unbound root buckets */
		ret = dequeue_threads_expect_ordered_arr(default_target, num_threads, threads);
		T_QUIET; T_EXPECT_EQ(ret, -1, "Threads dequeued without respect to QoS");
		T_QUIET; T_EXPECT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	}
	SCHED_POLICY_PASS("Cluster bound respects QoS level");

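	/*
	 * Same-QoS tie-break: enqueue cluster-bound and unbound threads with
	 * staggered timestamps and expect picks to alternate between the bound
	 * and unbound root buckets, each drained in FIFO order.
	 */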
	int num_tie_break_threads = 10;
	test_thread_t tie_break_threads[num_tie_break_threads];
	for (int k = 0; k < num_tie_break_threads / 2; k++) {
		tie_break_threads[k * 2] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		set_thread_cluster_bound(tie_break_threads[k * 2], 0);
		increment_mock_time_us(5);
		enqueue_thread(default_target, tie_break_threads[k * 2]);
	}
	for (int k = 0; k < num_tie_break_threads / 2; k++) {
		tie_break_threads[k * 2 + 1] = create_thread(TH_BUCKET_SHARE_DF, tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
		increment_mock_time_us(5);
		enqueue_thread(default_target, tie_break_threads[k * 2 + 1]);
	}
	/* Disable current thread check because bound and unbound alternate without time passing */
	disable_auto_current_thread();
	for (int k = 0; k < num_tie_break_threads; k++) {
		/* Simulates repeatedly dequeuing threads over time */
		increment_mock_time_us(5);
		ret = dequeue_thread_expect(default_target, tie_break_threads[k]);
		T_QUIET; T_EXPECT_TRUE(ret, "Out-of-order thread\n");
	}
	T_QUIET; T_EXPECT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Unbound vs. bound tie-break");

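	/*
	 * Finally, two cluster-bound threads from thread groups with different
	 * interactivity scores should still dequeue in FIFO order, since bound
	 * root buckets ignore the Clutch interactivity score.
	 */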
	struct thread_group *low_iscore_tg = create_tg(0);
	test_thread_t low_iscore_bound = create_thread(TH_BUCKET_SHARE_DF, low_iscore_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	struct thread_group *high_iscore_tg = create_tg(clutch_interactivity_score_max);
	test_thread_t high_iscore_bound = create_thread(TH_BUCKET_SHARE_DF, high_iscore_tg, root_bucket_to_highest_pri[TH_BUCKET_SHARE_DF]);
	set_thread_cluster_bound(low_iscore_bound, 0);
	set_thread_cluster_bound(high_iscore_bound, 0);
	enqueue_threads(default_target, 2, low_iscore_bound, high_iscore_bound);
	ret = dequeue_threads_expect_ordered(default_target, 2, low_iscore_bound, high_iscore_bound);
	T_QUIET; T_EXPECT_EQ(ret, -1, "Threads dequeued in non-FIFO order");
	T_QUIET; T_EXPECT_TRUE(runqueue_empty(default_target), "runqueue_empty");
	SCHED_POLICY_PASS("Cluster bound threads don't use interactivity score");
}