#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include <spawn.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <TargetConditionals.h>
#include <sys/work_interval.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <os/atomic_private.h>

#include <darwintest.h>
#include <darwintest_utils.h>
#include <perfdata/perfdata.h>
#include "test_utils.h"
#include "sched_test_utils.h"

#include "thread_group_fairness_workload_config.h"

T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler"),
    T_META_RADAR_COMPONENT_NAME("xnu"),
    T_META_RADAR_COMPONENT_VERSION("scheduler"),
    T_META_TAG_PERF,
    T_META_TAG_VM_NOT_ELIGIBLE);

static const size_t MAX_PDJ_PATH_LEN = 256;
static unsigned int num_cores;

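/*
 * Install the embedded test workload config plist by passing it as the
 * "new value" to the kern.workload_config sysctl. On kernels without
 * workload config support the sysctl is absent, so the set fails with
 * ENOENT and the test is skipped (an assumption based on the error
 * handling below).
 */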
static void
workload_config_load(void)
{
    int ret;
    size_t len = 0;
    ret = sysctlbyname("kern.workload_config", NULL, &len,
        sched_thread_group_fairness_workload_config_plist,
        sched_thread_group_fairness_workload_config_plist_len);
    if (ret == -1 && errno == ENOENT) {
        T_SKIP("kern.workload_config sysctl not available");
    }
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kern.workload_config");
}

static void
workload_config_cleanup(void)
{
    size_t len = 0;
    sysctlbyname("kern.workload_config", NULL, &len, "", 1);
}

static void
environment_init(void)
{
    num_cores = (unsigned int) dt_ncpu();

    if (platform_is_amp()) {
        /*
         * Derecommend all clusters except the E cores, to ensure that thread groups
         * compete over the same cores irrespective of CLPC's cluster recommendations
         */
        char *clpcctrl_args[] = {"-C", "e", NULL};
        execute_clpcctrl(clpcctrl_args, false);
    }

    /*
     * Load a test workload plist containing a Workload ID with
     * WorkloadClass == DISCRETIONARY, in order to mark the thread group
     * for that workload as THREAD_GROUP_FLAGS_EFFICIENT
     */
    T_ATEND(workload_config_cleanup);
    workload_config_load();
}

static void
set_work_interval_id(work_interval_t *handle, uint32_t work_interval_flags)
{
    int ret;
    mach_port_t port = MACH_PORT_NULL;

    ret = work_interval_copy_port(*handle, &port);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "work_interval_copy_port");

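    /*
     * The workload ID name must match a WorkloadID entry in the test
     * plist loaded by workload_config_load(), so that this work
     * interval's thread group picks up WorkloadClass == DISCRETIONARY.
     */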
    struct work_interval_workload_id_params wlid_params = {
        .wlidp_flags = WORK_INTERVAL_WORKLOAD_ID_HAS_ID,
        .wlidp_wicreate_flags = work_interval_flags,
        .wlidp_name = (uintptr_t)"com.test.myapp.discretionary",
    };

    ret = __work_interval_ctl(WORK_INTERVAL_OPERATION_SET_WORKLOAD_ID, port, &wlid_params, sizeof(wlid_params));
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "WORK_INTERVAL_OPERATION_SET_WORKLOAD_ID");
}

static uint32_t
make_work_interval(work_interval_t *handle, uint32_t work_type_flags)
{
    int ret;
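    /*
     * WORK_INTERVAL_FLAG_GROUP gives the work interval its own thread
     * group, and WORK_INTERVAL_FLAG_JOINABLE lets other threads join it
     * through a copied port.
     */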
    uint32_t work_interval_flags = WORK_INTERVAL_FLAG_JOINABLE | WORK_INTERVAL_FLAG_GROUP | work_type_flags;
    ret = work_interval_create(handle, work_interval_flags);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "work_interval_create");

    if (work_type_flags & WORK_INTERVAL_FLAG_HAS_WORKLOAD_ID) {
        set_work_interval_id(handle, work_interval_flags);
    }
    return work_interval_flags;
}

struct thread_data {
    work_interval_t *handle;
    uint32_t work_interval_flags;
};

static void *
spin_thread_fn(void *arg)
{
    struct thread_data *info = (struct thread_data *)arg;
    int ret;

    /* Join the thread group associated with the work interval handle */
    ret = work_interval_join(*(info->handle));
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "work_interval_join");

    /* Spin indefinitely */
    volatile uint64_t spin_count = 0;
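    /*
     * mach_absolute_time() never reaches UINT64_MAX in practice, so this
     * loop spins until the test process exits; the volatile counter keeps
     * the body from being optimized away.
     */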
    while (mach_absolute_time() < UINT64_MAX) {
        spin_count++;
    }
    return NULL;
}

static void
start_threads(pthread_t *threads, struct thread_data *thread_datas, work_interval_t *handle, uint32_t work_interval_flags)
{
    int ret;
    for (unsigned int i = 0; i < num_cores; i++) {
        thread_datas[i].handle = handle;
        thread_datas[i].work_interval_flags = work_interval_flags;
        ret = pthread_create(&threads[i], NULL, spin_thread_fn, &thread_datas[i]);
        T_QUIET; T_ASSERT_POSIX_ZERO(ret, "pthread_create");
    }
}

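/*
 * Sum user-mode CPU time across the per-core spinner threads, as
 * reported by thread_info(THREAD_BASIC_INFO), in microseconds.
 */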
static uint64_t
snapshot_user_time_usec(pthread_t *threads)
{
    kern_return_t kr;
    uint64_t cumulative_user_time_usec = 0;
    mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
    for (unsigned int i = 0; i < num_cores; i++) {
        mach_port_t thread_port = pthread_mach_thread_np(threads[i]);
        thread_basic_info_data_t info;
        kr = thread_info(thread_port, THREAD_BASIC_INFO, (thread_info_t)&info, &count);
        T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
        uint64_t thread_usr_usec = (uint64_t) (info.user_time.seconds) * USEC_PER_SEC + (uint64_t) info.user_time.microseconds;
        cumulative_user_time_usec += thread_usr_usec;
    }
    return cumulative_user_time_usec;
}

T_DECL(thread_group_fairness,
    "Ensure that thread groups tagged as higher priority do not starve out "
    "thread groups tagged as lower priority when both behave as CPU spinners",
    T_META_ASROOT(YES))
{
    T_SETUPBEGIN;

    wait_for_quiescence_default(argc, argv);
    environment_init();

    /*
     * Create two work intervals with corresponding thread groups that would
     * be associated with differing priorities.
     */
    work_interval_t lower_pri_handle, higher_pri_handle;
    uint32_t lower_pri_flags = make_work_interval(&lower_pri_handle, WORK_INTERVAL_TYPE_DEFAULT | WORK_INTERVAL_FLAG_HAS_WORKLOAD_ID);
    uint32_t higher_pri_flags = make_work_interval(&higher_pri_handle, WORK_INTERVAL_TYPE_DEFAULT);
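
    /*
     * The work interval created with WORK_INTERVAL_FLAG_HAS_WORKLOAD_ID
     * maps to the DISCRETIONARY workload class from the test plist,
     * tagging its thread group as efficient (lower priority); the plain
     * default work interval gets the higher-priority thread group.
     */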

    /* Start threads to join the lower priority thread group */
    pthread_t lower_threads[num_cores];
    struct thread_data lower_thread_datas[num_cores];
    start_threads(lower_threads, lower_thread_datas, &lower_pri_handle, lower_pri_flags);

    /* Start threads to join the higher priority thread group */
    pthread_t higher_threads[num_cores];
    struct thread_data higher_thread_datas[num_cores];
    start_threads(higher_threads, higher_thread_datas, &higher_pri_handle, higher_pri_flags);

    T_SETUPEND;

    /* Snapshot thread runtimes */
    uint64_t start_lower_priority_runtime_usec = snapshot_user_time_usec(lower_threads);
    uint64_t start_higher_priority_runtime_usec = snapshot_user_time_usec(higher_threads);

    /* Allow thread groups time to compete */
    sleep(3);

    /*
     * Snapshot runtimes again and compare the usage ratio between the lower and
     * higher priority thread groups, to determine whether the lower priority group
     * has been starved
     */
    uint64_t finish_lower_priority_runtime_usec = snapshot_user_time_usec(lower_threads);
    uint64_t finish_higher_priority_runtime_usec = snapshot_user_time_usec(higher_threads);

    uint64_t lower_priority_runtime = finish_lower_priority_runtime_usec - start_lower_priority_runtime_usec;
    uint64_t higher_priority_runtime = finish_higher_priority_runtime_usec - start_higher_priority_runtime_usec;

    T_QUIET; T_ASSERT_GT(lower_priority_runtime, 10000LL, "lower priority thread group got at least 10ms of CPU time");
    T_QUIET; T_ASSERT_GT(higher_priority_runtime, 10000LL, "higher priority thread group got at least 10ms of CPU time");

    /* Record the observed runtime ratio */
    char pdj_path[MAX_PDJ_PATH_LEN];
    pdwriter_t writer = pdwriter_open_tmp("xnu", "scheduler.thread_group_fairness", 0, 0, pdj_path, MAX_PDJ_PATH_LEN);
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(writer, "pdwriter_open_tmp");

    double runtime_ratio_value;
    double total_runtime = (double)(lower_priority_runtime + higher_priority_runtime);
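    /*
     * Fairness metric: the smaller group's share of the combined runtime.
     * Perfect timesharing between the two groups yields 0.5; starvation
     * of either group drives it toward 0.
     */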
    if (lower_priority_runtime <= higher_priority_runtime) {
        runtime_ratio_value = (double)(lower_priority_runtime) / total_runtime;
    } else {
        runtime_ratio_value = (double)(higher_priority_runtime) / total_runtime;
    }
    T_LOG("Observed timeshare ratio: %f", runtime_ratio_value);

    pdwriter_new_value(writer, "Thread Group Runtime Ratio", PDUNIT_CUSTOM(runtime_ratio), runtime_ratio_value);
    pdwriter_record_larger_better(writer);
    pdwriter_close(writer);
    /* Ensure that the perfdata file can be copied by BATS */
    T_QUIET; T_ASSERT_POSIX_ZERO(chmod(pdj_path, 0644), "chmod");

    T_END;
}

static uint64_t
get_thread_group_cpu_time(int sched_bucket)
{
    int ret;
    uint64_t cpu_stats[2];
    size_t cpu_stats_len = sizeof(uint64_t) * 2;
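    /*
     * The scheduler bucket is passed as the sysctl's "new value" input;
     * the reply carries two 64-bit counters, of which this test consumes
     * only the first (the bucket group's accumulated CPU time).
     */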
    ret = sysctlbyname("kern.clutch_bucket_group_cpu_stats", cpu_stats, &cpu_stats_len,
        &sched_bucket, sizeof(sched_bucket));
    if (ret != 0 && errno == ENOTSUP) {
        T_LOG("Test only supported with the Clutch/Edge scheduler on "
            "development/debug build variants (current policy is \"%s\")", platform_sched_policy());
        T_SKIP("kern.clutch_bucket_group_cpu_stats development-only sysctl not present");
    }
    T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_SUCCESS(ret, "kern.clutch_bucket_group_cpu_stats");
    return cpu_stats[0];
}

static volatile uint64_t mach_deadline = 0;
static const int seconds = 2;
static _Atomic volatile uint64_t count = 0;
static const int iters_per_lock_hold = 100000;
static const int low_qos = QOS_CLASS_USER_INITIATED;
static const int low_sched_bucket = 2; // TH_BUCKET_SHARE_IN
static const int high_qos = QOS_CLASS_USER_INTERACTIVE;
static const int high_sched_bucket = 1; // TH_BUCKET_SHARE_FG
static _Atomic volatile bool recorder_picked = false;

static void *
boost_while_working(void *arg)
{
    int ret;
    work_interval_t wi = (work_interval_t)arg;
    ret = work_interval_join(wi);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "work_interval_join");

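    /*
     * Elect exactly one recorder via compare-and-swap; only the winning
     * thread samples the bucket-group CPU stats before and after the run.
     */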
    bool is_recorder = os_atomic_cmpxchg(&recorder_picked, false, true, relaxed);
    uint64_t cpu_time_begin_low = 0;
    uint64_t cpu_time_begin_high = 0;
    if (is_recorder) {
        cpu_time_begin_low = get_thread_group_cpu_time(low_sched_bucket);
        cpu_time_begin_high = get_thread_group_cpu_time(high_sched_bucket);
    }

    while (mach_absolute_time() < mach_deadline) {
        /* Assume high priority */
        ret = pthread_set_qos_class_self_np(high_qos, 0);
        T_QUIET; T_ASSERT_POSIX_ZERO(ret, "pthread_set_qos_class_self_np UI");
        T_QUIET; T_ASSERT_EQ(qos_class_self(), high_qos, "qos_class_self");
        /* Complete a "work item" */
        for (volatile int i = 0; i < iters_per_lock_hold; i++) {
            os_atomic_inc(&count, relaxed);
        }
        /* Drop priority down before parking to sleep */
        ret = pthread_set_qos_class_self_np(low_qos, 0);
        T_QUIET; T_ASSERT_POSIX_ZERO(ret, "pthread_set_qos_class_self_np IN");
        T_QUIET; T_ASSERT_EQ(qos_class_self(), low_qos, "qos_class_self");
        usleep(2 * 1000); // 2ms
    }

    if (is_recorder) {
        uint64_t cpu_time_end_low = get_thread_group_cpu_time(low_sched_bucket);
        uint64_t cpu_time_end_high = get_thread_group_cpu_time(high_sched_bucket);

        T_QUIET; T_ASSERT_GE(cpu_time_end_high, cpu_time_begin_high,
            "thread group CPU time should be monotonic");
        uint64_t high_cpu_time = cpu_time_end_high - cpu_time_begin_high;
        T_QUIET; T_ASSERT_GE(cpu_time_end_low, cpu_time_begin_low,
            "thread group CPU time should be monotonic");
        uint64_t low_cpu_time = cpu_time_end_low - cpu_time_begin_low;

        T_QUIET; T_ASSERT_GT(high_cpu_time + low_cpu_time, 0ULL,
            "CPU time should be attributed to at least one expected bucket");
        T_LOG("High ticks: %llu, Low ticks: %llu, High-to-low ratio: %.3f",
            high_cpu_time, low_cpu_time, high_cpu_time * 1.0 / (high_cpu_time + low_cpu_time));
        T_EXPECT_GE(high_cpu_time, low_cpu_time, "More work accounted to the high QoS");
        T_EXPECT_LE(low_cpu_time * 1.0, high_cpu_time * 0.2,
            "Vast majority of work accounted to the high QoS");
    }
    return NULL;
}

/*
 * Note: preemption by non-test threads poses a special problem for this
 * test, because time the test threads spend preempted at their low QoS,
 * in between processing work items, counts as "blocked" time for the
 * thread group at its high QoS. That makes CPU usage age out more
 * quickly for the high QoS, causing the test to fail.
 *
 * Additionally, the test must be run like an application in the QoS
 * engine, without a QoS ceiling that would prevent the test threads
 * from boosting to a high enough QoS. For example:
 *     sudo taskpolicy -a ./thread_group_fairness -n interactivity_cpu_accounting
 */
T_DECL(interactivity_cpu_accounting,
    "Ensure that CPU runtime tracked for calculating the interactivity score "
    "gets attributed to the QoS that actually performed the work, even if the "
    "thread switches QoS while on-core (rdar://125045167)",
    T_META_ENABLED(TARGET_CPU_ARM64 && !TARGET_OS_BRIDGE),
#if TARGET_OS_WATCH
    T_META_MAYFAIL("Watches too noisy with high priority spinners (rdar://150323037)"),
#elif TARGET_OS_TV
    T_META_MAYFAIL("TVs too noisy with high priority audio (rdar://149974201)"),
#endif
    T_META_ASROOT(YES))
{
    T_SETUPBEGIN;

    /* Skip the test if the needed sysctl isn't present */
    get_thread_group_cpu_time(0);

    /* Ensure we don't have a QoS ceiling that would prevent high enough boosts */
    struct task_policy_state policy_state;
    mach_msg_type_number_t policy_count = TASK_POLICY_STATE_COUNT;
    boolean_t get_default = FALSE;
    kern_return_t kr = task_policy_get(mach_task_self(), TASK_POLICY_STATE,
        (task_policy_t)&policy_state, &policy_count, &get_default);
    T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "task_policy_get(self, TASK_POLICY_STATE)");
    int requested_app_type = (policy_state.requested & POLICY_REQ_APPTYPE_MASK) >> POLICY_REQ_APPTYPE_SHIFT;
    T_QUIET; T_ASSERT_EQ(requested_app_type, TASK_APPTYPE_APP_DEFAULT,
        "Test needs to be run like an application for QoS boosting above pri 37 to succeed");

    wait_for_quiescence(argc, argv, 0.9, 10);

    trace_handle_t trace = begin_collect_trace(argc, argv, T_NAME);
    T_SETUPEND;

    if (platform_is_amp()) {
        /*
         * Isolate out the effects of cluster recommendation, since it
         * sometimes causes threads to be preempted for rebalancing.
         */
        char *clpcctrl_args[] = {"-C", "p", NULL};
        execute_clpcctrl(clpcctrl_args, false);
    }

    mach_deadline = mach_absolute_time() + nanos_to_abs(seconds * NSEC_PER_SEC);

    /*
     * Create threads in their own TG that run work at a "boosted"
     * priority and, after each work item completes, drop back down to
     * a low QoS before "parking" via usleep().
     *
     * We expect the interactivity score of this TG's high QoS to be the
     * one that drops, rather than that of the low QoS the threads switch
     * down to just before context-switching off-core.
     */
    int num_boosters = MIN(4, dt_ncpu());
    work_interval_t wi_handle;
    make_work_interval(&wi_handle, WORK_INTERVAL_TYPE_DEFAULT);
    pthread_t threads[num_boosters];
    for (int i = 0; i < num_boosters; i++) {
        create_thread(&threads[i], NULL, boost_while_working, wi_handle);
    }

    /*
     * Wait for the test deadline to pass, to avoid priority boosting
     * via pthread_join(), which would affect the results.
     */
    uint64_t curr_time = mach_absolute_time();
    if (curr_time < mach_deadline) {
        usleep(abs_to_nanos(mach_deadline - curr_time) / NSEC_PER_USEC);
    }
    for (int i = 0; i < num_boosters; i++) {
        pthread_join(threads[i], NULL);
    }

    if (platform_is_amp()) {
        /* Re-enable all cores to speed up trace post-processing */
        char *recommend_all_cores_args[] = {"-C", "all", NULL};
        execute_clpcctrl(recommend_all_cores_args, false);
    }
    end_collect_trace(trace);
}