/* xnu-12377.61.12/tests/sched/cluster_bound_threads.c */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdatomic.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <spawn.h>
#include <pthread.h>
#include <TargetConditionals.h>
#include <sys/sysctl.h>
#include <os/tsd.h>
#include <machine/cpu_capabilities.h>

#include <darwintest.h>
#include <darwintest_utils.h>
#include "test_utils.h"
#include "sched_test_utils.h"

T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler"),
    T_META_RADAR_COMPONENT_NAME("xnu"),
    T_META_RADAR_COMPONENT_VERSION("scheduler"),
    T_META_BOOTARGS_SET("enable_skstb=1"),
    T_META_ASROOT(true),
    T_META_TAG_VM_NOT_ELIGIBLE,
    XNU_T_META_SOC_SPECIFIC);
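
/* Unbound spinner thread body: burn CPU for 8 seconds. */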
static void *
spin_thread(__unused void *arg)
{
	spin_for_duration(8);
	return NULL;
}
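
/*
 * Cluster-bound spinner thread body: arg carries a perflevel character
 * (e.g. 'P' or 'E'); bind to a cluster of that type, then burn CPU for
 * 10 seconds.
 */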
static void *
spin_bound_thread(void *arg)
{
	char type = (char)(uintptr_t)arg;
	bind_to_cluster_of_type(type);
	spin_for_duration(10);
	return NULL;
}
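
/* Number of unbound spinner threads to create per CPU. */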
#define SPINNER_THREAD_LOAD_FACTOR (4)

T_DECL(test_cluster_bound_thread_timeshare, "Make sure the low priority bound threads get CPU in the presence of non-bound CPU spinners",
    T_META_ENABLED(TARGET_CPU_ARM64 && TARGET_OS_OSX))
{
	pthread_setname_np("main thread");

	kern_return_t kr;

	int rv;
	pthread_attr_t attr;

	rv = pthread_attr_init(&attr);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_init");

	rv = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_setdetachstate");

	rv = pthread_attr_set_qos_class_np(&attr, QOS_CLASS_USER_INITIATED, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_set_qos_class_np");

	unsigned int ncpu = (unsigned int)dt_ncpu();
	pthread_t unbound_thread;
	pthread_t bound_thread;

	wait_for_quiescence_default(argc, argv);
	trace_handle_t trace = begin_collect_trace(argc, argv, "test_cluster_bound_thread_timeshare");

	T_LOG("creating %u non-bound threads\n", ncpu * SPINNER_THREAD_LOAD_FACTOR);

	for (unsigned int i = 0; i < ncpu * SPINNER_THREAD_LOAD_FACTOR; i++) {
		rv = pthread_create(&unbound_thread, &attr, spin_thread, NULL);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_create (non-bound)");
	}
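
	/*
	 * Create the bound thread at fixed priority 20, below the priority of
	 * the QOS_CLASS_USER_INITIATED spinners, so any CPU time it receives
	 * comes from timeshare scheduling rather than strict priority.
	 */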
	struct sched_param param = { .sched_priority = 20 };
	T_ASSERT_POSIX_ZERO(pthread_attr_setschedparam(&attr, &param), "pthread_attr_setschedparam");

	rv = pthread_create(&bound_thread, &attr, spin_bound_thread, (void *)(uintptr_t)'P');
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_create (P-bound)");

	rv = pthread_attr_destroy(&attr);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "pthread_attr_destroy");
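
	/* Give the bound thread most of its 10-second spin to compete with the spinners. */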
	sleep(8);

	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
	mach_port_t thread_port = pthread_mach_thread_np(bound_thread);
	thread_basic_info_data_t bound_thread_info;

	kr = thread_info(thread_port, THREAD_BASIC_INFO, (thread_info_t)&bound_thread_info, &count);
	if (kr != KERN_SUCCESS) {
		T_FAIL("%#x == thread_info(bound_thread, THREAD_BASIC_INFO)", kr);
	}

	end_collect_trace(trace);

	uint64_t bound_usr_usec = (uint64_t)bound_thread_info.user_time.seconds * USEC_PER_SEC +
	    (uint64_t)bound_thread_info.user_time.microseconds;

	T_ASSERT_GT(bound_usr_usec, 75000ULL, "Check that bound thread got at least 75ms of CPU time");
	T_PASS("Low priority bound threads got some CPU time in the presence of high priority unbound spinners");
}
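
/*
 * Sample the thread's user time via THREAD_BASIC_INFO, sleep for the given
 * number of seconds, sample again, and return the delta in microseconds.
 */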
static uint64_t
observe_thread_user_time(pthread_t thread, unsigned int seconds)
{
	kern_return_t kr;
	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
	mach_port_t port = pthread_mach_thread_np(thread);
	thread_basic_info_data_t basic_thread_info;
	uint64_t before_user_us = 0;
	uint64_t after_user_us = 0;

	kr = thread_info(port, THREAD_BASIC_INFO, (thread_info_t)&basic_thread_info, &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info(THREAD_BASIC_INFO)");
	before_user_us = (uint64_t)basic_thread_info.user_time.seconds * USEC_PER_SEC +
	    (uint64_t)basic_thread_info.user_time.microseconds;

	sleep(seconds);

	kr = thread_info(port, THREAD_BASIC_INFO, (thread_info_t)&basic_thread_info, &count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info(THREAD_BASIC_INFO)");
	after_user_us = (uint64_t)basic_thread_info.user_time.seconds * USEC_PER_SEC +
	    (uint64_t)basic_thread_info.user_time.microseconds;

	T_QUIET; T_ASSERT_GE(after_user_us, before_user_us, "increasing user_time values");
	return after_user_us - before_user_us;
}

T_DECL(cluster_soft_binding,
    "Make sure that cluster-binding is \"soft\" and a bound thread can run elsewhere when "
    "its bound cluster is derecommended",
    T_META_ENABLED(TARGET_CPU_ARM64))
{
	T_SETUPBEGIN;
	if (!platform_is_amp()) {
		T_SKIP("Platform is symmetric, skipping cluster-binding test");
	}

	wait_for_quiescence_default(argc, argv);

	trace_handle_t trace = begin_collect_trace(argc, argv, "cluster_soft_binding");
	T_SETUPEND;

	for (unsigned int p = 0; p < platform_nperflevels(); p++) {
		/* Ensure all cores are recommended */
		char *restore_dynamic_control_args[] = {"-d", NULL};
		execute_clpcctrl(restore_dynamic_control_args, false);
		bool all_cores_recommended = check_recommended_core_mask(NULL);
		T_QUIET; T_EXPECT_TRUE(all_cores_recommended, "all cores are recommended for scheduling");

		char perflevel_char = platform_perflevel_name(p)[0];
		void *arg = (void *)(uintptr_t)perflevel_char;
		pthread_t bound_thread;
		create_thread(&bound_thread, NULL, spin_bound_thread, arg);
		sleep(1);

		double runtime_threshold = 0.2; /* Ran at least 20% of expected time */
		unsigned int observe_seconds = 3;
		uint64_t recommended_user_us = observe_thread_user_time(bound_thread, observe_seconds);
		T_LOG("%c-bound thread ran %lluus with all cores recommended", perflevel_char, recommended_user_us);
		T_QUIET; T_EXPECT_GE(recommended_user_us * 1.0, runtime_threshold * observe_seconds * USEC_PER_SEC,
		    "%c-bound thread ran at least %f of %d seconds", perflevel_char, runtime_threshold, observe_seconds);

		/* Derecommend the bound cluster type */
		char perflevel_arg[2] = {perflevel_char, '\0'};
		char *derecommend_args[] = {"-C", perflevel_arg, NULL};
		execute_clpcctrl(derecommend_args, false);
		check_recommended_core_mask(NULL);
		sleep(1);

		uint64_t derecommended_user_us = observe_thread_user_time(bound_thread, observe_seconds);
		T_LOG("%c-bound thread ran %lluus with %c-cores derecommended", perflevel_char, derecommended_user_us, perflevel_char);
		T_EXPECT_GE(derecommended_user_us * 1.0, runtime_threshold * observe_seconds * USEC_PER_SEC,
		    "%c-bound thread ran at least %f of %d seconds when %c-cores were derecommended",
		    perflevel_char, runtime_threshold, observe_seconds, perflevel_char);
	}

	stop_spinning_threads();
	end_collect_trace(trace);
}
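
/* Number of random bind/unbind trials each binding thread performs. */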
static int num_cluster_bind_trials = 100000;
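
/*
 * Repeatedly bind to a randomly chosen cluster (drawing num_clusters acts as
 * an "unbind" trial) and verify that the thread is observed running on the
 * cluster it bound to.
 */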
static void *
spin_cluster_binding(__unused void *arg)
{
	uint8_t num_clusters = COMM_PAGE_READ(uint8_t, CPU_CLUSTERS);
	for (int t = 0; t < num_cluster_bind_trials; t++) {
		int bind_cluster = rand() % (num_clusters + 1);
		bool unbind = bind_cluster == num_clusters;
		if (unbind) {
			bind_cluster = -1;
		}
		bind_to_cluster_id(bind_cluster);
		if (!unbind) {
			int running_on_cluster = (int)_os_cpu_cluster_number();
			T_QUIET; T_EXPECT_EQ(running_on_cluster, bind_cluster, "Failed to reach the bound cluster");
			if (running_on_cluster != bind_cluster) {
				T_LOG("Failed on iteration %d", t);
				/* Mark this failure in the recorded trace */
				sched_kdebug_test_fail(t, bind_cluster, running_on_cluster, 0);
			}
		}
	}
	return NULL;
}

T_DECL(cluster_bind_migrate,
    "Ensure cluster-binding triggers a context-switch if needed to get to the bound cluster",
    T_META_ENABLED(TARGET_CPU_ARM64),
    T_META_MAYFAIL("rdar://132360557, need a reasonable expectation that cores will not quickly disable"))
{
	T_SETUPBEGIN;
	if (!platform_is_amp()) {
		T_SKIP("Platform is symmetric, skipping cluster-binding test");
	}

	char *policy_name = platform_sched_policy();
	if (strcmp(policy_name, "edge") != 0) {
		T_SKIP("Platform is running the \"%s\" scheduler, which lacks strong enough cluster-binding", policy_name);
	}

	wait_for_quiescence_default(argc, argv);
	bool all_cores_recommended = check_recommended_core_mask(NULL);
	T_QUIET; T_EXPECT_TRUE(all_cores_recommended, "all cores are recommended for scheduling");

	srand(777767777);

	trace_handle_t trace = begin_collect_trace(argc, argv, "cluster_bind_migrate");
	T_SETUPEND;

	pthread_t *threads = create_threads(dt_ncpu(), 31, eJoinable, QOS_CLASS_UNSPECIFIED,
	    eSchedDefault, DEFAULT_STACK_SIZE, spin_cluster_binding, NULL);
	for (int i = 0; i < dt_ncpu(); i++) {
		pthread_join(threads[i], NULL);
	}

	if (T_FAILCOUNT == 0) {
		T_PASS("Correctly migrated to the bound cluster for %d trials", num_cluster_bind_trials);
	} else {
		T_FAIL("%d fails for %d cluster-bind attempts", T_FAILCOUNT, num_cluster_bind_trials);
	}
	end_collect_trace(trace);
}
249