/* xref: /xnu-10002.81.5/tests/sched_all_cores_running.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587) */
// Copyright (c) 2023 Apple Inc.  All rights reserved.

#include <unistd.h>
#include <stdlib.h>
#include <pthread.h>
#include <string.h>
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdatomic.h>
#include <time.h>

#include <machine/cpu_capabilities.h>
#include <os/tsd.h>

#include <darwintest.h>
#include <darwintest_utils.h>
#include "test_utils.h"

T_GLOBAL_META(T_META_NAMESPACE("xnu.scheduler"),
    T_META_RADAR_COMPONENT_NAME("xnu"),
    T_META_RADAR_COMPONENT_VERSION("scheduler"));

/*
 * As a successor of clpc_disabling_cores_test_21636137, this test ensures that threads
 * are naturally being scheduled on all of the logical cores (without binding). The test
 * fails if CLPC has derecommended any cores.
 */

static mach_timebase_info_data_t timebase_info;

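/*
 * Convert a duration in nanoseconds to mach absolute time units. Per
 * mach_timebase_info(), abs_time * numer / denom == nanoseconds, so the
 * inverse conversion multiplies by denom and divides by numer.
 */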
static uint64_t
nanos_to_abs(uint64_t nanos)
{
	mach_timebase_info(&timebase_info);
	return nanos * timebase_info.denom / timebase_info.numer;
}

static _Atomic uint64_t visited_cores_bitmask = 0;
static uint64_t spin_deadline_timestamp = 0;

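/*
 * Spinner thread body: busy-wait until the shared deadline, repeatedly
 * OR-ing the bit for the CPU the thread currently observes itself on into
 * the shared bitmask. Relaxed ordering is sufficient because the main
 * thread only reads the aggregate value after the spin window has elapsed.
 */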
static void *
spin_thread_fn(__unused void *arg)
{
	while (mach_absolute_time() < spin_deadline_timestamp) {
		unsigned int curr_cpu = _os_cpu_number();
		atomic_fetch_or_explicit(&visited_cores_bitmask, (1ULL << curr_cpu), memory_order_relaxed);
	}
	return NULL;
}

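/*
 * Create num_threads detached pthreads running start_routine at the given
 * fixed scheduling priority, reusing a single attribute object.
 */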
static void
start_threads(pthread_t *threads, void *(*start_routine)(void *), int priority, unsigned int num_threads)
{
	int rv;
	pthread_attr_t attr;

	rv = pthread_attr_init(&attr);
	T_QUIET; T_ASSERT_POSIX_ZERO(rv, "pthread_attr_init");

	for (unsigned int i = 0; i < num_threads; i++) {
		struct sched_param param = { .sched_priority = (int)priority };

		rv = pthread_attr_setschedparam(&attr, &param);
		T_QUIET; T_ASSERT_POSIX_ZERO(rv, "pthread_attr_setschedparam");

		rv = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
		T_QUIET; T_ASSERT_POSIX_ZERO(rv, "pthread_attr_setdetachstate");

		rv = pthread_create(&threads[i], &attr, start_routine, NULL);
		T_QUIET; T_ASSERT_POSIX_ZERO(rv, "pthread_create");
	}

	rv = pthread_attr_destroy(&attr);
	T_QUIET; T_ASSERT_POSIX_ZERO(rv, "pthread_attr_destroy");
}

static host_t host;
static processor_port_array_t cpu_ports;
static mach_msg_type_number_t cpu_count;

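/*
 * Obtain the privileged host port (requires root, hence T_META_ASROOT below)
 * and a send right for each processor, cross-checking the processor count
 * against hw.ncpu via dt_ncpu().
 */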
static void
init_host_and_cpu_count(void)
{
	kern_return_t kr;
	host_t priv_host;

	host = mach_host_self();

	kr = host_get_host_priv_port(host, &priv_host);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "host_get_host_priv_port");

	kr = host_processors(priv_host, &cpu_ports, &cpu_count);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "host_processors");

	T_QUIET; T_ASSERT_EQ(cpu_count, (unsigned int)dt_ncpu(), "cpu counts between host_processors() and hw.ncpu don't match");
}

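/*
 * Snapshot PROCESSOR_CPU_LOAD_INFO (cumulative per-state tick counts) for
 * every processor into the caller-provided array.
 */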
static void
record_cpu_loads(struct processor_cpu_load_info *cpu_loads)
{
	kern_return_t kr;
	mach_msg_type_number_t info_count = PROCESSOR_CPU_LOAD_INFO_COUNT;
	for (unsigned int i = 0; i < cpu_count; i++) {
		kr = processor_info(cpu_ports[i], PROCESSOR_CPU_LOAD_INFO, &host, (processor_info_t)&cpu_loads[i], &info_count);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "processor_info");
	}
}

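/*
 * Compute the per-CPU tick deltas between two load snapshots, assert that
 * every counter moved monotonically, and report each core's non-idle
 * (user + system) tick total.
 */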
static void
cpu_loads_delta(struct processor_cpu_load_info *start_loads,
    struct processor_cpu_load_info *finish_loads,
    unsigned int *non_idle_ticks)
{
	struct processor_cpu_load_info delta_loads[cpu_count];
	T_LOG("Non-idle time per CPU:");
	for (unsigned int i = 0; i < cpu_count; i++) {
		uint64_t delta_sum = 0;
		for (int state = CPU_STATE_USER; state < CPU_STATE_MAX; state++) {
			T_QUIET; T_ASSERT_GE(finish_loads[i].cpu_ticks[state], start_loads[i].cpu_ticks[state], "non-monotonic ticks for state %d", state);
			delta_loads[i].cpu_ticks[state] = finish_loads[i].cpu_ticks[state] - start_loads[i].cpu_ticks[state];
			delta_sum += delta_loads[i].cpu_ticks[state];
		}
		T_QUIET; T_ASSERT_GT(delta_sum, 0ULL, "Failed to read meaningful load data for the core. Was the amfi_get_out_of_my_way=1 boot-arg missing?");
		non_idle_ticks[i] = delta_loads[i].cpu_ticks[CPU_STATE_USER] + delta_loads[i].cpu_ticks[CPU_STATE_SYSTEM];
		T_LOG("\tCore %u non-idle ticks: %u", i, non_idle_ticks[i]);
	}
}

#define KERNEL_BOOTARGS_MAX_SIZE 1024
static char kernel_bootargs[KERNEL_BOOTARGS_MAX_SIZE];

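/* Priority 31 is BASEPRI_DEFAULT, the default timeshare priority for user threads. */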
static const int DEFAULT_THREAD_PRI = 31;

T_DECL(all_cores_running,
    "Verify that we are using all available cores on the system",
    /* Required to get around the rate limit for processor_info() */
    T_META_BOOTARGS_SET("amfi_get_out_of_my_way=1"),
    T_META_ASROOT(true),
    XNU_T_META_SOC_SPECIFIC)
{
	T_SETUPBEGIN;
	int rv;

	/* Warn if amfi_get_out_of_my_way is not set and fail later on if we actually run into the rate limit */
	size_t kernel_bootargs_size = sizeof(kernel_bootargs);
	rv = sysctlbyname("kern.bootargs", kernel_bootargs, &kernel_bootargs_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.bootargs");
	if (strstr(kernel_bootargs, "amfi_get_out_of_my_way=1") == NULL) {
		T_LOG("WARNING: amfi_get_out_of_my_way=1 boot-arg is missing, required to reliably capture CPU load data");
	}

	init_host_and_cpu_count();
	T_LOG("System has %u logical cores", cpu_count);

	uint64_t recommended_cores_mask = 0;
	size_t recommended_cores_mask_size = sizeof(recommended_cores_mask);
	rv = sysctlbyname("kern.sched_recommended_cores", &recommended_cores_mask, &recommended_cores_mask_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.sched_recommended_cores");
	T_LOG("Mask kern.sched_recommended_cores: 0x%016llx", recommended_cores_mask);

	T_SETUPEND;

	struct processor_cpu_load_info start_cpu_loads[cpu_count];
	record_cpu_loads(start_cpu_loads);

	/* Wait 100ms for the system to settle down */
	usleep(100000);

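	/*
	 * Oversubscribe with twice as many spinners as logical cores so that the
	 * scheduler has runnable work available for every core throughout the
	 * spin window, without binding any thread to a particular CPU.
	 */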
	const uint64_t spin_seconds = 3;
	spin_deadline_timestamp = mach_absolute_time() + nanos_to_abs(spin_seconds * NSEC_PER_SEC);
	unsigned int num_threads = (unsigned int)dt_ncpu() * 2;
	T_LOG("Launching %u threads to spin for %llu seconds...", num_threads, spin_seconds);

	pthread_t threads[num_threads];
	start_threads(threads, &spin_thread_fn, DEFAULT_THREAD_PRI, num_threads);

	/* Wait for threads to perform spinning work */
	sleep((unsigned int)spin_seconds);
	T_LOG("...%llu seconds have elapsed", spin_seconds);

	struct processor_cpu_load_info finish_cpu_loads[cpu_count];
	record_cpu_loads(finish_cpu_loads);

	uint64_t final_visited_cores_bitmask = atomic_load(&visited_cores_bitmask);
	T_LOG("Visited cores bitmask: 0x%llx", final_visited_cores_bitmask);

	unsigned int non_idle_ticks[cpu_count];
	cpu_loads_delta(start_cpu_loads, finish_cpu_loads, non_idle_ticks);

	/*
	 * Now that we have logged all of the relevant information, enforce that
	 * each of the cores was recommended and had test threads scheduled on it.
	 */
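	/* e.g. on an 8-core system a fully visited bitmask is 0xff, whose popcount is 8 */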
	T_ASSERT_EQ((unsigned int)__builtin_popcountll(final_visited_cores_bitmask), cpu_count, "Each core ran at least one of the test threads");
	for (unsigned int i = 0; i < cpu_count; i++) {
		T_QUIET; T_ASSERT_GT(non_idle_ticks[i], 0, "One or more cores were idle during the work period");
	}
	T_PASS("Each core performed work during the work period");

	T_END;
}

T_DECL(recommended_cores_mask,
    "Tests that the mask of recommended cores includes all logical cores according to hw.ncpu",
    T_META_NAMESPACE("xnu.scheduler"),
    XNU_T_META_SOC_SPECIFIC)
{
	int ret;

	uint32_t ncpu = 0;
	size_t ncpu_size = sizeof(ncpu);
	ret = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "hw.ncpu");
	T_LOG("hw.ncpu: %u", ncpu);

	T_ASSERT_LE(ncpu, 64, "Core count isn't too high to reflect in the system's 64-bit wide core masks");

	int passed_test = 0;
	int tries = 0;
	int MAX_RETRIES = 3;
	while (!passed_test && tries < MAX_RETRIES) {
		uint64_t recommended_cores_mask = 0;
		size_t recommended_cores_mask_size = sizeof(recommended_cores_mask);
		ret = sysctlbyname("kern.sched_recommended_cores", &recommended_cores_mask, &recommended_cores_mask_size, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kern.sched_recommended_cores");
		T_LOG("kern.sched_recommended_cores:     0x%016llx", recommended_cores_mask);

		uint64_t expected_set_mask = ~0ULL >> (64 - ncpu);
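		/* e.g. ncpu == 8 yields ~0ULL >> 56 == 0x00000000000000ff */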
		T_LOG("Expected bits set for all cores:  0x%016llx", expected_set_mask);

		if ((recommended_cores_mask & expected_set_mask) == expected_set_mask) {
			passed_test = 1;
		} else {
			/*
			 * Maybe some of the cores are derecommended due to thermals.
			 * Sleep to give the system a chance to quiesce and try again.
			 */
			unsigned int sleep_seconds = 10;
			T_LOG("Missing expected bits. Sleeping for %u seconds before retrying", sleep_seconds);
			sleep(sleep_seconds);
			tries++;
		}
	}

	T_ASSERT_EQ(passed_test, 1, "kern.sched_recommended_cores reflects that all expected cores are recommended");
}