/*
 * Tests to validate that:
 * - we can schedule threads on all hw.ncpu cores according to _os_cpu_number
 * - we can schedule threads on all CPU clusters according to _os_cpu_cluster_number
 * - the cluster id returned by _os_cpu_cluster_number aligns with the mappings in the IORegistry
 *
 * <rdar://problem/29545645>
 * <rdar://problem/30445216>
 *
 * xcrun -sdk macosx.internal clang -o cpucount cpucount.c -ldarwintest -framework IOKit -framework CoreFoundation -g -Weverything
 * xcrun -sdk iphoneos.internal clang -arch arm64 -o cpucount-ios cpucount.c -ldarwintest -framework IOKit -framework CoreFoundation -g -Weverything
 * xcrun -sdk macosx.internal clang -o cpucount cpucount.c -ldarwintest -framework IOKit -framework CoreFoundation -arch arm64e -Weverything
 */

#include <darwintest.h>
#include "test_utils.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/commpage.h>
#include <sys/sysctl.h>
#include <sys/proc_info.h>
#include <libproc.h>

#include <CoreFoundation/CoreFoundation.h>
#include <IOKit/IOKitLib.h>

#include <mach/mach.h>
#include <mach/mach_time.h>
#include <machine/cpu_capabilities.h>

#include <os/tsd.h> /* private header for _os_cpu_number, _os_cpu_cluster_number */
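
/*
 * _os_cpu_number and _os_cpu_cluster_number report which CPU/cluster the
 * calling thread is currently running on. The values are advisory: the
 * thread can migrate immediately after the read, so these tests pin the
 * thread to a known CPU or cluster first and then check that the reported
 * ids match. A minimal (hypothetical) usage sketch:
 *
 *	uint32_t cpu = _os_cpu_number();
 *	printf("currently running on cpu %u\n", cpu);
 */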

T_GLOBAL_META(
	T_META_RUN_CONCURRENTLY(false),
	T_META_BOOTARGS_SET("enable_skstb=1"),
	T_META_CHECK_LEAKS(false),
	T_META_ASROOT(true),
	T_META_ALL_VALID_ARCHS(true),
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("scheduler"),
	T_META_OWNER("jarrad"),
	T_META_TAG_VM_NOT_PREFERRED
	);

#define KERNEL_BOOTARGS_MAX_SIZE 1024
static char kernel_bootargs[KERNEL_BOOTARGS_MAX_SIZE];

#define KERNEL_VERSION_MAX_SIZE 1024
static char kernel_version[KERNEL_VERSION_MAX_SIZE];

static mach_timebase_info_data_t timebase_info;

// Source: libktrace:corefoundation_helpers.c

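/*
 * Fetch a numeric value out of a CF dictionary. IORegistry properties may
 * surface either as a CFNumber or as raw 4-byte CFData, so both encodings
 * are accepted here.
 */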
static void
dict_number_internal(CFDictionaryRef dict, CFStringRef key, void *dst_out, CFNumberType nbr_type)
{
	bool success;
	T_QUIET; T_ASSERT_NOTNULL(dict, "dict must not be null");
	T_QUIET; T_ASSERT_NOTNULL(key, "key must not be null");
	T_QUIET; T_ASSERT_NOTNULL(dst_out, "dst_out must not be null");

	CFTypeRef val = CFDictionaryGetValue(dict, key);
	T_QUIET; T_ASSERT_NOTNULL(val, "unable to get value for key %s", CFStringGetCStringPtr(key, kCFStringEncodingASCII));

	CFTypeID type = CFGetTypeID(val);
	if (type == CFNumberGetTypeID()) {
		CFNumberRef val_nbr = (CFNumberRef)val;
		success = CFNumberGetValue(val_nbr, nbr_type, dst_out);
		T_QUIET; T_ASSERT_TRUE(success, "dictionary number at key '%s' is not the right type", CFStringGetCStringPtr(key, kCFStringEncodingASCII));
	} else if (type == CFDataGetTypeID()) {
		CFDataRef val_data = (CFDataRef)val;
		size_t raw_size = (size_t)CFDataGetLength(val_data);
		T_QUIET; T_ASSERT_EQ(raw_size, (size_t)4, "cannot convert CFData of size %zu to number", raw_size);
		CFDataGetBytes(val_data, CFRangeMake(0, (CFIndex)raw_size), dst_out);
	} else {
		T_ASSERT_FAIL("dictionary value at key '%s' should be a number or data", CFStringGetCStringPtr(key, kCFStringEncodingASCII));
	}
}

static void
dict_uint32(CFDictionaryRef dict, CFStringRef key, uint32_t *dst_out)
{
	dict_number_internal(dict, key, dst_out, kCFNumberSInt32Type);
}

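/*
 * Convert mach_absolute_time() ticks to nanoseconds using the timebase
 * fetched in cpucount_setup() (ns = abs * numer / denom).
 */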
static uint64_t
abs_to_nanos(uint64_t abs)
{
	return abs * timebase_info.numer / timebase_info.denom;
}

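/*
 * Read this process's context switch count via proc_pidinfo(). The bind
 * loops below compare before/after counts to confirm that rebinding to a
 * different CPU or cluster actually forced a context switch.
 */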
static int32_t
get_csw_count(void)
{
	struct proc_taskinfo taskinfo;
	int rv;

	rv = proc_pidinfo(getpid(), PROC_PIDTASKINFO, 0, &taskinfo, sizeof(taskinfo));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "PROC_PIDTASKINFO");

	return taskinfo.pti_csw;
}

// noinline hopefully keeps the optimizer from hoisting it out of the loop
// until rdar://68253516 is fixed.
__attribute__((noinline))
static uint32_t
fixed_os_cpu_number(void)
{
	uint32_t cpu_number = _os_cpu_number();
	return cpu_number;
}

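/*
 * The commpage exports a per-CPU byte table (CPU_TO_CLUSTER) that maps a
 * CPU id to the id of the cluster containing it. Index it with the current
 * CPU number to get an independent read on the current cluster id, which
 * the tests compare against _os_cpu_cluster_number and the IORegistry.
 */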
static unsigned int
commpage_cpu_cluster_number(void)
{
	uint8_t cpu_number = (uint8_t)fixed_os_cpu_number();
	volatile uint8_t *cpu_to_cluster = COMM_PAGE_SLOT(uint8_t, CPU_TO_CLUSTER);
	return (unsigned int)*(cpu_to_cluster + cpu_number);
}

static void
cpucount_setup(void)
{
	int rv;
	kern_return_t kr;

	T_SETUPBEGIN;

	setvbuf(stdout, NULL, _IONBF, 0);
	setvbuf(stderr, NULL, _IONBF, 0);

	/* Validate what kind of kernel we're on */
	size_t kernel_version_size = sizeof(kernel_version);
	rv = sysctlbyname("kern.version", kernel_version, &kernel_version_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.version");

	T_LOG("kern.version: %s\n", kernel_version);

	/* Double check that darwintest set the boot-arg we requested */
	size_t kernel_bootargs_size = sizeof(kernel_bootargs);
	rv = sysctlbyname("kern.bootargs", kernel_bootargs, &kernel_bootargs_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.bootargs");

	T_LOG("kern.bootargs: %s\n", kernel_bootargs);

	if (NULL == strstr(kernel_bootargs, "enable_skstb=1")) {
		T_ASSERT_FAIL("enable_skstb=1 boot-arg is missing");
	}

	kr = mach_timebase_info(&timebase_info);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_timebase_info");

	struct sched_param param = {.sched_priority = 63};

	rv = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
	T_QUIET; T_ASSERT_POSIX_ZERO(rv, "pthread_setschedparam");

	T_SETUPEND;
}


T_DECL(count_cpus,
    "Tests we can schedule bound threads on all hw.ncpu cores and that _os_cpu_number matches",
    XNU_T_META_SOC_SPECIFIC)
{
	int rv;

	cpucount_setup();

	int bound_cpu_out = 0;
	size_t bound_cpu_out_size = sizeof(bound_cpu_out);
	rv = sysctlbyname("kern.sched_thread_bind_cpu", &bound_cpu_out, &bound_cpu_out_size, NULL, 0);

	if (rv == -1) {
		if (errno == ENOENT) {
			T_ASSERT_FAIL("kern.sched_thread_bind_cpu doesn't exist, must set enable_skstb=1 boot-arg on development kernel");
		}
		if (errno == EPERM) {
			T_ASSERT_FAIL("must run as root");
		}
	}

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "read kern.sched_thread_bind_cpu");
	T_QUIET; T_ASSERT_EQ(bound_cpu_out, -1, "kern.sched_thread_bind_cpu should exist, start unbound");

	uint32_t sysctl_ncpu = 0;
	size_t ncpu_size = sizeof(sysctl_ncpu);
	rv = sysctlbyname("hw.ncpu", &sysctl_ncpu, &ncpu_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sysctlbyname(hw.ncpu)");

	T_LOG("hw.ncpu: %2u\n", sysctl_ncpu);

	T_ASSERT_GT(sysctl_ncpu, 0, "at least one CPU exists");

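	/*
	 * kern.sched_thread_bind_cpu is a development-kernel sysctl: writing a
	 * CPU id migrates and binds the calling thread to that CPU, reading it
	 * back returns the current binding, and writing -1 unbinds. Bind to
	 * each CPU in turn and check that _os_cpu_number agrees.
	 */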
	for (uint32_t cpu_to_bind = 0; cpu_to_bind < sysctl_ncpu; cpu_to_bind++) {
		int32_t before_csw_count = get_csw_count();
		T_LOG("(csw %4d) attempting to bind to cpu %2u\n", before_csw_count, cpu_to_bind);

		uint64_t start = mach_absolute_time();

		rv = sysctlbyname("kern.sched_thread_bind_cpu", NULL, 0, &cpu_to_bind, sizeof(cpu_to_bind));

		uint64_t end = mach_absolute_time();

		if (rv == -1 && errno == ENOTSUP) {
			T_SKIP("Binding is available, but this process doesn't support binding (e.g. Rosetta on Aruba)");
		}

		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.sched_thread_bind_cpu(%u)", cpu_to_bind);

		uint32_t os_cpu_number_reported = fixed_os_cpu_number();

		bound_cpu_out = 0;
		rv = sysctlbyname("kern.sched_thread_bind_cpu", &bound_cpu_out, &bound_cpu_out_size, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "read kern.sched_thread_bind_cpu");

		T_QUIET; T_EXPECT_EQ((int)cpu_to_bind, bound_cpu_out,
		    "should report bound cpu id matching requested bind target");

		uint64_t delta_abs = end - start;
		uint64_t delta_ns = abs_to_nanos(delta_abs);

		int32_t after_csw_count = get_csw_count();

		T_LOG("(csw %4d) bound to cpu %2u in %f milliseconds\n",
		    after_csw_count, cpu_to_bind,
		    ((double)delta_ns / 1000000.0));

		if (cpu_to_bind > 0) {
			T_QUIET; T_EXPECT_LT(before_csw_count, after_csw_count,
			    "should have had to context switch to execute the bind");
		}

		T_LOG("cpu %2u reported id %2u\n",
		    cpu_to_bind, os_cpu_number_reported);

		T_QUIET;
		T_EXPECT_EQ(cpu_to_bind, os_cpu_number_reported,
		    "should report same CPU number as was bound to");
	}

	int unbind = -1; /* pass -1 in order to unbind the thread */

	rv = sysctlbyname("kern.sched_thread_bind_cpu", NULL, 0, &unbind, sizeof(unbind));

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.sched_thread_bind_cpu(%d)", unbind);

	rv = sysctlbyname("kern.sched_thread_bind_cpu", &bound_cpu_out, &bound_cpu_out_size, NULL, 0);

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "read kern.sched_thread_bind_cpu");
	T_QUIET; T_ASSERT_EQ(bound_cpu_out, -1, "thread should be unbound at the end");

	T_PASS("test has run threads on all CPUs");
}

T_DECL(count_clusters,
    "Tests we can schedule bound threads on all cpu clusters and that _os_cpu_cluster_number matches",
    XNU_T_META_SOC_SPECIFIC)
{
	int rv;

	cpucount_setup();

	uint8_t cpuclusters = COMM_PAGE_READ(uint8_t, CPU_CLUSTERS);
	T_LOG("cpuclusters: %2d\n", cpuclusters);
	T_QUIET; T_ASSERT_GT(cpuclusters, 0, "at least one CPU cluster exists");
	if (cpuclusters == 1) {
		T_SKIP("Test is unsupported on non-AMP platforms");
	}

	uint32_t sysctl_ncpu = 0;
	size_t ncpu_size = sizeof(sysctl_ncpu);
	rv = sysctlbyname("hw.ncpu", &sysctl_ncpu, &ncpu_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sysctlbyname(hw.ncpu)");
	T_LOG("hw.ncpu: %2u\n", sysctl_ncpu);

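	/*
	 * If any cores are currently derecommended (e.g. offlined for thermal
	 * reasons), cluster binds may not land where expected, so only proceed
	 * when every core is recommended.
	 */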
	uint64_t recommended_cores = 0;
	size_t recommended_cores_size = sizeof(recommended_cores);
	rv = sysctlbyname("kern.sched_recommended_cores", &recommended_cores, &recommended_cores_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "sysctlbyname(kern.sched_recommended_cores)");
	T_LOG("kern.sched_recommended_cores: %llu", recommended_cores);
	if ((uint32_t)__builtin_popcountll(recommended_cores) != sysctl_ncpu) {
		T_SKIP("Missing recommended cores");
	}

	int bound_cluster_out = 0;
	size_t bound_cluster_out_size = sizeof(bound_cluster_out);
	rv = sysctlbyname("kern.sched_thread_bind_cluster_id", &bound_cluster_out, &bound_cluster_out_size, NULL, 0);

	if (rv == -1) {
		if (errno == ENOENT) {
			T_ASSERT_FAIL("kern.sched_thread_bind_cluster_id doesn't exist, must set enable_skstb=1 boot-arg on development kernel");
		}
		if (errno == EPERM) {
			T_ASSERT_FAIL("must run as root");
		}
	}

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "read kern.sched_thread_bind_cluster_id");
	T_QUIET; T_ASSERT_EQ(bound_cluster_out, -1, "kern.sched_thread_bind_cluster_id should exist, start unbound");

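	/*
	 * Mirror of the count_cpus loop: bind to each cluster in turn and
	 * cross-check the cluster id from three sources: the sysctl readback,
	 * _os_cpu_cluster_number (where supported), and the commpage
	 * CPU_TO_CLUSTER table.
	 */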
	for (uint32_t cluster_to_bind = 0; cluster_to_bind < cpuclusters; cluster_to_bind++) {
		int32_t before_csw_count = get_csw_count();
		T_LOG("(csw %4d) attempting to bind to cluster %2u\n", before_csw_count, cluster_to_bind);

		uint64_t start = mach_absolute_time();

		rv = sysctlbyname("kern.sched_thread_bind_cluster_id", NULL, 0, &cluster_to_bind, sizeof(cluster_to_bind));

		uint64_t end = mach_absolute_time();

		if (rv == -1 && errno == ENOTSUP) {
			T_SKIP("Binding is available, but this process doesn't support binding (e.g. Rosetta on Aruba)");
		}

		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.sched_thread_bind_cluster_id(%u)", cluster_to_bind);

		T_LOG("CPU ID: %u", fixed_os_cpu_number());

#if TARGET_CPU_X86_64
		T_LOG("_os_cpu_cluster_number unsupported under x86.");
#else
		unsigned int os_cluster_number_reported = _os_cpu_cluster_number();
		T_LOG("OS reported cluster number: %2u\n",
		    os_cluster_number_reported);
		T_QUIET; T_EXPECT_EQ(cluster_to_bind, os_cluster_number_reported,
		    "_os_cpu_cluster_number should report same cluster number as was bound to");
#endif

		unsigned int commpage_cluster_number_reported = commpage_cpu_cluster_number();
		T_LOG("Comm Page reported cluster number: %u", commpage_cluster_number_reported);
		T_EXPECT_EQ(commpage_cluster_number_reported, cluster_to_bind, "commpage cluster number matches the cluster bound to");

		bound_cluster_out = 0;
		rv = sysctlbyname("kern.sched_thread_bind_cluster_id", &bound_cluster_out, &bound_cluster_out_size, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "read kern.sched_thread_bind_cluster_id");

		T_QUIET; T_EXPECT_EQ((int)cluster_to_bind, bound_cluster_out,
		    "bound cluster id matches requested bind target");

		uint64_t delta_abs = end - start;
		uint64_t delta_ns = abs_to_nanos(delta_abs);

		int32_t after_csw_count = get_csw_count();

		T_LOG("(csw %4d) bound to cluster %2u in %f milliseconds\n",
		    after_csw_count, cluster_to_bind,
		    ((double)delta_ns / 1000000.0));

		if (cluster_to_bind > 0) {
			T_QUIET; T_EXPECT_LT(before_csw_count, after_csw_count,
			    "should have had to context switch to execute the bind");
		}
	}

	int unbind = -1; /* pass -1 in order to unbind the thread */

	rv = sysctlbyname("kern.sched_thread_bind_cluster_id", NULL, 0, &unbind, sizeof(unbind));

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.sched_thread_bind_cluster_id(%d)", unbind);

	rv = sysctlbyname("kern.sched_thread_bind_cluster_id", &bound_cluster_out, &bound_cluster_out_size, NULL, 0);

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "read kern.sched_thread_bind_cluster_id");
	T_QUIET; T_ASSERT_EQ(bound_cluster_out, -1, "thread should be unbound at the end");

	T_PASS("test has run threads on all clusters");
}

T_DECL(check_cpu_topology,
    "Verify _os_cpu_cluster_number(), _os_cpu_number() against IORegistry",
    XNU_T_META_SOC_SPECIFIC,
    T_META_ENABLED(TARGET_CPU_ARM || TARGET_CPU_ARM64))
{
	int rv;
	uint32_t cpu_id, cluster_id;
	kern_return_t kr;
	io_iterator_t cpus_iter = 0;
	io_service_t cpus_service = 0;
	io_service_t cpu_service = 0;
	CFDictionaryRef match = NULL;

	cpucount_setup();

	int bound_cpu_out = 0;
	size_t bound_cpu_out_size = sizeof(bound_cpu_out);
	rv = sysctlbyname("kern.sched_thread_bind_cpu", &bound_cpu_out, &bound_cpu_out_size, NULL, 0);

	if (rv == -1) {
		if (errno == ENOENT) {
			T_ASSERT_FAIL("kern.sched_thread_bind_cpu doesn't exist, must set enable_skstb=1 boot-arg on development kernel");
		}
		if (errno == EPERM) {
			T_ASSERT_FAIL("must run as root");
		}
	}

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "read kern.sched_thread_bind_cpu");
	T_QUIET; T_ASSERT_EQ(bound_cpu_out, -1, "kern.sched_thread_bind_cpu should exist, start unbound");

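	/*
	 * Each child of the IODeviceTree "cpus" entry describes one logical CPU
	 * and carries "logical-cpu-id" / "logical-cluster-id" properties. Bind
	 * to each published CPU id and check that the library- and
	 * commpage-reported ids match the registry's topology.
	 */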
	match = IOServiceNameMatching("cpus");
	cpus_service = IOServiceGetMatchingService(kIOMainPortDefault, match);
	match = NULL; // IOServiceGetMatchingService consumes the match reference
	T_QUIET; T_ASSERT_NE(cpus_service, (io_service_t)0, "failed to get cpus IOService");

	kr = IORegistryEntryGetChildIterator(cpus_service, "IODeviceTree", &cpus_iter);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "IORegistryEntryGetChildIterator");

	while ((cpu_service = IOIteratorNext(cpus_iter)) != 0) {
		CFMutableDictionaryRef props = NULL;
		kr = IORegistryEntryCreateCFProperties(cpu_service, &props, kCFAllocatorDefault, 0);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "IORegistryEntryCreateCFProperties");

		dict_uint32(props, CFSTR("logical-cpu-id"), &cpu_id);
		T_LOG("IORegistry logical cpu id: %u", cpu_id);
		dict_uint32(props, CFSTR("logical-cluster-id"), &cluster_id);
		T_LOG("IORegistry logical cpu cluster id: %u", cluster_id);

		T_LOG("Binding thread to cpu %u", cpu_id);
		rv = sysctlbyname("kern.sched_thread_bind_cpu", NULL, 0, &cpu_id, sizeof(cpu_id));
		if (rv == -1 && errno == ENOTSUP) {
			T_SKIP("Binding is available, but this process doesn't support binding (e.g. Rosetta on Aruba)");
		}
		T_QUIET; T_ASSERT_POSIX_SUCCESS(rv, "kern.sched_thread_bind_cpu(%u)", cpu_id);

		unsigned int os_cpu_number_reported = fixed_os_cpu_number();
		T_EXPECT_EQ(os_cpu_number_reported, cpu_id, "_os_cpu_number matches IORegistry entry for this CPU");
		unsigned int os_cluster_number_reported = _os_cpu_cluster_number();
		T_EXPECT_EQ(os_cluster_number_reported, cluster_id, "_os_cpu_cluster_number matches IORegistry entry for this CPU");
		unsigned int commpage_cluster_number_reported = commpage_cpu_cluster_number();
		T_EXPECT_EQ(commpage_cluster_number_reported, cluster_id, "commpage cluster number matches IORegistry entry for this CPU");

		CFRelease(props);
		IOObjectRelease(cpu_service);
	}
	IOObjectRelease(cpus_iter);
	IOObjectRelease(cpus_service);
	T_PASS("All cluster IDs match with IORegistry");
}

T_DECL(hw_perflevels_order_and_cpu_counts,
    "check that perflevel sysctls return the correct order and with expected cpu counts",
    XNU_T_META_SOC_SPECIFIC)
{
	int ret;
	char sysctlname[256];

	/* Check perflevel count */
	int level_count = 0;
	ret = sysctlbyname("hw.nperflevels", &level_count, &(size_t){ sizeof(level_count) }, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "hw.nperflevels");
	T_EXPECT_GE(level_count, 1, "valid hw.nperflevels: %d", level_count);

	/* Check perflevel names */
	char perflevel_name[level_count][128];
	int efficient_pos = -1;
	int performance_pos = -1;
	int standard_pos = -1;
	for (int p = 0; p < level_count; p++) {
		snprintf(sysctlname, sizeof(sysctlname), "hw.perflevel%d.name", p);
		ret = sysctlbyname(sysctlname, perflevel_name[p], &(size_t){ sizeof(perflevel_name[p]) }, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "%s", sysctlname);
		if (strcmp(perflevel_name[p], "Efficiency") == 0) {
			efficient_pos = p;
		} else if (strcmp(perflevel_name[p], "Performance") == 0) {
			performance_pos = p;
		} else if (strcmp(perflevel_name[p], "Standard") == 0) {
			standard_pos = p;
		}
	}
	T_ASSERT_TRUE((efficient_pos >= 0) || (performance_pos >= 0) || (standard_pos >= 0),
	    "valid perflevels detected (\"Efficiency\" %d, \"Performance\" %d, \"Standard\" %d)",
	    efficient_pos, performance_pos, standard_pos);
	if (standard_pos >= 0) {
		T_ASSERT_EQ(level_count, 1, "single \"Standard\" perflevel");
	}
	if (efficient_pos >= 0) {
		T_ASSERT_EQ(efficient_pos, level_count - 1, "\"Efficiency\" is the highest index perflevel");
	}
	if (performance_pos >= 0) {
		T_ASSERT_EQ(performance_pos, 0, "\"Performance\" is the lowest index perflevel");
	}

	/*
	 * Check that certain variants of CPU counts sum up to the expected total
	 * across all perflevels.
	 */
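	/*
	 * For example, on a hypothetical machine with 8 performance and 2
	 * efficiency cores, hw.perflevel0.logicalcpu_max (8) plus
	 * hw.perflevel1.logicalcpu_max (2) should equal hw.logicalcpu_max (10).
	 */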
	const int num_cpu_count_variants = 2;
	const char *cpu_count_variants[num_cpu_count_variants] = {"physicalcpu_max", "logicalcpu_max"};
	for (int v = 0; v < num_cpu_count_variants; v++) {
		unsigned int total_amount = 0;
		snprintf(sysctlname, sizeof(sysctlname), "hw.%s", cpu_count_variants[v]);
		ret = sysctlbyname(sysctlname, &total_amount, &(size_t){ sizeof(total_amount) }, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "%s", sysctlname);
		unsigned int amount_from_perflevels = 0;
		for (int p = 0; p < level_count; p++) {
			unsigned int perflevel_amount = 0;
			snprintf(sysctlname, sizeof(sysctlname), "hw.perflevel%d.%s", p, cpu_count_variants[v]);
			ret = sysctlbyname(sysctlname, &perflevel_amount, &(size_t){ sizeof(perflevel_amount) }, NULL, 0);
			T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "%s", sysctlname);
			amount_from_perflevels += perflevel_amount;
		}
		T_EXPECT_EQ(total_amount, amount_from_perflevels, "all %u %s accounted for", total_amount, cpu_count_variants[v]);
	}
}