1 // Copyright (c) 2021-2023 Apple Inc. All rights reserved.
2
3 #include <darwintest.h>
4 #include <stdlib.h>
5 #include <sys/resource_private.h>
6 #include <sys/sysctl.h>
7
8 #include "test_utils.h"
9 #include "recount_test_utils.h"
10
/* Suite-wide metadata: radar routing, ownership, and leak-check opt-out. */
T_GLOBAL_META(
	T_META_RADAR_COMPONENT_NAME("xnu"),
	T_META_RADAR_COMPONENT_VERSION("cpu counters"),
	T_META_OWNER("mwidmann"),
	T_META_CHECK_LEAKS(false));
16
/*
 * Reason string passed to T_MAYFAIL_IF_ENABLED: expectations tagged with it
 * are allowed to fail because the AMP scheduler doesn't guarantee that a
 * P-bound thread only runs on P-cores (see thread_selfcounts_perf_level_correct,
 * which calls set_expects_may_fail() in that configuration).
 */
static char *amp_fail_reason = "P-binding on AMP scheduler";
18
/*
 * Expect that two successive CPI samples from the same thread are sane:
 * the earlier sample's counters are non-zero, and the later sample's
 * counters are strictly greater (the thread ran between the samples).
 *
 * NOTE: T_QUIET and T_MAYFAIL_IF_ENABLED appear to arm only the
 * immediately-following expectation, so each macro/expect pairing below is
 * order-sensitive -- do not reorder these statements.
 */
static void
_check_cpi(struct thsc_cpi *before, struct thsc_cpi *after, const char *name)
{
	/* Baseline sanity: counters were actually collected. */
	T_QUIET; T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_GT(before->tcpi_instructions, UINT64_C(0),
	    "%s: instructions non-zero", name);
	T_QUIET; T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_GT(before->tcpi_cycles, UINT64_C(0), "%s: cycles non-zero",
	    name);

	/* Executing this code between samples must itself advance the counts. */
	T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_GT(after->tcpi_instructions, before->tcpi_instructions,
	    "%s: instructions monotonically-increasing", name);
	T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_GT(after->tcpi_cycles, before->tcpi_cycles,
	    "%s: cycles monotonically-increasing", name);
}
36
/*
 * Expect that two successive CPI samples are identical -- used when the
 * thread should not have accumulated any counts (e.g. on a perf level it
 * was bound away from).
 *
 * NOTE: T_MAYFAIL_IF_ENABLED appears to arm only the next expectation, so
 * the statement pairing is order-sensitive.
 */
static void
_check_no_cpi(struct thsc_cpi *before, struct thsc_cpi *after, const char *name)
{
	T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_EQ(after->tcpi_instructions, before->tcpi_instructions,
	    "%s: instructions should not increase", name);
	T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_EQ(after->tcpi_cycles, before->tcpi_cycles,
	    "%s: cycles should not increase", name);
}
47
48 static struct thsc_cpi
_remove_time_from_cpi(struct thsc_time_cpi * time_cpi)49 _remove_time_from_cpi(struct thsc_time_cpi *time_cpi)
50 {
51 return (struct thsc_cpi){
52 .tcpi_instructions = time_cpi->ttci_instructions,
53 .tcpi_cycles = time_cpi->ttci_cycles,
54 };
55 }
56
/*
 * Expect that time-and-CPI samples advanced between two readings: counters
 * via _check_cpi(), plus strictly-increasing user time and (where the
 * system reports separate user/system times) system time.
 *
 * NOTE: T_MAYFAIL_IF_ENABLED appears to arm only the next expectation;
 * keep each macro adjacent to its expectation.
 */
static void
_check_time_cpi(struct thsc_time_cpi *before, struct thsc_time_cpi *after,
    const char *name)
{
	struct thsc_cpi before_cpi = _remove_time_from_cpi(before);
	struct thsc_cpi after_cpi = _remove_time_from_cpi(after);
	_check_cpi(&before_cpi, &after_cpi, name);

	T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_GT(after->ttci_user_time_mach, before->ttci_user_time_mach,
	    "%s: user time monotonically-increasing", name);

	if (has_user_system_times()) {
		T_MAYFAIL_IF_ENABLED(amp_fail_reason);
		T_EXPECT_GT(after->ttci_system_time_mach, before->ttci_system_time_mach,
		    "%s: system time monotonically-increasing", name);
	}
}
75
/*
 * Expect that time-and-CPI samples did NOT change between two readings:
 * counters via _check_no_cpi(), plus unchanged user time and (where
 * reported separately) system time.
 *
 * NOTE: T_MAYFAIL_IF_ENABLED appears to arm only the next expectation;
 * keep each macro adjacent to its expectation.
 */
static void
_check_no_time_cpi(struct thsc_time_cpi *before, struct thsc_time_cpi *after,
    const char *name)
{
	struct thsc_cpi before_cpi = _remove_time_from_cpi(before);
	struct thsc_cpi after_cpi = _remove_time_from_cpi(after);
	_check_no_cpi(&before_cpi, &after_cpi, name);

	T_MAYFAIL_IF_ENABLED(amp_fail_reason);
	T_EXPECT_EQ(after->ttci_user_time_mach, before->ttci_user_time_mach,
	    "%s: user time should not change", name);

	if (has_user_system_times()) {
		T_MAYFAIL_IF_ENABLED(amp_fail_reason);
		T_EXPECT_EQ(after->ttci_system_time_mach, before->ttci_system_time_mach,
		    "%s: system time should not change", name);
	}
}
94
95 static struct thsc_time_cpi
_remove_energy_from_cpi(struct thsc_time_energy_cpi * energy_cpi)96 _remove_energy_from_cpi(struct thsc_time_energy_cpi *energy_cpi)
97 {
98 return (struct thsc_time_cpi){
99 .ttci_instructions = energy_cpi->ttec_instructions,
100 .ttci_cycles = energy_cpi->ttec_cycles,
101 .ttci_system_time_mach = energy_cpi->ttec_system_time_mach,
102 .ttci_user_time_mach = energy_cpi->ttec_user_time_mach,
103 };
104 }
105
106 static void
_check_usage(struct thsc_time_energy_cpi * before,struct thsc_time_energy_cpi * after,const char * name)107 _check_usage(struct thsc_time_energy_cpi *before,
108 struct thsc_time_energy_cpi *after, const char *name)
109 {
110 struct thsc_time_cpi before_time = _remove_energy_from_cpi(before);
111 struct thsc_time_cpi after_time = _remove_energy_from_cpi(after);
112 _check_time_cpi(&before_time, &after_time, name);
113
114 if (has_energy()) {
115 T_MAYFAIL_IF_ENABLED(amp_fail_reason);
116 T_EXPECT_GT(after->ttec_energy_nj, UINT64_C(0),
117 "%s: energy monotonically-increasing", name);
118 }
119 }
120
121 static void
_check_no_usage(struct thsc_time_energy_cpi * before,struct thsc_time_energy_cpi * after,const char * name)122 _check_no_usage(struct thsc_time_energy_cpi *before,
123 struct thsc_time_energy_cpi *after, const char *name)
124 {
125 struct thsc_time_cpi before_time = _remove_energy_from_cpi(before);
126 struct thsc_time_cpi after_time = _remove_energy_from_cpi(after);
127 _check_no_time_cpi(&before_time, &after_time, name);
128 }
129
130 T_DECL(thread_selfcounts_cpi_sanity, "check the current thread's CPI",
131 REQUIRE_RECOUNT_PMCS, T_META_TAG_VM_NOT_ELIGIBLE)
132 {
133 int err;
134 struct thsc_cpi counts[2] = { 0 };
135
136 err = thread_selfcounts(THSC_CPI, &counts[0], sizeof(counts[0]));
137 T_ASSERT_POSIX_ZERO(err, "thread_selfcounts(THSC_CPI, ...)");
138 err = thread_selfcounts(THSC_CPI, &counts[1], sizeof(counts[1]));
139 T_ASSERT_POSIX_ZERO(err, "thread_selfcounts(THSC_CPI, ...)");
140
141 _check_cpi(&counts[0], &counts[1], "anywhere");
142 }
143
144 T_DECL(thread_selfcounts_perf_level_sanity,
145 "check per-perf level time, energy, and CPI",
146 REQUIRE_RECOUNT_PMCS,
147 // REQUIRE_MULTIPLE_PERF_LEVELS, disabled due to rdar://111297938
148 SET_THREAD_BIND_BOOTARG,
149 T_META_ASROOT(true), T_META_TAG_VM_NOT_ELIGIBLE)
150 {
151 unsigned int level_count = perf_level_count();
152
153 // Until rdar://111297938, manually skip the test if there aren't multiple perf levels.
154 if (level_count < 2) {
155 T_SKIP("device is not eligible for checking perf levels because it is SMP");
156 }
157 struct thsc_time_energy_cpi *before = calloc(level_count, sizeof(*before));
158 struct thsc_time_energy_cpi *after = calloc(level_count, sizeof(*after));
159
160 run_on_all_perf_levels();
161
162 int err = thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, before,
163 level_count * sizeof(*before));
164 T_ASSERT_POSIX_ZERO(err,
165 "thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, ...)");
166
167 run_on_all_perf_levels();
168
169 err = thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, after,
170 level_count * sizeof(*after));
171 T_ASSERT_POSIX_ZERO(err,
172 "thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, ...)");
173
174 for (unsigned int i = 0; i < level_count; i++) {
175 _check_usage(&before[i], &after[i], perf_level_name(i));
176 }
177
178 free(before);
179 free(after);
180 }
181
182 static void
_expect_counts_on_perf_level(unsigned int perf_level_index,struct thsc_time_energy_cpi * before,struct thsc_time_energy_cpi * after)183 _expect_counts_on_perf_level(unsigned int perf_level_index,
184 struct thsc_time_energy_cpi *before,
185 struct thsc_time_energy_cpi *after)
186 {
187 unsigned int level_count = perf_level_count();
188 int err = thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, before,
189 level_count * sizeof(*before));
190 T_ASSERT_POSIX_ZERO(err,
191 "thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, ...)");
192 (void)getppid();
193 // Allow time for CLPC to read energy counters
194 usleep(10000);
195 err = thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, after,
196 level_count * sizeof(*after));
197 T_ASSERT_POSIX_ZERO(err,
198 "thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, ...)");
199
200 char *name = perf_level_name(perf_level_index);
201 _check_usage(&before[perf_level_index], &after[perf_level_index], name);
202 }
203
204 static void
_expect_no_counts_on_perf_level(unsigned int perf_level_index,struct thsc_time_energy_cpi * before,struct thsc_time_energy_cpi * after)205 _expect_no_counts_on_perf_level(unsigned int perf_level_index,
206 struct thsc_time_energy_cpi *before,
207 struct thsc_time_energy_cpi *after)
208 {
209 unsigned int level_count = perf_level_count();
210 int err = thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, before,
211 level_count * sizeof(*before));
212 T_ASSERT_POSIX_ZERO(err,
213 "thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, ...)");
214 (void)getppid();
215 // Allow time for CLPC to read energy counters
216 usleep(10000);
217 err = thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, after,
218 level_count * sizeof(*after));
219 T_ASSERT_POSIX_ZERO(err,
220 "thread_selfcounts(THSC_TIME_ENERGY_CPI_PER_PERF_LEVEL, ...)");
221
222 char *name = perf_level_name(perf_level_index);
223 _check_no_usage(&before[perf_level_index], &after[perf_level_index], name);
224 }
225
226 T_DECL(thread_selfcounts_perf_level_correct,
227 "check that runtimes on each perf level match binding request",
228 REQUIRE_RECOUNT_PMCS,
229 // REQUIRE_MULTIPLE_PERF_LEVELS, disabled due to rdar://111297938
230 SET_THREAD_BIND_BOOTARG,
231 T_META_ASROOT(true), T_META_TAG_VM_NOT_ELIGIBLE)
232 {
233 unsigned int level_count = perf_level_count();
234
235 // Until rdar://111297938, manually skip the test if there aren't multiple perf levels.
236 if (level_count < 2) {
237 T_SKIP("device is not eligible for checking perf levels because it is SMP");
238 }
239 T_LOG("Currently running the \"%s\" scheduler policy", sched_policy_name());
240 bool is_edge_scheduler = strcmp(sched_policy_name(), "edge") == 0;
241 for (unsigned int i = 0; i < level_count; i++) {
242 T_LOG("Level %d: %s", i, perf_level_name(i));
243 }
244
245 struct thsc_time_energy_cpi *before = calloc(level_count, sizeof(*before));
246 struct thsc_time_energy_cpi *after = calloc(level_count, sizeof(*after));
247
248 T_LOG("Binding to Efficiency cluster, should only see counts from E-cores");
249 T_SETUPBEGIN;
250 bind_to_cluster('E');
251 T_SETUPEND;
252 _expect_counts_on_perf_level(1, before, after);
253 _expect_no_counts_on_perf_level(0, before, after);
254
255 T_LOG("Binding to Performance cluster, should only see counts from P-cores");
256 T_SETUPBEGIN;
257 bind_to_cluster('P');
258 T_SETUPEND;
259 if (!is_edge_scheduler) {
260 T_QUIET; T_EXPECT_EQ_STR(sched_policy_name(), "amp", "Unexpected multicluster scheduling policy");
261 T_LOG("The AMP scheduler doesn't guarantee that a P-bound thread will "
262 "only run on P-cores, so the following expects may fail.");
263 set_expects_may_fail(true);
264 }
265 _expect_counts_on_perf_level(0, before, after);
266 _expect_no_counts_on_perf_level(1, before, after);
267
268 free(before);
269 free(after);
270 }
271
272 T_DECL(thread_selfcounts_cpi_perf,
273 "test the overhead of thread_selfcounts(2) THSC_CPI", T_META_TAG_PERF,
274 REQUIRE_RECOUNT_PMCS, T_META_TAG_VM_NOT_ELIGIBLE)
275 {
276 struct thsc_cpi counts[2];
277
278 T_SETUPBEGIN;
279 dt_stat_t instrs = dt_stat_create("thread_selfcounts_cpi_instrs",
280 "instructions");
281 dt_stat_t cycles = dt_stat_create("thread_selfcounts_cpi_cycles", "cycles");
282 T_SETUPEND;
283
284 while (!dt_stat_stable(instrs) || !dt_stat_stable(cycles)) {
285 int r1 = thread_selfcounts(THSC_CPI, &counts[0], sizeof(counts[0]));
286 int r2 = thread_selfcounts(THSC_CPI, &counts[1], sizeof(counts[1]));
287 T_QUIET; T_ASSERT_POSIX_ZERO(r1, "thread_selfcounts(THSC_CPI, ...)");
288 T_QUIET; T_ASSERT_POSIX_ZERO(r2, "thread_selfcounts(THSC_CPI, ...)");
289
290 dt_stat_add(instrs, counts[1].tcpi_instructions -
291 counts[0].tcpi_instructions);
292 dt_stat_add(cycles, counts[1].tcpi_cycles - counts[0].tcpi_cycles);
293 }
294
295 dt_stat_finalize(instrs);
296 dt_stat_finalize(cycles);
297 }
298