xref: /xnu-8020.101.4/tests/monotonic_core.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Must come before including darwintest.h
3  */
4 #ifdef T_NAMESPACE
5 #undef T_NAMESPACE
6 #endif /* defined(T_NAMESPACE) */
7 
8 #include <darwintest.h>
9 #include <fcntl.h>
10 #include <inttypes.h>
11 #ifndef PRIVATE
12 /*
13  * Need new CPU families.
14  */
15 #define PRIVATE
16 #include <mach/machine.h>
17 #undef PRIVATE
18 #else /* !defined(PRIVATE) */
19 #include <mach/machine.h>
20 #endif /* defined(PRIVATE) */
21 #include <ktrace.h>
22 #include <mach/mach.h>
23 #include <stdint.h>
24 #include <System/sys/guarded.h>
25 #include <System/sys/monotonic.h>
26 #include <sys/ioctl.h>
27 #include <sys/kdebug.h>
28 #include <sys/resource.h>
29 #include <sys/resource_private.h>
30 #include <sys/sysctl.h>
31 #include <unistd.h>
32 
/*
 * File-wide darwintest metadata: place every test in the "xnu.monotonic"
 * namespace and disable leak checking (several tests below intentionally
 * leak threads/allocations because they end by timeout or T_END).
 */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.monotonic"),
	T_META_CHECK_LEAKS(false)
	);
37 
/*
 * Skip the current test unless the kernel reports that monotonic counters
 * are available via the "kern.monotonic.supported" sysctl.  Also skips if
 * the sysctl itself is missing (e.g. kernel built without monotonic).
 */
static void
skip_if_unsupported(void)
{
	int supported = 0;
	size_t supported_size = sizeof(supported);

	if (sysctlbyname("kern.monotonic.supported", &supported,
	    &supported_size, NULL, 0) < 0) {
		T_WITH_ERRNO;
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (supported == 0) {
		T_SKIP("monotonic is not supported on this platform");
	}
}
56 
57 static void
check_fixed_counts(struct thsc_cpi counts[2])58 check_fixed_counts(struct thsc_cpi counts[2])
59 {
60 	T_QUIET;
61 	T_EXPECT_GT(counts[0].tcpi_instructions, UINT64_C(0), "non-zero instructions");
62 	T_QUIET;
63 	T_EXPECT_GT(counts[0].tcpi_cycles, UINT64_C(0), "non-zero cycles");
64 
65 	T_EXPECT_GT(counts[1].tcpi_instructions, counts[0].tcpi_instructions,
66 	    "monotonically-increasing instructions");
67 	T_EXPECT_GT(counts[1].tcpi_cycles, counts[0].tcpi_cycles,
68 	    "monotonically-increasing cycles");
69 }
70 
71 T_DECL(core_fixed_thread_self, "check the current thread's fixed counters",
72     T_META_ASROOT(true))
73 {
74 	int err;
75 	struct thsc_cpi counts[2] = { 0 };
76 
77 	T_SETUPBEGIN;
78 	skip_if_unsupported();
79 	T_SETUPEND;
80 
81 	err = thread_selfcounts_cpi(&counts[0]);
82 	T_ASSERT_POSIX_ZERO(err, "thread_selfcounts");
83 	err = thread_selfcounts_cpi(&counts[1]);
84 	T_ASSERT_POSIX_ZERO(err, "thread_selfcounts");
85 
86 	check_fixed_counts(counts);
87 }
88 
89 T_DECL(core_fixed_task, "check that task counting is working",
90     T_META_ASROOT(true))
91 {
92 	task_t task = mach_task_self();
93 	kern_return_t kr;
94 	mach_msg_type_number_t size = TASK_INSPECT_BASIC_COUNTS_COUNT;
95 	struct thsc_cpi counts[2];
96 
97 	skip_if_unsupported();
98 
99 	kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
100 	    (task_inspect_info_t)&counts[0], &size);
101 	T_ASSERT_MACH_SUCCESS(kr,
102 	    "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)");
103 
104 	size = TASK_INSPECT_BASIC_COUNTS_COUNT;
105 	kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
106 	    (task_inspect_info_t)&counts[1], &size);
107 	T_ASSERT_MACH_SUCCESS(kr,
108 	    "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)");
109 
110 	check_fixed_counts(counts);
111 }
112 
/*
 * Verify that the kernel emits paired kdebug events carrying monotonic
 * counter values.  A ktrace session watches for the
 * DBG_MONOTONIC/DBG_MT_TMPCPU event pair; the
 * "kern.monotonic.kdebug_test" sysctl asks the kernel to emit one
 * (presumably on the current CPU -- the kernel side is not visible here).
 * The test ends from the ktrace completion handler via T_END.
 */
T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work",
    T_META_ASROOT(true))
{
	/* __block so the event-handler block can set it. */
	__block bool saw_events = false;
	ktrace_session_t s;
	int r;
	int set = 1;

	T_SETUPBEGIN;
	skip_if_unsupported();

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/*
	 * Fire once per matched start/end event pair; the counter values
	 * ride in arg1 (instructions) and arg2 (cycles) of each event.
	 */
	ktrace_events_single_paired(s,
	    KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_TMPCPU, 0x3fff),
	    ^(struct trace_point *start, struct trace_point *end)
	{
		struct thsc_cpi counts[2];

		saw_events = true;

		counts[0].tcpi_instructions = start->arg1;
		counts[0].tcpi_cycles = start->arg2;
		counts[1].tcpi_instructions = end->arg1;
		counts[1].tcpi_cycles = end->arg2;

		/* Same non-zero/monotonic checks as the other tests. */
		check_fixed_counts(counts);
	});

	/* Runs when the trace session ends; fails if no events were seen. */
	ktrace_set_completion_handler(s, ^{
		T_ASSERT_TRUE(saw_events, "should see monotonic kdebug events");
		T_END;
	});
	T_SETUPEND;

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), NULL);

	/* Poke the kernel to emit the test events. */
	r = sysctlbyname("kern.monotonic.kdebug_test", NULL, NULL, &set,
	    sizeof(set));
	T_ASSERT_POSIX_SUCCESS(r,
	    "sysctlbyname(\"kern.monotonic.kdebug_test\", ...)");

	ktrace_end(s, 0);
	/* Never returns; the completion handler calls T_END. */
	dispatch_main();
}
160 
161 static void *
spin_thread_self_counts(__unused void * arg)162 spin_thread_self_counts(__unused void *arg)
163 {
164 	struct thsc_cpi counts = { 0 };
165 	while (true) {
166 		(void)thread_selfcounts_cpi(&counts);
167 	}
168 }
169 
170 static void *
spin_task_inspect(__unused void * arg)171 spin_task_inspect(__unused void *arg)
172 {
173 	task_t task = mach_task_self();
174 	uint64_t counts[2] = { 0 };
175 	unsigned int size = 0;
176 	while (true) {
177 		size = (unsigned int)sizeof(counts);
178 		(void)task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
179 		    (task_inspect_info_t)&counts[0], &size);
180 		/*
181 		 * Not realistic for a process to see count values with the high bit
182 		 * set, but kernel pointers will be that high.
183 		 */
184 		T_QUIET; T_ASSERT_LT(counts[0], 1ULL << 63,
185 		        "check for valid count entry 1");
186 		T_QUIET; T_ASSERT_LT(counts[1], 1ULL << 63,
187 		        "check for valid count entry 2");
188 	}
189 }
190 
/*
 * Race TASK_INSPECT_BASIC_COUNTS against thread_selfcounts_cpi from one
 * thread per CPU for 10 seconds, trying to provoke an error path that
 * would copy out uninitialized kernel memory.  The spinner threads assert
 * on any suspicious values; reaching the end of the window is a pass.
 */
T_DECL(core_fixed_stack_leak_race,
    "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS")
{
	T_SETUPBEGIN;

	int cpu_count = 0;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.logicalcpu_max",
	    &cpu_count, &(size_t){ sizeof(cpu_count) }, NULL, 0),
	    "get number of CPUs");
	T_QUIET; T_ASSERT_GT(cpu_count, 0, "got non-zero number of CPUs");

	pthread_t *spinners = calloc((unsigned long)cpu_count,
	    sizeof(*spinners));
	T_QUIET; T_ASSERT_NOTNULL(spinners, "allocated space for threads");

	T_LOG("creating %d threads to attempt to race around task counts",
	    cpu_count);
	/*
	 * Have half the threads hammering thread_self_counts and the other half
	 * trying to get an error to occur inside TASK_INSPECT_BASIC_COUNTS and see
	 * uninitialized kernel memory.
	 */
	for (int i = 0; i < cpu_count; i++) {
		void *(*body)(void *) = (i & 1) ? spin_task_inspect
		    : spin_thread_self_counts;
		T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&spinners[i], NULL,
		    body, NULL), NULL);
	}

	T_SETUPEND;

	sleep(10);
	T_PASS("ending test after 10 seconds");
}
221 
/*
 * Shared driver for the perf_* tests below: repeatedly read a
 * {instructions, cycles} delta pair from the named sysctl and feed the
 * samples into two dt_stat measurements until both are statistically
 * stable, then publish them.
 */
static void
perf_sysctl_deltas(const char *sysctl_name, const char *stat_name)
{
	uint64_t sample[2];

	T_SETUPBEGIN;
	skip_if_unsupported();

	dt_stat_t instrs = dt_stat_create("instructions", "%s_instrs",
	    stat_name);
	dt_stat_t cycles = dt_stat_create("cycles", "%s_cycles", stat_name);
	T_SETUPEND;

	while (!dt_stat_stable(instrs) || !dt_stat_stable(cycles)) {
		size_t sample_size = sizeof(sample);
		int ret = sysctlbyname(sysctl_name, sample, &sample_size,
		    NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"%s\", ...)", sysctl_name);
		/* sample[0] = instructions delta, sample[1] = cycles delta. */
		dt_stat_add(instrs, (double)sample[0]);
		dt_stat_add(cycles, (double)sample[1]);
	}

	dt_stat_finalize(instrs);
	dt_stat_finalize(cycles);
}
249 
/* Measure per-CPU fixed-counter read cost via the kernel perf sysctl. */
T_DECL(perf_core_fixed_cpu, "test the performance of fixed CPU counter access",
    T_META_ASROOT(true), T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_cpu_perf", "fixed_cpu_counters");
}
255 
/* Measure per-thread fixed-counter read cost via the kernel perf sysctl. */
T_DECL(perf_core_fixed_thread, "test the performance of fixed thread counter access",
    T_META_ASROOT(true), T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_thread_perf",
	    "fixed_thread_counters");
}
262 
/* Measure per-task fixed-counter read cost via the kernel perf sysctl. */
T_DECL(perf_core_fixed_task, "test the performance of fixed task counter access",
    T_META_ASROOT(true), T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_task_perf", "fixed_task_counters");
}
268 
269 T_DECL(perf_core_fixed_thread_self, "test the performance of thread self counts",
270     T_META_TAG_PERF)
271 {
272 	struct thsc_cpi counts[2];
273 
274 	T_SETUPBEGIN;
275 	dt_stat_t instrs = dt_stat_create("fixed_thread_self_instrs", "instructions");
276 	dt_stat_t cycles = dt_stat_create("fixed_thread_self_cycles", "cycles");
277 
278 	skip_if_unsupported();
279 	T_SETUPEND;
280 
281 	while (!dt_stat_stable(instrs) || !dt_stat_stable(cycles)) {
282 		int r1, r2;
283 
284 		r1 = thread_selfcounts_cpi(&counts[0]);
285 		r2 = thread_selfcounts_cpi(&counts[1]);
286 		T_QUIET; T_ASSERT_POSIX_ZERO(r1, "thread_selfcounts");
287 		T_QUIET; T_ASSERT_POSIX_ZERO(r2, "thread_selfcounts");
288 
289 		dt_stat_add(instrs, counts[1].tcpi_instructions - counts[0].tcpi_instructions);
290 		dt_stat_add(cycles, counts[1].tcpi_cycles - counts[0].tcpi_cycles);
291 	}
292 
293 	dt_stat_finalize(instrs);
294 	dt_stat_finalize(cycles);
295 }
296