xref: /xnu-10002.1.13/tests/monotonic_core.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 // Copyright (c) 2021-2022 Apple Inc.  All rights reserved.
2 
#include <darwintest.h>
#include "test_utils.h"
#include <fcntl.h>
#include <inttypes.h>
#ifndef PRIVATE
/*
 * Need new CPU families.
 */
#define PRIVATE
#include <mach/machine.h>
#undef PRIVATE
#else /* !defined(PRIVATE) */
#include <mach/machine.h>
#endif /* defined(PRIVATE) */
#include <ktrace.h>
#include <mach/mach.h>
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <System/sys/guarded.h>
#include <System/sys/monotonic.h>
#include <sys/ioctl.h>
#include <sys/kdebug.h>
#include <sys/resource.h>
#include <sys/resource_private.h>
#include <sys/sysctl.h>
#include <unistd.h>
28 
/*
 * Suite-wide metadata: place every test in the xnu.monotonic namespace and
 * disable leak checking (the race test below intentionally never joins or
 * frees its worker threads).
 */
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.monotonic"),
	T_META_CHECK_LEAKS(false)
	);
33 
/*
 * Skip the running test unless the kernel advertises monotonic-counter
 * support through the kern.monotonic.supported sysctl.
 */
static void
skip_if_unsupported(void)
{
	int supported = 0;
	size_t supported_size = sizeof(supported);

	int ret = sysctlbyname("kern.monotonic.supported", &supported,
	    &supported_size, NULL, 0);
	if (ret < 0) {
		T_WITH_ERRNO;
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (!supported) {
		T_SKIP("monotonic is not supported on this platform");
	}
}
52 
53 static void
check_fixed_counts(struct thsc_cpi counts[2])54 check_fixed_counts(struct thsc_cpi counts[2])
55 {
56 	T_QUIET;
57 	T_EXPECT_GT(counts[0].tcpi_instructions, UINT64_C(0), "non-zero instructions");
58 	T_QUIET;
59 	T_EXPECT_GT(counts[0].tcpi_cycles, UINT64_C(0), "non-zero cycles");
60 
61 	T_EXPECT_GT(counts[1].tcpi_instructions, counts[0].tcpi_instructions,
62 	    "monotonically-increasing instructions");
63 	T_EXPECT_GT(counts[1].tcpi_cycles, counts[0].tcpi_cycles,
64 	    "monotonically-increasing cycles");
65 }
66 
T_DECL(core_fixed_task, "check that task counting is working",
    XNU_T_META_SOC_SPECIFIC, T_META_ASROOT(true))
{
	task_t task = mach_task_self();
	struct thsc_cpi counts[2];

	skip_if_unsupported();

	/*
	 * Sample TASK_INSPECT_BASIC_COUNTS twice in a row; the pair is then
	 * checked for non-zero, monotonically-increasing values.
	 */
	for (int i = 0; i < 2; i++) {
		mach_msg_type_number_t size = TASK_INSPECT_BASIC_COUNTS_COUNT;
		kern_return_t kr = task_inspect(task, TASK_INSPECT_BASIC_COUNTS,
		    (task_inspect_info_t)&counts[i], &size);
		T_ASSERT_MACH_SUCCESS(kr,
		    "task_inspect(... TASK_INSPECT_BASIC_COUNTS ...)");
	}

	check_fixed_counts(counts);
}
90 
T_DECL(core_fixed_kdebug, "check that the kdebug macros for monotonic work",
    T_META_ASROOT(true))
{
	__block bool saw_events = false;
	ktrace_session_t s;
	int r;
	int set = 1;

	T_SETUPBEGIN;
	skip_if_unsupported();

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/*
	 * Watch for the paired start/end tracepoint emitted by the kernel's
	 * monotonic kdebug test; the start and end events each carry an
	 * (instructions, cycles) pair in arg1/arg2.
	 */
	ktrace_events_single_paired(s,
	    KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_TMPCPU, 0x3fff),
	    ^(struct trace_point *start, struct trace_point *end)
	{
		struct thsc_cpi counts[2];

		saw_events = true;

		/* reconstruct the two samples from the event arguments */
		counts[0].tcpi_instructions = start->arg1;
		counts[0].tcpi_cycles = start->arg2;
		counts[1].tcpi_instructions = end->arg1;
		counts[1].tcpi_cycles = end->arg2;

		check_fixed_counts(counts);
	});

	/* runs after ktrace_end() drains the session; ends the test */
	ktrace_set_completion_handler(s, ^{
		T_ASSERT_TRUE(saw_events, "should see monotonic kdebug events");
		T_END;
	});
	T_SETUPEND;

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), NULL);

	/*
	 * Poke the kernel to emit the test events; must happen after tracing
	 * has started so the events are captured.
	 */
	r = sysctlbyname("kern.monotonic.kdebug_test", NULL, NULL, &set,
	    sizeof(set));
	T_ASSERT_POSIX_SUCCESS(r,
	    "sysctlbyname(\"kern.monotonic.kdebug_test\", ...)");

	ktrace_end(s, 0);
	/* park the main thread; the completion handler calls T_END */
	dispatch_main();
}
138 
139 static void *
spin_thread_self_counts(__unused void * arg)140 spin_thread_self_counts(__unused void *arg)
141 {
142 	struct thsc_cpi counts = { 0 };
143 	while (true) {
144 		(void)thread_selfcounts(THSC_CPI, &counts, sizeof(counts));
145 	}
146 }
147 
/*
 * Worker thread body: call task_inspect(TASK_INSPECT_BASIC_COUNTS) in a
 * tight loop forever, checking each result for values that look like leaked
 * kernel pointers rather than counter deltas.  Never returns.
 */
static void *
spin_task_inspect(__unused void *arg)
{
	task_t own_task = mach_task_self();
	uint64_t counts[2] = { 0 };

	for (;;) {
		unsigned int size = (unsigned int)sizeof(counts);
		(void)task_inspect(own_task, TASK_INSPECT_BASIC_COUNTS,
		    (task_inspect_info_t)&counts[0], &size);
		/*
		 * Not realistic for a process to see count values with the high bit
		 * set, but kernel pointers will be that high.
		 */
		T_QUIET; T_ASSERT_LT(counts[0], 1ULL << 63,
		        "check for valid count entry 1");
		T_QUIET; T_ASSERT_LT(counts[1], 1ULL << 63,
		        "check for valid count entry 2");
	}
}
168 
T_DECL(core_fixed_stack_leak_race,
    "ensure no stack data is leaked by TASK_INSPECT_BASIC_COUNTS")
{
	T_SETUPBEGIN;

	int ncpus = 0;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.logicalcpu_max", &ncpus,
	    &(size_t){ sizeof(ncpus) }, NULL, 0), "get number of CPUs");
	T_QUIET; T_ASSERT_GT(ncpus, 0, "got non-zero number of CPUs");
	pthread_t *threads = calloc((unsigned long)ncpus, sizeof(*threads));

	T_QUIET; T_ASSERT_NOTNULL(threads, "allocated space for threads");

	T_LOG("creating %d threads to attempt to race around task counts", ncpus);
	/*
	 * Have half the threads hammering thread_self_counts and the other half
	 * trying to get an error to occur inside TASK_INSPECT_BASIC_COUNTS and see
	 * uninitialized kernel memory.
	 */
	for (int i = 0; i < ncpus; i++) {
		T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL,
		    i & 1 ? spin_task_inspect : spin_thread_self_counts, NULL),
		    NULL);
	}

	/*
	 * The thread handles are never used again (the spinners run until the
	 * test process exits), so the array can be released immediately instead
	 * of being leaked.
	 */
	free(threads);

	T_SETUPEND;

	sleep(10);
	T_PASS("ending test after 10 seconds");
}
199 
/*
 * Repeatedly read a kernel perf sysctl that reports (instructions, cycles)
 * deltas, feeding the samples into two dt_stat accumulators until both are
 * statistically stable, then finalize (report) them.
 */
static void
perf_sysctl_deltas(const char *sysctl_name, const char *stat_name)
{
	uint64_t deltas[2];

	T_SETUPBEGIN;
	skip_if_unsupported();

	dt_stat_t instrs = dt_stat_create("instructions", "%s_instrs",
	    stat_name);
	dt_stat_t cycles = dt_stat_create("cycles", "%s_cycles", stat_name);
	T_SETUPEND;

	while (!dt_stat_stable(instrs) || !dt_stat_stable(cycles)) {
		size_t deltas_size = sizeof(deltas);
		int ret = sysctlbyname(sysctl_name, deltas, &deltas_size, NULL, 0);
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"%s\", ...)", sysctl_name);
		/* deltas[0] is instructions, deltas[1] is cycles */
		dt_stat_add(instrs, (double)deltas[0]);
		dt_stat_add(cycles, (double)deltas[1]);
	}

	dt_stat_finalize(instrs);
	dt_stat_finalize(cycles);
}
227 
/* Benchmark per-CPU fixed counter reads via the kernel perf sysctl. */
T_DECL(perf_core_fixed_cpu, "test the performance of fixed CPU counter access",
    T_META_ASROOT(true), XNU_T_META_SOC_SPECIFIC, T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_cpu_perf", "fixed_cpu_counters");
}
233 
/* Benchmark per-thread fixed counter reads via the kernel perf sysctl. */
T_DECL(perf_core_fixed_thread, "test the performance of fixed thread counter access",
    T_META_ASROOT(true), XNU_T_META_SOC_SPECIFIC, T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_thread_perf",
	    "fixed_thread_counters");
}
240 
/* Benchmark per-task fixed counter reads via the kernel perf sysctl. */
T_DECL(perf_core_fixed_task, "test the performance of fixed task counter access",
    T_META_ASROOT(true), XNU_T_META_SOC_SPECIFIC, T_META_TAG_PERF)
{
	perf_sysctl_deltas("kern.monotonic.fixed_task_perf", "fixed_task_counters");
}
246