1 /*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/assert.h>
30 #include <kern/kpc.h>
31 #include <kern/monotonic.h>
32 #include <kern/thread.h>
33 #include <machine/atomic.h>
34 #include <machine/monotonic.h>
35 #include <mach/mach_traps.h>
36 #include <stdatomic.h>
37 #include <sys/errno.h>
38
/* when true, log retrograde counter reads via kprintf (see mt_mtc_update_count) */
bool mt_debug = false;
/* global count of PMIs taken -- presumably incremented by arch-specific code; not modified in this file */
_Atomic uint64_t mt_pmis = 0;
/* global count of counter reads that went backwards (bumped in mt_mtc_update_count) */
_Atomic uint64_t mt_retrograde = 0;

/* kdebug event ID for instructions/cycles tracepoints */
#define MT_KDBG_INSTRS_CYCLES(CODE) \
	KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_INSTRS_CYCLES, CODE)

/* forward declaration -- defined below */
static void mt_fixed_counts_internal(uint64_t *counts, uint64_t *counts_since);
47
48 uint64_t
mt_mtc_update_count(struct mt_cpu * mtc,unsigned int ctr)49 mt_mtc_update_count(struct mt_cpu *mtc, unsigned int ctr)
50 {
51 uint64_t snap = mt_core_snap(ctr);
52 if (snap < mtc->mtc_snaps[ctr]) {
53 if (mt_debug) {
54 kprintf("monotonic: cpu %d: thread %#llx: "
55 "retrograde counter %u value: %llu, last read = %llu\n",
56 cpu_number(), thread_tid(current_thread()), ctr, snap,
57 mtc->mtc_snaps[ctr]);
58 }
59 (void)atomic_fetch_add_explicit(&mt_retrograde, 1,
60 memory_order_relaxed);
61 mtc->mtc_snaps[ctr] = snap;
62 return 0;
63 }
64
65 uint64_t count = snap - mtc->mtc_snaps[ctr];
66 mtc->mtc_snaps[ctr] = snap;
67
68 return count;
69 }
70
71 uint64_t
mt_cpu_update_count(cpu_data_t * cpu,unsigned int ctr)72 mt_cpu_update_count(cpu_data_t *cpu, unsigned int ctr)
73 {
74 return mt_mtc_update_count(&cpu->cpu_monotonic, ctr);
75 }
76
77 static void
mt_fixed_counts_internal(uint64_t * counts,uint64_t * counts_since)78 mt_fixed_counts_internal(uint64_t *counts, uint64_t *counts_since)
79 {
80 assert(ml_get_interrupts_enabled() == FALSE);
81
82 struct mt_cpu *mtc = mt_cur_cpu();
83 assert(mtc != NULL);
84
85 mt_mtc_update_fixed_counts(mtc, counts, counts_since);
86 }
87
88 void
mt_mtc_update_fixed_counts(struct mt_cpu * mtc,uint64_t * counts,uint64_t * counts_since)89 mt_mtc_update_fixed_counts(struct mt_cpu *mtc, uint64_t *counts,
90 uint64_t *counts_since)
91 {
92 if (!mt_core_supported) {
93 return;
94 }
95
96 for (int i = 0; i < (int) kpc_fixed_count(); i++) {
97 uint64_t last_delta;
98 uint64_t count;
99
100 last_delta = mt_mtc_update_count(mtc, i);
101 count = mtc->mtc_counts[i] + last_delta;
102
103 if (counts) {
104 counts[i] = count;
105 }
106 if (counts_since) {
107 assert(counts != NULL);
108 counts_since[i] = count - mtc->mtc_counts_last[i];
109 mtc->mtc_counts_last[i] = count;
110 }
111
112 mtc->mtc_counts[i] = count;
113 }
114 }
115
116 void
mt_update_fixed_counts(void)117 mt_update_fixed_counts(void)
118 {
119 assert(ml_get_interrupts_enabled() == FALSE);
120
121 #if defined(__x86_64__)
122 __builtin_ia32_lfence();
123 #elif defined(__arm64__)
124 __builtin_arm_isb(ISB_SY);
125 #endif /* !defined(__x86_64__) && defined(__arm64__) */
126
127 mt_fixed_counts_internal(NULL, NULL);
128 }
129
130 void
mt_fixed_counts(uint64_t * counts)131 mt_fixed_counts(uint64_t *counts)
132 {
133 #if defined(__x86_64__)
134 __builtin_ia32_lfence();
135 #elif defined(__arm64__)
136 __builtin_arm_isb(ISB_SY);
137 #endif /* !defined(__x86_64__) && defined(__arm64__) */
138
139 int intrs_en = ml_set_interrupts_enabled(FALSE);
140 mt_fixed_counts_internal(counts, NULL);
141 ml_set_interrupts_enabled(intrs_en);
142 }
143
144 uint64_t
mt_cur_cpu_instrs(void)145 mt_cur_cpu_instrs(void)
146 {
147 uint64_t counts[MT_CORE_NFIXED];
148
149 if (!mt_core_supported) {
150 return 0;
151 }
152
153 mt_fixed_counts(counts);
154 return counts[MT_CORE_INSTRS];
155 }
156
157 uint64_t
mt_cur_cpu_cycles(void)158 mt_cur_cpu_cycles(void)
159 {
160 uint64_t counts[MT_CORE_NFIXED];
161
162 if (!mt_core_supported) {
163 return 0;
164 }
165
166 mt_fixed_counts(counts);
167 return counts[MT_CORE_CYCLES];
168 }
169
170 void
mt_cur_cpu_cycles_instrs_speculative(uint64_t * cycles,__unused uint64_t * instrs)171 mt_cur_cpu_cycles_instrs_speculative(uint64_t *cycles, __unused uint64_t *instrs)
172 {
173 uint64_t counts[MT_CORE_NFIXED] = {0};
174 struct mt_cpu *mtc = mt_cur_cpu();
175
176 assert(ml_get_interrupts_enabled() == FALSE);
177 assert(mtc != NULL);
178
179 mt_mtc_update_fixed_counts(mtc, counts, NULL);
180
181 *cycles = counts[MT_CORE_CYCLES];
182 *instrs = counts[MT_CORE_INSTRS];
183 }
184
185 void
mt_perfcontrol(uint64_t * instrs,uint64_t * cycles)186 mt_perfcontrol(uint64_t *instrs, uint64_t *cycles)
187 {
188 if (!mt_core_supported) {
189 *instrs = 0;
190 *cycles = 0;
191 return;
192 }
193
194 struct mt_cpu *mtc = mt_cur_cpu();
195
196 /*
197 * The performance controller queries the hardware directly, so provide the
198 * last snapshot we took for the core. This is the value from when we
199 * updated the thread counts.
200 */
201
202 *instrs = mtc->mtc_snaps[MT_CORE_INSTRS];
203 *cycles = mtc->mtc_snaps[MT_CORE_CYCLES];
204 }
205
206 bool
mt_acquire_counters(void)207 mt_acquire_counters(void)
208 {
209 if (kpc_get_force_all_ctrs()) {
210 extern bool kpc_task_get_forced_all_ctrs(task_t);
211 if (kpc_task_get_forced_all_ctrs(current_task())) {
212 return true;
213 }
214 return false;
215 }
216 kpc_force_all_ctrs(current_task(), 1);
217 return true;
218 }
219
/*
 * Report whether all performance counters are currently forced on --
 * i.e. whether some task holds ownership taken via mt_acquire_counters().
 */
bool
mt_owns_counters(void)
{
	return kpc_get_force_all_ctrs();
}
225
/*
 * Release ownership of the performance counters for the current task.
 * No-op if the counters are not currently forced.
 */
void
mt_release_counters(void)
{
	if (!kpc_get_force_all_ctrs()) {
		return;
	}
	kpc_force_all_ctrs(current_task(), 0);
}
233
/*
 * Maintain reset values for the fixed instruction and cycle counters so
 * clients can be notified after a given number of those events occur. This is
 * only used by microstackshot.
 */

/* true while PMI-driven microstackshots are armed */
bool mt_microstackshots = false;
/* which fixed counter drives the PMI (index < MT_CORE_NFIXED) */
unsigned int mt_microstackshot_ctr = 0;
/* number of counter events between PMIs; 0 when stopped */
uint64_t mt_microstackshot_period = 0;
/* callback invoked on PMI -- presumably called from arch PMI code with
 * mt_microstackshot_ctx; invocation site is not visible in this file */
mt_pmi_fn mt_microstackshot_pmi_handler = NULL;
void *mt_microstackshot_ctx = NULL;
/* per-counter reset values; cleared by mt_microstackshot_stop, written
 * elsewhere (not visible here) */
uint64_t mt_core_reset_values[MT_CORE_NFIXED] = { 0 };

/* minimum allowed PMI period (events), bounding the interrupt rate */
#define MT_MIN_FIXED_PERIOD (10 * 1000 * 1000)
248
249 int
mt_microstackshot_start(unsigned int ctr,uint64_t period,mt_pmi_fn handler,void * ctx)250 mt_microstackshot_start(unsigned int ctr, uint64_t period, mt_pmi_fn handler,
251 void *ctx)
252 {
253 assert(ctr < MT_CORE_NFIXED);
254
255 if (period < MT_MIN_FIXED_PERIOD) {
256 return EINVAL;
257 }
258 if (mt_microstackshots) {
259 return EBUSY;
260 }
261
262 mt_microstackshot_ctr = ctr;
263 mt_microstackshot_pmi_handler = handler;
264 mt_microstackshot_ctx = ctx;
265
266 int error = mt_microstackshot_start_arch(period);
267 if (error) {
268 mt_microstackshot_ctr = 0;
269 mt_microstackshot_pmi_handler = NULL;
270 mt_microstackshot_ctx = NULL;
271 return error;
272 }
273
274 mt_microstackshot_period = period;
275 mt_microstackshots = true;
276
277 return 0;
278 }
279
/*
 * Disarm microstackshots.  Clears the armed flag first, then the period
 * and the saved per-counter reset values.  Always returns 0.
 *
 * NOTE(review): no arch-specific stop routine is called here --
 * presumably the cleared flag is observed at PMI time; verify against
 * the arch PMI handler.
 */
int
mt_microstackshot_stop(void)
{
	mt_microstackshots = false;
	mt_microstackshot_period = 0;
	memset(mt_core_reset_values, 0, sizeof(mt_core_reset_values));

	return 0;
}
289