/*
 * Copyright (c) 2017-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm64/monotonic.h>
#include <kern/assert.h>
#include <kern/debug.h> /* panic */
#include <kern/kpc.h>
#include <kern/monotonic.h>
#include <machine/atomic.h>
#include <machine/limits.h> /* CHAR_BIT */
#include <os/overflow.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/device_tree.h> /* SecureDTFindEntry */
#include <pexpert/pexpert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <sys/errno.h>
#include <sys/monotonic.h>

/*
 * Ensure that control registers read back what was written under MACH_ASSERT
 * kernels.
 *
 * A static inline function cannot be used here because the register must be
 * passed through to the builtin as a constant string -- MSR registers are
 * encoded as an immediate in the instruction.
 */
#if MACH_ASSERT
#define CTRL_REG_SET(reg, val) do { \
    __builtin_arm_wsr64((reg), (val)); \
    uint64_t __check_reg = __builtin_arm_rsr64((reg)); \
    if (__check_reg != (val)) { \
        panic("value written to %s was not read back (wrote %llx, read %llx)", \
            #reg, (val), __check_reg); \
    } \
} while (0)
#else /* MACH_ASSERT */
#define CTRL_REG_SET(reg, val) __builtin_arm_wsr64((reg), (val))
#endif /* MACH_ASSERT */
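
/*
 * Illustrative usage (any system register name works, as long as it is a
 * compile-time string literal):
 *
 *     CTRL_REG_SET("PMCR1_EL1", PMCR1_INIT);
 *
 * On MACH_ASSERT kernels the write is immediately read back and verified;
 * on release kernels it compiles down to a single MSR instruction.
 */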

#pragma mark core counters

const bool mt_core_supported = true;

static const ml_topology_info_t *topology_info;

/*
 * PMC[0-1] are the 48/64-bit fixed counters -- PMC0 is cycles and PMC1 is
 * instructions (see arm64/monotonic.h).
 *
 * PMC2+ are currently handled by kpc.
 */
#define PMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
    X(6, A); X(7, A)

#if CORE_NCTRS > 8
#define PMC_8_9(X, A) X(8, A); X(9, A)
#else // CORE_NCTRS > 8
#define PMC_8_9(X, A)
#endif // CORE_NCTRS > 8

#define PMC_ALL(X, A) PMC_0_7(X, A); PMC_8_9(X, A)

#if CPMU_64BIT_PMCS
#define PMC_WIDTH (63)
#else // CPMU_64BIT_PMCS
#define PMC_WIDTH (47)
#endif // !CPMU_64BIT_PMCS

#define CTR_MAX ((UINT64_C(1) << PMC_WIDTH) - 1)
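
/*
 * Worked example: with 48-bit PMCs, PMC_WIDTH is 47, so CTR_MAX is
 * (1 << 47) - 1 = 0x7fffffffffff; with 64-bit PMCs it is
 * (1 << 63) - 1 = 0x7fffffffffffffff. The top bit of each counter is the
 * overflow/PMI trigger bit, which is why one bit of width is given up.
 */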

#define CYCLES 0
#define INSTRS 1

/*
 * PMC0's offset into a core's PIO range.
 *
 * This allows cores to remotely query another core's counters.
 */

#define PIO_PMC0_OFFSET (0x200)

/*
 * The offset of the counter in the configuration registers. Post-Hurricane
 * devices have additional counters that need a larger shift than the original
 * counters.
 *
 * XXX For now, just support the lower-numbered counters.
 */
#define CTR_POS(CTR) (CTR)

/*
 * PMCR0 is the main control register for the performance monitor. It
 * controls whether the counters are enabled, how they deliver interrupts, and
 * other features.
 */

#define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
#define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
/* how interrupts are delivered on a PMI */
enum {
    PMCR0_INTGEN_OFF = 0,
    PMCR0_INTGEN_PMI = 1,
    PMCR0_INTGEN_AIC = 2,
    PMCR0_INTGEN_HALT = 3,
    PMCR0_INTGEN_FIQ = 4,
};
#define PMCR0_INTGEN_SET(X) ((uint64_t)(X) << 8)

#if CPMU_AIC_PMI
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC)
#else /* CPMU_AIC_PMI */
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ)
#endif /* !CPMU_AIC_PMI */

#define PMCR0_PMI_SHIFT (12)
#define PMCR0_CTR_GE8_PMI_SHIFT (44)
#define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (PMCR0_PMI_SHIFT + CTR_POS(CTR)))
/* fixed counters are always counting */
#define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
/* disable counting on a PMI */
#define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
/* block PMIs until ERET retires */
#define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
/* count global (not just core-local) L2C events */
#define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
/* user mode access to configuration registers */
#define PMCR0_USEREN_EN (UINT64_C(1) << 30)
#define PMCR0_CTR_GE8_EN_SHIFT (32)

#if HAS_CPMU_PC_CAPTURE
#define PMCR0_PCC_INIT (UINT64_C(0x7) << 24)
#else /* HAS_CPMU_PC_CAPTURE */
#define PMCR0_PCC_INIT (0)
#endif /* !HAS_CPMU_PC_CAPTURE */

#define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_PCC_INIT)
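
/*
 * Worked example (assuming FIQ delivery and no PC capture): PMCR0_INTGEN_INIT
 * is 4 << 8 = 0x400, and PMCR0_PMI_INIT sets bits 12 and 13 (PMIs for PMC0
 * and PMC1), i.e. 0x3000, so PMCR0_INIT is 0x3400. Note that PMCR0_INIT
 * leaves the counter-enable bits (PMCR0_FIXED_EN) clear -- writing it alone
 * stops the fixed counters, which core_idle and mt_cpu_pmi below rely on.
 */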

/*
 * PMCR1 controls which execution modes count events.
 */
#define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
#define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
#define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
/* PMCR1_EL3A64 is not supported on systems with no monitor */
#if defined(APPLEHURRICANE)
#define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
#else
#define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
#endif
#define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
    PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))

/* fixed counters always count in all modes */
#define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))

static inline void
core_init_execution_modes(void)
{
    uint64_t pmcr1;

    pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
    pmcr1 |= PMCR1_INIT;
    __builtin_arm_wsr64("PMCR1_EL1", pmcr1);
}

#define PMSR_OVF(CTR) (1ULL << (CTR))

static int
core_init(__unused mt_device_t dev)
{
    /* the dev node interface to the core counters is still unsupported */
    return ENOTSUP;
}

struct mt_cpu *
mt_cur_cpu(void)
{
    return &getCpuDatap()->cpu_monotonic;
}

uint64_t
mt_core_snap(unsigned int ctr)
{
    switch (ctr) {
#define PMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(PMC ## CTR))
        PMC_ALL(PMC_RD, 0);
#undef PMC_RD
    default:
        panic("monotonic: invalid core counter read: %u", ctr);
        __builtin_unreachable();
    }
}
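
/*
 * For reference, PMC_ALL(PMC_RD, 0) above expands to one case per counter,
 * roughly (for PMC0):
 *
 *     case (0): return __builtin_arm_rsr64("PMC0");
 *
 * since __MSR_STR stringifies the pasted register name, and the MRS builtin
 * requires a string literal.
 */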

void
mt_core_set_snap(unsigned int ctr, uint64_t count)
{
    switch (ctr) {
    case 0:
        __builtin_arm_wsr64("PMC0", count);
        break;
    case 1:
        __builtin_arm_wsr64("PMC1", count);
        break;
    default:
        panic("monotonic: invalid core counter %u write %llu", ctr, count);
        __builtin_unreachable();
    }
}

static void
core_set_enabled(void)
{
    uint32_t kpc_mask = kpc_get_running() &
        (KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_POWER_MASK);
    uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
    pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;

    if (kpc_mask != 0) {
        uint64_t kpc_ctrs = kpc_get_configurable_pmc_mask(kpc_mask) <<
            MT_CORE_NFIXED;
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
        uint64_t ctrs_ge8 = kpc_ctrs >> 8;
        pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_EN_SHIFT;
        pmcr0 |= ctrs_ge8 << PMCR0_CTR_GE8_PMI_SHIFT;
        kpc_ctrs &= (1ULL << 8) - 1;
#endif /* KPC_ARM64_CONFIGURABLE_COUNT > 6 */
        kpc_ctrs |= kpc_ctrs << PMCR0_PMI_SHIFT;
        pmcr0 |= kpc_ctrs;
    }

    __builtin_arm_wsr64("PMCR0_EL1", pmcr0);
#if MACH_ASSERT
    /*
     * Only check for the values that were ORed in.
     */
    uint64_t pmcr0_check = __builtin_arm_rsr64("PMCR0_EL1");
    if ((pmcr0_check & (PMCR0_INIT | PMCR0_FIXED_EN)) != (PMCR0_INIT | PMCR0_FIXED_EN)) {
        panic("monotonic: hardware ignored enable (read %llx, wrote %llx)",
            pmcr0_check, pmcr0);
    }
#endif /* MACH_ASSERT */
}

static void
core_idle(__unused cpu_data_t *cpu)
{
    assert(cpu != NULL);
    assert(ml_get_interrupts_enabled() == FALSE);

#if DEBUG
    uint64_t pmcr0 = __builtin_arm_rsr64("PMCR0_EL1");
    if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
        panic("monotonic: counters disabled before idling, pmcr0 = 0x%llx", pmcr0);
    }
    uint64_t pmcr1 = __builtin_arm_rsr64("PMCR1_EL1");
    if ((pmcr1 & PMCR1_INIT) == 0) {
        panic("monotonic: counter modes disabled before idling, pmcr1 = 0x%llx", pmcr1);
    }
#endif /* DEBUG */

    /* disable counters before updating */
    __builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);

    mt_update_fixed_counts();
}

#pragma mark uncore performance monitor

#if HAS_UNCORE_CTRS

static bool mt_uncore_initted = false;

/*
 * Uncore Performance Monitor
 *
 * Uncore performance monitors provide event-counting for the last-level caches
 * (LLCs). Each LLC has its own uncore performance monitor, which can only be
 * accessed by cores that use that LLC. Like the core performance monitoring
 * unit, uncore counters are configured globally. If there is more than one
 * LLC on the system, PIO reads must be used to satisfy uncore requests (using
 * the `_r` remote variants of the access functions). Otherwise, local MSRs
 * suffice (using the `_l` local variants of the access functions).
 */

#if UNCORE_PER_CLUSTER
#define MAX_NMONITORS MAX_CPU_CLUSTERS
static uintptr_t cpm_impl[MAX_NMONITORS] = {};
#else
#define MAX_NMONITORS (1)
#endif /* UNCORE_PER_CLUSTER */

#if UNCORE_VERSION >= 2
/*
 * V2 uncore monitors feature a CTI mechanism -- the second bit of UPMSR is
 * used to track if a CTI has been triggered due to an overflow.
 */
#define UPMSR_OVF_POS 2
#else /* UNCORE_VERSION >= 2 */
#define UPMSR_OVF_POS 1
#endif /* UNCORE_VERSION < 2 */
#define UPMSR_OVF(R, CTR) ((R) >> ((CTR) + UPMSR_OVF_POS) & 0x1)
#define UPMSR_OVF_MASK (((UINT64_C(1) << UNCORE_NCTRS) - 1) << UPMSR_OVF_POS)
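
/*
 * Worked example (assuming UNCORE_VERSION >= 2 and UNCORE_NCTRS == 16): the
 * overflow flags occupy UPMSR bits [17:2], so UPMSR_OVF_MASK is 0xffff << 2
 * and UPMSR_OVF(upmsr, 5) tests bit 7. uncore_fiq below recovers the plain
 * counter bitmap with (upmsr & UPMSR_OVF_MASK) >> UPMSR_OVF_POS.
 */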

#define UPMPCM_CORE(ID) (UINT64_C(1) << (ID))

#if UPMU_64BIT_PMCS
#define UPMC_WIDTH (63)
#else // UPMU_64BIT_PMCS
#define UPMC_WIDTH (47)
#endif // !UPMU_64BIT_PMCS

/*
 * The uncore_pmi_mask is a bitmask of CPUs that receive uncore PMIs. It's
 * initialized by uncore_init and controllable by the uncore_pmi_mask boot-arg.
 */
static int32_t uncore_pmi_mask = 0;

/*
 * The uncore_active_ctrs is a bitmask of uncore counters that are currently
 * requested.
 */
static uint16_t uncore_active_ctrs = 0;
static_assert(sizeof(uncore_active_ctrs) * CHAR_BIT >= UNCORE_NCTRS,
    "counter mask should fit the full range of counters");

/*
 * mt_uncore_enabled is true when any uncore counters are active.
 */
bool mt_uncore_enabled = false;

/*
 * The uncore_events are the event configurations for each uncore counter -- as
 * a union to make it easy to program the hardware registers.
 */
static struct uncore_config {
    union {
        uint8_t uce_ctrs[UNCORE_NCTRS];
        uint64_t uce_regs[UNCORE_NCTRS / 8];
    } uc_events;
    union {
        uint16_t uccm_masks[UNCORE_NCTRS];
        uint64_t uccm_regs[UNCORE_NCTRS / 4];
    } uc_cpu_masks[MAX_NMONITORS];
} uncore_config;

static struct uncore_monitor {
    /*
     * The last snapshot of each of the hardware counter values.
     */
    uint64_t um_snaps[UNCORE_NCTRS];

    /*
     * The accumulated counts for each counter.
     */
    uint64_t um_counts[UNCORE_NCTRS];

    /*
     * Protects accessing the hardware registers and fields in this structure.
     */
    lck_spin_t um_lock;

    /*
     * Whether this monitor needs its registers restored after wake.
     */
    bool um_sleeping;
} uncore_monitors[MAX_NMONITORS];

/*
 * Each uncore unit has its own monitor, corresponding to the memory hierarchy
 * of the LLCs.
 */
static unsigned int
uncore_nmonitors(void)
{
#if UNCORE_PER_CLUSTER
    return topology_info->num_clusters;
#else /* UNCORE_PER_CLUSTER */
    return 1;
#endif /* !UNCORE_PER_CLUSTER */
}

static unsigned int
uncmon_get_curid(void)
{
#if UNCORE_PER_CLUSTER
    return cpu_cluster_id();
#else /* UNCORE_PER_CLUSTER */
    return 0;
#endif /* !UNCORE_PER_CLUSTER */
}

/*
 * Per-monitor locks are required to prevent races with the PMI handlers, not
 * from other CPUs that are configuring (those are serialized with monotonic's
 * per-device lock).
 */

static int
uncmon_lock(struct uncore_monitor *mon)
{
    int intrs_en = ml_set_interrupts_enabled(FALSE);
    lck_spin_lock(&mon->um_lock);
    return intrs_en;
}

static void
uncmon_unlock(struct uncore_monitor *mon, int intrs_en)
{
    lck_spin_unlock(&mon->um_lock);
    (void)ml_set_interrupts_enabled(intrs_en);
}
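
/*
 * Callers bracket all register access with this pair, saving the previous
 * interrupt state across the critical section, e.g.:
 *
 *     int intrs_en = uncmon_lock(mon);
 *     ... read or program UPMC/UPMESR/UPMECM registers ...
 *     uncmon_unlock(mon, intrs_en);
 *
 * Interrupts are disabled before taking the spinlock so an uncore PMI cannot
 * arrive on this CPU while the lock is held and deadlock against its holder.
 */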

/*
 * Helper functions for accessing the hardware -- these require the monitor be
 * locked to prevent other CPUs' PMI handlers from making local modifications
 * or updating the counts.
 */

#if UNCORE_VERSION >= 2
#define UPMCR0_INTEN_POS 20
#define UPMCR0_INTGEN_POS 16
#else /* UNCORE_VERSION >= 2 */
#define UPMCR0_INTEN_POS 12
#define UPMCR0_INTGEN_POS 8
#endif /* UNCORE_VERSION < 2 */
enum {
    UPMCR0_INTGEN_OFF = 0,
    /* fast PMIs are only supported on core CPMU */
    UPMCR0_INTGEN_AIC = 2,
    UPMCR0_INTGEN_HALT = 3,
    UPMCR0_INTGEN_FIQ = 4,
};
/* always enable interrupts for all counters */
#define UPMCR0_INTEN (((1ULL << UNCORE_NCTRS) - 1) << UPMCR0_INTEN_POS)
/* route uncore PMIs through the FIQ path */
#define UPMCR0_INIT (UPMCR0_INTEN | (UPMCR0_INTGEN_FIQ << UPMCR0_INTGEN_POS))
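
/*
 * Worked example (assuming UNCORE_VERSION >= 2 and UNCORE_NCTRS == 16):
 * UPMCR0_INTEN is 0xffff << 20 and the FIQ INTGEN value contributes 4 << 16,
 * so UPMCR0_INIT is 0xffff40000. The low bits are left for the caller's
 * `enctrmask`, which is why uncmon_set_counting_locked_l below writes
 * UPMCR0_INIT | enctrmask.
 */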

/*
 * Turn counting on for the counters set in `enctrmask`, and off for the rest.
 */
static inline void
uncmon_set_counting_locked_l(__unused unsigned int monid, uint64_t enctrmask)
{
    /*
     * UPMCR0 controls which counters are enabled and how interrupts are
     * generated for overflows.
     */
    __builtin_arm_wsr64("UPMCR0_EL1", UPMCR0_INIT | enctrmask);
}

#if UNCORE_PER_CLUSTER

/*
 * Turn counting on for the counters set in `enctrmask`, and off for the rest.
 */
static inline void
uncmon_set_counting_locked_r(unsigned int monid, uint64_t enctrmask)
{
    const uintptr_t upmcr0_offset = 0x4180;
    *(uint64_t *)(cpm_impl[monid] + upmcr0_offset) = UPMCR0_INIT | enctrmask;
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * The uncore performance monitoring counters (UPMCs) are 48/64-bits wide. The
 * high bit is an overflow bit, triggering a PMI, providing 47/63 usable bits.
 */

#define UPMC_MAX ((UINT64_C(1) << UPMC_WIDTH) - 1)

/*
 * The `__builtin_arm_{r,w}sr` functions require constant strings, since the
 * MSR/MRS instructions encode the registers as immediates. Otherwise, this
 * would be indexing into an array of strings.
 */

#define UPMC_0_7(X, A) X(0, A); X(1, A); X(2, A); X(3, A); X(4, A); X(5, A); \
    X(6, A); X(7, A)
#if UNCORE_NCTRS <= 8
#define UPMC_ALL(X, A) UPMC_0_7(X, A)
#else /* UNCORE_NCTRS <= 8 */
#define UPMC_8_15(X, A) X(8, A); X(9, A); X(10, A); X(11, A); X(12, A); \
    X(13, A); X(14, A); X(15, A)
#define UPMC_ALL(X, A) UPMC_0_7(X, A); UPMC_8_15(X, A)
#endif /* UNCORE_NCTRS > 8 */

static inline uint64_t
uncmon_read_counter_locked_l(__unused unsigned int monid, unsigned int ctr)
{
    assert(ctr < UNCORE_NCTRS);
    switch (ctr) {
#define UPMC_RD(CTR, UNUSED) case (CTR): return __builtin_arm_rsr64(__MSR_STR(UPMC ## CTR))
        UPMC_ALL(UPMC_RD, 0);
#undef UPMC_RD
    default:
        panic("monotonic: invalid counter read %u", ctr);
        __builtin_unreachable();
    }
}

static inline void
uncmon_write_counter_locked_l(__unused unsigned int monid, unsigned int ctr,
    uint64_t count)
{
    assert(count < UPMC_MAX);
    assert(ctr < UNCORE_NCTRS);
    switch (ctr) {
#define UPMC_WR(CTR, COUNT) case (CTR): \
    return __builtin_arm_wsr64(__MSR_STR(UPMC ## CTR), (COUNT))
        UPMC_ALL(UPMC_WR, count);
#undef UPMC_WR
    default:
        panic("monotonic: invalid counter write %u", ctr);
    }
}

#if UNCORE_PER_CLUSTER

uintptr_t upmc_offs[UNCORE_NCTRS] = {
    [0] = 0x4100, [1] = 0x4248, [2] = 0x4110, [3] = 0x4250, [4] = 0x4120,
    [5] = 0x4258, [6] = 0x4130, [7] = 0x4260, [8] = 0x4140, [9] = 0x4268,
    [10] = 0x4150, [11] = 0x4270, [12] = 0x4160, [13] = 0x4278,
    [14] = 0x4170, [15] = 0x4280,
};

static inline uint64_t
uncmon_read_counter_locked_r(unsigned int mon_id, unsigned int ctr)
{
    assert(mon_id < uncore_nmonitors());
    assert(ctr < UNCORE_NCTRS);
    return *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]);
}

static inline void
uncmon_write_counter_locked_r(unsigned int mon_id, unsigned int ctr,
    uint64_t count)
{
    assert(count < UPMC_MAX);
    assert(ctr < UNCORE_NCTRS);
    assert(mon_id < uncore_nmonitors());
    *(uint64_t *)(cpm_impl[mon_id] + upmc_offs[ctr]) = count;
}

#endif /* UNCORE_PER_CLUSTER */

static inline void
uncmon_update_locked(unsigned int monid, unsigned int curid, unsigned int ctr)
{
    struct uncore_monitor *mon = &uncore_monitors[monid];
    uint64_t snap = 0;
    if (curid == monid) {
        snap = uncmon_read_counter_locked_l(monid, ctr);
    } else {
#if UNCORE_PER_CLUSTER
        snap = uncmon_read_counter_locked_r(monid, ctr);
#endif /* UNCORE_PER_CLUSTER */
    }
    /* counters should increase monotonically */
    assert(snap >= mon->um_snaps[ctr]);
    mon->um_counts[ctr] += snap - mon->um_snaps[ctr];
    mon->um_snaps[ctr] = snap;
}
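
/*
 * Worked example: if the last snapshot of UPMC3 was 1000 and the hardware now
 * reads 1500, um_counts[3] grows by 500 and um_snaps[3] becomes 1500. Because
 * the PMI handler zeroes an overflowed UPMC and its snapshot together (see
 * uncore_fiq), the accumulated delta stays correct across overflow resets.
 */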

static inline void
uncmon_program_events_locked_l(unsigned int monid)
{
    /*
     * UPMESR[01] is the event selection register that determines which event a
     * counter will count.
     */
    CTRL_REG_SET("UPMESR0_EL1", uncore_config.uc_events.uce_regs[0]);

#if UNCORE_NCTRS > 8
    CTRL_REG_SET("UPMESR1_EL1", uncore_config.uc_events.uce_regs[1]);
#endif /* UNCORE_NCTRS > 8 */

    /*
     * UPMECM[0123] are the event core masks for each counter -- whether or not
     * that counter counts events generated by an agent. These are set to all
     * ones so the uncore counters count events from all cores.
     *
     * The bits are based off the start of the cluster -- e.g. even if a core
     * has a CPU ID of 4, it might be the first CPU in a cluster. Shift the
     * registers right by the ID of the first CPU in the cluster.
     */
    CTRL_REG_SET("UPMECM0_EL1",
        uncore_config.uc_cpu_masks[monid].uccm_regs[0]);
    CTRL_REG_SET("UPMECM1_EL1",
        uncore_config.uc_cpu_masks[monid].uccm_regs[1]);

#if UNCORE_NCTRS > 8
    CTRL_REG_SET("UPMECM2_EL1",
        uncore_config.uc_cpu_masks[monid].uccm_regs[2]);
    CTRL_REG_SET("UPMECM3_EL1",
        uncore_config.uc_cpu_masks[monid].uccm_regs[3]);
#endif /* UNCORE_NCTRS > 8 */
}

#if UNCORE_PER_CLUSTER

static inline void
uncmon_program_events_locked_r(unsigned int monid)
{
    const uintptr_t upmesr_offs[2] = {[0] = 0x41b0, [1] = 0x41b8, };

    for (unsigned int i = 0; i < sizeof(upmesr_offs) / sizeof(upmesr_offs[0]);
        i++) {
        *(uint64_t *)(cpm_impl[monid] + upmesr_offs[i]) =
            uncore_config.uc_events.uce_regs[i];
    }

    const uintptr_t upmecm_offs[4] = {
        [0] = 0x4190, [1] = 0x4198, [2] = 0x41a0, [3] = 0x41a8,
    };

    for (unsigned int i = 0; i < sizeof(upmecm_offs) / sizeof(upmecm_offs[0]);
        i++) {
        *(uint64_t *)(cpm_impl[monid] + upmecm_offs[i]) =
            uncore_config.uc_cpu_masks[monid].uccm_regs[i];
    }
}

#endif /* UNCORE_PER_CLUSTER */

static void
uncmon_clear_int_locked_l(__unused unsigned int monid)
{
    __builtin_arm_wsr64("UPMSR_EL1", 0);
}

#if UNCORE_PER_CLUSTER

static void
uncmon_clear_int_locked_r(unsigned int monid)
{
    const uintptr_t upmsr_off = 0x41c0;
    *(uint64_t *)(cpm_impl[monid] + upmsr_off) = 0;
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * Get the PMI mask for the provided `monid` -- that is, the bitmap of CPUs
 * that should be sent PMIs for a particular monitor.
 */
static uint64_t
uncmon_get_pmi_mask(unsigned int monid)
{
    uint64_t pmi_mask = uncore_pmi_mask;

#if UNCORE_PER_CLUSTER
    pmi_mask &= topology_info->clusters[monid].cpu_mask;
#else /* UNCORE_PER_CLUSTER */
#pragma unused(monid)
#endif /* !UNCORE_PER_CLUSTER */

    return pmi_mask;
}
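
/*
 * For example, on a hypothetical two-cluster system with CPUs 0-3 in cluster 0
 * and CPUs 4-9 in cluster 1, the default uncore_pmi_mask set up by uncore_init
 * is 0x11 (the first CPU of each cluster), so uncmon_get_pmi_mask(1) yields
 * 0x10 after masking with that cluster's cpu_mask.
 */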

/*
 * Initialization routines for the uncore counters.
 */

static void
uncmon_init_locked_l(unsigned int monid)
{
    /*
     * UPMPCM defines the PMI core mask for the UPMCs -- which cores should
     * receive interrupts on overflow.
     */
    CTRL_REG_SET("UPMPCM_EL1", uncmon_get_pmi_mask(monid));
    uncmon_set_counting_locked_l(monid,
        mt_uncore_enabled ? uncore_active_ctrs : 0);
}

#if UNCORE_PER_CLUSTER

static uintptr_t acc_impl[MAX_NMONITORS] = {};

static void
uncmon_init_locked_r(unsigned int monid)
{
    const uintptr_t upmpcm_off = 0x1010;

    *(uint64_t *)(acc_impl[monid] + upmpcm_off) = uncmon_get_pmi_mask(monid);
    uncmon_set_counting_locked_r(monid,
        mt_uncore_enabled ? uncore_active_ctrs : 0);
}

#endif /* UNCORE_PER_CLUSTER */

/*
 * Initialize the uncore device for monotonic.
 */
static int
uncore_init(__unused mt_device_t dev)
{
#if HAS_UNCORE_CTRS
    assert(MT_NDEVS > 0);
    mt_devices[MT_NDEVS - 1].mtd_nmonitors = (uint8_t)uncore_nmonitors();
#endif

#if DEVELOPMENT || DEBUG
    /*
     * Development and debug kernels observe the `uncore_pmi_mask` boot-arg,
     * allowing PMIs to be routed to the CPUs present in the supplied bitmap.
     * Do some sanity checks on the value provided.
     */
    bool parsed_arg = PE_parse_boot_argn("uncore_pmi_mask", &uncore_pmi_mask,
        sizeof(uncore_pmi_mask));
    if (parsed_arg) {
#if UNCORE_PER_CLUSTER
        if (__builtin_popcount(uncore_pmi_mask) != (int)uncore_nmonitors()) {
            panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
        }
        for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
            if (__builtin_popcountll(uncmon_get_pmi_mask(i)) != 1) {
                panic("monotonic: invalid uncore PMI CPU for cluster %d in mask 0x%x",
                    i, uncore_pmi_mask);
            }
        }
#else /* UNCORE_PER_CLUSTER */
        if (__builtin_popcount(uncore_pmi_mask) != 1) {
            panic("monotonic: invalid uncore PMI mask 0x%x", uncore_pmi_mask);
        }
#endif /* !UNCORE_PER_CLUSTER */
    } else
#endif /* DEVELOPMENT || DEBUG */
    {
#if UNCORE_PER_CLUSTER
        for (unsigned int i = 0; i < topology_info->num_clusters; i++) {
            uncore_pmi_mask |= 1ULL << topology_info->clusters[i].first_cpu_id;
        }
#else /* UNCORE_PER_CLUSTER */
        /* arbitrarily route to core 0 */
        uncore_pmi_mask |= 1;
#endif /* !UNCORE_PER_CLUSTER */
    }
    assert(uncore_pmi_mask != 0);

    for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
#if UNCORE_PER_CLUSTER
        ml_topology_cluster_t *cluster = &topology_info->clusters[monid];
        cpm_impl[monid] = (uintptr_t)cluster->cpm_IMPL_regs;
        acc_impl[monid] = (uintptr_t)cluster->acc_IMPL_regs;
        assert(cpm_impl[monid] != 0 && acc_impl[monid] != 0);
#endif /* UNCORE_PER_CLUSTER */

        struct uncore_monitor *mon = &uncore_monitors[monid];
        lck_spin_init(&mon->um_lock, &mt_lock_grp, LCK_ATTR_NULL);
    }

    mt_uncore_initted = true;

    return 0;
}

/*
 * Support for monotonic's mtd_read function.
 */

static void
uncmon_read_all_counters(unsigned int monid, unsigned int curmonid,
    uint64_t ctr_mask, uint64_t *counts)
{
    struct uncore_monitor *mon = &uncore_monitors[monid];

    int intrs_en = uncmon_lock(mon);

    for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
        if (ctr_mask & (1ULL << ctr)) {
            uncmon_update_locked(monid, curmonid, ctr);
            counts[ctr] = mon->um_counts[ctr];
        }
    }

    uncmon_unlock(mon, intrs_en);
}

/*
 * Read all monitors' counters.
 */
static int
uncore_read(uint64_t ctr_mask, uint64_t *counts_out)
{
    assert(ctr_mask != 0);
    assert(counts_out != NULL);

    if (!uncore_active_ctrs) {
        return EPWROFF;
    }
    if (ctr_mask & ~uncore_active_ctrs) {
        return EINVAL;
    }

    unsigned int curmonid = uncmon_get_curid();
    for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
        /*
         * Find this monitor's starting offset into the `counts_out` array.
         */
        uint64_t *counts = counts_out + (UNCORE_NCTRS * monid);

        uncmon_read_all_counters(monid, curmonid, ctr_mask, counts);
    }

    return 0;
}

/*
 * Support for monotonic's mtd_add function.
 */

/*
 * Add an event to the current uncore configuration. This doesn't take effect
 * until the counters are enabled again, so there's no need to involve the
 * monitors.
 */
static int
uncore_add(struct monotonic_config *config, uint32_t *ctr_out)
{
    if (mt_uncore_enabled) {
        return EBUSY;
    }

    uint32_t available = ~uncore_active_ctrs & config->allowed_ctr_mask;

    if (available == 0) {
        return ENOSPC;
    }

    uint32_t valid_ctrs = (UINT32_C(1) << UNCORE_NCTRS) - 1;
    if ((available & valid_ctrs) == 0) {
        return E2BIG;
    }
    /*
     * Clear the UPMCs the first time an event is added.
     */
    unsigned int curmonid = uncmon_get_curid();
    if (uncore_active_ctrs == 0) {
        for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
            struct uncore_monitor *mon = &uncore_monitors[monid];
            bool remote = monid != curmonid;

            int intrs_en = uncmon_lock(mon);
            for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
                if (remote) {
#if UNCORE_PER_CLUSTER
                    uncmon_write_counter_locked_r(monid, ctr, 0);
#endif /* UNCORE_PER_CLUSTER */
                } else {
                    uncmon_write_counter_locked_l(monid, ctr, 0);
                }
            }
            uncmon_unlock(mon, intrs_en);
        }
    }

    uint32_t ctr = __builtin_ffsll(available) - 1;

    uncore_active_ctrs |= UINT64_C(1) << ctr;
    uncore_config.uc_events.uce_ctrs[ctr] = (uint8_t)config->event;
    uint64_t cpu_mask = UINT64_MAX;
    if (config->cpu_mask != 0) {
        cpu_mask = config->cpu_mask;
    }
    for (unsigned int i = 0; i < uncore_nmonitors(); i++) {
#if UNCORE_PER_CLUSTER
        const unsigned int shift = topology_info->clusters[i].first_cpu_id;
#else /* UNCORE_PER_CLUSTER */
        const unsigned int shift = 0;
#endif /* !UNCORE_PER_CLUSTER */
        uncore_config.uc_cpu_masks[i].uccm_masks[ctr] = (uint16_t)(cpu_mask >> shift);
    }

    *ctr_out = ctr;
    return 0;
}
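
/*
 * For example, if counters 0 and 1 are already active and the config allows
 * any counter, `available` has bit 2 as its lowest set bit, so
 * __builtin_ffsll(available) - 1 allocates counter 2. A config->cpu_mask of
 * zero is treated as "all CPUs", and each cluster's mask is shifted down so
 * that bit 0 of uccm_masks corresponds to the first CPU in that cluster.
 */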

/*
 * Support for monotonic's mtd_reset function.
 */

/*
 * Reset all configuration and disable the counters if they're currently
 * counting.
 */
static void
uncore_reset(void)
{
    mt_uncore_enabled = false;

    unsigned int curmonid = uncmon_get_curid();

    if (mt_owns_counters()) {
        for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
            struct uncore_monitor *mon = &uncore_monitors[monid];
            bool remote = monid != curmonid;

            int intrs_en = uncmon_lock(mon);
            if (remote) {
#if UNCORE_PER_CLUSTER
                uncmon_set_counting_locked_r(monid, 0);
#endif /* UNCORE_PER_CLUSTER */
            } else {
                uncmon_set_counting_locked_l(monid, 0);
            }

            for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
                if (uncore_active_ctrs & (1U << ctr)) {
                    if (remote) {
#if UNCORE_PER_CLUSTER
                        uncmon_write_counter_locked_r(monid, ctr, 0);
#endif /* UNCORE_PER_CLUSTER */
                    } else {
                        uncmon_write_counter_locked_l(monid, ctr, 0);
                    }
                }
            }

            memset(&mon->um_snaps, 0, sizeof(mon->um_snaps));
            memset(&mon->um_counts, 0, sizeof(mon->um_counts));
            if (remote) {
#if UNCORE_PER_CLUSTER
                uncmon_clear_int_locked_r(monid);
#endif /* UNCORE_PER_CLUSTER */
            } else {
                uncmon_clear_int_locked_l(monid);
            }

            uncmon_unlock(mon, intrs_en);
        }
    }

    uncore_active_ctrs = 0;
    memset(&uncore_config, 0, sizeof(uncore_config));

    if (mt_owns_counters()) {
        for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
            struct uncore_monitor *mon = &uncore_monitors[monid];
            bool remote = monid != curmonid;

            int intrs_en = uncmon_lock(mon);
            if (remote) {
#if UNCORE_PER_CLUSTER
                uncmon_program_events_locked_r(monid);
#endif /* UNCORE_PER_CLUSTER */
            } else {
                uncmon_program_events_locked_l(monid);
            }
            uncmon_unlock(mon, intrs_en);
        }
    }
}

/*
 * Support for monotonic's mtd_enable function.
 */

static void
uncmon_set_enabled_l(unsigned int monid, bool enable)
{
    struct uncore_monitor *mon = &uncore_monitors[monid];
    int intrs_en = uncmon_lock(mon);

    if (enable) {
        uncmon_init_locked_l(monid);
        uncmon_program_events_locked_l(monid);
        uncmon_set_counting_locked_l(monid, uncore_active_ctrs);
    } else {
        uncmon_set_counting_locked_l(monid, 0);
    }

    uncmon_unlock(mon, intrs_en);
}

#if UNCORE_PER_CLUSTER

static void
uncmon_set_enabled_r(unsigned int monid, bool enable)
{
    struct uncore_monitor *mon = &uncore_monitors[monid];
    int intrs_en = uncmon_lock(mon);

    if (enable) {
        uncmon_init_locked_r(monid);
        uncmon_program_events_locked_r(monid);
        uncmon_set_counting_locked_r(monid, uncore_active_ctrs);
    } else {
        uncmon_set_counting_locked_r(monid, 0);
    }

    uncmon_unlock(mon, intrs_en);
}

#endif /* UNCORE_PER_CLUSTER */

static void
uncore_set_enabled(bool enable)
{
    mt_uncore_enabled = enable;

    unsigned int curmonid = uncmon_get_curid();
    for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
        if (monid != curmonid) {
#if UNCORE_PER_CLUSTER
            uncmon_set_enabled_r(monid, enable);
#endif /* UNCORE_PER_CLUSTER */
        } else {
            uncmon_set_enabled_l(monid, enable);
        }
    }
}

/*
 * Hooks in the machine layer.
 */

static void
uncore_fiq(uint64_t upmsr)
{
    /*
     * Determine which counters overflowed.
     */
    uint64_t disable_ctr_mask = (upmsr & UPMSR_OVF_MASK) >> UPMSR_OVF_POS;
    /* should not receive interrupts from inactive counters */
    assert(!(disable_ctr_mask & ~uncore_active_ctrs));

    if (uncore_active_ctrs == 0) {
        return;
    }

    unsigned int monid = uncmon_get_curid();
    struct uncore_monitor *mon = &uncore_monitors[monid];

    int intrs_en = uncmon_lock(mon);

    /*
     * Disable any counters that overflowed.
     */
    uncmon_set_counting_locked_l(monid,
        uncore_active_ctrs & ~disable_ctr_mask);

    /*
     * With the overflowing counters disabled, capture their counts and reset
     * the UPMCs and their snapshots to 0.
     */
    for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
        if (UPMSR_OVF(upmsr, ctr)) {
            uncmon_update_locked(monid, monid, ctr);
            mon->um_snaps[ctr] = 0;
            uncmon_write_counter_locked_l(monid, ctr, 0);
        }
    }

    /*
     * Acknowledge the interrupt, now that any overflowed PMCs have been reset.
     */
    uncmon_clear_int_locked_l(monid);

    /*
     * Re-enable all active counters.
     */
    uncmon_set_counting_locked_l(monid, uncore_active_ctrs);

    uncmon_unlock(mon, intrs_en);
}

static void
uncore_save(void)
{
    if (!uncore_active_ctrs) {
        return;
    }

    unsigned int curmonid = uncmon_get_curid();

    for (unsigned int monid = 0; monid < uncore_nmonitors(); monid++) {
        struct uncore_monitor *mon = &uncore_monitors[monid];
        int intrs_en = uncmon_lock(mon);

        if (mt_uncore_enabled) {
            if (monid != curmonid) {
#if UNCORE_PER_CLUSTER
                uncmon_set_counting_locked_r(monid, 0);
#endif /* UNCORE_PER_CLUSTER */
            } else {
                uncmon_set_counting_locked_l(monid, 0);
            }
        }

        for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
            if (uncore_active_ctrs & (1U << ctr)) {
                uncmon_update_locked(monid, curmonid, ctr);
            }
        }

        mon->um_sleeping = true;
        uncmon_unlock(mon, intrs_en);
    }
}

static void
uncore_restore(void)
{
    if (!uncore_active_ctrs) {
        return;
    }
    unsigned int curmonid = uncmon_get_curid();

    struct uncore_monitor *mon = &uncore_monitors[curmonid];
    int intrs_en = uncmon_lock(mon);
    if (!mon->um_sleeping) {
        goto out;
    }

    for (unsigned int ctr = 0; ctr < UNCORE_NCTRS; ctr++) {
        if (uncore_active_ctrs & (1U << ctr)) {
            uncmon_write_counter_locked_l(curmonid, ctr, mon->um_snaps[ctr]);
        }
    }
    uncmon_program_events_locked_l(curmonid);
    uncmon_init_locked_l(curmonid);
    mon->um_sleeping = false;

out:
    uncmon_unlock(mon, intrs_en);
}

#endif /* HAS_UNCORE_CTRS */

#pragma mark common hooks

void
mt_early_init(void)
{
    topology_info = ml_get_topology_info();
}

void
mt_cpu_idle(cpu_data_t *cpu)
{
    core_idle(cpu);
}

void
mt_cpu_run(cpu_data_t *cpu)
{
    struct mt_cpu *mtc;

    assert(cpu != NULL);
    assert(ml_get_interrupts_enabled() == FALSE);

    mtc = &cpu->cpu_monotonic;

    for (int i = 0; i < MT_CORE_NFIXED; i++) {
        mt_core_set_snap(i, mtc->mtc_snaps[i]);
    }

    /* re-enable the counters */
    core_init_execution_modes();

    core_set_enabled();
}

void
mt_cpu_down(cpu_data_t *cpu)
{
    mt_cpu_idle(cpu);
}

void
mt_cpu_up(cpu_data_t *cpu)
{
    mt_cpu_run(cpu);
}

void
mt_sleep(void)
{
#if HAS_UNCORE_CTRS
    uncore_save();
#endif /* HAS_UNCORE_CTRS */
}

void
mt_wake_per_core(void)
{
#if HAS_UNCORE_CTRS
    if (mt_uncore_initted) {
        uncore_restore();
    }
#endif /* HAS_UNCORE_CTRS */
}

uint64_t
mt_count_pmis(void)
{
    uint64_t npmis = 0;
    for (unsigned int i = 0; i < topology_info->num_cpus; i++) {
        cpu_data_t *cpu = (cpu_data_t *)CpuDataEntries[topology_info->cpus[i].cpu_id].cpu_data_vaddr;
        npmis += cpu->cpu_monotonic.mtc_npmis;
    }
    return npmis;
}

static void
mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmcr0)
{
    assert(cpu != NULL);
    assert(ml_get_interrupts_enabled() == FALSE);

    __builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);
    /*
     * Ensure the CPMU has flushed any increments at this point, so PMSR is up
     * to date.
     */
    __builtin_arm_isb(ISB_SY);

    cpu->cpu_monotonic.mtc_npmis += 1;
    cpu->cpu_stat.pmi_cnt_wake += 1;

#if MONOTONIC_DEBUG
    if (!PMCR0_PMI(pmcr0)) {
        kprintf("monotonic: mt_cpu_pmi but no PMI (PMCR0 = %#llx)\n",
            pmcr0);
    }
#else /* MONOTONIC_DEBUG */
#pragma unused(pmcr0)
#endif /* !MONOTONIC_DEBUG */

    uint64_t pmsr = __builtin_arm_rsr64("PMSR_EL1");

#if MONOTONIC_DEBUG
    printf("monotonic: cpu = %d, PMSR = 0x%llx, PMCR0 = 0x%llx\n",
        cpu_number(), pmsr, pmcr0);
#endif /* MONOTONIC_DEBUG */

#if MACH_ASSERT
    uint64_t handled = 0;
#endif /* MACH_ASSERT */

    /*
     * monotonic handles any fixed counter PMIs.
     */
    for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
        if ((pmsr & PMSR_OVF(i)) == 0) {
            continue;
        }

#if MACH_ASSERT
        handled |= 1ULL << i;
#endif /* MACH_ASSERT */
        uint64_t count = mt_cpu_update_count(cpu, i);
        cpu->cpu_monotonic.mtc_counts[i] += count;
        mt_core_set_snap(i, mt_core_reset_values[i]);
        cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];

        if (mt_microstackshots && mt_microstackshot_ctr == i) {
            bool user_mode = false;
            arm_saved_state_t *state = get_user_regs(current_thread());
            if (state) {
                user_mode = PSR64_IS_USER(get_saved_state_cpsr(state));
            }
            KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
                mt_microstackshot_ctr, user_mode);
            mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
        } else if (mt_debug) {
            KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 2),
                i, count);
        }
    }

    /*
     * KPC handles the configurable counter PMIs.
     */
    for (unsigned int i = MT_CORE_NFIXED; i < CORE_NCTRS; i++) {
        if (pmsr & PMSR_OVF(i)) {
#if MACH_ASSERT
            handled |= 1ULL << i;
#endif /* MACH_ASSERT */
            extern void kpc_pmi_handler(unsigned int ctr);
            kpc_pmi_handler(i);
        }
    }

#if MACH_ASSERT
    uint64_t pmsr_after_handling = __builtin_arm_rsr64("PMSR_EL1");
    if (pmsr_after_handling != 0) {
        unsigned int first_ctr_ovf = __builtin_ffsll(pmsr_after_handling) - 1;
        uint64_t count = 0;
        const char *extra = "";
        if (first_ctr_ovf >= CORE_NCTRS) {
            extra = " (invalid counter)";
        } else {
            count = mt_core_snap(first_ctr_ovf);
        }

        panic("monotonic: PMI status not cleared on exit from handler, "
            "PMSR = 0x%llx -> 0x%llx, handled 0x%llx, "
            "PMCR0 = 0x%llx, PMC%d = 0x%llx%s", pmsr, pmsr_after_handling,
            handled, __builtin_arm_rsr64("PMCR0_EL1"), first_ctr_ovf, count, extra);
    }
#endif /* MACH_ASSERT */

    core_set_enabled();
}

#if CPMU_AIC_PMI
void
mt_cpmu_aic_pmi(cpu_id_t source)
{
    struct cpu_data *curcpu = getCpuDatap();
    if (source != curcpu->interrupt_nub) {
        panic("monotonic: PMI from IOCPU %p delivered to %p", source,
            curcpu->interrupt_nub);
    }
    mt_cpu_pmi(curcpu, __builtin_arm_rsr64("PMCR0_EL1"));
}
#endif /* CPMU_AIC_PMI */

void
mt_fiq(void *cpu, uint64_t pmcr0, uint64_t upmsr)
{
#if CPMU_AIC_PMI
#pragma unused(cpu, pmcr0)
#else /* CPMU_AIC_PMI */
    mt_cpu_pmi(cpu, pmcr0);
#endif /* !CPMU_AIC_PMI */

#if HAS_UNCORE_CTRS
    if (upmsr != 0) {
        uncore_fiq(upmsr);
    }
#else /* HAS_UNCORE_CTRS */
#pragma unused(upmsr)
#endif /* !HAS_UNCORE_CTRS */
}

void
mt_ownership_change(bool available)
{
#if HAS_UNCORE_CTRS
    /*
     * No need to take the lock here, as this is only manipulated in the UPMU
     * when the current task already owns the counters and is on its way out.
     */
    if (!available && uncore_active_ctrs) {
        uncore_reset();
    }
#else
#pragma unused(available)
#endif /* HAS_UNCORE_CTRS */
}

static uint32_t mt_xc_sync;

static void
mt_microstackshot_start_remote(__unused void *arg)
{
    cpu_data_t *cpu = getCpuDatap();

    __builtin_arm_wsr64("PMCR0_EL1", PMCR0_INIT);

    for (int i = 0; i < MT_CORE_NFIXED; i++) {
        uint64_t count = mt_cpu_update_count(cpu, i);
        cpu->cpu_monotonic.mtc_counts[i] += count;
        mt_core_set_snap(i, mt_core_reset_values[i]);
        cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
    }

    core_set_enabled();

    if (os_atomic_dec(&mt_xc_sync, relaxed) == 0) {
        thread_wakeup((event_t)&mt_xc_sync);
    }
}

int
mt_microstackshot_start_arch(uint64_t period)
{
    uint64_t reset_value = 0;
    int ovf = os_sub_overflow(CTR_MAX, period, &reset_value);
    if (ovf) {
        return ERANGE;
    }

    mt_core_reset_values[mt_microstackshot_ctr] = reset_value;
    cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote,
        mt_microstackshot_start_remote /* cannot pass NULL */);
    return 0;
}
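
/*
 * Worked example: to sample every 1000000 events on 48-bit PMCs, the counter
 * is preloaded with CTR_MAX - 1000000 = 0x7ffffff0bdbf, so it overflows (and
 * raises a PMI) after exactly 1000000 more increments. The same reset value
 * is re-armed in the PMI handler via mt_core_set_snap.
 */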

#pragma mark dev nodes

struct mt_device mt_devices[] = {
    [0] = {
        .mtd_name = "core",
        .mtd_init = core_init,
    },
#if HAS_UNCORE_CTRS
    [1] = {
        .mtd_name = "uncore",
        .mtd_init = uncore_init,
        .mtd_add = uncore_add,
        .mtd_reset = uncore_reset,
        .mtd_enable = uncore_set_enabled,
        .mtd_read = uncore_read,

        .mtd_ncounters = UNCORE_NCTRS,
    }
#endif /* HAS_UNCORE_CTRS */
};

static_assert(
    (sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
    "MT_NDEVS macro should be same as the length of mt_devices");