1 // Copyright (c) 2020 Apple Inc. All rights reserved.
2 //
3 // @APPLE_OSREFERENCE_LICENSE_HEADER_START@
4 //
5 // This file contains Original Code and/or Modifications of Original Code
6 // as defined in and that are subject to the Apple Public Source License
7 // Version 2.0 (the 'License'). You may not use this file except in
8 // compliance with the License. The rights granted to you under the License
9 // may not be used to create, or enable the creation or redistribution of,
10 // unlawful or unlicensed copies of an Apple operating system, or to
11 // circumvent, violate, or enable the circumvention or violation of, any
12 // terms of an Apple operating system software license agreement.
13 //
14 // Please obtain a copy of the License at
15 // http://www.opensource.apple.com/apsl/ and read it before using this file.
16 //
17 // The Original Code and all software distributed under the License are
18 // distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
19 // EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
20 // INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
21 // FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
22 // Please see the License for the specific language governing rights and
23 // limitations under the License.
24 //
25 // @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26
27 #if KERNEL
28 #include <kern/kalloc.h>
29 #include <kern/misc_protos.h>
30 #include <kern/perfmon.h>
31 #include <machine/atomic.h>
32 #include <machine/machine_perfmon.h>
33 #include <pexpert/pexpert.h>
34 #endif // KERNEL
35
36 #include <stdint.h>
37 #include <sys/errno.h>
38 #include <sys/perfmon_private.h>
39 #include <sys/queue.h>
40
// Table of performance-monitoring sources, indexed by kind.  Entries are
// filled in once by perfmon_source_reserve during early boot and are
// effectively read-only afterwards.
SECURITY_READ_ONLY_LATE(struct perfmon_source) perfmon_sources[perfmon_kind_max]
    = { 0 };

// Human-readable name for each source kind; copied into ps_name on reserve.
const char *perfmon_names[perfmon_kind_max] = {
	[perfmon_cpmu] = "core",
	[perfmon_upmu] = "uncore",
};

// The configuration currently programmed on each PMU kind, or NULL when
// idle.  Set and cleared with cmpxchg by perfmon_configure and
// perfmon_config_destroy.
_Atomic perfmon_config_t active_configs[perfmon_kind_max] = { NULL };
50
51 #if KERNEL
52
53 const char * _Atomic perfmon_owners[perfmon_kind_max] = { NULL };
54
55 static bool
kpc_in_use(enum perfmon_kind id)56 kpc_in_use(enum perfmon_kind id)
57 {
58 #if KPC
59 extern int kpc_get_force_all_ctrs(void);
60 return !(perfmon_cpmu == id && kpc_get_force_all_ctrs());
61 #else // KPC
62 #pragma unused(id)
63 return false;
64 #endif // !KPC
65 }
66
67 __result_use_check bool
perfmon_acquire(enum perfmon_kind kind,const char * name)68 perfmon_acquire(enum perfmon_kind kind, const char *name)
69 {
70 assert(kind < perfmon_kind_max);
71 return kpc_in_use(kind) ||
72 os_atomic_cmpxchg(&perfmon_owners[kind], NULL, name, acq_rel);
73 }
74
75 bool
perfmon_in_use(enum perfmon_kind kind)76 perfmon_in_use(enum perfmon_kind kind)
77 {
78 assert(kind < perfmon_kind_max);
79 return os_atomic_load(&perfmon_owners[kind], acquire) != NULL;
80 }
81
82 void
perfmon_release(enum perfmon_kind kind,const char * name)83 perfmon_release(enum perfmon_kind kind, const char *name)
84 {
85 assert(kind < perfmon_kind_max);
86 if (os_atomic_cmpxchg(&perfmon_owners[kind], name, NULL, acq_rel)) {
87 panic("perfmon: unpaired release: %s on %u", name, kind);
88 }
89 }
90
91 #endif // KERNEL
92
93 struct perfmon_source *
perfmon_source_reserve(enum perfmon_kind kind)94 perfmon_source_reserve(enum perfmon_kind kind)
95 {
96 assert(kind < perfmon_kind_max);
97 struct perfmon_source *source = &perfmon_sources[kind];
98 if (source->ps_supported) {
99 panic("perfmon: reserving source twice: %d", kind);
100 }
101 source->ps_kind = kind;
102 source->ps_name = perfmon_names[kind];
103 source->ps_supported = true;
104 return source;
105 }
106
// Sample the monitoring registers of `source` into the caller-provided
// `regs` array of `regs_count` elements.
//
// Only implemented in the kernel build, where the machine layer reads the
// hardware; the non-kernel build of this file panics if called.
void
perfmon_source_sample_regs(struct perfmon_source *source, uint64_t *regs,
    size_t regs_count)
{
#if KERNEL
	perfmon_machine_sample_regs(source->ps_kind, regs, regs_count);
#else // KERNEL
#pragma unused(source, regs, regs_count)
	panic("perfmon: sample registers unavailable");
#endif // !KERNEL
}
118
119 static void
perfmon_spec_init(struct perfmon_spec * spec)120 perfmon_spec_init(struct perfmon_spec *spec)
121 {
122 spec->ps_events = kalloc_data(
123 PERFMON_SPEC_MAX_EVENT_COUNT * sizeof(spec->ps_events[0]),
124 Z_WAITOK | Z_ZERO);
125 spec->ps_attrs = kalloc_data(
126 PERFMON_SPEC_MAX_ATTR_COUNT * sizeof(spec->ps_attrs[0]),
127 Z_WAITOK | Z_ZERO);
128 }
129
130 static void
perfmon_spec_deinit(struct perfmon_spec * spec)131 perfmon_spec_deinit(struct perfmon_spec *spec)
132 {
133 kfree_data(spec->ps_events,
134 PERFMON_SPEC_MAX_EVENT_COUNT * sizeof(spec->ps_events[0]));
135 kfree_data(spec->ps_attrs,
136 PERFMON_SPEC_MAX_ATTR_COUNT * sizeof(spec->ps_attrs[0]));
137 }
138
139 perfmon_config_t
perfmon_config_create(struct perfmon_source * source)140 perfmon_config_create(struct perfmon_source *source)
141 {
142 if (!source->ps_supported) {
143 return NULL;
144 }
145 struct perfmon_config *config = kalloc_type(struct perfmon_config,
146 Z_WAITOK | Z_ZERO);
147 config->pc_counters = kalloc_data(
148 sizeof(config->pc_counters[0]) *
149 source->ps_layout.pl_counter_count, Z_WAITOK | Z_ZERO);
150 perfmon_spec_init(&config->pc_spec);
151 config->pc_source = source;
152 return config;
153 }
154
155 int
perfmon_config_add_event(perfmon_config_t config,const struct perfmon_event * event)156 perfmon_config_add_event(perfmon_config_t config,
157 const struct perfmon_event *event)
158 {
159 if (config->pc_configured) {
160 return EBUSY;
161 }
162 struct perfmon_layout *layout = &config->pc_source->ps_layout;
163 struct perfmon_spec *spec = &config->pc_spec;
164 if (event->pe_counter >= layout->pl_counter_count) {
165 return ERANGE;
166 }
167 unsigned short fixed_end = layout->pl_fixed_offset +
168 layout->pl_fixed_count;
169 if (event->pe_counter >= layout->pl_fixed_offset &&
170 event->pe_counter < fixed_end) {
171 return ENODEV;
172 }
173
174 if (spec->ps_event_count >= PERFMON_SPEC_MAX_EVENT_COUNT) {
175 return ENOSPC;
176 }
177 struct perfmon_counter *counter = &config->pc_counters[event->pe_counter];
178 uint64_t counter_bit = (1ULL << event->pe_counter);
179 if ((config->pc_counters_used & counter_bit) != 0) {
180 return EALREADY;
181 }
182
183 counter->pc_number = event->pe_number;
184 config->pc_counters_used |= counter_bit;
185 spec->ps_events[spec->ps_event_count] = *event;
186 spec->ps_event_count += 1;
187 return 0;
188 }
189
190 static int
perfmon_source_resolve_attr(struct perfmon_source * source,const struct perfmon_attr * attr)191 perfmon_source_resolve_attr(struct perfmon_source *source,
192 const struct perfmon_attr *attr)
193 {
194 unsigned short attr_count = source->ps_layout.pl_attr_count;
195 for (unsigned short i = 0; i < attr_count; i++) {
196 const perfmon_name_t *cur_attr = &source->ps_attribute_names[i];
197 if (strncmp(attr->pa_name, *cur_attr, sizeof(*cur_attr)) == 0) {
198 return i;
199 }
200 }
201 return -1;
202 }
203
204 int
perfmon_config_set_attr(perfmon_config_t config,const struct perfmon_attr * attr)205 perfmon_config_set_attr(perfmon_config_t config,
206 const struct perfmon_attr *attr)
207 {
208 if (config->pc_configured) {
209 return EBUSY;
210 }
211 struct perfmon_spec *spec = &config->pc_spec;
212 if (spec->ps_attr_count >= PERFMON_SPEC_MAX_ATTR_COUNT) {
213 return ENOSPC;
214 }
215 if (!PE_i_can_has_debugger(NULL)) {
216 return EPERM;
217 }
218
219 int attr_id = perfmon_source_resolve_attr(config->pc_source, attr);
220 if (attr_id < 0) {
221 return ENOATTR;
222 }
223
224 uint64_t attr_bit = 1ULL << attr_id;
225 if (config->pc_attrs_used & attr_bit) {
226 return EALREADY;
227 }
228
229 config->pc_attr_ids[spec->ps_attr_count] = (unsigned short)attr_id;
230 config->pc_attrs_used |= attr_bit;
231 spec->ps_attrs[spec->ps_attr_count] = *attr;
232 spec->ps_attr_count += 1;
233 return 0;
234 }
235
// Hand the configuration to the hardware, making it the active config for
// its source's PMU kind.
//
// Returns EBUSY if another config is already active for this kind;
// otherwise returns the machine layer's result.
int
perfmon_configure(perfmon_config_t config)
{
	enum perfmon_kind kind = config->pc_source->ps_kind;
	if (!os_atomic_cmpxchg(&active_configs[kind], NULL, config, acq_rel)) {
		return EBUSY;
	}
	int error = perfmon_machine_configure(config->pc_source->ps_kind,
	    config);
	// NOTE(review): pc_configured is set even when the machine layer
	// reports an error — presumably intentional, so perfmon_config_destroy
	// clears the active slot and resets partially-programmed hardware.
	// Confirm against the machine layer's failure semantics.
	config->pc_configured = true;
	return error;
}
248
// Return the specification backing `config`.  The pointer is owned by the
// config and stays valid until perfmon_config_destroy.
struct perfmon_spec *
perfmon_config_specify(perfmon_config_t config)
{
	return &config->pc_spec;
}
254
// Tear down a configuration created by perfmon_config_create.
//
// If the config reached the hardware (pc_configured), it must still be
// the active config for its kind; it is deactivated and the hardware is
// reset before any memory is freed.
void
perfmon_config_destroy(perfmon_config_t config)
{
	if (config->pc_configured) {
		enum perfmon_kind kind = config->pc_source->ps_kind;
		// A configured config that isn't in active_configs indicates
		// bookkeeping corruption — fatal.
		if (!os_atomic_cmpxchg(&active_configs[kind], config, NULL, acq_rel)) {
			panic("perfmon: destroying config that wasn't active: %p", config);
		}

		perfmon_machine_reset(config->pc_source->ps_kind);
	}
	// Size must match the allocation in perfmon_config_create.
	kfree_data(config->pc_counters, sizeof(config->pc_counters[0]) *
	    config->pc_source->ps_layout.pl_counter_count);
	perfmon_spec_deinit(&config->pc_spec);
	kfree_type(struct perfmon_config, config);
}
271