1 // Copyright (c) 2020 Apple Inc. All rights reserved.
2 //
3 // @APPLE_OSREFERENCE_LICENSE_HEADER_START@
4 //
5 // This file contains Original Code and/or Modifications of Original Code
6 // as defined in and that are subject to the Apple Public Source License
7 // Version 2.0 (the 'License'). You may not use this file except in
8 // compliance with the License. The rights granted to you under the License
9 // may not be used to create, or enable the creation or redistribution of,
10 // unlawful or unlicensed copies of an Apple operating system, or to
11 // circumvent, violate, or enable the circumvention or violation of, any
12 // terms of an Apple operating system software license agreement.
13 //
14 // Please obtain a copy of the License at
15 // http://www.opensource.apple.com/apsl/ and read it before using this file.
16 //
17 // The Original Code and all software distributed under the License are
18 // distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
19 // EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
20 // INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
21 // FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
22 // Please see the License for the specific language governing rights and
23 // limitations under the License.
24 //
25 // @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26
27 #if KERNEL
28 #include <kern/kalloc.h>
29 #include <kern/misc_protos.h>
30 #include <kern/perfmon.h>
31 #include <machine/atomic.h>
32 #include <machine/machine_perfmon.h>
33 #include <pexpert/pexpert.h>
34 #endif // KERNEL
35
36 #include <stdint.h>
37 #include <sys/errno.h>
38 #include <sys/perfmon_private.h>
39 #include <sys/queue.h>
40
// Backing storage for each supported PMU kind. Populated once during boot by
// perfmon_source_reserve() and effectively read-only afterwards
// (SECURITY_READ_ONLY_LATE).
SECURITY_READ_ONLY_LATE(struct perfmon_source) perfmon_sources[perfmon_kind_max]
    = { 0 };
43
// Human-readable names for each PMU kind, indexed by enum perfmon_kind;
// installed into ps_name by perfmon_source_reserve().
const char *perfmon_names[perfmon_kind_max] = {
	[perfmon_cpmu] = "core",
	[perfmon_upmu] = "uncore",
};
48
49 _Atomic perfmon_config_t active_configs[perfmon_kind_max] = { NULL };
50
51 #if KERNEL
52
53 const char * _Atomic perfmon_owners[perfmon_kind_max] = { NULL };
54
55 __result_use_check bool
perfmon_acquire(enum perfmon_kind kind,const char * name)56 perfmon_acquire(enum perfmon_kind kind, const char *name)
57 {
58 assert(kind < perfmon_kind_max);
59 #if KPC
60 extern int kpc_get_force_all_ctrs(void);
61 if (kind == perfmon_cpmu && kpc_get_force_all_ctrs()) {
62 return false;
63 }
64 #endif // KPC
65 return os_atomic_cmpxchg(&perfmon_owners[kind], NULL, name, acq_rel);
66 }
67
68 bool
perfmon_in_use(enum perfmon_kind kind)69 perfmon_in_use(enum perfmon_kind kind)
70 {
71 assert(kind < perfmon_kind_max);
72 return os_atomic_load(&perfmon_owners[kind], acquire) != NULL;
73 }
74
75 void
perfmon_release(enum perfmon_kind kind,const char * name)76 perfmon_release(enum perfmon_kind kind, const char *name)
77 {
78 assert(kind < perfmon_kind_max);
79 if (!os_atomic_cmpxchg(&perfmon_owners[kind], name, NULL, acq_rel)) {
80 panic("perfmon: unpaired release: %s on %u", name, kind);
81 }
82 }
83
84 #endif // KERNEL
85
86 struct perfmon_source *
perfmon_source_reserve(enum perfmon_kind kind)87 perfmon_source_reserve(enum perfmon_kind kind)
88 {
89 assert(kind < perfmon_kind_max);
90 struct perfmon_source *source = &perfmon_sources[kind];
91 if (source->ps_supported) {
92 panic("perfmon: reserving source twice: %d", kind);
93 }
94 source->ps_kind = kind;
95 source->ps_name = perfmon_names[kind];
96 source->ps_supported = true;
97 return source;
98 }
99
// Read the given source's hardware register values into `regs`
// (`regs_count` entries). Only implemented in-kernel, where it forwards to
// the machine layer; the user-space build of this file has no way to sample
// registers and panics instead.
void
perfmon_source_sample_regs(struct perfmon_source *source, uint64_t *regs,
    size_t regs_count)
{
#if KERNEL
	perfmon_machine_sample_regs(source->ps_kind, regs, regs_count);
#else // KERNEL
#pragma unused(source, regs, regs_count)
	panic("perfmon: sample registers unavailable");
#endif // !KERNEL
}
111
112 static void
perfmon_spec_init(struct perfmon_spec * spec)113 perfmon_spec_init(struct perfmon_spec *spec)
114 {
115 spec->ps_events = kalloc_data(
116 PERFMON_SPEC_MAX_EVENT_COUNT * sizeof(spec->ps_events[0]),
117 Z_WAITOK | Z_ZERO);
118 spec->ps_attrs = kalloc_data(
119 PERFMON_SPEC_MAX_ATTR_COUNT * sizeof(spec->ps_attrs[0]),
120 Z_WAITOK | Z_ZERO);
121 }
122
123 static void
perfmon_spec_deinit(struct perfmon_spec * spec)124 perfmon_spec_deinit(struct perfmon_spec *spec)
125 {
126 kfree_data(spec->ps_events,
127 PERFMON_SPEC_MAX_EVENT_COUNT * sizeof(spec->ps_events[0]));
128 kfree_data(spec->ps_attrs,
129 PERFMON_SPEC_MAX_ATTR_COUNT * sizeof(spec->ps_attrs[0]));
130 }
131
132 perfmon_config_t
perfmon_config_create(struct perfmon_source * source)133 perfmon_config_create(struct perfmon_source *source)
134 {
135 if (!source->ps_supported) {
136 return NULL;
137 }
138 struct perfmon_config *config = kalloc_type(struct perfmon_config,
139 Z_WAITOK | Z_ZERO);
140 config->pc_counters = kalloc_data(
141 sizeof(config->pc_counters[0]) *
142 source->ps_layout.pl_counter_count, Z_WAITOK | Z_ZERO);
143 perfmon_spec_init(&config->pc_spec);
144 config->pc_source = source;
145 return config;
146 }
147
148 int
perfmon_config_add_event(perfmon_config_t config,const struct perfmon_event * event)149 perfmon_config_add_event(perfmon_config_t config,
150 const struct perfmon_event *event)
151 {
152 if (config->pc_configured) {
153 return EBUSY;
154 }
155 struct perfmon_layout *layout = &config->pc_source->ps_layout;
156 struct perfmon_spec *spec = &config->pc_spec;
157 if (event->pe_counter >= layout->pl_counter_count) {
158 return ERANGE;
159 }
160 unsigned short fixed_end = layout->pl_fixed_offset +
161 layout->pl_fixed_count;
162 if (event->pe_counter >= layout->pl_fixed_offset &&
163 event->pe_counter < fixed_end) {
164 return ENODEV;
165 }
166
167 if (spec->ps_event_count >= PERFMON_SPEC_MAX_EVENT_COUNT) {
168 return ENOSPC;
169 }
170 struct perfmon_counter *counter = &config->pc_counters[event->pe_counter];
171 uint64_t counter_bit = (1ULL << event->pe_counter);
172 if ((config->pc_counters_used & counter_bit) != 0) {
173 return EALREADY;
174 }
175
176 counter->pc_number = event->pe_number;
177 config->pc_counters_used |= counter_bit;
178 spec->ps_events[spec->ps_event_count] = *event;
179 spec->ps_event_count += 1;
180 return 0;
181 }
182
183 static int
perfmon_source_resolve_attr(struct perfmon_source * source,const struct perfmon_attr * attr)184 perfmon_source_resolve_attr(struct perfmon_source *source,
185 const struct perfmon_attr *attr)
186 {
187 unsigned short attr_count = source->ps_layout.pl_attr_count;
188 for (unsigned short i = 0; i < attr_count; i++) {
189 const perfmon_name_t *cur_attr = &source->ps_attribute_names[i];
190 if (strncmp(attr->pa_name, *cur_attr, sizeof(*cur_attr)) == 0) {
191 return i;
192 }
193 }
194 return -1;
195 }
196
197 int
perfmon_config_set_attr(perfmon_config_t config,const struct perfmon_attr * attr)198 perfmon_config_set_attr(perfmon_config_t config,
199 const struct perfmon_attr *attr)
200 {
201 if (config->pc_configured) {
202 return EBUSY;
203 }
204 struct perfmon_spec *spec = &config->pc_spec;
205 if (spec->ps_attr_count >= PERFMON_SPEC_MAX_ATTR_COUNT) {
206 return ENOSPC;
207 }
208 if (!PE_i_can_has_debugger(NULL)) {
209 return EPERM;
210 }
211
212 int attr_id = perfmon_source_resolve_attr(config->pc_source, attr);
213 if (attr_id < 0) {
214 return ENOATTR;
215 }
216
217 uint64_t attr_bit = 1ULL << attr_id;
218 if (config->pc_attrs_used & attr_bit) {
219 return EALREADY;
220 }
221
222 config->pc_attr_ids[spec->ps_attr_count] = (unsigned short)attr_id;
223 config->pc_attrs_used |= attr_bit;
224 spec->ps_attrs[spec->ps_attr_count] = *attr;
225 spec->ps_attr_count += 1;
226 return 0;
227 }
228
229 int
perfmon_configure(perfmon_config_t config)230 perfmon_configure(perfmon_config_t config)
231 {
232 enum perfmon_kind kind = config->pc_source->ps_kind;
233 if (!os_atomic_cmpxchg(&active_configs[kind], NULL, config, acq_rel)) {
234 return EBUSY;
235 }
236 int error = perfmon_machine_configure(config->pc_source->ps_kind,
237 config);
238 config->pc_configured = true;
239 return error;
240 }
241
242 struct perfmon_spec *
perfmon_config_specify(perfmon_config_t config)243 perfmon_config_specify(perfmon_config_t config)
244 {
245 return &config->pc_spec;
246 }
247
248 void
perfmon_config_destroy(perfmon_config_t config)249 perfmon_config_destroy(perfmon_config_t config)
250 {
251 if (config->pc_configured) {
252 enum perfmon_kind kind = config->pc_source->ps_kind;
253 if (!os_atomic_cmpxchg(&active_configs[kind], config, NULL, acq_rel)) {
254 panic("perfmon: destroying config that wasn't active: %p", config);
255 }
256
257 perfmon_machine_reset(config->pc_source->ps_kind);
258 }
259 kfree_data(config->pc_counters, sizeof(config->pc_counters[0]) *
260 config->pc_source->ps_layout.pl_counter_count);
261 perfmon_spec_deinit(&config->pc_spec);
262 kfree_type(struct perfmon_config, config);
263 }
264