1 // Copyright (c) 2020 Apple Inc. All rights reserved.
2 //
3 // @APPLE_OSREFERENCE_LICENSE_HEADER_START@
4 //
5 // This file contains Original Code and/or Modifications of Original Code
6 // as defined in and that are subject to the Apple Public Source License
7 // Version 2.0 (the 'License'). You may not use this file except in
8 // compliance with the License. The rights granted to you under the License
9 // may not be used to create, or enable the creation or redistribution of,
10 // unlawful or unlicensed copies of an Apple operating system, or to
11 // circumvent, violate, or enable the circumvention or violation of, any
12 // terms of an Apple operating system software license agreement.
13 //
14 // Please obtain a copy of the License at
15 // http://www.opensource.apple.com/apsl/ and read it before using this file.
16 //
17 // The Original Code and all software distributed under the License are
18 // distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
19 // EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
20 // INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
21 // FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
22 // Please see the License for the specific language governing rights and
23 // limitations under the License.
24 //
25 // @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26
27 #include <kern/assert.h>
28 #include <kern/kalloc.h>
29 #include <kern/locks.h>
30 #include <kern/perfmon.h>
31 #include <libkern/copyio.h>
32 #include <machine/machine_routines.h>
33 #include <pexpert/pexpert.h>
34 #include <stdbool.h>
35 #include <sys/param.h> /* NULL */
36 #include <sys/stat.h> /* dev_t */
37 #include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
38 #include <sys/conf.h> /* must come after sys/stat.h */
39 #include <sys/perfmon_private.h>
40 #include <sys/sysctl.h>
41 #include <sys/sysproto.h>
42 #include <sys/systm.h>
43 #include <sys/types.h>
44
// Maps each perfmon source (kind) to the character-device major number
// registered for it in perfmon_dev_init().  Entries for unsupported sources
// keep their initial value of 0.
static unsigned int perfmon_dev_major_sources[perfmon_kind_max] = { 0 };
// Maximum number of cloned device nodes (minor numbers) per source, and thus
// the maximum number of concurrently-open perfmon file descriptors per kind.
static const unsigned int PERFMON_DEVICES_MAX = 4;

LCK_GRP_DECLARE(perfmon_dev_lock_group, "perfmon");

// perfmon_device corresponds to each open file descriptor for perfmon's
// character devices.
struct perfmon_device {
	// Kernel-side scratch buffer used by the ioctl handlers before copyout;
	// sized by perfmon_device_copyout_size() and freed on close.
	void *pmdv_copyout_buf;
	// Serializes ioctl operations on this open device.
	lck_mtx_t pmdv_mutex;
	// Non-NULL only for read-write opens, which also hold a reference on
	// the source (perfmon_acquire).
	perfmon_config_t pmdv_config;
	// True while this slot is claimed by an open file descriptor.
	bool pmdv_allocated;
};

struct perfmon_device perfmon_devices[perfmon_kind_max][PERFMON_DEVICES_MAX]
    = { 0 };
// perfmon_devices is protected by perfmon_devices_lock. If both a per-device
// mutex and the devices lock are taken together, the devices lock should be
// taken first.
LCK_MTX_DECLARE(perfmon_devices_lock, &perfmon_dev_lock_group);
65
66 static int
perfmon_dev_get_source_index(dev_t dev)67 perfmon_dev_get_source_index(dev_t dev)
68 {
69 int dmaj = major(dev);
70 for (int i = 0; i < perfmon_kind_max; i++) {
71 if (perfmon_dev_major_sources[i] == dmaj) {
72 return i;
73 }
74 }
75 panic("perfmon: no source for major device: 0x%x", dev);
76 }
77
78 static struct perfmon_device *
perfmon_dev_get_device(dev_t dev)79 perfmon_dev_get_device(dev_t dev)
80 {
81 int source_index = perfmon_dev_get_source_index(dev);
82 int dmin = minor(dev);
83 if (dmin >= perfmon_kind_max) {
84 panic("perfmon: invalid minor dev number: 0x%x", dev);
85 }
86
87 return &perfmon_devices[source_index][dmin];
88 }
89
90 static struct perfmon_source *
perfmon_dev_get_source(dev_t dev)91 perfmon_dev_get_source(dev_t dev)
92 {
93 return &perfmon_sources[perfmon_dev_get_source_index(dev)];
94 }
95
96 static size_t
perfmon_device_copyout_size(struct perfmon_source * source)97 perfmon_device_copyout_size(struct perfmon_source *source)
98 {
99 struct perfmon_layout *layout = &source->ps_layout;
100 size_t counters_size = layout->pl_counter_count * layout->pl_unit_count *
101 sizeof(uint64_t);
102 size_t reg_names_size = layout->pl_reg_count * sizeof(perfmon_name_t);
103 size_t reg_values_size = layout->pl_reg_count * layout->pl_unit_count *
104 sizeof(uint64_t);
105 size_t attrs_size = layout->pl_attr_count * sizeof(struct perfmon_attr);
106
107 return MAX(counters_size, MAX(reg_names_size,
108 MAX(attrs_size, reg_values_size)));
109 }
110
111 static int
perfmon_dev_open(dev_t dev,int flags,int __unused devtype,proc_t __unused p)112 perfmon_dev_open(dev_t dev, int flags, int __unused devtype, proc_t __unused p)
113 {
114 lck_mtx_lock(&perfmon_devices_lock);
115 struct perfmon_device *device = perfmon_dev_get_device(dev);
116 struct perfmon_source *source = perfmon_dev_get_source(dev);
117 if (((flags & O_RDWR) == O_RDWR)) {
118 if (!perfmon_acquire(source->ps_kind, "perfmon")) {
119 return ETXTBSY;
120 }
121 }
122 if (device->pmdv_allocated) {
123 return EMFILE;
124 }
125 if (!source->ps_supported) {
126 panic("perfmon: attempt to open unsupported source: 0x%x", dev);
127 }
128 device->pmdv_allocated = true;
129 device->pmdv_copyout_buf = kalloc_data(perfmon_device_copyout_size(source), Z_WAITOK);
130 if ((flags & O_RDWR) == O_RDWR) {
131 device->pmdv_config = perfmon_config_create(source);
132 }
133 lck_mtx_unlock(&perfmon_devices_lock);
134
135 return 0;
136 }
137
138 static int
perfmon_dev_clone(dev_t dev,int action)139 perfmon_dev_clone(dev_t dev, int action)
140 {
141 int minor = 0;
142
143 lck_mtx_lock(&perfmon_devices_lock);
144
145 switch (action) {
146 case DEVFS_CLONE_ALLOC:;
147 int source_index = perfmon_dev_get_source_index(dev);
148 for (unsigned int i = 0; i < PERFMON_DEVICES_MAX; i++) {
149 struct perfmon_device *device = &perfmon_devices[source_index][i];
150 if (!device->pmdv_allocated) {
151 minor = i;
152 break;
153 }
154 }
155 // Returning non-zero from the alloc action hangs devfs, so let the open
156 // handler figure out that EMFILE should be returned.
157 break;
158 case DEVFS_CLONE_FREE:
159 // Nothing to do since a device wasn't allocated until the call to open.
160 break;
161 default:
162 minor = -1;
163 break;
164 }
165
166 lck_mtx_unlock(&perfmon_devices_lock);
167
168 return minor;
169 }
170
171 static int
perfmon_dev_close(dev_t dev,int __unused flags,int __unused devtype,proc_t __unused p)172 perfmon_dev_close(dev_t dev, int __unused flags, int __unused devtype,
173 proc_t __unused p)
174 {
175 lck_mtx_lock(&perfmon_devices_lock);
176
177 struct perfmon_device *device = perfmon_dev_get_device(dev);
178
179 lck_mtx_lock(&device->pmdv_mutex);
180
181 if (!device->pmdv_allocated) {
182 panic("perfmon: no device allocated to close: 0x%x", dev);
183 }
184 device->pmdv_allocated = false;
185 struct perfmon_source *source = perfmon_dev_get_source(dev);
186 kfree_data(device->pmdv_copyout_buf, perfmon_device_copyout_size(source));
187 device->pmdv_copyout_buf = NULL;
188 if (device->pmdv_config) {
189 perfmon_release(source->ps_kind, "perfmon");
190 perfmon_config_destroy(device->pmdv_config);
191 device->pmdv_config = NULL;
192 }
193
194 lck_mtx_unlock(&device->pmdv_mutex);
195 lck_mtx_unlock(&perfmon_devices_lock);
196
197 return 0;
198 }
199
200 static int
perfmon_dev_ioctl(dev_t dev,unsigned long cmd,char * arg,int __unused fflag,proc_t __unused p)201 perfmon_dev_ioctl(dev_t dev, unsigned long cmd, char *arg,
202 int __unused fflag, proc_t __unused p)
203 {
204 struct perfmon_device *device = perfmon_dev_get_device(dev);
205 struct perfmon_source *source = perfmon_dev_get_source(dev);
206 int ret = 0;
207
208 lck_mtx_lock(&device->pmdv_mutex);
209
210 unsigned short reg_count = source->ps_layout.pl_reg_count;
211 unsigned short unit_count = source->ps_layout.pl_unit_count;
212
213 switch (cmd) {
214 case PERFMON_CTL_GET_LAYOUT:;
215 struct perfmon_layout *layout = (void *)arg;
216 *layout = source->ps_layout;
217 ret = 0;
218 break;
219
220 case PERFMON_CTL_LIST_REGS: {
221 user_addr_t uptr = *(user_addr_t *)(void *)arg;
222 size_t names_size = reg_count * sizeof(source->ps_register_names[0]);
223 ret = copyout(source->ps_register_names, uptr, names_size);
224 break;
225 }
226
227 case PERFMON_CTL_SAMPLE_REGS: {
228 user_addr_t uptr = *(user_addr_t *)(void *)arg;
229 uint64_t *sample_buf = device->pmdv_copyout_buf;
230 size_t sample_size = reg_count * unit_count * sizeof(sample_buf[0]);
231 perfmon_source_sample_regs(source, sample_buf, reg_count);
232 ret = copyout(sample_buf, uptr, sample_size);
233 break;
234 }
235
236 case PERFMON_CTL_LIST_ATTRS: {
237 user_addr_t uptr = *(user_addr_t *)(void *)arg;
238 unsigned short attr_count = source->ps_layout.pl_attr_count;
239 const perfmon_name_t *attrs_buf = source->ps_attribute_names;
240 size_t attrs_size = attr_count * sizeof(attrs_buf[0]);
241 ret = copyout(attrs_buf, uptr, attrs_size);
242 break;
243 }
244
245 case PERFMON_CTL_ADD_EVENT:
246 if (device->pmdv_config) {
247 struct perfmon_event *event = (void *)arg;
248 event->pe_name[sizeof(event->pe_name) - 1] = '\0';
249 ret = perfmon_config_add_event(device->pmdv_config, event);
250 } else {
251 ret = EBADF;
252 }
253 break;
254
255 case PERFMON_CTL_SET_ATTR:
256 if (device->pmdv_config) {
257 struct perfmon_attr *attr = (void *)arg;
258 attr->pa_name[sizeof(attr->pa_name) - 1] = '\0';
259 ret = perfmon_config_set_attr(device->pmdv_config, attr);
260 } else {
261 ret = EBADF;
262 }
263 break;
264
265 case PERFMON_CTL_CONFIGURE:
266 if (device->pmdv_config) {
267 ret = perfmon_configure(device->pmdv_config);
268 } else {
269 ret = EBADF;
270 }
271 break;
272
273 case PERFMON_CTL_START:
274 ret = ENOTSUP;
275 break;
276
277 case PERFMON_CTL_STOP:
278 ret = ENOTSUP;
279 break;
280
281 case PERFMON_CTL_SPECIFY:;
282 struct perfmon_config *config = device->pmdv_config;
283 if (config) {
284 struct perfmon_spec *uspec = (void *)arg;
285 struct perfmon_spec *kspec = perfmon_config_specify(config);
286 if (uspec->ps_events) {
287 ret = copyout(kspec->ps_events, (user_addr_t)uspec->ps_events,
288 MIN(uspec->ps_event_count, kspec->ps_event_count));
289 if (0 == ret && uspec->ps_attrs) {
290 ret = copyout(kspec->ps_attrs, (user_addr_t)uspec->ps_attrs,
291 MIN(uspec->ps_attr_count, kspec->ps_attr_count));
292 }
293 }
294 uspec->ps_event_count = kspec->ps_event_count;
295 uspec->ps_attr_count = kspec->ps_event_count;
296 } else {
297 ret = EBADF;
298 }
299 break;
300
301 default:
302 ret = ENOTSUP;
303 break;
304 }
305
306 lck_mtx_unlock(&device->pmdv_mutex);
307
308 return ret;
309 }
310
311 static const struct cdevsw perfmon_cdevsw = {
312 .d_open = perfmon_dev_open, .d_close = perfmon_dev_close,
313 .d_ioctl = perfmon_dev_ioctl,
314
315 .d_read = eno_rdwrt, .d_write = eno_rdwrt, .d_stop = eno_stop,
316 .d_reset = eno_reset, .d_ttys = NULL, .d_select = eno_select,
317 .d_mmap = eno_mmap, .d_strategy = eno_strat, .d_type = 0,
318 };
319
320 int
perfmon_dev_init(void)321 perfmon_dev_init(void)
322 {
323 for (unsigned int i = 0; i < perfmon_kind_max; i++) {
324 struct perfmon_source *source = &perfmon_sources[i];
325 if (!source->ps_supported) {
326 continue;
327 }
328
329 int dmaj = cdevsw_add(-1, &perfmon_cdevsw);
330 if (dmaj < 0) {
331 panic("perfmon: %s: cdevsw_add failed: 0x%x", source->ps_name,
332 dmaj);
333 }
334 perfmon_dev_major_sources[i] = dmaj;
335 void *node = devfs_make_node_clone(makedev(dmaj, 0), DEVFS_CHAR,
336 UID_ROOT, GID_WHEEL, 0666, perfmon_dev_clone, "perfmon_%s",
337 source->ps_name);
338 if (!node) {
339 panic("perfmon: %s: devfs_make_node_clone failed",
340 source->ps_name);
341 }
342
343 for (size_t j = 0; j < PERFMON_DEVICES_MAX; j++) {
344 lck_mtx_init(&perfmon_devices[i][j].pmdv_mutex,
345 &perfmon_dev_lock_group, NULL);
346 }
347 }
348
349 return 0;
350 }
351