// xref: /xnu-10002.1.13/bsd/dev/dev_perfmon.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 // Copyright (c) 2020 Apple Inc. All rights reserved.
2 //
3 // @APPLE_OSREFERENCE_LICENSE_HEADER_START@
4 //
5 // This file contains Original Code and/or Modifications of Original Code
6 // as defined in and that are subject to the Apple Public Source License
7 // Version 2.0 (the 'License'). You may not use this file except in
8 // compliance with the License. The rights granted to you under the License
9 // may not be used to create, or enable the creation or redistribution of,
10 // unlawful or unlicensed copies of an Apple operating system, or to
11 // circumvent, violate, or enable the circumvention or violation of, any
12 // terms of an Apple operating system software license agreement.
13 //
14 // Please obtain a copy of the License at
15 // http://www.opensource.apple.com/apsl/ and read it before using this file.
16 //
17 // The Original Code and all software distributed under the License are
18 // distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
19 // EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
20 // INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
21 // FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
22 // Please see the License for the specific language governing rights and
23 // limitations under the License.
24 //
25 // @APPLE_OSREFERENCE_LICENSE_HEADER_END@
26 
27 #include <kern/assert.h>
28 #include <kern/kalloc.h>
29 #include <kern/locks.h>
30 #include <kern/perfmon.h>
31 #include <libkern/copyio.h>
32 #include <machine/machine_routines.h>
33 #include <pexpert/pexpert.h>
34 #include <stdbool.h>
35 #include <sys/param.h> /* NULL */
36 #include <sys/stat.h> /* dev_t */
37 #include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
38 #include <sys/conf.h> /* must come after sys/stat.h */
39 #include <sys/perfmon_private.h>
40 #include <sys/sysctl.h>
41 #include <sys/sysproto.h>
42 #include <sys/systm.h>
43 #include <sys/types.h>
44 
// One slot per perfmon source kind; records the character device major number
// registered for that source by perfmon_dev_init.
static unsigned int perfmon_dev_major_sources[perfmon_kind_max] = { 0 };
// Maximum number of cloned minors (simultaneous opens) per source.
static const unsigned int PERFMON_DEVICES_MAX = 4;

LCK_GRP_DECLARE(perfmon_dev_lock_group, "perfmon");

// perfmon_device corresponds to each open file descriptor for perfmon's
// character devices.
struct perfmon_device {
	// Scratch buffer for ioctl copyouts, sized by
	// perfmon_device_copyout_size() and freed on close.
	void *pmdv_copyout_buf;
	// Serializes ioctl and close against each other for this descriptor.
	lck_mtx_t pmdv_mutex;
	// Only set for read-write opens; owns the exclusive source acquire.
	perfmon_config_t pmdv_config;
	// True while this slot is bound to an open descriptor.
	bool pmdv_allocated;
};

// Device state, indexed by [source kind][minor number].
struct perfmon_device perfmon_devices[perfmon_kind_max][PERFMON_DEVICES_MAX]
        = { 0 };
// perfmon_devices is protected by perfmon_devices_lock.  If both a per-device
// mutex and the devices lock are taken together, the devices lock should be
// taken first.
LCK_MTX_DECLARE(perfmon_devices_lock, &perfmon_dev_lock_group);
66 static int
perfmon_dev_get_source_index(dev_t dev)67 perfmon_dev_get_source_index(dev_t dev)
68 {
69 	int dmaj = major(dev);
70 	for (int i = 0; i < perfmon_kind_max; i++) {
71 		if (perfmon_dev_major_sources[i] == dmaj) {
72 			return i;
73 		}
74 	}
75 	panic("perfmon: no source for major device: 0x%x", dev);
76 }
77 
78 static struct perfmon_device *
perfmon_dev_get_device(dev_t dev)79 perfmon_dev_get_device(dev_t dev)
80 {
81 	int source_index = perfmon_dev_get_source_index(dev);
82 	int dmin = minor(dev);
83 	if (dmin >= perfmon_kind_max || dmin < 0) {
84 		return NULL;
85 	}
86 
87 	return &perfmon_devices[source_index][dmin];
88 }
89 
90 static struct perfmon_source *
perfmon_dev_get_source(dev_t dev)91 perfmon_dev_get_source(dev_t dev)
92 {
93 	return &perfmon_sources[perfmon_dev_get_source_index(dev)];
94 }
95 
96 static size_t
perfmon_device_copyout_size(struct perfmon_source * source)97 perfmon_device_copyout_size(struct perfmon_source *source)
98 {
99 	struct perfmon_layout *layout = &source->ps_layout;
100 	size_t counters_size = layout->pl_counter_count * layout->pl_unit_count *
101 	    sizeof(uint64_t);
102 	size_t reg_names_size = layout->pl_reg_count * sizeof(perfmon_name_t);
103 	size_t reg_values_size = layout->pl_reg_count * layout->pl_unit_count *
104 	    sizeof(uint64_t);
105 	size_t attrs_size = layout->pl_attr_count * sizeof(struct perfmon_attr);
106 
107 	return MAX(counters_size, MAX(reg_names_size,
108 	           MAX(attrs_size, reg_values_size)));
109 }
110 
111 static int
perfmon_dev_open(dev_t dev,int flags,int __unused devtype,proc_t __unused p)112 perfmon_dev_open(dev_t dev, int flags, int __unused devtype, proc_t __unused p)
113 {
114 	lck_mtx_lock(&perfmon_devices_lock);
115 	struct perfmon_device *device = perfmon_dev_get_device(dev);
116 	struct perfmon_source *source = perfmon_dev_get_source(dev);
117 	if (!device) {
118 		return ENXIO;
119 	}
120 	if (((flags & O_RDWR) == O_RDWR)) {
121 		if (!perfmon_acquire(source->ps_kind, "perfmon")) {
122 			return ETXTBSY;
123 		}
124 	}
125 	if (device->pmdv_allocated) {
126 		return EMFILE;
127 	}
128 	if (!source->ps_supported) {
129 		panic("perfmon: attempt to open unsupported source: 0x%x", dev);
130 	}
131 	device->pmdv_allocated = true;
132 	device->pmdv_copyout_buf = kalloc_data(perfmon_device_copyout_size(source), Z_WAITOK);
133 	if ((flags & O_RDWR) == O_RDWR) {
134 		device->pmdv_config = perfmon_config_create(source);
135 	}
136 	lck_mtx_unlock(&perfmon_devices_lock);
137 
138 	return 0;
139 }
140 
141 static int
perfmon_dev_clone(dev_t dev,int action)142 perfmon_dev_clone(dev_t dev, int action)
143 {
144 	int minor = 0;
145 
146 	lck_mtx_lock(&perfmon_devices_lock);
147 
148 	switch (action) {
149 	case DEVFS_CLONE_ALLOC:;
150 		int source_index = perfmon_dev_get_source_index(dev);
151 		for (unsigned int i = 0; i < PERFMON_DEVICES_MAX; i++) {
152 			struct perfmon_device *device = &perfmon_devices[source_index][i];
153 			if (!device->pmdv_allocated) {
154 				minor = i;
155 				break;
156 			}
157 		}
158 		// Returning non-zero from the alloc action hangs devfs, so let the open
159 		// handler figure out that EMFILE should be returned.
160 		break;
161 	case DEVFS_CLONE_FREE:
162 		// Nothing to do since a device wasn't allocated until the call to open.
163 		break;
164 	default:
165 		minor = -1;
166 		break;
167 	}
168 
169 	lck_mtx_unlock(&perfmon_devices_lock);
170 
171 	return minor;
172 }
173 
174 static int
perfmon_dev_close(dev_t dev,int __unused flags,int __unused devtype,proc_t __unused p)175 perfmon_dev_close(dev_t dev, int __unused flags, int __unused devtype,
176     proc_t __unused p)
177 {
178 	lck_mtx_lock(&perfmon_devices_lock);
179 
180 	struct perfmon_device *device = perfmon_dev_get_device(dev);
181 
182 	lck_mtx_lock(&device->pmdv_mutex);
183 
184 	if (!device->pmdv_allocated) {
185 		panic("perfmon: no device allocated to close: 0x%x", dev);
186 	}
187 	device->pmdv_allocated = false;
188 	struct perfmon_source *source = perfmon_dev_get_source(dev);
189 	kfree_data(device->pmdv_copyout_buf, perfmon_device_copyout_size(source));
190 	device->pmdv_copyout_buf = NULL;
191 	if (device->pmdv_config) {
192 		perfmon_release(source->ps_kind, "perfmon");
193 		perfmon_config_destroy(device->pmdv_config);
194 		device->pmdv_config = NULL;
195 	}
196 
197 	lck_mtx_unlock(&device->pmdv_mutex);
198 	lck_mtx_unlock(&perfmon_devices_lock);
199 
200 	return 0;
201 }
202 
203 static int
perfmon_dev_ioctl(dev_t dev,unsigned long cmd,char * arg,int __unused fflag,proc_t __unused p)204 perfmon_dev_ioctl(dev_t dev, unsigned long cmd, char *arg,
205     int __unused fflag, proc_t __unused p)
206 {
207 	struct perfmon_device *device = perfmon_dev_get_device(dev);
208 	struct perfmon_source *source = perfmon_dev_get_source(dev);
209 	int ret = 0;
210 
211 	lck_mtx_lock(&device->pmdv_mutex);
212 
213 	unsigned short reg_count = source->ps_layout.pl_reg_count;
214 	unsigned short unit_count = source->ps_layout.pl_unit_count;
215 
216 	switch (cmd) {
217 	case PERFMON_CTL_GET_LAYOUT:;
218 		struct perfmon_layout *layout = (void *)arg;
219 		*layout = source->ps_layout;
220 		ret = 0;
221 		break;
222 
223 	case PERFMON_CTL_LIST_REGS: {
224 		user_addr_t uptr = *(user_addr_t *)(void *)arg;
225 		size_t names_size = reg_count * sizeof(source->ps_register_names[0]);
226 		ret = copyout(source->ps_register_names, uptr, names_size);
227 		break;
228 	}
229 
230 	case PERFMON_CTL_SAMPLE_REGS: {
231 		user_addr_t uptr = *(user_addr_t *)(void *)arg;
232 		uint64_t *sample_buf = device->pmdv_copyout_buf;
233 		size_t sample_size = reg_count * unit_count * sizeof(sample_buf[0]);
234 		perfmon_source_sample_regs(source, sample_buf, reg_count);
235 		ret = copyout(sample_buf, uptr, sample_size);
236 		break;
237 	}
238 
239 	case PERFMON_CTL_LIST_ATTRS: {
240 		user_addr_t uptr = *(user_addr_t *)(void *)arg;
241 		unsigned short attr_count = source->ps_layout.pl_attr_count;
242 		const perfmon_name_t *attrs_buf = source->ps_attribute_names;
243 		size_t attrs_size = attr_count * sizeof(attrs_buf[0]);
244 		ret = copyout(attrs_buf, uptr, attrs_size);
245 		break;
246 	}
247 
248 	case PERFMON_CTL_ADD_EVENT:
249 		if (device->pmdv_config) {
250 			struct perfmon_event *event = (void *)arg;
251 			event->pe_name[sizeof(event->pe_name) - 1] = '\0';
252 			ret = perfmon_config_add_event(device->pmdv_config, event);
253 		} else {
254 			ret = EBADF;
255 		}
256 		break;
257 
258 	case PERFMON_CTL_SET_ATTR:
259 		if (device->pmdv_config) {
260 			struct perfmon_attr *attr = (void *)arg;
261 			attr->pa_name[sizeof(attr->pa_name) - 1] = '\0';
262 			ret = perfmon_config_set_attr(device->pmdv_config, attr);
263 		} else {
264 			ret = EBADF;
265 		}
266 		break;
267 
268 	case PERFMON_CTL_CONFIGURE:
269 		if (device->pmdv_config) {
270 			ret = perfmon_configure(device->pmdv_config);
271 		} else {
272 			ret = EBADF;
273 		}
274 		break;
275 
276 	case PERFMON_CTL_START:
277 		ret = ENOTSUP;
278 		break;
279 
280 	case PERFMON_CTL_STOP:
281 		ret = ENOTSUP;
282 		break;
283 
284 	case PERFMON_CTL_SPECIFY:;
285 		struct perfmon_config *config = device->pmdv_config;
286 		if (config) {
287 			struct perfmon_spec *uspec = (void *)arg;
288 			struct perfmon_spec *kspec = perfmon_config_specify(config);
289 			if (uspec->ps_events) {
290 				ret = copyout(kspec->ps_events, (user_addr_t)uspec->ps_events,
291 				    MIN(uspec->ps_event_count, kspec->ps_event_count));
292 				if (0 == ret && uspec->ps_attrs) {
293 					ret = copyout(kspec->ps_attrs, (user_addr_t)uspec->ps_attrs,
294 					    MIN(uspec->ps_attr_count, kspec->ps_attr_count));
295 				}
296 			}
297 			uspec->ps_event_count = kspec->ps_event_count;
298 			uspec->ps_attr_count = kspec->ps_event_count;
299 		} else {
300 			ret = EBADF;
301 		}
302 		break;
303 
304 	default:
305 		ret = ENOTSUP;
306 		break;
307 	}
308 
309 	lck_mtx_unlock(&device->pmdv_mutex);
310 
311 	return ret;
312 }
313 
// Character device switch shared by all perfmon sources: only open, close,
// and ioctl are implemented; all other entry points return errors.
static const struct cdevsw perfmon_cdevsw = {
	.d_open = perfmon_dev_open, .d_close = perfmon_dev_close,
	.d_ioctl = perfmon_dev_ioctl,

	.d_read = eno_rdwrt, .d_write = eno_rdwrt, .d_stop = eno_stop,
	.d_reset = eno_reset, .d_ttys = NULL, .d_select = eno_select,
	.d_mmap = eno_mmap, .d_strategy = eno_strat, .d_type = 0,
};
322 
323 int
perfmon_dev_init(void)324 perfmon_dev_init(void)
325 {
326 	for (unsigned int i = 0; i < perfmon_kind_max; i++) {
327 		struct perfmon_source *source = &perfmon_sources[i];
328 		if (!source->ps_supported) {
329 			continue;
330 		}
331 
332 		int dmaj = cdevsw_add(-1, &perfmon_cdevsw);
333 		if (dmaj < 0) {
334 			panic("perfmon: %s: cdevsw_add failed: 0x%x", source->ps_name,
335 			    dmaj);
336 		}
337 		perfmon_dev_major_sources[i] = dmaj;
338 		void *node = devfs_make_node_clone(makedev(dmaj, 0), DEVFS_CHAR,
339 		    UID_ROOT, GID_WHEEL, 0666, perfmon_dev_clone, "perfmon_%s",
340 		    source->ps_name);
341 		if (!node) {
342 			panic("perfmon: %s: devfs_make_node_clone failed",
343 			    source->ps_name);
344 		}
345 
346 		for (size_t j = 0; j < PERFMON_DEVICES_MAX; j++) {
347 			lck_mtx_init(&perfmon_devices[i][j].pmdv_mutex,
348 			    &perfmon_dev_lock_group, NULL);
349 		}
350 	}
351 
352 	return 0;
353 }
354