/* xref: /xnu-8019.80.24/bsd/dev/dtrace/profile_prvd.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea) */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <kern/cpu_data.h>
#include <kern/thread.h>
#include <kern/assert.h>
#include <mach/thread_status.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <miscfs/devfs/devfs.h>

#include <sys/dtrace.h>
#include <sys/dtrace_impl.h>

#include <sys/dtrace_glue.h>

#include <machine/pal_routines.h>

47 #if defined(__x86_64__)
48 extern x86_saved_state_t *find_kern_regs(thread_t);
49 #elif defined (__arm__) || defined(__arm64__)
50 extern struct arm_saved_state *find_kern_regs(thread_t);
51 #else
52 #error Unknown architecture
53 #endif
54 
55 #undef ASSERT
56 #define ASSERT(x) do {} while(0)
57 
58 extern void profile_init(void);
59 
60 static dtrace_provider_id_t profile_id;

/*
 * Regardless of platform, the stack frames look like this in the case of the
 * profile provider:
 *
 *	profile_fire
 *	cyclic_expire
 *	cyclic_fire
 *	[ cbe ]
 *	[ interrupt code ]
 *
 * On x86, there are five frames from the generic interrupt code; further, the
 * interrupted instruction appears as its own stack frame, giving us a total of
 * 10.
 *
 * On SPARC, the picture is further complicated because the compiler
 * optimizes away tail-calls -- so the following frames are optimized away:
 *
 *	profile_fire
 *	cyclic_expire
 *
 * This gives three frames.  However, on DEBUG kernels, the cyclic_expire
 * frame cannot be tail-call eliminated, yielding four frames in this case.
 *
 * All of the above constraints lead to the mess below.  Yes, the profile
 * provider should ideally figure this out on-the-fly by hitting one of its own
 * probes and then walking its own stack trace.  This is complicated, however,
 * and the static definition doesn't seem to be overly brittle.  Still, we
 * allow for a manual override in case we get it completely wrong.
 */

#if defined(__x86_64__)
#define PROF_ARTIFICIAL_FRAMES  9
#elif defined(__arm__) || defined(__arm64__)
#define PROF_ARTIFICIAL_FRAMES 8
#else
#error Unknown architecture
#endif

#define PROF_NAMELEN            15

#define PROF_PROFILE            0
#define PROF_TICK               1
#define PROF_PREFIX_PROFILE     "profile-"
#define PROF_PREFIX_TICK        "tick-"

107 typedef struct profile_probe {
108 	char            prof_name[PROF_NAMELEN];
109 	dtrace_id_t     prof_id;
110 	int             prof_kind;
111 	hrtime_t        prof_interval;
112 	cyclic_id_t     prof_cyclic;
113 } profile_probe_t;
114 
115 typedef struct profile_probe_percpu {
116 	hrtime_t        profc_expected;
117 	hrtime_t        profc_interval;
118 	profile_probe_t *profc_probe;
119 } profile_probe_percpu_t;
120 
121 hrtime_t        profile_interval_min = NANOSEC / 5000;          /* 5000 hz */
122 int             profile_aframes = 0;                            /* override */
123 
124 static int profile_rates[] = {
125 	97, 199, 499, 997, 1999,
126 	4001, 4999, 0, 0, 0,
127 	0, 0, 0, 0, 0,
128 	0, 0, 0, 0, 0
129 };
130 
131 static int profile_ticks[] = {
132 	1, 10, 100, 500, 1000,
133 	5000, 0, 0, 0, 0,
134 	0, 0, 0, 0, 0
135 };
136 
137 /*
138  * profile_max defines the upper bound on the number of profile probes that
139  * can exist (this is to prevent malicious or clumsy users from exhausing
140  * system resources by creating a slew of profile probes). At mod load time,
141  * this gets its value from PROFILE_MAX_DEFAULT or profile-max-probes if it's
142  * present in the profile.conf file.
143  */
144 #define PROFILE_MAX_DEFAULT     1000    /* default max. number of probes */
145 static uint32_t profile_max;            /* maximum number of profile probes */
146 static uint32_t profile_total;  /* current number of profile probes */
147 
148 static void
profile_fire(void * arg)149 profile_fire(void *arg)
150 {
151 	profile_probe_percpu_t *pcpu = arg;
152 	profile_probe_t *prof = pcpu->profc_probe;
153 	hrtime_t late;
154 
155 	late = dtrace_gethrtime() - pcpu->profc_expected;
156 	pcpu->profc_expected += pcpu->profc_interval;
157 
158 #if defined(__x86_64__)
159 	x86_saved_state_t *kern_regs = find_kern_regs(current_thread());
160 
161 	if (NULL != kern_regs) {
162 		/* Kernel was interrupted. */
163 		dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, late, 0, 0);
164 	} else {
165 		pal_register_cache_state(current_thread(), VALID);
166 		/* Possibly a user interrupt */
167 		x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());
168 
169 		if (NULL == tagged_regs) {
170 			/* Too bad, so sad, no useful interrupt state. */
171 			dtrace_probe(prof->prof_id, 0xcafebabe,
172 			    0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
173 		} else if (is_saved_state64(tagged_regs)) {
174 			x86_saved_state64_t *regs = saved_state64(tagged_regs);
175 
176 			dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, late, 0, 0);
177 		} else {
178 			x86_saved_state32_t *regs = saved_state32(tagged_regs);
179 
180 			dtrace_probe(prof->prof_id, 0x0, regs->eip, late, 0, 0);
181 		}
182 	}
183 #elif defined(__arm__)
184 	{
185 		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
186 
187 		// We should only come in here from interrupt context, so we should always have valid kernel regs
188 		assert(NULL != arm_kern_regs);
189 
190 		if (arm_kern_regs->cpsr & 0xF) {
191 			/* Kernel was interrupted. */
192 			dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, late, 0, 0);
193 		} else {
194 			/* Possibly a user interrupt */
195 			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());
196 
197 			if (NULL == arm_user_regs) {
198 				/* Too bad, so sad, no useful interrupt state. */
199 				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
200 			} else {
201 				dtrace_probe(prof->prof_id, 0x0, arm_user_regs->pc, late, 0, 0);
202 			}
203 		}
204 	}
205 #elif defined(__arm64__)
206 	{
207 		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
208 
209 		// We should only come in here from interrupt context, so we should always have valid kernel regs
210 		assert(NULL != arm_kern_regs);
211 
212 		if (saved_state64(arm_kern_regs)->cpsr & 0xF) {
213 			/* Kernel was interrupted. */
214 			dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, late, 0, 0);
215 		} else {
216 			/* Possibly a user interrupt */
217 			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());
218 
219 			if (NULL == arm_user_regs) {
220 				/* Too bad, so sad, no useful interrupt state. */
221 				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
222 			} else {
223 				dtrace_probe(prof->prof_id, 0x0, get_saved_state_pc(arm_user_regs), late, 0, 0);
224 			}
225 		}
226 	}
227 #else
228 #error Unknown architecture
229 #endif
230 }
231 
232 static void
profile_tick(void * arg)233 profile_tick(void *arg)
234 {
235 	profile_probe_t *prof = arg;
236 
237 #if defined(__x86_64__)
238 	x86_saved_state_t *kern_regs = find_kern_regs(current_thread());
239 
240 	if (NULL != kern_regs) {
241 		/* Kernel was interrupted. */
242 		dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, 0, 0, 0);
243 	} else {
244 		pal_register_cache_state(current_thread(), VALID);
245 		/* Possibly a user interrupt */
246 		x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());
247 
248 		if (NULL == tagged_regs) {
249 			/* Too bad, so sad, no useful interrupt state. */
250 			dtrace_probe(prof->prof_id, 0xcafebabe,
251 			    0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
252 		} else if (is_saved_state64(tagged_regs)) {
253 			x86_saved_state64_t *regs = saved_state64(tagged_regs);
254 
255 			dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, 0, 0, 0);
256 		} else {
257 			x86_saved_state32_t *regs = saved_state32(tagged_regs);
258 
259 			dtrace_probe(prof->prof_id, 0x0, regs->eip, 0, 0, 0);
260 		}
261 	}
262 #elif defined(__arm__)
263 	{
264 		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
265 
266 		if (NULL != arm_kern_regs) {
267 			/* Kernel was interrupted. */
268 			dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, 0, 0, 0);
269 		} else {
270 			/* Possibly a user interrupt */
271 			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());
272 
273 			if (NULL == arm_user_regs) {
274 				/* Too bad, so sad, no useful interrupt state. */
275 				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
276 			} else {
277 				dtrace_probe(prof->prof_id, 0x0, arm_user_regs->pc, 0, 0, 0);
278 			}
279 		}
280 	}
281 #elif defined(__arm64__)
282 	{
283 		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());
284 
285 		if (NULL != arm_kern_regs) {
286 			/* Kernel was interrupted. */
287 			dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, 0, 0, 0);
288 		} else {
289 			/* Possibly a user interrupt */
290 			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());
291 
292 			if (NULL == arm_user_regs) {
293 				/* Too bad, so sad, no useful interrupt state. */
294 				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
295 			} else {
296 				dtrace_probe(prof->prof_id, 0x0, get_saved_state_pc(arm_user_regs), 0, 0, 0);
297 			}
298 		}
299 	}
300 
301 #else
302 #error Unknown architecture
303 #endif
304 }
305 
306 static void
profile_create(hrtime_t interval,const char * name,int kind)307 profile_create(hrtime_t interval, const char *name, int kind)
308 {
309 	profile_probe_t *prof;
310 
311 	if (interval < profile_interval_min) {
312 		return;
313 	}
314 
315 	if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0) {
316 		return;
317 	}
318 
319 	os_atomic_inc(&profile_total, relaxed);
320 	if (profile_total > profile_max) {
321 		os_atomic_dec(&profile_total, relaxed);
322 		return;
323 	}
324 
325 	if (PROF_TICK == kind) {
326 		prof = kmem_zalloc(sizeof(profile_probe_t), KM_SLEEP);
327 	} else {
328 		prof = kmem_zalloc(sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t), KM_SLEEP);
329 	}
330 
331 	(void) strlcpy(prof->prof_name, name, sizeof(prof->prof_name));
332 	prof->prof_interval = interval;
333 	prof->prof_cyclic = CYCLIC_NONE;
334 	prof->prof_kind = kind;
335 	prof->prof_id = dtrace_probe_create(profile_id,
336 	    NULL, NULL, name,
337 	    profile_aframes ? profile_aframes : PROF_ARTIFICIAL_FRAMES, prof);
338 }
339 
340 /*ARGSUSED*/
341 static void
profile_provide(void * arg,const dtrace_probedesc_t * desc)342 profile_provide(void *arg, const dtrace_probedesc_t *desc)
343 {
344 #pragma unused(arg) /* __APPLE__ */
345 	int i, j, rate, kind;
346 	hrtime_t val = 0, mult = 1, len;
347 	const char *name, *suffix = NULL;
348 
349 	const struct {
350 		const char *prefix;
351 		int kind;
352 	} types[] = {
353 		{ PROF_PREFIX_PROFILE, PROF_PROFILE },
354 		{ PROF_PREFIX_TICK, PROF_TICK },
355 		{ NULL, 0 }
356 	};
357 
358 	const struct {
359 		const char *name;
360 		hrtime_t mult;
361 	} suffixes[] = {
362 		{ "ns", NANOSEC / NANOSEC },
363 		{ "nsec", NANOSEC / NANOSEC },
364 		{ "us", NANOSEC / MICROSEC },
365 		{ "usec", NANOSEC / MICROSEC },
366 		{ "ms", NANOSEC / MILLISEC },
367 		{ "msec", NANOSEC / MILLISEC },
368 		{ "s", NANOSEC / SEC },
369 		{ "sec", NANOSEC / SEC },
370 		{ "m", NANOSEC * (hrtime_t)60 },
371 		{ "min", NANOSEC * (hrtime_t)60 },
372 		{ "h", NANOSEC * (hrtime_t)(60 * 60) },
373 		{ "hour", NANOSEC * (hrtime_t)(60 * 60) },
374 		{ "d", NANOSEC * (hrtime_t)(24 * 60 * 60) },
375 		{ "day", NANOSEC * (hrtime_t)(24 * 60 * 60) },
376 		{ "hz", 0 },
377 		{ NULL, 0 }
378 	};
379 
380 	if (desc == NULL) {
381 		char n[PROF_NAMELEN];
382 
383 		/*
384 		 * If no description was provided, provide all of our probes.
385 		 */
386 		for (i = 0; i < (int)(sizeof(profile_rates) / sizeof(int)); i++) {
387 			if ((rate = profile_rates[i]) == 0) {
388 				continue;
389 			}
390 
391 			(void) snprintf(n, PROF_NAMELEN, "%s%d",
392 			    PROF_PREFIX_PROFILE, rate);
393 			profile_create(NANOSEC / rate, n, PROF_PROFILE);
394 		}
395 
396 		for (i = 0; i < (int)(sizeof(profile_ticks) / sizeof(int)); i++) {
397 			if ((rate = profile_ticks[i]) == 0) {
398 				continue;
399 			}
400 
401 			(void) snprintf(n, PROF_NAMELEN, "%s%d",
402 			    PROF_PREFIX_TICK, rate);
403 			profile_create(NANOSEC / rate, n, PROF_TICK);
404 		}
405 
406 		return;
407 	}
408 
409 	name = desc->dtpd_name;
410 
411 	for (i = 0; types[i].prefix != NULL; i++) {
412 		len = strlen(types[i].prefix);
413 
414 		if (strncmp(name, types[i].prefix, len) != 0) {
415 			continue;
416 		}
417 		break;
418 	}
419 
420 	if (types[i].prefix == NULL) {
421 		return;
422 	}
423 
424 	kind = types[i].kind;
425 	j = strlen(name) - len;
426 
427 	/*
428 	 * We need to start before any time suffix.
429 	 */
430 	for (j = strlen(name); j >= len; j--) {
431 		if (name[j] >= '0' && name[j] <= '9') {
432 			break;
433 		}
434 		suffix = &name[j];
435 	}
436 
437 	ASSERT(suffix != NULL);
438 
439 	/*
440 	 * Now determine the numerical value present in the probe name.
441 	 */
442 	for (; j >= len; j--) {
443 		if (name[j] < '0' || name[j] > '9') {
444 			return;
445 		}
446 
447 		val += (name[j] - '0') * mult;
448 		mult *= (hrtime_t)10;
449 	}
450 
451 	if (val == 0) {
452 		return;
453 	}
454 
455 	/*
456 	 * Look-up the suffix to determine the multiplier.
457 	 */
458 	for (i = 0, mult = 0; suffixes[i].name != NULL; i++) {
459 		/* APPLE NOTE: Darwin employs size bounded string operations */
460 		if (strncasecmp(suffixes[i].name, suffix, strlen(suffixes[i].name) + 1) == 0) {
461 			mult = suffixes[i].mult;
462 			break;
463 		}
464 	}
465 
466 	if (suffixes[i].name == NULL && *suffix != '\0') {
467 		return;
468 	}
469 
470 	if (mult == 0) {
471 		/*
472 		 * The default is frequency-per-second.
473 		 */
474 		val = NANOSEC / val;
475 	} else {
476 		val *= mult;
477 	}
478 
479 	profile_create(val, name, kind);
480 }
481 
482 /*ARGSUSED*/
483 static void
profile_destroy(void * arg,dtrace_id_t id,void * parg)484 profile_destroy(void *arg, dtrace_id_t id, void *parg)
485 {
486 #pragma unused(arg,id) /* __APPLE__ */
487 	profile_probe_t *prof = parg;
488 
489 	ASSERT(prof->prof_cyclic == CYCLIC_NONE);
490 
491 	if (prof->prof_kind == PROF_TICK) {
492 		kmem_free(prof, sizeof(profile_probe_t));
493 	} else {
494 		kmem_free(prof, sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t));
495 	}
496 
497 	ASSERT(profile_total >= 1);
498 	os_atomic_dec(&profile_total, relaxed);
499 }
500 
501 /*ARGSUSED*/
502 static void
profile_online(void * arg,dtrace_cpu_t * cpu,cyc_handler_t * hdlr,cyc_time_t * when)503 profile_online(void *arg, dtrace_cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
504 {
505 #pragma unused(cpu) /* __APPLE__ */
506 	profile_probe_t *prof = arg;
507 	profile_probe_percpu_t *pcpu;
508 
509 	pcpu = ((profile_probe_percpu_t *)(&(prof[1]))) + cpu_number();
510 	pcpu->profc_probe = prof;
511 
512 	hdlr->cyh_func = profile_fire;
513 	hdlr->cyh_arg = pcpu;
514 	hdlr->cyh_level = CY_HIGH_LEVEL;
515 
516 	when->cyt_interval = prof->prof_interval;
517 	when->cyt_when = dtrace_gethrtime() + when->cyt_interval;
518 
519 	pcpu->profc_expected = when->cyt_when;
520 	pcpu->profc_interval = when->cyt_interval;
521 }
522 
523 /*ARGSUSED*/
524 static void
profile_offline(void * arg,dtrace_cpu_t * cpu,void * oarg)525 profile_offline(void *arg, dtrace_cpu_t *cpu, void *oarg)
526 {
527 	profile_probe_percpu_t *pcpu = oarg;
528 
529 	ASSERT(pcpu->profc_probe == arg);
530 #pragma unused(pcpu,arg,cpu) /* __APPLE__ */
531 }
532 
533 /*ARGSUSED*/
534 static int
profile_enable(void * arg,dtrace_id_t id,void * parg)535 profile_enable(void *arg, dtrace_id_t id, void *parg)
536 {
537 #pragma unused(arg,id) /* __APPLE__ */
538 	profile_probe_t *prof = parg;
539 	cyc_omni_handler_t omni;
540 	cyc_handler_t hdlr;
541 	cyc_time_t when;
542 
543 	ASSERT(prof->prof_interval != 0);
544 	ASSERT(MUTEX_HELD(&cpu_lock));
545 
546 	if (prof->prof_kind == PROF_TICK) {
547 		hdlr.cyh_func = profile_tick;
548 		hdlr.cyh_arg = prof;
549 		hdlr.cyh_level = CY_HIGH_LEVEL;
550 
551 		when.cyt_interval = prof->prof_interval;
552 #if !defined(__APPLE__)
553 		when.cyt_when = dtrace_gethrtime() + when.cyt_interval;
554 #else
555 		when.cyt_when = 0;
556 #endif /* __APPLE__ */
557 	} else {
558 		ASSERT(prof->prof_kind == PROF_PROFILE);
559 		omni.cyo_online = profile_online;
560 		omni.cyo_offline = profile_offline;
561 		omni.cyo_arg = prof;
562 	}
563 
564 	if (prof->prof_kind == PROF_TICK) {
565 		prof->prof_cyclic = cyclic_timer_add(&hdlr, &when);
566 	} else {
567 		prof->prof_cyclic = (cyclic_id_t)cyclic_add_omni(&omni); /* cast puns cyclic_id_list_t with cyclic_id_t */
568 	}
569 
570 	return 0;
571 }
572 
573 /*ARGSUSED*/
574 static void
profile_disable(void * arg,dtrace_id_t id,void * parg)575 profile_disable(void *arg, dtrace_id_t id, void *parg)
576 {
577 	profile_probe_t *prof = parg;
578 
579 	ASSERT(prof->prof_cyclic != CYCLIC_NONE);
580 	ASSERT(MUTEX_HELD(&cpu_lock));
581 
582 #pragma unused(arg,id)
583 	if (prof->prof_kind == PROF_TICK) {
584 		cyclic_timer_remove(prof->prof_cyclic);
585 	} else {
586 		cyclic_remove_omni((cyclic_id_list_t)prof->prof_cyclic); /* cast puns cyclic_id_list_t with cyclic_id_t */
587 	}
588 	prof->prof_cyclic = CYCLIC_NONE;
589 }
590 
591 static uint64_t
profile_getarg(void * arg,dtrace_id_t id,void * parg,int argno,int aframes)592 profile_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
593 {
594 #pragma unused(arg, id, parg, argno, aframes)
595 	/*
596 	 * All the required arguments for the profile probe are passed directly
597 	 * to dtrace_probe, and we do not go through dtrace_getarg which doesn't
598 	 * know how to hop to the kernel stack from the interrupt stack like
599 	 * dtrace_getpcstack
600 	 */
601 	return 0;
602 }
603 
604 static void
profile_getargdesc(void * arg,dtrace_id_t id,void * parg,dtrace_argdesc_t * desc)605 profile_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
606 {
607 #pragma unused(arg, id)
608 	profile_probe_t *prof = parg;
609 	const char *argdesc = NULL;
610 	switch (desc->dtargd_ndx) {
611 	case 0:
612 		argdesc = "void*";
613 		break;
614 	case 1:
615 		argdesc = "user_addr_t";
616 		break;
617 	case 2:
618 		if (prof->prof_kind == PROF_PROFILE) {
619 			argdesc = "hrtime_t";
620 		}
621 		break;
622 	}
623 	if (argdesc) {
624 		strlcpy(desc->dtargd_native, argdesc, DTRACE_ARGTYPELEN);
625 	} else {
626 		desc->dtargd_ndx = DTRACE_ARGNONE;
627 	}
628 }
629 
630 /*
631  * APPLE NOTE:  profile_usermode call not supported.
632  */
633 static int
profile_usermode(void * arg,dtrace_id_t id,void * parg)634 profile_usermode(void *arg, dtrace_id_t id, void *parg)
635 {
636 #pragma unused(arg,id,parg)
637 	return 1; /* XXX_BOGUS */
638 }
639 
640 static dtrace_pattr_t profile_attr = {
641 	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
642 	{ DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_UNKNOWN },
643 	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
644 	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
645 	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
646 };
647 
648 static dtrace_pops_t profile_pops = {
649 	.dtps_provide =         profile_provide,
650 	.dtps_provide_module =  NULL,
651 	.dtps_enable =          profile_enable,
652 	.dtps_disable =         profile_disable,
653 	.dtps_suspend =         NULL,
654 	.dtps_resume =          NULL,
655 	.dtps_getargdesc =      profile_getargdesc,
656 	.dtps_getargval =       profile_getarg,
657 	.dtps_usermode =        profile_usermode,
658 	.dtps_destroy =         profile_destroy
659 };
660 
661 static int
profile_attach(dev_info_t * devi)662 profile_attach(dev_info_t *devi)
663 {
664 	if (ddi_create_minor_node(devi, "profile", S_IFCHR, 0,
665 	    DDI_PSEUDO, 0) == DDI_FAILURE ||
666 	    dtrace_register("profile", &profile_attr,
667 	    DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER, NULL,
668 	    &profile_pops, NULL, &profile_id) != 0) {
669 		ddi_remove_minor_node(devi, NULL);
670 		return DDI_FAILURE;
671 	}
672 
673 	profile_max = PROFILE_MAX_DEFAULT;
674 
675 	return DDI_SUCCESS;
676 }
677 
678 /*
679  * APPLE NOTE:  profile_detach not implemented
680  */
681 #if !defined(__APPLE__)
682 static int
profile_detach(dev_info_t * devi,ddi_detach_cmd_t cmd)683 profile_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
684 {
685 	switch (cmd) {
686 	case DDI_DETACH:
687 		break;
688 	case DDI_SUSPEND:
689 		return DDI_SUCCESS;
690 	default:
691 		return DDI_FAILURE;
692 	}
693 
694 	if (dtrace_unregister(profile_id) != 0) {
695 		return DDI_FAILURE;
696 	}
697 
698 	ddi_remove_minor_node(devi, NULL);
699 	return DDI_SUCCESS;
700 }
701 #endif /* __APPLE__ */
702 
703 d_open_t _profile_open;
704 
705 int
_profile_open(dev_t dev,int flags,int devtype,struct proc * p)706 _profile_open(dev_t dev, int flags, int devtype, struct proc *p)
707 {
708 #pragma unused(dev,flags,devtype,p)
709 	return 0;
710 }
711 
712 #define PROFILE_MAJOR  -24 /* let the kernel pick the device number */
713 
714 static const struct cdevsw profile_cdevsw =
715 {
716 	.d_open = _profile_open,
717 	.d_close = eno_opcl,
718 	.d_read = eno_rdwrt,
719 	.d_write = eno_rdwrt,
720 	.d_ioctl = eno_ioctl,
721 	.d_stop = (stop_fcn_t *)nulldev,
722 	.d_reset = (reset_fcn_t *)nulldev,
723 	.d_select = eno_select,
724 	.d_mmap = eno_mmap,
725 	.d_strategy = eno_strat,
726 	.d_reserved_1 = eno_getc,
727 	.d_reserved_2 = eno_putc,
728 };
729 
730 void
profile_init(void)731 profile_init( void )
732 {
733 	int majdevno = cdevsw_add(PROFILE_MAJOR, &profile_cdevsw);
734 
735 	if (majdevno < 0) {
736 		printf("profile_init: failed to allocate a major number!\n");
737 		return;
738 	}
739 
740 	profile_attach((dev_info_t*)(uintptr_t)majdevno);
741 }
742 #undef PROFILE_MAJOR
743