xref: /xnu-8020.101.4/bsd/dev/dtrace/profile_prvd.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #include <kern/cpu_data.h>
27 #include <kern/thread.h>
28 #include <kern/assert.h>
29 #include <mach/thread_status.h>
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/errno.h>
34 #include <sys/stat.h>
35 #include <sys/ioctl.h>
36 #include <sys/conf.h>
37 #include <sys/fcntl.h>
38 #include <miscfs/devfs/devfs.h>
39 
40 #include <sys/dtrace.h>
41 #include <sys/dtrace_impl.h>
42 
43 #include <sys/dtrace_glue.h>
44 
45 #include <machine/pal_routines.h>
46 
/*
 * Per-architecture accessor for the interrupted kernel register state of
 * the current thread.  Returns NULL (on x86) when the kernel was not the
 * interrupted context — see its use in profile_fire()/profile_tick().
 */
#if defined(__x86_64__)
extern x86_saved_state_t *find_kern_regs(thread_t);
#elif defined (__arm__) || defined(__arm64__)
extern struct arm_saved_state *find_kern_regs(thread_t);
#else
#error Unknown architecture
#endif

/* Entry point called at boot to wire up the device node (defined below). */
extern void profile_init(void);

/* Provider id handed back by dtrace_register() in profile_attach(). */
static dtrace_provider_id_t profile_id;
58 
59 /*
60  * Regardless of platform, the stack frames look like this in the case of the
61  * profile provider:
62  *
63  *	profile_fire
64  *	cyclic_expire
65  *	cyclic_fire
66  *	[ cbe ]
67  *	[ interrupt code ]
68  *
69  * On x86, there are five frames from the generic interrupt code; further, the
70  * interrupted instruction appears as its own stack frame, giving us a total of
71  * 10.
72  *
73  * On SPARC, the picture is further complicated because the compiler
74  * optimizes away tail-calls -- so the following frames are optimized away:
75  *
76  *      profile_fire
77  *	cyclic_expire
78  *
79  * This gives three frames.  However, on DEBUG kernels, the cyclic_expire
80  * frame cannot be tail-call eliminated, yielding four frames in this case.
81  *
82  * All of the above constraints lead to the mess below.  Yes, the profile
83  * provider should ideally figure this out on-the-fly by hitting one of its own
84  * probes and then walking its own stack trace.  This is complicated, however,
85  * and the static definition doesn't seem to be overly brittle.  Still, we
86  * allow for a manual override in case we get it completely wrong.
87  */
88 
/*
 * Number of artificial stack frames (interrupt plumbing between the
 * interrupted PC and the probe site) to strip from stack traces; may be
 * overridden via profile_aframes below.
 */
#if defined(__x86_64__)
#define PROF_ARTIFICIAL_FRAMES  9
#elif defined(__arm__) || defined(__arm64__)
#define PROF_ARTIFICIAL_FRAMES 8
#else
#error Unknown architecture
#endif

/* Size of the prof_name buffer; copies into it are strlcpy-bounded. */
#define PROF_NAMELEN            15

/* Probe kinds and the probe-name prefixes that select them. */
#define PROF_PROFILE            0
#define PROF_TICK               1
#define PROF_PREFIX_PROFILE     "profile-"
#define PROF_PREFIX_TICK        "tick-"
103 
/* One profile-n or tick-n probe. */
typedef struct profile_probe {
	char            prof_name[PROF_NAMELEN];        /* probe name, e.g. "profile-997" */
	dtrace_id_t     prof_id;                        /* id from dtrace_probe_create() */
	int             prof_kind;                      /* PROF_PROFILE or PROF_TICK */
	hrtime_t        prof_interval;                  /* firing interval, nanoseconds */
	cyclic_id_t     prof_cyclic;                    /* backing cyclic, or CYCLIC_NONE */
} profile_probe_t;

/*
 * Per-CPU firing state for profile-n probes.  An NCPU-sized array of these
 * is allocated immediately after the profile_probe_t itself (see
 * profile_create()/profile_online()).
 */
typedef struct profile_probe_percpu {
	hrtime_t        profc_expected;         /* next expected fire time */
	hrtime_t        profc_interval;         /* cached copy of prof_interval */
	profile_probe_t *profc_probe;           /* back-pointer to owning probe */
} profile_probe_percpu_t;
117 
hrtime_t        profile_interval_min = NANOSEC / 5000;          /* floor: 5000 hz */
int             profile_aframes = 0;                            /* aframes override */

/* Default "profile-n" rates in hz; zero entries are unused slots. */
static int profile_rates[] = {
	97, 199, 499, 997, 1999,
	4001, 4999, 0, 0, 0,
	0, 0, 0, 0, 0,
	0, 0, 0, 0, 0
};

/* Default "tick-n" rates in hz; zero entries are unused slots. */
static int profile_ticks[] = {
	1, 10, 100, 500, 1000,
	5000, 0, 0, 0, 0,
	0, 0, 0, 0, 0
};

/*
 * profile_max defines the upper bound on the number of profile probes that
 * can exist (this is to prevent malicious or clumsy users from exhausting
 * system resources by creating a slew of profile probes). At mod load time,
 * this gets its value from PROFILE_MAX_DEFAULT or profile-max-probes if it's
 * present in the profile.conf file.
 */
#define PROFILE_MAX_DEFAULT     1000    /* default max. number of probes */
static uint32_t profile_max;            /* maximum number of profile probes */
static uint32_t profile_total;  /* current number of profile probes */
144 
/*
 * Cyclic handler for profile-n probes; installed per-CPU by
 * profile_online() at CY_HIGH_LEVEL.  Fires the probe with:
 *   arg0 = kernel PC (0 if the interrupt came from user mode)
 *   arg1 = user PC   (0 if the interrupt came from kernel mode)
 *   arg2 = lateness: how far past the scheduled fire time we are, in ns
 */
static void
profile_fire(void *arg)
{
	profile_probe_percpu_t *pcpu = arg;
	profile_probe_t *prof = pcpu->profc_probe;
	hrtime_t late;

	/* Measure lateness against the schedule, then advance the schedule. */
	late = dtrace_gethrtime() - pcpu->profc_expected;
	pcpu->profc_expected += pcpu->profc_interval;

#if defined(__x86_64__)
	x86_saved_state_t *kern_regs = find_kern_regs(current_thread());

	if (NULL != kern_regs) {
		/* Kernel was interrupted. */
		dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, late, 0, 0);
	} else {
		/* Ensure the user register cache is usable before reading it. */
		pal_register_cache_state(current_thread(), VALID);
		/* Possibly a user interrupt */
		x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());

		if (NULL == tagged_regs) {
			/* Too bad, so sad, no useful interrupt state. */
			dtrace_probe(prof->prof_id, 0xcafebabe,
			    0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
		} else if (is_saved_state64(tagged_regs)) {
			x86_saved_state64_t *regs = saved_state64(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, late, 0, 0);
		} else {
			x86_saved_state32_t *regs = saved_state32(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->eip, late, 0, 0);
		}
	}
#elif defined(__arm__)
	{
		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());

		// We should only come in here from interrupt context, so we should always have valid kernel regs
		assert(NULL != arm_kern_regs);

		/* Low nibble of cpsr holds the mode bits; zero means the
		 * interrupted context was user mode. */
		if (arm_kern_regs->cpsr & 0xF) {
			/* Kernel was interrupted. */
			dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, late, 0, 0);
		} else {
			/* Possibly a user interrupt */
			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());

			if (NULL == arm_user_regs) {
				/* Too bad, so sad, no useful interrupt state. */
				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
			} else {
				dtrace_probe(prof->prof_id, 0x0, arm_user_regs->pc, late, 0, 0);
			}
		}
	}
#elif defined(__arm64__)
	{
		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());

		// We should only come in here from interrupt context, so we should always have valid kernel regs
		assert(NULL != arm_kern_regs);

		/* Low nibble of cpsr holds the mode/EL bits; zero means the
		 * interrupted context was user mode (EL0). */
		if (saved_state64(arm_kern_regs)->cpsr & 0xF) {
			/* Kernel was interrupted. */
			dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, late, 0, 0);
		} else {
			/* Possibly a user interrupt */
			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());

			if (NULL == arm_user_regs) {
				/* Too bad, so sad, no useful interrupt state. */
				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, late, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
			} else {
				dtrace_probe(prof->prof_id, 0x0, get_saved_state_pc(arm_user_regs), late, 0, 0);
			}
		}
	}
#else
#error Unknown architecture
#endif
}
228 
/*
 * Cyclic handler for tick-n probes.  Unlike profile_fire() this runs on a
 * single CPU and carries no lateness value (arg2 is always 0):
 *   arg0 = kernel PC (0 if the interrupt came from user mode)
 *   arg1 = user PC   (0 if the interrupt came from kernel mode)
 */
static void
profile_tick(void *arg)
{
	profile_probe_t *prof = arg;

#if defined(__x86_64__)
	x86_saved_state_t *kern_regs = find_kern_regs(current_thread());

	if (NULL != kern_regs) {
		/* Kernel was interrupted. */
		dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip, 0x0, 0, 0, 0);
	} else {
		/* Ensure the user register cache is usable before reading it. */
		pal_register_cache_state(current_thread(), VALID);
		/* Possibly a user interrupt */
		x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());

		if (NULL == tagged_regs) {
			/* Too bad, so sad, no useful interrupt state. */
			dtrace_probe(prof->prof_id, 0xcafebabe,
			    0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
		} else if (is_saved_state64(tagged_regs)) {
			x86_saved_state64_t *regs = saved_state64(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, 0, 0, 0);
		} else {
			x86_saved_state32_t *regs = saved_state32(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->eip, 0, 0, 0);
		}
	}
#elif defined(__arm__)
	{
		/* Note: unlike profile_fire(), a NULL result is tolerated here. */
		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());

		if (NULL != arm_kern_regs) {
			/* Kernel was interrupted. */
			dtrace_probe(prof->prof_id, arm_kern_regs->pc, 0x0, 0, 0, 0);
		} else {
			/* Possibly a user interrupt */
			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());

			if (NULL == arm_user_regs) {
				/* Too bad, so sad, no useful interrupt state. */
				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
			} else {
				dtrace_probe(prof->prof_id, 0x0, arm_user_regs->pc, 0, 0, 0);
			}
		}
	}
#elif defined(__arm64__)
	{
		/* Note: unlike profile_fire(), a NULL result is tolerated here. */
		arm_saved_state_t *arm_kern_regs = (arm_saved_state_t *) find_kern_regs(current_thread());

		if (NULL != arm_kern_regs) {
			/* Kernel was interrupted. */
			dtrace_probe(prof->prof_id, saved_state64(arm_kern_regs)->pc, 0x0, 0, 0, 0);
		} else {
			/* Possibly a user interrupt */
			arm_saved_state_t   *arm_user_regs = (arm_saved_state_t *)find_user_regs(current_thread());

			if (NULL == arm_user_regs) {
				/* Too bad, so sad, no useful interrupt state. */
				dtrace_probe(prof->prof_id, 0xcafebabe, 0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
			} else {
				dtrace_probe(prof->prof_id, 0x0, get_saved_state_pc(arm_user_regs), 0, 0, 0);
			}
		}
	}

#else
#error Unknown architecture
#endif
}
302 
/*
 * Create a single profile probe named `name`, firing every `interval`
 * nanoseconds.  `kind` is PROF_PROFILE or PROF_TICK.  Silently refuses
 * intervals below profile_interval_min, duplicate names, and creations
 * beyond profile_max.
 */
static void
profile_create(hrtime_t interval, const char *name, int kind)
{
	profile_probe_t *prof;

	/* Refuse intervals shorter than the system-wide floor. */
	if (interval < profile_interval_min) {
		return;
	}

	/* Probe already exists; nothing to do. */
	if (dtrace_probe_lookup(profile_id, NULL, NULL, name) != 0) {
		return;
	}

	/*
	 * Reserve a slot, then back out if we overshot profile_max.
	 * NOTE(review): the increment and the re-read are separate atomic
	 * operations, so concurrent creators may transiently observe a
	 * total above the cap; each over-reservation is rolled back here.
	 */
	os_atomic_inc(&profile_total, relaxed);
	if (profile_total > profile_max) {
		os_atomic_dec(&profile_total, relaxed);
		return;
	}

	/* profile-n probes carry a trailing NCPU-sized per-CPU state array
	 * (consumed by profile_online()); tick-n probes do not. */
	if (PROF_TICK == kind) {
		prof = kmem_zalloc(sizeof(profile_probe_t), KM_SLEEP);
	} else {
		prof = kmem_zalloc(sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t), KM_SLEEP);
	}

	(void) strlcpy(prof->prof_name, name, sizeof(prof->prof_name));
	prof->prof_interval = interval;
	prof->prof_cyclic = CYCLIC_NONE;
	prof->prof_kind = kind;
	prof->prof_id = dtrace_probe_create(profile_id,
	    NULL, NULL, name,
	    profile_aframes ? profile_aframes : PROF_ARTIFICIAL_FRAMES, prof);
}
336 
337 /*ARGSUSED*/
338 static void
profile_provide(void * arg,const dtrace_probedesc_t * desc)339 profile_provide(void *arg, const dtrace_probedesc_t *desc)
340 {
341 #pragma unused(arg) /* __APPLE__ */
342 	int i, j, rate, kind;
343 	hrtime_t val = 0, mult = 1, len;
344 	const char *name, *suffix = NULL;
345 
346 	const struct {
347 		const char *prefix;
348 		int kind;
349 	} types[] = {
350 		{ PROF_PREFIX_PROFILE, PROF_PROFILE },
351 		{ PROF_PREFIX_TICK, PROF_TICK },
352 		{ NULL, 0 }
353 	};
354 
355 	const struct {
356 		const char *name;
357 		hrtime_t mult;
358 	} suffixes[] = {
359 		{ "ns", NANOSEC / NANOSEC },
360 		{ "nsec", NANOSEC / NANOSEC },
361 		{ "us", NANOSEC / MICROSEC },
362 		{ "usec", NANOSEC / MICROSEC },
363 		{ "ms", NANOSEC / MILLISEC },
364 		{ "msec", NANOSEC / MILLISEC },
365 		{ "s", NANOSEC / SEC },
366 		{ "sec", NANOSEC / SEC },
367 		{ "m", NANOSEC * (hrtime_t)60 },
368 		{ "min", NANOSEC * (hrtime_t)60 },
369 		{ "h", NANOSEC * (hrtime_t)(60 * 60) },
370 		{ "hour", NANOSEC * (hrtime_t)(60 * 60) },
371 		{ "d", NANOSEC * (hrtime_t)(24 * 60 * 60) },
372 		{ "day", NANOSEC * (hrtime_t)(24 * 60 * 60) },
373 		{ "hz", 0 },
374 		{ NULL, 0 }
375 	};
376 
377 	if (desc == NULL) {
378 		char n[PROF_NAMELEN];
379 
380 		/*
381 		 * If no description was provided, provide all of our probes.
382 		 */
383 		for (i = 0; i < (int)(sizeof(profile_rates) / sizeof(int)); i++) {
384 			if ((rate = profile_rates[i]) == 0) {
385 				continue;
386 			}
387 
388 			(void) snprintf(n, PROF_NAMELEN, "%s%d",
389 			    PROF_PREFIX_PROFILE, rate);
390 			profile_create(NANOSEC / rate, n, PROF_PROFILE);
391 		}
392 
393 		for (i = 0; i < (int)(sizeof(profile_ticks) / sizeof(int)); i++) {
394 			if ((rate = profile_ticks[i]) == 0) {
395 				continue;
396 			}
397 
398 			(void) snprintf(n, PROF_NAMELEN, "%s%d",
399 			    PROF_PREFIX_TICK, rate);
400 			profile_create(NANOSEC / rate, n, PROF_TICK);
401 		}
402 
403 		return;
404 	}
405 
406 	name = desc->dtpd_name;
407 
408 	for (i = 0; types[i].prefix != NULL; i++) {
409 		len = strlen(types[i].prefix);
410 
411 		if (strncmp(name, types[i].prefix, len) != 0) {
412 			continue;
413 		}
414 		break;
415 	}
416 
417 	if (types[i].prefix == NULL) {
418 		return;
419 	}
420 
421 	kind = types[i].kind;
422 	j = strlen(name) - len;
423 
424 	/*
425 	 * We need to start before any time suffix.
426 	 */
427 	for (j = strlen(name); j >= len; j--) {
428 		if (name[j] >= '0' && name[j] <= '9') {
429 			break;
430 		}
431 		suffix = &name[j];
432 	}
433 
434 	if (!suffix) {
435 		suffix = &name[strlen(name)];
436 	}
437 
438 	/*
439 	 * Now determine the numerical value present in the probe name.
440 	 */
441 	for (; j >= len; j--) {
442 		if (name[j] < '0' || name[j] > '9') {
443 			return;
444 		}
445 
446 		val += (name[j] - '0') * mult;
447 		mult *= (hrtime_t)10;
448 	}
449 
450 	if (val == 0) {
451 		return;
452 	}
453 
454 	/*
455 	 * Look-up the suffix to determine the multiplier.
456 	 */
457 	for (i = 0, mult = 0; suffixes[i].name != NULL; i++) {
458 		/* APPLE NOTE: Darwin employs size bounded string operations */
459 		if (strncasecmp(suffixes[i].name, suffix, strlen(suffixes[i].name) + 1) == 0) {
460 			mult = suffixes[i].mult;
461 			break;
462 		}
463 	}
464 
465 	if (suffixes[i].name == NULL && *suffix != '\0') {
466 		return;
467 	}
468 
469 	if (mult == 0) {
470 		/*
471 		 * The default is frequency-per-second.
472 		 */
473 		val = NANOSEC / val;
474 	} else {
475 		val *= mult;
476 	}
477 
478 	profile_create(val, name, kind);
479 }
480 
481 /*ARGSUSED*/
482 static void
profile_destroy(void * arg,dtrace_id_t id,void * parg)483 profile_destroy(void *arg, dtrace_id_t id, void *parg)
484 {
485 #pragma unused(arg,id) /* __APPLE__ */
486 	profile_probe_t *prof = parg;
487 
488 	ASSERT(prof->prof_cyclic == CYCLIC_NONE);
489 
490 	if (prof->prof_kind == PROF_TICK) {
491 		kmem_free(prof, sizeof(profile_probe_t));
492 	} else {
493 		kmem_free(prof, sizeof(profile_probe_t) + NCPU * sizeof(profile_probe_percpu_t));
494 	}
495 
496 	ASSERT(profile_total >= 1);
497 	os_atomic_dec(&profile_total, relaxed);
498 }
499 
500 /*ARGSUSED*/
501 static void
profile_online(void * arg,dtrace_cpu_t * cpu,cyc_handler_t * hdlr,cyc_time_t * when)502 profile_online(void *arg, dtrace_cpu_t *cpu, cyc_handler_t *hdlr, cyc_time_t *when)
503 {
504 #pragma unused(cpu) /* __APPLE__ */
505 	profile_probe_t *prof = arg;
506 	profile_probe_percpu_t *pcpu;
507 
508 	pcpu = ((profile_probe_percpu_t *)(&(prof[1]))) + cpu_number();
509 	pcpu->profc_probe = prof;
510 
511 	hdlr->cyh_func = profile_fire;
512 	hdlr->cyh_arg = pcpu;
513 	hdlr->cyh_level = CY_HIGH_LEVEL;
514 
515 	when->cyt_interval = prof->prof_interval;
516 	when->cyt_when = dtrace_gethrtime() + when->cyt_interval;
517 
518 	pcpu->profc_expected = when->cyt_when;
519 	pcpu->profc_interval = when->cyt_interval;
520 }
521 
/*ARGSUSED*/
/*
 * Omni-cyclic per-CPU offline callback.  Nothing to tear down — the
 * per-CPU state is part of the probe's own allocation — so just
 * sanity-check that the state belongs to this probe.
 */
static void
profile_offline(void *arg, dtrace_cpu_t *cpu, void *oarg)
{
	profile_probe_percpu_t *pcpu = oarg;

	ASSERT(pcpu->profc_probe == arg);
#pragma unused(pcpu,arg,cpu) /* __APPLE__ */
}
531 
532 /*ARGSUSED*/
533 static int
profile_enable(void * arg,dtrace_id_t id,void * parg)534 profile_enable(void *arg, dtrace_id_t id, void *parg)
535 {
536 #pragma unused(arg,id) /* __APPLE__ */
537 	profile_probe_t *prof = parg;
538 	cyc_omni_handler_t omni;
539 	cyc_handler_t hdlr;
540 	cyc_time_t when;
541 
542 	ASSERT(prof->prof_interval != 0);
543 	ASSERT(MUTEX_HELD(&cpu_lock));
544 
545 	if (prof->prof_kind == PROF_TICK) {
546 		hdlr.cyh_func = profile_tick;
547 		hdlr.cyh_arg = prof;
548 		hdlr.cyh_level = CY_HIGH_LEVEL;
549 
550 		when.cyt_interval = prof->prof_interval;
551 #if !defined(__APPLE__)
552 		when.cyt_when = dtrace_gethrtime() + when.cyt_interval;
553 #else
554 		when.cyt_when = 0;
555 #endif /* __APPLE__ */
556 	} else {
557 		ASSERT(prof->prof_kind == PROF_PROFILE);
558 		omni.cyo_online = profile_online;
559 		omni.cyo_offline = profile_offline;
560 		omni.cyo_arg = prof;
561 	}
562 
563 	if (prof->prof_kind == PROF_TICK) {
564 		prof->prof_cyclic = cyclic_timer_add(&hdlr, &when);
565 	} else {
566 		prof->prof_cyclic = (cyclic_id_t)cyclic_add_omni(&omni); /* cast puns cyclic_id_list_t with cyclic_id_t */
567 	}
568 
569 	return 0;
570 }
571 
572 /*ARGSUSED*/
573 static void
profile_disable(void * arg,dtrace_id_t id,void * parg)574 profile_disable(void *arg, dtrace_id_t id, void *parg)
575 {
576 	profile_probe_t *prof = parg;
577 
578 	ASSERT(prof->prof_cyclic != CYCLIC_NONE);
579 	ASSERT(MUTEX_HELD(&cpu_lock));
580 
581 #pragma unused(arg,id)
582 	if (prof->prof_kind == PROF_TICK) {
583 		cyclic_timer_remove(prof->prof_cyclic);
584 	} else {
585 		cyclic_remove_omni((cyclic_id_list_t)prof->prof_cyclic); /* cast puns cyclic_id_list_t with cyclic_id_t */
586 	}
587 	prof->prof_cyclic = CYCLIC_NONE;
588 }
589 
590 static uint64_t
profile_getarg(void * arg,dtrace_id_t id,void * parg,int argno,int aframes)591 profile_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
592 {
593 #pragma unused(arg, id, parg, argno, aframes)
594 	/*
595 	 * All the required arguments for the profile probe are passed directly
596 	 * to dtrace_probe, and we do not go through dtrace_getarg which doesn't
597 	 * know how to hop to the kernel stack from the interrupt stack like
598 	 * dtrace_getpcstack
599 	 */
600 	return 0;
601 }
602 
603 static void
profile_getargdesc(void * arg,dtrace_id_t id,void * parg,dtrace_argdesc_t * desc)604 profile_getargdesc(void *arg, dtrace_id_t id, void *parg, dtrace_argdesc_t *desc)
605 {
606 #pragma unused(arg, id)
607 	profile_probe_t *prof = parg;
608 	const char *argdesc = NULL;
609 	switch (desc->dtargd_ndx) {
610 	case 0:
611 		argdesc = "void*";
612 		break;
613 	case 1:
614 		argdesc = "user_addr_t";
615 		break;
616 	case 2:
617 		if (prof->prof_kind == PROF_PROFILE) {
618 			argdesc = "hrtime_t";
619 		}
620 		break;
621 	}
622 	if (argdesc) {
623 		strlcpy(desc->dtargd_native, argdesc, DTRACE_ARGTYPELEN);
624 	} else {
625 		desc->dtargd_ndx = DTRACE_ARGNONE;
626 	}
627 }
628 
629 /*
630  * APPLE NOTE:  profile_usermode call not supported.
631  */
632 static int
profile_usermode(void * arg,dtrace_id_t id,void * parg)633 profile_usermode(void *arg, dtrace_id_t id, void *parg)
634 {
635 #pragma unused(arg,id,parg)
636 	return 1; /* XXX_BOGUS */
637 }
638 
/*
 * Stability attributes reported for the profile provider; rows follow
 * the dtrace_pattr_t layout (presumably provider/mod/func/name/args —
 * confirm against sys/dtrace.h).
 */
static dtrace_pattr_t profile_attr = {
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_UNSTABLE, DTRACE_STABILITY_UNSTABLE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
};
646 
/* Provider operations vector handed to dtrace_register() below. */
static dtrace_pops_t profile_pops = {
	.dtps_provide =         profile_provide,
	.dtps_provide_module =  NULL,
	.dtps_enable =          profile_enable,
	.dtps_disable =         profile_disable,
	.dtps_suspend =         NULL,
	.dtps_resume =          NULL,
	.dtps_getargdesc =      profile_getargdesc,
	.dtps_getargval =       profile_getarg,
	.dtps_usermode =        profile_usermode,
	.dtps_destroy =         profile_destroy
};
659 
660 static int
profile_attach(dev_info_t * devi)661 profile_attach(dev_info_t *devi)
662 {
663 	if (ddi_create_minor_node(devi, "profile", S_IFCHR, 0,
664 	    DDI_PSEUDO, 0) == DDI_FAILURE ||
665 	    dtrace_register("profile", &profile_attr,
666 	    DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER, NULL,
667 	    &profile_pops, NULL, &profile_id) != 0) {
668 		ddi_remove_minor_node(devi, NULL);
669 		return DDI_FAILURE;
670 	}
671 
672 	profile_max = PROFILE_MAX_DEFAULT;
673 
674 	return DDI_SUCCESS;
675 }
676 
/*
 * APPLE NOTE:  profile_detach not implemented
 */
#if !defined(__APPLE__)
/*
 * Solaris-only teardown path: unregister the provider and remove the
 * device node.  Compiled out on Darwin (see APPLE NOTE above).
 */
static int
profile_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_DETACH:
		break;
	case DDI_SUSPEND:
		/* Nothing to quiesce. */
		return DDI_SUCCESS;
	default:
		return DDI_FAILURE;
	}

	/* Fails if any probes are still enabled. */
	if (dtrace_unregister(profile_id) != 0) {
		return DDI_FAILURE;
	}

	ddi_remove_minor_node(devi, NULL);
	return DDI_SUCCESS;
}
#endif /* __APPLE__ */
701 
702 d_open_t _profile_open;
703 
704 int
_profile_open(dev_t dev,int flags,int devtype,struct proc * p)705 _profile_open(dev_t dev, int flags, int devtype, struct proc *p)
706 {
707 #pragma unused(dev,flags,devtype,p)
708 	return 0;
709 }
710 
#define PROFILE_MAJOR  -24 /* let the kernel pick the device number */

/*
 * Character-device switch for /dev/profile.  Only open() does anything;
 * every other entry point errors out (eno_*) or is a no-op (nulldev).
 */
static const struct cdevsw profile_cdevsw =
{
	.d_open = _profile_open,
	.d_close = eno_opcl,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = eno_ioctl,
	.d_stop = (stop_fcn_t *)nulldev,
	.d_reset = (reset_fcn_t *)nulldev,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
};
728 
729 void
profile_init(void)730 profile_init( void )
731 {
732 	int majdevno = cdevsw_add(PROFILE_MAJOR, &profile_cdevsw);
733 
734 	if (majdevno < 0) {
735 		printf("profile_init: failed to allocate a major number!\n");
736 		return;
737 	}
738 
739 	profile_attach((dev_info_t*)(uintptr_t)majdevno);
740 }
741 #undef PROFILE_MAJOR
742