1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <sys/types.h>
28 #include <sys/time.h>
29
30 #include <sys/codesign.h>
31 #include <sys/errno.h>
32 #include <sys/stat.h>
33 #include <sys/conf.h>
34 #include <sys/systm.h>
35 #include <sys/kauth.h>
36 #include <sys/utfconv.h>
37
38 #include <sys/fasttrap.h>
39 #include <sys/fasttrap_impl.h>
40 #include <sys/fasttrap_isa.h>
41 #include <sys/dtrace.h>
42 #include <sys/dtrace_impl.h>
43 #include <sys/proc.h>
44
45 #include <security/mac_framework.h>
46
47 #include <miscfs/devfs/devfs.h>
48 #include <sys/proc_internal.h>
49 #include <sys/dtrace_glue.h>
50 #include <sys/dtrace_ptss.h>
51
52 #include <kern/cs_blobs.h>
53 #include <kern/thread.h>
54 #include <kern/zalloc.h>
55
56 #include <mach/thread_act.h>
57
58 extern kern_return_t kernel_thread_start_priority(thread_continue_t continuation, void *parameter, integer_t priority, thread_t *new_thread);
59
60 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
61 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
62
63 __private_extern__
64 void
65 qsort(void *a, size_t n, size_t es, int (*cmp)(const void *, const void *));
66
67 /*
68 * User-Land Trap-Based Tracing
69 * ----------------------------
70 *
71 * The fasttrap provider allows DTrace consumers to instrument any user-level
72 * instruction to gather data; this includes probes with semantic
 * significance like entry and return as well as simple offsets into the
74 * function. While the specific techniques used are very ISA specific, the
75 * methodology is generalizable to any architecture.
76 *
77 *
78 * The General Methodology
79 * -----------------------
80 *
81 * With the primary goal of tracing every user-land instruction and the
82 * limitation that we can't trust user space so don't want to rely on much
83 * information there, we begin by replacing the instructions we want to trace
84 * with trap instructions. Each instruction we overwrite is saved into a hash
85 * table keyed by process ID and pc address. When we enter the kernel due to
86 * this trap instruction, we need the effects of the replaced instruction to
87 * appear to have occurred before we proceed with the user thread's
88 * execution.
89 *
90 * Each user level thread is represented by a ulwp_t structure which is
91 * always easily accessible through a register. The most basic way to produce
92 * the effects of the instruction we replaced is to copy that instruction out
93 * to a bit of scratch space reserved in the user thread's ulwp_t structure
94 * (a sort of kernel-private thread local storage), set the PC to that
95 * scratch space and single step. When we reenter the kernel after single
96 * stepping the instruction we must then adjust the PC to point to what would
97 * normally be the next instruction. Of course, special care must be taken
98 * for branches and jumps, but these represent such a small fraction of any
99 * instruction set that writing the code to emulate these in the kernel is
100 * not too difficult.
101 *
102 * Return probes may require several tracepoints to trace every return site,
103 * and, conversely, each tracepoint may activate several probes (the entry
 * and offset 0 probes, for example). To solve this multiplexing problem,
105 * tracepoints contain lists of probes to activate and probes contain lists
106 * of tracepoints to enable. If a probe is activated, it adds its ID to
107 * existing tracepoints or creates new ones as necessary.
108 *
109 * Most probes are activated _before_ the instruction is executed, but return
110 * probes are activated _after_ the effects of the last instruction of the
111 * function are visible. Return probes must be fired _after_ we have
112 * single-stepped the instruction whereas all other probes are fired
113 * beforehand.
114 *
115 *
116 * Lock Ordering
117 * -------------
118 *
119 * The lock ordering below -- both internally and with respect to the DTrace
120 * framework -- is a little tricky and bears some explanation. Each provider
121 * has a lock (ftp_mtx) that protects its members including reference counts
122 * for enabled probes (ftp_rcount), consumers actively creating probes
123 * (ftp_ccount) and USDT consumers (ftp_mcount); all three prevent a provider
124 * from being freed. A provider is looked up by taking the bucket lock for the
125 * provider hash table, and is returned with its lock held. The provider lock
126 * may be taken in functions invoked by the DTrace framework, but may not be
127 * held while calling functions in the DTrace framework.
128 *
129 * To ensure consistency over multiple calls to the DTrace framework, the
130 * creation lock (ftp_cmtx) should be held. Naturally, the creation lock may
131 * not be taken when holding the provider lock as that would create a cyclic
132 * lock ordering. In situations where one would naturally take the provider
133 * lock and then the creation lock, we instead up a reference count to prevent
134 * the provider from disappearing, drop the provider lock, and acquire the
135 * creation lock.
136 *
137 * Briefly:
138 * bucket lock before provider lock
139 * DTrace before provider lock
140 * creation lock before DTrace
141 * never hold the provider lock and creation lock simultaneously
142 */
143
144 static dtrace_meta_provider_id_t fasttrap_meta_id;
145
146 static thread_t fasttrap_cleanup_thread;
147
148 static LCK_GRP_DECLARE(fasttrap_lck_grp, "fasttrap");
149 static LCK_ATTR_DECLARE(fasttrap_lck_attr, 0, 0);
150 static LCK_MTX_DECLARE_ATTR(fasttrap_cleanup_mtx,
151 &fasttrap_lck_grp, &fasttrap_lck_attr);
152
153
154 #define FASTTRAP_CLEANUP_PROVIDER 0x1
155 #define FASTTRAP_CLEANUP_TRACEPOINT 0x2
156
157 static uint32_t fasttrap_cleanup_work = 0;
158
159 /*
160 * Generation count on modifications to the global tracepoint lookup table.
161 */
162 static volatile uint64_t fasttrap_mod_gen;
163
164 /*
165 * APPLE NOTE: When the fasttrap provider is loaded, fasttrap_max is computed
 * based on system memory. Each time a probe is created, fasttrap_total is
167 * incremented by the number of tracepoints that may be associated with that
168 * probe; fasttrap_total is capped at fasttrap_max.
169 */
170
171 static uint32_t fasttrap_max;
172 static uint32_t fasttrap_retired;
173 static uint32_t fasttrap_total;
174
175
176 #define FASTTRAP_TPOINTS_DEFAULT_SIZE 0x4000
177 #define FASTTRAP_PROVIDERS_DEFAULT_SIZE 0x100
178 #define FASTTRAP_PROCS_DEFAULT_SIZE 0x100
179
180 fasttrap_hash_t fasttrap_tpoints;
181 static fasttrap_hash_t fasttrap_provs;
182 static fasttrap_hash_t fasttrap_procs;
183
184 static uint64_t fasttrap_pid_count; /* pid ref count */
185 static LCK_MTX_DECLARE_ATTR(fasttrap_count_mtx, /* lock on ref count */
186 &fasttrap_lck_grp, &fasttrap_lck_attr);
187
188 #define FASTTRAP_ENABLE_FAIL 1
189 #define FASTTRAP_ENABLE_PARTIAL 2
190
191 static int fasttrap_tracepoint_enable(proc_t *, fasttrap_probe_t *, uint_t);
192 static void fasttrap_tracepoint_disable(proc_t *, fasttrap_probe_t *, uint_t);
193
194 static fasttrap_provider_t *fasttrap_provider_lookup(proc_t*, fasttrap_provider_type_t, const char *,
195 const dtrace_pattr_t *);
196 static void fasttrap_provider_retire(proc_t*, const char *, int);
197 static void fasttrap_provider_free(fasttrap_provider_t *);
198
199 static fasttrap_proc_t *fasttrap_proc_lookup(pid_t);
200 static void fasttrap_proc_release(fasttrap_proc_t *);
201
202 #define FASTTRAP_PROVS_INDEX(pid, name) \
203 ((fasttrap_hash_str(name) + (pid)) & fasttrap_provs.fth_mask)
204
205 #define FASTTRAP_PROCS_INDEX(pid) ((pid) & fasttrap_procs.fth_mask)
206
207 /*
208 * APPLE NOTE: To save memory, some common memory allocations are given
209 * a unique zone. For example, dtrace_probe_t is 72 bytes in size,
210 * which means it would fall into the kalloc.128 bucket. With
211 * 20k elements allocated, the space saved is substantial.
212 */
213
214 ZONE_DEFINE(fasttrap_tracepoint_t_zone, "dtrace.fasttrap_tracepoint_t",
215 sizeof(fasttrap_tracepoint_t), ZC_NONE);
216
217 /*
218 * APPLE NOTE: fasttrap_probe_t's are variable in size. Some quick profiling has shown
219 * that the sweet spot for reducing memory footprint is covering the first
220 * three sizes. Everything larger goes into the common pool.
221 */
222 #define FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS 4
223
224 struct zone *fasttrap_probe_t_zones[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS];
225
226 static const char *fasttrap_probe_t_zone_names[FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS] = {
227 "",
228 "dtrace.fasttrap_probe_t[1]",
229 "dtrace.fasttrap_probe_t[2]",
230 "dtrace.fasttrap_probe_t[3]"
231 };
232
233 static int
fasttrap_highbit(ulong_t i)234 fasttrap_highbit(ulong_t i)
235 {
236 int h = 1;
237
238 if (i == 0)
239 return (0);
240 #ifdef _LP64
241 if (i & 0xffffffff00000000ul) {
242 h += 32; i >>= 32;
243 }
244 #endif
245 if (i & 0xffff0000) {
246 h += 16; i >>= 16;
247 }
248 if (i & 0xff00) {
249 h += 8; i >>= 8;
250 }
251 if (i & 0xf0) {
252 h += 4; i >>= 4;
253 }
254 if (i & 0xc) {
255 h += 2; i >>= 2;
256 }
257 if (i & 0x2) {
258 h += 1;
259 }
260 return (h);
261 }
262
263 static uint_t
fasttrap_hash_str(const char * p)264 fasttrap_hash_str(const char *p)
265 {
266 unsigned int g;
267 uint_t hval = 0;
268
269 while (*p) {
270 hval = (hval << 4) + *p++;
271 if ((g = (hval & 0xf0000000)) != 0)
272 hval ^= g >> 24;
273 hval &= ~g;
274 }
275 return (hval);
276 }
277
/*
 * APPLE NOTE: fasttrap_sigtrap is not implemented on Darwin.  The
 * Solaris implementation (preserved below under !__APPLE__ for
 * reference) queues a SIGTRAP with si_code TRAP_DTRACE for the traced
 * thread.  On Darwin this function only logs that the unimplemented
 * path was reached.
 */
void
fasttrap_sigtrap(proc_t *p, uthread_t t, user_addr_t pc)
{
#pragma unused(p, t, pc)

#if !defined(__APPLE__)
	sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	sqp->sq_info.si_signo = SIGTRAP;
	sqp->sq_info.si_code = TRAP_DTRACE;
	sqp->sq_info.si_addr = (caddr_t)pc;

	mutex_enter(&p->p_lock);
	sigaddqa(p, t, sqp);
	mutex_exit(&p->p_lock);

	if (t != NULL)
		aston(t);
#endif /* __APPLE__ */

	printf("fasttrap_sigtrap called with no implementation.\n");
}
303
/*
 * This function ensures that no threads are actively using the memory
 * associated with probes that were formerly live.
 *
 * NOTE(review): the barrier works by bumping the generation count and
 * then taking and immediately dropping every CPU's cpuc_pid_lock —
 * presumably probe-firing threads hold their CPU's pid lock while
 * examining tracepoint state, so cycling every lock guarantees no
 * thread is still using state from an older generation.  Confirm
 * against the probe-firing path (fasttrap_isa).
 */
static void
fasttrap_mod_barrier(uint64_t gen)
{
	unsigned int i;

	/*
	 * A later call has already advanced past the generation we were
	 * asked to wait for; nothing to do.
	 */
	if (gen < fasttrap_mod_gen)
		return;

	fasttrap_mod_gen++;

	for (i = 0; i < NCPU; i++) {
		lck_mtx_lock(&cpu_core[i].cpuc_pid_lock);
		lck_mtx_unlock(&cpu_core[i].cpuc_pid_lock);
	}
}
323
324 static void fasttrap_pid_cleanup(uint32_t);
325
/*
 * Walk every bucket of the provider hash table and attempt to
 * unregister and free each provider that has been marked for removal
 * (ftp_marked).  Returns the number of marked providers that could not
 * be removed on this pass; the cleanup thread uses a non-zero return to
 * schedule a retry.
 */
static unsigned int
fasttrap_pid_cleanup_providers(void)
{
	fasttrap_provider_t **fpp, *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	unsigned int later = 0, i;

	/*
	 * Iterate over all the providers trying to remove the marked
	 * ones. If a provider is marked but not retired, we just
	 * have to take a crack at removing it -- it's no big deal if
	 * we can't.
	 */
	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		bucket = &fasttrap_provs.fth_table[i];
		lck_mtx_lock(&bucket->ftb_mtx);
		/* fpp walks the singly-linked chain so we can unlink in place. */
		fpp = (fasttrap_provider_t **)&bucket->ftb_data;

		while ((fp = *fpp) != NULL) {
			if (!fp->ftp_marked) {
				fpp = &fp->ftp_next;
				continue;
			}

			lck_mtx_lock(&fp->ftp_mtx);

			/*
			 * If this provider has consumers actively
			 * creating probes (ftp_ccount) or is a USDT
			 * provider (ftp_mcount), we can't unregister
			 * or even condense.
			 */
			if (fp->ftp_ccount != 0 ||
			    fp->ftp_mcount != 0) {
				fp->ftp_marked = 0;
				lck_mtx_unlock(&fp->ftp_mtx);
				continue;
			}

			/*
			 * Not retired, or still referenced by enabled
			 * probes: clear the mark so we don't count it
			 * toward the retry total below.
			 */
			if (!fp->ftp_retired || fp->ftp_rcount != 0)
				fp->ftp_marked = 0;

			/*
			 * Drop the provider lock before calling into the
			 * DTrace framework (see the lock-ordering rules at
			 * the top of this file).
			 */
			lck_mtx_unlock(&fp->ftp_mtx);

			/*
			 * If we successfully unregister this
			 * provider we can remove it from the hash
			 * chain and free the memory. If our attempt
			 * to unregister fails and this is a retired
			 * provider, increment our flag to try again
			 * pretty soon. If we've consumed more than
			 * half of our total permitted number of
			 * probes call dtrace_condense() to try to
			 * clean out the unenabled probes.
			 */
			provid = fp->ftp_provid;
			if (dtrace_unregister(provid) != 0) {
				if (fasttrap_total > fasttrap_max / 2)
					(void) dtrace_condense(provid);
				later += fp->ftp_marked;
				fpp = &fp->ftp_next;
			} else {
				*fpp = fp->ftp_next;
				fasttrap_provider_free(fp);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	return later;
}
398
/*
 * Identifies one retired tracepoint awaiting removal: entries are
 * appended to fasttrap_retired_spec by fasttrap_tracepoint_retire() and
 * drained by fasttrap_tracepoint_cleanup().
 */
typedef struct fasttrap_tracepoint_spec {
	pid_t fttps_pid;	/* pid of the process the tracepoint was planted in */
	user_addr_t fttps_pc;	/* user-space address of the tracepoint */
} fasttrap_tracepoint_spec_t;
403
404 static fasttrap_tracepoint_spec_t *fasttrap_retired_spec;
405 static size_t fasttrap_cur_retired = 0, fasttrap_retired_size;
406 static LCK_MTX_DECLARE_ATTR(fasttrap_retired_mtx,
407 &fasttrap_lck_grp, &fasttrap_lck_attr);
408
409 #define DEFAULT_RETIRED_SIZE 256
410
/*
 * Drain the queue of retired tracepoints (fasttrap_retired_spec),
 * removing each one's trap instruction from the owning process.
 * Entries for the same pid are expected to cluster, so the sprlock on
 * a process is held across consecutive entries until the pid changes.
 * Runs on the cleanup thread; resets the queue to empty when done.
 */
static void
fasttrap_tracepoint_cleanup(void)
{
	size_t i;
	pid_t pid = 0;
	user_addr_t pc;
	proc_t *p = PROC_NULL;
	fasttrap_tracepoint_t *tp = NULL;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_bucket_t *bucket;
	for (i = 0; i < fasttrap_cur_retired; i++) {
		pc = fasttrap_retired_spec[i].fttps_pc;
		/* Switch processes only when the pid changes between entries. */
		if (fasttrap_retired_spec[i].fttps_pid != pid) {
			pid = fasttrap_retired_spec[i].fttps_pid;
			if (p != PROC_NULL) {
				sprunlock(p);
			}
			/* Process is gone; skip its entries. */
			if ((p = sprlock(pid)) == PROC_NULL) {
				pid = 0;
				continue;
			}
		}
		bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
			    tp->ftt_proc->ftpc_acount != 0)
				break;
		}
		/*
		 * Check that the tracepoint is not gone or has not been
		 * re-activated for another probe
		 */
		if (tp == NULL || tp->ftt_retired == 0) {
			lck_mtx_unlock(&bucket->ftb_mtx);
			continue;
		}
		fasttrap_tracepoint_remove(p, tp);
		lck_mtx_unlock(&bucket->ftb_mtx);
	}
	if (p != PROC_NULL) {
		sprunlock(p);
	}

	/* All queued entries processed (or skipped); reset the queue. */
	fasttrap_cur_retired = 0;

	lck_mtx_unlock(&fasttrap_retired_mtx);
}
459
/*
 * Mark a tracepoint as retired and queue it for asynchronous removal
 * by the cleanup thread.  The backing queue doubles in size (under
 * fasttrap_retired_mtx) whenever it fills.
 *
 * NOTE(review): the ftt_retired test below and the store at the end
 * are performed outside fasttrap_retired_mtx; presumably other
 * synchronization (e.g. the process sprlock held by callers) makes a
 * duplicate enqueue benign or impossible — confirm against callers.
 */
void
fasttrap_tracepoint_retire(proc_t *p, fasttrap_tracepoint_t *tp)
{
	if (tp->ftt_retired)
		return;
	lck_mtx_lock(&fasttrap_retired_mtx);
	fasttrap_tracepoint_spec_t *s = &fasttrap_retired_spec[fasttrap_cur_retired++];
	s->fttps_pid = proc_getpid(p);
	s->fttps_pc = tp->ftt_pc;

	/* Grow the queue when the slot just consumed was the last one. */
	if (fasttrap_cur_retired == fasttrap_retired_size) {
		fasttrap_tracepoint_spec_t *new_retired = kmem_zalloc(
					fasttrap_retired_size * 2 *
					sizeof(*fasttrap_retired_spec),
					KM_SLEEP);
		memcpy(new_retired, fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		kmem_free(fasttrap_retired_spec, sizeof(*fasttrap_retired_spec) * fasttrap_retired_size);
		fasttrap_retired_size *= 2;
		fasttrap_retired_spec = new_retired;
	}

	lck_mtx_unlock(&fasttrap_retired_mtx);

	tp->ftt_retired = 1;

	/* Kick the cleanup thread to actually remove the instruction. */
	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_TRACEPOINT);
}
487
488 static void
fasttrap_pid_cleanup_compute_priority(void)489 fasttrap_pid_cleanup_compute_priority(void)
490 {
491 if (fasttrap_total > (fasttrap_max / 100 * 90) || fasttrap_retired > fasttrap_max / 2) {
492 thread_precedence_policy_data_t precedence = {12 /* BASEPRI_PREEMPT_HIGH */};
493 thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
494 }
495 else {
496 thread_precedence_policy_data_t precedence = {-39 /* BASEPRI_USER_INITIATED */};
497 thread_policy_set(fasttrap_cleanup_thread, THREAD_PRECEDENCE_POLICY, (thread_policy_t) &precedence, THREAD_PRECEDENCE_POLICY_COUNT);
498
499 }
500 }
501
/*
 * This is the timeout's callback for cleaning up the providers and their
 * probes.
 *
 * Body of the dedicated cleanup thread: it parks on msleep() against
 * &fasttrap_pid_cleanup_cb until fasttrap_pid_cleanup() posts work into
 * fasttrap_cleanup_work and issues a wakeup(), then drains provider
 * and/or tracepoint cleanup as requested.  Never returns.
 */
/*ARGSUSED*/
__attribute__((noreturn))
static void
fasttrap_pid_cleanup_cb(void)
{
	uint32_t work = 0;
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	/* Initial park: wait for the first work item before entering the loop. */
	msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
	while (1) {
		unsigned int later = 0;

		/* Claim all pending work bits; drop the mutex while working. */
		work = os_atomic_xchg(&fasttrap_cleanup_work, 0, relaxed);
		lck_mtx_unlock(&fasttrap_cleanup_mtx);
		if (work & FASTTRAP_CLEANUP_PROVIDER) {
			later = fasttrap_pid_cleanup_providers();
		}
		if (work & FASTTRAP_CLEANUP_TRACEPOINT) {
			fasttrap_tracepoint_cleanup();
		}
		lck_mtx_lock(&fasttrap_cleanup_mtx);

		fasttrap_pid_cleanup_compute_priority();
		/* New work may have been posted while we were unlocked. */
		if (!fasttrap_cleanup_work) {
			/*
			 * If we were unable to remove a retired provider, try again after
			 * a second. This situation can occur in certain circumstances where
			 * providers cannot be unregistered even though they have no probes
			 * enabled because of an execution of dtrace -l or something similar.
			 * If the timeout has been disabled (set to 1 because we're trying
			 * to detach), we set fasttrap_cleanup_work to ensure that we'll
			 * get a chance to do that work if and when the timeout is reenabled
			 * (if detach fails).
			 */
			if (later > 0) {
				struct timespec t = {.tv_sec = 1, .tv_nsec = 0};
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", &t);
			}
			else
				msleep(&fasttrap_pid_cleanup_cb, &fasttrap_cleanup_mtx, PRIBIO, "fasttrap_pid_cleanup_cb", NULL);
		}
	}

}
549
/*
 * Activates the asynchronous cleanup mechanism.
 *
 * work is a mask of FASTTRAP_CLEANUP_* bits; it is OR-ed into
 * fasttrap_cleanup_work and the cleanup thread is woken.  Posting the
 * bits and issuing the wakeup while holding fasttrap_cleanup_mtx
 * ensures the cleanup thread cannot miss the request between its
 * check of fasttrap_cleanup_work and its msleep().
 */
static void
fasttrap_pid_cleanup(uint32_t work)
{
	lck_mtx_lock(&fasttrap_cleanup_mtx);
	os_atomic_or(&fasttrap_cleanup_work, work, relaxed);
	fasttrap_pid_cleanup_compute_priority();
	wakeup(&fasttrap_pid_cleanup_cb);
	lck_mtx_unlock(&fasttrap_cleanup_mtx);
}
562
/*
 * Prepare a code-signed process so fasttrap may patch its text without
 * code-signing killing it.  Entered with the proc lock held (asserted
 * below); the caller also holds the process sprlock, which this
 * function may drop and re-take.  Returns 0 on success or EACCES if
 * cs_allow_invalid() refused — in both cases the proc lock is held
 * again on return.  Returns ESRCH if the process vanished while locks
 * were dropped, in which case NO locks are held on return.
 */
static int
fasttrap_setdebug(proc_t *p)
{
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);

	/*
	 * CS_KILL and CS_HARD will cause code-signing to kill the process
	 * when the process text is modified, so register the intent
	 * to allow invalid access beforehand.
	 */
	if ((proc_getcsflags(p) & (CS_KILL|CS_HARD))) {
		proc_unlock(p);
		for (int i = 0; i < DTRACE_NCLIENTS; i++) {
			dtrace_state_t *state = dtrace_state_get(i);
			if (state == NULL)
				continue;
			if (state->dts_cred.dcr_cred == NULL)
				continue;
			/*
			 * The get_task call flags whether the process should
			 * be flagged to have the cs_allow_invalid call
			 * succeed. We want the best credential that any dtrace
			 * client has, so try all of them.
			 */

			/*
			 * mac_proc_check_get_task() can trigger upcalls. It's
			 * not safe to hold proc references across upcalls, so
			 * just drop the reference. Given the context, it
			 * should not be possible for the process to actually
			 * disappear.
			 */
			struct proc_ident pident = proc_ident(p);
			sprunlock(p);
			p = PROC_NULL;

			(void) mac_proc_check_get_task(state->dts_cred.dcr_cred, &pident, TASK_FLAVOR_CONTROL);

			/* Re-acquire the process by pid; it may have exited. */
			p = sprlock(pident.p_pid);
			if (p == PROC_NULL) {
				return (ESRCH);
			}
		}
		int rc = cs_allow_invalid(p);
		proc_lock(p);
		if (rc == 0) {
			return (EACCES);
		}
	}
	return (0);
}
614
/*
 * This is called from cfork() via dtrace_fasttrap_fork(). The child
 * process's address space is a (roughly) a copy of the parent process's so
 * we have to remove all the instrumentation we had previously enabled in the
 * parent.
 *
 * p is the instrumented parent (sprlock held by the caller, asserted
 * below); cp is the freshly forked child, which this function sprlocks
 * and sprunlocks itself.
 */
static void
fasttrap_fork(proc_t *p, proc_t *cp)
{
	pid_t ppid = proc_getpid(p);
	unsigned int i;

	ASSERT(current_proc() == p);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_OWNED);
	ASSERT(p->p_dtrace_count > 0);
	ASSERT(cp->p_dtrace_count == 0);

	/*
	 * This would be simpler and faster if we maintained per-process
	 * hash tables of enabled tracepoints. It could, however, potentially
	 * slow down execution of a tracepoint since we'd need to go
	 * through two levels of indirection. In the future, we should
	 * consider either maintaining per-process ancillary lists of
	 * enabled tracepoints or hanging a pointer to a per-process hash
	 * table of enabled tracepoints off the proc structure.
	 */

	/*
	 * We don't have to worry about the child process disappearing
	 * because we're in fork().
	 */
	if (cp != sprlock(proc_getpid(cp))) {
		printf("fasttrap_fork: sprlock(%d) returned a different proc\n", proc_getpid(cp));
		return;
	}

	proc_lock(cp);
	if (fasttrap_setdebug(cp) == ESRCH) {
		/*
		 * ESRCH means fasttrap_setdebug lost the child while its
		 * locks were dropped and returned with no locks held, so
		 * there is nothing for us to unlock before bailing out.
		 */
		printf("fasttrap_fork: failed to re-acquire proc\n");
		return;
	}
	proc_unlock(cp);

	/*
	 * Iterate over every tracepoint looking for ones that belong to the
	 * parent process, and remove each from the child process.
	 */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		fasttrap_tracepoint_t *tp;
		fasttrap_bucket_t *bucket = &fasttrap_tpoints.fth_table[i];

		lck_mtx_lock(&bucket->ftb_mtx);
		for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
			if (tp->ftt_pid == ppid &&
			    tp->ftt_proc->ftpc_acount != 0) {
				fasttrap_tracepoint_remove(cp, tp);

				/*
				 * The count of active providers can only be
				 * decremented (i.e. to zero) during exec,
				 * exit, and removal of a meta provider so it
				 * should be impossible to drop the count
				 * mid-fork.
				 */
				ASSERT(tp->ftt_proc->ftpc_acount != 0);
			}
		}
		lck_mtx_unlock(&bucket->ftb_mtx);
	}

	/*
	 * Free any ptss pages/entries in the child.
	 */
	dtrace_ptss_fork(p, cp);

	sprunlock(cp);
}
692
/*
 * This is called from proc_exit() or from exec_common() if p_dtrace_probes
 * is set on the proc structure to indicate that there is a pid provider
 * associated with this process.
 *
 * Retires every fasttrap provider flavor attached to the process (pid,
 * objc, oneshot) and releases its per-thread scratch-space state.
 */
static void
fasttrap_exec_exit(proc_t *p)
{
	ASSERT(p == current_proc());
	LCK_MTX_ASSERT(&p->p_mlock, LCK_MTX_ASSERT_OWNED);
	LCK_MTX_ASSERT(&p->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);


	/* APPLE NOTE: Okay, the locking here is really odd and needs some
	 * explaining. This method is always called with the proc_lock held.
	 * We must drop the proc_lock before calling fasttrap_provider_retire
	 * to avoid a deadlock when it takes the bucket lock.
	 *
	 * Next, the dtrace_ptss_exec_exit function requires the sprlock
	 * be held, but not the proc_lock.
	 *
	 * Finally, we must re-acquire the proc_lock
	 */
	proc_unlock(p);

	/*
	 * We clean up the pid provider for this process here; user-land
	 * static probes are handled by the meta-provider remove entry point.
	 */
	fasttrap_provider_retire(p, FASTTRAP_PID_NAME, 0);

	/*
	 * APPLE NOTE: We also need to remove any aliased providers.
	 * XXX optimization: track which provider types are instantiated
	 * and only retire as needed.
	 */
	fasttrap_provider_retire(p, FASTTRAP_OBJC_NAME, 0);
	fasttrap_provider_retire(p, FASTTRAP_ONESHOT_NAME, 0);

	/*
	 * This should be called after it is no longer possible for a user
	 * thread to execute (potentially dtrace instrumented) instructions.
	 */
	lck_mtx_lock(&p->p_dtrace_sprlock);
	dtrace_ptss_exec_exit(p);
	lck_mtx_unlock(&p->p_dtrace_sprlock);

	/* Restore the lock state the caller expects. */
	proc_lock(p);
}
742
743
/*
 * DTrace framework "provide" entry point for the pid provider.
 * Intentionally a no-op: pid probes are created explicitly on demand
 * rather than enumerated when the framework asks for probes.
 */
/*ARGSUSED*/
static void
fasttrap_pid_provide(void *arg, const dtrace_probedesc_t *desc)
{
#pragma unused(arg, desc)
	/*
	 * There are no "default" pid probes.
	 */
}
753
/*
 * Enable the index'th tracepoint of the given probe in process p:
 * either piggyback on an existing live tracepoint at the same
 * (pid, pc) by linking this probe's fasttrap_id_t onto the
 * tracepoint's interested-probe list, or initialize and install the
 * tracepoint preallocated with the probe.  Returns 0 on success,
 * FASTTRAP_ENABLE_PARTIAL if the id was linked but the trap
 * instruction could not be written (caller must still disable), or
 * FASTTRAP_ENABLE_FAIL if ISA-specific initialization failed.
 */
static int
fasttrap_tracepoint_enable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
{
	fasttrap_tracepoint_t *tp, *new_tp = NULL;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	pid_t pid;
	user_addr_t pc;

	ASSERT(index < probe->ftp_ntps);

	pid = probe->ftp_pid;
	pc = probe->ftp_tps[index].fit_tp->ftt_pc;
	id = &probe->ftp_tps[index].fit_id;

	ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);

	/*
	 * Before we make any modifications, make sure we've imposed a barrier
	 * on the generation in which this probe was last modified.
	 */
	fasttrap_mod_barrier(probe->ftp_gen);

	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * If the tracepoint has already been enabled, just add our id to the
	 * list of interested probes. This may be our second time through
	 * this path in which case we'll have constructed the tracepoint we'd
	 * like to install. If we can't find a match, and have an allocated
	 * tracepoint ready to go, enable that one now.
	 *
	 * A tracepoint whose process is defunct is also considered defunct.
	 */
again:
	lck_mtx_lock(&bucket->ftb_mtx);
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		int rc = 0;
		/*
		 * Note that it's safe to access the active count on the
		 * associated proc structure because we know that at least one
		 * provider (this one) will still be around throughout this
		 * operation.
		 */
		if (tp->ftt_pid != pid || tp->ftt_pc != pc ||
		    tp->ftt_proc->ftpc_acount == 0)
			continue;

		/*
		 * Now that we've found a matching tracepoint, it would be
		 * a decent idea to confirm that the tracepoint is still
		 * enabled and the trap instruction hasn't been overwritten.
		 * Since this is a little hairy, we'll punt for now.
		 */
		if (!tp->ftt_installed) {
			if (fasttrap_tracepoint_install(p, tp) != 0)
				rc = FASTTRAP_ENABLE_PARTIAL;
		}
		/*
		 * This can't be the first interested probe. We don't have
		 * to worry about another thread being in the midst of
		 * deleting this tracepoint (which would be the only valid
		 * reason for a tracepoint to have no interested probes)
		 * since we're holding P_PR_LOCK for this process.
		 */
		ASSERT(tp->ftt_ids != NULL || tp->ftt_retids != NULL);

		/*
		 * Splice our id onto the appropriate list (entry/offset
		 * probes fire before the instruction, return/post-offset
		 * probes after).  The producer barriers order the stores
		 * for lock-free readers in the probe-firing path.
		 */
		switch (id->fti_ptype) {
		case DTFTP_ENTRY:
		case DTFTP_OFFSETS:
		case DTFTP_IS_ENABLED:
			id->fti_next = tp->ftt_ids;
			dtrace_membar_producer();
			tp->ftt_ids = id;
			dtrace_membar_producer();
			break;

		case DTFTP_RETURN:
		case DTFTP_POST_OFFSETS:
			id->fti_next = tp->ftt_retids;
			dtrace_membar_producer();
			tp->ftt_retids = id;
			dtrace_membar_producer();
			break;

		default:
			ASSERT(0);
		}

		tp->ftt_retired = 0;

		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * We raced: the tracepoint we built on a previous pass is
		 * not needed; detach the id lists we had staged on it.
		 */
		if (new_tp != NULL) {
			new_tp->ftt_ids = NULL;
			new_tp->ftt_retids = NULL;
		}

		return rc;
	}

	/*
	 * If we have a good tracepoint ready to go, install it now while
	 * we have the lock held and no one can screw with us.
	 */
	if (new_tp != NULL) {
		int rc = 0;

		new_tp->ftt_next = bucket->ftb_data;
		dtrace_membar_producer();
		bucket->ftb_data = new_tp;
		dtrace_membar_producer();
		lck_mtx_unlock(&bucket->ftb_mtx);

		/*
		 * Activate the tracepoint in the ISA-specific manner.
		 * If this fails, we need to report the failure, but
		 * indicate that this tracepoint must still be disabled
		 * by calling fasttrap_tracepoint_disable().
		 */
		if (fasttrap_tracepoint_install(p, new_tp) != 0)
			rc = FASTTRAP_ENABLE_PARTIAL;
		/*
		 * Increment the count of the number of tracepoints active in
		 * the victim process.
		 */
		//ASSERT(p->p_proc_flag & P_PR_LOCK);
		p->p_dtrace_count++;


		return (rc);
	}

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Initialize the tracepoint that's been preallocated with the probe.
	 */
	new_tp = probe->ftp_tps[index].fit_tp;
	new_tp->ftt_retired = 0;

	ASSERT(new_tp->ftt_pid == pid);
	ASSERT(new_tp->ftt_pc == pc);
	ASSERT(new_tp->ftt_proc == probe->ftp_prov->ftp_proc);
	ASSERT(new_tp->ftt_ids == NULL);
	ASSERT(new_tp->ftt_retids == NULL);

	switch (id->fti_ptype) {
	case DTFTP_ENTRY:
	case DTFTP_OFFSETS:
	case DTFTP_IS_ENABLED:
		id->fti_next = NULL;
		new_tp->ftt_ids = id;
		break;

	case DTFTP_RETURN:
	case DTFTP_POST_OFFSETS:
		id->fti_next = NULL;
		new_tp->ftt_retids = id;
		break;

	default:
		ASSERT(0);
	}

	/*
	 * If the ISA-dependent initialization goes to plan, go back to the
	 * beginning and try to install this freshly made tracepoint.
	 */
	if (fasttrap_tracepoint_init(p, new_tp, pc, id->fti_ptype) == 0)
		goto again;

	new_tp->ftt_ids = NULL;
	new_tp->ftt_retids = NULL;

	return (FASTTRAP_ENABLE_FAIL);
}
931
932 static void
fasttrap_tracepoint_disable(proc_t * p,fasttrap_probe_t * probe,uint_t index)933 fasttrap_tracepoint_disable(proc_t *p, fasttrap_probe_t *probe, uint_t index)
934 {
935 fasttrap_bucket_t *bucket;
936 fasttrap_provider_t *provider = probe->ftp_prov;
937 fasttrap_tracepoint_t **pp, *tp;
938 fasttrap_id_t *id, **idp;
939 pid_t pid;
940 user_addr_t pc;
941
942 ASSERT(index < probe->ftp_ntps);
943
944 pid = probe->ftp_pid;
945 pc = probe->ftp_tps[index].fit_tp->ftt_pc;
946 id = &probe->ftp_tps[index].fit_id;
947
948 ASSERT(probe->ftp_tps[index].fit_tp->ftt_pid == pid);
949
950 /*
951 * Find the tracepoint and make sure that our id is one of the
952 * ones registered with it.
953 */
954 bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];
955 lck_mtx_lock(&bucket->ftb_mtx);
956 for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
957 if (tp->ftt_pid == pid && tp->ftt_pc == pc &&
958 tp->ftt_proc == provider->ftp_proc)
959 break;
960 }
961
962 /*
963 * If we somehow lost this tracepoint, we're in a world of hurt.
964 */
965 ASSERT(tp != NULL);
966
967 switch (id->fti_ptype) {
968 case DTFTP_ENTRY:
969 case DTFTP_OFFSETS:
970 case DTFTP_IS_ENABLED:
971 ASSERT(tp->ftt_ids != NULL);
972 idp = &tp->ftt_ids;
973 break;
974
975 case DTFTP_RETURN:
976 case DTFTP_POST_OFFSETS:
977 ASSERT(tp->ftt_retids != NULL);
978 idp = &tp->ftt_retids;
979 break;
980
981 default:
982 /* Fix compiler warning... */
983 idp = NULL;
984 ASSERT(0);
985 }
986
987 while ((*idp)->fti_probe != probe) {
988 idp = &(*idp)->fti_next;
989 ASSERT(*idp != NULL);
990 }
991
992 id = *idp;
993 *idp = id->fti_next;
994 dtrace_membar_producer();
995
996 ASSERT(id->fti_probe == probe);
997
998 /*
999 * If there are other registered enablings of this tracepoint, we're
	 * all done, but if this was the last probe associated with this
	 * tracepoint, we need to remove and free it.
1002 */
1003 if (tp->ftt_ids != NULL || tp->ftt_retids != NULL) {
1004
1005 /*
1006 * If the current probe's tracepoint is in use, swap it
1007 * for an unused tracepoint.
1008 */
1009 if (tp == probe->ftp_tps[index].fit_tp) {
1010 fasttrap_probe_t *tmp_probe;
1011 fasttrap_tracepoint_t **tmp_tp;
1012 uint_t tmp_index;
1013
1014 if (tp->ftt_ids != NULL) {
1015 tmp_probe = tp->ftt_ids->fti_probe;
1016 /* LINTED - alignment */
1017 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_ids);
1018 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1019 } else {
1020 tmp_probe = tp->ftt_retids->fti_probe;
1021 /* LINTED - alignment */
1022 tmp_index = FASTTRAP_ID_INDEX(tp->ftt_retids);
1023 tmp_tp = &tmp_probe->ftp_tps[tmp_index].fit_tp;
1024 }
1025
1026 ASSERT(*tmp_tp != NULL);
1027 ASSERT(*tmp_tp != probe->ftp_tps[index].fit_tp);
1028 ASSERT((*tmp_tp)->ftt_ids == NULL);
1029 ASSERT((*tmp_tp)->ftt_retids == NULL);
1030
1031 probe->ftp_tps[index].fit_tp = *tmp_tp;
1032 *tmp_tp = tp;
1033
1034 }
1035
1036 lck_mtx_unlock(&bucket->ftb_mtx);
1037
1038 /*
1039 * Tag the modified probe with the generation in which it was
1040 * changed.
1041 */
1042 probe->ftp_gen = fasttrap_mod_gen;
1043 return;
1044 }
1045
1046 lck_mtx_unlock(&bucket->ftb_mtx);
1047
1048 /*
1049 * We can't safely remove the tracepoint from the set of active
1050 * tracepoints until we've actually removed the fasttrap instruction
1051 * from the process's text. We can, however, operate on this
1052 * tracepoint secure in the knowledge that no other thread is going to
1053 * be looking at it since we hold P_PR_LOCK on the process if it's
1054 * live or we hold the provider lock on the process if it's dead and
1055 * gone.
1056 */
1057
1058 /*
1059 * We only need to remove the actual instruction if we're looking
1060 * at an existing process
1061 */
1062 if (p != NULL) {
1063 /*
1064 * If we fail to restore the instruction we need to kill
1065 * this process since it's in a completely unrecoverable
1066 * state.
1067 */
1068 if (fasttrap_tracepoint_remove(p, tp) != 0)
1069 fasttrap_sigtrap(p, NULL, pc);
1070
1071 /*
1072 * Decrement the count of the number of tracepoints active
1073 * in the victim process.
1074 */
1075 //ASSERT(p->p_proc_flag & P_PR_LOCK);
1076 p->p_dtrace_count--;
1077 }
1078
1079 /*
1080 * Remove the probe from the hash table of active tracepoints.
1081 */
1082 lck_mtx_lock(&bucket->ftb_mtx);
1083 pp = (fasttrap_tracepoint_t **)&bucket->ftb_data;
1084 ASSERT(*pp != NULL);
1085 while (*pp != tp) {
1086 pp = &(*pp)->ftt_next;
1087 ASSERT(*pp != NULL);
1088 }
1089
1090 *pp = tp->ftt_next;
1091 dtrace_membar_producer();
1092
1093 lck_mtx_unlock(&bucket->ftb_mtx);
1094
1095 /*
1096 * Tag the modified probe with the generation in which it was changed.
1097 */
1098 probe->ftp_gen = fasttrap_mod_gen;
1099 }
1100
/*
 * Take a reference on the fasttrap trap handlers.  The first reference
 * installs the pid/return probe function pointers consulted from the
 * trap path; subsequent calls just bump fasttrap_pid_count.
 */
static void
fasttrap_enable_callbacks(void)
{
	/*
	 * We don't have to play the rw lock game here because we're
	 * providing something rather than taking something away --
	 * we can be sure that no threads have tried to follow this
	 * function pointer yet.
	 */
	lck_mtx_lock(&fasttrap_count_mtx);
	if (fasttrap_pid_count == 0) {
		ASSERT(dtrace_pid_probe_ptr == NULL);
		ASSERT(dtrace_return_probe_ptr == NULL);
		dtrace_pid_probe_ptr = &fasttrap_pid_probe;
		dtrace_return_probe_ptr = &fasttrap_return_probe;
	}
	ASSERT(dtrace_pid_probe_ptr == &fasttrap_pid_probe);
	ASSERT(dtrace_return_probe_ptr == &fasttrap_return_probe);
	fasttrap_pid_count++;
	lck_mtx_unlock(&fasttrap_count_mtx);
}
1122
/*
 * Drop a reference on the fasttrap trap handlers.  When the last
 * reference goes away, clear the probe function pointers while holding
 * every other CPU's ft_lock exclusively so no CPU can be mid-dispatch
 * through them.
 */
static void
fasttrap_disable_callbacks(void)
{
	//ASSERT(MUTEX_HELD(&cpu_lock));

	lck_mtx_lock(&fasttrap_count_mtx);
	ASSERT(fasttrap_pid_count > 0);
	fasttrap_pid_count--;
	if (fasttrap_pid_count == 0) {
		dtrace_cpu_t *cur, *cpu = CPU;

		/*
		 * APPLE NOTE: This loop seems broken, it touches every CPU
		 * but the one we're actually running on. Need to ask Sun folks
		 * if that is safe. Scenario is this: We're running on CPU A,
		 * and lock all but A. Then we get preempted, and start running
		 * on CPU B. A probe fires on A, and is allowed to enter. BOOM!
		 */
		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_lock_exclusive(&cur->cpu_ft_lock);
			// rw_enter(&cur->cpu_ft_lock, RW_WRITER);
		}

		dtrace_pid_probe_ptr = NULL;
		dtrace_return_probe_ptr = NULL;

		for (cur = cpu->cpu_next; cur != cpu; cur = cur->cpu_next) {
			lck_rw_unlock_exclusive(&cur->cpu_ft_lock);
			// rw_exit(&cur->cpu_ft_lock);
		}
	}
	lck_mtx_unlock(&fasttrap_count_mtx);
}
1156
/*
 * dtps_enable entry point for the pid/usdt providers: install every
 * tracepoint belonging to this probe in the target process.  On partial
 * failure, all tracepoints installed so far are rolled back.  Always
 * returns 0; failure to enable is reported by leaving ftp_enabled clear.
 */
/*ARGSUSED*/
static int
fasttrap_pid_enable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	proc_t *p;
	int i, rc;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(id == probe->ftp_id);
	// ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Increment the count of enabled probes on this probe's provider;
	 * the provider can't go away while the probe still exists. We
	 * must increment this even if we aren't able to properly enable
	 * this probe.
	 */
	lck_mtx_lock(&probe->ftp_prov->ftp_mtx);
	probe->ftp_prov->ftp_rcount++;
	lck_mtx_unlock(&probe->ftp_prov->ftp_mtx);

	/*
	 * If this probe's provider is retired (meaning it was valid in a
	 * previously exec'ed incarnation of this address space), bail out. The
	 * provider can't go away while we're in this code path.
	 */
	if (probe->ftp_prov->ftp_retired)
		return(0);

	/*
	 * If we can't find the process, it may be that we're in the context of
	 * a fork in which the traced process is being born and we're copying
	 * USDT probes. Otherwise, the process is gone so bail.
	 */
	if ((p = sprlock(probe->ftp_pid)) == PROC_NULL) {
		/*
		 * APPLE NOTE: We should never end up here. The Solaris sprlock()
		 * does not return processes with SIDL set, but we always return
		 * the child process.
		 */
		return(0);
	}

	proc_lock(p);
	/* Capture the pid now so it's available for messages after unlock. */
	int p_pid = proc_pid(p);

	rc = fasttrap_setdebug(p);
	switch (rc) {
	case EACCES:
		proc_unlock(p);
		sprunlock(p);
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Process does not allow invalid code pages\n", p_pid);
		return (0);
	case ESRCH:
		/*
		 * NOTE(review): no proc_unlock()/sprunlock() here — this
		 * assumes fasttrap_setdebug() already released the process
		 * on the ESRCH path; confirm against its implementation.
		 */
		cmn_err(CE_WARN, "Failed to install fasttrap probe for pid %d: "
		    "Failed to re-acquire process\n", p_pid);
		return (0);
	default:
		assert(rc == 0);
		break;
	}

	/*
	 * APPLE NOTE: We do not have an equivalent thread structure to Solaris.
	 * Solaris uses its ulwp_t struct for scratch space to support the pid provider.
	 * To mimic this, we allocate on demand scratch space. If this is the first
	 * time a probe has been enabled in this process, we need to allocate scratch
	 * space for each already existing thread. Now is a good time to do this, as
	 * the target process is suspended and the proc_lock is held.
	 */
	if (p->p_dtrace_ptss_pages == NULL) {
		dtrace_ptss_enable(p);
	}

	proc_unlock(p);

	/*
	 * We have to enable the trap entry point before any user threads have
	 * the chance to execute the trap instruction we're about to place
	 * in their process's text.
	 */
	fasttrap_enable_callbacks();

	/*
	 * Enable all the tracepoints and add this probe's id to each
	 * tracepoint's list of active probes.
	 */
	for (i = 0; i < (int)probe->ftp_ntps; i++) {
		if ((rc = fasttrap_tracepoint_enable(p, probe, i)) != 0) {
			/*
			 * If enabling the tracepoint failed completely,
			 * we don't have to disable it; if the failure
			 * was only partial we must disable it.
			 */
			if (rc == FASTTRAP_ENABLE_FAIL)
				i--;
			else
				ASSERT(rc == FASTTRAP_ENABLE_PARTIAL);

			/*
			 * Back up and pull out all the tracepoints we've
			 * created so far for this probe.
			 */
			while (i >= 0) {
				fasttrap_tracepoint_disable(p, probe, i);
				i--;
			}

			sprunlock(p);

			/*
			 * Since we're not actually enabling this probe,
			 * drop our reference on the trap table entry.
			 */
			fasttrap_disable_callbacks();
			return(0);
		}
	}

	sprunlock(p);

	probe->ftp_enabled = 1;
	return (0);
}
1285
/*
 * dtps_disable entry point for the pid/usdt providers: tear down every
 * tracepoint for this probe, drop the provider's enabled-probe count,
 * and schedule provider cleanup if this provider is now removable.
 */
/*ARGSUSED*/
static void
fasttrap_pid_disable(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	fasttrap_provider_t *provider = probe->ftp_prov;
	proc_t *p;
	int i, whack = 0;

	ASSERT(id == probe->ftp_id);

	/*
	 * We won't be able to acquire a /proc-esque lock on the process
	 * iff the process is dead and gone. In this case, we rely on the
	 * provider lock as a point of mutual exclusion to prevent other
	 * DTrace consumers from disabling this probe.
	 */
	p = sprlock(probe->ftp_pid);

	lck_mtx_lock(&provider->ftp_mtx);

	/*
	 * Disable all the associated tracepoints (for fully enabled probes).
	 */
	if (probe->ftp_enabled) {
		for (i = 0; i < (int)probe->ftp_ntps; i++) {
			fasttrap_tracepoint_disable(p, probe, i);
		}
	}

	ASSERT(provider->ftp_rcount > 0);
	provider->ftp_rcount--;

	if (p != NULL) {
		/*
		 * Even though we may not be able to remove it entirely, we
		 * mark this retired provider to get a chance to remove some
		 * of the associated probes.
		 */
		if (provider->ftp_retired && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);

		sprunlock(p);
	} else {
		/*
		 * If the process is dead, we're just waiting for the
		 * last probe to be disabled to be able to free it.
		 */
		if (provider->ftp_rcount == 0 && !provider->ftp_marked)
			whack = provider->ftp_marked = 1;
		lck_mtx_unlock(&provider->ftp_mtx);
	}

	if (whack) {
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
	}

	/* Probes that never finished enabling have no callback reference to drop. */
	if (!probe->ftp_enabled)
		return;

	probe->ftp_enabled = 0;

	// ASSERT(MUTEX_HELD(&cpu_lock));
	fasttrap_disable_callbacks();
}
1353
1354 /*ARGSUSED*/
1355 static void
fasttrap_pid_getargdesc(void * arg,dtrace_id_t id,void * parg,dtrace_argdesc_t * desc)1356 fasttrap_pid_getargdesc(void *arg, dtrace_id_t id, void *parg,
1357 dtrace_argdesc_t *desc)
1358 {
1359 #pragma unused(arg, id)
1360 fasttrap_probe_t *probe = parg;
1361 char *str;
1362 int i, ndx;
1363
1364 desc->dtargd_native[0] = '\0';
1365 desc->dtargd_xlate[0] = '\0';
1366
1367 if (probe->ftp_prov->ftp_retired != 0 ||
1368 desc->dtargd_ndx >= probe->ftp_nargs) {
1369 desc->dtargd_ndx = DTRACE_ARGNONE;
1370 return;
1371 }
1372
1373 ndx = (probe->ftp_argmap != NULL) ?
1374 probe->ftp_argmap[desc->dtargd_ndx] : desc->dtargd_ndx;
1375
1376 str = probe->ftp_ntypes;
1377 for (i = 0; i < ndx; i++) {
1378 str += strlen(str) + 1;
1379 }
1380
1381 (void) strlcpy(desc->dtargd_native, str, sizeof(desc->dtargd_native));
1382
1383 if (probe->ftp_xtypes == NULL)
1384 return;
1385
1386 str = probe->ftp_xtypes;
1387 for (i = 0; i < desc->dtargd_ndx; i++) {
1388 str += strlen(str) + 1;
1389 }
1390
1391 (void) strlcpy(desc->dtargd_xlate, str, sizeof(desc->dtargd_xlate));
1392 }
1393
/*
 * dtps_destroy entry point: free a (fully disabled) probe, its
 * tracepoints, and adjust the global tracepoint accounting.
 */
/*ARGSUSED*/
static void
fasttrap_pid_destroy(void *arg, dtrace_id_t id, void *parg)
{
#pragma unused(arg, id)
	fasttrap_probe_t *probe = parg;
	unsigned int i;

	ASSERT(probe != NULL);
	ASSERT(!probe->ftp_enabled);
	ASSERT(fasttrap_total >= probe->ftp_ntps);

	os_atomic_sub(&fasttrap_total, probe->ftp_ntps, relaxed);
	os_atomic_sub(&fasttrap_retired, probe->ftp_ntps, relaxed);

	/*
	 * Wait out any probe-context consumers that may still hold a
	 * reference from the generation in which this probe last changed.
	 */
	if (probe->ftp_gen + 1 >= fasttrap_mod_gen)
		fasttrap_mod_barrier(probe->ftp_gen);

	for (i = 0; i < probe->ftp_ntps; i++) {
		zfree(fasttrap_tracepoint_t_zone, probe->ftp_tps[i].fit_tp);
	}

	/*
	 * Small probes come from per-size zones; larger ones were
	 * kmem_zalloc'ed with a flexible ftp_tps[] tail.
	 */
	if (probe->ftp_ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		zfree(fasttrap_probe_t_zones[probe->ftp_ntps], probe);
	} else {
		size_t size = offsetof(fasttrap_probe_t, ftp_tps[probe->ftp_ntps]);
		kmem_free(probe, size);
	}
}
1423
1424
/*
 * Stability attributes reported for pid-provider probes
 * (provider, module, function, name, args).
 */
static const dtrace_pattr_t pid_attr = {
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_ISA },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
};

/*
 * Provider operations for pidNNN providers; differs from usdt_pops only
 * in the argument-fetch entry point.
 */
static dtrace_pops_t pid_pops = {
	.dtps_provide =		fasttrap_pid_provide,
	.dtps_provide_module =	NULL,
	.dtps_enable =		fasttrap_pid_enable,
	.dtps_disable =		fasttrap_pid_disable,
	.dtps_suspend =		NULL,
	.dtps_resume =		NULL,
	.dtps_getargdesc =	fasttrap_pid_getargdesc,
	.dtps_getargval =	fasttrap_pid_getarg,
	.dtps_usermode =	NULL,
	.dtps_destroy =		fasttrap_pid_destroy
};

/*
 * Provider operations for USDT providers (argument fetch goes through
 * the USDT argument mapping).
 */
static dtrace_pops_t usdt_pops = {
	.dtps_provide =		fasttrap_pid_provide,
	.dtps_provide_module =	NULL,
	.dtps_enable =		fasttrap_pid_enable,
	.dtps_disable =		fasttrap_pid_disable,
	.dtps_suspend =		NULL,
	.dtps_resume =		NULL,
	.dtps_getargdesc =	fasttrap_pid_getargdesc,
	.dtps_getargval =	fasttrap_usdt_getarg,
	.dtps_usermode =	NULL,
	.dtps_destroy =		fasttrap_pid_destroy
};
1458
/*
 * Look up (or create) the fasttrap_proc_t for a pid, returning it with
 * both its reference count and active count bumped.  Uses the classic
 * drop-the-lock / allocate / re-check pattern so the bucket mutex is
 * never held across a sleeping allocation.
 */
static fasttrap_proc_t *
fasttrap_proc_lookup(pid_t pid)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, *new_fprc;

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		/* ftpc_acount == 0 means the entry is defunct; skip it. */
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			os_atomic_inc(&fprc->ftpc_acount, relaxed);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

			return (fprc);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	new_fprc = kmem_zalloc(sizeof (fasttrap_proc_t), KM_SLEEP);
	ASSERT(new_fprc != NULL);
	new_fprc->ftpc_pid = pid;
	new_fprc->ftpc_rcount = 1;
	new_fprc->ftpc_acount = 1;

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a proc hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fprc = bucket->ftb_data; fprc != NULL; fprc = fprc->ftpc_next) {
		if (fprc->ftpc_pid == pid && fprc->ftpc_acount != 0) {
			lck_mtx_lock(&fprc->ftpc_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			fprc->ftpc_rcount++;
			os_atomic_inc(&fprc->ftpc_acount, relaxed);
			ASSERT(fprc->ftpc_acount <= fprc->ftpc_rcount);
			lck_mtx_unlock(&fprc->ftpc_mtx);

			/* Lost the race; discard our speculative allocation. */
			kmem_free(new_fprc, sizeof (fasttrap_proc_t));

			return (fprc);
		}
	}

	/*
	 * APPLE NOTE: We have to initialize all locks explicitly
	 */
	lck_mtx_init(&new_fprc->ftpc_mtx, &fasttrap_lck_grp, &fasttrap_lck_attr);

	new_fprc->ftpc_next = bucket->ftb_data;
	bucket->ftb_data = new_fprc;

	lck_mtx_unlock(&bucket->ftb_mtx);

	return (new_fprc);
}
1526
/*
 * Drop a reference on a fasttrap_proc_t; when the last reference goes
 * away, unlink it from the proc hash table and free it.
 */
static void
fasttrap_proc_release(fasttrap_proc_t *proc)
{
	fasttrap_bucket_t *bucket;
	fasttrap_proc_t *fprc, **fprcp;
	pid_t pid = proc->ftpc_pid;

	lck_mtx_lock(&proc->ftpc_mtx);

	ASSERT(proc->ftpc_rcount != 0);
	ASSERT(proc->ftpc_acount <= proc->ftpc_rcount);

	if (--proc->ftpc_rcount != 0) {
		lck_mtx_unlock(&proc->ftpc_mtx);
		return;
	}

	lck_mtx_unlock(&proc->ftpc_mtx);

	/*
	 * There should definitely be no live providers associated with this
	 * process at this point.
	 */
	ASSERT(proc->ftpc_acount == 0);

	bucket = &fasttrap_procs.fth_table[FASTTRAP_PROCS_INDEX(pid)];
	lck_mtx_lock(&bucket->ftb_mtx);

	/* Find the link in the bucket chain that points at this proc. */
	fprcp = (fasttrap_proc_t **)&bucket->ftb_data;
	while ((fprc = *fprcp) != NULL) {
		if (fprc == proc)
			break;

		fprcp = &fprc->ftpc_next;
	}

	/*
	 * Something strange has happened if we can't find the proc.
	 */
	ASSERT(fprc != NULL);

	*fprcp = fprc->ftpc_next;

	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&fprc->ftpc_mtx, &fasttrap_lck_grp);

	kmem_free(fprc, sizeof (fasttrap_proc_t));
}
1580
1581 /*
1582 * Lookup a fasttrap-managed provider based on its name and associated proc.
1583 * A reference to the proc must be held for the duration of the call.
1584 * If the pattr argument is non-NULL, this function instantiates the provider
1585 * if it doesn't exist otherwise it returns NULL. The provider is returned
1586 * with its lock held.
1587 */
static fasttrap_provider_t *
fasttrap_provider_lookup(proc_t *p, fasttrap_provider_type_t provider_type, const char *name,
    const dtrace_pattr_t *pattr)
{
	pid_t pid = proc_getpid(p);
	fasttrap_provider_t *fp, *new_fp = NULL;
	fasttrap_bucket_t *bucket;
	char provname[DTRACE_PROVNAMELEN];
	cred_t *cred;

	ASSERT(strlen(name) < sizeof (fp->ftp_name));
	ASSERT(pattr != NULL);

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(pid, name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take a lap through the list and return the match if we find it.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid &&
		    fp->ftp_provider_type == provider_type &&
		    strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			return (fp);
		}
	}

	/*
	 * Drop the bucket lock so we don't try to perform a sleeping
	 * allocation under it.
	 */
	lck_mtx_unlock(&bucket->ftb_mtx);

	/*
	 * Make sure the process isn't exiting (it may, however, still be
	 * in the middle of a fork).
	 */
	proc_lock(p);
	if (p->p_lflag & P_LEXIT) {
		proc_unlock(p);
		return (NULL);
	}

	/*
	 * Increment p_dtrace_probes so that the process knows to inform us
	 * when it exits or execs. fasttrap_provider_free() decrements this
	 * when we're done with this provider.
	 */
	p->p_dtrace_probes++;

	/*
	 * Grab the credentials for this process so we have
	 * something to pass to dtrace_register().
	 * APPLE NOTE: We have no equivalent to crhold,
	 * even though there is a cr_ref field in ucred.
	 */
	cred = kauth_cred_proc_ref(p);
	proc_unlock(p);

	new_fp = kmem_zalloc(sizeof (fasttrap_provider_t), KM_SLEEP);
	ASSERT(new_fp != NULL);
	new_fp->ftp_pid = proc_getpid(p);
	new_fp->ftp_proc = fasttrap_proc_lookup(pid);
	new_fp->ftp_provider_type = provider_type;

	/*
	 * APPLE NOTE: locks require explicit init
	 */
	lck_mtx_init(&new_fp->ftp_mtx, &fasttrap_lck_grp, &fasttrap_lck_attr);
	lck_mtx_init(&new_fp->ftp_cmtx, &fasttrap_lck_grp, &fasttrap_lck_attr);

	ASSERT(new_fp->ftp_proc != NULL);

	lck_mtx_lock(&bucket->ftb_mtx);

	/*
	 * Take another lap through the list to make sure a provider hasn't
	 * been created for this pid while we weren't under the bucket lock.
	 */
	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == pid && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired) {
			lck_mtx_lock(&fp->ftp_mtx);
			lck_mtx_unlock(&bucket->ftb_mtx);
			/* Lost the race; free the speculative provider. */
			fasttrap_provider_free(new_fp);
			kauth_cred_unref(&cred);
			return (fp);
		}
	}

	(void) strlcpy(new_fp->ftp_name, name, sizeof(new_fp->ftp_name));

	/*
	 * Fail and return NULL if either the provider name is too long
	 * or we fail to register this new provider with the DTrace
	 * framework. Note that this is the only place we ever construct
	 * the full provider name -- we keep it in pieces in the provider
	 * structure.
	 */
	if (snprintf(provname, sizeof (provname), "%s%u", name, (uint_t)pid) >=
	    (int)sizeof (provname) ||
	    dtrace_register(provname, pattr,
	    DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER | DTRACE_PRIV_ZONEOWNER, cred,
	    pattr == &pid_attr ? &pid_pops : &usdt_pops, new_fp,
	    &new_fp->ftp_provid) != 0) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		fasttrap_provider_free(new_fp);
		kauth_cred_unref(&cred);
		return (NULL);
	}

	new_fp->ftp_next = bucket->ftb_data;
	bucket->ftb_data = new_fp;

	/* Return the new provider locked, matching the fast-path above. */
	lck_mtx_lock(&new_fp->ftp_mtx);
	lck_mtx_unlock(&bucket->ftb_mtx);

	kauth_cred_unref(&cred);

	return (new_fp);
}
1712
/*
 * Free a fasttrap provider with no remaining enablings, creators, or
 * meta-provider references, releasing its proc reference and dropping
 * the owning process's p_dtrace_probes count.
 */
static void
fasttrap_provider_free(fasttrap_provider_t *provider)
{
	pid_t pid = provider->ftp_pid;
	proc_t *p;

	/*
	 * There need to be no associated enabled probes, no consumers
	 * creating probes, and no meta providers referencing this provider.
	 */
	ASSERT(provider->ftp_rcount == 0);
	ASSERT(provider->ftp_ccount == 0);
	ASSERT(provider->ftp_mcount == 0);

	/*
	 * If this provider hasn't been retired, we need to explicitly drop the
	 * count of active providers on the associated process structure.
	 */
	if (!provider->ftp_retired) {
		os_atomic_dec(&provider->ftp_proc->ftpc_acount, relaxed);
		ASSERT(provider->ftp_proc->ftpc_acount <
		    provider->ftp_proc->ftpc_rcount);
	}

	fasttrap_proc_release(provider->ftp_proc);

	/*
	 * APPLE NOTE: explicit lock management. Not 100% certain we need this, the
	 * memory is freed even without the destroy. Maybe accounting cleanup?
	 */
	lck_mtx_destroy(&provider->ftp_mtx, &fasttrap_lck_grp);
	lck_mtx_destroy(&provider->ftp_cmtx, &fasttrap_lck_grp);

	kmem_free(provider, sizeof (fasttrap_provider_t));

	/*
	 * Decrement p_dtrace_probes on the process whose provider we're
	 * freeing. We don't have to worry about clobbering someone else's
	 * modifications to it because we have locked the bucket that
	 * corresponds to this process's hash chain in the provider hash
	 * table. Don't sweat it if we can't find the process.
	 */
	if ((p = proc_find(pid)) == NULL) {
		return;
	}

	proc_lock(p);
	p->p_dtrace_probes--;
	proc_unlock(p);

	proc_rele(p);
}
1765
/*
 * Retire the named provider on process p (on exec/exit, or when a
 * meta-provider reference is dropped with mprov set): mark it retired,
 * drop its active proc count, invalidate it with the DTrace framework,
 * and schedule the cleanup thread to try to remove it.
 */
static void
fasttrap_provider_retire(proc_t *p, const char *name, int mprov)
{
	fasttrap_provider_t *fp;
	fasttrap_bucket_t *bucket;
	dtrace_provider_id_t provid;
	ASSERT(strlen(name) < sizeof (fp->ftp_name));

	bucket = &fasttrap_provs.fth_table[FASTTRAP_PROVS_INDEX(proc_getpid(p), name)];
	lck_mtx_lock(&bucket->ftb_mtx);

	for (fp = bucket->ftb_data; fp != NULL; fp = fp->ftp_next) {
		if (fp->ftp_pid == proc_getpid(p) && strncmp(fp->ftp_name, name, sizeof(fp->ftp_name)) == 0 &&
		    !fp->ftp_retired)
			break;
	}

	if (fp == NULL) {
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	lck_mtx_lock(&fp->ftp_mtx);
	ASSERT(!mprov || fp->ftp_mcount > 0);
	if (mprov && --fp->ftp_mcount != 0)  {
		lck_mtx_unlock(&fp->ftp_mtx);
		lck_mtx_unlock(&bucket->ftb_mtx);
		return;
	}

	/*
	 * Mark the provider to be removed in our post-processing step, mark it
	 * retired, and drop the active count on its proc. Marking it indicates
	 * that we should try to remove it; setting the retired flag indicates
	 * that we're done with this provider; dropping the active count on the
	 * proc releases our hold, and when this reaches zero (as it will during
	 * exit or exec) the proc and associated providers become defunct.
	 *
	 * We obviously need to take the bucket lock before the provider lock
	 * to perform the lookup, but we need to drop the provider lock
	 * before calling into the DTrace framework since we acquire the
	 * provider lock in callbacks invoked from the DTrace framework. The
	 * bucket lock therefore protects the integrity of the provider hash
	 * table.
	 */
	os_atomic_dec(&fp->ftp_proc->ftpc_acount, relaxed);
	ASSERT(fp->ftp_proc->ftpc_acount < fp->ftp_proc->ftpc_rcount);

	/*
	 * Add this provider's probes to the retired count and
	 * make sure we don't add them twice
	 */
	os_atomic_add(&fasttrap_retired, fp->ftp_pcount, relaxed);
	fp->ftp_pcount = 0;

	fp->ftp_retired = 1;
	fp->ftp_marked = 1;
	provid = fp->ftp_provid;
	lck_mtx_unlock(&fp->ftp_mtx);

	/*
	 * We don't have to worry about invalidating the same provider twice
	 * since fasttrap_provider_lookup() will ignore providers that have
	 * been marked as retired.
	 */
	dtrace_invalidate(provid);

	lck_mtx_unlock(&bucket->ftb_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);
}
1837
/*
 * qsort(3)-style three-way comparator for uint32_t keys.
 *
 * The previous implementation returned the truncated unsigned difference
 * (*a - *b), whose sign is wrong whenever the wrapped difference falls in
 * the opposite half of the int range (e.g. 0xFFFFFFFF vs 0 compared as
 * "less than").  Compare the values directly instead.
 */
static int
fasttrap_uint32_cmp(const void *ap, const void *bp)
{
	const uint32_t a = *(const uint32_t *)ap;
	const uint32_t b = *(const uint32_t *)bp;

	return ((a > b) - (a < b));
}
1843
/*
 * qsort(3)-style three-way comparator for uint64_t keys, used to sort
 * tracepoint offsets before duplicate detection in fasttrap_add_probe().
 *
 * The previous implementation truncated the 64-bit difference to int, so
 * values differing only in the upper 32 bits compared as equal (e.g.
 * 1ULL<<32 vs 0) and large differences could flip sign, letting qsort
 * mis-sort and the duplicate-pc check misfire.  Compare directly instead.
 */
static int
fasttrap_uint64_cmp(const void *ap, const void *bp)
{
	const uint64_t a = *(const uint64_t *)ap;
	const uint64_t b = *(const uint64_t *)bp;

	return ((a > b) - (a < b));
}
1849
/*
 * Create pid-provider probes from a user-supplied probe specification:
 * look up (or create) the provider for the target pid, then create one
 * probe per offset (for offset probes) or one probe covering all offsets
 * (for entry/return probes).  Returns 0, EINVAL, ESRCH, or ENOMEM.
 */
static int
fasttrap_add_probe(fasttrap_probe_spec_t *pdata)
{
	proc_t *p;
	fasttrap_provider_t *provider;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	const char *name;
	unsigned int i, aframes, whack;

	/*
	 * There needs to be at least one desired trace point.
	 */
	if (pdata->ftps_noffs == 0)
		return (EINVAL);

	/* Map the probe type to its name part and artificial frame count. */
	switch (pdata->ftps_probe_type) {
	case DTFTP_ENTRY:
		name = "entry";
		aframes = FASTTRAP_ENTRY_AFRAMES;
		break;
	case DTFTP_RETURN:
		name = "return";
		aframes = FASTTRAP_RETURN_AFRAMES;
		break;
	case DTFTP_OFFSETS:
		aframes = 0;
		name = NULL;
		break;
	default:
		return (EINVAL);
	}

	const char* provider_name;
	switch (pdata->ftps_provider_type) {
	case DTFTP_PROVIDER_PID:
		provider_name = FASTTRAP_PID_NAME;
		break;
	case DTFTP_PROVIDER_OBJC:
		provider_name = FASTTRAP_OBJC_NAME;
		break;
	case DTFTP_PROVIDER_ONESHOT:
		provider_name = FASTTRAP_ONESHOT_NAME;
		break;
	default:
		return (EINVAL);
	}

	p = proc_find(pdata->ftps_pid);
	if (p == PROC_NULL)
		return (ESRCH);

	/* Provider is returned with its ftp_mtx held. */
	if ((provider = fasttrap_provider_lookup(p, pdata->ftps_provider_type,
						 provider_name, &pid_attr)) == NULL) {
		proc_rele(p);
		return (ESRCH);
	}

	proc_rele(p);
	/*
	 * Increment this reference count to indicate that a consumer is
	 * actively adding a new probe associated with this provider. This
	 * prevents the provider from being deleted -- we'll need to check
	 * for pending deletions when we drop this reference count.
	 */
	provider->ftp_ccount++;
	lck_mtx_unlock(&provider->ftp_mtx);

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes. We must drop the provider lock
	 * before taking this lock to avoid a three-way deadlock with the
	 * DTrace framework.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

	if (name == NULL) {
		/*
		 * Offset probes: one probe per offset, named by the hex
		 * offset itself.
		 */
		for (i = 0; i < pdata->ftps_noffs; i++) {
			char name_str[17];

			(void) snprintf(name_str, sizeof(name_str), "%llx",
			    (uint64_t)pdata->ftps_offs[i]);

			/* Skip offsets whose probe already exists. */
			if (dtrace_probe_lookup(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str) != 0)
				continue;

			os_atomic_inc(&fasttrap_total, relaxed);
			if (fasttrap_total > fasttrap_max) {
				os_atomic_dec(&fasttrap_total, relaxed);
				goto no_mem;
			}
			provider->ftp_pcount++;

			pp = zalloc_flags(fasttrap_probe_t_zones[1], Z_WAITOK | Z_ZERO);

			pp->ftp_prov = provider;
			pp->ftp_faddr = pdata->ftps_pc;
			pp->ftp_fsize = pdata->ftps_size;
			pp->ftp_pid = pdata->ftps_pid;
			pp->ftp_ntps = 1;

			tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);

			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm__) || defined(__arm64__)
			/*
			 * On arm the subinfo is used to distinguish between arm
			 * and thumb modes.  On arm64 there is no thumb mode, so
			 * this field is simply initialized to 0 on its way
			 * into the kernel.
			 */
			tp->ftt_fntype = pdata->ftps_arch_subinfo;
#endif

			pp->ftp_tps[0].fit_tp = tp;
			pp->ftp_tps[0].fit_id.fti_probe = pp;
			pp->ftp_tps[0].fit_id.fti_ptype = pdata->ftps_probe_type;
			pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
			    pdata->ftps_mod, pdata->ftps_func, name_str,
			    FASTTRAP_OFFSET_AFRAMES, pp);
		}

	} else if (dtrace_probe_lookup(provider->ftp_provid, pdata->ftps_mod,
	    pdata->ftps_func, name) == 0) {
		/*
		 * Entry/return probe: a single probe with one tracepoint
		 * per offset.  (If the probe already exists, fall through
		 * and do nothing.)
		 */
		os_atomic_add(&fasttrap_total, pdata->ftps_noffs, relaxed);

		if (fasttrap_total > fasttrap_max) {
			os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
			goto no_mem;
		}

		/*
		 * Make sure all tracepoint program counter values are unique.
		 * We later assume that each probe has exactly one tracepoint
		 * for a given pc.
		 */
		qsort(pdata->ftps_offs, pdata->ftps_noffs,
			sizeof (uint64_t), fasttrap_uint64_cmp);
		for (i = 1; i < pdata->ftps_noffs; i++) {
			if (pdata->ftps_offs[i] > pdata->ftps_offs[i - 1])
				continue;

			/* Duplicate offset: reject the whole request. */
			os_atomic_sub(&fasttrap_total, pdata->ftps_noffs, relaxed);
			goto no_mem;
		}
		provider->ftp_pcount += pdata->ftps_noffs;
		ASSERT(pdata->ftps_noffs > 0);
		if (pdata->ftps_noffs < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
			pp = zalloc_flags(fasttrap_probe_t_zones[pdata->ftps_noffs],
			    Z_WAITOK | Z_ZERO);
		} else {
			pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[pdata->ftps_noffs]), KM_SLEEP);
		}

		pp->ftp_prov = provider;
		pp->ftp_faddr = pdata->ftps_pc;
		pp->ftp_fsize = pdata->ftps_size;
		pp->ftp_pid = pdata->ftps_pid;
		pp->ftp_ntps = pdata->ftps_noffs;

		for (i = 0; i < pdata->ftps_noffs; i++) {
			tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);
			tp->ftt_proc = provider->ftp_proc;
			tp->ftt_pc = pdata->ftps_offs[i] + pdata->ftps_pc;
			tp->ftt_pid = pdata->ftps_pid;

#if defined(__arm__) || defined (__arm64__)
			/*
			 * On arm the subinfo is used to distinguish between arm
			 * and thumb modes.  On arm64 there is no thumb mode, so
			 * this field is simply initialized to 0 on its way
			 * into the kernel.
			 */

			tp->ftt_fntype = pdata->ftps_arch_subinfo;
#endif
			pp->ftp_tps[i].fit_tp = tp;
			pp->ftp_tps[i].fit_id.fti_probe = pp;
			pp->ftp_tps[i].fit_id.fti_ptype = pdata->ftps_probe_type;
		}

		pp->ftp_id = dtrace_probe_create(provider->ftp_provid,
		    pdata->ftps_mod, pdata->ftps_func, name, aframes, pp);
	}

	lck_mtx_unlock(&provider->ftp_cmtx);

	/*
	 * We know that the provider is still valid since we incremented the
	 * creation reference count. If someone tried to clean up this provider
	 * while we were using it (e.g. because the process called exec(2) or
	 * exit(2)), take note of that and try to clean it up now.
	 */
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	whack = provider->ftp_retired;
	lck_mtx_unlock(&provider->ftp_mtx);

	if (whack)
		fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	return (0);

no_mem:
	/*
	 * If we've exhausted the allowable resources, we'll try to remove
	 * this provider to free some up. This is to cover the case where
	 * the user has accidentally created many more probes than was
	 * intended (e.g. pid123:::).
	 */
	lck_mtx_unlock(&provider->ftp_cmtx);
	lck_mtx_lock(&provider->ftp_mtx);
	provider->ftp_ccount--;
	provider->ftp_marked = 1;
	lck_mtx_unlock(&provider->ftp_mtx);

	fasttrap_pid_cleanup(FASTTRAP_CLEANUP_PROVIDER);

	return (ENOMEM);
}
2075
2076 /*ARGSUSED*/
2077 static void *
fasttrap_meta_provide(void * arg,dtrace_helper_provdesc_t * dhpv,proc_t * p)2078 fasttrap_meta_provide(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
2079 {
2080 #pragma unused(arg)
2081 fasttrap_provider_t *provider;
2082
2083 /*
2084 * A 32-bit unsigned integer (like a pid for example) can be
2085 * expressed in 10 or fewer decimal digits. Make sure that we'll
2086 * have enough space for the provider name.
2087 */
2088 if (strlen(dhpv->dthpv_provname) + 10 >=
2089 sizeof (provider->ftp_name)) {
2090 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2091 "name too long to accomodate pid", dhpv->dthpv_provname);
2092 return (NULL);
2093 }
2094
2095 /*
2096 * Don't let folks spoof the true pid provider.
2097 */
2098 if (strncmp(dhpv->dthpv_provname, FASTTRAP_PID_NAME, sizeof(FASTTRAP_PID_NAME)) == 0) {
2099 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2100 "%s is an invalid name", dhpv->dthpv_provname,
2101 FASTTRAP_PID_NAME);
2102 return (NULL);
2103 }
2104
2105 /*
2106 * APPLE NOTE: We also need to check the objc and oneshot pid provider types
2107 */
2108 if (strncmp(dhpv->dthpv_provname, FASTTRAP_OBJC_NAME, sizeof(FASTTRAP_OBJC_NAME)) == 0) {
2109 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2110 "%s is an invalid name", dhpv->dthpv_provname,
2111 FASTTRAP_OBJC_NAME);
2112 return (NULL);
2113 }
2114 if (strncmp(dhpv->dthpv_provname, FASTTRAP_ONESHOT_NAME, sizeof(FASTTRAP_ONESHOT_NAME)) == 0) {
2115 cmn_err(CE_WARN, "failed to instantiate provider %s: "
2116 "%s is an invalid name", dhpv->dthpv_provname,
2117 FASTTRAP_ONESHOT_NAME);
2118 return (NULL);
2119 }
2120
2121 /*
2122 * The highest stability class that fasttrap supports is ISA; cap
2123 * the stability of the new provider accordingly.
2124 */
2125 if (dhpv->dthpv_pattr.dtpa_provider.dtat_class > DTRACE_CLASS_ISA)
2126 dhpv->dthpv_pattr.dtpa_provider.dtat_class = DTRACE_CLASS_ISA;
2127 if (dhpv->dthpv_pattr.dtpa_mod.dtat_class > DTRACE_CLASS_ISA)
2128 dhpv->dthpv_pattr.dtpa_mod.dtat_class = DTRACE_CLASS_ISA;
2129 if (dhpv->dthpv_pattr.dtpa_func.dtat_class > DTRACE_CLASS_ISA)
2130 dhpv->dthpv_pattr.dtpa_func.dtat_class = DTRACE_CLASS_ISA;
2131 if (dhpv->dthpv_pattr.dtpa_name.dtat_class > DTRACE_CLASS_ISA)
2132 dhpv->dthpv_pattr.dtpa_name.dtat_class = DTRACE_CLASS_ISA;
2133 if (dhpv->dthpv_pattr.dtpa_args.dtat_class > DTRACE_CLASS_ISA)
2134 dhpv->dthpv_pattr.dtpa_args.dtat_class = DTRACE_CLASS_ISA;
2135
2136 if ((provider = fasttrap_provider_lookup(p, DTFTP_PROVIDER_USDT, dhpv->dthpv_provname,
2137 &dhpv->dthpv_pattr)) == NULL) {
2138 cmn_err(CE_WARN, "failed to instantiate provider %s for "
2139 "process %u", dhpv->dthpv_provname, (uint_t)proc_getpid(p));
2140 return (NULL);
2141 }
2142
2143 /*
2144 * APPLE NOTE!
2145 *
2146 * USDT probes (fasttrap meta probes) are very expensive to create.
2147 * Profiling has shown that the largest single cost is verifying that
2148 * dtrace hasn't already created a given meta_probe. The reason for
2149 * this is dtrace_match() often has to strcmp ~100 hashed entries for
2150 * each static probe being created. We want to get rid of that check.
2151 * The simplest way of eliminating it is to deny the ability to add
2152 * probes to an existing provider. If the provider already exists, BZZT!
2153 * This still leaves the possibility of intentionally malformed DOF
2154 * having duplicate probes. However, duplicate probes are not fatal,
2155 * and there is no way to get that by accident, so we will not check
2156 * for that case.
2157 *
2158 * UPDATE: It turns out there are several use cases that require adding
2159 * probes to existing providers. Disabling the dtrace_probe_lookup()
2160 * optimization for now. See APPLE NOTE in fasttrap_meta_create_probe.
2161 */
2162
2163 /*
2164 * Up the meta provider count so this provider isn't removed until
2165 * the meta provider has been told to remove it.
2166 */
2167 provider->ftp_mcount++;
2168
2169 lck_mtx_unlock(&provider->ftp_mtx);
2170
2171 return (provider);
2172 }
2173
/*
 * dtms_create_probe entry point: build one USDT probe from a helper probe
 * description.  Allocates a fasttrap_probe_t with one tracepoint per probe
 * offset and per is-enabled offset, applies the architecture-specific
 * address fixups, and registers the finished probe with the DTrace
 * framework.  Silently returns (creating nothing) if the offsets are not
 * unique or the global tracepoint budget is exhausted.
 */
/*ARGSUSED*/
static void
fasttrap_meta_create_probe(void *arg, void *parg,
    dtrace_helper_probedesc_t *dhpb)
{
#pragma unused(arg)
	fasttrap_provider_t *provider = parg;
	fasttrap_probe_t *pp;
	fasttrap_tracepoint_t *tp;
	unsigned int i, j;
	uint32_t ntps;

	/*
	 * Since the meta provider count is non-zero we don't have to worry
	 * about this provider disappearing.
	 */
	ASSERT(provider->ftp_mcount > 0);

	/*
	 * The offsets must be unique.
	 */
	qsort(dhpb->dthpb_offs, dhpb->dthpb_noffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_noffs; i++) {
		/* After sorting, any duplicate appears as a non-increasing pair. */
		if (dhpb->dthpb_base + dhpb->dthpb_offs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_offs[i - 1])
			return;
	}

	/* Same uniqueness requirement for the is-enabled offsets. */
	qsort(dhpb->dthpb_enoffs, dhpb->dthpb_nenoffs, sizeof (uint32_t),
	    fasttrap_uint32_cmp);
	for (i = 1; i < dhpb->dthpb_nenoffs; i++) {
		if (dhpb->dthpb_base + dhpb->dthpb_enoffs[i] <=
		    dhpb->dthpb_base + dhpb->dthpb_enoffs[i - 1])
			return;
	}

	/*
	 * Grab the creation lock to ensure consistency between calls to
	 * dtrace_probe_lookup() and dtrace_probe_create() in the face of
	 * other threads creating probes.
	 */
	lck_mtx_lock(&provider->ftp_cmtx);

#if 0
	/*
	 * APPLE NOTE: This is hideously expensive. See note in
	 * fasttrap_meta_provide() for why we can get away without
	 * checking here.
	 */
	if (dtrace_probe_lookup(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name) != 0) {
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}
#endif

	/* One tracepoint per regular offset plus one per is-enabled offset. */
	ntps = dhpb->dthpb_noffs + dhpb->dthpb_nenoffs;
	ASSERT(ntps > 0);

	/* Reserve our tracepoints against the global budget. */
	os_atomic_add(&fasttrap_total, ntps, relaxed);

	if (fasttrap_total > fasttrap_max) {
		/* Over budget: back out the reservation and give up. */
		os_atomic_sub(&fasttrap_total, ntps, relaxed);
		lck_mtx_unlock(&provider->ftp_cmtx);
		return;
	}

	provider->ftp_pcount += ntps;

	/*
	 * fasttrap_probe_t is variable-length; small counts come from a
	 * per-size zone, larger ones from the general kernel allocator.
	 */
	if (ntps < FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS) {
		pp = zalloc_flags(fasttrap_probe_t_zones[ntps], Z_WAITOK | Z_ZERO);
	} else {
		pp = kmem_zalloc(offsetof(fasttrap_probe_t, ftp_tps[ntps]), KM_SLEEP);
	}

	pp->ftp_prov = provider;
	pp->ftp_pid = provider->ftp_pid;
	pp->ftp_ntps = ntps;
	pp->ftp_nargs = dhpb->dthpb_xargc;
	pp->ftp_xtypes = dhpb->dthpb_xtypes;
	pp->ftp_ntypes = dhpb->dthpb_ntypes;

	/*
	 * First create a tracepoint for each actual point of interest.
	 */
	for (i = 0; i < dhpb->dthpb_noffs; i++) {
		tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go back one byte, to point at the first NOP
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i] - 1;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_offs[i]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_OFFSETS;
	}

	/*
	 * Then create a tracepoint for each is-enabled point.
	 *
	 * Note: i deliberately continues from the loop above so it keeps
	 * indexing ftp_tps[]; j separately indexes dthpb_enoffs[].
	 */
	for (j = 0; i < ntps; i++, j++) {
		tp = zalloc_flags(fasttrap_tracepoint_t_zone, Z_WAITOK | Z_ZERO);

		tp->ftt_proc = provider->ftp_proc;

		/*
		 * APPLE NOTE: We have linker support when creating DOF to handle all relocations for us.
		 * Unfortunately, a side effect of this is that the relocations do not point at exactly
		 * the location we want. We need to fix up the addresses here. The fixups vary by arch and type.
		 */
#if defined(__x86_64__)
		/*
		 * Both 32 & 64 bit want to go forward two bytes, to point at a single byte nop.
		 */
		tp->ftt_pc = dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j] + 2;
#elif defined(__arm__) || defined(__arm64__)
		/*
		 * All ARM and ARM64 probes are zero offset. We need to zero out the
		 * thumb bit because we still support 32bit user processes.
		 * On 64bit user processes, bit zero won't be set anyway.
		 */
		tp->ftt_pc = (dhpb->dthpb_base + (int64_t)dhpb->dthpb_enoffs[j]) & ~0x1UL;
		tp->ftt_fntype = FASTTRAP_FN_USDT;
#else
#error "Architecture not supported"
#endif

		tp->ftt_pid = provider->ftp_pid;

		pp->ftp_tps[i].fit_tp = tp;
		pp->ftp_tps[i].fit_id.fti_probe = pp;
		pp->ftp_tps[i].fit_id.fti_ptype = DTFTP_IS_ENABLED;
	}

	/*
	 * If the arguments are shuffled around we set the argument remapping
	 * table. Later, when the probe fires, we only remap the arguments
	 * if the table is non-NULL.
	 */
	for (i = 0; i < dhpb->dthpb_xargc; i++) {
		if (dhpb->dthpb_args[i] != i) {
			pp->ftp_argmap = dhpb->dthpb_args;
			break;
		}
	}

	/*
	 * The probe is fully constructed -- register it with DTrace.
	 */
	pp->ftp_id = dtrace_probe_create(provider->ftp_provid, dhpb->dthpb_mod,
	    dhpb->dthpb_func, dhpb->dthpb_name, FASTTRAP_OFFSET_AFRAMES, pp);

	lck_mtx_unlock(&provider->ftp_cmtx);
}
2351
/*
 * dtms_remove_proc entry point: called when the helper provider is being
 * torn down (process exit or DOF removal).
 */
/*ARGSUSED*/
static void
fasttrap_meta_remove(void *arg, dtrace_helper_provdesc_t *dhpv, proc_t *p)
{
#pragma unused(arg)
	/*
	 * Clean up the USDT provider. There may be active consumers of the
	 * provider busy adding probes, no damage will actually befall the
	 * provider until that count has dropped to zero. This just puts
	 * the provider on death row.
	 */
	fasttrap_provider_retire(p, dhpv->dthpv_provname, 1);
}
2365
2366 static char*
fasttrap_meta_provider_name(void * arg)2367 fasttrap_meta_provider_name(void *arg)
2368 {
2369 fasttrap_provider_t *fprovider = arg;
2370 dtrace_provider_t *provider = (dtrace_provider_t*)(fprovider->ftp_provid);
2371 return provider->dtpv_name;
2372 }
2373
/*
 * Meta-provider operations vector handed to dtrace_meta_register() (see
 * fasttrap_attach()) so the DTrace framework can drive USDT provider and
 * probe creation through fasttrap.
 */
static dtrace_mops_t fasttrap_mops = {
	.dtms_create_probe = fasttrap_meta_create_probe,
	.dtms_provide_proc = fasttrap_meta_provide,
	.dtms_remove_proc = fasttrap_meta_remove,
	.dtms_provider_name = fasttrap_meta_provider_name
};
2380
/*
 * Validate a null-terminated string. If str is not null-terminated,
 * or not a UTF8 valid string, the function returns -1. Otherwise, 0 is
 * returned.
 *
 * str: string to validate.
 * maxlen: maximal length of the string, null-terminated byte included.
 */
static int
fasttrap_validatestr(char const* str, size_t maxlen) {
	assert(str);
	assert(maxlen != 0);

	/* A terminator must appear strictly inside the first maxlen bytes. */
	size_t len = strnlen(str, maxlen);
	if (len >= maxlen)
		return -1;

	/* The string is terminated; now verify it is well-formed UTF8. */
	return utf8_validatestr((unsigned const char*) str, len);
}
2404
/*
 * Checks that provided credentials are allowed to debug target process.
 *
 * On entry the caller holds a reference on p (e.g. from proc_find()).
 * This function consumes that reference on every path, so the caller must
 * NOT proc_rele() p afterwards.  Returns 0 if debugging is permitted, or
 * an errno (EPERM, EBUSY, ESRCH, or a MAC framework error) otherwise.
 */
static int
fasttrap_check_cred_priv(cred_t *cr, proc_t *p)
{
	int err = 0;

	/* Only root can use DTrace. */
	if (!kauth_cred_issuser(cr)) {
		err = EPERM;
		goto out;
	}

	/* Process is marked as no attach. */
	if (ISSET(p->p_lflag, P_LNOATTACH)) {
		err = EBUSY;
		goto out;
	}

#if CONFIG_MACF
	/* Check with MAC framework when enabled. */
	struct proc_ident cur_ident = proc_ident(current_proc());
	struct proc_ident p_ident = proc_ident(p);

	/* Do not hold ref to proc here to avoid deadlock. */
	proc_rele(p);
	err = mac_proc_check_debug(&cur_ident, cr, &p_ident);

	/*
	 * Reacquire the reference by identity; the process may have exited
	 * while we were in the MAC check, in which case report ESRCH and
	 * skip the release below (we no longer hold a ref).
	 */
	if (proc_find_ident(&p_ident) == PROC_NULL) {
		err = ESRCH;
		goto out_no_proc;
	}
#endif /* CONFIG_MACF */

out:
	proc_rele(p);

out_no_proc:
	return err;
}
2446
2447 /*ARGSUSED*/
2448 static int
fasttrap_ioctl(dev_t dev,u_long cmd,user_addr_t arg,int md,cred_t * cr,int * rv)2449 fasttrap_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
2450 {
2451 #pragma unused(dev, md, rv)
2452 if (!dtrace_attached())
2453 return (EAGAIN);
2454
2455 if (cmd == FASTTRAPIOC_MAKEPROBE) {
2456 fasttrap_probe_spec_t *probe;
2457 uint64_t noffs;
2458 size_t size;
2459 int ret;
2460
2461 if (copyin(arg + __offsetof(fasttrap_probe_spec_t, ftps_noffs), &noffs,
2462 sizeof (probe->ftps_noffs)))
2463 return (EFAULT);
2464
2465 /*
2466 * Probes must have at least one tracepoint.
2467 */
2468 if (noffs == 0)
2469 return (EINVAL);
2470
2471 /*
2472 * We want to check the number of noffs before doing
2473 * sizing math, to prevent potential buffer overflows.
2474 */
2475 if (noffs > ((1024 * 1024) - sizeof(fasttrap_probe_spec_t)) / sizeof(probe->ftps_offs[0]))
2476 return (ENOMEM);
2477
2478 size = sizeof (fasttrap_probe_spec_t) +
2479 sizeof (probe->ftps_offs[0]) * (noffs - 1);
2480
2481 probe = kmem_alloc(size, KM_SLEEP);
2482
2483 if (copyin(arg, probe, size) != 0 ||
2484 probe->ftps_noffs != noffs) {
2485 kmem_free(probe, size);
2486 return (EFAULT);
2487 }
2488
2489 /*
2490 * Verify that the function and module strings contain no
2491 * funny characters.
2492 */
2493
2494 if (fasttrap_validatestr(probe->ftps_func, sizeof(probe->ftps_func)) != 0) {
2495 ret = EINVAL;
2496 goto err;
2497 }
2498
2499 if (fasttrap_validatestr(probe->ftps_mod, sizeof(probe->ftps_mod)) != 0) {
2500 ret = EINVAL;
2501 goto err;
2502 }
2503
2504 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2505 proc_t *p;
2506 pid_t pid = probe->ftps_pid;
2507
2508 /*
2509 * Report an error if the process doesn't exist
2510 * or is actively being birthed.
2511 */
2512 if ((p = proc_find(pid)) == PROC_NULL || p->p_stat == SIDL) {
2513 if (p != PROC_NULL)
2514 proc_rele(p);
2515 ret = ESRCH;
2516 goto err;
2517 }
2518
2519 ret = fasttrap_check_cred_priv(cr, p);
2520 if (ret != 0) {
2521 goto err;
2522 }
2523 }
2524
2525 ret = fasttrap_add_probe(probe);
2526
2527 err:
2528 kmem_free(probe, size);
2529
2530 return (ret);
2531
2532 } else if (cmd == FASTTRAPIOC_GETINSTR) {
2533 fasttrap_instr_query_t instr;
2534 fasttrap_tracepoint_t *tp;
2535 uint_t index;
2536 int ret;
2537
2538 if (copyin(arg, &instr, sizeof (instr)) != 0)
2539 return (EFAULT);
2540
2541 if (!PRIV_POLICY_CHOICE(cr, PRIV_ALL, B_FALSE)) {
2542 proc_t *p;
2543 pid_t pid = instr.ftiq_pid;
2544
2545 /*
2546 * Report an error if the process doesn't exist
2547 * or is actively being birthed.
2548 */
2549 if ((p = proc_find(pid)) == NULL || p->p_stat == SIDL) {
2550 if (p != PROC_NULL)
2551 proc_rele(p);
2552 return (ESRCH);
2553 }
2554
2555 ret = fasttrap_check_cred_priv(cr, p);
2556 if (ret != 0) {
2557 return (ret);
2558 }
2559 }
2560
2561 index = FASTTRAP_TPOINTS_INDEX(instr.ftiq_pid, instr.ftiq_pc);
2562
2563 lck_mtx_lock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2564 tp = fasttrap_tpoints.fth_table[index].ftb_data;
2565 while (tp != NULL) {
2566 if (instr.ftiq_pid == tp->ftt_pid &&
2567 instr.ftiq_pc == tp->ftt_pc &&
2568 tp->ftt_proc->ftpc_acount != 0)
2569 break;
2570
2571 tp = tp->ftt_next;
2572 }
2573
2574 if (tp == NULL) {
2575 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2576 return (ENOENT);
2577 }
2578
2579 bcopy(&tp->ftt_instr, &instr.ftiq_instr,
2580 sizeof (instr.ftiq_instr));
2581 lck_mtx_unlock(&fasttrap_tpoints.fth_table[index].ftb_mtx);
2582
2583 if (copyout(&instr, arg, sizeof (instr)) != 0)
2584 return (EFAULT);
2585
2586 return (0);
2587 }
2588
2589 return (EINVAL);
2590 }
2591
/*
 * One-time initialization of the fasttrap subsystem: installs the
 * fork/exec/exit hooks, sizes the global tracepoint budget, builds the
 * tracepoint/provider/proc hash tables, and registers the USDT meta
 * provider with the DTrace framework.  Called from fasttrap_init().
 */
static void
fasttrap_attach(void)
{
	ulong_t nent;
	unsigned int i;

	/*
	 * Install our hooks into fork(2), exec(2), and exit(2).
	 * Note that exec and exit share the same teardown handler.
	 */
	dtrace_fasttrap_fork_ptr = &fasttrap_fork;
	dtrace_fasttrap_exit_ptr = &fasttrap_exec_exit;
	dtrace_fasttrap_exec_ptr = &fasttrap_exec_exit;

	/*
	 * APPLE NOTE: We size the maximum number of fasttrap probes
	 * based on system memory. 100k probes per 256M of system memory.
	 * Yes, this is a WAG.
	 */
	fasttrap_max = (sane_size >> 28) * 100000;

	/* Floor for machines with less than 256M (shift yields 0). */
	if (fasttrap_max == 0)
		fasttrap_max = 50000;

	fasttrap_total = 0;
	fasttrap_retired = 0;

	/*
	 * Conjure up the tracepoints hashtable...
	 */
#ifdef illumos
	nent = ddi_getprop(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
	    "fasttrap-hash-size", FASTTRAP_TPOINTS_DEFAULT_SIZE);
#else
	nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;
#endif

	/* Guard against zero or absurdly large table sizes. */
	if (nent <= 0 || nent > 0x1000000)
		nent = FASTTRAP_TPOINTS_DEFAULT_SIZE;

	/*
	 * Force the table size to a power of two so fth_mask can be used
	 * as an index mask.
	 */
	if ((nent & (nent - 1)) == 0)
		fasttrap_tpoints.fth_nent = nent;
	else
		fasttrap_tpoints.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_tpoints.fth_nent > 0);
	fasttrap_tpoints.fth_mask = fasttrap_tpoints.fth_nent - 1;
	fasttrap_tpoints.fth_table = kmem_zalloc(fasttrap_tpoints.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_tpoints.fth_table != NULL);

	/* Each bucket gets its own mutex. */
	for (i = 0; i < fasttrap_tpoints.fth_nent; i++) {
		lck_mtx_init(&fasttrap_tpoints.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
		    &fasttrap_lck_attr);
	}

	/*
	 * ... and the providers hash table...
	 */
	nent = FASTTRAP_PROVIDERS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_provs.fth_nent = nent;
	else
		fasttrap_provs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_provs.fth_nent > 0);
	fasttrap_provs.fth_mask = fasttrap_provs.fth_nent - 1;
	fasttrap_provs.fth_table = kmem_zalloc(fasttrap_provs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_provs.fth_table != NULL);

	for (i = 0; i < fasttrap_provs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_provs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
		    &fasttrap_lck_attr);
	}

	/*
	 * ... and the procs hash table.
	 */
	nent = FASTTRAP_PROCS_DEFAULT_SIZE;
	if ((nent & (nent - 1)) == 0)
		fasttrap_procs.fth_nent = nent;
	else
		fasttrap_procs.fth_nent = 1 << fasttrap_highbit(nent);
	ASSERT(fasttrap_procs.fth_nent > 0);
	fasttrap_procs.fth_mask = fasttrap_procs.fth_nent - 1;
	fasttrap_procs.fth_table = kmem_zalloc(fasttrap_procs.fth_nent *
	    sizeof (fasttrap_bucket_t), KM_SLEEP);
	ASSERT(fasttrap_procs.fth_table != NULL);

#ifndef illumos
	for (i = 0; i < fasttrap_procs.fth_nent; i++) {
		lck_mtx_init(&fasttrap_procs.fth_table[i].ftb_mtx, &fasttrap_lck_grp,
		    &fasttrap_lck_attr);
	}
#endif

	/* Register as the USDT meta provider; ops in fasttrap_mops above. */
	(void) dtrace_meta_register("fasttrap", &fasttrap_mops, NULL,
	    &fasttrap_meta_id);
}
2689
/*
 * open(2) entry point for /dev/fasttrap.  No per-open state is needed,
 * so opening always succeeds.
 */
static int
_fasttrap_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev, flags, devtype, p)
	return 0;
}
2696
2697 static int
_fasttrap_ioctl(dev_t dev,u_long cmd,caddr_t data,int fflag,struct proc * p)2698 _fasttrap_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
2699 {
2700 int err, rv = 0;
2701 user_addr_t uaddrp;
2702
2703 if (proc_is64bit(p)) {
2704 uaddrp = *(user_addr_t *)data;
2705 } else {
2706 uaddrp = (user_addr_t) *(uint32_t *)data;
2707 }
2708
2709 err = fasttrap_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
2710
2711 /* XXX Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
2712 if (err != 0) {
2713 ASSERT( (err & 0xfffff000) == 0 );
2714 return (err & 0xfff); /* ioctl returns -1 and errno set to an error code < 4096 */
2715 } else if (rv != 0) {
2716 ASSERT( (rv & 0xfff00000) == 0 );
2717 return (((rv & 0xfffff) << 12)); /* ioctl returns -1 and errno set to a return value >= 4096 */
2718 } else
2719 return 0;
2720 }
2721
2722 static int fasttrap_inited = 0;
2723
2724 #define FASTTRAP_MAJOR -24 /* let the kernel pick the device number */
2725
/*
 * Character device switch for /dev/fasttrap.  Only open and ioctl do real
 * work; every other operation is stubbed out with the standard error or
 * null handlers.
 */
static const struct cdevsw fasttrap_cdevsw =
{
	.d_open = _fasttrap_open,
	.d_close = eno_opcl,
	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_ioctl = _fasttrap_ioctl,
	.d_stop = (stop_fcn_t *)nulldev,
	.d_reset = (reset_fcn_t *)nulldev,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
};
2741
2742 void fasttrap_init(void);
2743
void
fasttrap_init( void )
{
	/*
	 * This method is now invoked from multiple places. Any open of /dev/dtrace,
	 * also dtrace_init if the dtrace_dof_mode is DTRACE_DOF_MODE_NON_LAZY.
	 *
	 * The reason is to delay allocating the (rather large) resources as late as possible.
	 */
	if (!fasttrap_inited) {
		int majdevno = cdevsw_add(FASTTRAP_MAJOR, &fasttrap_cdevsw);

		if (majdevno < 0) {
			// FIX ME! What kind of error reporting to do here?
			printf("fasttrap_init: failed to allocate a major number!\n");
			return;
		}

		/*
		 * Publish the /dev/fasttrap node.  NOTE(review): on failure we
		 * return with fasttrap_inited still 0 and the cdevsw entry
		 * still registered; a later call will add it again -- confirm
		 * whether this path is reachable in practice.
		 */
		dev_t device = makedev( (uint32_t)majdevno, 0 );
		if (NULL == devfs_make_node( device, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666, "fasttrap" )) {
			return;
		}

		/*
		 * fasttrap_probe_t's are variable in size. We use an array of zones to
		 * cover the most common sizes.
		 *
		 * The loop starts at 1: index i is the zone for probes with i
		 * tracepoints, so slot 0 is never used.
		 */
		int i;
		for (i=1; i<FASTTRAP_PROBE_T_ZONE_MAX_TRACEPOINTS; i++) {
			fasttrap_probe_t_zones[i] =
			    zone_create(fasttrap_probe_t_zone_names[i],
			    offsetof(fasttrap_probe_t, ftp_tps[i]), ZC_NONE);
		}


		/* Hooks, hash tables, and meta-provider registration. */
		fasttrap_attach();

		/*
		 * Start the fasttrap cleanup thread
		 */
		kern_return_t res = kernel_thread_start_priority((thread_continue_t)fasttrap_pid_cleanup_cb, NULL, 46 /* BASEPRI_BACKGROUND */, &fasttrap_cleanup_thread);
		if (res != KERN_SUCCESS) {
			panic("Could not create fasttrap_cleanup_thread");
		}
		thread_set_thread_name(fasttrap_cleanup_thread, "dtrace_fasttrap_cleanup_thread");

		/* Preallocate the retired-tracepoint spec buffer. */
		fasttrap_retired_size = DEFAULT_RETIRED_SIZE;
		fasttrap_retired_spec = kmem_zalloc(fasttrap_retired_size * sizeof(*fasttrap_retired_spec),
		    KM_SLEEP);

		fasttrap_inited = 1;
	}
}
2797
2798 #undef FASTTRAP_MAJOR
2799