xref: /xnu-8792.81.2/bsd/dev/dtrace/dtrace.c (revision 19c3b8c28c31cb8130e034cfb5df6bf9ba342d90)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Portions Copyright (c) 2013, 2016, Joyent, Inc. All rights reserved.
24  * Portions Copyright (c) 2013 by Delphix. All rights reserved.
25  */
26 
27 /*
28  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
29  * Use is subject to license terms.
30  */
31 
32 /*
33  * DTrace - Dynamic Tracing for Solaris
34  *
35  * This is the implementation of the Solaris Dynamic Tracing framework
36  * (DTrace).  The user-visible interface to DTrace is described at length in
37  * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
38  * library, the in-kernel DTrace framework, and the DTrace providers are
39  * described in the block comments in the <sys/dtrace.h> header file.  The
40  * internal architecture of DTrace is described in the block comments in the
41  * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
42  * implementation very much assume mastery of all of these sources; if one has
43  * an unanswered question about the implementation, one should consult them
44  * first.
45  *
46  * The functions here are ordered roughly as follows:
47  *
48  *   - Probe context functions
49  *   - Probe hashing functions
50  *   - Non-probe context utility functions
51  *   - Matching functions
52  *   - Provider-to-Framework API functions
53  *   - Probe management functions
54  *   - DIF object functions
55  *   - Format functions
56  *   - Predicate functions
57  *   - ECB functions
58  *   - Buffer functions
59  *   - Enabling functions
60  *   - DOF functions
61  *   - Anonymous enabling functions
62  *   - Process functions
63  *   - Consumer state functions
64  *   - Helper functions
65  *   - Hook functions
66  *   - Driver cookbook functions
67  *
68  * Each group of functions begins with a block comment labelled the "DTrace
69  * [Group] Functions", allowing one to find each block by searching forward
70  * on capital-f functions.
71  */
72 #include <sys/errno.h>
73 #include <sys/types.h>
74 #include <sys/stat.h>
75 #include <sys/conf.h>
76 #include <sys/random.h>
77 #include <sys/systm.h>
78 #include <sys/dtrace_impl.h>
79 #include <sys/param.h>
80 #include <sys/proc_internal.h>
81 #include <sys/ioctl.h>
82 #include <sys/fcntl.h>
83 #include <miscfs/devfs/devfs.h>
84 #include <sys/malloc.h>
85 #include <sys/kernel_types.h>
86 #include <sys/proc_internal.h>
87 #include <sys/uio_internal.h>
88 #include <sys/kauth.h>
89 #include <vm/pmap.h>
90 #include <sys/user.h>
91 #include <mach/exception_types.h>
92 #include <sys/signalvar.h>
93 #include <mach/task.h>
94 #include <kern/ast.h>
95 #include <kern/hvg_hypercall.h>
96 #include <kern/sched_prim.h>
97 #include <kern/processor.h>
98 #include <kern/task.h>
99 #include <kern/zalloc.h>
100 #include <netinet/in.h>
101 #include <libkern/sysctl.h>
102 #include <sys/kdebug.h>
103 #include <sys/sdt_impl.h>
104 
105 #if CONFIG_PERVASIVE_CPI
106 #include <kern/monotonic.h>
107 #include <machine/monotonic.h>
108 #endif /* CONFIG_PERVASIVE_CPI */
109 
110 #include "dtrace_xoroshiro128_plus.h"
111 
112 #include <IOKit/IOPlatformExpert.h>
113 
114 #include <kern/cpu_data.h>
115 
116 extern addr64_t kvtophys(vm_offset_t va);
117 
118 extern uint32_t pmap_find_phys(void *, uint64_t);
119 extern boolean_t pmap_valid_page(uint32_t);
120 extern void OSKextRegisterKextsWithDTrace(void);
121 extern kmod_info_t g_kernel_kmod_info;
122 extern void commpage_update_dof(boolean_t enabled);
123 
124 /* Solaris proc_t is the struct. Darwin's proc_t is a pointer to it. */
125 #define proc_t struct proc /* Steer clear of the Darwin typedef for proc_t */
126 
127 #define t_predcache t_dtrace_predcache /* Cosmetic. Helps readability of thread.h */
128 
129 extern void dtrace_suspend(void);
130 extern void dtrace_resume(void);
131 extern void dtrace_early_init(void);
132 extern int dtrace_keep_kernel_symbols(void);
133 extern void dtrace_init(void);
134 extern void helper_init(void);
135 extern void fasttrap_init(void);
136 
137 static int  dtrace_lazy_dofs_duplicate(proc_t *, proc_t *);
138 extern void dtrace_lazy_dofs_destroy(proc_t *);
139 extern void dtrace_postinit(void);
140 
141 extern void dtrace_proc_fork(proc_t*, proc_t*, int);
142 extern void dtrace_proc_exec(proc_t*);
143 extern void dtrace_proc_exit(proc_t*);
144 
145 /*
146  * DTrace Tunable Variables
147  *
148  * The following variables may be dynamically tuned by using sysctl(8), the
149  * variables being stored in the kern.dtrace namespace.  For example:
150  * 	sysctl kern.dtrace.dof_maxsize = 1048575 	# 1M
151  *
152  * In general, the only variables that one should be tuning this way are those
153  * that affect system-wide DTrace behavior, and for which the default behavior
154  * is undesirable.  Most of these variables are tunable on a per-consumer
155  * basis using DTrace options, and need not be tuned on a system-wide basis.
156  * When tuning these variables, avoid pathological values; while some attempt
157  * is made to verify the integrity of these variables, they are not considered
158  * part of the supported interface to DTrace, and they are therefore not
159  * checked comprehensively.
160  */
161 uint64_t	dtrace_buffer_memory_maxsize = 0;		/* initialized in dtrace_init */
162 uint64_t	dtrace_buffer_memory_inuse = 0;
163 int		dtrace_destructive_disallow = 1;
164 dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
165 size_t		dtrace_difo_maxsize = (256 * 1024);
166 dtrace_optval_t	dtrace_dof_maxsize = (512 * 1024);
167 dtrace_optval_t	dtrace_statvar_maxsize = (16 * 1024);
168 dtrace_optval_t	dtrace_statvar_maxsize_max = (16 * 10 * 1024);
169 size_t		dtrace_actions_max = (16 * 1024);
170 size_t		dtrace_retain_max = 1024;
171 dtrace_optval_t	dtrace_helper_actions_max = 32;
172 dtrace_optval_t	dtrace_helper_providers_max = 64;
173 dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
174 size_t		dtrace_strsize_default = 256;
175 dtrace_optval_t	dtrace_strsize_min = 8;
176 dtrace_optval_t	dtrace_strsize_max = 65536;
177 dtrace_optval_t	dtrace_cleanrate_default = 990099000;		/* 1.1 hz */
178 dtrace_optval_t	dtrace_cleanrate_min = 20000000;			/* 50 hz */
179 dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
180 dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
181 dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
182 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	 /* 6/minute */
183 dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
184 dtrace_optval_t	dtrace_nspec_default = 1;
185 dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
186 dtrace_optval_t dtrace_stackframes_default = 20;
187 dtrace_optval_t dtrace_ustackframes_default = 20;
188 dtrace_optval_t dtrace_jstackframes_default = 50;
189 dtrace_optval_t dtrace_jstackstrsize_default = 512;
190 dtrace_optval_t dtrace_buflimit_default = 75;
191 dtrace_optval_t dtrace_buflimit_min = 1;
192 dtrace_optval_t dtrace_buflimit_max = 99;
193 size_t		dtrace_nprobes_default = 4;
194 int		dtrace_msgdsize_max = 128;
195 hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
196 hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
197 int		dtrace_devdepth_max = 32;
198 int		dtrace_err_verbose;
199 hrtime_t	dtrace_deadman_interval = NANOSEC;
200 hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
201 hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
202 
203 /*
204  * DTrace External Variables
205  *
206  * As dtrace(7D) is a kernel module, any DTrace variables are obviously
207  * available to DTrace consumers via the backtick (`) syntax.  One of these,
208  * dtrace_zero, is made deliberately so:  it is provided as a source of
209  * well-known, zero-filled memory.  While this variable is not documented,
210  * it is used by some translators as an implementation detail.
211  */
212 const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */
213 unsigned int	dtrace_max_cpus = 0;		/* number of enabled cpus */
214 /*
215  * DTrace Internal Variables
216  */
217 static dev_info_t	*dtrace_devi;		/* device info */
218 static vmem_t		*dtrace_arena;		/* probe ID arena */
219 static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
220 static int		dtrace_nprobes;		/* number of probes */
221 static dtrace_provider_t *dtrace_provider;	/* provider list */
222 static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
223 static int		dtrace_opens;		/* number of opens */
224 static int		dtrace_helpers;		/* number of helpers */
225 static dtrace_hash_t	*dtrace_strings;
226 static dtrace_hash_t	*dtrace_byprov;		/* probes hashed by provider */
227 static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
228 static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
229 static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
230 static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
231 static int		dtrace_toxranges;	/* number of toxic ranges */
232 static int		dtrace_toxranges_max;	/* size of toxic range array */
233 static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
234 static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
235 static kthread_t	*dtrace_panicked;	/* panicking thread */
236 static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
237 static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
238 static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
239 static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
240 static dtrace_genid_t   dtrace_retained_gen;    /* current retained enab gen */
241 static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
242 
243 static int		dtrace_dof_mode;	/* See dtrace_impl.h for a description of Darwin's dof modes. */
244 
245 			/*
246 			 * This doesn't quite fit as an internal variable, as it must be accessed in
247 			 * fbt_provide and sdt_provide. It's clearly not a DTrace tunable variable either...
248 			 */
249 int			dtrace_kernel_symbol_mode;	/* See dtrace_impl.h for a description of Darwin's kernel symbol modes. */
250 static uint32_t		dtrace_wake_clients;
251 static uint8_t      dtrace_kerneluuid[16];	/* the 128-bit uuid */
252 
253 /*
254  * To save memory, some common memory allocations are given a
255  * unique zone. For example, dtrace_probe_t is 72 bytes in size,
256  * which means it would fall into the kalloc.128 bucket. With
257  * 20k elements allocated, the space saved is substantial.
258  */
259 
260 static ZONE_DEFINE_TYPE(dtrace_probe_t_zone, "dtrace.dtrace_probe_t",
261     dtrace_probe_t, ZC_PGZ_USE_GUARDS);
262 
263 static ZONE_DEFINE(dtrace_state_pcpu_zone, "dtrace.dtrace_dstate_percpu_t",
264     sizeof(dtrace_dstate_percpu_t), ZC_PERCPU);
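/*
 * Illustrative sketch (not part of the build): with the dedicated zones above,
 * a probe is typically carved out of dtrace_probe_t_zone rather than a generic
 * kalloc bucket.  The zalloc()/zfree() calls below are the standard xnu zone
 * interfaces; see the probe management functions later in this file for the
 * real call sites.
 *
 *	dtrace_probe_t *probe = zalloc(dtrace_probe_t_zone);
 *	bzero(probe, sizeof (dtrace_probe_t));
 *	...
 *	zfree(dtrace_probe_t_zone, probe);
 */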
265 
266 static int dtrace_module_unloaded(struct kmod_info *kmod);
267 
268 /*
269  * DTrace Locking
270  * DTrace is protected by three (relatively coarse-grained) locks:
271  *
272  * (1) dtrace_lock is required to manipulate essentially any DTrace state,
273  *     including enabling state, probes, ECBs, consumer state, helper state,
274  *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
275  *     probe context is lock-free -- synchronization is handled via the
276  *     dtrace_sync() cross call mechanism.
277  *
278  * (2) dtrace_provider_lock is required when manipulating provider state, or
279  *     when provider state must be held constant.
280  *
281  * (3) dtrace_meta_lock is required when manipulating meta provider state, or
282  *     when meta provider state must be held constant.
283  *
284  * The lock ordering between these three locks is dtrace_meta_lock before
285  * dtrace_provider_lock before dtrace_lock.  (In particular, there are
286  * several places where dtrace_provider_lock is held by the framework as it
287  * calls into the providers -- which then call back into the framework,
288  * grabbing dtrace_lock.)
289  *
290  * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
291  * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
292  * role as a coarse-grained lock; it is acquired before both of these locks.
293  * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
294  * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
295  * mod_lock is similar with respect to dtrace_provider_lock in that it must be
296  * acquired _between_ dtrace_provider_lock and dtrace_lock.
297  */
298 
299 
300 /*
301  * APPLE NOTE:
302  *
303  * For porting purposes, all kmutex_t vars have been changed
304  * to lck_mtx_t, which require explicit initialization.
305  *
306  * kmutex_t becomes lck_mtx_t
307  * mutex_enter() becomes lck_mtx_lock()
308  * mutex_exit() becomes lck_mtx_unlock()
309  *
310  * Lock asserts are changed like this:
311  *
312  * ASSERT(MUTEX_HELD(&cpu_lock));
313  *	becomes:
314  * LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
315  *
316  */
317 static LCK_MTX_DECLARE_ATTR(dtrace_lock,
318     &dtrace_lck_grp, &dtrace_lck_attr);		/* probe state lock */
319 static LCK_MTX_DECLARE_ATTR(dtrace_provider_lock,
320     &dtrace_lck_grp, &dtrace_lck_attr);	/* provider state lock */
321 static LCK_MTX_DECLARE_ATTR(dtrace_meta_lock,
322     &dtrace_lck_grp, &dtrace_lck_attr);	/* meta-provider state lock */
323 static LCK_RW_DECLARE_ATTR(dtrace_dof_mode_lock,
324     &dtrace_lck_grp, &dtrace_lck_attr);	/* dof mode lock */
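/*
 * Illustrative sketch (not compiled): the ordering documented above means a
 * code path that needs all three framework locks must take them as follows,
 * using the lck_mtx_* calls that replace mutex_enter()/mutex_exit() on Darwin:
 *
 *	lck_mtx_lock(&dtrace_meta_lock);
 *	lck_mtx_lock(&dtrace_provider_lock);
 *	lck_mtx_lock(&dtrace_lock);
 *	...
 *	lck_mtx_unlock(&dtrace_lock);
 *	lck_mtx_unlock(&dtrace_provider_lock);
 *	lck_mtx_unlock(&dtrace_meta_lock);
 *
 * cpu_lock and mod_lock, when needed, interleave as described in the
 * "DTrace Locking" block comment above.
 */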
325 
326 /*
327  * DTrace Provider Variables
328  *
329  * These are the variables relating to DTrace as a provider (that is, the
330  * provider of the BEGIN, END, and ERROR probes).
331  */
332 static dtrace_pattr_t	dtrace_provider_attr = {
333 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
334 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
335 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
336 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
337 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
338 };
339 
340 static void
341 dtrace_provide_nullop(void *arg, const dtrace_probedesc_t *desc)
342 {
343 #pragma unused(arg, desc)
344 }
345 
346 static void
347 dtrace_provide_module_nullop(void *arg, struct modctl *ctl)
348 {
349 #pragma unused(arg, ctl)
350 }
351 
352 static int
353 dtrace_enable_nullop(void *arg, dtrace_id_t id, void *parg)
354 {
355 #pragma unused(arg, id, parg)
356     return (0);
357 }
358 
359 static void
360 dtrace_disable_nullop(void *arg, dtrace_id_t id, void *parg)
361 {
362 #pragma unused(arg, id, parg)
363 }
364 
365 static void
366 dtrace_suspend_nullop(void *arg, dtrace_id_t id, void *parg)
367 {
368 #pragma unused(arg, id, parg)
369 }
370 
371 static void
372 dtrace_resume_nullop(void *arg, dtrace_id_t id, void *parg)
373 {
374 #pragma unused(arg, id, parg)
375 }
376 
377 static void
378 dtrace_destroy_nullop(void *arg, dtrace_id_t id, void *parg)
379 {
380 #pragma unused(arg, id, parg)
381 }
382 
383 
384 static dtrace_pops_t dtrace_provider_ops = {
385 	.dtps_provide = dtrace_provide_nullop,
386 	.dtps_provide_module =	dtrace_provide_module_nullop,
387 	.dtps_enable =	dtrace_enable_nullop,
388 	.dtps_disable =	dtrace_disable_nullop,
389 	.dtps_suspend =	dtrace_suspend_nullop,
390 	.dtps_resume =	dtrace_resume_nullop,
391 	.dtps_getargdesc =	NULL,
392 	.dtps_getargval =	NULL,
393 	.dtps_usermode =	NULL,
394 	.dtps_destroy =	dtrace_destroy_nullop,
395 };
396 
397 static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
398 static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
399 dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */
400 
401 /*
402  * DTrace Helper Tracing Variables
403  */
404 uint32_t dtrace_helptrace_next = 0;
405 uint32_t dtrace_helptrace_nlocals;
406 char	*dtrace_helptrace_buffer;
407 size_t	dtrace_helptrace_bufsize = 512 * 1024;
408 
409 #if DEBUG
410 int	dtrace_helptrace_enabled = 1;
411 #else
412 int	dtrace_helptrace_enabled = 0;
413 #endif
414 
415 #if defined (__arm64__)
416 /*
417  * The ioctl for adding helper DOF is based on the
418  * size of a user_addr_t.  We need to recognize both
419  * U32 and U64 as the same action.
420  */
421 #define DTRACEHIOC_ADDDOF_U32       _IOW('h', 4, user32_addr_t)
422 #define DTRACEHIOC_ADDDOF_U64       _IOW('h', 4, user64_addr_t)
423 #endif  /* __arm64__ */
424 
425 /*
426  * DTrace Error Hashing
427  *
428  * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
429  * table.  This is very useful for checking coverage of tests that are
430  * expected to induce DIF or DOF processing errors, and may be useful for
431  * debugging problems in the DIF code generator or in DOF generation.  The
432  * error hash may be examined with the ::dtrace_errhash MDB dcmd.
433  */
434 #if DEBUG
435 static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
436 static const char *dtrace_errlast;
437 static kthread_t *dtrace_errthread;
438 static LCK_MTX_DECLARE_ATTR(dtrace_errlock, &dtrace_lck_grp, &dtrace_lck_attr);
439 #endif
440 
441 /*
442  * DTrace Macros and Constants
443  *
444  * These are various macros that are useful in various spots in the
445  * implementation, along with a few random constants that have no meaning
446  * outside of the implementation.  There is no real structure to this cpp
447  * mishmash -- but is there ever?
448  */
449 
450 #define	DTRACE_GETSTR(hash, elm)	\
451 	(hash->dth_getstr(elm, hash->dth_stroffs))
452 
453 #define	DTRACE_HASHSTR(hash, elm)	\
454 	dtrace_hash_str(DTRACE_GETSTR(hash, elm))
455 
456 #define	DTRACE_HASHNEXT(hash, elm)	\
457 	(void**)((uintptr_t)(elm) + (hash)->dth_nextoffs)
458 
459 #define	DTRACE_HASHPREV(hash, elm)	\
460 	(void**)((uintptr_t)(elm) + (hash)->dth_prevoffs)
461 
462 #define	DTRACE_HASHEQ(hash, lhs, rhs)	\
463 	(strcmp(DTRACE_GETSTR(hash, lhs), \
464 	    DTRACE_GETSTR(hash, rhs)) == 0)
465 
466 #define	DTRACE_AGGHASHSIZE_SLEW		17
467 
468 #define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)
469 
470 /*
471  * The key for a thread-local variable needs to be unique to a single
472  * thread over the lifetime of the system, and not overlap with any variable
473  * IDs. So we take thread's thread_id, a unique 64-bit number that is never
474  * reused after the thread exits, and add DIF_VARIABLE_MAX to it, which
475  * guarantees that it won’t overlap any variable IDs. We also want to treat
476  * running in interrupt context as independent of thread-context. So if
477  * interrupts are active, we set the 63rd bit, otherwise it’s cleared.
478  *
479  * This is necessary (but not sufficient) to assure that global associative
480  * arrays never collide with thread-local variables. To guarantee that they
481  * cannot collide, we must also define the order for keying dynamic variables.
482  *
483  * That order is:
484  *
485  *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
486  *
487  * Because the variable-key and the tls-key are in orthogonal spaces, there is
488  * no way for a global variable key signature to match a thread-local key
489  * signature.
490  */
491 #if defined (__x86_64__) || defined(__arm64__)
492 #define	DTRACE_TLS_THRKEY(where) {                                           \
493 	uint_t intr = ml_at_interrupt_context(); /* Note: just one measly bit */ \
494 	uint64_t thr = thread_tid(current_thread());                             \
495 	ASSERT(intr < 2);                                                        \
496 	(where) = ((thr + DIF_VARIABLE_MAX) & (~((uint64_t)1 << 63))) |          \
497 		((uint64_t)intr << 63);                                              \
498 }
499 #else
500 #error Unknown architecture
501 #endif
502 
503 #define	DT_BSWAP_8(x)	((x) & 0xff)
504 #define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
505 #define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
506 #define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
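/*
 * Worked example: DT_BSWAP_16(0x1234) evaluates to 0x3412 and
 * DT_BSWAP_32(0x12345678) to 0x78563412 -- a full byte-order reversal built
 * up from the 8-bit primitive.
 */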
507 
508 #define	DT_MASK_LO 0x00000000FFFFFFFFULL
509 
510 #define	DTRACE_STORE(type, tomax, offset, what) \
511 	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
512 
513 
514 #define	DTRACE_ALIGNCHECK(addr, size, flags)				\
515 	if (addr & (MIN(size,4) - 1)) {					\
516 		*flags |= CPU_DTRACE_BADALIGN;				\
517 		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
518 		return (0);						\
519 	}
520 
521 #define	DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz)		\
522 do {									\
523 	if ((remp) != NULL) {						\
524 		*(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr);	\
525 	}								\
526 } while (0)
527 
528 
529 /*
530  * Test whether a range of memory starting at testaddr of size testsz falls
531  * within the range of memory described by addr, sz.  We take care to avoid
532  * problems with overflow and underflow of the unsigned quantities, and
533  * disallow all negative sizes.  Ranges of size 0 are allowed.
534  */
535 #define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
536 	((testaddr) - (baseaddr) < (basesz) && \
537 	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
538 	(testaddr) + (testsz) >= (testaddr))
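/*
 * Worked example of the unsigned checks above: with baseaddr = 0x1000 and
 * basesz = 0x100, a testaddr of 0xff0 gives (0xff0 - 0x1000), which wraps to
 * a huge unsigned value and fails the first clause, so a range starting below
 * the region is rejected rather than accepted via wraparound.  Likewise, a
 * testaddr + testsz that overflows fails the final clause.
 */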
539 
540 /*
541  * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
542  * alloc_sz on the righthand side of the comparison in order to avoid overflow
543  * or underflow in the comparison with it.  This is simpler than the INRANGE
544  * check above, because we know that the dtms_scratch_ptr is valid in the
545  * range.  Allocations of size zero are allowed.
546  */
547 #define	DTRACE_INSCRATCH(mstate, alloc_sz) \
548 	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
549 	(mstate)->dtms_scratch_ptr >= (alloc_sz))
550 
551 #if defined (__x86_64__) || defined (__arm64__)
552 #define	DTRACE_LOADFUNC(bits)						\
553 /*CSTYLED*/								\
554 uint##bits##_t dtrace_load##bits(uintptr_t addr);			\
555 									\
556 extern int dtrace_nofault_copy##bits(uintptr_t, uint##bits##_t *);	\
557 									\
558 uint##bits##_t								\
559 dtrace_load##bits(uintptr_t addr)					\
560 {									\
561 	size_t size = bits / NBBY;					\
562 	/*CSTYLED*/							\
563 	uint##bits##_t rval = 0;					\
564 	int i;								\
565 	volatile uint16_t *flags = (volatile uint16_t *)		\
566 	    &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;			\
567 									\
568 	DTRACE_ALIGNCHECK(addr, size, flags);				\
569 									\
570 	for (i = 0; i < dtrace_toxranges; i++) {			\
571 		if (addr >= dtrace_toxrange[i].dtt_limit)		\
572 			continue;					\
573 									\
574 		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
575 			continue;					\
576 									\
577 		/*							\
578 		 * This address falls within a toxic region; return 0.	\
579 		 */							\
580 		*flags |= CPU_DTRACE_BADADDR;				\
581 		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
582 		return (0);						\
583 	}								\
584 									\
585 	{								\
586 	*flags |= CPU_DTRACE_NOFAULT;					\
587 	/*CSTYLED*/							\
588 	/*                                                              \
589 	* PR6394061 - avoid device memory that is unpredictably		\
590 	* mapped and unmapped                                   	\
591 	*/								\
592 	if (!pmap_valid_page(pmap_find_phys(kernel_pmap, addr)) ||	\
593 	    dtrace_nofault_copy##bits(addr, &rval)) {			\
594 		*flags |= CPU_DTRACE_BADADDR;				\
595 		cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr;	\
596 		return (0);						\
597 	}								\
598 									\
599 	*flags &= ~CPU_DTRACE_NOFAULT;					\
600 	}								\
601 									\
602 	return (rval);							\
603 }
604 #else /* all other architectures */
605 #error Unknown Architecture
606 #endif
607 
608 #ifdef __LP64__
609 #define	dtrace_loadptr	dtrace_load64
610 #else
611 #define	dtrace_loadptr	dtrace_load32
612 #endif
613 
614 #define	DTRACE_DYNHASH_FREE	0
615 #define	DTRACE_DYNHASH_SINK	1
616 #define	DTRACE_DYNHASH_VALID	2
617 
618 #define DTRACE_MATCH_FAIL       -1
619 #define	DTRACE_MATCH_NEXT	0
620 #define	DTRACE_MATCH_DONE	1
621 #define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
622 #define	DTRACE_STATE_ALIGN	64
623 
624 #define	DTRACE_FLAGS2FLT(flags)						\
625 	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
626 	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
627 	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
628 	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
629 	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
630 	((flags) & CPU_DTRACE_TUPOFLOW) ?  DTRACEFLT_TUPOFLOW :		\
631 	((flags) & CPU_DTRACE_BADALIGN) ?  DTRACEFLT_BADALIGN :		\
632 	((flags) & CPU_DTRACE_NOSCRATCH) ?  DTRACEFLT_NOSCRATCH :	\
633 	((flags) & CPU_DTRACE_BADSTACK) ?  DTRACEFLT_BADSTACK :		\
634 	DTRACEFLT_UNKNOWN)
635 
636 #define	DTRACEACT_ISSTRING(act)						\
637 	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
638 	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
639 
640 
641 static size_t dtrace_strlen(const char *, size_t);
642 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
643 static void dtrace_enabling_provide(dtrace_provider_t *);
644 static int dtrace_enabling_match(dtrace_enabling_t *, int *, dtrace_match_cond_t *cond);
645 static void dtrace_enabling_matchall_with_cond(dtrace_match_cond_t *cond);
646 static void dtrace_enabling_matchall(void);
647 static dtrace_state_t *dtrace_anon_grab(void);
648 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
649     dtrace_state_t *, uint64_t, uint64_t);
650 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
651 static void dtrace_buffer_drop(dtrace_buffer_t *);
652 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
653     dtrace_state_t *, dtrace_mstate_t *);
654 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
655     dtrace_optval_t);
656 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *, void *);
657 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
658 static int dtrace_canload_remains(uint64_t, size_t, size_t *,
659 	dtrace_mstate_t *, dtrace_vstate_t *);
660 static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
661 	dtrace_mstate_t *, dtrace_vstate_t *);
662 
663 
664 /*
665  * DTrace sysctl handlers
666  *
667  * These declarations and functions are used for deeper DTrace configuration.
668  * Most of them are not set on a per-consumer basis and may impact other
669  * DTrace consumers.  Not every value is validated for correctness, so be
670  * careful about the values you use.
671  */
672 
673 SYSCTL_DECL(_kern_dtrace);
674 SYSCTL_NODE(_kern, OID_AUTO, dtrace, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "dtrace");
675 
676 static int
677 sysctl_dtrace_err_verbose SYSCTL_HANDLER_ARGS
678 {
679 #pragma unused(oidp, arg2)
680 	int changed, error;
681 	int value = *(int *) arg1;
682 
683 	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
684 	if (error || !changed)
685 		return (error);
686 
687 	if (value != 0 && value != 1)
688 		return (ERANGE);
689 
690 	lck_mtx_lock(&dtrace_lock);
691 		dtrace_err_verbose = value;
692 	lck_mtx_unlock(&dtrace_lock);
693 
694 	return (0);
695 }
696 
697 /*
698  * kern.dtrace.err_verbose
699  *
700  * Set DTrace verbosity when an error occurs (0 = disabled, 1 = enabled).
701  * Errors are reported when a DIFO or a DOF has been rejected by the kernel.
702  */
703 SYSCTL_PROC(_kern_dtrace, OID_AUTO, err_verbose,
704 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
705 	&dtrace_err_verbose, 0,
706 	sysctl_dtrace_err_verbose, "I", "dtrace error verbose");
707 
708 static int
709 sysctl_dtrace_buffer_memory_maxsize SYSCTL_HANDLER_ARGS
710 {
711 #pragma unused(oidp, arg2, req)
712 	int changed, error;
713 	uint64_t value = *(uint64_t *) arg1;
714 
715 	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
716 	if (error || !changed)
717 		return (error);
718 
719 	if (value <= dtrace_buffer_memory_inuse)
720 		return (ERANGE);
721 
722 	lck_mtx_lock(&dtrace_lock);
723 		dtrace_buffer_memory_maxsize = value;
724 	lck_mtx_unlock(&dtrace_lock);
725 
726 	return (0);
727 }
728 
729 /*
730  * kern.dtrace.buffer_memory_maxsize
731  *
732  * Set the maximum size in bytes that all DTrace consumers' state buffers may use.  By default
733  * the limit is PHYS_MEM / 3 for *all* consumers.  Attempting to set a zero or negative value,
734  * or a value <= dtrace_buffer_memory_inuse, will result in a failure.
735  */
736 SYSCTL_PROC(_kern_dtrace, OID_AUTO, buffer_memory_maxsize,
737 	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
738 	&dtrace_buffer_memory_maxsize, 0,
739 	sysctl_dtrace_buffer_memory_maxsize, "Q", "dtrace state buffer memory maxsize");
740 
741 /*
742  * kern.dtrace.buffer_memory_inuse
743  *
744  * Current state buffer memory used, in bytes, by all the DTrace consumers.
745  * This value is read-only.
746  */
747 SYSCTL_QUAD(_kern_dtrace, OID_AUTO, buffer_memory_inuse, CTLFLAG_RD | CTLFLAG_LOCKED,
748 	&dtrace_buffer_memory_inuse, "dtrace state buffer memory in-use");
749 
750 static int
751 sysctl_dtrace_difo_maxsize SYSCTL_HANDLER_ARGS
752 {
753 #pragma unused(oidp, arg2, req)
754 	int changed, error;
755 	size_t value = *(size_t*) arg1;
756 
757 	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
758 	if (error || !changed)
759 		return (error);
760 
761 	if (value <= 0)
762 		return (ERANGE);
763 
764 	lck_mtx_lock(&dtrace_lock);
765 		dtrace_difo_maxsize = value;
766 	lck_mtx_unlock(&dtrace_lock);
767 
768 	return (0);
769 }
770 
771 /*
772  * kern.dtrace.difo_maxsize
773  *
774  * Set the DIFO max size in bytes; see the definition of dtrace_difo_maxsize
775  * for the default value.  Attempting to set a zero or negative size will
776  * result in a failure.
777  */
778 SYSCTL_PROC(_kern_dtrace, OID_AUTO, difo_maxsize,
779 	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
780 	&dtrace_difo_maxsize, 0,
781 	sysctl_dtrace_difo_maxsize, "Q", "dtrace difo maxsize");
782 
783 static int
784 sysctl_dtrace_dof_maxsize SYSCTL_HANDLER_ARGS
785 {
786 #pragma unused(oidp, arg2, req)
787 	int changed, error;
788 	dtrace_optval_t value = *(dtrace_optval_t *) arg1;
789 
790 	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
791 	if (error || !changed)
792 		return (error);
793 
794 	if (value <= 0)
795 		return (ERANGE);
796 
797 	if (value >= dtrace_copy_maxsize())
798 		return (ERANGE);
799 
800 	lck_mtx_lock(&dtrace_lock);
801 		dtrace_dof_maxsize = value;
802 	lck_mtx_unlock(&dtrace_lock);
803 
804 	return (0);
805 }
806 
807 /*
808  * kern.dtrace.dof_maxsize
809  *
810  * Set the DOF max size in bytes; see the definition of dtrace_dof_maxsize for
811  * the default value.  Attempting to set a zero or negative size will result
812  * in a failure.
813  */
814 SYSCTL_PROC(_kern_dtrace, OID_AUTO, dof_maxsize,
815 	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
816 	&dtrace_dof_maxsize, 0,
817 	sysctl_dtrace_dof_maxsize, "Q", "dtrace dof maxsize");
818 
819 static int
820 sysctl_dtrace_statvar_maxsize SYSCTL_HANDLER_ARGS
821 {
822 #pragma unused(oidp, arg2, req)
823 	int changed, error;
824 	dtrace_optval_t value = *(dtrace_optval_t*) arg1;
825 
826 	error = sysctl_io_number(req, value, sizeof(value), &value, &changed);
827 	if (error || !changed)
828 		return (error);
829 
830 	if (value <= 0)
831 		return (ERANGE);
832 	if (value > dtrace_statvar_maxsize_max)
833 		return (ERANGE);
834 
835 	lck_mtx_lock(&dtrace_lock);
836 		dtrace_statvar_maxsize = value;
837 	lck_mtx_unlock(&dtrace_lock);
838 
839 	return (0);
840 }
841 
842 /*
843  * kern.dtrace.global_maxsize
844  *
845  * Set the static variable max size in bytes; see the definition of
846  * dtrace_statvar_maxsize for the default value.  Attempting to set a zero or
847  * negative size, or one above dtrace_statvar_maxsize_max, will result in a failure.
848  */
849 SYSCTL_PROC(_kern_dtrace, OID_AUTO, global_maxsize,
850 	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
851 	&dtrace_statvar_maxsize, 0,
852 	sysctl_dtrace_statvar_maxsize, "Q", "dtrace statvar maxsize");
853 
854 
855 /*
856  * kern.dtrace.provide_private_probes
857  *
858  * Whether providers must provide their private probes.  This sysctl is kept
859  * only for compatibility; private probes are now always provided.
860  */
861 SYSCTL_INT(_kern_dtrace, OID_AUTO, provide_private_probes,
862 	CTLFLAG_RD | CTLFLAG_LOCKED,
863 	(int *)NULL, 1, "provider must provide the private probes");
864 
865 /*
866  * kern.dtrace.dof_mode
867  *
868  * Returns the current DOF mode.
869  * This value is read-only.
870  */
871 SYSCTL_INT(_kern_dtrace, OID_AUTO, dof_mode, CTLFLAG_RD | CTLFLAG_LOCKED,
872 	&dtrace_dof_mode, 0, "dtrace dof mode");
873 
874 /*
875  * DTrace Probe Context Functions
876  *
877  * These functions are called from probe context.  Because probe context is
878  * any context in which C may be called, arbitrary locks may be held,
879  * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
880  * As a result, functions called from probe context may only call other DTrace
881  * support functions -- they may not interact at all with the system at large.
882  * (Note that the ASSERT macro is made probe-context safe by redefining it in
883  * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
884  * loads are to be performed from probe context, they _must_ be in terms of
885  * the safe dtrace_load*() variants.
886  *
887  * Some functions in this block are not actually called from probe context;
888  * for these functions, there will be a comment above the function reading
889  * "Note:  not called from probe context."
890  */
891 
892 int
893 dtrace_assfail(const char *a, const char *f, int l)
894 {
895 	panic("dtrace: assertion failed: %s, file: %s, line: %d", a, f, l);
896 
897 	/*
898 	 * We just need something here that even the most clever compiler
899 	 * cannot optimize away.
900 	 */
901 	return (a[(uintptr_t)f]);
902 }
903 
904 /*
905  * Atomically increment a specified error counter from probe context.
906  */
907 static void
908 dtrace_error(uint32_t *counter)
909 {
910 	/*
911 	 * Most counters stored to in probe context are per-CPU counters.
912 	 * However, there are some error conditions that are sufficiently
913 	 * arcane that they don't merit per-CPU storage.  If these counters
914 	 * are incremented concurrently on different CPUs, scalability will be
915 	 * adversely affected -- but we don't expect them to be white-hot in a
916 	 * correctly constructed enabling...
917 	 */
918 	uint32_t oval, nval;
919 
920 	do {
921 		oval = *counter;
922 
923 		if ((nval = oval + 1) == 0) {
924 			/*
925 			 * If the counter would wrap, set it to 1 -- assuring
926 			 * that the counter is never zero when we have seen
927 			 * errors.  (The counter must be 32-bits because we
928 			 * aren't guaranteed a 64-bit compare&swap operation.)
929 			 * To save this code both the infamy of being fingered
930 			 * by a priggish news story and the indignity of being
931 			 * the target of a neo-puritan witch trial, we're
932 			 * carefully avoiding any colorful description of the
933 			 * likelihood of this condition -- but suffice it to
934 			 * say that it is only slightly more likely than the
935 			 * overflow of predicate cache IDs, as discussed in
936 			 * dtrace_predicate_create().
937 			 */
938 			nval = 1;
939 		}
940 	} while (dtrace_cas32(counter, oval, nval) != oval);
941 }
942 
943 /*
944  * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
945  * uint8_t, a uint16_t, a uint32_t and a uint64_t.
946  */
947 DTRACE_LOADFUNC(8)
948 DTRACE_LOADFUNC(16)
949 DTRACE_LOADFUNC(32)
950 DTRACE_LOADFUNC(64)
951 
952 static int
953 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
954 {
955 	if (dest < mstate->dtms_scratch_base)
956 		return (0);
957 
958 	if (dest + size < dest)
959 		return (0);
960 
961 	if (dest + size > mstate->dtms_scratch_ptr)
962 		return (0);
963 
964 	return (1);
965 }
966 
967 static int
968 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
969     dtrace_statvar_t **svars, int nsvars)
970 {
971 	int i;
972 
973 	size_t maxglobalsize, maxlocalsize;
974 
975 	maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
976 	maxlocalsize = (maxglobalsize) * NCPU;
977 
978 	if (nsvars == 0)
979 		return (0);
980 
981 	for (i = 0; i < nsvars; i++) {
982 		dtrace_statvar_t *svar = svars[i];
983 		uint8_t scope;
984 		size_t size;
985 
986 		if (svar == NULL || (size = svar->dtsv_size) == 0)
987 			continue;
988 
989 		scope = svar->dtsv_var.dtdv_scope;
990 
991 		/**
992 		 * We verify that our size is valid in the spirit of providing
993 		 * defense in depth:  we want to prevent attackers from using
994 		 * DTrace to escalate an orthogonal kernel heap corruption bug
995 		 * into the ability to store to arbitrary locations in memory.
996 		 */
997 		VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
998 			(scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));
999 
1000 		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) {
1001 			DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
1002 				svar->dtsv_size);
1003 			return (1);
1004 		}
1005 	}
1006 
1007 	return (0);
1008 }
1009 
1010 /*
1011  * Check to see if the address is within a memory region to which a store may
1012  * be issued.  This includes the DTrace scratch areas, and any DTrace variable
1013  * region.  The caller of dtrace_canstore() is responsible for performing any
1014  * alignment checks that are needed before stores are actually executed.
1015  */
1016 static int
1017 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
1018     dtrace_vstate_t *vstate)
1019 {
1020 	return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
1021 }
1022 /*
1023  * Implementation of dtrace_canstore which communicates the upper bound of the
1024  * allowed memory region.
1025  */
1026 static int
1027 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
1028 	dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1029 {
1030 	/*
1031 	 * First, check to see if the address is in scratch space...
1032 	 */
1033 	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
1034 	    mstate->dtms_scratch_size)) {
1035 		DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
1036 			mstate->dtms_scratch_size);
1037 		return (1);
1038 	}
1039 	/*
1040 	 * Now check to see if it's a dynamic variable.  This check will pick
1041 	 * up both thread-local variables and any global dynamically-allocated
1042 	 * variables.
1043 	 */
1044 	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
1045 	    vstate->dtvs_dynvars.dtds_size)) {
1046 		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
1047 		uintptr_t base = (uintptr_t)dstate->dtds_base +
1048 		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
1049 		uintptr_t chunkoffs;
1050 		dtrace_dynvar_t *dvar;
1051 
1052 		/*
1053 		 * Before we assume that we can store here, we need to make
1054 		 * sure that it isn't in our metadata -- storing to our
1055 		 * dynamic variable metadata would corrupt our state.  For
1056 		 * the range to not include any dynamic variable metadata,
1057 		 * it must:
1058 		 *
1059 		 *	(1) Start above the hash table that is at the base of
1060 		 *	the dynamic variable space
1061 		 *
1062 		 *	(2) Have a starting chunk offset that is beyond the
1063 		 *	dtrace_dynvar_t that is at the base of every chunk
1064 		 *
1065 		 *	(3) Not span a chunk boundary
1066 		 *
1067 		 *	(4) Not be in the tuple space of a dynamic variable
1068 		 *
1069 		 */
1070 		if (addr < base)
1071 			return (0);
1072 
1073 		chunkoffs = (addr - base) % dstate->dtds_chunksize;
1074 
1075 		if (chunkoffs < sizeof (dtrace_dynvar_t))
1076 			return (0);
1077 
1078 		if (chunkoffs + sz > dstate->dtds_chunksize)
1079 			return (0);
1080 
1081 		dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);
1082 
1083 		if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
1084 			return (0);
1085 
1086 		if (chunkoffs < sizeof (dtrace_dynvar_t) +
1087 			((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
1088 			return (0);
1089 
1090 		return (1);
1091 	}
1092 
1093 	/*
1094 	 * Finally, check the static local and global variables.  These checks
1095 	 * take the longest, so we perform them last.
1096 	 */
1097 	if (dtrace_canstore_statvar(addr, sz, remain,
1098 	    vstate->dtvs_locals, vstate->dtvs_nlocals))
1099 		return (1);
1100 
1101 	if (dtrace_canstore_statvar(addr, sz, remain,
1102 	    vstate->dtvs_globals, vstate->dtvs_nglobals))
1103 		return (1);
1104 
1105 	return (0);
1106 }
1107 
1108 
1109 /*
1110  * Convenience routine to check to see if the address is within a memory
1111  * region in which a load may be issued given the user's privilege level;
1112  * if not, it sets the appropriate error flags and loads 'addr' into the
1113  * illegal value slot.
1114  *
1115  * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
1116  * appropriate memory access protection.
1117  */
1118 int
1119 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
1120     dtrace_vstate_t *vstate)
1121 {
1122 	return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
1123 }
1124 
1125 /*
1126  * Implementation of dtrace_canload which communicates the upper bound of the
1127  * allowed memory region.
1128  */
1129 static int
1130 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
1131 	dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1132 {
1133 	volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
1134 
1135 	/*
1136 	 * If we hold the privilege to read from kernel memory, then
1137 	 * everything is readable.
1138 	 */
1139 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1140 		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
1141 		return (1);
1142 	}
1143 
1144 	/*
1145 	 * You can obviously read that which you can store.
1146 	 */
1147 	if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
1148 		return (1);
1149 
1150 	/*
1151 	 * We're allowed to read from our own string table.
1152 	 */
1153 	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
1154 	    mstate->dtms_difo->dtdo_strlen)) {
1155 		DTRACE_RANGE_REMAIN(remain, addr,
1156 			mstate->dtms_difo->dtdo_strtab,
1157 			mstate->dtms_difo->dtdo_strlen);
1158 		return (1);
1159 	}
1160 
1161 	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
1162 	*illval = addr;
1163 	return (0);
1164 }
1165 
1166 /*
1167  * Convenience routine to check to see if a given string is within a memory
1168  * region in which a load may be issued given the user's privilege level;
1169  * this exists so that we don't need to issue unnecessary dtrace_strlen()
1170  * calls in the event that the user has all privileges.
1171  */
1172 static int
1173 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
1174 	dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1175 {
1176 	size_t rsize = 0;
1177 
1178 	/*
1179 	 * If we hold the privilege to read from kernel memory, then
1180 	 * everything is readable.
1181 	 */
1182 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1183 		DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
1184 		return (1);
1185 	}
1186 
1187 	/*
1188 	 * Even if the caller is uninterested in the remaining valid range, it
1189 	 * must still be computed here to ensure that the access is allowed.
1190 	 */
1191 	if (remain == NULL) {
1192 		remain = &rsize;
1193 	}
1194 	if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
1195 		size_t strsz;
1196 		/*
1197 		 * Perform the strlen after determining the length of the
1198 		 * memory region which is accessible.  This prevents timing
1199 		 * information from being used to find NULs in memory which is
1200 		 * not accessible to the caller.
1201 		 */
1202 		strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
1203 			MIN(sz, *remain));
1204 		if (strsz <= *remain) {
1205 			return (1);
1206 		}
1207 	}
1208 
1209 	return (0);
1210 }
1211 
1212 /*
1213  * Convenience routine to check to see if a given variable is within a memory
1214  * region in which a load may be issued given the user's privilege level.
1215  */
1216 static int
1217 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
1218 	dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1219 {
1220 	size_t sz;
1221 	ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1222 
1223 	/*
1224 	 * Calculate the max size before performing any checks since even
1225 	 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
1226 	 * return the max length via 'remain'.
1227 	 */
1228 	if (type->dtdt_kind == DIF_TYPE_STRING) {
1229 		dtrace_state_t *state = vstate->dtvs_state;
1230 
1231 		if (state != NULL) {
1232 			sz = state->dts_options[DTRACEOPT_STRSIZE];
1233 		} else {
1234 			/*
1235 			 * In helper context, we have a NULL state; fall back
1236 			 * to using the system-wide default for the string size
1237 			 * in this case.
1238 			 */
1239 			sz = dtrace_strsize_default;
1240 		}
1241 	} else {
1242 		sz = type->dtdt_size;
1243 	}
1244 
1245 	/*
1246 	 * If we hold the privilege to read from kernel memory, then
1247 	 * everything is readable.
1248 	 */
1249 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1250 		DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
1251 		return (1);
1252 	}
1253 
1254 	if (type->dtdt_kind == DIF_TYPE_STRING) {
1255 		return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
1256 			vstate));
1257 	}
1258 	return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
1259 		vstate));
1260 }
1261 
1262 #define	isdigit(ch)	((ch) >= '0' && (ch) <= '9')
1263 #define	islower(ch)	((ch) >= 'a' && (ch) <= 'z')
1264 #define	isspace(ch)	(((ch) == ' ') || ((ch) == '\r') || ((ch) == '\n') || \
1265 			((ch) == '\t') || ((ch) == '\f'))
1266 #define	isxdigit(ch)	(isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
1267 			((ch) >= 'A' && (ch) <= 'F'))
1268 #define	lisalnum(x)	\
1269 	(isdigit(x) || ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z'))
1270 
1271 #define	DIGIT(x)	\
1272 	(isdigit(x) ? (x) - '0' : islower(x) ? (x) + 10 - 'a' : (x) + 10 - 'A')
1273 
1274 /*
1275  * Convert a string to a signed integer using safe loads.
1276  */
1277 static int64_t
1278 dtrace_strtoll(char *input, int base, size_t limit)
1279 {
1280 	uintptr_t pos = (uintptr_t)input;
1281 	int64_t val = 0;
1282 	int x;
1283 	boolean_t neg = B_FALSE;
1284 	char c, cc, ccc;
1285 	uintptr_t end = pos + limit;
1286 
1287 	/*
1288 	 * Consume any whitespace preceding digits.
1289 	 */
1290 	while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
1291 		pos++;
1292 
1293 	/*
1294 	 * Handle an explicit sign if one is present.
1295 	 */
1296 	if (c == '-' || c == '+') {
1297 		if (c == '-')
1298 			neg = B_TRUE;
1299 		c = dtrace_load8(++pos);
1300 	}
1301 
1302 	/*
1303 	 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
1304 	 * if present.
1305 	 */
1306 	if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
1307 	    cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
1308 		pos += 2;
1309 		c = ccc;
1310 	}
1311 
1312 	/*
1313 	 * Read in contiguous digits until the first non-digit character.
1314 	 */
1315 	for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
1316 	    c = dtrace_load8(++pos))
1317 		val = val * base + x;
1318 
1319 	return (neg ? -val : val);
1320 }
1321 
1322 
1323 /*
1324  * Compare two strings using safe loads.
1325  */
1326 static int
1327 dtrace_strncmp(const char *s1, const char *s2, size_t limit)
1328 {
1329 	uint8_t c1, c2;
1330 	volatile uint16_t *flags;
1331 
1332 	if (s1 == s2 || limit == 0)
1333 		return (0);
1334 
1335 	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1336 
1337 	do {
1338 		if (s1 == NULL) {
1339 			c1 = '\0';
1340 		} else {
1341 			c1 = dtrace_load8((uintptr_t)s1++);
1342 		}
1343 
1344 		if (s2 == NULL) {
1345 			c2 = '\0';
1346 		} else {
1347 			c2 = dtrace_load8((uintptr_t)s2++);
1348 		}
1349 
1350 		if (c1 != c2)
1351 			return (c1 - c2);
1352 	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
1353 
1354 	return (0);
1355 }
1356 
1357 /*
1358  * Compute strlen(s) for a string using safe memory accesses.  The additional
1359  * len parameter is used to specify a maximum length to ensure completion.
1360  */
1361 static size_t
1362 dtrace_strlen(const char *s, size_t lim)
1363 {
1364 	uint_t len;
1365 
1366 	for (len = 0; len != lim; len++) {
1367 		if (dtrace_load8((uintptr_t)s++) == '\0')
1368 			break;
1369 	}
1370 
1371 	return (len);
1372 }
1373 
1374 /*
1375  * Check if an address falls within a toxic region.
1376  */
1377 static int
1378 dtrace_istoxic(uintptr_t kaddr, size_t size)
1379 {
1380 	uintptr_t taddr, tsize;
1381 	int i;
1382 
1383 	for (i = 0; i < dtrace_toxranges; i++) {
1384 		taddr = dtrace_toxrange[i].dtt_base;
1385 		tsize = dtrace_toxrange[i].dtt_limit - taddr;
1386 
1387 		if (kaddr - taddr < tsize) {
1388 			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1389 			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
1390 			return (1);
1391 		}
1392 
1393 		if (taddr - kaddr < size) {
1394 			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1395 			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
1396 			return (1);
1397 		}
1398 	}
1399 
1400 	return (0);
1401 }
1402 
1403 /*
1404  * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
1405  * memory specified by the DIF program.  The dst is assumed to be safe memory
1406  * that we can store to directly because it is managed by DTrace.  As with
1407  * standard bcopy, overlapping copies are handled properly.
1408  */
1409 static void
1410 dtrace_bcopy(const void *src, void *dst, size_t len)
1411 {
1412 	if (len != 0) {
1413 		uint8_t *s1 = dst;
1414 		const uint8_t *s2 = src;
1415 
1416 		if (s1 <= s2) {
1417 			do {
1418 				*s1++ = dtrace_load8((uintptr_t)s2++);
1419 			} while (--len != 0);
1420 		} else {
1421 			s2 += len;
1422 			s1 += len;
1423 
1424 			do {
1425 				*--s1 = dtrace_load8((uintptr_t)--s2);
1426 			} while (--len != 0);
1427 		}
1428 	}
1429 }
1430 
1431 /*
1432  * Copy src to dst using safe memory accesses, up to either the specified
1433  * length, or the point that a nul byte is encountered.  The src is assumed to
1434  * be unsafe memory specified by the DIF program.  The dst is assumed to be
1435  * safe memory that we can store to directly because it is managed by DTrace.
1436  * Unlike dtrace_bcopy(), overlapping regions are not handled.
1437  */
1438 static void
1439 dtrace_strcpy(const void *src, void *dst, size_t len)
1440 {
1441 	if (len != 0) {
1442 		uint8_t *s1 = dst, c;
1443 		const uint8_t *s2 = src;
1444 
1445 		do {
1446 			*s1++ = c = dtrace_load8((uintptr_t)s2++);
1447 		} while (--len != 0 && c != '\0');
1448 	}
1449 }
1450 
1451 /*
1452  * Copy src to dst, deriving the size and type from the specified (BYREF)
1453  * variable type.  The src is assumed to be unsafe memory specified by the DIF
1454  * program.  The dst is assumed to be DTrace variable memory that is of the
1455  * specified type; we assume that we can store to directly.
1456  */
1457 static void
1458 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
1459 {
1460 	ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1461 
1462 	if (type->dtdt_kind == DIF_TYPE_STRING) {
1463 		dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
1464 	} else {
1465 		dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
1466 	}
1467 }
1468 
1469 /*
1470  * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
1471  * unsafe memory specified by the DIF program.  The s2 data is assumed to be
1472  * safe memory that we can access directly because it is managed by DTrace.
1473  */
1474 static int
1475 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1476 {
1477 	volatile uint16_t *flags;
1478 
1479 	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
1480 
1481 	if (s1 == s2)
1482 		return (0);
1483 
1484 	if (s1 == NULL || s2 == NULL)
1485 		return (1);
1486 
1487 	if (s1 != s2 && len != 0) {
1488 		const uint8_t *ps1 = s1;
1489 		const uint8_t *ps2 = s2;
1490 
1491 		do {
1492 			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1493 				return (1);
1494 		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1495 	}
1496 	return (0);
1497 }
1498 
1499 /*
1500  * Zero the specified region using a simple byte-by-byte loop.  Note that this
1501  * is for safe DTrace-managed memory only.
1502  */
1503 static void
1504 dtrace_bzero(void *dst, size_t len)
1505 {
1506 	uchar_t *cp;
1507 
1508 	for (cp = dst; len != 0; len--)
1509 		*cp++ = 0;
1510 }
1511 
1512 static void
1513 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1514 {
1515 	uint64_t result[2];
1516 
1517 	result[0] = addend1[0] + addend2[0];
1518 	result[1] = addend1[1] + addend2[1] +
1519 	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1520 
1521 	sum[0] = result[0];
1522 	sum[1] = result[1];
1523 }
1524 
1525 /*
1526  * Shift the 128-bit value in a by b. If b is positive, shift left.
1527  * If b is negative, shift right.
1528  */
1529 static void
1530 dtrace_shift_128(uint64_t *a, int b)
1531 {
1532 	uint64_t mask;
1533 
1534 	if (b == 0)
1535 		return;
1536 
1537 	if (b < 0) {
1538 		b = -b;
1539 		if (b >= 64) {
1540 			a[0] = a[1] >> (b - 64);
1541 			a[1] = 0;
1542 		} else {
1543 			a[0] >>= b;
1544 			mask = 1LL << (64 - b);
1545 			mask -= 1;
1546 			a[0] |= ((a[1] & mask) << (64 - b));
1547 			a[1] >>= b;
1548 		}
1549 	} else {
1550 		if (b >= 64) {
1551 			a[1] = a[0] << (b - 64);
1552 			a[0] = 0;
1553 		} else {
1554 			a[1] <<= b;
1555 			mask = a[0] >> (64 - b);
1556 			a[1] |= mask;
1557 			a[0] <<= b;
1558 		}
1559 	}
1560 }
1561 
1562 /*
1563  * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1564  * use native multiplication on those, and then re-combine into the
1565  * resulting 128-bit value.
1566  *
1567  * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1568  *     hi1 * hi2 << 64 +
1569  *     hi1 * lo2 << 32 +
1570  *     hi2 * lo1 << 32 +
1571  *     lo1 * lo2
1572  */
1573 static void
1574 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1575 {
1576 	uint64_t hi1, hi2, lo1, lo2;
1577 	uint64_t tmp[2];
1578 
1579 	hi1 = factor1 >> 32;
1580 	hi2 = factor2 >> 32;
1581 
1582 	lo1 = factor1 & DT_MASK_LO;
1583 	lo2 = factor2 & DT_MASK_LO;
1584 
1585 	product[0] = lo1 * lo2;
1586 	product[1] = hi1 * hi2;
1587 
1588 	tmp[0] = hi1 * lo2;
1589 	tmp[1] = 0;
1590 	dtrace_shift_128(tmp, 32);
1591 	dtrace_add_128(product, tmp, product);
1592 
1593 	tmp[0] = hi2 * lo1;
1594 	tmp[1] = 0;
1595 	dtrace_shift_128(tmp, 32);
1596 	dtrace_add_128(product, tmp, product);
1597 }
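
/*
 * For illustration (not part of the implementation): on a compiler that
 * provides __uint128_t, the result can be cross-checked against native
 * 128-bit arithmetic:
 *
 *	uint64_t p[2];
 *
 *	dtrace_multiply_128(factor1, factor2, p);
 *	ASSERT(p[0] == (uint64_t)((__uint128_t)factor1 * factor2));
 *	ASSERT(p[1] == (uint64_t)(((__uint128_t)factor1 * factor2) >> 64));
 */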
1598 
1599 /*
1600  * This privilege check should be used by actions and subroutines to
1601  * verify that the user credentials of the process that enabled the
1602  * invoking ECB match the target credentials
1603  */
1604 static int
1605 dtrace_priv_proc_common_user(dtrace_state_t *state)
1606 {
1607 	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1608 
1609 	/*
1610 	 * We should always have a non-NULL state cred here, since if cred
1611 	 * is null (anonymous tracing), we fast-path bypass this routine.
1612 	 */
1613 	ASSERT(s_cr != NULL);
1614 
1615 	if ((cr = dtrace_CRED()) != NULL &&
1616 	    posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_uid &&
1617 	    posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_ruid &&
1618 	    posix_cred_get(s_cr)->cr_uid == posix_cred_get(cr)->cr_suid &&
1619 	    posix_cred_get(s_cr)->cr_gid == posix_cred_get(cr)->cr_gid &&
1620 	    posix_cred_get(s_cr)->cr_gid == posix_cred_get(cr)->cr_rgid &&
1621 	    posix_cred_get(s_cr)->cr_gid == posix_cred_get(cr)->cr_sgid)
1622 		return (1);
1623 
1624 	return (0);
1625 }
1626 
1627 /*
1628  * This privilege check should be used by actions and subroutines to
1629  * verify that the zone of the process that enabled the invoking ECB
1630  * matches the target credentials
1631  */
1632 static int
1633 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1634 {
1635 	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1636 #pragma unused(cr, s_cr, state) /* __APPLE__ */
1637 
1638 	/*
1639 	 * We should always have a non-NULL state cred here, since if cred
1640 	 * is null (anonymous tracing), we fast-path bypass this routine.
1641 	 */
1642 	ASSERT(s_cr != NULL);
1643 
1644 	return 1; /* APPLE NOTE: Darwin doesn't do zones. */
1645 }
1646 
1647 /*
1648  * This privilege check should be used by actions and subroutines to
1649  * verify that the process has not setuid or changed credentials.
1650  */
1651 static int
1652 dtrace_priv_proc_common_nocd(void)
1653 {
1654 	return 1; /* Darwin omits "No Core Dump" flag. */
1655 }
1656 
1657 static int
1658 dtrace_priv_proc_destructive(dtrace_state_t *state)
1659 {
1660 	int action = state->dts_cred.dcr_action;
1661 
1662 	if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
1663 		goto bad;
1664 
1665 	if (dtrace_is_restricted() && !dtrace_can_attach_to_proc(current_proc()))
1666 		goto bad;
1667 
1668 	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1669 	    dtrace_priv_proc_common_zone(state) == 0)
1670 		goto bad;
1671 
1672 	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1673 	    dtrace_priv_proc_common_user(state) == 0)
1674 		goto bad;
1675 
1676 	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1677 	    dtrace_priv_proc_common_nocd() == 0)
1678 		goto bad;
1679 
1680 	return (1);
1681 
1682 bad:
1683 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1684 
1685 	return (0);
1686 }
1687 
1688 static int
1689 dtrace_priv_proc_control(dtrace_state_t *state)
1690 {
1691 	if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
1692 		goto bad;
1693 
1694 	if (dtrace_is_restricted() && !dtrace_can_attach_to_proc(current_proc()))
1695 		goto bad;
1696 
1697 	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1698 		return (1);
1699 
1700 	if (dtrace_priv_proc_common_zone(state) &&
1701 	    dtrace_priv_proc_common_user(state) &&
1702 	    dtrace_priv_proc_common_nocd())
1703 		return (1);
1704 
1705 bad:
1706 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1707 
1708 	return (0);
1709 }
1710 
1711 static int
1712 dtrace_priv_proc(dtrace_state_t *state)
1713 {
1714 	if (ISSET(current_proc()->p_lflag, P_LNOATTACH))
1715 		goto bad;
1716 
1717 	if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed() && !dtrace_can_attach_to_proc(current_proc()))
1718 		goto bad;
1719 
1720 	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1721 		return (1);
1722 
1723 bad:
1724 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1725 
1726 	return (0);
1727 }
1728 
1729 /*
1730  * The P_LNOATTACH check is an Apple specific check.
1731  * We need a version of dtrace_priv_proc() that omits
1732  * that check for PID and EXECNAME accesses
1733  */
1734 static int
1735 dtrace_priv_proc_relaxed(dtrace_state_t *state)
1736 {
1737 
1738 	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1739 		return (1);
1740 
1741 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1742 
1743 	return (0);
1744 }
1745 
1746 static int
1747 dtrace_priv_kernel(dtrace_state_t *state)
1748 {
1749 	if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed())
1750 		goto bad;
1751 
1752 	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1753 		return (1);
1754 
1755 bad:
1756 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1757 
1758 	return (0);
1759 }
1760 
1761 static int
1762 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1763 {
1764 	if (dtrace_is_restricted())
1765 		goto bad;
1766 
1767 	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1768 		return (1);
1769 
1770 bad:
1771 	cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1772 
1773 	return (0);
1774 }
1775 
1776 /*
1777  * Note:  not called from probe context.  This function is called
1778  * asynchronously (and at a regular interval) from outside of probe context to
1779  * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
1780  * cleaning is explained in detail in <sys/dtrace_impl.h>.
1781  */
1782 static void
1783 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1784 {
1785 	dtrace_dynvar_t *dirty;
1786 	int work = 0;
1787 
1788 	zpercpu_foreach(dcpu, dstate->dtds_percpu) {
1789 		ASSERT(dcpu->dtdsc_rinsing == NULL);
1790 
1791 		/*
1792 		 * If the dirty list is NULL, there is no dirty work to do.
1793 		 */
1794 		if (dcpu->dtdsc_dirty == NULL)
1795 			continue;
1796 
1797 		/*
1798 		 * If the clean list is non-NULL, then we're not going to do
1799 		 * any work for this CPU -- it means that there has not been
1800 		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
1801 		 * since the last time we cleaned house.
1802 		 */
1803 		if (dcpu->dtdsc_clean != NULL)
1804 			continue;
1805 
1806 		work = 1;
1807 
1808 		/*
1809 		 * Atomically move the dirty list aside.
1810 		 */
1811 		do {
1812 			dirty = dcpu->dtdsc_dirty;
1813 
1814 			/*
1815 			 * Before we zap the dirty list, set the rinsing list.
1816 			 * (This allows for a potential assertion in
1817 			 * dtrace_dynvar():  if a free dynamic variable appears
1818 			 * on a hash chain, either the dirty list or the
1819 			 * rinsing list for some CPU must be non-NULL.)
1820 			 */
1821 			dcpu->dtdsc_rinsing = dirty;
1822 			dtrace_membar_producer();
1823 		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
1824 		    dirty, NULL) != dirty);
1825 	}
1826 
1827 	if (!work) {
1828 		/*
1829 		 * We have no work to do; we can simply return.
1830 		 */
1831 		return;
1832 	}
1833 
1834 	dtrace_sync();
1835 
1836 	zpercpu_foreach(dcpu, dstate->dtds_percpu) {
1837 		if (dcpu->dtdsc_rinsing == NULL)
1838 			continue;
1839 
1840 		/*
1841 		 * We are now guaranteed that no hash chain contains a pointer
1842 		 * into this dirty list; we can make it clean.
1843 		 */
1844 		ASSERT(dcpu->dtdsc_clean == NULL);
1845 		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1846 		dcpu->dtdsc_rinsing = NULL;
1847 	}
1848 
1849 	/*
1850 	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1851 	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1852 	 * This prevents a race whereby a CPU incorrectly decides that
1853 	 * the state should be something other than DTRACE_DSTATE_CLEAN
1854 	 * after dtrace_dynvar_clean() has completed.
1855 	 */
1856 	dtrace_sync();
1857 
1858 	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1859 }
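
/*
 * To summarize the laundering performed above (the authoritative
 * description is in <sys/dtrace_impl.h>), each per-CPU free list is
 * replenished in three steps:
 *
 *	dtdsc_dirty   --(casptr, this routine)-------->  dtdsc_rinsing
 *	dtdsc_rinsing --(after dtrace_sync(), above)-->  dtdsc_clean
 *	dtdsc_clean   --(casptr, dtrace_dynvar())----->  dtdsc_free
 *
 * The intervening dtrace_sync() guarantees that no probe-context consumer
 * still holds a pointer into the list being promoted.
 */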
1860 
1861 /*
1862  * Depending on the value of the op parameter, this function looks-up,
1863  * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
1864  * allocation is requested, this function will return a pointer to a
1865  * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1866  * variable can be allocated.  If NULL is returned, the appropriate counter
1867  * will be incremented.
1868  */
1869 static dtrace_dynvar_t *
1870 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1871     dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1872     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1873 {
1874 	uint64_t hashval = DTRACE_DYNHASH_VALID;
1875 	dtrace_dynhash_t *hash = dstate->dtds_hash;
1876 	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1877 	processorid_t me = CPU->cpu_id, cpu = me;
1878 	dtrace_dstate_percpu_t *dcpu = zpercpu_get_cpu(dstate->dtds_percpu, me);
1879 	size_t bucket, ksize;
1880 	size_t chunksize = dstate->dtds_chunksize;
1881 	uintptr_t kdata, lock, nstate;
1882 	uint_t i;
1883 
1884 	ASSERT(nkeys != 0);
1885 
1886 	/*
1887 	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
1888 	 * algorithm.  For the by-value portions, we perform the algorithm in
1889 	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
1890 	 * bit, and seems to have only a minute effect on distribution.  For
1891 	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1892 	 * over each referenced byte.  It's painful to do this, but it's much
1893 	 * better than pathological hash distribution.  The efficacy of the
1894 	 * hashing algorithm (and a comparison with other algorithms) may be
1895 	 * found by running the ::dtrace_dynstat MDB dcmd.
1896 	 */
1897 	for (i = 0; i < nkeys; i++) {
1898 		if (key[i].dttk_size == 0) {
1899 			uint64_t val = key[i].dttk_value;
1900 
1901 			hashval += (val >> 48) & 0xffff;
1902 			hashval += (hashval << 10);
1903 			hashval ^= (hashval >> 6);
1904 
1905 			hashval += (val >> 32) & 0xffff;
1906 			hashval += (hashval << 10);
1907 			hashval ^= (hashval >> 6);
1908 
1909 			hashval += (val >> 16) & 0xffff;
1910 			hashval += (hashval << 10);
1911 			hashval ^= (hashval >> 6);
1912 
1913 			hashval += val & 0xffff;
1914 			hashval += (hashval << 10);
1915 			hashval ^= (hashval >> 6);
1916 		} else {
1917 			/*
1918 			 * This is incredibly painful, but it beats the hell
1919 			 * out of the alternative.
1920 			 */
1921 			uint64_t j, size = key[i].dttk_size;
1922 			uintptr_t base = (uintptr_t)key[i].dttk_value;
1923 
1924 			if (!dtrace_canload(base, size, mstate, vstate))
1925 				break;
1926 
1927 			for (j = 0; j < size; j++) {
1928 				hashval += dtrace_load8(base + j);
1929 				hashval += (hashval << 10);
1930 				hashval ^= (hashval >> 6);
1931 			}
1932 		}
1933 	}
1934 
1935 	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1936 		return (NULL);
1937 
1938 	hashval += (hashval << 3);
1939 	hashval ^= (hashval >> 11);
1940 	hashval += (hashval << 15);
1941 
1942 	/*
1943 	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1944 	 * comes out to be one of our two sentinel hash values.  If this
1945 	 * actually happens, we set the hashval to be a value known to be a
1946 	 * non-sentinel value.
1947 	 */
1948 	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1949 		hashval = DTRACE_DYNHASH_VALID;
1950 
1951 	/*
1952 	 * Yes, it's painful to do a divide here.  If the cycle count becomes
1953 	 * important here, tricks can be pulled to reduce it.  (However, it's
1954 	 * critical that hash collisions be kept to an absolute minimum;
1955 	 * they're much more painful than a divide.)  It's better to have a
1956 	 * solution that generates few collisions and still keeps things
1957 	 * relatively simple.
1958 	 */
1959 	bucket = hashval % dstate->dtds_hashsize;
1960 
1961 	if (op == DTRACE_DYNVAR_DEALLOC) {
1962 		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1963 
1964 		for (;;) {
1965 			while ((lock = *lockp) & 1)
1966 				continue;
1967 
1968 			if (dtrace_casptr((void *)(uintptr_t)lockp,
1969 			    (void *)lock, (void *)(lock + 1)) == (void *)lock)
1970 				break;
1971 		}
1972 
1973 		dtrace_membar_producer();
1974 	}
1975 
1976 top:
1977 	prev = NULL;
1978 	lock = hash[bucket].dtdh_lock;
1979 
1980 	dtrace_membar_consumer();
1981 
1982 	start = hash[bucket].dtdh_chain;
1983 	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1984 	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1985 	    op != DTRACE_DYNVAR_DEALLOC));
1986 
1987 	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1988 		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1989 		dtrace_key_t *dkey = &dtuple->dtt_key[0];
1990 
1991 		if (dvar->dtdv_hashval != hashval) {
1992 			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1993 				/*
1994 				 * We've reached the sink, and therefore the
1995 				 * end of the hash chain; we can kick out of
1996 				 * the loop knowing that we have seen a valid
1997 				 * snapshot of state.
1998 				 */
1999 				ASSERT(dvar->dtdv_next == NULL);
2000 				ASSERT(dvar == &dtrace_dynhash_sink);
2001 				break;
2002 			}
2003 
2004 			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
2005 				/*
2006 				 * We've gone off the rails:  somewhere along
2007 				 * the line, one of the members of this hash
2008 				 * chain was deleted.  Note that we could also
2009 				 * detect this by simply letting this loop run
2010 				 * to completion, as we would eventually hit
2011 				 * the end of the dirty list.  However, we
2012 				 * want to avoid running the length of the
2013 				 * dirty list unnecessarily (it might be quite
2014 				 * long), so we catch this as early as
2015 				 * possible by detecting the hash marker.  In
2016 				 * this case, we simply set dvar to NULL and
2017 				 * break; the conditional after the loop will
2018 				 * send us back to top.
2019 				 */
2020 				dvar = NULL;
2021 				break;
2022 			}
2023 
2024 			goto next;
2025 		}
2026 
2027 		if (dtuple->dtt_nkeys != nkeys)
2028 			goto next;
2029 
2030 		for (i = 0; i < nkeys; i++, dkey++) {
2031 			if (dkey->dttk_size != key[i].dttk_size)
2032 				goto next; /* size or type mismatch */
2033 
2034 			if (dkey->dttk_size != 0) {
2035 				if (dtrace_bcmp(
2036 				    (void *)(uintptr_t)key[i].dttk_value,
2037 				    (void *)(uintptr_t)dkey->dttk_value,
2038 				    dkey->dttk_size))
2039 					goto next;
2040 			} else {
2041 				if (dkey->dttk_value != key[i].dttk_value)
2042 					goto next;
2043 			}
2044 		}
2045 
2046 		if (op != DTRACE_DYNVAR_DEALLOC)
2047 			return (dvar);
2048 
2049 		ASSERT(dvar->dtdv_next == NULL ||
2050 		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
2051 
2052 		if (prev != NULL) {
2053 			ASSERT(hash[bucket].dtdh_chain != dvar);
2054 			ASSERT(start != dvar);
2055 			ASSERT(prev->dtdv_next == dvar);
2056 			prev->dtdv_next = dvar->dtdv_next;
2057 		} else {
2058 			if (dtrace_casptr(&hash[bucket].dtdh_chain,
2059 			    start, dvar->dtdv_next) != start) {
2060 				/*
2061 				 * We have failed to atomically swing the
2062 				 * hash table head pointer, presumably because
2063 				 * of a conflicting allocation on another CPU.
2064 				 * We need to reread the hash chain and try
2065 				 * again.
2066 				 */
2067 				goto top;
2068 			}
2069 		}
2070 
2071 		dtrace_membar_producer();
2072 
2073 		/*
2074 		 * Now set the hash value to indicate that it's free.
2075 		 */
2076 		ASSERT(hash[bucket].dtdh_chain != dvar);
2077 		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2078 
2079 		dtrace_membar_producer();
2080 
2081 		/*
2082 		 * Set the next pointer to point at the dirty list, and
2083 		 * atomically swing the dirty pointer to the newly freed dvar.
2084 		 */
2085 		do {
2086 			next = dcpu->dtdsc_dirty;
2087 			dvar->dtdv_next = next;
2088 		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
2089 
2090 		/*
2091 		 * Finally, unlock this hash bucket.
2092 		 */
2093 		ASSERT(hash[bucket].dtdh_lock == lock);
2094 		ASSERT(lock & 1);
2095 		hash[bucket].dtdh_lock++;
2096 
2097 		return (NULL);
2098 next:
2099 		prev = dvar;
2100 		continue;
2101 	}
2102 
2103 	if (dvar == NULL) {
2104 		/*
2105 		 * If dvar is NULL, it is because we went off the rails:
2106 		 * one of the elements that we traversed in the hash chain
2107 		 * was deleted while we were traversing it.  In this case,
2108 		 * we assert that we aren't doing a dealloc (deallocs lock
2109 		 * the hash bucket to prevent themselves from racing with
2110 		 * one another), and retry the hash chain traversal.
2111 		 */
2112 		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
2113 		goto top;
2114 	}
2115 
2116 	if (op != DTRACE_DYNVAR_ALLOC) {
2117 		/*
2118 		 * If we are not to allocate a new variable, we want to
2119 		 * return NULL now.  Before we return, check that the value
2120 		 * of the lock word hasn't changed.  If it has, we may have
2121 		 * seen an inconsistent snapshot.
2122 		 */
2123 		if (op == DTRACE_DYNVAR_NOALLOC) {
2124 			if (hash[bucket].dtdh_lock != lock)
2125 				goto top;
2126 		} else {
2127 			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
2128 			ASSERT(hash[bucket].dtdh_lock == lock);
2129 			ASSERT(lock & 1);
2130 			hash[bucket].dtdh_lock++;
2131 		}
2132 
2133 		return (NULL);
2134 	}
2135 
2136 	/*
2137 	 * We need to allocate a new dynamic variable.  The size we need is the
2138 	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
2139 	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
2140 	 * the size of any referred-to data (dsize).  We then round the final
2141 	 * size up to the chunksize for allocation.
2142 	 */
2143 	for (ksize = 0, i = 0; i < nkeys; i++)
2144 		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
2145 
2146 	/*
2147 	 * This should be pretty much impossible, but could happen if, say,
2148 	 * strange DIF specified the tuple.  Ideally, this should be an
2149 	 * assertion and not an error condition -- but that requires that the
2150 	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
2151 	 * bullet-proof.  (That is, it must not be able to be fooled by
2152 	 * malicious DIF.)  Given the lack of backwards branches in DIF,
2153 	 * solving this would presumably not amount to solving the Halting
2154 	 * Problem -- but it still seems awfully hard.
2155 	 */
2156 	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
2157 	    ksize + dsize > chunksize) {
2158 		dcpu->dtdsc_drops++;
2159 		return (NULL);
2160 	}
2161 
2162 	nstate = DTRACE_DSTATE_EMPTY;
2163 
2164 	do {
2165 retry:
2166 		free = dcpu->dtdsc_free;
2167 
2168 		if (free == NULL) {
2169 			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
2170 			void *rval;
2171 
2172 			if (clean == NULL) {
2173 				/*
2174 				 * We're out of dynamic variable space on
2175 				 * this CPU.  Unless we have tried all CPUs,
2176 				 * we'll try to allocate from a different
2177 				 * CPU.
2178 				 */
2179 				switch (dstate->dtds_state) {
2180 				case DTRACE_DSTATE_CLEAN: {
2181 					void *sp = &dstate->dtds_state;
2182 
2183 					if (++cpu >= (int)NCPU)
2184 						cpu = 0;
2185 
2186 					if (dcpu->dtdsc_dirty != NULL &&
2187 					    nstate == DTRACE_DSTATE_EMPTY)
2188 						nstate = DTRACE_DSTATE_DIRTY;
2189 
2190 					if (dcpu->dtdsc_rinsing != NULL)
2191 						nstate = DTRACE_DSTATE_RINSING;
2192 
2193 					dcpu = zpercpu_get_cpu(dstate->dtds_percpu, cpu);
2194 
2195 					if (cpu != me)
2196 						goto retry;
2197 
2198 					(void) dtrace_cas32(sp,
2199 					    DTRACE_DSTATE_CLEAN, nstate);
2200 
2201 					/*
2202 					 * To increment the correct bean
2203 					 * counter, take another lap.
2204 					 */
2205 					goto retry;
2206 				}
2207 
2208 				case DTRACE_DSTATE_DIRTY:
2209 					dcpu->dtdsc_dirty_drops++;
2210 					break;
2211 
2212 				case DTRACE_DSTATE_RINSING:
2213 					dcpu->dtdsc_rinsing_drops++;
2214 					break;
2215 
2216 				case DTRACE_DSTATE_EMPTY:
2217 					dcpu->dtdsc_drops++;
2218 					break;
2219 				}
2220 
2221 				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2222 				return (NULL);
2223 			}
2224 
2225 			/*
2226 			 * The clean list appears to be non-empty.  We want to
2227 			 * move the clean list to the free list; we start by
2228 			 * moving the clean pointer aside.
2229 			 */
2230 			if (dtrace_casptr(&dcpu->dtdsc_clean,
2231 			    clean, NULL) != clean) {
2232 				/*
2233 				 * We are in one of two situations:
2234 				 *
2235 				 *  (a)	The clean list was switched to the
2236 				 *	free list by another CPU.
2237 				 *
2238 				 *  (b)	The clean list was added to by the
2239 				 *	cleansing cyclic.
2240 				 *
2241 				 * In either of these situations, we can
2242 				 * just reattempt the free list allocation.
2243 				 */
2244 				goto retry;
2245 			}
2246 
2247 			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2248 
2249 			/*
2250 			 * Now we'll move the clean list to the free list.
2251 			 * It's impossible for this to fail:  the only way
2252 			 * the free list can be updated is through this
2253 			 * code path, and only one CPU can own the clean list.
2254 			 * Thus, it would only be possible for this to fail if
2255 			 * this code were racing with dtrace_dynvar_clean().
2256 			 * (That is, if dtrace_dynvar_clean() updated the clean
2257 			 * list, and we ended up racing to update the free
2258 			 * list.)  This race is prevented by the dtrace_sync()
2259 			 * in dtrace_dynvar_clean() -- which flushes the
2260 			 * owners of the clean lists out before resetting
2261 			 * the clean lists.
2262 			 */
2263 			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2264 			ASSERT(rval == NULL);
2265 			goto retry;
2266 		}
2267 
2268 		dvar = free;
2269 		new_free = dvar->dtdv_next;
2270 	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2271 
2272 	/*
2273 	 * We have now allocated a new chunk.  We copy the tuple keys into the
2274 	 * tuple array and copy any referenced key data into the data space
2275 	 * following the tuple array.  As we do this, we relocate dttk_value
2276 	 * in the final tuple to point to the key data address in the chunk.
2277 	 */
2278 	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2279 	dvar->dtdv_data = (void *)(kdata + ksize);
2280 	dvar->dtdv_tuple.dtt_nkeys = nkeys;
2281 
2282 	for (i = 0; i < nkeys; i++) {
2283 		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2284 		size_t kesize = key[i].dttk_size;
2285 
2286 		if (kesize != 0) {
2287 			dtrace_bcopy(
2288 			    (const void *)(uintptr_t)key[i].dttk_value,
2289 			    (void *)kdata, kesize);
2290 			dkey->dttk_value = kdata;
2291 			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2292 		} else {
2293 			dkey->dttk_value = key[i].dttk_value;
2294 		}
2295 
2296 		dkey->dttk_size = kesize;
2297 	}
2298 
2299 	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2300 	dvar->dtdv_hashval = hashval;
2301 	dvar->dtdv_next = start;
2302 
2303 	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2304 		return (dvar);
2305 
2306 	/*
2307 	 * The cas has failed.  Either another CPU is adding an element to
2308 	 * this hash chain, or another CPU is deleting an element from this
2309 	 * hash chain.  The simplest way to deal with both of these cases
2310 	 * (though not necessarily the most efficient) is to free our
2311 	 * allocated block and tail-call ourselves.  Note that the free is
2312 	 * to the dirty list and _not_ to the free list.  This is to prevent
2313 	 * races with allocators, above.
2314 	 */
2315 	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2316 
2317 	dtrace_membar_producer();
2318 
2319 	do {
2320 		free = dcpu->dtdsc_dirty;
2321 		dvar->dtdv_next = free;
2322 	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2323 
2324 	return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
2325 }
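
/*
 * For reference (this sketch is not part of the implementation): the
 * hashing above is Jenkins' "One-at-a-time" hash, fed 16-bit chunks for
 * by-value keys and individual bytes for by-reference keys.  In its
 * conventional byte-wise form:
 *
 *	uint32_t
 *	jenkins_one_at_a_time(const uint8_t *key, size_t len)
 *	{
 *		uint32_t h = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			h += key[i];
 *			h += (h << 10);
 *			h ^= (h >> 6);
 *		}
 *
 *		h += (h << 3);
 *		h ^= (h >> 11);
 *		h += (h << 15);
 *
 *		return (h);
 *	}
 *
 * dtrace_dynvar() differs only in seeding the hash with
 * DTRACE_DYNHASH_VALID and remapping the two sentinel values afterwards.
 */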
2326 
2327 /*ARGSUSED*/
2328 static void
2329 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2330 {
2331 #pragma unused(arg) /* __APPLE__ */
2332 	if ((int64_t)nval < (int64_t)*oval)
2333 		*oval = nval;
2334 }
2335 
2336 /*ARGSUSED*/
2337 static void
2338 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2339 {
2340 #pragma unused(arg) /* __APPLE__ */
2341 	if ((int64_t)nval > (int64_t)*oval)
2342 		*oval = nval;
2343 }
2344 
2345 static void
2346 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2347 {
2348 	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2349 	int64_t val = (int64_t)nval;
2350 
2351 	if (val < 0) {
2352 		for (i = 0; i < zero; i++) {
2353 			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2354 				quanta[i] += incr;
2355 				return;
2356 			}
2357 		}
2358 	} else {
2359 		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2360 			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2361 				quanta[i - 1] += incr;
2362 				return;
2363 			}
2364 		}
2365 
2366 		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2367 		return;
2368 	}
2369 
2370 	ASSERT(0);
2371 }
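
/*
 * For illustration (not part of the implementation):
 * DTRACE_QUANTIZE_BUCKETVAL() labels the buckets ..., -4, -2, -1, 0, 1,
 * 2, 4, ..., with bucket DTRACE_QUANTIZE_ZEROBUCKET labelled 0.  A
 * positive value therefore lands in the bucket labelled with the largest
 * power of two that does not exceed it; for example,
 *
 *	dtrace_aggregate_quantize(quanta, 4, 1);
 *	dtrace_aggregate_quantize(quanta, 6, 1);
 *	dtrace_aggregate_quantize(quanta, 7, 1);
 *
 * all increment the bucket labelled 4.
 */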
2372 
2373 static void
2374 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2375 {
2376 	uint64_t arg = *lquanta++;
2377 	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2378 	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2379 	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2380 	int32_t val = (int32_t)nval, level;
2381 
2382 	ASSERT(step != 0);
2383 	ASSERT(levels != 0);
2384 
2385 	if (val < base) {
2386 		/*
2387 		 * This is an underflow.
2388 		 */
2389 		lquanta[0] += incr;
2390 		return;
2391 	}
2392 
2393 	level = (val - base) / step;
2394 
2395 	if (level < levels) {
2396 		lquanta[level + 1] += incr;
2397 		return;
2398 	}
2399 
2400 	/*
2401 	 * This is an overflow.
2402 	 */
2403 	lquanta[levels + 1] += incr;
2404 }
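
/*
 * For illustration (not part of the implementation): with base = 0,
 * step = 10 and levels = 10 (i.e. an lquantize() covering 0 through 100
 * in steps of 10), the mapping is:
 *
 *	val = -5	->  lquanta[0]		(underflow)
 *	val = 37	->  lquanta[4]		(level 3, the [30, 40) bucket)
 *	val = 120	->  lquanta[11]		(overflow)
 *
 * lquanta[] therefore holds levels + 2 counters:  one underflow bucket,
 * one bucket per level, and one overflow bucket.
 */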
2405 
2406 static int
2407 dtrace_aggregate_llquantize_bucket(int16_t factor, int16_t low, int16_t high,
2408                                    int16_t nsteps, int64_t value)
2409 {
2410 	int64_t this = 1, last, next;
2411 	int base = 1, order;
2412 
2413 	for (order = 0; order < low; ++order)
2414 		this *= factor;
2415 
2416 	/*
2417 	 * If our value is less than our factor taken to the power of the
2418 	 * low order of magnitude, it goes into the zeroth bucket.
2419 	 */
2420 	if (value < this)
2421 		return 0;
2422 	else
2423 		last = this;
2424 
2425 	for (this *= factor; order <= high; ++order) {
2426 		int nbuckets = this > nsteps ? nsteps : this;
2427 
2428 		/*
2429 		 * We should not generally get log/linear quantizations
2430 		 * with a high magnitude that allows 64-bits to
2431 		 * overflow, but we nonetheless protect against this
2432 		 * by explicitly checking for overflow, and clamping
2433 		 * our value accordingly.
2434 		 */
2435 		next = this * factor;
2436 		if (next < this) {
2437 			value = this - 1;
2438 		}
2439 
2440 		/*
2441 		 * If our value lies within this order of magnitude,
2442 		 * determine its position by taking the offset within
2443 		 * the order of magnitude, dividing by the bucket
2444 		 * width, and adding to our (accumulated) base.
2445 		 */
2446 		if (value < this) {
2447 			return (base + (value - last) / (this / nbuckets));
2448 		}
2449 
2450 		base += nbuckets - (nbuckets / factor);
2451 		last = this;
2452 		this = next;
2453 	}
2454 
2455 	/*
2456 	 * Our value is greater than or equal to our factor taken to the
2457 	 * power of one plus the high magnitude -- return the top bucket.
2458 	 */
2459 	return base;
2460 }
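
/*
 * For illustration (not part of the implementation): with factor = 10,
 * low = 0, high = 2 and nsteps = 10, values below 10^low = 1 fall into
 * bucket 0 and each decade is split into ten linear steps:
 *
 *	value = 7	->  1 + (7 - 1) / (10 / 10)	= bucket 7
 *	value = 47	->  10 + (47 - 10) / (100 / 10)	= bucket 13
 *
 * where bucket 13 covers the half-open interval [40, 50).
 */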
2461 
2462 static void
2463 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2464 {
2465 	uint64_t arg    = *llquanta++;
2466 	uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2467 	uint16_t low    = DTRACE_LLQUANTIZE_LOW(arg);
2468 	uint16_t high   = DTRACE_LLQUANTIZE_HIGH(arg);
2469 	uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2470 
2471 	llquanta[dtrace_aggregate_llquantize_bucket(factor, low, high, nsteps, nval)] += incr;
2472 }
2473 
2474 /*ARGSUSED*/
2475 static void
2476 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2477 {
2478 #pragma unused(arg) /* __APPLE__ */
2479 	data[0]++;
2480 	data[1] += nval;
2481 }
2482 
2483 /*ARGSUSED*/
2484 static void
2485 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2486 {
2487 #pragma unused(arg) /* __APPLE__ */
2488 	int64_t snval = (int64_t)nval;
2489 	uint64_t tmp[2];
2490 
2491 	data[0]++;
2492 	data[1] += nval;
2493 
2494 	/*
2495 	 * What we want to say here is:
2496 	 *
2497 	 * data[2] += nval * nval;
2498 	 *
2499 	 * But given that nval is 64-bit, we could easily overflow, so
2500 	 * we do this as 128-bit arithmetic.
2501 	 */
2502 	if (snval < 0)
2503 		snval = -snval;
2504 
2505 	dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2506 	dtrace_add_128(data + 2, tmp, data + 2);
2507 }
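
/*
 * For illustration (not part of the implementation): the counters
 * maintained above are
 *
 *	data[0]    = n			(number of samples)
 *	data[1]    = sum(x)
 *	data[2..3] = sum(x^2)		(128 bits, low word first)
 *
 * from which the consumer can recover the standard deviation as
 *
 *	sqrt(sum(x^2) / n - (sum(x) / n)^2)
 *
 * The 128-bit accumulation exists only to keep sum(x^2) from wrapping.
 */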
2508 
2509 /*ARGSUSED*/
2510 static void
2511 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2512 {
2513 #pragma unused(nval, arg) /* __APPLE__ */
2514 	*oval = *oval + 1;
2515 }
2516 
2517 /*ARGSUSED*/
2518 static void
2519 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2520 {
2521 #pragma unused(arg) /* __APPLE__ */
2522 	*oval += nval;
2523 }
2524 
2525 /*
2526  * Aggregate given the tuple in the principal data buffer, and the aggregating
2527  * action denoted by the specified dtrace_aggregation_t.  The aggregation
2528  * buffer is specified as the buf parameter.  This routine does not return
2529  * failure; if there is no space in the aggregation buffer, the data will be
2530  * dropped, and a corresponding counter incremented.
2531  */
2532 __attribute__((noinline))
2533 static void
2534 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2535     intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2536 {
2537 #pragma unused(arg)
2538 	dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2539 	uint32_t i, ndx, size, fsize;
2540 	uint32_t align = sizeof (uint64_t) - 1;
2541 	dtrace_aggbuffer_t *agb;
2542 	dtrace_aggkey_t *key;
2543 	uint32_t hashval = 0, limit, isstr;
2544 	caddr_t tomax, data, kdata;
2545 	dtrace_actkind_t action;
2546 	dtrace_action_t *act;
2547 	uintptr_t offs;
2548 
2549 	if (buf == NULL)
2550 		return;
2551 
2552 	if (!agg->dtag_hasarg) {
2553 		/*
2554 		 * Currently, only quantize(), lquantize() and llquantize() take
2555 		 * additional arguments, and they have the same semantics:  an increment
2556 		 * value that defaults to 1 when not present.  If additional
2557 		 * aggregating actions take arguments, the setting of the
2558 		 * default argument value will presumably have to become more
2559 		 * sophisticated...
2560 		 */
2561 		arg = 1;
2562 	}
2563 
2564 	action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2565 	size = rec->dtrd_offset - agg->dtag_base;
2566 	fsize = size + rec->dtrd_size;
2567 
2568 	ASSERT(dbuf->dtb_tomax != NULL);
2569 	data = dbuf->dtb_tomax + offset + agg->dtag_base;
2570 
2571 	if ((tomax = buf->dtb_tomax) == NULL) {
2572 		dtrace_buffer_drop(buf);
2573 		return;
2574 	}
2575 
2576 	/*
2577 	 * The metastructure is always at the bottom of the buffer.
2578 	 */
2579 	agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2580 	    sizeof (dtrace_aggbuffer_t));
2581 
2582 	if (buf->dtb_offset == 0) {
2583 		/*
2584 		 * We just kludge up approximately 1/8th of the size to be
2585 		 * buckets.  If this guess ends up being routinely
2586 		 * off-the-mark, we may need to dynamically readjust this
2587 		 * based on past performance.
2588 		 */
2589 		uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2590 
2591 		if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2592 		    (uintptr_t)tomax || hashsize == 0) {
2593 			/*
2594 			 * We've been given a ludicrously small buffer;
2595 			 * increment our drop count and leave.
2596 			 */
2597 			dtrace_buffer_drop(buf);
2598 			return;
2599 		}
2600 
2601 		/*
2602 		 * And now, a pathetic attempt to try to get an odd (or
2603 		 * perchance, a prime) hash size for better hash distribution.
2604 		 */
2605 		if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2606 			hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2607 
2608 		agb->dtagb_hashsize = hashsize;
2609 		agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2610 		    agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2611 		agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2612 
2613 		for (i = 0; i < agb->dtagb_hashsize; i++)
2614 			agb->dtagb_hash[i] = NULL;
2615 	}
2616 
2617 	ASSERT(agg->dtag_first != NULL);
2618 	ASSERT(agg->dtag_first->dta_intuple);
2619 
2620 	/*
2621 	 * Calculate the hash value based on the key.  Note that we _don't_
2622 	 * include the aggid in the hashing (but we will store it as part of
2623 	 * the key).  The hashing algorithm is Bob Jenkins' "One-at-a-time"
2624 	 * algorithm: a simple, quick algorithm that has no known funnels, and
2625 	 * gets good distribution in practice.  The efficacy of the hashing
2626 	 * algorithm (and a comparison with other algorithms) may be found by
2627 	 * running the ::dtrace_aggstat MDB dcmd.
2628 	 */
2629 	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2630 		i = act->dta_rec.dtrd_offset - agg->dtag_base;
2631 		limit = i + act->dta_rec.dtrd_size;
2632 		ASSERT(limit <= size);
2633 		isstr = DTRACEACT_ISSTRING(act);
2634 
2635 		for (; i < limit; i++) {
2636 			hashval += data[i];
2637 			hashval += (hashval << 10);
2638 			hashval ^= (hashval >> 6);
2639 
2640 			if (isstr && data[i] == '\0')
2641 				break;
2642 		}
2643 	}
2644 
2645 	hashval += (hashval << 3);
2646 	hashval ^= (hashval >> 11);
2647 	hashval += (hashval << 15);
2648 
2649 	/*
2650 	 * Yes, the divide here is expensive -- but it's generally the least
2651 	 * of the performance issues given the amount of data that we iterate
2652 	 * over to compute hash values, compare data, etc.
2653 	 */
2654 	ndx = hashval % agb->dtagb_hashsize;
2655 
2656 	for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2657 		ASSERT((caddr_t)key >= tomax);
2658 		ASSERT((caddr_t)key < tomax + buf->dtb_size);
2659 
2660 		if (hashval != key->dtak_hashval || key->dtak_size != size)
2661 			continue;
2662 
2663 		kdata = key->dtak_data;
2664 		ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2665 
2666 		for (act = agg->dtag_first; act->dta_intuple;
2667 		    act = act->dta_next) {
2668 			i = act->dta_rec.dtrd_offset - agg->dtag_base;
2669 			limit = i + act->dta_rec.dtrd_size;
2670 			ASSERT(limit <= size);
2671 			isstr = DTRACEACT_ISSTRING(act);
2672 
2673 			for (; i < limit; i++) {
2674 				if (kdata[i] != data[i])
2675 					goto next;
2676 
2677 				if (isstr && data[i] == '\0')
2678 					break;
2679 			}
2680 		}
2681 
2682 		if (action != key->dtak_action) {
2683 			/*
2684 			 * We are aggregating on the same value in the same
2685 			 * aggregation with two different aggregating actions.
2686 			 * (This should have been picked up in the compiler,
2687 			 * so we may be dealing with errant or devious DIF.)
2688 			 * This is an error condition; we indicate as much,
2689 			 * and return.
2690 			 */
2691 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2692 			return;
2693 		}
2694 
2695 		/*
2696 		 * This is a hit:  we need to apply the aggregator to
2697 		 * the value at this key.
2698 		 */
2699 		agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2700 		return;
2701 next:
2702 		continue;
2703 	}
2704 
2705 	/*
2706 	 * We didn't find it.  We need to allocate some zero-filled space,
2707 	 * link it into the hash table appropriately, and apply the aggregator
2708 	 * to the (zero-filled) value.
2709 	 */
2710 	offs = buf->dtb_offset;
2711 	while (offs & (align - 1))
2712 		offs += sizeof (uint32_t);
2713 
2714 	/*
2715 	 * If we don't have enough room to both allocate a new key _and_
2716 	 * its associated data, increment the drop count and return.
2717 	 */
2718 	if ((uintptr_t)tomax + offs + fsize >
2719 	    agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2720 		dtrace_buffer_drop(buf);
2721 		return;
2722 	}
2723 
2724 	/*CONSTCOND*/
2725 	ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2726 	key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2727 	agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2728 
2729 	key->dtak_data = kdata = tomax + offs;
2730 	buf->dtb_offset = offs + fsize;
2731 
2732 	/*
2733 	 * Now copy the data across.
2734 	 */
2735 	*((dtrace_aggid_t *)kdata) = agg->dtag_id;
2736 
2737 	for (i = sizeof (dtrace_aggid_t); i < size; i++)
2738 		kdata[i] = data[i];
2739 
2740 	/*
2741 	 * Because strings are not zeroed out by default, we need to iterate
2742 	 * looking for actions that store strings, and we need to explicitly
2743 	 * pad these strings out with zeroes.
2744 	 */
2745 	for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2746 		int nul;
2747 
2748 		if (!DTRACEACT_ISSTRING(act))
2749 			continue;
2750 
2751 		i = act->dta_rec.dtrd_offset - agg->dtag_base;
2752 		limit = i + act->dta_rec.dtrd_size;
2753 		ASSERT(limit <= size);
2754 
2755 		for (nul = 0; i < limit; i++) {
2756 			if (nul) {
2757 				kdata[i] = '\0';
2758 				continue;
2759 			}
2760 
2761 			if (data[i] != '\0')
2762 				continue;
2763 
2764 			nul = 1;
2765 		}
2766 	}
2767 
2768 	for (i = size; i < fsize; i++)
2769 		kdata[i] = 0;
2770 
2771 	key->dtak_hashval = hashval;
2772 	key->dtak_size = size;
2773 	key->dtak_action = action;
2774 	key->dtak_next = agb->dtagb_hash[ndx];
2775 	agb->dtagb_hash[ndx] = key;
2776 
2777 	/*
2778 	 * Finally, apply the aggregator.
2779 	 */
2780 	*((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2781 	agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2782 }
2783 
2784 /*
2785  * Given consumer state, this routine finds a speculation in the INACTIVE
2786  * state and transitions it into the ACTIVE state.  If there is no speculation
2787  * in the INACTIVE state, 0 is returned.  In this case, no error counter is
2788  * incremented -- it is up to the caller to take appropriate action.
2789  */
2790 static int
2791 dtrace_speculation(dtrace_state_t *state)
2792 {
2793 	int i = 0;
2794 	dtrace_speculation_state_t current;
2795 	uint32_t *stat = &state->dts_speculations_unavail, count;
2796 
2797 	while (i < state->dts_nspeculations) {
2798 		dtrace_speculation_t *spec = &state->dts_speculations[i];
2799 
2800 		current = spec->dtsp_state;
2801 
2802 		if (current != DTRACESPEC_INACTIVE) {
2803 			if (current == DTRACESPEC_COMMITTINGMANY ||
2804 			    current == DTRACESPEC_COMMITTING ||
2805 			    current == DTRACESPEC_DISCARDING)
2806 				stat = &state->dts_speculations_busy;
2807 			i++;
2808 			continue;
2809 		}
2810 
2811 		if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2812 		    current, DTRACESPEC_ACTIVE) == current)
2813 			return (i + 1);
2814 	}
2815 
2816 	/*
2817 	 * We couldn't find a speculation.  If we found as much as a single
2818 	 * busy speculation buffer, we'll attribute this failure as "busy"
2819 	 * instead of "unavail".
2820 	 */
2821 	do {
2822 		count = *stat;
2823 	} while (dtrace_cas32(stat, count, count + 1) != count);
2824 
2825 	return (0);
2826 }
2827 
2828 /*
2829  * This routine commits an active speculation.  If the specified speculation
2830  * is not in a valid state to perform a commit(), this routine will silently do
2831  * nothing.  The state of the specified speculation is transitioned according
2832  * to the state transition diagram outlined in <sys/dtrace_impl.h>
2833  */
2834 static void
2835 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2836     dtrace_specid_t which)
2837 {
2838 	dtrace_speculation_t *spec;
2839 	dtrace_buffer_t *src, *dest;
2840 	uintptr_t daddr, saddr, dlimit, slimit;
2841 	dtrace_speculation_state_t current,  new = DTRACESPEC_INACTIVE;
2842 	intptr_t offs;
2843 	uint64_t timestamp;
2844 
2845 	if (which == 0)
2846 		return;
2847 
2848 	if (which > (dtrace_specid_t)state->dts_nspeculations) {
2849 		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2850 		return;
2851 	}
2852 
2853 	spec = &state->dts_speculations[which - 1];
2854 	src = &spec->dtsp_buffer[cpu];
2855 	dest = &state->dts_buffer[cpu];
2856 
2857 	do {
2858 		current = spec->dtsp_state;
2859 
2860 		if (current == DTRACESPEC_COMMITTINGMANY)
2861 			break;
2862 
2863 		switch (current) {
2864 		case DTRACESPEC_INACTIVE:
2865 		case DTRACESPEC_DISCARDING:
2866 			return;
2867 
2868 		case DTRACESPEC_COMMITTING:
2869 			/*
2870 			 * This is only possible if we are (a) commit()'ing
2871 			 * without having done a prior speculate() on this CPU
2872 			 * and (b) racing with another commit() on a different
2873 			 * CPU.  There's nothing to do -- we just assert that
2874 			 * our offset is 0.
2875 			 */
2876 			ASSERT(src->dtb_offset == 0);
2877 			return;
2878 
2879 		case DTRACESPEC_ACTIVE:
2880 			new = DTRACESPEC_COMMITTING;
2881 			break;
2882 
2883 		case DTRACESPEC_ACTIVEONE:
2884 			/*
2885 			 * This speculation is active on one CPU.  If our
2886 			 * buffer offset is non-zero, we know that the one CPU
2887 			 * must be us.  Otherwise, we are committing on a
2888 			 * different CPU from the speculate(), and we must
2889 			 * rely on being asynchronously cleaned.
2890 			 */
2891 			if (src->dtb_offset != 0) {
2892 				new = DTRACESPEC_COMMITTING;
2893 				break;
2894 			}
2895 			OS_FALLTHROUGH;
2896 
2897 		case DTRACESPEC_ACTIVEMANY:
2898 			new = DTRACESPEC_COMMITTINGMANY;
2899 			break;
2900 
2901 		default:
2902 			ASSERT(0);
2903 		}
2904 	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2905 	    current, new) != current);
2906 
2907 	/*
2908 	 * We have set the state to indicate that we are committing this
2909 	 * speculation.  Now reserve the necessary space in the destination
2910 	 * buffer.
2911 	 */
2912 	if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2913 	    sizeof (uint64_t), state, NULL)) < 0) {
2914 		dtrace_buffer_drop(dest);
2915 		goto out;
2916 	}
2917 
2918 	/*
2919 	 * We have sufficient space to copy the speculative buffer into the
2920 	 * primary buffer.  First, modify the speculative buffer, filling
2921 	 * in the timestamp of all entries with the current time.  The data
2922 	 * must have the commit() time rather than the time it was traced,
2923 	 * so that all entries in the primary buffer are in timestamp order.
2924 	 */
2925 	timestamp = dtrace_gethrtime();
2926 	saddr = (uintptr_t)src->dtb_tomax;
2927 	slimit = saddr + src->dtb_offset;
2928 	while (saddr < slimit) {
2929 		size_t size;
2930 		dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2931 
2932 		if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2933 			saddr += sizeof (dtrace_epid_t);
2934 			continue;
2935 		}
2936 
2937 		ASSERT(dtrh->dtrh_epid <= ((dtrace_epid_t) state->dts_necbs));
2938 		size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2939 
2940 		ASSERT(saddr + size <= slimit);
2941 		ASSERT(size >= sizeof(dtrace_rechdr_t));
2942 		ASSERT(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh) == UINT64_MAX);
2943 
2944 		DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2945 
2946 		saddr += size;
2947 	}
2948 
2949 	/*
2950 	 * Copy the buffer across.  (Note that this is a
2951 	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2952 	 * a serious performance issue, a high-performance DTrace-specific
2953 	 * bcopy() should obviously be invented.)
2954 	 */
2955 	daddr = (uintptr_t)dest->dtb_tomax + offs;
2956 	dlimit = daddr + src->dtb_offset;
2957 	saddr = (uintptr_t)src->dtb_tomax;
2958 
2959 	/*
2960 	 * First, the aligned portion.
2961 	 */
2962 	while (dlimit - daddr >= sizeof (uint64_t)) {
2963 		*((uint64_t *)daddr) = *((uint64_t *)saddr);
2964 
2965 		daddr += sizeof (uint64_t);
2966 		saddr += sizeof (uint64_t);
2967 	}
2968 
2969 	/*
2970 	 * Now any left-over bit...
2971 	 */
2972 	while (dlimit - daddr)
2973 		*((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2974 
2975 	/*
2976 	 * Finally, commit the reserved space in the destination buffer.
2977 	 */
2978 	dest->dtb_offset = offs + src->dtb_offset;
2979 
2980 out:
2981 	/*
2982 	 * If we're lucky enough to be the only active CPU on this speculation
2983 	 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2984 	 */
2985 	if (current == DTRACESPEC_ACTIVE ||
2986 	    (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2987 		uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2988 		    DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2989 #pragma unused(rval) /* __APPLE__ */
2990 
2991 		ASSERT(rval == DTRACESPEC_COMMITTING);
2992 	}
2993 
2994 	src->dtb_offset = 0;
2995 	src->dtb_xamot_drops += src->dtb_drops;
2996 	src->dtb_drops = 0;
2997 }
2998 
2999 /*
3000  * This routine discards an active speculation.  If the specified speculation
3001  * is not in a valid state to perform a discard(), this routine will silently
3002  * do nothing.  The state of the specified speculation is transitioned
3003  * according to the state transition diagram outlined in <sys/dtrace_impl.h>
3004  */
3005 __attribute__((noinline))
3006 static void
3007 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
3008     dtrace_specid_t which)
3009 {
3010 	dtrace_speculation_t *spec;
3011 	dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE;
3012 	dtrace_buffer_t *buf;
3013 
3014 	if (which == 0)
3015 		return;
3016 
3017 	if (which > (dtrace_specid_t)state->dts_nspeculations) {
3018 		cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3019 		return;
3020 	}
3021 
3022 	spec = &state->dts_speculations[which - 1];
3023 	buf = &spec->dtsp_buffer[cpu];
3024 
3025 	do {
3026 		current = spec->dtsp_state;
3027 
3028 		switch (current) {
3029 		case DTRACESPEC_INACTIVE:
3030 		case DTRACESPEC_COMMITTINGMANY:
3031 		case DTRACESPEC_COMMITTING:
3032 		case DTRACESPEC_DISCARDING:
3033 			return;
3034 
3035 		case DTRACESPEC_ACTIVE:
3036 		case DTRACESPEC_ACTIVEMANY:
3037 			new = DTRACESPEC_DISCARDING;
3038 			break;
3039 
3040 		case DTRACESPEC_ACTIVEONE:
3041 			if (buf->dtb_offset != 0) {
3042 				new = DTRACESPEC_INACTIVE;
3043 			} else {
3044 				new = DTRACESPEC_DISCARDING;
3045 			}
3046 			break;
3047 
3048 		default:
3049 			ASSERT(0);
3050 		}
3051 	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3052 	    current, new) != current);
3053 
3054 	buf->dtb_offset = 0;
3055 	buf->dtb_drops = 0;
3056 }
3057 
3058 /*
3059  * Note:  not called from probe context.  This function is called
3060  * asynchronously from cross call context to clean any speculations that are
3061  * in the COMMITTINGMANY or DISCARDING states.  These speculations may not be
3062  * transitioned back to the INACTIVE state until all CPUs have cleaned the
3063  * speculation.
3064  */
3065 static void
3066 dtrace_speculation_clean_here(dtrace_state_t *state)
3067 {
3068 	dtrace_icookie_t cookie;
3069 	processorid_t cpu = CPU->cpu_id;
3070 	dtrace_buffer_t *dest = &state->dts_buffer[cpu];
3071 	dtrace_specid_t i;
3072 
3073 	cookie = dtrace_interrupt_disable();
3074 
3075 	if (dest->dtb_tomax == NULL) {
3076 		dtrace_interrupt_enable(cookie);
3077 		return;
3078 	}
3079 
3080 	for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) {
3081 		dtrace_speculation_t *spec = &state->dts_speculations[i];
3082 		dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
3083 
3084 		if (src->dtb_tomax == NULL)
3085 			continue;
3086 
3087 		if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
3088 			src->dtb_offset = 0;
3089 			continue;
3090 		}
3091 
3092 		if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3093 			continue;
3094 
3095 		if (src->dtb_offset == 0)
3096 			continue;
3097 
3098 		dtrace_speculation_commit(state, cpu, i + 1);
3099 	}
3100 
3101 	dtrace_interrupt_enable(cookie);
3102 }
3103 
3104 /*
3105  * Note:  not called from probe context.  This function is called
3106  * asynchronously (and at a regular interval) to clean any speculations that
3107  * are in the COMMITTINGMANY or DISCARDING states.  If it discovers that there
3108  * is work to be done, it cross calls all CPUs to perform that work;
3109  * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
3110  * INACTIVE state until they have been cleaned by all CPUs.
3111  */
3112 static void
3113 dtrace_speculation_clean(dtrace_state_t *state)
3114 {
3115 	int work = 0;
3116 	uint32_t rv;
3117 	dtrace_specid_t i;
3118 
3119 	for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) {
3120 		dtrace_speculation_t *spec = &state->dts_speculations[i];
3121 
3122 		ASSERT(!spec->dtsp_cleaning);
3123 
3124 		if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
3125 		    spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3126 			continue;
3127 
3128 		work++;
3129 		spec->dtsp_cleaning = 1;
3130 	}
3131 
3132 	if (!work)
3133 		return;
3134 
3135 	dtrace_xcall(DTRACE_CPUALL,
3136 	    (dtrace_xcall_t)dtrace_speculation_clean_here, state);
3137 
3138 	/*
3139 	 * We now know that all CPUs have committed or discarded their
3140 	 * speculation buffers, as appropriate.  We can now set the state
3141 	 * to inactive.
3142 	 */
3143 	for (i = 0; i < (dtrace_specid_t)state->dts_nspeculations; i++) {
3144 		dtrace_speculation_t *spec = &state->dts_speculations[i];
3145 		dtrace_speculation_state_t current, new;
3146 
3147 		if (!spec->dtsp_cleaning)
3148 			continue;
3149 
3150 		current = spec->dtsp_state;
3151 		ASSERT(current == DTRACESPEC_DISCARDING ||
3152 		    current == DTRACESPEC_COMMITTINGMANY);
3153 
3154 		new = DTRACESPEC_INACTIVE;
3155 
3156 		rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
3157 		ASSERT(rv == current);
3158 		spec->dtsp_cleaning = 0;
3159 	}
3160 }
3161 
3162 /*
3163  * Called as part of a speculate() to get the speculative buffer associated
3164  * with a given speculation.  Returns NULL if the specified speculation is not
3165  * in an ACTIVE state.  If the speculation is in the ACTIVEONE state -- and
3166  * the active CPU is not the specified CPU -- the speculation will be
3167  * atomically transitioned into the ACTIVEMANY state.
3168  */
3169 __attribute__((noinline))
3170 static dtrace_buffer_t *
3171 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
3172     dtrace_specid_t which)
3173 {
3174 	dtrace_speculation_t *spec;
3175 	dtrace_speculation_state_t current, new = DTRACESPEC_INACTIVE;
3176 	dtrace_buffer_t *buf;
3177 
3178 	if (which == 0)
3179 		return (NULL);
3180 
3181 	if (which > (dtrace_specid_t)state->dts_nspeculations) {
3182 		cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3183 		return (NULL);
3184 	}
3185 
3186 	spec = &state->dts_speculations[which - 1];
3187 	buf = &spec->dtsp_buffer[cpuid];
3188 
3189 	do {
3190 		current = spec->dtsp_state;
3191 
3192 		switch (current) {
3193 		case DTRACESPEC_INACTIVE:
3194 		case DTRACESPEC_COMMITTINGMANY:
3195 		case DTRACESPEC_DISCARDING:
3196 			return (NULL);
3197 
3198 		case DTRACESPEC_COMMITTING:
3199 			ASSERT(buf->dtb_offset == 0);
3200 			return (NULL);
3201 
3202 		case DTRACESPEC_ACTIVEONE:
3203 			/*
3204 			 * This speculation is currently active on one CPU.
3205 			 * Check the offset in the buffer; if it's non-zero,
3206 			 * that CPU must be us (and we leave the state alone).
3207 			 * If it's zero, assume that we're starting on a new
3208 			 * CPU -- and change the state to indicate that the
3209 			 * speculation is active on more than one CPU.
3210 			 */
3211 			if (buf->dtb_offset != 0)
3212 				return (buf);
3213 
3214 			new = DTRACESPEC_ACTIVEMANY;
3215 			break;
3216 
3217 		case DTRACESPEC_ACTIVEMANY:
3218 			return (buf);
3219 
3220 		case DTRACESPEC_ACTIVE:
3221 			new = DTRACESPEC_ACTIVEONE;
3222 			break;
3223 
3224 		default:
3225 			ASSERT(0);
3226 		}
3227 	} while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3228 	    current, new) != current);
3229 
3230 	ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3231 	return (buf);
3232 }
3233 
3234 /*
3235  * Return a string.  In the event that the user lacks the privilege to access
3236  * arbitrary kernel memory, we copy the string out to scratch memory so that we
3237  * don't fail access checking.
3238  *
3239  * dtrace_dif_variable() uses this routine as a helper for various
3240  * builtin values such as 'execname' and 'probefunc.'
3241  */
3242 static
3243 uintptr_t
3244 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3245     dtrace_mstate_t *mstate)
3246 {
3247 	uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3248 	uintptr_t ret;
3249 	size_t strsz;
3250 
3251 	/*
3252 	 * The easy case: this probe is allowed to read all of memory, so
3253 	 * we can just return this as a vanilla pointer.
3254 	 */
3255 	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3256 		return (addr);
3257 
3258 	/*
3259 	 * This is the tougher case: we copy the string in question from
3260 	 * kernel memory into scratch memory and return it that way: this
3261 	 * ensures that we won't trip up when access checking tests the
3262 	 * BYREF return value.
3263 	 */
3264 	strsz = dtrace_strlen((char *)addr, size) + 1;
3265 
3266 	if (mstate->dtms_scratch_ptr + strsz >
3267 	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3268 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3269 		return (0);
3270 	}
3271 
3272 	dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3273 	    strsz);
3274 	ret = mstate->dtms_scratch_ptr;
3275 	mstate->dtms_scratch_ptr += strsz;
3276 	return (ret);
3277 }
3278 
3279 /*
3280  * This function implements the DIF emulator's variable lookups.  The emulator
3281  * passes a reserved variable identifier and optional built-in array index.
3282  */
3283 static uint64_t
3284 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3285     uint64_t ndx)
3286 {
3287 	/*
3288 	 * If we're accessing one of the uncached arguments, we'll turn this
3289 	 * into a reference in the args array.
3290 	 */
3291 	if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3292 		ndx = v - DIF_VAR_ARG0;
3293 		v = DIF_VAR_ARGS;
3294 	}
3295 
3296 	switch (v) {
3297 	case DIF_VAR_ARGS:
3298 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3299 		if (ndx >= sizeof (mstate->dtms_arg) /
3300 		    sizeof (mstate->dtms_arg[0])) {
3301 			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3302 			dtrace_vstate_t *vstate = &state->dts_vstate;
3303 			dtrace_provider_t *pv;
3304 			uint64_t val;
3305 
3306 			pv = mstate->dtms_probe->dtpr_provider;
3307 			if (pv->dtpv_pops.dtps_getargval != NULL)
3308 				val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3309 				    mstate->dtms_probe->dtpr_id,
3310 				    mstate->dtms_probe->dtpr_arg, ndx, aframes);
3311 			/* Special case access of arg5 as passed to dtrace_probe_error() (which see.) */
3312 			else if (mstate->dtms_probe->dtpr_id == dtrace_probeid_error && ndx == 5) {
3313 				return ((dtrace_state_t *)(uintptr_t)(mstate->dtms_arg[0]))->dts_arg_error_illval;
3314 			}
3315 
3316 			else
3317 				val = dtrace_getarg(ndx, aframes, mstate, vstate);
3318 
3319 			/*
3320 			 * This is regrettably required to keep the compiler
3321 			 * from tail-optimizing the call to dtrace_getarg().
3322 			 * The condition always evaluates to true, but the
3323 			 * compiler has no way of figuring that out a priori.
3324 			 * (None of this would be necessary if the compiler
3325 			 * could be relied upon to _always_ tail-optimize
3326 			 * the call to dtrace_getarg() -- but it can't.)
3327 			 */
3328 			if (mstate->dtms_probe != NULL)
3329 				return (val);
3330 
3331 			ASSERT(0);
3332 		}
3333 
3334 		return (mstate->dtms_arg[ndx]);
3335 
3336 	case DIF_VAR_UREGS: {
3337 		thread_t thread;
3338 
3339 		if (!dtrace_priv_proc(state))
3340 			return (0);
3341 
3342 		if ((thread = current_thread()) == NULL) {
3343 			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3344 			cpu_core[CPU->cpu_id].cpuc_dtrace_illval = 0;
3345 			return (0);
3346 		}
3347 
3348 		return (dtrace_getreg(find_user_regs(thread), ndx));
3349 	}
3350 
3351 	case DIF_VAR_VMREGS: {
3352 		uint64_t rval;
3353 
3354 		if (!dtrace_priv_kernel(state))
3355 			return (0);
3356 
3357 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3358 
3359 		rval = dtrace_getvmreg(ndx);
3360 
3361 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3362 
3363 		return (rval);
3364 	}
3365 
3366 	case DIF_VAR_CURTHREAD:
3367 		if (!dtrace_priv_kernel(state))
3368 			return (0);
3369 
3370 		return ((uint64_t)(uintptr_t)current_thread());
3371 
3372 	case DIF_VAR_TIMESTAMP:
3373 		if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3374 			mstate->dtms_timestamp = dtrace_gethrtime();
3375 			mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3376 		}
3377 		return (mstate->dtms_timestamp);
3378 
3379 	case DIF_VAR_VTIMESTAMP:
3380 		ASSERT(dtrace_vtime_references != 0);
3381 		return (dtrace_get_thread_vtime(current_thread()));
3382 
3383 	case DIF_VAR_WALLTIMESTAMP:
3384 		if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3385 			mstate->dtms_walltimestamp = dtrace_gethrestime();
3386 			mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3387 		}
3388 		return (mstate->dtms_walltimestamp);
3389 
3390 	case DIF_VAR_MACHTIMESTAMP:
3391 		if (!(mstate->dtms_present & DTRACE_MSTATE_MACHTIMESTAMP)) {
3392 			mstate->dtms_machtimestamp = mach_absolute_time();
3393 			mstate->dtms_present |= DTRACE_MSTATE_MACHTIMESTAMP;
3394 		}
3395 		return (mstate->dtms_machtimestamp);
3396 
3397 	case DIF_VAR_MACHCTIMESTAMP:
3398 		if (!(mstate->dtms_present & DTRACE_MSTATE_MACHCTIMESTAMP)) {
3399 			mstate->dtms_machctimestamp = mach_continuous_time();
3400 			mstate->dtms_present |= DTRACE_MSTATE_MACHCTIMESTAMP;
3401 		}
3402 		return (mstate->dtms_machctimestamp);
3403 
3404 
3405 	case DIF_VAR_CPU:
3406 		return ((uint64_t) dtrace_get_thread_last_cpu_id(current_thread()));
3407 
3408 	case DIF_VAR_IPL:
3409 		if (!dtrace_priv_kernel(state))
3410 			return (0);
3411 		if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3412 			mstate->dtms_ipl = dtrace_getipl();
3413 			mstate->dtms_present |= DTRACE_MSTATE_IPL;
3414 		}
3415 		return (mstate->dtms_ipl);
3416 
3417 	case DIF_VAR_EPID:
3418 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3419 		return (mstate->dtms_epid);
3420 
3421 	case DIF_VAR_ID:
3422 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3423 		return (mstate->dtms_probe->dtpr_id);
3424 
3425 	case DIF_VAR_STACKDEPTH:
3426 		if (!dtrace_priv_kernel(state))
3427 			return (0);
3428 		if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3429 			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3430 
3431 			mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3432 			mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3433 		}
3434 		return (mstate->dtms_stackdepth);
3435 
3436 	case DIF_VAR_USTACKDEPTH:
3437 		if (!dtrace_priv_proc(state))
3438 			return (0);
3439 		if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3440 			/*
3441 			 * See comment in DIF_VAR_PID.
3442 			 */
3443 			if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3444 			    CPU_ON_INTR(CPU)) {
3445 				mstate->dtms_ustackdepth = 0;
3446 			} else {
3447 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3448 				mstate->dtms_ustackdepth =
3449 				    dtrace_getustackdepth();
3450 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3451 			}
3452 			mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3453 		}
3454 		return (mstate->dtms_ustackdepth);
3455 
3456 	case DIF_VAR_CALLER:
3457 		if (!dtrace_priv_kernel(state))
3458 			return (0);
3459 		if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3460 			int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3461 
3462 			if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3463 				/*
3464 				 * If this is an unanchored probe, we are
3465 				 * required to go through the slow path:
3466 				 * dtrace_caller() only guarantees correct
3467 				 * results for anchored probes.
3468 				 */
3469 				pc_t caller[2];
3470 
3471 				dtrace_getpcstack(caller, 2, aframes,
3472 				    (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3473 				mstate->dtms_caller = caller[1];
3474 			} else if ((mstate->dtms_caller =
3475 				dtrace_caller(aframes)) == (uintptr_t)-1) {
3476 				/*
3477 				 * We have failed to do this the quick way;
3478 				 * we must resort to the slower approach of
3479 				 * calling dtrace_getpcstack().
3480 				 */
3481 				pc_t caller;
3482 
3483 				dtrace_getpcstack(&caller, 1, aframes, NULL);
3484 				mstate->dtms_caller = caller;
3485 			}
3486 
3487 			mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3488 		}
3489 		return (mstate->dtms_caller);
3490 
3491 	case DIF_VAR_UCALLER:
3492 		if (!dtrace_priv_proc(state))
3493 			return (0);
3494 
3495 		if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3496 			uint64_t ustack[3];
3497 
3498 			/*
3499 			 * dtrace_getupcstack() fills in the first uint64_t
3500 			 * with the current PID.  The second uint64_t will
3501 			 * be the program counter at user-level.  The third
3502 			 * uint64_t will contain the caller, which is what
3503 			 * we're after.
3504 			 */
3505 			ustack[2] = 0;
3506 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3507 			dtrace_getupcstack(ustack, 3);
3508 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3509 			mstate->dtms_ucaller = ustack[2];
3510 			mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3511 		}
3512 
3513 		return (mstate->dtms_ucaller);
3514 
3515 	case DIF_VAR_PROBEPROV:
3516 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3517 		return (dtrace_dif_varstr(
3518 		    (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3519 		    state, mstate));
3520 
3521 	case DIF_VAR_PROBEMOD:
3522 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3523 		return (dtrace_dif_varstr(
3524 		    (uintptr_t)mstate->dtms_probe->dtpr_mod,
3525 		    state, mstate));
3526 
3527 	case DIF_VAR_PROBEFUNC:
3528 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3529 		return (dtrace_dif_varstr(
3530 		    (uintptr_t)mstate->dtms_probe->dtpr_func,
3531 		    state, mstate));
3532 
3533 	case DIF_VAR_PROBENAME:
3534 		ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3535 		return (dtrace_dif_varstr(
3536 		    (uintptr_t)mstate->dtms_probe->dtpr_name,
3537 		    state, mstate));
3538 
3539 	case DIF_VAR_PID:
3540 		if (!dtrace_priv_proc_relaxed(state))
3541 			return (0);
3542 
3543 		/*
3544 		 * Note that we are assuming that an unanchored probe is
3545 		 * always due to a high-level interrupt.  (And we're assuming
3546 		 * that there is only a single high level interrupt.)
3547 		 */
3548 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3549 			/* Anchored probe that fires while on an interrupt accrues to process 0 */
3550 			return 0;
3551 
3552 		return ((uint64_t)dtrace_proc_selfpid());
3553 
3554 	case DIF_VAR_PPID:
3555 		if (!dtrace_priv_proc_relaxed(state))
3556 			return (0);
3557 
3558 		/*
3559 		 * See comment in DIF_VAR_PID.
3560 		 */
3561 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3562 			return (0);
3563 
3564 		return ((uint64_t)dtrace_proc_selfppid());
3565 
3566 	case DIF_VAR_TID:
3567 		/* We do not need to check for null current_thread() */
3568 		return thread_tid(current_thread()); /* globally unique */
3569 
3570 	case DIF_VAR_PTHREAD_SELF:
3571 		if (!dtrace_priv_proc(state))
3572 			return (0);
3573 
3574 		/* Not currently supported, but we should be able to delta the dispatchqaddr and dispatchqoffset to get pthread_self */
3575 		return 0;
3576 
3577 	case DIF_VAR_DISPATCHQADDR:
3578 		if (!dtrace_priv_proc(state))
3579 			return (0);
3580 
3581 		/* We do not need to check for null current_thread() */
3582 		return thread_dispatchqaddr(current_thread());
3583 
3584 	case DIF_VAR_EXECNAME:
3585 	{
3586 		char *xname = (char *)mstate->dtms_scratch_ptr;
3587 		char *pname = proc_best_name(curproc);
3588 		size_t scratch_size = sizeof(proc_name_t);
3589 
3590 		/* The scratch allocation's lifetime is that of the clause. */
3591 		if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
3592 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3593 			return 0;
3594 		}
3595 
3596 		if (!dtrace_priv_proc_relaxed(state))
3597 			return (0);
3598 
3599 		mstate->dtms_scratch_ptr += scratch_size;
3600 		strlcpy(xname, pname, scratch_size);
3601 
3602 		return ((uint64_t)(uintptr_t)xname);
3603 	}
3604 
3605 
3606 	case DIF_VAR_ZONENAME:
3607 	{
3608 		/* scratch_size is equal to length('global') + 1 for the null-terminator. */
3609 		char *zname = (char *)mstate->dtms_scratch_ptr;
3610 		size_t scratch_size = 6 + 1;
3611 
3612 		if (!dtrace_priv_proc(state))
3613 			return (0);
3614 
3615 		/* The scratch allocation's lifetime is that of the clause. */
3616 		if (!DTRACE_INSCRATCH(mstate, scratch_size)) {
3617 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3618 			return 0;
3619 		}
3620 
3621 		mstate->dtms_scratch_ptr += scratch_size;
3622 
3623 		/* The kernel does not provide zonename, it will always return 'global'. */
3624 		strlcpy(zname, "global", scratch_size);
3625 
3626 		return ((uint64_t)(uintptr_t)zname);
3627 	}
3628 
3629 #if CONFIG_PERVASIVE_CPI && MONOTONIC
3630 	case DIF_VAR_CPUINSTRS:
3631 		return mt_cur_cpu_instrs();
3632 
3633 	case DIF_VAR_CPUCYCLES:
3634 		return mt_cur_cpu_cycles();
3635 
3636 	case DIF_VAR_VINSTRS: {
3637 		struct recount_usage usage = { 0 };
3638 		recount_current_thread_usage(&usage);
3639 		return usage.ru_instructions;
3640 	}
3641 
3642 	case DIF_VAR_VCYCLES: {
3643 		struct recount_usage usage = { 0 };
3644 		recount_current_thread_usage(&usage);
3645 		return usage.ru_cycles;
3646 	}
3647 #else /* CONFIG_PERVASIVE_CPI && MONOTONIC */
3648 	case DIF_VAR_CPUINSTRS: /* FALLTHROUGH */
3649 	case DIF_VAR_CPUCYCLES: /* FALLTHROUGH */
3650 	case DIF_VAR_VINSTRS: /* FALLTHROUGH */
3651 	case DIF_VAR_VCYCLES: /* FALLTHROUGH */
3652 		return 0;
3653 #endif /* !CONFIG_PERVASIVE_CPI || !MONOTONIC */
3654 
3655 	case DIF_VAR_UID:
3656 		if (!dtrace_priv_proc_relaxed(state))
3657 			return (0);
3658 
3659 		/*
3660 		 * See comment in DIF_VAR_PID.
3661 		 */
3662 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3663 			return (0);
3664 
3665 		return ((uint64_t) dtrace_proc_selfruid());
3666 
3667 	case DIF_VAR_GID:
3668 		if (!dtrace_priv_proc(state))
3669 			return (0);
3670 
3671 		/*
3672 		 * See comment in DIF_VAR_PID.
3673 		 */
3674 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3675 			return (0);
3676 
3677 		if (dtrace_CRED() != NULL)
3678 			/* Credential does not require lazy initialization. */
3679 			return ((uint64_t)kauth_getgid());
3680 		else {
3681 			/* proc_lock would be taken under kauth_cred_proc_ref() in kauth_cred_get(). */
3682 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3683 			return -1ULL;
3684 		}
3685 
3686 	case DIF_VAR_ERRNO: {
3687 		uthread_t uthread = current_uthread();
3688 		if (!dtrace_priv_proc(state))
3689 			return (0);
3690 
3691 		/*
3692 		 * See comment in DIF_VAR_PID.
3693 		 */
3694 		if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3695 			return (0);
3696 
3697 		if (uthread)
3698 			return (uint64_t)uthread->t_dtrace_errno;
3699 		else {
3700 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3701 			return -1ULL;
3702 		}
3703 	}
3704 
3705 	default:
3706 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3707 		return (0);
3708 	}
3709 }
3710 
3711 typedef enum dtrace_json_state {
3712 	DTRACE_JSON_REST = 1,
3713 	DTRACE_JSON_OBJECT,
3714 	DTRACE_JSON_STRING,
3715 	DTRACE_JSON_STRING_ESCAPE,
3716 	DTRACE_JSON_STRING_ESCAPE_UNICODE,
3717 	DTRACE_JSON_COLON,
3718 	DTRACE_JSON_COMMA,
3719 	DTRACE_JSON_VALUE,
3720 	DTRACE_JSON_IDENTIFIER,
3721 	DTRACE_JSON_NUMBER,
3722 	DTRACE_JSON_NUMBER_FRAC,
3723 	DTRACE_JSON_NUMBER_EXP,
3724 	DTRACE_JSON_COLLECT_OBJECT
3725 } dtrace_json_state_t;
3726 
3727 /*
3728  * This function possesses just enough knowledge about JSON to extract a single
3729  * value from a JSON string and store it in the scratch buffer.  It is able
3730  * to extract nested object values, and members of arrays by index.
3731  *
3732  * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3733  * be looked up as we descend into the object tree.  e.g.
3734  *
3735  *    foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3736  *       with nelems = 5.
3737  *
3738  * The run time of this function must be bounded above by strsize to limit the
3739  * amount of work done in probe context.  As such, it is implemented as a
3740  * simple state machine, reading one character at a time using safe loads
3741  * until we find the requested element, hit a parsing error or run off the
3742  * end of the object or string.
3743  *
3744  * As there is no way for a subroutine to return an error without interrupting
3745  * clause execution, we simply return NULL in the event of a missing key or any
3746  * other error condition.  Each NULL return in this function is commented with
3747  * the error condition it represents -- parsing or otherwise.
3748  *
3749  * The set of states for the state machine closely matches the JSON
3750  * specification (http://json.org/).  Briefly:
3751  *
3752  *   DTRACE_JSON_REST:
3753  *     Skip whitespace until we find either a top-level Object, moving
3754  *     to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3755  *
3756  *   DTRACE_JSON_OBJECT:
3757  *     Locate the next key String in an Object.  Sets a flag to denote
3758  *     the next String as a key string and moves to DTRACE_JSON_STRING.
3759  *
3760  *   DTRACE_JSON_COLON:
3761  *     Skip whitespace until we find the colon that separates key Strings
3762  *     from their values.  Once found, move to DTRACE_JSON_VALUE.
3763  *
3764  *   DTRACE_JSON_VALUE:
3765  *     Detects the type of the next value (String, Number, Identifier, Object
3766  *     or Array) and routes to the states that process that type.  Here we also
3767  *     deal with the element selector list if we are requested to traverse down
3768  *     into the object tree.
3769  *
3770  *   DTRACE_JSON_COMMA:
3771  *     Skip whitespace until we find the comma that separates key-value pairs
3772  *     in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3773  *     (similarly DTRACE_JSON_VALUE).  All following literal value processing
3774  *     states return to this state at the end of their value, unless otherwise
3775  *     noted.
3776  *
3777  *   DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3778  *     Processes a Number literal from the JSON, including any exponent
3779  *     component that may be present.  Numbers are returned as strings, which
3780  *     may be passed to strtoll() if an integer is required.
3781  *
3782  *   DTRACE_JSON_IDENTIFIER:
3783  *     Processes a "true", "false" or "null" literal in the JSON.
3784  *
3785  *   DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3786  *   DTRACE_JSON_STRING_ESCAPE_UNICODE:
3787  *     Processes a String literal from the JSON, whether the String denotes
3788  *     a key, a value or part of a larger Object.  Handles all escape sequences
3789  *     present in the specification, including four-digit unicode characters,
3790  *     but merely includes the escape sequence without converting it to the
3791  *     actual escaped character.  If the String is flagged as a key, we
3792  *     move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3793  *
3794  *   DTRACE_JSON_COLLECT_OBJECT:
3795  *     This state collects an entire Object (or Array), correctly handling
3796  *     embedded strings.  If the full element selector list matches this nested
3797  *     object, we return the Object in full as a string.  If not, we use this
3798  *     state to skip to the next value at this level and continue processing.
3799  */
3800 static char *
3801 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3802     char *dest)
3803 {
3804 	dtrace_json_state_t state = DTRACE_JSON_REST;
3805 	int64_t array_elem = INT64_MIN;
3806 	int64_t array_pos = 0;
3807 	uint8_t escape_unicount = 0;
3808 	boolean_t string_is_key = B_FALSE;
3809 	boolean_t collect_object = B_FALSE;
3810 	boolean_t found_key = B_FALSE;
3811 	boolean_t in_array = B_FALSE;
3812 	uint32_t braces = 0, brackets = 0;
3813 	char *elem = elemlist;
3814 	char *dd = dest;
3815 	uintptr_t cur;
3816 
3817 	for (cur = json; cur < json + size; cur++) {
3818 		char cc = dtrace_load8(cur);
3819 		if (cc == '\0')
3820 			return (NULL);
3821 
3822 		switch (state) {
3823 		case DTRACE_JSON_REST:
3824 			if (isspace(cc))
3825 				break;
3826 
3827 			if (cc == '{') {
3828 				state = DTRACE_JSON_OBJECT;
3829 				break;
3830 			}
3831 
3832 			if (cc == '[') {
3833 				in_array = B_TRUE;
3834 				array_pos = 0;
3835 				array_elem = dtrace_strtoll(elem, 10, size);
3836 				found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3837 				state = DTRACE_JSON_VALUE;
3838 				break;
3839 			}
3840 
3841 			/*
3842 			 * ERROR: expected to find a top-level object or array.
3843 			 */
3844 			return (NULL);
3845 		case DTRACE_JSON_OBJECT:
3846 			if (isspace(cc))
3847 				break;
3848 
3849 			if (cc == '"') {
3850 				state = DTRACE_JSON_STRING;
3851 				string_is_key = B_TRUE;
3852 				break;
3853 			}
3854 
3855 			/*
3856 			 * ERROR: either the object did not start with a key
3857 			 * string, or we've run off the end of the object
3858 			 * without finding the requested key.
3859 			 */
3860 			return (NULL);
3861 		case DTRACE_JSON_STRING:
3862 			if (cc == '\\') {
3863 				*dd++ = '\\';
3864 				state = DTRACE_JSON_STRING_ESCAPE;
3865 				break;
3866 			}
3867 
3868 			if (cc == '"') {
3869 				if (collect_object) {
3870 					/*
3871 					 * We don't reset the dest here, as
3872 					 * the string is part of a larger
3873 					 * object being collected.
3874 					 */
3875 					*dd++ = cc;
3876 					collect_object = B_FALSE;
3877 					state = DTRACE_JSON_COLLECT_OBJECT;
3878 					break;
3879 				}
3880 				*dd = '\0';
3881 				dd = dest; /* reset string buffer */
3882 				if (string_is_key) {
3883 					if (dtrace_strncmp(dest, elem,
3884 					    size) == 0)
3885 						found_key = B_TRUE;
3886 				} else if (found_key) {
3887 					if (nelems > 1) {
3888 						/*
3889 						 * We expected an object, not
3890 						 * this string.
3891 						 */
3892 						return (NULL);
3893 					}
3894 					return (dest);
3895 				}
3896 				state = string_is_key ? DTRACE_JSON_COLON :
3897 				    DTRACE_JSON_COMMA;
3898 				string_is_key = B_FALSE;
3899 				break;
3900 			}
3901 
3902 			*dd++ = cc;
3903 			break;
3904 		case DTRACE_JSON_STRING_ESCAPE:
3905 			*dd++ = cc;
3906 			if (cc == 'u') {
3907 				escape_unicount = 0;
3908 				state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3909 			} else {
3910 				state = DTRACE_JSON_STRING;
3911 			}
3912 			break;
3913 		case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3914 			if (!isxdigit(cc)) {
3915 				/*
3916 				 * ERROR: invalid unicode escape, expected
3917 				 * four valid hexadecimal digits.
3918 				 */
3919 				return (NULL);
3920 			}
3921 
3922 			*dd++ = cc;
3923 			if (++escape_unicount == 4)
3924 				state = DTRACE_JSON_STRING;
3925 			break;
3926 		case DTRACE_JSON_COLON:
3927 			if (isspace(cc))
3928 				break;
3929 
3930 			if (cc == ':') {
3931 				state = DTRACE_JSON_VALUE;
3932 				break;
3933 			}
3934 
3935 			/*
3936 			 * ERROR: expected a colon.
3937 			 */
3938 			return (NULL);
3939 		case DTRACE_JSON_COMMA:
3940 			if (isspace(cc))
3941 				break;
3942 
3943 			if (cc == ',') {
3944 				if (in_array) {
3945 					state = DTRACE_JSON_VALUE;
3946 					if (++array_pos == array_elem)
3947 						found_key = B_TRUE;
3948 				} else {
3949 					state = DTRACE_JSON_OBJECT;
3950 				}
3951 				break;
3952 			}
3953 
3954 			/*
3955 			 * ERROR: either we hit an unexpected character, or
3956 			 * we reached the end of the object or array without
3957 			 * finding the requested key.
3958 			 */
3959 			return (NULL);
3960 		case DTRACE_JSON_IDENTIFIER:
3961 			if (islower(cc)) {
3962 				*dd++ = cc;
3963 				break;
3964 			}
3965 
3966 			*dd = '\0';
3967 			dd = dest; /* reset string buffer */
3968 
3969 			if (dtrace_strncmp(dest, "true", 5) == 0 ||
3970 			    dtrace_strncmp(dest, "false", 6) == 0 ||
3971 			    dtrace_strncmp(dest, "null", 5) == 0) {
3972 				if (found_key) {
3973 					if (nelems > 1) {
3974 						/*
3975 						 * ERROR: We expected an object,
3976 						 * not this identifier.
3977 						 */
3978 						return (NULL);
3979 					}
3980 					return (dest);
3981 				} else {
3982 					cur--;
3983 					state = DTRACE_JSON_COMMA;
3984 					break;
3985 				}
3986 			}
3987 
3988 			/*
3989 			 * ERROR: we did not recognise the identifier as one
3990 			 * of those in the JSON specification.
3991 			 */
3992 			return (NULL);
3993 		case DTRACE_JSON_NUMBER:
3994 			if (cc == '.') {
3995 				*dd++ = cc;
3996 				state = DTRACE_JSON_NUMBER_FRAC;
3997 				break;
3998 			}
3999 
4000 			if (cc == 'x' || cc == 'X') {
4001 				/*
4002 				 * ERROR: specification explicitly excludes
4003 				 * hexadecimal or octal numbers.
4004 				 */
4005 				return (NULL);
4006 			}
4007 
4008 			OS_FALLTHROUGH;
4009 		case DTRACE_JSON_NUMBER_FRAC:
4010 			if (cc == 'e' || cc == 'E') {
4011 				*dd++ = cc;
4012 				state = DTRACE_JSON_NUMBER_EXP;
4013 				break;
4014 			}
4015 
4016 			if (cc == '+' || cc == '-') {
4017 				/*
4018 				 * ERROR: expect sign as part of exponent only.
4019 				 */
4020 				return (NULL);
4021 			}
4022 			OS_FALLTHROUGH;
4023 		case DTRACE_JSON_NUMBER_EXP:
4024 			if (isdigit(cc) || cc == '+' || cc == '-') {
4025 				*dd++ = cc;
4026 				break;
4027 			}
4028 
4029 			*dd = '\0';
4030 			dd = dest; /* reset string buffer */
4031 			if (found_key) {
4032 				if (nelems > 1) {
4033 					/*
4034 					 * ERROR: We expected an object, not
4035 					 * this number.
4036 					 */
4037 					return (NULL);
4038 				}
4039 				return (dest);
4040 			}
4041 
4042 			cur--;
4043 			state = DTRACE_JSON_COMMA;
4044 			break;
4045 		case DTRACE_JSON_VALUE:
4046 			if (isspace(cc))
4047 				break;
4048 
4049 			if (cc == '{' || cc == '[') {
4050 				if (nelems > 1 && found_key) {
4051 					in_array = cc == '[' ? B_TRUE : B_FALSE;
4052 					/*
4053 					 * If our element selector directs us
4054 					 * to descend into this nested object,
4055 					 * then move to the next selector
4056 					 * element in the list and restart the
4057 					 * state machine.
4058 					 */
4059 					while (*elem != '\0')
4060 						elem++;
4061 					elem++; /* skip the inter-element NUL */
4062 					nelems--;
4063 					dd = dest;
4064 					if (in_array) {
4065 						state = DTRACE_JSON_VALUE;
4066 						array_pos = 0;
4067 						array_elem = dtrace_strtoll(
4068 						    elem, 10, size);
4069 						found_key = array_elem == 0 ?
4070 						    B_TRUE : B_FALSE;
4071 					} else {
4072 						found_key = B_FALSE;
4073 						state = DTRACE_JSON_OBJECT;
4074 					}
4075 					break;
4076 				}
4077 
4078 				/*
4079 				 * Otherwise, we wish to either skip this
4080 				 * nested object or return it in full.
4081 				 */
4082 				if (cc == '[')
4083 					brackets = 1;
4084 				else
4085 					braces = 1;
4086 				*dd++ = cc;
4087 				state = DTRACE_JSON_COLLECT_OBJECT;
4088 				break;
4089 			}
4090 
4091 			if (cc == '"') {
4092 				state = DTRACE_JSON_STRING;
4093 				break;
4094 			}
4095 
4096 			if (islower(cc)) {
4097 				/*
4098 				 * Here we deal with true, false and null.
4099 				 */
4100 				*dd++ = cc;
4101 				state = DTRACE_JSON_IDENTIFIER;
4102 				break;
4103 			}
4104 
4105 			if (cc == '-' || isdigit(cc)) {
4106 				*dd++ = cc;
4107 				state = DTRACE_JSON_NUMBER;
4108 				break;
4109 			}
4110 
4111 			/*
4112 			 * ERROR: unexpected character at start of value.
4113 			 */
4114 			return (NULL);
4115 		case DTRACE_JSON_COLLECT_OBJECT:
4116 			if (cc == '\0')
4117 				/*
4118 				 * ERROR: unexpected end of input.
4119 				 */
4120 				return (NULL);
4121 
4122 			*dd++ = cc;
4123 			if (cc == '"') {
4124 				collect_object = B_TRUE;
4125 				state = DTRACE_JSON_STRING;
4126 				break;
4127 			}
4128 
4129 			if (cc == ']') {
4130 				if (brackets-- == 0) {
4131 					/*
4132 					 * ERROR: unbalanced brackets.
4133 					 */
4134 					return (NULL);
4135 				}
4136 			} else if (cc == '}') {
4137 				if (braces-- == 0) {
4138 					/*
4139 					 * ERROR: unbalanced braces.
4140 					 */
4141 					return (NULL);
4142 				}
4143 			} else if (cc == '{') {
4144 				braces++;
4145 			} else if (cc == '[') {
4146 				brackets++;
4147 			}
4148 
4149 			if (brackets == 0 && braces == 0) {
4150 				if (found_key) {
4151 					*dd = '\0';
4152 					return (dest);
4153 				}
4154 				dd = dest; /* reset string buffer */
4155 				state = DTRACE_JSON_COMMA;
4156 			}
4157 			break;
4158 		}
4159 	}
4160 	return (NULL);
4161 }
4162 
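/*
 * Illustrative sketch (not part of the build): how a caller might pack the
 * element selector list described above and invoke dtrace_json() to look up
 * foo[0].bar.  The packed list is "foo" NUL "0" NUL "bar" NUL with nelems
 * equal to 3.  The names jstr and dest are hypothetical stand-ins for a JSON
 * input string and a scratch destination buffer of at least strsize bytes.
 *
 *	uint64_t strsize = state->dts_options[DTRACEOPT_STRSIZE];
 *	char elemlist[] = "foo\0" "0\0" "bar\0";
 *	char *ret = dtrace_json(strsize, (uintptr_t)jstr, elemlist, 3, dest);
 *
 * A NULL return indicates a missing key or a parse error; otherwise ret
 * points at dest, which holds the extracted value as a string.
 */
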
4163 /*
4164  * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4165  * Notice that we don't bother validating the proper number of arguments or
4166  * their types in the tuple stack.  This isn't needed because all argument
4167  * interpretation is safe because of our load safety -- the worst that can
4168  * happen is that a bogus program can obtain bogus results.
4169  */
4170 static void
4171 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4172     dtrace_key_t *tupregs, int nargs,
4173     dtrace_mstate_t *mstate, dtrace_state_t *state)
4174 {
4175 	volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4176 	volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4177 	dtrace_vstate_t *vstate = &state->dts_vstate;
4178 
4179 #if !defined(__APPLE__)
4180 	union {
4181 		mutex_impl_t mi;
4182 		uint64_t mx;
4183 	} m;
4184 
4185 	union {
4186 		krwlock_t ri;
4187 		uintptr_t rw;
4188 	} r;
4189 #else
4190 /* FIXME: awaits lock/mutex work */
4191 #endif /* __APPLE__ */
4192 
4193 	switch (subr) {
4194 	case DIF_SUBR_RAND:
4195 		regs[rd] = dtrace_xoroshiro128_plus_next(
4196 		    state->dts_rstate[CPU->cpu_id]);
4197 		break;
4198 
4199 #if !defined(__APPLE__)
4200 	case DIF_SUBR_MUTEX_OWNED:
4201 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4202 		    mstate, vstate)) {
4203 			regs[rd] = 0;
4204 			break;
4205 		}
4206 
4207 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4208 		if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4209 			regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4210 		else
4211 			regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4212 		break;
4213 
4214 	case DIF_SUBR_MUTEX_OWNER:
4215 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4216 		    mstate, vstate)) {
4217 			regs[rd] = 0;
4218 			break;
4219 		}
4220 
4221 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4222 		if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4223 		    MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4224 			regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4225 		else
4226 			regs[rd] = 0;
4227 		break;
4228 
4229 	case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4230 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4231 		    mstate, vstate)) {
4232 			regs[rd] = 0;
4233 			break;
4234 		}
4235 
4236 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4237 		regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4238 		break;
4239 
4240 	case DIF_SUBR_MUTEX_TYPE_SPIN:
4241 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4242 		    mstate, vstate)) {
4243 			regs[rd] = 0;
4244 			break;
4245 		}
4246 
4247 		m.mx = dtrace_load64(tupregs[0].dttk_value);
4248 		regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4249 		break;
4250 
4251 	case DIF_SUBR_RW_READ_HELD: {
4252 		uintptr_t tmp;
4253 
4254 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4255 		    mstate, vstate)) {
4256 			regs[rd] = 0;
4257 			break;
4258 		}
4259 
4260 		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4261 		regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4262 		break;
4263 	}
4264 
4265 	case DIF_SUBR_RW_WRITE_HELD:
4266 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4267 		    mstate, vstate)) {
4268 			regs[rd] = 0;
4269 			break;
4270 		}
4271 
4272 		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4273 		regs[rd] = _RW_WRITE_HELD(&r.ri);
4274 		break;
4275 
4276 	case DIF_SUBR_RW_ISWRITER:
4277 		if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4278 		    mstate, vstate)) {
4279 			regs[rd] = 0;
4280 			break;
4281 		}
4282 
4283 		r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4284 		regs[rd] = _RW_ISWRITER(&r.ri);
4285 		break;
4286 #else
4287 /* FIXME: awaits lock/mutex work */
4288 #endif /* __APPLE__ */
4289 
4290 	case DIF_SUBR_BCOPY: {
4291 		/*
4292 		 * We need to be sure that the destination is in the scratch
4293 		 * region -- no other region is allowed.
4294 		 */
4295 		uintptr_t src = tupregs[0].dttk_value;
4296 		uintptr_t dest = tupregs[1].dttk_value;
4297 		size_t size = tupregs[2].dttk_value;
4298 
4299 		if (!dtrace_inscratch(dest, size, mstate)) {
4300 			*flags |= CPU_DTRACE_BADADDR;
4301 			*illval = regs[rd];
4302 			break;
4303 		}
4304 
4305 		if (!dtrace_canload(src, size, mstate, vstate)) {
4306 			regs[rd] = 0;
4307 			break;
4308 		}
4309 
4310 		dtrace_bcopy((void *)src, (void *)dest, size);
4311 		break;
4312 	}
4313 
4314 	case DIF_SUBR_ALLOCA:
4315 	case DIF_SUBR_COPYIN: {
4316 		uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4317 		uint64_t size =
4318 		    tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4319 		size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4320 
4321 		/*
4322 		 * Check whether the user can access kernel memory
4323 		 */
4324 		if (dtrace_priv_kernel(state) == 0) {
4325 			DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
4326 			regs[rd] = 0;
4327 			break;
4328 		}
4329 		/*
4330 		 * This action doesn't require any credential checks since
4331 		 * probes will not activate in user contexts to which the
4332 		 * enabling user does not have permissions.
4333 		 */
4334 
4335 		/*
4336 		 * Rounding up the user allocation size could have overflowed
4337 		 * a large, bogus allocation (like -1ULL) to 0.
4338 		 */
4339 		if (scratch_size < size ||
4340 		    !DTRACE_INSCRATCH(mstate, scratch_size)) {
4341 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4342 			regs[rd] = 0;
4343 			break;
4344 		}
4345 
4346 		if (subr == DIF_SUBR_COPYIN) {
4347 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4348 			if (dtrace_priv_proc(state))
4349 				dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4350 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4351 		}
4352 
4353 		mstate->dtms_scratch_ptr += scratch_size;
4354 		regs[rd] = dest;
4355 		break;
4356 	}
4357 
4358 	case DIF_SUBR_COPYINTO: {
4359 		uint64_t size = tupregs[1].dttk_value;
4360 		uintptr_t dest = tupregs[2].dttk_value;
4361 
4362 		/*
4363 		 * This action doesn't require any credential checks since
4364 		 * probes will not activate in user contexts to which the
4365 		 * enabling user does not have permissions.
4366 		 */
4367 		if (!dtrace_inscratch(dest, size, mstate)) {
4368 			*flags |= CPU_DTRACE_BADADDR;
4369 			*illval = regs[rd];
4370 			break;
4371 		}
4372 
4373 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4374 		if (dtrace_priv_proc(state))
4375 			dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4376 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4377 		break;
4378 	}
4379 
4380 	case DIF_SUBR_COPYINSTR: {
4381 		uintptr_t dest = mstate->dtms_scratch_ptr;
4382 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4383 
4384 		if (nargs > 1 && tupregs[1].dttk_value < size)
4385 			size = tupregs[1].dttk_value + 1;
4386 
4387 		/*
4388 		 * This action doesn't require any credential checks since
4389 		 * probes will not activate in user contexts to which the
4390 		 * enabling user does not have permissions.
4391 		 */
4392 		if (!DTRACE_INSCRATCH(mstate, size)) {
4393 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4394 			regs[rd] = 0;
4395 			break;
4396 		}
4397 
4398 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4399 		if (dtrace_priv_proc(state))
4400 			dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4401 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4402 
4403 		((char *)dest)[size - 1] = '\0';
4404 		mstate->dtms_scratch_ptr += size;
4405 		regs[rd] = dest;
4406 		break;
4407 	}
4408 
4409 	case DIF_SUBR_MSGSIZE:
4410 	case DIF_SUBR_MSGDSIZE: {
4411 		/* Darwin does not implement SysV streams messages */
4412 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4413 		regs[rd] = 0;
4414 		break;
4415 	}
4416 
4417 	case DIF_SUBR_PROGENYOF: {
4418 		pid_t pid = tupregs[0].dttk_value;
4419 		struct proc *p = current_proc();
4420 		int rval = 0, lim = nprocs;
4421 
4422 		while (p && (lim-- > 0)) {
4423 			pid_t ppid;
4424 
4425 			ppid = (pid_t)dtrace_load32((uintptr_t)&(p->p_pid));
4426 			if (*flags & CPU_DTRACE_FAULT)
4427 				break;
4428 
4429 			if (ppid == pid) {
4430 				rval = 1;
4431 				break;
4432 			}
4433 
4434 			if (ppid == 0)
4435 				break; /* Can't climb process tree any further. */
4436 
4437 			p = (struct proc *)dtrace_loadptr((uintptr_t)&(p->p_pptr));
4438 #if __has_feature(ptrauth_calls)
4439 			p = ptrauth_strip(p, ptrauth_key_process_independent_data);
4440 #endif
4441 			if (*flags & CPU_DTRACE_FAULT)
4442 				break;
4443 		}
4444 
4445 		regs[rd] = rval;
4446 		break;
4447 	}
4448 
4449 	case DIF_SUBR_SPECULATION:
4450 		regs[rd] = dtrace_speculation(state);
4451 		break;
4452 
4453 
4454 	case DIF_SUBR_COPYOUT: {
4455 		uintptr_t kaddr = tupregs[0].dttk_value;
4456 		user_addr_t uaddr = tupregs[1].dttk_value;
4457 		uint64_t size = tupregs[2].dttk_value;
4458 
4459 		if (!dtrace_destructive_disallow &&
4460 		    dtrace_priv_proc_control(state) &&
4461 		    !dtrace_istoxic(kaddr, size) &&
4462 		    dtrace_canload(kaddr, size, mstate, vstate)) {
4463 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4464 			dtrace_copyout(kaddr, uaddr, size, flags);
4465 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4466 		}
4467 		break;
4468 	}
4469 
4470 	case DIF_SUBR_COPYOUTSTR: {
4471 		uintptr_t kaddr = tupregs[0].dttk_value;
4472 		user_addr_t uaddr = tupregs[1].dttk_value;
4473 		uint64_t size = tupregs[2].dttk_value;
4474 		size_t lim;
4475 
4476 		if (!dtrace_destructive_disallow &&
4477 		    dtrace_priv_proc_control(state) &&
4478 		    !dtrace_istoxic(kaddr, size) &&
4479 		    dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
4480 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4481 			dtrace_copyoutstr(kaddr, uaddr, lim, flags);
4482 			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4483 		}
4484 		break;
4485 	}
4486 
4487 	case DIF_SUBR_STRLEN: {
4488 		size_t size = state->dts_options[DTRACEOPT_STRSIZE];
4489 		uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4490 		size_t lim;
4491 
4492 		if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4493 			regs[rd] = 0;
4494 			break;
4495 		}
4496 
4497 		regs[rd] = dtrace_strlen((char *)addr, lim);
4498 
4499 		break;
4500 	}
4501 
4502 	case DIF_SUBR_STRCHR:
4503 	case DIF_SUBR_STRRCHR: {
4504 		/*
4505 		 * We're going to iterate over the string looking for the
4506 		 * specified character.  We will iterate until we have reached
4507 		 * the string length or we have found the character.  If this
4508 		 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4509 		 * of the specified character instead of the first.
4510 		 */
4511 		uintptr_t addr = tupregs[0].dttk_value;
4512 		uintptr_t addr_limit;
4513 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4514 		size_t lim;
4515 		char c, target = (char)tupregs[1].dttk_value;
4516 
4517 		if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4518 			regs[rd] = 0;
4519 			break;
4520 		}
4521 		addr_limit = addr + lim;
4522 
4523 		for (regs[rd] = 0; addr < addr_limit; addr++) {
4524 			if ((c = dtrace_load8(addr)) == target) {
4525 				regs[rd] = addr;
4526 
4527 				if (subr == DIF_SUBR_STRCHR)
4528 					break;
4529 			}
4530 
4531 			if (c == '\0')
4532 				break;
4533 		}
4534 
4535 		break;
4536 	}
4537 
4538 	case DIF_SUBR_STRSTR:
4539 	case DIF_SUBR_INDEX:
4540 	case DIF_SUBR_RINDEX: {
4541 		/*
4542 		 * We're going to iterate over the string looking for the
4543 		 * specified string.  We will iterate until we have reached
4544 		 * the string length or we have found the string.  (Yes, this
4545 		 * is done in the most naive way possible -- but considering
4546 		 * that the string we're searching for is likely to be
4547 		 * relatively short, the complexity of Rabin-Karp or similar
4548 		 * hardly seems merited.)
4549 		 */
4550 		char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4551 		char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4552 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4553 		size_t len = dtrace_strlen(addr, size);
4554 		size_t sublen = dtrace_strlen(substr, size);
4555 		char *limit = addr + len, *orig = addr;
4556 		int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4557 		int inc = 1;
4558 
4559 		regs[rd] = notfound;
4560 
4561 		if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4562 			regs[rd] = 0;
4563 			break;
4564 		}
4565 
4566 		if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4567 		    vstate)) {
4568 			regs[rd] = 0;
4569 			break;
4570 		}
4571 
4572 		/*
4573 		 * strstr() and index()/rindex() have similar semantics if
4574 		 * both strings are the empty string: strstr() returns a
4575 		 * pointer to the (empty) string, and index() and rindex()
4576 		 * both return index 0 (regardless of any position argument).
4577 		 */
4578 		if (sublen == 0 && len == 0) {
4579 			if (subr == DIF_SUBR_STRSTR)
4580 				regs[rd] = (uintptr_t)addr;
4581 			else
4582 				regs[rd] = 0;
4583 			break;
4584 		}
4585 
4586 		if (subr != DIF_SUBR_STRSTR) {
4587 			if (subr == DIF_SUBR_RINDEX) {
4588 				limit = orig - 1;
4589 				addr += len;
4590 				inc = -1;
4591 			}
4592 
4593 			/*
4594 			 * Both index() and rindex() take an optional position
4595 			 * argument that denotes the starting position.
4596 			 */
4597 			if (nargs == 3) {
4598 				int64_t pos = (int64_t)tupregs[2].dttk_value;
4599 
4600 				/*
4601 				 * If the position argument to index() is
4602 				 * negative, Perl implicitly clamps it at
4603 				 * zero.  This semantic is a little surprising
4604 				 * given the special meaning of negative
4605 				 * positions to similar Perl functions like
4606 				 * substr(), but it appears to reflect a
4607 				 * notion that index() can start from a
4608 				 * negative index and increment its way up to
4609 				 * the string.  Given this notion, Perl's
4610 				 * rindex() is at least self-consistent in
4611 				 * that it implicitly clamps positions greater
4612 				 * than the string length to be the string
4613 				 * length.  Where Perl completely loses
4614 				 * coherence, however, is when the specified
4615 				 * substring is the empty string ("").  In
4616 				 * this case, even if the position is
4617 				 * negative, rindex() returns 0 -- and even if
4618 				 * the position is greater than the length,
4619 				 * index() returns the string length.  These
4620 				 * semantics violate the notion that index()
4621 				 * should never return a value less than the
4622 				 * specified position and that rindex() should
4623 				 * never return a value greater than the
4624 				 * specified position.  (One assumes that
4625 				 * these semantics are artifacts of Perl's
4626 				 * implementation and not the results of
4627 				 * deliberate design -- it beggars belief that
4628 				 * even Larry Wall could desire such oddness.)
4629 				 * While in the abstract one would wish for
4630 				 * consistent position semantics across
4631 				 * substr(), index() and rindex() -- or at the
4632 				 * very least self-consistent position
4633 				 * semantics for index() and rindex() -- we
4634 				 * instead opt to keep with the extant Perl
4635 				 * semantics, in all their broken glory.  (Do
4636 				 * we have more desire to maintain Perl's
4637 				 * semantics than Perl does?  Probably.)
4638 				 */
4639 				if (subr == DIF_SUBR_RINDEX) {
4640 					if (pos < 0) {
4641 						if (sublen == 0)
4642 							regs[rd] = 0;
4643 						break;
4644 					}
4645 
4646 					if ((size_t)pos > len)
4647 						pos = len;
4648 				} else {
4649 					if (pos < 0)
4650 						pos = 0;
4651 
4652 					if ((size_t)pos >= len) {
4653 						if (sublen == 0)
4654 							regs[rd] = len;
4655 						break;
4656 					}
4657 				}
4658 
4659 				addr = orig + pos;
4660 			}
4661 		}
4662 
4663 		for (regs[rd] = notfound; addr != limit; addr += inc) {
4664 			if (dtrace_strncmp(addr, substr, sublen) == 0) {
4665 				if (subr != DIF_SUBR_STRSTR) {
4666 					/*
4667 					 * As D index() and rindex() are
4668 					 * modeled on Perl (and not on awk),
4669 					 * we return a zero-based (and not a
4670 					 * one-based) index.  (For you Perl
4671 					 * weenies: no, we're not going to add
4672 					 * $[ -- and shouldn't you be at a con
4673 					 * or something?)
4674 					 */
4675 					regs[rd] = (uintptr_t)(addr - orig);
4676 					break;
4677 				}
4678 
4679 				ASSERT(subr == DIF_SUBR_STRSTR);
4680 				regs[rd] = (uintptr_t)addr;
4681 				break;
4682 			}
4683 		}
4684 
4685 		break;
4686 	}
4687 
4688 	case DIF_SUBR_STRTOK: {
4689 		uintptr_t addr = tupregs[0].dttk_value;
4690 		uintptr_t tokaddr = tupregs[1].dttk_value;
4691 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4692 		uintptr_t limit, toklimit;
4693 		size_t clim;
4694 		char *dest = (char *)mstate->dtms_scratch_ptr;
4695 		uint8_t c = '\0', tokmap[32];	/* 256 / 8 */
4696 		uint64_t i = 0;
4697 
4698 		/*
4699 		 * Check both the token buffer and (later) the input buffer,
4700 		 * since both could be non-scratch addresses.
4701 		 */
4702 		if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) {
4703 			regs[rd] = 0;
4704 			break;
4705 		}
4706 		toklimit = tokaddr + clim;
4707 
4708 		if (!DTRACE_INSCRATCH(mstate, size)) {
4709 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4710 			regs[rd] = 0;
4711 			break;
4712 		}
4713 
4714 		if (addr == 0) {
4715 			/*
4716 			 * If the address specified is NULL, we use our saved
4717 			 * strtok pointer from the mstate.  Note that this
4718 			 * means that the saved strtok pointer is _only_
4719 			 * valid within multiple enablings of the same probe --
4720 			 * it behaves like an implicit clause-local variable.
4721 			 */
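			/*
			 * Illustrative example (D usage): within one clause,
			 * strtok("/a//b", "/") returns "a" and saves its
			 * position here; a following strtok(NULL, "/") in the
			 * same clause resumes from the saved pointer and
			 * returns "b", and a further strtok(NULL, "/")
			 * returns NULL.
			 */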
4722 			addr = mstate->dtms_strtok;
4723 			limit = mstate->dtms_strtok_limit;
4724 		} else {
4725 			/*
4726 			 * If the user-specified address is non-NULL we must
4727 			 * access check it.  This is the only time we have
4728 			 * a chance to do so, since this address may reside
4729 			 * in the string table of this clause -- future calls
4730 			 * (when we fetch addr from mstate->dtms_strtok)
4731 			 * would fail this access check.
4732 			 */
4733 			if (!dtrace_strcanload(addr, size, &clim, mstate,
4734 				vstate)) {
4735 				regs[rd] = 0;
4736 				break;
4737 			}
4738 			limit = addr + clim;
4739 		}
4740 
4741 		/*
4742 		 * First, zero the token map, and then process the token
4743 		 * string -- setting a bit in the map for every character
4744 		 * found in the token string.
4745 		 */
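		/*
		 * For example, for the token character '/' (0x2f), the bit
		 * set below is bit (0x2f & 0x7) == 7 of tokmap[0x2f >> 3]
		 * == tokmap[5], so the lookups that follow test
		 * tokmap[5] & 0x80 when they encounter '/'.
		 */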
4746 		for (i = 0; i < (int)sizeof (tokmap); i++)
4747 			tokmap[i] = 0;
4748 
4749 		for (; tokaddr < toklimit; tokaddr++) {
4750 			if ((c = dtrace_load8(tokaddr)) == '\0')
4751 				break;
4752 
4753 			ASSERT((c >> 3) < sizeof (tokmap));
4754 			tokmap[c >> 3] |= (1 << (c & 0x7));
4755 		}
4756 
4757 		for (; addr < limit; addr++) {
4758 			/*
4759 			 * We're looking for a character that is _not_
4760 			 * contained in the token string.
4761 			 */
4762 			if ((c = dtrace_load8(addr)) == '\0')
4763 				break;
4764 
4765 			if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4766 				break;
4767 		}
4768 
4769 		if (c == '\0') {
4770 			/*
4771 			 * We reached the end of the string without finding
4772 			 * any character that was not in the token string.
4773 			 * We return NULL in this case, and we set the saved
4774 			 * address to NULL as well.
4775 			 */
4776 			regs[rd] = 0;
4777 			mstate->dtms_strtok = 0;
4778 			mstate->dtms_strtok_limit = 0;
4779 			break;
4780 		}
4781 
4782 		/*
4783 		 * From here on, we're copying into the destination string.
4784 		 */
4785 		for (i = 0; addr < limit && i < size - 1; addr++) {
4786 			if ((c = dtrace_load8(addr)) == '\0')
4787 				break;
4788 
4789 			if (tokmap[c >> 3] & (1 << (c & 0x7)))
4790 				break;
4791 
4792 			ASSERT(i < size);
4793 			dest[i++] = c;
4794 		}
4795 
4796 		ASSERT(i < size);
4797 		dest[i] = '\0';
4798 		regs[rd] = (uintptr_t)dest;
4799 		mstate->dtms_scratch_ptr += size;
4800 		mstate->dtms_strtok = addr;
4801 		mstate->dtms_strtok_limit = limit;
4802 		break;
4803 	}
4804 
4805 	case DIF_SUBR_SUBSTR: {
4806 		uintptr_t s = tupregs[0].dttk_value;
4807 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4808 		char *d = (char *)mstate->dtms_scratch_ptr;
4809 		int64_t index = (int64_t)tupregs[1].dttk_value;
4810 		int64_t remaining = (int64_t)tupregs[2].dttk_value;
4811 		size_t len = dtrace_strlen((char *)s, size);
4812 		int64_t i = 0;
4813 
4814 		if (!dtrace_canload(s, len + 1, mstate, vstate)) {
4815 			regs[rd] = 0;
4816 			break;
4817 		}
4818 
4819 		if (!DTRACE_INSCRATCH(mstate, size)) {
4820 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4821 			regs[rd] = 0;
4822 			break;
4823 		}
4824 
4825 		if (nargs <= 2)
4826 			remaining = (int64_t)size;
4827 
4828 		if (index < 0) {
4829 			index += len;
4830 
4831 			if (index < 0 && index + remaining > 0) {
4832 				remaining += index;
4833 				index = 0;
4834 			}
4835 		}
4836 
4837 		if ((size_t)index >= len || index < 0) {
4838 			remaining = 0;
4839 		} else if (remaining < 0) {
4840 			remaining += len - index;
4841 		} else if ((uint64_t)index + (uint64_t)remaining > size) {
4842 			remaining = size - index;
4843 		}
4844 
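		/*
		 * Illustrative example: for s = "hello" with index = -3 and
		 * remaining = 2, the adjustments above leave index = 2 and
		 * remaining = 2, so the copy below yields "ll".
		 */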
4845 		for (i = 0; i < remaining; i++) {
4846 			if ((d[i] = dtrace_load8(s + index + i)) == '\0')
4847 				break;
4848 		}
4849 
4850 		d[i] = '\0';
4851 
4852 		mstate->dtms_scratch_ptr += size;
4853 		regs[rd] = (uintptr_t)d;
4854 		break;
4855 	}
4856 
4857 	case DIF_SUBR_GETMAJOR:
4858 		regs[rd] = (uintptr_t)major( (dev_t)tupregs[0].dttk_value );
4859 		break;
4860 
4861 	case DIF_SUBR_GETMINOR:
4862 		regs[rd] = (uintptr_t)minor( (dev_t)tupregs[0].dttk_value );
4863 		break;
4864 
4865 	case DIF_SUBR_DDI_PATHNAME: {
4866 		/* APPLE NOTE: currently unsupported on Darwin */
4867 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4868 		regs[rd] = 0;
4869 		break;
4870 	}
4871 
4872 	case DIF_SUBR_STRJOIN: {
4873 		char *d = (char *)mstate->dtms_scratch_ptr;
4874 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4875 		uintptr_t s1 = tupregs[0].dttk_value;
4876 		uintptr_t s2 = tupregs[1].dttk_value;
4877 		uint64_t i = 0, j = 0;
4878 		size_t lim1, lim2;
4879 		char c;
4880 
4881 		if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) ||
4882 		    !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) {
4883 			regs[rd] = 0;
4884 			break;
4885 		}
4886 
4887 		if (!DTRACE_INSCRATCH(mstate, size)) {
4888 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4889 			regs[rd] = 0;
4890 			break;
4891 		}
4892 
4893 		for (;;) {
4894 			if (i >= size) {
4895 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4896 				regs[rd] = 0;
4897 				break;
4898 			}
4899 			c = (i >= lim1) ? '\0' : dtrace_load8(s1++);
4900 			if ((d[i++] = c) == '\0') {
4901 				i--;
4902 				break;
4903 			}
4904 		}
4905 
4906 		for (;;) {
4907 			if (i >= size) {
4908 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4909 				regs[rd] = 0;
4910 				break;
4911 			}
4912 			c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++);
4913 			if ((d[i++] = c) == '\0')
4914 				break;
4915 		}
4916 
4917 		if (i < size) {
4918 			mstate->dtms_scratch_ptr += i;
4919 			regs[rd] = (uintptr_t)d;
4920 		}
4921 
4922 		break;
4923 	}
4924 
4925 	case DIF_SUBR_STRTOLL: {
4926 		uintptr_t s = tupregs[0].dttk_value;
4927 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4928 		size_t lim;
4929 		int base = 10;
4930 
4931 		if (nargs > 1) {
4932 			if ((base = tupregs[1].dttk_value) <= 1 ||
4933 			    base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4934 				*flags |= CPU_DTRACE_ILLOP;
4935 				break;
4936 			}
4937 		}
4938 
4939 		if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
4940 			regs[rd] = INT64_MIN;
4941 			break;
4942 		}
4943 
4944 		regs[rd] = dtrace_strtoll((char *)s, base, lim);
4945 		break;
4946 	}
4947 
4948 	case DIF_SUBR_LLTOSTR: {
4949 		int64_t i = (int64_t)tupregs[0].dttk_value;
4950 		uint64_t val, digit;
4951 		uint64_t size = 65;	/* enough room for 2^64 in binary */
4952 		char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4953 		int base = 10;
4954 
4955 		if (nargs > 1) {
4956 			if ((base = tupregs[1].dttk_value) <= 1 ||
4957 			     base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
4958 				*flags |= CPU_DTRACE_ILLOP;
4959 				break;
4960 			}
4961 		}
4962 
4963 		val = (base == 10 && i < 0) ? i * -1 : i;
4964 
4965 		if (!DTRACE_INSCRATCH(mstate, size)) {
4966 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4967 			regs[rd] = 0;
4968 			break;
4969 		}
4970 
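		/*
		 * Illustrative example: for i = 255 with base = 16, the loop
		 * below emits digits backwards from the end of the scratch
		 * region ("ff"), the fixups that follow prepend 'x' and '0',
		 * and regs[rd] ends up pointing at the resulting "0xff".
		 */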
4971 		for (*end-- = '\0'; val; val /= base) {
4972 			if ((digit = val % base) <= '9' - '0') {
4973 				*end-- = '0' + digit;
4974 			} else {
4975 				*end-- = 'a' + (digit - ('9' - '0') - 1);
4976 			}
4977 		}
4978 
4979 		if (i == 0 && base == 16)
4980 			*end-- = '0';
4981 
4982 		if (base == 16)
4983 			*end-- = 'x';
4984 
4985 		if (i == 0 || base == 8 || base == 16)
4986 			*end-- = '0';
4987 
4988 		if (i < 0 && base == 10)
4989 			*end-- = '-';
4990 
4991 		regs[rd] = (uintptr_t)end + 1;
4992 		mstate->dtms_scratch_ptr += size;
4993 		break;
4994 	}
4995 
4996 	case DIF_SUBR_HTONS:
4997 	case DIF_SUBR_NTOHS:
4998 #ifdef _BIG_ENDIAN
4999 		regs[rd] = (uint16_t)tupregs[0].dttk_value;
5000 #else
5001 		regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5002 #endif
5003 		break;
5004 
5005 
5006 	case DIF_SUBR_HTONL:
5007 	case DIF_SUBR_NTOHL:
5008 #ifdef _BIG_ENDIAN
5009 		regs[rd] = (uint32_t)tupregs[0].dttk_value;
5010 #else
5011 		regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5012 #endif
5013 		break;
5014 
5015 
5016 	case DIF_SUBR_HTONLL:
5017 	case DIF_SUBR_NTOHLL:
5018 #ifdef _BIG_ENDIAN
5019 		regs[rd] = (uint64_t)tupregs[0].dttk_value;
5020 #else
5021 		regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5022 #endif
5023 		break;
5024 
5025 
5026 	case DIF_SUBR_DIRNAME:
5027 	case DIF_SUBR_BASENAME: {
5028 		char *dest = (char *)mstate->dtms_scratch_ptr;
5029 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5030 		uintptr_t src = tupregs[0].dttk_value;
5031 		int i, j, len = dtrace_strlen((char *)src, size);
5032 		int lastbase = -1, firstbase = -1, lastdir = -1;
5033 		int start, end;
5034 
5035 		if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5036 			regs[rd] = 0;
5037 			break;
5038 		}
5039 
5040 		if (!DTRACE_INSCRATCH(mstate, size)) {
5041 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5042 			regs[rd] = 0;
5043 			break;
5044 		}
5045 
5046 		/*
5047 		 * The basename and dirname for a zero-length string is
5048 		 * defined to be "."
5049 		 */
5050 		if (len == 0) {
5051 			len = 1;
5052 			src = (uintptr_t)".";
5053 		}
5054 
5055 		/*
5056 		 * Start from the back of the string, moving back toward the
5057 		 * front until we see a character that isn't a slash.  That
5058 		 * character is the last character in the basename.
5059 		 */
5060 		for (i = len - 1; i >= 0; i--) {
5061 			if (dtrace_load8(src + i) != '/')
5062 				break;
5063 		}
5064 
5065 		if (i >= 0)
5066 			lastbase = i;
5067 
5068 		/*
5069 		 * Starting from the last character in the basename, move
5070 		 * towards the front until we find a slash.  The character
5071 		 * that we processed immediately before that is the first
5072 		 * character in the basename.
5073 		 */
5074 		for (; i >= 0; i--) {
5075 			if (dtrace_load8(src + i) == '/')
5076 				break;
5077 		}
5078 
5079 		if (i >= 0)
5080 			firstbase = i + 1;
5081 
5082 		/*
5083 		 * Now keep going until we find a non-slash character.  That
5084 		 * character is the last character in the dirname.
5085 		 */
5086 		for (; i >= 0; i--) {
5087 			if (dtrace_load8(src + i) != '/')
5088 				break;
5089 		}
5090 
5091 		if (i >= 0)
5092 			lastdir = i;
5093 
5094 		ASSERT(!(lastbase == -1 && firstbase != -1));
5095 		ASSERT(!(firstbase == -1 && lastdir != -1));
5096 
5097 		if (lastbase == -1) {
5098 			/*
5099 			 * We didn't find a non-slash character.  We know that
5100 			 * the length is non-zero, so the whole string must be
5101 			 * slashes.  In either the dirname or the basename
5102 			 * case, we return '/'.
5103 			 */
5104 			ASSERT(firstbase == -1);
5105 			firstbase = lastbase = lastdir = 0;
5106 		}
5107 
5108 		if (firstbase == -1) {
5109 			/*
5110 			 * The entire string consists only of a basename
5111 			 * component.  If we're looking for dirname, we need
5112 			 * to change our string to be just "."; if we're
5113 			 * looking for a basename, we'll just set the first
5114 			 * character of the basename to be 0.
5115 			 */
5116 			if (subr == DIF_SUBR_DIRNAME) {
5117 				ASSERT(lastdir == -1);
5118 				src = (uintptr_t)".";
5119 				lastdir = 0;
5120 			} else {
5121 				firstbase = 0;
5122 			}
5123 		}
5124 
5125 		if (subr == DIF_SUBR_DIRNAME) {
5126 			if (lastdir == -1) {
5127 				/*
5128 				 * We know that we have a slash in the name --
5129 				 * or lastdir would be set to 0, above.  And
5130 				 * because lastdir is -1, we know that this
5131 				 * slash must be the first character.  (That
5132 				 * is, the full string must be of the form
5133 				 * "/basename".)  In this case, the last
5134 				 * character of the directory name is 0.
5135 				 */
5136 				lastdir = 0;
5137 			}
5138 
5139 			start = 0;
5140 			end = lastdir;
5141 		} else {
5142 			ASSERT(subr == DIF_SUBR_BASENAME);
5143 			ASSERT(firstbase != -1 && lastbase != -1);
5144 			start = firstbase;
5145 			end = lastbase;
5146 		}
5147 
5148 		for (i = start, j = 0; i <= end && (uint64_t)j < size - 1; i++, j++)
5149 			dest[j] = dtrace_load8(src + i);
5150 
5151 		dest[j] = '\0';
5152 		regs[rd] = (uintptr_t)dest;
5153 		mstate->dtms_scratch_ptr += size;
5154 		break;
5155 	}
5156 
5157 	case DIF_SUBR_CLEANPATH: {
5158 		char *dest = (char *)mstate->dtms_scratch_ptr, c;
5159 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5160 		uintptr_t src = tupregs[0].dttk_value;
5161 		size_t lim;
5162 		size_t i = 0, j = 0;
5163 
5164 		if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5165 			regs[rd] = 0;
5166 			break;
5167 		}
5168 
5169 		if (!DTRACE_INSCRATCH(mstate, size)) {
5170 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5171 			regs[rd] = 0;
5172 			break;
5173 		}
5174 
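		/*
		 * As an illustrative example (path chosen arbitrarily): the
		 * loop below collapses duplicate slashes and "." components
		 * and backs up over ".." components, so an input such as
		 * "/foo//bar/./baz/../qux" is emitted as "/foo/bar/qux".
		 */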
5175 		/*
5176 		 * Move forward, loading each character.
5177 		 */
5178 		do {
5179 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5180 next:
5181 			if ((uint64_t)(j + 5) >= size)	/* 5 = sizeof ("/..c"), i.e. "/..c" plus its NUL */
5182 				break;
5183 
5184 			if (c != '/') {
5185 				dest[j++] = c;
5186 				continue;
5187 			}
5188 
5189 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5190 
5191 			if (c == '/') {
5192 				/*
5193 				 * We have two slashes -- we can just advance
5194 				 * to the next character.
5195 				 */
5196 				goto next;
5197 			}
5198 
5199 			if (c != '.') {
5200 				/*
5201 				 * This is not "." and it's not ".." -- we can
5202 				 * just store the "/" and this character and
5203 				 * drive on.
5204 				 */
5205 				dest[j++] = '/';
5206 				dest[j++] = c;
5207 				continue;
5208 			}
5209 
5210 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5211 
5212 			if (c == '/') {
5213 				/*
5214 				 * This is a "/./" component.  We're not going
5215 				 * to store anything in the destination buffer;
5216 				 * we're just going to go to the next component.
5217 				 */
5218 				goto next;
5219 			}
5220 
5221 			if (c != '.') {
5222 				/*
5223 				 * This is not ".." -- we can just store the
5224 				 * "/." and this character and continue
5225 				 * processing.
5226 				 */
5227 				dest[j++] = '/';
5228 				dest[j++] = '.';
5229 				dest[j++] = c;
5230 				continue;
5231 			}
5232 
5233 			c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5234 
5235 			if (c != '/' && c != '\0') {
5236 				/*
5237 				 * This is not ".." -- it's "..[mumble]".
5238 				 * We'll store the "/.." and this character
5239 				 * and continue processing.
5240 				 */
5241 				dest[j++] = '/';
5242 				dest[j++] = '.';
5243 				dest[j++] = '.';
5244 				dest[j++] = c;
5245 				continue;
5246 			}
5247 
5248 			/*
5249 			 * This is "/../" or "/..\0".  We need to back up
5250 			 * our destination pointer until we find a "/".
5251 			 */
5252 			i--;
5253 			while (j != 0 && dest[--j] != '/')
5254 				continue;
5255 
5256 			if (c == '\0')
5257 				dest[++j] = '/';
5258 		} while (c != '\0');
5259 
5260 		dest[j] = '\0';
5261 		regs[rd] = (uintptr_t)dest;
5262 		mstate->dtms_scratch_ptr += size;
5263 		break;
5264 	}
5265 
5266 	case DIF_SUBR_INET_NTOA:
5267 	case DIF_SUBR_INET_NTOA6:
5268 	case DIF_SUBR_INET_NTOP: {
5269 		size_t size;
5270 		int af, argi, i;
5271 		char *base, *end;
5272 
5273 		if (subr == DIF_SUBR_INET_NTOP) {
5274 			af = (int)tupregs[0].dttk_value;
5275 			argi = 1;
5276 		} else {
5277 			af = subr == DIF_SUBR_INET_NTOA ? AF_INET : AF_INET6;
5278 			argi = 0;
5279 		}
5280 
5281 		if (af == AF_INET) {
5282 #if !defined(__APPLE__)
5283 			ipaddr_t ip4;
5284 #else
5285 			uint32_t ip4;
5286 #endif /* __APPLE__ */
5287 			uint8_t *ptr8, val;
5288 
5289 			/*
5290 			 * Safely load the IPv4 address.
5291 			 */
5292 #if !defined(__APPLE__)
5293 			ip4 = dtrace_load32(tupregs[argi].dttk_value);
5294 #else
5295 			if (!dtrace_canload(tupregs[argi].dttk_value, sizeof(ip4),
5296 				mstate, vstate)) {
5297 				regs[rd] = 0;
5298 				break;
5299 			}
5300 
5301 			dtrace_bcopy(
5302 			    (void *)(uintptr_t)tupregs[argi].dttk_value,
5303 			    (void *)(uintptr_t)&ip4, sizeof (ip4));
5304 #endif /* __APPLE__ */
5305 			/*
5306 			 * Check that an IPv4 string will fit in scratch.
5307 			 */
5308 #if !defined(__APPLE__)
5309 			size = INET_ADDRSTRLEN;
5310 #else
5311 			size = MAX_IPv4_STR_LEN;
5312 #endif /* __APPLE__ */
5313 			if (!DTRACE_INSCRATCH(mstate, size)) {
5314 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5315 				regs[rd] = 0;
5316 				break;
5317 			}
5318 			base = (char *)mstate->dtms_scratch_ptr;
5319 			end = (char *)mstate->dtms_scratch_ptr + size - 1;
5320 
5321 			/*
5322 			 * Stringify as a dotted decimal quad.
5323 			 */
5324 			*end-- = '\0';
5325 			ptr8 = (uint8_t *)&ip4;
5326 			for (i = 3; i >= 0; i--) {
5327 				val = ptr8[i];
5328 
5329 				if (val == 0) {
5330 					*end-- = '0';
5331 				} else {
5332 					for (; val; val /= 10) {
5333 						*end-- = '0' + (val % 10);
5334 					}
5335 				}
5336 
5337 				if (i > 0)
5338 					*end-- = '.';
5339 			}
5340 			ASSERT(end + 1 >= base);
5341 
5342 		} else if (af == AF_INET6) {
5343 #if defined(__APPLE__)
5344 #define _S6_un __u6_addr
5345 #define _S6_u8 __u6_addr8
5346 #endif /* __APPLE__ */
5347 			struct in6_addr ip6;
5348 			int firstzero, tryzero, numzero, v6end;
5349 			uint16_t val;
5350 			const char digits[] = "0123456789abcdef";
5351 
5352 			/*
5353 			 * Stringify using RFC 1884 convention 2 - 16 bit
5354 			 * hexadecimal values with a zero-run compression.
5355 			 * Lower case hexadecimal digits are used.
5356 			 * 	e.g., fe80::214:4fff:fe0b:76c8.
5357 			 * The IPv4 embedded form is returned for inet_ntop,
5358 			 * just the IPv4 string is returned for inet_ntoa6.
5359 			 */
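			/*
			 * As an illustrative example (address chosen
			 * arbitrarily): a V4-mapped address such as
			 * ::ffff:10.1.2.3 is rendered as "::ffff:10.1.2.3"
			 * by inet_ntop() and as just "10.1.2.3" by
			 * inet_ntoa6().
			 */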
5360 
5361 			if (!dtrace_canload(tupregs[argi].dttk_value,
5362 				sizeof(struct in6_addr), mstate, vstate)) {
5363 				regs[rd] = 0;
5364 				break;
5365 			}
5366 
5367 			/*
5368 			 * Safely load the IPv6 address.
5369 			 */
5370 			dtrace_bcopy(
5371 			    (void *)(uintptr_t)tupregs[argi].dttk_value,
5372 			    (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5373 
5374 			/*
5375 			 * Check that an IPv6 string will fit in scratch.
5376 			 */
5377 			size = INET6_ADDRSTRLEN;
5378 			if (!DTRACE_INSCRATCH(mstate, size)) {
5379 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5380 				regs[rd] = 0;
5381 				break;
5382 			}
5383 			base = (char *)mstate->dtms_scratch_ptr;
5384 			end = (char *)mstate->dtms_scratch_ptr + size - 1;
5385 			*end-- = '\0';
5386 
5387 			/*
5388 			 * Find the longest run of 16 bit zero values
5389 			 * for the single allowed zero compression - "::".
5390 			 */
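			/*
			 * As an illustrative example (address chosen
			 * arbitrarily): for fe80::214:4fff:fe0b:76c8 the scan
			 * below finds the zero run at byte offsets 2..7, i.e.
			 * firstzero == 2 and numzero == 6, which the output
			 * loop later collapses to a single "::".
			 */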
5391 			firstzero = -1;
5392 			tryzero = -1;
5393 			numzero = 1;
5394 			for (i = 0; i < (int)sizeof (struct in6_addr); i++) {
5395 				if (ip6._S6_un._S6_u8[i] == 0 &&
5396 				    tryzero == -1 && i % 2 == 0) {
5397 					tryzero = i;
5398 					continue;
5399 				}
5400 
5401 				if (tryzero != -1 &&
5402 				    (ip6._S6_un._S6_u8[i] != 0 ||
5403 				    i == sizeof (struct in6_addr) - 1)) {
5404 
5405 					if (i - tryzero <= numzero) {
5406 						tryzero = -1;
5407 						continue;
5408 					}
5409 
5410 					firstzero = tryzero;
5411 					numzero = i - i % 2 - tryzero;
5412 					tryzero = -1;
5413 
5414 					if (ip6._S6_un._S6_u8[i] == 0 &&
5415 					    i == sizeof (struct in6_addr) - 1)
5416 						numzero += 2;
5417 				}
5418 			}
5419 			ASSERT(firstzero + numzero <= (int)sizeof (struct in6_addr));
5420 
5421 			/*
5422 			 * Check for an IPv4 embedded address.
5423 			 */
5424 			v6end = sizeof (struct in6_addr) - 2;
5425 			if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5426 			    IN6_IS_ADDR_V4COMPAT(&ip6)) {
5427 				for (i = sizeof (struct in6_addr) - 1;
5428 				     i >= (int)DTRACE_V4MAPPED_OFFSET; i--) {
5429 					ASSERT(end >= base);
5430 
5431 					val = ip6._S6_un._S6_u8[i];
5432 
5433 					if (val == 0) {
5434 						*end-- = '0';
5435 					} else {
5436 						for (; val; val /= 10) {
5437 							*end-- = '0' + val % 10;
5438 						}
5439 					}
5440 
5441 					if (i > (int)DTRACE_V4MAPPED_OFFSET)
5442 						*end-- = '.';
5443 				}
5444 
5445 				if (subr == DIF_SUBR_INET_NTOA6)
5446 					goto inetout;
5447 
5448 				/*
5449 				 * Set v6end to skip the IPv4 address that
5450 				 * we have already stringified.
5451 				 */
5452 				v6end = 10;
5453 			}
5454 
5455 			/*
5456 			 * Build the IPv6 string by working through the
5457 			 * address in reverse.
5458 			 */
5459 			for (i = v6end; i >= 0; i -= 2) {
5460 				ASSERT(end >= base);
5461 
5462 				if (i == firstzero + numzero - 2) {
5463 					*end-- = ':';
5464 					*end-- = ':';
5465 					i -= numzero - 2;
5466 					continue;
5467 				}
5468 
5469 				if (i < 14 && i != firstzero - 2)
5470 					*end-- = ':';
5471 
5472 				val = (ip6._S6_un._S6_u8[i] << 8) +
5473 				    ip6._S6_un._S6_u8[i + 1];
5474 
5475 				if (val == 0) {
5476 					*end-- = '0';
5477 				} else {
5478 					for (; val; val /= 16) {
5479 						*end-- = digits[val % 16];
5480 					}
5481 				}
5482 			}
5483 			ASSERT(end + 1 >= base);
5484 
5485 #if defined(__APPLE__)
5486 #undef _S6_un
5487 #undef _S6_u8
5488 #endif /* __APPLE__ */
5489 		} else {
5490 			/*
5491 			 * The user didn't use AH_INET or AH_INET6.
5492 			 */
5493 			 * The user didn't use AF_INET or AF_INET6.
5494 			regs[rd] = 0;
5495 			break;
5496 		}
5497 
5498 inetout:	regs[rd] = (uintptr_t)end + 1;
5499 		mstate->dtms_scratch_ptr += size;
5500 		break;
5501 	}
5502 
5503 	case DIF_SUBR_JSON: {
5504 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5505 		uintptr_t json = tupregs[0].dttk_value;
5506 		size_t jsonlen = dtrace_strlen((char *)json, size);
5507 		uintptr_t elem = tupregs[1].dttk_value;
5508 		size_t elemlen = dtrace_strlen((char *)elem, size);
5509 
5510 		char *dest = (char *)mstate->dtms_scratch_ptr;
5511 		char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
5512 		char *ee = elemlist;
5513 		int nelems = 1;
5514 		uintptr_t cur;
5515 
5516 		if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
5517 		    !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
5518 			regs[rd] = 0;
5519 			break;
5520 		}
5521 
5522 		if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
5523 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5524 			regs[rd] = 0;
5525 			break;
5526 		}
5527 
5528 		/*
5529 		 * Read the element selector and split it up into a packed list
5530 		 * of strings.
5531 		 */
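		/*
		 * As an illustrative example (selector chosen arbitrarily):
		 * an element selector of "foo.bar[0].baz" is packed below as
		 * the NUL-separated list "foo", "bar", "0", "baz" with
		 * nelems == 4; dtrace_json() then walks that list against the
		 * JSON string.
		 */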
5532 		for (cur = elem; cur < elem + elemlen; cur++) {
5533 			char cc = dtrace_load8(cur);
5534 
5535 			if (cur == elem && cc == '[') {
5536 				/*
5537 				 * If the first element selector key is
5538 				 * actually an array index then ignore the
5539 				 * bracket.
5540 				 */
5541 				continue;
5542 			}
5543 
5544 			if (cc == ']')
5545 				continue;
5546 
5547 			if (cc == '.' || cc == '[') {
5548 				nelems++;
5549 				cc = '\0';
5550 			}
5551 
5552 			*ee++ = cc;
5553 		}
5554 		*ee++ = '\0';
5555 
5556 		if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
5557 		    nelems, dest)) != 0)
5558 			mstate->dtms_scratch_ptr += jsonlen + 1;
5559 		break;
5560 	}
5561 
5562 	case DIF_SUBR_TOUPPER:
5563 	case DIF_SUBR_TOLOWER: {
5564 		uintptr_t src = tupregs[0].dttk_value;
5565 		char *dest = (char *)mstate->dtms_scratch_ptr;
5566 		char lower, upper, base, c;
5567 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5568 		size_t len = dtrace_strlen((char*) src, size);
5569 		size_t i = 0;
5570 
5571 		lower = (subr == DIF_SUBR_TOUPPER) ? 'a' : 'A';
5572 		upper = (subr == DIF_SUBR_TOUPPER) ? 'z' : 'Z';
5573 		base  = (subr == DIF_SUBR_TOUPPER) ? 'A' : 'a';
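		/*
		 * As an illustrative example: for toupper(), lower/upper/base
		 * are 'a'/'z'/'A', so a character such as 'g' is remapped to
		 * 'G' by base + (c - lower) in the loop below.
		 */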
5574 
5575 		if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5576 			regs[rd] = 0;
5577 			break;
5578 		}
5579 
5580 		if (!DTRACE_INSCRATCH(mstate, size)) {
5581 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5582 			regs[rd] = 0;
5583 			break;
5584 		}
5585 
5586 		for (i = 0; i < size - 1; ++i) {
5587 			if ((c = dtrace_load8(src + i)) == '\0')
5588 				break;
5589 			if (c >= lower && c <= upper)
5590 				c = base + (c - lower);
5591 			dest[i] = c;
5592 		}
5593 
5594 		ASSERT(i < size);
5595 
5596 		dest[i] = '\0';
5597 		regs[rd] = (uintptr_t) dest;
5598 		mstate->dtms_scratch_ptr += size;
5599 
5600 		break;
5601 	}
5602 
5603 	case DIF_SUBR_STRIP:
5604 		if (!dtrace_is_valid_ptrauth_key(tupregs[1].dttk_value)) {
5605 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5606 			break;
5607 		}
5608 		regs[rd] = (uint64_t)dtrace_ptrauth_strip(
5609 		    (void*)tupregs[0].dttk_value, tupregs[1].dttk_value);
5610 		break;
5611 
5612 #if defined(__APPLE__)
5613 	case DIF_SUBR_VM_KERNEL_ADDRPERM: {
5614 		if (!dtrace_priv_kernel(state)) {
5615 			regs[rd] = 0;
5616 		} else {
5617 			regs[rd] = VM_KERNEL_ADDRPERM((vm_offset_t) tupregs[0].dttk_value);
5618 		}
5619 
5620 		break;
5621 	}
5622 
5623 	case DIF_SUBR_KDEBUG_TRACE: {
5624 		uint32_t debugid;
5625 		uintptr_t args[4] = {0};
5626 		int i;
5627 
5628 		if (nargs < 2 || nargs > 5) {
5629 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5630 			break;
5631 		}
5632 
5633 		if (dtrace_destructive_disallow ||
5634 		    !dtrace_priv_kernel_destructive(state)) {
5635 			return;
5636 		}
5637 
5638 		debugid = tupregs[0].dttk_value;
5639 		for (i = 0; i < nargs - 1; i++)
5640 			args[i] = tupregs[i + 1].dttk_value;
5641 
5642 		kernel_debug(debugid, args[0], args[1], args[2], args[3], 0);
5643 
5644 		break;
5645 	}
5646 
5647 	case DIF_SUBR_KDEBUG_TRACE_STRING: {
5648 		if (nargs != 3) {
5649 			break;
5650 		}
5651 
5652 		if (dtrace_destructive_disallow ||
5653 		    !dtrace_priv_kernel_destructive(state)) {
5654 			return;
5655 		}
5656 
5657 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5658 		uint32_t debugid = tupregs[0].dttk_value;
5659 		uint64_t str_id = tupregs[1].dttk_value;
5660 		uintptr_t src = tupregs[2].dttk_value;
5661 		size_t lim;
5662 		char buf[size];
5663 		char* str = NULL;
5664 
5665 		if (src != (uintptr_t)0) {
5666 			str = buf;
5667 			if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5668 				break;
5669 			}
5670 			dtrace_strcpy((void*)src, buf, size);
5671 		}
5672 
5673 		(void)kernel_debug_string(debugid, &str_id, str);
5674 		regs[rd] = str_id;
5675 
5676 		break;
5677 	}
5678 
5679 	case DIF_SUBR_MTONS:
5680 		absolutetime_to_nanoseconds(tupregs[0].dttk_value, &regs[rd]);
5681 
5682 		break;
5683 	case DIF_SUBR_PHYSMEM_READ: {
5684 #if DEBUG || DEVELOPMENT
5685 		if (dtrace_destructive_disallow ||
5686 		    !dtrace_priv_kernel_destructive(state)) {
5687 			return;
5688 		}
5689 		regs[rd] = dtrace_physmem_read(tupregs[0].dttk_value,
5690 		    tupregs[1].dttk_value);
5691 #else
5692 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5693 #endif /* DEBUG || DEVELOPMENT */
5694 		break;
5695 	}
5696 	case DIF_SUBR_PHYSMEM_WRITE: {
5697 #if DEBUG || DEVELOPMENT
5698 		if (dtrace_destructive_disallow ||
5699 		    !dtrace_priv_kernel_destructive(state)) {
5700 			return;
5701 		}
5702 
5703 		dtrace_physmem_write(tupregs[0].dttk_value,
5704 		    tupregs[1].dttk_value, (size_t)tupregs[2].dttk_value);
5705 #else
5706 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5707 #endif /* DEBUG || DEVELOPMENT */
5708 		break;
5709 	}
5710 
5711 	case DIF_SUBR_KVTOPHYS: {
5712 #if DEBUG || DEVELOPMENT
5713 		regs[rd] = kvtophys(tupregs[0].dttk_value);
5714 #else
5715 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5716 #endif /* DEBUG || DEVELOPMENT */
5717 		break;
5718 	}
5719 
5720 	case DIF_SUBR_LIVEDUMP: {
5721 #if DEBUG || DEVELOPMENT
5722 		if (dtrace_destructive_disallow ||
5723 		    !dtrace_priv_kernel_destructive(state)) {
5724 			break;
5725 		}
5726 
5727 		/* For the moment, there is only one type of livedump. */
5728 		if (nargs != 1 || tupregs[0].dttk_value != 0) {
5729 			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5730 			break;
5731 		}
5732 
5733 		char *dest = (char *)mstate->dtms_scratch_ptr;
5734 		uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5735 
5736 		if (!DTRACE_INSCRATCH(mstate, size)) {
5737 			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5738 			regs[rd] = 0;
5739 			break;
5740 		}
5741 
5742 		dtrace_livedump(dest, size);
5743 		regs[rd] = (uintptr_t) dest;
5744 		mstate->dtms_scratch_ptr += strlen(dest) + 1;
5745 #else
5746 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5747 #endif /* DEBUG || DEVELOPMENT */
5748 		break;
5749 	}
5750 #endif /* defined(__APPLE__) */
5751 
5752 	}
5753 }
5754 
5755 /*
5756  * Emulate the execution of DTrace IR instructions specified by the given
5757  * DIF object.  This function is deliberately void of assertions as all of
5758  * the necessary checks are handled by a call to dtrace_difo_validate().
5759  */
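/*
 * As a rough, illustrative sketch (register numbers and assembly syntax
 * below are hypothetical, not the output of any particular compiler): a DIF
 * object for the D expression "x + 1" might emulate as
 *
 *	ldgs	var(x), %r1		! DIF_OP_LDGS: load global "x"
 *	setx	inttab[0], %r2		! DIF_OP_SETX: load the constant 1
 *	add	%r1, %r2, %r3		! DIF_OP_ADD:  %r3 = %r1 + %r2
 *	ret	%r3			! DIF_OP_RET:  rval = %r3
 *
 * The loop below fetches each 32-bit instruction, decodes its operands with
 * the DIF_INSTR_* macros, and dispatches on the opcode until it falls off
 * the end of the text or a CPU fault flag is raised.
 */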
5760 static uint64_t
5761 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
5762     dtrace_vstate_t *vstate, dtrace_state_t *state)
5763 {
5764 	const dif_instr_t *text = difo->dtdo_buf;
5765 	const uint_t textlen = difo->dtdo_len;
5766 	const char *strtab = difo->dtdo_strtab;
5767 	const uint64_t *inttab = difo->dtdo_inttab;
5768 
5769 	uint64_t rval = 0;
5770 	dtrace_statvar_t *svar;
5771 	dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
5772 	dtrace_difv_t *v;
5773 	volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5774 	volatile uint64_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
5775 
5776 	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
5777 	uint64_t regs[DIF_DIR_NREGS];
5778 	uint64_t *tmp;
5779 
5780 	uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
5781 	int64_t cc_r;
5782 	uint_t pc = 0, id, opc = 0;
5783 	uint8_t ttop = 0;
5784 	dif_instr_t instr;
5785 	uint_t r1, r2, rd;
5786 
5787 	/*
5788 	 * We stash the current DIF object into the machine state: we need it
5789 	 * for subsequent access checking.
5790 	 */
5791 	mstate->dtms_difo = difo;
5792 
5793 	regs[DIF_REG_R0] = 0; 		/* %r0 is fixed at zero */
5794 
5795 	while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
5796 		opc = pc;
5797 
5798 		instr = text[pc++];
5799 		r1 = DIF_INSTR_R1(instr);
5800 		r2 = DIF_INSTR_R2(instr);
5801 		rd = DIF_INSTR_RD(instr);
5802 
5803 		switch (DIF_INSTR_OP(instr)) {
5804 		case DIF_OP_OR:
5805 			regs[rd] = regs[r1] | regs[r2];
5806 			break;
5807 		case DIF_OP_XOR:
5808 			regs[rd] = regs[r1] ^ regs[r2];
5809 			break;
5810 		case DIF_OP_AND:
5811 			regs[rd] = regs[r1] & regs[r2];
5812 			break;
5813 		case DIF_OP_SLL:
5814 			regs[rd] = regs[r1] << regs[r2];
5815 			break;
5816 		case DIF_OP_SRL:
5817 			regs[rd] = regs[r1] >> regs[r2];
5818 			break;
5819 		case DIF_OP_SUB:
5820 			regs[rd] = regs[r1] - regs[r2];
5821 			break;
5822 		case DIF_OP_ADD:
5823 			regs[rd] = regs[r1] + regs[r2];
5824 			break;
5825 		case DIF_OP_MUL:
5826 			regs[rd] = regs[r1] * regs[r2];
5827 			break;
5828 		case DIF_OP_SDIV:
5829 			if (regs[r2] == 0) {
5830 				regs[rd] = 0;
5831 				*flags |= CPU_DTRACE_DIVZERO;
5832 			} else {
5833 				regs[rd] = (int64_t)regs[r1] /
5834 				    (int64_t)regs[r2];
5835 			}
5836 			break;
5837 
5838 		case DIF_OP_UDIV:
5839 			if (regs[r2] == 0) {
5840 				regs[rd] = 0;
5841 				*flags |= CPU_DTRACE_DIVZERO;
5842 			} else {
5843 				regs[rd] = regs[r1] / regs[r2];
5844 			}
5845 			break;
5846 
5847 		case DIF_OP_SREM:
5848 			if (regs[r2] == 0) {
5849 				regs[rd] = 0;
5850 				*flags |= CPU_DTRACE_DIVZERO;
5851 			} else {
5852 				regs[rd] = (int64_t)regs[r1] %
5853 				    (int64_t)regs[r2];
5854 			}
5855 			break;
5856 
5857 		case DIF_OP_UREM:
5858 			if (regs[r2] == 0) {
5859 				regs[rd] = 0;
5860 				*flags |= CPU_DTRACE_DIVZERO;
5861 			} else {
5862 				regs[rd] = regs[r1] % regs[r2];
5863 			}
5864 			break;
5865 
5866 		case DIF_OP_NOT:
5867 			regs[rd] = ~regs[r1];
5868 			break;
5869 		case DIF_OP_MOV:
5870 			regs[rd] = regs[r1];
5871 			break;
5872 		case DIF_OP_CMP:
5873 			cc_r = regs[r1] - regs[r2];
5874 			cc_n = cc_r < 0;
5875 			cc_z = cc_r == 0;
5876 			cc_v = 0;
5877 			cc_c = regs[r1] < regs[r2];
5878 			break;
5879 		case DIF_OP_TST:
5880 			cc_n = cc_v = cc_c = 0;
5881 			cc_z = regs[r1] == 0;
5882 			break;
5883 		case DIF_OP_BA:
5884 			pc = DIF_INSTR_LABEL(instr);
5885 			break;
5886 		case DIF_OP_BE:
5887 			if (cc_z)
5888 				pc = DIF_INSTR_LABEL(instr);
5889 			break;
5890 		case DIF_OP_BNE:
5891 			if (cc_z == 0)
5892 				pc = DIF_INSTR_LABEL(instr);
5893 			break;
5894 		case DIF_OP_BG:
5895 			if ((cc_z | (cc_n ^ cc_v)) == 0)
5896 				pc = DIF_INSTR_LABEL(instr);
5897 			break;
5898 		case DIF_OP_BGU:
5899 			if ((cc_c | cc_z) == 0)
5900 				pc = DIF_INSTR_LABEL(instr);
5901 			break;
5902 		case DIF_OP_BGE:
5903 			if ((cc_n ^ cc_v) == 0)
5904 				pc = DIF_INSTR_LABEL(instr);
5905 			break;
5906 		case DIF_OP_BGEU:
5907 			if (cc_c == 0)
5908 				pc = DIF_INSTR_LABEL(instr);
5909 			break;
5910 		case DIF_OP_BL:
5911 			if (cc_n ^ cc_v)
5912 				pc = DIF_INSTR_LABEL(instr);
5913 			break;
5914 		case DIF_OP_BLU:
5915 			if (cc_c)
5916 				pc = DIF_INSTR_LABEL(instr);
5917 			break;
5918 		case DIF_OP_BLE:
5919 			if (cc_z | (cc_n ^ cc_v))
5920 				pc = DIF_INSTR_LABEL(instr);
5921 			break;
5922 		case DIF_OP_BLEU:
5923 			if (cc_c | cc_z)
5924 				pc = DIF_INSTR_LABEL(instr);
5925 			break;
5926 		case DIF_OP_RLDSB:
5927 			if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5928 				*flags |= CPU_DTRACE_KPRIV;
5929 				*illval = regs[r1];
5930 				break;
5931 			}
5932 			OS_FALLTHROUGH;
5933 		case DIF_OP_LDSB:
5934 			regs[rd] = (int8_t)dtrace_load8(regs[r1]);
5935 			break;
5936 		case DIF_OP_RLDSH:
5937 			if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5938 				*flags |= CPU_DTRACE_KPRIV;
5939 				*illval = regs[r1];
5940 				break;
5941 			}
5942 			OS_FALLTHROUGH;
5943 		case DIF_OP_LDSH:
5944 			regs[rd] = (int16_t)dtrace_load16(regs[r1]);
5945 			break;
5946 		case DIF_OP_RLDSW:
5947 			if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5948 				*flags |= CPU_DTRACE_KPRIV;
5949 				*illval = regs[r1];
5950 				break;
5951 			}
5952 			OS_FALLTHROUGH;
5953 		case DIF_OP_LDSW:
5954 			regs[rd] = (int32_t)dtrace_load32(regs[r1]);
5955 			break;
5956 		case DIF_OP_RLDUB:
5957 			if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
5958 				*flags |= CPU_DTRACE_KPRIV;
5959 				*illval = regs[r1];
5960 				break;
5961 			}
5962 			OS_FALLTHROUGH;
5963 		case DIF_OP_LDUB:
5964 			regs[rd] = dtrace_load8(regs[r1]);
5965 			break;
5966 		case DIF_OP_RLDUH:
5967 			if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
5968 				*flags |= CPU_DTRACE_KPRIV;
5969 				*illval = regs[r1];
5970 				break;
5971 			}
5972 			OS_FALLTHROUGH;
5973 		case DIF_OP_LDUH:
5974 			regs[rd] = dtrace_load16(regs[r1]);
5975 			break;
5976 		case DIF_OP_RLDUW:
5977 			if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
5978 				*flags |= CPU_DTRACE_KPRIV;
5979 				*illval = regs[r1];
5980 				break;
5981 			}
5982 			OS_FALLTHROUGH;
5983 		case DIF_OP_LDUW:
5984 			regs[rd] = dtrace_load32(regs[r1]);
5985 			break;
5986 		case DIF_OP_RLDX:
5987 			if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
5988 				*flags |= CPU_DTRACE_KPRIV;
5989 				*illval = regs[r1];
5990 				break;
5991 			}
5992 			OS_FALLTHROUGH;
5993 		case DIF_OP_LDX:
5994 			regs[rd] = dtrace_load64(regs[r1]);
5995 			break;
5996 /*
5997  * Darwin 32-bit kernel may fetch from 64-bit user.
5998  * Do not cast regs to uintptr_t for:
5999  * DIF_OP_ULDSB, DIF_OP_ULDSH, DIF_OP_ULDSW, DIF_OP_ULDUB,
6000  * DIF_OP_ULDUH, DIF_OP_ULDUW, DIF_OP_ULDX.
6001  */
6002 		case DIF_OP_ULDSB:
6003 			regs[rd] = (int8_t)
6004 			    dtrace_fuword8(regs[r1]);
6005 			break;
6006 		case DIF_OP_ULDSH:
6007 			regs[rd] = (int16_t)
6008 			    dtrace_fuword16(regs[r1]);
6009 			break;
6010 		case DIF_OP_ULDSW:
6011 			regs[rd] = (int32_t)
6012 			    dtrace_fuword32(regs[r1]);
6013 			break;
6014 		case DIF_OP_ULDUB:
6015 			regs[rd] =
6016 			    dtrace_fuword8(regs[r1]);
6017 			break;
6018 		case DIF_OP_ULDUH:
6019 			regs[rd] =
6020 			    dtrace_fuword16(regs[r1]);
6021 			break;
6022 		case DIF_OP_ULDUW:
6023 			regs[rd] =
6024 			    dtrace_fuword32(regs[r1]);
6025 			break;
6026 		case DIF_OP_ULDX:
6027 			regs[rd] =
6028 			    dtrace_fuword64(regs[r1]);
6029 			break;
6030 		case DIF_OP_RET:
6031 			rval = regs[rd];
6032 			pc = textlen;
6033 			break;
6034 		case DIF_OP_NOP:
6035 			break;
6036 		case DIF_OP_SETX:
6037 			regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
6038 			break;
6039 		case DIF_OP_SETS:
6040 			regs[rd] = (uint64_t)(uintptr_t)
6041 			    (strtab + DIF_INSTR_STRING(instr));
6042 			break;
6043 		case DIF_OP_SCMP: {
6044 			size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
6045 			uintptr_t s1 = regs[r1];
6046 			uintptr_t s2 = regs[r2];
6047 			size_t lim1 = sz, lim2 = sz;
6048 
6049 			if (s1 != 0 &&
6050 			    !dtrace_strcanload(s1, sz, &lim1, mstate, vstate))
6051 				break;
6052 			if (s2 != 0 &&
6053 			    !dtrace_strcanload(s2, sz, &lim2, mstate, vstate))
6054 				break;
6055 
6056 			cc_r = dtrace_strncmp((char *)s1, (char *)s2,
6057 				MIN(lim1, lim2));
6058 
6059 			cc_n = cc_r < 0;
6060 			cc_z = cc_r == 0;
6061 			cc_v = cc_c = 0;
6062 			break;
6063 		}
6064 		case DIF_OP_LDGA:
6065 			regs[rd] = dtrace_dif_variable(mstate, state,
6066 			    r1, regs[r2]);
6067 			break;
6068 		case DIF_OP_LDGS:
6069 			id = DIF_INSTR_VAR(instr);
6070 
6071 			if (id >= DIF_VAR_OTHER_UBASE) {
6072 				uintptr_t a;
6073 
6074 				id -= DIF_VAR_OTHER_UBASE;
6075 				svar = vstate->dtvs_globals[id];
6076 				ASSERT(svar != NULL);
6077 				v = &svar->dtsv_var;
6078 
6079 				if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6080 					regs[rd] = svar->dtsv_data;
6081 					break;
6082 				}
6083 
6084 				a = (uintptr_t)svar->dtsv_data;
6085 
6086 				if (*(uint8_t *)a == UINT8_MAX) {
6087 					/*
6088 					 * If the 0th byte is set to UINT8_MAX
6089 					 * then this is to be treated as a
6090 					 * reference to a NULL variable.
6091 					 */
6092 					regs[rd] = 0;
6093 				} else {
6094 					regs[rd] = a + sizeof (uint64_t);
6095 				}
6096 
6097 				break;
6098 			}
6099 
6100 			regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6101 			break;
6102 
6103 		case DIF_OP_STGS:
6104 			id = DIF_INSTR_VAR(instr);
6105 
6106 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6107 			id -= DIF_VAR_OTHER_UBASE;
6108 
6109 			VERIFY(id < (uint_t)vstate->dtvs_nglobals);
6110 			svar = vstate->dtvs_globals[id];
6111 			ASSERT(svar != NULL);
6112 			v = &svar->dtsv_var;
6113 
6114 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6115 				uintptr_t a = (uintptr_t)svar->dtsv_data;
6116 				size_t lim = 0;
6117 
6118 				ASSERT(a != 0);
6119 				ASSERT(svar->dtsv_size != 0);
6120 
6121 				if (regs[rd] == 0) {
6122 					*(uint8_t *)a = UINT8_MAX;
6123 					break;
6124 				} else {
6125 					*(uint8_t *)a = 0;
6126 					a += sizeof (uint64_t);
6127 				}
6128 				if (!dtrace_vcanload(
6129 				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6130 				    &lim, mstate, vstate))
6131 					break;
6132 
6133 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6134 				    (void *)a, &v->dtdv_type, lim);
6135 				break;
6136 			}
6137 
6138 			svar->dtsv_data = regs[rd];
6139 			break;
6140 
6141 		case DIF_OP_LDTA:
6142 			/*
6143 			 * There are no DTrace built-in thread-local arrays at
6144 			 * present.  This opcode is saved for future work.
6145 			 */
6146 			*flags |= CPU_DTRACE_ILLOP;
6147 			regs[rd] = 0;
6148 			break;
6149 
6150 		case DIF_OP_LDLS:
6151 			id = DIF_INSTR_VAR(instr);
6152 
6153 			if (id < DIF_VAR_OTHER_UBASE) {
6154 				/*
6155 				 * For now, this has no meaning.
6156 				 */
6157 				regs[rd] = 0;
6158 				break;
6159 			}
6160 
6161 			id -= DIF_VAR_OTHER_UBASE;
6162 
6163 			ASSERT(id < (uint_t)vstate->dtvs_nlocals);
6164 			ASSERT(vstate->dtvs_locals != NULL);
6165 			svar = vstate->dtvs_locals[id];
6166 			ASSERT(svar != NULL);
6167 			v = &svar->dtsv_var;
6168 
6169 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6170 				uintptr_t a = (uintptr_t)svar->dtsv_data;
6171 				size_t sz = v->dtdv_type.dtdt_size;
6172 
6173 				sz += sizeof (uint64_t);
6174 				ASSERT(svar->dtsv_size == (int)NCPU * sz);
6175 				a += CPU->cpu_id * sz;
6176 
6177 				if (*(uint8_t *)a == UINT8_MAX) {
6178 					/*
6179 					 * If the 0th byte is set to UINT8_MAX
6180 					 * then this is to be treated as a
6181 					 * reference to a NULL variable.
6182 					 */
6183 					regs[rd] = 0;
6184 				} else {
6185 					regs[rd] = a + sizeof (uint64_t);
6186 				}
6187 
6188 				break;
6189 			}
6190 
6191 			ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
6192 			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6193 			regs[rd] = tmp[CPU->cpu_id];
6194 			break;
6195 
6196 		case DIF_OP_STLS:
6197 			id = DIF_INSTR_VAR(instr);
6198 
6199 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6200 			id -= DIF_VAR_OTHER_UBASE;
6201 			VERIFY(id < (uint_t)vstate->dtvs_nlocals);
6202 			ASSERT(vstate->dtvs_locals != NULL);
6203 			svar = vstate->dtvs_locals[id];
6204 			ASSERT(svar != NULL);
6205 			v = &svar->dtsv_var;
6206 
6207 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6208 				uintptr_t a = (uintptr_t)svar->dtsv_data;
6209 				size_t sz = v->dtdv_type.dtdt_size;
6210 				size_t lim = 0;
6211 
6212 				sz += sizeof (uint64_t);
6213 				ASSERT(svar->dtsv_size == (int)NCPU * sz);
6214 				a += CPU->cpu_id * sz;
6215 
6216 				if (regs[rd] == 0) {
6217 					*(uint8_t *)a = UINT8_MAX;
6218 					break;
6219 				} else {
6220 					*(uint8_t *)a = 0;
6221 					a += sizeof (uint64_t);
6222 				}
6223 
6224 				if (!dtrace_vcanload(
6225 				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6226 				    &lim, mstate, vstate))
6227 					break;
6228 
6229 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6230 				    (void *)a, &v->dtdv_type, lim);
6231 				break;
6232 			}
6233 
6234 			ASSERT(svar->dtsv_size == (int)NCPU * sizeof (uint64_t));
6235 			tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6236 			tmp[CPU->cpu_id] = regs[rd];
6237 			break;
6238 
6239 		case DIF_OP_LDTS: {
6240 			dtrace_dynvar_t *dvar;
6241 			dtrace_key_t *key;
6242 
6243 			id = DIF_INSTR_VAR(instr);
6244 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6245 			id -= DIF_VAR_OTHER_UBASE;
6246 			v = &vstate->dtvs_tlocals[id];
6247 
6248 			key = &tupregs[DIF_DTR_NREGS];
6249 			key[0].dttk_value = (uint64_t)id;
6250 			key[0].dttk_size = 0;
6251 			DTRACE_TLS_THRKEY(key[1].dttk_value);
6252 			key[1].dttk_size = 0;
6253 
6254 			dvar = dtrace_dynvar(dstate, 2, key,
6255 			    sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6256 			    mstate, vstate);
6257 
6258 			if (dvar == NULL) {
6259 				regs[rd] = 0;
6260 				break;
6261 			}
6262 
6263 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6264 				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6265 			} else {
6266 				regs[rd] = *((uint64_t *)dvar->dtdv_data);
6267 			}
6268 
6269 			break;
6270 		}
6271 
6272 		case DIF_OP_STTS: {
6273 			dtrace_dynvar_t *dvar;
6274 			dtrace_key_t *key;
6275 
6276 			id = DIF_INSTR_VAR(instr);
6277 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6278 			id -= DIF_VAR_OTHER_UBASE;
6279 			VERIFY(id < (uint_t)vstate->dtvs_ntlocals);
6280 
6281 			key = &tupregs[DIF_DTR_NREGS];
6282 			key[0].dttk_value = (uint64_t)id;
6283 			key[0].dttk_size = 0;
6284 			DTRACE_TLS_THRKEY(key[1].dttk_value);
6285 			key[1].dttk_size = 0;
6286 			v = &vstate->dtvs_tlocals[id];
6287 
6288 			dvar = dtrace_dynvar(dstate, 2, key,
6289 			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6290 			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6291 			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
6292 			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6293 
6294 			/*
6295 			 * Given that we're storing to thread-local data,
6296 			 * we need to flush our predicate cache.
6297 			 */
6298 			dtrace_set_thread_predcache(current_thread(), 0);
6299 
6300 			if (dvar == NULL)
6301 				break;
6302 
6303 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6304 				size_t lim = 0;
6305 
6306 				if (!dtrace_vcanload(
6307 				    (void *)(uintptr_t)regs[rd],
6308 				    &v->dtdv_type, &lim, mstate, vstate))
6309 					break;
6310 
6311 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6312 				    dvar->dtdv_data, &v->dtdv_type, lim);
6313 			} else {
6314 				*((uint64_t *)dvar->dtdv_data) = regs[rd];
6315 			}
6316 
6317 			break;
6318 		}
6319 
6320 		case DIF_OP_SRA:
6321 			regs[rd] = (int64_t)regs[r1] >> regs[r2];
6322 			break;
6323 
6324 		case DIF_OP_CALL:
6325 			dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6326 			    regs, tupregs, ttop, mstate, state);
6327 			break;
6328 
6329 		case DIF_OP_PUSHTR:
6330 			if (ttop == DIF_DTR_NREGS) {
6331 				*flags |= CPU_DTRACE_TUPOFLOW;
6332 				break;
6333 			}
6334 
6335 			if (r1 == DIF_TYPE_STRING) {
6336 				/*
6337 				 * If this is a string type and the size is 0,
6338 				 * we'll use the system-wide default string
6339 				 * size.  Note that we are _not_ looking at
6340 				 * the value of the DTRACEOPT_STRSIZE option;
6341 				 * had this been set, we would expect to have
6342 				 * a non-zero size value in the "pushtr".
6343 				 */
6344 				tupregs[ttop].dttk_size =
6345 				    dtrace_strlen((char *)(uintptr_t)regs[rd],
6346 				    regs[r2] ? regs[r2] :
6347 				    dtrace_strsize_default) + 1;
6348 			} else {
6349 				if (regs[r2] > LONG_MAX) {
6350 					*flags |= CPU_DTRACE_ILLOP;
6351 					break;
6352 				}
6353 				tupregs[ttop].dttk_size = regs[r2];
6354 			}
6355 
6356 			tupregs[ttop++].dttk_value = regs[rd];
6357 			break;
6358 
6359 		case DIF_OP_PUSHTV:
6360 			if (ttop == DIF_DTR_NREGS) {
6361 				*flags |= CPU_DTRACE_TUPOFLOW;
6362 				break;
6363 			}
6364 
6365 			tupregs[ttop].dttk_value = regs[rd];
6366 			tupregs[ttop++].dttk_size = 0;
6367 			break;
6368 
6369 		case DIF_OP_POPTS:
6370 			if (ttop != 0)
6371 				ttop--;
6372 			break;
6373 
6374 		case DIF_OP_FLUSHTS:
6375 			ttop = 0;
6376 			break;
6377 
6378 		case DIF_OP_LDGAA:
6379 		case DIF_OP_LDTAA: {
6380 			dtrace_dynvar_t *dvar;
6381 			dtrace_key_t *key = tupregs;
6382 			uint_t nkeys = ttop;
6383 
6384 			id = DIF_INSTR_VAR(instr);
6385 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6386 			id -= DIF_VAR_OTHER_UBASE;
6387 
6388 			key[nkeys].dttk_value = (uint64_t)id;
6389 			key[nkeys++].dttk_size = 0;
6390 
6391 			if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6392 				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6393 				key[nkeys++].dttk_size = 0;
6394 				VERIFY(id < (uint_t)vstate->dtvs_ntlocals);
6395 				v = &vstate->dtvs_tlocals[id];
6396 			} else {
6397 				VERIFY(id < (uint_t)vstate->dtvs_nglobals);
6398 				v = &vstate->dtvs_globals[id]->dtsv_var;
6399 			}
6400 
6401 			dvar = dtrace_dynvar(dstate, nkeys, key,
6402 			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6403 			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6404 			    DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6405 
6406 			if (dvar == NULL) {
6407 				regs[rd] = 0;
6408 				break;
6409 			}
6410 
6411 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6412 				regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6413 			} else {
6414 				regs[rd] = *((uint64_t *)dvar->dtdv_data);
6415 			}
6416 
6417 			break;
6418 		}
6419 
6420 		case DIF_OP_STGAA:
6421 		case DIF_OP_STTAA: {
6422 			dtrace_dynvar_t *dvar;
6423 			dtrace_key_t *key = tupregs;
6424 			uint_t nkeys = ttop;
6425 
6426 			id = DIF_INSTR_VAR(instr);
6427 			ASSERT(id >= DIF_VAR_OTHER_UBASE);
6428 			id -= DIF_VAR_OTHER_UBASE;
6429 
6430 			key[nkeys].dttk_value = (uint64_t)id;
6431 			key[nkeys++].dttk_size = 0;
6432 
6433 			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6434 				DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6435 				key[nkeys++].dttk_size = 0;
6436 				VERIFY(id < (uint_t)vstate->dtvs_ntlocals);
6437 				v = &vstate->dtvs_tlocals[id];
6438 			} else {
6439 				VERIFY(id < (uint_t)vstate->dtvs_nglobals);
6440 				v = &vstate->dtvs_globals[id]->dtsv_var;
6441 			}
6442 
6443 			dvar = dtrace_dynvar(dstate, nkeys, key,
6444 			    v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6445 			    v->dtdv_type.dtdt_size : sizeof (uint64_t),
6446 			    regs[rd] ? DTRACE_DYNVAR_ALLOC :
6447 			    DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6448 
6449 			if (dvar == NULL)
6450 				break;
6451 
6452 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6453 				size_t lim = 0;
6454 
6455 				if (!dtrace_vcanload(
6456 				    (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6457 				    &lim, mstate, vstate))
6458 					break;
6459 
6460 				dtrace_vcopy((void *)(uintptr_t)regs[rd],
6461 				    dvar->dtdv_data, &v->dtdv_type, lim);
6462 			} else {
6463 				*((uint64_t *)dvar->dtdv_data) = regs[rd];
6464 			}
6465 
6466 			break;
6467 		}
6468 
6469 		case DIF_OP_ALLOCS: {
6470 			uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6471 			size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6472 
6473 			/*
6474 			 * Rounding up the user allocation size could have
6475 			 * overflowed large, bogus allocations (like -1ULL) to
6476 			 * 0.
6477 			 */
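			/*
			 * As an illustrative example: with regs[r1] == -1ULL
			 * and 4 bytes of alignment slack, size wraps around to
			 * 3, which the size < regs[r1] test below rejects;
			 * with no slack, the DTRACE_INSCRATCH() check rejects
			 * it instead.
			 */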
6478 			if (size < regs[r1] ||
6479 			    !DTRACE_INSCRATCH(mstate, size)) {
6480 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6481 				regs[rd] = 0;
6482 				break;
6483 			}
6484 
6485 			dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6486 			mstate->dtms_scratch_ptr += size;
6487 			regs[rd] = ptr;
6488 			break;
6489 		}
6490 
6491 		case DIF_OP_COPYS:
6492 			if (!dtrace_canstore(regs[rd], regs[r2],
6493 			    mstate, vstate)) {
6494 				*flags |= CPU_DTRACE_BADADDR;
6495 				*illval = regs[rd];
6496 				break;
6497 			}
6498 
6499 			if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6500 				break;
6501 
6502 			dtrace_bcopy((void *)(uintptr_t)regs[r1],
6503 			    (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6504 			break;
6505 
6506 		case DIF_OP_STB:
6507 			if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6508 				*flags |= CPU_DTRACE_BADADDR;
6509 				*illval = regs[rd];
6510 				break;
6511 			}
6512 			*((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6513 			break;
6514 
6515 		case DIF_OP_STH:
6516 			if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6517 				*flags |= CPU_DTRACE_BADADDR;
6518 				*illval = regs[rd];
6519 				break;
6520 			}
6521 			if (regs[rd] & 1) {
6522 				*flags |= CPU_DTRACE_BADALIGN;
6523 				*illval = regs[rd];
6524 				break;
6525 			}
6526 			*((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6527 			break;
6528 
6529 		case DIF_OP_STW:
6530 			if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6531 				*flags |= CPU_DTRACE_BADADDR;
6532 				*illval = regs[rd];
6533 				break;
6534 			}
6535 			if (regs[rd] & 3) {
6536 				*flags |= CPU_DTRACE_BADALIGN;
6537 				*illval = regs[rd];
6538 				break;
6539 			}
6540 			*((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6541 			break;
6542 
6543 		case DIF_OP_STX:
6544 			if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6545 				*flags |= CPU_DTRACE_BADADDR;
6546 				*illval = regs[rd];
6547 				break;
6548 			}
6549 
6550 			/*
6551 			 * Darwin kmem_zalloc() called from
6552 			 * dtrace_difo_init() is 4-byte aligned.
6553 			 */
6554 			if (regs[rd] & 3) {
6555 				*flags |= CPU_DTRACE_BADALIGN;
6556 				*illval = regs[rd];
6557 				break;
6558 			}
6559 			*((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6560 			break;
6561 		case DIF_OP_STRIP:
6562 			regs[rd] = (uint64_t)dtrace_ptrauth_strip(
6563 			    (void*)regs[r1], r2);
6564 			break;
6565 		}
6566 	}
6567 
6568 	if (!(*flags & CPU_DTRACE_FAULT))
6569 		return (rval);
6570 
6571 	mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6572 	mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6573 
6574 	return (0);
6575 }
6576 
6577 __attribute__((noinline))
6578 static void
6579 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6580 {
6581 	dtrace_probe_t *probe = ecb->dte_probe;
6582 	dtrace_provider_t *prov = probe->dtpr_provider;
6583 	char c[DTRACE_FULLNAMELEN + 80], *str;
6584 	const char *msg = "dtrace: breakpoint action at probe ";
6585 	const char *ecbmsg = " (ecb ";
6586 	uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6587 	uintptr_t val = (uintptr_t)ecb;
6588 	int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6589 
6590 	if (dtrace_destructive_disallow)
6591 		return;
6592 
6593 	/*
6594 	 * It's impossible to be taking action on the NULL probe.
6595 	 */
6596 	ASSERT(probe != NULL);
6597 
6598 	/*
6599 	 * This is a poor man's (destitute man's?) sprintf():  we want to
6600 	 * print the provider name, module name, function name and name of
6601 	 * the probe, along with the hex address of the ECB with the breakpoint
6602 	 * action -- all of which we must place in the character buffer by
6603 	 * hand.
6604 	 */
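	/*
	 * As an illustrative example (probe name and address hypothetical),
	 * the assembled buffer ends up looking something like:
	 *	dtrace: breakpoint action at probe fbt:mach_kernel:foo:entry (ecb ffffff8012345678)
	 */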
6605 	while (*msg != '\0')
6606 		c[i++] = *msg++;
6607 
6608 	for (str = prov->dtpv_name; *str != '\0'; str++)
6609 		c[i++] = *str;
6610 	c[i++] = ':';
6611 
6612 	for (str = probe->dtpr_mod; *str != '\0'; str++)
6613 		c[i++] = *str;
6614 	c[i++] = ':';
6615 
6616 	for (str = probe->dtpr_func; *str != '\0'; str++)
6617 		c[i++] = *str;
6618 	c[i++] = ':';
6619 
6620 	for (str = probe->dtpr_name; *str != '\0'; str++)
6621 		c[i++] = *str;
6622 
6623 	while (*ecbmsg != '\0')
6624 		c[i++] = *ecbmsg++;
6625 
6626 	while (shift >= 0) {
6627 		mask = (uintptr_t)0xf << shift;
6628 
6629 		if (val >= ((uintptr_t)1 << shift))
6630 			c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6631 		shift -= 4;
6632 	}
6633 
6634 	c[i++] = ')';
6635 	c[i] = '\0';
6636 
6637 	debug_enter(c);
6638 }
6639 
6640 __attribute__((noinline))
6641 static void
6642 dtrace_action_panic(dtrace_ecb_t *ecb)
6643 {
6644 	dtrace_probe_t *probe = ecb->dte_probe;
6645 
6646 	/*
6647 	 * It's impossible to be taking action on the NULL probe.
6648 	 */
6649 	ASSERT(probe != NULL);
6650 
6651 	if (dtrace_destructive_disallow)
6652 		return;
6653 
6654 	if (dtrace_panicked != NULL)
6655 		return;
6656 
6657 	if (dtrace_casptr(&dtrace_panicked, NULL, current_thread()) != NULL)
6658 		return;
6659 
6660 	/*
6661 	 * We won the right to panic.  (We want to be sure that only one
6662 	 * thread calls panic() from dtrace_probe(), and that panic() is
6663 	 * called exactly once.)
6664 	 */
6665 	panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6666 	    probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6667 	    probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6668 
6669 	/*
6670 	 * APPLE NOTE: this was for an old Mac OS X debug feature
6671 	 * allowing a return from panic().  Revisit someday.
6672 	 */
6673 	dtrace_panicked = NULL;
6674 }
6675 
6676 static void
6677 dtrace_action_raise(uint64_t sig)
6678 {
6679 	if (dtrace_destructive_disallow)
6680 		return;
6681 
6682 	if (sig >= NSIG) {
6683 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6684 		return;
6685 	}
6686 
6687 	/*
6688 	 * raise() has a queue depth of 1 -- we ignore all subsequent
6689 	 * invocations of the raise() action.
6690 	 */
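	/*
	 * As an illustrative consequence: if a clause executes raise(SIGINT)
	 * and then raise(SIGTERM) before the thread has taken the AST, only
	 * SIGINT is delivered; the second request is dropped below.
	 */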
6691 
6692 	uthread_t uthread = current_uthread();
6693 
6694 	if (uthread && uthread->t_dtrace_sig == 0) {
6695 		uthread->t_dtrace_sig = sig;
6696 		act_set_astbsd(current_thread());
6697 	}
6698 }
6699 
6700 static void
6701 dtrace_action_stop(void)
6702 {
6703 	if (dtrace_destructive_disallow)
6704 		return;
6705 
6706 	uthread_t uthread = current_uthread();
6707 	if (uthread) {
6708 		/*
6709 		 * The currently running process will be suspended via
6710 		 * task_suspend when it next leaves the kernel.
6711 		 */
6712 		uthread->t_dtrace_stop = 1;
6713 		act_set_astbsd(current_thread());
6714 	}
6715 }
6716 
6717 
6718 /*
6719  * APPLE NOTE: pidresume works in conjunction with the dtrace stop action.
6720  * Both activate only when the currently running process next leaves the
6721  * kernel.
6722  */
6723 static void
6724 dtrace_action_pidresume(uint64_t pid)
6725 {
6726 	if (dtrace_destructive_disallow)
6727 		return;
6728 
6729 	if (kauth_cred_issuser(kauth_cred_get()) == 0) {
6730 		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6731 		return;
6732 	}
6733 	uthread_t uthread = current_uthread();
6734 
6735 	/*
6736 	 * When the currently running process leaves the kernel, it attempts to
6737 	 * task_resume the process (denoted by pid), if that pid appears to have
6738 	 * been stopped by dtrace_action_stop().
6739 	 * The currently running process has a pidresume() queue depth of 1 --
6740 	 * subsequent invocations of the pidresume() action are ignored.
6741 	 */
6742 
6743 	if (pid != 0 && uthread && uthread->t_dtrace_resumepid == 0) {
6744 		uthread->t_dtrace_resumepid = pid;
6745 		act_set_astbsd(current_thread());
6746 	}
6747 }
6748 
6749 __attribute__((noinline))
6750 static void
6751 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
6752 {
6753 	hrtime_t now;
6754 	volatile uint16_t *flags;
6755 	dtrace_cpu_t *cpu = CPU;
6756 
6757 	if (dtrace_destructive_disallow)
6758 		return;
6759 
6760 	flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
6761 
6762 	now = dtrace_gethrtime();
6763 
6764 	if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
6765 		/*
6766 		 * We need to advance the mark to the current time.
6767 		 */
6768 		cpu->cpu_dtrace_chillmark = now;
6769 		cpu->cpu_dtrace_chilled = 0;
6770 	}
6771 
6772 	/*
6773 	 * Now check to see if the requested chill time would take us over
6774 	 * the maximum amount of time allowed in the chill interval.  (Or
6775 	 * worse, if the calculation itself induces overflow.)
6776 	 */
6777 	if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
6778 	    cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
6779 		*flags |= CPU_DTRACE_ILLOP;
6780 		return;
6781 	}
6782 
6783 	while (dtrace_gethrtime() - now < val)
6784 		continue;
6785 
6786 	/*
6787 	 * Normally, we assure that the value of the variable "timestamp" does
6788 	 * not change within an ECB.  The presence of chill() represents an
6789 	 * exception to this rule, however.
6790 	 */
6791 	mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
6792 	cpu->cpu_dtrace_chilled += val;
6793 }
6794 
6795 __attribute__((noinline))
6796 static void
6797 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
6798     uint64_t *buf, uint64_t arg)
6799 {
6800 	int nframes = DTRACE_USTACK_NFRAMES(arg);
6801 	int strsize = DTRACE_USTACK_STRSIZE(arg);
6802 	uint64_t *pcs = &buf[1], *fps;
6803 	char *str = (char *)&pcs[nframes];
6804 	int size, offs = 0, i, j;
6805 	uintptr_t old = mstate->dtms_scratch_ptr, saved;
6806 	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6807 	char *sym;
6808 
6809 	/*
6810 	 * Should be taking a faster path if string space has not been
6811 	 * allocated.
6812 	 */
6813 	ASSERT(strsize != 0);
6814 
6815 	/*
6816 	 * We will first allocate some temporary space for the frame pointers.
6817 	 */
6818 	fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6819 	size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
6820 	    (nframes * sizeof (uint64_t));
6821 
6822 	if (!DTRACE_INSCRATCH(mstate, (uintptr_t)size)) {
6823 		/*
6824 		 * Not enough room for our frame pointers -- need to indicate
6825 		 * that we ran out of scratch space.
6826 		 */
6827 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6828 		return;
6829 	}
6830 
6831 	mstate->dtms_scratch_ptr += size;
6832 	saved = mstate->dtms_scratch_ptr;
6833 
6834 	/*
6835 	 * Now get a stack with both program counters and frame pointers.
6836 	 */
6837 	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6838 	dtrace_getufpstack(buf, fps, nframes + 1);
6839 	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6840 
6841 	/*
6842 	 * If that faulted, we're cooked.
6843 	 */
6844 	if (*flags & CPU_DTRACE_FAULT)
6845 		goto out;
6846 
6847 	/*
6848 	 * Now we want to walk up the stack, calling the USTACK helper.  For
6849 	 * each iteration, we restore the scratch pointer.
6850 	 */
6851 	for (i = 0; i < nframes; i++) {
6852 		mstate->dtms_scratch_ptr = saved;
6853 
6854 		if (offs >= strsize)
6855 			break;
6856 
6857 		sym = (char *)(uintptr_t)dtrace_helper(
6858 		    DTRACE_HELPER_ACTION_USTACK,
6859 		    mstate, state, pcs[i], fps[i]);
6860 
6861 		/*
6862 		 * If we faulted while running the helper, we're going to
6863 		 * clear the fault and null out the corresponding string.
6864 		 */
6865 		if (*flags & CPU_DTRACE_FAULT) {
6866 			*flags &= ~CPU_DTRACE_FAULT;
6867 			str[offs++] = '\0';
6868 			continue;
6869 		}
6870 
6871 		if (sym == NULL) {
6872 			str[offs++] = '\0';
6873 			continue;
6874 		}
6875 
6876 		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6877 
6878 		/*
6879 		 * Now copy in the string that the helper returned to us.
6880 		 */
6881 		for (j = 0; offs + j < strsize; j++) {
6882 			if ((str[offs + j] = sym[j]) == '\0')
6883 				break;
6884 		}
6885 
6886 		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6887 
6888 		offs += j + 1;
6889 	}
6890 
6891 	if (offs >= strsize) {
6892 		/*
6893 		 * If we didn't have room for all of the strings, we don't
6894 		 * abort processing -- this needn't be a fatal error -- but we
6895 		 * still want to increment a counter (dts_stkstroverflows) to
6896 		 * allow this condition to be warned about.  (If this is from
6897 		 * a jstack() action, it is easily tuned via jstackstrsize.)
6898 		 */
6899 		dtrace_error(&state->dts_stkstroverflows);
6900 	}
6901 
6902 	while (offs < strsize)
6903 		str[offs++] = '\0';
6904 
6905 out:
6906 	mstate->dtms_scratch_ptr = old;
6907 }
6908 
6909 __attribute__((noinline))
6910 static void
6911 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
6912     size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
6913 {
6914 	volatile uint16_t *flags;
6915 	uint64_t val = *valp;
6916 	size_t valoffs = *valoffsp;
6917 
6918 	flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
6919 	ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
6920 
6921 	/*
6922 	 * If this is a string, we're going to only load until we find the zero
6923 	 * byte -- after which we'll store zero bytes.
6924 	 */
6925 	if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
6926 		char c = '\0' + 1;
6927 		size_t s;
6928 
6929 		for (s = 0; s < size; s++) {
6930 			if (c != '\0' && dtkind == DIF_TF_BYREF) {
6931 				c = dtrace_load8(val++);
6932 			} else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
6933 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6934 				c = dtrace_fuword8((user_addr_t)(uintptr_t)val++);
6935 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6936 				if (*flags & CPU_DTRACE_FAULT)
6937 					break;
6938 			}
6939 
6940 			DTRACE_STORE(uint8_t, tomax, valoffs++, c);
6941 
6942 			if (c == '\0' && intuple)
6943 				break;
6944 		}
6945 	} else {
6946 		uint8_t c;
6947 		while (valoffs < end) {
6948 			if (dtkind == DIF_TF_BYREF) {
6949 				c = dtrace_load8(val++);
6950 			} else if (dtkind == DIF_TF_BYUREF) {
6951 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6952 				c = dtrace_fuword8((user_addr_t)(uintptr_t)val++);
6953 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6954 				if (*flags & CPU_DTRACE_FAULT)
6955 					break;
6956 			}
6957 
6958 			DTRACE_STORE(uint8_t, tomax,
6959 			    valoffs++, c);
6960 		}
6961 	}
6962 
6963 	*valp = val;
6964 	*valoffsp = valoffs;
6965 }
6966 
6967 /*
6968  * Disables interrupts and sets the per-thread inprobe flag. When DEBUG is
6969  * defined, we also assert that we are not recursing unless the probe ID is an
6970  * error probe.
6971  */
6972 static dtrace_icookie_t
6973 dtrace_probe_enter(dtrace_id_t id)
6974 {
6975 	thread_t thread = current_thread();
6976 	uint16_t inprobe;
6977 
6978 	dtrace_icookie_t cookie;
6979 
6980 	cookie = dtrace_interrupt_disable();
6981 
6982 	/*
6983 	 * Unless this is an ERROR probe, we are not allowed to recurse in
6984 	 * dtrace_probe(). Recursing into DTrace probe usually means that a
6985 	 * function is instrumented that should not have been instrumented or
6986 	 * that the ordering guarantee of the records will be violated,
6987 	 * resulting in unexpected output. If there is an exception to this
6988 	 * assertion, a new case should be added.
6989 	 */
6990 	inprobe = dtrace_get_thread_inprobe(thread);
6991 	VERIFY(inprobe == 0 ||
6992 	    id == dtrace_probeid_error);
6993 	ASSERT(inprobe < UINT16_MAX);
6994 	dtrace_set_thread_inprobe(thread, inprobe + 1);
6995 
6996 	return (cookie);
6997 }
6998 
6999 /*
7000  * Clears the per-thread inprobe flag and enables interrupts.
7001  */
7002 static void
7003 dtrace_probe_exit(dtrace_icookie_t cookie)
7004 {
7005 	thread_t thread = current_thread();
7006 	uint16_t inprobe = dtrace_get_thread_inprobe(thread);
7007 
7008 	ASSERT(inprobe > 0);
7009 	dtrace_set_thread_inprobe(thread, inprobe - 1);
7010 
7011 #if SCHED_HYGIENE_DEBUG
7012 	/*
7013 	 * Probes can take a relatively long time depending on what the user has
7014 	 * requested be done in probe context.
7015 	 * Probes can fire from places where interrupts are already disabled
7016 	 * (like an interrupt handler) or where preemption has been disabled.
7017 	 * In order to not trip the interrupt or preemption thresholds, it is
7018 	 * important to reset timestamps when leaving probe context.
7019 	 */
7020 
7021 	/* Interrupts were disabled for the duration of this probe. */
7022 	ml_spin_debug_reset(thread);
7023 
7024 	/* May have been called from an interrupt handler. */
7025 	ml_irq_debug_abandon();
7026 
7027 	/* May have been called with preemption disabled. */
7028 	abandon_preemption_disable_measurement();
7029 
7030 #endif /* SCHED_HYGIENE_DEBUG */
7031 
7032 	dtrace_interrupt_enable(cookie);
7033 }
7034 
7035 /*
7036  * If you're looking for the epicenter of DTrace, you just found it.  This
7037  * is the function called by the provider to fire a probe -- from which all
7038  * subsequent probe-context DTrace activity emanates.
7039  */
7040 void
7041 dtrace_probe(dtrace_id_t id, uint64_t arg0, uint64_t arg1,
7042     uint64_t arg2, uint64_t arg3, uint64_t arg4)
7043 {
7044 	processorid_t cpuid;
7045 	dtrace_icookie_t cookie;
7046 	dtrace_probe_t *probe;
7047 	dtrace_mstate_t mstate;
7048 	dtrace_ecb_t *ecb;
7049 	dtrace_action_t *act;
7050 	intptr_t offs;
7051 	size_t size;
7052 	int vtime, onintr;
7053 	volatile uint16_t *flags;
7054 	hrtime_t now;
7055 
7056 	cookie = dtrace_probe_enter(id);
7057 
7058 	/* Ensure that probe id is valid. */
7059 	if (id - 1 >= (dtrace_id_t)dtrace_nprobes) {
7060 		dtrace_probe_exit(cookie);
7061 		return;
7062 	}
7063 
7064 	probe = dtrace_probes[id - 1];
7065 	if (probe == NULL) {
7066 		dtrace_probe_exit(cookie);
7067 		return;
7068 	}
7069 
7070 	cpuid = CPU->cpu_id;
7071 	onintr = CPU_ON_INTR(CPU);
7072 
7073 	if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
7074 	    probe->dtpr_predcache == dtrace_get_thread_predcache(current_thread())) {
7075 		/*
7076 		 * We have hit in the predicate cache; we know that
7077 		 * this predicate would evaluate to be false.
7078 		 */
7079 		dtrace_probe_exit(cookie);
7080 		return;
7081 	}
7082 
7083 	if (panic_quiesce) {
7084 		/*
7085 		 * We don't trace anything if we're panicking.
7086 		 */
7087 		dtrace_probe_exit(cookie);
7088 		return;
7089 	}
7090 
7091 #if !defined(__APPLE__)
7092 	now = dtrace_gethrtime();
7093 	vtime = dtrace_vtime_references != 0;
7094 
7095 	if (vtime && curthread->t_dtrace_start)
7096 		curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
7097 #else
7098 	/*
7099 	 * APPLE NOTE:  The time spent entering DTrace and arriving
7100 	 * at this point is attributed to the current thread.
7101 	 * Instead it should accrue to DTrace.  FIXME
7102 	 */
7103 	vtime = dtrace_vtime_references != 0;
7104 
7105 	if (vtime)
7106 	{
7107 		int64_t dtrace_accum_time, recent_vtime;
7108 		thread_t thread = current_thread();
7109 
7110 		dtrace_accum_time = dtrace_get_thread_tracing(thread); /* Time spent inside DTrace so far (nanoseconds) */
7111 
7112 		if (dtrace_accum_time >= 0) {
7113 			recent_vtime = dtrace_abs_to_nano(dtrace_calc_thread_recent_vtime(thread)); /* up to the moment thread vtime */
7114 
7115 			recent_vtime = recent_vtime - dtrace_accum_time; /* Time without DTrace contribution */
7116 
7117 			dtrace_set_thread_vtime(thread, recent_vtime);
7118 		}
7119 	}
7120 
7121 	now = dtrace_gethrtime(); /* must not precede dtrace_calc_thread_recent_vtime() call! */
7122 #endif /* __APPLE__ */
7123 
7124 	/*
7125 	 * APPLE NOTE: A provider may call dtrace_probe_error() in lieu of
7126 	 * dtrace_probe() in some circumstances.  See, e.g., fasttrap_isa.c.
7127 	 * However, the provider has no access to ECB context, so it passes
7128 	 * 0 through "arg0" and the probe_id of the overridden probe as arg1.
7129 	 * Detect that here and cons up a viable state (from the probe_id).
7130 	 */
7131 	if (dtrace_probeid_error == id && 0 == arg0) {
7132 		dtrace_id_t ftp_id = (dtrace_id_t)arg1;
7133 		dtrace_probe_t *ftp_probe = dtrace_probes[ftp_id - 1];
7134 		dtrace_ecb_t *ftp_ecb = ftp_probe->dtpr_ecb;
7135 
7136 		if (NULL != ftp_ecb) {
7137 			dtrace_state_t *ftp_state = ftp_ecb->dte_state;
7138 
7139 			arg0 = (uint64_t)(uintptr_t)ftp_state;
7140 			arg1 = ftp_ecb->dte_epid;
7141 			/*
7142 			 * args[2-4] established by caller.
7143 			 */
7144 			ftp_state->dts_arg_error_illval = -1; /* arg5 */
7145 		}
7146 	}
7147 
7148 	mstate.dtms_difo = NULL;
7149 	mstate.dtms_probe = probe;
7150 	mstate.dtms_strtok = 0;
7151 	mstate.dtms_arg[0] = arg0;
7152 	mstate.dtms_arg[1] = arg1;
7153 	mstate.dtms_arg[2] = arg2;
7154 	mstate.dtms_arg[3] = arg3;
7155 	mstate.dtms_arg[4] = arg4;
7156 
7157 	flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
7158 
7159 	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
7160 		dtrace_predicate_t *pred = ecb->dte_predicate;
7161 		dtrace_state_t *state = ecb->dte_state;
7162 		dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
7163 		dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
7164 		dtrace_vstate_t *vstate = &state->dts_vstate;
7165 		dtrace_provider_t *prov = probe->dtpr_provider;
7166 		uint64_t tracememsize = 0;
7167 		int committed = 0;
7168 		caddr_t tomax;
7169 
7170 		/*
7171 		 * A little subtlety with the following (seemingly innocuous)
7172 		 * declaration of the automatic 'val':  by looking at the
7173 		 * code, you might think that it could be declared in the
7174 		 * action processing loop, below.  (That is, it's only used in
7175 		 * the action processing loop.)  However, it must be declared
7176 		 * out of that scope because in the case of DIF expression
7177 		 * arguments to aggregating actions, one iteration of the
7178 		 * action loop will use the last iteration's value.
7179 		 */
7180 #ifdef lint
7181 		uint64_t val = 0;
7182 #else
7183 		uint64_t val = 0;
7184 #endif
7185 
7186 		mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
7187 		*flags &= ~CPU_DTRACE_ERROR;
7188 
7189 		if (prov == dtrace_provider) {
7190 			/*
7191 			 * If dtrace itself is the provider of this probe,
7192 			 * we're only going to continue processing the ECB if
7193 			 * arg0 (the dtrace_state_t) is equal to the ECB's
7194 			 * creating state.  (This prevents disjoint consumers
7195 			 * from seeing one another's metaprobes.)
7196 			 */
7197 			if (arg0 != (uint64_t)(uintptr_t)state)
7198 				continue;
7199 		}
7200 
7201 		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
7202 			/*
7203 			 * We're not currently active.  If our provider isn't
7204 			 * the dtrace pseudo provider, we're not interested.
7205 			 */
7206 			if (prov != dtrace_provider)
7207 				continue;
7208 
7209 			/*
7210 			 * Now we must further check if we are in the BEGIN
7211 			 * probe.  If we are, we will only continue processing
7212 			 * if we're still in WARMUP -- if one BEGIN enabling
7213 			 * has invoked the exit() action, we don't want to
7214 			 * evaluate subsequent BEGIN enablings.
7215 			 */
7216 			if (probe->dtpr_id == dtrace_probeid_begin &&
7217 			    state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
7218 				ASSERT(state->dts_activity ==
7219 				    DTRACE_ACTIVITY_DRAINING);
7220 				continue;
7221 			}
7222 		}
7223 
7224 		if (ecb->dte_cond) {
7225 			/*
7226 			 * If the dte_cond bits indicate that this
7227 			 * consumer is only allowed to see user-mode firings
7228 			 * of this probe, call the provider's dtps_usermode()
7229 			 * entry point to check that the probe was fired
7230 			 * while in a user context. Skip this ECB if that's
7231 			 * not the case.
7232 			 */
7233 			if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
7234 			    prov->dtpv_pops.dtps_usermode &&
7235 			    prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
7236 			    probe->dtpr_id, probe->dtpr_arg) == 0)
7237 				continue;
7238 
7239 			/*
7240 			 * This is more subtle than it looks. We have to be
7241 			 * absolutely certain that CRED() isn't going to
7242 			 * change out from under us so it's only legit to
7243 			 * examine that structure if we're in constrained
7244 			 * situations. Currently, the only time we'll do this
7245 			 * check is if a non-super-user has enabled the
7246 			 * profile or syscall providers -- providers that
7247 			 * allow visibility of all processes. For the
7248 			 * profile case, the check above will ensure that
7249 			 * we're examining a user context.
7250 			 */
7251 			if (ecb->dte_cond & DTRACE_COND_OWNER) {
7252 				cred_t *cr;
7253 				cred_t *s_cr =
7254 				    ecb->dte_state->dts_cred.dcr_cred;
7255 				proc_t *proc;
7256 #pragma unused(proc) /* __APPLE__ */
7257 
7258 				ASSERT(s_cr != NULL);
7259 
7260 				/*
7261 				 * XXX this is hackish, but so is setting a variable
7262 				 * XXX in a McCarthy OR...
7263 				 */
7264 				if ((cr = dtrace_CRED()) == NULL ||
7265 				    posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_uid ||
7266 				    posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_ruid ||
7267 				    posix_cred_get(s_cr)->cr_uid != posix_cred_get(cr)->cr_suid ||
7268 				    posix_cred_get(s_cr)->cr_gid != posix_cred_get(cr)->cr_gid ||
7269 				    posix_cred_get(s_cr)->cr_gid != posix_cred_get(cr)->cr_rgid ||
7270 				    posix_cred_get(s_cr)->cr_gid != posix_cred_get(cr)->cr_sgid ||
7271 #if !defined(__APPLE__)
7272 				    (proc = ttoproc(curthread)) == NULL ||
7273 				    (proc->p_flag & SNOCD))
7274 #else
7275 					1) /* APPLE NOTE: Darwin omits "No Core Dump" flag */
7276 #endif /* __APPLE__ */
7277 					continue;
7278 			}
7279 
7280 			if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
7281 				cred_t *cr;
7282 				cred_t *s_cr =
7283 				    ecb->dte_state->dts_cred.dcr_cred;
7284 #pragma unused(cr, s_cr) /* __APPLE__ */
7285 
7286 				ASSERT(s_cr != NULL);
7287 
7288 #if !defined(__APPLE__)
7289 				if ((cr = CRED()) == NULL ||
7290 				    s_cr->cr_zone->zone_id !=
7291 				    cr->cr_zone->zone_id)
7292 					continue;
7293 #else
7294 				/* APPLE NOTE: Darwin doesn't do zones. */
7295 #endif /* __APPLE__ */
7296 			}
7297 		}
7298 
7299 		if (now - state->dts_alive > dtrace_deadman_timeout) {
7300 			/*
7301 			 * We seem to be dead.  Unless we (a) have kernel
7302 			 * destructive permissions, (b) have explicitly enabled
7303 			 * destructive actions, and (c) destructive actions have
7304 			 * not been disabled, we're going to transition into
7305 			 * the KILLED state, from which no further processing
7306 			 * on this state will be performed.
7307 			 */
7308 			if (!dtrace_priv_kernel_destructive(state) ||
7309 			    !state->dts_cred.dcr_destructive ||
7310 			    dtrace_destructive_disallow) {
7311 				void *activity = &state->dts_activity;
7312 				dtrace_activity_t current;
7313 
7314 				do {
7315 					current = state->dts_activity;
7316 				} while (dtrace_cas32(activity, current,
7317 				    DTRACE_ACTIVITY_KILLED) != current);
7318 
7319 				continue;
7320 			}
7321 		}
7322 
7323 		if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7324 		    ecb->dte_alignment, state, &mstate)) < 0)
7325 			continue;
7326 
7327 		tomax = buf->dtb_tomax;
7328 		ASSERT(tomax != NULL);
7329 
7330 		/*
7331 		 * Build and store the record header corresponding to the ECB.
7332 		 */
7333 		if (ecb->dte_size != 0) {
7334 			dtrace_rechdr_t dtrh;
7335 
7336 			if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7337 				mstate.dtms_timestamp = dtrace_gethrtime();
7338 				mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7339 			}
7340 
7341 			ASSERT(ecb->dte_size >= sizeof(dtrace_rechdr_t));
7342 
7343 			dtrh.dtrh_epid = ecb->dte_epid;
7344 			DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, mstate.dtms_timestamp);
7345 			DTRACE_STORE(dtrace_rechdr_t, tomax, offs, dtrh);
7346 		}
7347 
7348 		mstate.dtms_epid = ecb->dte_epid;
7349 		mstate.dtms_present |= DTRACE_MSTATE_EPID;
7350 
7351 		if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7352 			mstate.dtms_access = DTRACE_ACCESS_KERNEL;
7353 		else
7354 			mstate.dtms_access = 0;
7355 
7356 		if (pred != NULL) {
7357 			dtrace_difo_t *dp = pred->dtp_difo;
7358 			uint64_t rval;
7359 
7360 			rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7361 
7362 			if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7363 				dtrace_cacheid_t cid = probe->dtpr_predcache;
7364 
7365 				if (cid != DTRACE_CACHEIDNONE && !onintr) {
7366 					/*
7367 					 * Update the predicate cache...
7368 					 */
7369 					ASSERT(cid == pred->dtp_cacheid);
7370 
7371 					dtrace_set_thread_predcache(current_thread(), cid);
7372 				}
7373 
7374 				continue;
7375 			}
7376 		}
7377 
7378 		for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7379 		    act != NULL; act = act->dta_next) {
7380 			size_t valoffs;
7381 			dtrace_difo_t *dp;
7382 			dtrace_recdesc_t *rec = &act->dta_rec;
7383 
7384 			size = rec->dtrd_size;
7385 			valoffs = offs + rec->dtrd_offset;
7386 
7387 			if (DTRACEACT_ISAGG(act->dta_kind)) {
7388 				uint64_t v = 0xbad;
7389 				dtrace_aggregation_t *agg;
7390 
7391 				agg = (dtrace_aggregation_t *)act;
7392 
7393 				if ((dp = act->dta_difo) != NULL)
7394 					v = dtrace_dif_emulate(dp,
7395 					    &mstate, vstate, state);
7396 
7397 				if (*flags & CPU_DTRACE_ERROR)
7398 					continue;
7399 
7400 				/*
7401 				 * Note that we always pass the expression
7402 				 * value from the previous iteration of the
7403 				 * action loop.  This value will only be used
7404 				 * if there is an expression argument to the
7405 				 * aggregating action, denoted by the
7406 				 * dtag_hasarg field.
7407 				 */
7408 				dtrace_aggregate(agg, buf,
7409 				    offs, aggbuf, v, val);
7410 				continue;
7411 			}
7412 
7413 			switch (act->dta_kind) {
7414 			case DTRACEACT_STOP:
7415 				if (dtrace_priv_proc_destructive(state))
7416 					dtrace_action_stop();
7417 				continue;
7418 
7419 			case DTRACEACT_BREAKPOINT:
7420 				if (dtrace_priv_kernel_destructive(state))
7421 					dtrace_action_breakpoint(ecb);
7422 				continue;
7423 
7424 			case DTRACEACT_PANIC:
7425 				if (dtrace_priv_kernel_destructive(state))
7426 					dtrace_action_panic(ecb);
7427 				continue;
7428 
7429 			case DTRACEACT_STACK:
7430 				if (!dtrace_priv_kernel(state))
7431 					continue;
7432 
7433 				dtrace_getpcstack((pc_t *)(tomax + valoffs),
7434 				    size / sizeof (pc_t), probe->dtpr_aframes,
7435 				    DTRACE_ANCHORED(probe) ? NULL :
7436 				    (uint32_t *)(uintptr_t)arg0);
7437 				continue;
7438 
7439 			case DTRACEACT_JSTACK:
7440 			case DTRACEACT_USTACK:
7441 				if (!dtrace_priv_proc(state))
7442 					continue;
7443 
7444 				/*
7445 				 * See comment in DIF_VAR_PID.
7446 				 */
7447 				if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7448 				    CPU_ON_INTR(CPU)) {
7449 					int depth = DTRACE_USTACK_NFRAMES(
7450 					    rec->dtrd_arg) + 1;
7451 
7452 					dtrace_bzero((void *)(tomax + valoffs),
7453 					    DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7454 					    + depth * sizeof (uint64_t));
7455 
7456 					continue;
7457 				}
7458 
7459 				if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7460 				    curproc->p_dtrace_helpers != NULL) {
7461 					/*
7462 					 * This is the slow path -- we have
7463 					 * allocated string space, and we're
7464 					 * getting the stack of a process that
7465 					 * has helpers.  Call into a separate
7466 					 * routine to perform this processing.
7467 					 */
7468 					dtrace_action_ustack(&mstate, state,
7469 					    (uint64_t *)(tomax + valoffs),
7470 					    rec->dtrd_arg);
7471 					continue;
7472 				}
7473 
7474 				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7475 				dtrace_getupcstack((uint64_t *)
7476 				    (tomax + valoffs),
7477 				    DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7478 				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7479 				continue;
7480 
7481 			default:
7482 				break;
7483 			}
7484 
7485 			dp = act->dta_difo;
7486 			ASSERT(dp != NULL);
7487 
7488 			val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7489 
7490 			if (*flags & CPU_DTRACE_ERROR)
7491 				continue;
7492 
7493 			switch (act->dta_kind) {
7494 			case DTRACEACT_SPECULATE: {
7495 				dtrace_rechdr_t *dtrh = NULL;
7496 
7497 				ASSERT(buf == &state->dts_buffer[cpuid]);
7498 				buf = dtrace_speculation_buffer(state,
7499 				    cpuid, val);
7500 
7501 				if (buf == NULL) {
7502 					*flags |= CPU_DTRACE_DROP;
7503 					continue;
7504 				}
7505 
7506 				offs = dtrace_buffer_reserve(buf,
7507 				    ecb->dte_needed, ecb->dte_alignment,
7508 				    state, NULL);
7509 
7510 				if (offs < 0) {
7511 					*flags |= CPU_DTRACE_DROP;
7512 					continue;
7513 				}
7514 
7515 				tomax = buf->dtb_tomax;
7516 				ASSERT(tomax != NULL);
7517 
7518 				if (ecb->dte_size == 0)
7519 					continue;
7520 
7521 				ASSERT(ecb->dte_size >= sizeof(dtrace_rechdr_t));
7522 				dtrh = ((void *)(tomax + offs));
7523 				dtrh->dtrh_epid = ecb->dte_epid;
7524 
7525 				/*
7526 				 * When the speculation is committed, all of
7527 				 * the records in the speculative buffer will
7528 				 * have their timestamps set to the commit
7529 				 * time.  Until then, it is set to a sentinel
7530 				 * value, for debuggability.
7531 				 */
7532 				DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7533 
7534 				continue;
7535 			}
7536 
7537 			case DTRACEACT_CHILL:
7538 				if (dtrace_priv_kernel_destructive(state))
7539 					dtrace_action_chill(&mstate, val);
7540 				continue;
7541 
7542 			case DTRACEACT_RAISE:
7543 				if (dtrace_priv_proc_destructive(state))
7544 					dtrace_action_raise(val);
7545 				continue;
7546 
7547 			case DTRACEACT_PIDRESUME:   /* __APPLE__ */
7548 				if (dtrace_priv_proc_destructive(state))
7549 					dtrace_action_pidresume(val);
7550 				continue;
7551 
7552 			case DTRACEACT_COMMIT:
7553 				ASSERT(!committed);
7554 
7555 				/*
7556 				 * We need to commit our buffer state.
7557 				 */
7558 				if (ecb->dte_size)
7559 					buf->dtb_offset = offs + ecb->dte_size;
7560 				buf = &state->dts_buffer[cpuid];
7561 				dtrace_speculation_commit(state, cpuid, val);
7562 				committed = 1;
7563 				continue;
7564 
7565 			case DTRACEACT_DISCARD:
7566 				dtrace_speculation_discard(state, cpuid, val);
7567 				continue;
7568 
7569 			case DTRACEACT_DIFEXPR:
7570 			case DTRACEACT_LIBACT:
7571 			case DTRACEACT_PRINTF:
7572 			case DTRACEACT_PRINTA:
7573 			case DTRACEACT_SYSTEM:
7574 			case DTRACEACT_FREOPEN:
7575 			case DTRACEACT_APPLEBINARY:   /* __APPLE__ */
7576 			case DTRACEACT_TRACEMEM:
7577 				break;
7578 
7579 			case DTRACEACT_TRACEMEM_DYNSIZE:
7580 				tracememsize = val;
7581 				break;
7582 
7583 			case DTRACEACT_SYM:
7584 			case DTRACEACT_MOD:
7585 				if (!dtrace_priv_kernel(state))
7586 					continue;
7587 				break;
7588 
7589 			case DTRACEACT_USYM:
7590 			case DTRACEACT_UMOD:
7591 			case DTRACEACT_UADDR: {
7592 				if (!dtrace_priv_proc(state))
7593 					continue;
7594 
7595 				DTRACE_STORE(uint64_t, tomax,
7596 				    valoffs, (uint64_t)dtrace_proc_selfpid());
7597 				DTRACE_STORE(uint64_t, tomax,
7598 				    valoffs + sizeof (uint64_t), val);
7599 
7600 				continue;
7601 			}
7602 
7603 			case DTRACEACT_EXIT: {
7604 				/*
7605 				 * For the exit action, we are going to attempt
7606 				 * to atomically set our activity to be
7607 				 * draining.  If this fails (either because
7608 				 * another CPU has beat us to the exit action,
7609 				 * or because our current activity is something
7610 				 * other than ACTIVE or WARMUP), we will
7611 				 * continue.  This assures that the exit action
7612 				 * can be successfully recorded at most once
7613 				 * when we're in the ACTIVE state.  If we're
7614 				 * encountering the exit() action while in
7615 				 * COOLDOWN, however, we want to honor the new
7616 				 * status code.  (We know that we're the only
7617 				 * thread in COOLDOWN, so there is no race.)
7618 				 */
7619 				void *activity = &state->dts_activity;
7620 				dtrace_activity_t current = state->dts_activity;
7621 
7622 				if (current == DTRACE_ACTIVITY_COOLDOWN)
7623 					break;
7624 
7625 				if (current != DTRACE_ACTIVITY_WARMUP)
7626 					current = DTRACE_ACTIVITY_ACTIVE;
7627 
7628 				if (dtrace_cas32(activity, current,
7629 				    DTRACE_ACTIVITY_DRAINING) != current) {
7630 					*flags |= CPU_DTRACE_DROP;
7631 					continue;
7632 				}
7633 
7634 				break;
7635 			}
7636 
7637 			default:
7638 				ASSERT(0);
7639 			}
7640 
7641 			if (dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF)) {
7642 				uintptr_t end = valoffs + size;
7643 
7644 				if (tracememsize != 0 &&
7645 				    valoffs + tracememsize < end)
7646 				{
7647 					end = valoffs + tracememsize;
7648 					tracememsize = 0;
7649 				}
7650 
7651 				if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7652 				    !dtrace_vcanload((void *)(uintptr_t)val,
7653 				    &dp->dtdo_rtype, NULL, &mstate, vstate))
7654 				{
7655 					continue;
7656 				}
7657 
7658 				dtrace_store_by_ref(dp, tomax, size, &valoffs,
7659 				    &val, end, act->dta_intuple,
7660 				    dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7661 				    DIF_TF_BYREF: DIF_TF_BYUREF);
7662 
7663 				continue;
7664 			}
7665 
7666 			switch (size) {
7667 			case 0:
7668 				break;
7669 
7670 			case sizeof (uint8_t):
7671 				DTRACE_STORE(uint8_t, tomax, valoffs, val);
7672 				break;
7673 			case sizeof (uint16_t):
7674 				DTRACE_STORE(uint16_t, tomax, valoffs, val);
7675 				break;
7676 			case sizeof (uint32_t):
7677 				DTRACE_STORE(uint32_t, tomax, valoffs, val);
7678 				break;
7679 			case sizeof (uint64_t):
7680 				DTRACE_STORE(uint64_t, tomax, valoffs, val);
7681 				break;
7682 			default:
7683 				/*
7684 				 * Any other size should have been returned by
7685 				 * reference, not by value.
7686 				 */
7687 				ASSERT(0);
7688 				break;
7689 			}
7690 		}
7691 
7692 		if (*flags & CPU_DTRACE_DROP)
7693 			continue;
7694 
7695 		if (*flags & CPU_DTRACE_FAULT) {
7696 			int ndx;
7697 			dtrace_action_t *err;
7698 
7699 			buf->dtb_errors++;
7700 
7701 			if (probe->dtpr_id == dtrace_probeid_error) {
7702 				/*
7703 				 * There's nothing we can do -- we had an
7704 				 * error on the error probe.  We bump an
7705 				 * error counter to at least indicate that
7706 				 * this condition happened.
7707 				 */
7708 				dtrace_error(&state->dts_dblerrors);
7709 				continue;
7710 			}
7711 
7712 			if (vtime) {
7713 				/*
7714 				 * Before recursing on dtrace_probe(), we
7715 				 * need to explicitly clear out our start
7716 				 * time to prevent it from being accumulated
7717 				 * into t_dtrace_vtime.
7718 				 */
7719 
7720 				/*
7721 				 * Darwin sets the sign bit on t_dtrace_tracing
7722 				 * to suspend accumulation to it.
7723 				 */
7724 				dtrace_set_thread_tracing(current_thread(),
7725 				    (1ULL<<63) | dtrace_get_thread_tracing(current_thread()));
7726 			}
7727 
7728 			/*
7729 			 * Iterate over the actions to figure out which action
7730 			 * we were processing when we experienced the error.
7731 			 * Note that act points _past_ the faulting action; if
7732 			 * act is ecb->dte_action, the fault was in the
7733 			 * predicate; if it's ecb->dte_action->dta_next it's
7734 			 * in action #1, and so on.
7735 			 */
7736 			for (err = ecb->dte_action, ndx = 0;
7737 			    err != act; err = err->dta_next, ndx++)
7738 				continue;
7739 
7740 			dtrace_probe_error(state, ecb->dte_epid, ndx,
7741 			    (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7742 			    mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7743 			    cpu_core[cpuid].cpuc_dtrace_illval);
7744 
7745 			continue;
7746 		}
7747 
7748 		if (!committed)
7749 			buf->dtb_offset = offs + ecb->dte_size;
7750 	}
7751 
7752 	/* FIXME: On Darwin the time spent leaving DTrace from this point to the rti is attributed
7753 	   to the current thread. Instead it should accrue to DTrace. */
7754 	if (vtime) {
7755 		thread_t thread = current_thread();
7756 		int64_t t = dtrace_get_thread_tracing(thread);
7757 
7758 		if (t >= 0) {
7759 			/* Usual case, accumulate time spent here into t_dtrace_tracing */
7760 			dtrace_set_thread_tracing(thread, t + (dtrace_gethrtime() - now));
7761 		} else {
7762 			/* Return from error recursion. No accumulation, just clear the sign bit on t_dtrace_tracing. */
7763 			dtrace_set_thread_tracing(thread, (~(1ULL<<63)) & t);
7764 		}
7765 	}
7766 
7767 	dtrace_probe_exit(cookie);
7768 }
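
/*
 * Editor's sketch (not part of the original source): a provider fires a
 * probe by passing the probe ID the framework handed back at creation time
 * (e.g. from dtrace_probe_create()) together with up to five argument
 * words.  The names below are hypothetical, for illustration only.
 */
#if 0 /* illustrative only */
	dtrace_probe(my_probe_id, (uint64_t)(uintptr_t)my_arg0, 0, 0, 0, 0);
#endif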
7769 
7770 /*
7771  * DTrace Probe Hashing Functions
7772  *
7773  * The functions in this section (and indeed, the functions in remaining
7774  * sections) are not _called_ from probe context.  (Any exceptions to this are
7775  * marked with a "Note:".)  Rather, they are called from elsewhere in the
7776  * DTrace framework to look-up probes in, add probes to and remove probes from
7777  * the DTrace probe hashes.  (Each probe is hashed by each element of the
7778  * probe tuple -- allowing for fast lookups, regardless of what was
7779  * specified.)
7780  */
7781 static uint_t
7782 dtrace_hash_str(const char *p)
7783 {
7784 	unsigned int g;
7785 	uint_t hval = 0;
7786 
7787 	while (*p) {
7788 		hval = (hval << 4) + *p++;
7789 		if ((g = (hval & 0xf0000000)) != 0)
7790 			hval ^= g >> 24;
7791 		hval &= ~g;
7792 	}
7793 	return (hval);
7794 }
7795 
7796 static const char*
7797 dtrace_strkey_probe_provider(void *elm, uintptr_t offs)
7798 {
7799 #pragma unused(offs)
7800 	dtrace_probe_t *probe = (dtrace_probe_t*)elm;
7801 	return probe->dtpr_provider->dtpv_name;
7802 }
7803 
7804 static const char*
7805 dtrace_strkey_offset(void *elm, uintptr_t offs)
7806 {
7807 	return ((char *)((uintptr_t)(elm) + offs));
7808 }
7809 
7810 static const char*
7811 dtrace_strkey_deref_offset(void *elm, uintptr_t offs)
7812 {
7813 	return *((char **)((uintptr_t)(elm) + offs));
7814 }
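
/*
 * Editor's illustration (not part of the original source): the two helpers
 * above differ only in how the key string is reached from an element --
 * dtrace_strkey_offset() expects the characters to be stored inline at
 * 'offs', while dtrace_strkey_deref_offset() expects a char * there.  The
 * struct below is hypothetical, for illustration only.
 */
#if 0 /* illustrative only */
struct example_elm {
	char *ee_name;		/* key held by pointer: dtrace_strkey_deref_offset */
	char ee_inline[16];	/* key stored inline:   dtrace_strkey_offset */
};
#endif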
7815 
7816 static dtrace_hash_t *
7817 dtrace_hash_create(dtrace_strkey_f func, uintptr_t arg, uintptr_t nextoffs, uintptr_t prevoffs)
7818 {
7819 	dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
7820 
7821 	hash->dth_getstr = func;
7822 	hash->dth_stroffs = arg;
7823 	hash->dth_nextoffs = nextoffs;
7824 	hash->dth_prevoffs = prevoffs;
7825 
7826 	hash->dth_size = 1;
7827 	hash->dth_mask = hash->dth_size - 1;
7828 
7829 	hash->dth_tab = kmem_zalloc(hash->dth_size *
7830 	    sizeof (dtrace_hashbucket_t *), KM_SLEEP);
7831 
7832 	return (hash);
7833 }
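
/*
 * Editor's sketch (not part of the original source): the hash is intrusive,
 * so the caller supplies a key-extraction function plus the offsets of the
 * key and of the next/prev links embedded in each element.  A probe hash
 * keyed on the module name might plausibly be created along these lines
 * (the dtpr_prevmod field name is assumed here for illustration):
 */
#if 0 /* illustrative only */
	dtrace_bymod = dtrace_hash_create(dtrace_strkey_deref_offset,
	    offsetof(dtrace_probe_t, dtpr_mod),
	    offsetof(dtrace_probe_t, dtpr_nextmod),
	    offsetof(dtrace_probe_t, dtpr_prevmod));
#endif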
7834 
7835 /*
7836  * APPLE NOTE: dtrace_hash_destroy is not used.
7837  * It is called by dtrace_detach which is not
7838  * currently implemented.  Revisit someday.
7839  */
7840 #if !defined(__APPLE__)
7841 static void
7842 dtrace_hash_destroy(dtrace_hash_t *hash)
7843 {
7844 #if DEBUG
7845 	int i;
7846 
7847 	for (i = 0; i < hash->dth_size; i++)
7848 		ASSERT(hash->dth_tab[i] == NULL);
7849 #endif
7850 
7851 	kmem_free(hash->dth_tab,
7852 	    hash->dth_size * sizeof (dtrace_hashbucket_t *));
7853 	kmem_free(hash, sizeof (dtrace_hash_t));
7854 }
7855 #endif /* __APPLE__ */
7856 
7857 static void
7858 dtrace_hash_resize(dtrace_hash_t *hash)
7859 {
7860 	int size = hash->dth_size, i, ndx;
7861 	int new_size = hash->dth_size << 1;
7862 	int new_mask = new_size - 1;
7863 	dtrace_hashbucket_t **new_tab, *bucket, *next;
7864 
7865 	ASSERT((new_size & new_mask) == 0);
7866 
7867 	new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
7868 
7869 	for (i = 0; i < size; i++) {
7870 		for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
7871 			void *elm = bucket->dthb_chain;
7872 
7873 			ASSERT(elm != NULL);
7874 			ndx = DTRACE_HASHSTR(hash, elm) & new_mask;
7875 
7876 			next = bucket->dthb_next;
7877 			bucket->dthb_next = new_tab[ndx];
7878 			new_tab[ndx] = bucket;
7879 		}
7880 	}
7881 
7882 	kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
7883 	hash->dth_tab = new_tab;
7884 	hash->dth_size = new_size;
7885 	hash->dth_mask = new_mask;
7886 }
7887 
7888 static void
7889 dtrace_hash_add(dtrace_hash_t *hash, void *new)
7890 {
7891 	int hashval = DTRACE_HASHSTR(hash, new);
7892 	int ndx = hashval & hash->dth_mask;
7893 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7894 	void **nextp, **prevp;
7895 
7896 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7897 		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
7898 			goto add;
7899 	}
7900 
7901 	if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
7902 		dtrace_hash_resize(hash);
7903 		dtrace_hash_add(hash, new);
7904 		return;
7905 	}
7906 
7907 	bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
7908 	bucket->dthb_next = hash->dth_tab[ndx];
7909 	hash->dth_tab[ndx] = bucket;
7910 	hash->dth_nbuckets++;
7911 
7912 add:
7913 	nextp = DTRACE_HASHNEXT(hash, new);
7914 	ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
7915 	*nextp = bucket->dthb_chain;
7916 
7917 	if (bucket->dthb_chain != NULL) {
7918 		prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
7919 		ASSERT(*prevp == NULL);
7920 		*prevp = new;
7921 	}
7922 
7923 	bucket->dthb_chain = new;
7924 	bucket->dthb_len++;
7925 }
7926 
7927 static void *
7928 dtrace_hash_lookup_string(dtrace_hash_t *hash, const char *str)
7929 {
7930 	int hashval = dtrace_hash_str(str);
7931 	int ndx = hashval & hash->dth_mask;
7932 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7933 
7934 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7935 		if (strcmp(str, DTRACE_GETSTR(hash, bucket->dthb_chain)) == 0)
7936 			return (bucket->dthb_chain);
7937 	}
7938 
7939 	return (NULL);
7940 }
7941 
7942 static dtrace_probe_t *
7943 dtrace_hash_lookup(dtrace_hash_t *hash, void *template)
7944 {
7945 	return dtrace_hash_lookup_string(hash, DTRACE_GETSTR(hash, template));
7946 }
7947 
7948 static int
7949 dtrace_hash_collisions(dtrace_hash_t *hash, void *template)
7950 {
7951 	int hashval = DTRACE_HASHSTR(hash, template);
7952 	int ndx = hashval & hash->dth_mask;
7953 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7954 
7955 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7956 		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
7957 			return (bucket->dthb_len);
7958 	}
7959 
7960 	return (0);
7961 }
7962 
7963 static void
7964 dtrace_hash_remove(dtrace_hash_t *hash, void *elm)
7965 {
7966 	int ndx = DTRACE_HASHSTR(hash, elm) & hash->dth_mask;
7967 	dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
7968 
7969 	void **prevp = DTRACE_HASHPREV(hash, elm);
7970 	void **nextp = DTRACE_HASHNEXT(hash, elm);
7971 
7972 	/*
7973 	 * Find the bucket that we're removing this elm from.
7974 	 */
7975 	for (; bucket != NULL; bucket = bucket->dthb_next) {
7976 		if (DTRACE_HASHEQ(hash, bucket->dthb_chain, elm))
7977 			break;
7978 	}
7979 
7980 	ASSERT(bucket != NULL);
7981 
7982 	if (*prevp == NULL) {
7983 		if (*nextp == NULL) {
7984 			/*
7985 			 * The removed element was the only element on this
7986 			 * bucket; we need to remove the bucket.
7987 			 */
7988 			dtrace_hashbucket_t *b = hash->dth_tab[ndx];
7989 
7990 			ASSERT(bucket->dthb_chain == elm);
7991 			ASSERT(b != NULL);
7992 
7993 			if (b == bucket) {
7994 				hash->dth_tab[ndx] = bucket->dthb_next;
7995 			} else {
7996 				while (b->dthb_next != bucket)
7997 					b = b->dthb_next;
7998 				b->dthb_next = bucket->dthb_next;
7999 			}
8000 
8001 			ASSERT(hash->dth_nbuckets > 0);
8002 			hash->dth_nbuckets--;
8003 			kmem_free(bucket, sizeof (dtrace_hashbucket_t));
8004 			return;
8005 		}
8006 
8007 		bucket->dthb_chain = *nextp;
8008 	} else {
8009 		*(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
8010 	}
8011 
8012 	if (*nextp != NULL)
8013 		*(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
8014 }
8015 
8016 /*
8017  * DTrace Utility Functions
8018  *
8019  * These are random utility functions that are _not_ called from probe context.
8020  */
8021 static int
8022 dtrace_badattr(const dtrace_attribute_t *a)
8023 {
8024 	return (a->dtat_name > DTRACE_STABILITY_MAX ||
8025 	    a->dtat_data > DTRACE_STABILITY_MAX ||
8026 	    a->dtat_class > DTRACE_CLASS_MAX);
8027 }
8028 
8029 /*
8030  * Returns a dtrace-managed copy of a string, and will
8031  * deduplicate copies of the same string.
8032  * If the specified string is NULL, returns an empty string.
8033  */
8034 static char *
8035 dtrace_strref(const char *str)
8036 {
8037 	dtrace_string_t *s = NULL;
8038 	size_t bufsize = (str != NULL ? strlen(str) : 0) + 1;
8039 
8040 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8041 
8042 	if (str == NULL)
8043 		str = "";
8044 
8045 	for (s = dtrace_hash_lookup_string(dtrace_strings, str); s != NULL;
8046 	     s = *(DTRACE_HASHNEXT(dtrace_strings, s)))  {
8047 		if (strncmp(str, s->dtst_str, bufsize) != 0) {
8048 			continue;
8049 		}
8050 		ASSERT(s->dtst_refcount != UINT32_MAX);
8051 		s->dtst_refcount++;
8052 		return s->dtst_str;
8053 	}
8054 
8055 	s = kmem_zalloc(sizeof(dtrace_string_t) + bufsize, KM_SLEEP);
8056 	s->dtst_refcount = 1;
8057 	(void) strlcpy(s->dtst_str, str, bufsize);
8058 
8059 	dtrace_hash_add(dtrace_strings, s);
8060 
8061 	return s->dtst_str;
8062 }
8063 
8064 static void
8065 dtrace_strunref(const char *str)
8066 {
8067 	ASSERT(str != NULL);
8068 	dtrace_string_t *s = NULL;
8069 	size_t bufsize = strlen(str) + 1;
8070 
8071 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8072 
8073 	for (s = dtrace_hash_lookup_string(dtrace_strings, str); s != NULL;
8074 	     s = *(DTRACE_HASHNEXT(dtrace_strings, s)))  {
8075 		if (strncmp(str, s->dtst_str, bufsize) != 0) {
8076 			continue;
8077 		}
8078 		ASSERT(s->dtst_refcount != 0);
8079 		s->dtst_refcount--;
8080 		if (s->dtst_refcount == 0) {
8081 			dtrace_hash_remove(dtrace_strings, s);
8082 			kmem_free(s, sizeof(dtrace_string_t) + bufsize);
8083 		}
8084 		return;
8085 	}
8086 	panic("attempt to unref non-existent string %s", str);
8087 }
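
/*
 * Editor's illustration (not part of the original source): references taken
 * with dtrace_strref() must be balanced by dtrace_strunref(), and both
 * require dtrace_lock to be held.  Equal strings alias the same interned
 * copy until the last reference is dropped.
 */
#if 0 /* illustrative only */
	{
		char *a, *b;

		lck_mtx_lock(&dtrace_lock);
		a = dtrace_strref("fbt");
		b = dtrace_strref("fbt");
		ASSERT(a == b);		/* deduplicated: same interned copy */
		dtrace_strunref(b);
		dtrace_strunref(a);	/* last unref frees the entry */
		lck_mtx_unlock(&dtrace_lock);
	}
#endif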
8088 
8089 #define	DTRACE_ISALPHA(c)	\
8090 	(((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
8091 
8092 static int
8093 dtrace_badname(const char *s)
8094 {
8095 	char c;
8096 
8097 	if (s == NULL || (c = *s++) == '\0')
8098 		return (0);
8099 
8100 	if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
8101 		return (1);
8102 
8103 	while ((c = *s++) != '\0') {
8104 		if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
8105 		    c != '-' && c != '_' && c != '.' && c != '`')
8106 			return (1);
8107 	}
8108 
8109 	return (0);
8110 }
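
/*
 * Editor's illustration (not part of the original source): names must begin
 * with an alphabetic character (or '-', '_', '.'), and may additionally
 * contain digits and '`' thereafter.
 */
#if 0 /* illustrative only */
	ASSERT(dtrace_badname("fbt") == 0);		/* valid */
	ASSERT(dtrace_badname("my_provider.2") == 0);	/* valid */
	ASSERT(dtrace_badname("2fast") == 1);		/* invalid: leading digit */
#endif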
8111 
8112 static void
8113 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
8114 {
8115 	uint32_t priv;
8116 
8117 	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
8118 		if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
8119 			priv = DTRACE_PRIV_USER | DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER;
8120 		}
8121 		else {
8122 			priv = DTRACE_PRIV_ALL;
8123 		}
8124 		*uidp = 0;
8125 		*zoneidp = 0;
8126 	} else {
8127 		*uidp = crgetuid(cr);
8128 		*zoneidp = crgetzoneid(cr);
8129 
8130 		priv = 0;
8131 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
8132 			priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
8133 		else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
8134 			priv |= DTRACE_PRIV_USER;
8135 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
8136 			priv |= DTRACE_PRIV_PROC;
8137 		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
8138 			priv |= DTRACE_PRIV_OWNER;
8139 		if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
8140 			priv |= DTRACE_PRIV_ZONEOWNER;
8141 	}
8142 
8143 	*privp = priv;
8144 }
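
/*
 * Editor's illustration (not part of the original source): the fully
 * privileged end of the mapping above, written as assertions.  A NULL (or
 * all-privileged) credential yields DTRACE_PRIV_ALL, unless DTrace is
 * restricted, in which case only user/proc/owner visibility is granted.
 */
#if 0 /* illustrative only */
	{
		uint32_t priv;
		uid_t uid;
		zoneid_t zoneid;

		dtrace_cred2priv(NULL, &priv, &uid, &zoneid);
		ASSERT(uid == 0 && zoneid == 0);
		ASSERT(priv == DTRACE_PRIV_ALL ||
		    priv == (DTRACE_PRIV_USER | DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER));
	}
#endif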
8145 
8146 #ifdef DTRACE_ERRDEBUG
8147 static void
8148 dtrace_errdebug(const char *str)
8149 {
8150 	int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
8151 	int occupied = 0;
8152 
8153 	lck_mtx_lock(&dtrace_errlock);
8154 	dtrace_errlast = str;
8155 	dtrace_errthread = (kthread_t *)current_thread();
8156 
8157 	while (occupied++ < DTRACE_ERRHASHSZ) {
8158 		if (dtrace_errhash[hval].dter_msg == str) {
8159 			dtrace_errhash[hval].dter_count++;
8160 			goto out;
8161 		}
8162 
8163 		if (dtrace_errhash[hval].dter_msg != NULL) {
8164 			hval = (hval + 1) % DTRACE_ERRHASHSZ;
8165 			continue;
8166 		}
8167 
8168 		dtrace_errhash[hval].dter_msg = str;
8169 		dtrace_errhash[hval].dter_count = 1;
8170 		goto out;
8171 	}
8172 
8173 	panic("dtrace: undersized error hash");
8174 out:
8175 	lck_mtx_unlock(&dtrace_errlock);
8176 }
8177 #endif
8178 
8179 /*
8180  * DTrace Matching Functions
8181  *
8182  * These functions are used to match groups of probes, given some elements of
8183  * a probe tuple, or some globbed expressions for elements of a probe tuple.
8184  */
8185 static int
8186 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
8187     zoneid_t zoneid)
8188 {
8189 	if (priv != DTRACE_PRIV_ALL) {
8190 		uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
8191 		uint32_t match = priv & ppriv;
8192 
8193 		/*
8194 		 * No PRIV_DTRACE_* privileges...
8195 		 */
8196 		if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
8197 		    DTRACE_PRIV_KERNEL)) == 0)
8198 			return (0);
8199 
8200 		/*
8201 		 * No matching bits, but there were bits to match...
8202 		 */
8203 		if (match == 0 && ppriv != 0)
8204 			return (0);
8205 
8206 		/*
8207 		 * Need to have permissions to the process, but don't...
8208 		 */
8209 		if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
8210 		    uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
8211 			return (0);
8212 		}
8213 
8214 		/*
8215 		 * Need to be in the same zone unless we possess the
8216 		 * privilege to examine all zones.
8217 		 */
8218 		if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
8219 		    zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
8220 			return (0);
8221 		}
8222 	}
8223 
8224 	return (1);
8225 }
8226 
8227 /*
8228  * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
8229  * consists of input pattern strings and an ops-vector to evaluate them.
8230  * This function returns >0 for match, 0 for no match, and <0 for error.
8231  */
8232 static int
8233 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
8234     uint32_t priv, uid_t uid, zoneid_t zoneid)
8235 {
8236 	dtrace_provider_t *pvp = prp->dtpr_provider;
8237 	int rv;
8238 
8239 	if (pvp->dtpv_defunct)
8240 		return (0);
8241 
8242 	if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
8243 		return (rv);
8244 
8245 	if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
8246 		return (rv);
8247 
8248 	if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
8249 		return (rv);
8250 
8251 	if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
8252 		return (rv);
8253 
8254 	if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
8255 		return (0);
8256 
8257 	return (rv);
8258 }
8259 
8260 /*
8261  * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
8262  * interface for matching a glob pattern 'p' to an input string 's'.  Unlike
8263  * libc's version, the kernel version only applies to 8-bit ASCII strings.
8264  * In addition, all of the recursion cases except for '*' matching have been
8265  * unwound.  For '*', we still implement recursive evaluation, but a depth
8266  * counter is maintained and matching is aborted if we recurse too deep.
8267  * The function returns 0 if no match, >0 if match, and <0 if recursion error.
8268  */
8269 static int
8270 dtrace_match_glob(const char *s, const char *p, int depth)
8271 {
8272 	const char *olds;
8273 	char s1, c;
8274 	int gs;
8275 
8276 	if (depth > DTRACE_PROBEKEY_MAXDEPTH)
8277 		return (-1);
8278 
8279 	if (s == NULL)
8280 		s = ""; /* treat NULL as empty string */
8281 
8282 top:
8283 	olds = s;
8284 	s1 = *s++;
8285 
8286 	if (p == NULL)
8287 		return (0);
8288 
8289 	if ((c = *p++) == '\0')
8290 		return (s1 == '\0');
8291 
8292 	switch (c) {
8293 	case '[': {
8294 		int ok = 0, notflag = 0;
8295 		char lc = '\0';
8296 
8297 		if (s1 == '\0')
8298 			return (0);
8299 
8300 		if (*p == '!') {
8301 			notflag = 1;
8302 			p++;
8303 		}
8304 
8305 		if ((c = *p++) == '\0')
8306 			return (0);
8307 
8308 		do {
8309 			if (c == '-' && lc != '\0' && *p != ']') {
8310 				if ((c = *p++) == '\0')
8311 					return (0);
8312 				if (c == '\\' && (c = *p++) == '\0')
8313 					return (0);
8314 
8315 				if (notflag) {
8316 					if (s1 < lc || s1 > c)
8317 						ok++;
8318 					else
8319 						return (0);
8320 				} else if (lc <= s1 && s1 <= c)
8321 					ok++;
8322 
8323 			} else if (c == '\\' && (c = *p++) == '\0')
8324 				return (0);
8325 
8326 			lc = c; /* save left-hand 'c' for next iteration */
8327 
8328 			if (notflag) {
8329 				if (s1 != c)
8330 					ok++;
8331 				else
8332 					return (0);
8333 			} else if (s1 == c)
8334 				ok++;
8335 
8336 			if ((c = *p++) == '\0')
8337 				return (0);
8338 
8339 		} while (c != ']');
8340 
8341 		if (ok)
8342 			goto top;
8343 
8344 		return (0);
8345 	}
8346 
8347 	case '\\':
8348 		if ((c = *p++) == '\0')
8349 			return (0);
8350 		OS_FALLTHROUGH;
8351 
8352 	default:
8353 		if (c != s1)
8354 			return (0);
8355 		OS_FALLTHROUGH;
8356 
8357 	case '?':
8358 		if (s1 != '\0')
8359 			goto top;
8360 		return (0);
8361 
8362 	case '*':
8363 		while (*p == '*')
8364 			p++; /* consecutive *'s are identical to a single one */
8365 
8366 		if (*p == '\0')
8367 			return (1);
8368 
8369 		for (s = olds; *s != '\0'; s++) {
8370 			if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
8371 				return (gs);
8372 		}
8373 
8374 		return (0);
8375 	}
8376 }
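
/*
 * Editor's illustration (not part of the original source): a few example
 * evaluations of dtrace_match_glob(), documenting the glob semantics
 * described above.
 */
#if 0 /* illustrative only */
	ASSERT(dtrace_match_glob("syscall", "sys*", 0) > 0);	/* '*' matches any suffix */
	ASSERT(dtrace_match_glob("read", "[rw]*", 0) > 0);	/* character class */
	ASSERT(dtrace_match_glob("write", "rea?", 0) == 0);	/* no match */
	ASSERT(dtrace_match_glob(NULL, "", 0) > 0);		/* NULL is treated as "" */
#endif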
8377 
8378 /*ARGSUSED*/
8379 static int
8380 dtrace_match_string(const char *s, const char *p, int depth)
8381 {
8382 #pragma unused(depth) /* __APPLE__ */
8383 	return (s != NULL && s == p);
8384 }
8385 
8386 /*ARGSUSED*/
8387 static int
8388 dtrace_match_module(const char *s, const char *p, int depth)
8389 {
8390 #pragma unused(depth) /* __APPLE__ */
8391 	size_t len;
8392 	if (s == NULL || p == NULL)
8393 		return (0);
8394 
8395 	len = strlen(p);
8396 
8397 	if (strncmp(p, s, len) != 0)
8398 		return (0);
8399 
8400 	if (s[len] == '.' || s[len] == '\0')
8401 		return (1);
8402 
8403 	return (0);
8404 }
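
/*
 * Editor's illustration (not part of the original source): a non-glob module
 * pattern also matches a module name carrying a '.'-separated suffix.
 */
#if 0 /* illustrative only */
	ASSERT(dtrace_match_module("foo", "foo", 0) == 1);	/* exact match */
	ASSERT(dtrace_match_module("foo.2", "foo", 0) == 1);	/* dotted suffix */
	ASSERT(dtrace_match_module("foobar", "foo", 0) == 0);	/* plain prefix: no match */
#endif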
8405 
8406 /*ARGSUSED*/
8407 static int
8408 dtrace_match_nul(const char *s, const char *p, int depth)
8409 {
8410 #pragma unused(s, p, depth) /* __APPLE__ */
8411 	return (1); /* always match the empty pattern */
8412 }
8413 
8414 /*ARGSUSED*/
8415 static int
8416 dtrace_match_nonzero(const char *s, const char *p, int depth)
8417 {
8418 #pragma unused(p, depth) /* __APPLE__ */
8419 	return (s != NULL && s[0] != '\0');
8420 }
8421 
8422 static int
8423 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
8424     zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *, void *), void *arg1, void *arg2)
8425 {
8426 	dtrace_probe_t *probe;
8427 	dtrace_provider_t prov_template = {
8428 		.dtpv_name = (char *)(uintptr_t)pkp->dtpk_prov
8429 	};
8430 
8431 	dtrace_probe_t template = {
8432 		.dtpr_provider = &prov_template,
8433 		.dtpr_mod = (char *)(uintptr_t)pkp->dtpk_mod,
8434 		.dtpr_func = (char *)(uintptr_t)pkp->dtpk_func,
8435 		.dtpr_name = (char *)(uintptr_t)pkp->dtpk_name
8436 	};
8437 
8438 	dtrace_hash_t *hash = NULL;
8439 	int len, rc, best = INT_MAX, nmatched = 0;
8440 	dtrace_id_t i;
8441 
8442 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8443 
8444 	/*
8445 	 * If the probe ID is specified in the key, just lookup by ID and
8446 	 * invoke the match callback once if a matching probe is found.
8447 	 */
8448 	if (pkp->dtpk_id != DTRACE_IDNONE) {
8449 		if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8450 		    dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
8451 		        if ((*matched)(probe, arg1, arg2) == DTRACE_MATCH_FAIL)
8452                                return (DTRACE_MATCH_FAIL);
8453 			nmatched++;
8454 		}
8455 		return (nmatched);
8456 	}
8457 
8458 	/*
8459 	 * We want to find the most distinct of the provider name, module name,
8460 	 * function name, and name.  So for each one that is not a glob
8461 	 * pattern or empty string, we perform a lookup in the corresponding
8462 	 * hash and use the hash table with the fewest collisions to do our
8463 	 * search.
8464 	 */
8465 	if (pkp->dtpk_pmatch == &dtrace_match_string &&
8466 	    (len = dtrace_hash_collisions(dtrace_byprov, &template)) < best) {
8467 		best = len;
8468 		hash = dtrace_byprov;
8469 	}
8470 
8471 	if (pkp->dtpk_mmatch == &dtrace_match_string &&
8472 	    (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8473 		best = len;
8474 		hash = dtrace_bymod;
8475 	}
8476 
8477 	if (pkp->dtpk_fmatch == &dtrace_match_string &&
8478 	    (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8479 		best = len;
8480 		hash = dtrace_byfunc;
8481 	}
8482 
8483 	if (pkp->dtpk_nmatch == &dtrace_match_string &&
8484 	    (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8485 		best = len;
8486 		hash = dtrace_byname;
8487 	}
8488 
8489 	/*
8490 	 * If we did not select a hash table, iterate over every probe and
8491 	 * invoke our callback for each one that matches our input probe key.
8492 	 */
8493 	if (hash == NULL) {
8494 		for (i = 0; i < (dtrace_id_t)dtrace_nprobes; i++) {
8495 			if ((probe = dtrace_probes[i]) == NULL ||
8496 			    dtrace_match_probe(probe, pkp, priv, uid,
8497 			    zoneid) <= 0)
8498 				continue;
8499 
8500 			nmatched++;
8501 
8502 			if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) {
8503 				if (rc == DTRACE_MATCH_FAIL)
8504 					return (DTRACE_MATCH_FAIL);
8505 				break;
8506 			}
8507 		}
8508 
8509 		return (nmatched);
8510 	}
8511 
8512 	/*
8513 	 * If we selected a hash table, iterate over each probe of the same key
8514 	 * name and invoke the callback for every probe that matches the other
8515 	 * attributes of our input probe key.
8516 	 */
8517 	for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8518 	    probe = *(DTRACE_HASHNEXT(hash, probe))) {
8519 
8520 		if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8521 			continue;
8522 
8523 		nmatched++;
8524 
8525 		if ((rc = (*matched)(probe, arg1, arg2)) != DTRACE_MATCH_NEXT) {
8526 			if (rc == DTRACE_MATCH_FAIL)
8527 				return (DTRACE_MATCH_FAIL);
8528 			break;
8529 		}
8530 	}
8531 
8532 	return (nmatched);
8533 }
8534 
8535 /*
8536  * Return the function pointer dtrace_probecmp() should use to compare the
8537  * specified pattern with a string.  For NULL or empty patterns, we select
8538  * dtrace_match_nul().  For glob pattern strings, we use dtrace_match_glob().
8539  * For non-empty non-glob strings, we use dtrace_match_string().
8540  */
8541 static dtrace_probekey_f *
8542 dtrace_probekey_func(const char *p)
8543 {
8544 	char c;
8545 
8546 	if (p == NULL || *p == '\0')
8547 		return (&dtrace_match_nul);
8548 
8549 	while ((c = *p++) != '\0') {
8550 		if (c == '[' || c == '?' || c == '*' || c == '\\')
8551 			return (&dtrace_match_glob);
8552 	}
8553 
8554 	return (&dtrace_match_string);
8555 }
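
/*
 * Editor's illustration (not part of the original source): the matcher
 * selected for a few representative description components.
 */
#if 0 /* illustrative only */
	ASSERT(dtrace_probekey_func(NULL) == &dtrace_match_nul);
	ASSERT(dtrace_probekey_func("") == &dtrace_match_nul);
	ASSERT(dtrace_probekey_func("entry") == &dtrace_match_string);
	ASSERT(dtrace_probekey_func("read*") == &dtrace_match_glob);
#endif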
8556 
8557 static dtrace_probekey_f *
8558 dtrace_probekey_module_func(const char *p)
8559 {
8560 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8561 
8562 	dtrace_probekey_f *f = dtrace_probekey_func(p);
8563 	if (f == &dtrace_match_string) {
8564 		dtrace_probe_t template = {
8565 			.dtpr_mod = (char *)(uintptr_t)p,
8566 		};
8567 		if (dtrace_hash_lookup(dtrace_bymod, &template) == NULL) {
8568 			return (&dtrace_match_module);
8569 		}
8570 		return (&dtrace_match_string);
8571 	}
8572 	return f;
8573 }
8574 
8575 /*
8576  * Build a probe comparison key for use with dtrace_match_probe() from the
8577  * given probe description.  By convention, a null key only matches anchored
8578  * probes: if each field is the empty string, reset dtpk_fmatch to
8579  * dtrace_match_nonzero().
8580  */
8581 static void
8582 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8583 {
8584 
8585 	pkp->dtpk_prov = dtrace_strref(pdp->dtpd_provider);
8586 	pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8587 
8588 	pkp->dtpk_mod = dtrace_strref(pdp->dtpd_mod);
8589 	pkp->dtpk_mmatch = dtrace_probekey_module_func(pdp->dtpd_mod);
8590 
8591 	pkp->dtpk_func = dtrace_strref(pdp->dtpd_func);
8592 	pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8593 
8594 	pkp->dtpk_name = dtrace_strref(pdp->dtpd_name);
8595 	pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8596 
8597 	pkp->dtpk_id = pdp->dtpd_id;
8598 
8599 	if (pkp->dtpk_id == DTRACE_IDNONE &&
8600 	    pkp->dtpk_pmatch == &dtrace_match_nul &&
8601 	    pkp->dtpk_mmatch == &dtrace_match_nul &&
8602 	    pkp->dtpk_fmatch == &dtrace_match_nul &&
8603 	    pkp->dtpk_nmatch == &dtrace_match_nul)
8604 		pkp->dtpk_fmatch = &dtrace_match_nonzero;
8605 }
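
/*
 * Editor's illustration (not part of the original source): an entirely empty
 * description (no probe ID, all four components empty) therefore ends up
 * with dtpk_fmatch set to dtrace_match_nonzero(), so it matches only probes
 * with a non-empty function name, i.e. anchored probes.
 */
#if 0 /* illustrative only */
	{
		dtrace_probedesc_t pd = { 0 };	/* empty description */
		dtrace_probekey_t pk;

		lck_mtx_lock(&dtrace_lock);
		dtrace_probekey(&pd, &pk);
		ASSERT(pk.dtpk_fmatch == &dtrace_match_nonzero);
		dtrace_probekey_release(&pk);
		lck_mtx_unlock(&dtrace_lock);
	}
#endif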
8606 
8607 static void
8608 dtrace_probekey_release(dtrace_probekey_t *pkp)
8609 {
8610 	dtrace_strunref(pkp->dtpk_prov);
8611 	dtrace_strunref(pkp->dtpk_mod);
8612 	dtrace_strunref(pkp->dtpk_func);
8613 	dtrace_strunref(pkp->dtpk_name);
8614 }
8615 
8616 static int
8617 dtrace_cond_provider_match(dtrace_probedesc_t *desc, void *data)
8618 {
8619 	if (desc == NULL)
8620 		return 1;
8621 
8622 	dtrace_probekey_f *func = dtrace_probekey_func(desc->dtpd_provider);
8623 
8624 	return func((char*)data, desc->dtpd_provider, 0);
8625 }
8626 
8627 /*
8628  * DTrace Provider-to-Framework API Functions
8629  *
8630  * These functions implement much of the Provider-to-Framework API, as
8631  * described in <sys/dtrace.h>.  The parts of the API not in this section are
8632  * the functions in the API for probe management (found below), and
8633  * dtrace_probe() itself (found above).
8634  */
8635 
8636 /*
8637  * Register the calling provider with the DTrace framework.  This should
8638  * generally be called by DTrace providers in their attach(9E) entry point.
8639  */
8640 int
8641 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8642     cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8643 {
8644 	dtrace_provider_t *provider;
8645 
8646 	if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8647 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8648 		    "arguments", name ? name : "<NULL>");
8649 		return (EINVAL);
8650 	}
8651 
8652 	if (name[0] == '\0' || dtrace_badname(name)) {
8653 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8654 		    "provider name", name);
8655 		return (EINVAL);
8656 	}
8657 
8658 	if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8659 	    pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8660 	    pops->dtps_destroy == NULL ||
8661 	    ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8662 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8663 		    "provider ops", name);
8664 		return (EINVAL);
8665 	}
8666 
8667 	if (dtrace_badattr(&pap->dtpa_provider) ||
8668 	    dtrace_badattr(&pap->dtpa_mod) ||
8669 	    dtrace_badattr(&pap->dtpa_func) ||
8670 	    dtrace_badattr(&pap->dtpa_name) ||
8671 	    dtrace_badattr(&pap->dtpa_args)) {
8672 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8673 		    "provider attributes", name);
8674 		return (EINVAL);
8675 	}
8676 
8677 	if (priv & ~DTRACE_PRIV_ALL) {
8678 		cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8679 		    "privilege attributes", name);
8680 		return (EINVAL);
8681 	}
8682 
8683 	if ((priv & DTRACE_PRIV_KERNEL) &&
8684 	    (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8685 	    pops->dtps_usermode == NULL) {
8686 		cmn_err(CE_WARN, "failed to register provider '%s': need "
8687 		    "dtps_usermode() op for given privilege attributes", name);
8688 		return (EINVAL);
8689 	}
8690 
8691 	provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
8692 
8693 	provider->dtpv_attr = *pap;
8694 	provider->dtpv_priv.dtpp_flags = priv;
8695 	if (cr != NULL) {
8696 		provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8697 		provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8698 	}
8699 	provider->dtpv_pops = *pops;
8700 
8701 	if (pops->dtps_provide == NULL) {
8702 		ASSERT(pops->dtps_provide_module != NULL);
8703 		provider->dtpv_pops.dtps_provide = dtrace_provide_nullop;
8704 	}
8705 
8706 	if (pops->dtps_provide_module == NULL) {
8707 		ASSERT(pops->dtps_provide != NULL);
8708 		provider->dtpv_pops.dtps_provide_module =
8709 		    dtrace_provide_module_nullop;
8710 	}
8711 
8712 	if (pops->dtps_suspend == NULL) {
8713 		ASSERT(pops->dtps_resume == NULL);
8714 		provider->dtpv_pops.dtps_suspend = dtrace_suspend_nullop;
8715 		provider->dtpv_pops.dtps_resume = dtrace_resume_nullop;
8716 	}
8717 
8718 	provider->dtpv_arg = arg;
8719 	*idp = (dtrace_provider_id_t)provider;
8720 
8721 	if (pops == &dtrace_provider_ops) {
8722 		LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
8723 		LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8724 
8725 		provider->dtpv_name = dtrace_strref(name);
8726 
8727 		ASSERT(dtrace_anon.dta_enabling == NULL);
8728 
8729 		/*
8730 		 * We make sure that the DTrace provider is at the head of
8731 		 * the provider chain.
8732 		 */
8733 		provider->dtpv_next = dtrace_provider;
8734 		dtrace_provider = provider;
8735 		return (0);
8736 	}
8737 
8738 	lck_mtx_lock(&dtrace_provider_lock);
8739 	lck_mtx_lock(&dtrace_lock);
8740 
8741 	provider->dtpv_name = dtrace_strref(name);
8742 
8743 	/*
8744 	 * If there is at least one provider registered, we'll add this
8745 	 * provider after the first provider.
8746 	 */
8747 	if (dtrace_provider != NULL) {
8748 		provider->dtpv_next = dtrace_provider->dtpv_next;
8749 		dtrace_provider->dtpv_next = provider;
8750 	} else {
8751 		dtrace_provider = provider;
8752 	}
8753 
8754 	if (dtrace_retained != NULL) {
8755 		dtrace_enabling_provide(provider);
8756 
8757 		/*
8758 		 * Now we need to call dtrace_enabling_matchall_with_cond() --
8759 		 * with a condition matching the provider name we just added,
8760 		 * which will acquire cpu_lock and dtrace_lock.  We therefore need
8761 		 * to drop all of our locks before calling into it...
8762 		 */
8763 		lck_mtx_unlock(&dtrace_lock);
8764 		lck_mtx_unlock(&dtrace_provider_lock);
8765 
8766 		dtrace_match_cond_t cond = {dtrace_cond_provider_match, provider->dtpv_name};
8767 		dtrace_enabling_matchall_with_cond(&cond);
8768 
8769 		return (0);
8770 	}
8771 
8772 	lck_mtx_unlock(&dtrace_lock);
8773 	lck_mtx_unlock(&dtrace_provider_lock);
8774 
8775 	return (0);
8776 }
8777 
8778 /*
8779  * Unregister the specified provider from the DTrace framework.  This should
8780  * generally be called by DTrace providers in their detach(9E) entry point.
8781  */
8782 int
8783 dtrace_unregister(dtrace_provider_id_t id)
8784 {
8785 	dtrace_provider_t *old = (dtrace_provider_t *)id;
8786 	dtrace_provider_t *prev = NULL;
8787 	int self = 0;
8788 	dtrace_probe_t *probe, *first = NULL, *next = NULL;
8789 	dtrace_probe_t template = {
8790 		.dtpr_provider = old
8791 	};
8792 
8793 	if (old->dtpv_pops.dtps_enable ==
8794 	    (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
8795 		/*
8796 		 * If DTrace itself is the provider, we're called with locks
8797 		 * already held.
8798 		 */
8799 		ASSERT(old == dtrace_provider);
8800 		ASSERT(dtrace_devi != NULL);
8801 		LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
8802 		LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
8803 		self = 1;
8804 
8805 		if (dtrace_provider->dtpv_next != NULL) {
8806 			/*
8807 			 * There's another provider here; return failure.
8808 			 */
8809 			return (EBUSY);
8810 		}
8811 	} else {
8812 		lck_mtx_lock(&dtrace_provider_lock);
8813 		lck_mtx_lock(&mod_lock);
8814 		lck_mtx_lock(&dtrace_lock);
8815 	}
8816 
8817 	/*
8818 	 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8819 	 * probes, we refuse to let providers slither away, unless this
8820 	 * provider has already been explicitly invalidated.
8821 	 */
8822 	if (!old->dtpv_defunct &&
8823 	    (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8824 	    dtrace_anon.dta_state->dts_necbs > 0))) {
8825 		if (!self) {
8826 			lck_mtx_unlock(&dtrace_lock);
8827 			lck_mtx_unlock(&mod_lock);
8828 			lck_mtx_unlock(&dtrace_provider_lock);
8829 		}
8830 		return (EBUSY);
8831 	}
8832 
8833 	/*
8834 	 * Attempt to destroy the probes associated with this provider.
8835 	 */
8836 	if (old->dtpv_ecb_count != 0) {
8837 		/*
8838 		 * We have at least one ECB; we can't remove this provider.
8839 		 */
8840 		if (!self) {
8841 			lck_mtx_unlock(&dtrace_lock);
8842 			lck_mtx_unlock(&mod_lock);
8843 			lck_mtx_unlock(&dtrace_provider_lock);
8844 		}
8845 		return (EBUSY);
8846 	}
8847 
8848 	/*
8849 	 * All of the probes for this provider are disabled; we can safely
8850 	 * remove all of them from their hash chains and from the probe array.
8851 	 */
8852 	for (probe = dtrace_hash_lookup(dtrace_byprov, &template); probe != NULL;
8853 	    probe = *(DTRACE_HASHNEXT(dtrace_byprov, probe))) {
8854 		if (probe->dtpr_provider != old)
8855 			continue;
8856 
8857 		dtrace_probes[probe->dtpr_id - 1] = NULL;
8858 		old->dtpv_probe_count--;
8859 
8860 		dtrace_hash_remove(dtrace_bymod, probe);
8861 		dtrace_hash_remove(dtrace_byfunc, probe);
8862 		dtrace_hash_remove(dtrace_byname, probe);
8863 
8864 		if (first == NULL) {
8865 			first = probe;
8866 			probe->dtpr_nextmod = NULL;
8867 		} else {
8868 			/*
8869 			 * Use nextmod as the chain of probes to remove
8870 			 */
8871 			probe->dtpr_nextmod = first;
8872 			first = probe;
8873 		}
8874 	}
8875 
8876 	for (probe = first; probe != NULL; probe = next) {
8877 		next = probe->dtpr_nextmod;
8878 		dtrace_hash_remove(dtrace_byprov, probe);
8879 	}
8880 
8881 	/*
8882 	 * The provider's probes have been removed from the hash chains and
8883 	 * from the probe array.  Now issue a dtrace_sync() to be sure that
8884 	 * everyone has cleared out from any probe array processing.
8885 	 */
8886 	dtrace_sync();
8887 
8888 	for (probe = first; probe != NULL; probe = next) {
8889 		next = probe->dtpr_nextmod;
8890 
8891 		old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8892 		    probe->dtpr_arg);
8893 		dtrace_strunref(probe->dtpr_mod);
8894 		dtrace_strunref(probe->dtpr_func);
8895 		dtrace_strunref(probe->dtpr_name);
8896 		vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8897 		zfree(dtrace_probe_t_zone, probe);
8898 	}
8899 
8900 	if ((prev = dtrace_provider) == old) {
8901 		ASSERT(self || dtrace_devi == NULL);
8902 		ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
8903 		dtrace_provider = old->dtpv_next;
8904 	} else {
8905 		while (prev != NULL && prev->dtpv_next != old)
8906 			prev = prev->dtpv_next;
8907 
8908 		if (prev == NULL) {
8909 			panic("attempt to unregister non-existent "
8910 			    "dtrace provider %p\n", (void *)id);
8911 		}
8912 
8913 		prev->dtpv_next = old->dtpv_next;
8914 	}
8915 
8916 	dtrace_strunref(old->dtpv_name);
8917 
8918 	if (!self) {
8919 		lck_mtx_unlock(&dtrace_lock);
8920 		lck_mtx_unlock(&mod_lock);
8921 		lck_mtx_unlock(&dtrace_provider_lock);
8922 	}
8923 
8924 	kmem_free(old, sizeof (dtrace_provider_t));
8925 
8926 	return (0);
8927 }
8928 
8929 /*
8930  * Invalidate the specified provider.  All subsequent probe lookups for the
8931  * specified provider will fail, but its probes will not be removed.
8932  */
8933 void
8934 dtrace_invalidate(dtrace_provider_id_t id)
8935 {
8936 	dtrace_provider_t *pvp = (dtrace_provider_t *)id;
8937 
8938 	ASSERT(pvp->dtpv_pops.dtps_enable !=
8939 	    (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8940 
8941 	lck_mtx_lock(&dtrace_provider_lock);
8942 	lck_mtx_lock(&dtrace_lock);
8943 
8944 	pvp->dtpv_defunct = 1;
8945 
8946 	lck_mtx_unlock(&dtrace_lock);
8947 	lck_mtx_unlock(&dtrace_provider_lock);
8948 }
8949 
8950 /*
8951  * Indicate whether or not DTrace has attached.
8952  */
8953 int
8954 dtrace_attached(void)
8955 {
8956 	/*
8957 	 * dtrace_provider will be non-NULL iff the DTrace driver has
8958 	 * attached.  (It's non-NULL because DTrace is always itself a
8959 	 * provider.)
8960 	 */
8961 	return (dtrace_provider != NULL);
8962 }
8963 
8964 /*
8965  * Remove all the unenabled probes for the given provider.  This function is
8966  * not unlike dtrace_unregister(), except that it doesn't remove the provider
8967  * -- just as many of its associated probes as it can.
8968  */
8969 int
8970 dtrace_condense(dtrace_provider_id_t id)
8971 {
8972 	dtrace_provider_t *prov = (dtrace_provider_t *)id;
8973 	dtrace_probe_t *probe, *first = NULL;
8974 	dtrace_probe_t template = {
8975 		.dtpr_provider = prov
8976 	};
8977 
8978 	/*
8979 	 * Make sure this isn't the dtrace provider itself.
8980 	 */
8981 	ASSERT(prov->dtpv_pops.dtps_enable !=
8982 	  (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
8983 
8984 	lck_mtx_lock(&dtrace_provider_lock);
8985 	lck_mtx_lock(&dtrace_lock);
8986 
8987 	/*
8988 	 * Attempt to destroy the probes associated with this provider.
8989 	 */
8990 	for (probe = dtrace_hash_lookup(dtrace_byprov, &template); probe != NULL;
8991 	    probe = *(DTRACE_HASHNEXT(dtrace_byprov, probe))) {
8992 
8993 		if (probe->dtpr_provider != prov)
8994 			continue;
8995 
8996 		if (probe->dtpr_ecb != NULL)
8997 			continue;
8998 
8999 		dtrace_probes[probe->dtpr_id - 1] = NULL;
9000 		prov->dtpv_probe_count--;
9001 
9002 		dtrace_hash_remove(dtrace_bymod, probe);
9003 		dtrace_hash_remove(dtrace_byfunc, probe);
9004 		dtrace_hash_remove(dtrace_byname, probe);
9005 
9006 		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
9007 		    probe->dtpr_arg);
9008 		dtrace_strunref(probe->dtpr_mod);
9009 		dtrace_strunref(probe->dtpr_func);
9010 		dtrace_strunref(probe->dtpr_name);
9011 		if (first == NULL) {
9012 			first = probe;
9013 			probe->dtpr_nextmod = NULL;
9014 		} else {
9015 			/*
9016 			 * Use nextmod as the chain of probes to remove
9017 			 */
9018 			probe->dtpr_nextmod = first;
9019 			first = probe;
9020 		}
9021 	}
9022 
9023 	for (probe = first; probe != NULL; probe = first) {
9024 		first = probe->dtpr_nextmod;
9025 		dtrace_hash_remove(dtrace_byprov, probe);
9026 		vmem_free(dtrace_arena, (void *)((uintptr_t)probe->dtpr_id), 1);
9027 		zfree(dtrace_probe_t_zone, probe);
9028 	}
9029 
9030 	lck_mtx_unlock(&dtrace_lock);
9031 	lck_mtx_unlock(&dtrace_provider_lock);
9032 
9033 	return (0);
9034 }
9035 
9036 /*
9037  * DTrace Probe Management Functions
9038  *
9039  * The functions in this section perform the DTrace probe management,
9040  * including functions to create probes, look-up probes, and call into the
9041  * providers to request that probes be provided.  Some of these functions are
9042  * in the Provider-to-Framework API; these functions can be identified by the
9043  * fact that they are not declared "static".
9044  */
9045 
9046 /*
9047  * Create a probe with the specified module name, function name, and name.
9048  */
9049 dtrace_id_t
9050 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
9051     const char *func, const char *name, int aframes, void *arg)
9052 {
9053 	dtrace_probe_t *probe, **probes;
9054 	dtrace_provider_t *provider = (dtrace_provider_t *)prov;
9055 	dtrace_id_t id;
9056 
9057 	if (provider == dtrace_provider) {
9058 		LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9059 	} else {
9060 		lck_mtx_lock(&dtrace_lock);
9061 	}
9062 
9063 	id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
9064 	    VM_BESTFIT | VM_SLEEP);
9065 
9066 	probe = zalloc_flags(dtrace_probe_t_zone, Z_WAITOK | Z_ZERO);
9067 
9068 	probe->dtpr_id = id;
9069 	probe->dtpr_gen = dtrace_probegen++;
9070 	probe->dtpr_mod = dtrace_strref(mod);
9071 	probe->dtpr_func = dtrace_strref(func);
9072 	probe->dtpr_name = dtrace_strref(name);
9073 	probe->dtpr_arg = arg;
9074 	probe->dtpr_aframes = aframes;
9075 	probe->dtpr_provider = provider;
9076 
9077 	dtrace_hash_add(dtrace_byprov, probe);
9078 	dtrace_hash_add(dtrace_bymod, probe);
9079 	dtrace_hash_add(dtrace_byfunc, probe);
9080 	dtrace_hash_add(dtrace_byname, probe);
9081 
9082 	if (id - 1 >= (dtrace_id_t)dtrace_nprobes) {
9083 		size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
9084 		size_t nsize = osize * 2;
9085 
9086 		probes = kmem_zalloc(nsize, KM_SLEEP);
9087 
9088 		dtrace_probe_t **oprobes = dtrace_probes;
9089 
9090 		bcopy(oprobes, probes, osize);
9091 		dtrace_membar_producer();
9092 		dtrace_probes = probes;
9093 
9094 		dtrace_sync();
9095 
9096 		/*
9097 		 * All CPUs are now seeing the new probes array; we can
9098 		 * safely free the old array.
9099 		 */
9100 		kmem_free(oprobes, osize);
9101 		dtrace_nprobes *= 2;
9102 
9103 		ASSERT(id - 1 < (dtrace_id_t)dtrace_nprobes);
9104 	}
9105 
9106 	ASSERT(dtrace_probes[id - 1] == NULL);
9107 	dtrace_probes[id - 1] = probe;
9108 	provider->dtpv_probe_count++;
9109 
9110 	if (provider != dtrace_provider)
9111 		lck_mtx_unlock(&dtrace_lock);
9112 
9113 	return (id);
9114 }
9115 
9116 static dtrace_probe_t *
9117 dtrace_probe_lookup_id(dtrace_id_t id)
9118 {
9119 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9120 
9121 	if (id == 0 || id > (dtrace_id_t)dtrace_nprobes)
9122 		return (NULL);
9123 
9124 	return (dtrace_probes[id - 1]);
9125 }
9126 
9127 static int
9128 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg1, void *arg2)
9129 {
9130 #pragma unused(arg2)
9131 	*((dtrace_id_t *)arg1) = probe->dtpr_id;
9132 
9133 	return (DTRACE_MATCH_DONE);
9134 }
9135 
9136 /*
9137  * Look up a probe based on provider and one or more of module name, function
9138  * name and probe name.
9139  */
9140 dtrace_id_t
9141 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
9142     const char *func, const char *name)
9143 {
9144 	dtrace_probekey_t pkey;
9145 	dtrace_id_t id;
9146 	int match;
9147 
9148 	lck_mtx_lock(&dtrace_lock);
9149 
9150 	pkey.dtpk_prov = dtrace_strref(((dtrace_provider_t *)prid)->dtpv_name);
9151 	pkey.dtpk_pmatch = &dtrace_match_string;
9152 	pkey.dtpk_mod = dtrace_strref(mod);
9153 	pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
9154 	pkey.dtpk_func = dtrace_strref(func);
9155 	pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
9156 	pkey.dtpk_name = dtrace_strref(name);
9157 	pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
9158 	pkey.dtpk_id = DTRACE_IDNONE;
9159 
9160 	match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
9161 	    dtrace_probe_lookup_match, &id, NULL);
9162 
9163 	dtrace_probekey_release(&pkey);
9164 
9165 	lck_mtx_unlock(&dtrace_lock);
9166 
9167 	ASSERT(match == 1 || match == 0);
9168 	return (match ? id : 0);
9169 }
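
/*
 * A minimal sketch of how a provider might pair dtrace_probe_lookup() and
 * dtrace_probe_create() from its dtps_provide() entry point, creating a
 * probe only when an equivalent probe does not already exist; modname,
 * funcname, probename, aframes and arg are placeholders:
 *
 *	if (dtrace_probe_lookup(id, modname, funcname, probename) == 0)
 *		(void) dtrace_probe_create(id, modname, funcname,
 *		    probename, aframes, arg);
 *
 * dtrace_probe_lookup() returns 0 when no matching probe exists, so this
 * avoids creating duplicate probes on repeated provide requests.
 */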
9170 
9171 /*
9172  * Returns the probe argument associated with the specified probe.
9173  */
9174 void *
9175 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
9176 {
9177 	dtrace_probe_t *probe;
9178 	void *rval = NULL;
9179 
9180 	lck_mtx_lock(&dtrace_lock);
9181 
9182 	if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
9183 	    probe->dtpr_provider == (dtrace_provider_t *)id)
9184 		rval = probe->dtpr_arg;
9185 
9186 	lck_mtx_unlock(&dtrace_lock);
9187 
9188 	return (rval);
9189 }
9190 
9191 /*
9192  * Copy a probe into a probe description.
9193  */
9194 static void
9195 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
9196 {
9197 	bzero(pdp, sizeof (dtrace_probedesc_t));
9198 	pdp->dtpd_id = prp->dtpr_id;
9199 
9200 	/* APPLE NOTE: Darwin employs size bounded string operation. */
9201 	(void) strlcpy(pdp->dtpd_provider,
9202 	    prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN);
9203 
9204 	(void) strlcpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN);
9205 	(void) strlcpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN);
9206 	(void) strlcpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN);
9207 }
9208 
9209 /*
9210  * Called to indicate that a probe -- or probes -- should be provided by a
9211  * specified provider.  If the specified description is NULL, the provider will
9212  * be told to provide all of its probes.  (This is done whenever a new
9213  * consumer comes along, or whenever a retained enabling is to be matched.) If
9214  * the specified description is non-NULL, the provider is given the
9215  * opportunity to dynamically provide the specified probe, allowing providers
9216  * to support the creation of probes on-the-fly.  (So-called _autocreated_
9217  * probes.)  If the provider is NULL, the operations will be applied to all
9218  * providers; if the provider is non-NULL the operations will only be applied
9219  * to the specified provider.  The dtrace_provider_lock must be held, and the
9220  * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
9221  * will need to grab the dtrace_lock when it reenters the framework through
9222  * dtrace_probe_lookup(), dtrace_probe_create(), etc.
9223  */
9224 static void
9225 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
9226 {
9227 	struct modctl *ctl;
9228 	int all = 0;
9229 
9230 	LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
9231 
9232 	if (prv == NULL) {
9233 		all = 1;
9234 		prv = dtrace_provider;
9235 	}
9236 
9237 	do {
9238 		/*
9239 		 * First, call the blanket provide operation.
9240 		 */
9241 		prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
9242 
9243 		/*
9244 		 * Now call the per-module provide operation.  We will grab
9245 		 * mod_lock to prevent the list from being modified.  Note
9246 		 * that this also prevents the mod_busy bits from changing.
9247 		 * (mod_busy can only be changed with mod_lock held.)
9248 		 */
9249 		lck_mtx_lock(&mod_lock);
9250 
9251 		ctl = dtrace_modctl_list;
9252 		while (ctl) {
9253 			prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
9254 			ctl = ctl->mod_next;
9255 		}
9256 
9257 		lck_mtx_unlock(&mod_lock);
9258 	} while (all && (prv = prv->dtpv_next) != NULL);
9259 }
9260 
9261 /*
9262  * Iterate over each probe, and call the Framework-to-Provider API function
9263  * denoted by offs.
9264  */
9265 static void
9266 dtrace_probe_foreach(uintptr_t offs)
9267 {
9268 	dtrace_provider_t *prov;
9269 	void (*func)(void *, dtrace_id_t, void *);
9270 	dtrace_probe_t *probe;
9271 	dtrace_icookie_t cookie;
9272 	int i;
9273 
9274 	/*
9275 	 * We disable interrupts to walk through the probe array.  This is
9276 	 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
9277 	 * won't see stale data.
9278 	 */
9279 	cookie = dtrace_interrupt_disable();
9280 
9281 	for (i = 0; i < dtrace_nprobes; i++) {
9282 		if ((probe = dtrace_probes[i]) == NULL)
9283 			continue;
9284 
9285 		if (probe->dtpr_ecb == NULL) {
9286 			/*
9287 			 * This probe isn't enabled -- don't call the function.
9288 			 */
9289 			continue;
9290 		}
9291 
9292 		prov = probe->dtpr_provider;
9293 		func = *((void(**)(void *, dtrace_id_t, void *))
9294 		    ((uintptr_t)&prov->dtpv_pops + offs));
9295 
9296 		func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
9297 	}
9298 
9299 	dtrace_interrupt_enable(cookie);
9300 }
9301 
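/*
 * Enable all probes matching the specified description on behalf of the
 * given enabling by creating an ECB on each matching probe; a NULL
 * description creates a single ECB with a NULL probe.  The dtrace_lock
 * must be held by the caller.
 */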
9302 static int
9303 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab, dtrace_ecbdesc_t *ep)
9304 {
9305 	dtrace_probekey_t pkey;
9306 	uint32_t priv;
9307 	uid_t uid;
9308 	zoneid_t zoneid;
9309 	int err;
9310 
9311 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
9312 
9313 	dtrace_ecb_create_cache = NULL;
9314 
9315 	if (desc == NULL) {
9316 		/*
9317 		 * If we're passed a NULL description, we're being asked to
9318 		 * create an ECB with a NULL probe.
9319 		 */
9320 		(void) dtrace_ecb_create_enable(NULL, enab, ep);
9321 		return (0);
9322 	}
9323 
9324 	dtrace_probekey(desc, &pkey);
9325 	dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
9326 	    &priv, &uid, &zoneid);
9327 
9328 	err = dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, enab, ep);
9329 
9330 	dtrace_probekey_release(&pkey);
9331 
9332 	return err;
9333 }
9334 
9335 /*
9336  * DTrace Helper Provider Functions
9337  */
9338 static void
9339 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
9340 {
9341 	attr->dtat_name = DOF_ATTR_NAME(dofattr);
9342 	attr->dtat_data = DOF_ATTR_DATA(dofattr);
9343 	attr->dtat_class = DOF_ATTR_CLASS(dofattr);
9344 }
9345 
9346 static void
9347 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
9348     const dof_provider_t *dofprov, char *strtab)
9349 {
9350 	hprov->dthpv_provname = strtab + dofprov->dofpv_name;
9351 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
9352 	    dofprov->dofpv_provattr);
9353 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
9354 	    dofprov->dofpv_modattr);
9355 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
9356 	    dofprov->dofpv_funcattr);
9357 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
9358 	    dofprov->dofpv_nameattr);
9359 	dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
9360 	    dofprov->dofpv_argsattr);
9361 }
9362 
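/*
 * Hand a single DOF provider section off to the user-land meta-provider:
 * construct the helper provider description, create each of its probes
 * through the meta-provider's dtms_create_probe() operation, and then
 * match any retained enablings against the newly-created probes.
 */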
9363 static void
9364 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p)
9365 {
9366 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9367 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9368 	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
9369 	dof_provider_t *provider;
9370 	dof_probe_t *probe;
9371 	uint32_t *off, *enoff;
9372 	uint8_t *arg;
9373 	char *strtab;
9374 	uint_t i, nprobes;
9375 	dtrace_helper_provdesc_t dhpv;
9376 	dtrace_helper_probedesc_t dhpb;
9377 	dtrace_meta_t *meta = dtrace_meta_pid;
9378 	dtrace_mops_t *mops = &meta->dtm_mops;
9379 	void *parg;
9380 
9381 	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9382 	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9383 	    provider->dofpv_strtab * dof->dofh_secsize);
9384 	prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9385 	    provider->dofpv_probes * dof->dofh_secsize);
9386 	arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9387 	    provider->dofpv_prargs * dof->dofh_secsize);
9388 	off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9389 	    provider->dofpv_proffs * dof->dofh_secsize);
9390 
9391 	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9392 	off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
9393 	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
9394 	enoff = NULL;
9395 
9396 	/*
9397 	 * See dtrace_helper_provider_validate().
9398 	 */
9399 	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
9400 	    provider->dofpv_prenoffs != DOF_SECT_NONE) {
9401 		enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9402 		    provider->dofpv_prenoffs * dof->dofh_secsize);
9403 		enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
9404 	}
9405 
9406 	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
9407 
9408 	/*
9409 	 * Create the provider.
9410 	 */
9411 	dtrace_dofprov2hprov(&dhpv, provider, strtab);
9412 
9413 	if ((parg = mops->dtms_provide_proc(meta->dtm_arg, &dhpv, p)) == NULL)
9414 		return;
9415 
9416 	meta->dtm_count++;
9417 
9418 	/*
9419 	 * Create the probes.
9420 	 */
9421 	for (i = 0; i < nprobes; i++) {
9422 		probe = (dof_probe_t *)(uintptr_t)(daddr +
9423 		    prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
9424 
9425 		dhpb.dthpb_mod = dhp->dofhp_mod;
9426 		dhpb.dthpb_func = strtab + probe->dofpr_func;
9427 		dhpb.dthpb_name = strtab + probe->dofpr_name;
9428 #if !defined(__APPLE__)
9429 		dhpb.dthpb_base = probe->dofpr_addr;
9430 #else
9431 		dhpb.dthpb_base = dhp->dofhp_addr; /* FIXME: James, why? */
9432 #endif
9433 		dhpb.dthpb_offs = (int32_t *)(off + probe->dofpr_offidx);
9434 		dhpb.dthpb_noffs = probe->dofpr_noffs;
9435 		if (enoff != NULL) {
9436 			dhpb.dthpb_enoffs = (int32_t *)(enoff + probe->dofpr_enoffidx);
9437 			dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
9438 		} else {
9439 			dhpb.dthpb_enoffs = NULL;
9440 			dhpb.dthpb_nenoffs = 0;
9441 		}
9442 		dhpb.dthpb_args = arg + probe->dofpr_argidx;
9443 		dhpb.dthpb_nargc = probe->dofpr_nargc;
9444 		dhpb.dthpb_xargc = probe->dofpr_xargc;
9445 		dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
9446 		dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
9447 
9448 		mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
9449 	}
9450 
9451 	/*
9452 	 * Since we just created probes, we need to match our enablings
9453 	 * against them, knowing that we have only added probes from this
9454 	 * provider.
9455 	 */
9456 	char *prov_name = mops->dtms_provider_name(parg);
9457 	ASSERT(prov_name != NULL);
9458 	dtrace_match_cond_t cond = {dtrace_cond_provider_match, (void*)prov_name};
9459 
9460 	dtrace_enabling_matchall_with_cond(&cond);
9461 }
9462 
9463 static void
9464 dtrace_helper_provide(dof_helper_t *dhp, proc_t *p)
9465 {
9466 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9467 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9468 	uint32_t i;
9469 
9470 	LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
9471 
9472 	for (i = 0; i < dof->dofh_secnum; i++) {
9473 		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9474 		    dof->dofh_secoff + i * dof->dofh_secsize);
9475 
9476 		if (sec->dofs_type != DOF_SECT_PROVIDER)
9477 			continue;
9478 
9479 		dtrace_helper_provide_one(dhp, sec, p);
9480 	}
9481 }
9482 
9483 static void
9484 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, proc_t *p)
9485 {
9486 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9487 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9488 	dof_sec_t *str_sec;
9489 	dof_provider_t *provider;
9490 	char *strtab;
9491 	dtrace_helper_provdesc_t dhpv;
9492 	dtrace_meta_t *meta = dtrace_meta_pid;
9493 	dtrace_mops_t *mops = &meta->dtm_mops;
9494 
9495 	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9496 	str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9497 	    provider->dofpv_strtab * dof->dofh_secsize);
9498 
9499 	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9500 
9501 	/*
9502 	 * Create the provider.
9503 	 */
9504 	dtrace_dofprov2hprov(&dhpv, provider, strtab);
9505 
9506 	mops->dtms_remove_proc(meta->dtm_arg, &dhpv, p);
9507 
9508 	meta->dtm_count--;
9509 }
9510 
9511 static void
9512 dtrace_helper_provider_remove(dof_helper_t *dhp, proc_t *p)
9513 {
9514 	uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9515 	dof_hdr_t *dof = (dof_hdr_t *)daddr;
9516 	uint32_t i;
9517 
9518 	LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
9519 
9520 	for (i = 0; i < dof->dofh_secnum; i++) {
9521 		dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9522 		    dof->dofh_secoff + i * dof->dofh_secsize);
9523 
9524 		if (sec->dofs_type != DOF_SECT_PROVIDER)
9525 			continue;
9526 
9527 		dtrace_helper_provider_remove_one(dhp, sec, p);
9528 	}
9529 }
9530 
9531 /*
9532  * DTrace Meta Provider-to-Framework API Functions
9533  *
9534  * These functions implement the Meta Provider-to-Framework API, as described
9535  * in <sys/dtrace.h>.
9536  */
9537 int
9538 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9539     dtrace_meta_provider_id_t *idp)
9540 {
9541 	dtrace_meta_t *meta;
9542 	dtrace_helpers_t *help, *next;
9543 	uint_t i;
9544 
9545 	*idp = DTRACE_METAPROVNONE;
9546 
9547 	/*
9548 	 * We strictly don't need the name, but we hold onto it for
9549 	 * debuggability. All hail error queues!
9550 	 */
9551 	if (name == NULL) {
9552 		cmn_err(CE_WARN, "failed to register meta-provider: "
9553 		    "invalid name");
9554 		return (EINVAL);
9555 	}
9556 
9557 	if (mops == NULL ||
9558 	    mops->dtms_create_probe == NULL ||
9559 	    mops->dtms_provide_proc == NULL ||
9560 	    mops->dtms_remove_proc == NULL) {
9561 		cmn_err(CE_WARN, "failed to register meta-register %s: "
9562 		    "invalid ops", name);
9563 		return (EINVAL);
9564 	}
9565 
9566 	meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9567 	meta->dtm_mops = *mops;
9568 	meta->dtm_arg = arg;
9569 
9570 	lck_mtx_lock(&dtrace_meta_lock);
9571 	lck_mtx_lock(&dtrace_lock);
9572 
9573 	if (dtrace_meta_pid != NULL) {
9574 		lck_mtx_unlock(&dtrace_lock);
9575 		lck_mtx_unlock(&dtrace_meta_lock);
9576 		cmn_err(CE_WARN, "failed to register meta-register %s: "
9577 		    "user-land meta-provider exists", name);
9578 		kmem_free(meta, sizeof (dtrace_meta_t));
9579 		return (EINVAL);
9580 	}
9581 
9582 	meta->dtm_name = dtrace_strref(name);
9583 
9584 	dtrace_meta_pid = meta;
9585 	*idp = (dtrace_meta_provider_id_t)meta;
9586 
9587 	/*
9588 	 * If there are providers and probes ready to go, pass them
9589 	 * off to the new meta provider now.
9590 	 */
9591 
9592 	help = dtrace_deferred_pid;
9593 	dtrace_deferred_pid = NULL;
9594 
9595 	lck_mtx_unlock(&dtrace_lock);
9596 
9597 	while (help != NULL) {
9598 		for (i = 0; i < help->dthps_nprovs; i++) {
9599 			proc_t *p = proc_find(help->dthps_pid);
9600 			if (p == PROC_NULL)
9601 				continue;
9602 			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
9603 			    p);
9604 			proc_rele(p);
9605 		}
9606 
9607 		next = help->dthps_next;
9608 		help->dthps_next = NULL;
9609 		help->dthps_prev = NULL;
9610 		help->dthps_deferred = 0;
9611 		help = next;
9612 	}
9613 
9614 	lck_mtx_unlock(&dtrace_meta_lock);
9615 
9616 	return (0);
9617 }
9618 
9619 int
9620 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
9621 {
9622 	dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9623 
9624 	lck_mtx_lock(&dtrace_meta_lock);
9625 	lck_mtx_lock(&dtrace_lock);
9626 
9627 	if (old == dtrace_meta_pid) {
9628 		pp = &dtrace_meta_pid;
9629 	} else {
9630 		panic("attempt to unregister non-existent "
9631 		    "dtrace meta-provider %p\n", (void *)old);
9632 	}
9633 
9634 	if (old->dtm_count != 0) {
9635 		lck_mtx_unlock(&dtrace_lock);
9636 		lck_mtx_unlock(&dtrace_meta_lock);
9637 		return (EBUSY);
9638 	}
9639 
9640 	*pp = NULL;
9641 
9642 	dtrace_strunref(old->dtm_name);
9643 
9644 	lck_mtx_unlock(&dtrace_lock);
9645 	lck_mtx_unlock(&dtrace_meta_lock);
9646 
9647 	kmem_free(old, sizeof (dtrace_meta_t));
9648 
9649 	return (0);
9650 }
9651 
9652 
9653 /*
9654  * DTrace DIF Object Functions
9655  */
9656 static int
9657 dtrace_difo_err(uint_t pc, const char *format, ...)
9658 {
9659 	if (dtrace_err_verbose) {
9660 		va_list alist;
9661 
9662 		(void) uprintf("dtrace DIF object error: [%u]: ", pc);
9663 		va_start(alist, format);
9664 		(void) vuprintf(format, alist);
9665 		va_end(alist);
9666 	}
9667 
9668 #ifdef DTRACE_ERRDEBUG
9669 	dtrace_errdebug(format);
9670 #endif
9671 	return (1);
9672 }
9673 
9674 /*
9675  * Validate a DTrace DIF object by checking the IR instructions.  The following
9676  * rules are currently enforced by dtrace_difo_validate():
9677  *
9678  * 1. Each instruction must have a valid opcode
9679  * 2. Each register, string, variable, or subroutine reference must be valid
9680  * 3. No instruction can modify register %r0 (must be zero)
9681  * 4. All instruction reserved bits must be set to zero
9682  * 5. The last instruction must be a "ret" instruction
9683  * 6. All branch targets must reference a valid instruction _after_ the branch
9684  */
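/*
 * As a minimal sketch of rules 1, 3 and 5: a two-instruction DIF object
 * consisting of "setx DT_INTEGER[0], %r1" followed by "ret %r1" passes
 * validation, whereas the same object with the trailing "ret" replaced
 * by any other instruction is rejected because the last instruction must
 * be a "ret".
 */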
9685 static int
9686 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
9687     cred_t *cr)
9688 {
9689 	int err = 0;
9690 	uint_t i;
9691 
9692 	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9693 	int kcheckload;
9694 	uint_t pc;
9695 	int maxglobal = -1, maxlocal = -1, maxtlocal = -1;
9696 
9697 	kcheckload = cr == NULL ||
9698 	    (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
9699 
9700 	dp->dtdo_destructive = 0;
9701 
9702 	for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9703 		dif_instr_t instr = dp->dtdo_buf[pc];
9704 
9705 		uint_t r1 = DIF_INSTR_R1(instr);
9706 		uint_t r2 = DIF_INSTR_R2(instr);
9707 		uint_t rd = DIF_INSTR_RD(instr);
9708 		uint_t rs = DIF_INSTR_RS(instr);
9709 		uint_t label = DIF_INSTR_LABEL(instr);
9710 		uint_t v = DIF_INSTR_VAR(instr);
9711 		uint_t subr = DIF_INSTR_SUBR(instr);
9712 		uint_t type = DIF_INSTR_TYPE(instr);
9713 		uint_t op = DIF_INSTR_OP(instr);
9714 
9715 		switch (op) {
9716 		case DIF_OP_OR:
9717 		case DIF_OP_XOR:
9718 		case DIF_OP_AND:
9719 		case DIF_OP_SLL:
9720 		case DIF_OP_SRL:
9721 		case DIF_OP_SRA:
9722 		case DIF_OP_SUB:
9723 		case DIF_OP_ADD:
9724 		case DIF_OP_MUL:
9725 		case DIF_OP_SDIV:
9726 		case DIF_OP_UDIV:
9727 		case DIF_OP_SREM:
9728 		case DIF_OP_UREM:
9729 		case DIF_OP_COPYS:
9730 			if (r1 >= nregs)
9731 				err += efunc(pc, "invalid register %u\n", r1);
9732 			if (r2 >= nregs)
9733 				err += efunc(pc, "invalid register %u\n", r2);
9734 			if (rd >= nregs)
9735 				err += efunc(pc, "invalid register %u\n", rd);
9736 			if (rd == 0)
9737 				err += efunc(pc, "cannot write to %%r0\n");
9738 			break;
9739 		case DIF_OP_NOT:
9740 		case DIF_OP_MOV:
9741 		case DIF_OP_ALLOCS:
9742 			if (r1 >= nregs)
9743 				err += efunc(pc, "invalid register %u\n", r1);
9744 			if (r2 != 0)
9745 				err += efunc(pc, "non-zero reserved bits\n");
9746 			if (rd >= nregs)
9747 				err += efunc(pc, "invalid register %u\n", rd);
9748 			if (rd == 0)
9749 				err += efunc(pc, "cannot write to %%r0\n");
9750 			break;
9751 		case DIF_OP_LDSB:
9752 		case DIF_OP_LDSH:
9753 		case DIF_OP_LDSW:
9754 		case DIF_OP_LDUB:
9755 		case DIF_OP_LDUH:
9756 		case DIF_OP_LDUW:
9757 		case DIF_OP_LDX:
9758 			if (r1 >= nregs)
9759 				err += efunc(pc, "invalid register %u\n", r1);
9760 			if (r2 != 0)
9761 				err += efunc(pc, "non-zero reserved bits\n");
9762 			if (rd >= nregs)
9763 				err += efunc(pc, "invalid register %u\n", rd);
9764 			if (rd == 0)
9765 				err += efunc(pc, "cannot write to %%r0\n");
9766 			if (kcheckload)
9767 				dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9768 				    DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9769 			break;
9770 		case DIF_OP_RLDSB:
9771 		case DIF_OP_RLDSH:
9772 		case DIF_OP_RLDSW:
9773 		case DIF_OP_RLDUB:
9774 		case DIF_OP_RLDUH:
9775 		case DIF_OP_RLDUW:
9776 		case DIF_OP_RLDX:
9777 			if (r1 >= nregs)
9778 				err += efunc(pc, "invalid register %u\n", r1);
9779 			if (r2 != 0)
9780 				err += efunc(pc, "non-zero reserved bits\n");
9781 			if (rd >= nregs)
9782 				err += efunc(pc, "invalid register %u\n", rd);
9783 			if (rd == 0)
9784 				err += efunc(pc, "cannot write to %%r0\n");
9785 			break;
9786 		case DIF_OP_ULDSB:
9787 		case DIF_OP_ULDSH:
9788 		case DIF_OP_ULDSW:
9789 		case DIF_OP_ULDUB:
9790 		case DIF_OP_ULDUH:
9791 		case DIF_OP_ULDUW:
9792 		case DIF_OP_ULDX:
9793 			if (r1 >= nregs)
9794 				err += efunc(pc, "invalid register %u\n", r1);
9795 			if (r2 != 0)
9796 				err += efunc(pc, "non-zero reserved bits\n");
9797 			if (rd >= nregs)
9798 				err += efunc(pc, "invalid register %u\n", rd);
9799 			if (rd == 0)
9800 				err += efunc(pc, "cannot write to %%r0\n");
9801 			break;
9802 		case DIF_OP_STB:
9803 		case DIF_OP_STH:
9804 		case DIF_OP_STW:
9805 		case DIF_OP_STX:
9806 			if (r1 >= nregs)
9807 				err += efunc(pc, "invalid register %u\n", r1);
9808 			if (r2 != 0)
9809 				err += efunc(pc, "non-zero reserved bits\n");
9810 			if (rd >= nregs)
9811 				err += efunc(pc, "invalid register %u\n", rd);
9812 			if (rd == 0)
9813 				err += efunc(pc, "cannot write to 0 address\n");
9814 			break;
9815 		case DIF_OP_CMP:
9816 		case DIF_OP_SCMP:
9817 			if (r1 >= nregs)
9818 				err += efunc(pc, "invalid register %u\n", r1);
9819 			if (r2 >= nregs)
9820 				err += efunc(pc, "invalid register %u\n", r2);
9821 			if (rd != 0)
9822 				err += efunc(pc, "non-zero reserved bits\n");
9823 			break;
9824 		case DIF_OP_TST:
9825 			if (r1 >= nregs)
9826 				err += efunc(pc, "invalid register %u\n", r1);
9827 			if (r2 != 0 || rd != 0)
9828 				err += efunc(pc, "non-zero reserved bits\n");
9829 			break;
9830 		case DIF_OP_BA:
9831 		case DIF_OP_BE:
9832 		case DIF_OP_BNE:
9833 		case DIF_OP_BG:
9834 		case DIF_OP_BGU:
9835 		case DIF_OP_BGE:
9836 		case DIF_OP_BGEU:
9837 		case DIF_OP_BL:
9838 		case DIF_OP_BLU:
9839 		case DIF_OP_BLE:
9840 		case DIF_OP_BLEU:
9841 			if (label >= dp->dtdo_len) {
9842 				err += efunc(pc, "invalid branch target %u\n",
9843 				    label);
9844 			}
9845 			if (label <= pc) {
9846 				err += efunc(pc, "backward branch to %u\n",
9847 				    label);
9848 			}
9849 			break;
9850 		case DIF_OP_RET:
9851 			if (r1 != 0 || r2 != 0)
9852 				err += efunc(pc, "non-zero reserved bits\n");
9853 			if (rd >= nregs)
9854 				err += efunc(pc, "invalid register %u\n", rd);
9855 			break;
9856 		case DIF_OP_NOP:
9857 		case DIF_OP_POPTS:
9858 		case DIF_OP_FLUSHTS:
9859 			if (r1 != 0 || r2 != 0 || rd != 0)
9860 				err += efunc(pc, "non-zero reserved bits\n");
9861 			break;
9862 		case DIF_OP_SETX:
9863 			if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9864 				err += efunc(pc, "invalid integer ref %u\n",
9865 				    DIF_INSTR_INTEGER(instr));
9866 			}
9867 			if (rd >= nregs)
9868 				err += efunc(pc, "invalid register %u\n", rd);
9869 			if (rd == 0)
9870 				err += efunc(pc, "cannot write to %%r0\n");
9871 			break;
9872 		case DIF_OP_SETS:
9873 			if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9874 				err += efunc(pc, "invalid string ref %u\n",
9875 				    DIF_INSTR_STRING(instr));
9876 			}
9877 			if (rd >= nregs)
9878 				err += efunc(pc, "invalid register %u\n", rd);
9879 			if (rd == 0)
9880 				err += efunc(pc, "cannot write to %%r0\n");
9881 			break;
9882 		case DIF_OP_LDGA:
9883 		case DIF_OP_LDTA:
9884 			if (r1 > DIF_VAR_ARRAY_MAX)
9885 				err += efunc(pc, "invalid array %u\n", r1);
9886 			if (r2 >= nregs)
9887 				err += efunc(pc, "invalid register %u\n", r2);
9888 			if (rd >= nregs)
9889 				err += efunc(pc, "invalid register %u\n", rd);
9890 			if (rd == 0)
9891 				err += efunc(pc, "cannot write to %%r0\n");
9892 			break;
9893 		case DIF_OP_LDGS:
9894 		case DIF_OP_LDTS:
9895 		case DIF_OP_LDLS:
9896 		case DIF_OP_LDGAA:
9897 		case DIF_OP_LDTAA:
9898 			if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9899 				err += efunc(pc, "invalid variable %u\n", v);
9900 			if (rd >= nregs)
9901 				err += efunc(pc, "invalid register %u\n", rd);
9902 			if (rd == 0)
9903 				err += efunc(pc, "cannot write to %%r0\n");
9904 			break;
9905 		case DIF_OP_STGS:
9906 		case DIF_OP_STTS:
9907 		case DIF_OP_STLS:
9908 		case DIF_OP_STGAA:
9909 		case DIF_OP_STTAA:
9910 			if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
9911 				err += efunc(pc, "invalid variable %u\n", v);
9912 			if (rs >= nregs)
9913 				err += efunc(pc, "invalid register %u\n", rd);
9914 			break;
9915 		case DIF_OP_CALL:
9916 			if (subr > DIF_SUBR_MAX &&
9917 			   !(subr >= DIF_SUBR_APPLE_MIN && subr <= DIF_SUBR_APPLE_MAX))
9918 				err += efunc(pc, "invalid subr %u\n", subr);
9919 			if (rd >= nregs)
9920 				err += efunc(pc, "invalid register %u\n", rd);
9921 			if (rd == 0)
9922 				err += efunc(pc, "cannot write to %%r0\n");
9923 
9924 			switch (subr) {
9925 			case DIF_SUBR_COPYOUT:
9926 			case DIF_SUBR_COPYOUTSTR:
9927 			case DIF_SUBR_KDEBUG_TRACE:
9928 			case DIF_SUBR_KDEBUG_TRACE_STRING:
9929 			case DIF_SUBR_PHYSMEM_READ:
9930 			case DIF_SUBR_PHYSMEM_WRITE:
9931 			case DIF_SUBR_LIVEDUMP:
9932 				dp->dtdo_destructive = 1;
9933 				break;
9934 			default:
9935 				break;
9936 			}
9937 			break;
9938 		case DIF_OP_PUSHTR:
9939 			if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
9940 				err += efunc(pc, "invalid ref type %u\n", type);
9941 			if (r2 >= nregs)
9942 				err += efunc(pc, "invalid register %u\n", r2);
9943 			if (rs >= nregs)
9944 				err += efunc(pc, "invalid register %u\n", rs);
9945 			break;
9946 		case DIF_OP_PUSHTV:
9947 			if (type != DIF_TYPE_CTF)
9948 				err += efunc(pc, "invalid val type %u\n", type);
9949 			if (r2 >= nregs)
9950 				err += efunc(pc, "invalid register %u\n", r2);
9951 			if (rs >= nregs)
9952 				err += efunc(pc, "invalid register %u\n", rs);
9953 			break;
9954 		case DIF_OP_STRIP:
9955 			if (r1 >= nregs)
9956 				err += efunc(pc, "invalid register %u\n", r1);
9957 			if (!dtrace_is_valid_ptrauth_key(r2))
9958 				err += efunc(pc, "invalid key\n");
9959 			if (rd >= nregs)
9960 				err += efunc(pc, "invalid register %u\n", rd);
9961 			if (rd == 0)
9962 				err += efunc(pc, "cannot write to %%r0\n");
9963 			break;
9964 		default:
9965 			err += efunc(pc, "invalid opcode %u\n",
9966 			    DIF_INSTR_OP(instr));
9967 		}
9968 	}
9969 
9970 	if (dp->dtdo_len != 0 &&
9971 	    DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
9972 		err += efunc(dp->dtdo_len - 1,
9973 		    "expected 'ret' as last DIF instruction\n");
9974 	}
9975 
9976 	if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
9977 		/*
9978 		 * If we're not returning by reference, the size must be either
9979 		 * 0 or the size of one of the base types.
9980 		 */
9981 		switch (dp->dtdo_rtype.dtdt_size) {
9982 		case 0:
9983 		case sizeof (uint8_t):
9984 		case sizeof (uint16_t):
9985 		case sizeof (uint32_t):
9986 		case sizeof (uint64_t):
9987 			break;
9988 
9989 		default:
9990 			err += efunc(dp->dtdo_len - 1, "bad return size\n");
9991 		}
9992 	}
9993 
9994 	for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
9995 		dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
9996 		dtrace_diftype_t *vt, *et;
9997 		uint_t id;
9998 		int ndx;
9999 
10000 		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
10001 		    v->dtdv_scope != DIFV_SCOPE_THREAD &&
10002 		    v->dtdv_scope != DIFV_SCOPE_LOCAL) {
10003 			err += efunc(i, "unrecognized variable scope %d\n",
10004 			    v->dtdv_scope);
10005 			break;
10006 		}
10007 
10008 		if (v->dtdv_kind != DIFV_KIND_ARRAY &&
10009 		    v->dtdv_kind != DIFV_KIND_SCALAR) {
10010 			err += efunc(i, "unrecognized variable type %d\n",
10011 			    v->dtdv_kind);
10012 			break;
10013 		}
10014 
10015 		if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
10016 			err += efunc(i, "%d exceeds variable id limit\n", id);
10017 			break;
10018 		}
10019 
10020 		if (id < DIF_VAR_OTHER_UBASE)
10021 			continue;
10022 
10023 		/*
10024 		 * For user-defined variables, we need to check that this
10025 		 * definition is identical to any previous definition that we
10026 		 * encountered.
10027 		 */
10028 		ndx = id - DIF_VAR_OTHER_UBASE;
10029 
10030 		switch (v->dtdv_scope) {
10031 		case DIFV_SCOPE_GLOBAL:
10032 			if (maxglobal == -1 || ndx > maxglobal)
10033 				maxglobal = ndx;
10034 
10035 			if (ndx < vstate->dtvs_nglobals) {
10036 				dtrace_statvar_t *svar;
10037 
10038 				if ((svar = vstate->dtvs_globals[ndx]) != NULL)
10039 					existing = &svar->dtsv_var;
10040 			}
10041 
10042 			break;
10043 
10044 		case DIFV_SCOPE_THREAD:
10045 			if (maxtlocal == -1 || ndx > maxtlocal)
10046 				maxtlocal = ndx;
10047 
10048 			if (ndx < vstate->dtvs_ntlocals)
10049 				existing = &vstate->dtvs_tlocals[ndx];
10050 			break;
10051 
10052 		case DIFV_SCOPE_LOCAL:
10053 			if (maxlocal == -1 || ndx > maxlocal)
10054 				maxlocal = ndx;
10055 			if (ndx < vstate->dtvs_nlocals) {
10056 				dtrace_statvar_t *svar;
10057 
10058 				if ((svar = vstate->dtvs_locals[ndx]) != NULL)
10059 					existing = &svar->dtsv_var;
10060 			}
10061 
10062 			break;
10063 		}
10064 
10065 		vt = &v->dtdv_type;
10066 
10067 		if (vt->dtdt_flags & DIF_TF_BYREF) {
10068 			if (vt->dtdt_size == 0) {
10069 				err += efunc(i, "zero-sized variable\n");
10070 				break;
10071 			}
10072 
10073 			if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
10074 			    v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
10075 			    vt->dtdt_size > dtrace_statvar_maxsize) {
10076 				err += efunc(i, "oversized by-ref static\n");
10077 				break;
10078 			}
10079 		}
10080 
10081 		if (existing == NULL || existing->dtdv_id == 0)
10082 			continue;
10083 
10084 		ASSERT(existing->dtdv_id == v->dtdv_id);
10085 		ASSERT(existing->dtdv_scope == v->dtdv_scope);
10086 
10087 		if (existing->dtdv_kind != v->dtdv_kind)
10088 			err += efunc(i, "%d changed variable kind\n", id);
10089 
10090 		et = &existing->dtdv_type;
10091 
10092 		if (vt->dtdt_flags != et->dtdt_flags) {
10093 			err += efunc(i, "%d changed variable type flags\n", id);
10094 			break;
10095 		}
10096 
10097 		if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
10098 			err += efunc(i, "%d changed variable type size\n", id);
10099 			break;
10100 		}
10101 	}
10102 
10103 	for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
10104 		dif_instr_t instr = dp->dtdo_buf[pc];
10105 
10106 		uint_t v = DIF_INSTR_VAR(instr);
10107 		uint_t op = DIF_INSTR_OP(instr);
10108 
10109 		switch (op) {
10110 		case DIF_OP_LDGS:
10111 		case DIF_OP_LDGAA:
10112 		case DIF_OP_STGS:
10113 		case DIF_OP_STGAA:
10114 			if (v > (uint_t)(DIF_VAR_OTHER_UBASE + maxglobal))
10115 				err += efunc(pc, "invalid variable %u\n", v);
10116 			break;
10117 		case DIF_OP_LDTS:
10118 		case DIF_OP_LDTAA:
10119 		case DIF_OP_STTS:
10120 		case DIF_OP_STTAA:
10121 			if (v > (uint_t)(DIF_VAR_OTHER_UBASE + maxtlocal))
10122 				err += efunc(pc, "invalid variable %u\n", v);
10123 			break;
10124 		case DIF_OP_LDLS:
10125 		case DIF_OP_STLS:
10126 			if (v > (uint_t)(DIF_VAR_OTHER_UBASE + maxlocal))
10127 				err += efunc(pc, "invalid variable %u\n", v);
10128 			break;
10129 		default:
10130 			break;
10131 		}
10132 	}
10133 
10134 	return (err);
10135 }
10136 
10137 /*
10138  * Validate a DTrace DIF object that is to be used as a helper.  Helpers
10139  * are much more constrained than normal DIFOs.  Specifically, they may
10140  * not:
10141  *
10142  * 1. Make calls to subroutines other than copyin(), copyinstr() or
10143  *    miscellaneous string routines
10144  * 2. Access DTrace variables other than the args[] array, and the
10145  *    curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
10146  * 3. Have thread-local variables.
10147  * 4. Have dynamic variables.
10148  */
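/*
 * For example, a helper may call copyinstr() to read a string from the
 * traced process, but a call to a subroutine outside the list below
 * (e.g. speculation()) -- or any load or store of a thread-local or
 * associative-array variable -- is rejected.
 */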
10149 static int
10150 dtrace_difo_validate_helper(dtrace_difo_t *dp)
10151 {
10152 	int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
10153 	int err = 0;
10154 	uint_t pc;
10155 
10156 	for (pc = 0; pc < dp->dtdo_len; pc++) {
10157 		dif_instr_t instr = dp->dtdo_buf[pc];
10158 
10159 		uint_t v = DIF_INSTR_VAR(instr);
10160 		uint_t subr = DIF_INSTR_SUBR(instr);
10161 		uint_t op = DIF_INSTR_OP(instr);
10162 
10163 		switch (op) {
10164 		case DIF_OP_OR:
10165 		case DIF_OP_XOR:
10166 		case DIF_OP_AND:
10167 		case DIF_OP_SLL:
10168 		case DIF_OP_SRL:
10169 		case DIF_OP_SRA:
10170 		case DIF_OP_SUB:
10171 		case DIF_OP_ADD:
10172 		case DIF_OP_MUL:
10173 		case DIF_OP_SDIV:
10174 		case DIF_OP_UDIV:
10175 		case DIF_OP_SREM:
10176 		case DIF_OP_UREM:
10177 		case DIF_OP_COPYS:
10178 		case DIF_OP_NOT:
10179 		case DIF_OP_MOV:
10180 		case DIF_OP_RLDSB:
10181 		case DIF_OP_RLDSH:
10182 		case DIF_OP_RLDSW:
10183 		case DIF_OP_RLDUB:
10184 		case DIF_OP_RLDUH:
10185 		case DIF_OP_RLDUW:
10186 		case DIF_OP_RLDX:
10187 		case DIF_OP_ULDSB:
10188 		case DIF_OP_ULDSH:
10189 		case DIF_OP_ULDSW:
10190 		case DIF_OP_ULDUB:
10191 		case DIF_OP_ULDUH:
10192 		case DIF_OP_ULDUW:
10193 		case DIF_OP_ULDX:
10194 		case DIF_OP_STB:
10195 		case DIF_OP_STH:
10196 		case DIF_OP_STW:
10197 		case DIF_OP_STX:
10198 		case DIF_OP_ALLOCS:
10199 		case DIF_OP_CMP:
10200 		case DIF_OP_SCMP:
10201 		case DIF_OP_TST:
10202 		case DIF_OP_BA:
10203 		case DIF_OP_BE:
10204 		case DIF_OP_BNE:
10205 		case DIF_OP_BG:
10206 		case DIF_OP_BGU:
10207 		case DIF_OP_BGE:
10208 		case DIF_OP_BGEU:
10209 		case DIF_OP_BL:
10210 		case DIF_OP_BLU:
10211 		case DIF_OP_BLE:
10212 		case DIF_OP_BLEU:
10213 		case DIF_OP_RET:
10214 		case DIF_OP_NOP:
10215 		case DIF_OP_POPTS:
10216 		case DIF_OP_FLUSHTS:
10217 		case DIF_OP_SETX:
10218 		case DIF_OP_SETS:
10219 		case DIF_OP_LDGA:
10220 		case DIF_OP_LDLS:
10221 		case DIF_OP_STGS:
10222 		case DIF_OP_STLS:
10223 		case DIF_OP_PUSHTR:
10224 		case DIF_OP_PUSHTV:
10225 			break;
10226 
10227 		case DIF_OP_LDGS:
10228 			if (v >= DIF_VAR_OTHER_UBASE)
10229 				break;
10230 
10231 			if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
10232 				break;
10233 
10234 			if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
10235 			    v == DIF_VAR_PPID || v == DIF_VAR_TID ||
10236 			    v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
10237 			    v == DIF_VAR_UID || v == DIF_VAR_GID)
10238 				break;
10239 
10240 			err += efunc(pc, "illegal variable %u\n", v);
10241 			break;
10242 
10243 		case DIF_OP_LDTA:
10244 		case DIF_OP_LDTS:
10245 		case DIF_OP_LDGAA:
10246 		case DIF_OP_LDTAA:
10247 			err += efunc(pc, "illegal dynamic variable load\n");
10248 			break;
10249 
10250 		case DIF_OP_STTS:
10251 		case DIF_OP_STGAA:
10252 		case DIF_OP_STTAA:
10253 			err += efunc(pc, "illegal dynamic variable store\n");
10254 			break;
10255 
10256 		case DIF_OP_CALL:
10257 			switch (subr) {
10258 			case DIF_SUBR_ALLOCA:
10259 			case DIF_SUBR_BCOPY:
10260 			case DIF_SUBR_COPYIN:
10261 			case DIF_SUBR_COPYINTO:
10262 			case DIF_SUBR_COPYINSTR:
10263 			case DIF_SUBR_HTONS:
10264 			case DIF_SUBR_HTONL:
10265 			case DIF_SUBR_HTONLL:
10266 			case DIF_SUBR_INDEX:
10267 			case DIF_SUBR_INET_NTOA:
10268 			case DIF_SUBR_INET_NTOA6:
10269 			case DIF_SUBR_INET_NTOP:
10270 			case DIF_SUBR_JSON:
10271 			case DIF_SUBR_LLTOSTR:
10272 			case DIF_SUBR_NTOHS:
10273 			case DIF_SUBR_NTOHL:
10274 			case DIF_SUBR_NTOHLL:
10275 			case DIF_SUBR_RINDEX:
10276 			case DIF_SUBR_STRCHR:
10277 			case DIF_SUBR_STRTOLL:
10278 			case DIF_SUBR_STRJOIN:
10279 			case DIF_SUBR_STRRCHR:
10280 			case DIF_SUBR_STRSTR:
10281 				break;
10282 			default:
10283 				err += efunc(pc, "invalid subr %u\n", subr);
10284 			}
10285 			break;
10286 
10287 		default:
10288 			err += efunc(pc, "invalid opcode %u\n",
10289 			    DIF_INSTR_OP(instr));
10290 		}
10291 	}
10292 
10293 	return (err);
10294 }
10295 
10296 /*
10297  * Returns 1 if the expression in the DIF object can be cached on a per-thread
10298  * basis; 0 if not.
10299  */
10300 static int
10301 dtrace_difo_cacheable(dtrace_difo_t *dp)
10302 {
10303 	uint_t i;
10304 
10305 	if (dp == NULL)
10306 		return (0);
10307 
10308 	for (i = 0; i < dp->dtdo_varlen; i++) {
10309 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10310 
10311 		if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
10312 			continue;
10313 
10314 		switch (v->dtdv_id) {
10315 		case DIF_VAR_CURTHREAD:
10316 		case DIF_VAR_PID:
10317 		case DIF_VAR_TID:
10318 		case DIF_VAR_EXECNAME:
10319 		case DIF_VAR_ZONENAME:
10320 			break;
10321 
10322 		default:
10323 			return (0);
10324 		}
10325 	}
10326 
10327 	/*
10328 	 * This DIF object may be cacheable.  Now we need to look for any
10329 	 * array loading instructions, any memory loading instructions, or
10330 	 * any stores to thread-local variables.
10331 	 */
10332 	for (i = 0; i < dp->dtdo_len; i++) {
10333 		uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
10334 
10335 		if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
10336 		    (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
10337 		    (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
10338 		    op == DIF_OP_LDGA || op == DIF_OP_STTS)
10339 			return (0);
10340 	}
10341 
10342 	return (1);
10343 }
10344 
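/*
 * Take a reference on the specified DIF object.  If the object refers to
 * the vtimestamp variable, bump the global count of such references,
 * enabling virtual time accounting on the first one.  Called with the
 * dtrace_lock held.
 */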
10345 static void
10346 dtrace_difo_hold(dtrace_difo_t *dp)
10347 {
10348 	uint_t i;
10349 
10350 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10351 
10352 	dp->dtdo_refcnt++;
10353 	ASSERT(dp->dtdo_refcnt != 0);
10354 
10355 	/*
10356 	 * We need to check this DIF object for references to the variable
10357 	 * DIF_VAR_VTIMESTAMP.
10358 	 */
10359 	for (i = 0; i < dp->dtdo_varlen; i++) {
10360 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10361 
10362 		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10363 			continue;
10364 
10365 		if (dtrace_vtime_references++ == 0)
10366 			dtrace_vtime_enable();
10367 	}
10368 }
10369 
10370 /*
10371  * This routine calculates the dynamic variable chunksize for a given DIF
10372  * object.  The calculation is not fool-proof, and can probably be tricked by
10373  * malicious DIF -- but it works for all compiler-generated DIF.  Because this
10374  * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
10375  * if a dynamic variable size exceeds the chunksize.
10376  */
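/*
 * As a rough worked example of the sizing below: a thread-local store
 * ("stts") contributes two zero-sized keys, so its requirement is
 * sizeof (dtrace_dynvar_t) plus one additional dtrace_key_t plus the
 * variable's own type size, rounded up to a multiple of
 * sizeof (uint64_t).
 */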
10377 static void
10378 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10379 {
10380 	uint64_t sval = 0;
10381 	dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
10382 	const dif_instr_t *text = dp->dtdo_buf;
10383 	uint_t pc, srd = 0;
10384 	uint_t ttop = 0;
10385 	size_t size, ksize;
10386 	uint_t id, i;
10387 
10388 	for (pc = 0; pc < dp->dtdo_len; pc++) {
10389 		dif_instr_t instr = text[pc];
10390 		uint_t op = DIF_INSTR_OP(instr);
10391 		uint_t rd = DIF_INSTR_RD(instr);
10392 		uint_t r1 = DIF_INSTR_R1(instr);
10393 		uint_t nkeys = 0;
10394 		uchar_t scope;
10395 
10396 		dtrace_key_t *key = tupregs;
10397 
10398 		switch (op) {
10399 		case DIF_OP_SETX:
10400 			sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
10401 			srd = rd;
10402 			continue;
10403 
10404 		case DIF_OP_STTS:
10405 			key = &tupregs[DIF_DTR_NREGS];
10406 			key[0].dttk_size = 0;
10407 			key[1].dttk_size = 0;
10408 			nkeys = 2;
10409 			scope = DIFV_SCOPE_THREAD;
10410 			break;
10411 
10412 		case DIF_OP_STGAA:
10413 		case DIF_OP_STTAA:
10414 			nkeys = ttop;
10415 
10416 			if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
10417 				key[nkeys++].dttk_size = 0;
10418 
10419 			key[nkeys++].dttk_size = 0;
10420 
10421 			if (op == DIF_OP_STTAA) {
10422 				scope = DIFV_SCOPE_THREAD;
10423 			} else {
10424 				scope = DIFV_SCOPE_GLOBAL;
10425 			}
10426 
10427 			break;
10428 
10429 		case DIF_OP_PUSHTR:
10430 			if (ttop == DIF_DTR_NREGS)
10431 				return;
10432 
10433 			if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
10434 				/*
10435 				 * If the register for the size of the "pushtr"
10436 				 * is %r0 (or the value is 0) and the type is
10437 				 * a string, we'll use the system-wide default
10438 				 * string size.
10439 				 */
10440 				tupregs[ttop++].dttk_size =
10441 				    dtrace_strsize_default;
10442 			} else {
10443 				if (srd == 0)
10444 					return;
10445 
10446 				if (sval > LONG_MAX)
10447 					return;
10448 
10449 				tupregs[ttop++].dttk_size = sval;
10450 			}
10451 
10452 			break;
10453 
10454 		case DIF_OP_PUSHTV:
10455 			if (ttop == DIF_DTR_NREGS)
10456 				return;
10457 
10458 			tupregs[ttop++].dttk_size = 0;
10459 			break;
10460 
10461 		case DIF_OP_FLUSHTS:
10462 			ttop = 0;
10463 			break;
10464 
10465 		case DIF_OP_POPTS:
10466 			if (ttop != 0)
10467 				ttop--;
10468 			break;
10469 		}
10470 
10471 		sval = 0;
10472 		srd = 0;
10473 
10474 		if (nkeys == 0)
10475 			continue;
10476 
10477 		/*
10478 		 * We have a dynamic variable allocation; calculate its size.
10479 		 */
10480 		for (ksize = 0, i = 0; i < nkeys; i++)
10481 			ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
10482 
10483 		size = sizeof (dtrace_dynvar_t);
10484 		size += sizeof (dtrace_key_t) * (nkeys - 1);
10485 		size += ksize;
10486 
10487 		/*
10488 		 * Now we need to determine the size of the stored data.
10489 		 */
10490 		id = DIF_INSTR_VAR(instr);
10491 
10492 		for (i = 0; i < dp->dtdo_varlen; i++) {
10493 			dtrace_difv_t *v = &dp->dtdo_vartab[i];
10494 
10495 			if (v->dtdv_id == id && v->dtdv_scope == scope) {
10496 				size += v->dtdv_type.dtdt_size;
10497 				break;
10498 			}
10499 		}
10500 
10501 		if (i == dp->dtdo_varlen)
10502 			return;
10503 
10504 		/*
10505 		 * We have the size.  If this is larger than the chunk size
10506 		 * for our dynamic variable state, reset the chunk size.
10507 		 */
10508 		size = P2ROUNDUP(size, sizeof (uint64_t));
10509 
10510 		/*
10511 		 * Before setting the chunk size, check that we're not going
10512 		 * to set it to a negative value...
10513 		 */
10514 		if (size > LONG_MAX)
10515 			return;
10516 
10517 		/*
10518 		 * ...and make certain that we didn't badly overflow.
10519 		 */
10520 		if (size < ksize || size < sizeof (dtrace_dynvar_t))
10521 			return;
10522 
10523 		if (size > vstate->dtvs_dynvars.dtds_chunksize)
10524 			vstate->dtvs_dynvars.dtds_chunksize = size;
10525 	}
10526 }
10527 
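/*
 * Bind the specified DIF object to the given variable state: grow the
 * per-scope variable tables as needed, allocate backing storage for
 * statically-sized global and clause-local variables, update the dynamic
 * variable chunk size, and take a hold on the object.  Called with the
 * dtrace_lock held.
 */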
10528 static void
10529 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10530 {
10531 	int oldsvars, osz, nsz, otlocals, ntlocals;
10532 	uint_t i, id;
10533 
10534 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10535 	ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10536 
10537 	for (i = 0; i < dp->dtdo_varlen; i++) {
10538 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10539 		dtrace_statvar_t *svar;
10540 		dtrace_statvar_t ***svarp = NULL;
10541 		size_t dsize = 0;
10542 		uint8_t scope = v->dtdv_scope;
10543 		int *np = (int *)NULL;
10544 
10545 		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10546 			continue;
10547 
10548 		id -= DIF_VAR_OTHER_UBASE;
10549 
10550 		switch (scope) {
10551 		case DIFV_SCOPE_THREAD:
10552 			while (id >= (uint_t)(otlocals = vstate->dtvs_ntlocals)) {
10553 				dtrace_difv_t *tlocals;
10554 
10555 				if ((ntlocals = (otlocals << 1)) == 0)
10556 					ntlocals = 1;
10557 
10558 				osz = otlocals * sizeof (dtrace_difv_t);
10559 				nsz = ntlocals * sizeof (dtrace_difv_t);
10560 
10561 				tlocals = kmem_zalloc(nsz, KM_SLEEP);
10562 
10563 				if (osz != 0) {
10564 					bcopy(vstate->dtvs_tlocals,
10565 					    tlocals, osz);
10566 					kmem_free(vstate->dtvs_tlocals, osz);
10567 				}
10568 
10569 				vstate->dtvs_tlocals = tlocals;
10570 				vstate->dtvs_ntlocals = ntlocals;
10571 			}
10572 
10573 			vstate->dtvs_tlocals[id] = *v;
10574 			continue;
10575 
10576 		case DIFV_SCOPE_LOCAL:
10577 			np = &vstate->dtvs_nlocals;
10578 			svarp = &vstate->dtvs_locals;
10579 
10580 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10581 				dsize = (int)NCPU * (v->dtdv_type.dtdt_size +
10582 				    sizeof (uint64_t));
10583 			else
10584 				dsize = (int)NCPU * sizeof (uint64_t);
10585 
10586 			break;
10587 
10588 		case DIFV_SCOPE_GLOBAL:
10589 			np = &vstate->dtvs_nglobals;
10590 			svarp = &vstate->dtvs_globals;
10591 
10592 			if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10593 				dsize = v->dtdv_type.dtdt_size +
10594 				    sizeof (uint64_t);
10595 
10596 			break;
10597 
10598 		default:
10599 			ASSERT(0);
10600 		}
10601 
10602 		while (id >= (uint_t)(oldsvars = *np)) {
10603 			dtrace_statvar_t **statics;
10604 			int newsvars, oldsize, newsize;
10605 
10606 			if ((newsvars = (oldsvars << 1)) == 0)
10607 				newsvars = 1;
10608 
10609 			oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10610 			newsize = newsvars * sizeof (dtrace_statvar_t *);
10611 
10612 			statics = kmem_zalloc(newsize, KM_SLEEP);
10613 
10614 			if (oldsize != 0) {
10615 				bcopy(*svarp, statics, oldsize);
10616 				kmem_free(*svarp, oldsize);
10617 			}
10618 
10619 			*svarp = statics;
10620 			*np = newsvars;
10621 		}
10622 
10623 		if ((svar = (*svarp)[id]) == NULL) {
10624 			svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10625 			svar->dtsv_var = *v;
10626 
10627 			if ((svar->dtsv_size = dsize) != 0) {
10628 				svar->dtsv_data = (uint64_t)(uintptr_t)
10629 				    kmem_zalloc(dsize, KM_SLEEP);
10630 			}
10631 
10632 			(*svarp)[id] = svar;
10633 		}
10634 
10635 		svar->dtsv_refcnt++;
10636 	}
10637 
10638 	dtrace_difo_chunksize(dp, vstate);
10639 	dtrace_difo_hold(dp);
10640 }
10641 
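/*
 * Create a deep copy of the specified DIF object -- instruction buffer,
 * string table, integer table and variable table -- and initialize the
 * copy against the given variable state.
 */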
10642 static dtrace_difo_t *
10643 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10644 {
10645 	dtrace_difo_t *new;
10646 	size_t sz;
10647 
10648 	ASSERT(dp->dtdo_buf != NULL);
10649 	ASSERT(dp->dtdo_refcnt != 0);
10650 
10651 	new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10652 
10653 	ASSERT(dp->dtdo_buf != NULL);
10654 	sz = dp->dtdo_len * sizeof (dif_instr_t);
10655 	new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10656 	bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10657 	new->dtdo_len = dp->dtdo_len;
10658 
10659 	if (dp->dtdo_strtab != NULL) {
10660 		ASSERT(dp->dtdo_strlen != 0);
10661 		new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10662 		bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10663 		new->dtdo_strlen = dp->dtdo_strlen;
10664 	}
10665 
10666 	if (dp->dtdo_inttab != NULL) {
10667 		ASSERT(dp->dtdo_intlen != 0);
10668 		sz = dp->dtdo_intlen * sizeof (uint64_t);
10669 		new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
10670 		bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
10671 		new->dtdo_intlen = dp->dtdo_intlen;
10672 	}
10673 
10674 	if (dp->dtdo_vartab != NULL) {
10675 		ASSERT(dp->dtdo_varlen != 0);
10676 		sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
10677 		new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
10678 		bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
10679 		new->dtdo_varlen = dp->dtdo_varlen;
10680 	}
10681 
10682 	dtrace_difo_init(new, vstate);
10683 	return (new);
10684 }
10685 
10686 static void
10687 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10688 {
10689 	uint_t i;
10690 
10691 	ASSERT(dp->dtdo_refcnt == 0);
10692 
10693 	for (i = 0; i < dp->dtdo_varlen; i++) {
10694 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10695 		dtrace_statvar_t *svar;
10696 		dtrace_statvar_t **svarp = NULL;
10697 		uint_t id;
10698 		uint8_t scope = v->dtdv_scope;
10699 		int *np = NULL;
10700 
10701 		switch (scope) {
10702 		case DIFV_SCOPE_THREAD:
10703 			continue;
10704 
10705 		case DIFV_SCOPE_LOCAL:
10706 			np = &vstate->dtvs_nlocals;
10707 			svarp = vstate->dtvs_locals;
10708 			break;
10709 
10710 		case DIFV_SCOPE_GLOBAL:
10711 			np = &vstate->dtvs_nglobals;
10712 			svarp = vstate->dtvs_globals;
10713 			break;
10714 
10715 		default:
10716 			ASSERT(0);
10717 		}
10718 
10719 		if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10720 			continue;
10721 
10722 		id -= DIF_VAR_OTHER_UBASE;
10723 
10724 		ASSERT(id < (uint_t)*np);
10725 
10726 		svar = svarp[id];
10727 		ASSERT(svar != NULL);
10728 		ASSERT(svar->dtsv_refcnt > 0);
10729 
10730 		if (--svar->dtsv_refcnt > 0)
10731 			continue;
10732 
10733 		if (svar->dtsv_size != 0) {
10734 			ASSERT(svar->dtsv_data != 0);
10735 			kmem_free((void *)(uintptr_t)svar->dtsv_data,
10736 			    svar->dtsv_size);
10737 		}
10738 
10739 		kmem_free(svar, sizeof (dtrace_statvar_t));
10740 		svarp[id] = NULL;
10741 	}
10742 
10743 	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10744 	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10745 	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10746 	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10747 
10748 	kmem_free(dp, sizeof (dtrace_difo_t));
10749 }
10750 
10751 static void
10752 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10753 {
10754 	uint_t i;
10755 
10756 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10757 	ASSERT(dp->dtdo_refcnt != 0);
10758 
10759 	for (i = 0; i < dp->dtdo_varlen; i++) {
10760 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
10761 
10762 		if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10763 			continue;
10764 
10765 		ASSERT(dtrace_vtime_references > 0);
10766 		if (--dtrace_vtime_references == 0)
10767 			dtrace_vtime_disable();
10768 	}
10769 
10770 	if (--dp->dtdo_refcnt == 0)
10771 		dtrace_difo_destroy(dp, vstate);
10772 }
10773 
10774 /*
10775  * DTrace Format Functions
10776  */
10777 
10778 static dtrace_format_t*
10779 dtrace_format_new(char *str)
10780 {
10781 	dtrace_format_t *fmt = NULL;
10782 	size_t bufsize = strlen(str) + 1;
10783 
10784 	fmt = kmem_zalloc(sizeof(*fmt) + bufsize, KM_SLEEP);
10785 
10786 	fmt->dtf_refcount = 1;
10787 	(void) strlcpy(fmt->dtf_str, str, bufsize);
10788 
10789 	return fmt;
10790 }
10791 
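/*
 * Add a format string to the consumer state, returning its one-based index;
 * a return value of zero denotes failure.  A format identical to an existing
 * one is shared by bumping its reference count rather than duplicated.
 */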
10792 static uint16_t
10793 dtrace_format_add(dtrace_state_t *state, char *str)
10794 {
10795 	dtrace_format_t **new;
10796 	uint16_t ndx;
10797 
10798 	for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10799 		if (state->dts_formats[ndx] == NULL) {
10800 			state->dts_formats[ndx] = dtrace_format_new(str);
10801 			return (ndx + 1);
10802 		}
10803 		else if (strcmp(state->dts_formats[ndx]->dtf_str, str) == 0) {
10804 			VERIFY(state->dts_formats[ndx]->dtf_refcount < UINT64_MAX);
10805 			state->dts_formats[ndx]->dtf_refcount++;
10806 			return (ndx + 1);
10807 		}
10808 	}
10809 
10810 	if (state->dts_nformats == USHRT_MAX) {
10811 		/*
10812 		 * This is only likely if a denial-of-service attack is being
10813 		 * attempted.  As such, it's okay to fail silently here.
10814 		 */
10815 		return (0);
10816 	}
10817 
10818 	/*
10819 	 * For simplicity, we always resize the formats array to be exactly the
10820 	 * number of formats.
10821 	 */
10822 	ndx = state->dts_nformats++;
10823 	new = kmem_alloc((ndx + 1) * sizeof (*state->dts_formats), KM_SLEEP);
10824 
10825 	if (state->dts_formats != NULL) {
10826 		ASSERT(ndx != 0);
10827 		bcopy(state->dts_formats, new, ndx * sizeof (*state->dts_formats));
10828 		kmem_free(state->dts_formats, ndx * sizeof (*state->dts_formats));
10829 	}
10830 
10831 	state->dts_formats = new;
10832 	state->dts_formats[ndx] = dtrace_format_new(str);
10833 
10834 	return (ndx + 1);
10835 }
10836 
10837 static void
10838 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
10839 {
10840 	dtrace_format_t *fmt;
10841 
10842 	ASSERT(state->dts_formats != NULL);
10843 	ASSERT(format <= state->dts_nformats);
10844 
10845 	fmt = state->dts_formats[format - 1];
10846 
10847 	ASSERT(fmt != NULL);
10848 	VERIFY(fmt->dtf_refcount > 0);
10849 
10850 	fmt->dtf_refcount--;
10851 
10852 	if (fmt->dtf_refcount == 0) {
10853 		kmem_free(fmt, DTRACE_FORMAT_SIZE(fmt));
10854 		state->dts_formats[format - 1] = NULL;
10855 	}
10856 }
10857 
10858 static void
10859 dtrace_format_destroy(dtrace_state_t *state)
10860 {
10861 	int i;
10862 
10863 	if (state->dts_nformats == 0) {
10864 		ASSERT(state->dts_formats == NULL);
10865 		return;
10866 	}
10867 
10868 	ASSERT(state->dts_formats != NULL);
10869 
10870 	for (i = 0; i < state->dts_nformats; i++) {
10871 		dtrace_format_t *fmt = state->dts_formats[i];
10872 
10873 		if (fmt == NULL)
10874 			continue;
10875 
10876 		kmem_free(fmt, DTRACE_FORMAT_SIZE(fmt));
10877 	}
10878 
10879 	kmem_free(state->dts_formats, state->dts_nformats * sizeof (*state->dts_formats));
10880 	state->dts_nformats = 0;
10881 	state->dts_formats = NULL;
10882 }
10883 
10884 /*
10885  * DTrace Predicate Functions
10886  */
10887 static dtrace_predicate_t *
10888 dtrace_predicate_create(dtrace_difo_t *dp)
10889 {
10890 	dtrace_predicate_t *pred;
10891 
10892 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10893 	ASSERT(dp->dtdo_refcnt != 0);
10894 
10895 	pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10896 	pred->dtp_difo = dp;
10897 	pred->dtp_refcnt = 1;
10898 
10899 	if (!dtrace_difo_cacheable(dp))
10900 		return (pred);
10901 
10902 	if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10903 		/*
10904 		 * This is only theoretically possible -- we have had 2^32
10905 		 * cacheable predicates on this machine.  We cannot allow any
10906 		 * more predicates to become cacheable:  as unlikely as it is,
10907 		 * there may be a thread caching a (now stale) predicate cache
10908 		 * ID. (N.B.: the temptation is being successfully resisted to
10909 		 * have this cmn_err() "Holy shit -- we executed this code!")
10910 		 */
10911 		return (pred);
10912 	}
10913 
10914 	pred->dtp_cacheid = dtrace_predcache_id++;
10915 
10916 	return (pred);
10917 }
10918 
10919 static void
10920 dtrace_predicate_hold(dtrace_predicate_t *pred)
10921 {
10922 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10923 	ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
10924 	ASSERT(pred->dtp_refcnt > 0);
10925 
10926 	pred->dtp_refcnt++;
10927 }
10928 
10929 static void
10930 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
10931 {
10932 	dtrace_difo_t *dp = pred->dtp_difo;
10933 #pragma unused(dp) /* __APPLE__ */
10934 
10935 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
10936 	ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
10937 	ASSERT(pred->dtp_refcnt > 0);
10938 
10939 	if (--pred->dtp_refcnt == 0) {
10940 		dtrace_difo_release(pred->dtp_difo, vstate);
10941 		kmem_free(pred, sizeof (dtrace_predicate_t));
10942 	}
10943 }
10944 
10945 /*
10946  * DTrace Action Description Functions
10947  */
10948 static dtrace_actdesc_t *
10949 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
10950     uint64_t uarg, uint64_t arg)
10951 {
10952 	dtrace_actdesc_t *act;
10953 
10954 	ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != 0 &&
10955 	    arg >= KERNELBASE) || (arg == 0 && kind == DTRACEACT_PRINTA));
10956 
10957 	act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
10958 	act->dtad_kind = kind;
10959 	act->dtad_ntuple = ntuple;
10960 	act->dtad_uarg = uarg;
10961 	act->dtad_arg = arg;
10962 	act->dtad_refcnt = 1;
10963 
10964 	return (act);
10965 }
10966 
10967 static void
10968 dtrace_actdesc_hold(dtrace_actdesc_t *act)
10969 {
10970 	ASSERT(act->dtad_refcnt >= 1);
10971 	act->dtad_refcnt++;
10972 }
10973 
10974 static void
10975 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
10976 {
10977 	dtrace_actkind_t kind = act->dtad_kind;
10978 	dtrace_difo_t *dp;
10979 
10980 	ASSERT(act->dtad_refcnt >= 1);
10981 
10982 	if (--act->dtad_refcnt != 0)
10983 		return;
10984 
10985 	if ((dp = act->dtad_difo) != NULL)
10986 		dtrace_difo_release(dp, vstate);
10987 
10988 	if (DTRACEACT_ISPRINTFLIKE(kind)) {
10989 		char *str = (char *)(uintptr_t)act->dtad_arg;
10990 
10991 		ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
10992 		    (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
10993 
10994 		if (str != NULL)
10995 			kmem_free(str, strlen(str) + 1);
10996 	}
10997 
10998 	kmem_free(act, sizeof (dtrace_actdesc_t));
10999 }
11000 
11001 /*
11002  * DTrace ECB Functions
11003  */
11004 static dtrace_ecb_t *
11005 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
11006 {
11007 	dtrace_ecb_t *ecb;
11008 	dtrace_epid_t epid;
11009 
11010 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11011 
11012 	ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
11013 	ecb->dte_predicate = NULL;
11014 	ecb->dte_probe = probe;
11015 
11016 	/*
11017 	 * The default size is the size of the default action: recording
11018 	 * the header.
11019 	 */
11020 	ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
11021 	ecb->dte_alignment = sizeof (dtrace_epid_t);
11022 
11023 	epid = state->dts_epid++;
11024 
11025 	if (epid - 1 >= (dtrace_epid_t)state->dts_necbs) {
11026 		dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
11027 		int necbs = state->dts_necbs << 1;
11028 
11029 		ASSERT(epid == (dtrace_epid_t)state->dts_necbs + 1);
11030 
11031 		if (necbs == 0) {
11032 			ASSERT(oecbs == NULL);
11033 			necbs = 1;
11034 		}
11035 
11036 		ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
11037 
11038 		if (oecbs != NULL)
11039 			bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
11040 
11041 		dtrace_membar_producer();
11042 		state->dts_ecbs = ecbs;
11043 
11044 		if (oecbs != NULL) {
11045 			/*
11046 			 * If this state is active, we must dtrace_sync()
11047 			 * before we can free the old dts_ecbs array:  we're
11048 			 * coming in hot, and there may be active ring
11049 			 * buffer processing (which indexes into the dts_ecbs
11050 			 * array) on another CPU.
11051 			 */
11052 			if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
11053 				dtrace_sync();
11054 
11055 			kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
11056 		}
11057 
11058 		dtrace_membar_producer();
11059 		state->dts_necbs = necbs;
11060 	}
11061 
11062 	ecb->dte_state = state;
11063 
11064 	ASSERT(state->dts_ecbs[epid - 1] == NULL);
11065 	dtrace_membar_producer();
11066 	state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
11067 
11068 	return (ecb);
11069 }
11070 
11071 static int
11072 dtrace_ecb_enable(dtrace_ecb_t *ecb)
11073 {
11074 	dtrace_probe_t *probe = ecb->dte_probe;
11075 
11076 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
11077 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11078 	ASSERT(ecb->dte_next == NULL);
11079 
11080 	if (probe == NULL) {
11081 		/*
11082 		 * This is the NULL probe -- there's nothing to do.
11083 		 */
11084 		return (0);
11085 	}
11086 
11087 	probe->dtpr_provider->dtpv_ecb_count++;
11088 	if (probe->dtpr_ecb == NULL) {
11089 		dtrace_provider_t *prov = probe->dtpr_provider;
11090 
11091 		/*
11092 		 * We're the first ECB on this probe.
11093 		 */
11094 		probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
11095 
11096 		if (ecb->dte_predicate != NULL)
11097 			probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
11098 
11099 		return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
11100 		    probe->dtpr_id, probe->dtpr_arg));
11101 	} else {
11102 		/*
11103 		 * This probe is already active.  Swing the last pointer to
11104 		 * point to the new ECB, and issue a dtrace_sync() to assure
11105 		 * that all CPUs have seen the change.
11106 		 */
11107 		ASSERT(probe->dtpr_ecb_last != NULL);
11108 		probe->dtpr_ecb_last->dte_next = ecb;
11109 		probe->dtpr_ecb_last = ecb;
11110 		probe->dtpr_predcache = 0;
11111 
11112 		dtrace_sync();
11113 		return (0);
11114 	}
11115 }
11116 
11117 static int
11118 dtrace_ecb_resize(dtrace_ecb_t *ecb)
11119 {
11120 	dtrace_action_t *act;
11121 	uint32_t curneeded = UINT32_MAX;
11122 	uint32_t aggbase = UINT32_MAX;
11123 
11124 	/*
11125 	 * If we record anything, we always record the dtrace_rechdr_t.  (And
11126 	 * we always record it first.)
11127 	 */
11128 	ecb->dte_size = sizeof (dtrace_rechdr_t);
11129 	ecb->dte_alignment = sizeof (dtrace_epid_t);
11130 
11131 	for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11132 		dtrace_recdesc_t *rec = &act->dta_rec;
11133 		ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
11134 
11135 		ecb->dte_alignment = MAX(ecb->dte_alignment, rec->dtrd_alignment);
11136 
11137 		if (DTRACEACT_ISAGG(act->dta_kind)) {
11138 			dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11139 
11140 			ASSERT(rec->dtrd_size != 0);
11141 			ASSERT(agg->dtag_first != NULL);
11142 			ASSERT(act->dta_prev->dta_intuple);
11143 			ASSERT(aggbase != UINT32_MAX);
11144 			ASSERT(curneeded != UINT32_MAX);
11145 
11146 			agg->dtag_base = aggbase;
11147 			curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11148 			rec->dtrd_offset = curneeded;
11149 			if (curneeded + rec->dtrd_size < curneeded)
11150 				return (EINVAL);
11151 			curneeded += rec->dtrd_size;
11152 			ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
11153 
11154 			aggbase = UINT32_MAX;
11155 			curneeded = UINT32_MAX;
11156 		} else if (act->dta_intuple) {
11157 			if (curneeded == UINT32_MAX) {
11158 				/*
11159 				 * This is the first record in a tuple.  Align
11160 				 * curneeded to be at offset 4 in an 8-byte
11161 				 * aligned block.
11162 				 */
11163 				ASSERT(act->dta_prev == NULL || !act->dta_prev->dta_intuple);
11164 				ASSERT(aggbase == UINT32_MAX);
11165 
11166 				curneeded = P2PHASEUP(ecb->dte_size,
11167 				    sizeof (uint64_t), sizeof (dtrace_aggid_t));
11168 
11169 				aggbase = curneeded - sizeof (dtrace_aggid_t);
11170 				ASSERT(IS_P2ALIGNED(aggbase,
11171 				    sizeof (uint64_t)));
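				/*
				 * For example (assuming a 4-byte
				 * dtrace_aggid_t):  a dte_size of 20 leaves
				 * curneeded at 20 (already at offset 4 within
				 * an 8-byte block) and aggbase at 16, while a
				 * dte_size of 22 yields curneeded = 28 and
				 * aggbase = 24.
				 */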
11172 			}
11173 
11174 			curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11175 			rec->dtrd_offset = curneeded;
11176 			curneeded += rec->dtrd_size;
11177 			if (curneeded + rec->dtrd_size < curneeded)
11178 				return (EINVAL);
11179 		} else {
11180 			/* tuples must be followed by an aggregation */
11181 			ASSERT(act->dta_prev == NULL || !act->dta_prev->dta_intuple);
11182 			ecb->dte_size = P2ROUNDUP(ecb->dte_size, rec->dtrd_alignment);
11183 			rec->dtrd_offset = ecb->dte_size;
11184 			if (ecb->dte_size + rec->dtrd_size < ecb->dte_size)
11185 				return (EINVAL);
11186 			ecb->dte_size += rec->dtrd_size;
11187 			ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
11188 		}
11189 	}
11190 
11191 	if ((act = ecb->dte_action) != NULL &&
11192 	    !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
11193 	    ecb->dte_size == sizeof (dtrace_rechdr_t)) {
11194 		/*
11195 		 * If the size is still sizeof (dtrace_rechdr_t), then all
11196 		 * actions store no data; set the size to 0.
11197 		 */
11198 		ecb->dte_size = 0;
11199 	}
11200 
11201 	ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
11202 	ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
11203 	ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, ecb->dte_needed);
11204 	return (0);
11205 }
11206 
11207 static dtrace_action_t *
11208 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11209 {
11210 	dtrace_aggregation_t *agg;
11211 	size_t size = sizeof (uint64_t);
11212 	int ntuple = desc->dtad_ntuple;
11213 	dtrace_action_t *act;
11214 	dtrace_recdesc_t *frec;
11215 	dtrace_aggid_t aggid;
11216 	dtrace_state_t *state = ecb->dte_state;
11217 
11218 	agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
11219 	agg->dtag_ecb = ecb;
11220 
11221 	ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
11222 
11223 	switch (desc->dtad_kind) {
11224 	case DTRACEAGG_MIN:
11225 		agg->dtag_initial = INT64_MAX;
11226 		agg->dtag_aggregate = dtrace_aggregate_min;
11227 		break;
11228 
11229 	case DTRACEAGG_MAX:
11230 		agg->dtag_initial = INT64_MIN;
11231 		agg->dtag_aggregate = dtrace_aggregate_max;
11232 		break;
11233 
11234 	case DTRACEAGG_COUNT:
11235 		agg->dtag_aggregate = dtrace_aggregate_count;
11236 		break;
11237 
11238 	case DTRACEAGG_QUANTIZE:
11239 		agg->dtag_aggregate = dtrace_aggregate_quantize;
11240 		size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
11241 		    sizeof (uint64_t);
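		/*
		 * That works out to 127 buckets:  one for zero plus one for
		 * each positive and negative power-of-two magnitude.
		 */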
11242 		break;
11243 
11244 	case DTRACEAGG_LQUANTIZE: {
11245 		uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
11246 		uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
11247 
11248 		agg->dtag_initial = desc->dtad_arg;
11249 		agg->dtag_aggregate = dtrace_aggregate_lquantize;
11250 
11251 		if (step == 0 || levels == 0)
11252 			goto err;
11253 
11254 		size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
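		/*
		 * One slot holds the encoded lquantize() argument itself; the
		 * remaining levels + 2 slots are the underflow bucket, the
		 * in-range buckets, and the overflow bucket.
		 */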
11255 		break;
11256 	}
11257 
11258 	case DTRACEAGG_LLQUANTIZE: {
11259 		uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
11260 		uint16_t low    = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
11261 		uint16_t high   = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
11262 		uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
11263 		int64_t v;
11264 
11265 		agg->dtag_initial = desc->dtad_arg;
11266 		agg->dtag_aggregate = dtrace_aggregate_llquantize;
11267 
11268 		if (factor < 2 || low >= high || nsteps < factor)
11269 			goto err;
11270 
11271 		/*
11272 		 * Now check that the number of steps evenly divides a power
11273 		 * of the factor.  (This assures both integer bucket size and
11274 		 * linearity within each magnitude.)
11275 		 */
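		/*
		 * For instance (illustrative values only):  factor = 10 and
		 * nsteps = 20 leave the loop below at v = 100; since
		 * 100 % 20 == 0 and 20 % 10 == 0 the parameters are accepted,
		 * whereas nsteps = 15 would be rejected (100 % 15 != 0).
		 */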
11276 		for (v = factor; v < nsteps; v *= factor)
11277 			continue;
11278 
11279 		if ((v % nsteps) || (nsteps % factor))
11280 			goto err;
11281 
11282 		size = (dtrace_aggregate_llquantize_bucket(factor, low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
11283 		break;
11284 	}
11285 
11286 	case DTRACEAGG_AVG:
11287 		agg->dtag_aggregate = dtrace_aggregate_avg;
11288 		size = sizeof (uint64_t) * 2;
11289 		break;
11290 
11291 	case DTRACEAGG_STDDEV:
11292 		agg->dtag_aggregate = dtrace_aggregate_stddev;
11293 		size = sizeof (uint64_t) * 4;
11294 		break;
11295 
11296 	case DTRACEAGG_SUM:
11297 		agg->dtag_aggregate = dtrace_aggregate_sum;
11298 		break;
11299 
11300 	default:
11301 		goto err;
11302 	}
11303 
11304 	agg->dtag_action.dta_rec.dtrd_size = size;
11305 
11306 	if (ntuple == 0)
11307 		goto err;
11308 
11309 	/*
11310 	 * We must make sure that we have enough actions for the n-tuple.
11311 	 */
11312 	for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
11313 		if (DTRACEACT_ISAGG(act->dta_kind))
11314 			break;
11315 
11316 		if (--ntuple == 0) {
11317 			/*
11318 			 * This is the action with which our n-tuple begins.
11319 			 */
11320 			agg->dtag_first = act;
11321 			goto success;
11322 		}
11323 	}
11324 
11325 	/*
11326 	 * This n-tuple is short by ntuple elements.  Return failure.
11327 	 */
11328 	ASSERT(ntuple != 0);
11329 err:
11330 	kmem_free(agg, sizeof (dtrace_aggregation_t));
11331 	return (NULL);
11332 
11333 success:
11334 	/*
11335 	 * If the last action in the tuple has a size of zero, it's actually
11336 	 * an expression argument for the aggregating action.
11337 	 */
11338 	ASSERT(ecb->dte_action_last != NULL);
11339 	act = ecb->dte_action_last;
11340 
11341 	if (act->dta_kind == DTRACEACT_DIFEXPR) {
11342 		ASSERT(act->dta_difo != NULL);
11343 
11344 		if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
11345 			agg->dtag_hasarg = 1;
11346 	}
11347 
11348 	/*
11349 	 * We need to allocate an id for this aggregation.
11350 	 */
11351 	aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
11352 	    VM_BESTFIT | VM_SLEEP);
11353 
11354 	if (aggid - 1 >= (dtrace_aggid_t)state->dts_naggregations) {
11355 		dtrace_aggregation_t **oaggs = state->dts_aggregations;
11356 		dtrace_aggregation_t **aggs;
11357 		int naggs = state->dts_naggregations << 1;
11358 		int onaggs = state->dts_naggregations;
11359 
11360 		ASSERT(aggid == (dtrace_aggid_t)state->dts_naggregations + 1);
11361 
11362 		if (naggs == 0) {
11363 			ASSERT(oaggs == NULL);
11364 			naggs = 1;
11365 		}
11366 
11367 		aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
11368 
11369 		if (oaggs != NULL) {
11370 			bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
11371 			kmem_free(oaggs, onaggs * sizeof (*aggs));
11372 		}
11373 
11374 		state->dts_aggregations = aggs;
11375 		state->dts_naggregations = naggs;
11376 	}
11377 
11378 	ASSERT(state->dts_aggregations[aggid - 1] == NULL);
11379 	state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
11380 
11381 	frec = &agg->dtag_first->dta_rec;
11382 	if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
11383 		frec->dtrd_alignment = sizeof (dtrace_aggid_t);
11384 
11385 	for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
11386 		ASSERT(!act->dta_intuple);
11387 		act->dta_intuple = 1;
11388 	}
11389 
11390 	return (&agg->dtag_action);
11391 }
11392 
11393 static void
11394 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
11395 {
11396 	dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11397 	dtrace_state_t *state = ecb->dte_state;
11398 	dtrace_aggid_t aggid = agg->dtag_id;
11399 
11400 	ASSERT(DTRACEACT_ISAGG(act->dta_kind));
11401 	vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
11402 
11403 	ASSERT(state->dts_aggregations[aggid - 1] == agg);
11404 	state->dts_aggregations[aggid - 1] = NULL;
11405 
11406 	kmem_free(agg, sizeof (dtrace_aggregation_t));
11407 }
11408 
11409 static int
11410 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11411 {
11412 	dtrace_action_t *action, *last;
11413 	dtrace_difo_t *dp = desc->dtad_difo;
11414 	uint32_t size = 0, align = sizeof (uint8_t), mask;
11415 	uint16_t format = 0;
11416 	dtrace_recdesc_t *rec;
11417 	dtrace_state_t *state = ecb->dte_state;
11418 	dtrace_optval_t *opt = state->dts_options;
11419 	dtrace_optval_t nframes = 0, strsize;
11420 	uint64_t arg = desc->dtad_arg;
11421 
11422 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11423 	ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
11424 
11425 	if (DTRACEACT_ISAGG(desc->dtad_kind)) {
11426 		/*
11427 		 * If this is an aggregating action, there must be neither
11428 		 * a speculate nor a commit on the action chain.
11429 		 */
11430 		dtrace_action_t *act;
11431 
11432 		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11433 			if (act->dta_kind == DTRACEACT_COMMIT)
11434 				return (EINVAL);
11435 
11436 			if (act->dta_kind == DTRACEACT_SPECULATE)
11437 				return (EINVAL);
11438 		}
11439 
11440 		action = dtrace_ecb_aggregation_create(ecb, desc);
11441 
11442 		if (action == NULL)
11443 			return (EINVAL);
11444 	} else {
11445 		if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
11446 		    (desc->dtad_kind == DTRACEACT_DIFEXPR &&
11447 		    dp != NULL && dp->dtdo_destructive)) {
11448 			state->dts_destructive = 1;
11449 		}
11450 
11451 		switch (desc->dtad_kind) {
11452 		case DTRACEACT_PRINTF:
11453 		case DTRACEACT_PRINTA:
11454 		case DTRACEACT_SYSTEM:
11455 		case DTRACEACT_FREOPEN:
11456 		case DTRACEACT_DIFEXPR:
11457 			/*
11458 			 * We know that our arg is a string -- turn it into a
11459 			 * format.
11460 			 */
11461 			if (arg == 0) {
11462 				ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
11463 				       desc->dtad_kind == DTRACEACT_DIFEXPR);
11464 				format = 0;
11465 			} else {
11466 				ASSERT(arg != 0);
11467 				ASSERT(arg > KERNELBASE);
11468 				format = dtrace_format_add(state,
11469 				    (char *)(uintptr_t)arg);
11470 			}
11471 
11472 			OS_FALLTHROUGH;
11473 		case DTRACEACT_LIBACT:
11474 		case DTRACEACT_TRACEMEM:
11475 		case DTRACEACT_TRACEMEM_DYNSIZE:
11476 		case DTRACEACT_APPLEBINARY:	/* __APPLE__ */
11477 			if (dp == NULL)
11478 				return (EINVAL);
11479 
11480 			if ((size = dp->dtdo_rtype.dtdt_size) != 0)
11481 				break;
11482 
11483 			if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
11484 				if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11485 					return (EINVAL);
11486 
11487 				size = opt[DTRACEOPT_STRSIZE];
11488 			}
11489 
11490 			break;
11491 
11492 		case DTRACEACT_STACK:
11493 			if ((nframes = arg) == 0) {
11494 				nframes = opt[DTRACEOPT_STACKFRAMES];
11495 				ASSERT(nframes > 0);
11496 				arg = nframes;
11497 			}
11498 
11499 			size = nframes * sizeof (pc_t);
11500 			break;
11501 
11502 		case DTRACEACT_JSTACK:
11503 			if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
11504 				strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
11505 
11506 			if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
11507 				nframes = opt[DTRACEOPT_JSTACKFRAMES];
11508 
11509 			arg = DTRACE_USTACK_ARG(nframes, strsize);
11510 
11511 			OS_FALLTHROUGH;
11512 		case DTRACEACT_USTACK:
11513 			if (desc->dtad_kind != DTRACEACT_JSTACK &&
11514 			    (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
11515 				strsize = DTRACE_USTACK_STRSIZE(arg);
11516 				nframes = opt[DTRACEOPT_USTACKFRAMES];
11517 				ASSERT(nframes > 0);
11518 				arg = DTRACE_USTACK_ARG(nframes, strsize);
11519 			}
11520 
11521 			/*
11522 			 * Save a slot for the pid.
11523 			 */
11524 			size = (nframes + 1) * sizeof (uint64_t);
11525 			size += DTRACE_USTACK_STRSIZE(arg);
11526 			size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
11527 
11528 			break;
11529 
11530 		case DTRACEACT_SYM:
11531 		case DTRACEACT_MOD:
11532 			if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11533 			    sizeof (uint64_t)) ||
11534 			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11535 				return (EINVAL);
11536 			break;
11537 
11538 		case DTRACEACT_USYM:
11539 		case DTRACEACT_UMOD:
11540 		case DTRACEACT_UADDR:
11541 			if (dp == NULL ||
11542 			    (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11543 			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11544 				return (EINVAL);
11545 
11546 			/*
11547 			 * We have a slot for the pid, plus a slot for the
11548 			 * argument.  To keep things simple (aligned with
11549 			 * bitness-neutral sizing), we store each as a 64-bit
11550 			 * quantity.
11551 			 */
11552 			size = 2 * sizeof (uint64_t);
11553 			break;
11554 
11555 		case DTRACEACT_STOP:
11556 		case DTRACEACT_BREAKPOINT:
11557 		case DTRACEACT_PANIC:
11558 			break;
11559 
11560 		case DTRACEACT_CHILL:
11561 		case DTRACEACT_DISCARD:
11562 		case DTRACEACT_RAISE:
11563 		case DTRACEACT_PIDRESUME:	/* __APPLE__ */
11564 			if (dp == NULL)
11565 				return (EINVAL);
11566 			break;
11567 
11568 		case DTRACEACT_EXIT:
11569 			if (dp == NULL ||
11570 			    (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11571 			    (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11572 				return (EINVAL);
11573 			break;
11574 
11575 		case DTRACEACT_SPECULATE:
11576 			if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11577 				return (EINVAL);
11578 
11579 			if (dp == NULL)
11580 				return (EINVAL);
11581 
11582 			state->dts_speculates = 1;
11583 			break;
11584 
11585 		case DTRACEACT_COMMIT: {
11586 			dtrace_action_t *act = ecb->dte_action;
11587 
11588 			for (; act != NULL; act = act->dta_next) {
11589 				if (act->dta_kind == DTRACEACT_COMMIT)
11590 					return (EINVAL);
11591 			}
11592 
11593 			if (dp == NULL)
11594 				return (EINVAL);
11595 			break;
11596 		}
11597 
11598 		default:
11599 			return (EINVAL);
11600 		}
11601 
11602 		if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11603 			/*
11604 			 * If this is a data-storing action or a speculate,
11605 			 * we must be sure that there isn't a commit on the
11606 			 * action chain.
11607 			 */
11608 			dtrace_action_t *act = ecb->dte_action;
11609 
11610 			for (; act != NULL; act = act->dta_next) {
11611 				if (act->dta_kind == DTRACEACT_COMMIT)
11612 					return (EINVAL);
11613 			}
11614 		}
11615 
11616 		action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11617 		action->dta_rec.dtrd_size = size;
11618 	}
11619 
11620 	action->dta_refcnt = 1;
11621 	rec = &action->dta_rec;
11622 	size = rec->dtrd_size;
11623 
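	/*
	 * Derive the record alignment from its size:  use the largest power
	 * of two (up to eight bytes) that evenly divides the size.  A 16-byte
	 * record is thus 8-byte aligned, a 12-byte record 4-byte aligned, and
	 * a 7-byte (or zero-byte) record keeps the default byte alignment.
	 */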
11624 	for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11625 		if (!(size & mask)) {
11626 			align = mask + 1;
11627 			break;
11628 		}
11629 	}
11630 
11631 	action->dta_kind = desc->dtad_kind;
11632 
11633 	if ((action->dta_difo = dp) != NULL)
11634 		dtrace_difo_hold(dp);
11635 
11636 	rec->dtrd_action = action->dta_kind;
11637 	rec->dtrd_arg = arg;
11638 	rec->dtrd_uarg = desc->dtad_uarg;
11639 	rec->dtrd_alignment = (uint16_t)align;
11640 	rec->dtrd_format = format;
11641 
11642 	if ((last = ecb->dte_action_last) != NULL) {
11643 		ASSERT(ecb->dte_action != NULL);
11644 		action->dta_prev = last;
11645 		last->dta_next = action;
11646 	} else {
11647 		ASSERT(ecb->dte_action == NULL);
11648 		ecb->dte_action = action;
11649 	}
11650 
11651 	ecb->dte_action_last = action;
11652 
11653 	return (0);
11654 }
11655 
11656 static void
11657 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
11658 {
11659 	dtrace_action_t *act = ecb->dte_action, *next;
11660 	dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
11661 	dtrace_difo_t *dp;
11662 	uint16_t format;
11663 
11664 	if (act != NULL && act->dta_refcnt > 1) {
11665 		ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
11666 		act->dta_refcnt--;
11667 	} else {
11668 		for (; act != NULL; act = next) {
11669 			next = act->dta_next;
11670 			ASSERT(next != NULL || act == ecb->dte_action_last);
11671 			ASSERT(act->dta_refcnt == 1);
11672 
11673 			if ((format = act->dta_rec.dtrd_format) != 0)
11674 				dtrace_format_remove(ecb->dte_state, format);
11675 
11676 			if ((dp = act->dta_difo) != NULL)
11677 				dtrace_difo_release(dp, vstate);
11678 
11679 			if (DTRACEACT_ISAGG(act->dta_kind)) {
11680 				dtrace_ecb_aggregation_destroy(ecb, act);
11681 			} else {
11682 				kmem_free(act, sizeof (dtrace_action_t));
11683 			}
11684 		}
11685 	}
11686 
11687 	ecb->dte_action = NULL;
11688 	ecb->dte_action_last = NULL;
11689 	ecb->dte_size = 0;
11690 }
11691 
11692 static void
11693 dtrace_ecb_disable(dtrace_ecb_t *ecb)
11694 {
11695 	/*
11696 	 * We disable the ECB by removing it from its probe.
11697 	 */
11698 	dtrace_ecb_t *pecb, *prev = NULL;
11699 	dtrace_probe_t *probe = ecb->dte_probe;
11700 
11701 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11702 
11703 	if (probe == NULL) {
11704 		/*
11705 		 * This is the NULL probe; there is nothing to disable.
11706 		 */
11707 		return;
11708 	}
11709 
11710 	for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
11711 		if (pecb == ecb)
11712 			break;
11713 		prev = pecb;
11714 	}
11715 
11716 	ASSERT(pecb != NULL);
11717 
11718 	if (prev == NULL) {
11719 		probe->dtpr_ecb = ecb->dte_next;
11720 	} else {
11721 		prev->dte_next = ecb->dte_next;
11722 	}
11723 
11724 	if (ecb == probe->dtpr_ecb_last) {
11725 		ASSERT(ecb->dte_next == NULL);
11726 		probe->dtpr_ecb_last = prev;
11727 	}
11728 
11729 	probe->dtpr_provider->dtpv_ecb_count--;
11730 	/*
11731 	 * The ECB has been disconnected from the probe; now sync to assure
11732 	 * that all CPUs have seen the change before returning.
11733 	 */
11734 	dtrace_sync();
11735 
11736 	if (probe->dtpr_ecb == NULL) {
11737 		/*
11738 		 * That was the last ECB on the probe; clear the predicate
11739 		 * cache ID for the probe, disable it and sync one more time
11740 		 * to assure that we'll never hit it again.
11741 		 */
11742 		dtrace_provider_t *prov = probe->dtpr_provider;
11743 
11744 		ASSERT(ecb->dte_next == NULL);
11745 		ASSERT(probe->dtpr_ecb_last == NULL);
11746 		probe->dtpr_predcache = DTRACE_CACHEIDNONE;
11747 		prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
11748 		    probe->dtpr_id, probe->dtpr_arg);
11749 		dtrace_sync();
11750 	} else {
11751 		/*
11752 		 * There is at least one ECB remaining on the probe.  If there
11753 		 * is _exactly_ one, set the probe's predicate cache ID to be
11754 		 * the predicate cache ID of the remaining ECB.
11755 		 */
11756 		ASSERT(probe->dtpr_ecb_last != NULL);
11757 		ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
11758 
11759 		if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
11760 			dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
11761 
11762 			ASSERT(probe->dtpr_ecb->dte_next == NULL);
11763 
11764 			if (p != NULL)
11765 				probe->dtpr_predcache = p->dtp_cacheid;
11766 		}
11767 
11768 		ecb->dte_next = NULL;
11769 	}
11770 }
11771 
11772 static void
11773 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
11774 {
11775 	dtrace_state_t *state = ecb->dte_state;
11776 	dtrace_vstate_t *vstate = &state->dts_vstate;
11777 	dtrace_predicate_t *pred;
11778 	dtrace_epid_t epid = ecb->dte_epid;
11779 
11780 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11781 	ASSERT(ecb->dte_next == NULL);
11782 	ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
11783 
11784 	if ((pred = ecb->dte_predicate) != NULL)
11785 		dtrace_predicate_release(pred, vstate);
11786 
11787 	dtrace_ecb_action_remove(ecb);
11788 
11789 	ASSERT(state->dts_ecbs[epid - 1] == ecb);
11790 	state->dts_ecbs[epid - 1] = NULL;
11791 
11792 	kmem_free(ecb, sizeof (dtrace_ecb_t));
11793 }
11794 
11795 static dtrace_ecb_t *
11796 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
11797     dtrace_enabling_t *enab)
11798 {
11799 	dtrace_ecb_t *ecb;
11800 	dtrace_predicate_t *pred;
11801 	dtrace_actdesc_t *act;
11802 	dtrace_provider_t *prov;
11803 	dtrace_ecbdesc_t *desc = enab->dten_current;
11804 
11805 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11806 	ASSERT(state != NULL);
11807 
11808 	ecb = dtrace_ecb_add(state, probe);
11809 	ecb->dte_uarg = desc->dted_uarg;
11810 
11811 	if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11812 		dtrace_predicate_hold(pred);
11813 		ecb->dte_predicate = pred;
11814 	}
11815 
11816 	if (probe != NULL) {
11817 		/*
11818 		 * If the provider shows more leg than the consumer is old
11819 		 * enough to see, we need to enable the appropriate implicit
11820 		 * predicate bits to prevent the ecb from activating at
11821 		 * revealing times.
11822 		 *
11823 		 * Providers specifying DTRACE_PRIV_USER at register time
11824 		 * are stating that they need the /proc-style privilege
11825 		 * model to be enforced, and this is what DTRACE_COND_OWNER
11826 		 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11827 		 */
11828 		prov = probe->dtpr_provider;
11829 		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11830 		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11831 			ecb->dte_cond |= DTRACE_COND_OWNER;
11832 
11833 		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11834 		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11835 			ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11836 
11837 		/*
11838 		 * If the provider shows us kernel innards and the user
11839 		 * is lacking sufficient privilege, enable the
11840 		 * DTRACE_COND_USERMODE implicit predicate.
11841 		 */
11842 		if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11843 		    (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11844 			ecb->dte_cond |= DTRACE_COND_USERMODE;
11845 	}
11846 
11847 	if (dtrace_ecb_create_cache != NULL) {
11848 		/*
11849 		 * If we have a cached ecb, we'll use its action list instead
11850 		 * of creating our own (saving both time and space).
11851 		 */
11852 		dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11853 		dtrace_action_t *act_if = cached->dte_action;
11854 
11855 		if (act_if != NULL) {
11856 			ASSERT(act_if->dta_refcnt > 0);
11857 			act_if->dta_refcnt++;
11858 			ecb->dte_action = act_if;
11859 			ecb->dte_action_last = cached->dte_action_last;
11860 			ecb->dte_needed = cached->dte_needed;
11861 			ecb->dte_size = cached->dte_size;
11862 			ecb->dte_alignment = cached->dte_alignment;
11863 		}
11864 
11865 		return (ecb);
11866 	}
11867 
11868 	for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11869 		if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11870 			dtrace_ecb_destroy(ecb);
11871 			return (NULL);
11872 		}
11873 	}
11874 
11875 	if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) {
11876 		dtrace_ecb_destroy(ecb);
11877 		return (NULL);
11878 	}
11879 
11880 	return (dtrace_ecb_create_cache = ecb);
11881 }
11882 
11883 static int
11884 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg1, void *arg2)
11885 {
11886 	dtrace_ecb_t *ecb;
11887 	dtrace_enabling_t *enab = arg1;
11888 	dtrace_ecbdesc_t *ep = arg2;
11889 	dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11890 
11891 	ASSERT(state != NULL);
11892 
11893 	if (probe != NULL && ep != NULL && probe->dtpr_gen < ep->dted_probegen) {
11894 		/*
11895 		 * This probe was created in a generation for which this
11896 		 * enabling has previously created ECBs; we don't want to
11897 		 * enable it again, so just kick out.
11898 		 */
11899 		return (DTRACE_MATCH_NEXT);
11900 	}
11901 
11902 	if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11903 		return (DTRACE_MATCH_DONE);
11904 
11905 	if (dtrace_ecb_enable(ecb) < 0)
11906 		return (DTRACE_MATCH_FAIL);
11907 
11908 	return (DTRACE_MATCH_NEXT);
11909 }
11910 
11911 static dtrace_ecb_t *
11912 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
11913 {
11914 	dtrace_ecb_t *ecb;
11915 #pragma unused(ecb) /* __APPLE__ */
11916 
11917 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11918 
11919 	if (id == 0 || id > (dtrace_epid_t)state->dts_necbs)
11920 		return (NULL);
11921 
11922 	ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
11923 	ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
11924 
11925 	return (state->dts_ecbs[id - 1]);
11926 }
11927 
11928 static dtrace_aggregation_t *
11929 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
11930 {
11931 	dtrace_aggregation_t *agg;
11932 #pragma unused(agg) /* __APPLE__ */
11933 
11934 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
11935 
11936 	if (id == 0 || id > (dtrace_aggid_t)state->dts_naggregations)
11937 		return (NULL);
11938 
11939 	ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
11940 	ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
11941 	    agg->dtag_id == id);
11942 
11943 	return (state->dts_aggregations[id - 1]);
11944 }
11945 
11946 /*
11947  * DTrace Buffer Functions
11948  *
11949  * The following functions manipulate DTrace buffers.  Most of these functions
11950  * are called in the context of establishing or processing consumer state;
11951  * exceptions are explicitly noted.
11952  */
11953 
11954 /*
11955  * Note:  called from cross call context.  This function switches the two
11956  * buffers on a given CPU.  The atomicity of this operation is assured by
11957  * disabling interrupts while the actual switch takes place; the disabling of
11958  * interrupts serializes the execution with any execution of dtrace_probe() on
11959  * the same CPU.
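 *
 * The buffer naming follows the usual DTrace convention:  dtb_tomax is the
 * active buffer, and dtb_xamot ("tomax" reversed) is the inactive buffer
 * handed to the consumer after a switch.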
11960  */
11961 static void
11962 dtrace_buffer_switch(dtrace_buffer_t *buf)
11963 {
11964 	caddr_t tomax = buf->dtb_tomax;
11965 	caddr_t xamot = buf->dtb_xamot;
11966 	dtrace_icookie_t cookie;
11967 	hrtime_t now;
11968 
11969 	ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
11970 	ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
11971 
11972 	cookie = dtrace_interrupt_disable();
11973 	now = dtrace_gethrtime();
11974 	buf->dtb_tomax = xamot;
11975 	buf->dtb_xamot = tomax;
11976 	buf->dtb_xamot_drops = buf->dtb_drops;
11977 	buf->dtb_xamot_offset = buf->dtb_offset;
11978 	buf->dtb_xamot_errors = buf->dtb_errors;
11979 	buf->dtb_xamot_flags = buf->dtb_flags;
11980 	buf->dtb_offset = 0;
11981 	buf->dtb_drops = 0;
11982 	buf->dtb_errors = 0;
11983 	buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
11984 	buf->dtb_interval = now - buf->dtb_switched;
11985 	buf->dtb_switched = now;
11986 	buf->dtb_cur_limit = buf->dtb_limit;
11987 
11988 	dtrace_interrupt_enable(cookie);
11989 }
11990 
11991 /*
11992  * Note:  called from cross call context.  This function activates a buffer
11993  * on a CPU.  As with dtrace_buffer_switch(), the atomicity of the operation
11994  * is guaranteed by the disabling of interrupts.
11995  */
11996 static void
11997 dtrace_buffer_activate(dtrace_state_t *state)
11998 {
11999 	dtrace_buffer_t *buf;
12000 	dtrace_icookie_t cookie = dtrace_interrupt_disable();
12001 
12002 	buf = &state->dts_buffer[CPU->cpu_id];
12003 
12004 	if (buf->dtb_tomax != NULL) {
12005 		/*
12006 		 * We might like to assert that the buffer is marked inactive,
12007 		 * but this isn't necessarily true:  the buffer for the CPU
12008 		 * that processes the BEGIN probe has its buffer activated
12009 		 * manually.  In this case, we take the (harmless) action
12010 		 * re-clearing the bit INACTIVE bit.
12011 		 */
12012 		buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
12013 	}
12014 
12015 	dtrace_interrupt_enable(cookie);
12016 }
12017 
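/*
 * Determine whether a buffer allocation of the given size would push the
 * global buffer footprint past dtrace_buffer_memory_maxsize; the first
 * comparison guards the subsequent addition against unsigned overflow.
 */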
12018 static int
12019 dtrace_buffer_canalloc(size_t size)
12020 {
12021 	if (size > (UINT64_MAX - dtrace_buffer_memory_inuse))
12022 		return (B_FALSE);
12023 	if ((size + dtrace_buffer_memory_inuse) > dtrace_buffer_memory_maxsize)
12024 		return (B_FALSE);
12025 
12026 	return (B_TRUE);
12027 }
12028 
12029 static int
12030 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t limit, size_t size, int flags,
12031     processorid_t cpu)
12032 {
12033 	dtrace_cpu_t *cp;
12034 	dtrace_buffer_t *buf;
12035 	size_t size_before_alloc = dtrace_buffer_memory_inuse;
12036 
12037 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12038 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12039 
12040 	if (size > (size_t)dtrace_nonroot_maxsize &&
12041 	    !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
12042 		return (EFBIG);
12043 
12044 	cp = cpu_list;
12045 
12046 	do {
12047 		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12048 			continue;
12049 
12050 		buf = &bufs[cp->cpu_id];
12051 
12052 		/*
12053 		 * If there is already a buffer allocated for this CPU, it
12054 		 * is only possible that this is a DR event.  In this case,
12055 		 * the buffer size must match our specified size.
12056 		 */
12057 		if (buf->dtb_tomax != NULL) {
12058 			ASSERT(buf->dtb_size == size);
12059 			continue;
12060 		}
12061 
12062 		ASSERT(buf->dtb_xamot == NULL);
12063 
12064 		/* DTrace, please do not eat all the memory. */
12065 		if (dtrace_buffer_canalloc(size) == B_FALSE)
12066 			goto err;
12067 		if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12068 			goto err;
12069 		dtrace_buffer_memory_inuse += size;
12070 
12071 		/* Ensure that the limit is always lower than the size */
12072 		limit = limit == size ? limit - 1 : limit;
12073 		buf->dtb_cur_limit = limit;
12074 		buf->dtb_limit = limit;
12075 		buf->dtb_size = size;
12076 		buf->dtb_flags = flags;
12077 		buf->dtb_offset = 0;
12078 		buf->dtb_drops = 0;
12079 
12080 		if (flags & DTRACEBUF_NOSWITCH)
12081 			continue;
12082 
12083 		/* DTrace, please do not eat all the memory. */
12084 		if (dtrace_buffer_canalloc(size) == B_FALSE)
12085 			goto err;
12086 		if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12087 			goto err;
12088 		dtrace_buffer_memory_inuse += size;
12089 	} while ((cp = cp->cpu_next) != cpu_list);
12090 
12091 	ASSERT(dtrace_buffer_memory_inuse <= dtrace_buffer_memory_maxsize);
12092 
12093 	return (0);
12094 
12095 err:
12096 	cp = cpu_list;
12097 
12098 	do {
12099 		if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12100 			continue;
12101 
12102 		buf = &bufs[cp->cpu_id];
12103 
12104 		if (buf->dtb_xamot != NULL) {
12105 			ASSERT(buf->dtb_tomax != NULL);
12106 			ASSERT(buf->dtb_size == size);
12107 			kmem_free(buf->dtb_xamot, size);
12108 		}
12109 
12110 		if (buf->dtb_tomax != NULL) {
12111 			ASSERT(buf->dtb_size == size);
12112 			kmem_free(buf->dtb_tomax, size);
12113 		}
12114 
12115 		buf->dtb_tomax = NULL;
12116 		buf->dtb_xamot = NULL;
12117 		buf->dtb_size = 0;
12118 	} while ((cp = cp->cpu_next) != cpu_list);
12119 
12120 	/* Restore the size saved before allocating memory */
12121 	dtrace_buffer_memory_inuse = size_before_alloc;
12122 
12123 	return (ENOMEM);
12124 }
12125 
12126 /*
12127  * Note:  called from probe context.  This function just increments the drop
12128  * count on a buffer.  It has been made a function to allow for the
12129  * possibility of understanding the source of mysterious drop counts.  (A
12130  * problem for which one may be particularly disappointed that DTrace cannot
12131  * be used to understand DTrace.)
12132  */
12133 static void
12134 dtrace_buffer_drop(dtrace_buffer_t *buf)
12135 {
12136 	buf->dtb_drops++;
12137 }
12138 
12139 /*
12140  * Note:  called from probe context.  This function is called to reserve space
12141  * in a buffer.  If mstate is non-NULL, sets the scratch base and size in the
12142  * mstate.  Returns the new offset in the buffer, or a negative value if an
12143  * error has occurred.
12144  */
12145 static intptr_t
12146 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
12147     dtrace_state_t *state, dtrace_mstate_t *mstate)
12148 {
12149 	intptr_t offs = buf->dtb_offset, soffs;
12150 	intptr_t woffs;
12151 	caddr_t tomax;
12152 	size_t total_off;
12153 
12154 	if (buf->dtb_flags & DTRACEBUF_INACTIVE)
12155 		return (-1);
12156 
12157 	if ((tomax = buf->dtb_tomax) == NULL) {
12158 		dtrace_buffer_drop(buf);
12159 		return (-1);
12160 	}
12161 
12162 	if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
12163 		while (offs & (align - 1)) {
12164 			/*
12165 			 * Assert that our alignment is off by a number which
12166 			 * is itself sizeof (uint32_t) aligned.
12167 			 */
12168 			ASSERT(!((align - (offs & (align - 1))) &
12169 			    (sizeof (uint32_t) - 1)));
12170 			DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12171 			offs += sizeof (uint32_t);
12172 		}
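		/*
		 * (For example, a reservation that needs 8-byte alignment and
		 * arrives with offs == 4 gets exactly one DTRACE_EPIDNONE
		 * filler word from the loop above, advancing offs to 8.)
		 */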
12173 
12174 		if ((uint64_t)(soffs = offs + needed) > buf->dtb_cur_limit) {
12175 			if (buf->dtb_cur_limit == buf->dtb_limit) {
12176 				buf->dtb_cur_limit = buf->dtb_size;
12177 
12178 				os_atomic_inc(&state->dts_buf_over_limit, relaxed);
12179 				/**
12180 				 * Set an AST on the current processor
12181 				 * so that we can wake up the process
12182 				 * outside of probe context, when we know
12183 				 * it is safe to do so
12184 				 */
12185 				minor_t minor = getminor(state->dts_dev);
12186 				ASSERT(minor < 32);
12187 
12188 				os_atomic_or(&dtrace_wake_clients, 1 << minor, relaxed);
12189 				ast_dtrace_on();
12190 			}
12191 			if ((uint64_t)soffs > buf->dtb_size) {
12192 				dtrace_buffer_drop(buf);
12193 				return (-1);
12194 			}
12195 		}
12196 
12197 		if (mstate == NULL)
12198 			return (offs);
12199 
12200 		mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
12201 		mstate->dtms_scratch_size = buf->dtb_size - soffs;
12202 		mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12203 
12204 		return (offs);
12205 	}
12206 
12207 	if (buf->dtb_flags & DTRACEBUF_FILL) {
12208 		if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
12209 		    (buf->dtb_flags & DTRACEBUF_FULL))
12210 			return (-1);
12211 		goto out;
12212 	}
12213 
12214 	total_off = needed + (offs & (align - 1));
12215 
12216 	/*
12217 	 * For a ring buffer, life is quite a bit more complicated.  Before
12218 	 * we can store any padding, we need to adjust our wrapping offset.
12219 	 * (If we've never before wrapped or we're not about to, no adjustment
12220 	 * is required.)
12221 	 */
12222 	if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
12223 	    offs + total_off > buf->dtb_size) {
12224 		woffs = buf->dtb_xamot_offset;
12225 
12226 		if (offs + total_off > buf->dtb_size) {
12227 			/*
12228 			 * We can't fit in the end of the buffer.  First, a
12229 			 * sanity check that we can fit in the buffer at all.
12230 			 */
12231 			if (total_off > buf->dtb_size) {
12232 				dtrace_buffer_drop(buf);
12233 				return (-1);
12234 			}
12235 
12236 			/*
12237 			 * We're going to be storing at the top of the buffer,
12238 			 * so now we need to deal with the wrapped offset.  We
12239 			 * only reset our wrapped offset to 0 if it is
12240 			 * currently greater than the current offset.  If it
12241 			 * is less than the current offset, it is because a
12242 			 * previous allocation induced a wrap -- but the
12243 			 * allocation didn't subsequently take the space due
12244 			 * to an error or false predicate evaluation.  In this
12245 			 * case, we'll just leave the wrapped offset alone: if
12246 			 * the wrapped offset hasn't been advanced far enough
12247 			 * for this allocation, it will be adjusted in the
12248 			 * lower loop.
12249 			 */
12250 			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
12251 				if (woffs >= offs)
12252 					woffs = 0;
12253 			} else {
12254 				woffs = 0;
12255 			}
12256 
12257 			/*
12258 			 * Now we know that we're going to be storing to the
12259 			 * top of the buffer and that there is room for us
12260 			 * there.  We need to clear the buffer from the current
12261 			 * offset to the end (there may be old gunk there).
12262 			 */
12263 			while ((uint64_t)offs < buf->dtb_size)
12264 				tomax[offs++] = 0;
12265 
12266 			/*
12267 			 * We need to set our offset to zero.  And because we
12268 			 * are wrapping, we need to set the bit indicating as
12269 			 * much.  We can also adjust our needed space back
12270 			 * down to the space required by the ECB -- we know
12271 			 * that the top of the buffer is aligned.
12272 			 */
12273 			offs = 0;
12274 			total_off = needed;
12275 			buf->dtb_flags |= DTRACEBUF_WRAPPED;
12276 		} else {
12277 			/*
12278 			 * There is room for us in the buffer, so we simply
12279 			 * need to check the wrapped offset.
12280 			 */
12281 			if (woffs < offs) {
12282 				/*
12283 				 * The wrapped offset is less than the offset.
12284 				 * This can happen if we allocated buffer space
12285 				 * that induced a wrap, but then we didn't
12286 				 * subsequently take the space due to an error
12287 				 * or false predicate evaluation.  This is
12288 				 * okay; we know that _this_ allocation isn't
12289 				 * going to induce a wrap.  We still can't
12290 				 * reset the wrapped offset to be zero,
12291 				 * however: the space may have been trashed in
12292 				 * the previous failed probe attempt.  But at
12293 				 * least the wrapped offset doesn't need to
12294 				 * be adjusted at all...
12295 				 */
12296 				goto out;
12297 			}
12298 		}
12299 
12300 		while (offs + total_off > (size_t)woffs) {
12301 			dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
12302 			size_t size;
12303 
12304 			if (epid == DTRACE_EPIDNONE) {
12305 				size = sizeof (uint32_t);
12306 			} else {
12307 				ASSERT(epid <= (dtrace_epid_t)state->dts_necbs);
12308 				ASSERT(state->dts_ecbs[epid - 1] != NULL);
12309 
12310 				size = state->dts_ecbs[epid - 1]->dte_size;
12311 			}
12312 
12313 			ASSERT(woffs + size <= buf->dtb_size);
12314 			ASSERT(size != 0);
12315 
12316 			if (woffs + size == buf->dtb_size) {
12317 				/*
12318 				 * We've reached the end of the buffer; we want
12319 				 * to set the wrapped offset to 0 and break
12320 				 * out.  However, if the offs is 0, then we're
12321 				 * in a strange edge-condition:  the amount of
12322 				 * space that we want to reserve plus the size
12323 				 * of the record that we're overwriting is
12324 				 * greater than the size of the buffer.  This
12325 				 * is problematic because if we reserve the
12326 				 * space but subsequently don't consume it (due
12327 				 * to a failed predicate or error) the wrapped
12328 				 * offset will be 0 -- yet the EPID at offset 0
12329 				 * will not be committed.  This situation is
12330 				 * relatively easy to deal with:  if we're in
12331 				 * this case, the buffer is indistinguishable
12332 				 * from one that hasn't wrapped; we need only
12333 				 * finish the job by clearing the wrapped bit,
12334 				 * explicitly setting the offset to be 0, and
12335 				 * zero'ing out the old data in the buffer.
12336 				 */
12337 				if (offs == 0) {
12338 					buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
12339 					buf->dtb_offset = 0;
12340 					woffs = total_off;
12341 
12342 					while ((uint64_t)woffs < buf->dtb_size)
12343 						tomax[woffs++] = 0;
12344 				}
12345 
12346 				woffs = 0;
12347 				break;
12348 			}
12349 
12350 			woffs += size;
12351 		}
12352 
12353 		/*
12354 		 * We have a wrapped offset.  It may be that the wrapped offset
12355 		 * has become zero -- that's okay.
12356 		 */
12357 		buf->dtb_xamot_offset = woffs;
12358 	}
12359 
12360 out:
12361 	/*
12362 	 * Now we can plow the buffer with any necessary padding.
12363 	 */
12364 	while (offs & (align - 1)) {
12365 		/*
12366 		 * Assert that our alignment is off by a number which
12367 		 * is itself sizeof (uint32_t) aligned.
12368 		 */
12369 		ASSERT(!((align - (offs & (align - 1))) &
12370 		    (sizeof (uint32_t) - 1)));
12371 		DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12372 		offs += sizeof (uint32_t);
12373 	}
12374 
12375 	if (buf->dtb_flags & DTRACEBUF_FILL) {
12376 		if (offs + needed > buf->dtb_size - state->dts_reserve) {
12377 			buf->dtb_flags |= DTRACEBUF_FULL;
12378 			return (-1);
12379 		}
12380 	}
12381 
12382 	if (mstate == NULL)
12383 		return (offs);
12384 
12385 	/*
12386 	 * For ring buffers and fill buffers, the scratch space is always
12387 	 * the inactive buffer.
12388 	 */
12389 	mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
12390 	mstate->dtms_scratch_size = buf->dtb_size;
12391 	mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12392 
12393 	return (offs);
12394 }
12395 
12396 static void
12397 dtrace_buffer_polish(dtrace_buffer_t *buf)
12398 {
12399 	ASSERT(buf->dtb_flags & DTRACEBUF_RING);
12400 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12401 
12402 	if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
12403 		return;
12404 
12405 	/*
12406 	 * We need to polish the ring buffer.  There are three cases:
12407 	 *
12408 	 * - The first (and presumably most common) is that there is no gap
12409 	 *   between the buffer offset and the wrapped offset.  In this case,
12410 	 *   there is nothing in the buffer that isn't valid data; we can
12411 	 *   mark the buffer as polished and return.
12412 	 *
12413 	 * - The second (less common than the first but still more common
12414 	 *   than the third) is that there is a gap between the buffer offset
12415 	 *   and the wrapped offset, and the wrapped offset is larger than the
12416 	 *   buffer offset.  This can happen because of an alignment issue, or
12417 	 *   can happen because of a call to dtrace_buffer_reserve() that
12418 	 *   didn't subsequently consume the buffer space.  In this case,
12419 	 *   we need to zero the data from the buffer offset to the wrapped
12420 	 *   offset.
12421 	 *
12422 	 * - The third (and least common) is that there is a gap between the
12423 	 *   buffer offset and the wrapped offset, but the wrapped offset is
12424 	 *   _less_ than the buffer offset.  This can only happen because a
12425 	 *   call to dtrace_buffer_reserve() induced a wrap, but the space
12426 	 *   was not subsequently consumed.  In this case, we need to zero the
12427 	 *   space from the offset to the end of the buffer _and_ from the
12428 	 *   top of the buffer to the wrapped offset.
12429 	 */
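	/*
	 * As a concrete example of the third case:  with dtb_size == 1024,
	 * dtb_offset == 900 and dtb_xamot_offset == 100, the second branch
	 * below zeroes bytes [900, 1024) and then bytes [0, 100), leaving
	 * only the valid wrapped data in between.
	 */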
12430 	if (buf->dtb_offset < buf->dtb_xamot_offset) {
12431 		bzero(buf->dtb_tomax + buf->dtb_offset,
12432 		    buf->dtb_xamot_offset - buf->dtb_offset);
12433 	}
12434 
12435 	if (buf->dtb_offset > buf->dtb_xamot_offset) {
12436 		bzero(buf->dtb_tomax + buf->dtb_offset,
12437 		    buf->dtb_size - buf->dtb_offset);
12438 		bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
12439 	}
12440 }
12441 
12442 static void
12443 dtrace_buffer_free(dtrace_buffer_t *bufs)
12444 {
12445 	int i;
12446 
12447 	for (i = 0; i < (int)NCPU; i++) {
12448 		dtrace_buffer_t *buf = &bufs[i];
12449 
12450 		if (buf->dtb_tomax == NULL) {
12451 			ASSERT(buf->dtb_xamot == NULL);
12452 			ASSERT(buf->dtb_size == 0);
12453 			continue;
12454 		}
12455 
12456 		if (buf->dtb_xamot != NULL) {
12457 			ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
12458 			kmem_free(buf->dtb_xamot, buf->dtb_size);
12459 
12460 			ASSERT(dtrace_buffer_memory_inuse >= buf->dtb_size);
12461 			dtrace_buffer_memory_inuse -= buf->dtb_size;
12462 		}
12463 
12464 		kmem_free(buf->dtb_tomax, buf->dtb_size);
12465 		ASSERT(dtrace_buffer_memory_inuse >= buf->dtb_size);
12466 		dtrace_buffer_memory_inuse -= buf->dtb_size;
12467 
12468 		buf->dtb_size = 0;
12469 		buf->dtb_tomax = NULL;
12470 		buf->dtb_xamot = NULL;
12471 	}
12472 }
12473 
12474 /*
12475  * DTrace Enabling Functions
12476  */
12477 static dtrace_enabling_t *
12478 dtrace_enabling_create(dtrace_vstate_t *vstate)
12479 {
12480 	dtrace_enabling_t *enab;
12481 
12482 	enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
12483 	enab->dten_vstate = vstate;
12484 
12485 	return (enab);
12486 }
12487 
12488 static void
12489 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
12490 {
12491 	dtrace_ecbdesc_t **ndesc;
12492 	size_t osize, nsize;
12493 
12494 	/*
12495 	 * We can't add to enablings after we've enabled them, or after we've
12496 	 * retained them.
12497 	 */
12498 	ASSERT(enab->dten_probegen == 0);
12499 	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12500 
12501 	/* APPLE NOTE: this protects against gcc 4.0 botch on x86 */
12502 	if (ecb == NULL) return;
12503 
12504 	if (enab->dten_ndesc < enab->dten_maxdesc) {
12505 		enab->dten_desc[enab->dten_ndesc++] = ecb;
12506 		return;
12507 	}
12508 
12509 	osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12510 
12511 	if (enab->dten_maxdesc == 0) {
12512 		enab->dten_maxdesc = 1;
12513 	} else {
12514 		enab->dten_maxdesc <<= 1;
12515 	}
12516 
12517 	ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12518 
12519 	nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12520 	ndesc = kmem_zalloc(nsize, KM_SLEEP);
12521 	bcopy(enab->dten_desc, ndesc, osize);
12522 	kmem_free(enab->dten_desc, osize);
12523 
12524 	enab->dten_desc = ndesc;
12525 	enab->dten_desc[enab->dten_ndesc++] = ecb;
12526 }
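
/*
 * For example, dten_maxdesc above grows 0 -> 1 -> 2 -> 4 -> 8 -> ... as ECB
 * descriptions are added; because the array doubles on each reallocation,
 * the total copying work across n adds is O(n), i.e. amortized constant per
 * added dtrace_ecbdesc_t pointer.
 */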
12527 
12528 static void
12529 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12530     dtrace_probedesc_t *pd)
12531 {
12532 	dtrace_ecbdesc_t *new;
12533 	dtrace_predicate_t *pred;
12534 	dtrace_actdesc_t *act;
12535 
12536 	/*
12537 	 * We're going to create a new ECB description that matches the
12538 	 * specified ECB in every way, but has the specified probe description.
12539 	 */
12540 	new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12541 
12542 	if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12543 		dtrace_predicate_hold(pred);
12544 
12545 	for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12546 		dtrace_actdesc_hold(act);
12547 
12548 	new->dted_action = ecb->dted_action;
12549 	new->dted_pred = ecb->dted_pred;
12550 	new->dted_probe = *pd;
12551 	new->dted_uarg = ecb->dted_uarg;
12552 
12553 	dtrace_enabling_add(enab, new);
12554 }
12555 
12556 static void
12557 dtrace_enabling_dump(dtrace_enabling_t *enab)
12558 {
12559 	int i;
12560 
12561 	for (i = 0; i < enab->dten_ndesc; i++) {
12562 		dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12563 
12564 		cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12565 		    desc->dtpd_provider, desc->dtpd_mod,
12566 		    desc->dtpd_func, desc->dtpd_name);
12567 	}
12568 }
12569 
12570 static void
12571 dtrace_enabling_destroy(dtrace_enabling_t *enab)
12572 {
12573 	int i;
12574 	dtrace_ecbdesc_t *ep;
12575 	dtrace_vstate_t *vstate = enab->dten_vstate;
12576 
12577 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12578 
12579 	for (i = 0; i < enab->dten_ndesc; i++) {
12580 		dtrace_actdesc_t *act, *next;
12581 		dtrace_predicate_t *pred;
12582 
12583 		ep = enab->dten_desc[i];
12584 
12585 		if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12586 			dtrace_predicate_release(pred, vstate);
12587 
12588 		for (act = ep->dted_action; act != NULL; act = next) {
12589 			next = act->dtad_next;
12590 			dtrace_actdesc_release(act, vstate);
12591 		}
12592 
12593 		kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12594 	}
12595 
12596 	kmem_free(enab->dten_desc,
12597 	    enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
12598 
12599 	/*
12600 	 * If this was a retained enabling, decrement the dts_nretained count
12601 	 * and take it off of the dtrace_retained list.
12602 	 */
12603 	if (enab->dten_prev != NULL || enab->dten_next != NULL ||
12604 	    dtrace_retained == enab) {
12605 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12606 		ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
12607 		enab->dten_vstate->dtvs_state->dts_nretained--;
12608 		dtrace_retained_gen++;
12609 	}
12610 
12611 	if (enab->dten_prev == NULL) {
12612 		if (dtrace_retained == enab) {
12613 			dtrace_retained = enab->dten_next;
12614 
12615 			if (dtrace_retained != NULL)
12616 				dtrace_retained->dten_prev = NULL;
12617 		}
12618 	} else {
12619 		ASSERT(enab != dtrace_retained);
12620 		ASSERT(dtrace_retained != NULL);
12621 		enab->dten_prev->dten_next = enab->dten_next;
12622 	}
12623 
12624 	if (enab->dten_next != NULL) {
12625 		ASSERT(dtrace_retained != NULL);
12626 		enab->dten_next->dten_prev = enab->dten_prev;
12627 	}
12628 
12629 	kmem_free(enab, sizeof (dtrace_enabling_t));
12630 }
12631 
12632 static int
12633 dtrace_enabling_retain(dtrace_enabling_t *enab)
12634 {
12635 	dtrace_state_t *state;
12636 
12637 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12638 	ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12639 	ASSERT(enab->dten_vstate != NULL);
12640 
12641 	state = enab->dten_vstate->dtvs_state;
12642 	ASSERT(state != NULL);
12643 
12644 	/*
12645 	 * We only allow each state to retain dtrace_retain_max enablings.
12646 	 */
12647 	if (state->dts_nretained >= dtrace_retain_max)
12648 		return (ENOSPC);
12649 
12650 	state->dts_nretained++;
12651 	dtrace_retained_gen++;
12652 
12653 	if (dtrace_retained == NULL) {
12654 		dtrace_retained = enab;
12655 		return (0);
12656 	}
12657 
12658 	enab->dten_next = dtrace_retained;
12659 	dtrace_retained->dten_prev = enab;
12660 	dtrace_retained = enab;
12661 
12662 	return (0);
12663 }
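
/*
 * For example, retaining enabling A and then enabling B leaves the list as
 * dtrace_retained -> B <-> A:  new enablings are pushed onto the head of the
 * doubly-linked list, and dtrace_retained_gen is bumped so that
 * dtrace_enabling_provide() can detect that the list changed while
 * dtrace_lock was dropped.
 */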
12664 
12665 static int
12666 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
12667     dtrace_probedesc_t *create)
12668 {
12669 	dtrace_enabling_t *new, *enab;
12670 	int found = 0, err = ENOENT;
12671 
12672 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12673 	ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
12674 	ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
12675 	ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
12676 	ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
12677 
12678 	new = dtrace_enabling_create(&state->dts_vstate);
12679 
12680 	/*
12681 	 * Iterate over all retained enablings, looking for enablings that
12682 	 * match the specified state.
12683 	 */
12684 	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12685 		int i;
12686 
12687 		/*
12688 		 * dtvs_state can only be NULL for helper enablings -- and
12689 		 * helper enablings can't be retained.
12690 		 */
12691 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12692 
12693 		if (enab->dten_vstate->dtvs_state != state)
12694 			continue;
12695 
12696 		/*
12697 		 * Now iterate over each probe description; we're looking for
12698 		 * an exact match to the specified probe description.
12699 		 */
12700 		for (i = 0; i < enab->dten_ndesc; i++) {
12701 			dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12702 			dtrace_probedesc_t *pd = &ep->dted_probe;
12703 
12704 			/* APPLE NOTE: Darwin employs size bounded string operation. */
12705 			if (strncmp(pd->dtpd_provider, match->dtpd_provider, DTRACE_PROVNAMELEN))
12706 				continue;
12707 
12708 			if (strncmp(pd->dtpd_mod, match->dtpd_mod, DTRACE_MODNAMELEN))
12709 				continue;
12710 
12711 			if (strncmp(pd->dtpd_func, match->dtpd_func, DTRACE_FUNCNAMELEN))
12712 				continue;
12713 
12714 			if (strncmp(pd->dtpd_name, match->dtpd_name, DTRACE_NAMELEN))
12715 				continue;
12716 
12717 			/*
12718 			 * We have a winning probe!  Add it to our growing
12719 			 * enabling.
12720 			 */
12721 			found = 1;
12722 			dtrace_enabling_addlike(new, ep, create);
12723 		}
12724 	}
12725 
12726 	if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12727 		dtrace_enabling_destroy(new);
12728 		return (err);
12729 	}
12730 
12731 	return (0);
12732 }
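
/*
 * Hypothetical usage sketch (the probe names here are illustrative only):
 * given a retained enabling on syscall::read:entry, calling
 * dtrace_enabling_replicate(state, &match, &create) with match describing
 * syscall::read:entry and create describing syscall::write:entry builds a
 * new retained enabling whose ECB descriptions are identical -- same
 * predicate, same actions, same uarg -- but keyed to the create probe
 * description instead.
 */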
12733 
12734 static void
12735 dtrace_enabling_retract(dtrace_state_t *state)
12736 {
12737 	dtrace_enabling_t *enab, *next;
12738 
12739 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12740 
12741 	/*
12742 	 * Iterate over all retained enablings, destroy the enablings retained
12743 	 * for the specified state.
12744 	 */
12745 	for (enab = dtrace_retained; enab != NULL; enab = next) {
12746 		next = enab->dten_next;
12747 
12748 		/*
12749 		 * dtvs_state can only be NULL for helper enablings -- and
12750 		 * helper enablings can't be retained.
12751 		 */
12752 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12753 
12754 		if (enab->dten_vstate->dtvs_state == state) {
12755 			ASSERT(state->dts_nretained > 0);
12756 			dtrace_enabling_destroy(enab);
12757 		}
12758 	}
12759 
12760 	ASSERT(state->dts_nretained == 0);
12761 }
12762 
12763 static int
12764 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched, dtrace_match_cond_t *cond)
12765 {
12766 	int i = 0;
12767 	int total_matched = 0, matched = 0;
12768 
12769 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
12770 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12771 
12772 	for (i = 0; i < enab->dten_ndesc; i++) {
12773 		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12774 
12775 		enab->dten_current = ep;
12776 		enab->dten_error = 0;
12777 
12778 		/*
12779 		 * Before doing a dtrace_probe_enable(), which is really
12780 		 * expensive, check that this enabling satisfies the matching
12781 		 * precondition, if we have one.
12782 		 */
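		/*
		 * The precondition is supplied as a dtrace_match_cond_t whose
		 * dmc_func is invoked with the probe description and dmc_data.
		 * A hypothetical condition that only passes probes from a
		 * single provider might look like:
		 *
		 *	static int
		 *	only_provider(dtrace_probedesc_t *desc, void *data)
		 *	{
		 *		return (strncmp(desc->dtpd_provider,
		 *		    (const char *)data, DTRACE_PROVNAMELEN) == 0);
		 *	}
		 *
		 * Returning zero from dmc_func causes the description to be
		 * skipped below, before the expensive dtrace_probe_enable()
		 * call.
		 */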
12783 		if (cond && (cond->dmc_func(&ep->dted_probe, cond->dmc_data) == 0)) {
12784 			continue;
12785 		}
12786 		/*
12787 		 * If a provider failed to enable a probe then get out and
12788 		 * let the consumer know we failed.
12789 		 */
12790 		if ((matched = dtrace_probe_enable(&ep->dted_probe, enab, ep)) < 0)
12791 			return (EBUSY);
12792 
12793 		total_matched += matched;
12794 
12795 		if (enab->dten_error != 0) {
12796 			/*
12797 			 * If we get an error half-way through enabling the
12798 			 * probes, we kick out -- perhaps with some number of
12799 			 * them enabled.  Leaving enabled probes enabled may
12800 			 * be slightly confusing for user-level, but we expect
12801 			 * that no one will attempt to actually drive on in
12802 			 * the face of such errors.  If this is an anonymous
12803 			 * enabling (indicated with a NULL nmatched pointer),
12804 			 * we cmn_err() a message.  We aren't expecting to
12805 			 * get such an error -- to the extent that it can exist at all,
12806 			 * it would be a result of corrupted DOF in the driver
12807 			 * properties.
12808 			 */
12809 			if (nmatched == NULL) {
12810 				cmn_err(CE_WARN, "dtrace_enabling_match() "
12811 				    "error on %p: %d", (void *)ep,
12812 				    enab->dten_error);
12813 			}
12814 
12815 			return (enab->dten_error);
12816 		}
12817 
12818 		ep->dted_probegen = dtrace_probegen;
12819 	}
12820 
12821 	if (nmatched != NULL)
12822 		*nmatched = total_matched;
12823 
12824 	return (0);
12825 }
12826 
12827 static void
12828 dtrace_enabling_matchall_with_cond(dtrace_match_cond_t *cond)
12829 {
12830 	dtrace_enabling_t *enab;
12831 
12832 	lck_mtx_lock(&cpu_lock);
12833 	lck_mtx_lock(&dtrace_lock);
12834 
12835 	/*
12836 	 * Iterate over all retained enablings to see if any probes match
12837 	 * against them.  We only perform this operation on enablings for which
12838 	 * we have sufficient permissions by virtue of being in the global zone
12839 	 * or in the same zone as the DTrace client.  Because we can be called
12840 	 * after dtrace_detach() has been called, we cannot assert that there
12841 	 * are retained enablings.  We can safely load from dtrace_retained,
12842 	 * however:  the taskq_destroy() at the end of dtrace_detach() will
12843 	 * block pending our completion.
12844 	 */
12845 
12846 	/*
12847 	 * Darwin doesn't do zones.
12848 	 * Behave as if always in the "global" zone.
12849 	 */
12850 	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12851 		(void) dtrace_enabling_match(enab, NULL, cond);
12852 	}
12853 
12854 	lck_mtx_unlock(&dtrace_lock);
12855 	lck_mtx_unlock(&cpu_lock);
12856 
12857 }
12858 
12859 static void
12860 dtrace_enabling_matchall(void)
12861 {
12862 	dtrace_enabling_matchall_with_cond(NULL);
12863 }
12864 
12865 
12866 
12867 /*
12868  * If an enabling is to be enabled without having matched probes (that is, if
12869  * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
12870  * enabling must be _primed_ by creating an ECB for every ECB description.
12871  * This must be done to assure that we know the number of speculations, the
12872  * number of aggregations, the minimum buffer size needed, etc. before we
12873  * transition out of DTRACE_ACTIVITY_INACTIVE.  To do this without actually
12874  * enabling any probes, we create ECBs for every ECB decription, but with a
12875  * NULL probe -- which is exactly what this function does.
12876  */
12877 static void
12878 dtrace_enabling_prime(dtrace_state_t *state)
12879 {
12880 	dtrace_enabling_t *enab;
12881 	int i;
12882 
12883 	for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12884 		ASSERT(enab->dten_vstate->dtvs_state != NULL);
12885 
12886 		if (enab->dten_vstate->dtvs_state != state)
12887 			continue;
12888 
12889 		/*
12890 		 * We don't want to prime an enabling more than once, lest
12891 		 * we allow a malicious user to induce resource exhaustion.
12892 		 * (The ECBs that result from priming an enabling aren't
12893 		 * leaked -- but they also aren't deallocated until the
12894 		 * consumer state is destroyed.)
12895 		 */
12896 		if (enab->dten_primed)
12897 			continue;
12898 
12899 		for (i = 0; i < enab->dten_ndesc; i++) {
12900 			enab->dten_current = enab->dten_desc[i];
12901 			(void) dtrace_probe_enable(NULL, enab, NULL);
12902 		}
12903 
12904 		enab->dten_primed = 1;
12905 	}
12906 }
12907 
12908 /*
12909  * Called to indicate that probes should be provided due to retained
12910  * enablings.  This is implemented in terms of dtrace_probe_provide(), but it
12911  * must take an initial lap through the enabling calling the dtps_provide()
12912  * entry point explicitly to allow for autocreated probes.
12913  */
12914 static void
12915 dtrace_enabling_provide(dtrace_provider_t *prv)
12916 {
12917 	int i, all = 0;
12918 	dtrace_probedesc_t desc;
12919 	dtrace_genid_t gen;
12920 
12921 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12922 	LCK_MTX_ASSERT(&dtrace_provider_lock, LCK_MTX_ASSERT_OWNED);
12923 
12924 	if (prv == NULL) {
12925 		all = 1;
12926 		prv = dtrace_provider;
12927 	}
12928 
12929 	do {
12930 		dtrace_enabling_t *enab;
12931 		void *parg = prv->dtpv_arg;
12932 
12933 retry:
12934 		gen = dtrace_retained_gen;
12935 		for (enab = dtrace_retained; enab != NULL;
12936 		    enab = enab->dten_next) {
12937 			for (i = 0; i < enab->dten_ndesc; i++) {
12938 				desc = enab->dten_desc[i]->dted_probe;
12939 				lck_mtx_unlock(&dtrace_lock);
12940 				prv->dtpv_pops.dtps_provide(parg, &desc);
12941 				lck_mtx_lock(&dtrace_lock);
12942 				/*
12943 				 * Process the retained enablings again if
12944 				 * they have changed while we weren't holding
12945 				 * dtrace_lock.
12946 				 */
12947 				if (gen != dtrace_retained_gen)
12948 					goto retry;
12949 			}
12950 		}
12951 	} while (all && (prv = prv->dtpv_next) != NULL);
12952 
12953 	lck_mtx_unlock(&dtrace_lock);
12954 	dtrace_probe_provide(NULL, all ? NULL : prv);
12955 	lck_mtx_lock(&dtrace_lock);
12956 }
12957 
12958 /*
12959  * DTrace DOF Functions
12960  */
12961 /*ARGSUSED*/
12962 static void
12963 dtrace_dof_error(dof_hdr_t *dof, const char *str)
12964 {
12965 #pragma unused(dof) /* __APPLE__ */
12966 	if (dtrace_err_verbose)
12967 		cmn_err(CE_WARN, "failed to process DOF: %s", str);
12968 
12969 #ifdef DTRACE_ERRDEBUG
12970 	dtrace_errdebug(str);
12971 #endif
12972 }
12973 
12974 /*
12975  * Create DOF out of a currently enabled state.  Right now, we only create
12976  * DOF containing the run-time options -- but this could be expanded to create
12977  * complete DOF representing the enabled state.
12978  */
12979 static dof_hdr_t *
12980 dtrace_dof_create(dtrace_state_t *state)
12981 {
12982 	dof_hdr_t *dof;
12983 	dof_sec_t *sec;
12984 	dof_optdesc_t *opt;
12985 	int i, len = sizeof (dof_hdr_t) +
12986 	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
12987 	    sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
12988 
12989 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
12990 
12991 	dof = kmem_zalloc_aligned(len, 8, KM_SLEEP);
12992 	dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
12993 	dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
12994 	dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
12995 	dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
12996 
12997 	dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
12998 	dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
12999 	dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
13000 	dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
13001 	dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
13002 	dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
13003 
13004 	dof->dofh_flags = 0;
13005 	dof->dofh_hdrsize = sizeof (dof_hdr_t);
13006 	dof->dofh_secsize = sizeof (dof_sec_t);
13007 	dof->dofh_secnum = 1;	/* only DOF_SECT_OPTDESC */
13008 	dof->dofh_secoff = sizeof (dof_hdr_t);
13009 	dof->dofh_loadsz = len;
13010 	dof->dofh_filesz = len;
13011 	dof->dofh_pad = 0;
13012 
13013 	/*
13014 	 * Fill in the option section header...
13015 	 */
13016 	sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
13017 	sec->dofs_type = DOF_SECT_OPTDESC;
13018 	sec->dofs_align = sizeof (uint64_t);
13019 	sec->dofs_flags = DOF_SECF_LOAD;
13020 	sec->dofs_entsize = sizeof (dof_optdesc_t);
13021 
13022 	opt = (dof_optdesc_t *)((uintptr_t)sec +
13023 	    roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
13024 
13025 	sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
13026 	sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
13027 
13028 	for (i = 0; i < DTRACEOPT_MAX; i++) {
13029 		opt[i].dofo_option = i;
13030 		opt[i].dofo_strtab = DOF_SECIDX_NONE;
13031 		opt[i].dofo_value = state->dts_options[i];
13032 	}
13033 
13034 	return (dof);
13035 }
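
/*
 * The DOF produced above is laid out, in order, as a dof_hdr_t, a single
 * dof_sec_t (padded to an 8-byte boundary) describing DOF_SECT_OPTDESC, and
 * then DTRACEOPT_MAX dof_optdesc_t entries -- one per run-time option, each
 * carrying the option's current value from dts_options.
 */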
13036 
13037 static dof_hdr_t *
13038 dtrace_dof_copyin(user_addr_t uarg, int *errp)
13039 {
13040 	dof_hdr_t hdr, *dof;
13041 
13042 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
13043 
13044 	/*
13045 	 * First, we're going to copyin() the sizeof (dof_hdr_t).
13046 	 */
13047 	if (copyin(uarg, &hdr, sizeof (hdr)) != 0) {
13048 		dtrace_dof_error(NULL, "failed to copyin DOF header");
13049 		*errp = EFAULT;
13050 		return (NULL);
13051 	}
13052 
13053 	/*
13054 	 * Now we'll allocate the entire DOF and copy it in -- provided
13055 	 * that the length isn't outrageous.
13056 	 */
13057 	if (hdr.dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) {
13058 		dtrace_dof_error(&hdr, "load size exceeds maximum");
13059 		*errp = E2BIG;
13060 		return (NULL);
13061 	}
13062 
13063 	if (hdr.dofh_loadsz < sizeof (hdr)) {
13064 		dtrace_dof_error(&hdr, "invalid load size");
13065 		*errp = EINVAL;
13066 		return (NULL);
13067 	}
13068 
13069 	dof = kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
13070 
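	/*
	 * Note that after the full copyin() below, dofh_loadsz is re-checked
	 * against the header that was validated above:  the DOF lives in user
	 * memory and is fetched twice, so a racing thread could change the
	 * load size between the two copies; a mismatch is rejected as EFAULT.
	 */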
13071 	if (copyin(uarg, dof, hdr.dofh_loadsz) != 0 ||
13072 	    dof->dofh_loadsz != hdr.dofh_loadsz) {
13073 		kmem_free_aligned(dof, hdr.dofh_loadsz);
13074 		*errp = EFAULT;
13075 		return (NULL);
13076 	}
13077 
13078 	return (dof);
13079 }
13080 
13081 static dof_hdr_t *
13082 dtrace_dof_copyin_from_proc(proc_t* p, user_addr_t uarg, int *errp)
13083 {
13084 	dof_hdr_t hdr, *dof;
13085 
13086 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
13087 
13088 	/*
13089 	 * First, we're going to copyin() the sizeof (dof_hdr_t).
13090 	 * First, we're going to uread() the sizeof (dof_hdr_t) from the target process.
13091 	if (uread(p, &hdr, sizeof(hdr), uarg) != KERN_SUCCESS) {
13092 		dtrace_dof_error(NULL, "failed to copyin DOF header");
13093 		*errp = EFAULT;
13094 		return (NULL);
13095 	}
13096 
13097 	/*
13098 	 * Now we'll allocate the entire DOF and copy it in -- provided
13099 	 * that the length isn't outrageous.
13100 	 */
13101 	if (hdr.dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) {
13102 		dtrace_dof_error(&hdr, "load size exceeds maximum");
13103 		*errp = E2BIG;
13104 		return (NULL);
13105 	}
13106 
13107 	if (hdr.dofh_loadsz < sizeof (hdr)) {
13108 		dtrace_dof_error(&hdr, "invalid load size");
13109 		*errp = EINVAL;
13110 		return (NULL);
13111 	}
13112 
13113 	dof = kmem_alloc_aligned(hdr.dofh_loadsz, 8, KM_SLEEP);
13114 
13115 	if (uread(p, dof, hdr.dofh_loadsz, uarg) != KERN_SUCCESS ||
13116 	    dof->dofh_loadsz != hdr.dofh_loadsz) {
13117 		kmem_free_aligned(dof, hdr.dofh_loadsz);
13118 		*errp = EFAULT;
13119 		return (NULL);
13120 	}
13121 
13122 	return (dof);
13123 }
13124 
13125 static void
13126 dtrace_dof_destroy(dof_hdr_t *dof)
13127 {
13128 	kmem_free_aligned(dof, dof->dofh_loadsz);
13129 }
13130 
13131 static dof_hdr_t *
13132 dtrace_dof_property(const char *name)
13133 {
13134 	unsigned int len = 0;
13135 	dof_hdr_t *dof;
13136 
13137 	if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
13138 		return NULL;
13139 	}
13140 
13141 	if (!PEReadNVRAMProperty(name, NULL, &len)) {
13142 		return NULL;
13143 	}
13144 
13145 	dof = kmem_alloc_aligned(len, 8, KM_SLEEP);
13146 
13147 	if (!PEReadNVRAMProperty(name, dof, &len)) {
13148 		dtrace_dof_destroy(dof);
13149 		dtrace_dof_error(NULL, "unreadable DOF");
13150 		return NULL;
13151 	}
13152 
13153 	if (len < sizeof (dof_hdr_t)) {
13154 		dtrace_dof_destroy(dof);
13155 		dtrace_dof_error(NULL, "truncated header");
13156 		return (NULL);
13157 	}
13158 
13159 	if (len < dof->dofh_loadsz) {
13160 		dtrace_dof_destroy(dof);
13161 		dtrace_dof_error(NULL, "truncated DOF");
13162 		return (NULL);
13163 	}
13164 
13165 	if (len != dof->dofh_loadsz) {
13166 		dtrace_dof_destroy(dof);
13167 		dtrace_dof_error(NULL, "invalid DOF size");
13168 		return (NULL);
13169 	}
13170 
13171 	if (dof->dofh_loadsz >= (uint64_t)dtrace_dof_maxsize) {
13172 		dtrace_dof_destroy(dof);
13173 		dtrace_dof_error(NULL, "oversized DOF");
13174 		return (NULL);
13175 	}
13176 
13177 	return (dof);
13178 }
13179 
13180 /*
13181  * Return the dof_sec_t pointer corresponding to a given section index.  If the
13182  * index is not valid, dtrace_dof_error() is called and NULL is returned.  If
13183  * a type other than DOF_SECT_NONE is specified, the header is checked against
13184  * this type and NULL is returned if the types do not match.
13185  */
13186 static dof_sec_t *
13187 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
13188 {
13189 	dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
13190 	    ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
13191 
13192 	if (i >= dof->dofh_secnum) {
13193 		dtrace_dof_error(dof, "referenced section index is invalid");
13194 		return (NULL);
13195 	}
13196 
13197 	if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
13198 		dtrace_dof_error(dof, "referenced section is not loadable");
13199 		return (NULL);
13200 	}
13201 
13202 	if (type != DOF_SECT_NONE && type != sec->dofs_type) {
13203 		dtrace_dof_error(dof, "referenced section is the wrong type");
13204 		return (NULL);
13205 	}
13206 
13207 	return (sec);
13208 }
13209 
13210 static dtrace_probedesc_t *
13211 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
13212 {
13213 	dof_probedesc_t *probe;
13214 	dof_sec_t *strtab;
13215 	uintptr_t daddr = (uintptr_t)dof;
13216 	uintptr_t str;
13217 	size_t size;
13218 
13219 	if (sec->dofs_type != DOF_SECT_PROBEDESC) {
13220 		dtrace_dof_error(dof, "invalid probe section");
13221 		return (NULL);
13222 	}
13223 
13224 	if (sec->dofs_align != sizeof (dof_secidx_t)) {
13225 		dtrace_dof_error(dof, "bad alignment in probe description");
13226 		return (NULL);
13227 	}
13228 
13229 	if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
13230 		dtrace_dof_error(dof, "truncated probe description");
13231 		return (NULL);
13232 	}
13233 
13234 	probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
13235 	strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
13236 
13237 	if (strtab == NULL)
13238 		return (NULL);
13239 
13240 	str = daddr + strtab->dofs_offset;
13241 	size = strtab->dofs_size;
13242 
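	/*
	 * Each dofp_* member is a byte offset into the probe's string table.
	 * For example (offsets are illustrative), a string table containing
	 * "\0profile\0\0tick-1sec\0" with dofp_provider == 1, dofp_mod == 9,
	 * dofp_func == 9 and dofp_name == 10 yields the probe description
	 * profile:::tick-1sec, with the empty string at offset 9 supplying
	 * the blank module and function fields.
	 */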
13243 	if (probe->dofp_provider >= strtab->dofs_size) {
13244 		dtrace_dof_error(dof, "corrupt probe provider");
13245 		return (NULL);
13246 	}
13247 
13248 	(void) strncpy(desc->dtpd_provider,
13249 	    (char *)(str + probe->dofp_provider),
13250 	    MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
13251 
13252 	/* APPLE NOTE: Darwin employs size bounded string operation. */
13253 	desc->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
13254 
13255 	if (probe->dofp_mod >= strtab->dofs_size) {
13256 		dtrace_dof_error(dof, "corrupt probe module");
13257 		return (NULL);
13258 	}
13259 
13260 	(void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
13261 	    MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
13262 
13263 	/* APPLE NOTE: Darwin employs size bounded string operation. */
13264 	desc->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
13265 
13266 	if (probe->dofp_func >= strtab->dofs_size) {
13267 		dtrace_dof_error(dof, "corrupt probe function");
13268 		return (NULL);
13269 	}
13270 
13271 	(void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
13272 	    MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
13273 
13274 	/* APPLE NOTE: Darwin employs size bounded string operation. */
13275 	desc->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
13276 
13277 	if (probe->dofp_name >= strtab->dofs_size) {
13278 		dtrace_dof_error(dof, "corrupt probe name");
13279 		return (NULL);
13280 	}
13281 
13282 	(void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
13283 	    MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
13284 
13285 	/* APPLE NOTE: Darwin employs size bounded string operation. */
13286 	desc->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
13287 
13288 	return (desc);
13289 }
13290 
13291 static dtrace_difo_t *
13292 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13293     cred_t *cr)
13294 {
13295 	dtrace_difo_t *dp;
13296 	size_t ttl = 0;
13297 	dof_difohdr_t *dofd;
13298 	uintptr_t daddr = (uintptr_t)dof;
13299 	size_t max_size = dtrace_difo_maxsize;
13300 	uint_t i;
13301 	int l, n;
13302 
13303 
13304 	static const struct {
13305 		int section;
13306 		int bufoffs;
13307 		int lenoffs;
13308 		int entsize;
13309 		int align;
13310 		const char *msg;
13311 	} difo[] = {
13312 		{ DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
13313 		offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
13314 		sizeof (dif_instr_t), "multiple DIF sections" },
13315 
13316 		{ DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
13317 		offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
13318 		sizeof (uint64_t), "multiple integer tables" },
13319 
13320 		{ DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
13321 		offsetof(dtrace_difo_t, dtdo_strlen), 0,
13322 		sizeof (char), "multiple string tables" },
13323 
13324 		{ DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
13325 		offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
13326 		sizeof (uint_t), "multiple variable tables" },
13327 
13328 		{ DOF_SECT_NONE, 0, 0, 0, 0, NULL }
13329 	};
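
	/*
	 * Each row above drives the generic copy loop below:  for instance,
	 * when a DOF_SECT_DIF subsection is encountered, bufoffs/lenoffs
	 * locate dtdo_buf and dtdo_len within the dtrace_difo_t, the
	 * subsection's contents are copied into a fresh dtdo_buf allocation,
	 * and dtdo_len is set to the number of dif_instr_t entries (the raw
	 * size divided by the entry size).
	 */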
13330 
13331 	if (sec->dofs_type != DOF_SECT_DIFOHDR) {
13332 		dtrace_dof_error(dof, "invalid DIFO header section");
13333 		return (NULL);
13334 	}
13335 
13336 	if (sec->dofs_align != sizeof (dof_secidx_t)) {
13337 		dtrace_dof_error(dof, "bad alignment in DIFO header");
13338 		return (NULL);
13339 	}
13340 
13341 	if (sec->dofs_size < sizeof (dof_difohdr_t) ||
13342 	    sec->dofs_size % sizeof (dof_secidx_t)) {
13343 		dtrace_dof_error(dof, "bad size in DIFO header");
13344 		return (NULL);
13345 	}
13346 
13347 	dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
13348 	n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
13349 
13350 	dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
13351 	dp->dtdo_rtype = dofd->dofd_rtype;
13352 
13353 	for (l = 0; l < n; l++) {
13354 		dof_sec_t *subsec;
13355 		void **bufp;
13356 		uint32_t *lenp;
13357 
13358 		if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
13359 		    dofd->dofd_links[l])) == NULL)
13360 			goto err; /* invalid section link */
13361 
13362 		if (ttl + subsec->dofs_size > max_size) {
13363 			dtrace_dof_error(dof, "exceeds maximum size");
13364 			goto err;
13365 		}
13366 
13367 		ttl += subsec->dofs_size;
13368 
13369 		for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
13370 
13371 			if (subsec->dofs_type != (uint32_t)difo[i].section)
13372 				continue;
13373 
13374 			if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
13375 				dtrace_dof_error(dof, "section not loaded");
13376 				goto err;
13377 			}
13378 
13379 			if (subsec->dofs_align != (uint32_t)difo[i].align) {
13380 				dtrace_dof_error(dof, "bad alignment");
13381 				goto err;
13382 			}
13383 
13384 			bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
13385 			lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
13386 
13387 			if (*bufp != NULL) {
13388 				dtrace_dof_error(dof, difo[i].msg);
13389 				goto err;
13390 			}
13391 
13392 			if ((uint32_t)difo[i].entsize != subsec->dofs_entsize) {
13393 				dtrace_dof_error(dof, "entry size mismatch");
13394 				goto err;
13395 			}
13396 
13397 			if (subsec->dofs_entsize != 0 &&
13398 			    (subsec->dofs_size % subsec->dofs_entsize) != 0) {
13399 				dtrace_dof_error(dof, "corrupt entry size");
13400 				goto err;
13401 			}
13402 
13403 			*lenp = subsec->dofs_size;
13404 			*bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
13405 			bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
13406 			    *bufp, subsec->dofs_size);
13407 
13408 			if (subsec->dofs_entsize != 0)
13409 				*lenp /= subsec->dofs_entsize;
13410 
13411 			break;
13412 		}
13413 
13414 		/*
13415 		 * If we encounter a loadable DIFO sub-section that is not
13416 		 * known to us, assume this is a broken program and fail.
13417 		 */
13418 		if (difo[i].section == DOF_SECT_NONE &&
13419 		    (subsec->dofs_flags & DOF_SECF_LOAD)) {
13420 			dtrace_dof_error(dof, "unrecognized DIFO subsection");
13421 			goto err;
13422 		}
13423 	}
13424 
13425 	if (dp->dtdo_buf == NULL) {
13426 		/*
13427 		 * We can't have a DIF object without DIF text.
13428 		 */
13429 		dtrace_dof_error(dof, "missing DIF text");
13430 		goto err;
13431 	}
13432 
13433 	/*
13434 	 * Before we validate the DIF object, run through the variable table
13435 	 * looking for the strings -- if any of their sizes is zero, we'll set
13436 	 * their size to be the system-wide default string size.  Note that
13437 	 * this should _not_ happen if the "strsize" option has been set --
13438 	 * in this case, the compiler should have set the size to reflect the
13439 	 * setting of the option.
13440 	 */
13441 	for (i = 0; i < dp->dtdo_varlen; i++) {
13442 		dtrace_difv_t *v = &dp->dtdo_vartab[i];
13443 		dtrace_diftype_t *t = &v->dtdv_type;
13444 
13445 		if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
13446 			continue;
13447 
13448 		if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
13449 			t->dtdt_size = dtrace_strsize_default;
13450 	}
13451 
13452 	if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
13453 		goto err;
13454 
13455 	dtrace_difo_init(dp, vstate);
13456 	return (dp);
13457 
13458 err:
13459 	kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
13460 	kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
13461 	kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
13462 	kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
13463 
13464 	kmem_free(dp, sizeof (dtrace_difo_t));
13465 	return (NULL);
13466 }
13467 
13468 static dtrace_predicate_t *
13469 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13470     cred_t *cr)
13471 {
13472 	dtrace_difo_t *dp;
13473 
13474 	if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
13475 		return (NULL);
13476 
13477 	return (dtrace_predicate_create(dp));
13478 }
13479 
13480 static dtrace_actdesc_t *
13481 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13482     cred_t *cr)
13483 {
13484 	dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
13485 	dof_actdesc_t *desc;
13486 	dof_sec_t *difosec;
13487 	size_t offs;
13488 	uintptr_t daddr = (uintptr_t)dof;
13489 	uint64_t arg;
13490 	dtrace_actkind_t kind;
13491 
13492 	if (sec->dofs_type != DOF_SECT_ACTDESC) {
13493 		dtrace_dof_error(dof, "invalid action section");
13494 		return (NULL);
13495 	}
13496 
13497 	if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
13498 		dtrace_dof_error(dof, "truncated action description");
13499 		return (NULL);
13500 	}
13501 
13502 	if (sec->dofs_align != sizeof (uint64_t)) {
13503 		dtrace_dof_error(dof, "bad alignment in action description");
13504 		return (NULL);
13505 	}
13506 
13507 	if (sec->dofs_size < sec->dofs_entsize) {
13508 		dtrace_dof_error(dof, "section entry size exceeds total size");
13509 		return (NULL);
13510 	}
13511 
13512 	if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
13513 		dtrace_dof_error(dof, "bad entry size in action description");
13514 		return (NULL);
13515 	}
13516 
13517 	if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
13518 		dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
13519 		return (NULL);
13520 	}
13521 
13522 	for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
13523 		desc = (dof_actdesc_t *)(daddr +
13524 		    (uintptr_t)sec->dofs_offset + offs);
13525 		kind = (dtrace_actkind_t)desc->dofa_kind;
13526 
13527 		if ((DTRACEACT_ISPRINTFLIKE(kind) &&
13528 		    (kind != DTRACEACT_PRINTA || desc->dofa_strtab != DOF_SECIDX_NONE)) ||
13529 		    (kind == DTRACEACT_DIFEXPR && desc->dofa_strtab != DOF_SECIDX_NONE))
13530 		{
13531 			dof_sec_t *strtab;
13532 			char *str, *fmt;
13533 			uint64_t i;
13534 
13535 			/*
13536 			 * The argument to these actions is an index into the
13537 			 * DOF string table.  For printf()-like actions, this
13538 			 * is the format string.  For print(), this is the
13539 			 * CTF type of the expression result.
13540 			 */
13541 			if ((strtab = dtrace_dof_sect(dof,
13542 			    DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
13543 				goto err;
13544 
13545 			str = (char *)((uintptr_t)dof +
13546 			    (uintptr_t)strtab->dofs_offset);
13547 
13548 			for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
13549 				if (str[i] == '\0')
13550 					break;
13551 			}
13552 
13553 			if (i >= strtab->dofs_size) {
13554 				dtrace_dof_error(dof, "bogus format string");
13555 				goto err;
13556 			}
13557 
13558 			if (i == desc->dofa_arg) {
13559 				dtrace_dof_error(dof, "empty format string");
13560 				goto err;
13561 			}
13562 
13563 			i -= desc->dofa_arg;
13564 			fmt = kmem_alloc(i + 1, KM_SLEEP);
13565 			bcopy(&str[desc->dofa_arg], fmt, i + 1);
13566 			arg = (uint64_t)(uintptr_t)fmt;
13567 		} else {
13568 			if (kind == DTRACEACT_PRINTA) {
13569 				ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
13570 				arg = 0;
13571 			} else {
13572 				arg = desc->dofa_arg;
13573 			}
13574 		}
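
		/*
		 * For example, if the string table holds "%d bytes\0" starting
		 * at dofa_arg, the scan above stops at the NUL, i works out to
		 * 8 bytes past dofa_arg, and fmt becomes a private 9-byte copy
		 * of the format string whose address is passed along as the
		 * action's argument.
		 */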
13575 
13576 		act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
13577 		    desc->dofa_uarg, arg);
13578 
13579 		if (last != NULL) {
13580 			last->dtad_next = act;
13581 		} else {
13582 			first = act;
13583 		}
13584 
13585 		last = act;
13586 
13587 		if (desc->dofa_difo == DOF_SECIDX_NONE)
13588 			continue;
13589 
13590 		if ((difosec = dtrace_dof_sect(dof,
13591 		    DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
13592 			goto err;
13593 
13594 		act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
13595 
13596 		if (act->dtad_difo == NULL)
13597 			goto err;
13598 	}
13599 
13600 	ASSERT(first != NULL);
13601 	return (first);
13602 
13603 err:
13604 	for (act = first; act != NULL; act = next) {
13605 		next = act->dtad_next;
13606 		dtrace_actdesc_release(act, vstate);
13607 	}
13608 
13609 	return (NULL);
13610 }
13611 
13612 static dtrace_ecbdesc_t *
13613 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13614     cred_t *cr)
13615 {
13616 	dtrace_ecbdesc_t *ep;
13617 	dof_ecbdesc_t *ecb;
13618 	dtrace_probedesc_t *desc;
13619 	dtrace_predicate_t *pred = NULL;
13620 
13621 	if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
13622 		dtrace_dof_error(dof, "truncated ECB description");
13623 		return (NULL);
13624 	}
13625 
13626 	if (sec->dofs_align != sizeof (uint64_t)) {
13627 		dtrace_dof_error(dof, "bad alignment in ECB description");
13628 		return (NULL);
13629 	}
13630 
13631 	ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
13632 	sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
13633 
13634 	if (sec == NULL)
13635 		return (NULL);
13636 
13637 	ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
13638 	ep->dted_uarg = ecb->dofe_uarg;
13639 	desc = &ep->dted_probe;
13640 
13641 	if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
13642 		goto err;
13643 
13644 	if (ecb->dofe_pred != DOF_SECIDX_NONE) {
13645 		if ((sec = dtrace_dof_sect(dof,
13646 		    DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
13647 			goto err;
13648 
13649 		if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
13650 			goto err;
13651 
13652 		ep->dted_pred.dtpdd_predicate = pred;
13653 	}
13654 
13655 	if (ecb->dofe_actions != DOF_SECIDX_NONE) {
13656 		if ((sec = dtrace_dof_sect(dof,
13657 		    DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
13658 			goto err;
13659 
13660 		ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
13661 
13662 		if (ep->dted_action == NULL)
13663 			goto err;
13664 	}
13665 
13666 	return (ep);
13667 
13668 err:
13669 	if (pred != NULL)
13670 		dtrace_predicate_release(pred, vstate);
13671 	kmem_free(ep, sizeof (dtrace_ecbdesc_t));
13672 	return (NULL);
13673 }
13674 
13675 /*
13676  * APPLE NOTE: dyld handles dof relocation.
13677  * Darwin does not need dtrace_dof_relocate()
13678  */
13679 
13680 /*
13681  * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
13682  * header:  it should be at the front of a memory region that is at least
13683  * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
13684  * size.  It need not be validated in any other way.
13685  */
13686 static int
13687 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
13688     dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
13689 {
13690 #pragma unused(ubase) /* __APPLE__ */
13691 	uint64_t len = dof->dofh_loadsz, seclen;
13692 	uintptr_t daddr = (uintptr_t)dof;
13693 	dtrace_ecbdesc_t *ep;
13694 	dtrace_enabling_t *enab;
13695 	uint_t i;
13696 
13697 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
13698 	ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
13699 
13700 	/*
13701 	 * Check the DOF header identification bytes.  In addition to checking
13702 	 * valid settings, we also verify that unused bits/bytes are zeroed so
13703 	 * we can use them later without fear of regressing existing binaries.
13704 	 */
13705 	if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
13706 	    DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
13707 		dtrace_dof_error(dof, "DOF magic string mismatch");
13708 		return (-1);
13709 	}
13710 
13711 	if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
13712 	    dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
13713 		dtrace_dof_error(dof, "DOF has invalid data model");
13714 		return (-1);
13715 	}
13716 
13717 	if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
13718 		dtrace_dof_error(dof, "DOF encoding mismatch");
13719 		return (-1);
13720 	}
13721 
13722 	/*
13723 	 * APPLE NOTE: Darwin only supports DOF_VERSION_3 for now.
13724 	 */
13725 	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_3) {
13726 		dtrace_dof_error(dof, "DOF version mismatch");
13727 		return (-1);
13728 	}
13729 
13730 	if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
13731 		dtrace_dof_error(dof, "DOF uses unsupported instruction set");
13732 		return (-1);
13733 	}
13734 
13735 	if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
13736 		dtrace_dof_error(dof, "DOF uses too many integer registers");
13737 		return (-1);
13738 	}
13739 
13740 	if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
13741 		dtrace_dof_error(dof, "DOF uses too many tuple registers");
13742 		return (-1);
13743 	}
13744 
13745 	for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
13746 		if (dof->dofh_ident[i] != 0) {
13747 			dtrace_dof_error(dof, "DOF has invalid ident byte set");
13748 			return (-1);
13749 		}
13750 	}
13751 
13752 	if (dof->dofh_flags & ~DOF_FL_VALID) {
13753 		dtrace_dof_error(dof, "DOF has invalid flag bits set");
13754 		return (-1);
13755 	}
13756 
13757 	if (dof->dofh_secsize < sizeof(dof_sec_t)) {
13758 		dtrace_dof_error(dof, "invalid section header size");
13759 		return (-1);
13760 	}
13761 
13762 	/*
13763 	 * Check that the section headers don't exceed the amount of DOF
13764 	 * data.  Note that we cast the section size and number of sections
13765 	 * to uint64_t's to prevent possible overflow in the multiplication.
13766 	 */
13767 	seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
13768 
13769 	if (dof->dofh_secoff > len || seclen > len ||
13770 	    dof->dofh_secoff + seclen > len) {
13771 		dtrace_dof_error(dof, "truncated section headers");
13772 		return (-1);
13773 	}
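
	/*
	 * As a concrete overflow example:  with dofh_secnum == 0x01000000 and
	 * dofh_secsize == 0x1000, the product is 0x1000000000, which would
	 * wrap to 0 in 32-bit arithmetic and defeat the bounds check; the
	 * uint64_t casts above keep the full product intact.
	 */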
13774 
13775 	if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
13776 		dtrace_dof_error(dof, "misaligned section headers");
13777 		return (-1);
13778 	}
13779 
13780 	if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
13781 		dtrace_dof_error(dof, "misaligned section size");
13782 		return (-1);
13783 	}
13784 
13785 	/*
13786 	 * Take an initial pass through the section headers to be sure that
13787 	 * the headers don't have stray offsets.  If the 'noprobes' flag is
13788 	 * set, do not permit sections relating to providers, probes, or args.
13789 	 */
13790 	for (i = 0; i < dof->dofh_secnum; i++) {
13791 		dof_sec_t *sec = (dof_sec_t *)(daddr +
13792 		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13793 
13794 		if (noprobes) {
13795 			switch (sec->dofs_type) {
13796 			case DOF_SECT_PROVIDER:
13797 			case DOF_SECT_PROBES:
13798 			case DOF_SECT_PRARGS:
13799 			case DOF_SECT_PROFFS:
13800 				dtrace_dof_error(dof, "illegal sections "
13801 				    "for enabling");
13802 				return (-1);
13803 			}
13804 		}
13805 
13806 		if (sec->dofs_align & (sec->dofs_align - 1)) {
13807 			dtrace_dof_error(dof, "bad section alignment");
13808 			return (-1);
13809 		}
13810 
13811 		if (sec->dofs_offset & (sec->dofs_align - 1)) {
13812 			dtrace_dof_error(dof, "misaligned section");
13813 			return (-1);
13814 		}
13815 
13816 		if (sec->dofs_flags & DOF_SECF_LOAD) {
13817 			len = dof->dofh_loadsz;
13818 		} else {
13819 			len = dof->dofh_filesz;
13820 		}
13821 
13822 		if (sec->dofs_offset > len || sec->dofs_size > len ||
13823 		    sec->dofs_offset + sec->dofs_size > len) {
13824 			dtrace_dof_error(dof, "corrupt section header");
13825 			return (-1);
13826 		}
13827 
13828 		if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
13829 		    sec->dofs_offset + sec->dofs_size - 1) != '\0') {
13830 			dtrace_dof_error(dof, "non-terminating string table");
13831 			return (-1);
13832 		}
13833 	}
13834 
13835 	/*
13836 	 * APPLE NOTE: We have no further relocation to perform.
13837 	 * All dof values are relative offsets.
13838 	 */
13839 
13840 	if ((enab = *enabp) == NULL)
13841 		enab = *enabp = dtrace_enabling_create(vstate);
13842 
13843 	for (i = 0; i < dof->dofh_secnum; i++) {
13844 		dof_sec_t *sec = (dof_sec_t *)(daddr +
13845 		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13846 
13847 		if (sec->dofs_type != DOF_SECT_ECBDESC)
13848 			continue;
13849 
13850 		/*
13851 		 * APPLE NOTE: Defend against gcc 4.0 botch on x86:
13852 		 * not all paths out of the inlined dtrace_dof_ecbdesc()
13853 		 * are checked for a NULL return value, so check for
13854 		 * NULL explicitly here.
13855 		 */
13856 		ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr);
13857 		if (ep == NULL) {
13858 			dtrace_enabling_destroy(enab);
13859 			*enabp = NULL;
13860 			return (-1);
13861 		}
13862 
13863 		dtrace_enabling_add(enab, ep);
13864 	}
13865 
13866 	return (0);
13867 }
13868 
13869 /*
13870  * Process DOF for any options.  This routine assumes that the DOF has been
13871  * at least processed by dtrace_dof_slurp().
13872  */
13873 static int
13874 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
13875 {
13876 	uint_t i;
13877 	int rval;
13878 	uint32_t entsize;
13879 	size_t offs;
13880 	dof_optdesc_t *desc;
13881 
13882 	for (i = 0; i < dof->dofh_secnum; i++) {
13883 		dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
13884 		    (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
13885 
13886 		if (sec->dofs_type != DOF_SECT_OPTDESC)
13887 			continue;
13888 
13889 		if (sec->dofs_align != sizeof (uint64_t)) {
13890 			dtrace_dof_error(dof, "bad alignment in "
13891 			    "option description");
13892 			return (EINVAL);
13893 		}
13894 
13895 		if ((entsize = sec->dofs_entsize) == 0) {
13896 			dtrace_dof_error(dof, "zeroed option entry size");
13897 			return (EINVAL);
13898 		}
13899 
13900 		if (entsize < sizeof (dof_optdesc_t)) {
13901 			dtrace_dof_error(dof, "bad option entry size");
13902 			return (EINVAL);
13903 		}
13904 
13905 		for (offs = 0; offs < sec->dofs_size; offs += entsize) {
13906 			desc = (dof_optdesc_t *)((uintptr_t)dof +
13907 			    (uintptr_t)sec->dofs_offset + offs);
13908 
13909 			if (desc->dofo_strtab != DOF_SECIDX_NONE) {
13910 				dtrace_dof_error(dof, "non-zero option string");
13911 				return (EINVAL);
13912 			}
13913 
13914 			if (desc->dofo_value == (uint64_t)DTRACEOPT_UNSET) {
13915 				dtrace_dof_error(dof, "unset option");
13916 				return (EINVAL);
13917 			}
13918 
13919 			if ((rval = dtrace_state_option(state,
13920 			    desc->dofo_option, desc->dofo_value)) != 0) {
13921 				dtrace_dof_error(dof, "rejected option");
13922 				return (rval);
13923 			}
13924 		}
13925 	}
13926 
13927 	return (0);
13928 }
13929 
13930 /*
13931  * DTrace Consumer State Functions
13932  */
13933 static int
13934 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
13935 {
13936 	size_t hashsize, maxper, min_size, chunksize = dstate->dtds_chunksize;
13937 	void *base;
13938 	uintptr_t limit;
13939 	dtrace_dynvar_t *dvar, *next, *start;
13940 
13941 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
13942 	ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
13943 
13944 	bzero(dstate, sizeof (dtrace_dstate_t));
13945 
13946 	if ((dstate->dtds_chunksize = chunksize) == 0)
13947 		dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
13948 
13949 	VERIFY(dstate->dtds_chunksize < (LONG_MAX - sizeof (dtrace_dynhash_t)));
13950 
13951 	if (size < (min_size = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
13952 		size = min_size;
13953 
13954 	if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
13955 		return (ENOMEM);
13956 
13957 	dstate->dtds_size = size;
13958 	dstate->dtds_base = base;
13959 	dstate->dtds_percpu = zalloc_percpu(dtrace_state_pcpu_zone, Z_WAITOK | Z_ZERO);
13960 
13961 	hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
13962 
13963 	if (hashsize != 1 && (hashsize & 1))
13964 		hashsize--;
13965 
13966 	dstate->dtds_hashsize = hashsize;
13967 	dstate->dtds_hash = dstate->dtds_base;
13968 
13969 	/*
13970 	 * Set all of our hash buckets to point to the single sink, and (if
13971 	 * it hasn't already been set), set the sink's hash value to be the
13972 	 * sink sentinel value.  The sink is needed for dynamic variable
13973 	 * lookups to know that they have iterated over an entire, valid hash
13974 	 * chain.
13975 	 */
13976 	for (size_t i = 0; i < hashsize; i++)
13977 		dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
13978 
13979 	if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
13980 		dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
13981 
13982 	/*
13983 	 * Determine number of active CPUs.  Divide free list evenly among
13984 	 * active CPUs.
13985 	 */
13986 	start = (dtrace_dynvar_t *)
13987 	    ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
13988 	limit = (uintptr_t)base + size;
13989 
13990 	VERIFY((uintptr_t)start < limit);
13991 	VERIFY((uintptr_t)start >= (uintptr_t)base);
13992 
13993 	maxper = (limit - (uintptr_t)start) / (int)NCPU;
13994 	maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
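
	/*
	 * For example (sizes are illustrative), on a 4-CPU system with 960
	 * chunks' worth of space left after the hash table, maxper works out
	 * to 240 chunks:  CPUs 0 through 2 each receive a 240-chunk free list
	 * carved from the region, and the final CPU's list runs from the end
	 * of CPU 2's share to the end of the region, picking up any rounding
	 * slop (see the loop below).
	 */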
13995 
13996 	zpercpu_foreach_cpu(i) {
13997 		dtrace_dstate_percpu_t *dcpu = zpercpu_get_cpu(dstate->dtds_percpu, i);
13998 
13999 		dcpu->dtdsc_free = dvar = start;
14000 
14001 		/*
14002 		 * If we don't even have enough chunks to make it once through
14003 		 * NCPUs, we're just going to allocate everything to the first
14004 		 * CPU.  And if we're on the last CPU, we're going to allocate
14005 		 * whatever is left over.  In either case, we set the limit to
14006 		 * be the limit of the dynamic variable space.
14007 		 */
14008 		if (maxper == 0 || i == NCPU - 1) {
14009 			limit = (uintptr_t)base + size;
14010 			start = NULL;
14011 		} else {
14012 			limit = (uintptr_t)start + maxper;
14013 			start = (dtrace_dynvar_t *)limit;
14014 		}
14015 
14016 		VERIFY(limit <= (uintptr_t)base + size);
14017 
14018 		for (;;) {
14019 			next = (dtrace_dynvar_t *)((uintptr_t)dvar +
14020 			    dstate->dtds_chunksize);
14021 
14022 			if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
14023 				break;
14024 
14025 			VERIFY((uintptr_t)dvar >= (uintptr_t)base &&
14026 			    (uintptr_t)dvar <= (uintptr_t)base + size);
14027 			dvar->dtdv_next = next;
14028 			dvar = next;
14029 		}
14030 
14031 		if (maxper == 0)
14032 			break;
14033 	}
14034 
14035 	return (0);
14036 }
14037 
14038 static void
14039 dtrace_dstate_fini(dtrace_dstate_t *dstate)
14040 {
14041 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
14042 
14043 	if (dstate->dtds_base == NULL)
14044 		return;
14045 
14046 	kmem_free(dstate->dtds_base, dstate->dtds_size);
14047 	zfree_percpu(dtrace_state_pcpu_zone, dstate->dtds_percpu);
14048 }
14049 
14050 static void
14051 dtrace_vstate_fini(dtrace_vstate_t *vstate)
14052 {
14053 	/*
14054 	 * Logical XOR, where are you?
14055 	 */
14056 	ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
14057 
14058 	if (vstate->dtvs_nglobals > 0) {
14059 		kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
14060 		    sizeof (dtrace_statvar_t *));
14061 	}
14062 
14063 	if (vstate->dtvs_ntlocals > 0) {
14064 		kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
14065 		    sizeof (dtrace_difv_t));
14066 	}
14067 
14068 	ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
14069 
14070 	if (vstate->dtvs_nlocals > 0) {
14071 		kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
14072 		    sizeof (dtrace_statvar_t *));
14073 	}
14074 }
14075 
14076 static void
14077 dtrace_state_clean(dtrace_state_t *state)
14078 {
14079 	if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
14080 		return;
14081 
14082 	dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
14083 	dtrace_speculation_clean(state);
14084 }
14085 
14086 static void
14087 dtrace_state_deadman(dtrace_state_t *state)
14088 {
14089 	hrtime_t now;
14090 
14091 	dtrace_sync();
14092 
14093 	now = dtrace_gethrtime();
14094 
14095 	if (state != dtrace_anon.dta_state &&
14096 	    now - state->dts_laststatus >= dtrace_deadman_user)
14097 		return;
14098 
14099 	/*
14100 	 * We must be sure that dts_alive never appears to be less than the
14101 	 * value upon entry to dtrace_state_deadman(), and because we lack a
14102 	 * dtrace_cas64(), we cannot store to it atomically.  We thus instead
14103 	 * store INT64_MAX to it, followed by a memory barrier, followed by
14104 	 * the new value.  This assures that dts_alive never appears to be
14105 	 * less than its true value, regardless of the order in which the
14106 	 * stores to the underlying storage are issued.
14107 	 */
14108 	state->dts_alive = INT64_MAX;
14109 	dtrace_membar_producer();
14110 	state->dts_alive = now;
14111 }
14112 
14113 static int
14114 dtrace_state_create(dev_t *devp, cred_t *cr, dtrace_state_t **new_state)
14115 {
14116 	minor_t minor;
14117 	major_t major;
14118 	char c[30];
14119 	dtrace_state_t *state;
14120 	dtrace_optval_t *opt;
14121 	int bufsize = (int)NCPU * sizeof (dtrace_buffer_t), i;
14122 	unsigned int cpu_it;
14123 
14124 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
14125 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
14126 
14127 	/* Cause restart */
14128 	*new_state = NULL;
14129 
14130 	if (devp != NULL) {
14131 		minor = getminor(*devp);
14132 	}
14133 	else {
14134 		minor = DTRACE_NCLIENTS - 1;
14135 	}
14136 
14137 	state = dtrace_state_allocate(minor);
14138 	if (NULL == state) {
14139 		printf("dtrace_open: couldn't acquire minor number %d. This usually means that too many DTrace clients are in use at the moment", minor);
14140 		return (ERESTART);	/* can't reacquire */
14141 	}
14142 
14143 	state->dts_epid = DTRACE_EPIDNONE + 1;
14144 
14145 	(void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
14146 	state->dts_aggid_arena = vmem_create(c, (void *)1, INT32_MAX, 1,
14147 	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14148 
14149 	if (devp != NULL) {
14150 		major = getemajor(*devp);
14151 	} else {
14152 		major = ddi_driver_major(dtrace_devi);
14153 	}
14154 
14155 	state->dts_dev = makedev(major, minor);
14156 
14157 	if (devp != NULL)
14158 		*devp = state->dts_dev;
14159 
14160 	/*
14161 	 * We allocate NCPU buffers.  On the one hand, this can be quite
14162 	 * a bit of memory per instance (nearly 36K on a Starcat).  On the
14163 	 * other hand, it saves an additional memory reference in the probe
14164 	 * path.
14165 	 */
14166 	state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
14167 	state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
14168 	state->dts_buf_over_limit = 0;
14169 
14170 	/*
14171 	 * Allocate and initialise the per-process per-CPU random state.
14172 	 * SI_SUB_RANDOM < SI_SUB_DTRACE_ANON, so the entropy device is
14173 	 * assumed to be seeded at this point (if from the Fortuna seed file).
14174 	 */
14175 	state->dts_rstate = kmem_zalloc(NCPU * sizeof(uint64_t*), KM_SLEEP);
14176 	state->dts_rstate[0] = kmem_zalloc(2 * sizeof(uint64_t), KM_SLEEP);
14177 	(void) read_random(state->dts_rstate[0], 2 * sizeof(uint64_t));
14178 	for (cpu_it = 1; cpu_it < NCPU; cpu_it++) {
14179 		state->dts_rstate[cpu_it] = kmem_zalloc(2 * sizeof(uint64_t), KM_SLEEP);
14180 		/*
14181 		 * Each CPU is assigned a 2^64 period, non-overlapping
14182 		 * subsequence.
14183 		 */
14184 		dtrace_xoroshiro128_plus_jump(state->dts_rstate[cpu_it-1],
14185 		    state->dts_rstate[cpu_it]);
14186 	}
14187 
14188 	state->dts_cleaner = CYCLIC_NONE;
14189 	state->dts_deadman = CYCLIC_NONE;
14190 	state->dts_vstate.dtvs_state = state;
14191 
14192 	for (i = 0; i < DTRACEOPT_MAX; i++)
14193 		state->dts_options[i] = DTRACEOPT_UNSET;
14194 
14195 	/*
14196 	 * Set the default options.
14197 	 */
14198 	opt = state->dts_options;
14199 	opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
14200 	opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
14201 	opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
14202 	opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
14203 	opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
14204 	opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
14205 	opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
14206 	opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
14207 	opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
14208 	opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
14209 	opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
14210 	opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
14211 	opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
14212 	opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
14213 	opt[DTRACEOPT_BUFLIMIT] = dtrace_buflimit_default;
14214 
14215 	/*
14216 	 * Depending on the user credentials, we set flag bits which alter probe
14217 	 * visibility or the amount of destructiveness allowed.  In the case of
14218 	 * actual anonymous tracing, or the possession of all privileges, all of
14219 	 * the normal checks are bypassed.
14220 	 */
14221 #if defined(__APPLE__)
14222 	if (cr != NULL) {
14223 		kauth_cred_ref(cr);
14224 		state->dts_cred.dcr_cred = cr;
14225 	}
14226 	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
14227 		if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
14228 			/*
14229 			 * Allow only proc credentials when DTrace is
14230 			 * restricted by the current security policy
14231 			 */
14232 			state->dts_cred.dcr_visible = DTRACE_CRV_ALLPROC;
14233 			state->dts_cred.dcr_action = DTRACE_CRA_PROC | DTRACE_CRA_PROC_CONTROL | DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14234 		}
14235 		else {
14236 			state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
14237 			state->dts_cred.dcr_action = DTRACE_CRA_ALL;
14238 		}
14239 	}
14240 
14241 #else
14242 	if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
14243 		state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
14244 		state->dts_cred.dcr_action = DTRACE_CRA_ALL;
14245 	}
14246 	else {
14247 		/*
14248 		 * Set up the credentials for this instantiation.  We take a
14249 		 * hold on the credential to prevent it from disappearing on
14250 		 * us; this in turn prevents the zone_t referenced by this
14251 		 * credential from disappearing.  This means that we can
14252 		 * examine the credential and the zone from probe context.
14253 		 */
14254 		crhold(cr);
14255 		state->dts_cred.dcr_cred = cr;
14256 
14257 		/*
14258 		 * CRA_PROC means "we have *some* privilege for dtrace" and
14259 		 * unlocks the use of variables like pid, zonename, etc.
14260 		 */
14261 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
14262 		    PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14263 			state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
14264 		}
14265 
14266 		/*
14267 		 * dtrace_user allows use of syscall and profile providers.
14268 		 * If the user also has proc_owner and/or proc_zone, we
14269 		 * extend the scope to include additional visibility and
14270 		 * destructive power.
14271 		 */
14272 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
14273 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
14274 				state->dts_cred.dcr_visible |=
14275 				    DTRACE_CRV_ALLPROC;
14276 
14277 				state->dts_cred.dcr_action |=
14278 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14279 			}
14280 
14281 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
14282 				state->dts_cred.dcr_visible |=
14283 				    DTRACE_CRV_ALLZONE;
14284 
14285 				state->dts_cred.dcr_action |=
14286 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14287 			}
14288 
14289 			/*
14290 			 * If we have all privs in whatever zone this is,
14291 			 * we can do destructive things to processes which
14292 			 * have altered credentials.
14293 			 *
14294 			 * APPLE NOTE: Darwin doesn't do zones.
14295 			 * Behave as if zone always has destructive privs.
14296 			 */
14297 
14298 			state->dts_cred.dcr_action |=
14299 				DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14300 		}
14301 
14302 		/*
14303 		 * Holding the dtrace_kernel privilege also implies that
14304 		 * the user has the dtrace_user privilege from a visibility
14305 		 * perspective.  But without further privileges, some
14306 		 * destructive actions are not available.
14307 		 */
14308 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
14309 			/*
14310 			 * Make all probes in all zones visible.  However,
14311 			 * this doesn't mean that all actions become available
14312 			 * to all zones.
14313 			 */
14314 			state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
14315 			    DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
14316 
14317 			state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
14318 			    DTRACE_CRA_PROC;
14319 			/*
14320 			 * Holding proc_owner means that destructive actions
14321 			 * for *this* zone are allowed.
14322 			 */
14323 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14324 				state->dts_cred.dcr_action |=
14325 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14326 
14327 			/*
14328 			 * Holding proc_zone means that destructive actions
14329 			 * for this user/group ID in all zones are allowed.
14330 			 */
14331 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14332 				state->dts_cred.dcr_action |=
14333 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14334 
14335 			/*
14336 			 * If we have all privs in whatever zone this is,
14337 			 * we can do destructive things to processes which
14338 			 * have altered credentials.
14339 			 *
14340 			 * APPLE NOTE: Darwin doesn't do zones.
14341 			 * Behave as if zone always has destructive privs.
14342 			 */
14343 			state->dts_cred.dcr_action |=
14344 				DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14345 		}
14346 
14347 		/*
14348 		 * Holding the dtrace_proc privilege gives control over fasttrap
14349 		 * and pid providers.  We need to grant wider destructive
14350 		 * privileges in the event that the user has proc_owner and/or
14351 		 * proc_zone.
14352 		 */
14353 		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14354 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14355 				state->dts_cred.dcr_action |=
14356 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14357 
14358 			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14359 				state->dts_cred.dcr_action |=
14360 				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14361 		}
14362 	}
14363 #endif
14364 
14365 	*new_state = state;
14366 	return(0);  /* Success */
14367 }
14368 
14369 static int
14370 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
14371 {
14372 	dtrace_optval_t *opt = state->dts_options, size;
14373 	processorid_t cpu = 0;
14374 	size_t limit = buf->dtb_size;
14375 	int flags = 0, rval;
14376 
14377 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
14378 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
14379 	ASSERT(which < DTRACEOPT_MAX);
14380 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
14381 	    (state == dtrace_anon.dta_state &&
14382 	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
14383 
14384 	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
14385 		return (0);
14386 
14387 	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
14388 		cpu = opt[DTRACEOPT_CPU];
14389 
14390 	if (which == DTRACEOPT_SPECSIZE)
14391 		flags |= DTRACEBUF_NOSWITCH;
14392 
14393 	if (which == DTRACEOPT_BUFSIZE) {
14394 		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
14395 			flags |= DTRACEBUF_RING;
14396 
14397 		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
14398 			flags |= DTRACEBUF_FILL;
14399 
14400 		if (state != dtrace_anon.dta_state ||
14401 		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14402 			flags |= DTRACEBUF_INACTIVE;
14403 	}
14404 
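	/*
	 * Try to allocate the buffer at the requested size; on ENOMEM, halve
	 * the size and retry, unless the buffer resize policy is manual.
	 */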
14405 	for (size = opt[which]; (size_t)size >= sizeof (uint64_t); size >>= 1) {
14406 		/*
14407 		 * The size must be 8-byte aligned.  If the size is not 8-byte
14408 		 * aligned, drop it down by the difference.
14409 		 */
14410 		if (size & (sizeof (uint64_t) - 1))
14411 			size -= size & (sizeof (uint64_t) - 1);
14412 
14413 		if (size < state->dts_reserve) {
14414 			/*
14415 			 * Buffers must always be large enough to accommodate
14416 			 * their prereserved space.  We return E2BIG instead
14417 			 * of ENOMEM in this case to allow user-level
14418 			 * software to differentiate the cases.
14419 			 */
14420 			return (E2BIG);
14421 		}
14422 		limit = opt[DTRACEOPT_BUFLIMIT] * size / 100;
14423 		rval = dtrace_buffer_alloc(buf, limit, size, flags, cpu);
14424 
14425 		if (rval != ENOMEM) {
14426 			opt[which] = size;
14427 			return (rval);
14428 		}
14429 
14430 		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14431 			return (rval);
14432 	}
14433 
14434 	return (ENOMEM);
14435 }
14436 
14437 static int
14438 dtrace_state_buffers(dtrace_state_t *state)
14439 {
14440 	dtrace_speculation_t *spec = state->dts_speculations;
14441 	int rval, i;
14442 
14443 	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
14444 	    DTRACEOPT_BUFSIZE)) != 0)
14445 		return (rval);
14446 
14447 	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
14448 	    DTRACEOPT_AGGSIZE)) != 0)
14449 		return (rval);
14450 
14451 	for (i = 0; i < state->dts_nspeculations; i++) {
14452 		if ((rval = dtrace_state_buffer(state,
14453 		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
14454 			return (rval);
14455 	}
14456 
14457 	return (0);
14458 }
14459 
14460 static void
14461 dtrace_state_prereserve(dtrace_state_t *state)
14462 {
14463 	dtrace_ecb_t *ecb;
14464 	dtrace_probe_t *probe;
14465 
14466 	state->dts_reserve = 0;
14467 
14468 	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
14469 		return;
14470 
14471 	/*
14472 	 * If our buffer policy is a "fill" buffer policy, we need to set the
14473 	 * prereserved space to be the space required by the END probes.
14474 	 */
14475 	probe = dtrace_probes[dtrace_probeid_end - 1];
14476 	ASSERT(probe != NULL);
14477 
14478 	for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
14479 		if (ecb->dte_state != state)
14480 			continue;
14481 
14482 		state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
14483 	}
14484 }
14485 
14486 static int
14487 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
14488 {
14489 	dtrace_optval_t *opt = state->dts_options, sz, nspec;
14490 	dtrace_speculation_t *spec;
14491 	dtrace_buffer_t *buf;
14492 	cyc_handler_t hdlr;
14493 	cyc_time_t when;
14494 	int rval = 0, i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
14495 	dtrace_icookie_t cookie;
14496 
14497 	lck_mtx_lock(&cpu_lock);
14498 	lck_mtx_lock(&dtrace_lock);
14499 
14500 	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14501 		rval = EBUSY;
14502 		goto out;
14503 	}
14504 
14505 	/*
14506 	 * Before we can perform any checks, we must prime all of the
14507 	 * retained enablings that correspond to this state.
14508 	 */
14509 	dtrace_enabling_prime(state);
14510 
14511 	if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
14512 		rval = EACCES;
14513 		goto out;
14514 	}
14515 
14516 	dtrace_state_prereserve(state);
14517 
14518 	/*
14519 	 * Now we want to try to allocate our speculations.
14520 	 * We do not automatically resize the number of speculations; if
14521 	 * this fails, we will fail the operation.
14522 	 */
14523 	nspec = opt[DTRACEOPT_NSPEC];
14524 	ASSERT(nspec != DTRACEOPT_UNSET);
14525 
14526 	if (nspec > INT_MAX) {
14527 		rval = ENOMEM;
14528 		goto out;
14529 	}
14530 
14531 	spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
14532 
14533 	if (spec == NULL) {
14534 		rval = ENOMEM;
14535 		goto out;
14536 	}
14537 
14538 	state->dts_speculations = spec;
14539 	state->dts_nspeculations = (int)nspec;
14540 
14541 	for (i = 0; i < nspec; i++) {
14542 		if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
14543 			rval = ENOMEM;
14544 			goto err;
14545 		}
14546 
14547 		spec[i].dtsp_buffer = buf;
14548 	}
14549 
14550 	if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
14551 		if (dtrace_anon.dta_state == NULL) {
14552 			rval = ENOENT;
14553 			goto out;
14554 		}
14555 
14556 		if (state->dts_necbs != 0) {
14557 			rval = EALREADY;
14558 			goto out;
14559 		}
14560 
14561 		state->dts_anon = dtrace_anon_grab();
14562 		ASSERT(state->dts_anon != NULL);
14563 		state = state->dts_anon;
14564 
14565 		/*
14566 		 * We want "grabanon" to be set in the grabbed state, so we'll
14567 		 * copy that option value from the grabbing state into the
14568 		 * grabbed state.
14569 		 */
14570 		state->dts_options[DTRACEOPT_GRABANON] =
14571 		    opt[DTRACEOPT_GRABANON];
14572 
14573 		*cpu = dtrace_anon.dta_beganon;
14574 
14575 		/*
14576 		 * If the anonymous state is active (as it almost certainly
14577 		 * is if the anonymous enabling ultimately matched anything),
14578 		 * we don't allow any further option processing -- but we
14579 		 * don't return failure.
14580 		 */
14581 		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14582 			goto out;
14583 	}
14584 
14585 	if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
14586 	    opt[DTRACEOPT_AGGSIZE] != 0) {
14587 		if (state->dts_aggregations == NULL) {
14588 			/*
14589 			 * We're not going to create an aggregation buffer
14590 			 * because we don't have any ECBs that contain
14591 			 * aggregations -- set this option to 0.
14592 			 */
14593 			opt[DTRACEOPT_AGGSIZE] = 0;
14594 		} else {
14595 			/*
14596 			 * If we have an aggregation buffer, we must also have
14597 			 * a buffer to use as scratch.
14598 			 */
14599 			if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
14600 			  (size_t)opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
14601 				opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
14602 			}
14603 		}
14604 	}
14605 
14606 	if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
14607 	    opt[DTRACEOPT_SPECSIZE] != 0) {
14608 		if (!state->dts_speculates) {
14609 			/*
14610 			 * We're not going to create speculation buffers
14611 			 * because we don't have any ECBs that actually
14612 			 * speculate -- set the speculation size to 0.
14613 			 */
14614 			opt[DTRACEOPT_SPECSIZE] = 0;
14615 		}
14616 	}
14617 
14618 	/*
14619 	 * The bare minimum size for any buffer that we're actually going to
14620 	 * do anything to is sizeof (uint64_t).
14621 	 */
14622 	sz = sizeof (uint64_t);
14623 
14624 	if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
14625 	    (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
14626 	    (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
14627 		/*
14628 		 * A buffer size has been explicitly set to 0 (or to a size
14629 		 * that will be adjusted to 0) and we need the space -- we
14630 		 * need to return failure.  We return ENOSPC to differentiate
14631 		 * it from failing to allocate a buffer due to failure to meet
14632 		 * the reserve (for which we return E2BIG).
14633 		 */
14634 		rval = ENOSPC;
14635 		goto out;
14636 	}
14637 
14638 	if ((rval = dtrace_state_buffers(state)) != 0)
14639 		goto err;
14640 
14641 	if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
14642 		sz = dtrace_dstate_defsize;
14643 
14644 	do {
14645 		rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
14646 
14647 		if (rval == 0)
14648 			break;
14649 
14650 		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14651 			goto err;
14652 	} while (sz >>= 1);
14653 
14654 	opt[DTRACEOPT_DYNVARSIZE] = sz;
14655 
14656 	if (rval != 0)
14657 		goto err;
14658 
14659 	if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
14660 		opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
14661 
14662 	if (opt[DTRACEOPT_CLEANRATE] == 0)
14663 		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14664 
14665 	if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
14666 		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
14667 
14668 	if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
14669 		opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
14670 
14671 	if (opt[DTRACEOPT_STRSIZE] > dtrace_strsize_max)
14672 		opt[DTRACEOPT_STRSIZE] = dtrace_strsize_max;
14673 
14674 	if (opt[DTRACEOPT_STRSIZE] < dtrace_strsize_min)
14675 		opt[DTRACEOPT_STRSIZE] = dtrace_strsize_min;
14676 
14677 	if (opt[DTRACEOPT_BUFLIMIT] > dtrace_buflimit_max)
14678 		opt[DTRACEOPT_BUFLIMIT] = dtrace_buflimit_max;
14679 
14680 	if (opt[DTRACEOPT_BUFLIMIT] < dtrace_buflimit_min)
14681 		opt[DTRACEOPT_BUFLIMIT] = dtrace_buflimit_min;
14682 
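	/*
	 * Register two cyclics:  a cleaner that fires at the clean rate to
	 * scrub dynamic variables and speculations, and a deadman that
	 * periodically refreshes dts_alive.
	 */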
14683 	hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
14684 	hdlr.cyh_arg = state;
14685 	hdlr.cyh_level = CY_LOW_LEVEL;
14686 
14687 	when.cyt_when = 0;
14688 	when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
14689 
14690 	state->dts_cleaner = cyclic_add(&hdlr, &when);
14691 
14692 	hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
14693 	hdlr.cyh_arg = state;
14694 	hdlr.cyh_level = CY_LOW_LEVEL;
14695 
14696 	when.cyt_when = 0;
14697 	when.cyt_interval = dtrace_deadman_interval;
14698 
14699 	state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
14700 	state->dts_deadman = cyclic_add(&hdlr, &when);
14701 
14702 	state->dts_activity = DTRACE_ACTIVITY_WARMUP;
14703 
14704 	/*
14705 	 * Now it's time to actually fire the BEGIN probe.  We need to disable
14706 	 * interrupts here both to record the CPU on which we fired the BEGIN
14707 	 * probe (the data from this CPU will be processed first at user
14708 	 * level) and to manually activate the buffer for this CPU.
14709 	 */
14710 	cookie = dtrace_interrupt_disable();
14711 	*cpu = CPU->cpu_id;
14712 	ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
14713 	state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
14714 
14715 	dtrace_probe(dtrace_probeid_begin,
14716 	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14717 	dtrace_interrupt_enable(cookie);
14718 	/*
14719 	 * We may have had an exit action from a BEGIN probe; only change our
14720 	 * state to ACTIVE if we're still in WARMUP.
14721 	 */
14722 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
14723 	    state->dts_activity == DTRACE_ACTIVITY_DRAINING);
14724 
14725 	if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
14726 		state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
14727 
14728 	/*
14729 	 * Regardless of whether we're now in ACTIVE or DRAINING, we
14730 	 * want each CPU to transition its principal buffer out of the
14731 	 * INACTIVE state.  Doing this assures that no CPU will suddenly begin
14732 	 * processing an ECB halfway down a probe's ECB chain; all CPUs will
14733 	 * atomically transition from processing none of a state's ECBs to
14734 	 * processing all of them.
14735 	 */
14736 	dtrace_xcall(DTRACE_CPUALL,
14737 	    (dtrace_xcall_t)dtrace_buffer_activate, state);
14738 	goto out;
14739 
14740 err:
14741 	dtrace_buffer_free(state->dts_buffer);
14742 	dtrace_buffer_free(state->dts_aggbuffer);
14743 
14744 	if ((nspec = state->dts_nspeculations) == 0) {
14745 		ASSERT(state->dts_speculations == NULL);
14746 		goto out;
14747 	}
14748 
14749 	spec = state->dts_speculations;
14750 	ASSERT(spec != NULL);
14751 
14752 	for (i = 0; i < state->dts_nspeculations; i++) {
14753 		if ((buf = spec[i].dtsp_buffer) == NULL)
14754 			break;
14755 
14756 		dtrace_buffer_free(buf);
14757 		kmem_free(buf, bufsize);
14758 	}
14759 
14760 	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14761 	state->dts_nspeculations = 0;
14762 	state->dts_speculations = NULL;
14763 
14764 out:
14765 	lck_mtx_unlock(&dtrace_lock);
14766 	lck_mtx_unlock(&cpu_lock);
14767 
14768 	return (rval);
14769 }
14770 
14771 static int
14772 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
14773 {
14774 	dtrace_icookie_t cookie;
14775 
14776 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
14777 
14778 	if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
14779 	    state->dts_activity != DTRACE_ACTIVITY_DRAINING)
14780 		return (EINVAL);
14781 
14782 	/*
14783 	 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
14784 	 * to be sure that every CPU has seen it.  See below for the details
14785 	 * on why this is done.
14786 	 */
14787 	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
14788 	dtrace_sync();
14789 
14790 	/*
14791 	 * By this point, it is impossible for any CPU to be still processing
14792 	 * with DTRACE_ACTIVITY_ACTIVE.  We can thus set our activity to
14793 	 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
14794 	 * other CPU in dtrace_buffer_reserve().  This allows dtrace_probe()
14795 	 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
14796 	 * iff we're in the END probe.
14797 	 */
14798 	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
14799 	dtrace_sync();
14800 	ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
14801 
14802 	/*
14803 	 * Finally, we can release the reserve and call the END probe.  We
14804 	 * disable interrupts across calling the END probe to allow us to
14805 	 * return the CPU on which we actually called the END probe.  This
14806 	 * allows user-land to be sure that this CPU's principal buffer is
14807 	 * processed last.
14808 	 */
14809 	state->dts_reserve = 0;
14810 
14811 	cookie = dtrace_interrupt_disable();
14812 	*cpu = CPU->cpu_id;
14813 	dtrace_probe(dtrace_probeid_end,
14814 	    (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
14815 	dtrace_interrupt_enable(cookie);
14816 
14817 	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
14818 	dtrace_sync();
14819 
14820 	return (0);
14821 }
14822 
14823 static int
14824 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
14825     dtrace_optval_t val)
14826 {
14827 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
14828 
14829 	if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
14830 		return (EBUSY);
14831 
14832 	if (option >= DTRACEOPT_MAX)
14833 		return (EINVAL);
14834 
14835 	if (option != DTRACEOPT_CPU && val < 0)
14836 		return (EINVAL);
14837 
14838 	switch (option) {
14839 	case DTRACEOPT_DESTRUCTIVE:
14840 		if (dtrace_destructive_disallow)
14841 			return (EACCES);
14842 
14843 		state->dts_cred.dcr_destructive = 1;
14844 		break;
14845 
14846 	case DTRACEOPT_BUFSIZE:
14847 	case DTRACEOPT_DYNVARSIZE:
14848 	case DTRACEOPT_AGGSIZE:
14849 	case DTRACEOPT_SPECSIZE:
14850 	case DTRACEOPT_STRSIZE:
14851 		if (val < 0)
14852 			return (EINVAL);
14853 
14854 		if (val >= LONG_MAX) {
14855 			/*
14856 			 * If this is an otherwise negative value, set it to
14857 			 * the highest multiple of 128m less than LONG_MAX.
14858 			 * Technically, we're adjusting the size without
14859 			 * regard to the buffer resizing policy, but in fact,
14860 			 * this has no effect -- if we set the buffer size to
14861 			 * ~LONG_MAX and the buffer policy is ultimately set to
14862 			 * be "manual", the buffer allocation is guaranteed to
14863 			 * fail, if only because the allocation requires two
14864 			 * buffers.  (We set the size to the highest
14865 			 * multiple of 128m because it ensures that the size
14866 			 * will remain a multiple of a megabyte when
14867 			 * repeatedly halved -- all the way down to 15m.)
14868 			 */
14869 			val = LONG_MAX - (1 << 27) + 1;
14870 		}
14871 	}
14872 
14873 	state->dts_options[option] = val;
14874 
14875 	return (0);
14876 }
14877 
14878 static void
14879 dtrace_state_destroy(dtrace_state_t *state)
14880 {
14881 	dtrace_ecb_t *ecb;
14882 	dtrace_vstate_t *vstate = &state->dts_vstate;
14883 	minor_t minor = getminor(state->dts_dev);
14884 	int i, bufsize = (int)NCPU * sizeof (dtrace_buffer_t);
14885 	dtrace_speculation_t *spec = state->dts_speculations;
14886 	int nspec = state->dts_nspeculations;
14887 	uint32_t match;
14888 
14889 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
14890 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
14891 
14892 	/*
14893 	 * First, retract any retained enablings for this state.
14894 	 */
14895 	dtrace_enabling_retract(state);
14896 	ASSERT(state->dts_nretained == 0);
14897 
14898 	if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
14899 	    state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
14900 		/*
14901 		 * We have managed to come into dtrace_state_destroy() on a
14902 		 * hot enabling -- almost certainly because of a disorderly
14903 		 * shutdown of a consumer.  (That is, a consumer that is
14904 		 * exiting without having called dtrace_stop().) In this case,
14905 		 * we're going to set our activity to be KILLED, and then
14906 		 * issue a sync to be sure that everyone is out of probe
14907 		 * context before we start blowing away ECBs.
14908 		 */
14909 		state->dts_activity = DTRACE_ACTIVITY_KILLED;
14910 		dtrace_sync();
14911 	}
14912 
14913 	/*
14914 	 * Release the credential hold we took in dtrace_state_create().
14915 	 */
14916 	if (state->dts_cred.dcr_cred != NULL)
14917 		kauth_cred_unref(&state->dts_cred.dcr_cred);
14918 
14919 	/*
14920 	 * Now we can safely disable and destroy any enabled probes.  Because
14921 	 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
14922 	 * (especially if they're all enabled), we take two passes through the
14923 	 * ECBs:  in the first, we disable just DTRACE_PRIV_KERNEL probes, and
14924 	 * in the second we disable whatever is left over.
14925 	 */
14926 	for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
14927 		for (i = 0; i < state->dts_necbs; i++) {
14928 			if ((ecb = state->dts_ecbs[i]) == NULL)
14929 				continue;
14930 
14931 			if (match && ecb->dte_probe != NULL) {
14932 				dtrace_probe_t *probe = ecb->dte_probe;
14933 				dtrace_provider_t *prov = probe->dtpr_provider;
14934 
14935 				if (!(prov->dtpv_priv.dtpp_flags & match))
14936 					continue;
14937 			}
14938 
14939 			dtrace_ecb_disable(ecb);
14940 			dtrace_ecb_destroy(ecb);
14941 		}
14942 
14943 		if (!match)
14944 			break;
14945 	}
14946 
14947 	/*
14948 	 * Before we free the buffers, perform one more sync to assure that
14949 	 * every CPU is out of probe context.
14950 	 */
14951 	dtrace_sync();
14952 
14953 	dtrace_buffer_free(state->dts_buffer);
14954 	dtrace_buffer_free(state->dts_aggbuffer);
14955 
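	/*
	 * Free the per-CPU random state allocated in dtrace_state_create().
	 */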
14956 	for (i = 0; i < (int)NCPU; i++) {
14957 		kmem_free(state->dts_rstate[i], 2 * sizeof(uint64_t));
14958 	}
14959 	kmem_free(state->dts_rstate, NCPU * sizeof(uint64_t*));
14960 
14961 	for (i = 0; i < nspec; i++)
14962 		dtrace_buffer_free(spec[i].dtsp_buffer);
14963 
14964 	if (state->dts_cleaner != CYCLIC_NONE)
14965 		cyclic_remove(state->dts_cleaner);
14966 
14967 	if (state->dts_deadman != CYCLIC_NONE)
14968 		cyclic_remove(state->dts_deadman);
14969 
14970 	dtrace_dstate_fini(&vstate->dtvs_dynvars);
14971 	dtrace_vstate_fini(vstate);
14972 	kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
14973 
14974 	if (state->dts_aggregations != NULL) {
14975 #if DEBUG
14976 		for (i = 0; i < state->dts_naggregations; i++)
14977 			ASSERT(state->dts_aggregations[i] == NULL);
14978 #endif
14979 		ASSERT(state->dts_naggregations > 0);
14980 		kmem_free(state->dts_aggregations,
14981 		    state->dts_naggregations * sizeof (dtrace_aggregation_t *));
14982 	}
14983 
14984 	kmem_free(state->dts_buffer, bufsize);
14985 	kmem_free(state->dts_aggbuffer, bufsize);
14986 
14987 	for (i = 0; i < nspec; i++)
14988 		kmem_free(spec[i].dtsp_buffer, bufsize);
14989 
14990 	kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
14991 
14992 	dtrace_format_destroy(state);
14993 
14994 	vmem_destroy(state->dts_aggid_arena);
14995 	dtrace_state_free(minor);
14996 }
14997 
14998 /*
14999  * DTrace Anonymous Enabling Functions
15000  */
15001 
15002 int
15003 dtrace_keep_kernel_symbols(void)
15004 {
15005 	if (dtrace_is_restricted() && !dtrace_are_restrictions_relaxed()) {
15006 		return 0;
15007 	}
15008 
15009 	if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL)
15010 		return 1;
15011 
15012 	return 0;
15013 }
15014 
15015 static dtrace_state_t *
15016 dtrace_anon_grab(void)
15017 {
15018 	dtrace_state_t *state;
15019 
15020 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
15021 
15022 	if ((state = dtrace_anon.dta_state) == NULL) {
15023 		ASSERT(dtrace_anon.dta_enabling == NULL);
15024 		return (NULL);
15025 	}
15026 
15027 	ASSERT(dtrace_anon.dta_enabling != NULL);
15028 	ASSERT(dtrace_retained != NULL);
15029 
15030 	dtrace_enabling_destroy(dtrace_anon.dta_enabling);
15031 	dtrace_anon.dta_enabling = NULL;
15032 	dtrace_anon.dta_state = NULL;
15033 
15034 	return (state);
15035 }
15036 
15037 static void
15038 dtrace_anon_property(void)
15039 {
15040 	int i, rv;
15041 	dtrace_state_t *state;
15042 	dof_hdr_t *dof;
15043 	char c[32];		/* enough for "dof-data-" + digits */
15044 
15045 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
15046 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
15047 
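	/*
	 * Walk the "dof-data-N" properties in order, stopping at the first
	 * one that is missing.
	 */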
15048 	for (i = 0; ; i++) {
15049 		(void) snprintf(c, sizeof (c), "dof-data-%d", i);
15050 
15051 		dtrace_err_verbose = 1;
15052 
15053 		if ((dof = dtrace_dof_property(c)) == NULL) {
15054 			dtrace_err_verbose = 0;
15055 			break;
15056 		}
15057 
15058 #ifdef illumos
15059 		/*
15060 		 * We want to create anonymous state, so we need to transition
15061 		 * the kernel debugger to indicate that DTrace is active.  If
15062 		 * this fails (e.g. because the debugger has modified text in
15063 		 * some way), we won't continue with the processing.
15064 		 */
15065 		if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15066 			cmn_err(CE_NOTE, "kernel debugger active; anonymous "
15067 			    "enabling ignored.");
15068 			dtrace_dof_destroy(dof);
15069 			break;
15070 		}
15071 #endif
15072 
15073 		/*
15074 		 * If we haven't allocated an anonymous state, we'll do so now.
15075 		 */
15076 		if ((state = dtrace_anon.dta_state) == NULL) {
15077 			rv = dtrace_state_create(NULL, NULL, &state);
15078 			dtrace_anon.dta_state = state;
15079 			if (rv != 0 || state == NULL) {
15080 				/*
15081 				 * This basically shouldn't happen:  the only
15082 				 * failure mode from dtrace_state_create() is a
15083 				 * failure of ddi_soft_state_zalloc() that
15084 				 * itself should never happen.  Still, the
15085 				 * interface allows for a failure mode, and
15086 				 * we want to fail as gracefully as possible:
15087 				 * we'll emit an error message and cease
15088 				 * processing anonymous state in this case.
15089 				 */
15090 				cmn_err(CE_WARN, "failed to create "
15091 				    "anonymous state");
15092 				dtrace_dof_destroy(dof);
15093 				break;
15094 			}
15095 		}
15096 
15097 		rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
15098 		    &dtrace_anon.dta_enabling, 0, B_TRUE);
15099 
15100 		if (rv == 0)
15101 			rv = dtrace_dof_options(dof, state);
15102 
15103 		dtrace_err_verbose = 0;
15104 		dtrace_dof_destroy(dof);
15105 
15106 		if (rv != 0) {
15107 			/*
15108 			 * This is malformed DOF; chuck any anonymous state
15109 			 * that we created.
15110 			 */
15111 			ASSERT(dtrace_anon.dta_enabling == NULL);
15112 			dtrace_state_destroy(state);
15113 			dtrace_anon.dta_state = NULL;
15114 			break;
15115 		}
15116 
15117 		ASSERT(dtrace_anon.dta_enabling != NULL);
15118 	}
15119 
15120 	if (dtrace_anon.dta_enabling != NULL) {
15121 		int rval;
15122 
15123 		/*
15124 		 * dtrace_enabling_retain() can only fail because we are
15125 		 * trying to retain more enablings than are allowed -- but
15126 		 * we only have one anonymous enabling, and we are guaranteed
15127 		 * to be allowed at least one retained enabling; we assert
15128 		 * that dtrace_enabling_retain() returns success.
15129 		 */
15130 		rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
15131 		ASSERT(rval == 0);
15132 
15133 		dtrace_enabling_dump(dtrace_anon.dta_enabling);
15134 	}
15135 }
15136 
15137 /*
15138  * DTrace Helper Functions
15139  */
15140 static void
15141 dtrace_helper_trace(dtrace_helper_action_t *helper,
15142     dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
15143 {
15144 	uint32_t size, next, nnext;
15145 	int i;
15146 	dtrace_helptrace_t *ent;
15147 	uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
15148 
15149 	if (!dtrace_helptrace_enabled)
15150 		return;
15151 
15152 	ASSERT((uint32_t)vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
15153 
15154 	/*
15155 	 * What would a tracing framework be without its own tracing
15156 	 * framework?  (Well, a hell of a lot simpler, for starters...)
15157 	 */
15158 	size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
15159 	    sizeof (uint64_t) - sizeof (uint64_t);
15160 
15161 	/*
15162 	 * Iterate until we can allocate a slot in the trace buffer.
15163 	 */
15164 	do {
15165 		next = dtrace_helptrace_next;
15166 
15167 		if (next + size < dtrace_helptrace_bufsize) {
15168 			nnext = next + size;
15169 		} else {
15170 			nnext = size;
15171 		}
15172 	} while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
15173 
15174 	/*
15175 	 * We have our slot; fill it in.
15176 	 */
15177 	if (nnext == size)
15178 		next = 0;
15179 
15180 	ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
15181 	ent->dtht_helper = helper;
15182 	ent->dtht_where = where;
15183 	ent->dtht_nlocals = vstate->dtvs_nlocals;
15184 
15185 	ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
15186 	    mstate->dtms_fltoffs : -1;
15187 	ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
15188 	ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
15189 
15190 	for (i = 0; i < vstate->dtvs_nlocals; i++) {
15191 		dtrace_statvar_t *svar;
15192 
15193 		if ((svar = vstate->dtvs_locals[i]) == NULL)
15194 			continue;
15195 
15196 		ASSERT(svar->dtsv_size >= (int)NCPU * sizeof (uint64_t));
15197 		ent->dtht_locals[i] =
15198 		    ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
15199 	}
15200 }
15201 
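/*
 * Run the helper actions of the given kind for the current process:  each
 * helper's predicate (if any) is evaluated with dtrace_dif_emulate(), and if
 * it passes, the helper's actions are emulated in turn.
 */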
15202 __attribute__((noinline))
15203 static uint64_t
15204 dtrace_helper(int which, dtrace_mstate_t *mstate,
15205     dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
15206 {
15207 	uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
15208 	uint64_t sarg0 = mstate->dtms_arg[0];
15209 	uint64_t sarg1 = mstate->dtms_arg[1];
15210 	uint64_t rval = 0;
15211 	dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
15212 	dtrace_helper_action_t *helper;
15213 	dtrace_vstate_t *vstate;
15214 	dtrace_difo_t *pred;
15215 	int i, trace = dtrace_helptrace_enabled;
15216 
15217 	ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
15218 
15219 	if (helpers == NULL)
15220 		return (0);
15221 
15222 	if ((helper = helpers->dthps_actions[which]) == NULL)
15223 		return (0);
15224 
15225 	vstate = &helpers->dthps_vstate;
15226 	mstate->dtms_arg[0] = arg0;
15227 	mstate->dtms_arg[1] = arg1;
15228 
15229 	/*
15230 	 * Now iterate over each helper.  If its predicate evaluates to 'true',
15231 	 * we'll call the corresponding actions.  Note that the below calls
15232 	 * to dtrace_dif_emulate() may set faults in machine state.  This is
15233 	 * okay:  our caller (the outer dtrace_dif_emulate()) will simply plow
15234 	 * the stored DIF offset with its own (which is the desired behavior).
15235 	 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
15236 	 * from machine state; this is okay, too.
15237 	 */
15238 	for (; helper != NULL; helper = helper->dtha_next) {
15239 		if ((pred = helper->dtha_predicate) != NULL) {
15240 			if (trace)
15241 				dtrace_helper_trace(helper, mstate, vstate, 0);
15242 
15243 			if (!dtrace_dif_emulate(pred, mstate, vstate, state))
15244 				goto next;
15245 
15246 			if (*flags & CPU_DTRACE_FAULT)
15247 				goto err;
15248 		}
15249 
15250 		for (i = 0; i < helper->dtha_nactions; i++) {
15251 			if (trace)
15252 				dtrace_helper_trace(helper,
15253 				    mstate, vstate, i + 1);
15254 
15255 			rval = dtrace_dif_emulate(helper->dtha_actions[i],
15256 			    mstate, vstate, state);
15257 
15258 			if (*flags & CPU_DTRACE_FAULT)
15259 				goto err;
15260 		}
15261 
15262 next:
15263 		if (trace)
15264 			dtrace_helper_trace(helper, mstate, vstate,
15265 			    DTRACE_HELPTRACE_NEXT);
15266 	}
15267 
15268 	if (trace)
15269 		dtrace_helper_trace(helper, mstate, vstate,
15270 		    DTRACE_HELPTRACE_DONE);
15271 
15272 	/*
15273 	 * Restore the arg0 that we saved upon entry.
15274 	 */
15275 	mstate->dtms_arg[0] = sarg0;
15276 	mstate->dtms_arg[1] = sarg1;
15277 
15278 	return (rval);
15279 
15280 err:
15281 	if (trace)
15282 		dtrace_helper_trace(helper, mstate, vstate,
15283 		    DTRACE_HELPTRACE_ERR);
15284 
15285 	/*
15286 	 * Restore the arg0 that we saved upon entry.
15287 	 */
15288 	mstate->dtms_arg[0] = sarg0;
15289 	mstate->dtms_arg[1] = sarg1;
15290 
15291 	return (0);
15292 }
15293 
15294 static void
15295 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
15296     dtrace_vstate_t *vstate)
15297 {
15298 	int i;
15299 
15300 	if (helper->dtha_predicate != NULL)
15301 		dtrace_difo_release(helper->dtha_predicate, vstate);
15302 
15303 	for (i = 0; i < helper->dtha_nactions; i++) {
15304 		ASSERT(helper->dtha_actions[i] != NULL);
15305 		dtrace_difo_release(helper->dtha_actions[i], vstate);
15306 	}
15307 
15308 	kmem_free(helper->dtha_actions,
15309 	    helper->dtha_nactions * sizeof (dtrace_difo_t *));
15310 	kmem_free(helper, sizeof (dtrace_helper_action_t));
15311 }
15312 
15313 static int
15314 dtrace_helper_destroygen(proc_t* p, int gen)
15315 {
15316 	dtrace_helpers_t *help = p->p_dtrace_helpers;
15317 	dtrace_vstate_t *vstate;
15318 	uint_t i;
15319 
15320 	LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
15321 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
15322 
15323 	if (help == NULL || gen > help->dthps_generation)
15324 		return (EINVAL);
15325 
15326 	vstate = &help->dthps_vstate;
15327 
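	/*
	 * First, unlink and destroy any helper actions that were created in
	 * the given generation.
	 */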
15328 	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15329 		dtrace_helper_action_t *last = NULL, *h, *next;
15330 
15331 		for (h = help->dthps_actions[i]; h != NULL; h = next) {
15332 			next = h->dtha_next;
15333 
15334 			if (h->dtha_generation == gen) {
15335 				if (last != NULL) {
15336 					last->dtha_next = next;
15337 				} else {
15338 					help->dthps_actions[i] = next;
15339 				}
15340 
15341 				dtrace_helper_action_destroy(h, vstate);
15342 			} else {
15343 				last = h;
15344 			}
15345 		}
15346 	}
15347 
15348 	/*
15349 	 * Iterate until we've cleared out all helper providers with the
15350 	 * given generation number.
15351 	 */
15352 	for (;;) {
15353 		dtrace_helper_provider_t *prov = NULL;
15354 
15355 		/*
15356 		 * Look for a helper provider with the right generation. We
15357 		 * have to start back at the beginning of the list each time
15358 		 * because we drop dtrace_lock. It's unlikely that we'll make
15359 		 * more than two passes.
15360 		 */
15361 		for (i = 0; i < help->dthps_nprovs; i++) {
15362 			prov = help->dthps_provs[i];
15363 
15364 			if (prov->dthp_generation == gen)
15365 				break;
15366 		}
15367 
15368 		/*
15369 		 * If there were no matches, we're done.
15370 		 */
15371 		if (i == help->dthps_nprovs)
15372 			break;
15373 
15374 		/*
15375 		 * Move the last helper provider into this slot.
15376 		 */
15377 		help->dthps_nprovs--;
15378 		help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
15379 		help->dthps_provs[help->dthps_nprovs] = NULL;
15380 
15381 		lck_mtx_unlock(&dtrace_lock);
15382 
15383 		/*
15384 		 * If we have a meta provider, remove this helper provider.
15385 		 */
15386 		if (dtrace_meta_pid != NULL) {
15387 			ASSERT(dtrace_deferred_pid == NULL);
15388 			dtrace_helper_provider_remove(&prov->dthp_prov,
15389 			    p);
15390 		}
15391 
15392 		dtrace_helper_provider_destroy(prov);
15393 
15394 		lck_mtx_lock(&dtrace_lock);
15395 	}
15396 
15397 	return (0);
15398 }
15399 
15400 static int
15401 dtrace_helper_validate(dtrace_helper_action_t *helper)
15402 {
15403 	int err = 0, i;
15404 	dtrace_difo_t *dp;
15405 
15406 	if ((dp = helper->dtha_predicate) != NULL)
15407 		err += dtrace_difo_validate_helper(dp);
15408 
15409 	for (i = 0; i < helper->dtha_nactions; i++)
15410 		err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
15411 
15412 	return (err == 0);
15413 }
15414 
15415 static int
15416 dtrace_helper_action_add(proc_t* p, int which, dtrace_ecbdesc_t *ep)
15417 {
15418 	dtrace_helpers_t *help;
15419 	dtrace_helper_action_t *helper, *last;
15420 	dtrace_actdesc_t *act;
15421 	dtrace_vstate_t *vstate;
15422 	dtrace_predicate_t *pred;
15423 	int count = 0, nactions = 0, i;
15424 
15425 	if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
15426 		return (EINVAL);
15427 
15428 	help = p->p_dtrace_helpers;
15429 	last = help->dthps_actions[which];
15430 	vstate = &help->dthps_vstate;
15431 
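	/*
	 * Count the existing helper actions of this kind, remembering the
	 * tail of the list so that the new action can be appended to it.
	 */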
15432 	for (count = 0; last != NULL; last = last->dtha_next) {
15433 		count++;
15434 		if (last->dtha_next == NULL)
15435 			break;
15436 	}
15437 
15438 	/*
15439 	 * If we already have dtrace_helper_actions_max helper actions for this
15440 	 * helper action type, we'll refuse to add a new one.
15441 	 */
15442 	if (count >= dtrace_helper_actions_max)
15443 		return (ENOSPC);
15444 
15445 	helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
15446 	helper->dtha_generation = help->dthps_generation;
15447 
15448 	if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
15449 		ASSERT(pred->dtp_difo != NULL);
15450 		dtrace_difo_hold(pred->dtp_difo);
15451 		helper->dtha_predicate = pred->dtp_difo;
15452 	}
15453 
15454 	for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
15455 		if (act->dtad_kind != DTRACEACT_DIFEXPR)
15456 			goto err;
15457 
15458 		if (act->dtad_difo == NULL)
15459 			goto err;
15460 
15461 		nactions++;
15462 	}
15463 
15464 	helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
15465 	    (helper->dtha_nactions = nactions), KM_SLEEP);
15466 
15467 	for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
15468 		dtrace_difo_hold(act->dtad_difo);
15469 		helper->dtha_actions[i++] = act->dtad_difo;
15470 	}
15471 
15472 	if (!dtrace_helper_validate(helper))
15473 		goto err;
15474 
15475 	if (last == NULL) {
15476 		help->dthps_actions[which] = helper;
15477 	} else {
15478 		last->dtha_next = helper;
15479 	}
15480 
15481 	if ((uint32_t)vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
15482 		dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
15483 		dtrace_helptrace_next = 0;
15484 	}
15485 
15486 	return (0);
15487 err:
15488 	dtrace_helper_action_destroy(helper, vstate);
15489 	return (EINVAL);
15490 }
15491 
15492 static void
15493 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
15494     dof_helper_t *dofhp)
15495 {
15496 	LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
15497 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
15498 
15499 	lck_mtx_lock(&dtrace_lock);
15500 
15501 	if (!dtrace_attached() || dtrace_meta_pid == NULL) {
15502 		/*
15503 		 * If the dtrace module is loaded but not attached, or if
15504 		 * there isn't a meta provider registered to deal with
15505 		 * these provider descriptions, we need to postpone creating
15506 		 * the actual providers until later.
15507 		 */
15508 
15509 		if (help->dthps_next == NULL && help->dthps_prev == NULL &&
15510 		    dtrace_deferred_pid != help) {
15511 			help->dthps_deferred = 1;
15512 			help->dthps_pid = proc_getpid(p);
15513 			help->dthps_next = dtrace_deferred_pid;
15514 			help->dthps_prev = NULL;
15515 			if (dtrace_deferred_pid != NULL)
15516 				dtrace_deferred_pid->dthps_prev = help;
15517 			dtrace_deferred_pid = help;
15518 		}
15519 
15520 		lck_mtx_unlock(&dtrace_lock);
15521 
15522 	} else if (dofhp != NULL) {
15523 		/*
15524 		 * If the dtrace module is loaded and we have a particular
15525 		 * helper provider description, pass that off to the
15526 		 * meta provider.
15527 		 */
15528 
15529 		lck_mtx_unlock(&dtrace_lock);
15530 
15531 		dtrace_helper_provide(dofhp, p);
15532 
15533 	} else {
15534 		/*
15535 		 * Otherwise, just pass all the helper provider descriptions
15536 		 * off to the meta provider.
15537 		 */
15538 
15539 		uint_t i;
15540 		lck_mtx_unlock(&dtrace_lock);
15541 
15542 		for (i = 0; i < help->dthps_nprovs; i++) {
15543 			dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
15544 				p);
15545 		}
15546 	}
15547 }
15548 
15549 static int
15550 dtrace_helper_provider_add(proc_t* p, dof_helper_t *dofhp, int gen)
15551 {
15552 	dtrace_helpers_t *help;
15553 	dtrace_helper_provider_t *hprov, **tmp_provs;
15554 	uint_t tmp_maxprovs, i;
15555 
15556 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
15557 	help = p->p_dtrace_helpers;
15558 	ASSERT(help != NULL);
15559 
15560 	/*
15561 	 * If we already have dtrace_helper_providers_max helper providers,
15562 	 * we refuse to add a new one.
15563 	 */
15564 	if (help->dthps_nprovs >= dtrace_helper_providers_max)
15565 		return (ENOSPC);
15566 
15567 	/*
15568 	 * Check to make sure this isn't a duplicate.
15569 	 */
15570 	for (i = 0; i < help->dthps_nprovs; i++) {
15571 		if (dofhp->dofhp_addr ==
15572 		    help->dthps_provs[i]->dthp_prov.dofhp_addr)
15573 			return (EALREADY);
15574 	}
15575 
15576 	hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
15577 	hprov->dthp_prov = *dofhp;
15578 	hprov->dthp_ref = 1;
15579 	hprov->dthp_generation = gen;
15580 
15581 	/*
15582 	 * Allocate a bigger table for helper providers if it's already full.
15583 	 */
15584 	if (help->dthps_maxprovs == help->dthps_nprovs) {
15585 		tmp_maxprovs = help->dthps_maxprovs;
15586 		tmp_provs = help->dthps_provs;
15587 
15588 		if (help->dthps_maxprovs == 0)
15589 			help->dthps_maxprovs = 2;
15590 		else
15591 			help->dthps_maxprovs *= 2;
15592 		if (help->dthps_maxprovs > dtrace_helper_providers_max)
15593 			help->dthps_maxprovs = dtrace_helper_providers_max;
15594 
15595 		ASSERT(tmp_maxprovs < help->dthps_maxprovs);
15596 
15597 		help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
15598 		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
15599 
15600 		if (tmp_provs != NULL) {
15601 			bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
15602 			    sizeof (dtrace_helper_provider_t *));
15603 			kmem_free(tmp_provs, tmp_maxprovs *
15604 			    sizeof (dtrace_helper_provider_t *));
15605 		}
15606 	}
15607 
15608 	help->dthps_provs[help->dthps_nprovs] = hprov;
15609 	help->dthps_nprovs++;
15610 
15611 	return (0);
15612 }
15613 
15614 static void
15615 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
15616 {
15617 	lck_mtx_lock(&dtrace_lock);
15618 
15619 	if (--hprov->dthp_ref == 0) {
15620 		dof_hdr_t *dof;
15621 		lck_mtx_unlock(&dtrace_lock);
15622 		dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
15623 		dtrace_dof_destroy(dof);
15624 		kmem_free(hprov, sizeof (dtrace_helper_provider_t));
15625 	} else {
15626 		lck_mtx_unlock(&dtrace_lock);
15627 	}
15628 }
15629 
15630 static int
15631 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
15632 {
15633 	uintptr_t daddr = (uintptr_t)dof;
15634 	dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
15635 	dof_provider_t *provider;
15636 	dof_probe_t *probe;
15637 	uint8_t *arg;
15638 	char *strtab, *typestr;
15639 	dof_stridx_t typeidx;
15640 	size_t typesz;
15641 	uint_t nprobes, j, k;
15642 
15643 	ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
15644 
15645 	if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
15646 		dtrace_dof_error(dof, "misaligned section offset");
15647 		return (-1);
15648 	}
15649 
15650 	/*
15651 	 * The section needs to be large enough to contain the DOF provider
15652 	 * structure appropriate for the given version.
15653 	 */
15654 	if (sec->dofs_size <
15655 	    ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
15656 	    offsetof(dof_provider_t, dofpv_prenoffs) :
15657 	    sizeof (dof_provider_t))) {
15658 		dtrace_dof_error(dof, "provider section too small");
15659 		return (-1);
15660 	}
15661 
15662 	provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
15663 	str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
15664 	prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
15665 	arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
15666 	off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
15667 
15668 	if (str_sec == NULL || prb_sec == NULL ||
15669 	    arg_sec == NULL || off_sec == NULL)
15670 		return (-1);
15671 
15672 	enoff_sec = NULL;
15673 
15674 	if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
15675 	    provider->dofpv_prenoffs != DOF_SECT_NONE &&
15676 	    (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
15677 	    provider->dofpv_prenoffs)) == NULL)
15678 		return (-1);
15679 
15680 	strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
15681 
15682 	if (provider->dofpv_name >= str_sec->dofs_size ||
15683 	    strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
15684 		dtrace_dof_error(dof, "invalid provider name");
15685 		return (-1);
15686 	}
15687 
15688 	if (prb_sec->dofs_entsize == 0 ||
15689 	    prb_sec->dofs_entsize > prb_sec->dofs_size) {
15690 		dtrace_dof_error(dof, "invalid entry size");
15691 		return (-1);
15692 	}
15693 
15694 	if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
15695 		dtrace_dof_error(dof, "misaligned entry size");
15696 		return (-1);
15697 	}
15698 
15699 	if (off_sec->dofs_entsize != sizeof (uint32_t)) {
15700 		dtrace_dof_error(dof, "invalid entry size");
15701 		return (-1);
15702 	}
15703 
15704 	if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
15705 		dtrace_dof_error(dof, "misaligned section offset");
15706 		return (-1);
15707 	}
15708 
15709 	if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
15710 		dtrace_dof_error(dof, "invalid entry size");
15711 		return (-1);
15712 	}
15713 
15714 	arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
15715 
15716 	nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
15717 
15718 	/*
15719 	 * Take a pass through the probes to check for errors.
15720 	 */
15721 	for (j = 0; j < nprobes; j++) {
15722 		probe = (dof_probe_t *)(uintptr_t)(daddr +
15723 		    prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
15724 
15725 		if (probe->dofpr_func >= str_sec->dofs_size) {
15726 			dtrace_dof_error(dof, "invalid function name");
15727 			return (-1);
15728 		}
15729 
15730 		if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
15731 			dtrace_dof_error(dof, "function name too long");
15732 			return (-1);
15733 		}
15734 
15735 		if (probe->dofpr_name >= str_sec->dofs_size ||
15736 		    strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
15737 			dtrace_dof_error(dof, "invalid probe name");
15738 			return (-1);
15739 		}
15740 
15741 		/*
15742 		 * The offset count must not wrap the index, and the offsets
15743 		 * must also not overflow the section's data.
15744 		 */
15745 		if (probe->dofpr_offidx + probe->dofpr_noffs <
15746 		    probe->dofpr_offidx ||
15747 		    (probe->dofpr_offidx + probe->dofpr_noffs) *
15748 		    off_sec->dofs_entsize > off_sec->dofs_size) {
15749 			dtrace_dof_error(dof, "invalid probe offset");
15750 			return (-1);
15751 		}
15752 
15753 		if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
15754 			/*
15755 			 * If there's no is-enabled offset section, make sure
15756 			 * there aren't any is-enabled offsets. Otherwise
15757 			 * perform the same checks as for probe offsets
15758 			 * (immediately above).
15759 			 */
15760 			if (enoff_sec == NULL) {
15761 				if (probe->dofpr_enoffidx != 0 ||
15762 				    probe->dofpr_nenoffs != 0) {
15763 					dtrace_dof_error(dof, "is-enabled "
15764 					    "offsets with null section");
15765 					return (-1);
15766 				}
15767 			} else if (probe->dofpr_enoffidx +
15768 			    probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
15769 			    (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
15770 			    enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
15771 				dtrace_dof_error(dof, "invalid is-enabled "
15772 				    "offset");
15773 				return (-1);
15774 			}
15775 
15776 			if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
15777 				dtrace_dof_error(dof, "zero probe and "
15778 				    "is-enabled offsets");
15779 				return (-1);
15780 			}
15781 		} else if (probe->dofpr_noffs == 0) {
15782 			dtrace_dof_error(dof, "zero probe offsets");
15783 			return (-1);
15784 		}
15785 
15786 		if (probe->dofpr_argidx + probe->dofpr_xargc <
15787 		    probe->dofpr_argidx ||
15788 		    (probe->dofpr_argidx + probe->dofpr_xargc) *
15789 		    arg_sec->dofs_entsize > arg_sec->dofs_size) {
15790 			dtrace_dof_error(dof, "invalid args");
15791 			return (-1);
15792 		}
15793 
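		/*
		 * Validate the native argument types:  each type string must
		 * lie within the string table and fit in DTRACE_ARGTYPELEN.
		 */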
15794 		typeidx = probe->dofpr_nargv;
15795 		typestr = strtab + probe->dofpr_nargv;
15796 		for (k = 0; k < probe->dofpr_nargc; k++) {
15797 			if (typeidx >= str_sec->dofs_size) {
15798 				dtrace_dof_error(dof, "bad "
15799 				    "native argument type");
15800 				return (-1);
15801 			}
15802 
15803 			typesz = strlen(typestr) + 1;
15804 			if (typesz > DTRACE_ARGTYPELEN) {
15805 				dtrace_dof_error(dof, "native "
15806 				    "argument type too long");
15807 				return (-1);
15808 			}
15809 			typeidx += typesz;
15810 			typestr += typesz;
15811 		}
15812 
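		/*
		 * Validate the translated argument types, and check that each
		 * translated argument refers to a valid native argument.
		 */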
15813 		typeidx = probe->dofpr_xargv;
15814 		typestr = strtab + probe->dofpr_xargv;
15815 		for (k = 0; k < probe->dofpr_xargc; k++) {
15816 			if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
15817 				dtrace_dof_error(dof, "bad "
15818 				    "native argument index");
15819 				return (-1);
15820 			}
15821 
15822 			if (typeidx >= str_sec->dofs_size) {
15823 				dtrace_dof_error(dof, "bad "
15824 				    "translated argument type");
15825 				return (-1);
15826 			}
15827 
15828 			typesz = strlen(typestr) + 1;
15829 			if (typesz > DTRACE_ARGTYPELEN) {
15830 				dtrace_dof_error(dof, "translated argument "
15831 				    "type too long");
15832 				return (-1);
15833 			}
15834 
15835 			typeidx += typesz;
15836 			typestr += typesz;
15837 		}
15838 	}
15839 
15840 	return (0);
15841 }
15842 
15843 static int
15844 dtrace_helper_slurp(proc_t* p, dof_hdr_t *dof, dof_helper_t *dhp)
15845 {
15846 	dtrace_helpers_t *help;
15847 	dtrace_vstate_t *vstate;
15848 	dtrace_enabling_t *enab = NULL;
15849 	int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
15850 	uintptr_t daddr = (uintptr_t)dof;
15851 
15852 	LCK_MTX_ASSERT(&dtrace_meta_lock, LCK_MTX_ASSERT_OWNED);
15853 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
15854 
15855 	if ((help = p->p_dtrace_helpers) == NULL)
15856 		help = dtrace_helpers_create(p);
15857 
15858 	vstate = &help->dthps_vstate;
15859 
15860 	if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
15861 	    dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
15862 		dtrace_dof_destroy(dof);
15863 		return (rv);
15864 	}
15865 
15866 	/*
15867 	 * Look for helper providers and validate their descriptions.
15868 	 */
15869 	if (dhp != NULL) {
15870 		for (i = 0; (uint32_t)i < dof->dofh_secnum; i++) {
15871 			dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
15872 			    dof->dofh_secoff + i * dof->dofh_secsize);
15873 
15874 			if (sec->dofs_type != DOF_SECT_PROVIDER)
15875 				continue;
15876 
15877 			if (dtrace_helper_provider_validate(dof, sec) != 0) {
15878 				dtrace_enabling_destroy(enab);
15879 				dtrace_dof_destroy(dof);
15880 				return (-1);
15881 			}
15882 
15883 			nprovs++;
15884 		}
15885 	}
15886 
15887 	/*
15888 	 * Now we need to walk through the ECB descriptions in the enabling.
15889 	 */
15890 	for (i = 0; i < enab->dten_ndesc; i++) {
15891 		dtrace_ecbdesc_t *ep = enab->dten_desc[i];
15892 		dtrace_probedesc_t *desc = &ep->dted_probe;
15893 
15894 		/* APPLE NOTE: Darwin employs size bounded string operation. */
15895 		if (!LIT_STRNEQL(desc->dtpd_provider, "dtrace"))
15896 			continue;
15897 
15898 		if (!LIT_STRNEQL(desc->dtpd_mod, "helper"))
15899 			continue;
15900 
15901 		if (!LIT_STRNEQL(desc->dtpd_func, "ustack"))
15902 			continue;
15903 
15904 		if ((rv = dtrace_helper_action_add(p, DTRACE_HELPER_ACTION_USTACK,
15905 		    ep)) != 0) {
15906 			/*
15907 			 * Adding this helper action failed -- we are now going
15908 			 * to rip out the entire generation and return failure.
15909 			 */
15910 			(void) dtrace_helper_destroygen(p, help->dthps_generation);
15911 			dtrace_enabling_destroy(enab);
15912 			dtrace_dof_destroy(dof);
15913 			return (-1);
15914 		}
15915 
15916 		nhelpers++;
15917 	}
15918 
15919 	if (nhelpers < enab->dten_ndesc)
15920 		dtrace_dof_error(dof, "unmatched helpers");
15921 
15922 	gen = help->dthps_generation++;
15923 	dtrace_enabling_destroy(enab);
15924 
15925 	if (dhp != NULL && nprovs > 0) {
15926 		dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
15927 		if (dtrace_helper_provider_add(p, dhp, gen) == 0) {
15928 			lck_mtx_unlock(&dtrace_lock);
15929 			dtrace_helper_provider_register(p, help, dhp);
15930 			lck_mtx_lock(&dtrace_lock);
15931 
15932 			destroy = 0;
15933 		}
15934 	}
15935 
15936 	if (destroy)
15937 		dtrace_dof_destroy(dof);
15938 
15939 	return (gen);
15940 }
15941 
15942 /*
15943  * APPLE NOTE:  DTrace lazy dof implementation
15944  *
15945  * DTrace user static probes (USDT probes) and helper actions are loaded
15946  * in a process by processing dof sections. The dof sections are passed
15947  * into the kernel by dyld, in a dof_ioctl_data_t block. It is rather
15948  * expensive to process dof for a process that will never use it. There
15949  * is a memory cost (allocating the providers/probes), and a cpu cost
15950  * (creating the providers/probes).
15951  *
15952  * To reduce this cost, we use "lazy dof". The normal procedure for
15953  * dof processing is to copyin the dof(s) pointed to by the dof_ioctl_data_t
15954  * block, and invoke dtrace_helper_slurp() on them. When "lazy dof" is
15955  * used, each process retains the dof_ioctl_data_t block, instead of
15956  * copying in the data it points to.
15957  *
15958  * The dof_ioctl_data_t blocks are managed as if they were the actual
15959  * processed dof; on fork the block is copied to the child, on exec and
15960  * exit the block is freed.
15961  *
15962  * If the process loads library(s) containing additional dof, the
15963  * new dof_ioctl_data_t is merged with the existing block.
15964  *
15965  * There are a few catches that make this slightly more difficult.
15966  * When dyld registers dof_ioctl_data_t blocks, it expects a unique
15967  * identifier value for each dof in the block. In non-lazy dof terms,
15968  * this is the generation that dof was loaded in. If we hand back
15969  * a UID for a lazy dof, that same UID must be able to unload the
15970  * dof once it has become non-lazy. To meet this requirement, the
15971  * code that loads lazy dof requires that the UIDs for dof(s) in
15972  * the lazy dof be sorted in ascending order. It is okay to skip
15973  * UIDs, e.g., 1 -> 5 -> 6 is legal.
15974  *
15975  * Once a process has become non-lazy, it will stay non-lazy. All
15976  * future dof operations for that process will be non-lazy, even
15977  * if the dof mode transitions back to lazy.
15978  *
15979  * Always do lazy dof checks before non-lazy ones (i.e., in fork, exit, and exec).
15980  * That way if the lazy check fails due to transitioning to non-lazy, the
15981  * right thing is done with the newly faulted in dof.
15982  */
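/*
 * A minimal illustrative sketch (not compiled; it assumes the dof_ioctl_data_t
 * and dof_helper_t layout used below): registering two new dofs on top of an
 * existing lazy block whose generations are {1, 2} proceeds roughly as follows
 * in dtrace_lazy_dofs_add():
 *
 *	existing:  dofiod_count = 2, dofhp_dof = {1, 2}
 *	incoming:  dofiod_count = 2, dofhp_dof == dofhp_addr (as passed by dyld)
 *
 *	generation = existing[dofiod_count - 1].dofhp_dof + 1;	// 3
 *	incoming[0].dofhp_dof = 3; incoming[1].dofhp_dof = 4;
 *
 *	merged:    dofiod_count = 4, dofhp_dof = {1, 2, 3, 4}
 *
 * dtrace_lazy_dofs_process() later walks the merged block, restoring
 * dofhp_addr into dofhp_dof before handing each dof to dtrace_helper_slurp().
 */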
15983 
15984 /*
15985  * This method is a bit squicky. It must handle:
15986  *
15987  * dof should not be lazy.
15988  * dof should have been handled lazily, but there was an error.
15989  * dof was handled lazily, and needs to be freed.
15990  * dof was handled lazily, and must not be freed.
15991  *
15992  *
15993  * Returns EACCES if dof should be handled non-lazily.
15994  *
15995  * KERN_SUCCESS and all other return codes indicate lazy handling of dof.
15996  *
15997  * If the dofs data is claimed by this method, dofs_claimed will be set.
15998  * Callers should not free claimed dofs.
15999  */
16000 static int
16001 dtrace_lazy_dofs_add(proc_t *p, dof_ioctl_data_t* incoming_dofs, int *dofs_claimed)
16002 {
16003 	ASSERT(p);
16004 	ASSERT(incoming_dofs && incoming_dofs->dofiod_count > 0);
16005 
16006 	int rval = 0;
16007 	*dofs_claimed = 0;
16008 
16009 	lck_rw_lock_shared(&dtrace_dof_mode_lock);
16010 
16011 	ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
16012 	ASSERT(dtrace_dof_mode != DTRACE_DOF_MODE_NEVER);
16013 
16014 	/*
16015 	 * Any existing helpers force non-lazy behavior.
16016 	 */
16017 	if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
16018 		dtrace_sprlock(p);
16019 
16020 		dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
16021 		unsigned int existing_dofs_count = (existing_dofs) ? existing_dofs->dofiod_count : 0;
16022 		unsigned int i, merged_dofs_count = incoming_dofs->dofiod_count + existing_dofs_count;
16023 
16024 		/*
16025 		 * Range check...
16026 		 */
16027 		if (merged_dofs_count == 0 || merged_dofs_count > 1024) {
16028 			dtrace_dof_error(NULL, "lazy_dofs_add merged_dofs_count out of range");
16029 			rval = EINVAL;
16030 			goto unlock;
16031 		}
16032 
16033 		/*
16034 		 * Each dof being added must be assigned a unique generation.
16035 		 */
16036 		uint64_t generation = (existing_dofs) ? existing_dofs->dofiod_helpers[existing_dofs_count - 1].dofhp_dof + 1 : 1;
16037 		for (i=0; i<incoming_dofs->dofiod_count; i++) {
16038 			/*
16039 			 * We rely on these being the same so we can overwrite dofhp_dof and not lose info.
16040 			 */
16041 			ASSERT(incoming_dofs->dofiod_helpers[i].dofhp_dof == incoming_dofs->dofiod_helpers[i].dofhp_addr);
16042 			incoming_dofs->dofiod_helpers[i].dofhp_dof = generation++;
16043 		}
16044 
16045 
16046 		if (existing_dofs) {
16047 			/*
16048 			 * Merge the existing and incoming dofs
16049 			 */
16050 			size_t merged_dofs_size = DOF_IOCTL_DATA_T_SIZE(merged_dofs_count);
16051 			dof_ioctl_data_t* merged_dofs = kmem_alloc(merged_dofs_size, KM_SLEEP);
16052 
16053 			bcopy(&existing_dofs->dofiod_helpers[0],
16054 			      &merged_dofs->dofiod_helpers[0],
16055 			      sizeof(dof_helper_t) * existing_dofs_count);
16056 			bcopy(&incoming_dofs->dofiod_helpers[0],
16057 			      &merged_dofs->dofiod_helpers[existing_dofs_count],
16058 			      sizeof(dof_helper_t) * incoming_dofs->dofiod_count);
16059 
16060 			merged_dofs->dofiod_count = merged_dofs_count;
16061 
16062 			kmem_free(existing_dofs, DOF_IOCTL_DATA_T_SIZE(existing_dofs_count));
16063 
16064 			p->p_dtrace_lazy_dofs = merged_dofs;
16065 		} else {
16066 			/*
16067 			 * Claim the incoming dofs
16068 			 */
16069 			*dofs_claimed = 1;
16070 			p->p_dtrace_lazy_dofs = incoming_dofs;
16071 		}
16072 
16073 #if DEBUG
16074 		dof_ioctl_data_t* all_dofs = p->p_dtrace_lazy_dofs;
16075 		for (i=0; i<all_dofs->dofiod_count-1; i++) {
16076 			ASSERT(all_dofs->dofiod_helpers[i].dofhp_dof < all_dofs->dofiod_helpers[i+1].dofhp_dof);
16077 		}
16078 #endif /* DEBUG */
16079 
16080 unlock:
16081 		dtrace_sprunlock(p);
16082 	} else {
16083 		rval = EACCES;
16084 	}
16085 
16086  	lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16087 
16088 	return rval;
16089 }
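/*
 * A minimal caller sketch (hypothetical; the real ioctl path differs), showing
 * how the contract above is honored: EACCES means "retry non-lazily", and a
 * claimed block must not be freed by the caller.
 *
 *	int claimed = 0;
 *	int err = dtrace_lazy_dofs_add(p, dofs, &claimed);
 *	if (err == EACCES) {
 *		// lazy mode is off (or helpers already exist); take the non-lazy path
 *	}
 *	if (!claimed)
 *		kmem_free(dofs, DOF_IOCTL_DATA_T_SIZE(dofs->dofiod_count));
 */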
16090 
16091 /*
16092  * Returns:
16093  *
16094  * EINVAL: lazy dof is enabled, but the requested generation was not found.
16095  * EACCES: This removal needs to be handled non-lazily.
16096  */
16097 static int
16098 dtrace_lazy_dofs_remove(proc_t *p, int generation)
16099 {
16100 	int rval = EINVAL;
16101 
16102 	lck_rw_lock_shared(&dtrace_dof_mode_lock);
16103 
16104 	ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
16105 	ASSERT(dtrace_dof_mode != DTRACE_DOF_MODE_NEVER);
16106 
16107 	/*
16108 	 * Any existing helpers force non-lazy behavior.
16109 	 */
16110 	if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON && (p->p_dtrace_helpers == NULL)) {
16111 		dtrace_sprlock(p);
16112 
16113 		dof_ioctl_data_t* existing_dofs = p->p_dtrace_lazy_dofs;
16114 
16115 		if (existing_dofs) {
16116 			int index, existing_dofs_count = existing_dofs->dofiod_count;
16117 			for (index=0; index<existing_dofs_count; index++) {
16118 				if ((int)existing_dofs->dofiod_helpers[index].dofhp_dof == generation) {
16119 					dof_ioctl_data_t* removed_dofs = NULL;
16120 
16121 					/*
16122 					 * If there is only 1 dof, we'll delete it and swap in NULL.
16123 					 */
16124 					if (existing_dofs_count > 1) {
16125 						int removed_dofs_count = existing_dofs_count - 1;
16126 						size_t removed_dofs_size = DOF_IOCTL_DATA_T_SIZE(removed_dofs_count);
16127 
16128 						removed_dofs = kmem_alloc(removed_dofs_size, KM_SLEEP);
16129 						removed_dofs->dofiod_count = removed_dofs_count;
16130 
16131 						/*
16132 						 * copy the remaining data.
16133 						 */
16134 						if (index > 0) {
16135 							bcopy(&existing_dofs->dofiod_helpers[0],
16136 							      &removed_dofs->dofiod_helpers[0],
16137 							      index * sizeof(dof_helper_t));
16138 						}
16139 
16140 						if (index < existing_dofs_count-1) {
16141 							bcopy(&existing_dofs->dofiod_helpers[index+1],
16142 							      &removed_dofs->dofiod_helpers[index],
16143 							      (existing_dofs_count - index - 1) * sizeof(dof_helper_t));
16144 						}
16145 					}
16146 
16147 					kmem_free(existing_dofs, DOF_IOCTL_DATA_T_SIZE(existing_dofs_count));
16148 
16149 					p->p_dtrace_lazy_dofs = removed_dofs;
16150 
16151 					rval = KERN_SUCCESS;
16152 
16153 					break;
16154 				}
16155 			}
16156 
16157 #if DEBUG
16158 			dof_ioctl_data_t* all_dofs = p->p_dtrace_lazy_dofs;
16159 			if (all_dofs) {
16160 				unsigned int i;
16161 				for (i=0; i<all_dofs->dofiod_count-1; i++) {
16162 					ASSERT(all_dofs->dofiod_helpers[i].dofhp_dof < all_dofs->dofiod_helpers[i+1].dofhp_dof);
16163 				}
16164 			}
16165 #endif
16166 
16167 		}
16168 		dtrace_sprunlock(p);
16169 	} else {
16170 		rval = EACCES;
16171 	}
16172 
16173 	lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16174 
16175 	return rval;
16176 }
16177 
16178 void
16179 dtrace_lazy_dofs_destroy(proc_t *p)
16180 {
16181 	lck_rw_lock_shared(&dtrace_dof_mode_lock);
16182 	dtrace_sprlock(p);
16183 
16184 	ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
16185 
16186 	dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
16187 	p->p_dtrace_lazy_dofs = NULL;
16188 
16189 	dtrace_sprunlock(p);
16190 	lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16191 
16192 	if (lazy_dofs) {
16193 		kmem_free(lazy_dofs, DOF_IOCTL_DATA_T_SIZE(lazy_dofs->dofiod_count));
16194 	}
16195 }
16196 
16197 static int
16198 dtrace_lazy_dofs_proc_iterate_filter(proc_t *p, void* ignored)
16199 {
16200 #pragma unused(ignored)
16201 	/*
16202 	 * Okay to NULL test without taking the sprlock.
16203 	 */
16204 	return p->p_dtrace_lazy_dofs != NULL;
16205 }
16206 
16207 static void
16208 dtrace_lazy_dofs_process(proc_t *p) {
16209 	/*
16210 	 * It is possible this process may exit during our attempt to
16211 	 * fault in the dof. We could fix this by holding locks longer,
16212 	 * but the errors are benign.
16213 	 */
16214 	dtrace_sprlock(p);
16215 
16216 
16217 	ASSERT(p->p_dtrace_lazy_dofs == NULL || p->p_dtrace_helpers == NULL);
16218 	ASSERT(dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF);
16219 
16220 	dof_ioctl_data_t* lazy_dofs = p->p_dtrace_lazy_dofs;
16221 	p->p_dtrace_lazy_dofs = NULL;
16222 
16223 	dtrace_sprunlock(p);
16224 	lck_mtx_lock(&dtrace_meta_lock);
16225 	/*
16226 	 * Process each dof_helper_t
16227 	 */
16228 	if (lazy_dofs != NULL) {
16229 		unsigned int i;
16230 		int rval;
16231 
16232 		for (i=0; i<lazy_dofs->dofiod_count; i++) {
16233 			/*
16234 			 * When loading lazy dof, we depend on the generations being sorted in ascending order.
16235 			 */
16236 			ASSERT(i >= (lazy_dofs->dofiod_count - 1) || lazy_dofs->dofiod_helpers[i].dofhp_dof < lazy_dofs->dofiod_helpers[i+1].dofhp_dof);
16237 
16238 			dof_helper_t *dhp = &lazy_dofs->dofiod_helpers[i];
16239 
16240 			/*
16241 			 * We stored the generation in dofhp_dof. Save it, and restore the original value.
16242 			 */
16243 			int generation = dhp->dofhp_dof;
16244 			dhp->dofhp_dof = dhp->dofhp_addr;
16245 
16246 			dof_hdr_t *dof = dtrace_dof_copyin_from_proc(p, dhp->dofhp_dof, &rval);
16247 
16248 			if (dof != NULL) {
16249 				dtrace_helpers_t *help;
16250 
16251 				lck_mtx_lock(&dtrace_lock);
16252 
16253 				/*
16254 				 * This must be done with the dtrace_lock held
16255 				 */
16256 				if ((help = p->p_dtrace_helpers) == NULL)
16257 					help = dtrace_helpers_create(p);
16258 
16259 				/*
16260 				 * If the generation value has been bumped, someone snuck in
16261 				 * when we released the dtrace lock. We have to dump this generation;
16262 				 * there is no safe way to load it.
16263 				 */
16264 				if (help->dthps_generation <= generation) {
16265 					help->dthps_generation = generation;
16266 
16267 					/*
16268 					 * dtrace_helper_slurp() takes responsibility for the dof --
16269 					 * it may free it now or it may save it and free it later.
16270 					 */
16271 					if ((rval = dtrace_helper_slurp(p, dof, dhp)) != generation) {
16272 						dtrace_dof_error(NULL, "returned value did not match expected generation");
16273 					}
16274 				}
16275 
16276 				lck_mtx_unlock(&dtrace_lock);
16277 			}
16278 		}
16279 		lck_mtx_unlock(&dtrace_meta_lock);
16280 		kmem_free(lazy_dofs, DOF_IOCTL_DATA_T_SIZE(lazy_dofs->dofiod_count));
16281 	} else {
16282 		lck_mtx_unlock(&dtrace_meta_lock);
16283 	}
16284 }
16285 
16286 static int
16287 dtrace_lazy_dofs_proc_iterate_doit(proc_t *p, void* ignored)
16288 {
16289 #pragma unused(ignored)
16290 
16291 	dtrace_lazy_dofs_process(p);
16292 
16293 	return PROC_RETURNED;
16294 }
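/*
 * A usage sketch: this filter/doit pair is meant to be driven through
 * proc_iterate(), exactly as dtrace_open() does below when the dof mode
 * transitions to DTRACE_DOF_MODE_LAZY_OFF:
 *
 *	proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS,
 *	    dtrace_lazy_dofs_proc_iterate_doit, NULL,
 *	    dtrace_lazy_dofs_proc_iterate_filter, NULL);
 */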
16295 
16296 #define DTRACE_LAZY_DOFS_DUPLICATED 1
16297 
16298 static int
16299 dtrace_lazy_dofs_duplicate(proc_t *parent, proc_t *child)
16300 {
16301 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_NOTOWNED);
16302 	LCK_MTX_ASSERT(&parent->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
16303 	LCK_MTX_ASSERT(&child->p_dtrace_sprlock, LCK_MTX_ASSERT_NOTOWNED);
16304 
16305 	lck_rw_lock_shared(&dtrace_dof_mode_lock);
16306 	dtrace_sprlock(parent);
16307 
16308 	/*
16309 	 * We need to make sure that the transition to lazy dofs -> helpers
16310 	 * was atomic for our parent
16311 	 */
16312 	ASSERT(parent->p_dtrace_lazy_dofs == NULL || parent->p_dtrace_helpers == NULL);
16313 	/*
16314 	 * In theory we should hold the child sprlock, but this is safe...
16315 	 */
16316 	ASSERT(child->p_dtrace_lazy_dofs == NULL && child->p_dtrace_helpers == NULL);
16317 
16318 	dof_ioctl_data_t* parent_dofs = parent->p_dtrace_lazy_dofs;
16319 	dof_ioctl_data_t* child_dofs = NULL;
16320 	if (parent_dofs) {
16321 		size_t parent_dofs_size = DOF_IOCTL_DATA_T_SIZE(parent_dofs->dofiod_count);
16322 		child_dofs = kmem_alloc(parent_dofs_size, KM_SLEEP);
16323 		bcopy(parent_dofs, child_dofs, parent_dofs_size);
16324 	}
16325 
16326 	dtrace_sprunlock(parent);
16327 
16328 	if (child_dofs) {
16329 		dtrace_sprlock(child);
16330 		child->p_dtrace_lazy_dofs = child_dofs;
16331 		dtrace_sprunlock(child);
16332 		/**
16333 		 * We process the DOF at this point if the mode is set to
16334 		 * LAZY_OFF. This can happen if DTrace is still processing the
16335 		 * DOF of another process (which can happen because the
16336 		 * protected pager can have a huge latency)
16337 		 * but has not yet processed our parent.
16338 		 */
16339 		if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF) {
16340 			dtrace_lazy_dofs_process(child);
16341 		}
16342 		lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16343 
16344 		return DTRACE_LAZY_DOFS_DUPLICATED;
16345 	}
16346 	lck_rw_unlock_shared(&dtrace_dof_mode_lock);
16347 
16348 	return 0;
16349 }
16350 
16351 static dtrace_helpers_t *
16352 dtrace_helpers_create(proc_t *p)
16353 {
16354 	dtrace_helpers_t *help;
16355 
16356 	LCK_MTX_ASSERT(&dtrace_lock, LCK_MTX_ASSERT_OWNED);
16357 	ASSERT(p->p_dtrace_helpers == NULL);
16358 
16359 	help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
16360 	help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
16361 	    DTRACE_NHELPER_ACTIONS, KM_SLEEP);
16362 
16363 	p->p_dtrace_helpers = help;
16364 	dtrace_helpers++;
16365 
16366 	return (help);
16367 }
16368 
16369 static void
16370 dtrace_helpers_destroy(proc_t* p)
16371 {
16372 	dtrace_helpers_t *help;
16373 	dtrace_vstate_t *vstate;
16374 	uint_t i;
16375 
16376 	lck_mtx_lock(&dtrace_meta_lock);
16377 	lck_mtx_lock(&dtrace_lock);
16378 
16379 	ASSERT(p->p_dtrace_helpers != NULL);
16380 	ASSERT(dtrace_helpers > 0);
16381 
16382 	help = p->p_dtrace_helpers;
16383 	vstate = &help->dthps_vstate;
16384 
16385 	/*
16386 	 * We're now going to lose the help from this process.
16387 	 */
16388 	p->p_dtrace_helpers = NULL;
16389 	dtrace_sync();
16390 
16391 	/*
16392 	 * Destroy the helper actions.
16393 	 */
16394 	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16395 		dtrace_helper_action_t *h, *next;
16396 
16397 		for (h = help->dthps_actions[i]; h != NULL; h = next) {
16398 			next = h->dtha_next;
16399 			dtrace_helper_action_destroy(h, vstate);
16400 			h = next;
16401 		}
16402 	}
16403 
16404 	lck_mtx_unlock(&dtrace_lock);
16405 
16406 	/*
16407 	 * Destroy the helper providers.
16408 	 */
16409 	if (help->dthps_maxprovs > 0) {
16410 		if (dtrace_meta_pid != NULL) {
16411 			ASSERT(dtrace_deferred_pid == NULL);
16412 
16413 			for (i = 0; i < help->dthps_nprovs; i++) {
16414 				dtrace_helper_provider_remove(
16415 				    &help->dthps_provs[i]->dthp_prov, p);
16416 			}
16417 		} else {
16418 			lck_mtx_lock(&dtrace_lock);
16419 			ASSERT(help->dthps_deferred == 0 ||
16420 			    help->dthps_next != NULL ||
16421 			    help->dthps_prev != NULL ||
16422 			    help == dtrace_deferred_pid);
16423 
16424 			/*
16425 			 * Remove the helper from the deferred list.
16426 			 */
16427 			if (help->dthps_next != NULL)
16428 				help->dthps_next->dthps_prev = help->dthps_prev;
16429 			if (help->dthps_prev != NULL)
16430 				help->dthps_prev->dthps_next = help->dthps_next;
16431 			if (dtrace_deferred_pid == help) {
16432 				dtrace_deferred_pid = help->dthps_next;
16433 				ASSERT(help->dthps_prev == NULL);
16434 			}
16435 
16436 			lck_mtx_unlock(&dtrace_lock);
16437 		}
16438 
16439 
16440 		for (i = 0; i < help->dthps_nprovs; i++) {
16441 			dtrace_helper_provider_destroy(help->dthps_provs[i]);
16442 		}
16443 
16444 		kmem_free(help->dthps_provs, help->dthps_maxprovs *
16445 		    sizeof (dtrace_helper_provider_t *));
16446 	}
16447 
16448 	lck_mtx_lock(&dtrace_lock);
16449 
16450 	dtrace_vstate_fini(&help->dthps_vstate);
16451 	kmem_free(help->dthps_actions,
16452 	    sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
16453 	kmem_free(help, sizeof (dtrace_helpers_t));
16454 
16455 	--dtrace_helpers;
16456 	lck_mtx_unlock(&dtrace_lock);
16457 	lck_mtx_unlock(&dtrace_meta_lock);
16458 }
16459 
16460 static void
16461 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
16462 {
16463 	dtrace_helpers_t *help, *newhelp;
16464 	dtrace_helper_action_t *helper, *new, *last;
16465 	dtrace_difo_t *dp;
16466 	dtrace_vstate_t *vstate;
16467 	uint_t i;
16468 	int j, sz, hasprovs = 0;
16469 
16470 	lck_mtx_lock(&dtrace_meta_lock);
16471 	lck_mtx_lock(&dtrace_lock);
16472 	ASSERT(from->p_dtrace_helpers != NULL);
16473 	ASSERT(dtrace_helpers > 0);
16474 
16475 	help = from->p_dtrace_helpers;
16476 	newhelp = dtrace_helpers_create(to);
16477 	ASSERT(to->p_dtrace_helpers != NULL);
16478 
16479 	newhelp->dthps_generation = help->dthps_generation;
16480 	vstate = &newhelp->dthps_vstate;
16481 
16482 	/*
16483 	 * Duplicate the helper actions.
16484 	 */
16485 	for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16486 		if ((helper = help->dthps_actions[i]) == NULL)
16487 			continue;
16488 
16489 		for (last = NULL; helper != NULL; helper = helper->dtha_next) {
16490 			new = kmem_zalloc(sizeof (dtrace_helper_action_t),
16491 			    KM_SLEEP);
16492 			new->dtha_generation = helper->dtha_generation;
16493 
16494 			if ((dp = helper->dtha_predicate) != NULL) {
16495 				dp = dtrace_difo_duplicate(dp, vstate);
16496 				new->dtha_predicate = dp;
16497 			}
16498 
16499 			new->dtha_nactions = helper->dtha_nactions;
16500 			sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
16501 			new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
16502 
16503 			for (j = 0; j < new->dtha_nactions; j++) {
16504 				dtrace_difo_t *dpj = helper->dtha_actions[j];
16505 
16506 				ASSERT(dpj != NULL);
16507 				dpj = dtrace_difo_duplicate(dpj, vstate);
16508 				new->dtha_actions[j] = dpj;
16509 			}
16510 
16511 			if (last != NULL) {
16512 				last->dtha_next = new;
16513 			} else {
16514 				newhelp->dthps_actions[i] = new;
16515 			}
16516 
16517 			last = new;
16518 		}
16519 	}
16520 
16521 	/*
16522 	 * Duplicate the helper providers and register them with the
16523 	 * DTrace framework.
16524 	 */
16525 	if (help->dthps_nprovs > 0) {
16526 		newhelp->dthps_nprovs = help->dthps_nprovs;
16527 		newhelp->dthps_maxprovs = help->dthps_nprovs;
16528 		newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
16529 		    sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16530 		for (i = 0; i < newhelp->dthps_nprovs; i++) {
16531 			newhelp->dthps_provs[i] = help->dthps_provs[i];
16532 			newhelp->dthps_provs[i]->dthp_ref++;
16533 		}
16534 
16535 		hasprovs = 1;
16536 	}
16537 
16538 	lck_mtx_unlock(&dtrace_lock);
16539 
16540 	if (hasprovs)
16541 		dtrace_helper_provider_register(to, newhelp, NULL);
16542 
16543 	lck_mtx_unlock(&dtrace_meta_lock);
16544 }
16545 
16546 /*
16547  * DTrace Process Functions
16548  */
16549 
16550 void
16551 dtrace_proc_fork(proc_t *parent_proc, proc_t *child_proc, int spawn)
16552 {
16553 	/*
16554 	 * This code applies to new processes who are copying the task
16555 	 * and thread state and address spaces of their parent process.
16556 	 */
16557 	if (!spawn) {
16558 		/*
16559 		 * APPLE NOTE: Solaris does a sprlock() and drops the
16560 		 * proc_lock here. We're cheating a bit and only taking
16561 		 * the p_dtrace_sprlock lock. A full sprlock would
16562 		 * task_suspend the parent.
16563 		 */
16564 		dtrace_sprlock(parent_proc);
16565 
16566 		/*
16567 		 * Remove all DTrace tracepoints from the child process. We
16568 		 * need to do this _before_ duplicating USDT providers since
16569 		 * any associated probes may be immediately enabled.
16570 		 */
16571 		if (parent_proc->p_dtrace_count > 0) {
16572 			dtrace_fasttrap_fork(parent_proc, child_proc);
16573 		}
16574 
16575 		dtrace_sprunlock(parent_proc);
16576 
16577 		/*
16578 		 * Duplicate any lazy dof(s). This must be done while NOT
16579 		 * holding the parent sprlock! Lock ordering is
16580 		 * dtrace_dof_mode_lock, then sprlock.  It is imperative we
16581 		 * always call dtrace_lazy_dofs_duplicate, rather than null
16582 		 * check and call if !NULL. If we NULL test, during lazy dof
16583 		 * faulting we can race with the faulting code and proceed
16584 		 * from here to beyond the helpers copy. The lazy dof
16585 		 * faulting will then fail to copy the helpers to the child
16586 		 * process. We return if we duplicated lazy dofs as a process
16587 		 * can only have one at the same time to avoid a race between
16588 		 * a dtrace client and dtrace_proc_fork where a process would
16589 		 * end up with both lazy dofs and helpers.
16590 		 */
16591 		if (dtrace_lazy_dofs_duplicate(parent_proc, child_proc) == DTRACE_LAZY_DOFS_DUPLICATED) {
16592 			return;
16593 		}
16594 
16595 		/*
16596 		 * Duplicate any helper actions and providers if they haven't
16597 		 * already.
16598 		 */
16599 #if !defined(__APPLE__)
16600 		/*
16601 		 * The SFORKING flag we set above informs the code that enables
16602 		 * USDT probes that sprlock() may fail because the child is
16603 		 * being forked.
16604 		 */
16605 #endif
16606 		/*
16607 		 * APPLE NOTE: As best I can tell, Apple's sprlock() equivalent
16608 		 * never fails to find the child. We do not set SFORKING.
16609 		 */
16610 		if (parent_proc->p_dtrace_helpers != NULL && dtrace_helpers_fork) {
16611 			(*dtrace_helpers_fork)(parent_proc, child_proc);
16612 		}
16613 	}
16614 }
16615 
16616 void
16617 dtrace_proc_exec(proc_t *p)
16618 {
16619 	/*
16620 	 * Invalidate any predicate evaluation already cached for this thread by DTrace.
16621 	 * That's because we've just stored to p_comm and DTrace refers to that when it
16622 	 * evaluates the "execname" special variable. uid and gid may have changed as well.
16623 	 */
16624 	dtrace_set_thread_predcache(current_thread(), 0);
16625 
16626 	/*
16627 	 * Free any outstanding lazy dof entries. It is imperative we
16628 	 * always call dtrace_lazy_dofs_destroy, rather than null check
16629 	 * and call if !NULL. If we NULL test, during lazy dof faulting
16630 	 * we can race with the faulting code and proceed from here to
16631 	 * beyond the helpers cleanup. The lazy dof faulting will then
16632 	 * install new helpers which no longer belong to this process!
16633 	 */
16634 	dtrace_lazy_dofs_destroy(p);
16635 
16636 
16637 	/*
16638 	 * Clean up any DTrace helpers for the process.
16639 	 */
16640 	if (p->p_dtrace_helpers != NULL && dtrace_helpers_cleanup) {
16641 		(*dtrace_helpers_cleanup)(p);
16642 	}
16643 
16644 	/*
16645 	 * Cleanup the DTrace provider associated with this process.
16646 	 */
16647 	proc_lock(p);
16648 	if (p->p_dtrace_probes && dtrace_fasttrap_exec_ptr) {
16649 		(*dtrace_fasttrap_exec_ptr)(p);
16650 	}
16651 	proc_unlock(p);
16652 }
16653 
16654 void
16655 dtrace_proc_exit(proc_t *p)
16656 {
16657 	/*
16658 	 * Free any outstanding lazy dof entries. It is imperative we
16659 	 * always call dtrace_lazy_dofs_destroy, rather than null check
16660 	 * and call if !NULL. If we NULL test, during lazy dof faulting
16661 	 * we can race with the faulting code and proceed from here to
16662 	 * beyond the helpers cleanup. The lazy dof faulting will then
16663 	 * install new helpers which will never be cleaned up, and leak.
16664 	 */
16665 	dtrace_lazy_dofs_destroy(p);
16666 
16667 	/*
16668 	 * Clean up any DTrace helper actions or probes for the process.
16669 	 */
16670 	if (p->p_dtrace_helpers != NULL) {
16671 		(*dtrace_helpers_cleanup)(p);
16672 	}
16673 
16674 	/*
16675 	 * Clean up any DTrace probes associated with this process.
16676 	 */
16677 	/*
16678 	 * APPLE NOTE: We release ptss pages/entries in dtrace_fasttrap_exit_ptr(),
16679 	 * call this after dtrace_helpers_cleanup()
16680 	 */
16681 	proc_lock(p);
16682 	if (p->p_dtrace_probes && dtrace_fasttrap_exit_ptr) {
16683 		(*dtrace_fasttrap_exit_ptr)(p);
16684 	}
16685 	proc_unlock(p);
16686 }
16687 
16688 /*
16689  * DTrace Hook Functions
16690  */
16691 
16692 /*
16693  * APPLE NOTE:  dtrace_modctl_* routines for kext support.
16694  * Used to manipulate the modctl list within dtrace xnu.
16695  */
16696 
16697 modctl_t *dtrace_modctl_list;
16698 
16699 static void
16700 dtrace_modctl_add(struct modctl * newctl)
16701 {
16702 	struct modctl *nextp, *prevp;
16703 
16704 	ASSERT(newctl != NULL);
16705 	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
16706 
16707 	// Insert new module at the front of the list,
16708 
16709 	newctl->mod_next = dtrace_modctl_list;
16710 	dtrace_modctl_list = newctl;
16711 
16712 	/*
16713 	 * If a module exists with the same name, then that module
16714 	 * must have been unloaded with enabled probes. We will move
16715 	 * the unloaded module to the new module's stale chain and
16716 	 * then stop traversing the list.
16717 	 */
16718 
16719 	prevp = newctl;
16720 	nextp = newctl->mod_next;
16721 
16722 	while (nextp != NULL) {
16723 		if (nextp->mod_loaded) {
16724 			/* This is a loaded module. Keep traversing. */
16725 			prevp = nextp;
16726 			nextp = nextp->mod_next;
16727 			continue;
16728 		}
16729 		else {
16730 			/* Found an unloaded module */
16731 			if (strncmp (newctl->mod_modname, nextp->mod_modname, KMOD_MAX_NAME)) {
16732 				/* Names don't match. Keep traversing. */
16733 				prevp = nextp;
16734 				nextp = nextp->mod_next;
16735 				continue;
16736 			}
16737 			else {
16738 				/* We found a stale entry, move it. We're done. */
16739 				prevp->mod_next = nextp->mod_next;
16740 				newctl->mod_stale = nextp;
16741 				nextp->mod_next = NULL;
16742 				break;
16743 			}
16744 		}
16745 	}
16746 }
16747 
16748 static modctl_t *
16749 dtrace_modctl_lookup(struct kmod_info * kmod)
16750 {
16751     LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
16752 
16753     struct modctl * ctl;
16754 
16755     for (ctl = dtrace_modctl_list; ctl; ctl=ctl->mod_next) {
16756 	if (ctl->mod_id == kmod->id)
16757 	    return(ctl);
16758     }
16759     return (NULL);
16760 }
16761 
16762 /*
16763  * This routine is called from dtrace_module_unloaded().
16764  * It removes a modctl structure and its stale chain
16765  * from the kext shadow list.
16766  */
16767 static void
16768 dtrace_modctl_remove(struct modctl * ctl)
16769 {
16770 	ASSERT(ctl != NULL);
16771 	LCK_MTX_ASSERT(&mod_lock, LCK_MTX_ASSERT_OWNED);
16772 	modctl_t *prevp, *nextp, *curp;
16773 
16774 	// Remove stale chain first
16775 	for (curp=ctl->mod_stale; curp != NULL; curp=nextp) {
16776 		nextp = curp->mod_stale;
16777 		/* There should NEVER be user symbols allocated at this point */
16778 		ASSERT(curp->mod_user_symbols == NULL);
16779 		kmem_free(curp, sizeof(modctl_t));
16780 	}
16781 
16782 	prevp = NULL;
16783 	curp = dtrace_modctl_list;
16784 
16785 	while (curp != ctl) {
16786 		prevp = curp;
16787 		curp = curp->mod_next;
16788 	}
16789 
16790 	if (prevp != NULL) {
16791 		prevp->mod_next = ctl->mod_next;
16792 	}
16793 	else {
16794 		dtrace_modctl_list = ctl->mod_next;
16795 	}
16796 
16797 	/* There should NEVER be user symbols allocated at this point */
16798 	ASSERT(ctl->mod_user_symbols == NULL);
16799 
16800 	kmem_free (ctl, sizeof(modctl_t));
16801 }
16802 
16803 /*
16804  * APPLE NOTE: The kext loader will call dtrace_module_loaded
16805  * when the kext is loaded in memory, but before calling the
16806  * kext's start routine.
16807  *
16808  * Return 0 on success
16809  * Return -1 on failure
16810  */
16811 
16812 static int
16813 dtrace_module_loaded(struct kmod_info *kmod, uint32_t flag)
16814 {
16815 	dtrace_provider_t *prv;
16816 
16817 	/*
16818 	 * If kernel symbols have been disabled, return immediately.
16819 	 * DTRACE_KERNEL_SYMBOLS_NEVER is a permanent mode; it is safe to test without holding locks.
16820 	 */
16821 	if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER)
16822 		return 0;
16823 
16824 	struct modctl *ctl = NULL;
16825 	if (!kmod || kmod->address == 0 || kmod->size == 0)
16826 		return(-1);
16827 
16828 	lck_mtx_lock(&dtrace_provider_lock);
16829 	lck_mtx_lock(&mod_lock);
16830 
16831 	/*
16832 	 * Have we seen this kext before?
16833 	 */
16834 
16835 	ctl = dtrace_modctl_lookup(kmod);
16836 
16837 	if (ctl != NULL) {
16838 		/* bail... we already have this kext in the modctl list */
16839 		lck_mtx_unlock(&mod_lock);
16840 		lck_mtx_unlock(&dtrace_provider_lock);
16841 		if (dtrace_err_verbose)
16842 			cmn_err(CE_WARN, "dtrace load module already exists '%s %u' is failing against '%s %u'", kmod->name, (uint_t)kmod->id, ctl->mod_modname, ctl->mod_id);
16843 		return(-1);
16844 	}
16845 	else {
16846 		ctl = kmem_alloc(sizeof(struct modctl), KM_SLEEP);
16847 		if (ctl == NULL) {
16848 			if (dtrace_err_verbose)
16849 				cmn_err(CE_WARN, "dtrace module load '%s %u' is failing ", kmod->name, (uint_t)kmod->id);
16850 			lck_mtx_unlock(&mod_lock);
16851 			lck_mtx_unlock(&dtrace_provider_lock);
16852 			return (-1);
16853 		}
16854 		ctl->mod_next = NULL;
16855 		ctl->mod_stale = NULL;
16856 		strlcpy (ctl->mod_modname, kmod->name, sizeof(ctl->mod_modname));
16857 		ctl->mod_loadcnt = kmod->id;
16858 		ctl->mod_nenabled = 0;
16859 		ctl->mod_address  = kmod->address;
16860 		ctl->mod_size = kmod->size;
16861 		ctl->mod_id = kmod->id;
16862 		ctl->mod_loaded = 1;
16863 		ctl->mod_flags = 0;
16864 		ctl->mod_user_symbols = NULL;
16865 		ctl->mod_sdtprobecnt = 0;
16866 		ctl->mod_sdtdesc = NULL;
16867 
16868 		/*
16869 		 * Find the UUID for this module, if it has one
16870 		 */
16871 		kernel_mach_header_t* header = (kernel_mach_header_t *)ctl->mod_address;
16872 		struct load_command* load_cmd = (struct load_command *)&header[1];
16873 		uint32_t i;
16874 		for (i = 0; i < header->ncmds; i++) {
16875 			if (load_cmd->cmd == LC_UUID) {
16876 				struct uuid_command* uuid_cmd = (struct uuid_command *)load_cmd;
16877 				memcpy(ctl->mod_uuid, uuid_cmd->uuid, sizeof(uuid_cmd->uuid));
16878 				ctl->mod_flags |= MODCTL_HAS_UUID;
16879 				break;
16880 			}
16881 			load_cmd = (struct load_command *)((caddr_t)load_cmd + load_cmd->cmdsize);
16882 		}
16883 
16884 		if (ctl->mod_address == g_kernel_kmod_info.address) {
16885 			ctl->mod_flags |= MODCTL_IS_MACH_KERNEL;
16886 			memcpy(dtrace_kerneluuid, ctl->mod_uuid, sizeof(dtrace_kerneluuid));
16887 		}
16888 		/*
16889 		 * Static kexts have a UUID that is not used for symbolication, as all their
16890 		 * symbols are in kernel
16891 		 */
16892 		else if ((flag & KMOD_DTRACE_STATIC_KEXT) == KMOD_DTRACE_STATIC_KEXT) {
16893 			memcpy(ctl->mod_uuid, dtrace_kerneluuid, sizeof(dtrace_kerneluuid));
16894 			ctl->mod_flags |= MODCTL_IS_STATIC_KEXT;
16895 		}
16896 	}
16897 	dtrace_modctl_add(ctl);
16898 
16899 	/*
16900 	 * We must hold the dtrace_lock to safely test non permanent dtrace_fbt_symbol_mode(s)
16901 	 */
16902 	lck_mtx_lock(&dtrace_lock);
16903 
16904 	/*
16905 	 * DTrace must decide if it will instrument modules lazily via
16906 	 * userspace symbols (default mode), or instrument immediately via
16907 	 * kernel symbols (non-default mode)
16908 	 *
16909 	 * When in default/lazy mode, DTrace will only support modules
16910 	 * built with a valid UUID.
16911 	 *
16912 	 * Overriding the default can be done explicitly in one of
16913 	 * the following two ways.
16914 	 *
16915 	 * A module can force symbols from kernel space using the plist key,
16916 	 * OSBundleForceDTraceInit (see kmod.h).  If this per kext state is set,
16917 	 * we fall through and instrument this module now.
16918 	 *
16919 	 * Or, the boot-arg, dtrace_kernel_symbol_mode, can be set to force symbols
16920 	 * from kernel space (see dtrace_impl.h).  If this system state is set
16921 	 * to a non-userspace mode, we fall through and instrument the module now.
16922 	 */
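	/*
	 * For example (assuming OSBundleForceDTraceInit is a boolean Info.plist
	 * key that maps to KMOD_DTRACE_FORCE_INIT; see kmod.h), a kext opting
	 * into immediate instrumentation from kernel symbols would carry:
	 *
	 *	<key>OSBundleForceDTraceInit</key>
	 *	<true/>
	 *
	 * All other kexts take the default lazy, userspace-symbol path.
	 */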
16923 
16924 	if ((dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) &&
16925 	    (!(flag & KMOD_DTRACE_FORCE_INIT)))
16926 	{
16927 		/* Load SDT section for module. Symbol related data will be handled lazily. */
16928 		sdt_load_machsect(ctl);
16929 
16930 		/* We will instrument the module lazily -- this is the default */
16931 		lck_mtx_unlock(&dtrace_lock);
16932 		lck_mtx_unlock(&mod_lock);
16933 		lck_mtx_unlock(&dtrace_provider_lock);
16934 		return 0;
16935 	}
16936 
16937 	/* We will instrument the module immediately using kernel symbols */
16938 	if (!(flag & KMOD_DTRACE_NO_KERNEL_SYMS)) {
16939 		ctl->mod_flags |= MODCTL_HAS_KERNEL_SYMBOLS;
16940 	}
16941 
16942 	/* Load SDT section for module. Symbol related data will be handled lazily. */
16943 	sdt_load_machsect(ctl);
16944 
16945 	lck_mtx_unlock(&dtrace_lock);
16946 
16947 	/*
16948 	 * We're going to call each provider's per-module provide operation
16949 	 * specifying only this module.
16950 	 */
16951 	for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
16952 		prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
16953 
16954 	/*
16955 	 * APPLE NOTE: The contract with the kext loader is that once this function
16956 	 * has completed, it may delete kernel symbols at will.
16957 	 * We must set this while still holding the mod_lock.
16958 	 */
16959 	ctl->mod_flags &= ~MODCTL_HAS_KERNEL_SYMBOLS;
16960 
16961 	lck_mtx_unlock(&mod_lock);
16962 	lck_mtx_unlock(&dtrace_provider_lock);
16963 
16964 	/*
16965 	 * If we have any retained enablings, we need to match against them.
16966 	 * Enabling probes requires that cpu_lock be held, and we cannot hold
16967 	 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
16968 	 * module.  (In particular, this happens when loading scheduling
16969 	 * classes.)  So if we have any retained enablings, we need to dispatch
16970 	 * our task queue to do the match for us.
16971 	 */
16972 	lck_mtx_lock(&dtrace_lock);
16973 
16974 	if (dtrace_retained == NULL) {
16975 		lck_mtx_unlock(&dtrace_lock);
16976 		return 0;
16977 	}
16978 
16979 	/* APPLE NOTE!
16980 	 *
16981 	 * The cpu_lock mentioned above is only held by dtrace code; Apple's xnu never actually
16982 	 * holds it for any reason. Thus the comment above is invalid: we can directly invoke
16983 	 * dtrace_enabling_matchall without jumping through all the hoops, and we can avoid
16984 	 * the delay call as well.
16985 	 */
16986 	lck_mtx_unlock(&dtrace_lock);
16987 
16988 	dtrace_enabling_matchall();
16989 
16990 	return 0;
16991 }
16992 
16993 /*
16994  * Return 0 on success
16995  * Return -1 on failure
16996  */
16997 static int
16998 dtrace_module_unloaded(struct kmod_info *kmod)
16999 {
17000 	dtrace_probe_t template, *probe, *first, *next;
17001 	dtrace_provider_t *prov;
17002         struct modctl *ctl = NULL;
17003 	struct modctl *syncctl = NULL;
17004 	struct modctl *nextsyncctl = NULL;
17005 	int syncmode = 0;
17006 
17007         lck_mtx_lock(&dtrace_provider_lock);
17008 	lck_mtx_lock(&mod_lock);
17009 	lck_mtx_lock(&dtrace_lock);
17010 
17011 	if (kmod == NULL) {
17012 	    syncmode = 1;
17013 	}
17014 	else {
17015 	    ctl = dtrace_modctl_lookup(kmod);
17016 	    if (ctl == NULL)
17017 	    {
17018 		lck_mtx_unlock(&dtrace_lock);
17019 		lck_mtx_unlock(&mod_lock);
17020 		lck_mtx_unlock(&dtrace_provider_lock);
17021 		return (-1);
17022 	    }
17023 	    ctl->mod_loaded = 0;
17024 	    ctl->mod_address = 0;
17025 	    ctl->mod_size = 0;
17026 	}
17027 
17028 	if (dtrace_bymod == NULL) {
17029 		/*
17030 		 * The DTrace module is loaded (obviously) but not attached;
17031 		 * we don't have any work to do.
17032 		 */
17033 	         if (ctl != NULL)
17034 			 (void)dtrace_modctl_remove(ctl);
17035 		 lck_mtx_unlock(&dtrace_lock);
17036 		 lck_mtx_unlock(&mod_lock);
17037 		 lck_mtx_unlock(&dtrace_provider_lock);
17038 		 return(0);
17039 	}
17040 
17041 	/* Syncmode set means we target and traverse entire modctl list. */
17042         if (syncmode)
17043 	    nextsyncctl = dtrace_modctl_list;
17044 
17045 syncloop:
17046 	if (syncmode)
17047 	{
17048 	    /* find a stale modctl struct */
17049 	    for (syncctl = nextsyncctl; syncctl != NULL; syncctl=syncctl->mod_next) {
17050 		if (syncctl->mod_address == 0)
17051 		    break;
17052 	    }
17053 	    if (syncctl==NULL)
17054 	    {
17055 		/* We have no more work to do */
17056 		lck_mtx_unlock(&dtrace_lock);
17057 		lck_mtx_unlock(&mod_lock);
17058 		lck_mtx_unlock(&dtrace_provider_lock);
17059 		return(0);
17060 	    }
17061 	    else {
17062 		/* keep track of next syncctl in case this one is removed */
17063 		nextsyncctl = syncctl->mod_next;
17064 		ctl = syncctl;
17065 	    }
17066 	}
17067 
17068 	template.dtpr_mod = ctl->mod_modname;
17069 
17070 	for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
17071 	    probe != NULL; probe = probe->dtpr_nextmod) {
17072 	        if (probe->dtpr_ecb != NULL) {
17073 			/*
17074 			 * This shouldn't _actually_ be possible -- we're
17075 			 * unloading a module that has an enabled probe in it.
17076 			 * (It's normally up to the provider to make sure that
17077 			 * this can't happen.)  However, because dtps_enable()
17078 			 * doesn't have a failure mode, there can be an
17079 			 * enable/unload race.  Upshot:  we don't want to
17080 			 * assert, but we're not going to disable the
17081 			 * probe, either.
17082 			 */
17083 
17084 
17085 		        if (syncmode) {
17086 			    /* We're syncing, let's look at next in list */
17087 			    goto syncloop;
17088 			}
17089 
17090 			lck_mtx_unlock(&dtrace_lock);
17091 			lck_mtx_unlock(&mod_lock);
17092 			lck_mtx_unlock(&dtrace_provider_lock);
17093 
17094 			if (dtrace_err_verbose) {
17095 				cmn_err(CE_WARN, "unloaded module '%s' had "
17096 				    "enabled probes", ctl->mod_modname);
17097 			}
17098 			return(-1);
17099 		}
17100 	}
17101 
17102 	probe = first;
17103 
17104 	for (first = NULL; probe != NULL; probe = next) {
17105 		ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
17106 
17107 		dtrace_probes[probe->dtpr_id - 1] = NULL;
17108 		probe->dtpr_provider->dtpv_probe_count--;
17109 
17110 		next = probe->dtpr_nextmod;
17111 		dtrace_hash_remove(dtrace_byprov, probe);
17112 		dtrace_hash_remove(dtrace_bymod, probe);
17113 		dtrace_hash_remove(dtrace_byfunc, probe);
17114 		dtrace_hash_remove(dtrace_byname, probe);
17115 
17116 		if (first == NULL) {
17117 			first = probe;
17118 			probe->dtpr_nextmod = NULL;
17119 		} else {
17120 			probe->dtpr_nextmod = first;
17121 			first = probe;
17122 		}
17123 	}
17124 
17125 	/*
17126 	 * We've removed all of the module's probes from the hash chains and
17127 	 * from the probe array.  Now issue a dtrace_sync() to be sure that
17128 	 * everyone has cleared out from any probe array processing.
17129 	 */
17130 	dtrace_sync();
17131 
17132 	for (probe = first; probe != NULL; probe = first) {
17133 		first = probe->dtpr_nextmod;
17134 		prov = probe->dtpr_provider;
17135 		prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
17136 		    probe->dtpr_arg);
17137 		dtrace_strunref(probe->dtpr_mod);
17138 		dtrace_strunref(probe->dtpr_func);
17139 		dtrace_strunref(probe->dtpr_name);
17140 		vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
17141 
17142 		zfree(dtrace_probe_t_zone, probe);
17143 	}
17144 
17145 	dtrace_modctl_remove(ctl);
17146 
17147 	if (syncmode)
17148 	    goto syncloop;
17149 
17150 	lck_mtx_unlock(&dtrace_lock);
17151 	lck_mtx_unlock(&mod_lock);
17152 	lck_mtx_unlock(&dtrace_provider_lock);
17153 
17154 	return(0);
17155 }
17156 
17157 void
17158 dtrace_suspend(void)
17159 {
17160 	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
17161 }
17162 
17163 void
17164 dtrace_resume(void)
17165 {
17166 	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
17167 }
17168 
17169 static int
17170 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
17171 {
17172 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
17173 	lck_mtx_lock(&dtrace_lock);
17174 
17175 	switch (what) {
17176 	case CPU_CONFIG: {
17177 		dtrace_state_t *state;
17178 		dtrace_optval_t *opt, rs, c;
17179 
17180 		/*
17181 		 * For now, we only allocate a new buffer for anonymous state.
17182 		 */
17183 		if ((state = dtrace_anon.dta_state) == NULL)
17184 			break;
17185 
17186 		if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
17187 			break;
17188 
17189 		opt = state->dts_options;
17190 		c = opt[DTRACEOPT_CPU];
17191 
17192 		if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
17193 			break;
17194 
17195 		/*
17196 		 * Regardless of what the actual policy is, we're going to
17197 		 * temporarily set our resize policy to be manual.  We're
17198 		 * also going to temporarily set our CPU option to denote
17199 		 * the newly configured CPU.
17200 		 */
17201 		rs = opt[DTRACEOPT_BUFRESIZE];
17202 		opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
17203 		opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
17204 
17205 		(void) dtrace_state_buffers(state);
17206 
17207 		opt[DTRACEOPT_BUFRESIZE] = rs;
17208 		opt[DTRACEOPT_CPU] = c;
17209 
17210 		break;
17211 	}
17212 
17213 	case CPU_UNCONFIG:
17214 		/*
17215 		 * We don't free the buffer in the CPU_UNCONFIG case.  (The
17216 		 * buffer will be freed when the consumer exits.)
17217 		 */
17218 		break;
17219 
17220 	default:
17221 		break;
17222 	}
17223 
17224 	lck_mtx_unlock(&dtrace_lock);
17225 	return (0);
17226 }
17227 
17228 static void
17229 dtrace_cpu_setup_initial(processorid_t cpu)
17230 {
17231 	(void) dtrace_cpu_setup(CPU_CONFIG, cpu);
17232 }
17233 
17234 static void
17235 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
17236 {
17237 	if (dtrace_toxranges >= dtrace_toxranges_max) {
17238 		int osize, nsize;
17239 		dtrace_toxrange_t *range;
17240 
17241 		osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
17242 
17243 		if (osize == 0) {
17244 			ASSERT(dtrace_toxrange == NULL);
17245 			ASSERT(dtrace_toxranges_max == 0);
17246 			dtrace_toxranges_max = 1;
17247 		} else {
17248 			dtrace_toxranges_max <<= 1;
17249 		}
17250 
17251 		nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
17252 		range = kmem_zalloc(nsize, KM_SLEEP);
17253 
17254 		if (dtrace_toxrange != NULL) {
17255 			ASSERT(osize != 0);
17256 			bcopy(dtrace_toxrange, range, osize);
17257 			kmem_free(dtrace_toxrange, osize);
17258 		}
17259 
17260 		dtrace_toxrange = range;
17261 	}
17262 
17263 	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
17264 	ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
17265 
17266 	dtrace_toxrange[dtrace_toxranges].dtt_base = base;
17267 	dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
17268 	dtrace_toxranges++;
17269 }
17270 
17271 /*
17272  * DTrace Driver Cookbook Functions
17273  */
17274 /*ARGSUSED*/
17275 static int
17276 dtrace_attach(dev_info_t *devi)
17277 {
17278 	dtrace_provider_id_t id;
17279 	dtrace_state_t *state = NULL;
17280 	dtrace_enabling_t *enab;
17281 
17282 	lck_mtx_lock(&cpu_lock);
17283 	lck_mtx_lock(&dtrace_provider_lock);
17284 	lck_mtx_lock(&dtrace_lock);
17285 
17286 	/* Darwin uses BSD cloning device driver to automagically obtain minor device number. */
17287 	dtrace_devi = devi;
17288 
17289 	dtrace_modload = dtrace_module_loaded;
17290 	dtrace_modunload = dtrace_module_unloaded;
17291 	dtrace_cpu_init = dtrace_cpu_setup_initial;
17292 	dtrace_helpers_cleanup = dtrace_helpers_destroy;
17293 	dtrace_helpers_fork = dtrace_helpers_duplicate;
17294 	dtrace_cpustart_init = dtrace_suspend;
17295 	dtrace_cpustart_fini = dtrace_resume;
17296 	dtrace_debugger_init = dtrace_suspend;
17297 	dtrace_debugger_fini = dtrace_resume;
17298 
17299 	register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17300 
17301 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
17302 
17303 	dtrace_arena = vmem_create("dtrace", (void *)1, INT32_MAX, 1,
17304 	    NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
17305 
17306 	LCK_MTX_ASSERT(&cpu_lock, LCK_MTX_ASSERT_OWNED);
17307 
17308 	dtrace_nprobes = dtrace_nprobes_default;
17309 	dtrace_probes = kmem_zalloc(sizeof(dtrace_probe_t*) * dtrace_nprobes,
17310 	    KM_SLEEP);
17311 
17312 	dtrace_byprov = dtrace_hash_create(dtrace_strkey_probe_provider,
17313 	    0, /* unused */
17314 	    offsetof(dtrace_probe_t, dtpr_nextprov),
17315 	    offsetof(dtrace_probe_t, dtpr_prevprov));
17316 
17317 	dtrace_bymod = dtrace_hash_create(dtrace_strkey_deref_offset,
17318 	    offsetof(dtrace_probe_t, dtpr_mod),
17319 	    offsetof(dtrace_probe_t, dtpr_nextmod),
17320 	    offsetof(dtrace_probe_t, dtpr_prevmod));
17321 
17322 	dtrace_byfunc = dtrace_hash_create(dtrace_strkey_deref_offset,
17323 	    offsetof(dtrace_probe_t, dtpr_func),
17324 	    offsetof(dtrace_probe_t, dtpr_nextfunc),
17325 	    offsetof(dtrace_probe_t, dtpr_prevfunc));
17326 
17327 	dtrace_byname = dtrace_hash_create(dtrace_strkey_deref_offset,
17328 	    offsetof(dtrace_probe_t, dtpr_name),
17329 	    offsetof(dtrace_probe_t, dtpr_nextname),
17330 	    offsetof(dtrace_probe_t, dtpr_prevname));
17331 
17332 	if (dtrace_retain_max < 1) {
17333 		cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
17334 		    "setting to 1", dtrace_retain_max);
17335 		dtrace_retain_max = 1;
17336 	}
17337 
17338 	/*
17339 	 * Now discover our toxic ranges.
17340 	 */
17341 	dtrace_toxic_ranges(dtrace_toxrange_add);
17342 
17343 	/*
17344 	 * Before we register ourselves as a provider to our own framework,
17345 	 * we would like to assert that dtrace_provider is NULL -- but that's
17346 	 * not true if we were loaded as a dependency of a DTrace provider.
17347 	 * Once we've registered, we can assert that dtrace_provider is our
17348 	 * pseudo provider.
17349 	 */
17350 	(void) dtrace_register("dtrace", &dtrace_provider_attr,
17351 	    DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
17352 
17353 	ASSERT(dtrace_provider != NULL);
17354 	ASSERT((dtrace_provider_id_t)dtrace_provider == id);
17355 
17356 #if defined (__x86_64__)
17357 	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
17358 	    dtrace_provider, NULL, NULL, "BEGIN", 1, NULL);
17359 	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
17360 	    dtrace_provider, NULL, NULL, "END", 0, NULL);
17361 	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
17362 	    dtrace_provider, NULL, NULL, "ERROR", 3, NULL);
17363 #elif defined(__arm64__)
17364 	dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
17365 	    dtrace_provider, NULL, NULL, "BEGIN", 2, NULL);
17366 	dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
17367 	    dtrace_provider, NULL, NULL, "END", 1, NULL);
17368 	dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
17369 	    dtrace_provider, NULL, NULL, "ERROR", 4, NULL);
17370 #else
17371 #error Unknown Architecture
17372 #endif
17373 
17374 	dtrace_anon_property();
17375 	lck_mtx_unlock(&cpu_lock);
17376 
17377 	/*
17378 	 * If DTrace helper tracing is enabled, we need to allocate the
17379 	 * trace buffer and initialize the values.
17380 	 */
17381 	if (dtrace_helptrace_enabled) {
17382 		ASSERT(dtrace_helptrace_buffer == NULL);
17383 		dtrace_helptrace_buffer =
17384 		    kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
17385 		dtrace_helptrace_next = 0;
17386 	}
17387 
17388 	/*
17389 	 * If there are already providers, we must ask them to provide their
17390 	 * probes, and then match any anonymous enabling against them.  Note
17391 	 * that there should be no other retained enablings at this time:
17392 	 * the only retained enablings at this time should be the anonymous
17393 	 * enabling.
17394 	 */
17395 	if (dtrace_anon.dta_enabling != NULL) {
17396 		ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
17397 
17398 		/*
17399 		 * APPLE NOTE: if handling anonymous dof, switch symbol modes.
17400 		 */
17401 		if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) {
17402 			dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_KERNEL;
17403 		}
17404 
17405 		dtrace_enabling_provide(NULL);
17406 		state = dtrace_anon.dta_state;
17407 
17408 		/*
17409 		 * We couldn't hold cpu_lock across the above call to
17410 		 * dtrace_enabling_provide(), but we must hold it to actually
17411 		 * enable the probes.  We have to drop all of our locks, pick
17412 		 * up cpu_lock, and regain our locks before matching the
17413 		 * retained anonymous enabling.
17414 		 */
17415 		lck_mtx_unlock(&dtrace_lock);
17416 		lck_mtx_unlock(&dtrace_provider_lock);
17417 
17418 		lck_mtx_lock(&cpu_lock);
17419 		lck_mtx_lock(&dtrace_provider_lock);
17420 		lck_mtx_lock(&dtrace_lock);
17421 
17422 		if ((enab = dtrace_anon.dta_enabling) != NULL)
17423 			(void) dtrace_enabling_match(enab, NULL, NULL);
17424 
17425 		lck_mtx_unlock(&cpu_lock);
17426 	}
17427 
17428 	lck_mtx_unlock(&dtrace_lock);
17429 	lck_mtx_unlock(&dtrace_provider_lock);
17430 
17431 	if (state != NULL) {
17432 		/*
17433 		 * If we created any anonymous state, set it going now.
17434 		 */
17435 		(void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
17436 	}
17437 
17438 	return (DDI_SUCCESS);
17439 }
17440 
17441 /*ARGSUSED*/
17442 static int
17443 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
17444 {
17445 #pragma unused(flag, otyp)
17446 	dtrace_state_t *state;
17447 	uint32_t priv;
17448 	uid_t uid;
17449 	zoneid_t zoneid;
17450 	int rv;
17451 
17452 	/* APPLE: Darwin puts Helper on its own major device. */
17453 
17454 	/*
17455 	 * If no DTRACE_PRIV_* bits are set in the credential, then the
17456 	 * caller lacks sufficient permission to do anything with DTrace.
17457 	 */
17458 	dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
17459 	if (priv == DTRACE_PRIV_NONE)
17460 		return (EACCES);
17461 
17462 	/*
17463 	 * APPLE NOTE: We delay the initialization of fasttrap as late as possible.
17464 	 * It certainly can't be later than now!
17465 	 */
17466 	fasttrap_init();
17467 
17468 	/*
17469 	 * Ask all providers to provide all their probes.
17470 	 */
17471 	lck_mtx_lock(&dtrace_provider_lock);
17472 	dtrace_probe_provide(NULL, NULL);
17473 	lck_mtx_unlock(&dtrace_provider_lock);
17474 
17475 	lck_mtx_lock(&cpu_lock);
17476 	lck_mtx_lock(&dtrace_lock);
17477 	dtrace_opens++;
17478 	dtrace_membar_producer();
17479 
17480 #ifdef illumos
17481 	/*
17482 	 * If the kernel debugger is active (that is, if the kernel debugger
17483 	 * modified text in some way), we won't allow the open.
17484 	 */
17485 	if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
17486 		dtrace_opens--;
17487 		lck_mtx_unlock(&dtrace_lock);
17488 		lck_mtx_unlock(&cpu_lock);
17489 		return (EBUSY);
17490 	}
17491 #endif
17492 
17493 	rv = dtrace_state_create(devp, cred_p, &state);
17494 	lck_mtx_unlock(&cpu_lock);
17495 
17496 	if (rv != 0 || state == NULL) {
17497 		if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) {
17498 #ifdef illumos
17499 			(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17500 #endif
17501 		}
17502 		lck_mtx_unlock(&dtrace_lock);
17503 		/* propagate EAGAIN or ERESTART */
17504 		return (rv);
17505 	}
17506 
17507 	lck_mtx_unlock(&dtrace_lock);
17508 
17509 	lck_rw_lock_exclusive(&dtrace_dof_mode_lock);
17510 
17511 	/*
17512 	 * If we are currently lazy, transition states.
17513 	 *
17514 	 * Unlike dtrace_close, we do not need to check the
17515 	 * value of dtrace_opens, as any positive value (and
17516 	 * we count as 1) means we transition states.
17517 	 */
17518 	if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_ON) {
17519 		dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_OFF;
17520 		/*
17521 		 * We do not need to hold the exclusive lock while processing
17522 		 * DOF on processes. We do need to make sure the mode does not get
17523 		 * changed to DTRACE_DOF_MODE_LAZY_ON during that stage though
17524 		 * (which should not happen anyway since it only happens in
17525 		 * dtrace_close). There is no way incomplete USDT probes can be
17526 		 * activated by any DTrace clients here, since they all have to
17527 		 * call dtrace_open and be blocked on dtrace_dof_mode_lock.
17528 		 */
17529 		lck_rw_lock_exclusive_to_shared(&dtrace_dof_mode_lock);
17530 		/*
17531 		 * Iterate all existing processes and load lazy dofs.
17532 		 */
17533 		proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS,
17534 			     dtrace_lazy_dofs_proc_iterate_doit,
17535 			     NULL,
17536 			     dtrace_lazy_dofs_proc_iterate_filter,
17537 			     NULL);
17538 
17539 		lck_rw_unlock_shared(&dtrace_dof_mode_lock);
17540 	}
17541 	else {
17542 		lck_rw_unlock_exclusive(&dtrace_dof_mode_lock);
17543 	}
17544 
17545 
17546 	/*
17547 	 * Update kernel symbol state.
17548 	 *
17549 	 * We must own the provider and dtrace locks.
17550 	 *
17551 	 * NOTE! It may appear there is a race by setting this value so late
17552 	 * after dtrace_probe_provide. However, any kext loaded after the
17553 	 * call to probe provide and before we set LAZY_OFF will be marked as
17554 	 * eligible for symbols from userspace. The same dtrace that is currently
17555 	 * calling dtrace_open() (this call!) will get a list of kexts needing
17556 	 * symbols and fill them in, thus closing the race window.
17557 	 *
17558 	 * We want to set this value only after it is certain it will succeed, as
17559 	 * this significantly reduces the complexity of error exits.
17560 	 */
17561 	lck_mtx_lock(&dtrace_lock);
17562 	if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE) {
17563 		dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_KERNEL;
17564 	}
17565 	lck_mtx_unlock(&dtrace_lock);
17566 
17567 	/* Suspend cluster powerdown while DTrace device is opened. */
17568 	suspend_cluster_powerdown();
17569 	return (0);
17570 }
17571 
17572 /*ARGSUSED*/
17573 static int
17574 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
17575 {
17576 #pragma unused(flag, otyp, cred_p) /* __APPLE__ */
17577 	minor_t minor = getminor(dev);
17578 	dtrace_state_t *state;
17579 
17580 	/* APPLE NOTE: Darwin puts Helper on its own major device. */
17581 	state = dtrace_state_get(minor);
17582 
17583 	lck_mtx_lock(&cpu_lock);
17584 	lck_mtx_lock(&dtrace_lock);
17585 
17586 	if (state->dts_anon) {
17587 		/*
17588 		 * There is anonymous state. Destroy that first.
17589 		 */
17590 		ASSERT(dtrace_anon.dta_state == NULL);
17591 		dtrace_state_destroy(state->dts_anon);
17592 	}
17593 
17594 	dtrace_state_destroy(state);
17595 	ASSERT(dtrace_opens > 0);
17596 
17597 	/*
17598 	 * Only relinquish control of the kernel debugger interface when there
17599 	 * are no consumers and no anonymous enablings.
17600 	 */
17601 	if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) {
17602 #ifdef illumos
17603 		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17604 #endif
17605 	}
17606 
17607 	lck_mtx_unlock(&dtrace_lock);
17608 	lck_mtx_unlock(&cpu_lock);
17609 
17610 	/*
17611 	 * Lock ordering requires the dof mode lock be taken before
17612 	 * the dtrace_lock.
17613 	 */
17614 	lck_rw_lock_exclusive(&dtrace_dof_mode_lock);
17615 	lck_mtx_lock(&dtrace_lock);
17616 
17617 	if (dtrace_opens == 0) {
17618 		/*
17619 		 * If we are currently lazy-off, and this is the last close, transition to
17620 		 * lazy state.
17621 		 */
17622 		if (dtrace_dof_mode == DTRACE_DOF_MODE_LAZY_OFF) {
17623 			dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
17624 		}
17625 
17626 		/*
17627 		 * If we are the last dtrace client, switch back to lazy (from userspace) symbols
17628 		 */
17629 		if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_FROM_KERNEL) {
17630 			dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE;
17631 		}
17632 	}
17633 
17634 	lck_mtx_unlock(&dtrace_lock);
17635 	lck_rw_unlock_exclusive(&dtrace_dof_mode_lock);
17636 
17637 	/*
17638 	 * Kext probes may be retained past the end of the kext's lifespan. The
17639 	 * probes are kept until the last reference to them has been removed.
17640 	 * Since closing an active dtrace context is likely to drop that last reference,
17641 	 * let's take a shot at cleaning out the orphaned probes now.
17642 	 */
17643 	dtrace_module_unloaded(NULL);
17644 
17645 	/* State is gone so resume cluster powerdown. */
17646 	resume_cluster_powerdown();
17647 	return (0);
17648 }
17649 
17650 /*ARGSUSED*/
17651 static int
17652 dtrace_ioctl_helper(u_long cmd, caddr_t arg, int *rv)
17653 {
17654 #pragma unused(rv)
17655 	/*
17656 	 * Safe to check this outside the dof mode lock
17657 	 */
17658 	if (dtrace_dof_mode == DTRACE_DOF_MODE_NEVER)
17659 		return KERN_SUCCESS;
17660 
17661 	switch (cmd) {
17662 #if defined (__arm64__)
17663 	case DTRACEHIOC_ADDDOF_U32:
17664 	case DTRACEHIOC_ADDDOF_U64:
17665 #else
17666 	case DTRACEHIOC_ADDDOF:
17667 #endif /* __arm64__*/
17668 		{
17669 			dof_helper_t *dhp = NULL;
17670 			size_t dof_ioctl_data_size;
17671 			dof_ioctl_data_t* multi_dof;
17672 			unsigned int i;
17673 			int rval = 0;
17674 			user_addr_t user_address = *(user_addr_t*)arg;
17675 			uint64_t dof_count;
17676 			int multi_dof_claimed = 0;
17677 			proc_t* p = current_proc();
17678 
17679 			/*
17680 			 * If this is a restricted process and dtrace is restricted,
17681 			 * do not allow DOFs to be registered
17682 			 */
17683 			if (dtrace_is_restricted() &&
17684 				!dtrace_are_restrictions_relaxed() &&
17685 				!dtrace_can_attach_to_proc(current_proc())) {
17686 				return (EACCES);
17687 			}
17688 
17689 			/*
17690 			 * Read the number of DOF sections being passed in.
17691 			 */
17692 			if (copyin(user_address + offsetof(dof_ioctl_data_t, dofiod_count),
17693 				   &dof_count,
17694 				   sizeof(dof_count))) {
17695 				dtrace_dof_error(NULL, "failed to copyin dofiod_count");
17696 				return (EFAULT);
17697 			}
17698 
17699 			/*
17700 			 * Range check the count.
17701 			 */
17702 			if (dof_count == 0 || dof_count > 1024) {
17703 				dtrace_dof_error(NULL, "dofiod_count is not valid");
17704 				return (EINVAL);
17705 			}
17706 
17707 			/*
17708 			 * Allocate a correctly sized structure and copyin the data.
17709 			 */
17710 			dof_ioctl_data_size = DOF_IOCTL_DATA_T_SIZE(dof_count);
17711 			if ((multi_dof = kmem_alloc(dof_ioctl_data_size, KM_SLEEP)) == NULL)
17712 				return (ENOMEM);
17713 
17714 			/* NOTE! We can no longer exit this method via return */
17715 			if (copyin(user_address, multi_dof, dof_ioctl_data_size) != 0) {
17716 				dtrace_dof_error(NULL, "failed copyin of dof_ioctl_data_t");
17717 				rval = EFAULT;
17718 				goto cleanup;
17719 			}
17720 
17721 			/*
17722 			 * Check that the count didn't change between the first copyin and the second.
17723 			 */
17724 			if (multi_dof->dofiod_count != dof_count) {
17725 				rval = EINVAL;
17726 				goto cleanup;
17727 			}
17728 
17729 			/*
17730 			 * Try to process lazily first.
17731 			 */
17732 			rval = dtrace_lazy_dofs_add(p, multi_dof, &multi_dof_claimed);
17733 
17734 			/*
17735 			 * If rval is EACCES, we must be non-lazy.
17736 			 */
17737 			if (rval == EACCES) {
17738 				rval = 0;
17739 				/*
17740 				 * Process each dof_helper_t
17741 				 */
17742 				i = 0;
17743 				do {
17744 					dhp = &multi_dof->dofiod_helpers[i];
17745 
17746 					dof_hdr_t *dof = dtrace_dof_copyin(dhp->dofhp_dof, &rval);
17747 
17748 					if (dof != NULL) {
17749 						lck_mtx_lock(&dtrace_meta_lock);
17750 						lck_mtx_lock(&dtrace_lock);
17751 
17752 						/*
17753 						 * dtrace_helper_slurp() takes responsibility for the dof --
17754 						 * it may free it now or it may save it and free it later.
17755 						 */
17756 						if ((dhp->dofhp_dof = (uint64_t)dtrace_helper_slurp(p, dof, dhp)) == -1ULL) {
17757 							rval = EINVAL;
17758 						}
17759 
17760 						lck_mtx_unlock(&dtrace_lock);
17761 						lck_mtx_unlock(&dtrace_meta_lock);
17762 					}
17763 				} while (++i < multi_dof->dofiod_count && rval == 0);
17764 			}
17765 
17766 			/*
17767 			 * We need to copyout the multi_dof struct, because it contains
17768 			 * the generation (unique id) values needed to call DTRACEHIOC_REMOVE
17769 			 *
17770 			 * This could certainly be better optimized.
17771 			 */
17772 			if (copyout(multi_dof, user_address, dof_ioctl_data_size) != 0) {
17773 				dtrace_dof_error(NULL, "failed copyout of dof_ioctl_data_t");
17774 				/* Don't overwrite pre-existing error code */
17775 				if (rval == 0) rval = EFAULT;
17776 			}
17777 
17778 		cleanup:
17779 			/*
17780 			 * If we had to allocate struct memory, free it.
17781 			 */
17782 			if (multi_dof != NULL && !multi_dof_claimed) {
17783 				kmem_free(multi_dof, dof_ioctl_data_size);
17784 			}
17785 
17786 			return rval;
17787 		}
17788 
17789 		case DTRACEHIOC_REMOVE: {
17790 			int generation = *(int*)arg;
17791 			proc_t* p = current_proc();
17792 
17793 			/*
17794 			 * Try lazy first.
17795 			 */
17796 			int rval = dtrace_lazy_dofs_remove(p, generation);
17797 
17798 			/*
17799 			 * EACCES means non-lazy
17800 			 */
17801 			if (rval == EACCES) {
17802 				lck_mtx_lock(&dtrace_meta_lock);
17803 				lck_mtx_lock(&dtrace_lock);
17804 				rval = dtrace_helper_destroygen(p, generation);
17805 				lck_mtx_unlock(&dtrace_lock);
17806 				lck_mtx_unlock(&dtrace_meta_lock);
17807 			}
17808 
17809 			return (rval);
17810 		}
17811 
17812 		default:
17813 			break;
17814 	}
17815 
17816 	return ENOTTY;
17817 }
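
/*
 * The DTRACEHIOC_ADDDOF path above reads dofiod_count twice: once to size
 * the kernel allocation, and once again as part of the full copyin of the
 * dof_ioctl_data_t, failing with EINVAL if the two values disagree.  This
 * is the usual guard against a racing user thread changing the count
 * between the two copyins (a double-fetch).  A minimal sketch of the
 * pattern, using hypothetical names rather than the real DOF structures:
 *
 *	uint64_t count;
 *
 *	if (copyin(uaddr + offsetof(example_list_t, count), &count,
 *	    sizeof (count)) != 0)
 *		return (EFAULT);
 *	if (count == 0 || count > EXAMPLE_MAX_COUNT)
 *		return (EINVAL);
 *	size = EXAMPLE_LIST_SIZE(count);
 *	if ((list = kmem_alloc(size, KM_SLEEP)) == NULL)
 *		return (ENOMEM);
 *	if (copyin(uaddr, list, size) != 0) {
 *		rval = EFAULT;
 *		goto cleanup;
 *	}
 *	if (list->count != count) {
 *		rval = EINVAL;
 *		goto cleanup;
 *	}
 */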
17818 
17819 /*ARGSUSED*/
17820 static int
17821 dtrace_ioctl(dev_t dev, u_long cmd, user_addr_t arg, int md, cred_t *cr, int *rv)
17822 {
17823 #pragma unused(md)
17824 	minor_t minor = getminor(dev);
17825 	dtrace_state_t *state;
17826 	int rval;
17827 
17828 	/* Darwin puts Helper on its own major device. */
17829 
17830 	state = dtrace_state_get(minor);
17831 
17832 	if (state->dts_anon) {
17833 		ASSERT(dtrace_anon.dta_state == NULL);
17834 		state = state->dts_anon;
17835 	}
17836 
17837 	switch (cmd) {
17838 	case DTRACEIOC_PROVIDER: {
17839 		dtrace_providerdesc_t pvd;
17840 		dtrace_provider_t *pvp;
17841 
17842 		if (copyin(arg, &pvd, sizeof (pvd)) != 0)
17843 			return (EFAULT);
17844 
17845 		pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
17846 		lck_mtx_lock(&dtrace_provider_lock);
17847 
17848 		for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
17849 			if (strncmp(pvp->dtpv_name, pvd.dtvd_name, DTRACE_PROVNAMELEN) == 0)
17850 				break;
17851 		}
17852 
17853 		lck_mtx_unlock(&dtrace_provider_lock);
17854 
17855 		if (pvp == NULL)
17856 			return (ESRCH);
17857 
17858 		bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
17859 		bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
17860 		if (copyout(&pvd, arg, sizeof (pvd)) != 0)
17861 			return (EFAULT);
17862 
17863 		return (0);
17864 	}
17865 
17866 	case DTRACEIOC_EPROBE: {
17867 		dtrace_eprobedesc_t epdesc;
17868 		dtrace_ecb_t *ecb;
17869 		dtrace_action_t *act;
17870 		void *buf;
17871 		size_t size;
17872 		uintptr_t dest;
17873 		int nrecs;
17874 
17875 		if (copyin(arg, &epdesc, sizeof (epdesc)) != 0)
17876 			return (EFAULT);
17877 
17878 		lck_mtx_lock(&dtrace_lock);
17879 
17880 		if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
17881 			lck_mtx_unlock(&dtrace_lock);
17882 			return (EINVAL);
17883 		}
17884 
17885 		if (ecb->dte_probe == NULL) {
17886 			lck_mtx_unlock(&dtrace_lock);
17887 			return (EINVAL);
17888 		}
17889 
17890 		epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
17891 		epdesc.dtepd_uarg = ecb->dte_uarg;
17892 		epdesc.dtepd_size = ecb->dte_size;
17893 
17894 		nrecs = epdesc.dtepd_nrecs;
17895 		epdesc.dtepd_nrecs = 0;
17896 		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17897 			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17898 				continue;
17899 
17900 			epdesc.dtepd_nrecs++;
17901 		}
17902 
17903 		/*
17904 		 * Now that we have the size, we need to allocate a temporary
17905 		 * buffer in which to store the complete description.  We need
17906 		 * the temporary buffer to be able to drop dtrace_lock()
17907 		 * across the copyout(), below.
17908 		 */
17909 		size = sizeof (dtrace_eprobedesc_t) +
17910 			(epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
17911 
17912 		buf = kmem_alloc(size, KM_SLEEP);
17913 		dest = (uintptr_t)buf;
17914 
17915 		bcopy(&epdesc, (void *)dest, sizeof (epdesc));
17916 		dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
17917 
17918 		for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17919 			if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17920 				continue;
17921 
17922 			if (nrecs-- == 0)
17923 				break;
17924 
17925 			bcopy(&act->dta_rec, (void *)dest,
17926 			    sizeof (dtrace_recdesc_t));
17927 			dest += sizeof (dtrace_recdesc_t);
17928 		}
17929 
17930 		lck_mtx_unlock(&dtrace_lock);
17931 
17932 		if (copyout(buf, arg, dest - (uintptr_t)buf) != 0) {
17933 			kmem_free(buf, size);
17934 			return (EFAULT);
17935 		}
17936 
17937 		kmem_free(buf, size);
17938 		return (0);
17939 	}
17940 
17941 	case DTRACEIOC_AGGDESC: {
17942 		dtrace_aggdesc_t aggdesc;
17943 		dtrace_action_t *act;
17944 		dtrace_aggregation_t *agg;
17945 		int nrecs;
17946 		uint32_t offs;
17947 		dtrace_recdesc_t *lrec;
17948 		void *buf;
17949 		size_t size;
17950 		uintptr_t dest;
17951 
17952 		if (copyin(arg, &aggdesc, sizeof (aggdesc)) != 0)
17953 			return (EFAULT);
17954 
17955 		lck_mtx_lock(&dtrace_lock);
17956 
17957 		if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
17958 			lck_mtx_unlock(&dtrace_lock);
17959 			return (EINVAL);
17960 		}
17961 
17962 		aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
17963 
17964 		nrecs = aggdesc.dtagd_nrecs;
17965 		aggdesc.dtagd_nrecs = 0;
17966 
17967 		offs = agg->dtag_base;
17968 		lrec = &agg->dtag_action.dta_rec;
17969 		aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
17970 
17971 		for (act = agg->dtag_first; ; act = act->dta_next) {
17972 			ASSERT(act->dta_intuple ||
17973 			    DTRACEACT_ISAGG(act->dta_kind));
17974 
17975 			/*
17976 			 * If this action has a record size of zero, it
17977 			 * denotes an argument to the aggregating action.
17978 			 * Because the presence of this record doesn't (or
17979 			 * shouldn't) affect the way the data is interpreted,
17980 			 * we don't copy it out to save user-level the
17981 			 * confusion of dealing with a zero-length record.
17982 			 */
17983 			if (act->dta_rec.dtrd_size == 0) {
17984 				ASSERT(agg->dtag_hasarg);
17985 				continue;
17986 			}
17987 
17988 			aggdesc.dtagd_nrecs++;
17989 
17990 			if (act == &agg->dtag_action)
17991 				break;
17992 		}
17993 
17994 		/*
17995 		 * Now that we have the size, we need to allocate a temporary
17996 		 * buffer in which to store the complete description.  We need
17997 		 * the temporary buffer to be able to drop dtrace_lock()
17998 		 * across the copyout(), below.
17999 		 */
18000 		size = sizeof (dtrace_aggdesc_t) +
18001 			(aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
18002 
18003 		buf = kmem_alloc(size, KM_SLEEP);
18004 		dest = (uintptr_t)buf;
18005 
18006 		bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
18007 		dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
18008 
18009 		for (act = agg->dtag_first; ; act = act->dta_next) {
18010 			dtrace_recdesc_t rec = act->dta_rec;
18011 
18012 			/*
18013 			 * See the comment in the above loop for why we pass
18014 			 * over zero-length records.
18015 			 */
18016 			if (rec.dtrd_size == 0) {
18017 				ASSERT(agg->dtag_hasarg);
18018 				continue;
18019 			}
18020 
18021 			if (nrecs-- == 0)
18022 				break;
18023 
18024 			rec.dtrd_offset -= offs;
18025 			bcopy(&rec, (void *)dest, sizeof (rec));
18026 			dest += sizeof (dtrace_recdesc_t);
18027 
18028 			if (act == &agg->dtag_action)
18029 				break;
18030 		}
18031 
18032 		lck_mtx_unlock(&dtrace_lock);
18033 
18034 		if (copyout(buf, arg, dest - (uintptr_t)buf) != 0) {
18035 			kmem_free(buf, size);
18036 			return (EFAULT);
18037 		}
18038 
18039 		kmem_free(buf, size);
18040 		return (0);
18041 	}
18042 
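	/*
	 * DTRACEIOC_ENABLE is the ioctl that turns a consumer's compiled DOF
	 * into live enablings: the DOF is copied in, slurped into an
	 * enabling, the DOF options are applied to the state, and the
	 * enabling is matched against the probes (and retained on success so
	 * that it can be matched again later).  A NULL argument is the cue
	 * to simply re-evaluate all retained enablings.
	 */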
18043 	case DTRACEIOC_ENABLE: {
18044 		dof_hdr_t *dof;
18045 		dtrace_enabling_t *enab = NULL;
18046 		dtrace_vstate_t *vstate;
18047 		int err = 0;
18048 
18049 		*rv = 0;
18050 
18051 		/*
18052 		 * If a NULL argument has been passed, we take this as our
18053 		 * cue to reevaluate our enablings.
18054 		 */
18055 		if (arg == 0) {
18056 			dtrace_enabling_matchall();
18057 
18058 			return (0);
18059 		}
18060 
18061 		if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
18062 			return (rval);
18063 
18064 		lck_mtx_lock(&cpu_lock);
18065 		lck_mtx_lock(&dtrace_lock);
18066 		vstate = &state->dts_vstate;
18067 
18068 		if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
18069 			lck_mtx_unlock(&dtrace_lock);
18070 			lck_mtx_unlock(&cpu_lock);
18071 			dtrace_dof_destroy(dof);
18072 			return (EBUSY);
18073 		}
18074 
18075 		if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
18076 			lck_mtx_unlock(&dtrace_lock);
18077 			lck_mtx_unlock(&cpu_lock);
18078 			dtrace_dof_destroy(dof);
18079 			return (EINVAL);
18080 		}
18081 
18082 		if ((rval = dtrace_dof_options(dof, state)) != 0) {
18083 			dtrace_enabling_destroy(enab);
18084 			lck_mtx_unlock(&dtrace_lock);
18085 			lck_mtx_unlock(&cpu_lock);
18086 			dtrace_dof_destroy(dof);
18087 			return (rval);
18088 		}
18089 
18090 		if ((err = dtrace_enabling_match(enab, rv, NULL)) == 0) {
18091 			err = dtrace_enabling_retain(enab);
18092 		} else {
18093 			dtrace_enabling_destroy(enab);
18094 		}
18095 
18096 		lck_mtx_unlock(&dtrace_lock);
18097 		lck_mtx_unlock(&cpu_lock);
18098 		dtrace_dof_destroy(dof);
18099 
18100 		return (err);
18101 	}
18102 
18103 	case DTRACEIOC_REPLICATE: {
18104 		dtrace_repldesc_t desc;
18105 		dtrace_probedesc_t *match = &desc.dtrpd_match;
18106 		dtrace_probedesc_t *create = &desc.dtrpd_create;
18107 		int err;
18108 
18109 		if (copyin(arg, &desc, sizeof (desc)) != 0)
18110 			return (EFAULT);
18111 
18112 		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18113 		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18114 		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18115 		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18116 
18117 		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18118 		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18119 		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18120 		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18121 
18122 		lck_mtx_lock(&dtrace_lock);
18123 		err = dtrace_enabling_replicate(state, match, create);
18124 		lck_mtx_unlock(&dtrace_lock);
18125 
18126 		return (err);
18127 	}
18128 
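	/*
	 * For both DTRACEIOC_PROBES and DTRACEIOC_PROBEMATCH, dtpd_id acts as
	 * a cursor: the search starts at the given probe ID and returns the
	 * first visible (or matching) probe at or above it, or ESRCH if none
	 * remains.  A consumer can therefore walk the probe table by
	 * re-issuing the ioctl with the returned ID plus one.
	 */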
18129 	case DTRACEIOC_PROBEMATCH:
18130 	case DTRACEIOC_PROBES: {
18131 		dtrace_probe_t *probe = NULL;
18132 		dtrace_probedesc_t desc;
18133 		dtrace_probekey_t pkey;
18134 		dtrace_id_t i;
18135 		int m = 0;
18136 		uint32_t priv;
18137 		uid_t uid;
18138 		zoneid_t zoneid;
18139 
18140 		if (copyin(arg, &desc, sizeof (desc)) != 0)
18141 			return (EFAULT);
18142 
18143 		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18144 		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18145 		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18146 		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18147 
18148 		/*
18149 		 * Before we attempt to match this probe, we want to give
18150 		 * all providers the opportunity to provide it.
18151 		 */
18152 		if (desc.dtpd_id == DTRACE_IDNONE) {
18153 			lck_mtx_lock(&dtrace_provider_lock);
18154 			dtrace_probe_provide(&desc, NULL);
18155 			lck_mtx_unlock(&dtrace_provider_lock);
18156 			desc.dtpd_id++;
18157 		}
18158 
18159 		dtrace_cred2priv(cr, &priv, &uid, &zoneid);
18160 
18161 		lck_mtx_lock(&dtrace_lock);
18162 
18163 		if (cmd == DTRACEIOC_PROBEMATCH)  {
18164 			dtrace_probekey(&desc, &pkey);
18165 			pkey.dtpk_id = DTRACE_IDNONE;
18166 
18167 			/* Quiet compiler warning */
18168 			for (i = desc.dtpd_id; i <= (dtrace_id_t)dtrace_nprobes; i++) {
18169 				if ((probe = dtrace_probes[i - 1]) != NULL &&
18170 					(m = dtrace_match_probe(probe, &pkey,
18171 					priv, uid, zoneid)) != 0)
18172 					break;
18173 			}
18174 
18175 			if (m < 0) {
18176 				lck_mtx_unlock(&dtrace_lock);
18177 				return (EINVAL);
18178 			}
18179 			dtrace_probekey_release(&pkey);
18180 
18181 		} else {
18182 			/* Quiet compiler warning */
18183 			for (i = desc.dtpd_id; i <= (dtrace_id_t)dtrace_nprobes; i++) {
18184 				if ((probe = dtrace_probes[i - 1]) != NULL &&
18185 					dtrace_match_priv(probe, priv, uid, zoneid))
18186 					break;
18187 			}
18188 		}
18189 
18190 		if (probe == NULL) {
18191 			lck_mtx_unlock(&dtrace_lock);
18192 			return (ESRCH);
18193 		}
18194 
18195 		dtrace_probe_description(probe, &desc);
18196 		lck_mtx_unlock(&dtrace_lock);
18197 
18198 		if (copyout(&desc, arg, sizeof (desc)) != 0)
18199 			return (EFAULT);
18200 
18201 		return (0);
18202 	}
18203 
18204 	case DTRACEIOC_PROBEARG: {
18205 		dtrace_argdesc_t desc;
18206 		dtrace_probe_t *probe;
18207 		dtrace_provider_t *prov;
18208 
18209 		if (copyin(arg, &desc, sizeof (desc)) != 0)
18210 			return (EFAULT);
18211 
18212 		if (desc.dtargd_id == DTRACE_IDNONE)
18213 			return (EINVAL);
18214 
18215 		if (desc.dtargd_ndx == DTRACE_ARGNONE)
18216 			return (EINVAL);
18217 
18218 		lck_mtx_lock(&dtrace_provider_lock);
18219 		lck_mtx_lock(&mod_lock);
18220 		lck_mtx_lock(&dtrace_lock);
18221 
18222 		/* Quiet compiler warning */
18223 		if (desc.dtargd_id > (dtrace_id_t)dtrace_nprobes) {
18224 			lck_mtx_unlock(&dtrace_lock);
18225 			lck_mtx_unlock(&mod_lock);
18226 			lck_mtx_unlock(&dtrace_provider_lock);
18227 			return (EINVAL);
18228 		}
18229 
18230 		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
18231 			lck_mtx_unlock(&dtrace_lock);
18232 			lck_mtx_unlock(&mod_lock);
18233 			lck_mtx_unlock(&dtrace_provider_lock);
18234 			return (EINVAL);
18235 		}
18236 
18237 		lck_mtx_unlock(&dtrace_lock);
18238 
18239 		prov = probe->dtpr_provider;
18240 
18241 		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
18242 			/*
18243 			 * There isn't any typed information for this probe.
18244 			 * Set the argument number to DTRACE_ARGNONE.
18245 			 */
18246 			desc.dtargd_ndx = DTRACE_ARGNONE;
18247 		} else {
18248 			desc.dtargd_native[0] = '\0';
18249 			desc.dtargd_xlate[0] = '\0';
18250 			desc.dtargd_mapping = desc.dtargd_ndx;
18251 
18252 			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
18253 			    probe->dtpr_id, probe->dtpr_arg, &desc);
18254 		}
18255 
18256 		lck_mtx_unlock(&mod_lock);
18257 		lck_mtx_unlock(&dtrace_provider_lock);
18258 
18259 		if (copyout(&desc, arg, sizeof (desc)) != 0)
18260 			return (EFAULT);
18261 
18262 		return (0);
18263 	}
18264 
18265 	case DTRACEIOC_GO: {
18266 		processorid_t cpuid;
18267 		rval = dtrace_state_go(state, &cpuid);
18268 
18269 		if (rval != 0)
18270 			return (rval);
18271 
18272 		if (copyout(&cpuid, arg, sizeof (cpuid)) != 0)
18273 			return (EFAULT);
18274 
18275 		return (0);
18276 	}
18277 
18278 	case DTRACEIOC_STOP: {
18279 		processorid_t cpuid;
18280 
18281 		lck_mtx_lock(&dtrace_lock);
18282 		rval = dtrace_state_stop(state, &cpuid);
18283 		lck_mtx_unlock(&dtrace_lock);
18284 
18285 		if (rval != 0)
18286 			return (rval);
18287 
18288 		if (copyout(&cpuid, arg, sizeof (cpuid)) != 0)
18289 			return (EFAULT);
18290 
18291 		return (0);
18292 	}
18293 
18294 	case DTRACEIOC_DOFGET: {
18295 		dof_hdr_t hdr, *dof;
18296 		uint64_t len;
18297 
18298 		if (copyin(arg, &hdr, sizeof (hdr)) != 0)
18299 			return (EFAULT);
18300 
18301 		lck_mtx_lock(&dtrace_lock);
18302 		dof = dtrace_dof_create(state);
18303 		lck_mtx_unlock(&dtrace_lock);
18304 
18305 		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
18306 		rval = copyout(dof, arg, len);
18307 		dtrace_dof_destroy(dof);
18308 
18309 		return (rval == 0 ? 0 : EFAULT);
18310 	}
18311 
18312 	case DTRACEIOC_SLEEP: {
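	/*
	 * DTRACEIOC_SLEEP and DTRACEIOC_SIGNAL implement the consumer wakeup
	 * protocol: the consumer blocks with a deadline given in nanoseconds,
	 * and the value copied back out distinguishes an ordinary timeout
	 * (DTRACE_WAKE_TIMEOUT) from an early wake caused by a buffer
	 * crossing its limit (DTRACE_WAKE_BUF_LIMIT).  DTRACEIOC_SIGNAL
	 * simply issues a wakeup on the state so that another thread can cut
	 * the wait short.
	 */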
18313 		int64_t time;
18314 		uint64_t abstime;
18315 		uint64_t rvalue = DTRACE_WAKE_TIMEOUT;
18316 
18317 		if (copyin(arg, &time, sizeof(time)) != 0)
18318 			return (EFAULT);
18319 
18320 		nanoseconds_to_absolutetime((uint64_t)time, &abstime);
18321 		clock_absolutetime_interval_to_deadline(abstime, &abstime);
18322 
18323 		if (assert_wait_deadline(state, THREAD_ABORTSAFE, abstime) == THREAD_WAITING) {
18324 			if (state->dts_buf_over_limit > 0) {
18325 				clear_wait(current_thread(), THREAD_INTERRUPTED);
18326 				rvalue = DTRACE_WAKE_BUF_LIMIT;
18327 			} else {
18328 				thread_block(THREAD_CONTINUE_NULL);
18329 				if (state->dts_buf_over_limit > 0) {
18330 					rvalue = DTRACE_WAKE_BUF_LIMIT;
18331 				}
18332 			}
18333 		}
18334 
18335 		if (copyout(&rvalue, arg, sizeof(rvalue)) != 0)
18336 			return (EFAULT);
18337 
18338 		return (0);
18339 	}
18340 
18341 	case DTRACEIOC_SIGNAL: {
18342 		wakeup(state);
18343 		return (0);
18344 	}
18345 
18346 	case DTRACEIOC_AGGSNAP:
18347 	case DTRACEIOC_BUFSNAP: {
18348 		dtrace_bufdesc_t desc;
18349 		caddr_t cached;
18350 		boolean_t over_limit;
18351 		dtrace_buffer_t *buf;
18352 
18353 		if (copyin(arg, &desc, sizeof (desc)) != 0)
18354 			return (EFAULT);
18355 
18356 		if ((int)desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
18357 			return (EINVAL);
18358 
18359 		lck_mtx_lock(&dtrace_lock);
18360 
18361 		if (cmd == DTRACEIOC_BUFSNAP) {
18362 			buf = &state->dts_buffer[desc.dtbd_cpu];
18363 		} else {
18364 			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
18365 		}
18366 
18367 		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
18368 			size_t sz = buf->dtb_offset;
18369 
18370 			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
18371 				lck_mtx_unlock(&dtrace_lock);
18372 				return (EBUSY);
18373 			}
18374 
18375 			/*
18376 			 * If this buffer has already been consumed, we're
18377 			 * going to indicate that there's nothing left here
18378 			 * to consume.
18379 			 */
18380 			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
18381 				lck_mtx_unlock(&dtrace_lock);
18382 
18383 				desc.dtbd_size = 0;
18384 				desc.dtbd_drops = 0;
18385 				desc.dtbd_errors = 0;
18386 				desc.dtbd_oldest = 0;
18387 				sz = sizeof (desc);
18388 
18389 				if (copyout(&desc, arg, sz) != 0)
18390 					return (EFAULT);
18391 
18392 				return (0);
18393 			}
18394 
18395 			/*
18396 			 * If this is a ring buffer that has wrapped, we want
18397 			 * to copy the whole thing out.
18398 			 */
18399 			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
18400 				dtrace_buffer_polish(buf);
18401 				sz = buf->dtb_size;
18402 			}
18403 
18404 			if (copyout(buf->dtb_tomax, (user_addr_t)desc.dtbd_data, sz) != 0) {
18405 				lck_mtx_unlock(&dtrace_lock);
18406 				return (EFAULT);
18407 			}
18408 
18409 			desc.dtbd_size = sz;
18410 			desc.dtbd_drops = buf->dtb_drops;
18411 			desc.dtbd_errors = buf->dtb_errors;
18412 			desc.dtbd_oldest = buf->dtb_xamot_offset;
18413 			desc.dtbd_timestamp = dtrace_gethrtime();
18414 
18415 			lck_mtx_unlock(&dtrace_lock);
18416 
18417 			if (copyout(&desc, arg, sizeof (desc)) != 0)
18418 				return (EFAULT);
18419 
18420 			buf->dtb_flags |= DTRACEBUF_CONSUMED;
18421 
18422 			return (0);
18423 		}
18424 
18425 		if (buf->dtb_tomax == NULL) {
18426 			ASSERT(buf->dtb_xamot == NULL);
18427 			lck_mtx_unlock(&dtrace_lock);
18428 			return (ENOENT);
18429 		}
18430 
18431 		cached = buf->dtb_tomax;
18432 		over_limit = buf->dtb_cur_limit == buf->dtb_size;
18433 
18434 		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
18435 
18436 		dtrace_xcall(desc.dtbd_cpu,
18437 			(dtrace_xcall_t)dtrace_buffer_switch, buf);
18438 
18439 		state->dts_errors += buf->dtb_xamot_errors;
18440 
18441 		/*
18442 		 * If the buffers did not actually switch, then the cross call
18443 		 * did not take place -- presumably because the given CPU is
18444 		 * not in the ready set.  If this is the case, we'll return
18445 		 * ENOENT.
18446 		 */
18447 		if (buf->dtb_tomax == cached) {
18448 			ASSERT(buf->dtb_xamot != cached);
18449 			lck_mtx_unlock(&dtrace_lock);
18450 			return (ENOENT);
18451 		}
18452 
18453 		ASSERT(cached == buf->dtb_xamot);
18454 		/*
18455 		 * At this point we know the buffers have switched, so we
18456 		 * can decrement the over-limit count if the buffer was over
18457 		 * its limit. The new buffer might already be over its limit,
18458 		 * but we don't care since we're guaranteed not to be
18459 		 * checking the buffer over-limit count at this point.
18460 		 */
18461 		if (over_limit) {
18462 			uint32_t old = os_atomic_dec_orig(&state->dts_buf_over_limit, relaxed);
18463 			#pragma unused(old)
18464 
18465 			/*
18466 			 * Verify that we didn't underflow the value
18467 			 */
18468 			ASSERT(old != 0);
18469 		}
18470 
18471 		/*
18472 		 * We have our snapshot; now copy it out.
18473 		 */
18474 		if (dtrace_buffer_copyout(buf->dtb_xamot,
18475 					(user_addr_t)desc.dtbd_data,
18476 					buf->dtb_xamot_offset) != 0) {
18477 			lck_mtx_unlock(&dtrace_lock);
18478 			return (EFAULT);
18479 		}
18480 
18481 		desc.dtbd_size = buf->dtb_xamot_offset;
18482 		desc.dtbd_drops = buf->dtb_xamot_drops;
18483 		desc.dtbd_errors = buf->dtb_xamot_errors;
18484 		desc.dtbd_oldest = 0;
18485 		desc.dtbd_timestamp = buf->dtb_switched;
18486 
18487 		lck_mtx_unlock(&dtrace_lock);
18488 
18489 		/*
18490 		 * Finally, copy out the buffer description.
18491 		 */
18492 		if (copyout(&desc, arg, sizeof (desc)) != 0)
18493 			return (EFAULT);
18494 
18495 		return (0);
18496 	}
18497 
18498 	case DTRACEIOC_CONF: {
18499 		dtrace_conf_t conf;
18500 
18501 		bzero(&conf, sizeof (conf));
18502 		conf.dtc_difversion = DIF_VERSION;
18503 		conf.dtc_difintregs = DIF_DIR_NREGS;
18504 		conf.dtc_diftupregs = DIF_DTR_NREGS;
18505 		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
18506 
18507 		if (copyout(&conf, arg, sizeof (conf)) != 0)
18508 			return (EFAULT);
18509 
18510 		return (0);
18511 	}
18512 
18513 	case DTRACEIOC_STATUS: {
18514 		dtrace_status_t stat;
18515 		dtrace_dstate_t *dstate;
18516 		int j;
18517 		uint64_t nerrs;
18518 
18519 		/*
18520 		 * See the comment in dtrace_state_deadman() for the reason
18521 		 * for setting dts_laststatus to INT64_MAX before setting
18522 		 * it to the correct value.
18523 		 */
18524 		state->dts_laststatus = INT64_MAX;
18525 		dtrace_membar_producer();
18526 		state->dts_laststatus = dtrace_gethrtime();
18527 
18528 		bzero(&stat, sizeof (stat));
18529 
18530 		lck_mtx_lock(&dtrace_lock);
18531 
18532 		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
18533 			lck_mtx_unlock(&dtrace_lock);
18534 			return (ENOENT);
18535 		}
18536 
18537 		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
18538 			stat.dtst_exiting = 1;
18539 
18540 		nerrs = state->dts_errors;
18541 		dstate = &state->dts_vstate.dtvs_dynvars;
18542 
18543 		zpercpu_foreach_cpu(i) {
18544 			dtrace_dstate_percpu_t *dcpu = zpercpu_get_cpu(dstate->dtds_percpu, i);
18545 
18546 			stat.dtst_dyndrops += dcpu->dtdsc_drops;
18547 			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
18548 			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
18549 
18550 			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
18551 				stat.dtst_filled++;
18552 
18553 			nerrs += state->dts_buffer[i].dtb_errors;
18554 
18555 			for (j = 0; j < state->dts_nspeculations; j++) {
18556 				dtrace_speculation_t *spec;
18557 				dtrace_buffer_t *buf;
18558 
18559 				spec = &state->dts_speculations[j];
18560 				buf = &spec->dtsp_buffer[i];
18561 				stat.dtst_specdrops += buf->dtb_xamot_drops;
18562 			}
18563 		}
18564 
18565 		stat.dtst_specdrops_busy = state->dts_speculations_busy;
18566 		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
18567 		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
18568 		stat.dtst_dblerrors = state->dts_dblerrors;
18569 		stat.dtst_killed =
18570 			(state->dts_activity == DTRACE_ACTIVITY_KILLED);
18571 		stat.dtst_errors = nerrs;
18572 
18573 		lck_mtx_unlock(&dtrace_lock);
18574 
18575 		if (copyout(&stat, arg, sizeof (stat)) != 0)
18576 			return (EFAULT);
18577 
18578 		return (0);
18579 	}
18580 
18581 	case DTRACEIOC_FORMAT: {
18582 		dtrace_fmtdesc_t fmt;
18583 		char *str;
18584 		int len;
18585 
18586 		if (copyin(arg, &fmt, sizeof (fmt)) != 0)
18587 			return (EFAULT);
18588 
18589 		lck_mtx_lock(&dtrace_lock);
18590 
18591 		if (fmt.dtfd_format == 0 ||
18592 			fmt.dtfd_format > state->dts_nformats) {
18593 			lck_mtx_unlock(&dtrace_lock);
18594 			return (EINVAL);
18595 		}
18596 
18597 		/*
18598 		 * Format strings are allocated contiguously and they are
18599 		 * never freed; if a format index is less than the number
18600 		 * of formats, we can assert that the format map is non-NULL
18601 		 * and that the format for the specified index is non-NULL.
18602 		 */
18603 		ASSERT(state->dts_formats != NULL);
18604 		str = state->dts_formats[fmt.dtfd_format - 1]->dtf_str;
18605 		ASSERT(str != NULL);
18606 
18607 		len = strlen(str) + 1;
18608 
18609 		if (len > fmt.dtfd_length) {
18610 			fmt.dtfd_length = len;
18611 
18612 			if (copyout(&fmt, arg, sizeof (fmt)) != 0) {
18613 				lck_mtx_unlock(&dtrace_lock);
18614 				return (EINVAL);
18615 			}
18616 		} else {
18617 			if (copyout(str, (user_addr_t)fmt.dtfd_string, len) != 0) {
18618 				lck_mtx_unlock(&dtrace_lock);
18619 				return (EINVAL);
18620 			}
18621 		}
18622 
18623 		lck_mtx_unlock(&dtrace_lock);
18624 		return (0);
18625 	}
18626 
18627 	case DTRACEIOC_MODUUIDSLIST: {
18628 		size_t module_uuids_list_size;
18629 		dtrace_module_uuids_list_t* uuids_list;
18630 		uint64_t dtmul_count;
18631 
18632 		/*
18633 		 * Security restrictions make this operation illegal; when they are in
18634 		 * effect, DTrace must refuse to provide any fbt probes.
18635 		 */
18636 		if (dtrace_fbt_probes_restricted()) {
18637 			cmn_err(CE_WARN, "security restrictions disallow DTRACEIOC_MODUUIDSLIST");
18638 			return (EPERM);
18639 		}
18640 
18641 		/*
18642 		 * Fail if the kernel symbol mode makes this operation illegal.
18643 		 * Both NEVER & ALWAYS_FROM_KERNEL are permanent states; it is legal to check
18644 		 * for them without holding the dtrace_lock.
18645 		 */
18646 		if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER ||
18647 		    dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL) {
18648 			cmn_err(CE_WARN, "dtrace_kernel_symbol_mode of %u disallows DTRACEIOC_MODUUIDSLIST", dtrace_kernel_symbol_mode);
18649 			return (EPERM);
18650 		}
18651 
18652 		/*
18653 		 * Read the number of module UUIDs being passed in.
18654 		 */
18655 		if (copyin(arg + offsetof(dtrace_module_uuids_list_t, dtmul_count),
18656 		    &dtmul_count, sizeof(dtmul_count)) != 0) {
18657 			cmn_err(CE_WARN, "failed to copyin dtmul_count");
18658 			return (EFAULT);
18659 		}
18660 
18661 		/*
18662 		 * Range check the count. More than 2k kexts is probably an error.
18663 		 */
18664 		if (dtmul_count > 2048) {
18665 			cmn_err(CE_WARN, "dtmul_count is not valid");
18666 			return (EINVAL);
18667 		}
18668 
18669 		/*
18670 		 * For all queries, we return EINVAL when the user specified
18671 		 * count does not match the actual number of modules we find
18672 		 * available.
18673 		 *
18674 		 * If the user specified count is zero, then this serves as a
18675 		 * simple query to count the available modules in need of symbols.
18676 		 */
18677 
18678 		rval = 0;
18679 
18680 		if (dtmul_count == 0)
18681 		{
18682 			lck_mtx_lock(&mod_lock);
18683 			struct modctl* ctl = dtrace_modctl_list;
18684 			while (ctl) {
18685 				ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
18686 				if (!MOD_SYMBOLS_DONE(ctl) && !MOD_IS_STATIC_KEXT(ctl)) {
18687 					dtmul_count++;
18688 					rval = EINVAL;
18689 				}
18690 				ctl = ctl->mod_next;
18691 			}
18692 			lck_mtx_unlock(&mod_lock);
18693 
18694 			if (copyout(&dtmul_count, arg, sizeof (dtmul_count)) != 0)
18695 				return (EFAULT);
18696 			else
18697 				return (rval);
18698 		}
18699 
18700 		/*
18701 		 * If we reach this point, then we have a request for full list data.
18702 		 * Allocate a correctly sized structure and copyin the data.
18703 		 */
18704 		module_uuids_list_size = DTRACE_MODULE_UUIDS_LIST_SIZE(dtmul_count);
18705 		if ((uuids_list = kmem_alloc(module_uuids_list_size, KM_SLEEP)) == NULL)
18706 			return (ENOMEM);
18707 
18708 		/* NOTE! We can no longer exit this method via return */
18709 		if (copyin(arg, uuids_list, module_uuids_list_size) != 0) {
18710 			cmn_err(CE_WARN, "failed copyin of dtrace_module_uuids_list_t");
18711 			rval = EFAULT;
18712 			goto moduuidslist_cleanup;
18713 		}
18714 
18715 		/*
18716 		 * Check that the count didn't change between the first copyin and the second.
18717 		 */
18718 		if (uuids_list->dtmul_count != dtmul_count) {
18719 			rval = EINVAL;
18720 			goto moduuidslist_cleanup;
18721 		}
18722 
18723 		/*
18724 		 * Build the list of UUIDs that need symbols.
18725 		 */
18726 		lck_mtx_lock(&mod_lock);
18727 
18728 		dtmul_count = 0;
18729 
18730 		struct modctl* ctl = dtrace_modctl_list;
18731 		while (ctl) {
18732 			/*
18733 			 * We assume that userspace symbols will be "better" than kernel level symbols,
18734 			 * as userspace can search for dSYM(s) and symbol'd binaries. Even if kernel syms
18735 			 * are available, add user syms if the module might use them.
18736 			 */
18737 			ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
18738 			if (!MOD_SYMBOLS_DONE(ctl) && !MOD_IS_STATIC_KEXT(ctl)) {
18739 				UUID* uuid = &uuids_list->dtmul_uuid[dtmul_count];
18740 				if (dtmul_count++ < uuids_list->dtmul_count) {
18741 					memcpy(uuid, ctl->mod_uuid, sizeof(UUID));
18742 				}
18743 			}
18744 			ctl = ctl->mod_next;
18745 		}
18746 
18747 		lck_mtx_unlock(&mod_lock);
18748 
18749 		if (uuids_list->dtmul_count < dtmul_count)
18750 			rval = EINVAL;
18751 
18752 		uuids_list->dtmul_count = dtmul_count;
18753 
18754 		/*
18755 		 * Copyout the symbols list (or at least the count!)
18756 		 */
18757 		if (copyout(uuids_list, arg, module_uuids_list_size) != 0) {
18758 			cmn_err(CE_WARN, "failed copyout of dtrace_module_uuids_list_t");
18759 			rval = EFAULT;
18760 		}
18761 
18762 	moduuidslist_cleanup:
18763 		/*
18764 		 * If we had to allocate struct memory, free it.
18765 		 */
18766 		if (uuids_list != NULL) {
18767 			kmem_free(uuids_list, module_uuids_list_size);
18768 		}
18769 
18770 		return rval;
18771 	}
18772 
18773 	case DTRACEIOC_PROVMODSYMS: {
18774 		size_t module_symbols_size;
18775 		dtrace_module_symbols_t* module_symbols;
18776 		uint64_t dtmodsyms_count;
18777 
18778 		/*
18779 		 * Security restrictions make this operation illegal; when they are in
18780 		 * effect, DTrace must refuse to provide any fbt probes.
18781 		 */
18782 		if (dtrace_fbt_probes_restricted()) {
18783 			cmn_err(CE_WARN, "security restrictions disallow DTRACEIOC_PROVMODSYMS");
18784 			return (EPERM);
18785 		}
18786 
18787 		/*
18788 		 * Fail if the kernel symbol mode makes this operation illegal.
18789 		 * Both NEVER & ALWAYS_FROM_KERNEL are permanent states; it is legal to check
18790 		 * for them without holding the dtrace_lock.
18791 		 */
18792 		if (dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_NEVER ||
18793 		    dtrace_kernel_symbol_mode == DTRACE_KERNEL_SYMBOLS_ALWAYS_FROM_KERNEL) {
18794 			cmn_err(CE_WARN, "dtrace_kernel_symbol_mode of %u disallows DTRACEIOC_PROVMODSYMS", dtrace_kernel_symbol_mode);
18795 			return (EPERM);
18796 		}
18797 
18798 		/*
18799 		 * Read the number of module symbols structs being passed in.
18800 		 */
18801 		if (copyin(arg + offsetof(dtrace_module_symbols_t, dtmodsyms_count),
18802 		    &dtmodsyms_count, sizeof(dtmodsyms_count)) != 0) {
18803 			cmn_err(CE_WARN, "failed to copyin dtmodsyms_count");
18804 			return (EFAULT);
18805 		}
18806 
18807 		/* Ensure that we have at least one symbol. */
18808 		if (dtmodsyms_count == 0) {
18809 			cmn_err(CE_WARN, "Invalid dtmodsyms_count value");
18810 			return (EINVAL);
18811 		}
18812 
18813 		/* Safely calculate size we need for copyin buffer. */
18814 		module_symbols_size = DTRACE_MODULE_SYMBOLS_SIZE(dtmodsyms_count);
18815 		if (module_symbols_size == 0 || module_symbols_size > (size_t)dtrace_copy_maxsize()) {
18816 			cmn_err(CE_WARN, "Invalid module_symbols_size %ld", module_symbols_size);
18817 			return (EINVAL);
18818 		}
18819 
18820 		if ((module_symbols = kmem_alloc(module_symbols_size, KM_SLEEP)) == NULL)
18821 			return (ENOMEM);
18822 
18823 		rval = 0;
18824 
18825 		/* NOTE! We can no longer exit this method via return */
18826 		if (copyin(arg, module_symbols, module_symbols_size) != 0) {
18827 			cmn_err(CE_WARN, "failed copyin of dtrace_module_symbols_t");
18828 			rval = EFAULT;
18829 			goto module_symbols_cleanup;
18830 		}
18831 
18832 		/*
18833 		 * Check that the count didn't change between the first copyin and the second.
18834 		 */
18835 		if (module_symbols->dtmodsyms_count != dtmodsyms_count) {
18836 			rval = EINVAL;
18837 			goto module_symbols_cleanup;
18838 		}
18839 
18840 		/*
18841 		 * Find the modctl to add symbols to.
18842 		 */
18843 		lck_mtx_lock(&dtrace_provider_lock);
18844 		lck_mtx_lock(&mod_lock);
18845 
18846 		struct modctl* ctl = dtrace_modctl_list;
18847 		while (ctl) {
18848 			ASSERT(!MOD_HAS_USERSPACE_SYMBOLS(ctl));
18849 			if (MOD_HAS_UUID(ctl) && !MOD_SYMBOLS_DONE(ctl) && memcmp(module_symbols->dtmodsyms_uuid, ctl->mod_uuid, sizeof(UUID)) == 0) {
18850 				dtrace_provider_t *prv;
18851 				ctl->mod_user_symbols = module_symbols;
18852 
18853 				/*
18854 				 * We're going to call each provider's per-module provide operation
18855 				 * specifying only this module.
18856 				 */
18857 				for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
18858 					prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
18859 				/*
18860 				 * We gave every provider a chance to provide with the user syms, go ahead and clear them
18861 				 * We gave every provider a chance to provide with the user syms; go ahead and clear them.
18862 				ctl->mod_user_symbols = NULL; /* MUST reset this to clear HAS_USERSPACE_SYMBOLS */
18863 			}
18864 			ctl = ctl->mod_next;
18865 		}
18866 
18867 		lck_mtx_unlock(&mod_lock);
18868 		lck_mtx_unlock(&dtrace_provider_lock);
18869 
18870 	module_symbols_cleanup:
18871 		/*
18872 		 * If we had to allocate struct memory, free it.
18873 		 */
18874 		if (module_symbols != NULL) {
18875 			kmem_free(module_symbols, module_symbols_size);
18876 		}
18877 
18878 		return rval;
18879 	}
18880 
18881 	case DTRACEIOC_PROCWAITFOR: {
18882 		dtrace_procdesc_t pdesc = {
18883 			.p_name = {0},
18884 			.p_pid  = -1
18885 		};
18886 
18887 		if ((rval = copyin(arg, &pdesc, sizeof(pdesc))) != 0)
18888 			goto proc_waitfor_error;
18889 
18890 		if ((rval = dtrace_proc_waitfor(&pdesc)) != 0)
18891 			goto proc_waitfor_error;
18892 
18893 		if ((rval = copyout(&pdesc, arg, sizeof(pdesc))) != 0)
18894 			goto proc_waitfor_error;
18895 
18896 		return 0;
18897 
18898 	proc_waitfor_error:
18899 		/* The process was suspended, revert this since the client will not do it. */
18900 		if (pdesc.p_pid != -1) {
18901 			proc_t *proc = proc_find(pdesc.p_pid);
18902 			if (proc != PROC_NULL) {
18903 				task_pidresume(proc_task(proc));
18904 				proc_rele(proc);
18905 			}
18906 		}
18907 
18908 		return rval;
18909 	}
18910 
18911 	default:
18912 		break;
18913 	}
18914 
18915 	return (ENOTTY);
18916 }
18917 
18918 /*
18919  * APPLE NOTE:  dtrace_detach not implemented
18920  */
18921 #if !defined(__APPLE__)
18922 /*ARGSUSED*/
18923 static int
18924 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
18925 {
18926 	dtrace_state_t *state;
18927 
18928 	switch (cmd) {
18929 	case DDI_DETACH:
18930 		break;
18931 
18932 	case DDI_SUSPEND:
18933 		return (DDI_SUCCESS);
18934 
18935 	default:
18936 		return (DDI_FAILURE);
18937 	}
18938 
18939 	lck_mtx_lock(&cpu_lock);
18940 	lck_mtx_lock(&dtrace_provider_lock);
18941 	lck_mtx_lock(&dtrace_lock);
18942 
18943 	ASSERT(dtrace_opens == 0);
18944 
18945 	if (dtrace_helpers > 0) {
18946 		lck_mtx_unlock(&dtrace_lock);
18947 		lck_mtx_unlock(&dtrace_provider_lock);
18948 		lck_mtx_unlock(&cpu_lock);
18949 		return (DDI_FAILURE);
18950 	}
18951 
18952 	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
18953 		lck_mtx_unlock(&dtrace_lock);
18954 		lck_mtx_unlock(&dtrace_provider_lock);
18955 		lck_mtx_unlock(&cpu_lock);
18956 		return (DDI_FAILURE);
18957 	}
18958 
18959 	dtrace_provider = NULL;
18960 
18961 	if ((state = dtrace_anon_grab()) != NULL) {
18962 		/*
18963 		 * If there were ECBs on this state, the provider should
18964 		 * not have been allowed to detach; assert that there are
18965 		 * none.
18966 		 */
18967 		ASSERT(state->dts_necbs == 0);
18968 		dtrace_state_destroy(state);
18969 
18970 		/*
18971 		 * If we're being detached with anonymous state, we need to
18972 		 * indicate to the kernel debugger that DTrace is now inactive.
18973 		 */
18974 		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
18975 	}
18976 
18977 	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
18978 	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
18979 	dtrace_cpu_init = NULL;
18980 	dtrace_helpers_cleanup = NULL;
18981 	dtrace_helpers_fork = NULL;
18982 	dtrace_cpustart_init = NULL;
18983 	dtrace_cpustart_fini = NULL;
18984 	dtrace_debugger_init = NULL;
18985 	dtrace_debugger_fini = NULL;
18986 	dtrace_kreloc_init = NULL;
18987 	dtrace_kreloc_fini = NULL;
18988 	dtrace_modload = NULL;
18989 	dtrace_modunload = NULL;
18990 
18991 	lck_mtx_unlock(&cpu_lock);
18992 
18993 	if (dtrace_helptrace_enabled) {
18994 		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
18995 		dtrace_helptrace_buffer = NULL;
18996 	}
18997 
18998 	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
18999 	dtrace_probes = NULL;
19000 	dtrace_nprobes = 0;
19001 
19002 	dtrace_hash_destroy(dtrace_strings);
19003 	dtrace_hash_destroy(dtrace_byprov);
19004 	dtrace_hash_destroy(dtrace_bymod);
19005 	dtrace_hash_destroy(dtrace_byfunc);
19006 	dtrace_hash_destroy(dtrace_byname);
19007 	dtrace_strings = NULL;
19008 	dtrace_byprov = NULL;
19009 	dtrace_bymod = NULL;
19010 	dtrace_byfunc = NULL;
19011 	dtrace_byname = NULL;
19012 
19013 	kmem_cache_destroy(dtrace_state_cache);
19014 	vmem_destroy(dtrace_arena);
19015 
19016 	if (dtrace_toxrange != NULL) {
19017 		kmem_free(dtrace_toxrange,
19018 		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
19019 		dtrace_toxrange = NULL;
19020 		dtrace_toxranges = 0;
19021 		dtrace_toxranges_max = 0;
19022 	}
19023 
19024 	ddi_remove_minor_node(dtrace_devi, NULL);
19025 	dtrace_devi = NULL;
19026 
19027 	ddi_soft_state_fini(&dtrace_softstate);
19028 
19029 	ASSERT(dtrace_vtime_references == 0);
19030 	ASSERT(dtrace_opens == 0);
19031 	ASSERT(dtrace_retained == NULL);
19032 
19033 	lck_mtx_unlock(&dtrace_lock);
19034 	lck_mtx_unlock(&dtrace_provider_lock);
19035 
19036 #ifdef illumos
19037 	/*
19038 	 * We don't destroy the task queue until after we have dropped our
19039 	 * locks (taskq_destroy() may block on running tasks).  To prevent
19040 	 * attempting to do work after we have effectively detached but before
19041 	 * the task queue has been destroyed, all tasks dispatched via the
19042 	 * task queue must check that DTrace is still attached before
19043 	 * performing any operation.
19044 	 */
19045 	taskq_destroy(dtrace_taskq);
19046 	dtrace_taskq = NULL;
19047 #endif
19048 
19049 	return (DDI_SUCCESS);
19050 }
19051 #endif  /* __APPLE__ */
19052 
19053 d_open_t _dtrace_open, helper_open;
19054 d_close_t _dtrace_close, helper_close;
19055 d_ioctl_t _dtrace_ioctl, helper_ioctl;
19056 
19057 int
19058 _dtrace_open(dev_t dev, int flags, int devtype, struct proc *p)
19059 {
19060 #pragma unused(p)
19061 	dev_t locdev = dev;
19062 
19063 	return  dtrace_open( &locdev, flags, devtype, CRED());
19064 }
19065 
19066 int
19067 helper_open(dev_t dev, int flags, int devtype, struct proc *p)
19068 {
19069 #pragma unused(dev,flags,devtype,p)
19070 	return 0;
19071 }
19072 
19073 int
19074 _dtrace_close(dev_t dev, int flags, int devtype, struct proc *p)
19075 {
19076 #pragma unused(p)
19077 	return dtrace_close( dev, flags, devtype, CRED());
19078 }
19079 
19080 int
19081 helper_close(dev_t dev, int flags, int devtype, struct proc *p)
19082 {
19083 #pragma unused(dev,flags,devtype,p)
19084 	return 0;
19085 }
19086 
19087 int
19088 _dtrace_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
19089 {
19090 #pragma unused(p)
19091 	int err, rv = 0;
19092 	user_addr_t uaddrp;
19093 
19094 	if (proc_is64bit(p))
19095 		uaddrp = *(user_addr_t *)data;
19096 	else
19097 		uaddrp = (user_addr_t) *(uint32_t *)data;
19098 
19099 	err = dtrace_ioctl(dev, cmd, uaddrp, fflag, CRED(), &rv);
19100 
19101 	/* Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
19102 	if (err != 0) {
19103 		ASSERT( (err & 0xfffff000) == 0 );
19104 		return (err & 0xfff); /* ioctl will return -1 and will set errno to an error code < 4096 */
19105 	} else if (rv != 0) {
19106 		ASSERT( (rv & 0xfff00000) == 0 );
19107 		return (((rv & 0xfffff) << 12)); /* ioctl will return -1 and will set errno to a value >= 4096 */
19108 	} else
19109 		return 0;
19110 }
19111 
19112 int
19113 helper_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p)
19114 {
19115 #pragma unused(dev,fflag,p)
19116 	int err, rv = 0;
19117 
19118 	err = dtrace_ioctl_helper(cmd, data, &rv);
19119 	/* Darwin's BSD ioctls only return -1 or zero. Overload errno to mimic Solaris. 20 bits suffice. */
19120 	if (err != 0) {
19121 		ASSERT( (err & 0xfffff000) == 0 );
19122 		return (err & 0xfff); /* ioctl will return -1 and will set errno to an error code < 4096 */
19123 	} else if (rv != 0) {
19124 		ASSERT( (rv & 0xfff00000) == 0 );
19125 		return (((rv & 0xfffff) << 12)); /* ioctl will return -1 and will set errno to a value >= 4096 */
19126 	} else
19127 		return 0;
19128 }
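
/*
 * A minimal sketch of how a userspace caller could undo the errno
 * overloading performed by the two ioctl wrappers above (illustrative
 * only; the wrapper name below is hypothetical and not part of any
 * shipped interface):
 *
 *	#include <errno.h>
 *	#include <sys/ioctl.h>
 *
 *	static int
 *	example_dtrace_ioctl(int fd, unsigned long cmd, void *data, int *rvp)
 *	{
 *		*rvp = 0;
 *		if (ioctl(fd, cmd, data) == 0)
 *			return (0);
 *		if (errno < 4096)
 *			return (errno);
 *		*rvp = errno >> 12;
 *		return (0);
 *	}
 *
 * An errno below 4096 is a genuine error code; an errno of 4096 or more
 * carries the handler's "rv" value shifted left by 12 bits.
 */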
19129 
19130 #define HELPER_MAJOR  -24 /* let the kernel pick the device number */
19131 
19132 #define nulldevfp        (void (*)(void))&nulldev
19133 
19134 const static struct cdevsw helper_cdevsw =
19135 {
19136 	.d_open = helper_open,
19137 	.d_close = helper_close,
19138 	.d_read = eno_rdwrt,
19139 	.d_write = eno_rdwrt,
19140 	.d_ioctl = helper_ioctl,
19141 	.d_stop = eno_stop,
19142 	.d_reset = eno_reset,
19143 	.d_select = eno_select,
19144 	.d_mmap = eno_mmap,
19145 	.d_strategy = eno_strat,
19146 	.d_reserved_1 = eno_getc,
19147 	.d_reserved_2 = eno_putc,
19148 };
19149 
19150 static int helper_majdevno = 0;
19151 
19152 static int gDTraceInited = 0;
19153 
19154 void
19155 helper_init( void )
19156 {
19157 	/*
19158 	 * Once the "helper" is initialized, it can take ioctl calls that use locks
19159 	 * and zones initialized in dtrace_init. Make certain dtrace_init was called
19160 	 * before us.
19161 	 */
19162 
19163 	if (!gDTraceInited) {
19164 		panic("helper_init before dtrace_init");
19165 	}
19166 
19167 	if (0 >= helper_majdevno)
19168 	{
19169 		helper_majdevno = cdevsw_add(HELPER_MAJOR, &helper_cdevsw);
19170 
19171 		if (helper_majdevno < 0) {
19172 			printf("helper_init: failed to allocate a major number!\n");
19173 			return;
19174 		}
19175 
19176 		if (NULL == devfs_make_node( makedev(helper_majdevno, 0), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
19177 					DTRACEMNR_HELPER )) {
19178 			printf("dtrace_init: failed to devfs_make_node for helper!\n");
19179 			return;
19180 		}
19181 	} else
19182 		panic("helper_init: called twice!");
19183 }
19184 
19185 #undef HELPER_MAJOR
19186 
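/*
 * devfs clone hook for the dtrace device: each open of the cloning node
 * asks for a fresh minor number via dtrace_state_reserve(), so every
 * consumer that reaches dtrace_open() gets its own minor (and therefore
 * its own dtrace_state_t from dtrace_state_create()).
 */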
19187 static int
19188 dtrace_clone_func(dev_t dev, int action)
19189 {
19190 #pragma unused(dev)
19191 
19192 	if (action == DEVFS_CLONE_ALLOC) {
19193 		return dtrace_state_reserve();
19194 	}
19195 	else if (action == DEVFS_CLONE_FREE) {
19196 		return 0;
19197 	}
19198 	else return -1;
19199 }
19200 
19201 void dtrace_ast(void);
19202 
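/*
 * dtrace_ast() drains the dtrace_wake_clients mask: each set bit names a
 * consumer by its minor number, and every consumer whose bit was set when
 * the mask was atomically cleared gets a wakeup on its state.
 */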
19203 void
19204 dtrace_ast(void)
19205 {
19206 	int i;
19207 	uint32_t clients = os_atomic_xchg(&dtrace_wake_clients, 0, relaxed);
19208 	if (clients == 0)
19209 		return;
19210 	/*
19211 	 * We disable preemption here to be sure that we won't get
19212 	 * interrupted by a wakeup to a thread that is higher
19213 	 * priority than us, so that we issue all of the wakeups.
19214 	 */
19215 	disable_preemption();
19216 	for (i = 0; i < DTRACE_NCLIENTS; i++) {
19217 		if (clients & (1 << i)) {
19218 			dtrace_state_t *state = dtrace_state_get(i);
19219 			if (state) {
19220 				wakeup(state);
19221 			}
19222 
19223 		}
19224 	}
19225 	enable_preemption();
19226 }
19227 
19228 
19229 #define DTRACE_MAJOR  -24 /* let the kernel pick the device number */
19230 
19231 static const struct cdevsw dtrace_cdevsw =
19232 {
19233 	.d_open = _dtrace_open,
19234 	.d_close = _dtrace_close,
19235 	.d_read = eno_rdwrt,
19236 	.d_write = eno_rdwrt,
19237 	.d_ioctl = _dtrace_ioctl,
19238 	.d_stop = eno_stop,
19239 	.d_reset = eno_reset,
19240 	.d_select = eno_select,
19241 	.d_mmap = eno_mmap,
19242 	.d_strategy = eno_strat,
19243 	.d_reserved_1 = eno_getc,
19244 	.d_reserved_2 = eno_putc,
19245 };
19246 
19247 LCK_ATTR_DECLARE(dtrace_lck_attr, 0, 0);
19248 LCK_GRP_DECLARE(dtrace_lck_grp, "dtrace");
19249 
19250 static int gMajDevNo;
19251 
19252 void dtrace_early_init (void)
19253 {
19254 	dtrace_restriction_policy_load();
19255 
19256 	/*
19257 	 * See dtrace_impl.h for a description of kernel symbol modes.
19258 	 * The default is to wait for symbols from userspace (lazy symbols).
19259 	 */
19260 	if (!PE_parse_boot_argn("dtrace_kernel_symbol_mode", &dtrace_kernel_symbol_mode, sizeof (dtrace_kernel_symbol_mode))) {
19261 		dtrace_kernel_symbol_mode = DTRACE_KERNEL_SYMBOLS_FROM_USERSPACE;
19262 	}
19263 }
19264 
19265 void
19266 dtrace_init( void )
19267 {
19268 	if (0 == gDTraceInited) {
19269 		unsigned int i, ncpu;
19270 		size_t size = sizeof(dtrace_buffer_memory_maxsize);
19271 
19272 		/*
19273 		 * Disable destructive actions when dtrace is running
19274 		 * in a restricted environment
19275 		 */
19276 		dtrace_destructive_disallow = dtrace_is_restricted() &&
19277 		    !dtrace_are_restrictions_relaxed();
19278 
19279 		/*
19280 		 * DTrace allocates buffers based on the maximum number
19281 		 * of enabled cpus. This call avoids any race when finding
19282 		 * that count.
19283 		 */
19284 		ASSERT(dtrace_max_cpus == 0);
19285 		ncpu = dtrace_max_cpus = ml_wait_max_cpus();
19286 
19287 		/*
19288 		 * Retrieve the size of the physical memory in order to define
19289 		 * the maximal size of the state buffer memory.  If we cannot retrieve
19290 		 * this value, we'll assume 1GB of memory per CPU; that's
19291 		 * still better than raising a kernel panic.
19292 		 */
19293 		if (0 != kernel_sysctlbyname("hw.memsize", &dtrace_buffer_memory_maxsize,
19294 		                             &size, NULL, 0))
19295 		{
19296 			dtrace_buffer_memory_maxsize = (uint64_t)ncpu * 1024 * 1024 * 1024; /* 64-bit math to avoid overflow */
19297 			printf("dtrace_init: failed to retrieve the hw.memsize, defaulted to %lld bytes\n",
19298 			       dtrace_buffer_memory_maxsize);
19299 		}
19300 
19301 		/*
19302 		 * Finally, divide by three to prevent DTrace from eating too
19303 		 * much memory.
19304 		 */
19305 		dtrace_buffer_memory_maxsize /= 3;
19306 		ASSERT(dtrace_buffer_memory_maxsize > 0);
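		/*
		 * For example, a machine reporting hw.memsize as 16 GiB ends up
		 * with a cap of roughly 5.33 GiB of DTrace buffer memory.
		 */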
19307 
19308 		gMajDevNo = cdevsw_add(DTRACE_MAJOR, &dtrace_cdevsw);
19309 
19310 		if (gMajDevNo < 0) {
19311 			printf("dtrace_init: failed to allocate a major number!\n");
19312 			gDTraceInited = 0;
19313 			return;
19314 		}
19315 
19316 		if (NULL == devfs_make_node_clone( makedev(gMajDevNo, 0), DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
19317 					dtrace_clone_func, DTRACEMNR_DTRACE )) {
19318 			printf("dtrace_init: failed to devfs_make_node_clone for dtrace!\n");
19319 			gDTraceInited = 0;
19320 			return;
19321 		}
19322 
19323 		/*
19324 		 * The cpu_core structure consists of per-CPU state available in any context.
19325 		 * On some architectures, this may mean that the page(s) containing the
19326 		 * NCPU-sized array of cpu_core structures must be locked in the TLB -- it
19327 		 * is up to the platform to assure that this is performed properly.  Note that
19328 		 * the structure is sized to avoid false sharing.
19329 		 */
19330 
19331 		dtrace_modctl_list = NULL;
19332 
19333 		cpu_core = (cpu_core_t *)kmem_zalloc( ncpu * sizeof(cpu_core_t), KM_SLEEP );
19334 		for (i = 0; i < ncpu; ++i) {
19335 			lck_mtx_init(&cpu_core[i].cpuc_pid_lock, &dtrace_lck_grp, &dtrace_lck_attr);
19336 		}
19337 
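		/*
		 * cpu_list forms a circular list of per-CPU state: cpu_next wraps
		 * from the last CPU back to cpu_list[0], and each entry carries its
		 * own fasttrap rwlock and cyclic timer list.
		 */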
19338 		cpu_list = (dtrace_cpu_t *)kmem_zalloc( ncpu * sizeof(dtrace_cpu_t), KM_SLEEP );
19339 		for (i = 0; i < ncpu; ++i) {
19340 			cpu_list[i].cpu_id = (processorid_t)i;
19341 			cpu_list[i].cpu_next = &(cpu_list[(i+1) % ncpu]);
19342 			LIST_INIT(&cpu_list[i].cpu_cyc_list);
19343 			lck_rw_init(&cpu_list[i].cpu_ft_lock, &dtrace_lck_grp, &dtrace_lck_attr);
19344 		}
19345 
19346 		/*
19347 		 * Initialize the CPU offline/online hooks.
19348 		 */
19349 		dtrace_install_cpu_hooks();
19350 
19351 		lck_mtx_lock(&cpu_lock);
19352 		for (i = 0; i < ncpu; ++i)
19353 			/* FIXME: track CPU configuration */
19354 			dtrace_cpu_setup_initial( (processorid_t)i ); /* In lieu of register_cpu_setup_func() callback */
19355 		lck_mtx_unlock(&cpu_lock);
19356 
19357 		(void)dtrace_abs_to_nano(0LL); /* Force the one-time call to clock_timebase_info() (which can take a lock) to happen now */
19358 
19359 		dtrace_strings = dtrace_hash_create(dtrace_strkey_offset,
19360 		    offsetof(dtrace_string_t, dtst_str),
19361 		    offsetof(dtrace_string_t, dtst_next),
19362 		    offsetof(dtrace_string_t, dtst_prev));
19363 
19364 		/*
19365 		 * See dtrace_impl.h for a description of dof modes.
19366 		 * The default is lazy dof.
19367 		 *
19368 		 * FIXME: Warn if state is LAZY_OFF? It won't break anything, but
19369 		 * makes no sense...
19370 		 */
19371 		if (!PE_parse_boot_argn("dtrace_dof_mode", &dtrace_dof_mode, sizeof (dtrace_dof_mode))) {
19372 #if defined(XNU_TARGET_OS_OSX)
19373 			dtrace_dof_mode = DTRACE_DOF_MODE_LAZY_ON;
19374 #else
19375 			dtrace_dof_mode = DTRACE_DOF_MODE_NEVER;
19376 #endif
19377 		}
19378 
19379 		/*
19380 		 * Sanity check of dof mode value.
19381 		 */
19382 		switch (dtrace_dof_mode) {
19383 			case DTRACE_DOF_MODE_NEVER:
19384 			case DTRACE_DOF_MODE_LAZY_ON:
19385 				/* valid modes, but nothing else we need to do */
19386 				break;
19387 
19388 			case DTRACE_DOF_MODE_LAZY_OFF:
19389 			case DTRACE_DOF_MODE_NON_LAZY:
19390 				/* Cannot wait for a dtrace_open to init fasttrap */
19391 				fasttrap_init();
19392 				break;
19393 
19394 			default:
19395 				/* Invalid, clamp to non lazy */
19396 				dtrace_dof_mode = DTRACE_DOF_MODE_NON_LAZY;
19397 				fasttrap_init();
19398 				break;
19399 		}
19400 
19401 #if CONFIG_DTRACE
19402 		if (dtrace_dof_mode != DTRACE_DOF_MODE_NEVER)
19403 			commpage_update_dof(true);
19404 #endif
19405 
19406 		gDTraceInited = 1;
19407 
19408 	} else
19409 		panic("dtrace_init: called twice!");
19410 }
19411 
19412 void
19413 dtrace_postinit(void)
19414 {
19415 	/*
19416 	 * Called from bsd_init after all providers' *_init() routines have been
19417 	 * run. That way, anonymous DOF enabled under dtrace_attach() is safe
19418 	 * to use.
19419 	 */
19420 	dtrace_attach( (dev_info_t *)(uintptr_t)makedev(gMajDevNo, 0)); /* Punning a dev_t to a dev_info_t* */
19421 
19422 	/*
19423 	 * Add the mach_kernel to the module list for lazy processing
19424 	 */
19425 	struct kmod_info fake_kernel_kmod;
19426 	memset(&fake_kernel_kmod, 0, sizeof(fake_kernel_kmod));
19427 
19428 	strlcpy(fake_kernel_kmod.name, "mach_kernel", sizeof(fake_kernel_kmod.name));
19429 	fake_kernel_kmod.id = 1;
19430 	fake_kernel_kmod.address = g_kernel_kmod_info.address;
19431 	fake_kernel_kmod.size = g_kernel_kmod_info.size;
19432 
19433 	/* Ensure we don't try to touch symbols if they are gone. */
19434 	boolean_t keepsyms = false;
19435 	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof(keepsyms));
19436 
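	/*
	 * With keepsyms the kernel retains its symbol table, so the kernel's
	 * symbols may be consulted; otherwise they are jettisoned, and the
	 * modctl is flagged with KMOD_DTRACE_NO_KERNEL_SYMS so lazy processing
	 * never tries to read them.
	 */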
19437 	if (dtrace_module_loaded(&fake_kernel_kmod, (keepsyms) ? 0 : KMOD_DTRACE_NO_KERNEL_SYMS) != 0) {
19438 		printf("dtrace_postinit: Could not register mach_kernel modctl\n");
19439 	}
19440 
19441 	(void)OSKextRegisterKextsWithDTrace();
19442 }
19443 #undef DTRACE_MAJOR
19444 
19445 /*
19446  * Routines used to register interest in CPUs being added to or removed
19447  * from the system.
19448  */
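/*
 * On xnu these are empty stubs kept for source compatibility with the Solaris
 * framework; per-CPU setup is instead driven by dtrace_install_cpu_hooks()
 * and dtrace_cpu_setup_initial() in dtrace_init() above.
 */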
19449 void
19450 register_cpu_setup_func(cpu_setup_func_t *ignore1, void *ignore2)
19451 {
19452 #pragma unused(ignore1,ignore2)
19453 }
19454 
19455 void
19456 unregister_cpu_setup_func(cpu_setup_func_t *ignore1, void *ignore2)
19457 {
19458 #pragma unused(ignore1,ignore2)
19459 }
19460