VirtualBox

source: vbox/trunk/src/VBox/ExtPacks/VBoxDTrace/onnv/uts/common/dtrace/dtrace.c@53668

Last change on this file since 53668 was 53668, checked in by vboxsync, 10 years ago

VBoxDTrace: More unresolved ring-0 stuff. (r45)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 404.1 KB
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26/*
27 * DTrace - Dynamic Tracing for Solaris
28 *
29 * This is the implementation of the Solaris Dynamic Tracing framework
30 * (DTrace). The user-visible interface to DTrace is described at length in
31 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
32 * library, the in-kernel DTrace framework, and the DTrace providers are
33 * described in the block comments in the <sys/dtrace.h> header file. The
34 * internal architecture of DTrace is described in the block comments in the
35 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
36 * implementation very much assume mastery of all of these sources; if one has
37 * an unanswered question about the implementation, one should consult them
38 * first.
39 *
40 * The functions here are ordered roughly as follows:
41 *
42 * - Probe context functions
43 * - Probe hashing functions
44 * - Non-probe context utility functions
45 * - Matching functions
46 * - Provider-to-Framework API functions
47 * - Probe management functions
48 * - DIF object functions
49 * - Format functions
50 * - Predicate functions
51 * - ECB functions
52 * - Buffer functions
53 * - Enabling functions
54 * - DOF functions
55 * - Anonymous enabling functions
56 * - Consumer state functions
57 * - Helper functions
58 * - Hook functions
59 * - Driver cookbook functions
60 *
61 * Each group of functions begins with a block comment labelled the "DTrace
62 * [Group] Functions", allowing one to find each block by searching forward
63 * on capital-f functions.
64 */
65#ifndef VBOX
66#include <sys/errno.h>
67#include <sys/stat.h>
68#include <sys/modctl.h>
69#include <sys/conf.h>
70#include <sys/systm.h>
71#include <sys/ddi.h>
72#include <sys/sunddi.h>
73#include <sys/cpuvar.h>
74#include <sys/kmem.h>
75#include <sys/strsubr.h>
76#include <sys/sysmacros.h>
77#include <sys/dtrace_impl.h>
78#include <sys/atomic.h>
79#include <sys/cmn_err.h>
80#include <sys/mutex_impl.h>
81#include <sys/rwlock_impl.h>
82#include <sys/ctf_api.h>
83#include <sys/panic.h>
84#include <sys/priv_impl.h>
85#include <sys/policy.h>
86#include <sys/cred_impl.h>
87#include <sys/procfs_isa.h>
88#include <sys/taskq.h>
89#include <sys/mkdev.h>
90#include <sys/kdi.h>
91#include <sys/zone.h>
92#include <sys/socket.h>
93#include <netinet/in.h>
94
95#else /* VBOX */
96# include <sys/dtrace_impl.h>
97# include <iprt/assert.h>
98# include <iprt/cpuset.h>
99# include <iprt/mp.h>
100# include <iprt/string.h>
101# include <iprt/process.h>
102# include <iprt/thread.h>
103# include <iprt/timer.h>
104# include <limits.h>
105
106/*
107 * Use asm.h to implement some of the simple stuff in dtrace_asm.s.
108 */
109# include <iprt/asm.h>
110# include <iprt/asm-amd64-x86.h>
111# define dtrace_casptr(a_ppvDst, a_pvOld, a_pvNew) \
112 VBoxDtCompareAndSwapPtr((void * volatile *)a_ppvDst, a_pvOld, a_pvNew)
113DECLINLINE(void *) VBoxDtCompareAndSwapPtr(void * volatile *ppvDst, void *pvOld, void *pvNew)
114{
115 void *pvRet;
116 ASMAtomicCmpXchgExPtrVoid(ppvDst, pvNew, pvOld, &pvRet);
117 return pvRet;
118}
119
120# define dtrace_cas32(a_pu32Dst, a_pu32Old, a_pu32New) \
121 VBoxDtCompareAndSwapU32(a_pu32Dst, a_pu32Old, a_pu32New)
122DECLINLINE(uint32_t) VBoxDtCompareAndSwapU32(uint32_t volatile *pu32Dst, uint32_t u32Old, uint32_t u32New)
123{
124 uint32_t u32Ret;
125 ASMAtomicCmpXchgExU32(pu32Dst, u32New, u32Old, &u32Ret);
126 return u32Ret;
127}
128
129#define dtrace_membar_consumer() ASMReadFence()
130#define dtrace_membar_producer() ASMWriteFence()
131#define dtrace_interrupt_disable() ASMIntDisableFlags()
132#define dtrace_interrupt_enable(a_EFL) ASMSetFlags(a_EFL)
133
134/*
135 * NULL must be set to 0 or we'll end up with a billion warnings (= errors).
136 */
137# undef NULL
138# define NULL (0)
139#endif /* VBOX */
140
141/*
142 * DTrace Tunable Variables
143 *
144 * The following variables may be tuned by adding a line to /etc/system that
145 * includes both the name of the DTrace module ("dtrace") and the name of the
146 * variable. For example:
147 *
148 * set dtrace:dtrace_destructive_disallow = 1
149 *
150 * In general, the only variables that one should be tuning this way are those
151 * that affect system-wide DTrace behavior, and for which the default behavior
152 * is undesirable. Most of these variables are tunable on a per-consumer
153 * basis using DTrace options, and need not be tuned on a system-wide basis.
154 * When tuning these variables, avoid pathological values; while some attempt
155 * is made to verify the integrity of these variables, they are not considered
156 * part of the supported interface to DTrace, and they are therefore not
157 * checked comprehensively. Further, these variables should not be tuned
158 * dynamically via "mdb -kw" or other means; they should only be tuned via
159 * /etc/system.
160 */
161int dtrace_destructive_disallow = 0;
162dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
163size_t dtrace_difo_maxsize = (256 * 1024);
164dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
165size_t dtrace_global_maxsize = (16 * 1024);
166size_t dtrace_actions_max = (16 * 1024);
167size_t dtrace_retain_max = 1024;
168dtrace_optval_t dtrace_helper_actions_max = 32;
169dtrace_optval_t dtrace_helper_providers_max = 32;
170dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
171size_t dtrace_strsize_default = 256;
172dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
173dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
174dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
175dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
176dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
177dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
178dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
179dtrace_optval_t dtrace_nspec_default = 1;
180dtrace_optval_t dtrace_specsize_default = 32 * 1024;
181dtrace_optval_t dtrace_stackframes_default = 20;
182dtrace_optval_t dtrace_ustackframes_default = 20;
183dtrace_optval_t dtrace_jstackframes_default = 50;
184dtrace_optval_t dtrace_jstackstrsize_default = 512;
185int dtrace_msgdsize_max = 128;
186hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
187hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
188int dtrace_devdepth_max = 32;
189int dtrace_err_verbose;
190hrtime_t dtrace_deadman_interval = NANOSEC;
191hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
192hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
193
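/*
 * Editorial example, not part of the original source: the per-consumer
 * alternative mentioned above.  Most of these values can be set for a
 * single enabling with a D pragma (or the equivalent dtrace(1M) -x flag)
 * rather than system-wide via /etc/system, e.g.:
 *
 *	#pragma D option cleanrate=101hz
 *	#pragma D option dynvarsize=2m
 */
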
194/*
195 * DTrace External Variables
196 *
197 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
198 * available to DTrace consumers via the backtick (`) syntax. One of these,
199 * dtrace_zero, is made deliberately so: it is provided as a source of
200 * well-known, zero-filled memory. While this variable is not documented,
201 * it is used by some translators as an implementation detail.
202 */
203const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
204
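/*
 * Editorial example, not part of the original source: a D one-liner that
 * reads one of these kernel variables via the backtick syntax:
 *
 *	BEGIN { trace(`dtrace_destructive_disallow); exit(0); }
 */
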
205/*
206 * DTrace Internal Variables
207 */
208#ifndef VBOX
209static dev_info_t *dtrace_devi; /* device info */
210#endif
211static vmem_t *dtrace_arena; /* probe ID arena */
212#ifndef VBOX
213static vmem_t *dtrace_minor; /* minor number arena */
214static taskq_t *dtrace_taskq; /* task queue */
215#endif
216static dtrace_probe_t **dtrace_probes; /* array of all probes */
217static VBDTTYPE(uint32_t,int) dtrace_nprobes; /* number of probes */
218static dtrace_provider_t *dtrace_provider; /* provider list */
219static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
220static int dtrace_opens; /* number of opens */
221static int dtrace_helpers; /* number of helpers */
222#ifndef VBOX
223static void *dtrace_softstate; /* softstate pointer */
224#endif
225static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
226static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
227static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
228static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
229static int dtrace_toxranges; /* number of toxic ranges */
230static int dtrace_toxranges_max; /* size of toxic range array */
231static dtrace_anon_t dtrace_anon; /* anonymous enabling */
232static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
233static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
234static kthread_t *dtrace_panicked; /* panicking thread */
235static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
236static dtrace_genid_t dtrace_probegen; /* current probe generation */
237static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
238static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
239static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
240static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
241static int dtrace_dynvar_failclean; /* dynvars failed to clean */
242
243/*
244 * DTrace Locking
245 * DTrace is protected by three (relatively coarse-grained) locks:
246 *
247 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
248 * including enabling state, probes, ECBs, consumer state, helper state,
249 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
250 * probe context is lock-free -- synchronization is handled via the
251 * dtrace_sync() cross call mechanism.
252 *
253 * (2) dtrace_provider_lock is required when manipulating provider state, or
254 * when provider state must be held constant.
255 *
256 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
257 * when meta provider state must be held constant.
258 *
259 * The lock ordering between these three locks is dtrace_meta_lock before
260 * dtrace_provider_lock before dtrace_lock. (In particular, there are
261 * several places where dtrace_provider_lock is held by the framework as it
262 * calls into the providers -- which then call back into the framework,
263 * grabbing dtrace_lock.)
264 *
265 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
266 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
267 * role as a coarse-grained lock; it is acquired before both of these locks.
268 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
269 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
270 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
271 * acquired _between_ dtrace_provider_lock and dtrace_lock.
272 */
273static kmutex_t dtrace_lock; /* probe state lock */
274static kmutex_t dtrace_provider_lock; /* provider state lock */
275static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
276
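/*
 * Editorial sketch, not part of the original source: the full acquisition
 * order implied by the rules above, assuming the standard mutex_enter()/
 * mutex_exit() interfaces and the kernel's cpu_lock and mod_lock.
 */
#if 0
	mutex_enter(&dtrace_meta_lock);		/* (3) meta provider state */
	mutex_enter(&cpu_lock);			/* between meta and the rest */
	mutex_enter(&dtrace_provider_lock);	/* (2) provider state */
	mutex_enter(&mod_lock);			/* between provider and dtrace */
	mutex_enter(&dtrace_lock);		/* (1) core DTrace state */
	/* ... manipulate DTrace state here ... */
	mutex_exit(&dtrace_lock);
	mutex_exit(&mod_lock);
	mutex_exit(&dtrace_provider_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&dtrace_meta_lock);
#endif
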
277/*
278 * DTrace Provider Variables
279 *
280 * These are the variables relating to DTrace as a provider (that is, the
281 * provider of the BEGIN, END, and ERROR probes).
282 */
283static dtrace_pattr_t dtrace_provider_attr = {
284{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
285{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
286{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
287{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
288{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
289};
290
291static void
292dtrace_nullop(void)
293{}
294
295static int
296dtrace_enable_nullop(void)
297{
298 return (0);
299}
300
301static dtrace_pops_t dtrace_provider_ops = {
302 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
303 (void (*)(void *, struct modctl *))dtrace_nullop,
304 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
305 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
306 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
307 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
308 NULL,
309 NULL,
310 NULL,
311 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
312};
313
314static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
315static dtrace_id_t dtrace_probeid_end; /* special END probe */
316dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
317
318/*
319 * DTrace Helper Tracing Variables
320 */
321uint32_t dtrace_helptrace_next = 0;
322uint32_t dtrace_helptrace_nlocals;
323char *dtrace_helptrace_buffer;
324int dtrace_helptrace_bufsize = 512 * 1024;
325
326#ifdef DEBUG
327int dtrace_helptrace_enabled = 1;
328#else
329int dtrace_helptrace_enabled = 0;
330#endif
331
332/*
333 * DTrace Error Hashing
334 *
335 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
336 * table. This is very useful for checking coverage of tests that are
337 * expected to induce DIF or DOF processing errors, and may be useful for
338 * debugging problems in the DIF code generator or in DOF generation. The
339 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
340 */
341#ifdef DEBUG
342static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
343static const char *dtrace_errlast;
344static kthread_t *dtrace_errthread;
345static kmutex_t dtrace_errlock;
346#endif
347
348/*
349 * DTrace Macros and Constants
350 *
351 * These are various macros that are useful in various spots in the
352 * implementation, along with a few random constants that have no meaning
353 * outside of the implementation. There is no real structure to this cpp
354 * mishmash -- but is there ever?
355 */
356#define DTRACE_HASHSTR(hash, probe) \
357 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
358
359#define DTRACE_HASHNEXT(hash, probe) \
360 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
361
362#define DTRACE_HASHPREV(hash, probe) \
363 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
364
365#define DTRACE_HASHEQ(hash, lhs, rhs) \
366 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
367 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
368
369#define DTRACE_AGGHASHSIZE_SLEW 17
370
371#define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
372
373/*
374 * The key for a thread-local variable consists of the lower 61 bits of the
375 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
376 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
377 * equal to a variable identifier. This is necessary (but not sufficient) to
378 * assure that global associative arrays never collide with thread-local
379 * variables. To guarantee that they cannot collide, we must also define the
380 * order for keying dynamic variables. That order is:
381 *
382 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
383 *
384 * Because the variable-key and the tls-key are in orthogonal spaces, there is
385 * no way for a global variable key signature to match a thread-local key
386 * signature.
387 */
388#ifndef VBOX
389#define DTRACE_TLS_THRKEY(where) { \
390 uint_t intr = 0; \
391 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
392 for (; actv; actv >>= 1) \
393 intr++; \
394 ASSERT(intr < (1 << 3)); \
395 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
396 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
397}
398#else
399#define DTRACE_TLS_THRKEY(where) do { \
400 (where) = (((uintptr_t)RTThreadNativeSelf() + DIF_VARIABLE_MAX) & (RT_BIT_64(61) - 1)) \
401 | (RTThreadIsInInterrupt(NIL_RTTHREAD) ? RT_BIT_64(61) : 0); \
402} while (0)
403#endif
404
405#define DT_BSWAP_8(x) ((x) & 0xff)
406#define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
407#define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
408#define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
409
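/*
 * Editorial self-test, not part of the original source: what the byte-swap
 * macros above evaluate to.
 */
#if 0
	ASSERT(DT_BSWAP_16(0x1234) == 0x3412);
	ASSERT(DT_BSWAP_32(0x12345678U) == 0x78563412U);
	ASSERT(DT_BSWAP_64(0x1122334455667788ULL) == 0x8877665544332211ULL);
#endif
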
410#define DT_MASK_LO 0x00000000FFFFFFFFULL
411
412#define DTRACE_STORE(type, tomax, offset, what) \
413 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
414
415#ifndef __i386
416#define DTRACE_ALIGNCHECK(addr, size, flags) \
417 if (addr & (size - 1)) { \
418 *flags |= CPU_DTRACE_BADALIGN; \
419 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = addr; \
420 return (0); \
421 }
422#else
423#define DTRACE_ALIGNCHECK(addr, size, flags)
424#endif
425
426/*
427 * Test whether a range of memory starting at testaddr of size testsz falls
428 * within the range of memory described by addr, sz. We take care to avoid
429 * problems with overflow and underflow of the unsigned quantities, and
430 * disallow all negative sizes. Ranges of size 0 are allowed.
431 */
432#define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
433 ((testaddr) - (baseaddr) < (basesz) && \
434 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \
435 (testaddr) + (testsz) >= (testaddr))
436
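/*
 * Editorial self-test, not part of the original source: the unsigned
 * arithmetic above rejects ranges that start below the base as well as
 * ranges whose end wraps around the top of the address space.
 */
#if 0
	uint64_t base = ~(uint64_t)15;			/* last 16 bytes */
	ASSERT( DTRACE_INRANGE(base, 8, base, 16));	/* fits */
	ASSERT( DTRACE_INRANGE(base, 0, base, 16));	/* size 0 is allowed */
	ASSERT(!DTRACE_INRANGE(base - 1, 4, base, 16));	/* starts below base */
	ASSERT(!DTRACE_INRANGE(base, 32, base, 16));	/* end wraps past top */
#endif
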
437/*
438 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
439 * alloc_sz on the righthand side of the comparison in order to avoid overflow
440 * or underflow in the comparison with it. This is simpler than the INRANGE
441 * check above, because we know that the dtms_scratch_ptr is valid in the
442 * range. Allocations of size zero are allowed.
443 */
444#define DTRACE_INSCRATCH(mstate, alloc_sz) \
445 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
446 (mstate)->dtms_scratch_ptr >= (alloc_sz))
447
448#define DTRACE_LOADFUNC(bits) \
449/*CSTYLED*/ \
450VBDTSTATIC uint##bits##_t \
451dtrace_load##bits(uintptr_t addr) \
452{ \
453 size_t size = bits / NBBY; \
454 /*CSTYLED*/ \
455 uint##bits##_t rval; \
456 int i; \
457 processorid_t me = VBDT_GET_CPUID(); \
458 volatile uint16_t *flags = (volatile uint16_t *) \
459 &cpu_core[me].cpuc_dtrace_flags; \
460 \
461 DTRACE_ALIGNCHECK(addr, size, flags); \
462 \
463 for (i = 0; i < dtrace_toxranges; i++) { \
464 if (addr >= dtrace_toxrange[i].dtt_limit) \
465 continue; \
466 \
467 if (addr + size <= dtrace_toxrange[i].dtt_base) \
468 continue; \
469 \
470 /* \
471 * This address falls within a toxic region; return 0. \
472 */ \
473 *flags |= CPU_DTRACE_BADADDR; \
474 cpu_core[me].cpuc_dtrace_illval = addr; \
475 return (0); \
476 } \
477 \
478 *flags |= CPU_DTRACE_NOFAULT; \
479 /*CSTYLED*/ \
480 rval = *((volatile uint##bits##_t *)addr); \
481 *flags &= ~CPU_DTRACE_NOFAULT; \
482 \
483 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
484}
485
486#ifdef _LP64
487#define dtrace_loadptr dtrace_load64
488#else
489#define dtrace_loadptr dtrace_load32
490#endif
491
492#define DTRACE_DYNHASH_FREE 0
493#define DTRACE_DYNHASH_SINK 1
494#define DTRACE_DYNHASH_VALID 2
495
496#define DTRACE_MATCH_FAIL -1
497#define DTRACE_MATCH_NEXT 0
498#define DTRACE_MATCH_DONE 1
499#define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
500#define DTRACE_STATE_ALIGN 64
501
502#define DTRACE_FLAGS2FLT(flags) \
503 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
504 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
505 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
506 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
507 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
508 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
509 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
510 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
511 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
512 DTRACEFLT_UNKNOWN)
513
514#define DTRACEACT_ISSTRING(act) \
515 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
516 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
517
518static size_t dtrace_strlen(const char *, size_t);
519static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
520static void dtrace_enabling_provide(dtrace_provider_t *);
521static int dtrace_enabling_match(dtrace_enabling_t *, int *);
522static void dtrace_enabling_matchall(void);
523static dtrace_state_t *dtrace_anon_grab(void);
524#ifndef VBOX
525static uint64_t dtrace_helper(int, dtrace_mstate_t *,
526 dtrace_state_t *, uint64_t, uint64_t);
527static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
528#endif
529static void dtrace_buffer_drop(dtrace_buffer_t *);
530static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
531 dtrace_state_t *, dtrace_mstate_t *);
532static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
533 dtrace_optval_t);
534static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
535static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
536
537/*
538 * DTrace Probe Context Functions
539 *
540 * These functions are called from probe context. Because probe context is
541 * any context in which C may be called, arbitrary locks may be held,
542 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
543 * As a result, functions called from probe context may only call other DTrace
544 * support functions -- they may not interact at all with the system at large.
545 * (Note that the ASSERT macro is made probe-context safe by redefining it in
546 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
547 * loads are to be performed from probe context, they _must_ be in terms of
548 * the safe dtrace_load*() variants.
549 *
550 * Some functions in this block are not actually called from probe context;
551 * for these functions, there will be a comment above the function reading
552 * "Note: not called from probe context."
553 */
554void
555dtrace_panic(const char *format, ...)
556{
557 va_list alist;
558
559 va_start(alist, format);
560 dtrace_vpanic(format, alist);
561 va_end(alist);
562}
563
564int
565dtrace_assfail(const char *a, const char *f, int l)
566{
567 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
568
569 /*
570 * We just need something here that even the most clever compiler
571 * cannot optimize away.
572 */
573 return (a[(uintptr_t)f]);
574}
575
576/*
577 * Atomically increment a specified error counter from probe context.
578 */
579static void
580dtrace_error(uint32_t *counter)
581{
582 /*
583 * Most counters stored to in probe context are per-CPU counters.
584 * However, there are some error conditions that are sufficiently
585 * arcane that they don't merit per-CPU storage. If these counters
586 * are incremented concurrently on different CPUs, scalability will be
587 * adversely affected -- but we don't expect them to be white-hot in a
588 * correctly constructed enabling...
589 */
590 uint32_t oval, nval;
591
592 do {
593 oval = *counter;
594
595 if ((nval = oval + 1) == 0) {
596 /*
597 * If the counter would wrap, set it to 1 -- assuring
598 * that the counter is never zero when we have seen
599 * errors. (The counter must be 32-bits because we
600 * aren't guaranteed a 64-bit compare&swap operation.)
601 * To save this code both the infamy of being fingered
602 * by a priggish news story and the indignity of being
603 * the target of a neo-puritan witch trial, we're
604 * carefully avoiding any colorful description of the
605 * likelihood of this condition -- but suffice it to
606 * say that it is only slightly more likely than the
607 * overflow of predicate cache IDs, as discussed in
608 * dtrace_predicate_create().
609 */
610 nval = 1;
611 }
612 } while (dtrace_cas32(counter, oval, nval) != oval);
613}
614
615/*
616 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
617 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
618 */
619DTRACE_LOADFUNC(8)
620DTRACE_LOADFUNC(16)
621DTRACE_LOADFUNC(32)
622DTRACE_LOADFUNC(64)
623
624static int
625dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
626{
627 if (dest < mstate->dtms_scratch_base)
628 return (0);
629
630 if (dest + size < dest)
631 return (0);
632
633 if (dest + size > mstate->dtms_scratch_ptr)
634 return (0);
635
636 return (1);
637}
638
639static int
640dtrace_canstore_statvar(uint64_t addr, size_t sz,
641 dtrace_statvar_t **svars, int nsvars)
642{
643 int i;
644
645 for (i = 0; i < nsvars; i++) {
646 dtrace_statvar_t *svar = svars[i];
647
648 if (svar == NULL || svar->dtsv_size == 0)
649 continue;
650
651 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
652 return (1);
653 }
654
655 return (0);
656}
657
658/*
659 * Check to see if the address is within a memory region to which a store may
660 * be issued. This includes the DTrace scratch areas, and any DTrace variable
661 * region. The caller of dtrace_canstore() is responsible for performing any
662 * alignment checks that are needed before stores are actually executed.
663 */
664static int
665dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
666 dtrace_vstate_t *vstate)
667{
668 /*
669 * First, check to see if the address is in scratch space...
670 */
671 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
672 mstate->dtms_scratch_size))
673 return (1);
674
675 /*
676 * Now check to see if it's a dynamic variable. This check will pick
677 * up both thread-local variables and any global dynamically-allocated
678 * variables.
679 */
680 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
681 vstate->dtvs_dynvars.dtds_size)) {
682 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
683 uintptr_t base = (uintptr_t)dstate->dtds_base +
684 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
685 uintptr_t chunkoffs;
686
687 /*
688 * Before we assume that we can store here, we need to make
689 * sure that it isn't in our metadata -- storing to our
690 * dynamic variable metadata would corrupt our state. For
691 * the range to not include any dynamic variable metadata,
692 * it must:
693 *
694 * (1) Start above the hash table that is at the base of
695 * the dynamic variable space
696 *
697 * (2) Have a starting chunk offset that is beyond the
698 * dtrace_dynvar_t that is at the base of every chunk
699 *
700 * (3) Not span a chunk boundary
701 *
702 */
703 if (addr < base)
704 return (0);
705
706 chunkoffs = (addr - base) % dstate->dtds_chunksize;
707
708 if (chunkoffs < sizeof (dtrace_dynvar_t))
709 return (0);
710
711 if (chunkoffs + sz > dstate->dtds_chunksize)
712 return (0);
713
714 return (1);
715 }
716
717 /*
718 * Finally, check the static local and global variables. These checks
719 * take the longest, so we perform them last.
720 */
721 if (dtrace_canstore_statvar(addr, sz,
722 vstate->dtvs_locals, vstate->dtvs_nlocals))
723 return (1);
724
725 if (dtrace_canstore_statvar(addr, sz,
726 vstate->dtvs_globals, vstate->dtvs_nglobals))
727 return (1);
728
729 return (0);
730}
731
732
733/*
734 * Convenience routine to check to see if the address is within a memory
735 * region in which a load may be issued given the user's privilege level;
736 * if not, it sets the appropriate error flags and loads 'addr' into the
737 * illegal value slot.
738 *
739 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
740 * appropriate memory access protection.
741 */
742static int
743dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
744 dtrace_vstate_t *vstate)
745{
746 volatile uintptr_t *illval = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval;
747
748 /*
749 * If we hold the privilege to read from kernel memory, then
750 * everything is readable.
751 */
752 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
753 return (1);
754
755 /*
756 * You can obviously read that which you can store.
757 */
758 if (dtrace_canstore(addr, sz, mstate, vstate))
759 return (1);
760
761 /*
762 * We're allowed to read from our own string table.
763 */
764 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
765 mstate->dtms_difo->dtdo_strlen))
766 return (1);
767
768 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
769 *illval = addr;
770 return (0);
771}
772
773/*
774 * Convenience routine to check to see if a given string is within a memory
775 * region in which a load may be issued given the user's privilege level;
776 * this exists so that we don't need to issue unnecessary dtrace_strlen()
777 * calls in the event that the user has all privileges.
778 */
779static int
780dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
781 dtrace_vstate_t *vstate)
782{
783 size_t strsz;
784
785 /*
786 * If we hold the privilege to read from kernel memory, then
787 * everything is readable.
788 */
789 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
790 return (1);
791
792 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
793 if (dtrace_canload(addr, strsz, mstate, vstate))
794 return (1);
795
796 return (0);
797}
798
799/*
800 * Convenience routine to check to see if a given variable is within a memory
801 * region in which a load may be issued given the user's privilege level.
802 */
803static int
804dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
805 dtrace_vstate_t *vstate)
806{
807 size_t sz;
808 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
809
810 /*
811 * If we hold the privilege to read from kernel memory, then
812 * everything is readable.
813 */
814 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
815 return (1);
816
817 if (type->dtdt_kind == DIF_TYPE_STRING)
818 sz = dtrace_strlen(src,
819 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
820 else
821 sz = type->dtdt_size;
822
823 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
824}
825
826/*
827 * Compare two strings using safe loads.
828 */
829static int
830dtrace_strncmp(char *s1, char *s2, size_t limit)
831{
832 uint8_t c1, c2;
833 volatile uint16_t *flags;
834
835 if (s1 == s2 || limit == 0)
836 return (0);
837
838 flags = (volatile uint16_t *)&cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
839
840 do {
841 if (s1 == NULL) {
842 c1 = '\0';
843 } else {
844 c1 = dtrace_load8((uintptr_t)s1++);
845 }
846
847 if (s2 == NULL) {
848 c2 = '\0';
849 } else {
850 c2 = dtrace_load8((uintptr_t)s2++);
851 }
852
853 if (c1 != c2)
854 return (c1 - c2);
855 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
856
857 return (0);
858}
859
860/*
861 * Compute strlen(s) for a string using safe memory accesses. The additional
862 * lim parameter is used to specify a maximum length to ensure completion.
863 */
864static size_t
865dtrace_strlen(const char *s, size_t lim)
866{
867 uint_t len;
868
869 for (len = 0; len != lim; len++) {
870 if (dtrace_load8((uintptr_t)s++) == '\0')
871 break;
872 }
873
874 return (len);
875}
876
877/*
878 * Check if an address falls within a toxic region.
879 */
880static int
881dtrace_istoxic(uintptr_t kaddr, size_t size)
882{
883 uintptr_t taddr, tsize;
884 int i;
885
886 for (i = 0; i < dtrace_toxranges; i++) {
887 taddr = dtrace_toxrange[i].dtt_base;
888 tsize = dtrace_toxrange[i].dtt_limit - taddr;
889
890 if (kaddr - taddr < tsize) {
891 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
892 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = kaddr;
893 return (1);
894 }
895
896 if (taddr - kaddr < size) {
897 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
898 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = taddr;
899 return (1);
900 }
901 }
902
903 return (0);
904}
905
906/*
907 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
908 * memory specified by the DIF program. The dst is assumed to be safe memory
909 * that we can store to directly because it is managed by DTrace. As with
910 * standard bcopy, overlapping copies are handled properly.
911 */
912static void
913dtrace_bcopy(const void *src, void *dst, size_t len)
914{
915 if (len != 0) {
916 uint8_t *s1 = dst;
917 const uint8_t *s2 = src;
918
919 if (s1 <= s2) {
920 do {
921 *s1++ = dtrace_load8((uintptr_t)s2++);
922 } while (--len != 0);
923 } else {
924 s2 += len;
925 s1 += len;
926
927 do {
928 *--s1 = dtrace_load8((uintptr_t)--s2);
929 } while (--len != 0);
930 }
931 }
932}
933
934/*
935 * Copy src to dst using safe memory accesses, up to either the specified
936 * length, or the point that a nul byte is encountered. The src is assumed to
937 * be unsafe memory specified by the DIF program. The dst is assumed to be
938 * safe memory that we can store to directly because it is managed by DTrace.
939 * Unlike dtrace_bcopy(), overlapping regions are not handled.
940 */
941static void
942dtrace_strcpy(const void *src, void *dst, size_t len)
943{
944 if (len != 0) {
945 uint8_t *s1 = dst, c;
946 const uint8_t *s2 = src;
947
948 do {
949 *s1++ = c = dtrace_load8((uintptr_t)s2++);
950 } while (--len != 0 && c != '\0');
951 }
952}
953
954/*
955 * Copy src to dst, deriving the size and type from the specified (BYREF)
956 * variable type. The src is assumed to be unsafe memory specified by the DIF
957 * program. The dst is assumed to be DTrace variable memory that is of the
958 * specified type; we assume that we can store to directly.
959 */
960static void
961dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
962{
963 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
964
965 if (type->dtdt_kind == DIF_TYPE_STRING) {
966 dtrace_strcpy(src, dst, type->dtdt_size);
967 } else {
968 dtrace_bcopy(src, dst, type->dtdt_size);
969 }
970}
971
972/*
973 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
974 * unsafe memory specified by the DIF program. The s2 data is assumed to be
975 * safe memory that we can access directly because it is managed by DTrace.
976 */
977static int
978dtrace_bcmp(const void *s1, const void *s2, size_t len)
979{
980 volatile uint16_t *flags;
981
982 flags = (volatile uint16_t *)&cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
983
984 if (s1 == s2)
985 return (0);
986
987 if (s1 == NULL || s2 == NULL)
988 return (1);
989
990 if (s1 != s2 && len != 0) {
991 const uint8_t *ps1 = s1;
992 const uint8_t *ps2 = s2;
993
994 do {
995 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
996 return (1);
997 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
998 }
999 return (0);
1000}
1001
1002/*
1003 * Zero the specified region using a simple byte-by-byte loop. Note that this
1004 * is for safe DTrace-managed memory only.
1005 */
1006static void
1007dtrace_bzero(void *dst, size_t len)
1008{
1009 uchar_t *cp;
1010
1011 for (cp = dst; len != 0; len--)
1012 *cp++ = 0;
1013}
1014
1015static void
1016dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1017{
1018 uint64_t result[2];
1019
1020 result[0] = addend1[0] + addend2[0];
1021 result[1] = addend1[1] + addend2[1] +
1022 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1023
1024 sum[0] = result[0];
1025 sum[1] = result[1];
1026}
1027
1028/*
1029 * Shift the 128-bit value in a by b. If b is positive, shift left.
1030 * If b is negative, shift right.
1031 */
1032static void
1033dtrace_shift_128(uint64_t *a, int b)
1034{
1035 uint64_t mask;
1036
1037 if (b == 0)
1038 return;
1039
1040 if (b < 0) {
1041 b = -b;
1042 if (b >= 64) {
1043 a[0] = a[1] >> (b - 64);
1044 a[1] = 0;
1045 } else {
1046 a[0] >>= b;
1047 mask = 1LL << (64 - b);
1048 mask -= 1;
1049 a[0] |= ((a[1] & mask) << (64 - b));
1050 a[1] >>= b;
1051 }
1052 } else {
1053 if (b >= 64) {
1054 a[1] = a[0] << (b - 64);
1055 a[0] = 0;
1056 } else {
1057 a[1] <<= b;
1058 mask = a[0] >> (64 - b);
1059 a[1] |= mask;
1060 a[0] <<= b;
1061 }
1062 }
1063}
1064
1065/*
1066 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1067 * use native multiplication on those, and then re-combine into the
1068 * resulting 128-bit value.
1069 *
1070 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1071 * hi1 * hi2 << 64 +
1072 * hi1 * lo2 << 32 +
1073 * hi2 * lo1 << 32 +
1074 * lo1 * lo2
1075 */
1076static void
1077dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1078{
1079 uint64_t hi1, hi2, lo1, lo2;
1080 uint64_t tmp[2];
1081
1082 hi1 = factor1 >> 32;
1083 hi2 = factor2 >> 32;
1084
1085 lo1 = factor1 & DT_MASK_LO;
1086 lo2 = factor2 & DT_MASK_LO;
1087
1088 product[0] = lo1 * lo2;
1089 product[1] = hi1 * hi2;
1090
1091 tmp[0] = hi1 * lo2;
1092 tmp[1] = 0;
1093 dtrace_shift_128(tmp, 32);
1094 dtrace_add_128(product, tmp, product);
1095
1096 tmp[0] = hi2 * lo1;
1097 tmp[1] = 0;
1098 dtrace_shift_128(tmp, 32);
1099 dtrace_add_128(product, tmp, product);
1100}
1101
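/*
 * Editorial self-test, not part of the original source: checking the
 * result against a compiler-provided 128-bit multiply (assumes the
 * non-standard unsigned __int128 extension).
 */
#if 0
static void
dtrace_multiply_128_check(uint64_t f1, uint64_t f2)
{
	uint64_t product[2];
	unsigned __int128 ref = (unsigned __int128)f1 * f2;

	dtrace_multiply_128(f1, f2, product);
	ASSERT(product[0] == (uint64_t)ref);		/* low 64 bits */
	ASSERT(product[1] == (uint64_t)(ref >> 64));	/* high 64 bits */
}
#endif
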
1102/*
1103 * This privilege check should be used by actions and subroutines to
1104 * verify that the user credentials of the process that enabled the
1105 * invoking ECB match the target credentials
1106 */
1107static int
1108dtrace_priv_proc_common_user(dtrace_state_t *state)
1109{
1110 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1111
1112 /*
1113 * We should always have a non-NULL state cred here, since if cred
1114 * is null (anonymous tracing), we fast-path bypass this routine.
1115 */
1116 ASSERT(s_cr != NULL);
1117
1118 if ((cr = CRED()) != NULL &&
1119 s_cr->cr_uid == cr->cr_uid &&
1120 s_cr->cr_uid == cr->cr_ruid &&
1121 s_cr->cr_uid == cr->cr_suid &&
1122 s_cr->cr_gid == cr->cr_gid &&
1123 s_cr->cr_gid == cr->cr_rgid &&
1124 s_cr->cr_gid == cr->cr_sgid)
1125 return (1);
1126
1127 return (0);
1128}
1129
1130/*
1131 * This privilege check should be used by actions and subroutines to
1132 * verify that the zone of the process that enabled the invoking ECB
1133 * matches the target credentials
1134 */
1135static int
1136dtrace_priv_proc_common_zone(dtrace_state_t *state)
1137{
1138 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1139
1140 /*
1141 * We should always have a non-NULL state cred here, since if cred
1142 * is null (anonymous tracing), we fast-path bypass this routine.
1143 */
1144 ASSERT(s_cr != NULL);
1145
1146 if ((cr = CRED()) != NULL &&
1147 s_cr->cr_zone == cr->cr_zone)
1148 return (1);
1149
1150 return (0);
1151}
1152
1153/*
1154 * This privilege check should be used by actions and subroutines to
1155 * verify that the process has not setuid or changed credentials.
1156 */
1157static int
1158dtrace_priv_proc_common_nocd(VBDTVOID)
1159{
1160#ifndef VBOX
1161 proc_t *proc;
1162
1163 if ((proc = VBDT_GET_PROC()) != NULL &&
1164 !(proc->p_flag & SNOCD))
1165 return (1);
1166
1167 return (0);
1168#else
1169 return (1);
1170#endif
1171}
1172
1173static int
1174dtrace_priv_proc_destructive(dtrace_state_t *state)
1175{
1176 int action = state->dts_cred.dcr_action;
1177
1178 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1179 dtrace_priv_proc_common_zone(state) == 0)
1180 goto bad;
1181
1182 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1183 dtrace_priv_proc_common_user(state) == 0)
1184 goto bad;
1185
1186 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1187 dtrace_priv_proc_common_nocd() == 0)
1188 goto bad;
1189
1190 return (1);
1191
1192bad:
1193 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1194
1195 return (0);
1196}
1197
1198static int
1199dtrace_priv_proc_control(dtrace_state_t *state)
1200{
1201 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1202 return (1);
1203
1204 if (dtrace_priv_proc_common_zone(state) &&
1205 dtrace_priv_proc_common_user(state) &&
1206 dtrace_priv_proc_common_nocd())
1207 return (1);
1208
1209 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1210
1211 return (0);
1212}
1213
1214static int
1215dtrace_priv_proc(dtrace_state_t *state)
1216{
1217 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1218 return (1);
1219
1220 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1221
1222 return (0);
1223}
1224
1225static int
1226dtrace_priv_kernel(dtrace_state_t *state)
1227{
1228 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1229 return (1);
1230
1231 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1232
1233 return (0);
1234}
1235
1236static int
1237dtrace_priv_kernel_destructive(dtrace_state_t *state)
1238{
1239 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1240 return (1);
1241
1242 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1243
1244 return (0);
1245}
1246
1247/*
1248 * Note: not called from probe context. This function is called
1249 * asynchronously (and at a regular interval) from outside of probe context to
1250 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1251 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1252 */
1253VBDTSTATIC void
1254dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1255{
1256 dtrace_dynvar_t *dirty;
1257 dtrace_dstate_percpu_t *dcpu;
1258 dtrace_dynvar_t **rinsep;
1259 int i, j, work = 0;
1260
1261 for (i = 0; i < NCPU; i++) {
1262 dcpu = &dstate->dtds_percpu[i];
1263 rinsep = &dcpu->dtdsc_rinsing;
1264
1265 /*
1266 * If the dirty list is NULL, there is no dirty work to do.
1267 */
1268 if (dcpu->dtdsc_dirty == NULL)
1269 continue;
1270
1271 if (dcpu->dtdsc_rinsing != NULL) {
1272 /*
1273 * If the rinsing list is non-NULL, then it is because
1274 * this CPU was selected to accept another CPU's
1275 * dirty list -- and since that time, dirty buffers
1276 * have accumulated. This is a highly unlikely
1277 * condition, but we choose to ignore the dirty
1278 * buffers -- they'll be picked up in a future cleanse.
1279 */
1280 continue;
1281 }
1282
1283 if (dcpu->dtdsc_clean != NULL) {
1284 /*
1285 * If the clean list is non-NULL, then we're in a
1286 * situation where a CPU has done deallocations (we
1287 * have a non-NULL dirty list) but no allocations (we
1288 * also have a non-NULL clean list). We can't simply
1289 * move the dirty list into the clean list on this
1290 * CPU, yet we also don't want to allow this condition
1291 * to persist, lest a short clean list prevent a
1292 * massive dirty list from being cleaned (which in
1293 * turn could lead to otherwise avoidable dynamic
1294 * drops). To deal with this, we look for some CPU
1295 * with a NULL clean list, NULL dirty list, and NULL
1296 * rinsing list -- and then we borrow this CPU to
1297 * rinse our dirty list.
1298 */
1299 for (j = 0; j < NCPU; j++) {
1300 dtrace_dstate_percpu_t *rinser;
1301
1302 rinser = &dstate->dtds_percpu[j];
1303
1304 if (rinser->dtdsc_rinsing != NULL)
1305 continue;
1306
1307 if (rinser->dtdsc_dirty != NULL)
1308 continue;
1309
1310 if (rinser->dtdsc_clean != NULL)
1311 continue;
1312
1313 rinsep = &rinser->dtdsc_rinsing;
1314 break;
1315 }
1316
1317 if (j == NCPU) {
1318 /*
1319 * We were unable to find another CPU that
1320 * could accept this dirty list -- we are
1321 * therefore unable to clean it now.
1322 */
1323 dtrace_dynvar_failclean++;
1324 continue;
1325 }
1326 }
1327
1328 work = 1;
1329
1330 /*
1331 * Atomically move the dirty list aside.
1332 */
1333 do {
1334 dirty = dcpu->dtdsc_dirty;
1335
1336 /*
1337 * Before we zap the dirty list, set the rinsing list.
1338 * (This allows for a potential assertion in
1339 * dtrace_dynvar(): if a free dynamic variable appears
1340 * on a hash chain, either the dirty list or the
1341 * rinsing list for some CPU must be non-NULL.)
1342 */
1343 *rinsep = dirty;
1344 dtrace_membar_producer();
1345 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1346 dirty, NULL) != dirty);
1347 }
1348
1349 if (!work) {
1350 /*
1351 * We have no work to do; we can simply return.
1352 */
1353 return;
1354 }
1355
1356 dtrace_sync();
1357
1358 for (i = 0; i < NCPU; i++) {
1359 dcpu = &dstate->dtds_percpu[i];
1360
1361 if (dcpu->dtdsc_rinsing == NULL)
1362 continue;
1363
1364 /*
1365 * We are now guaranteed that no hash chain contains a pointer
1366 * into this dirty list; we can make it clean.
1367 */
1368 ASSERT(dcpu->dtdsc_clean == NULL);
1369 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1370 dcpu->dtdsc_rinsing = NULL;
1371 }
1372
1373 /*
1374 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1375 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1376 * This prevents a race whereby a CPU incorrectly decides that
1377 * the state should be something other than DTRACE_DSTATE_CLEAN
1378 * after dtrace_dynvar_clean() has completed.
1379 */
1380 dtrace_sync();
1381
1382 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1383}
1384
1385/*
1386 * Depending on the value of the op parameter, this function looks up,
1387 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1388 * allocation is requested, this function will return a pointer to a
1389 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1390 * variable can be allocated. If NULL is returned, the appropriate counter
1391 * will be incremented.
1392 */
1393VBDTSTATIC dtrace_dynvar_t *
1394dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1395 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1396 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1397{
1398 uint64_t hashval = DTRACE_DYNHASH_VALID;
1399 dtrace_dynhash_t *hash = dstate->dtds_hash;
1400 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1401 processorid_t me = VBDT_GET_CPUID(), cpu = me;
1402 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1403 size_t bucket, ksize;
1404 size_t chunksize = dstate->dtds_chunksize;
1405 uintptr_t kdata, lock, nstate;
1406 uint_t i;
1407
1408 ASSERT(nkeys != 0);
1409
1410 /*
1411 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1412 * algorithm. For the by-value portions, we perform the algorithm in
1413 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1414 * bit, and seems to have only a minute effect on distribution. For
1415 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1416 * over each referenced byte. It's painful to do this, but it's much
1417 * better than pathological hash distribution. The efficacy of the
1418 * hashing algorithm (and a comparison with other algorithms) may be
1419 * found by running the ::dtrace_dynstat MDB dcmd.
1420 */
1421 for (i = 0; i < nkeys; i++) {
1422 if (key[i].dttk_size == 0) {
1423 uint64_t val = key[i].dttk_value;
1424
1425 hashval += (val >> 48) & 0xffff;
1426 hashval += (hashval << 10);
1427 hashval ^= (hashval >> 6);
1428
1429 hashval += (val >> 32) & 0xffff;
1430 hashval += (hashval << 10);
1431 hashval ^= (hashval >> 6);
1432
1433 hashval += (val >> 16) & 0xffff;
1434 hashval += (hashval << 10);
1435 hashval ^= (hashval >> 6);
1436
1437 hashval += val & 0xffff;
1438 hashval += (hashval << 10);
1439 hashval ^= (hashval >> 6);
1440 } else {
1441 /*
1442 * This is incredibly painful, but it beats the hell
1443 * out of the alternative.
1444 */
1445 uint64_t j, size = key[i].dttk_size;
1446 uintptr_t base = (uintptr_t)key[i].dttk_value;
1447
1448 if (!dtrace_canload(base, size, mstate, vstate))
1449 break;
1450
1451 for (j = 0; j < size; j++) {
1452 hashval += dtrace_load8(base + j);
1453 hashval += (hashval << 10);
1454 hashval ^= (hashval >> 6);
1455 }
1456 }
1457 }
1458
1459 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1460 return (NULL);
1461
1462 hashval += (hashval << 3);
1463 hashval ^= (hashval >> 11);
1464 hashval += (hashval << 15);
1465
1466 /*
1467 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1468 * comes out to be one of our two sentinel hash values. If this
1469 * actually happens, we set the hashval to be a value known to be a
1470 * non-sentinel value.
1471 */
1472 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1473 hashval = DTRACE_DYNHASH_VALID;
1474
1475 /*
1476 * Yes, it's painful to do a divide here. If the cycle count becomes
1477 * important here, tricks can be pulled to reduce it. (However, it's
1478 * critical that hash collisions be kept to an absolute minimum;
1479 * they're much more painful than a divide.) It's better to have a
1480 * solution that generates few collisions and still keeps things
1481 * relatively simple.
1482 */
1483 bucket = hashval % dstate->dtds_hashsize;
1484
1485 if (op == DTRACE_DYNVAR_DEALLOC) {
1486 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1487
1488 for (;;) {
1489 while ((lock = *lockp) & 1)
1490 continue;
1491
1492 if (dtrace_casptr((void *)lockp,
1493 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1494 break;
1495 }
1496
1497 dtrace_membar_producer();
1498 }
1499
1500top:
1501 prev = NULL;
1502 lock = hash[bucket].dtdh_lock;
1503
1504 dtrace_membar_consumer();
1505
1506 start = hash[bucket].dtdh_chain;
1507 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1508 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1509 op != DTRACE_DYNVAR_DEALLOC));
1510
1511 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1512 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1513 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1514
1515 if (dvar->dtdv_hashval != hashval) {
1516 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1517 /*
1518 * We've reached the sink, and therefore the
1519 * end of the hash chain; we can kick out of
1520 * the loop knowing that we have seen a valid
1521 * snapshot of state.
1522 */
1523 ASSERT(dvar->dtdv_next == NULL);
1524 ASSERT(dvar == &dtrace_dynhash_sink);
1525 break;
1526 }
1527
1528 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1529 /*
1530 * We've gone off the rails: somewhere along
1531 * the line, one of the members of this hash
1532 * chain was deleted. Note that we could also
1533 * detect this by simply letting this loop run
1534 * to completion, as we would eventually hit
1535 * the end of the dirty list. However, we
1536 * want to avoid running the length of the
1537 * dirty list unnecessarily (it might be quite
1538 * long), so we catch this as early as
1539 * possible by detecting the hash marker. In
1540 * this case, we simply set dvar to NULL and
1541 * break; the conditional after the loop will
1542 * send us back to top.
1543 */
1544 dvar = NULL;
1545 break;
1546 }
1547
1548 goto next;
1549 }
1550
1551 if (dtuple->dtt_nkeys != nkeys)
1552 goto next;
1553
1554 for (i = 0; i < nkeys; i++, dkey++) {
1555 if (dkey->dttk_size != key[i].dttk_size)
1556 goto next; /* size or type mismatch */
1557
1558 if (dkey->dttk_size != 0) {
1559 if (dtrace_bcmp(
1560 (void *)(uintptr_t)key[i].dttk_value,
1561 (void *)(uintptr_t)dkey->dttk_value,
1562 dkey->dttk_size))
1563 goto next;
1564 } else {
1565 if (dkey->dttk_value != key[i].dttk_value)
1566 goto next;
1567 }
1568 }
1569
1570 if (op != DTRACE_DYNVAR_DEALLOC)
1571 return (dvar);
1572
1573 ASSERT(dvar->dtdv_next == NULL ||
1574 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1575
1576 if (prev != NULL) {
1577 ASSERT(hash[bucket].dtdh_chain != dvar);
1578 ASSERT(start != dvar);
1579 ASSERT(prev->dtdv_next == dvar);
1580 prev->dtdv_next = dvar->dtdv_next;
1581 } else {
1582 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1583 start, dvar->dtdv_next) != start) {
1584 /*
1585 * We have failed to atomically swing the
1586 * hash table head pointer, presumably because
1587 * of a conflicting allocation on another CPU.
1588 * We need to reread the hash chain and try
1589 * again.
1590 */
1591 goto top;
1592 }
1593 }
1594
1595 dtrace_membar_producer();
1596
1597 /*
1598 * Now set the hash value to indicate that it's free.
1599 */
1600 ASSERT(hash[bucket].dtdh_chain != dvar);
1601 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1602
1603 dtrace_membar_producer();
1604
1605 /*
1606 * Set the next pointer to point at the dirty list, and
1607 * atomically swing the dirty pointer to the newly freed dvar.
1608 */
1609 do {
1610 next = dcpu->dtdsc_dirty;
1611 dvar->dtdv_next = next;
1612 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1613
1614 /*
1615 * Finally, unlock this hash bucket.
1616 */
1617 ASSERT(hash[bucket].dtdh_lock == lock);
1618 ASSERT(lock & 1);
1619 hash[bucket].dtdh_lock++;
1620
1621 return (NULL);
1622next:
1623 prev = dvar;
1624 continue;
1625 }
1626
1627 if (dvar == NULL) {
1628 /*
1629 * If dvar is NULL, it is because we went off the rails:
1630 * one of the elements that we traversed in the hash chain
1631 * was deleted while we were traversing it. In this case,
1632 * we assert that we aren't doing a dealloc (deallocs lock
1633 * the hash bucket to prevent themselves from racing with
1634 * one another), and retry the hash chain traversal.
1635 */
1636 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1637 goto top;
1638 }
1639
1640 if (op != DTRACE_DYNVAR_ALLOC) {
1641 /*
1642 * If we are not to allocate a new variable, we want to
1643 * return NULL now. Before we return, check that the value
1644 * of the lock word hasn't changed. If it has, we may have
1645 * seen an inconsistent snapshot.
1646 */
1647 if (op == DTRACE_DYNVAR_NOALLOC) {
1648 if (hash[bucket].dtdh_lock != lock)
1649 goto top;
1650 } else {
1651 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1652 ASSERT(hash[bucket].dtdh_lock == lock);
1653 ASSERT(lock & 1);
1654 hash[bucket].dtdh_lock++;
1655 }
1656
1657 return (NULL);
1658 }
1659
1660 /*
1661 * We need to allocate a new dynamic variable. The size we need is the
1662 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1663 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1664 * the size of any referred-to data (dsize). We then round the final
1665 * size up to the chunksize for allocation.
1666 */
1667 for (ksize = 0, i = 0; i < nkeys; i++)
1668 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1669
1670 /*
1671 * This should be pretty much impossible, but could happen if, say,
1672 * strange DIF specified the tuple. Ideally, this should be an
1673 * assertion and not an error condition -- but that requires that the
1674 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1675 * bullet-proof. (That is, it must not be able to be fooled by
1676 * malicious DIF.) Given the lack of backwards branches in DIF,
1677 * solving this would presumably not amount to solving the Halting
1678 * Problem -- but it still seems awfully hard.
1679 */
1680 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1681 ksize + dsize > chunksize) {
1682 dcpu->dtdsc_drops++;
1683 return (NULL);
1684 }
1685
1686 nstate = DTRACE_DSTATE_EMPTY;
1687
1688 do {
1689retry:
1690 free = dcpu->dtdsc_free;
1691
1692 if (free == NULL) {
1693 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1694 void *rval;
1695
1696 if (clean == NULL) {
1697 /*
1698 * We're out of dynamic variable space on
1699 * this CPU. Unless we have tried all CPUs,
1700 * we'll try to allocate from a different
1701 * CPU.
1702 */
1703 switch (dstate->dtds_state) {
1704 case DTRACE_DSTATE_CLEAN: {
1705 void *sp = &dstate->dtds_state;
1706
1707 if (++cpu >= NCPU)
1708 cpu = 0;
1709
1710 if (dcpu->dtdsc_dirty != NULL &&
1711 nstate == DTRACE_DSTATE_EMPTY)
1712 nstate = DTRACE_DSTATE_DIRTY;
1713
1714 if (dcpu->dtdsc_rinsing != NULL)
1715 nstate = DTRACE_DSTATE_RINSING;
1716
1717 dcpu = &dstate->dtds_percpu[cpu];
1718
1719 if (cpu != me)
1720 goto retry;
1721
1722 (void) dtrace_cas32(sp,
1723 DTRACE_DSTATE_CLEAN, nstate);
1724
1725 /*
1726 * To increment the correct bean
1727 * counter, take another lap.
1728 */
1729 goto retry;
1730 }
1731
1732 case DTRACE_DSTATE_DIRTY:
1733 dcpu->dtdsc_dirty_drops++;
1734 break;
1735
1736 case DTRACE_DSTATE_RINSING:
1737 dcpu->dtdsc_rinsing_drops++;
1738 break;
1739
1740 case DTRACE_DSTATE_EMPTY:
1741 dcpu->dtdsc_drops++;
1742 break;
1743 }
1744
1745 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1746 return (NULL);
1747 }
1748
1749 /*
1750 * The clean list appears to be non-empty. We want to
1751 * move the clean list to the free list; we start by
1752 * moving the clean pointer aside.
1753 */
1754 if (dtrace_casptr(&dcpu->dtdsc_clean,
1755 clean, NULL) != clean) {
1756 /*
1757 * We are in one of two situations:
1758 *
1759 * (a) The clean list was switched to the
1760 * free list by another CPU.
1761 *
1762 * (b) The clean list was added to by the
1763 * cleansing cyclic.
1764 *
1765 * In either of these situations, we can
1766 * just reattempt the free list allocation.
1767 */
1768 goto retry;
1769 }
1770
1771 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1772
1773 /*
1774 * Now we'll move the clean list to our free list.
1775 * It's impossible for this to fail: the only way
1776 * the free list can be updated is through this
1777 * code path, and only one CPU can own the clean list.
1778 * Thus, it would only be possible for this to fail if
1779 * this code were racing with dtrace_dynvar_clean().
1780 * (That is, if dtrace_dynvar_clean() updated the clean
1781 * list, and we ended up racing to update the free
1782 * list.) This race is prevented by the dtrace_sync()
1783 * in dtrace_dynvar_clean() -- which flushes the
1784 * owners of the clean lists out before resetting
1785 * the clean lists.
1786 */
1787 dcpu = &dstate->dtds_percpu[me];
1788 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1789 ASSERT(rval == NULL);
1790 goto retry;
1791 }
1792
1793 dvar = free;
1794 new_free = dvar->dtdv_next;
1795 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1796
1797 /*
1798 * We have now allocated a new chunk. We copy the tuple keys into the
1799 * tuple array and copy any referenced key data into the data space
1800 * following the tuple array. As we do this, we relocate dttk_value
1801 * in the final tuple to point to the key data address in the chunk.
1802 */
1803 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1804 dvar->dtdv_data = (void *)(kdata + ksize);
1805 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1806
1807 for (i = 0; i < nkeys; i++) {
1808 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1809 size_t kesize = key[i].dttk_size;
1810
1811 if (kesize != 0) {
1812 dtrace_bcopy(
1813 (const void *)(uintptr_t)key[i].dttk_value,
1814 (void *)kdata, kesize);
1815 dkey->dttk_value = kdata;
1816 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1817 } else {
1818 dkey->dttk_value = key[i].dttk_value;
1819 }
1820
1821 dkey->dttk_size = kesize;
1822 }
1823
1824 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1825 dvar->dtdv_hashval = hashval;
1826 dvar->dtdv_next = start;
1827
1828 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1829 return (dvar);
1830
1831 /*
1832 * The cas has failed. Either another CPU is adding an element to
1833 * this hash chain, or another CPU is deleting an element from this
1834 * hash chain. The simplest way to deal with both of these cases
1835 * (though not necessarily the most efficient) is to free our
1836 * allocated block and tail-call ourselves. Note that the free is
1837 * to the dirty list and _not_ to the free list. This is to prevent
1838 * races with allocators, above.
1839 */
1840 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1841
1842 dtrace_membar_producer();
1843
1844 do {
1845 free = dcpu->dtdsc_dirty;
1846 dvar->dtdv_next = free;
1847 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1848
1849 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1850}
1851
1852/*ARGSUSED*/
1853static void
1854dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1855{
1856 if ((int64_t)nval < (int64_t)*oval)
1857 *oval = nval;
1858}
1859
1860/*ARGSUSED*/
1861static void
1862dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1863{
1864 if ((int64_t)nval > (int64_t)*oval)
1865 *oval = nval;
1866}
1867
1868static void
1869dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1870{
1871 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1872 int64_t val = (int64_t)nval;
1873
1874 if (val < 0) {
1875 for (i = 0; i < zero; i++) {
1876 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1877 quanta[i] += incr;
1878 return;
1879 }
1880 }
1881 } else {
1882 for (i = zero + 1; i < VBDTCAST(int)DTRACE_QUANTIZE_NBUCKETS; i++) {
1883 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1884 quanta[i - 1] += incr;
1885 return;
1886 }
1887 }
1888
1889 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1890 return;
1891 }
1892
1893#ifndef VBOX
1894 ASSERT(0);
1895#else
1896 AssertFatalFailed();
1897#endif
1898}
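
/*
 * (Illustrative note, not part of the original source: quantize()
 * buckets are powers of two, mirrored around the zero bucket. For
 * nval == 11, the positive-side loop above stops at the first bucket
 * value strictly greater than 11 -- namely 16 -- and increments the
 * bucket before it, so the value is counted in the bucket labelled 8,
 * i.e. the range [8, 16).)
 */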
1899
1900static void
1901dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1902{
1903 uint64_t arg = *lquanta++;
1904 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1905 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1906 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1907 int32_t val = (int32_t)nval, level;
1908
1909 ASSERT(step != 0);
1910 ASSERT(levels != 0);
1911
1912 if (val < base) {
1913 /*
1914 * This is an underflow.
1915 */
1916 lquanta[0] += incr;
1917 return;
1918 }
1919
1920 level = (val - base) / step;
1921
1922 if (level < levels) {
1923 lquanta[level + 1] += incr;
1924 return;
1925 }
1926
1927 /*
1928 * This is an overflow.
1929 */
1930 lquanta[levels + 1] += incr;
1931}
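
/*
 * (Illustrative note, not part of the original source: for an
 * aggregation specified as lquantize(x, 0, 100, 10), the encoded
 * argument yields base == 0, step == 10 and levels == 10. A value of
 * 37 falls in level (37 - 0) / 10 == 3 and bumps lquanta[4]; values
 * below 0 bump the underflow bucket lquanta[0], and values at or above
 * 100 bump the overflow bucket lquanta[11].)
 */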
1932
1933/*ARGSUSED*/
1934static void
1935dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1936{
1937 data[0]++;
1938 data[1] += nval;
1939}
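
/*
 * (Illustrative note, not part of the original source: only the running
 * count, data[0], and sum, data[1], are maintained in probe context;
 * the consumer derives the average as data[1] / data[0] when the
 * aggregation is ultimately reported.)
 */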
1940
1941/*ARGSUSED*/
1942static void
1943dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1944{
1945 int64_t snval = (int64_t)nval;
1946 uint64_t tmp[2];
1947
1948 data[0]++;
1949 data[1] += nval;
1950
1951 /*
1952 * What we want to say here is:
1953 *
1954 * data[2] += nval * nval;
1955 *
1956 * But given that nval is 64-bit, we could easily overflow, so
1957 * we do this as 128-bit arithmetic.
1958 */
1959 if (snval < 0)
1960 snval = -snval;
1961
1962 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
1963 dtrace_add_128(data + 2, tmp, data + 2);
1964}
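
/*
 * (Illustrative note, not part of the original source: with the count
 * in data[0], the sum in data[1] and the 128-bit sum of squares in
 * data[2..3], the consumer can later derive the standard deviation via
 * the usual identity
 *
 *	stddev = sqrt(sum_of_squares / count - (sum / count)^2)
 *
 * so nothing beyond these three running moments need be kept here.)
 */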
1965
1966/*ARGSUSED*/
1967static void
1968dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
1969{
1970 *oval = *oval + 1;
1971}
1972
1973/*ARGSUSED*/
1974static void
1975dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
1976{
1977 *oval += nval;
1978}
1979
1980/*
1981 * Aggregate given the tuple in the principal data buffer, and the aggregating
1982 * action denoted by the specified dtrace_aggregation_t. The aggregation
1983 * buffer is specified as the buf parameter. This routine does not return
1984 * failure; if there is no space in the aggregation buffer, the data will be
1985 * dropped, and a corresponding counter incremented.
1986 */
1987static void
1988dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1989 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1990{
1991 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1992 uint32_t i, ndx, size, fsize;
1993 uint32_t align = sizeof (uint64_t) - 1;
1994 dtrace_aggbuffer_t *agb;
1995 dtrace_aggkey_t *key;
1996 uint32_t hashval = 0, limit, isstr;
1997 caddr_t tomax, data, kdata;
1998 dtrace_actkind_t action;
1999 dtrace_action_t *act;
2000 uintptr_t offs;
2001
2002 if (buf == NULL)
2003 return;
2004
2005 if (!agg->dtag_hasarg) {
2006 /*
2007 * Currently, only quantize() and lquantize() take additional
2008 * arguments, and they have the same semantics: an increment
2009 * value that defaults to 1 when not present. If additional
2010 * aggregating actions take arguments, the setting of the
2011 * default argument value will presumably have to become more
2012 * sophisticated...
2013 */
2014 arg = 1;
2015 }
2016
2017 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2018 size = rec->dtrd_offset - agg->dtag_base;
2019 fsize = size + rec->dtrd_size;
2020
2021 ASSERT(dbuf->dtb_tomax != NULL);
2022 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2023
2024 if ((tomax = buf->dtb_tomax) == NULL) {
2025 dtrace_buffer_drop(buf);
2026 return;
2027 }
2028
2029 /*
2030 * The metastructure is always at the bottom of the buffer.
2031 */
2032 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2033 sizeof (dtrace_aggbuffer_t));
2034
2035 if (buf->dtb_offset == 0) {
2036 /*
2037 * We just kludge up approximately 1/8th of the size to be
2038 * buckets. If this guess ends up being routinely
2039 * off-the-mark, we may need to dynamically readjust this
2040 * based on past performance.
2041 */
2042 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2043
2044 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2045 (uintptr_t)tomax || hashsize == 0) {
2046 /*
2047 * We've been given a ludicrously small buffer;
2048 * increment our drop count and leave.
2049 */
2050 dtrace_buffer_drop(buf);
2051 return;
2052 }
2053
2054 /*
2055	 * And now, a pathetic attempt to try to get an odd (or
2056 * perchance, a prime) hash size for better hash distribution.
2057 */
2058 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2059 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2060
2061 agb->dtagb_hashsize = hashsize;
2062 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2063 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2064 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2065
2066 for (i = 0; i < agb->dtagb_hashsize; i++)
2067 agb->dtagb_hash[i] = NULL;
2068 }
2069
2070 ASSERT(agg->dtag_first != NULL);
2071 ASSERT(agg->dtag_first->dta_intuple);
2072
2073 /*
2074 * Calculate the hash value based on the key. Note that we _don't_
2075 * include the aggid in the hashing (but we will store it as part of
2076 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2077 * algorithm: a simple, quick algorithm that has no known funnels, and
2078 * gets good distribution in practice. The efficacy of the hashing
2079 * algorithm (and a comparison with other algorithms) may be found by
2080 * running the ::dtrace_aggstat MDB dcmd.
2081 */
2082 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2083 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2084 limit = i + act->dta_rec.dtrd_size;
2085 ASSERT(limit <= size);
2086 isstr = DTRACEACT_ISSTRING(act);
2087
2088 for (; i < limit; i++) {
2089 hashval += data[i];
2090 hashval += (hashval << 10);
2091 hashval ^= (hashval >> 6);
2092
2093 if (isstr && data[i] == '\0')
2094 break;
2095 }
2096 }
2097
2098 hashval += (hashval << 3);
2099 hashval ^= (hashval >> 11);
2100 hashval += (hashval << 15);
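	/*
	 * (Illustrative note, not part of the original source: in
	 * isolation, the One-at-a-time hash of an n-byte buffer is:
	 *
	 *	for (h = 0, i = 0; i < n; i++) {
	 *		h += buf[i];
	 *		h += (h << 10);
	 *		h ^= (h >> 6);
	 *	}
	 *	h += (h << 3);
	 *	h ^= (h >> 11);
	 *	h += (h << 15);
	 *
	 * The code above applies the per-byte mixing step to each tuple
	 * member in turn, and then performs the finalization once all
	 * members have been folded in.)
	 */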
2101
2102 /*
2103 * Yes, the divide here is expensive -- but it's generally the least
2104 * of the performance issues given the amount of data that we iterate
2105 * over to compute hash values, compare data, etc.
2106 */
2107 ndx = hashval % agb->dtagb_hashsize;
2108
2109 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2110 ASSERT((caddr_t)key >= tomax);
2111 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2112
2113 if (hashval != key->dtak_hashval || key->dtak_size != size)
2114 continue;
2115
2116 kdata = key->dtak_data;
2117 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2118
2119 for (act = agg->dtag_first; act->dta_intuple;
2120 act = act->dta_next) {
2121 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2122 limit = i + act->dta_rec.dtrd_size;
2123 ASSERT(limit <= size);
2124 isstr = DTRACEACT_ISSTRING(act);
2125
2126 for (; i < limit; i++) {
2127 if (kdata[i] != data[i])
2128 goto next;
2129
2130 if (isstr && data[i] == '\0')
2131 break;
2132 }
2133 }
2134
2135 if (action != key->dtak_action) {
2136 /*
2137 * We are aggregating on the same value in the same
2138 * aggregation with two different aggregating actions.
2139 * (This should have been picked up in the compiler,
2140 * so we may be dealing with errant or devious DIF.)
2141 * This is an error condition; we indicate as much,
2142 * and return.
2143 */
2144 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2145 return;
2146 }
2147
2148 /*
2149 * This is a hit: we need to apply the aggregator to
2150 * the value at this key.
2151 */
2152 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2153 return;
2154next:
2155 continue;
2156 }
2157
2158 /*
2159 * We didn't find it. We need to allocate some zero-filled space,
2160 * link it into the hash table appropriately, and apply the aggregator
2161 * to the (zero-filled) value.
2162 */
2163 offs = buf->dtb_offset;
2164 while (offs & (align - 1))
2165 offs += sizeof (uint32_t);
2166
2167 /*
2168 * If we don't have enough room to both allocate a new key _and_
2169 * its associated data, increment the drop count and return.
2170 */
2171 if ((uintptr_t)tomax + offs + fsize >
2172 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2173 dtrace_buffer_drop(buf);
2174 return;
2175 }
2176
2177 /*CONSTCOND*/
2178 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2179 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2180 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2181
2182 key->dtak_data = kdata = tomax + offs;
2183 buf->dtb_offset = offs + fsize;
2184
2185 /*
2186 * Now copy the data across.
2187 */
2188 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2189
2190 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2191 kdata[i] = data[i];
2192
2193 /*
2194 * Because strings are not zeroed out by default, we need to iterate
2195 * looking for actions that store strings, and we need to explicitly
2196 * pad these strings out with zeroes.
2197 */
2198 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2199 int nul;
2200
2201 if (!DTRACEACT_ISSTRING(act))
2202 continue;
2203
2204 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2205 limit = i + act->dta_rec.dtrd_size;
2206 ASSERT(limit <= size);
2207
2208 for (nul = 0; i < limit; i++) {
2209 if (nul) {
2210 kdata[i] = '\0';
2211 continue;
2212 }
2213
2214 if (data[i] != '\0')
2215 continue;
2216
2217 nul = 1;
2218 }
2219 }
2220
2221 for (i = size; i < fsize; i++)
2222 kdata[i] = 0;
2223
2224 key->dtak_hashval = hashval;
2225 key->dtak_size = size;
2226 key->dtak_action = action;
2227 key->dtak_next = agb->dtagb_hash[ndx];
2228 agb->dtagb_hash[ndx] = key;
2229
2230 /*
2231 * Finally, apply the aggregator.
2232 */
2233 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2234 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2235}
2236
2237/*
2238 * Given consumer state, this routine finds a speculation in the INACTIVE
2239 * state and transitions it into the ACTIVE state. If there is no speculation
2240 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2241 * incremented -- it is up to the caller to take appropriate action.
2242 */
2243static int
2244dtrace_speculation(dtrace_state_t *state)
2245{
2246 int i = 0;
2247 dtrace_speculation_state_t current;
2248 uint32_t *stat = &state->dts_speculations_unavail, count;
2249
2250 while (i < state->dts_nspeculations) {
2251 dtrace_speculation_t *spec = &state->dts_speculations[i];
2252
2253 current = spec->dtsp_state;
2254
2255 if (current != DTRACESPEC_INACTIVE) {
2256 if (current == DTRACESPEC_COMMITTINGMANY ||
2257 current == DTRACESPEC_COMMITTING ||
2258 current == DTRACESPEC_DISCARDING)
2259 stat = &state->dts_speculations_busy;
2260 i++;
2261 continue;
2262 }
2263
2264 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2265 current, DTRACESPEC_ACTIVE) == current)
2266 return (i + 1);
2267 }
2268
2269 /*
2270 * We couldn't find a speculation. If we found as much as a single
2271 * busy speculation buffer, we'll attribute this failure as "busy"
2272 * instead of "unavail".
2273 */
2274 do {
2275 count = *stat;
2276 } while (dtrace_cas32(stat, count, count + 1) != count);
2277
2278 return (0);
2279}
2280
2281/*
2282 * This routine commits an active speculation. If the specified speculation
2283 * is not in a valid state to perform a commit(), this routine will silently do
2284 * nothing. The state of the specified speculation is transitioned according
2285 * to the state transition diagram outlined in <sys/dtrace_impl.h>.
2286 */
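/*
 * In summary, and as implemented by the switch below, commit() performs
 * these transitions:
 *
 *	ACTIVE		-> COMMITTING
 *	ACTIVEONE	-> COMMITTING		(this CPU did the speculate())
 *	ACTIVEONE	-> COMMITTINGMANY	(a different CPU did)
 *	ACTIVEMANY	-> COMMITTINGMANY
 *	COMMITTING	-> INACTIVE		(once the copy has completed)
 */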
2287static void
2288dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2289 dtrace_specid_t which)
2290{
2291 dtrace_speculation_t *spec;
2292 dtrace_buffer_t *src, *dest;
2293 uintptr_t daddr, saddr, dlimit;
2294 dtrace_speculation_state_t current, new VBDTUNASS(-1);
2295 intptr_t offs;
2296
2297 if (which == 0)
2298 return;
2299
2300 if (which > VBDTCAST(unsigned)state->dts_nspeculations) {
2301 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2302 return;
2303 }
2304
2305 spec = &state->dts_speculations[which - 1];
2306 src = &spec->dtsp_buffer[cpu];
2307 dest = &state->dts_buffer[cpu];
2308
2309 do {
2310 current = spec->dtsp_state;
2311
2312 if (current == DTRACESPEC_COMMITTINGMANY)
2313 break;
2314
2315 switch (current) {
2316 case DTRACESPEC_INACTIVE:
2317 case DTRACESPEC_DISCARDING:
2318 return;
2319
2320 case DTRACESPEC_COMMITTING:
2321 /*
2322 * This is only possible if we are (a) commit()'ing
2323 * without having done a prior speculate() on this CPU
2324 * and (b) racing with another commit() on a different
2325 * CPU. There's nothing to do -- we just assert that
2326 * our offset is 0.
2327 */
2328 ASSERT(src->dtb_offset == 0);
2329 return;
2330
2331 case DTRACESPEC_ACTIVE:
2332 new = DTRACESPEC_COMMITTING;
2333 break;
2334
2335 case DTRACESPEC_ACTIVEONE:
2336 /*
2337 * This speculation is active on one CPU. If our
2338 * buffer offset is non-zero, we know that the one CPU
2339 * must be us. Otherwise, we are committing on a
2340 * different CPU from the speculate(), and we must
2341 * rely on being asynchronously cleaned.
2342 */
2343 if (src->dtb_offset != 0) {
2344 new = DTRACESPEC_COMMITTING;
2345 break;
2346 }
2347 /*FALLTHROUGH*/
2348
2349 case DTRACESPEC_ACTIVEMANY:
2350 new = DTRACESPEC_COMMITTINGMANY;
2351 break;
2352
2353 default:
2354#ifndef VBOX
2355 ASSERT(0);
2356#else
2357 AssertFatalMsgFailed(("%d\n", current));
2358#endif
2359 }
2360 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2361 current, new) != current);
2362
2363 /*
2364 * We have set the state to indicate that we are committing this
2365 * speculation. Now reserve the necessary space in the destination
2366 * buffer.
2367 */
2368 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2369 sizeof (uint64_t), state, NULL)) < 0) {
2370 dtrace_buffer_drop(dest);
2371 goto out;
2372 }
2373
2374 /*
2375 * We have the space; copy the buffer across. (Note that this is a
2376	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2377 * a serious performance issue, a high-performance DTrace-specific
2378 * bcopy() should obviously be invented.)
2379 */
2380 daddr = (uintptr_t)dest->dtb_tomax + offs;
2381 dlimit = daddr + src->dtb_offset;
2382 saddr = (uintptr_t)src->dtb_tomax;
2383
2384 /*
2385 * First, the aligned portion.
2386 */
2387 while (dlimit - daddr >= sizeof (uint64_t)) {
2388 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2389
2390 daddr += sizeof (uint64_t);
2391 saddr += sizeof (uint64_t);
2392 }
2393
2394 /*
2395 * Now any left-over bit...
2396 */
2397 while (dlimit - daddr)
2398 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2399
2400 /*
2401 * Finally, commit the reserved space in the destination buffer.
2402 */
2403 dest->dtb_offset = offs + src->dtb_offset;
2404
2405out:
2406 /*
2407 * If we're lucky enough to be the only active CPU on this speculation
2408 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2409 */
2410 if (current == DTRACESPEC_ACTIVE ||
2411 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2412 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2413 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2414
2415 ASSERT(rval == DTRACESPEC_COMMITTING);
2416 }
2417
2418 src->dtb_offset = 0;
2419 src->dtb_xamot_drops += src->dtb_drops;
2420 src->dtb_drops = 0;
2421}
2422
2423/*
2424 * This routine discards an active speculation. If the specified speculation
2425 * is not in a valid state to perform a discard(), this routine will silently
2426 * do nothing. The state of the specified speculation is transitioned
2427 * according to the state transition diagram outlined in <sys/dtrace_impl.h>.
2428 */
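/*
 * In summary, and as implemented by the switch below, discard() performs
 * these transitions:
 *
 *	ACTIVE		-> DISCARDING
 *	ACTIVEMANY	-> DISCARDING
 *	ACTIVEONE	-> INACTIVE	(this CPU holds the only data)
 *	ACTIVEONE	-> DISCARDING	(a different CPU holds it)
 */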
2429static void
2430dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2431 dtrace_specid_t which)
2432{
2433 dtrace_speculation_t *spec;
2434 dtrace_speculation_state_t current, new;
2435 dtrace_buffer_t *buf;
2436
2437 if (which == 0)
2438 return;
2439
2440 if (which > VBDTCAST(unsigned)state->dts_nspeculations) {
2441 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2442 return;
2443 }
2444
2445 spec = &state->dts_speculations[which - 1];
2446 buf = &spec->dtsp_buffer[cpu];
2447
2448 do {
2449 current = spec->dtsp_state;
2450
2451 switch (current) {
2452 case DTRACESPEC_INACTIVE:
2453 case DTRACESPEC_COMMITTINGMANY:
2454 case DTRACESPEC_COMMITTING:
2455 case DTRACESPEC_DISCARDING:
2456 return;
2457
2458 case DTRACESPEC_ACTIVE:
2459 case DTRACESPEC_ACTIVEMANY:
2460 new = DTRACESPEC_DISCARDING;
2461 break;
2462
2463 case DTRACESPEC_ACTIVEONE:
2464 if (buf->dtb_offset != 0) {
2465 new = DTRACESPEC_INACTIVE;
2466 } else {
2467 new = DTRACESPEC_DISCARDING;
2468 }
2469 break;
2470
2471 default:
2472#ifndef VBOX
2473 ASSERT(0);
2474#else
2475 AssertFatalMsgFailed(("%d\n", current));
2476#endif
2477 }
2478 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2479 current, new) != current);
2480
2481 buf->dtb_offset = 0;
2482 buf->dtb_drops = 0;
2483}
2484
2485/*
2486 * Note: not called from probe context. This function is called
2487 * asynchronously from cross call context to clean any speculations that are
2488 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2489 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2490 * speculation.
2491 */
2492static void
2493dtrace_speculation_clean_here(dtrace_state_t *state)
2494{
2495 dtrace_icookie_t cookie;
2496 processorid_t cpu = VBDT_GET_CPUID();
2497 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2498 dtrace_specid_t i;
2499
2500 cookie = dtrace_interrupt_disable();
2501
2502 if (dest->dtb_tomax == NULL) {
2503 dtrace_interrupt_enable(cookie);
2504 return;
2505 }
2506
2507 for (i = 0; i < VBDTCAST(unsigned)state->dts_nspeculations; i++) {
2508 dtrace_speculation_t *spec = &state->dts_speculations[i];
2509 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2510
2511 if (src->dtb_tomax == NULL)
2512 continue;
2513
2514 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2515 src->dtb_offset = 0;
2516 continue;
2517 }
2518
2519 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2520 continue;
2521
2522 if (src->dtb_offset == 0)
2523 continue;
2524
2525 dtrace_speculation_commit(state, cpu, i + 1);
2526 }
2527
2528 dtrace_interrupt_enable(cookie);
2529}
2530
2531#ifdef VBOX
2532/** RTMpOnAll worker that runs dtrace_speculation_clean_here() on the current CPU. */
2533static DECLCALLBACK(void) dtrace_speculation_clean_here_wrapper(RTCPUID idCpu, void *pvUser1, void *pvUser2)
2534{
2535 dtrace_speculation_clean_here((dtrace_state_t *)pvUser1);
2536 NOREF(pvUser2); NOREF(idCpu);
2537}
2538#endif
2539
2540/*
2541 * Note: not called from probe context. This function is called
2542 * asynchronously (and at a regular interval) to clean any speculations that
2543 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2544 * is work to be done, it cross calls all CPUs to perform that work;
2545 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2546 * INACTIVE state until they have been cleaned by all CPUs.
2547 */
2548static void
2549dtrace_speculation_clean(dtrace_state_t *state)
2550{
2551 int work = 0, rv;
2552 dtrace_specid_t i;
2553
2554 for (i = 0; i < VBDTCAST(unsigned)state->dts_nspeculations; i++) {
2555 dtrace_speculation_t *spec = &state->dts_speculations[i];
2556
2557 ASSERT(!spec->dtsp_cleaning);
2558
2559 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2560 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2561 continue;
2562
2563 work++;
2564 spec->dtsp_cleaning = 1;
2565 }
2566
2567 if (!work)
2568 return;
2569
2570#ifndef VBOX
2571 dtrace_xcall(DTRACE_CPUALL,
2572 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2573#else
2574 RTMpOnAll(dtrace_speculation_clean_here_wrapper, state, NULL);
2575#endif
2576
2577 /*
2578 * We now know that all CPUs have committed or discarded their
2579 * speculation buffers, as appropriate. We can now set the state
2580 * to inactive.
2581 */
2582 for (i = 0; i < VBDTCAST(unsigned)state->dts_nspeculations; i++) {
2583 dtrace_speculation_t *spec = &state->dts_speculations[i];
2584 dtrace_speculation_state_t current, new;
2585
2586 if (!spec->dtsp_cleaning)
2587 continue;
2588
2589 current = spec->dtsp_state;
2590 ASSERT(current == DTRACESPEC_DISCARDING ||
2591 current == DTRACESPEC_COMMITTINGMANY);
2592
2593 new = DTRACESPEC_INACTIVE;
2594
2595 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2596 ASSERT(VBDTCAST(dtrace_speculation_state_t)rv == current);
2597 spec->dtsp_cleaning = 0;
2598 }
2599}
2600
2601/*
2602 * Called as part of a speculate() to get the speculative buffer associated
2603 * with a given speculation. Returns NULL if the specified speculation is not
2604 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2605 * the active CPU is not the specified CPU -- the speculation will be
2606 * atomically transitioned into the ACTIVEMANY state.
2607 */
2608static dtrace_buffer_t *
2609dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2610 dtrace_specid_t which)
2611{
2612 dtrace_speculation_t *spec;
2613 dtrace_speculation_state_t current, new VBDTUNASS(-1);
2614 dtrace_buffer_t *buf;
2615
2616 if (which == 0)
2617 return (NULL);
2618
2619 if (which > VBDTCAST(unsigned)state->dts_nspeculations) {
2620 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2621 return (NULL);
2622 }
2623
2624 spec = &state->dts_speculations[which - 1];
2625 buf = &spec->dtsp_buffer[cpuid];
2626
2627 do {
2628 current = spec->dtsp_state;
2629
2630 switch (current) {
2631 case DTRACESPEC_INACTIVE:
2632 case DTRACESPEC_COMMITTINGMANY:
2633 case DTRACESPEC_DISCARDING:
2634 return (NULL);
2635
2636 case DTRACESPEC_COMMITTING:
2637 ASSERT(buf->dtb_offset == 0);
2638 return (NULL);
2639
2640 case DTRACESPEC_ACTIVEONE:
2641 /*
2642 * This speculation is currently active on one CPU.
2643 * Check the offset in the buffer; if it's non-zero,
2644 * that CPU must be us (and we leave the state alone).
2645 * If it's zero, assume that we're starting on a new
2646 * CPU -- and change the state to indicate that the
2647 * speculation is active on more than one CPU.
2648 */
2649 if (buf->dtb_offset != 0)
2650 return (buf);
2651
2652 new = DTRACESPEC_ACTIVEMANY;
2653 break;
2654
2655 case DTRACESPEC_ACTIVEMANY:
2656 return (buf);
2657
2658 case DTRACESPEC_ACTIVE:
2659 new = DTRACESPEC_ACTIVEONE;
2660 break;
2661
2662 default:
2663#ifndef VBOX
2664 ASSERT(0);
2665#else
2666 AssertFatalMsgFailed(("%d\n", current));
2667#endif
2668 }
2669 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2670 current, new) != current);
2671
2672 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2673 return (buf);
2674}
2675
2676/*
2677 * Return a string. In the event that the user lacks the privilege to access
2678 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2679 * don't fail access checking.
2680 *
2681 * dtrace_dif_variable() uses this routine as a helper for various
2682 * builtin values such as 'execname' and 'probefunc.'
2683 */
2684VBDTSTATIC uintptr_t
2685dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2686 dtrace_mstate_t *mstate)
2687{
2688 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2689 uintptr_t ret;
2690 size_t strsz;
2691
2692 /*
2693 * The easy case: this probe is allowed to read all of memory, so
2694 * we can just return this as a vanilla pointer.
2695 */
2696 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2697 return (addr);
2698
2699 /*
2700 * This is the tougher case: we copy the string in question from
2701 * kernel memory into scratch memory and return it that way: this
2702 * ensures that we won't trip up when access checking tests the
2703 * BYREF return value.
2704 */
2705 strsz = dtrace_strlen((char *)addr, size) + 1;
2706
2707 if (mstate->dtms_scratch_ptr + strsz >
2708 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2709 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2710 return (NULL);
2711 }
2712
2713 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2714 strsz);
2715 ret = mstate->dtms_scratch_ptr;
2716 mstate->dtms_scratch_ptr += strsz;
2717 return (ret);
2718}
2719
2720/*
2721 * This function implements the DIF emulator's variable lookups. The emulator
2722 * passes a reserved variable identifier and optional built-in array index.
2723 */
2724static uint64_t
2725dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2726 uint64_t ndx)
2727{
2728 /*
2729 * If we're accessing one of the uncached arguments, we'll turn this
2730 * into a reference in the args array.
2731 */
2732 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2733 ndx = v - DIF_VAR_ARG0;
2734 v = DIF_VAR_ARGS;
2735 }
2736
2737 switch (v) {
2738 case DIF_VAR_ARGS:
2739 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2740 if (ndx >= sizeof (mstate->dtms_arg) /
2741 sizeof (mstate->dtms_arg[0])) {
2742 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2743 dtrace_provider_t *pv;
2744 uint64_t val;
2745
2746 pv = mstate->dtms_probe->dtpr_provider;
2747 if (pv->dtpv_pops.dtps_getargval != NULL)
2748 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2749 mstate->dtms_probe->dtpr_id,
2750 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2751 else
2752 val = dtrace_getarg(ndx, aframes);
2753
2754 /*
2755 * This is regrettably required to keep the compiler
2756 * from tail-optimizing the call to dtrace_getarg().
2757 * The condition always evaluates to true, but the
2758 * compiler has no way of figuring that out a priori.
2759 * (None of this would be necessary if the compiler
2760 * could be relied upon to _always_ tail-optimize
2761 * the call to dtrace_getarg() -- but it can't.)
2762 */
2763 if (mstate->dtms_probe != NULL)
2764 return (val);
2765
2766#ifndef VBOX
2767 ASSERT(0);
2768#else
2769 AssertFatalFailed();
2770#endif
2771 }
2772
2773 return (mstate->dtms_arg[ndx]);
2774
2775 case DIF_VAR_UREGS: {
2776#ifndef VBOX
2777 klwp_t *lwp;
2778
2779 if (!dtrace_priv_proc(state))
2780 return (0);
2781
2782 if ((lwp = curthread->t_lwp) == NULL) {
2783 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2784 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = NULL;
2785 return (0);
2786 }
2787
2788 return (dtrace_getreg(lwp->lwp_regs, ndx));
2789#else
2790 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2791 return (0);
2792#endif
2793 }
2794
2795 case DIF_VAR_CURTHREAD:
2796 if (!dtrace_priv_kernel(state))
2797 return (0);
2798#ifndef VBOX
2799 return ((uint64_t)(uintptr_t)curthread);
2800#else
2801 return ((uintptr_t)RTThreadNativeSelf());
2802#endif
2803
2804 case DIF_VAR_TIMESTAMP:
2805 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2806 mstate->dtms_timestamp = dtrace_gethrtime();
2807 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2808 }
2809 return (mstate->dtms_timestamp);
2810
2811 case DIF_VAR_VTIMESTAMP:
2812#ifndef VBOX
2813 ASSERT(dtrace_vtime_references != 0);
2814 return (curthread->t_dtrace_vtime);
2815#else
2816 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2817 return (0);
2818#endif
2819
2820 case DIF_VAR_WALLTIMESTAMP:
2821 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2822 mstate->dtms_walltimestamp = dtrace_gethrestime();
2823 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2824 }
2825 return (mstate->dtms_walltimestamp);
2826
2827 case DIF_VAR_IPL:
2828 if (!dtrace_priv_kernel(state))
2829 return (0);
2830 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2831 mstate->dtms_ipl = dtrace_getipl();
2832 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2833 }
2834 return (mstate->dtms_ipl);
2835
2836 case DIF_VAR_EPID:
2837 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2838 return (mstate->dtms_epid);
2839
2840 case DIF_VAR_ID:
2841 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2842 return (mstate->dtms_probe->dtpr_id);
2843
2844 case DIF_VAR_STACKDEPTH:
2845 if (!dtrace_priv_kernel(state))
2846 return (0);
2847 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2848 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2849
2850 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2851 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2852 }
2853 return (mstate->dtms_stackdepth);
2854
2855 case DIF_VAR_USTACKDEPTH:
2856 if (!dtrace_priv_proc(state))
2857 return (0);
2858 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2859 /*
2860 * See comment in DIF_VAR_PID.
2861 */
2862 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2863 CPU_ON_INTR(CPU)) {
2864 mstate->dtms_ustackdepth = 0;
2865 } else {
2866 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2867 mstate->dtms_ustackdepth =
2868 dtrace_getustackdepth();
2869 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2870 }
2871 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2872 }
2873 return (mstate->dtms_ustackdepth);
2874
2875 case DIF_VAR_CALLER:
2876 if (!dtrace_priv_kernel(state))
2877 return (0);
2878 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2879 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2880
2881 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2882 /*
2883 * If this is an unanchored probe, we are
2884 * required to go through the slow path:
2885 * dtrace_caller() only guarantees correct
2886 * results for anchored probes.
2887 */
2888 pc_t caller[2];
2889
2890 dtrace_getpcstack(caller, 2, aframes,
2891 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2892 mstate->dtms_caller = caller[1];
2893 } else if ((mstate->dtms_caller =
2894 dtrace_caller(aframes)) == VBDTCAST(uintptr_t)-1) {
2895 /*
2896 * We have failed to do this the quick way;
2897 * we must resort to the slower approach of
2898 * calling dtrace_getpcstack().
2899 */
2900 pc_t caller;
2901
2902 dtrace_getpcstack(&caller, 1, aframes, NULL);
2903 mstate->dtms_caller = caller;
2904 }
2905
2906 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2907 }
2908 return (mstate->dtms_caller);
2909
2910 case DIF_VAR_UCALLER:
2911 if (!dtrace_priv_proc(state))
2912 return (0);
2913
2914 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2915 uint64_t ustack[3];
2916
2917 /*
2918 * dtrace_getupcstack() fills in the first uint64_t
2919 * with the current PID. The second uint64_t will
2920 * be the program counter at user-level. The third
2921 * uint64_t will contain the caller, which is what
2922 * we're after.
2923 */
2924 ustack[2] = NULL;
2925 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2926 dtrace_getupcstack(ustack, 3);
2927 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2928 mstate->dtms_ucaller = ustack[2];
2929 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2930 }
2931
2932 return (mstate->dtms_ucaller);
2933
2934 case DIF_VAR_PROBEPROV:
2935 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2936 return (dtrace_dif_varstr(
2937 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
2938 state, mstate));
2939
2940 case DIF_VAR_PROBEMOD:
2941 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2942 return (dtrace_dif_varstr(
2943 (uintptr_t)mstate->dtms_probe->dtpr_mod,
2944 state, mstate));
2945
2946 case DIF_VAR_PROBEFUNC:
2947 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2948 return (dtrace_dif_varstr(
2949 (uintptr_t)mstate->dtms_probe->dtpr_func,
2950 state, mstate));
2951
2952 case DIF_VAR_PROBENAME:
2953 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2954 return (dtrace_dif_varstr(
2955 (uintptr_t)mstate->dtms_probe->dtpr_name,
2956 state, mstate));
2957
2958 case DIF_VAR_PID:
2959 if (!dtrace_priv_proc(state))
2960 return (0);
2961
2962#ifndef VBOX
2963 /*
2964 * Note that we are assuming that an unanchored probe is
2965 * always due to a high-level interrupt. (And we're assuming
2966 * that there is only a single high level interrupt.)
2967 */
2968 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2969 return (pid0.pid_id);
2970
2971 /*
2972 * It is always safe to dereference one's own t_procp pointer:
2973 * it always points to a valid, allocated proc structure.
2974 * Further, it is always safe to dereference the p_pidp member
2975	 * of one's own proc structure. (These are truisms because
2976 * threads and processes don't clean up their own state --
2977 * they leave that task to whomever reaps them.)
2978 */
2979 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2980#else
2981 return (RTProcSelf());
2982#endif
2983
2984 case DIF_VAR_PPID:
2985 if (!dtrace_priv_proc(state))
2986 return (0);
2987
2988#ifndef VBOX
2989 /*
2990 * See comment in DIF_VAR_PID.
2991 */
2992 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2993 return (pid0.pid_id);
2994
2995 /*
2996 * It is always safe to dereference one's own t_procp pointer:
2997 * it always points to a valid, allocated proc structure.
2998 * (This is true because threads don't clean up their own
2999 * state -- they leave that task to whomever reaps them.)
3000 */
3001 return ((uint64_t)curthread->t_procp->p_ppid);
3002#else
3003 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3004 return (0); /** @todo parent pid? */
3005#endif
3006
3007 case DIF_VAR_TID:
3008#ifndef VBOX
3009 /*
3010 * See comment in DIF_VAR_PID.
3011 */
3012 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3013 return (0);
3014
3015 return ((uint64_t)curthread->t_tid);
3016#else
3017 return (RTThreadNativeSelf()); /** @todo proper tid? */
3018#endif
3019
3020 case DIF_VAR_EXECNAME:
3021 if (!dtrace_priv_proc(state))
3022 return (0);
3023
3024#ifndef VBOX
3025 /*
3026 * See comment in DIF_VAR_PID.
3027 */
3028 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3029 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3030
3031 /*
3032 * It is always safe to dereference one's own t_procp pointer:
3033 * it always points to a valid, allocated proc structure.
3034 * (This is true because threads don't clean up their own
3035 * state -- they leave that task to whomever reaps them.)
3036 */
3037 return (dtrace_dif_varstr(
3038 (uintptr_t)curthread->t_procp->p_user.u_comm,
3039 state, mstate));
3040#else
3041 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3042 return (0); /** @todo execname */
3043#endif
3044
3045 case DIF_VAR_ZONENAME:
3046 if (!dtrace_priv_proc(state))
3047 return (0);
3048
3049#ifndef VBOX
3050 /*
3051 * See comment in DIF_VAR_PID.
3052 */
3053 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3054 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3055
3056 /*
3057 * It is always safe to dereference one's own t_procp pointer:
3058 * it always points to a valid, allocated proc structure.
3059 * (This is true because threads don't clean up their own
3060 * state -- they leave that task to whomever reaps them.)
3061 */
3062 return (dtrace_dif_varstr(
3063 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3064 state, mstate));
3065#else
3066 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3067 return (0);
3068#endif
3069
3070 case DIF_VAR_UID:
3071 if (!dtrace_priv_proc(state))
3072 return (0);
3073
3074#ifndef VBOX
3075 /*
3076 * See comment in DIF_VAR_PID.
3077 */
3078 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3079 return ((uint64_t)p0.p_cred->cr_uid);
3080
3081 /*
3082 * It is always safe to dereference one's own t_procp pointer:
3083 * it always points to a valid, allocated proc structure.
3084 * (This is true because threads don't clean up their own
3085 * state -- they leave that task to whomever reaps them.)
3086 *
3087 * Additionally, it is safe to dereference one's own process
3088 * credential, since this is never NULL after process birth.
3089 */
3090 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3091#else
3092 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3093 return (0);
3094#endif
3095
3096 case DIF_VAR_GID:
3097 if (!dtrace_priv_proc(state))
3098 return (0);
3099
3100#ifndef VBOX
3101 /*
3102 * See comment in DIF_VAR_PID.
3103 */
3104 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3105 return ((uint64_t)p0.p_cred->cr_gid);
3106
3107 /*
3108 * It is always safe to dereference one's own t_procp pointer:
3109 * it always points to a valid, allocated proc structure.
3110 * (This is true because threads don't clean up their own
3111 * state -- they leave that task to whomever reaps them.)
3112 *
3113 * Additionally, it is safe to dereference one's own process
3114 * credential, since this is never NULL after process birth.
3115 */
3116 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3117#else
3118 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3119 return (0);
3120#endif
3121
3122 case DIF_VAR_ERRNO: {
3123#ifndef VBOX
3124 klwp_t *lwp;
3125#endif
3126 if (!dtrace_priv_proc(state))
3127 return (0);
3128
3129#ifndef VBOX
3130 /*
3131 * See comment in DIF_VAR_PID.
3132 */
3133 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3134 return (0);
3135
3136 /*
3137 * It is always safe to dereference one's own t_lwp pointer in
3138 * the event that this pointer is non-NULL. (This is true
3139 * because threads and lwps don't clean up their own state --
3140 * they leave that task to whomever reaps them.)
3141 */
3142 if ((lwp = curthread->t_lwp) == NULL)
3143 return (0);
3144
3145 return ((uint64_t)lwp->lwp_errno);
3146#else
3147 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3148 return (0);
3149#endif
3150 }
3151 default:
3152 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3153 return (0);
3154 }
3155}
3156
3157/*
3158 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
3159 * Notice that we don't bother validating the proper number of arguments or
3160 * their types in the tuple stack. This isn't needed because all argument
3161 * interpretation is safe because of our load safety -- the worst that can
3162 * happen is that a bogus program can obtain bogus results.
3163 */
3164static void
3165dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3166 dtrace_key_t *tupregs, int nargs,
3167 dtrace_mstate_t *mstate, dtrace_state_t *state)
3168{
3169 volatile uint16_t *flags = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
3170 volatile uintptr_t *illval = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval;
3171 dtrace_vstate_t *vstate = &state->dts_vstate;
3172
3173#ifndef VBOX
3174 union {
3175 mutex_impl_t mi;
3176 uint64_t mx;
3177 } m;
3178
3179 union {
3180 krwlock_t ri;
3181 uintptr_t rw;
3182 } r;
3183#endif
3184
3185 switch (subr) {
3186 case DIF_SUBR_RAND:
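		/*
		 * (Illustrative note, not part of the original source:
		 * this is a simple linear congruential step seeded by
		 * the current high-resolution time -- cheap and good
		 * enough for tracing, but emphatically not a
		 * cryptographic source of randomness.)
		 */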
3187 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3188 break;
3189
3190 case DIF_SUBR_MUTEX_OWNED:
3191#ifndef VBOX
3192 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3193 mstate, vstate)) {
3194 regs[rd] = NULL;
3195 break;
3196 }
3197
3198 m.mx = dtrace_load64(tupregs[0].dttk_value);
3199 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3200 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3201 else
3202 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3203#else
3204 regs[rd] = 0;
3205 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3206#endif
3207 break;
3208
3209 case DIF_SUBR_MUTEX_OWNER:
3210#ifndef VBOX
3211 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3212 mstate, vstate)) {
3213 regs[rd] = NULL;
3214 break;
3215 }
3216
3217 m.mx = dtrace_load64(tupregs[0].dttk_value);
3218 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3219 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3220 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3221 else
3222 regs[rd] = 0;
3223#else
3224 regs[rd] = 0;
3225 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3226#endif
3227 break;
3228
3229 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3230#ifndef VBOX
3231 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3232 mstate, vstate)) {
3233 regs[rd] = NULL;
3234 break;
3235 }
3236
3237 m.mx = dtrace_load64(tupregs[0].dttk_value);
3238 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3239#else
3240 regs[rd] = 0;
3241 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3242#endif
3243 break;
3244
3245 case DIF_SUBR_MUTEX_TYPE_SPIN:
3246#ifndef VBOX
3247 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3248 mstate, vstate)) {
3249 regs[rd] = NULL;
3250 break;
3251 }
3252
3253 m.mx = dtrace_load64(tupregs[0].dttk_value);
3254 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3255#else
3256 regs[rd] = 0;
3257 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3258#endif
3259 break;
3260
3261 case DIF_SUBR_RW_READ_HELD: {
3262#ifndef VBOX
3263 uintptr_t tmp;
3264
3265 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3266 mstate, vstate)) {
3267 regs[rd] = NULL;
3268 break;
3269 }
3270
3271 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3272 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3273#else
3274 regs[rd] = 0;
3275 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3276#endif
3277 break;
3278 }
3279
3280 case DIF_SUBR_RW_WRITE_HELD:
3281#ifndef VBOX
3282 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3283 mstate, vstate)) {
3284 regs[rd] = NULL;
3285 break;
3286 }
3287
3288 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3289 regs[rd] = _RW_WRITE_HELD(&r.ri);
3290#else
3291 regs[rd] = 0;
3292 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3293#endif
3294 break;
3295
3296 case DIF_SUBR_RW_ISWRITER:
3297#ifndef VBOX
3298 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3299 mstate, vstate)) {
3300 regs[rd] = NULL;
3301 break;
3302 }
3303
3304 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3305 regs[rd] = _RW_ISWRITER(&r.ri);
3306#else
3307 regs[rd] = 0;
3308 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3309#endif
3310 break;
3311
3312 case DIF_SUBR_BCOPY: {
3313 /*
3314 * We need to be sure that the destination is in the scratch
3315 * region -- no other region is allowed.
3316 */
3317 uintptr_t src = tupregs[0].dttk_value;
3318 uintptr_t dest = tupregs[1].dttk_value;
3319 size_t size = tupregs[2].dttk_value;
3320
3321 if (!dtrace_inscratch(dest, size, mstate)) {
3322 *flags |= CPU_DTRACE_BADADDR;
3323 *illval = regs[rd];
3324 break;
3325 }
3326
3327 if (!dtrace_canload(src, size, mstate, vstate)) {
3328 regs[rd] = NULL;
3329 break;
3330 }
3331
3332 dtrace_bcopy((void *)src, (void *)dest, size);
3333 break;
3334 }
3335
3336 case DIF_SUBR_ALLOCA:
3337 case DIF_SUBR_COPYIN: {
3338 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3339 uint64_t size =
3340 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3341 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3342
3343 /*
3344 * This action doesn't require any credential checks since
3345 * probes will not activate in user contexts to which the
3346 * enabling user does not have permissions.
3347 */
3348
3349 /*
3350 * Rounding up the user allocation size could have overflowed
3351 * a large, bogus allocation (like -1ULL) to 0.
3352 */
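		/*
		 * (Illustrative note, not part of the original source:
		 * with size == -1ULL, scratch_size wraps around to a
		 * small value, and the scratch_size < size comparison
		 * below is what catches the overflow.)
		 */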
3353 if (scratch_size < size ||
3354 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3355 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3356 regs[rd] = NULL;
3357 break;
3358 }
3359
3360 if (subr == DIF_SUBR_COPYIN) {
3361 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3362 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3363 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3364 }
3365
3366 mstate->dtms_scratch_ptr += scratch_size;
3367 regs[rd] = dest;
3368 break;
3369 }
3370
3371 case DIF_SUBR_COPYINTO: {
3372 uint64_t size = tupregs[1].dttk_value;
3373 uintptr_t dest = tupregs[2].dttk_value;
3374
3375 /*
3376 * This action doesn't require any credential checks since
3377 * probes will not activate in user contexts to which the
3378 * enabling user does not have permissions.
3379 */
3380 if (!dtrace_inscratch(dest, size, mstate)) {
3381 *flags |= CPU_DTRACE_BADADDR;
3382 *illval = regs[rd];
3383 break;
3384 }
3385
3386 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3387 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3388 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3389 break;
3390 }
3391
3392 case DIF_SUBR_COPYINSTR: {
3393 uintptr_t dest = mstate->dtms_scratch_ptr;
3394 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3395
3396 if (nargs > 1 && tupregs[1].dttk_value < size)
3397 size = tupregs[1].dttk_value + 1;
3398
3399 /*
3400 * This action doesn't require any credential checks since
3401 * probes will not activate in user contexts to which the
3402 * enabling user does not have permissions.
3403 */
3404 if (!DTRACE_INSCRATCH(mstate, size)) {
3405 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3406 regs[rd] = NULL;
3407 break;
3408 }
3409
3410 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3411 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3412 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3413
3414 ((char *)dest)[size - 1] = '\0';
3415 mstate->dtms_scratch_ptr += size;
3416 regs[rd] = dest;
3417 break;
3418 }
3419
3420 case DIF_SUBR_MSGSIZE:
3421 case DIF_SUBR_MSGDSIZE: {
3422#ifndef VBOX
3423 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3424 uintptr_t wptr, rptr;
3425 size_t count = 0;
3426 int cont = 0;
3427
3428 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3429
3430 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3431 vstate)) {
3432 regs[rd] = NULL;
3433 break;
3434 }
3435
3436 wptr = dtrace_loadptr(baddr +
3437 offsetof(mblk_t, b_wptr));
3438
3439 rptr = dtrace_loadptr(baddr +
3440 offsetof(mblk_t, b_rptr));
3441
3442 if (wptr < rptr) {
3443 *flags |= CPU_DTRACE_BADADDR;
3444 *illval = tupregs[0].dttk_value;
3445 break;
3446 }
3447
3448 daddr = dtrace_loadptr(baddr +
3449 offsetof(mblk_t, b_datap));
3450
3451 baddr = dtrace_loadptr(baddr +
3452 offsetof(mblk_t, b_cont));
3453
3454 /*
3455			 * We want to protect against denial-of-service here,
3456 * so we're only going to search the list for
3457 * dtrace_msgdsize_max mblks.
3458 */
3459 if (cont++ > dtrace_msgdsize_max) {
3460 *flags |= CPU_DTRACE_ILLOP;
3461 break;
3462 }
3463
3464 if (subr == DIF_SUBR_MSGDSIZE) {
3465 if (dtrace_load8(daddr +
3466 offsetof(dblk_t, db_type)) != M_DATA)
3467 continue;
3468 }
3469
3470 count += wptr - rptr;
3471 }
3472
3473 if (!(*flags & CPU_DTRACE_FAULT))
3474 regs[rd] = count;
3475
3476#else
3477 regs[rd] = 0;
3478 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3479#endif
3480 break;
3481 }
3482
3483 case DIF_SUBR_PROGENYOF: {
3484#ifndef VBOX
3485 pid_t pid = tupregs[0].dttk_value;
3486 proc_t *p;
3487 int rval = 0;
3488
3489 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3490
3491 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3492 if (p->p_pidp->pid_id == pid) {
3493 rval = 1;
3494 break;
3495 }
3496 }
3497
3498 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3499
3500 regs[rd] = rval;
3501#else
3502 regs[rd] = 0;
3503 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3504#endif
3505 break;
3506 }
3507
3508 case DIF_SUBR_SPECULATION:
3509 regs[rd] = dtrace_speculation(state);
3510 break;
3511
3512 case DIF_SUBR_COPYOUT: {
3513 uintptr_t kaddr = tupregs[0].dttk_value;
3514 uintptr_t uaddr = tupregs[1].dttk_value;
3515 uint64_t size = tupregs[2].dttk_value;
3516
3517 if (!dtrace_destructive_disallow &&
3518 dtrace_priv_proc_control(state) &&
3519 !dtrace_istoxic(kaddr, size)) {
3520 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3521 dtrace_copyout(kaddr, uaddr, size, flags);
3522 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3523 }
3524 break;
3525 }
3526
3527 case DIF_SUBR_COPYOUTSTR: {
3528 uintptr_t kaddr = tupregs[0].dttk_value;
3529 uintptr_t uaddr = tupregs[1].dttk_value;
3530 uint64_t size = tupregs[2].dttk_value;
3531
3532 if (!dtrace_destructive_disallow &&
3533 dtrace_priv_proc_control(state) &&
3534 !dtrace_istoxic(kaddr, size)) {
3535 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3536 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3537 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3538 }
3539 break;
3540 }
3541
3542 case DIF_SUBR_STRLEN: {
3543 size_t sz;
3544 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3545 sz = dtrace_strlen((char *)addr,
3546 state->dts_options[DTRACEOPT_STRSIZE]);
3547
3548 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3549 regs[rd] = NULL;
3550 break;
3551 }
3552
3553 regs[rd] = sz;
3554
3555 break;
3556 }
3557
3558 case DIF_SUBR_STRCHR:
3559 case DIF_SUBR_STRRCHR: {
3560 /*
3561 * We're going to iterate over the string looking for the
3562 * specified character. We will iterate until we have reached
3563 * the string length or we have found the character. If this
3564 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3565 * of the specified character instead of the first.
3566 */
3567 uintptr_t saddr = tupregs[0].dttk_value;
3568 uintptr_t addr = tupregs[0].dttk_value;
3569 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3570 char c, target = (char)tupregs[1].dttk_value;
3571
3572 for (regs[rd] = NULL; addr < limit; addr++) {
3573 if ((c = dtrace_load8(addr)) == target) {
3574 regs[rd] = addr;
3575
3576 if (subr == DIF_SUBR_STRCHR)
3577 break;
3578 }
3579
3580 if (c == '\0')
3581 break;
3582 }
3583
3584 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3585 regs[rd] = NULL;
3586 break;
3587 }
3588
3589 break;
3590 }
3591
3592 case DIF_SUBR_STRSTR:
3593 case DIF_SUBR_INDEX:
3594 case DIF_SUBR_RINDEX: {
3595 /*
3596 * We're going to iterate over the string looking for the
3597 * specified string. We will iterate until we have reached
3598 * the string length or we have found the string. (Yes, this
3599 * is done in the most naive way possible -- but considering
3600 * that the string we're searching for is likely to be
3601 * relatively short, the complexity of Rabin-Karp or similar
3602 * hardly seems merited.)
3603 */
3604 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3605 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3606 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3607 size_t len = dtrace_strlen(addr, size);
3608 size_t sublen = dtrace_strlen(substr, size);
3609 char *limit = addr + len, *orig = addr;
3610 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3611 int inc = 1;
3612
3613 regs[rd] = notfound;
3614
3615 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3616 regs[rd] = NULL;
3617 break;
3618 }
3619
3620 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3621 vstate)) {
3622 regs[rd] = NULL;
3623 break;
3624 }
3625
3626 /*
3627 * strstr() and index()/rindex() have similar semantics if
3628 * both strings are the empty string: strstr() returns a
3629 * pointer to the (empty) string, and index() and rindex()
3630 * both return index 0 (regardless of any position argument).
3631 */
3632 if (sublen == 0 && len == 0) {
3633 if (subr == DIF_SUBR_STRSTR)
3634 regs[rd] = (uintptr_t)addr;
3635 else
3636 regs[rd] = 0;
3637 break;
3638 }
3639
3640 if (subr != DIF_SUBR_STRSTR) {
3641 if (subr == DIF_SUBR_RINDEX) {
3642 limit = orig - 1;
3643 addr += len;
3644 inc = -1;
3645 }
3646
3647 /*
3648 * Both index() and rindex() take an optional position
3649 * argument that denotes the starting position.
3650 */
3651 if (nargs == 3) {
3652 int64_t pos = (int64_t)tupregs[2].dttk_value;
3653
3654 /*
3655 * If the position argument to index() is
3656 * negative, Perl implicitly clamps it at
3657 * zero. This semantic is a little surprising
3658 * given the special meaning of negative
3659 * positions to similar Perl functions like
3660 * substr(), but it appears to reflect a
3661 * notion that index() can start from a
3662 * negative index and increment its way up to
3663 * the string. Given this notion, Perl's
3664 * rindex() is at least self-consistent in
3665 * that it implicitly clamps positions greater
3666 * than the string length to be the string
3667 * length. Where Perl completely loses
3668 * coherence, however, is when the specified
3669 * substring is the empty string (""). In
3670 * this case, even if the position is
3671 * negative, rindex() returns 0 -- and even if
3672 * the position is greater than the length,
3673 * index() returns the string length. These
3674 * semantics violate the notion that index()
3675 * should never return a value less than the
3676 * specified position and that rindex() should
3677 * never return a value greater than the
3678 * specified position. (One assumes that
3679 * these semantics are artifacts of Perl's
3680 * implementation and not the results of
3681 * deliberate design -- it beggars belief that
3682 * even Larry Wall could desire such oddness.)
3683 * While in the abstract one would wish for
3684 * consistent position semantics across
3685 * substr(), index() and rindex() -- or at the
3686 * very least self-consistent position
3687 * semantics for index() and rindex() -- we
3688 * instead opt to keep with the extant Perl
3689 * semantics, in all their broken glory. (Do
3690 * we have more desire to maintain Perl's
3691 * semantics than Perl does? Probably.)
3692 */
3693 if (subr == DIF_SUBR_RINDEX) {
3694 if (pos < 0) {
3695 if (sublen == 0)
3696 regs[rd] = 0;
3697 break;
3698 }
3699
3700 if (VBDTCAST(uint64_t)pos > len)
3701 pos = len;
3702 } else {
3703 if (pos < 0)
3704 pos = 0;
3705
3706 if (VBDTCAST(uint64_t)pos >= len) {
3707 if (sublen == 0)
3708 regs[rd] = len;
3709 break;
3710 }
3711 }
3712
3713 addr = orig + pos;
3714 }
3715 }
3716
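		/*
		 * (Illustrative note, not part of the original source:
		 * under these semantics, index("foobarbaz", "bar")
		 * evaluates to 3 and rindex("foobarbaz", "ba")
		 * evaluates to 6 -- both zero-based, as the comment in
		 * the loop below explains.)
		 */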
3717 for (regs[rd] = notfound; addr != limit; addr += inc) {
3718 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3719 if (subr != DIF_SUBR_STRSTR) {
3720 /*
3721 * As D index() and rindex() are
3722 * modeled on Perl (and not on awk),
3723 * we return a zero-based (and not a
3724 * one-based) index. (For you Perl
3725 * weenies: no, we're not going to add
3726 * $[ -- and shouldn't you be at a con
3727 * or something?)
3728 */
3729 regs[rd] = (uintptr_t)(addr - orig);
3730 break;
3731 }
3732
3733 ASSERT(subr == DIF_SUBR_STRSTR);
3734 regs[rd] = (uintptr_t)addr;
3735 break;
3736 }
3737 }
3738
3739 break;
3740 }
3741
3742 case DIF_SUBR_STRTOK: {
3743 uintptr_t addr = tupregs[0].dttk_value;
3744 uintptr_t tokaddr = tupregs[1].dttk_value;
3745 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3746 uintptr_t limit, toklimit = tokaddr + size;
3747 uint8_t c VBDTUNASS(0), tokmap[32]; /* 256 / 8 */
3748 char *dest = (char *)mstate->dtms_scratch_ptr;
3749 VBDTTYPE(unsigned,int) i;
3750
3751 /*
3752 * Check both the token buffer and (later) the input buffer,
3753 * since both could be non-scratch addresses.
3754 */
3755 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3756 regs[rd] = NULL;
3757 break;
3758 }
3759
3760 if (!DTRACE_INSCRATCH(mstate, size)) {
3761 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3762 regs[rd] = NULL;
3763 break;
3764 }
3765
3766 if (addr == NULL) {
3767 /*
3768 * If the address specified is NULL, we use our saved
3769 * strtok pointer from the mstate. Note that this
3770 * means that the saved strtok pointer is _only_
3771 * valid within multiple enablings of the same probe --
3772 * it behaves like an implicit clause-local variable.
3773 */
3774 addr = mstate->dtms_strtok;
3775 } else {
3776 /*
3777 * If the user-specified address is non-NULL we must
3778 * access check it. This is the only time we have
3779 * a chance to do so, since this address may reside
3780 * in the string table of this clause -- future calls
3781 * (when we fetch addr from mstate->dtms_strtok)
3782 * would fail this access check.
3783 */
3784 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3785 regs[rd] = NULL;
3786 break;
3787 }
3788 }
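/*
 * Illustrative D usage (example ours), mirroring libc strtok():
 *
 *     strtok("a,,b", ",")  => "a"
 *     strtok(NULL, ",")    => "b"   (resumes from the saved pointer)
 *     strtok(NULL, ",")    => NULL  (and the saved pointer is cleared)
 */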
3789
3790 /*
3791 * First, zero the token map, and then process the token
3792 * string -- setting a bit in the map for every character
3793 * found in the token string.
3794 */
3795 for (i = 0; i < sizeof (tokmap); i++)
3796 tokmap[i] = 0;
3797
3798 for (; tokaddr < toklimit; tokaddr++) {
3799 if ((c = dtrace_load8(tokaddr)) == '\0')
3800 break;
3801
3802 ASSERT((c >> 3) < sizeof (tokmap));
3803 tokmap[c >> 3] |= (1 << (c & 0x7));
3804 }
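/*
 * (E.g. -- illustration ours -- for c == 'A' (0x41), the bit set
 * is tokmap[8] & 0x02: 0x41 >> 3 == 8 selects the byte, and
 * 1 << (0x41 & 0x7) == 0x02 selects the bit within it.)
 */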
3805
3806 for (limit = addr + size; addr < limit; addr++) {
3807 /*
3808 * We're looking for a character that is _not_ contained
3809 * in the token string.
3810 */
3811 if ((c = dtrace_load8(addr)) == '\0')
3812 break;
3813
3814 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3815 break;
3816 }
3817
3818 if (c == '\0') {
3819 /*
3820 * We reached the end of the string without finding
3821 * any character that was not in the token string.
3822 * We return NULL in this case, and we set the saved
3823 * address to NULL as well.
3824 */
3825 regs[rd] = NULL;
3826 mstate->dtms_strtok = NULL;
3827 break;
3828 }
3829
3830 /*
3831 * From here on, we're copying into the destination string.
3832 */
3833 for (i = 0; addr < limit && i < size - 1; addr++) {
3834 if ((c = dtrace_load8(addr)) == '\0')
3835 break;
3836
3837 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3838 break;
3839
3840 ASSERT(i < size);
3841 dest[i++] = c;
3842 }
3843
3844 ASSERT(i < size);
3845 dest[i] = '\0';
3846 regs[rd] = (uintptr_t)dest;
3847 mstate->dtms_scratch_ptr += size;
3848 mstate->dtms_strtok = addr;
3849 break;
3850 }
3851
3852 case DIF_SUBR_SUBSTR: {
3853 uintptr_t s = tupregs[0].dttk_value;
3854 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3855 char *d = (char *)mstate->dtms_scratch_ptr;
3856 int64_t index = (int64_t)tupregs[1].dttk_value;
3857 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3858 size_t len = dtrace_strlen((char *)s, size);
3859 int64_t i;
3860
3861 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3862 regs[rd] = NULL;
3863 break;
3864 }
3865
3866 if (!DTRACE_INSCRATCH(mstate, size)) {
3867 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3868 regs[rd] = NULL;
3869 break;
3870 }
3871
3872 if (nargs <= 2)
3873 remaining = (int64_t)size;
3874
3875 if (index < 0) {
3876 index += len;
3877
3878 if (index < 0 && index + remaining > 0) {
3879 remaining += index;
3880 index = 0;
3881 }
3882 }
3883
3884 if (VBDTCAST(uint64_t)index >= len || index < 0) {
3885 remaining = 0;
3886 } else if (remaining < 0) {
3887 remaining += len - index;
3888 } else if (VBDTCAST(uint64_t)index + remaining > size) {
3889 remaining = size - index;
3890 }
3891
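/*
 * After the normalization above, e.g. (examples ours):
 *
 *     substr("foobar", -3)     => "bar"
 *     substr("foobar", 1, 3)   => "oob"
 *     substr("foobar", 1, -2)  => "oob"  (a negative length counts
 *                                         back from the end)
 */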
3892 for (i = 0; i < remaining; i++) {
3893 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
3894 break;
3895 }
3896
3897 d[i] = '\0';
3898
3899 mstate->dtms_scratch_ptr += size;
3900 regs[rd] = (uintptr_t)d;
3901 break;
3902 }
3903
3904 case DIF_SUBR_GETMAJOR:
3905#ifndef VBOX
3906#ifdef _LP64
3907 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
3908#else
3909 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
3910#endif
3911#else
3912 regs[rd] = 0;
3913 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3914#endif
3915 break;
3916
3917 case DIF_SUBR_GETMINOR:
3918#ifndef VBOX
3919#ifdef _LP64
3920 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
3921#else
3922 regs[rd] = tupregs[0].dttk_value & MAXMIN;
3923#endif
3924#else
3925 regs[rd] = 0;
3926 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3927#endif
3928 break;
3929
3930 case DIF_SUBR_DDI_PATHNAME: {
3931#ifndef VBOX
3932 /*
3933 * This one is a galactic mess. We are going to roughly
3934 * emulate ddi_pathname(), but it's made more complicated
3935 * by the fact that we (a) want to include the minor name and
3936 * (b) must proceed iteratively instead of recursively.
3937 */
3938 uintptr_t dest = mstate->dtms_scratch_ptr;
3939 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3940 char *start = (char *)dest, *end = start + size - 1;
3941 uintptr_t daddr = tupregs[0].dttk_value;
3942 int64_t minor = (int64_t)tupregs[1].dttk_value;
3943 char *s;
3944 int i, len, depth = 0;
3945
3946 /*
3947 * Due to all the pointer jumping we do and context we must
3948 * rely upon, we just mandate that the user must have kernel
3949 * read privileges to use this routine.
3950 */
3951 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
3952 *flags |= CPU_DTRACE_KPRIV;
3953 *illval = daddr;
3954 regs[rd] = NULL;
3955 }
3956
3957 if (!DTRACE_INSCRATCH(mstate, size)) {
3958 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3959 regs[rd] = NULL;
3960 break;
3961 }
3962
3963 *end = '\0';
3964
3965 /*
3966 * We want to have a name for the minor. In order to do this,
3967 * we need to walk the minor list from the devinfo. We want
3968 * to be sure that we don't infinitely walk a circular list,
3969 * so we check for circularity by sending a scout pointer
3970 * ahead two elements for every element that we iterate over;
3971 * if the list is circular, these will ultimately point to the
3972 * same element. You may recognize this little trick as the
3973 * answer to a stupid interview question -- one that always
3974 * seems to be asked by those who had to have it laboriously
3975 * explained to them, and who can't even concisely describe
3976 * the conditions under which one would be forced to resort to
3977 * this technique. Needless to say, those conditions are
3978 * found here -- and probably only here. Is this the only use
3979 * of this infamous trick in shipping, production code? If it
3980 * isn't, it probably should be...
3981 */
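/*
 * (Why it works, for the record: the walker advances one element
 * per iteration while the scout advances two, so on a circular
 * list the scout gains on the walker by one element per step and
 * the two must eventually coincide.)
 */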
3982 if (minor != -1) {
3983 uintptr_t maddr = dtrace_loadptr(daddr +
3984 offsetof(struct dev_info, devi_minor));
3985
3986 uintptr_t next = offsetof(struct ddi_minor_data, next);
3987 uintptr_t name = offsetof(struct ddi_minor_data,
3988 d_minor) + offsetof(struct ddi_minor, name);
3989 uintptr_t dev = offsetof(struct ddi_minor_data,
3990 d_minor) + offsetof(struct ddi_minor, dev);
3991 uintptr_t scout;
3992
3993 if (maddr != NULL)
3994 scout = dtrace_loadptr(maddr + next);
3995
3996 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3997 uint64_t m;
3998#ifdef _LP64
3999 m = dtrace_load64(maddr + dev) & MAXMIN64;
4000#else
4001 m = dtrace_load32(maddr + dev) & MAXMIN;
4002#endif
4003 if (m != minor) {
4004 maddr = dtrace_loadptr(maddr + next);
4005
4006 if (scout == NULL)
4007 continue;
4008
4009 scout = dtrace_loadptr(scout + next);
4010
4011 if (scout == NULL)
4012 continue;
4013
4014 scout = dtrace_loadptr(scout + next);
4015
4016 if (scout == NULL)
4017 continue;
4018
4019 if (scout == maddr) {
4020 *flags |= CPU_DTRACE_ILLOP;
4021 break;
4022 }
4023
4024 continue;
4025 }
4026
4027 /*
4028 * We have the minor data. Now we need to
4029 * copy the minor's name into the end of the
4030 * pathname.
4031 */
4032 s = (char *)dtrace_loadptr(maddr + name);
4033 len = dtrace_strlen(s, size);
4034
4035 if (*flags & CPU_DTRACE_FAULT)
4036 break;
4037
4038 if (len != 0) {
4039 if ((end -= (len + 1)) < start)
4040 break;
4041
4042 *end = ':';
4043 }
4044
4045 for (i = 1; i <= len; i++)
4046 end[i] = dtrace_load8((uintptr_t)s++);
4047 break;
4048 }
4049 }
4050
4051 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
4052 ddi_node_state_t devi_state;
4053
4054 devi_state = dtrace_load32(daddr +
4055 offsetof(struct dev_info, devi_node_state));
4056
4057 if (*flags & CPU_DTRACE_FAULT)
4058 break;
4059
4060 if (devi_state >= DS_INITIALIZED) {
4061 s = (char *)dtrace_loadptr(daddr +
4062 offsetof(struct dev_info, devi_addr));
4063 len = dtrace_strlen(s, size);
4064
4065 if (*flags & CPU_DTRACE_FAULT)
4066 break;
4067
4068 if (len != 0) {
4069 if ((end -= (len + 1)) < start)
4070 break;
4071
4072 *end = '@';
4073 }
4074
4075 for (i = 1; i <= len; i++)
4076 end[i] = dtrace_load8((uintptr_t)s++);
4077 }
4078
4079 /*
4080 * Now for the node name...
4081 */
4082 s = (char *)dtrace_loadptr(daddr +
4083 offsetof(struct dev_info, devi_node_name));
4084
4085 daddr = dtrace_loadptr(daddr +
4086 offsetof(struct dev_info, devi_parent));
4087
4088 /*
4089 * If our parent is NULL (that is, if we're the root
4090 * node), we're going to use the special path
4091 * "devices".
4092 */
4093 if (daddr == NULL)
4094 s = "devices";
4095
4096 len = dtrace_strlen(s, size);
4097 if (*flags & CPU_DTRACE_FAULT)
4098 break;
4099
4100 if ((end -= (len + 1)) < start)
4101 break;
4102
4103 for (i = 1; i <= len; i++)
4104 end[i] = dtrace_load8((uintptr_t)s++);
4105 *end = '/';
4106
4107 if (depth++ > dtrace_devdepth_max) {
4108 *flags |= CPU_DTRACE_ILLOP;
4109 break;
4110 }
4111 }
4112
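/*
 * The pathname is assembled back-to-front; a typical result
 * (illustrative, names ours) looks like
 * "/devices/pci@0,0/disk@0,0:a" -- "node@addr" components
 * joined by '/', with the minor name appended after ':'.
 */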
4113 if (end < start)
4114 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4115
4116 if (daddr == NULL) {
4117 regs[rd] = (uintptr_t)end;
4118 mstate->dtms_scratch_ptr += size;
4119 }
4120
4121#else
4122 regs[rd] = 0;
4123 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4124#endif
4125 break;
4126 }
4127
4128 case DIF_SUBR_STRJOIN: {
4129 char *d = (char *)mstate->dtms_scratch_ptr;
4130 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4131 uintptr_t s1 = tupregs[0].dttk_value;
4132 uintptr_t s2 = tupregs[1].dttk_value;
4133 VBDTTYPE(unsigned,int) i = 0;
4134
4135 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
4136 !dtrace_strcanload(s2, size, mstate, vstate)) {
4137 regs[rd] = NULL;
4138 break;
4139 }
4140
4141 if (!DTRACE_INSCRATCH(mstate, size)) {
4142 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4143 regs[rd] = NULL;
4144 break;
4145 }
4146
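/*
 * E.g. (example ours): strjoin("foo", "bar") => "foobar". Note
 * that only the bytes actually used (i, below) are consumed from
 * scratch, not the full string size.
 */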
4147 for (;;) {
4148 if (i >= size) {
4149 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4150 regs[rd] = NULL;
4151 break;
4152 }
4153
4154 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
4155 i--;
4156 break;
4157 }
4158 }
4159
4160 for (;;) {
4161 if (i >= size) {
4162 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4163 regs[rd] = NULL;
4164 break;
4165 }
4166
4167 if ((d[i++] = dtrace_load8(s2++)) == '\0')
4168 break;
4169 }
4170
4171 if (i < size) {
4172 mstate->dtms_scratch_ptr += i;
4173 regs[rd] = (uintptr_t)d;
4174 }
4175
4176 break;
4177 }
4178
4179 case DIF_SUBR_LLTOSTR: {
4180 int64_t i = (int64_t)tupregs[0].dttk_value;
4181 int64_t val = i < 0 ? i * -1 : i;
4182 uint64_t size = 22; /* enough room for 2^64 in decimal */
4183 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
4184
4185 if (!DTRACE_INSCRATCH(mstate, size)) {
4186 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4187 regs[rd] = NULL;
4188 break;
4189 }
4190
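/*
 * Digits are generated least-significant first into the end of
 * the buffer, e.g. (examples ours) lltostr(-42) yields "-42" and
 * lltostr(0) yields "0".
 */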
4191 for (*end-- = '\0'; val; val /= 10)
4192 *end-- = '0' + (val % 10);
4193
4194 if (i == 0)
4195 *end-- = '0';
4196
4197 if (i < 0)
4198 *end-- = '-';
4199
4200 regs[rd] = (uintptr_t)end + 1;
4201 mstate->dtms_scratch_ptr += size;
4202 break;
4203 }
4204
4205 case DIF_SUBR_HTONS:
4206 case DIF_SUBR_NTOHS:
4207#ifdef _BIG_ENDIAN
4208 regs[rd] = (uint16_t)tupregs[0].dttk_value;
4209#else
4210 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
4211#endif
4212 break;
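/*
 * (E.g. -- illustration ours -- on a little-endian host
 * htons(0x1234) yields 0x3412; on a big-endian host the value
 * is returned unchanged. The 32- and 64-bit variants below
 * behave analogously.)
 */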
4213
4214
4215 case DIF_SUBR_HTONL:
4216 case DIF_SUBR_NTOHL:
4217#ifdef _BIG_ENDIAN
4218 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4219#else
4220 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4221#endif
4222 break;
4223
4224
4225 case DIF_SUBR_HTONLL:
4226 case DIF_SUBR_NTOHLL:
4227#ifdef _BIG_ENDIAN
4228 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4229#else
4230 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
4231#endif
4232 break;
4233
4234
4235 case DIF_SUBR_DIRNAME:
4236 case DIF_SUBR_BASENAME: {
4237 char *dest = (char *)mstate->dtms_scratch_ptr;
4238 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4239 uintptr_t src = tupregs[0].dttk_value;
4240 int i, j, len = VBDTCAST(int)dtrace_strlen((char *)src, size);
4241 int lastbase = -1, firstbase = -1, lastdir = -1;
4242 int start, end;
4243
4244 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4245 regs[rd] = NULL;
4246 break;
4247 }
4248
4249 if (!DTRACE_INSCRATCH(mstate, size)) {
4250 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4251 regs[rd] = NULL;
4252 break;
4253 }
4254
4255 /*
4256 * The basename and dirname for a zero-length string are
4257 * defined to be "."
4258 */
4259 if (len == 0) {
4260 len = 1;
4261 src = (uintptr_t)".";
4262 }
4263
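/*
 * The expected results, for illustration (examples ours):
 *
 *     basename("/usr/lib/")  => "lib"
 *     dirname("/usr/lib/")   => "/usr"
 *     dirname("foo")         => "."
 *     basename("///")        => "/"
 */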
4264 /*
4265 * Start from the back of the string, moving back toward the
4266 * front until we see a character that isn't a slash. That
4267 * character is the last character in the basename.
4268 */
4269 for (i = len - 1; i >= 0; i--) {
4270 if (dtrace_load8(src + i) != '/')
4271 break;
4272 }
4273
4274 if (i >= 0)
4275 lastbase = i;
4276
4277 /*
4278 * Starting from the last character in the basename, move
4279 * towards the front until we find a slash. The character
4280 * that we processed immediately before that is the first
4281 * character in the basename.
4282 */
4283 for (; i >= 0; i--) {
4284 if (dtrace_load8(src + i) == '/')
4285 break;
4286 }
4287
4288 if (i >= 0)
4289 firstbase = i + 1;
4290
4291 /*
4292 * Now keep going until we find a non-slash character. That
4293 * character is the last character in the dirname.
4294 */
4295 for (; i >= 0; i--) {
4296 if (dtrace_load8(src + i) != '/')
4297 break;
4298 }
4299
4300 if (i >= 0)
4301 lastdir = i;
4302
4303 ASSERT(!(lastbase == -1 && firstbase != -1));
4304 ASSERT(!(firstbase == -1 && lastdir != -1));
4305
4306 if (lastbase == -1) {
4307 /*
4308 * We didn't find a non-slash character. We know that
4309 * the length is non-zero, so the whole string must be
4310 * slashes. In either the dirname or the basename
4311 * case, we return '/'.
4312 */
4313 ASSERT(firstbase == -1);
4314 firstbase = lastbase = lastdir = 0;
4315 }
4316
4317 if (firstbase == -1) {
4318 /*
4319 * The entire string consists only of a basename
4320 * component. If we're looking for dirname, we need
4321 * to change our string to be just "."; if we're
4322 * looking for a basename, we'll just set the first
4323 * character of the basename to be 0.
4324 */
4325 if (subr == DIF_SUBR_DIRNAME) {
4326 ASSERT(lastdir == -1);
4327 src = (uintptr_t)".";
4328 lastdir = 0;
4329 } else {
4330 firstbase = 0;
4331 }
4332 }
4333
4334 if (subr == DIF_SUBR_DIRNAME) {
4335 if (lastdir == -1) {
4336 /*
4337 * We know that we have a slash in the name --
4338 * or lastdir would be set to 0, above. And
4339 * because lastdir is -1, we know that this
4340 * slash must be the first character. (That
4341 * is, the full string must be of the form
4342 * "/basename".) In this case, the last
4343 * character of the directory name is 0.
4344 */
4345 lastdir = 0;
4346 }
4347
4348 start = 0;
4349 end = lastdir;
4350 } else {
4351 ASSERT(subr == DIF_SUBR_BASENAME);
4352 ASSERT(firstbase != -1 && lastbase != -1);
4353 start = firstbase;
4354 end = lastbase;
4355 }
4356
4357 for (i = start, j = 0; i <= end && VBDTCAST(unsigned)j < size - 1; i++, j++)
4358 dest[j] = dtrace_load8(src + i);
4359
4360 dest[j] = '\0';
4361 regs[rd] = (uintptr_t)dest;
4362 mstate->dtms_scratch_ptr += size;
4363 break;
4364 }
4365
4366 case DIF_SUBR_CLEANPATH: {
4367 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4368 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4369 uintptr_t src = tupregs[0].dttk_value;
4370 int i = 0, j = 0;
4371
4372 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4373 regs[rd] = NULL;
4374 break;
4375 }
4376
4377 if (!DTRACE_INSCRATCH(mstate, size)) {
4378 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4379 regs[rd] = NULL;
4380 break;
4381 }
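/*
 * E.g. (examples ours):
 *
 *     cleanpath("/a/./b//c/../d")  => "/a/b/d"
 *     cleanpath("/a/..")           => "/"
 */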
4382
4383 /*
4384 * Move forward, loading each character.
4385 */
4386 do {
4387 c = dtrace_load8(src + i++);
4388next:
4389 if (j + 5 >= VBDTCAST(int64_t)size) /* 5 = strlen("/..c\0") */
4390 break;
4391
4392 if (c != '/') {
4393 dest[j++] = c;
4394 continue;
4395 }
4396
4397 c = dtrace_load8(src + i++);
4398
4399 if (c == '/') {
4400 /*
4401 * We have two slashes -- we can just advance
4402 * to the next character.
4403 */
4404 goto next;
4405 }
4406
4407 if (c != '.') {
4408 /*
4409 * This is not "." and it's not ".." -- we can
4410 * just store the "/" and this character and
4411 * drive on.
4412 */
4413 dest[j++] = '/';
4414 dest[j++] = c;
4415 continue;
4416 }
4417
4418 c = dtrace_load8(src + i++);
4419
4420 if (c == '/') {
4421 /*
4422 * This is a "/./" component. We're not going
4423 * to store anything in the destination buffer;
4424 * we're just going to go to the next component.
4425 */
4426 goto next;
4427 }
4428
4429 if (c != '.') {
4430 /*
4431 * This is not ".." -- we can just store the
4432 * "/." and this character and continue
4433 * processing.
4434 */
4435 dest[j++] = '/';
4436 dest[j++] = '.';
4437 dest[j++] = c;
4438 continue;
4439 }
4440
4441 c = dtrace_load8(src + i++);
4442
4443 if (c != '/' && c != '\0') {
4444 /*
4445 * This is not ".." -- it's "..[mumble]".
4446 * We'll store the "/.." and this character
4447 * and continue processing.
4448 */
4449 dest[j++] = '/';
4450 dest[j++] = '.';
4451 dest[j++] = '.';
4452 dest[j++] = c;
4453 continue;
4454 }
4455
4456 /*
4457 * This is "/../" or "/..\0". We need to back up
4458 * our destination pointer until we find a "/".
4459 */
4460 i--;
4461 while (j != 0 && dest[--j] != '/')
4462 continue;
4463
4464 if (c == '\0')
4465 dest[++j] = '/';
4466 } while (c != '\0');
4467
4468 dest[j] = '\0';
4469 regs[rd] = (uintptr_t)dest;
4470 mstate->dtms_scratch_ptr += size;
4471 break;
4472 }
4473
4474 case DIF_SUBR_INET_NTOA:
4475 case DIF_SUBR_INET_NTOA6:
4476 case DIF_SUBR_INET_NTOP: {
4477#ifndef VBOX
4478 size_t size;
4479 int af, argi, i;
4480 char *base, *end;
4481
4482 if (subr == DIF_SUBR_INET_NTOP) {
4483 af = (int)tupregs[0].dttk_value;
4484 argi = 1;
4485 } else {
4486 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4487 argi = 0;
4488 }
4489
4490 if (af == AF_INET) {
4491 ipaddr_t ip4;
4492 uint8_t *ptr8, val;
4493
4494 /*
4495 * Safely load the IPv4 address.
4496 */
4497 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4498
4499 /*
4500 * Check an IPv4 string will fit in scratch.
4501 */
4502 size = INET_ADDRSTRLEN;
4503 if (!DTRACE_INSCRATCH(mstate, size)) {
4504 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4505 regs[rd] = NULL;
4506 break;
4507 }
4508 base = (char *)mstate->dtms_scratch_ptr;
4509 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4510
4511 /*
4512 * Stringify as a dotted decimal quad.
4513 */
4514 *end-- = '\0';
4515 ptr8 = (uint8_t *)&ip4;
4516 for (i = 3; i >= 0; i--) {
4517 val = ptr8[i];
4518
4519 if (val == 0) {
4520 *end-- = '0';
4521 } else {
4522 for (; val; val /= 10) {
4523 *end-- = '0' + (val % 10);
4524 }
4525 }
4526
4527 if (i > 0)
4528 *end-- = '.';
4529 }
4530 ASSERT(end + 1 >= base);
4531
4532 } else if (af == AF_INET6) {
4533 struct in6_addr ip6;
4534 int firstzero, tryzero, numzero, v6end;
4535 uint16_t val;
4536 const char digits[] = "0123456789abcdef";
4537
4538 /*
4539 * Stringify using RFC 1884 convention 2 - 16 bit
4540 * hexadecimal values with a zero-run compression.
4541 * Lower case hexadecimal digits are used.
4542 * eg, fe80::214:4fff:fe0b:76c8.
4543 * The IPv4 embedded form is returned for inet_ntop,
4544 * just the IPv4 string is returned for inet_ntoa6.
4545 */
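/*
 * E.g. (example ours): the v4-mapped address ::ffff:10.1.2.3
 * comes back as "::ffff:10.1.2.3" from inet_ntop(AF_INET6, ...)
 * but as "10.1.2.3" from inet_ntoa6().
 */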
4546
4547 /*
4548 * Safely load the IPv6 address.
4549 */
4550 dtrace_bcopy(
4551 (void *)(uintptr_t)tupregs[argi].dttk_value,
4552 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4553
4554 /*
4555 * Check an IPv6 string will fit in scratch.
4556 */
4557 size = INET6_ADDRSTRLEN;
4558 if (!DTRACE_INSCRATCH(mstate, size)) {
4559 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4560 regs[rd] = NULL;
4561 break;
4562 }
4563 base = (char *)mstate->dtms_scratch_ptr;
4564 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4565 *end-- = '\0';
4566
4567 /*
4568 * Find the longest run of 16 bit zero values
4569 * for the single allowed zero compression - "::".
4570 */
4571 firstzero = -1;
4572 tryzero = -1;
4573 numzero = 1;
4574 for (i = 0; i < sizeof (struct in6_addr); i++) {
4575 if (ip6._S6_un._S6_u8[i] == 0 &&
4576 tryzero == -1 && i % 2 == 0) {
4577 tryzero = i;
4578 continue;
4579 }
4580
4581 if (tryzero != -1 &&
4582 (ip6._S6_un._S6_u8[i] != 0 ||
4583 i == sizeof (struct in6_addr) - 1)) {
4584
4585 if (i - tryzero <= numzero) {
4586 tryzero = -1;
4587 continue;
4588 }
4589
4590 firstzero = tryzero;
4591 numzero = i - i % 2 - tryzero;
4592 tryzero = -1;
4593
4594 if (ip6._S6_un._S6_u8[i] == 0 &&
4595 i == sizeof (struct in6_addr) - 1)
4596 numzero += 2;
4597 }
4598 }
4599 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4600
4601 /*
4602 * Check for an IPv4 embedded address.
4603 */
4604 v6end = sizeof (struct in6_addr) - 2;
4605 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4606 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4607 for (i = sizeof (struct in6_addr) - 1;
4608 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4609 ASSERT(end >= base);
4610
4611 val = ip6._S6_un._S6_u8[i];
4612
4613 if (val == 0) {
4614 *end-- = '0';
4615 } else {
4616 for (; val; val /= 10) {
4617 *end-- = '0' + val % 10;
4618 }
4619 }
4620
4621 if (i > DTRACE_V4MAPPED_OFFSET)
4622 *end-- = '.';
4623 }
4624
4625 if (subr == DIF_SUBR_INET_NTOA6)
4626 goto inetout;
4627
4628 /*
4629 * Set v6end to skip the IPv4 address that
4630 * we have already stringified.
4631 */
4632 v6end = 10;
4633 }
4634
4635 /*
4636 * Build the IPv6 string by working through the
4637 * address in reverse.
4638 */
4639 for (i = v6end; i >= 0; i -= 2) {
4640 ASSERT(end >= base);
4641
4642 if (i == firstzero + numzero - 2) {
4643 *end-- = ':';
4644 *end-- = ':';
4645 i -= numzero - 2;
4646 continue;
4647 }
4648
4649 if (i < 14 && i != firstzero - 2)
4650 *end-- = ':';
4651
4652 val = (ip6._S6_un._S6_u8[i] << 8) +
4653 ip6._S6_un._S6_u8[i + 1];
4654
4655 if (val == 0) {
4656 *end-- = '0';
4657 } else {
4658 for (; val; val /= 16) {
4659 *end-- = digits[val % 16];
4660 }
4661 }
4662 }
4663 ASSERT(end + 1 >= base);
4664
4665 } else {
4666 /*
4667 * The user didn't use AF_INET or AF_INET6.
4668 */
4669 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4670 regs[rd] = NULL;
4671 break;
4672 }
4673
4674inetout: regs[rd] = (uintptr_t)end + 1;
4675 mstate->dtms_scratch_ptr += size;
4676#else /* VBOX */
4677 regs[rd] = 0;
4678 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4679#endif /* VBOX */
4680 break;
4681 }
4682
4683 }
4684}
4685
4686/*
4687 * Emulate the execution of DTrace IR instructions specified by the given
4688 * DIF object. This function is deliberately void of assertions as all of
4689 * the necessary checks are handled by a call to dtrace_difo_validate().
4690 */
4691static uint64_t
4692dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4693 dtrace_vstate_t *vstate, dtrace_state_t *state)
4694{
4695 const dif_instr_t *text = difo->dtdo_buf;
4696 const uint_t textlen = difo->dtdo_len;
4697 const char *strtab = difo->dtdo_strtab;
4698 const uint64_t *inttab = difo->dtdo_inttab;
4699
4700 uint64_t rval = 0;
4701 dtrace_statvar_t *svar;
4702 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4703 dtrace_difv_t *v;
4704 volatile uint16_t *flags = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
4705 volatile uintptr_t *illval = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval;
4706
4707 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4708 uint64_t regs[DIF_DIR_NREGS];
4709 uint64_t *tmp;
4710
4711 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4712 int64_t cc_r;
4713 uint_t pc = 0, id, opc VBDTUNASS(0);
4714 uint8_t ttop = 0;
4715 dif_instr_t instr;
4716 uint_t r1, r2, rd;
4717
4718 /*
4719 * We stash the current DIF object into the machine state: we need it
4720 * for subsequent access checking.
4721 */
4722 mstate->dtms_difo = difo;
4723
4724 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4725
4726 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4727 opc = pc;
4728
4729 instr = text[pc++];
4730 r1 = DIF_INSTR_R1(instr);
4731 r2 = DIF_INSTR_R2(instr);
4732 rd = DIF_INSTR_RD(instr);
4733
4734 switch (DIF_INSTR_OP(instr)) {
4735 case DIF_OP_OR:
4736 regs[rd] = regs[r1] | regs[r2];
4737 break;
4738 case DIF_OP_XOR:
4739 regs[rd] = regs[r1] ^ regs[r2];
4740 break;
4741 case DIF_OP_AND:
4742 regs[rd] = regs[r1] & regs[r2];
4743 break;
4744 case DIF_OP_SLL:
4745 regs[rd] = regs[r1] << regs[r2];
4746 break;
4747 case DIF_OP_SRL:
4748 regs[rd] = regs[r1] >> regs[r2];
4749 break;
4750 case DIF_OP_SUB:
4751 regs[rd] = regs[r1] - regs[r2];
4752 break;
4753 case DIF_OP_ADD:
4754 regs[rd] = regs[r1] + regs[r2];
4755 break;
4756 case DIF_OP_MUL:
4757 regs[rd] = regs[r1] * regs[r2];
4758 break;
4759 case DIF_OP_SDIV:
4760 if (regs[r2] == 0) {
4761 regs[rd] = 0;
4762 *flags |= CPU_DTRACE_DIVZERO;
4763 } else {
4764 regs[rd] = (int64_t)regs[r1] /
4765 (int64_t)regs[r2];
4766 }
4767 break;
4768
4769 case DIF_OP_UDIV:
4770 if (regs[r2] == 0) {
4771 regs[rd] = 0;
4772 *flags |= CPU_DTRACE_DIVZERO;
4773 } else {
4774 regs[rd] = regs[r1] / regs[r2];
4775 }
4776 break;
4777
4778 case DIF_OP_SREM:
4779 if (regs[r2] == 0) {
4780 regs[rd] = 0;
4781 *flags |= CPU_DTRACE_DIVZERO;
4782 } else {
4783 regs[rd] = (int64_t)regs[r1] %
4784 (int64_t)regs[r2];
4785 }
4786 break;
4787
4788 case DIF_OP_UREM:
4789 if (regs[r2] == 0) {
4790 regs[rd] = 0;
4791 *flags |= CPU_DTRACE_DIVZERO;
4792 } else {
4793 regs[rd] = regs[r1] % regs[r2];
4794 }
4795 break;
4796
4797 case DIF_OP_NOT:
4798 regs[rd] = ~regs[r1];
4799 break;
4800 case DIF_OP_MOV:
4801 regs[rd] = regs[r1];
4802 break;
4803 case DIF_OP_CMP:
4804 cc_r = regs[r1] - regs[r2];
4805 cc_n = cc_r < 0;
4806 cc_z = cc_r == 0;
4807 cc_v = 0;
4808 cc_c = regs[r1] < regs[r2];
4809 break;
4810 case DIF_OP_TST:
4811 cc_n = cc_v = cc_c = 0;
4812 cc_z = regs[r1] == 0;
4813 break;
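/*
 * The conditional branches below test the condition codes the
 * way a CPU would; e.g. (illustration ours) BG branches when
 * !(Z | (N ^ V)) -- signed greater-than -- while BGU branches
 * when !(C | Z) -- unsigned greater-than.
 */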
4814 case DIF_OP_BA:
4815 pc = DIF_INSTR_LABEL(instr);
4816 break;
4817 case DIF_OP_BE:
4818 if (cc_z)
4819 pc = DIF_INSTR_LABEL(instr);
4820 break;
4821 case DIF_OP_BNE:
4822 if (cc_z == 0)
4823 pc = DIF_INSTR_LABEL(instr);
4824 break;
4825 case DIF_OP_BG:
4826 if ((cc_z | (cc_n ^ cc_v)) == 0)
4827 pc = DIF_INSTR_LABEL(instr);
4828 break;
4829 case DIF_OP_BGU:
4830 if ((cc_c | cc_z) == 0)
4831 pc = DIF_INSTR_LABEL(instr);
4832 break;
4833 case DIF_OP_BGE:
4834 if ((cc_n ^ cc_v) == 0)
4835 pc = DIF_INSTR_LABEL(instr);
4836 break;
4837 case DIF_OP_BGEU:
4838 if (cc_c == 0)
4839 pc = DIF_INSTR_LABEL(instr);
4840 break;
4841 case DIF_OP_BL:
4842 if (cc_n ^ cc_v)
4843 pc = DIF_INSTR_LABEL(instr);
4844 break;
4845 case DIF_OP_BLU:
4846 if (cc_c)
4847 pc = DIF_INSTR_LABEL(instr);
4848 break;
4849 case DIF_OP_BLE:
4850 if (cc_z | (cc_n ^ cc_v))
4851 pc = DIF_INSTR_LABEL(instr);
4852 break;
4853 case DIF_OP_BLEU:
4854 if (cc_c | cc_z)
4855 pc = DIF_INSTR_LABEL(instr);
4856 break;
4857 case DIF_OP_RLDSB:
4858 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4859 *flags |= CPU_DTRACE_KPRIV;
4860 *illval = regs[r1];
4861 break;
4862 }
4863 /*FALLTHROUGH*/
4864 case DIF_OP_LDSB:
4865 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4866 break;
4867 case DIF_OP_RLDSH:
4868 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4869 *flags |= CPU_DTRACE_KPRIV;
4870 *illval = regs[r1];
4871 break;
4872 }
4873 /*FALLTHROUGH*/
4874 case DIF_OP_LDSH:
4875 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4876 break;
4877 case DIF_OP_RLDSW:
4878 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4879 *flags |= CPU_DTRACE_KPRIV;
4880 *illval = regs[r1];
4881 break;
4882 }
4883 /*FALLTHROUGH*/
4884 case DIF_OP_LDSW:
4885 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4886 break;
4887 case DIF_OP_RLDUB:
4888 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4889 *flags |= CPU_DTRACE_KPRIV;
4890 *illval = regs[r1];
4891 break;
4892 }
4893 /*FALLTHROUGH*/
4894 case DIF_OP_LDUB:
4895 regs[rd] = dtrace_load8(regs[r1]);
4896 break;
4897 case DIF_OP_RLDUH:
4898 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4899 *flags |= CPU_DTRACE_KPRIV;
4900 *illval = regs[r1];
4901 break;
4902 }
4903 /*FALLTHROUGH*/
4904 case DIF_OP_LDUH:
4905 regs[rd] = dtrace_load16(regs[r1]);
4906 break;
4907 case DIF_OP_RLDUW:
4908 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4909 *flags |= CPU_DTRACE_KPRIV;
4910 *illval = regs[r1];
4911 break;
4912 }
4913 /*FALLTHROUGH*/
4914 case DIF_OP_LDUW:
4915 regs[rd] = dtrace_load32(regs[r1]);
4916 break;
4917 case DIF_OP_RLDX:
4918 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4919 *flags |= CPU_DTRACE_KPRIV;
4920 *illval = regs[r1];
4921 break;
4922 }
4923 /*FALLTHROUGH*/
4924 case DIF_OP_LDX:
4925 regs[rd] = dtrace_load64(regs[r1]);
4926 break;
4927 case DIF_OP_ULDSB:
4928 regs[rd] = (int8_t)
4929 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4930 break;
4931 case DIF_OP_ULDSH:
4932 regs[rd] = (int16_t)
4933 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4934 break;
4935 case DIF_OP_ULDSW:
4936 regs[rd] = (int32_t)
4937 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
4938 break;
4939 case DIF_OP_ULDUB:
4940 regs[rd] =
4941 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4942 break;
4943 case DIF_OP_ULDUH:
4944 regs[rd] =
4945 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4946 break;
4947 case DIF_OP_ULDUW:
4948 regs[rd] =
4949 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
4950 break;
4951 case DIF_OP_ULDX:
4952 regs[rd] =
4953 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
4954 break;
4955 case DIF_OP_RET:
4956 rval = regs[rd];
4957 pc = textlen;
4958 break;
4959 case DIF_OP_NOP:
4960 break;
4961 case DIF_OP_SETX:
4962 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
4963 break;
4964 case DIF_OP_SETS:
4965 regs[rd] = (uint64_t)(uintptr_t)
4966 (strtab + DIF_INSTR_STRING(instr));
4967 break;
4968 case DIF_OP_SCMP: {
4969 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
4970 uintptr_t s1 = regs[r1];
4971 uintptr_t s2 = regs[r2];
4972
4973 if (s1 != NULL &&
4974 !dtrace_strcanload(s1, sz, mstate, vstate))
4975 break;
4976 if (s2 != NULL &&
4977 !dtrace_strcanload(s2, sz, mstate, vstate))
4978 break;
4979
4980 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
4981
4982 cc_n = cc_r < 0;
4983 cc_z = cc_r == 0;
4984 cc_v = cc_c = 0;
4985 break;
4986 }
4987 case DIF_OP_LDGA:
4988 regs[rd] = dtrace_dif_variable(mstate, state,
4989 r1, regs[r2]);
4990 break;
4991 case DIF_OP_LDGS:
4992 id = DIF_INSTR_VAR(instr);
4993
4994 if (id >= DIF_VAR_OTHER_UBASE) {
4995 uintptr_t a;
4996
4997 id -= DIF_VAR_OTHER_UBASE;
4998 svar = vstate->dtvs_globals[id];
4999 ASSERT(svar != NULL);
5000 v = &svar->dtsv_var;
5001
5002 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
5003 regs[rd] = svar->dtsv_data;
5004 break;
5005 }
5006
5007 a = (uintptr_t)svar->dtsv_data;
5008
5009 if (*(uint8_t *)a == UINT8_MAX) {
5010 /*
5011 * If the 0th byte is set to UINT8_MAX
5012 * then this is to be treated as a
5013 * reference to a NULL variable.
5014 */
5015 regs[rd] = NULL;
5016 } else {
5017 regs[rd] = a + sizeof (uint64_t);
5018 }
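/*
 * (The payload is offset by sizeof (uint64_t) rather than
 * one byte so that the data after the NULL-flag byte
 * stays 8-byte aligned -- our inference from the code,
 * not a statement in the original source.)
 */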
5019
5020 break;
5021 }
5022
5023 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
5024 break;
5025
5026 case DIF_OP_STGS:
5027 id = DIF_INSTR_VAR(instr);
5028
5029 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5030 id -= DIF_VAR_OTHER_UBASE;
5031
5032 svar = vstate->dtvs_globals[id];
5033 ASSERT(svar != NULL);
5034 v = &svar->dtsv_var;
5035
5036 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5037 uintptr_t a = (uintptr_t)svar->dtsv_data;
5038
5039 ASSERT(a != NULL);
5040 ASSERT(svar->dtsv_size != 0);
5041
5042 if (regs[rd] == NULL) {
5043 *(uint8_t *)a = UINT8_MAX;
5044 break;
5045 } else {
5046 *(uint8_t *)a = 0;
5047 a += sizeof (uint64_t);
5048 }
5049 if (!dtrace_vcanload(
5050 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5051 mstate, vstate))
5052 break;
5053
5054 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5055 (void *)a, &v->dtdv_type);
5056 break;
5057 }
5058
5059 svar->dtsv_data = regs[rd];
5060 break;
5061
5062 case DIF_OP_LDTA:
5063 /*
5064 * There are no DTrace built-in thread-local arrays at
5065 * present. This opcode is saved for future work.
5066 */
5067 *flags |= CPU_DTRACE_ILLOP;
5068 regs[rd] = 0;
5069 break;
5070
5071 case DIF_OP_LDLS:
5072 id = DIF_INSTR_VAR(instr);
5073
5074 if (id < DIF_VAR_OTHER_UBASE) {
5075 /*
5076 * For now, this has no meaning.
5077 */
5078 regs[rd] = 0;
5079 break;
5080 }
5081
5082 id -= DIF_VAR_OTHER_UBASE;
5083
5084 ASSERT(VBDTCAST(int64_t)id < vstate->dtvs_nlocals);
5085 ASSERT(vstate->dtvs_locals != NULL);
5086
5087 svar = vstate->dtvs_locals[id];
5088 ASSERT(svar != NULL);
5089 v = &svar->dtsv_var;
5090
5091 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5092 uintptr_t a = (uintptr_t)svar->dtsv_data;
5093 size_t sz = v->dtdv_type.dtdt_size;
5094
5095 sz += sizeof (uint64_t);
5096 ASSERT(svar->dtsv_size == NCPU * sz);
5097 a += VBDT_GET_CPUID() * sz;
5098
5099 if (*(uint8_t *)a == UINT8_MAX) {
5100 /*
5101 * If the 0th byte is set to UINT8_MAX
5102 * then this is to be treated as a
5103 * reference to a NULL variable.
5104 */
5105 regs[rd] = NULL;
5106 } else {
5107 regs[rd] = a + sizeof (uint64_t);
5108 }
5109
5110 break;
5111 }
5112
5113 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5114 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5115 regs[rd] = tmp[VBDT_GET_CPUID()];
5116 break;
5117
5118 case DIF_OP_STLS:
5119 id = DIF_INSTR_VAR(instr);
5120
5121 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5122 id -= DIF_VAR_OTHER_UBASE;
5123 ASSERT(VBDTCAST(int64_t)id < vstate->dtvs_nlocals);
5124
5125 ASSERT(vstate->dtvs_locals != NULL);
5126 svar = vstate->dtvs_locals[id];
5127 ASSERT(svar != NULL);
5128 v = &svar->dtsv_var;
5129
5130 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5131 uintptr_t a = (uintptr_t)svar->dtsv_data;
5132 size_t sz = v->dtdv_type.dtdt_size;
5133
5134 sz += sizeof (uint64_t);
5135 ASSERT(svar->dtsv_size == NCPU * sz);
5136 a += VBDT_GET_CPUID() * sz;
5137
5138 if (regs[rd] == NULL) {
5139 *(uint8_t *)a = UINT8_MAX;
5140 break;
5141 } else {
5142 *(uint8_t *)a = 0;
5143 a += sizeof (uint64_t);
5144 }
5145
5146 if (!dtrace_vcanload(
5147 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5148 mstate, vstate))
5149 break;
5150
5151 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5152 (void *)a, &v->dtdv_type);
5153 break;
5154 }
5155
5156 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
5157 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
5158 tmp[VBDT_GET_CPUID()] = regs[rd];
5159 break;
5160
5161 case DIF_OP_LDTS: {
5162 dtrace_dynvar_t *dvar;
5163 dtrace_key_t *key;
5164
5165 id = DIF_INSTR_VAR(instr);
5166 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5167 id -= DIF_VAR_OTHER_UBASE;
5168 v = &vstate->dtvs_tlocals[id];
5169
5170 key = &tupregs[DIF_DTR_NREGS];
5171 key[0].dttk_value = (uint64_t)id;
5172 key[0].dttk_size = 0;
5173 DTRACE_TLS_THRKEY(key[1].dttk_value);
5174 key[1].dttk_size = 0;
5175
5176 dvar = dtrace_dynvar(dstate, 2, key,
5177 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
5178 mstate, vstate);
5179
5180 if (dvar == NULL) {
5181 regs[rd] = 0;
5182 break;
5183 }
5184
5185 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5186 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5187 } else {
5188 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5189 }
5190
5191 break;
5192 }
5193
5194 case DIF_OP_STTS: {
5195 dtrace_dynvar_t *dvar;
5196 dtrace_key_t *key;
5197
5198 id = DIF_INSTR_VAR(instr);
5199 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5200 id -= DIF_VAR_OTHER_UBASE;
5201
5202 key = &tupregs[DIF_DTR_NREGS];
5203 key[0].dttk_value = (uint64_t)id;
5204 key[0].dttk_size = 0;
5205 DTRACE_TLS_THRKEY(key[1].dttk_value);
5206 key[1].dttk_size = 0;
5207 v = &vstate->dtvs_tlocals[id];
5208
5209 dvar = dtrace_dynvar(dstate, 2, key,
5210 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5211 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5212 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5213 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5214
5215 /*
5216 * Given that we're storing to thread-local data,
5217 * we need to flush our predicate cache.
5218 */
5219 curthread->t_predcache = NULL;
5220
5221 if (dvar == NULL)
5222 break;
5223
5224 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5225 if (!dtrace_vcanload(
5226 (void *)(uintptr_t)regs[rd],
5227 &v->dtdv_type, mstate, vstate))
5228 break;
5229
5230 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5231 dvar->dtdv_data, &v->dtdv_type);
5232 } else {
5233 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5234 }
5235
5236 break;
5237 }
5238
5239 case DIF_OP_SRA:
5240 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5241 break;
5242
5243 case DIF_OP_CALL:
5244 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5245 regs, tupregs, ttop, mstate, state);
5246 break;
5247
5248 case DIF_OP_PUSHTR:
5249 if (ttop == DIF_DTR_NREGS) {
5250 *flags |= CPU_DTRACE_TUPOFLOW;
5251 break;
5252 }
5253
5254 if (r1 == DIF_TYPE_STRING) {
5255 /*
5256 * If this is a string type and the size is 0,
5257 * we'll use the system-wide default string
5258 * size. Note that we are _not_ looking at
5259 * the value of the DTRACEOPT_STRSIZE option;
5260 * had this been set, we would expect to have
5261 * a non-zero size value in the "pushtr".
5262 */
5263 tupregs[ttop].dttk_size =
5264 dtrace_strlen((char *)(uintptr_t)regs[rd],
5265 regs[r2] ? regs[r2] :
5266 dtrace_strsize_default) + 1;
5267 } else {
5268 tupregs[ttop].dttk_size = regs[r2];
5269 }
5270
5271 tupregs[ttop++].dttk_value = regs[rd];
5272 break;
5273
5274 case DIF_OP_PUSHTV:
5275 if (ttop == DIF_DTR_NREGS) {
5276 *flags |= CPU_DTRACE_TUPOFLOW;
5277 break;
5278 }
5279
5280 tupregs[ttop].dttk_value = regs[rd];
5281 tupregs[ttop++].dttk_size = 0;
5282 break;
5283
5284 case DIF_OP_POPTS:
5285 if (ttop != 0)
5286 ttop--;
5287 break;
5288
5289 case DIF_OP_FLUSHTS:
5290 ttop = 0;
5291 break;
5292
5293 case DIF_OP_LDGAA:
5294 case DIF_OP_LDTAA: {
5295 dtrace_dynvar_t *dvar;
5296 dtrace_key_t *key = tupregs;
5297 uint_t nkeys = ttop;
5298
5299 id = DIF_INSTR_VAR(instr);
5300 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5301 id -= DIF_VAR_OTHER_UBASE;
5302
5303 key[nkeys].dttk_value = (uint64_t)id;
5304 key[nkeys++].dttk_size = 0;
5305
5306 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5307 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5308 key[nkeys++].dttk_size = 0;
5309 v = &vstate->dtvs_tlocals[id];
5310 } else {
5311 v = &vstate->dtvs_globals[id]->dtsv_var;
5312 }
5313
5314 dvar = dtrace_dynvar(dstate, nkeys, key,
5315 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5316 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5317 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5318
5319 if (dvar == NULL) {
5320 regs[rd] = 0;
5321 break;
5322 }
5323
5324 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5325 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5326 } else {
5327 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5328 }
5329
5330 break;
5331 }
5332
5333 case DIF_OP_STGAA:
5334 case DIF_OP_STTAA: {
5335 dtrace_dynvar_t *dvar;
5336 dtrace_key_t *key = tupregs;
5337 uint_t nkeys = ttop;
5338
5339 id = DIF_INSTR_VAR(instr);
5340 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5341 id -= DIF_VAR_OTHER_UBASE;
5342
5343 key[nkeys].dttk_value = (uint64_t)id;
5344 key[nkeys++].dttk_size = 0;
5345
5346 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5347 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5348 key[nkeys++].dttk_size = 0;
5349 v = &vstate->dtvs_tlocals[id];
5350 } else {
5351 v = &vstate->dtvs_globals[id]->dtsv_var;
5352 }
5353
5354 dvar = dtrace_dynvar(dstate, nkeys, key,
5355 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5356 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5357 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5358 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5359
5360 if (dvar == NULL)
5361 break;
5362
5363 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5364 if (!dtrace_vcanload(
5365 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5366 mstate, vstate))
5367 break;
5368
5369 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5370 dvar->dtdv_data, &v->dtdv_type);
5371 } else {
5372 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5373 }
5374
5375 break;
5376 }
5377
5378 case DIF_OP_ALLOCS: {
5379 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5380 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5381
5382 /*
5383 * Rounding up the user allocation size could have
5384 * overflowed large, bogus allocations (like -1ULL) to
5385 * 0.
5386 */
5387 if (size < regs[r1] ||
5388 !DTRACE_INSCRATCH(mstate, size)) {
5389 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5390 regs[rd] = NULL;
5391 break;
5392 }
5393
5394 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5395 mstate->dtms_scratch_ptr += size;
5396 regs[rd] = ptr;
5397 break;
5398 }
5399
5400 case DIF_OP_COPYS:
5401 if (!dtrace_canstore(regs[rd], regs[r2],
5402 mstate, vstate)) {
5403 *flags |= CPU_DTRACE_BADADDR;
5404 *illval = regs[rd];
5405 break;
5406 }
5407
5408 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5409 break;
5410
5411 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5412 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5413 break;
5414
5415 case DIF_OP_STB:
5416 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5417 *flags |= CPU_DTRACE_BADADDR;
5418 *illval = regs[rd];
5419 break;
5420 }
5421 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5422 break;
5423
5424 case DIF_OP_STH:
5425 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5426 *flags |= CPU_DTRACE_BADADDR;
5427 *illval = regs[rd];
5428 break;
5429 }
5430 if (regs[rd] & 1) {
5431 *flags |= CPU_DTRACE_BADALIGN;
5432 *illval = regs[rd];
5433 break;
5434 }
5435 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5436 break;
5437
5438 case DIF_OP_STW:
5439 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5440 *flags |= CPU_DTRACE_BADADDR;
5441 *illval = regs[rd];
5442 break;
5443 }
5444 if (regs[rd] & 3) {
5445 *flags |= CPU_DTRACE_BADALIGN;
5446 *illval = regs[rd];
5447 break;
5448 }
5449 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5450 break;
5451
5452 case DIF_OP_STX:
5453 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5454 *flags |= CPU_DTRACE_BADADDR;
5455 *illval = regs[rd];
5456 break;
5457 }
5458 if (regs[rd] & 7) {
5459 *flags |= CPU_DTRACE_BADALIGN;
5460 *illval = regs[rd];
5461 break;
5462 }
5463 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5464 break;
5465 }
5466 }
5467
5468 if (!(*flags & CPU_DTRACE_FAULT))
5469 return (rval);
5470
5471 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5472 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5473
5474 return (0);
5475}
5476
5477#ifndef VBOX /* no destructive stuff */
5478
5479static void
5480dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5481{
5482 dtrace_probe_t *probe = ecb->dte_probe;
5483 dtrace_provider_t *prov = probe->dtpr_provider;
5484 char c[DTRACE_FULLNAMELEN + 80], *str;
5485 char *msg = "dtrace: breakpoint action at probe ";
5486 char *ecbmsg = " (ecb ";
5487 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5488 uintptr_t val = (uintptr_t)ecb;
5489 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5490
5491 if (dtrace_destructive_disallow)
5492 return;
5493
5494 /*
5495 * It's impossible to be taking action on the NULL probe.
5496 */
5497 ASSERT(probe != NULL);
5498
5499 /*
5500 * This is a poor man's (destitute man's?) sprintf(): we want to
5501 * print the provider name, module name, function name and name of
5502 * the probe, along with the hex address of the ECB with the breakpoint
5503 * action -- all of which we must place in the character buffer by
5504 * hand.
5505 */
5506 while (*msg != '\0')
5507 c[i++] = *msg++;
5508
5509 for (str = prov->dtpv_name; *str != '\0'; str++)
5510 c[i++] = *str;
5511 c[i++] = ':';
5512
5513 for (str = probe->dtpr_mod; *str != '\0'; str++)
5514 c[i++] = *str;
5515 c[i++] = ':';
5516
5517 for (str = probe->dtpr_func; *str != '\0'; str++)
5518 c[i++] = *str;
5519 c[i++] = ':';
5520
5521 for (str = probe->dtpr_name; *str != '\0'; str++)
5522 c[i++] = *str;
5523
5524 while (*ecbmsg != '\0')
5525 c[i++] = *ecbmsg++;
5526
5527 while (shift >= 0) {
5528 mask = (uintptr_t)0xf << shift;
5529
5530 if (val >= ((uintptr_t)1 << shift))
5531 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5532 shift -= 4;
5533 }
5534
5535 c[i++] = ')';
5536 c[i] = '\0';
5537
5538 debug_enter(c);
5539}
5540
5541static void
5542dtrace_action_panic(dtrace_ecb_t *ecb)
5543{
5544 dtrace_probe_t *probe = ecb->dte_probe;
5545
5546 /*
5547 * It's impossible to be taking action on the NULL probe.
5548 */
5549 ASSERT(probe != NULL);
5550
5551 if (dtrace_destructive_disallow)
5552 return;
5553
5554 if (dtrace_panicked != NULL)
5555 return;
5556
5557 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5558 return;
5559
5560 /*
5561 * We won the right to panic. (We want to be sure that only one
5562 * thread calls panic() from dtrace_probe(), and that panic() is
5563 * called exactly once.)
5564 */
5565 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5566 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5567 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5568}
5569
5570static void
5571dtrace_action_raise(uint64_t sig)
5572{
5573 if (dtrace_destructive_disallow)
5574 return;
5575
5576 if (sig >= NSIG) {
5577 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5578 return;
5579 }
5580
5581 /*
5582 * raise() has a queue depth of 1 -- we ignore all subsequent
5583 * invocations of the raise() action.
5584 */
5585 if (curthread->t_dtrace_sig == 0)
5586 curthread->t_dtrace_sig = (uint8_t)sig;
5587
5588 curthread->t_sig_check = 1;
5589 aston(curthread);
5590}
5591
5592static void
5593dtrace_action_stop(void)
5594{
5595 if (dtrace_destructive_disallow)
5596 return;
5597
5598 if (!curthread->t_dtrace_stop) {
5599 curthread->t_dtrace_stop = 1;
5600 curthread->t_sig_check = 1;
5601 aston(curthread);
5602 }
5603}
5604
5605static void
5606dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5607{
5608 hrtime_t now;
5609 volatile uint16_t *flags;
5610 cpu_t *cpu = CPU;
5611
5612 if (dtrace_destructive_disallow)
5613 return;
5614
5615 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5616
5617 now = dtrace_gethrtime();
5618
5619 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5620 /*
5621 * We need to advance the mark to the current time.
5622 */
5623 cpu->cpu_dtrace_chillmark = now;
5624 cpu->cpu_dtrace_chilled = 0;
5625 }
5626
5627 /*
5628 * Now check to see if the requested chill time would take us over
5629 * the maximum amount of time allowed in the chill interval. (Or
5630 * worse, if the calculation itself induces overflow.)
5631 */
5632 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5633 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5634 *flags |= CPU_DTRACE_ILLOP;
5635 return;
5636 }
5637
5638 while (dtrace_gethrtime() - now < val)
5639 continue;
5640
5641 /*
5642 * Normally, we assure that the value of the variable "timestamp" does
5643 * not change within an ECB. The presence of chill() represents an
5644 * exception to this rule, however.
5645 */
5646 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5647 cpu->cpu_dtrace_chilled += val;
5648}
5649
5650#endif /* !VBOX */
5651
5652static void
5653dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5654 uint64_t *buf, uint64_t arg)
5655{
5656 int nframes = DTRACE_USTACK_NFRAMES(arg);
5657 int strsize = DTRACE_USTACK_STRSIZE(arg);
5658 uint64_t *pcs = &buf[1], *fps;
5659 char *str = (char *)&pcs[nframes];
5660 int size, offs = 0, i, j;
5661 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5662#ifndef VBOX
5663 uint16_t *flags = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
5664#else
5665 uint16_t volatile *flags = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
5666#endif
5667 char *sym;
5668
5669 /*
5670 * Should be taking a faster path if string space has not been
5671 * allocated.
5672 */
5673 ASSERT(strsize != 0);
5674
5675 /*
5676 * We will first allocate some temporary space for the frame pointers.
5677 */
5678 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5679 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5680 (nframes * sizeof (uint64_t));
5681
5682 if (!DTRACE_INSCRATCH(mstate, VBDTCAST(unsigned)size)) {
5683 /*
5684 * Not enough room for our frame pointers -- need to indicate
5685 * that we ran out of scratch space.
5686 */
5687 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5688 return;
5689 }
5690
5691 mstate->dtms_scratch_ptr += size;
5692 saved = mstate->dtms_scratch_ptr;
5693
5694 /*
5695 * Now get a stack with both program counters and frame pointers.
5696 */
5697 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5698 dtrace_getufpstack(buf, fps, nframes + 1);
5699 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5700
5701 /*
5702 * If that faulted, we're cooked.
5703 */
5704 if (*flags & CPU_DTRACE_FAULT)
5705 goto out;
5706
5707 /*
5708 * Now we want to walk up the stack, calling the USTACK helper. For
5709 * each iteration, we restore the scratch pointer.
5710 */
5711 for (i = 0; i < nframes; i++) {
5712 mstate->dtms_scratch_ptr = saved;
5713
5714 if (offs >= strsize)
5715 break;
5716
5717#ifndef VBOX
5718 sym = (char *)(uintptr_t)dtrace_helper(
5719 DTRACE_HELPER_ACTION_USTACK,
5720 mstate, state, pcs[i], fps[i]);
5721#else
5722 sym = NULL;
5723#endif
5724
5725 /*
5726 * If we faulted while running the helper, we're going to
5727 * clear the fault and null out the corresponding string.
5728 */
5729 if (*flags & CPU_DTRACE_FAULT) {
5730 *flags &= ~CPU_DTRACE_FAULT;
5731 str[offs++] = '\0';
5732 continue;
5733 }
5734
5735 if (sym == NULL) {
5736 str[offs++] = '\0';
5737 continue;
5738 }
5739
5740 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5741
5742 /*
5743 * Now copy in the string that the helper returned to us.
5744 */
5745 for (j = 0; offs + j < strsize; j++) {
5746 if ((str[offs + j] = sym[j]) == '\0')
5747 break;
5748 }
5749
5750 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5751
5752 offs += j + 1;
5753 }
5754
5755 if (offs >= strsize) {
5756 /*
5757 * If we didn't have room for all of the strings, we don't
5758 * abort processing -- this needn't be a fatal error -- but we
5759 * still want to increment a counter (dts_stkstroverflows) to
5760 * allow this condition to be warned about. (If this is from
5761 * a jstack() action, it is easily tuned via jstackstrsize.)
5762 */
5763 dtrace_error(&state->dts_stkstroverflows);
5764 }
5765
5766 while (offs < strsize)
5767 str[offs++] = '\0';
5768
5769out:
5770 mstate->dtms_scratch_ptr = old;
5771}
5772
5773#ifdef VBOX
5774extern void dtrace_probe6(dtrace_id_t, uintptr_t arg0, uintptr_t arg1,
5775 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4, uintptr_t arg5);
5776# define dtrace_probe_error(a1, a2, a3, a4, a5, a6) \
5777 dtrace_probe6(dtrace_probeid_error, (uintptr_t)a1, a2, a3, a4, a5, a6)
5778#endif
5779
5780/*
5781 * If you're looking for the epicenter of DTrace, you just found it. This
5782 * is the function called by the provider to fire a probe -- from which all
5783 * subsequent probe-context DTrace activity emanates.
5784 */
5785void
5786dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5787 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5788{
5789 processorid_t cpuid;
5790 dtrace_icookie_t cookie;
5791 dtrace_probe_t *probe;
5792 dtrace_mstate_t mstate;
5793 dtrace_ecb_t *ecb;
5794 dtrace_action_t *act;
5795 intptr_t offs;
5796 size_t size;
5797 int vtime, onintr;
5798 volatile uint16_t *flags;
5799 hrtime_t now;
5800
5801#ifndef VBOX
5802 /*
5803 * Kick out immediately if this CPU is still being born (in which case
5804 * curthread will be set to -1) or the current thread can't allow
5805 * probes in its current context.
5806 */
5807 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
5808 return;
5809#endif
5810
5811 cookie = dtrace_interrupt_disable();
5812 probe = dtrace_probes[id - 1];
5813 cpuid = VBDT_GET_CPUID();
5814 onintr = CPU_ON_INTR(CPU);
5815
5816 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5817 probe->dtpr_predcache == curthread->t_predcache) {
5818 /*
5819 * We have hit in the predicate cache; we know that
5820 * this predicate would evaluate to be false.
5821 */
5822 dtrace_interrupt_enable(cookie);
5823 return;
5824 }
5825
5826#ifndef VBOX
5827 if (panic_quiesce) {
5828 /*
5829 * We don't trace anything if we're panicking.
5830 */
5831 dtrace_interrupt_enable(cookie);
5832 return;
5833 }
5834#endif
5835
5836 now = dtrace_gethrtime();
5837 vtime = dtrace_vtime_references != 0;
5838
5839 if (vtime && curthread->t_dtrace_start)
5840 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5841
5842 mstate.dtms_difo = NULL;
5843 mstate.dtms_probe = probe;
5844 mstate.dtms_strtok = NULL;
5845 mstate.dtms_arg[0] = arg0;
5846 mstate.dtms_arg[1] = arg1;
5847 mstate.dtms_arg[2] = arg2;
5848 mstate.dtms_arg[3] = arg3;
5849 mstate.dtms_arg[4] = arg4;
5850
5851 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
5852
5853 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
5854 dtrace_predicate_t *pred = ecb->dte_predicate;
5855 dtrace_state_t *state = ecb->dte_state;
5856 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
5857 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
5858 dtrace_vstate_t *vstate = &state->dts_vstate;
5859 dtrace_provider_t *prov = probe->dtpr_provider;
5860 int committed = 0;
5861 caddr_t tomax;
5862
5863 /*
5864 * A little subtlety with the following (seemingly innocuous)
5865 * declaration of the automatic 'val': by looking at the
5866 * code, you might think that it could be declared in the
5867 * action processing loop, below. (That is, it's only used in
5868 * the action processing loop.) However, it must be declared
5869 * out of that scope because in the case of DIF expression
5870 * arguments to aggregating actions, one iteration of the
5871 * action loop will use the last iteration's value.
5872 */
5873#ifdef lint
5874 uint64_t val = 0;
5875#else
5876 uint64_t val VBDTUNASS(0);
5877#endif
5878
5879 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
5880 *flags &= ~CPU_DTRACE_ERROR;
5881
5882 if (prov == dtrace_provider) {
5883 /*
5884 * If dtrace itself is the provider of this probe,
5885 * we're only going to continue processing the ECB if
5886 * arg0 (the dtrace_state_t) is equal to the ECB's
5887 * creating state. (This prevents disjoint consumers
5888 * from seeing one another's metaprobes.)
5889 */
5890 if (arg0 != (uint64_t)(uintptr_t)state)
5891 continue;
5892 }
5893
5894 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
5895 /*
5896 * We're not currently active. If our provider isn't
5897 * the dtrace pseudo provider, we're not interested.
5898 */
5899 if (prov != dtrace_provider)
5900 continue;
5901
5902 /*
5903 * Now we must further check if we are in the BEGIN
5904 * probe. If we are, we will only continue processing
5905 * if we're still in WARMUP -- if one BEGIN enabling
5906 * has invoked the exit() action, we don't want to
5907 * evaluate subsequent BEGIN enablings.
5908 */
5909 if (probe->dtpr_id == dtrace_probeid_begin &&
5910 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
5911 ASSERT(state->dts_activity ==
5912 DTRACE_ACTIVITY_DRAINING);
5913 continue;
5914 }
5915 }
5916
5917 if (ecb->dte_cond) {
5918 /*
5919 * If the dte_cond bits indicate that this
5920 * consumer is only allowed to see user-mode firings
5921 * of this probe, call the provider's dtps_usermode()
5922 * entry point to check that the probe was fired
5923 * while in a user context. Skip this ECB if that's
5924 * not the case.
5925 */
5926 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
5927 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
5928 probe->dtpr_id, probe->dtpr_arg) == 0)
5929 continue;
5930
5931 /*
5932 * This is more subtle than it looks. We have to be
5933 * absolutely certain that CRED() isn't going to
5934 * change out from under us so it's only legit to
5935 * examine that structure if we're in constrained
5936 * situations. Currently, the only time we'll do this
5937 * check is if a non-super-user has enabled the
5938 * profile or syscall providers -- providers that
5939 * allow visibility of all processes. For the
5940 * profile case, the check above will ensure that
5941 * we're examining a user context.
5942 */
5943 if (ecb->dte_cond & DTRACE_COND_OWNER) {
5944 cred_t *cr;
5945 cred_t *s_cr =
5946 ecb->dte_state->dts_cred.dcr_cred;
5947 proc_t *proc;
5948
5949 ASSERT(s_cr != NULL);
5950
5951 if ((cr = CRED()) == NULL ||
5952 s_cr->cr_uid != cr->cr_uid ||
5953 s_cr->cr_uid != cr->cr_ruid ||
5954 s_cr->cr_uid != cr->cr_suid ||
5955 s_cr->cr_gid != cr->cr_gid ||
5956 s_cr->cr_gid != cr->cr_rgid ||
5957 s_cr->cr_gid != cr->cr_sgid ||
5958#ifndef VBOX
5959 (proc = VBDT_GET_PROC()) == NULL ||
5960 (proc->p_flag & SNOCD))
5961#else
5962 0)
5963
5964#endif
5965 continue;
5966 }
5967
5968#ifndef VBOX
5969 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
5970 cred_t *cr;
5971 cred_t *s_cr =
5972 ecb->dte_state->dts_cred.dcr_cred;
5973
5974 ASSERT(s_cr != NULL);
5975
5976 if ((cr = CRED()) == NULL ||
5977 s_cr->cr_zone->zone_id !=
5978 cr->cr_zone->zone_id)
5979 continue;
5980 }
5981#endif
5982 }
5983
5984 if (now - state->dts_alive > dtrace_deadman_timeout) {
5985 /*
5986 * We seem to be dead. Unless we (a) have kernel
5987 * destructive permissions, (b) have explicitly enabled
5988 * destructive actions, and (c) destructive actions have
5989 * not been disabled, we're going to transition into
5990 * the KILLED state, from which no further processing
5991 * on this state will be performed.
5992 */
5993 if (!dtrace_priv_kernel_destructive(state) ||
5994 !state->dts_cred.dcr_destructive ||
5995 dtrace_destructive_disallow) {
5996 void *activity = &state->dts_activity;
5997 dtrace_activity_t current;
5998
5999 do {
6000 current = state->dts_activity;
6001 } while (dtrace_cas32(activity, current,
6002 DTRACE_ACTIVITY_KILLED) != current);
6003
6004 continue;
6005 }
6006 }
6007
6008 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
6009 ecb->dte_alignment, state, &mstate)) < 0)
6010 continue;
6011
6012 tomax = buf->dtb_tomax;
6013 ASSERT(tomax != NULL);
6014
6015 if (ecb->dte_size != 0)
6016 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
6017
6018 mstate.dtms_epid = ecb->dte_epid;
6019 mstate.dtms_present |= DTRACE_MSTATE_EPID;
6020
6021 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
6022 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
6023 else
6024 mstate.dtms_access = 0;
6025
6026 if (pred != NULL) {
6027 dtrace_difo_t *dp = pred->dtp_difo;
6028 int rval;
6029
6030 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
6031
6032 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
6033 dtrace_cacheid_t cid = probe->dtpr_predcache;
6034
6035 if (cid != DTRACE_CACHEIDNONE && !onintr) {
6036 /*
6037 * Update the predicate cache...
6038 */
6039 ASSERT(cid == pred->dtp_cacheid);
6040 curthread->t_predcache = cid;
6041 }
6042
6043 continue;
6044 }
6045 }
6046
6047 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
6048 act != NULL; act = act->dta_next) {
6049 size_t valoffs;
6050 dtrace_difo_t *dp;
6051 dtrace_recdesc_t *rec = &act->dta_rec;
6052
6053 size = rec->dtrd_size;
6054 valoffs = offs + rec->dtrd_offset;
6055
6056 if (DTRACEACT_ISAGG(act->dta_kind)) {
6057 uint64_t v = 0xbad;
6058 dtrace_aggregation_t *agg;
6059
6060 agg = (dtrace_aggregation_t *)act;
6061
6062 if ((dp = act->dta_difo) != NULL)
6063 v = dtrace_dif_emulate(dp,
6064 &mstate, vstate, state);
6065
6066 if (*flags & CPU_DTRACE_ERROR)
6067 continue;
6068
6069 /*
6070 * Note that we always pass the expression
6071 * value from the previous iteration of the
6072 * action loop. This value will only be used
6073 * if there is an expression argument to the
6074 * aggregating action, denoted by the
6075 * dtag_hasarg field.
6076 */
6077 dtrace_aggregate(agg, buf,
6078 offs, aggbuf, v, val);
6079 continue;
6080 }
6081
6082 switch (act->dta_kind) {
6083 case DTRACEACT_STOP:
6084#ifndef VBOX
6085 if (dtrace_priv_proc_destructive(state))
6086 dtrace_action_stop();
6087#else
6088 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6089#endif
6090 continue;
6091
6092 case DTRACEACT_BREAKPOINT:
6093#ifndef VBOX
6094 if (dtrace_priv_kernel_destructive(state))
6095 dtrace_action_breakpoint(ecb);
6096#else
6097 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6098#endif
6099 continue;
6100
6101 case DTRACEACT_PANIC:
6102#ifndef VBOX
6103 if (dtrace_priv_kernel_destructive(state))
6104 dtrace_action_panic(ecb);
6105#endif
6106 continue;
6107
6108 case DTRACEACT_STACK:
6109 if (!dtrace_priv_kernel(state))
6110 continue;
6111
6112 dtrace_getpcstack((pc_t *)(tomax + valoffs),
6113 VBDTCAST(int)(size / sizeof (pc_t)), probe->dtpr_aframes,
6114 DTRACE_ANCHORED(probe) ? NULL :
6115 (uint32_t *)arg0);
6116
6117 continue;
6118
6119 case DTRACEACT_JSTACK:
6120 case DTRACEACT_USTACK:
6121 if (!dtrace_priv_proc(state))
6122 continue;
6123
6124 /*
6125 * See comment in DIF_VAR_PID.
6126 */
6127 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
6128 CPU_ON_INTR(CPU)) {
6129 int depth = DTRACE_USTACK_NFRAMES(
6130 rec->dtrd_arg) + 1;
6131
6132 dtrace_bzero((void *)(tomax + valoffs),
6133 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
6134 + depth * sizeof (uint64_t));
6135
6136 continue;
6137 }
6138
6139#ifndef VBOX /* no helpers */
6140 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
6141 curproc->p_dtrace_helpers != NULL) {
6142 /*
6143 * This is the slow path -- we have
6144 * allocated string space, and we're
6145 * getting the stack of a process that
6146 * has helpers. Call into a separate
6147 * routine to perform this processing.
6148 */
6149 dtrace_action_ustack(&mstate, state,
6150 (uint64_t *)(tomax + valoffs),
6151 rec->dtrd_arg);
6152 continue;
6153 }
6154#endif
6155
6156 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6157 dtrace_getupcstack((uint64_t *)
6158 (tomax + valoffs),
6159 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
6160 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6161 continue;
6162
6163 default:
6164 break;
6165 }
6166
6167 dp = act->dta_difo;
6168 ASSERT(dp != NULL);
6169
6170 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
6171
6172 if (*flags & CPU_DTRACE_ERROR)
6173 continue;
6174
6175 switch (act->dta_kind) {
6176 case DTRACEACT_SPECULATE:
6177 ASSERT(buf == &state->dts_buffer[cpuid]);
6178 buf = dtrace_speculation_buffer(state,
6179 cpuid, val);
6180
6181 if (buf == NULL) {
6182 *flags |= CPU_DTRACE_DROP;
6183 continue;
6184 }
6185
6186 offs = dtrace_buffer_reserve(buf,
6187 ecb->dte_needed, ecb->dte_alignment,
6188 state, NULL);
6189
6190 if (offs < 0) {
6191 *flags |= CPU_DTRACE_DROP;
6192 continue;
6193 }
6194
6195 tomax = buf->dtb_tomax;
6196 ASSERT(tomax != NULL);
6197
6198 if (ecb->dte_size != 0)
6199 DTRACE_STORE(uint32_t, tomax, offs,
6200 ecb->dte_epid);
6201 continue;
6202
6203 case DTRACEACT_CHILL:
6204#ifndef VBOX
6205 if (dtrace_priv_kernel_destructive(state))
6206 dtrace_action_chill(&mstate, val);
6207#else
6208 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6209#endif
6210 continue;
6211
6212 case DTRACEACT_RAISE:
6213#ifndef VBOX
6214 if (dtrace_priv_proc_destructive(state))
6215 dtrace_action_raise(val);
6216#else
6217 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6218#endif
6219 continue;
6220
6221 case DTRACEACT_COMMIT:
6222 ASSERT(!committed);
6223
6224 /*
6225 * We need to commit our buffer state.
6226 */
6227 if (ecb->dte_size)
6228 buf->dtb_offset = offs + ecb->dte_size;
6229 buf = &state->dts_buffer[cpuid];
6230 dtrace_speculation_commit(state, cpuid, val);
6231 committed = 1;
6232 continue;
6233
6234 case DTRACEACT_DISCARD:
6235 dtrace_speculation_discard(state, cpuid, val);
6236 continue;
6237
6238 case DTRACEACT_DIFEXPR:
6239 case DTRACEACT_LIBACT:
6240 case DTRACEACT_PRINTF:
6241 case DTRACEACT_PRINTA:
6242 case DTRACEACT_SYSTEM:
6243 case DTRACEACT_FREOPEN:
6244 break;
6245
6246 case DTRACEACT_SYM:
6247 case DTRACEACT_MOD:
6248 if (!dtrace_priv_kernel(state))
6249 continue;
6250 break;
6251
6252 case DTRACEACT_USYM:
6253 case DTRACEACT_UMOD:
6254 case DTRACEACT_UADDR: {
6255#ifndef VBOX
6256 struct pid *pid = curthread->t_procp->p_pidp;
6257
6258 if (!dtrace_priv_proc(state))
6259 continue;
6260
6261 DTRACE_STORE(uint64_t, tomax,
6262 valoffs, (uint64_t)pid->pid_id);
6263 DTRACE_STORE(uint64_t, tomax,
6264 valoffs + sizeof (uint64_t), val);
6265#else
6266 DTRACE_CPUFLAG_SET(CPU_DTRACE_UPRIV);
6267#endif
6268 continue;
6269 }
6270
6271 case DTRACEACT_EXIT: {
6272 /*
6273 * For the exit action, we are going to attempt
6274 * to atomically set our activity to be
6275 * draining. If this fails (either because
6276 * another CPU has beat us to the exit action,
6277 * or because our current activity is something
6278 * other than ACTIVE or WARMUP), we will
6279 * continue. This assures that the exit action
6280 * can be successfully recorded at most once
6281 * when we're in the ACTIVE state. If we're
6282 * encountering the exit() action while in
6283 * COOLDOWN, however, we want to honor the new
6284 * status code. (We know that we're the only
6285 * thread in COOLDOWN, so there is no race.)
6286 */
6287 void *activity = &state->dts_activity;
6288 dtrace_activity_t current = state->dts_activity;
6289
6290 if (current == DTRACE_ACTIVITY_COOLDOWN)
6291 break;
6292
6293 if (current != DTRACE_ACTIVITY_WARMUP)
6294 current = DTRACE_ACTIVITY_ACTIVE;
6295
6296 if (dtrace_cas32(activity, current,
6297 DTRACE_ACTIVITY_DRAINING) != current) {
6298 *flags |= CPU_DTRACE_DROP;
6299 continue;
6300 }
6301
6302 break;
6303 }
6304
6305 default:
6306#ifndef VBOX
6307 ASSERT(0);
6308#else
6309 AssertFatalMsgFailed(("%d\n", act->dta_kind));
6310#endif
6311 }
6312
6313 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6314 uintptr_t end = valoffs + size;
6315
6316 if (!dtrace_vcanload((void *)(uintptr_t)val,
6317 &dp->dtdo_rtype, &mstate, vstate))
6318 continue;
6319
6320 /*
6321 * If this is a string, we're going to only
6322 * load until we find the zero byte -- after
6323 * which we'll store zero bytes.
6324 */
6325 if (dp->dtdo_rtype.dtdt_kind ==
6326 DIF_TYPE_STRING) {
6327 char c = '\0' + 1;
6328 int intuple = act->dta_intuple;
6329 size_t s;
6330
6331 for (s = 0; s < size; s++) {
6332 if (c != '\0')
6333 c = dtrace_load8(val++);
6334
6335 DTRACE_STORE(uint8_t, tomax,
6336 valoffs++, c);
6337
6338 if (c == '\0' && intuple)
6339 break;
6340 }
6341
6342 continue;
6343 }
6344
6345 while (valoffs < end) {
6346 DTRACE_STORE(uint8_t, tomax, valoffs++,
6347 dtrace_load8(val++));
6348 }
6349
6350 continue;
6351 }
6352
6353 switch (size) {
6354 case 0:
6355 break;
6356
6357 case sizeof (uint8_t):
6358 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6359 break;
6360 case sizeof (uint16_t):
6361 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6362 break;
6363 case sizeof (uint32_t):
6364 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6365 break;
6366 case sizeof (uint64_t):
6367 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6368 break;
6369 default:
6370 /*
6371 * Any other size should have been returned by
6372 * reference, not by value.
6373 */
6374#ifndef VBOX
6375 ASSERT(0);
6376#else
6377 AssertFatalMsgFailed(("%zu\n", size));
6378#endif
6379 break;
6380 }
6381 }
6382
6383 if (*flags & CPU_DTRACE_DROP)
6384 continue;
6385
6386 if (*flags & CPU_DTRACE_FAULT) {
6387 int ndx;
6388 dtrace_action_t *err;
6389
6390 buf->dtb_errors++;
6391
6392 if (probe->dtpr_id == dtrace_probeid_error) {
6393 /*
6394 * There's nothing we can do -- we had an
6395 * error on the error probe. We bump an
6396 * error counter to at least indicate that
6397 * this condition happened.
6398 */
6399 dtrace_error(&state->dts_dblerrors);
6400 continue;
6401 }
6402
6403 if (vtime) {
6404 /*
6405 * Before recursing on dtrace_probe(), we
6406 * need to explicitly clear out our start
6407 * time to prevent it from being accumulated
6408 * into t_dtrace_vtime.
6409 */
6410 curthread->t_dtrace_start = 0;
6411 }
6412
6413 /*
6414 * Iterate over the actions to figure out which action
6415 * we were processing when we experienced the error.
6416 * Note that act points _past_ the faulting action; if
6417 * act is ecb->dte_action, the fault was in the
6418 * predicate, if it's ecb->dte_action->dta_next it's
6419 * in action #1, and so on.
6420 */
6421 for (err = ecb->dte_action, ndx = 0;
6422 err != act; err = err->dta_next, ndx++)
6423 continue;
6424
6425 dtrace_probe_error(state, ecb->dte_epid, ndx,
6426 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6427 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6428 cpu_core[cpuid].cpuc_dtrace_illval);
6429
6430 continue;
6431 }
6432
6433 if (!committed)
6434 buf->dtb_offset = offs + ecb->dte_size;
6435 }
6436
6437 if (vtime)
6438 curthread->t_dtrace_start = dtrace_gethrtime();
6439
6440 dtrace_interrupt_enable(cookie);
6441}
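
/*
 * For orientation: a provider fires a probe by calling dtrace_probe() with
 * the probe ID it was handed at creation time and up to five arguments. A
 * minimal sketch (hypothetical provider code, not part of this file):
 *
 *	void
 *	myprov_hit(dtrace_id_t id, uintptr_t arg)
 *	{
 *		dtrace_probe(id, arg, 0, 0, 0, 0);
 *	}
 *
 * Everything above -- predicate evaluation, buffer reservation and action
 * processing -- runs in the context of that call, with interrupts disabled.
 */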
6442
6443/*
6444 * DTrace Probe Hashing Functions
6445 *
6446 * The functions in this section (and indeed, the functions in remaining
6447 * sections) are not _called_ from probe context. (Any exceptions to this are
6448 * marked with a "Note:".) Rather, they are called from elsewhere in the
6449 * DTrace framework to look up probes in, add probes to, and remove probes from
6450 * the DTrace probe hashes. (Each probe is hashed by each element of the
6451 * probe tuple -- allowing for fast lookups, regardless of what was
6452 * specified.)
6453 */
6454static uint_t
6455dtrace_hash_str(char *p)
6456{
6457 unsigned int g;
6458 uint_t hval = 0;
6459
6460 while (*p) {
6461 hval = (hval << 4) + *p++;
6462 if ((g = (hval & 0xf0000000)) != 0)
6463 hval ^= g >> 24;
6464 hval &= ~g;
6465 }
6466 return (hval);
6467}
6468
6469static dtrace_hash_t *
6470dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6471{
6472 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6473
6474 hash->dth_stroffs = stroffs;
6475 hash->dth_nextoffs = nextoffs;
6476 hash->dth_prevoffs = prevoffs;
6477
6478 hash->dth_size = 1;
6479 hash->dth_mask = hash->dth_size - 1;
6480
6481 hash->dth_tab = kmem_zalloc(hash->dth_size *
6482 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6483
6484 return (hash);
6485}
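
/*
 * A usage sketch: the framework builds one hash per probe-tuple element by
 * passing in the offsets of the matching string and link fields within
 * dtrace_probe_t, along the lines of (offsets assumed from the attach-time
 * setup elsewhere in this file):
 *
 *	dtrace_bymod = dtrace_hash_create(
 *	    offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 *
 * The DTRACE_HASHSTR()/DTRACE_HASHNEXT()/DTRACE_HASHPREV() macros then use
 * these offsets to reach into any probe without this code depending on the
 * structure layout directly.
 */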
6486
6487static void
6488dtrace_hash_destroy(dtrace_hash_t *hash)
6489{
6490#ifdef DEBUG
6491 int i;
6492
6493 for (i = 0; i < hash->dth_size; i++)
6494 ASSERT(hash->dth_tab[i] == NULL);
6495#endif
6496
6497 kmem_free(hash->dth_tab,
6498 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6499 kmem_free(hash, sizeof (dtrace_hash_t));
6500}
6501
6502static void
6503dtrace_hash_resize(dtrace_hash_t *hash)
6504{
6505 int size = hash->dth_size, i, ndx;
6506 int new_size = hash->dth_size << 1;
6507 int new_mask = new_size - 1;
6508 dtrace_hashbucket_t **new_tab, *bucket, *next;
6509
6510 ASSERT((new_size & new_mask) == 0);
6511
6512 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6513
6514 for (i = 0; i < size; i++) {
6515 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6516 dtrace_probe_t *probe = bucket->dthb_chain;
6517
6518 ASSERT(probe != NULL);
6519 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6520
6521 next = bucket->dthb_next;
6522 bucket->dthb_next = new_tab[ndx];
6523 new_tab[ndx] = bucket;
6524 }
6525 }
6526
6527 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6528 hash->dth_tab = new_tab;
6529 hash->dth_size = new_size;
6530 hash->dth_mask = new_mask;
6531}
6532
6533static void
6534dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6535{
6536 int hashval = DTRACE_HASHSTR(hash, new);
6537 int ndx = hashval & hash->dth_mask;
6538 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6539 dtrace_probe_t **nextp, **prevp;
6540
6541 for (; bucket != NULL; bucket = bucket->dthb_next) {
6542 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6543 goto add;
6544 }
6545
6546 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6547 dtrace_hash_resize(hash);
6548 dtrace_hash_add(hash, new);
6549 return;
6550 }
6551
6552 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6553 bucket->dthb_next = hash->dth_tab[ndx];
6554 hash->dth_tab[ndx] = bucket;
6555 hash->dth_nbuckets++;
6556
6557add:
6558 nextp = DTRACE_HASHNEXT(hash, new);
6559 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6560 *nextp = bucket->dthb_chain;
6561
6562 if (bucket->dthb_chain != NULL) {
6563 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6564 ASSERT(*prevp == NULL);
6565 *prevp = new;
6566 }
6567
6568 bucket->dthb_chain = new;
6569 bucket->dthb_len++;
6570}
6571
6572static dtrace_probe_t *
6573dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6574{
6575 int hashval = DTRACE_HASHSTR(hash, template);
6576 int ndx = hashval & hash->dth_mask;
6577 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6578
6579 for (; bucket != NULL; bucket = bucket->dthb_next) {
6580 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6581 return (bucket->dthb_chain);
6582 }
6583
6584 return (NULL);
6585}
6586
6587static int
6588dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6589{
6590 int hashval = DTRACE_HASHSTR(hash, template);
6591 int ndx = hashval & hash->dth_mask;
6592 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6593
6594 for (; bucket != NULL; bucket = bucket->dthb_next) {
6595 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6596 return (bucket->dthb_len);
6597 }
6598
6599 return (0);
6600}
6601
6602static void
6603dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6604{
6605 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6606 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6607
6608 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6609 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6610
6611 /*
6612 * Find the bucket that we're removing this probe from.
6613 */
6614 for (; bucket != NULL; bucket = bucket->dthb_next) {
6615 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6616 break;
6617 }
6618
6619 ASSERT(bucket != NULL);
6620
6621 if (*prevp == NULL) {
6622 if (*nextp == NULL) {
6623 /*
6624 * The removed probe was the only probe on this
6625 * bucket; we need to remove the bucket.
6626 */
6627 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6628
6629 ASSERT(bucket->dthb_chain == probe);
6630 ASSERT(b != NULL);
6631
6632 if (b == bucket) {
6633 hash->dth_tab[ndx] = bucket->dthb_next;
6634 } else {
6635 while (b->dthb_next != bucket)
6636 b = b->dthb_next;
6637 b->dthb_next = bucket->dthb_next;
6638 }
6639
6640 ASSERT(hash->dth_nbuckets > 0);
6641 hash->dth_nbuckets--;
6642 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6643 return;
6644 }
6645
6646 bucket->dthb_chain = *nextp;
6647 } else {
6648 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6649 }
6650
6651 if (*nextp != NULL)
6652 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6653}
6654
6655/*
6656 * DTrace Utility Functions
6657 *
6658 * These are random utility functions that are _not_ called from probe context.
6659 */
6660static int
6661dtrace_badattr(const dtrace_attribute_t *a)
6662{
6663 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6664 a->dtat_data > DTRACE_STABILITY_MAX ||
6665 a->dtat_class > DTRACE_CLASS_MAX);
6666}
6667
6668/*
6669 * Return a duplicate copy of a string. If the specified string is NULL,
6670 * this function returns a zero-length string.
6671 */
6672static char *
6673dtrace_strdup(const char *str)
6674{
6675 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6676
6677 if (str != NULL)
6678 (void) strcpy(new, str);
6679
6680 return (new);
6681}
6682
6683#define DTRACE_ISALPHA(c) \
6684 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6685
6686static int
6687dtrace_badname(const char *s)
6688{
6689 char c;
6690
6691 if (s == NULL || (c = *s++) == '\0')
6692 return (0);
6693
6694 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6695 return (1);
6696
6697 while ((c = *s++) != '\0') {
6698 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6699 c != '-' && c != '_' && c != '.' && c != '`')
6700 return (1);
6701 }
6702
6703 return (0);
6704}
6705
6706static void
6707dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6708{
6709 uint32_t priv;
6710
6711 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6712 /*
6713 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
6714 */
6715 priv = DTRACE_PRIV_ALL;
6716#ifdef VBOX
6717 *uidp = ~0;
6718 *zoneidp = 0;
6719#endif
6720 } else {
6721 *uidp = crgetuid(cr);
6722 *zoneidp = crgetzoneid(cr);
6723
6724 priv = 0;
6725 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6726 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6727 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6728 priv |= DTRACE_PRIV_USER;
6729 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6730 priv |= DTRACE_PRIV_PROC;
6731 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6732 priv |= DTRACE_PRIV_OWNER;
6733 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6734 priv |= DTRACE_PRIV_ZONEOWNER;
6735 }
6736
6737 *privp = priv;
6738}
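
/*
 * To make the mapping concrete (a sketch, not an exhaustive table): a
 * credential holding only PRIV_DTRACE_PROC and PRIV_PROC_OWNER comes back
 * as DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER together with the caller's uid
 * and zoneid, while an all-privileged credential short-circuits to
 * DTRACE_PRIV_ALL without consulting the individual privilege bits.
 */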
6739
6740#ifdef DTRACE_ERRDEBUG
6741static void
6742dtrace_errdebug(const char *str)
6743{
6744 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
6745 int occupied = 0;
6746
6747 mutex_enter(&dtrace_errlock);
6748 dtrace_errlast = str;
6749 dtrace_errthread = curthread;
6750
6751 while (occupied++ < DTRACE_ERRHASHSZ) {
6752 if (dtrace_errhash[hval].dter_msg == str) {
6753 dtrace_errhash[hval].dter_count++;
6754 goto out;
6755 }
6756
6757 if (dtrace_errhash[hval].dter_msg != NULL) {
6758 hval = (hval + 1) % DTRACE_ERRHASHSZ;
6759 continue;
6760 }
6761
6762 dtrace_errhash[hval].dter_msg = str;
6763 dtrace_errhash[hval].dter_count = 1;
6764 goto out;
6765 }
6766
6767 panic("dtrace: undersized error hash");
6768out:
6769 mutex_exit(&dtrace_errlock);
6770}
6771#endif
6772
6773/*
6774 * DTrace Matching Functions
6775 *
6776 * These functions are used to match groups of probes, given some elements of
6777 * a probe tuple, or some globbed expressions for elements of a probe tuple.
6778 */
6779static int
6780dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
6781 zoneid_t zoneid)
6782{
6783 if (priv != DTRACE_PRIV_ALL) {
6784 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
6785 uint32_t match = priv & ppriv;
6786
6787 /*
6788 * No PRIV_DTRACE_* privileges...
6789 */
6790 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
6791 DTRACE_PRIV_KERNEL)) == 0)
6792 return (0);
6793
6794 /*
6795 * No matching bits, but there were bits to match...
6796 */
6797 if (match == 0 && ppriv != 0)
6798 return (0);
6799
6800 /*
6801 * Need to have permissions to the process, but don't...
6802 */
6803 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
6804 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
6805 return (0);
6806 }
6807
6808 /*
6809 * Need to be in the same zone unless we possess the
6810 * privilege to examine all zones.
6811 */
6812 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
6813 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
6814 return (0);
6815 }
6816 }
6817
6818 return (1);
6819}
6820
6821/*
6822 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
6823 * consists of input pattern strings and an ops-vector to evaluate them.
6824 * This function returns >0 for match, 0 for no match, and <0 for error.
6825 */
6826static int
6827dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
6828 uint32_t priv, uid_t uid, zoneid_t zoneid)
6829{
6830 dtrace_provider_t *pvp = prp->dtpr_provider;
6831 int rv;
6832
6833 if (pvp->dtpv_defunct)
6834 return (0);
6835
6836 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
6837 return (rv);
6838
6839 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
6840 return (rv);
6841
6842 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
6843 return (rv);
6844
6845 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
6846 return (rv);
6847
6848 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
6849 return (0);
6850
6851 return (rv);
6852}
6853
6854/*
6855 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
6856 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
6857 * libc's version, the kernel version only applies to 8-bit ASCII strings.
6858 * In addition, all of the recursion cases except for '*' matching have been
6859 * unwound. For '*', we still implement recursive evaluation, but a depth
6860 * counter is maintained and matching is aborted if we recurse too deep.
6861 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
6862 */
6863static int
6864dtrace_match_glob(const char *s, const char *p, int depth)
6865{
6866 const char *olds;
6867 char s1, c;
6868 int gs;
6869
6870 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
6871 return (-1);
6872
6873 if (s == NULL)
6874 s = ""; /* treat NULL as empty string */
6875
6876top:
6877 olds = s;
6878 s1 = *s++;
6879
6880 if (p == NULL)
6881 return (0);
6882
6883 if ((c = *p++) == '\0')
6884 return (s1 == '\0');
6885
6886 switch (c) {
6887 case '[': {
6888 int ok = 0, notflag = 0;
6889 char lc = '\0';
6890
6891 if (s1 == '\0')
6892 return (0);
6893
6894 if (*p == '!') {
6895 notflag = 1;
6896 p++;
6897 }
6898
6899 if ((c = *p++) == '\0')
6900 return (0);
6901
6902 do {
6903 if (c == '-' && lc != '\0' && *p != ']') {
6904 if ((c = *p++) == '\0')
6905 return (0);
6906 if (c == '\\' && (c = *p++) == '\0')
6907 return (0);
6908
6909 if (notflag) {
6910 if (s1 < lc || s1 > c)
6911 ok++;
6912 else
6913 return (0);
6914 } else if (lc <= s1 && s1 <= c)
6915 ok++;
6916
6917 } else if (c == '\\' && (c = *p++) == '\0')
6918 return (0);
6919
6920 lc = c; /* save left-hand 'c' for next iteration */
6921
6922 if (notflag) {
6923 if (s1 != c)
6924 ok++;
6925 else
6926 return (0);
6927 } else if (s1 == c)
6928 ok++;
6929
6930 if ((c = *p++) == '\0')
6931 return (0);
6932
6933 } while (c != ']');
6934
6935 if (ok)
6936 goto top;
6937
6938 return (0);
6939 }
6940
6941 case '\\':
6942 if ((c = *p++) == '\0')
6943 return (0);
6944 /*FALLTHRU*/
6945
6946 default:
6947 if (c != s1)
6948 return (0);
6949 /*FALLTHRU*/
6950
6951 case '?':
6952 if (s1 != '\0')
6953 goto top;
6954 return (0);
6955
6956 case '*':
6957 while (*p == '*')
6958 p++; /* consecutive *'s are identical to a single one */
6959
6960 if (*p == '\0')
6961 return (1);
6962
6963 for (s = olds; *s != '\0'; s++) {
6964 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
6965 return (gs);
6966 }
6967
6968 return (0);
6969 }
6970}
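
/*
 * A few illustrative calls (each starting at depth 0):
 *
 *	dtrace_match_glob("read", "re*", 0)	!= 0	star match
 *	dtrace_match_glob("read", "r?ad", 0)	!= 0	'?' matches one char
 *	dtrace_match_glob("read", "[rw]ead", 0)	!= 0	character class
 *	dtrace_match_glob("read", "write", 0)	== 0	no match
 */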
6971
6972/*ARGSUSED*/
6973static int
6974dtrace_match_string(const char *s, const char *p, int depth)
6975{
6976 return (s != NULL && strcmp(s, p) == 0);
6977}
6978
6979/*ARGSUSED*/
6980static int
6981dtrace_match_nul(const char *s, const char *p, int depth)
6982{
6983 return (1); /* always match the empty pattern */
6984}
6985
6986/*ARGSUSED*/
6987static int
6988dtrace_match_nonzero(const char *s, const char *p, int depth)
6989{
6990 return (s != NULL && s[0] != '\0');
6991}
6992
6993static int
6994dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
6995 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
6996{
6997 dtrace_probe_t template, *probe;
6998 dtrace_hash_t *hash = NULL;
6999 int len, rc, best = INT_MAX, nmatched = 0;
7000 dtrace_id_t i;
7001
7002 ASSERT(MUTEX_HELD(&dtrace_lock));
7003
7004 /*
7005 * If the probe ID is specified in the key, just lookup by ID and
7006 * invoke the match callback once if a matching probe is found.
7007 */
7008 if (pkp->dtpk_id != DTRACE_IDNONE) {
7009 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
7010 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
7011 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
7012 return (DTRACE_MATCH_FAIL);
7013 nmatched++;
7014 }
7015 return (nmatched);
7016 }
7017
7018 template.dtpr_mod = (char *)pkp->dtpk_mod;
7019 template.dtpr_func = (char *)pkp->dtpk_func;
7020 template.dtpr_name = (char *)pkp->dtpk_name;
7021
7022 /*
7023 * We want to find the most distinct of the module name, function
7024 * name, and probe name. So for each one that is not a glob pattern or
7025 * empty string, we perform a lookup in the corresponding hash and
7026 * use the hash table with the fewest collisions to do our search.
7027 */
7028 if (pkp->dtpk_mmatch == &dtrace_match_string &&
7029 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
7030 best = len;
7031 hash = dtrace_bymod;
7032 }
7033
7034 if (pkp->dtpk_fmatch == &dtrace_match_string &&
7035 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
7036 best = len;
7037 hash = dtrace_byfunc;
7038 }
7039
7040 if (pkp->dtpk_nmatch == &dtrace_match_string &&
7041 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
7042 best = len;
7043 hash = dtrace_byname;
7044 }
7045
7046 /*
7047 * If we did not select a hash table, iterate over every probe and
7048 * invoke our callback for each one that matches our input probe key.
7049 */
7050 if (hash == NULL) {
7051 for (i = 0; i < VBDTCAST(dtrace_id_t)dtrace_nprobes; i++) {
7052 if ((probe = dtrace_probes[i]) == NULL ||
7053 dtrace_match_probe(probe, pkp, priv, uid,
7054 zoneid) <= 0)
7055 continue;
7056
7057 nmatched++;
7058
7059 if ((rc = (*matched)(probe, arg)) !=
7060 DTRACE_MATCH_NEXT) {
7061 if (rc == DTRACE_MATCH_FAIL)
7062 return (DTRACE_MATCH_FAIL);
7063 break;
7064 }
7065 }
7066
7067 return (nmatched);
7068 }
7069
7070 /*
7071 * If we selected a hash table, iterate over each probe of the same key
7072 * name and invoke the callback for every probe that matches the other
7073 * attributes of our input probe key.
7074 */
7075 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
7076 probe = *(DTRACE_HASHNEXT(hash, probe))) {
7077
7078 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
7079 continue;
7080
7081 nmatched++;
7082
7083 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
7084 if (rc == DTRACE_MATCH_FAIL)
7085 return (DTRACE_MATCH_FAIL);
7086 break;
7087 }
7088 }
7089
7090 return (nmatched);
7091}
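
/*
 * To make the hash selection above concrete: for a description such as
 * syscall::read:entry, "read" and "entry" are non-empty, non-glob strings,
 * so both dtrace_byfunc and dtrace_byname are candidates; the one whose
 * bucket has the fewest collisions drives the walk, and dtrace_match_probe()
 * then checks the remaining tuple elements on each candidate probe. (This
 * is a restatement of the logic above, not an additional code path.)
 */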
7092
7093/*
7094 * Return the function pointer that dtrace_match_probe() should use to compare the
7095 * specified pattern with a string. For NULL or empty patterns, we select
7096 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
7097 * For non-empty non-glob strings, we use dtrace_match_string().
7098 */
7099static dtrace_probekey_f *
7100dtrace_probekey_func(const char *p)
7101{
7102 char c;
7103
7104 if (p == NULL || *p == '\0')
7105 return (&dtrace_match_nul);
7106
7107 while ((c = *p++) != '\0') {
7108 if (c == '[' || c == '?' || c == '*' || c == '\\')
7109 return (&dtrace_match_glob);
7110 }
7111
7112 return (&dtrace_match_string);
7113}
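
/*
 * Illustrative mappings:
 *
 *	dtrace_probekey_func(NULL)	-> dtrace_match_nul
 *	dtrace_probekey_func("")	-> dtrace_match_nul
 *	dtrace_probekey_func("re*")	-> dtrace_match_glob
 *	dtrace_probekey_func("read")	-> dtrace_match_string
 */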
7114
7115/*
7116 * Build a probe comparison key for use with dtrace_match_probe() from the
7117 * given probe description. By convention, a null key only matches anchored
7118 * probes: if each field is the empty string, reset dtpk_fmatch to
7119 * dtrace_match_nonzero().
7120 */
7121static void
7122dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
7123{
7124 pkp->dtpk_prov = pdp->dtpd_provider;
7125 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
7126
7127 pkp->dtpk_mod = pdp->dtpd_mod;
7128 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
7129
7130 pkp->dtpk_func = pdp->dtpd_func;
7131 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
7132
7133 pkp->dtpk_name = pdp->dtpd_name;
7134 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
7135
7136 pkp->dtpk_id = pdp->dtpd_id;
7137
7138 if (pkp->dtpk_id == DTRACE_IDNONE &&
7139 pkp->dtpk_pmatch == &dtrace_match_nul &&
7140 pkp->dtpk_mmatch == &dtrace_match_nul &&
7141 pkp->dtpk_fmatch == &dtrace_match_nul &&
7142 pkp->dtpk_nmatch == &dtrace_match_nul)
7143 pkp->dtpk_fmatch = &dtrace_match_nonzero;
7144}
7145
7146/*
7147 * DTrace Provider-to-Framework API Functions
7148 *
7149 * These functions implement much of the Provider-to-Framework API, as
7150 * described in <sys/dtrace.h>. The parts of the API not in this section are
7151 * the functions in the API for probe management (found below), and
7152 * dtrace_probe() itself (found above).
7153 */
7154
7155/*
7156 * Register the calling provider with the DTrace framework. This should
7157 * generally be called by DTrace providers in their attach(9E) entry point.
7158 */
7159int
7160dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
7161 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
7162{
7163 dtrace_provider_t *provider;
7164
7165 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
7166 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7167 "arguments", name ? name : "<NULL>");
7168 return (EINVAL);
7169 }
7170
7171 if (name[0] == '\0' || dtrace_badname(name)) {
7172 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7173 "provider name", name);
7174 return (EINVAL);
7175 }
7176
7177 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
7178 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
7179 pops->dtps_destroy == NULL ||
7180 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
7181 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7182 "provider ops", name);
7183 return (EINVAL);
7184 }
7185
7186 if (dtrace_badattr(&pap->dtpa_provider) ||
7187 dtrace_badattr(&pap->dtpa_mod) ||
7188 dtrace_badattr(&pap->dtpa_func) ||
7189 dtrace_badattr(&pap->dtpa_name) ||
7190 dtrace_badattr(&pap->dtpa_args)) {
7191 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7192 "provider attributes", name);
7193 return (EINVAL);
7194 }
7195
7196 if (priv & ~DTRACE_PRIV_ALL) {
7197 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
7198 "privilege attributes", name);
7199 return (EINVAL);
7200 }
7201
7202 if ((priv & DTRACE_PRIV_KERNEL) &&
7203 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
7204 pops->dtps_usermode == NULL) {
7205 cmn_err(CE_WARN, "failed to register provider '%s': need "
7206 "dtps_usermode() op for given privilege attributes", name);
7207 return (EINVAL);
7208 }
7209
7210 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
7211 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7212 (void) strcpy(provider->dtpv_name, name);
7213
7214 provider->dtpv_attr = *pap;
7215 provider->dtpv_priv.dtpp_flags = priv;
7216 if (cr != NULL) {
7217 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
7218 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
7219 }
7220 provider->dtpv_pops = *pops;
7221
7222 if (pops->dtps_provide == NULL) {
7223 ASSERT(pops->dtps_provide_module != NULL);
7224 provider->dtpv_pops.dtps_provide =
7225 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
7226 }
7227
7228 if (pops->dtps_provide_module == NULL) {
7229 ASSERT(pops->dtps_provide != NULL);
7230 provider->dtpv_pops.dtps_provide_module =
7231 (void (*)(void *, struct modctl *))dtrace_nullop;
7232 }
7233
7234 if (pops->dtps_suspend == NULL) {
7235 ASSERT(pops->dtps_resume == NULL);
7236 provider->dtpv_pops.dtps_suspend =
7237 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7238 provider->dtpv_pops.dtps_resume =
7239 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
7240 }
7241
7242 provider->dtpv_arg = arg;
7243 *idp = (dtrace_provider_id_t)provider;
7244
7245 if (pops == &dtrace_provider_ops) {
7246 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7247 ASSERT(MUTEX_HELD(&dtrace_lock));
7248 ASSERT(dtrace_anon.dta_enabling == NULL);
7249
7250 /*
7251 * We make sure that the DTrace provider is at the head of
7252 * the provider chain.
7253 */
7254 provider->dtpv_next = dtrace_provider;
7255 dtrace_provider = provider;
7256 return (0);
7257 }
7258
7259 mutex_enter(&dtrace_provider_lock);
7260 mutex_enter(&dtrace_lock);
7261
7262 /*
7263 * If there is at least one provider registered, we'll add this
7264 * provider after the first provider.
7265 */
7266 if (dtrace_provider != NULL) {
7267 provider->dtpv_next = dtrace_provider->dtpv_next;
7268 dtrace_provider->dtpv_next = provider;
7269 } else {
7270 dtrace_provider = provider;
7271 }
7272
7273 if (dtrace_retained != NULL) {
7274 dtrace_enabling_provide(provider);
7275
7276 /*
7277 * Now we need to call dtrace_enabling_matchall() -- which
7278 * will acquire cpu_lock and dtrace_lock. We therefore need
7279 * to drop all of our locks before calling into it...
7280 */
7281 mutex_exit(&dtrace_lock);
7282 mutex_exit(&dtrace_provider_lock);
7283 dtrace_enabling_matchall();
7284
7285 return (0);
7286 }
7287
7288 mutex_exit(&dtrace_lock);
7289 mutex_exit(&dtrace_provider_lock);
7290
7291 return (0);
7292}
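
/*
 * A registration sketch for a hypothetical kernel provider ("myprov" and
 * myprov_pops are illustrative; in-kernel providers follow this shape from
 * their attach(9E) routines):
 *
 *	static dtrace_pattr_t myprov_attr = {
 *	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
 *	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
 *	{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
 *	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
 *	{ DTRACE_STABILITY_EVOLVING, DTRACE_STABILITY_EVOLVING, DTRACE_CLASS_COMMON },
 *	};
 *	static dtrace_provider_id_t myprov_id;
 *
 *	if (dtrace_register("myprov", &myprov_attr, DTRACE_PRIV_KERNEL,
 *	    NULL, &myprov_pops, NULL, &myprov_id) != 0)
 *		return (DDI_FAILURE);
 */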
7293
7294/*
7295 * Unregister the specified provider from the DTrace framework. This should
7296 * generally be called by DTrace providers in their detach(9E) entry point.
7297 */
7298int
7299dtrace_unregister(dtrace_provider_id_t id)
7300{
7301 dtrace_provider_t *old = (dtrace_provider_t *)id;
7302 dtrace_provider_t *prev = NULL;
7303 VBDTTYPE(uint32_t,int) i, self = 0;
7304 dtrace_probe_t *probe, *first = NULL;
7305
7306 if (old->dtpv_pops.dtps_enable ==
7307 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
7308 /*
7309 * If DTrace itself is the provider, we're called with locks
7310 * already held.
7311 */
7312 ASSERT(old == dtrace_provider);
7313#ifndef VBOX
7314 ASSERT(dtrace_devi != NULL);
7315#endif
7316 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7317 ASSERT(MUTEX_HELD(&dtrace_lock));
7318 self = 1;
7319
7320 if (dtrace_provider->dtpv_next != NULL) {
7321 /*
7322 * There's another provider here; return failure.
7323 */
7324 return (EBUSY);
7325 }
7326 } else {
7327 mutex_enter(&dtrace_provider_lock);
7328 mutex_enter(&mod_lock);
7329 mutex_enter(&dtrace_lock);
7330 }
7331
7332 /*
7333 * If anyone has /dev/dtrace open, or if there are anonymous enabled
7334 * probes, we refuse to let providers slither away, unless this
7335 * provider has already been explicitly invalidated.
7336 */
7337 if (!old->dtpv_defunct &&
7338 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
7339 dtrace_anon.dta_state->dts_necbs > 0))) {
7340 if (!self) {
7341 mutex_exit(&dtrace_lock);
7342 mutex_exit(&mod_lock);
7343 mutex_exit(&dtrace_provider_lock);
7344 }
7345 return (EBUSY);
7346 }
7347
7348 /*
7349 * Attempt to destroy the probes associated with this provider.
7350 */
7351 for (i = 0; i < dtrace_nprobes; i++) {
7352 if ((probe = dtrace_probes[i]) == NULL)
7353 continue;
7354
7355 if (probe->dtpr_provider != old)
7356 continue;
7357
7358 if (probe->dtpr_ecb == NULL)
7359 continue;
7360
7361 /*
7362 * We have at least one ECB; we can't remove this provider.
7363 */
7364 if (!self) {
7365 mutex_exit(&dtrace_lock);
7366 mutex_exit(&mod_lock);
7367 mutex_exit(&dtrace_provider_lock);
7368 }
7369 return (EBUSY);
7370 }
7371
7372 /*
7373 * All of the probes for this provider are disabled; we can safely
7374 * remove all of them from their hash chains and from the probe array.
7375 */
7376 for (i = 0; i < dtrace_nprobes; i++) {
7377 if ((probe = dtrace_probes[i]) == NULL)
7378 continue;
7379
7380 if (probe->dtpr_provider != old)
7381 continue;
7382
7383 dtrace_probes[i] = NULL;
7384
7385 dtrace_hash_remove(dtrace_bymod, probe);
7386 dtrace_hash_remove(dtrace_byfunc, probe);
7387 dtrace_hash_remove(dtrace_byname, probe);
7388
7389 if (first == NULL) {
7390 first = probe;
7391 probe->dtpr_nextmod = NULL;
7392 } else {
7393 probe->dtpr_nextmod = first;
7394 first = probe;
7395 }
7396 }
7397
7398 /*
7399 * The provider's probes have been removed from the hash chains and
7400 * from the probe array. Now issue a dtrace_sync() to be sure that
7401 * everyone has cleared out from any probe array processing.
7402 */
7403 dtrace_sync();
7404
7405 for (probe = first; probe != NULL; probe = first) {
7406 first = probe->dtpr_nextmod;
7407
7408 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7409 probe->dtpr_arg);
7410 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7411 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7412 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7413 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7414 kmem_free(probe, sizeof (dtrace_probe_t));
7415 }
7416
7417 if ((prev = dtrace_provider) == old) {
7418#ifndef VBOX
7419 ASSERT(self || dtrace_devi == NULL);
7420 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7421#endif
7422 dtrace_provider = old->dtpv_next;
7423 } else {
7424 while (prev != NULL && prev->dtpv_next != old)
7425 prev = prev->dtpv_next;
7426
7427 if (prev == NULL) {
7428 panic("attempt to unregister non-existent "
7429 "dtrace provider %p\n", (void *)id);
7430 }
7431
7432 prev->dtpv_next = old->dtpv_next;
7433 }
7434
7435 if (!self) {
7436 mutex_exit(&dtrace_lock);
7437 mutex_exit(&mod_lock);
7438 mutex_exit(&dtrace_provider_lock);
7439 }
7440
7441 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7442 kmem_free(old, sizeof (dtrace_provider_t));
7443
7444 return (0);
7445}
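
/*
 * Providers typically call this from detach(9E) and fail the detach if the
 * framework refuses to let go, e.g. (a sketch):
 *
 *	if (dtrace_unregister(myprov_id) != 0)
 *		return (DDI_FAILURE);
 */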
7446
7447/*
7448 * Invalidate the specified provider. All subsequent probe lookups for the
7449 * specified provider will fail, but its probes will not be removed.
7450 */
7451void
7452dtrace_invalidate(dtrace_provider_id_t id)
7453{
7454 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7455
7456 ASSERT(pvp->dtpv_pops.dtps_enable !=
7457 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
7458
7459 mutex_enter(&dtrace_provider_lock);
7460 mutex_enter(&dtrace_lock);
7461
7462 pvp->dtpv_defunct = 1;
7463
7464 mutex_exit(&dtrace_lock);
7465 mutex_exit(&dtrace_provider_lock);
7466}
7467
7468/*
7469 * Indicate whether or not DTrace has attached.
7470 */
7471int
7472dtrace_attached(void)
7473{
7474 /*
7475 * dtrace_provider will be non-NULL iff the DTrace driver has
7476 * attached. (It's non-NULL because DTrace is always itself a
7477 * provider.)
7478 */
7479 return (dtrace_provider != NULL);
7480}
7481
7482/*
7483 * Remove all the unenabled probes for the given provider. This function is
7484 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7485 * -- just as many of its associated probes as it can.
7486 */
7487int
7488dtrace_condense(dtrace_provider_id_t id)
7489{
7490 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7491 VBDTTYPE(uint32_t,int) i;
7492 dtrace_probe_t *probe;
7493
7494 /*
7495 * Make sure this isn't the dtrace provider itself.
7496 */
7497 ASSERT(prov->dtpv_pops.dtps_enable !=
7498 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
7499
7500 mutex_enter(&dtrace_provider_lock);
7501 mutex_enter(&dtrace_lock);
7502
7503 /*
7504 * Attempt to destroy the probes associated with this provider.
7505 */
7506 for (i = 0; i < dtrace_nprobes; i++) {
7507 if ((probe = dtrace_probes[i]) == NULL)
7508 continue;
7509
7510 if (probe->dtpr_provider != prov)
7511 continue;
7512
7513 if (probe->dtpr_ecb != NULL)
7514 continue;
7515
7516 dtrace_probes[i] = NULL;
7517
7518 dtrace_hash_remove(dtrace_bymod, probe);
7519 dtrace_hash_remove(dtrace_byfunc, probe);
7520 dtrace_hash_remove(dtrace_byname, probe);
7521
7522 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7523 probe->dtpr_arg);
7524 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7525 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7526 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7527 kmem_free(probe, sizeof (dtrace_probe_t));
7528 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7529 }
7530
7531 mutex_exit(&dtrace_lock);
7532 mutex_exit(&dtrace_provider_lock);
7533
7534 return (0);
7535}
7536
7537/*
7538 * DTrace Probe Management Functions
7539 *
7540 * The functions in this section perform the DTrace probe management,
7541 * including functions to create probes, look-up probes, and call into the
7542 * providers to request that probes be provided. Some of these functions are
7543 * in the Provider-to-Framework API; these functions can be identified by the
7544 * fact that they are not declared "static".
7545 */
7546
7547/*
7548 * Create a probe with the specified module name, function name, and name.
7549 */
7550dtrace_id_t
7551dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7552 const char *func, const char *name, int aframes, void *arg)
7553{
7554 dtrace_probe_t *probe, **probes;
7555 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7556 dtrace_id_t id;
7557
7558 if (provider == dtrace_provider) {
7559 ASSERT(MUTEX_HELD(&dtrace_lock));
7560 } else {
7561 mutex_enter(&dtrace_lock);
7562 }
7563
7564 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7565 VM_BESTFIT | VM_SLEEP);
7566 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7567
7568 probe->dtpr_id = id;
7569 probe->dtpr_gen = dtrace_probegen++;
7570 probe->dtpr_mod = dtrace_strdup(mod);
7571 probe->dtpr_func = dtrace_strdup(func);
7572 probe->dtpr_name = dtrace_strdup(name);
7573 probe->dtpr_arg = arg;
7574 probe->dtpr_aframes = aframes;
7575 probe->dtpr_provider = provider;
7576
7577 dtrace_hash_add(dtrace_bymod, probe);
7578 dtrace_hash_add(dtrace_byfunc, probe);
7579 dtrace_hash_add(dtrace_byname, probe);
7580
7581 if (id - 1 >= dtrace_nprobes) {
7582 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7583 size_t nsize = osize << 1;
7584
7585 if (nsize == 0) {
7586 ASSERT(osize == 0);
7587 ASSERT(dtrace_probes == NULL);
7588 nsize = sizeof (dtrace_probe_t *);
7589 }
7590
7591 probes = kmem_zalloc(nsize, KM_SLEEP);
7592
7593 if (dtrace_probes == NULL) {
7594 ASSERT(osize == 0);
7595 dtrace_probes = probes;
7596 dtrace_nprobes = 1;
7597 } else {
7598 dtrace_probe_t **oprobes = dtrace_probes;
7599
7600 bcopy(oprobes, probes, osize);
7601 dtrace_membar_producer();
7602 dtrace_probes = probes;
7603
7604 dtrace_sync();
7605
7606 /*
7607 * All CPUs are now seeing the new probes array; we can
7608 * safely free the old array.
7609 */
7610 kmem_free(oprobes, osize);
7611 dtrace_nprobes <<= 1;
7612 }
7613
7614 ASSERT(id - 1 < dtrace_nprobes);
7615 }
7616
7617 ASSERT(dtrace_probes[id - 1] == NULL);
7618 dtrace_probes[id - 1] = probe;
7619
7620 if (provider != dtrace_provider)
7621 mutex_exit(&dtrace_lock);
7622
7623 return (id);
7624}
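
/*
 * A creation sketch, as a provider's dtps_provide() entry point might issue
 * it (NULL module name, zero aframes; the names and 'mpp' argument are
 * illustrative):
 *
 *	id = dtrace_probe_create(myprov_id, NULL, "read", "entry", 0, mpp);
 *
 * The opaque argument is handed back on enable/disable/destroy and can be
 * recovered with dtrace_probe_arg().
 */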
7625
7626static dtrace_probe_t *
7627dtrace_probe_lookup_id(dtrace_id_t id)
7628{
7629 ASSERT(MUTEX_HELD(&dtrace_lock));
7630
7631 if (id == 0 || id > dtrace_nprobes)
7632 return (NULL);
7633
7634 return (dtrace_probes[id - 1]);
7635}
7636
7637static int
7638dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7639{
7640 *((dtrace_id_t *)arg) = probe->dtpr_id;
7641
7642 return (DTRACE_MATCH_DONE);
7643}
7644
7645/*
7646 * Look up a probe based on provider and one or more of module name, function
7647 * name and probe name.
7648 */
7649dtrace_id_t
7650dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
7651 const char *func, const char *name)
7652{
7653 dtrace_probekey_t pkey;
7654 dtrace_id_t id;
7655 int match;
7656
7657 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7658 pkey.dtpk_pmatch = &dtrace_match_string;
7659 pkey.dtpk_mod = mod;
7660 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7661 pkey.dtpk_func = func;
7662 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7663 pkey.dtpk_name = name;
7664 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7665 pkey.dtpk_id = DTRACE_IDNONE;
7666
7667 mutex_enter(&dtrace_lock);
7668 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7669 dtrace_probe_lookup_match, &id);
7670 mutex_exit(&dtrace_lock);
7671
7672 ASSERT(match == 1 || match == 0);
7673 return (match ? id : 0);
7674}
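
/*
 * Providers commonly use this from dtps_provide() to avoid recreating
 * probes they have already offered, e.g. (a sketch):
 *
 *	if (dtrace_probe_lookup(myprov_id, NULL, "read", "entry") != 0)
 *		return;
 */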
7675
7676/*
7677 * Returns the probe argument associated with the specified probe.
7678 */
7679void *
7680dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7681{
7682 dtrace_probe_t *probe;
7683 void *rval = NULL;
7684
7685 mutex_enter(&dtrace_lock);
7686
7687 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7688 probe->dtpr_provider == (dtrace_provider_t *)id)
7689 rval = probe->dtpr_arg;
7690
7691 mutex_exit(&dtrace_lock);
7692
7693 return (rval);
7694}
7695
7696/*
7697 * Copy a probe into a probe description.
7698 */
7699static void
7700dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7701{
7702 bzero(pdp, sizeof (dtrace_probedesc_t));
7703 pdp->dtpd_id = prp->dtpr_id;
7704
7705 (void) strncpy(pdp->dtpd_provider,
7706 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7707
7708 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7709 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7710 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7711}
7712
7713/*
7714 * Called to indicate that a probe -- or probes -- should be provided by a
7715 * specified provider. If the specified description is NULL, the provider will
7716 * be told to provide all of its probes. (This is done whenever a new
7717 * consumer comes along, or whenever a retained enabling is to be matched.) If
7718 * the specified description is non-NULL, the provider is given the
7719 * opportunity to dynamically provide the specified probe, allowing providers
7720 * to support the creation of probes on-the-fly. (So-called _autocreated_
7721 * probes.) If the provider is NULL, the operations will be applied to all
7722 * providers; if the provider is non-NULL the operations will only be applied
7723 * to the specified provider. The dtrace_provider_lock must be held, and the
7724 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7725 * will need to grab the dtrace_lock when it reenters the framework through
7726 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7727 */
7728static void
7729dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7730{
7731#ifndef VBOX
7732 struct modctl *ctl;
7733#endif
7734 int all = 0;
7735
7736 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7737
7738 if (prv == NULL) {
7739 all = 1;
7740 prv = dtrace_provider;
7741 }
7742
7743 do {
7744 /*
7745 * First, call the blanket provide operation.
7746 */
7747 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
7748
7749#ifndef VBOX
7750 /*
7751 * Now call the per-module provide operation. We will grab
7752 * mod_lock to prevent the list from being modified. Note
7753 * that this also prevents the mod_busy bits from changing.
7754 * (mod_busy can only be changed with mod_lock held.)
7755 */
7756 mutex_enter(&mod_lock);
7757
7758 ctl = &modules;
7759 do {
7760 if (ctl->mod_busy || ctl->mod_mp == NULL)
7761 continue;
7762
7763 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
7764
7765 } while ((ctl = ctl->mod_next) != &modules);
7766
7767 mutex_exit(&mod_lock);
7768#endif
7769 } while (all && (prv = prv->dtpv_next) != NULL);
7770}
7771
7772/*
7773 * Iterate over each probe, and call the Framework-to-Provider API function
7774 * denoted by offs.
7775 */
7776static void
7777dtrace_probe_foreach(uintptr_t offs)
7778{
7779 dtrace_provider_t *prov;
7780 void (*func)(void *, dtrace_id_t, void *);
7781 dtrace_probe_t *probe;
7782 dtrace_icookie_t cookie;
7783 VBDTTYPE(uint32_t,int) i;
7784
7785 /*
7786 * We disable interrupts to walk through the probe array. This is
7787 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
7788 * won't see stale data.
7789 */
7790 cookie = dtrace_interrupt_disable();
7791
7792 for (i = 0; i < dtrace_nprobes; i++) {
7793 if ((probe = dtrace_probes[i]) == NULL)
7794 continue;
7795
7796 if (probe->dtpr_ecb == NULL) {
7797 /*
7798 * This probe isn't enabled -- don't call the function.
7799 */
7800 continue;
7801 }
7802
7803 prov = probe->dtpr_provider;
7804 func = *((void(**)(void *, dtrace_id_t, void *))
7805 ((uintptr_t)&prov->dtpv_pops + offs));
7806
7807 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
7808 }
7809
7810 dtrace_interrupt_enable(cookie);
7811}
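
/*
 * The 'offs' argument is the byte offset of the desired entry point within
 * dtrace_pops_t, so a caller sweeping every enabled probe looks like (a
 * sketch of the suspend case):
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
 */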
7812
7813static int
7814dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
7815{
7816 dtrace_probekey_t pkey;
7817 uint32_t priv;
7818 uid_t uid;
7819 zoneid_t zoneid;
7820
7821 ASSERT(MUTEX_HELD(&dtrace_lock));
7822 dtrace_ecb_create_cache = NULL;
7823
7824 if (desc == NULL) {
7825 /*
7826 * If we're passed a NULL description, we're being asked to
7827 * create an ECB with a NULL probe.
7828 */
7829 (void) dtrace_ecb_create_enable(NULL, enab);
7830 return (0);
7831 }
7832
7833 dtrace_probekey(desc, &pkey);
7834 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
7835 &priv, &uid, &zoneid);
7836
7837 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
7838 enab));
7839}
7840
7841/*
7842 * DTrace Helper Provider Functions
7843 */
7844static void
7845dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
7846{
7847 attr->dtat_name = DOF_ATTR_NAME(dofattr);
7848 attr->dtat_data = DOF_ATTR_DATA(dofattr);
7849 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
7850}
7851
7852static void
7853dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
7854 const dof_provider_t *dofprov, char *strtab)
7855{
7856 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
7857 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
7858 dofprov->dofpv_provattr);
7859 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
7860 dofprov->dofpv_modattr);
7861 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
7862 dofprov->dofpv_funcattr);
7863 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
7864 dofprov->dofpv_nameattr);
7865 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
7866 dofprov->dofpv_argsattr);
7867}
7868
7869static void
7870dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7871{
7872 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7873 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7874 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
7875 dof_provider_t *provider;
7876 dof_probe_t *probe;
7877 uint32_t *off, *enoff;
7878 uint8_t *arg;
7879 char *strtab;
7880 uint_t i, nprobes;
7881 dtrace_helper_provdesc_t dhpv;
7882 dtrace_helper_probedesc_t dhpb;
7883 dtrace_meta_t *meta = dtrace_meta_pid;
7884 dtrace_mops_t *mops = &meta->dtm_mops;
7885 void *parg;
7886
7887 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7888 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7889 provider->dofpv_strtab * dof->dofh_secsize);
7890 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7891 provider->dofpv_probes * dof->dofh_secsize);
7892 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7893 provider->dofpv_prargs * dof->dofh_secsize);
7894 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7895 provider->dofpv_proffs * dof->dofh_secsize);
7896
7897 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
7898 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
7899 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
7900 enoff = NULL;
7901
7902 /*
7903 * See dtrace_helper_provider_validate().
7904 */
7905 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
7906 provider->dofpv_prenoffs != DOF_SECT_NONE) {
7907 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7908 provider->dofpv_prenoffs * dof->dofh_secsize);
7909 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
7910 }
7911
7912 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
7913
7914 /*
7915 * Create the provider.
7916 */
7917 dtrace_dofprov2hprov(&dhpv, provider, strtab);
7918
7919 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
7920 return;
7921
7922 meta->dtm_count++;
7923
7924 /*
7925 * Create the probes.
7926 */
7927 for (i = 0; i < nprobes; i++) {
7928 probe = (dof_probe_t *)(uintptr_t)(daddr +
7929 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
7930
7931 dhpb.dthpb_mod = dhp->dofhp_mod;
7932 dhpb.dthpb_func = strtab + probe->dofpr_func;
7933 dhpb.dthpb_name = strtab + probe->dofpr_name;
7934 dhpb.dthpb_base = probe->dofpr_addr;
7935 dhpb.dthpb_offs = off + probe->dofpr_offidx;
7936 dhpb.dthpb_noffs = probe->dofpr_noffs;
7937 if (enoff != NULL) {
7938 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
7939 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
7940 } else {
7941 dhpb.dthpb_enoffs = NULL;
7942 dhpb.dthpb_nenoffs = 0;
7943 }
7944 dhpb.dthpb_args = arg + probe->dofpr_argidx;
7945 dhpb.dthpb_nargc = probe->dofpr_nargc;
7946 dhpb.dthpb_xargc = probe->dofpr_xargc;
7947 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
7948 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
7949
7950 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
7951 }
7952}
7953
7954static void
7955dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
7956{
7957 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7958 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7959 VBDTTYPE(uint32_t,int) i;
7960
7961 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
7962
7963 for (i = 0; i < dof->dofh_secnum; i++) {
7964 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
7965 dof->dofh_secoff + i * dof->dofh_secsize);
7966
7967 if (sec->dofs_type != DOF_SECT_PROVIDER)
7968 continue;
7969
7970 dtrace_helper_provide_one(dhp, sec, pid);
7971 }
7972
7973 /*
7974 * We may have just created probes, so we must now rematch against
7975 * any retained enablings. Note that this call will acquire both
7976 * cpu_lock and dtrace_lock; the fact that we are holding
7977 * dtrace_meta_lock now is what defines the ordering with respect to
7978 * these three locks.
7979 */
7980 dtrace_enabling_matchall();
7981}
7982
7983static void
7984dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7985{
7986 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7987 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7988 dof_sec_t *str_sec;
7989 dof_provider_t *provider;
7990 char *strtab;
7991 dtrace_helper_provdesc_t dhpv;
7992 dtrace_meta_t *meta = dtrace_meta_pid;
7993 dtrace_mops_t *mops = &meta->dtm_mops;
7994
7995 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7996 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7997 provider->dofpv_strtab * dof->dofh_secsize);
7998
7999 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
8000
8001 /*
8002 * Create the provider description to hand to the remove entry point.
8003 */
8004 dtrace_dofprov2hprov(&dhpv, provider, strtab);
8005
8006 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
8007
8008 meta->dtm_count--;
8009}
8010
8011static void
8012dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
8013{
8014 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
8015 dof_hdr_t *dof = (dof_hdr_t *)daddr;
8016 VBDTTYPE(uint32_t,int) i;
8017
8018 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
8019
8020 for (i = 0; i < dof->dofh_secnum; i++) {
8021 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
8022 dof->dofh_secoff + i * dof->dofh_secsize);
8023
8024 if (sec->dofs_type != DOF_SECT_PROVIDER)
8025 continue;
8026
8027 dtrace_helper_provider_remove_one(dhp, sec, pid);
8028 }
8029}
8030
8031/*
8032 * DTrace Meta Provider-to-Framework API Functions
8033 *
8034 * These functions implement the Meta Provider-to-Framework API, as described
8035 * in <sys/dtrace.h>.
8036 */
8037int
8038dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
8039 dtrace_meta_provider_id_t *idp)
8040{
8041 dtrace_meta_t *meta;
8042 dtrace_helpers_t *help, *next;
8043 VBDTTYPE(uint32_t,int) i;
8044
8045 *idp = DTRACE_METAPROVNONE;
8046
8047 /*
8048 * We strictly don't need the name, but we hold onto it for
8049 * debuggability. All hail error queues!
8050 */
8051 if (name == NULL) {
8052 cmn_err(CE_WARN, "failed to register meta-provider: "
8053 "invalid name");
8054 return (EINVAL);
8055 }
8056
8057 if (mops == NULL ||
8058 mops->dtms_create_probe == NULL ||
8059 mops->dtms_provide_pid == NULL ||
8060 mops->dtms_remove_pid == NULL) {
8061 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8062 "invalid ops", name);
8063 return (EINVAL);
8064 }
8065
8066 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
8067 meta->dtm_mops = *mops;
8068 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8069 (void) strcpy(meta->dtm_name, name);
8070 meta->dtm_arg = arg;
8071
8072 mutex_enter(&dtrace_meta_lock);
8073 mutex_enter(&dtrace_lock);
8074
8075 if (dtrace_meta_pid != NULL) {
8076 mutex_exit(&dtrace_lock);
8077 mutex_exit(&dtrace_meta_lock);
8078 cmn_err(CE_WARN, "failed to register meta-provider %s: "
8079 "user-land meta-provider exists", name);
8080 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
8081 kmem_free(meta, sizeof (dtrace_meta_t));
8082 return (EINVAL);
8083 }
8084
8085 dtrace_meta_pid = meta;
8086 *idp = (dtrace_meta_provider_id_t)meta;
8087
8088 /*
8089 * If there are providers and probes ready to go, pass them
8090 * off to the new meta-provider now.
8091 */
8092
8093 help = dtrace_deferred_pid;
8094 dtrace_deferred_pid = NULL;
8095
8096 mutex_exit(&dtrace_lock);
8097
8098 while (help != NULL) {
8099 for (i = 0; i < help->dthps_nprovs; i++) {
8100 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
8101 help->dthps_pid);
8102 }
8103
8104 next = help->dthps_next;
8105 help->dthps_next = NULL;
8106 help->dthps_prev = NULL;
8107 help->dthps_deferred = 0;
8108 help = next;
8109 }
8110
8111 mutex_exit(&dtrace_meta_lock);
8112
8113 return (0);
8114}
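
/*
 * For illustration only: a user-land meta-provider would typically
 * register itself from its attach path roughly as sketched below,
 * supplying dtms_create_probe, dtms_provide_pid and dtms_remove_pid
 * entry points.  The "example_*" names are hypothetical; only
 * dtrace_meta_register() and dtrace_meta_unregister() are the real API.
 *
 *	static dtrace_mops_t example_mops = {
 *		example_create_probe,
 *		example_provide_pid,
 *		example_remove_pid
 *	};
 *	static dtrace_meta_provider_id_t example_meta_id;
 *
 *	if (dtrace_meta_register("example", &example_mops, NULL,
 *	    &example_meta_id) != 0)
 *		return (DDI_FAILURE);
 *
 * The matching dtrace_meta_unregister(example_meta_id) at detach time
 * fails with EBUSY for as long as dtm_count is non-zero.
 */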
8115
8116int
8117dtrace_meta_unregister(dtrace_meta_provider_id_t id)
8118{
8119 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
8120
8121 mutex_enter(&dtrace_meta_lock);
8122 mutex_enter(&dtrace_lock);
8123
8124 if (old == dtrace_meta_pid) {
8125 pp = &dtrace_meta_pid;
8126 } else {
8127 panic("attempt to unregister non-existent "
8128 "dtrace meta-provider %p\n", (void *)old);
8129#ifdef VBOX
8130 return EINVAL;
8131#endif
8132 }
8133
8134 if (old->dtm_count != 0) {
8135 mutex_exit(&dtrace_lock);
8136 mutex_exit(&dtrace_meta_lock);
8137 return (EBUSY);
8138 }
8139
8140 *pp = NULL;
8141
8142 mutex_exit(&dtrace_lock);
8143 mutex_exit(&dtrace_meta_lock);
8144
8145 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
8146 kmem_free(old, sizeof (dtrace_meta_t));
8147
8148 return (0);
8149}
8150
8151
8152/*
8153 * DTrace DIF Object Functions
8154 */
8155static int
8156dtrace_difo_err(uint_t pc, const char *format, ...)
8157{
8158 if (dtrace_err_verbose) {
8159 va_list alist;
8160
8161 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
8162 va_start(alist, format);
8163 (void) vuprintf(format, alist);
8164 va_end(alist);
8165 }
8166
8167#ifdef DTRACE_ERRDEBUG
8168 dtrace_errdebug(format);
8169#endif
8170 return (1);
8171}
8172
8173/*
8174 * Validate a DTrace DIF object by checking the IR instructions. The following
8175 * rules are currently enforced by dtrace_difo_validate():
8176 *
8177 * 1. Each instruction must have a valid opcode
8178 * 2. Each register, string, variable, or subroutine reference must be valid
8179 * 3. No instruction can modify register %r0 (must be zero)
8180 * 4. All instruction reserved bits must be set to zero
8181 * 5. The last instruction must be a "ret" instruction
8182 * 6. All branch targets must reference a valid instruction _after_ the branch
8183 */
8184static int
8185dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
8186 cred_t *cr)
8187{
8188#ifndef VBOX
8189 int err = 0, i;
8190#else
8191 int err = 0;
8192 uint_t i;
8193#endif
8194 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8195 int kcheckload;
8196 uint_t pc;
8197
8198 kcheckload = cr == NULL ||
8199 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
8200
8201 dp->dtdo_destructive = 0;
8202
8203 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
8204 dif_instr_t instr = dp->dtdo_buf[pc];
8205
8206 uint_t r1 = DIF_INSTR_R1(instr);
8207 uint_t r2 = DIF_INSTR_R2(instr);
8208 uint_t rd = DIF_INSTR_RD(instr);
8209 uint_t rs = DIF_INSTR_RS(instr);
8210 uint_t label = DIF_INSTR_LABEL(instr);
8211 uint_t v = DIF_INSTR_VAR(instr);
8212 uint_t subr = DIF_INSTR_SUBR(instr);
8213 uint_t type = DIF_INSTR_TYPE(instr);
8214 uint_t op = DIF_INSTR_OP(instr);
8215
8216 switch (op) {
8217 case DIF_OP_OR:
8218 case DIF_OP_XOR:
8219 case DIF_OP_AND:
8220 case DIF_OP_SLL:
8221 case DIF_OP_SRL:
8222 case DIF_OP_SRA:
8223 case DIF_OP_SUB:
8224 case DIF_OP_ADD:
8225 case DIF_OP_MUL:
8226 case DIF_OP_SDIV:
8227 case DIF_OP_UDIV:
8228 case DIF_OP_SREM:
8229 case DIF_OP_UREM:
8230 case DIF_OP_COPYS:
8231 if (r1 >= nregs)
8232 err += efunc(pc, "invalid register %u\n", r1);
8233 if (r2 >= nregs)
8234 err += efunc(pc, "invalid register %u\n", r2);
8235 if (rd >= nregs)
8236 err += efunc(pc, "invalid register %u\n", rd);
8237 if (rd == 0)
8238 err += efunc(pc, "cannot write to %r0\n");
8239 break;
8240 case DIF_OP_NOT:
8241 case DIF_OP_MOV:
8242 case DIF_OP_ALLOCS:
8243 if (r1 >= nregs)
8244 err += efunc(pc, "invalid register %u\n", r1);
8245 if (r2 != 0)
8246 err += efunc(pc, "non-zero reserved bits\n");
8247 if (rd >= nregs)
8248 err += efunc(pc, "invalid register %u\n", rd);
8249 if (rd == 0)
8250 err += efunc(pc, "cannot write to %r0\n");
8251 break;
8252 case DIF_OP_LDSB:
8253 case DIF_OP_LDSH:
8254 case DIF_OP_LDSW:
8255 case DIF_OP_LDUB:
8256 case DIF_OP_LDUH:
8257 case DIF_OP_LDUW:
8258 case DIF_OP_LDX:
8259 if (r1 >= nregs)
8260 err += efunc(pc, "invalid register %u\n", r1);
8261 if (r2 != 0)
8262 err += efunc(pc, "non-zero reserved bits\n");
8263 if (rd >= nregs)
8264 err += efunc(pc, "invalid register %u\n", rd);
8265 if (rd == 0)
8266 err += efunc(pc, "cannot write to %r0\n");
8267 if (kcheckload)
8268 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
8269 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
8270 break;
8271 case DIF_OP_RLDSB:
8272 case DIF_OP_RLDSH:
8273 case DIF_OP_RLDSW:
8274 case DIF_OP_RLDUB:
8275 case DIF_OP_RLDUH:
8276 case DIF_OP_RLDUW:
8277 case DIF_OP_RLDX:
8278 if (r1 >= nregs)
8279 err += efunc(pc, "invalid register %u\n", r1);
8280 if (r2 != 0)
8281 err += efunc(pc, "non-zero reserved bits\n");
8282 if (rd >= nregs)
8283 err += efunc(pc, "invalid register %u\n", rd);
8284 if (rd == 0)
8285 err += efunc(pc, "cannot write to %r0\n");
8286 break;
8287 case DIF_OP_ULDSB:
8288 case DIF_OP_ULDSH:
8289 case DIF_OP_ULDSW:
8290 case DIF_OP_ULDUB:
8291 case DIF_OP_ULDUH:
8292 case DIF_OP_ULDUW:
8293 case DIF_OP_ULDX:
8294 if (r1 >= nregs)
8295 err += efunc(pc, "invalid register %u\n", r1);
8296 if (r2 != 0)
8297 err += efunc(pc, "non-zero reserved bits\n");
8298 if (rd >= nregs)
8299 err += efunc(pc, "invalid register %u\n", rd);
8300 if (rd == 0)
8301 err += efunc(pc, "cannot write to %r0\n");
8302 break;
8303 case DIF_OP_STB:
8304 case DIF_OP_STH:
8305 case DIF_OP_STW:
8306 case DIF_OP_STX:
8307 if (r1 >= nregs)
8308 err += efunc(pc, "invalid register %u\n", r1);
8309 if (r2 != 0)
8310 err += efunc(pc, "non-zero reserved bits\n");
8311 if (rd >= nregs)
8312 err += efunc(pc, "invalid register %u\n", rd);
8313 if (rd == 0)
8314 err += efunc(pc, "cannot write to 0 address\n");
8315 break;
8316 case DIF_OP_CMP:
8317 case DIF_OP_SCMP:
8318 if (r1 >= nregs)
8319 err += efunc(pc, "invalid register %u\n", r1);
8320 if (r2 >= nregs)
8321 err += efunc(pc, "invalid register %u\n", r2);
8322 if (rd != 0)
8323 err += efunc(pc, "non-zero reserved bits\n");
8324 break;
8325 case DIF_OP_TST:
8326 if (r1 >= nregs)
8327 err += efunc(pc, "invalid register %u\n", r1);
8328 if (r2 != 0 || rd != 0)
8329 err += efunc(pc, "non-zero reserved bits\n");
8330 break;
8331 case DIF_OP_BA:
8332 case DIF_OP_BE:
8333 case DIF_OP_BNE:
8334 case DIF_OP_BG:
8335 case DIF_OP_BGU:
8336 case DIF_OP_BGE:
8337 case DIF_OP_BGEU:
8338 case DIF_OP_BL:
8339 case DIF_OP_BLU:
8340 case DIF_OP_BLE:
8341 case DIF_OP_BLEU:
8342 if (label >= dp->dtdo_len) {
8343 err += efunc(pc, "invalid branch target %u\n",
8344 label);
8345 }
8346 if (label <= pc) {
8347 err += efunc(pc, "backward branch to %u\n",
8348 label);
8349 }
8350 break;
8351 case DIF_OP_RET:
8352 if (r1 != 0 || r2 != 0)
8353 err += efunc(pc, "non-zero reserved bits\n");
8354 if (rd >= nregs)
8355 err += efunc(pc, "invalid register %u\n", rd);
8356 break;
8357 case DIF_OP_NOP:
8358 case DIF_OP_POPTS:
8359 case DIF_OP_FLUSHTS:
8360 if (r1 != 0 || r2 != 0 || rd != 0)
8361 err += efunc(pc, "non-zero reserved bits\n");
8362 break;
8363 case DIF_OP_SETX:
8364 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8365 err += efunc(pc, "invalid integer ref %u\n",
8366 DIF_INSTR_INTEGER(instr));
8367 }
8368 if (rd >= nregs)
8369 err += efunc(pc, "invalid register %u\n", rd);
8370 if (rd == 0)
8371 err += efunc(pc, "cannot write to %r0\n");
8372 break;
8373 case DIF_OP_SETS:
8374 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8375 err += efunc(pc, "invalid string ref %u\n",
8376 DIF_INSTR_STRING(instr));
8377 }
8378 if (rd >= nregs)
8379 err += efunc(pc, "invalid register %u\n", rd);
8380 if (rd == 0)
8381 err += efunc(pc, "cannot write to %r0\n");
8382 break;
8383 case DIF_OP_LDGA:
8384 case DIF_OP_LDTA:
8385 if (r1 > DIF_VAR_ARRAY_MAX)
8386 err += efunc(pc, "invalid array %u\n", r1);
8387 if (r2 >= nregs)
8388 err += efunc(pc, "invalid register %u\n", r2);
8389 if (rd >= nregs)
8390 err += efunc(pc, "invalid register %u\n", rd);
8391 if (rd == 0)
8392 err += efunc(pc, "cannot write to %r0\n");
8393 break;
8394 case DIF_OP_LDGS:
8395 case DIF_OP_LDTS:
8396 case DIF_OP_LDLS:
8397 case DIF_OP_LDGAA:
8398 case DIF_OP_LDTAA:
8399 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8400 err += efunc(pc, "invalid variable %u\n", v);
8401 if (rd >= nregs)
8402 err += efunc(pc, "invalid register %u\n", rd);
8403 if (rd == 0)
8404 err += efunc(pc, "cannot write to %r0\n");
8405 break;
8406 case DIF_OP_STGS:
8407 case DIF_OP_STTS:
8408 case DIF_OP_STLS:
8409 case DIF_OP_STGAA:
8410 case DIF_OP_STTAA:
8411 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8412 err += efunc(pc, "invalid variable %u\n", v);
8413 if (rs >= nregs)
8414 err += efunc(pc, "invalid register %u\n", rs);
8415 break;
8416 case DIF_OP_CALL:
8417 if (subr > DIF_SUBR_MAX)
8418 err += efunc(pc, "invalid subr %u\n", subr);
8419 if (rd >= nregs)
8420 err += efunc(pc, "invalid register %u\n", rd);
8421 if (rd == 0)
8422 err += efunc(pc, "cannot write to %r0\n");
8423
8424 if (subr == DIF_SUBR_COPYOUT ||
8425 subr == DIF_SUBR_COPYOUTSTR) {
8426 dp->dtdo_destructive = 1;
8427 }
8428 break;
8429 case DIF_OP_PUSHTR:
8430 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8431 err += efunc(pc, "invalid ref type %u\n", type);
8432 if (r2 >= nregs)
8433 err += efunc(pc, "invalid register %u\n", r2);
8434 if (rs >= nregs)
8435 err += efunc(pc, "invalid register %u\n", rs);
8436 break;
8437 case DIF_OP_PUSHTV:
8438 if (type != DIF_TYPE_CTF)
8439 err += efunc(pc, "invalid val type %u\n", type);
8440 if (r2 >= nregs)
8441 err += efunc(pc, "invalid register %u\n", r2);
8442 if (rs >= nregs)
8443 err += efunc(pc, "invalid register %u\n", rs);
8444 break;
8445 default:
8446 err += efunc(pc, "invalid opcode %u\n",
8447 DIF_INSTR_OP(instr));
8448 }
8449 }
8450
8451 if (dp->dtdo_len != 0 &&
8452 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8453 err += efunc(dp->dtdo_len - 1,
8454 "expected 'ret' as last DIF instruction\n");
8455 }
8456
8457 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8458 /*
8459 * If we're not returning by reference, the size must be either
8460 * 0 or the size of one of the base types.
8461 */
8462 switch (dp->dtdo_rtype.dtdt_size) {
8463 case 0:
8464 case sizeof (uint8_t):
8465 case sizeof (uint16_t):
8466 case sizeof (uint32_t):
8467 case sizeof (uint64_t):
8468 break;
8469
8470 default:
8471 err += efunc(dp->dtdo_len - 1, "bad return size\n");
8472 }
8473 }
8474
8475 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8476 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8477 dtrace_diftype_t *vt, *et;
8478 uint_t id, ndx;
8479
8480 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8481 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8482 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8483 err += efunc(i, "unrecognized variable scope %d\n",
8484 v->dtdv_scope);
8485 break;
8486 }
8487
8488 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8489 v->dtdv_kind != DIFV_KIND_SCALAR) {
8490 err += efunc(i, "unrecognized variable type %d\n",
8491 v->dtdv_kind);
8492 break;
8493 }
8494
8495 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8496 err += efunc(i, "%d exceeds variable id limit\n", id);
8497 break;
8498 }
8499
8500 if (id < DIF_VAR_OTHER_UBASE)
8501 continue;
8502
8503 /*
8504 * For user-defined variables, we need to check that this
8505 * definition is identical to any previous definition that we
8506 * encountered.
8507 */
8508 ndx = id - DIF_VAR_OTHER_UBASE;
8509
8510 switch (v->dtdv_scope) {
8511 case DIFV_SCOPE_GLOBAL:
8512 if (VBDTCAST(int64_t)ndx < vstate->dtvs_nglobals) {
8513 dtrace_statvar_t *svar;
8514
8515 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8516 existing = &svar->dtsv_var;
8517 }
8518
8519 break;
8520
8521 case DIFV_SCOPE_THREAD:
8522 if (VBDTCAST(int64_t)ndx < vstate->dtvs_ntlocals)
8523 existing = &vstate->dtvs_tlocals[ndx];
8524 break;
8525
8526 case DIFV_SCOPE_LOCAL:
8527 if (VBDTCAST(int64_t)ndx < vstate->dtvs_nlocals) {
8528 dtrace_statvar_t *svar;
8529
8530 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8531 existing = &svar->dtsv_var;
8532 }
8533
8534 break;
8535 }
8536
8537 vt = &v->dtdv_type;
8538
8539 if (vt->dtdt_flags & DIF_TF_BYREF) {
8540 if (vt->dtdt_size == 0) {
8541 err += efunc(i, "zero-sized variable\n");
8542 break;
8543 }
8544
8545 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8546 vt->dtdt_size > dtrace_global_maxsize) {
8547 err += efunc(i, "oversized by-ref global\n");
8548 break;
8549 }
8550 }
8551
8552 if (existing == NULL || existing->dtdv_id == 0)
8553 continue;
8554
8555 ASSERT(existing->dtdv_id == v->dtdv_id);
8556 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8557
8558 if (existing->dtdv_kind != v->dtdv_kind)
8559 err += efunc(i, "%d changed variable kind\n", id);
8560
8561 et = &existing->dtdv_type;
8562
8563 if (vt->dtdt_flags != et->dtdt_flags) {
8564 err += efunc(i, "%d changed variable type flags\n", id);
8565 break;
8566 }
8567
8568 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8569 err += efunc(i, "%d changed variable type size\n", id);
8570 break;
8571 }
8572 }
8573
8574 return (err);
8575}
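
/*
 * To make the rules above concrete, the simplest DIFO that validates
 * cleanly is one that loads a constant and returns it -- in DIF
 * assembly (shown for illustration only):
 *
 *	setx	DIF_INTEGER[0], %r1	- integer table reference is
 *					  checked against dtdo_intlen
 *	ret	%r1			- last instruction must be "ret"
 *
 * Both instructions write only %r1 (never %r0), there are no branches,
 * and the object ends in a "ret", so dtrace_difo_validate() returns 0.
 */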
8576
8577/*
8578 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8579 * are much more constrained than normal DIFOs. Specifically, they may
8580 * not:
8581 *
8582 * 1. Make calls to subroutines other than copyin(), copyinstr(), or
8583 * miscellaneous string routines.
8584 * 2. Access DTrace variables other than the args[] array and the
8585 * curthread, pid, ppid, tid, execname, zonename, uid, and gid variables.
8586 * 3. Have thread-local variables.
8587 * 4. Have dynamic variables.
8588 */
8589static int
8590dtrace_difo_validate_helper(dtrace_difo_t *dp)
8591{
8592 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8593 int err = 0;
8594 uint_t pc;
8595
8596 for (pc = 0; pc < dp->dtdo_len; pc++) {
8597 dif_instr_t instr = dp->dtdo_buf[pc];
8598
8599 uint_t v = DIF_INSTR_VAR(instr);
8600 uint_t subr = DIF_INSTR_SUBR(instr);
8601 uint_t op = DIF_INSTR_OP(instr);
8602
8603 switch (op) {
8604 case DIF_OP_OR:
8605 case DIF_OP_XOR:
8606 case DIF_OP_AND:
8607 case DIF_OP_SLL:
8608 case DIF_OP_SRL:
8609 case DIF_OP_SRA:
8610 case DIF_OP_SUB:
8611 case DIF_OP_ADD:
8612 case DIF_OP_MUL:
8613 case DIF_OP_SDIV:
8614 case DIF_OP_UDIV:
8615 case DIF_OP_SREM:
8616 case DIF_OP_UREM:
8617 case DIF_OP_COPYS:
8618 case DIF_OP_NOT:
8619 case DIF_OP_MOV:
8620 case DIF_OP_RLDSB:
8621 case DIF_OP_RLDSH:
8622 case DIF_OP_RLDSW:
8623 case DIF_OP_RLDUB:
8624 case DIF_OP_RLDUH:
8625 case DIF_OP_RLDUW:
8626 case DIF_OP_RLDX:
8627 case DIF_OP_ULDSB:
8628 case DIF_OP_ULDSH:
8629 case DIF_OP_ULDSW:
8630 case DIF_OP_ULDUB:
8631 case DIF_OP_ULDUH:
8632 case DIF_OP_ULDUW:
8633 case DIF_OP_ULDX:
8634 case DIF_OP_STB:
8635 case DIF_OP_STH:
8636 case DIF_OP_STW:
8637 case DIF_OP_STX:
8638 case DIF_OP_ALLOCS:
8639 case DIF_OP_CMP:
8640 case DIF_OP_SCMP:
8641 case DIF_OP_TST:
8642 case DIF_OP_BA:
8643 case DIF_OP_BE:
8644 case DIF_OP_BNE:
8645 case DIF_OP_BG:
8646 case DIF_OP_BGU:
8647 case DIF_OP_BGE:
8648 case DIF_OP_BGEU:
8649 case DIF_OP_BL:
8650 case DIF_OP_BLU:
8651 case DIF_OP_BLE:
8652 case DIF_OP_BLEU:
8653 case DIF_OP_RET:
8654 case DIF_OP_NOP:
8655 case DIF_OP_POPTS:
8656 case DIF_OP_FLUSHTS:
8657 case DIF_OP_SETX:
8658 case DIF_OP_SETS:
8659 case DIF_OP_LDGA:
8660 case DIF_OP_LDLS:
8661 case DIF_OP_STGS:
8662 case DIF_OP_STLS:
8663 case DIF_OP_PUSHTR:
8664 case DIF_OP_PUSHTV:
8665 break;
8666
8667 case DIF_OP_LDGS:
8668 if (v >= DIF_VAR_OTHER_UBASE)
8669 break;
8670
8671 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8672 break;
8673
8674 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8675 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8676 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8677 v == DIF_VAR_UID || v == DIF_VAR_GID)
8678 break;
8679
8680 err += efunc(pc, "illegal variable %u\n", v);
8681 break;
8682
8683 case DIF_OP_LDTA:
8684 case DIF_OP_LDTS:
8685 case DIF_OP_LDGAA:
8686 case DIF_OP_LDTAA:
8687 err += efunc(pc, "illegal dynamic variable load\n");
8688 break;
8689
8690 case DIF_OP_STTS:
8691 case DIF_OP_STGAA:
8692 case DIF_OP_STTAA:
8693 err += efunc(pc, "illegal dynamic variable store\n");
8694 break;
8695
8696 case DIF_OP_CALL:
8697 if (subr == DIF_SUBR_ALLOCA ||
8698 subr == DIF_SUBR_BCOPY ||
8699 subr == DIF_SUBR_COPYIN ||
8700 subr == DIF_SUBR_COPYINTO ||
8701 subr == DIF_SUBR_COPYINSTR ||
8702 subr == DIF_SUBR_INDEX ||
8703 subr == DIF_SUBR_INET_NTOA ||
8704 subr == DIF_SUBR_INET_NTOA6 ||
8705 subr == DIF_SUBR_INET_NTOP ||
8706 subr == DIF_SUBR_LLTOSTR ||
8707 subr == DIF_SUBR_RINDEX ||
8708 subr == DIF_SUBR_STRCHR ||
8709 subr == DIF_SUBR_STRJOIN ||
8710 subr == DIF_SUBR_STRRCHR ||
8711 subr == DIF_SUBR_STRSTR ||
8712 subr == DIF_SUBR_HTONS ||
8713 subr == DIF_SUBR_HTONL ||
8714 subr == DIF_SUBR_HTONLL ||
8715 subr == DIF_SUBR_NTOHS ||
8716 subr == DIF_SUBR_NTOHL ||
8717 subr == DIF_SUBR_NTOHLL)
8718 break;
8719
8720 err += efunc(pc, "invalid subr %u\n", subr);
8721 break;
8722
8723 default:
8724 err += efunc(pc, "invalid opcode %u\n",
8725 DIF_INSTR_OP(instr));
8726 }
8727 }
8728
8729 return (err);
8730}
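
/*
 * In D terms (fragments for illustration only), a helper action such
 * as
 *
 *	this->str = copyinstr(arg0);
 *
 * passes this validation: copyinstr() is a permitted subroutine, arg0
 * is a permitted variable, and clause-local stores (DIF_OP_STLS) are
 * allowed.  By contrast,
 *
 *	self->depth++;
 *
 * compiles to a thread-local store (DIF_OP_STTS) and is rejected with
 * "illegal dynamic variable store".
 */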
8731
8732/*
8733 * Returns 1 if the expression in the DIF object can be cached on a per-thread
8734 * basis; 0 if not.
8735 */
8736static int
8737dtrace_difo_cacheable(dtrace_difo_t *dp)
8738{
8739 VBDTTYPE(uint_t,int) i;
8740
8741 if (dp == NULL)
8742 return (0);
8743
8744 for (i = 0; i < dp->dtdo_varlen; i++) {
8745 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8746
8747 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
8748 continue;
8749
8750 switch (v->dtdv_id) {
8751 case DIF_VAR_CURTHREAD:
8752 case DIF_VAR_PID:
8753 case DIF_VAR_TID:
8754 case DIF_VAR_EXECNAME:
8755 case DIF_VAR_ZONENAME:
8756 break;
8757
8758 default:
8759 return (0);
8760 }
8761 }
8762
8763 /*
8764 * This DIF object may be cacheable. Now we need to look for any
8765 * array loading instructions, any memory loading instructions, or
8766 * any stores to thread-local variables.
8767 */
8768 for (i = 0; i < dp->dtdo_len; i++) {
8769 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
8770
8771 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
8772 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
8773 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
8774 op == DIF_OP_LDGA || op == DIF_OP_STTS)
8775 return (0);
8776 }
8777
8778 return (1);
8779}
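
/*
 * For example, a predicate like /pid == 1234/ touches only the
 * cacheable DIF_VAR_PID variable and performs no loads, so its result
 * may be cached per-thread; /arg0 == 0/ references the probe-dependent
 * arg0 variable and therefore fails the variable check above.
 */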
8780
8781static void
8782dtrace_difo_hold(dtrace_difo_t *dp)
8783{
8784#ifndef VBOX
8785 VBDTTYPE(uint_t,int) i;
8786#endif
8787
8788 ASSERT(MUTEX_HELD(&dtrace_lock));
8789
8790 dp->dtdo_refcnt++;
8791 ASSERT(dp->dtdo_refcnt != 0);
8792
8793#ifndef VBOX
8794 /*
8795 * We need to check this DIF object for references to the variable
8796 * DIF_VAR_VTIMESTAMP.
8797 */
8798 for (i = 0; i < dp->dtdo_varlen; i++) {
8799 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8800
8801 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8802 continue;
8803
8804 if (dtrace_vtime_references++ == 0)
8805 dtrace_vtime_enable();
8806 }
8807#endif
8808}
8809
8810/*
8811 * This routine calculates the dynamic variable chunksize for a given DIF
8812 * object. The calculation is not fool-proof, and can probably be tricked by
8813 * malicious DIF -- but it works for all compiler-generated DIF. Because this
8814 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
8815 * if a dynamic variable size exceeds the chunksize.
8816 */
8817static void
8818dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8819{
8820 uint64_t sval VBDTGCC(0);
8821 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
8822 const dif_instr_t *text = dp->dtdo_buf;
8823 uint_t pc, srd = 0;
8824 uint_t ttop = 0;
8825 size_t size, ksize;
8826 uint_t id, i;
8827
8828 for (pc = 0; pc < dp->dtdo_len; pc++) {
8829 dif_instr_t instr = text[pc];
8830 uint_t op = DIF_INSTR_OP(instr);
8831 uint_t rd = DIF_INSTR_RD(instr);
8832 uint_t r1 = DIF_INSTR_R1(instr);
8833 uint_t nkeys = 0;
8834 uchar_t scope VBDTGCC(0);
8835
8836 dtrace_key_t *key = tupregs;
8837
8838 switch (op) {
8839 case DIF_OP_SETX:
8840 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
8841 srd = rd;
8842 continue;
8843
8844 case DIF_OP_STTS:
8845 key = &tupregs[DIF_DTR_NREGS];
8846 key[0].dttk_size = 0;
8847 key[1].dttk_size = 0;
8848 nkeys = 2;
8849 scope = DIFV_SCOPE_THREAD;
8850 break;
8851
8852 case DIF_OP_STGAA:
8853 case DIF_OP_STTAA:
8854 nkeys = ttop;
8855
8856 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
8857 key[nkeys++].dttk_size = 0;
8858
8859 key[nkeys++].dttk_size = 0;
8860
8861 if (op == DIF_OP_STTAA) {
8862 scope = DIFV_SCOPE_THREAD;
8863 } else {
8864 scope = DIFV_SCOPE_GLOBAL;
8865 }
8866
8867 break;
8868
8869 case DIF_OP_PUSHTR:
8870 if (ttop == DIF_DTR_NREGS)
8871 return;
8872
8873 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
8874 /*
8875 * If the register for the size of the "pushtr"
8876 * is %r0 (or the value is 0) and the type is
8877 * a string, we'll use the system-wide default
8878 * string size.
8879 */
8880 tupregs[ttop++].dttk_size =
8881 dtrace_strsize_default;
8882 } else {
8883 if (srd == 0)
8884 return;
8885
8886 tupregs[ttop++].dttk_size = sval;
8887 }
8888
8889 break;
8890
8891 case DIF_OP_PUSHTV:
8892 if (ttop == DIF_DTR_NREGS)
8893 return;
8894
8895 tupregs[ttop++].dttk_size = 0;
8896 break;
8897
8898 case DIF_OP_FLUSHTS:
8899 ttop = 0;
8900 break;
8901
8902 case DIF_OP_POPTS:
8903 if (ttop != 0)
8904 ttop--;
8905 break;
8906 }
8907
8908 sval = 0;
8909 srd = 0;
8910
8911 if (nkeys == 0)
8912 continue;
8913
8914 /*
8915 * We have a dynamic variable allocation; calculate its size.
8916 */
8917 for (ksize = 0, i = 0; i < nkeys; i++)
8918 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
8919
8920 size = sizeof (dtrace_dynvar_t);
8921 size += sizeof (dtrace_key_t) * (nkeys - 1);
8922 size += ksize;
8923
8924 /*
8925 * Now we need to determine the size of the stored data.
8926 */
8927 id = DIF_INSTR_VAR(instr);
8928
8929 for (i = 0; i < dp->dtdo_varlen; i++) {
8930 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8931
8932 if (v->dtdv_id == id && v->dtdv_scope == scope) {
8933 size += v->dtdv_type.dtdt_size;
8934 break;
8935 }
8936 }
8937
8938 if (i == dp->dtdo_varlen)
8939 return;
8940
8941 /*
8942 * We have the size. If this is larger than the chunk size
8943 * for our dynamic variable state, reset the chunk size.
8944 */
8945 size = P2ROUNDUP(size, sizeof (uint64_t));
8946
8947 if (size > vstate->dtvs_dynvars.dtds_chunksize)
8948 vstate->dtvs_dynvars.dtds_chunksize = size;
8949 }
8950}
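
/*
 * As a worked example of the sizing: a store to a thread-local scalar
 * (DIF_OP_STTS) contributes two zero-sized keys, so for an 8-byte
 * datum the allocation is sizeof (dtrace_dynvar_t) plus one extra
 * dtrace_key_t plus 8 bytes of data, rounded up to an 8-byte multiple;
 * dtds_chunksize is raised to that value if it is currently smaller.
 */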
8951
8952static void
8953dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8954{
8955#ifndef VBOX
8956 int i, oldsvars, osz, nsz, otlocals, ntlocals;
8957#else
8958 int oldsvars, osz, nsz, otlocals, ntlocals;
8959 uint_t i;
8960#endif
8961 uint_t id;
8962
8963 ASSERT(MUTEX_HELD(&dtrace_lock));
8964 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
8965
8966 for (i = 0; i < dp->dtdo_varlen; i++) {
8967 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8968 dtrace_statvar_t *svar, ***svarp;
8969 size_t dsize = 0;
8970 uint8_t scope = v->dtdv_scope;
8971 int *np;
8972
8973 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
8974 continue;
8975
8976 id -= DIF_VAR_OTHER_UBASE;
8977
8978 switch (scope) {
8979 case DIFV_SCOPE_THREAD:
8980 while (VBDTCAST(int64_t)id >= (otlocals = vstate->dtvs_ntlocals)) {
8981 dtrace_difv_t *tlocals;
8982
8983 if ((ntlocals = (otlocals << 1)) == 0)
8984 ntlocals = 1;
8985
8986 osz = otlocals * sizeof (dtrace_difv_t);
8987 nsz = ntlocals * sizeof (dtrace_difv_t);
8988
8989 tlocals = kmem_zalloc(nsz, KM_SLEEP);
8990
8991 if (osz != 0) {
8992 bcopy(vstate->dtvs_tlocals,
8993 tlocals, osz);
8994 kmem_free(vstate->dtvs_tlocals, osz);
8995 }
8996
8997 vstate->dtvs_tlocals = tlocals;
8998 vstate->dtvs_ntlocals = ntlocals;
8999 }
9000
9001 vstate->dtvs_tlocals[id] = *v;
9002 continue;
9003
9004 case DIFV_SCOPE_LOCAL:
9005 np = &vstate->dtvs_nlocals;
9006 svarp = &vstate->dtvs_locals;
9007
9008 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9009 dsize = NCPU * (v->dtdv_type.dtdt_size +
9010 sizeof (uint64_t));
9011 else
9012 dsize = NCPU * sizeof (uint64_t);
9013
9014 break;
9015
9016 case DIFV_SCOPE_GLOBAL:
9017 np = &vstate->dtvs_nglobals;
9018 svarp = &vstate->dtvs_globals;
9019
9020 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
9021 dsize = v->dtdv_type.dtdt_size +
9022 sizeof (uint64_t);
9023
9024 break;
9025
9026 default:
9027#ifndef VBOX
9028 ASSERT(0);
9029#else
9030 AssertFatalMsgFailed(("%d\n", scope));
9031#endif
9032 }
9033
9034 while (VBDTCAST(int64_t)id >= (oldsvars = *np)) {
9035 dtrace_statvar_t **statics;
9036 int newsvars, oldsize, newsize;
9037
9038 if ((newsvars = (oldsvars << 1)) == 0)
9039 newsvars = 1;
9040
9041 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
9042 newsize = newsvars * sizeof (dtrace_statvar_t *);
9043
9044 statics = kmem_zalloc(newsize, KM_SLEEP);
9045
9046 if (oldsize != 0) {
9047 bcopy(*svarp, statics, oldsize);
9048 kmem_free(*svarp, oldsize);
9049 }
9050
9051 *svarp = statics;
9052 *np = newsvars;
9053 }
9054
9055 if ((svar = (*svarp)[id]) == NULL) {
9056 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
9057 svar->dtsv_var = *v;
9058
9059 if ((svar->dtsv_size = dsize) != 0) {
9060 svar->dtsv_data = (uint64_t)(uintptr_t)
9061 kmem_zalloc(dsize, KM_SLEEP);
9062 }
9063
9064 (*svarp)[id] = svar;
9065 }
9066
9067 svar->dtsv_refcnt++;
9068 }
9069
9070 dtrace_difo_chunksize(dp, vstate);
9071 dtrace_difo_hold(dp);
9072}
9073
9074static dtrace_difo_t *
9075dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9076{
9077 dtrace_difo_t *new;
9078 size_t sz;
9079
9080 ASSERT(dp->dtdo_buf != NULL);
9081 ASSERT(dp->dtdo_refcnt != 0);
9082
9083 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
9084
9085 ASSERT(dp->dtdo_buf != NULL);
9086 sz = dp->dtdo_len * sizeof (dif_instr_t);
9087 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
9088 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
9089 new->dtdo_len = dp->dtdo_len;
9090
9091 if (dp->dtdo_strtab != NULL) {
9092 ASSERT(dp->dtdo_strlen != 0);
9093 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
9094 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
9095 new->dtdo_strlen = dp->dtdo_strlen;
9096 }
9097
9098 if (dp->dtdo_inttab != NULL) {
9099 ASSERT(dp->dtdo_intlen != 0);
9100 sz = dp->dtdo_intlen * sizeof (uint64_t);
9101 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
9102 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
9103 new->dtdo_intlen = dp->dtdo_intlen;
9104 }
9105
9106 if (dp->dtdo_vartab != NULL) {
9107 ASSERT(dp->dtdo_varlen != 0);
9108 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
9109 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
9110 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
9111 new->dtdo_varlen = dp->dtdo_varlen;
9112 }
9113
9114 dtrace_difo_init(new, vstate);
9115 return (new);
9116}
9117
9118static void
9119dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9120{
9121 VBDTTYPE(uint_t,int) i;
9122
9123 ASSERT(dp->dtdo_refcnt == 0);
9124
9125 for (i = 0; i < dp->dtdo_varlen; i++) {
9126 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9127 dtrace_statvar_t *svar, **svarp;
9128 uint_t id;
9129 uint8_t scope = v->dtdv_scope;
9130 int *np;
9131
9132 switch (scope) {
9133 case DIFV_SCOPE_THREAD:
9134 continue;
9135
9136 case DIFV_SCOPE_LOCAL:
9137 np = &vstate->dtvs_nlocals;
9138 svarp = vstate->dtvs_locals;
9139 break;
9140
9141 case DIFV_SCOPE_GLOBAL:
9142 np = &vstate->dtvs_nglobals;
9143 svarp = vstate->dtvs_globals;
9144 break;
9145
9146 default:
9147#ifndef VBOX
9148 ASSERT(0);
9149#else
9150 AssertFatalMsgFailed(("%d\n", scope));
9151#endif
9152 }
9153
9154 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
9155 continue;
9156
9157 id -= DIF_VAR_OTHER_UBASE;
9158 ASSERT(VBDTCAST(int64_t)id < *np);
9159
9160 svar = svarp[id];
9161 ASSERT(svar != NULL);
9162 ASSERT(svar->dtsv_refcnt > 0);
9163
9164 if (--svar->dtsv_refcnt > 0)
9165 continue;
9166
9167 if (svar->dtsv_size != 0) {
9168 ASSERT(svar->dtsv_data != NULL);
9169 kmem_free((void *)(uintptr_t)svar->dtsv_data,
9170 svar->dtsv_size);
9171 }
9172
9173 kmem_free(svar, sizeof (dtrace_statvar_t));
9174 svarp[id] = NULL;
9175 }
9176
9177 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
9178 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
9179 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
9180 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
9181
9182 kmem_free(dp, sizeof (dtrace_difo_t));
9183}
9184
9185static void
9186dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
9187{
9188#ifndef VBOX
9189 VBDTTYPE(uint_t,int) i;
9190#endif
9191
9192 ASSERT(MUTEX_HELD(&dtrace_lock));
9193 ASSERT(dp->dtdo_refcnt != 0);
9194
9195#ifndef VBOX
9196 for (i = 0; i < dp->dtdo_varlen; i++) {
9197 dtrace_difv_t *v = &dp->dtdo_vartab[i];
9198
9199 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
9200 continue;
9201
9202 ASSERT(dtrace_vtime_references > 0);
9203 if (--dtrace_vtime_references == 0)
9204 dtrace_vtime_disable();
9205 }
9206#endif
9207
9208 if (--dp->dtdo_refcnt == 0)
9209 dtrace_difo_destroy(dp, vstate);
9210}
9211
9212/*
9213 * DTrace Format Functions
9214 */
9215static uint16_t
9216dtrace_format_add(dtrace_state_t *state, char *str)
9217{
9218 char *fmt, **new;
9219 uint16_t ndx, len = VBDTCAST(uint16_t)strlen(str) + 1;
9220
9221 fmt = kmem_zalloc(len, KM_SLEEP);
9222 bcopy(str, fmt, len);
9223
9224 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
9225 if (state->dts_formats[ndx] == NULL) {
9226 state->dts_formats[ndx] = fmt;
9227 return (ndx + 1);
9228 }
9229 }
9230
9231 if (state->dts_nformats == USHRT_MAX) {
9232 /*
9233 * This is only likely if a denial-of-service attack is being
9234 * attempted. As such, it's okay to fail silently here.
9235 */
9236 kmem_free(fmt, len);
9237 return (0);
9238 }
9239
9240 /*
9241 * For simplicity, we always resize the formats array to be exactly the
9242 * number of formats.
9243 */
9244 ndx = state->dts_nformats++;
9245 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
9246
9247 if (state->dts_formats != NULL) {
9248 ASSERT(ndx != 0);
9249 bcopy(state->dts_formats, new, ndx * sizeof (char *));
9250 kmem_free(state->dts_formats, ndx * sizeof (char *));
9251 }
9252
9253 state->dts_formats = new;
9254 state->dts_formats[ndx] = fmt;
9255
9256 return (ndx + 1);
9257}
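
/*
 * Note that the value returned above is the format index plus one, so
 * that zero can mean "no format"; callers must undo this bias, e.g.:
 *
 *	fmt = dtrace_format_add(state, "x = %d\n");
 *	...
 *	if (fmt != 0)
 *		dtrace_format_remove(state, fmt);
 *
 * dtrace_format_remove() below accordingly indexes dts_formats with
 * format - 1.
 */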
9258
9259static void
9260dtrace_format_remove(dtrace_state_t *state, uint16_t format)
9261{
9262 char *fmt;
9263
9264 ASSERT(state->dts_formats != NULL);
9265 ASSERT(format <= state->dts_nformats);
9266 ASSERT(state->dts_formats[format - 1] != NULL);
9267
9268 fmt = state->dts_formats[format - 1];
9269 kmem_free(fmt, strlen(fmt) + 1);
9270 state->dts_formats[format - 1] = NULL;
9271}
9272
9273static void
9274dtrace_format_destroy(dtrace_state_t *state)
9275{
9276 int i;
9277
9278 if (state->dts_nformats == 0) {
9279 ASSERT(state->dts_formats == NULL);
9280 return;
9281 }
9282
9283 ASSERT(state->dts_formats != NULL);
9284
9285 for (i = 0; i < state->dts_nformats; i++) {
9286 char *fmt = state->dts_formats[i];
9287
9288 if (fmt == NULL)
9289 continue;
9290
9291 kmem_free(fmt, strlen(fmt) + 1);
9292 }
9293
9294 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
9295 state->dts_nformats = 0;
9296 state->dts_formats = NULL;
9297}
9298
9299/*
9300 * DTrace Predicate Functions
9301 */
9302static dtrace_predicate_t *
9303dtrace_predicate_create(dtrace_difo_t *dp)
9304{
9305 dtrace_predicate_t *pred;
9306
9307 ASSERT(MUTEX_HELD(&dtrace_lock));
9308 ASSERT(dp->dtdo_refcnt != 0);
9309
9310 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
9311 pred->dtp_difo = dp;
9312 pred->dtp_refcnt = 1;
9313
9314 if (!dtrace_difo_cacheable(dp))
9315 return (pred);
9316
9317 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
9318 /*
9319 * This is only theoretically possible -- we have had 2^32
9320 * cacheable predicates on this machine. We cannot allow any
9321 * more predicates to become cacheable: as unlikely as it is,
9322 * there may be a thread caching a (now stale) predicate cache
9323 * ID. (N.B.: the temptation is being successfully resisted to
9324 * have this cmn_err() "Holy shit -- we executed this code!")
9325 */
9326 return (pred);
9327 }
9328
9329 pred->dtp_cacheid = dtrace_predcache_id++;
9330
9331 return (pred);
9332}
9333
9334static void
9335dtrace_predicate_hold(dtrace_predicate_t *pred)
9336{
9337 ASSERT(MUTEX_HELD(&dtrace_lock));
9338 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9339 ASSERT(pred->dtp_refcnt > 0);
9340
9341 pred->dtp_refcnt++;
9342}
9343
9344static void
9345dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9346{
9347 dtrace_difo_t *dp = pred->dtp_difo;
9348
9349 ASSERT(MUTEX_HELD(&dtrace_lock));
9350 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9351 ASSERT(pred->dtp_refcnt > 0);
9352
9353 if (--pred->dtp_refcnt == 0) {
9354 dtrace_difo_release(pred->dtp_difo, vstate);
9355 kmem_free(pred, sizeof (dtrace_predicate_t));
9356 }
9357}
9358
9359/*
9360 * DTrace Action Description Functions
9361 */
9362static dtrace_actdesc_t *
9363dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9364 uint64_t uarg, uint64_t arg)
9365{
9366 dtrace_actdesc_t *act;
9367
9368 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9369 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9370
9371 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9372 act->dtad_kind = kind;
9373 act->dtad_ntuple = ntuple;
9374 act->dtad_uarg = uarg;
9375 act->dtad_arg = arg;
9376 act->dtad_refcnt = 1;
9377
9378 return (act);
9379}
9380
9381static void
9382dtrace_actdesc_hold(dtrace_actdesc_t *act)
9383{
9384 ASSERT(act->dtad_refcnt >= 1);
9385 act->dtad_refcnt++;
9386}
9387
9388static void
9389dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9390{
9391 dtrace_actkind_t kind = act->dtad_kind;
9392 dtrace_difo_t *dp;
9393
9394 ASSERT(act->dtad_refcnt >= 1);
9395
9396 if (--act->dtad_refcnt != 0)
9397 return;
9398
9399 if ((dp = act->dtad_difo) != NULL)
9400 dtrace_difo_release(dp, vstate);
9401
9402 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9403 char *str = (char *)(uintptr_t)act->dtad_arg;
9404
9405 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9406 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9407
9408 if (str != NULL)
9409 kmem_free(str, strlen(str) + 1);
9410 }
9411
9412 kmem_free(act, sizeof (dtrace_actdesc_t));
9413}
9414
9415/*
9416 * DTrace ECB Functions
9417 */
9418static dtrace_ecb_t *
9419dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9420{
9421 dtrace_ecb_t *ecb;
9422 dtrace_epid_t epid;
9423
9424 ASSERT(MUTEX_HELD(&dtrace_lock));
9425
9426 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9427 ecb->dte_predicate = NULL;
9428 ecb->dte_probe = probe;
9429
9430 /*
9431 * The default size is the size of the default action: recording
9432 * the epid.
9433 */
9434 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9435 ecb->dte_alignment = sizeof (dtrace_epid_t);
9436
9437 epid = state->dts_epid++;
9438
9439 if (VBDTCAST(int64_t)epid - 1 >= state->dts_necbs) {
9440 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9441 int necbs = state->dts_necbs << 1;
9442
9443 ASSERT(epid == VBDTCAST(dtrace_epid_t)state->dts_necbs + 1);
9444
9445 if (necbs == 0) {
9446 ASSERT(oecbs == NULL);
9447 necbs = 1;
9448 }
9449
9450 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9451
9452 if (oecbs != NULL)
9453 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9454
9455 dtrace_membar_producer();
9456 state->dts_ecbs = ecbs;
9457
9458 if (oecbs != NULL) {
9459 /*
9460 * If this state is active, we must dtrace_sync()
9461 * before we can free the old dts_ecbs array: we're
9462 * coming in hot, and there may be active ring
9463 * buffer processing (which indexes into the dts_ecbs
9464 * array) on another CPU.
9465 */
9466 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9467 dtrace_sync();
9468
9469 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9470 }
9471
9472 dtrace_membar_producer();
9473 state->dts_necbs = necbs;
9474 }
9475
9476 ecb->dte_state = state;
9477
9478 ASSERT(state->dts_ecbs[epid - 1] == NULL);
9479 dtrace_membar_producer();
9480 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9481
9482 return (ecb);
9483}
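
/*
 * Note the doubling growth above: dts_necbs progresses 1, 2, 4, ...,
 * so a state with five ECBs holds an eight-entry dts_ecbs array, and
 * the dtrace_sync() before freeing the old array guarantees that no
 * CPU is still indexing into it from probe context.
 */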
9484
9485static int
9486dtrace_ecb_enable(dtrace_ecb_t *ecb)
9487{
9488 dtrace_probe_t *probe = ecb->dte_probe;
9489
9490 ASSERT(MUTEX_HELD(&cpu_lock));
9491 ASSERT(MUTEX_HELD(&dtrace_lock));
9492 ASSERT(ecb->dte_next == NULL);
9493
9494 if (probe == NULL) {
9495 /*
9496 * This is the NULL probe -- there's nothing to do.
9497 */
9498 return (0);
9499 }
9500
9501 if (probe->dtpr_ecb == NULL) {
9502 dtrace_provider_t *prov = probe->dtpr_provider;
9503
9504 /*
9505 * We're the first ECB on this probe.
9506 */
9507 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9508
9509 if (ecb->dte_predicate != NULL)
9510 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9511
9512 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9513 probe->dtpr_id, probe->dtpr_arg));
9514 } else {
9515 /*
9516 * This probe is already active. Swing the last pointer to
9517 * point to the new ECB, and issue a dtrace_sync() to assure
9518 * that all CPUs have seen the change.
9519 */
9520 ASSERT(probe->dtpr_ecb_last != NULL);
9521 probe->dtpr_ecb_last->dte_next = ecb;
9522 probe->dtpr_ecb_last = ecb;
9523 probe->dtpr_predcache = 0;
9524
9525 dtrace_sync();
9526 return (0);
9527 }
9528}
9529
9530static void
9531dtrace_ecb_resize(dtrace_ecb_t *ecb)
9532{
9533 uint32_t maxalign = sizeof (dtrace_epid_t);
9534 uint32_t align = sizeof (uint8_t), offs, diff;
9535 dtrace_action_t *act;
9536 int wastuple = 0;
9537 uint32_t aggbase = UINT32_MAX;
9538 dtrace_state_t *state = ecb->dte_state;
9539
9540 /*
9541 * If we record anything, we always record the epid. (And we always
9542 * record it first.)
9543 */
9544 offs = sizeof (dtrace_epid_t);
9545 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9546
9547 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9548 dtrace_recdesc_t *rec = &act->dta_rec;
9549
9550 if ((align = rec->dtrd_alignment) > maxalign)
9551 maxalign = align;
9552
9553 if (!wastuple && act->dta_intuple) {
9554 /*
9555 * This is the first record in a tuple. Align the
9556 * offset to be at offset 4 in an 8-byte aligned
9557 * block.
9558 */
9559 diff = offs + sizeof (dtrace_aggid_t);
9560
9561 if ((diff = (diff & (sizeof (uint64_t) - 1))))
9562 offs += sizeof (uint64_t) - diff;
9563
9564 aggbase = offs - sizeof (dtrace_aggid_t);
9565 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9566 }
9567
9568 /*LINTED*/
9569 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9570 /*
9571 * The current offset is not properly aligned; align it.
9572 */
9573 offs += align - diff;
9574 }
9575
9576 rec->dtrd_offset = offs;
9577
9578 if (offs + rec->dtrd_size > ecb->dte_needed) {
9579 ecb->dte_needed = offs + rec->dtrd_size;
9580
9581 if (ecb->dte_needed > state->dts_needed)
9582 state->dts_needed = ecb->dte_needed;
9583 }
9584
9585 if (DTRACEACT_ISAGG(act->dta_kind)) {
9586 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9587 dtrace_action_t *first = agg->dtag_first, *prev;
9588
9589 ASSERT(rec->dtrd_size != 0 && first != NULL);
9590 ASSERT(wastuple);
9591 ASSERT(aggbase != UINT32_MAX);
9592
9593 agg->dtag_base = aggbase;
9594
9595 while ((prev = first->dta_prev) != NULL &&
9596 DTRACEACT_ISAGG(prev->dta_kind)) {
9597 agg = (dtrace_aggregation_t *)prev;
9598 first = agg->dtag_first;
9599 }
9600
9601 if (prev != NULL) {
9602 offs = prev->dta_rec.dtrd_offset +
9603 prev->dta_rec.dtrd_size;
9604 } else {
9605 offs = sizeof (dtrace_epid_t);
9606 }
9607 wastuple = 0;
9608 } else {
9609 if (!act->dta_intuple)
9610 ecb->dte_size = offs + rec->dtrd_size;
9611
9612 offs += rec->dtrd_size;
9613 }
9614
9615 wastuple = act->dta_intuple;
9616 }
9617
9618 if ((act = ecb->dte_action) != NULL &&
9619 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9620 ecb->dte_size == sizeof (dtrace_epid_t)) {
9621 /*
9622 * If the size is still sizeof (dtrace_epid_t), then all
9623 * actions store no data; set the size to 0.
9624 */
9625 ecb->dte_alignment = maxalign;
9626 ecb->dte_size = 0;
9627
9628 /*
9629 * If the needed space is still sizeof (dtrace_epid_t), then
9630 * all actions need no additional space; set the needed
9631 * size to 0.
9632 */
9633 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9634 ecb->dte_needed = 0;
9635
9636 return;
9637 }
9638
9639 /*
9640 * Set our alignment, and make sure that the dte_size and dte_needed
9641 * are aligned to the size of an EPID.
9642 */
9643 ecb->dte_alignment = maxalign;
9644 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9645 ~(sizeof (dtrace_epid_t) - 1);
9646 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9647 ~(sizeof (dtrace_epid_t) - 1);
9648 ASSERT(ecb->dte_size <= ecb->dte_needed);
9649}
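
/*
 * As a worked example: an ECB whose only action records a 4-byte datum
 * is laid out as
 *
 *	offset 0:  dtrace_epid_t	(always recorded, always first)
 *	offset 4:  the 4-byte record
 *
 * for a dte_size of 8, which is already EPID-aligned, so the final
 * rounding above leaves it unchanged.
 */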
9650
9651static dtrace_action_t *
9652dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9653{
9654 dtrace_aggregation_t *agg;
9655 size_t size = sizeof (uint64_t);
9656 int ntuple = desc->dtad_ntuple;
9657 dtrace_action_t *act;
9658 dtrace_recdesc_t *frec;
9659 dtrace_aggid_t aggid;
9660 dtrace_state_t *state = ecb->dte_state;
9661
9662 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9663 agg->dtag_ecb = ecb;
9664
9665 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9666
9667 switch (desc->dtad_kind) {
9668 case DTRACEAGG_MIN:
9669 agg->dtag_initial = INT64_MAX;
9670 agg->dtag_aggregate = dtrace_aggregate_min;
9671 break;
9672
9673 case DTRACEAGG_MAX:
9674 agg->dtag_initial = INT64_MIN;
9675 agg->dtag_aggregate = dtrace_aggregate_max;
9676 break;
9677
9678 case DTRACEAGG_COUNT:
9679 agg->dtag_aggregate = dtrace_aggregate_count;
9680 break;
9681
9682 case DTRACEAGG_QUANTIZE:
9683 agg->dtag_aggregate = dtrace_aggregate_quantize;
9684 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9685 sizeof (uint64_t);
9686 break;
9687
9688 case DTRACEAGG_LQUANTIZE: {
9689 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9690 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9691
9692 agg->dtag_initial = desc->dtad_arg;
9693 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9694
9695 if (step == 0 || levels == 0)
9696 goto err;
9697
9698 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9699 break;
9700 }
9701
9702 case DTRACEAGG_AVG:
9703 agg->dtag_aggregate = dtrace_aggregate_avg;
9704 size = sizeof (uint64_t) * 2;
9705 break;
9706
9707 case DTRACEAGG_STDDEV:
9708 agg->dtag_aggregate = dtrace_aggregate_stddev;
9709 size = sizeof (uint64_t) * 4;
9710 break;
9711
9712 case DTRACEAGG_SUM:
9713 agg->dtag_aggregate = dtrace_aggregate_sum;
9714 break;
9715
9716 default:
9717 goto err;
9718 }
9719
9720 agg->dtag_action.dta_rec.dtrd_size = VBDTCAST(uint32_t)size;
9721
9722 if (ntuple == 0)
9723 goto err;
9724
9725 /*
9726 * We must make sure that we have enough actions for the n-tuple.
9727 */
9728 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9729 if (DTRACEACT_ISAGG(act->dta_kind))
9730 break;
9731
9732 if (--ntuple == 0) {
9733 /*
9734 * This is the action with which our n-tuple begins.
9735 */
9736 agg->dtag_first = act;
9737 goto success;
9738 }
9739 }
9740
9741 /*
9742 * This n-tuple is short by ntuple elements. Return failure.
9743 */
9744 ASSERT(ntuple != 0);
9745err:
9746 kmem_free(agg, sizeof (dtrace_aggregation_t));
9747 return (NULL);
9748
9749success:
9750 /*
9751 * If the last action in the tuple has a size of zero, it's actually
9752 * an expression argument for the aggregating action.
9753 */
9754 ASSERT(ecb->dte_action_last != NULL);
9755 act = ecb->dte_action_last;
9756
9757 if (act->dta_kind == DTRACEACT_DIFEXPR) {
9758 ASSERT(act->dta_difo != NULL);
9759
9760 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
9761 agg->dtag_hasarg = 1;
9762 }
9763
9764 /*
9765 * We need to allocate an id for this aggregation.
9766 */
9767 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
9768 VM_BESTFIT | VM_SLEEP);
9769
9770 if (VBDTCAST(int64_t)aggid - 1 >= state->dts_naggregations) {
9771 dtrace_aggregation_t **oaggs = state->dts_aggregations;
9772 dtrace_aggregation_t **aggs;
9773 int naggs = state->dts_naggregations << 1;
9774 int onaggs = state->dts_naggregations;
9775
9776 ASSERT(aggid == VBDTCAST(dtrace_aggid_t)state->dts_naggregations + 1);
9777
9778 if (naggs == 0) {
9779 ASSERT(oaggs == NULL);
9780 naggs = 1;
9781 }
9782
9783 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
9784
9785 if (oaggs != NULL) {
9786 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
9787 kmem_free(oaggs, onaggs * sizeof (*aggs));
9788 }
9789
9790 state->dts_aggregations = aggs;
9791 state->dts_naggregations = naggs;
9792 }
9793
9794 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
9795 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
9796
9797 frec = &agg->dtag_first->dta_rec;
9798 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
9799 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
9800
9801 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
9802 ASSERT(!act->dta_intuple);
9803 act->dta_intuple = 1;
9804 }
9805
9806 return (&agg->dtag_action);
9807}
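
/*
 * To make the lquantize() sizing above concrete: for an aggregation
 * such as @ = lquantize(x, 0, 100, 10), dtad_arg encodes the step and
 * level count, and the record reserves one uint64_t per level plus
 * three more words -- commonly understood as the encoded argument and
 * the underflow and overflow buckets -- matching the "levels *
 * sizeof (uint64_t) + 3 * sizeof (uint64_t)" computation in the
 * DTRACEAGG_LQUANTIZE case.
 */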
9808
9809static void
9810dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
9811{
9812 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9813 dtrace_state_t *state = ecb->dte_state;
9814 dtrace_aggid_t aggid = agg->dtag_id;
9815
9816 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
9817 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
9818
9819 ASSERT(state->dts_aggregations[aggid - 1] == agg);
9820 state->dts_aggregations[aggid - 1] = NULL;
9821
9822 kmem_free(agg, sizeof (dtrace_aggregation_t));
9823}
9824
9825static int
9826dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9827{
9828 dtrace_action_t *action, *last;
9829 dtrace_difo_t *dp = desc->dtad_difo;
9830 uint32_t size = 0, align = sizeof (uint8_t), mask;
9831 uint16_t format = 0;
9832 dtrace_recdesc_t *rec;
9833 dtrace_state_t *state = ecb->dte_state;
9834 dtrace_optval_t *opt = state->dts_options, nframes VBDTUNASS(0), strsize;
9835 uint64_t arg = desc->dtad_arg;
9836
9837 ASSERT(MUTEX_HELD(&dtrace_lock));
9838 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
9839
9840 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
9841 /*
9842 * If this is an aggregating action, there must be neither
9843 * a speculate nor a commit on the action chain.
9844 */
9845 dtrace_action_t *act;
9846
9847 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9848 if (act->dta_kind == DTRACEACT_COMMIT)
9849 return (EINVAL);
9850
9851 if (act->dta_kind == DTRACEACT_SPECULATE)
9852 return (EINVAL);
9853 }
9854
9855 action = dtrace_ecb_aggregation_create(ecb, desc);
9856
9857 if (action == NULL)
9858 return (EINVAL);
9859 } else {
9860 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
9861 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
9862 dp != NULL && dp->dtdo_destructive)) {
9863 state->dts_destructive = 1;
9864 }
9865
9866 switch (desc->dtad_kind) {
9867 case DTRACEACT_PRINTF:
9868 case DTRACEACT_PRINTA:
9869 case DTRACEACT_SYSTEM:
9870 case DTRACEACT_FREOPEN:
9871 /*
9872 * We know that our arg is a string -- turn it into a
9873 * format.
9874 */
9875 if (arg == NULL) {
9876 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
9877 format = 0;
9878 } else {
9879 ASSERT(arg != NULL);
9880 ASSERT(arg > KERNELBASE);
9881 format = dtrace_format_add(state,
9882 (char *)(uintptr_t)arg);
9883 }
9884
9885 /*FALLTHROUGH*/
9886 case DTRACEACT_LIBACT:
9887 case DTRACEACT_DIFEXPR:
9888 if (dp == NULL)
9889 return (EINVAL);
9890
9891 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
9892 break;
9893
9894 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
9895 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9896 return (EINVAL);
9897
9898 size = opt[DTRACEOPT_STRSIZE];
9899 }
9900
9901 break;
9902
9903 case DTRACEACT_STACK:
9904 if ((nframes = arg) == 0) {
9905 nframes = opt[DTRACEOPT_STACKFRAMES];
9906 ASSERT(nframes > 0);
9907 arg = nframes;
9908 }
9909
9910 size = VBDTCAST(uint32_t)(nframes * sizeof (pc_t));
9911 break;
9912
9913 case DTRACEACT_JSTACK:
9914 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
9915 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
9916
9917 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
9918 nframes = opt[DTRACEOPT_JSTACKFRAMES];
9919
9920 arg = DTRACE_USTACK_ARG(nframes, strsize);
9921
9922 /*FALLTHROUGH*/
9923 case DTRACEACT_USTACK:
9924 if (desc->dtad_kind != DTRACEACT_JSTACK &&
9925 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
9926 strsize = DTRACE_USTACK_STRSIZE(arg);
9927 nframes = opt[DTRACEOPT_USTACKFRAMES];
9928 ASSERT(nframes > 0);
9929 arg = DTRACE_USTACK_ARG(nframes, strsize);
9930 }
9931
9932 /*
9933 * Save a slot for the pid.
9934 */
9935 size = VBDTCAST(uint32_t)((nframes + 1) * sizeof (uint64_t));
9936 size += DTRACE_USTACK_STRSIZE(arg);
9937 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
9938
9939 break;
9940
9941 case DTRACEACT_SYM:
9942 case DTRACEACT_MOD:
9943 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
9944 sizeof (uint64_t)) ||
9945 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9946 return (EINVAL);
9947 break;
9948
9949 case DTRACEACT_USYM:
9950 case DTRACEACT_UMOD:
9951 case DTRACEACT_UADDR:
9952 if (dp == NULL ||
9953 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
9954 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9955 return (EINVAL);
9956
9957 /*
9958 * We have a slot for the pid, plus a slot for the
9959 * argument. To keep things simple (aligned with
9960 * bitness-neutral sizing), we store each as a 64-bit
9961 * quantity.
9962 */
9963 size = 2 * sizeof (uint64_t);
9964 break;
9965
9966 case DTRACEACT_STOP:
9967 case DTRACEACT_BREAKPOINT:
9968 case DTRACEACT_PANIC:
9969 break;
9970
9971 case DTRACEACT_CHILL:
9972 case DTRACEACT_DISCARD:
9973 case DTRACEACT_RAISE:
9974 if (dp == NULL)
9975 return (EINVAL);
9976 break;
9977
9978 case DTRACEACT_EXIT:
9979 if (dp == NULL ||
9980 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
9981 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9982 return (EINVAL);
9983 break;
9984
9985 case DTRACEACT_SPECULATE:
9986 if (ecb->dte_size > sizeof (dtrace_epid_t))
9987 return (EINVAL);
9988
9989 if (dp == NULL)
9990 return (EINVAL);
9991
9992 state->dts_speculates = 1;
9993 break;
9994
9995 case DTRACEACT_COMMIT: {
9996 dtrace_action_t *act = ecb->dte_action;
9997
9998 for (; act != NULL; act = act->dta_next) {
9999 if (act->dta_kind == DTRACEACT_COMMIT)
10000 return (EINVAL);
10001 }
10002
10003 if (dp == NULL)
10004 return (EINVAL);
10005 break;
10006 }
10007
10008 default:
10009 return (EINVAL);
10010 }
10011
10012 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
10013 /*
10014 * If this is a data-storing action or a speculate,
10015 * we must be sure that there isn't a commit on the
10016 * action chain.
10017 */
10018 dtrace_action_t *act = ecb->dte_action;
10019
10020 for (; act != NULL; act = act->dta_next) {
10021 if (act->dta_kind == DTRACEACT_COMMIT)
10022 return (EINVAL);
10023 }
10024 }
10025
10026 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
10027 action->dta_rec.dtrd_size = size;
10028 }
10029
10030 action->dta_refcnt = 1;
10031 rec = &action->dta_rec;
10032 size = rec->dtrd_size;
10033
10034 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
10035 if (!(size & mask)) {
10036 align = mask + 1;
10037 break;
10038 }
10039 }
10040
10041 action->dta_kind = desc->dtad_kind;
10042
10043 if ((action->dta_difo = dp) != NULL)
10044 dtrace_difo_hold(dp);
10045
10046 rec->dtrd_action = action->dta_kind;
10047 rec->dtrd_arg = arg;
10048 rec->dtrd_uarg = desc->dtad_uarg;
10049 rec->dtrd_alignment = (uint16_t)align;
10050 rec->dtrd_format = format;
10051
10052 if ((last = ecb->dte_action_last) != NULL) {
10053 ASSERT(ecb->dte_action != NULL);
10054 action->dta_prev = last;
10055 last->dta_next = action;
10056 } else {
10057 ASSERT(ecb->dte_action == NULL);
10058 ecb->dte_action = action;
10059 }
10060
10061 ecb->dte_action_last = action;
10062
10063 return (0);
10064}
10065
10066static void
10067dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
10068{
10069 dtrace_action_t *act = ecb->dte_action, *next;
10070 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
10071 dtrace_difo_t *dp;
10072 uint16_t format;
10073
10074 if (act != NULL && act->dta_refcnt > 1) {
10075 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
10076 act->dta_refcnt--;
10077 } else {
10078 for (; act != NULL; act = next) {
10079 next = act->dta_next;
10080 ASSERT(next != NULL || act == ecb->dte_action_last);
10081 ASSERT(act->dta_refcnt == 1);
10082
10083 if ((format = act->dta_rec.dtrd_format) != 0)
10084 dtrace_format_remove(ecb->dte_state, format);
10085
10086 if ((dp = act->dta_difo) != NULL)
10087 dtrace_difo_release(dp, vstate);
10088
10089 if (DTRACEACT_ISAGG(act->dta_kind)) {
10090 dtrace_ecb_aggregation_destroy(ecb, act);
10091 } else {
10092 kmem_free(act, sizeof (dtrace_action_t));
10093 }
10094 }
10095 }
10096
10097 ecb->dte_action = NULL;
10098 ecb->dte_action_last = NULL;
10099 ecb->dte_size = sizeof (dtrace_epid_t);
10100}
10101
10102static void
10103dtrace_ecb_disable(dtrace_ecb_t *ecb)
10104{
10105 /*
10106 * We disable the ECB by removing it from its probe.
10107 */
10108 dtrace_ecb_t *pecb, *prev = NULL;
10109 dtrace_probe_t *probe = ecb->dte_probe;
10110
10111 ASSERT(MUTEX_HELD(&dtrace_lock));
10112
10113 if (probe == NULL) {
10114 /*
10115 * This is the NULL probe; there is nothing to disable.
10116 */
10117 return;
10118 }
10119
10120 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
10121 if (pecb == ecb)
10122 break;
10123 prev = pecb;
10124 }
10125
10126 ASSERT(pecb != NULL);
10127
10128 if (prev == NULL) {
10129 probe->dtpr_ecb = ecb->dte_next;
10130 } else {
10131 prev->dte_next = ecb->dte_next;
10132 }
10133
10134 if (ecb == probe->dtpr_ecb_last) {
10135 ASSERT(ecb->dte_next == NULL);
10136 probe->dtpr_ecb_last = prev;
10137 }
10138
10139 /*
10140 * The ECB has been disconnected from the probe; now sync to assure
10141 * that all CPUs have seen the change before returning.
10142 */
10143 dtrace_sync();
10144
10145 if (probe->dtpr_ecb == NULL) {
10146 /*
10147 * That was the last ECB on the probe; clear the predicate
10148 * cache ID for the probe, disable it and sync one more time
10149 * to assure that we'll never hit it again.
10150 */
10151 dtrace_provider_t *prov = probe->dtpr_provider;
10152
10153 ASSERT(ecb->dte_next == NULL);
10154 ASSERT(probe->dtpr_ecb_last == NULL);
10155 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
10156 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
10157 probe->dtpr_id, probe->dtpr_arg);
10158 dtrace_sync();
10159 } else {
10160 /*
10161 * There is at least one ECB remaining on the probe. If there
10162 * is _exactly_ one, set the probe's predicate cache ID to be
10163 * the predicate cache ID of the remaining ECB.
10164 */
10165 ASSERT(probe->dtpr_ecb_last != NULL);
10166 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
10167
10168 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
10169 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
10170
10171 ASSERT(probe->dtpr_ecb->dte_next == NULL);
10172
10173 if (p != NULL)
10174 probe->dtpr_predcache = p->dtp_cacheid;
10175 }
10176
10177 ecb->dte_next = NULL;
10178 }
10179}
10180
10181static void
10182dtrace_ecb_destroy(dtrace_ecb_t *ecb)
10183{
10184 dtrace_state_t *state = ecb->dte_state;
10185 dtrace_vstate_t *vstate = &state->dts_vstate;
10186 dtrace_predicate_t *pred;
10187 dtrace_epid_t epid = ecb->dte_epid;
10188
10189 ASSERT(MUTEX_HELD(&dtrace_lock));
10190 ASSERT(ecb->dte_next == NULL);
10191 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
10192
10193 if ((pred = ecb->dte_predicate) != NULL)
10194 dtrace_predicate_release(pred, vstate);
10195
10196 dtrace_ecb_action_remove(ecb);
10197
10198 ASSERT(state->dts_ecbs[epid - 1] == ecb);
10199 state->dts_ecbs[epid - 1] = NULL;
10200
10201 kmem_free(ecb, sizeof (dtrace_ecb_t));
10202}
10203
10204static dtrace_ecb_t *
10205dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
10206 dtrace_enabling_t *enab)
10207{
10208 dtrace_ecb_t *ecb;
10209 dtrace_predicate_t *pred;
10210 dtrace_actdesc_t *act;
10211 dtrace_provider_t *prov;
10212 dtrace_ecbdesc_t *desc = enab->dten_current;
10213
10214 ASSERT(MUTEX_HELD(&dtrace_lock));
10215 ASSERT(state != NULL);
10216
10217 ecb = dtrace_ecb_add(state, probe);
10218 ecb->dte_uarg = desc->dted_uarg;
10219
10220 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
10221 dtrace_predicate_hold(pred);
10222 ecb->dte_predicate = pred;
10223 }
10224
10225 if (probe != NULL) {
10226 /*
10227 * If the provider shows more leg than the consumer is old
10228 * enough to see, we need to enable the appropriate implicit
10229 * predicate bits to prevent the ecb from activating at
10230 * revealing times.
10231 *
10232 * Providers specifying DTRACE_PRIV_USER at register time
10233 * are stating that they need the /proc-style privilege
10234 * model to be enforced, and this is what DTRACE_COND_OWNER
10235 * and DTRACE_COND_ZONEOWNER will then do at probe time.
10236 */
10237 prov = probe->dtpr_provider;
10238 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
10239 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10240 ecb->dte_cond |= DTRACE_COND_OWNER;
10241
10242 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
10243 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
10244 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
10245
10246 /*
10247 * If the provider shows us kernel innards and the user
10248 * is lacking sufficient privilege, enable the
10249 * DTRACE_COND_USERMODE implicit predicate.
10250 */
10251 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
10252 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
10253 ecb->dte_cond |= DTRACE_COND_USERMODE;
10254 }
10255
10256 if (dtrace_ecb_create_cache != NULL) {
10257 /*
10258 * If we have a cached ecb, we'll use its action list instead
10259 * of creating our own (saving both time and space).
10260 */
10261 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
10262 dtrace_action_t *act2 = cached->dte_action;
10263
10264 if (act2 != NULL) {
10265 ASSERT(act2->dta_refcnt > 0);
10266 act2->dta_refcnt++;
10267 ecb->dte_action = act2;
10268 ecb->dte_action_last = cached->dte_action_last;
10269 ecb->dte_needed = cached->dte_needed;
10270 ecb->dte_size = cached->dte_size;
10271 ecb->dte_alignment = cached->dte_alignment;
10272 }
10273
10274 return (ecb);
10275 }
10276
10277 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
10278 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
10279 dtrace_ecb_destroy(ecb);
10280 return (NULL);
10281 }
10282 }
10283
10284 dtrace_ecb_resize(ecb);
10285
10286 return (dtrace_ecb_create_cache = ecb);
10287}
10288
10289static int
10290dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
10291{
10292 dtrace_ecb_t *ecb;
10293 dtrace_enabling_t *enab = arg;
10294 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
10295
10296 ASSERT(state != NULL);
10297
10298 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
10299 /*
10300 * This probe was created in a generation for which this
10301 * enabling has previously created ECBs; we don't want to
10302 * enable it again, so just kick out.
10303 */
10304 return (DTRACE_MATCH_NEXT);
10305 }
10306
10307 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
10308 return (DTRACE_MATCH_DONE);
10309
10310 if (dtrace_ecb_enable(ecb) < 0)
10311 return (DTRACE_MATCH_FAIL);
10312
10313 return (DTRACE_MATCH_NEXT);
10314}
10315
10316static dtrace_ecb_t *
10317dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
10318{
10319 dtrace_ecb_t *ecb;
10320
10321 ASSERT(MUTEX_HELD(&dtrace_lock));
10322
10323 if (id == 0 || VBDTCAST(int64_t)id > state->dts_necbs)
10324 return (NULL);
10325
10326 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10327 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10328
10329 return (state->dts_ecbs[id - 1]);
10330}
10331
10332static dtrace_aggregation_t *
10333dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10334{
10335 dtrace_aggregation_t *agg;
10336
10337 ASSERT(MUTEX_HELD(&dtrace_lock));
10338
10339 if (id == 0 || VBDTCAST(int64_t)id > state->dts_naggregations)
10340 return (NULL);
10341
10342 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10343 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10344 agg->dtag_id == id);
10345
10346 return (state->dts_aggregations[id - 1]);
10347}
10348
10349/*
10350 * DTrace Buffer Functions
10351 *
10352 * The following functions manipulate DTrace buffers. Most of these functions
10353 * are called in the context of establishing or processing consumer state;
10354 * exceptions are explicitly noted.
10355 */
10356
10357/*
10358 * Note: called from cross call context. This function switches the two
10359 * buffers on a given CPU. The atomicity of this operation is assured by
10360 * disabling interrupts while the actual switch takes place; the disabling of
10361 * interrupts serializes the execution with any execution of dtrace_probe() on
10362 * the same CPU.
10363 */
10364static void
10365dtrace_buffer_switch(dtrace_buffer_t *buf)
10366{
10367 caddr_t tomax = buf->dtb_tomax;
10368 caddr_t xamot = buf->dtb_xamot;
10369 dtrace_icookie_t cookie;
10370
10371 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10372 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10373
10374 cookie = dtrace_interrupt_disable();
10375 buf->dtb_tomax = xamot;
10376 buf->dtb_xamot = tomax;
10377 buf->dtb_xamot_drops = buf->dtb_drops;
10378 buf->dtb_xamot_offset = buf->dtb_offset;
10379 buf->dtb_xamot_errors = buf->dtb_errors;
10380 buf->dtb_xamot_flags = buf->dtb_flags;
10381 buf->dtb_offset = 0;
10382 buf->dtb_drops = 0;
10383 buf->dtb_errors = 0;
10384 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10385 dtrace_interrupt_enable(cookie);
10386}
10387
10388#ifdef VBOX
10389static DECLCALLBACK(void) dtrace_buffer_switch_wrapper(RTCPUID idCpu, void *pvUser1, void *pvUser2)
10390{
10391 dtrace_buffer_switch((dtrace_buffer_t *)pvUser1);
10392 NOREF(pvUser2); NOREF(idCpu);
10393}
10394#endif
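#ifdef VBOX
/*
 * Illustrative sketch: the wrapper above matches IPRT's PFNRTMPWORKER
 * signature, so a per-CPU buffer switch could be requested from ring-0
 * roughly as follows.  (The helper name dtrace_buffer_switch_on_cpu is
 * hypothetical and not part of the original source.)
 */
static int dtrace_buffer_switch_on_cpu(dtrace_buffer_t *bufs, RTCPUID idCpu)
{
	/* Runs dtrace_buffer_switch_wrapper() on idCpu; returns an IPRT status code. */
	return RTMpOnSpecific(idCpu, dtrace_buffer_switch_wrapper, &bufs[idCpu], NULL);
}
#endif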
10395
10396/*
10397 * Note: called from cross call context. This function activates a buffer
10398 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10399 * is guaranteed by the disabling of interrupts.
10400 */
10401static void
10402dtrace_buffer_activate(dtrace_state_t *state)
10403{
10404 dtrace_buffer_t *buf;
10405 dtrace_icookie_t cookie = dtrace_interrupt_disable();
10406
10407 buf = &state->dts_buffer[VBDT_GET_CPUID()];
10408
10409 if (buf->dtb_tomax != NULL) {
10410 /*
10411 * We might like to assert that the buffer is marked inactive,
10412		 * but this isn't necessarily true: the buffer for the CPU
10413		 * that processes the BEGIN probe is activated manually.  In
10414		 * this case, we take the (harmless) action of re-clearing
10415		 * the INACTIVE bit.
10416 */
10417 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10418 }
10419
10420 dtrace_interrupt_enable(cookie);
10421}
10422
10423#ifdef VBOX
10424static DECLCALLBACK(void) dtrace_buffer_activate_wrapper(RTCPUID idCpu, void *pvUser1, void *pvUser2)
10425{
10426 dtrace_buffer_activate((dtrace_state_t *)pvUser1);
10427 NOREF(pvUser2); NOREF(idCpu);
10428}
10429#endif
10430
10431static int
10432dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10433 processorid_t cpu)
10434{
10435#ifndef VBOX
10436 cpu_t *cp;
10437#else
10438 RTCPUSET CpuSet;
10439 unsigned iCpu;
10440#endif
10441 dtrace_buffer_t *buf;
10442
10443 ASSERT(MUTEX_HELD(&cpu_lock));
10444 ASSERT(MUTEX_HELD(&dtrace_lock));
10445
10446 if (VBDTCAST(int64_t)size > dtrace_nonroot_maxsize
10447#ifndef VBOX
10448 && !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)
10449#endif
10450 )
10451 return (EFBIG);
10452
10453#ifndef VBOX
10454 cp = cpu_list;
10455#else
10456 RTMpGetSet(&CpuSet);
10457#endif
10458
10459#ifndef VBOX
10460 do {
10461 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10462 continue;
10463
10464 buf = &bufs[cp->cpu_id];
10465#else
10466 for (iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) {
10467 if ( !RTCpuSetIsMember(&CpuSet, iCpu)
10468 || (cpu != (processorid_t)DTRACE_CPUALL && cpu != iCpu))
10469 continue;
10470
10471 buf = &bufs[iCpu];
10472#endif
10473
10474 /*
10475 * If there is already a buffer allocated for this CPU, it
10476 * is only possible that this is a DR event. In this case,
10477 * the buffer size must match our specified size.
10478 */
10479 if (buf->dtb_tomax != NULL) {
10480 ASSERT(buf->dtb_size == size);
10481 continue;
10482 }
10483
10484 ASSERT(buf->dtb_xamot == NULL);
10485
10486 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10487 goto err;
10488
10489 buf->dtb_size = size;
10490 buf->dtb_flags = flags;
10491 buf->dtb_offset = 0;
10492 buf->dtb_drops = 0;
10493
10494 if (flags & DTRACEBUF_NOSWITCH)
10495 continue;
10496
10497 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10498 goto err;
10499#ifndef VBOX
10500 } while ((cp = cp->cpu_next) != cpu_list);
10501#else
10502 }
10503#endif
10504
10505 return (0);
10506
10507err:
10508#ifndef VBOX
10509 cp = cpu_list;
10510
10511 do {
10512 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10513 continue;
10514
10515 buf = &bufs[cp->cpu_id];
10516#else
10517 for (iCpu = 0; iCpu < RTCPUSET_MAX_CPUS; iCpu++) {
10518 if ( !RTCpuSetIsMember(&CpuSet, iCpu)
10519 || (cpu != (processorid_t)DTRACE_CPUALL && cpu != iCpu))
10520 continue;
10521
10522 buf = &bufs[iCpu];
10523#endif
10524
10525 if (buf->dtb_xamot != NULL) {
10526 ASSERT(buf->dtb_tomax != NULL);
10527 ASSERT(buf->dtb_size == size);
10528 kmem_free(buf->dtb_xamot, size);
10529 }
10530
10531 if (buf->dtb_tomax != NULL) {
10532 ASSERT(buf->dtb_size == size);
10533 kmem_free(buf->dtb_tomax, size);
10534 }
10535
10536 buf->dtb_tomax = NULL;
10537 buf->dtb_xamot = NULL;
10538 buf->dtb_size = 0;
10539#ifndef VBOX
10540 } while ((cp = cp->cpu_next) != cpu_list);
10541#else
10542 }
10543#endif
10544
10545 return (ENOMEM);
10546}
10547
10548/*
10549 * Note: called from probe context. This function just increments the drop
10550 * count on a buffer. It has been made a function to allow for the
10551 * possibility of understanding the source of mysterious drop counts. (A
10552 * problem for which one may be particularly disappointed that DTrace cannot
10553 * be used to understand DTrace.)
10554 */
10555static void
10556dtrace_buffer_drop(dtrace_buffer_t *buf)
10557{
10558 buf->dtb_drops++;
10559}
10560
10561/*
10562 * Note: called from probe context. This function is called to reserve space
10563 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
10564 * mstate. Returns the new offset in the buffer, or a negative value if an
10565 * error has occurred.
10566 */
10567static intptr_t
10568dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10569 dtrace_state_t *state, dtrace_mstate_t *mstate)
10570{
10571 intptr_t offs = buf->dtb_offset, soffs;
10572 intptr_t woffs;
10573 caddr_t tomax;
10574 size_t total;
10575
10576 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10577 return (-1);
10578
10579 if ((tomax = buf->dtb_tomax) == NULL) {
10580 dtrace_buffer_drop(buf);
10581 return (-1);
10582 }
10583
10584 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
10585 while (offs & (align - 1)) {
10586 /*
10587 * Assert that our alignment is off by a number which
10588 * is itself sizeof (uint32_t) aligned.
10589 */
10590 ASSERT(!((align - (offs & (align - 1))) &
10591 (sizeof (uint32_t) - 1)));
10592 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10593 offs += sizeof (uint32_t);
10594 }
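		/*
		 * Worked example: with offs = 20 and align = 8, the loop
		 * above stores a single 32-bit DTRACE_EPIDNONE pad at
		 * offset 20 and advances offs to 24, which is 8-byte
		 * aligned; consumers skip such pads when walking the
		 * buffer.
		 */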
10595
10596 if (VBDTCAST(uintptr_t)(soffs = offs + needed) > buf->dtb_size) {
10597 dtrace_buffer_drop(buf);
10598 return (-1);
10599 }
10600
10601 if (mstate == NULL)
10602 return (offs);
10603
10604 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10605 mstate->dtms_scratch_size = buf->dtb_size - soffs;
10606 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10607
10608 return (offs);
10609 }
10610
10611 if (buf->dtb_flags & DTRACEBUF_FILL) {
10612 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10613 (buf->dtb_flags & DTRACEBUF_FULL))
10614 return (-1);
10615 goto out;
10616 }
10617
10618 total = needed + (offs & (align - 1));
10619
10620 /*
10621 * For a ring buffer, life is quite a bit more complicated. Before
10622 * we can store any padding, we need to adjust our wrapping offset.
10623 * (If we've never before wrapped or we're not about to, no adjustment
10624 * is required.)
10625 */
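	/*
	 * Worked example: with a 64-byte ring buffer, offs = 56 and
	 * total = 16, the record cannot fit at the end; the code below
	 * zeroes bytes 56 through 63, resets offs to 0, sets
	 * DTRACEBUF_WRAPPED, and then advances the wrapped offset past
	 * enough old records at the top of the buffer to make room for
	 * the 16 bytes.
	 */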
10626 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10627 offs + total > buf->dtb_size) {
10628 woffs = buf->dtb_xamot_offset;
10629
10630 if (offs + total > buf->dtb_size) {
10631 /*
10632 * We can't fit in the end of the buffer. First, a
10633 * sanity check that we can fit in the buffer at all.
10634 */
10635 if (total > buf->dtb_size) {
10636 dtrace_buffer_drop(buf);
10637 return (-1);
10638 }
10639
10640 /*
10641 * We're going to be storing at the top of the buffer,
10642 * so now we need to deal with the wrapped offset. We
10643 * only reset our wrapped offset to 0 if it is
10644 * currently greater than the current offset. If it
10645 * is less than the current offset, it is because a
10646 * previous allocation induced a wrap -- but the
10647 * allocation didn't subsequently take the space due
10648 * to an error or false predicate evaluation. In this
10649 * case, we'll just leave the wrapped offset alone: if
10650 * the wrapped offset hasn't been advanced far enough
10651 * for this allocation, it will be adjusted in the
10652 * lower loop.
10653 */
10654 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10655 if (woffs >= offs)
10656 woffs = 0;
10657 } else {
10658 woffs = 0;
10659 }
10660
10661 /*
10662 * Now we know that we're going to be storing to the
10663 * top of the buffer and that there is room for us
10664 * there. We need to clear the buffer from the current
10665 * offset to the end (there may be old gunk there).
10666 */
10667 while (VBDTCAST(uintptr_t)offs < buf->dtb_size)
10668 tomax[offs++] = 0;
10669
10670 /*
10671 * We need to set our offset to zero. And because we
10672 * are wrapping, we need to set the bit indicating as
10673 * much. We can also adjust our needed space back
10674 * down to the space required by the ECB -- we know
10675 * that the top of the buffer is aligned.
10676 */
10677 offs = 0;
10678 total = needed;
10679 buf->dtb_flags |= DTRACEBUF_WRAPPED;
10680 } else {
10681 /*
10682 * There is room for us in the buffer, so we simply
10683 * need to check the wrapped offset.
10684 */
10685 if (woffs < offs) {
10686 /*
10687 * The wrapped offset is less than the offset.
10688 * This can happen if we allocated buffer space
10689 * that induced a wrap, but then we didn't
10690 * subsequently take the space due to an error
10691 * or false predicate evaluation. This is
10692 * okay; we know that _this_ allocation isn't
10693 * going to induce a wrap. We still can't
10694 * reset the wrapped offset to be zero,
10695 * however: the space may have been trashed in
10696 * the previous failed probe attempt. But at
10697 * least the wrapped offset doesn't need to
10698 * be adjusted at all...
10699 */
10700 goto out;
10701 }
10702 }
10703
10704 while (VBDTCAST(uintptr_t)offs + total > VBDTCAST(uintptr_t)woffs) {
10705 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
10706 size_t size;
10707
10708 if (epid == DTRACE_EPIDNONE) {
10709 size = sizeof (uint32_t);
10710 } else {
10711 ASSERT(VBDTCAST(int64_t)epid <= state->dts_necbs);
10712 ASSERT(state->dts_ecbs[epid - 1] != NULL);
10713
10714 size = state->dts_ecbs[epid - 1]->dte_size;
10715 }
10716
10717 ASSERT(woffs + size <= buf->dtb_size);
10718 ASSERT(size != 0);
10719
10720 if (woffs + size == buf->dtb_size) {
10721 /*
10722 * We've reached the end of the buffer; we want
10723 * to set the wrapped offset to 0 and break
10724 * out. However, if the offs is 0, then we're
10725 * in a strange edge-condition: the amount of
10726 * space that we want to reserve plus the size
10727 * of the record that we're overwriting is
10728 * greater than the size of the buffer. This
10729 * is problematic because if we reserve the
10730 * space but subsequently don't consume it (due
10731 * to a failed predicate or error) the wrapped
10732 * offset will be 0 -- yet the EPID at offset 0
10733 * will not be committed. This situation is
10734 * relatively easy to deal with: if we're in
10735 * this case, the buffer is indistinguishable
10736 * from one that hasn't wrapped; we need only
10737 * finish the job by clearing the wrapped bit,
10738 * explicitly setting the offset to be 0, and
10739 * zero'ing out the old data in the buffer.
10740 */
10741 if (offs == 0) {
10742 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
10743 buf->dtb_offset = 0;
10744 woffs = total;
10745
10746 while (VBDTCAST(uintptr_t)woffs < buf->dtb_size)
10747 tomax[woffs++] = 0;
10748 }
10749
10750 woffs = 0;
10751 break;
10752 }
10753
10754 woffs += size;
10755 }
10756
10757 /*
10758 * We have a wrapped offset. It may be that the wrapped offset
10759 * has become zero -- that's okay.
10760 */
10761 buf->dtb_xamot_offset = woffs;
10762 }
10763
10764out:
10765 /*
10766 * Now we can plow the buffer with any necessary padding.
10767 */
10768 while (offs & (align - 1)) {
10769 /*
10770 * Assert that our alignment is off by a number which
10771 * is itself sizeof (uint32_t) aligned.
10772 */
10773 ASSERT(!((align - (offs & (align - 1))) &
10774 (sizeof (uint32_t) - 1)));
10775 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10776 offs += sizeof (uint32_t);
10777 }
10778
10779 if (buf->dtb_flags & DTRACEBUF_FILL) {
10780 if (offs + needed > buf->dtb_size - state->dts_reserve) {
10781 buf->dtb_flags |= DTRACEBUF_FULL;
10782 return (-1);
10783 }
10784 }
10785
10786 if (mstate == NULL)
10787 return (offs);
10788
10789 /*
10790 * For ring buffers and fill buffers, the scratch space is always
10791 * the inactive buffer.
10792 */
10793 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
10794 mstate->dtms_scratch_size = buf->dtb_size;
10795 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10796
10797 return (offs);
10798}
10799
10800static void
10801dtrace_buffer_polish(dtrace_buffer_t *buf)
10802{
10803 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
10804 ASSERT(MUTEX_HELD(&dtrace_lock));
10805
10806 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
10807 return;
10808
10809 /*
10810 * We need to polish the ring buffer. There are three cases:
10811 *
10812 * - The first (and presumably most common) is that there is no gap
10813 * between the buffer offset and the wrapped offset. In this case,
10814 * there is nothing in the buffer that isn't valid data; we can
10815 * mark the buffer as polished and return.
10816 *
10817 * - The second (less common than the first but still more common
10818 * than the third) is that there is a gap between the buffer offset
10819 * and the wrapped offset, and the wrapped offset is larger than the
10820 * buffer offset. This can happen because of an alignment issue, or
10821 * can happen because of a call to dtrace_buffer_reserve() that
10822 * didn't subsequently consume the buffer space. In this case,
10823 * we need to zero the data from the buffer offset to the wrapped
10824 * offset.
10825 *
10826 * - The third (and least common) is that there is a gap between the
10827 * buffer offset and the wrapped offset, but the wrapped offset is
10828 * _less_ than the buffer offset. This can only happen because a
10829 * call to dtrace_buffer_reserve() induced a wrap, but the space
10830 * was not subsequently consumed. In this case, we need to zero the
10831 * space from the offset to the end of the buffer _and_ from the
10832 * top of the buffer to the wrapped offset.
10833 */
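	/*
	 * For illustration, the third case with a 16-byte buffer,
	 * dtb_offset = 12 and dtb_xamot_offset = 4: bytes 12..15 and
	 * 0..3 are zeroed, leaving the valid wrapped data in bytes
	 * 4..11.
	 */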
10834 if (buf->dtb_offset < buf->dtb_xamot_offset) {
10835 bzero(buf->dtb_tomax + buf->dtb_offset,
10836 buf->dtb_xamot_offset - buf->dtb_offset);
10837 }
10838
10839 if (buf->dtb_offset > buf->dtb_xamot_offset) {
10840 bzero(buf->dtb_tomax + buf->dtb_offset,
10841 buf->dtb_size - buf->dtb_offset);
10842 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
10843 }
10844}
10845
10846static void
10847dtrace_buffer_free(dtrace_buffer_t *bufs)
10848{
10849 int i;
10850
10851 for (i = 0; i < NCPU; i++) {
10852 dtrace_buffer_t *buf = &bufs[i];
10853
10854 if (buf->dtb_tomax == NULL) {
10855 ASSERT(buf->dtb_xamot == NULL);
10856 ASSERT(buf->dtb_size == 0);
10857 continue;
10858 }
10859
10860 if (buf->dtb_xamot != NULL) {
10861 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10862 kmem_free(buf->dtb_xamot, buf->dtb_size);
10863 }
10864
10865 kmem_free(buf->dtb_tomax, buf->dtb_size);
10866 buf->dtb_size = 0;
10867 buf->dtb_tomax = NULL;
10868 buf->dtb_xamot = NULL;
10869 }
10870}
10871
10872/*
10873 * DTrace Enabling Functions
10874 */
10875static dtrace_enabling_t *
10876dtrace_enabling_create(dtrace_vstate_t *vstate)
10877{
10878 dtrace_enabling_t *enab;
10879
10880 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
10881 enab->dten_vstate = vstate;
10882
10883 return (enab);
10884}
10885
10886static void
10887dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
10888{
10889 dtrace_ecbdesc_t **ndesc;
10890 size_t osize, nsize;
10891
10892 /*
10893 * We can't add to enablings after we've enabled them, or after we've
10894 * retained them.
10895 */
10896 ASSERT(enab->dten_probegen == 0);
10897 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
10898
10899 if (enab->dten_ndesc < enab->dten_maxdesc) {
10900 enab->dten_desc[enab->dten_ndesc++] = ecb;
10901 return;
10902 }
10903
10904	osize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
10905
10906 if (enab->dten_maxdesc == 0) {
10907 enab->dten_maxdesc = 1;
10908 } else {
10909 enab->dten_maxdesc <<= 1;
10910 }
10911
10912 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
10913
10914	nsize = enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *);
10915 ndesc = kmem_zalloc(nsize, KM_SLEEP);
10916 bcopy(enab->dten_desc, ndesc, osize);
10917 kmem_free(enab->dten_desc, osize);
10918
10919 enab->dten_desc = ndesc;
10920 enab->dten_desc[enab->dten_ndesc++] = ecb;
10921}
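/*
 * Note on growth: dten_maxdesc doubles on each reallocation above
 * (0 -> 1 -> 2 -> 4 -> ...), so while every reallocation copies all
 * existing descriptor pointers, adding N descriptions costs only O(N)
 * amortized work.
 */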
10922
10923static void
10924dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
10925 dtrace_probedesc_t *pd)
10926{
10927 dtrace_ecbdesc_t *new;
10928 dtrace_predicate_t *pred;
10929 dtrace_actdesc_t *act;
10930
10931 /*
10932 * We're going to create a new ECB description that matches the
10933 * specified ECB in every way, but has the specified probe description.
10934 */
10935 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
10936
10937 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
10938 dtrace_predicate_hold(pred);
10939
10940 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
10941 dtrace_actdesc_hold(act);
10942
10943 new->dted_action = ecb->dted_action;
10944 new->dted_pred = ecb->dted_pred;
10945 new->dted_probe = *pd;
10946 new->dted_uarg = ecb->dted_uarg;
10947
10948 dtrace_enabling_add(enab, new);
10949}
10950
10951static void
10952dtrace_enabling_dump(dtrace_enabling_t *enab)
10953{
10954 int i;
10955
10956 for (i = 0; i < enab->dten_ndesc; i++) {
10957 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
10958
10959 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
10960 desc->dtpd_provider, desc->dtpd_mod,
10961 desc->dtpd_func, desc->dtpd_name);
10962 }
10963}
10964
10965static void
10966dtrace_enabling_destroy(dtrace_enabling_t *enab)
10967{
10968 int i;
10969 dtrace_ecbdesc_t *ep;
10970 dtrace_vstate_t *vstate = enab->dten_vstate;
10971
10972 ASSERT(MUTEX_HELD(&dtrace_lock));
10973
10974 for (i = 0; i < enab->dten_ndesc; i++) {
10975 dtrace_actdesc_t *act, *next;
10976 dtrace_predicate_t *pred;
10977
10978 ep = enab->dten_desc[i];
10979
10980 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
10981 dtrace_predicate_release(pred, vstate);
10982
10983 for (act = ep->dted_action; act != NULL; act = next) {
10984 next = act->dtad_next;
10985 dtrace_actdesc_release(act, vstate);
10986 }
10987
10988 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
10989 }
10990
10991 kmem_free(enab->dten_desc,
10992	    enab->dten_maxdesc * sizeof (dtrace_ecbdesc_t *));
10993
10994 /*
10995 * If this was a retained enabling, decrement the dts_nretained count
10996 * and take it off of the dtrace_retained list.
10997 */
10998 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
10999 dtrace_retained == enab) {
11000 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11001 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
11002 enab->dten_vstate->dtvs_state->dts_nretained--;
11003 dtrace_retained_gen++;
11004 }
11005
11006 if (enab->dten_prev == NULL) {
11007 if (dtrace_retained == enab) {
11008 dtrace_retained = enab->dten_next;
11009
11010 if (dtrace_retained != NULL)
11011 dtrace_retained->dten_prev = NULL;
11012 }
11013 } else {
11014 ASSERT(enab != dtrace_retained);
11015 ASSERT(dtrace_retained != NULL);
11016 enab->dten_prev->dten_next = enab->dten_next;
11017 }
11018
11019 if (enab->dten_next != NULL) {
11020 ASSERT(dtrace_retained != NULL);
11021 enab->dten_next->dten_prev = enab->dten_prev;
11022 }
11023
11024 kmem_free(enab, sizeof (dtrace_enabling_t));
11025}
11026
11027static int
11028dtrace_enabling_retain(dtrace_enabling_t *enab)
11029{
11030 dtrace_state_t *state;
11031
11032 ASSERT(MUTEX_HELD(&dtrace_lock));
11033 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
11034 ASSERT(enab->dten_vstate != NULL);
11035
11036 state = enab->dten_vstate->dtvs_state;
11037 ASSERT(state != NULL);
11038
11039 /*
11040 * We only allow each state to retain dtrace_retain_max enablings.
11041 */
11042 if (state->dts_nretained >= dtrace_retain_max)
11043 return (ENOSPC);
11044
11045 state->dts_nretained++;
11046 dtrace_retained_gen++;
11047
11048 if (dtrace_retained == NULL) {
11049 dtrace_retained = enab;
11050 return (0);
11051 }
11052
11053 enab->dten_next = dtrace_retained;
11054 dtrace_retained->dten_prev = enab;
11055 dtrace_retained = enab;
11056
11057 return (0);
11058}
11059
11060static int
11061dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
11062 dtrace_probedesc_t *create)
11063{
11064 dtrace_enabling_t *new, *enab;
11065 int found = 0, err = ENOENT;
11066
11067 ASSERT(MUTEX_HELD(&dtrace_lock));
11068 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
11069 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
11070 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
11071 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
11072
11073 new = dtrace_enabling_create(&state->dts_vstate);
11074
11075 /*
11076 * Iterate over all retained enablings, looking for enablings that
11077 * match the specified state.
11078 */
11079 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11080 int i;
11081
11082 /*
11083 * dtvs_state can only be NULL for helper enablings -- and
11084 * helper enablings can't be retained.
11085 */
11086 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11087
11088 if (enab->dten_vstate->dtvs_state != state)
11089 continue;
11090
11091 /*
11092 * Now iterate over each probe description; we're looking for
11093 * an exact match to the specified probe description.
11094 */
11095 for (i = 0; i < enab->dten_ndesc; i++) {
11096 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11097 dtrace_probedesc_t *pd = &ep->dted_probe;
11098
11099 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
11100 continue;
11101
11102 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
11103 continue;
11104
11105 if (strcmp(pd->dtpd_func, match->dtpd_func))
11106 continue;
11107
11108 if (strcmp(pd->dtpd_name, match->dtpd_name))
11109 continue;
11110
11111 /*
11112 * We have a winning probe! Add it to our growing
11113 * enabling.
11114 */
11115 found = 1;
11116 dtrace_enabling_addlike(new, ep, create);
11117 }
11118 }
11119
11120 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
11121 dtrace_enabling_destroy(new);
11122 return (err);
11123 }
11124
11125 return (0);
11126}
11127
11128static void
11129dtrace_enabling_retract(dtrace_state_t *state)
11130{
11131 dtrace_enabling_t *enab, *next;
11132
11133 ASSERT(MUTEX_HELD(&dtrace_lock));
11134
11135 /*
11136	 * Iterate over all retained enablings, destroying those retained
11137 * for the specified state.
11138 */
11139 for (enab = dtrace_retained; enab != NULL; enab = next) {
11140 next = enab->dten_next;
11141
11142 /*
11143 * dtvs_state can only be NULL for helper enablings -- and
11144 * helper enablings can't be retained.
11145 */
11146 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11147
11148 if (enab->dten_vstate->dtvs_state == state) {
11149 ASSERT(state->dts_nretained > 0);
11150 dtrace_enabling_destroy(enab);
11151 }
11152 }
11153
11154 ASSERT(state->dts_nretained == 0);
11155}
11156
11157static int
11158dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
11159{
11160 int i = 0;
11161 int total_matched = 0, matched = 0;
11162
11163 ASSERT(MUTEX_HELD(&cpu_lock));
11164 ASSERT(MUTEX_HELD(&dtrace_lock));
11165
11166 for (i = 0; i < enab->dten_ndesc; i++) {
11167 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
11168
11169 enab->dten_current = ep;
11170 enab->dten_error = 0;
11171
11172 /*
11173 * If a provider failed to enable a probe then get out and
11174 * let the consumer know we failed.
11175 */
11176 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
11177 return (EBUSY);
11178
11179 total_matched += matched;
11180
11181 if (enab->dten_error != 0) {
11182 /*
11183 * If we get an error half-way through enabling the
11184 * probes, we kick out -- perhaps with some number of
11185 * them enabled. Leaving enabled probes enabled may
11186 * be slightly confusing for user-level, but we expect
11187 * that no one will attempt to actually drive on in
11188 * the face of such errors. If this is an anonymous
11189 * enabling (indicated with a NULL nmatched pointer),
11190 * we cmn_err() a message. We aren't expecting to
11191			 * get such an error -- to the extent it can exist at all,
11192 * it would be a result of corrupted DOF in the driver
11193 * properties.
11194 */
11195 if (nmatched == NULL) {
11196 cmn_err(CE_WARN, "dtrace_enabling_match() "
11197 "error on %p: %d", (void *)ep,
11198 enab->dten_error);
11199 }
11200
11201 return (enab->dten_error);
11202 }
11203 }
11204
11205 enab->dten_probegen = dtrace_probegen;
11206 if (nmatched != NULL)
11207 *nmatched = total_matched;
11208
11209 return (0);
11210}
11211
11212static void
11213dtrace_enabling_matchall(void)
11214{
11215 dtrace_enabling_t *enab;
11216
11217 mutex_enter(&cpu_lock);
11218 mutex_enter(&dtrace_lock);
11219
11220 /*
11221 * Iterate over all retained enablings to see if any probes match
11222 * against them. We only perform this operation on enablings for which
11223 * we have sufficient permissions by virtue of being in the global zone
11224 * or in the same zone as the DTrace client. Because we can be called
11225 * after dtrace_detach() has been called, we cannot assert that there
11226 * are retained enablings. We can safely load from dtrace_retained,
11227 * however: the taskq_destroy() at the end of dtrace_detach() will
11228 * block pending our completion.
11229 */
11230 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11231 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
11232
11233#ifndef VBOX
11234 if (INGLOBALZONE(curproc) ||
11235 cr != NULL && getzoneid() == crgetzoneid(cr))
11236#endif
11237 (void) dtrace_enabling_match(enab, NULL);
11238 }
11239
11240 mutex_exit(&dtrace_lock);
11241 mutex_exit(&cpu_lock);
11242}
11243
11244/*
11245 * If an enabling is to be enabled without having matched probes (that is, if
11246 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
11247 * enabling must be _primed_ by creating an ECB for every ECB description.
11248 * This must be done to assure that we know the number of speculations, the
11249 * number of aggregations, the minimum buffer size needed, etc. before we
11250 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
11251 * enabling any probes, we create ECBs for every ECB description, but with a
11252 * NULL probe -- which is exactly what this function does.
11253 */
11254static void
11255dtrace_enabling_prime(dtrace_state_t *state)
11256{
11257 dtrace_enabling_t *enab;
11258 int i;
11259
11260 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
11261 ASSERT(enab->dten_vstate->dtvs_state != NULL);
11262
11263 if (enab->dten_vstate->dtvs_state != state)
11264 continue;
11265
11266 /*
11267 * We don't want to prime an enabling more than once, lest
11268 * we allow a malicious user to induce resource exhaustion.
11269 * (The ECBs that result from priming an enabling aren't
11270 * leaked -- but they also aren't deallocated until the
11271 * consumer state is destroyed.)
11272 */
11273 if (enab->dten_primed)
11274 continue;
11275
11276 for (i = 0; i < enab->dten_ndesc; i++) {
11277 enab->dten_current = enab->dten_desc[i];
11278 (void) dtrace_probe_enable(NULL, enab);
11279 }
11280
11281 enab->dten_primed = 1;
11282 }
11283}
11284
11285/*
11286 * Called to indicate that probes should be provided due to retained
11287 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
11288 * must take an initial lap through the enablings, calling the dtps_provide()
11289 * entry point explicitly to allow for autocreated probes.
11290 */
11291static void
11292dtrace_enabling_provide(dtrace_provider_t *prv)
11293{
11294 int i, all = 0;
11295 dtrace_probedesc_t desc;
11296 dtrace_genid_t gen;
11297
11298 ASSERT(MUTEX_HELD(&dtrace_lock));
11299 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
11300
11301 if (prv == NULL) {
11302 all = 1;
11303 prv = dtrace_provider;
11304 }
11305
11306 do {
11307 dtrace_enabling_t *enab;
11308 void *parg = prv->dtpv_arg;
11309
11310retry:
11311 gen = dtrace_retained_gen;
11312 for (enab = dtrace_retained; enab != NULL;
11313 enab = enab->dten_next) {
11314 for (i = 0; i < enab->dten_ndesc; i++) {
11315 desc = enab->dten_desc[i]->dted_probe;
11316 mutex_exit(&dtrace_lock);
11317 prv->dtpv_pops.dtps_provide(parg, &desc);
11318 mutex_enter(&dtrace_lock);
11319 /*
11320 * Process the retained enablings again if
11321 * they have changed while we weren't holding
11322 * dtrace_lock.
11323 */
11324 if (gen != dtrace_retained_gen)
11325 goto retry;
11326 }
11327 }
11328 } while (all && (prv = prv->dtpv_next) != NULL);
11329
11330 mutex_exit(&dtrace_lock);
11331 dtrace_probe_provide(NULL, all ? NULL : prv);
11332 mutex_enter(&dtrace_lock);
11333}
11334
11335/*
11336 * DTrace DOF Functions
11337 */
11338/*ARGSUSED*/
11339static void
11340dtrace_dof_error(dof_hdr_t *dof, const char *str)
11341{
11342 if (dtrace_err_verbose)
11343 cmn_err(CE_WARN, "failed to process DOF: %s", str);
11344
11345#ifdef DTRACE_ERRDEBUG
11346 dtrace_errdebug(str);
11347#endif
11348}
11349
11350/*
11351 * Create DOF out of a currently enabled state. Right now, we only create
11352 * DOF containing the run-time options -- but this could be expanded to create
11353 * complete DOF representing the enabled state.
11354 */
11355static dof_hdr_t *
11356dtrace_dof_create(dtrace_state_t *state)
11357{
11358 dof_hdr_t *dof;
11359 dof_sec_t *sec;
11360 dof_optdesc_t *opt;
11361 int i, len = sizeof (dof_hdr_t) +
11362 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
11363 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11364
11365 ASSERT(MUTEX_HELD(&dtrace_lock));
11366
11367 dof = kmem_zalloc(len, KM_SLEEP);
11368 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
11369 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
11370 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
11371 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
11372
11373 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
11374 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
11375 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
11376 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
11377 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
11378 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11379
11380 dof->dofh_flags = 0;
11381 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11382 dof->dofh_secsize = sizeof (dof_sec_t);
11383 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11384 dof->dofh_secoff = sizeof (dof_hdr_t);
11385 dof->dofh_loadsz = len;
11386 dof->dofh_filesz = len;
11387 dof->dofh_pad = 0;
11388
11389 /*
11390 * Fill in the option section header...
11391 */
11392 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11393 sec->dofs_type = DOF_SECT_OPTDESC;
11394 sec->dofs_align = sizeof (uint64_t);
11395 sec->dofs_flags = DOF_SECF_LOAD;
11396 sec->dofs_entsize = sizeof (dof_optdesc_t);
11397
11398 opt = (dof_optdesc_t *)((uintptr_t)sec +
11399 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11400
11401 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11402 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11403
11404 for (i = 0; i < DTRACEOPT_MAX; i++) {
11405 opt[i].dofo_option = i;
11406 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11407 opt[i].dofo_value = state->dts_options[i];
11408 }
11409
11410 return (dof);
11411}
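/*
 * For illustration, the layout of the DOF generated above:
 *
 *	+------------------+  0
 *	| dof_hdr_t        |
 *	+------------------+  sizeof (dof_hdr_t)
 *	| dof_sec_t        |  (the single DOF_SECT_OPTDESC section header)
 *	+------------------+  rounded up to sizeof (uint64_t) alignment
 *	| dof_optdesc_t[]  |  (DTRACEOPT_MAX option descriptions)
 *	+------------------+  len (== dofh_loadsz == dofh_filesz)
 */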
11412
11413static dof_hdr_t *
11414dtrace_dof_copyin(uintptr_t uarg, int *errp)
11415{
11416 dof_hdr_t hdr, *dof;
11417
11418 ASSERT(!MUTEX_HELD(&dtrace_lock));
11419
11420 /*
11421 * First, we're going to copyin() the sizeof (dof_hdr_t).
11422 */
11423 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11424 dtrace_dof_error(NULL, "failed to copyin DOF header");
11425 *errp = EFAULT;
11426 return (NULL);
11427 }
11428
11429 /*
11430 * Now we'll allocate the entire DOF and copy it in -- provided
11431 * that the length isn't outrageous.
11432 */
11433 if (hdr.dofh_loadsz >= VBDTCAST(uint64_t)dtrace_dof_maxsize) {
11434 dtrace_dof_error(&hdr, "load size exceeds maximum");
11435 *errp = E2BIG;
11436 return (NULL);
11437 }
11438
11439 if (hdr.dofh_loadsz < sizeof (hdr)) {
11440 dtrace_dof_error(&hdr, "invalid load size");
11441 *errp = EINVAL;
11442 return (NULL);
11443 }
11444
11445 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11446
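	/*
	 * Note that the load size is re-checked below: the DOF lives in user
	 * memory and may have been modified between the two copyin() calls,
	 * so the size in the copied DOF must still match the header we
	 * validated above.
	 */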
11447 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
11448 dof->dofh_loadsz != hdr.dofh_loadsz) {
11449 kmem_free(dof, hdr.dofh_loadsz);
11450 *errp = EFAULT;
11451 return (NULL);
11452 }
11453
11454 return (dof);
11455}
11456
11457static dof_hdr_t *
11458dtrace_dof_property(const char *name)
11459{
11460#ifndef VBOX
11461 uchar_t *buf;
11462 uint64_t loadsz;
11463 unsigned int len, i;
11464 dof_hdr_t *dof;
11465
11466 /*
11467	 * Unfortunately, arrays of values in .conf files are always (and
11468 * only) interpreted to be integer arrays. We must read our DOF
11469 * as an integer array, and then squeeze it into a byte array.
11470 */
11471 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11472 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11473 return (NULL);
11474
11475 for (i = 0; i < len; i++)
11476 buf[i] = (uchar_t)(((int *)buf)[i]);
11477
11478 if (len < sizeof (dof_hdr_t)) {
11479 ddi_prop_free(buf);
11480 dtrace_dof_error(NULL, "truncated header");
11481 return (NULL);
11482 }
11483
11484 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11485 ddi_prop_free(buf);
11486 dtrace_dof_error(NULL, "truncated DOF");
11487 return (NULL);
11488 }
11489
11490 if (loadsz >= dtrace_dof_maxsize) {
11491 ddi_prop_free(buf);
11492 dtrace_dof_error(NULL, "oversized DOF");
11493 return (NULL);
11494 }
11495
11496 dof = kmem_alloc(loadsz, KM_SLEEP);
11497 bcopy(buf, dof, loadsz);
11498 ddi_prop_free(buf);
11499
11500 return (dof);
11501#else /* VBOX */
11502 return (NULL);
11503#endif /* VBOX */
11504}
11505
11506static void
11507dtrace_dof_destroy(dof_hdr_t *dof)
11508{
11509 kmem_free(dof, dof->dofh_loadsz);
11510}
11511
11512/*
11513 * Return the dof_sec_t pointer corresponding to a given section index. If the
11514 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
11515 * a type other than DOF_SECT_NONE is specified, the header is checked against
11516 * this type and NULL is returned if the types do not match.
11517 */
11518static dof_sec_t *
11519dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11520{
11521 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
11522 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11523
11524 if (i >= dof->dofh_secnum) {
11525 dtrace_dof_error(dof, "referenced section index is invalid");
11526 return (NULL);
11527 }
11528
11529 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11530 dtrace_dof_error(dof, "referenced section is not loadable");
11531 return (NULL);
11532 }
11533
11534 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11535 dtrace_dof_error(dof, "referenced section is the wrong type");
11536 return (NULL);
11537 }
11538
11539 return (sec);
11540}
11541
11542static dtrace_probedesc_t *
11543dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11544{
11545 dof_probedesc_t *probe;
11546 dof_sec_t *strtab;
11547 uintptr_t daddr = (uintptr_t)dof;
11548 uintptr_t str;
11549 size_t size;
11550
11551 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11552 dtrace_dof_error(dof, "invalid probe section");
11553 return (NULL);
11554 }
11555
11556 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11557 dtrace_dof_error(dof, "bad alignment in probe description");
11558 return (NULL);
11559 }
11560
11561 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11562 dtrace_dof_error(dof, "truncated probe description");
11563 return (NULL);
11564 }
11565
11566 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11567 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11568
11569 if (strtab == NULL)
11570 return (NULL);
11571
11572 str = daddr + strtab->dofs_offset;
11573 size = strtab->dofs_size;
11574
11575 if (probe->dofp_provider >= strtab->dofs_size) {
11576 dtrace_dof_error(dof, "corrupt probe provider");
11577 return (NULL);
11578 }
11579
11580 (void) strncpy(desc->dtpd_provider,
11581 (char *)(str + probe->dofp_provider),
11582 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11583
11584 if (probe->dofp_mod >= strtab->dofs_size) {
11585 dtrace_dof_error(dof, "corrupt probe module");
11586 return (NULL);
11587 }
11588
11589 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11590 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11591
11592 if (probe->dofp_func >= strtab->dofs_size) {
11593 dtrace_dof_error(dof, "corrupt probe function");
11594 return (NULL);
11595 }
11596
11597 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11598 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11599
11600 if (probe->dofp_name >= strtab->dofs_size) {
11601 dtrace_dof_error(dof, "corrupt probe name");
11602 return (NULL);
11603 }
11604
11605 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11606 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
11607
11608 return (desc);
11609}
11610
11611static dtrace_difo_t *
11612dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11613 cred_t *cr)
11614{
11615 dtrace_difo_t *dp;
11616 size_t ttl = 0;
11617 dof_difohdr_t *dofd;
11618 uintptr_t daddr = (uintptr_t)dof;
11619 size_t max = dtrace_difo_maxsize;
11620 int i, l, n;
11621
11622 static const struct {
11623 int section;
11624 int bufoffs;
11625 int lenoffs;
11626 int entsize;
11627 int align;
11628 const char *msg;
11629 } difo[] = {
11630 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
11631 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
11632 sizeof (dif_instr_t), "multiple DIF sections" },
11633
11634 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
11635 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
11636 sizeof (uint64_t), "multiple integer tables" },
11637
11638 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
11639 offsetof(dtrace_difo_t, dtdo_strlen), 0,
11640 sizeof (char), "multiple string tables" },
11641
11642 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
11643 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
11644 sizeof (uint_t), "multiple variable tables" },
11645
11646		{ DOF_SECT_NONE, 0, 0, 0, 0, NULL }
11647 };
11648
11649 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
11650 dtrace_dof_error(dof, "invalid DIFO header section");
11651 return (NULL);
11652 }
11653
11654 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11655 dtrace_dof_error(dof, "bad alignment in DIFO header");
11656 return (NULL);
11657 }
11658
11659 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
11660 sec->dofs_size % sizeof (dof_secidx_t)) {
11661 dtrace_dof_error(dof, "bad size in DIFO header");
11662 return (NULL);
11663 }
11664
11665 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11666 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
11667
11668 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
11669 dp->dtdo_rtype = dofd->dofd_rtype;
11670
11671 for (l = 0; l < n; l++) {
11672 dof_sec_t *subsec;
11673 void **bufp;
11674 uint32_t *lenp;
11675
11676 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
11677 dofd->dofd_links[l])) == NULL)
11678 goto err; /* invalid section link */
11679
11680 if (ttl + subsec->dofs_size > max) {
11681 dtrace_dof_error(dof, "exceeds maximum size");
11682 goto err;
11683 }
11684
11685 ttl += subsec->dofs_size;
11686
11687 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
11688 if (subsec->dofs_type != VBDTCAST(uint32_t)difo[i].section)
11689 continue;
11690
11691 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
11692 dtrace_dof_error(dof, "section not loaded");
11693 goto err;
11694 }
11695
11696 if (subsec->dofs_align != VBDTCAST(uint32_t)difo[i].align) {
11697 dtrace_dof_error(dof, "bad alignment");
11698 goto err;
11699 }
11700
11701 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
11702 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11703
11704 if (*bufp != NULL) {
11705 dtrace_dof_error(dof, difo[i].msg);
11706 goto err;
11707 }
11708
11709 if (VBDTCAST(uint32_t)difo[i].entsize != subsec->dofs_entsize) {
11710 dtrace_dof_error(dof, "entry size mismatch");
11711 goto err;
11712 }
11713
11714 if (subsec->dofs_entsize != 0 &&
11715 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11716 dtrace_dof_error(dof, "corrupt entry size");
11717 goto err;
11718 }
11719
11720 *lenp = subsec->dofs_size;
11721 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11722 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11723 *bufp, subsec->dofs_size);
11724
11725 if (subsec->dofs_entsize != 0)
11726 *lenp /= subsec->dofs_entsize;
11727
11728 break;
11729 }
11730
11731 /*
11732 * If we encounter a loadable DIFO sub-section that is not
11733 * known to us, assume this is a broken program and fail.
11734 */
11735 if (difo[i].section == DOF_SECT_NONE &&
11736 (subsec->dofs_flags & DOF_SECF_LOAD)) {
11737 dtrace_dof_error(dof, "unrecognized DIFO subsection");
11738 goto err;
11739 }
11740 }
11741
11742 if (dp->dtdo_buf == NULL) {
11743 /*
11744 * We can't have a DIF object without DIF text.
11745 */
11746 dtrace_dof_error(dof, "missing DIF text");
11747 goto err;
11748 }
11749
11750 /*
11751 * Before we validate the DIF object, run through the variable table
11752	 * looking for string variables -- if any of them has a zero size,
11753	 * we'll set its size to the system-wide default string size.  Note that
11754 * this should _not_ happen if the "strsize" option has been set --
11755 * in this case, the compiler should have set the size to reflect the
11756 * setting of the option.
11757 */
11758 for (i = 0; VBDTCAST(unsigned)i < dp->dtdo_varlen; i++) {
11759 dtrace_difv_t *v = &dp->dtdo_vartab[i];
11760 dtrace_diftype_t *t = &v->dtdv_type;
11761
11762 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
11763 continue;
11764
11765 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
11766 t->dtdt_size = VBDTCAST(uint32_t)dtrace_strsize_default;
11767 }
11768
11769 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
11770 goto err;
11771
11772 dtrace_difo_init(dp, vstate);
11773 return (dp);
11774
11775err:
11776 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
11777 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
11778 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
11779 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
11780
11781 kmem_free(dp, sizeof (dtrace_difo_t));
11782 return (NULL);
11783}
11784
11785static dtrace_predicate_t *
11786dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11787 cred_t *cr)
11788{
11789 dtrace_difo_t *dp;
11790
11791 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
11792 return (NULL);
11793
11794 return (dtrace_predicate_create(dp));
11795}
11796
11797static dtrace_actdesc_t *
11798dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11799 cred_t *cr)
11800{
11801 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
11802 dof_actdesc_t *desc;
11803 dof_sec_t *difosec;
11804 size_t offs;
11805 uintptr_t daddr = (uintptr_t)dof;
11806 uint64_t arg;
11807 dtrace_actkind_t kind;
11808
11809 if (sec->dofs_type != DOF_SECT_ACTDESC) {
11810 dtrace_dof_error(dof, "invalid action section");
11811 return (NULL);
11812 }
11813
11814 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
11815 dtrace_dof_error(dof, "truncated action description");
11816 return (NULL);
11817 }
11818
11819 if (sec->dofs_align != sizeof (uint64_t)) {
11820 dtrace_dof_error(dof, "bad alignment in action description");
11821 return (NULL);
11822 }
11823
11824 if (sec->dofs_size < sec->dofs_entsize) {
11825 dtrace_dof_error(dof, "section entry size exceeds total size");
11826 return (NULL);
11827 }
11828
11829 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
11830 dtrace_dof_error(dof, "bad entry size in action description");
11831 return (NULL);
11832 }
11833
11834 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
11835 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
11836 return (NULL);
11837 }
11838
11839 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
11840 desc = (dof_actdesc_t *)(daddr +
11841 (uintptr_t)sec->dofs_offset + offs);
11842 kind = (dtrace_actkind_t)desc->dofa_kind;
11843
11844 if (DTRACEACT_ISPRINTFLIKE(kind) &&
11845 (kind != DTRACEACT_PRINTA ||
11846 desc->dofa_strtab != DOF_SECIDX_NONE)) {
11847 dof_sec_t *strtab;
11848 char *str, *fmt;
11849 uint64_t i;
11850
11851 /*
11852 * printf()-like actions must have a format string.
11853 */
11854 if ((strtab = dtrace_dof_sect(dof,
11855 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
11856 goto err;
11857
11858 str = (char *)((uintptr_t)dof +
11859 (uintptr_t)strtab->dofs_offset);
11860
11861 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
11862 if (str[i] == '\0')
11863 break;
11864 }
11865
11866 if (i >= strtab->dofs_size) {
11867 dtrace_dof_error(dof, "bogus format string");
11868 goto err;
11869 }
11870
11871 if (i == desc->dofa_arg) {
11872 dtrace_dof_error(dof, "empty format string");
11873 goto err;
11874 }
11875
11876 i -= desc->dofa_arg;
11877 fmt = kmem_alloc(i + 1, KM_SLEEP);
11878 bcopy(&str[desc->dofa_arg], fmt, i + 1);
11879 arg = (uint64_t)(uintptr_t)fmt;
11880 } else {
11881 if (kind == DTRACEACT_PRINTA) {
11882 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
11883 arg = 0;
11884 } else {
11885 arg = desc->dofa_arg;
11886 }
11887 }
11888
11889 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
11890 desc->dofa_uarg, arg);
11891
11892 if (last != NULL) {
11893 last->dtad_next = act;
11894 } else {
11895 first = act;
11896 }
11897
11898 last = act;
11899
11900 if (desc->dofa_difo == DOF_SECIDX_NONE)
11901 continue;
11902
11903 if ((difosec = dtrace_dof_sect(dof,
11904 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
11905 goto err;
11906
11907 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
11908
11909 if (act->dtad_difo == NULL)
11910 goto err;
11911 }
11912
11913 ASSERT(first != NULL);
11914 return (first);
11915
11916err:
11917 for (act = first; act != NULL; act = next) {
11918 next = act->dtad_next;
11919 dtrace_actdesc_release(act, vstate);
11920 }
11921
11922 return (NULL);
11923}
11924
11925static dtrace_ecbdesc_t *
11926dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11927 cred_t *cr)
11928{
11929 dtrace_ecbdesc_t *ep;
11930 dof_ecbdesc_t *ecb;
11931 dtrace_probedesc_t *desc;
11932 dtrace_predicate_t *pred = NULL;
11933
11934 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
11935 dtrace_dof_error(dof, "truncated ECB description");
11936 return (NULL);
11937 }
11938
11939 if (sec->dofs_align != sizeof (uint64_t)) {
11940 dtrace_dof_error(dof, "bad alignment in ECB description");
11941 return (NULL);
11942 }
11943
11944 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
11945 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
11946
11947 if (sec == NULL)
11948 return (NULL);
11949
11950 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11951 ep->dted_uarg = ecb->dofe_uarg;
11952 desc = &ep->dted_probe;
11953
11954 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
11955 goto err;
11956
11957 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
11958 if ((sec = dtrace_dof_sect(dof,
11959 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
11960 goto err;
11961
11962 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
11963 goto err;
11964
11965 ep->dted_pred.dtpdd_predicate = pred;
11966 }
11967
11968 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
11969 if ((sec = dtrace_dof_sect(dof,
11970 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
11971 goto err;
11972
11973 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
11974
11975 if (ep->dted_action == NULL)
11976 goto err;
11977 }
11978
11979 return (ep);
11980
11981err:
11982 if (pred != NULL)
11983 dtrace_predicate_release(pred, vstate);
11984 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11985 return (NULL);
11986}
11987
11988/*
11989 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
11990 * specified DOF. At present, this amounts to simply adding 'ubase' to the
11991 * site of any user SETX relocations to account for load object base address.
11992 * In the future, if we need other relocations, this function can be extended.
11993 */
11994static int
11995dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
11996{
11997 uintptr_t daddr = (uintptr_t)dof;
11998 dof_relohdr_t *dofr =
11999 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
12000 dof_sec_t *ss, *rs, *ts;
12001 dof_relodesc_t *r;
12002 uint_t i, n;
12003
12004 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
12005 sec->dofs_align != sizeof (dof_secidx_t)) {
12006 dtrace_dof_error(dof, "invalid relocation header");
12007 return (-1);
12008 }
12009
12010 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
12011 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
12012 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
12013
12014 if (ss == NULL || rs == NULL || ts == NULL)
12015 return (-1); /* dtrace_dof_error() has been called already */
12016
12017 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
12018 rs->dofs_align != sizeof (uint64_t)) {
12019 dtrace_dof_error(dof, "invalid relocation section");
12020 return (-1);
12021 }
12022
12023 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
12024 n = rs->dofs_size / rs->dofs_entsize;
12025
12026 for (i = 0; i < n; i++) {
12027 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
12028
12029 switch (r->dofr_type) {
12030 case DOF_RELO_NONE:
12031 break;
12032 case DOF_RELO_SETX:
12033 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
12034 sizeof (uint64_t) > ts->dofs_size) {
12035 dtrace_dof_error(dof, "bad relocation offset");
12036 return (-1);
12037 }
12038
12039 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
12040 dtrace_dof_error(dof, "misaligned setx relo");
12041 return (-1);
12042 }
12043
12044 *(uint64_t *)taddr += ubase;
12045 break;
12046 default:
12047 dtrace_dof_error(dof, "invalid relocation type");
12048 return (-1);
12049 }
12050
12051 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
12052 }
12053
12054 return (0);
12055}
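/*
 * An illustrative sketch, not part of the build: the SETX fixup performed
 * by dtrace_dof_relocate() above is a bounds check, an alignment check,
 * and an in-place 64-bit add.  The miniature below repeats that
 * computation against a standalone buffer; all 'example_*' names are
 * hypothetical.
 */
#if 0 /* illustrative only, not compiled */
static int
example_apply_setx(uint8_t *tgt, size_t tgtsize, uint64_t off, uint64_t ubase)
{
	uintptr_t taddr;

	/* Same bounds discipline as the DOF_RELO_SETX case above. */
	if (off >= tgtsize || off + sizeof (uint64_t) > tgtsize)
		return (-1);

	taddr = (uintptr_t)tgt + off;
	if (!IS_P2ALIGNED(taddr, sizeof (uint64_t)))
		return (-1);

	*(uint64_t *)taddr += ubase;	/* account for the load base */
	return (0);
}
#endif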
12056
12057/*
12058 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
12059 * header: it should be at the front of a memory region that is at least
12060 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
12061 * size. It need not be validated in any other way.
12062 */
12063static int
12064dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
12065 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
12066{
12067 uint64_t len = dof->dofh_loadsz, seclen;
12068 uintptr_t daddr = (uintptr_t)dof;
12069 dtrace_ecbdesc_t *ep;
12070 dtrace_enabling_t *enab;
12071 uint_t i;
12072
12073 ASSERT(MUTEX_HELD(&dtrace_lock));
12074 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
12075
12076 /*
12077 * Check the DOF header identification bytes. In addition to checking
12078 * valid settings, we also verify that unused bits/bytes are zeroed so
12079 * we can use them later without fear of regressing existing binaries.
12080 */
12081 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
12082 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
12083 dtrace_dof_error(dof, "DOF magic string mismatch");
12084 return (-1);
12085 }
12086
12087 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
12088 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
12089 dtrace_dof_error(dof, "DOF has invalid data model");
12090 return (-1);
12091 }
12092
12093 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
12094 dtrace_dof_error(dof, "DOF encoding mismatch");
12095 return (-1);
12096 }
12097
12098 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
12099 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
12100 dtrace_dof_error(dof, "DOF version mismatch");
12101 return (-1);
12102 }
12103
12104 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
12105 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
12106 return (-1);
12107 }
12108
12109 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
12110 dtrace_dof_error(dof, "DOF uses too many integer registers");
12111 return (-1);
12112 }
12113
12114 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
12115 dtrace_dof_error(dof, "DOF uses too many tuple registers");
12116 return (-1);
12117 }
12118
12119 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
12120 if (dof->dofh_ident[i] != 0) {
12121 dtrace_dof_error(dof, "DOF has invalid ident byte set");
12122 return (-1);
12123 }
12124 }
12125
12126 if (dof->dofh_flags & ~DOF_FL_VALID) {
12127 dtrace_dof_error(dof, "DOF has invalid flag bits set");
12128 return (-1);
12129 }
12130
12131 if (dof->dofh_secsize == 0) {
12132 dtrace_dof_error(dof, "zero section header size");
12133 return (-1);
12134 }
12135
12136 /*
12137 * Check that the section headers don't exceed the amount of DOF
12138 * data. Note that we cast the section size and number of sections
12139 * to uint64_t's to prevent possible overflow in the multiplication.
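	 * (Both fields are 32-bit: with unsigned 32-bit arithmetic,
	 * dofh_secnum = 0x01000000 and dofh_secsize = 0x100 would multiply
	 * to 0x100000000 and wrap to zero, slipping past the bounds check
	 * below.)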
12140 */
12141 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
12142
12143 if (dof->dofh_secoff > len || seclen > len ||
12144 dof->dofh_secoff + seclen > len) {
12145 dtrace_dof_error(dof, "truncated section headers");
12146 return (-1);
12147 }
12148
12149 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
12150 dtrace_dof_error(dof, "misaligned section headers");
12151 return (-1);
12152 }
12153
12154 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
12155 dtrace_dof_error(dof, "misaligned section size");
12156 return (-1);
12157 }
12158
12159 /*
12160 * Take an initial pass through the section headers to be sure that
12161 * the headers don't have stray offsets. If the 'noprobes' flag is
12162 * set, do not permit sections relating to providers, probes, or args.
12163 */
12164 for (i = 0; i < dof->dofh_secnum; i++) {
12165 dof_sec_t *sec = (dof_sec_t *)(daddr +
12166 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12167
12168 if (noprobes) {
12169 switch (sec->dofs_type) {
12170 case DOF_SECT_PROVIDER:
12171 case DOF_SECT_PROBES:
12172 case DOF_SECT_PRARGS:
12173 case DOF_SECT_PROFFS:
12174 dtrace_dof_error(dof, "illegal sections "
12175 "for enabling");
12176 return (-1);
12177 }
12178 }
12179
12180 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
12181 !(sec->dofs_flags & DOF_SECF_LOAD)) {
12182 dtrace_dof_error(dof, "loadable section with load "
12183 "flag unset");
12184 return (-1);
12185 }
12186
12187 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12188 continue; /* just ignore non-loadable sections */
12189
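		/*
		 * x & (x - 1) clears the lowest set bit, so it is zero only
		 * when x has at most one bit set -- i.e., any alignment that
		 * is not a power of two fails this test.
		 */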
12190 if (sec->dofs_align & (sec->dofs_align - 1)) {
12191 dtrace_dof_error(dof, "bad section alignment");
12192 return (-1);
12193 }
12194
12195 if (sec->dofs_offset & (sec->dofs_align - 1)) {
12196 dtrace_dof_error(dof, "misaligned section");
12197 return (-1);
12198 }
12199
12200 if (sec->dofs_offset > len || sec->dofs_size > len ||
12201 sec->dofs_offset + sec->dofs_size > len) {
12202 dtrace_dof_error(dof, "corrupt section header");
12203 return (-1);
12204 }
12205
12206 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
12207 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
12208 dtrace_dof_error(dof, "non-terminating string table");
12209 return (-1);
12210 }
12211 }
12212
12213 /*
12214 * Take a second pass through the sections and locate and perform any
12215 * relocations that are present. We do this after the first pass to
12216 * be sure that all sections have had their headers validated.
12217 */
12218 for (i = 0; i < dof->dofh_secnum; i++) {
12219 dof_sec_t *sec = (dof_sec_t *)(daddr +
12220 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12221
12222 if (!(sec->dofs_flags & DOF_SECF_LOAD))
12223 continue; /* skip sections that are not loadable */
12224
12225 switch (sec->dofs_type) {
12226 case DOF_SECT_URELHDR:
12227 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
12228 return (-1);
12229 break;
12230 }
12231 }
12232
12233 if ((enab = *enabp) == NULL)
12234 enab = *enabp = dtrace_enabling_create(vstate);
12235
12236 for (i = 0; i < dof->dofh_secnum; i++) {
12237 dof_sec_t *sec = (dof_sec_t *)(daddr +
12238 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12239
12240 if (sec->dofs_type != DOF_SECT_ECBDESC)
12241 continue;
12242
12243 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
12244 dtrace_enabling_destroy(enab);
12245 *enabp = NULL;
12246 return (-1);
12247 }
12248
12249 dtrace_enabling_add(enab, ep);
12250 }
12251
12252 return (0);
12253}
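/*
 * A hedged sketch of the caller contract described above: read only the
 * header first, validate dofh_loadsz, and only then copy in the full image
 * that dtrace_dof_slurp() will walk.  'example_copyin' stands in for
 * whatever transport actually supplies the bytes, and a real copy-in path
 * would additionally enforce an upper bound on the image size; all
 * 'example_*' names are hypothetical.
 */
#if 0 /* illustrative only, not compiled */
static int example_copyin(const void *src, void *dst, size_t len);

static dof_hdr_t *
example_dof_copyin(const void *uarg, int *errp)
{
	dof_hdr_t hdr, *dof;

	if (example_copyin(uarg, &hdr, sizeof (hdr)) != 0) {
		*errp = EFAULT;
		return (NULL);
	}

	/* Enforce the minimum that dtrace_dof_slurp() ASSERTs on. */
	if (hdr.dofh_loadsz < sizeof (dof_hdr_t)) {
		*errp = EINVAL;
		return (NULL);
	}

	dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);

	if (example_copyin(uarg, dof, hdr.dofh_loadsz) != 0) {
		kmem_free(dof, hdr.dofh_loadsz);
		*errp = EFAULT;
		return (NULL);
	}

	return (dof);
}
#endif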
12254
12255/*
12256 * Process DOF for any options. This routine assumes that the DOF has been
12257 * at least processed by dtrace_dof_slurp().
12258 */
12259static int
12260dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
12261{
12262 int i, rval;
12263 uint32_t entsize;
12264 size_t offs;
12265 dof_optdesc_t *desc;
12266
12267 for (i = 0; VBDTCAST(unsigned)i < dof->dofh_secnum; i++) {
12268 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
12269 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
12270
12271 if (sec->dofs_type != DOF_SECT_OPTDESC)
12272 continue;
12273
12274 if (sec->dofs_align != sizeof (uint64_t)) {
12275 dtrace_dof_error(dof, "bad alignment in "
12276 "option description");
12277 return (EINVAL);
12278 }
12279
12280 if ((entsize = sec->dofs_entsize) == 0) {
12281 dtrace_dof_error(dof, "zeroed option entry size");
12282 return (EINVAL);
12283 }
12284
12285 if (entsize < sizeof (dof_optdesc_t)) {
12286 dtrace_dof_error(dof, "bad option entry size");
12287 return (EINVAL);
12288 }
12289
12290 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
12291 desc = (dof_optdesc_t *)((uintptr_t)dof +
12292 (uintptr_t)sec->dofs_offset + offs);
12293
12294 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
12295 dtrace_dof_error(dof, "non-zero option string");
12296 return (EINVAL);
12297 }
12298
12299 if (desc->dofo_value == VBDTCAST(uint64_t)DTRACEOPT_UNSET) {
12300 dtrace_dof_error(dof, "unset option");
12301 return (EINVAL);
12302 }
12303
12304 if ((rval = dtrace_state_option(state,
12305 desc->dofo_option, desc->dofo_value)) != 0) {
12306 dtrace_dof_error(dof, "rejected option");
12307 return (rval);
12308 }
12309 }
12310 }
12311
12312 return (0);
12313}
12314
12315/*
12316 * DTrace Consumer State Functions
12317 */
12318VBDTSTATIC int
12319dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
12320{
12321 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
12322 void *base;
12323 uintptr_t limit;
12324 dtrace_dynvar_t *dvar, *next, *start;
12325 VBDTTYPE(size_t,int) i;
12326
12327 ASSERT(MUTEX_HELD(&dtrace_lock));
12328 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
12329
12330 bzero(dstate, sizeof (dtrace_dstate_t));
12331
12332 if ((dstate->dtds_chunksize = chunksize) == 0)
12333 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
12334
12335 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
12336 size = min;
12337
12338 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
12339 return (ENOMEM);
12340
12341 dstate->dtds_size = size;
12342 dstate->dtds_base = base;
12343 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
12344 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
12345
12346 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
12347
12348 if (hashsize != 1 && (hashsize & 1))
12349 hashsize--;
12350
12351 dstate->dtds_hashsize = hashsize;
12352 dstate->dtds_hash = dstate->dtds_base;
12353
12354 /*
12355 * Set all of our hash buckets to point to the single sink, and (if
12356 * it hasn't already been set), set the sink's hash value to be the
12357 * sink sentinel value. The sink is needed for dynamic variable
12358 * lookups to know that they have iterated over an entire, valid hash
12359 * chain.
12360 */
12361 for (i = 0; i < hashsize; i++)
12362 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
12363
12364 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
12365 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
12366
12367 /*
12368 * Determine number of active CPUs. Divide free list evenly among
12369 * active CPUs.
12370 */
12371 start = (dtrace_dynvar_t *)
12372 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
12373 limit = (uintptr_t)base + size;
12374
12375 maxper = (limit - (uintptr_t)start) / NCPU;
12376 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
12377
12378 for (i = 0; i < NCPU; i++) {
12379 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
12380
12381 /*
12382 * If we don't even have enough chunks to make it once through
12383 * NCPUs, we're just going to allocate everything to the first
12384 * CPU. And if we're on the last CPU, we're going to allocate
12385 * whatever is left over. In either case, we set the limit to
12386 * be the limit of the dynamic variable space.
12387 */
12388 if (maxper == 0 || i == NCPU - 1) {
12389 limit = (uintptr_t)base + size;
12390 start = NULL;
12391 } else {
12392 limit = (uintptr_t)start + maxper;
12393 start = (dtrace_dynvar_t *)limit;
12394 }
12395
12396 ASSERT(limit <= (uintptr_t)base + size);
12397
12398 for (;;) {
12399 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12400 dstate->dtds_chunksize);
12401
12402 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12403 break;
12404
12405 dvar->dtdv_next = next;
12406 dvar = next;
12407 }
12408
12409 if (maxper == 0)
12410 break;
12411 }
12412
12413 return (0);
12414}
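/*
 * An illustrative sketch of the carving loop above, under the assumption
 * that size / NCPU is at least one chunk: divide a single region into
 * fixed-size chunks and thread each CPU's share onto its own free list.
 * Like the loop above, a trailing chunk that would only just fit is
 * conservatively left unused.  All 'example_*' names are hypothetical.
 */
#if 0 /* illustrative only, not compiled */
typedef struct example_chunk {
	struct example_chunk *ec_next;
} example_chunk_t;

static example_chunk_t *example_pcpu_free[NCPU];

static void
example_carve(void *base, size_t size, size_t chunksize)
{
	uintptr_t start = (uintptr_t)base;
	size_t maxper = ((size / NCPU) / chunksize) * chunksize;
	int i;

	for (i = 0; i < NCPU; i++) {
		uintptr_t limit = (i == NCPU - 1) ?
		    (uintptr_t)base + size : start + maxper;
		example_chunk_t *c = (example_chunk_t *)start;

		example_pcpu_free[i] = c;

		/* Chain chunks while the *next* one still fits below limit. */
		while (start + 2 * chunksize < limit) {
			c->ec_next = (example_chunk_t *)(start + chunksize);
			c = c->ec_next;
			start += chunksize;
		}

		c->ec_next = NULL;
		start = limit;
	}
}
#endif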
12415
12416VBDTSTATIC void
12417dtrace_dstate_fini(dtrace_dstate_t *dstate)
12418{
12419 ASSERT(MUTEX_HELD(&cpu_lock));
12420
12421 if (dstate->dtds_base == NULL)
12422 return;
12423
12424 kmem_free(dstate->dtds_base, dstate->dtds_size);
12425 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12426}
12427
12428static void
12429dtrace_vstate_fini(dtrace_vstate_t *vstate)
12430{
12431 /*
12432 * Logical XOR, where are you?
12433 */
12434 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
12435
12436 if (vstate->dtvs_nglobals > 0) {
12437 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
12438 sizeof (dtrace_statvar_t *));
12439 }
12440
12441 if (vstate->dtvs_ntlocals > 0) {
12442 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
12443 sizeof (dtrace_difv_t));
12444 }
12445
12446 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
12447
12448 if (vstate->dtvs_nlocals > 0) {
12449 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
12450 sizeof (dtrace_statvar_t *));
12451 }
12452}
12453
12454static void
12455dtrace_state_clean(dtrace_state_t *state)
12456{
12457 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12458 return;
12459
12460 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12461 dtrace_speculation_clean(state);
12462}
12463#ifdef VBOX
12464static DECLCALLBACK(void) dtrace_state_clean_timer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
12465{
12466 dtrace_state_clean((dtrace_state_t *)pvUser);
12467 NOREF(pTimer); NOREF(iTick);
12468}
12469#endif
12470
12471static void
12472dtrace_state_deadman(dtrace_state_t *state)
12473{
12474 hrtime_t now;
12475
12476 dtrace_sync();
12477
12478 now = dtrace_gethrtime();
12479
12480 if (state != dtrace_anon.dta_state &&
12481 now - state->dts_laststatus >= dtrace_deadman_user)
12482 return;
12483
12484 /*
12485 * We must be sure that dts_alive never appears to be less than the
12486 * value upon entry to dtrace_state_deadman(), and because we lack a
12487 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12488 * store INT64_MAX to it, followed by a memory barrier, followed by
12489 * the new value. This assures that dts_alive never appears to be
12490 * less than its true value, regardless of the order in which the
12491 * stores to the underlying storage are issued.
12492 */
12493 state->dts_alive = INT64_MAX;
12494 dtrace_membar_producer();
12495 state->dts_alive = now;
12496}
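/*
 * The update pattern above in isolation: lacking a 64-bit compare-and-swap,
 * a racing reader must never observe the timestamp moving backwards, so it
 * is parked at INT64_MAX across the update and the two stores are ordered
 * with a producer barrier.  A minimal sketch, assuming only the primitives
 * already used above:
 */
#if 0 /* illustrative only, not compiled */
static void
example_monotone_store(volatile hrtime_t *tsp, hrtime_t now)
{
	*tsp = INT64_MAX;		/* readers see "maximally alive" */
	dtrace_membar_producer();	/* order the two stores */
	*tsp = now;			/* publish the real value */
}
#endif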
12497
12498#ifdef VBOX
12499static DECLCALLBACK(void) dtrace_state_deadman_timer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
12500{
12501 dtrace_state_deadman((dtrace_state_t *)pvUser);
12502 NOREF(pTimer); NOREF(iTick);
12503}
12504#endif
12505
12506VBDTSTATIC dtrace_state_t *
12507#ifdef VBOX
12508dtrace_state_create(cred_t *cr)
12509#else
12510dtrace_state_create(dev_t *devp, cred_t *cr)
12511#endif
12512{
12513#ifndef VBOX
12514 minor_t minor;
12515 major_t major;
12516#endif
12517 char c[30];
12518 dtrace_state_t *state;
12519 dtrace_optval_t *opt;
12520 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12521
12522 ASSERT(MUTEX_HELD(&dtrace_lock));
12523 ASSERT(MUTEX_HELD(&cpu_lock));
12524
12525#ifndef VBOX
12526 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12527 VM_BESTFIT | VM_SLEEP);
12528
12529 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12530 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12531 return (NULL);
12532 }
12533
12534 state = ddi_get_soft_state(dtrace_softstate, minor);
12535#else
12536	state = kmem_zalloc(sizeof (*state), KM_SLEEP);
12537 if (!state) {
12538 return (NULL);
12539 }
12540#endif
12541 state->dts_epid = DTRACE_EPIDNONE + 1;
12542
12543#ifndef VBOX
12544 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
12545#else
12546 (void) snprintf(c, sizeof (c), "dtrace_aggid_%p", state);
12547#endif
12548 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
12549 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12550
12551#ifndef VBOX
12552 if (devp != NULL) {
12553 major = getemajor(*devp);
12554 } else {
12555 major = ddi_driver_major(dtrace_devi);
12556 }
12557
12558 state->dts_dev = makedevice(major, minor);
12559
12560 if (devp != NULL)
12561 *devp = state->dts_dev;
12562#endif
12563
12564 /*
12565 * We allocate NCPU buffers. On the one hand, this can be quite
12566 * a bit of memory per instance (nearly 36K on a Starcat). On the
12567 * other hand, it saves an additional memory reference in the probe
12568 * path.
12569 */
12570 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12571 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12572 state->dts_cleaner = CYCLIC_NONE;
12573 state->dts_deadman = CYCLIC_NONE;
12574 state->dts_vstate.dtvs_state = state;
12575
12576 for (i = 0; i < DTRACEOPT_MAX; i++)
12577 state->dts_options[i] = DTRACEOPT_UNSET;
12578
12579 /*
12580 * Set the default options.
12581 */
12582 opt = state->dts_options;
12583 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12584 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12585 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12586 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12587 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12588 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12589 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12590 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12591 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12592 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12593 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12594 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12595 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12596 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12597
12598 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12599
12600 /*
12601 * Depending on the user credentials, we set flag bits which alter probe
12602 * visibility or the amount of destructiveness allowed. In the case of
12603 * actual anonymous tracing, or the possession of all privileges, all of
12604 * the normal checks are bypassed.
12605 */
12606 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12607 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12608 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12609 } else {
12610 /*
12611 * Set up the credentials for this instantiation. We take a
12612 * hold on the credential to prevent it from disappearing on
12613 * us; this in turn prevents the zone_t referenced by this
12614 * credential from disappearing. This means that we can
12615 * examine the credential and the zone from probe context.
12616 */
12617 crhold(cr);
12618 state->dts_cred.dcr_cred = cr;
12619
12620 /*
12621 * CRA_PROC means "we have *some* privilege for dtrace" and
12622 * unlocks the use of variables like pid, zonename, etc.
12623 */
12624 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
12625 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12626 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
12627 }
12628
12629 /*
12630 * dtrace_user allows use of syscall and profile providers.
12631 * If the user also has proc_owner and/or proc_zone, we
12632 * extend the scope to include additional visibility and
12633 * destructive power.
12634 */
12635 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
12636 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
12637 state->dts_cred.dcr_visible |=
12638 DTRACE_CRV_ALLPROC;
12639
12640 state->dts_cred.dcr_action |=
12641 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12642 }
12643
12644 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
12645 state->dts_cred.dcr_visible |=
12646 DTRACE_CRV_ALLZONE;
12647
12648 state->dts_cred.dcr_action |=
12649 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12650 }
12651
12652 /*
12653 * If we have all privs in whatever zone this is,
12654 * we can do destructive things to processes which
12655 * have altered credentials.
12656 */
12657 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12658 cr->cr_zone->zone_privset)) {
12659 state->dts_cred.dcr_action |=
12660 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12661 }
12662 }
12663
12664 /*
12665 * Holding the dtrace_kernel privilege also implies that
12666 * the user has the dtrace_user privilege from a visibility
12667 * perspective. But without further privileges, some
12668 * destructive actions are not available.
12669 */
12670 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
12671 /*
12672 * Make all probes in all zones visible. However,
12673 * this doesn't mean that all actions become available
12674 * to all zones.
12675 */
12676 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
12677 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
12678
12679 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
12680 DTRACE_CRA_PROC;
12681 /*
12682 * Holding proc_owner means that destructive actions
12683 * for *this* zone are allowed.
12684 */
12685 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12686 state->dts_cred.dcr_action |=
12687 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12688
12689 /*
12690 * Holding proc_zone means that destructive actions
12691 * for this user/group ID in all zones is allowed.
12692	 * for this user/group ID in all zones are allowed.
12693 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12694 state->dts_cred.dcr_action |=
12695 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12696
12697 /*
12698 * If we have all privs in whatever zone this is,
12699 * we can do destructive things to processes which
12700 * have altered credentials.
12701 */
12702 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12703 cr->cr_zone->zone_privset)) {
12704 state->dts_cred.dcr_action |=
12705 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12706 }
12707 }
12708
12709 /*
12710 * Holding the dtrace_proc privilege gives control over fasttrap
12711 * and pid providers. We need to grant wider destructive
12712 * privileges in the event that the user has proc_owner and/or
12713 * proc_zone.
12714 */
12715 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12716 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12717 state->dts_cred.dcr_action |=
12718 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12719
12720 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12721 state->dts_cred.dcr_action |=
12722 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12723 }
12724 }
12725
12726 return (state);
12727}
12728
12729static int
12730dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
12731{
12732 dtrace_optval_t *opt = state->dts_options, size;
12733 processorid_t cpu VBDTUNASS(DTRACE_CPUALL);
12734 int flags = 0, rval;
12735
12736 ASSERT(MUTEX_HELD(&dtrace_lock));
12737 ASSERT(MUTEX_HELD(&cpu_lock));
12738 ASSERT(which < DTRACEOPT_MAX);
12739 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
12740 (state == dtrace_anon.dta_state &&
12741 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
12742
12743 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
12744 return (0);
12745
12746 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
12747 cpu = opt[DTRACEOPT_CPU];
12748
12749 if (which == DTRACEOPT_SPECSIZE)
12750 flags |= DTRACEBUF_NOSWITCH;
12751
12752 if (which == DTRACEOPT_BUFSIZE) {
12753 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
12754 flags |= DTRACEBUF_RING;
12755
12756 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
12757 flags |= DTRACEBUF_FILL;
12758
12759 if (state != dtrace_anon.dta_state ||
12760 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
12761 flags |= DTRACEBUF_INACTIVE;
12762 }
12763
12764 for (size = opt[which]; size >= VBDTCAST(dtrace_optval_t)sizeof (uint64_t); size >>= 1) {
12765 /*
12766 * The size must be 8-byte aligned. If the size is not 8-byte
12767 * aligned, drop it down by the difference.
12768 */
12769 if (size & (sizeof (uint64_t) - 1))
12770 size -= size & (sizeof (uint64_t) - 1);
12771
12772 if (size < state->dts_reserve) {
12773 /*
12774	 * Buffers must always be large enough to accommodate
12775 * their prereserved space. We return E2BIG instead
12776 * of ENOMEM in this case to allow for user-level
12777 * software to differentiate the cases.
12778 */
12779 return (E2BIG);
12780 }
12781
12782 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
12783
12784 if (rval != ENOMEM) {
12785 opt[which] = size;
12786 return (rval);
12787 }
12788
12789 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
12790 return (rval);
12791 }
12792
12793 return (ENOMEM);
12794}
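/*
 * The sizing strategy above, reduced to its core: try the requested size
 * and, on allocation failure, halve (keeping 8-byte alignment) until an
 * allocation succeeds or the size drops below the required minimum.  (The
 * manual-resize early-out above is omitted here.)  'example_alloc' is a
 * hypothetical stand-in for the buffer allocator.
 */
#if 0 /* illustrative only, not compiled */
static int example_alloc(size_t size);	/* returns 0 on success */

static int
example_backoff_alloc(size_t want, size_t minsz, size_t *gotp)
{
	size_t size;

	for (size = want; size >= minsz; size >>= 1) {
		size &= ~(sizeof (uint64_t) - 1);	/* 8-byte align */

		if (example_alloc(size) == 0) {
			*gotp = size;
			return (0);
		}
	}

	return (ENOMEM);
}
#endif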
12795
12796static int
12797dtrace_state_buffers(dtrace_state_t *state)
12798{
12799 dtrace_speculation_t *spec = state->dts_speculations;
12800 int rval, i;
12801
12802 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
12803 DTRACEOPT_BUFSIZE)) != 0)
12804 return (rval);
12805
12806 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
12807 DTRACEOPT_AGGSIZE)) != 0)
12808 return (rval);
12809
12810 for (i = 0; i < state->dts_nspeculations; i++) {
12811 if ((rval = dtrace_state_buffer(state,
12812 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
12813 return (rval);
12814 }
12815
12816 return (0);
12817}
12818
12819static void
12820dtrace_state_prereserve(dtrace_state_t *state)
12821{
12822 dtrace_ecb_t *ecb;
12823 dtrace_probe_t *probe;
12824
12825 state->dts_reserve = 0;
12826
12827 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
12828 return;
12829
12830 /*
12831 * If our buffer policy is a "fill" buffer policy, we need to set the
12832 * prereserved space to be the space required by the END probes.
12833 */
12834 probe = dtrace_probes[dtrace_probeid_end - 1];
12835 ASSERT(probe != NULL);
12836
12837 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
12838 if (ecb->dte_state != state)
12839 continue;
12840
12841 state->dts_reserve += VBDTCAST(uint32_t)ecb->dte_needed + ecb->dte_alignment;
12842 }
12843}
12844
12845static int
12846dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
12847{
12848 dtrace_optval_t *opt = state->dts_options, sz, nspec;
12849 dtrace_speculation_t *spec;
12850 dtrace_buffer_t *buf;
12851#ifndef VBOX
12852 cyc_handler_t hdlr;
12853 cyc_time_t when;
12854#endif
12855 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
12856 dtrace_icookie_t cookie;
12857
12858 mutex_enter(&cpu_lock);
12859 mutex_enter(&dtrace_lock);
12860
12861 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
12862 rval = EBUSY;
12863 goto out;
12864 }
12865
12866 /*
12867 * Before we can perform any checks, we must prime all of the
12868 * retained enablings that correspond to this state.
12869 */
12870 dtrace_enabling_prime(state);
12871
12872 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
12873 rval = EACCES;
12874 goto out;
12875 }
12876
12877 dtrace_state_prereserve(state);
12878
12879 /*
12880	 * Now we want to try to allocate our speculations.
12881 * We do not automatically resize the number of speculations; if
12882 * this fails, we will fail the operation.
12883 */
12884 nspec = opt[DTRACEOPT_NSPEC];
12885 ASSERT(nspec != DTRACEOPT_UNSET);
12886
12887 if (nspec > INT_MAX) {
12888 rval = ENOMEM;
12889 goto out;
12890 }
12891
12892 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
12893
12894 if (spec == NULL) {
12895 rval = ENOMEM;
12896 goto out;
12897 }
12898
12899 state->dts_speculations = spec;
12900 state->dts_nspeculations = (int)nspec;
12901
12902 for (i = 0; i < nspec; i++) {
12903 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
12904 rval = ENOMEM;
12905 goto err;
12906 }
12907
12908 spec[i].dtsp_buffer = buf;
12909 }
12910
12911 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
12912 if (dtrace_anon.dta_state == NULL) {
12913 rval = ENOENT;
12914 goto out;
12915 }
12916
12917 if (state->dts_necbs != 0) {
12918 rval = EALREADY;
12919 goto out;
12920 }
12921
12922 state->dts_anon = dtrace_anon_grab();
12923 ASSERT(state->dts_anon != NULL);
12924 state = state->dts_anon;
12925
12926 /*
12927 * We want "grabanon" to be set in the grabbed state, so we'll
12928 * copy that option value from the grabbing state into the
12929 * grabbed state.
12930 */
12931 state->dts_options[DTRACEOPT_GRABANON] =
12932 opt[DTRACEOPT_GRABANON];
12933
12934 *cpu = dtrace_anon.dta_beganon;
12935
12936 /*
12937 * If the anonymous state is active (as it almost certainly
12938 * is if the anonymous enabling ultimately matched anything),
12939 * we don't allow any further option processing -- but we
12940 * don't return failure.
12941 */
12942 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
12943 goto out;
12944 }
12945
12946 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
12947 opt[DTRACEOPT_AGGSIZE] != 0) {
12948 if (state->dts_aggregations == NULL) {
12949 /*
12950 * We're not going to create an aggregation buffer
12951 * because we don't have any ECBs that contain
12952 * aggregations -- set this option to 0.
12953 */
12954 opt[DTRACEOPT_AGGSIZE] = 0;
12955 } else {
12956 /*
12957 * If we have an aggregation buffer, we must also have
12958 * a buffer to use as scratch.
12959 */
12960 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
12961 opt[DTRACEOPT_BUFSIZE] < VBDTCAST(dtrace_optval_t)state->dts_needed) {
12962 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
12963 }
12964 }
12965 }
12966
12967 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
12968 opt[DTRACEOPT_SPECSIZE] != 0) {
12969 if (!state->dts_speculates) {
12970 /*
12971 * We're not going to create speculation buffers
12972 * because we don't have any ECBs that actually
12973 * speculate -- set the speculation size to 0.
12974 */
12975 opt[DTRACEOPT_SPECSIZE] = 0;
12976 }
12977 }
12978
12979 /*
12980 * The bare minimum size for any buffer that we're actually going to
12981 * do anything to is sizeof (uint64_t).
12982 */
12983 sz = sizeof (uint64_t);
12984
12985 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
12986 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
12987 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
12988 /*
12989 * A buffer size has been explicitly set to 0 (or to a size
12990 * that will be adjusted to 0) and we need the space -- we
12991 * need to return failure. We return ENOSPC to differentiate
12992 * it from failing to allocate a buffer due to failure to meet
12993 * the reserve (for which we return E2BIG).
12994 */
12995 rval = ENOSPC;
12996 goto out;
12997 }
12998
12999 if ((rval = dtrace_state_buffers(state)) != 0)
13000 goto err;
13001
13002 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
13003 sz = dtrace_dstate_defsize;
13004
13005 do {
13006 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
13007
13008 if (rval == 0)
13009 break;
13010
13011 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
13012 goto err;
13013 } while (sz >>= 1);
13014
13015 opt[DTRACEOPT_DYNVARSIZE] = sz;
13016
13017 if (rval != 0)
13018 goto err;
13019
13020 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
13021 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
13022
13023 if (opt[DTRACEOPT_CLEANRATE] == 0)
13024 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13025
13026 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
13027 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
13028
13029 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
13030 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
13031
13032#ifndef VBOX
13033 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
13034 hdlr.cyh_arg = state;
13035 hdlr.cyh_level = CY_LOW_LEVEL;
13036
13037 when.cyt_when = 0;
13038 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
13039
13040 state->dts_cleaner = cyclic_add(&hdlr, &when);
13041
13042 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
13043 hdlr.cyh_arg = state;
13044 hdlr.cyh_level = CY_LOW_LEVEL;
13045
13046 when.cyt_when = 0;
13047 when.cyt_interval = dtrace_deadman_interval;
13048
13049 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13050 state->dts_deadman = cyclic_add(&hdlr, &when);
13051#else /* VBOX */
13052
13053 rval = RTTimerCreateEx(&state->dts_cleaner, opt[DTRACEOPT_CLEANRATE],
13054 RTTIMER_FLAGS_CPU_ANY, dtrace_state_clean_timer, state);
13055 if (RT_FAILURE(rval)) {
13056 rval = RTErrConvertToErrno(rval);
13057 goto err;
13058 }
13059
13060 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
13061 rval = RTTimerCreateEx(&state->dts_deadman, dtrace_deadman_interval,
13062 RTTIMER_FLAGS_CPU_ANY, dtrace_state_deadman_timer, state);
13063 if (RT_FAILURE(rval)) {
13064 RTTimerDestroy(state->dts_cleaner);
13065 state->dts_cleaner = CYCLIC_NONE;
13066 state->dts_deadman = CYCLIC_NONE;
13067 rval = RTErrConvertToErrno(rval);
13068 goto err;
13069 }
13070#endif /* VBOX */
13071
13072 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
13073
13074 /*
13075 * Now it's time to actually fire the BEGIN probe. We need to disable
13076 * interrupts here both to record the CPU on which we fired the BEGIN
13077 * probe (the data from this CPU will be processed first at user
13078 * level) and to manually activate the buffer for this CPU.
13079 */
13080 cookie = dtrace_interrupt_disable();
13081 *cpu = VBDT_GET_CPUID();
13082 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
13083 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
13084
13085 dtrace_probe(dtrace_probeid_begin,
13086 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13087 dtrace_interrupt_enable(cookie);
13088 /*
13089 * We may have had an exit action from a BEGIN probe; only change our
13090 * state to ACTIVE if we're still in WARMUP.
13091 */
13092 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
13093 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
13094
13095 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
13096 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
13097
13098 /*
13099	 * Regardless of whether we're now in ACTIVE or DRAINING, we
13100 * want each CPU to transition its principal buffer out of the
13101 * INACTIVE state. Doing this assures that no CPU will suddenly begin
13102 * processing an ECB halfway down a probe's ECB chain; all CPUs will
13103 * atomically transition from processing none of a state's ECBs to
13104 * processing all of them.
13105 */
13106#ifndef VBOX
13107 dtrace_xcall(DTRACE_CPUALL,
13108 (dtrace_xcall_t)dtrace_buffer_activate, state);
13109#else
13110 RTMpOnAll(dtrace_buffer_activate_wrapper, state, NULL);
13111#endif
13112 goto out;
13113
13114err:
13115 dtrace_buffer_free(state->dts_buffer);
13116 dtrace_buffer_free(state->dts_aggbuffer);
13117
13118 if ((nspec = state->dts_nspeculations) == 0) {
13119 ASSERT(state->dts_speculations == NULL);
13120 goto out;
13121 }
13122
13123 spec = state->dts_speculations;
13124 ASSERT(spec != NULL);
13125
13126 for (i = 0; i < state->dts_nspeculations; i++) {
13127 if ((buf = spec[i].dtsp_buffer) == NULL)
13128 break;
13129
13130 dtrace_buffer_free(buf);
13131 kmem_free(buf, bufsize);
13132 }
13133
13134 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13135 state->dts_nspeculations = 0;
13136 state->dts_speculations = NULL;
13137
13138out:
13139 mutex_exit(&dtrace_lock);
13140 mutex_exit(&cpu_lock);
13141
13142 return (rval);
13143}
13144
13145static int
13146dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
13147{
13148 dtrace_icookie_t cookie;
13149
13150 ASSERT(MUTEX_HELD(&dtrace_lock));
13151
13152 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
13153 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
13154 return (EINVAL);
13155
13156 /*
13157 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
13158 * to be sure that every CPU has seen it. See below for the details
13159 * on why this is done.
13160 */
13161 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
13162 dtrace_sync();
13163
13164 /*
13165 * By this point, it is impossible for any CPU to be still processing
13166 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
13167 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
13168 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
13169 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
13170 * iff we're in the END probe.
13171 */
13172 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
13173 dtrace_sync();
13174 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
13175
13176 /*
13177 * Finally, we can release the reserve and call the END probe. We
13178 * disable interrupts across calling the END probe to allow us to
13179 * return the CPU on which we actually called the END probe. This
13180 * allows user-land to be sure that this CPU's principal buffer is
13181 * processed last.
13182 */
13183 state->dts_reserve = 0;
13184
13185 cookie = dtrace_interrupt_disable();
13186 *cpu = VBDT_GET_CPUID();
13187 dtrace_probe(dtrace_probeid_end,
13188 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
13189 dtrace_interrupt_enable(cookie);
13190
13191 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
13192 dtrace_sync();
13193
13194 return (0);
13195}
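/*
 * A condensed skeleton of the protocol above, to make its shape explicit:
 * each activity transition is published and then followed by a
 * dtrace_sync(), so that no CPU can still be acting on the previous
 * activity when the next step begins.
 */
#if 0 /* illustrative only, not compiled */
static void
example_two_phase_stop(dtrace_state_t *state)
{
	state->dts_activity = DTRACE_ACTIVITY_DRAINING;
	dtrace_sync();		/* no CPU still believes we're ACTIVE */

	state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
	dtrace_sync();		/* COOLDOWN now implies "in the END probe" */

	/* ... release the reserve and fire END here ... */

	state->dts_activity = DTRACE_ACTIVITY_STOPPED;
	dtrace_sync();
}
#endif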
13196
13197static int
13198dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
13199 dtrace_optval_t val)
13200{
13201 ASSERT(MUTEX_HELD(&dtrace_lock));
13202
13203 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
13204 return (EBUSY);
13205
13206 if (option >= DTRACEOPT_MAX)
13207 return (EINVAL);
13208
13209 if (option != DTRACEOPT_CPU && val < 0)
13210 return (EINVAL);
13211
13212 switch (option) {
13213 case DTRACEOPT_DESTRUCTIVE:
13214 if (dtrace_destructive_disallow)
13215 return (EACCES);
13216
13217 state->dts_cred.dcr_destructive = 1;
13218 break;
13219
13220 case DTRACEOPT_BUFSIZE:
13221 case DTRACEOPT_DYNVARSIZE:
13222 case DTRACEOPT_AGGSIZE:
13223 case DTRACEOPT_SPECSIZE:
13224 case DTRACEOPT_STRSIZE:
13225 if (val < 0)
13226 return (EINVAL);
13227
13228 if (val >= LONG_MAX) {
13229 /*
13230 * If this is an otherwise negative value, set it to
13231 * the highest multiple of 128m less than LONG_MAX.
13232 * Technically, we're adjusting the size without
13233 * regard to the buffer resizing policy, but in fact,
13234 * this has no effect -- if we set the buffer size to
13235 * ~LONG_MAX and the buffer policy is ultimately set to
13236 * be "manual", the buffer allocation is guaranteed to
13237 * fail, if only because the allocation requires two
13238	 * buffers. (We set the size to the highest
13239 * multiple of 128m because it ensures that the size
13240 * will remain a multiple of a megabyte when
13241 * repeatedly halved -- all the way down to 15m.)
13242 */
13243 val = LONG_MAX - (1 << 27) + 1;
13244 }
13245 }
13246
13247 state->dts_options[option] = val;
13248
13249 return (0);
13250}
13251
13252static void
13253dtrace_state_destroy(dtrace_state_t *state)
13254{
13255 dtrace_ecb_t *ecb;
13256 dtrace_vstate_t *vstate = &state->dts_vstate;
13257#ifndef VBOX
13258 minor_t minor = getminor(state->dts_dev);
13259#endif
13260 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
13261 dtrace_speculation_t *spec = state->dts_speculations;
13262 int nspec = state->dts_nspeculations;
13263 uint32_t match;
13264
13265 ASSERT(MUTEX_HELD(&dtrace_lock));
13266 ASSERT(MUTEX_HELD(&cpu_lock));
13267
13268 /*
13269 * First, retract any retained enablings for this state.
13270 */
13271 dtrace_enabling_retract(state);
13272 ASSERT(state->dts_nretained == 0);
13273
13274 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
13275 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
13276 /*
13277 * We have managed to come into dtrace_state_destroy() on a
13278 * hot enabling -- almost certainly because of a disorderly
13279 * shutdown of a consumer. (That is, a consumer that is
13280 * exiting without having called dtrace_stop().) In this case,
13281 * we're going to set our activity to be KILLED, and then
13282 * issue a sync to be sure that everyone is out of probe
13283 * context before we start blowing away ECBs.
13284 */
13285 state->dts_activity = DTRACE_ACTIVITY_KILLED;
13286 dtrace_sync();
13287 }
13288
13289 /*
13290 * Release the credential hold we took in dtrace_state_create().
13291 */
13292 if (state->dts_cred.dcr_cred != NULL)
13293 crfree(state->dts_cred.dcr_cred);
13294
13295 /*
13296 * Now we can safely disable and destroy any enabled probes. Because
13297 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
13298 * (especially if they're all enabled), we take two passes through the
13299 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
13300 * in the second we disable whatever is left over.
13301 */
13302 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
13303 for (i = 0; i < state->dts_necbs; i++) {
13304 if ((ecb = state->dts_ecbs[i]) == NULL)
13305 continue;
13306
13307 if (match && ecb->dte_probe != NULL) {
13308 dtrace_probe_t *probe = ecb->dte_probe;
13309 dtrace_provider_t *prov = probe->dtpr_provider;
13310
13311 if (!(prov->dtpv_priv.dtpp_flags & match))
13312 continue;
13313 }
13314
13315 dtrace_ecb_disable(ecb);
13316 dtrace_ecb_destroy(ecb);
13317 }
13318
13319 if (!match)
13320 break;
13321 }
13322
13323 /*
13324 * Before we free the buffers, perform one more sync to assure that
13325 * every CPU is out of probe context.
13326 */
13327 dtrace_sync();
13328
13329 dtrace_buffer_free(state->dts_buffer);
13330 dtrace_buffer_free(state->dts_aggbuffer);
13331
13332 for (i = 0; i < nspec; i++)
13333 dtrace_buffer_free(spec[i].dtsp_buffer);
13334
13335 if (state->dts_cleaner != CYCLIC_NONE)
13336 cyclic_remove(state->dts_cleaner);
13337
13338 if (state->dts_deadman != CYCLIC_NONE)
13339 cyclic_remove(state->dts_deadman);
13340
13341 dtrace_dstate_fini(&vstate->dtvs_dynvars);
13342 dtrace_vstate_fini(vstate);
13343 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
13344
13345 if (state->dts_aggregations != NULL) {
13346#ifdef DEBUG
13347 for (i = 0; i < state->dts_naggregations; i++)
13348 ASSERT(state->dts_aggregations[i] == NULL);
13349#endif
13350 ASSERT(state->dts_naggregations > 0);
13351 kmem_free(state->dts_aggregations,
13352 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
13353 }
13354
13355 kmem_free(state->dts_buffer, bufsize);
13356 kmem_free(state->dts_aggbuffer, bufsize);
13357
13358 for (i = 0; i < nspec; i++)
13359 kmem_free(spec[i].dtsp_buffer, bufsize);
13360
13361 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
13362
13363 dtrace_format_destroy(state);
13364
13365 vmem_destroy(state->dts_aggid_arena);
13366#ifndef VBOX
13367 ddi_soft_state_free(dtrace_softstate, minor);
13368 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
13369#else
13370 kmem_free(state, sizeof (*state));
13371#endif
13372}
13373
13374/*
13375 * DTrace Anonymous Enabling Functions
13376 */
13377static dtrace_state_t *
13378dtrace_anon_grab(void)
13379{
13380 dtrace_state_t *state;
13381
13382 ASSERT(MUTEX_HELD(&dtrace_lock));
13383
13384 if ((state = dtrace_anon.dta_state) == NULL) {
13385 ASSERT(dtrace_anon.dta_enabling == NULL);
13386 return (NULL);
13387 }
13388
13389 ASSERT(dtrace_anon.dta_enabling != NULL);
13390 ASSERT(dtrace_retained != NULL);
13391
13392 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
13393 dtrace_anon.dta_enabling = NULL;
13394 dtrace_anon.dta_state = NULL;
13395
13396 return (state);
13397}
13398
13399#ifndef VBOX
13400static void
13401dtrace_anon_property(void)
13402{
13403 int i, rv;
13404 dtrace_state_t *state;
13405 dof_hdr_t *dof;
13406 char c[32]; /* enough for "dof-data-" + digits */
13407
13408 ASSERT(MUTEX_HELD(&dtrace_lock));
13409 ASSERT(MUTEX_HELD(&cpu_lock));
13410
13411 for (i = 0; ; i++) {
13412 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
13413
13414 dtrace_err_verbose = 1;
13415
13416 if ((dof = dtrace_dof_property(c)) == NULL) {
13417 dtrace_err_verbose = 0;
13418 break;
13419 }
13420
13421#ifndef VBOX
13422 /*
13423 * We want to create anonymous state, so we need to transition
13424 * the kernel debugger to indicate that DTrace is active. If
13425 * this fails (e.g. because the debugger has modified text in
13426 * some way), we won't continue with the processing.
13427 */
13428 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
13429 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
13430 "enabling ignored.");
13431 dtrace_dof_destroy(dof);
13432 break;
13433 }
13434#endif
13435
13436 /*
13437 * If we haven't allocated an anonymous state, we'll do so now.
13438 */
13439 if ((state = dtrace_anon.dta_state) == NULL) {
13440 state = dtrace_state_create(NULL, NULL);
13441 dtrace_anon.dta_state = state;
13442
13443 if (state == NULL) {
13444 /*
13445 * This basically shouldn't happen: the only
13446 * failure mode from dtrace_state_create() is a
13447 * failure of ddi_soft_state_zalloc() that
13448 * itself should never happen. Still, the
13449 * interface allows for a failure mode, and
13450 * we want to fail as gracefully as possible:
13451 * we'll emit an error message and cease
13452 * processing anonymous state in this case.
13453 */
13454 cmn_err(CE_WARN, "failed to create "
13455 "anonymous state");
13456 dtrace_dof_destroy(dof);
13457 break;
13458 }
13459 }
13460
13461 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
13462 &dtrace_anon.dta_enabling, 0, B_TRUE);
13463
13464 if (rv == 0)
13465 rv = dtrace_dof_options(dof, state);
13466
13467 dtrace_err_verbose = 0;
13468 dtrace_dof_destroy(dof);
13469
13470 if (rv != 0) {
13471 /*
13472 * This is malformed DOF; chuck any anonymous state
13473 * that we created.
13474 */
13475 ASSERT(dtrace_anon.dta_enabling == NULL);
13476 dtrace_state_destroy(state);
13477 dtrace_anon.dta_state = NULL;
13478 break;
13479 }
13480
13481 ASSERT(dtrace_anon.dta_enabling != NULL);
13482 }
13483
13484 if (dtrace_anon.dta_enabling != NULL) {
13485 int rval;
13486
13487 /*
13488 * dtrace_enabling_retain() can only fail because we are
13489 * trying to retain more enablings than are allowed -- but
13490 * we only have one anonymous enabling, and we are guaranteed
13491 * to be allowed at least one retained enabling; we assert
13492 * that dtrace_enabling_retain() returns success.
13493 */
13494 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
13495 ASSERT(rval == 0);
13496
13497 dtrace_enabling_dump(dtrace_anon.dta_enabling);
13498 }
13499}
13500#endif /* !VBOX */
13501
13502/*
13503 * DTrace Helper Functions
13504 */
13505#ifndef VBOX /* No helper stuff */
13506static void
13507dtrace_helper_trace(dtrace_helper_action_t *helper,
13508 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
13509{
13510 uint32_t size, next, nnext, i;
13511 dtrace_helptrace_t *ent;
13512 uint16_t flags = cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
13513
13514 if (!dtrace_helptrace_enabled)
13515 return;
13516
13517 ASSERT(vstate->dtvs_nlocals <= VBDTCAST(int32_t)dtrace_helptrace_nlocals);
13518
13519 /*
13520 * What would a tracing framework be without its own tracing
13521 * framework? (Well, a hell of a lot simpler, for starters...)
13522 */
13523 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
13524 sizeof (uint64_t) - sizeof (uint64_t);
13525
13526 /*
13527 * Iterate until we can allocate a slot in the trace buffer.
13528 */
13529 do {
13530 next = dtrace_helptrace_next;
13531
13532 if (next + size < VBDTCAST(unsigned)dtrace_helptrace_bufsize) {
13533 nnext = next + size;
13534 } else {
13535 nnext = size;
13536 }
13537 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
13538
13539 /*
13540 * We have our slot; fill it in.
13541 */
13542 if (nnext == size)
13543 next = 0;
13544
13545 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
13546 ent->dtht_helper = helper;
13547 ent->dtht_where = where;
13548 ent->dtht_nlocals = vstate->dtvs_nlocals;
13549
13550 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
13551 mstate->dtms_fltoffs : -1;
13552 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
13553 ent->dtht_illval = cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval;
13554
13555 for (i = 0; VBDTCAST(int32_t)i < vstate->dtvs_nlocals; i++) {
13556 dtrace_statvar_t *svar;
13557
13558 if ((svar = vstate->dtvs_locals[i]) == NULL)
13559 continue;
13560
13561 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13562 ent->dtht_locals[i] =
13563 ((uint64_t *)(uintptr_t)svar->dtsv_data)[VBDT_GET_CPUID()];
13564 }
13565}
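/*
 * The lock-free slot reservation above, in miniature: compute the would-be
 * next offset, wrapping to the start of the buffer when the record no
 * longer fits, and compare-and-swap it in; a losing racer simply
 * recomputes and retries.  'example_reserve_slot' is a hypothetical name;
 * the same dtrace_cas32() primitive is assumed.
 */
#if 0 /* illustrative only, not compiled */
static uint32_t
example_reserve_slot(uint32_t *nextp, uint32_t size, uint32_t bufsize)
{
	uint32_t next, nnext;

	do {
		next = *nextp;
		nnext = (next + size < bufsize) ? next + size : size;
	} while (dtrace_cas32(nextp, next, nnext) != next);

	/* If we wrapped, our record starts at offset 0. */
	return (nnext == size ? 0 : next);
}
#endif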
13566
13567static uint64_t
13568dtrace_helper(int which, dtrace_mstate_t *mstate,
13569 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13570{
13571 VBDTTYPE(uint16_t volatile *, uint16_t *)flags = &cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_flags;
13572 uint64_t sarg0 = mstate->dtms_arg[0];
13573 uint64_t sarg1 = mstate->dtms_arg[1];
13574 uint64_t rval VBDTUNASS(666);
13575 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13576 dtrace_helper_action_t *helper;
13577 dtrace_vstate_t *vstate;
13578 dtrace_difo_t *pred;
13579 int i, trace = dtrace_helptrace_enabled;
13580
13581 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13582
13583 if (helpers == NULL)
13584 return (0);
13585
13586 if ((helper = helpers->dthps_actions[which]) == NULL)
13587 return (0);
13588
13589 vstate = &helpers->dthps_vstate;
13590 mstate->dtms_arg[0] = arg0;
13591 mstate->dtms_arg[1] = arg1;
13592
13593 /*
13594 * Now iterate over each helper. If its predicate evaluates to 'true',
13595 * we'll call the corresponding actions. Note that the below calls
13596 * to dtrace_dif_emulate() may set faults in machine state. This is
13597 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
13598 * the stored DIF offset with its own (which is the desired behavior).
13599 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13600 * from machine state; this is okay, too.
13601 */
13602 for (; helper != NULL; helper = helper->dtha_next) {
13603 if ((pred = helper->dtha_predicate) != NULL) {
13604 if (trace)
13605 dtrace_helper_trace(helper, mstate, vstate, 0);
13606
13607 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13608 goto next;
13609
13610 if (*flags & CPU_DTRACE_FAULT)
13611 goto err;
13612 }
13613
13614 for (i = 0; i < helper->dtha_nactions; i++) {
13615 if (trace)
13616 dtrace_helper_trace(helper,
13617 mstate, vstate, i + 1);
13618
13619 rval = dtrace_dif_emulate(helper->dtha_actions[i],
13620 mstate, vstate, state);
13621
13622 if (*flags & CPU_DTRACE_FAULT)
13623 goto err;
13624 }
13625
13626next:
13627 if (trace)
13628 dtrace_helper_trace(helper, mstate, vstate,
13629 DTRACE_HELPTRACE_NEXT);
13630 }
13631
13632 if (trace)
13633 dtrace_helper_trace(helper, mstate, vstate,
13634 DTRACE_HELPTRACE_DONE);
13635
13636 /*
13637	 * Restore the arg0 and arg1 that we saved upon entry.
13638 */
13639 mstate->dtms_arg[0] = sarg0;
13640 mstate->dtms_arg[1] = sarg1;
13641
13642 return (rval);
13643
13644err:
13645 if (trace)
13646 dtrace_helper_trace(helper, mstate, vstate,
13647 DTRACE_HELPTRACE_ERR);
13648
13649 /*
13650	 * Restore the arg0 and arg1 that we saved upon entry.
13651 */
13652 mstate->dtms_arg[0] = sarg0;
13653 mstate->dtms_arg[1] = sarg1;
13654
13655	return (0);
13656}
13657
13658static void
13659dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13660 dtrace_vstate_t *vstate)
13661{
13662 int i;
13663
13664 if (helper->dtha_predicate != NULL)
13665 dtrace_difo_release(helper->dtha_predicate, vstate);
13666
13667 for (i = 0; i < helper->dtha_nactions; i++) {
13668 ASSERT(helper->dtha_actions[i] != NULL);
13669 dtrace_difo_release(helper->dtha_actions[i], vstate);
13670 }
13671
13672 kmem_free(helper->dtha_actions,
13673 helper->dtha_nactions * sizeof (dtrace_difo_t *));
13674 kmem_free(helper, sizeof (dtrace_helper_action_t));
13675}
13676
13677static int
13678dtrace_helper_destroygen(int gen)
13679{
13680 proc_t *p = curproc;
13681 dtrace_helpers_t *help = p->p_dtrace_helpers;
13682 dtrace_vstate_t *vstate;
13683 VBDTTYPE(uint_t,int) i;
13684
13685 ASSERT(MUTEX_HELD(&dtrace_lock));
13686
13687 if (help == NULL || gen > help->dthps_generation)
13688 return (EINVAL);
13689
13690 vstate = &help->dthps_vstate;
13691
13692 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13693 dtrace_helper_action_t *last = NULL, *h, *next;
13694
13695 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13696 next = h->dtha_next;
13697
13698 if (h->dtha_generation == gen) {
13699 if (last != NULL) {
13700 last->dtha_next = next;
13701 } else {
13702 help->dthps_actions[i] = next;
13703 }
13704
13705 dtrace_helper_action_destroy(h, vstate);
13706 } else {
13707 last = h;
13708 }
13709 }
13710 }
13711
13712 /*
13713	 * Iterate until we've cleared out all helper providers with the
13714 * given generation number.
13715 */
13716 for (;;) {
13717 dtrace_helper_provider_t *prov VBDTGCC(NULL);
13718
13719 /*
13720 * Look for a helper provider with the right generation. We
13721 * have to start back at the beginning of the list each time
13722 * because we drop dtrace_lock. It's unlikely that we'll make
13723 * more than two passes.
13724 */
13725 for (i = 0; i < help->dthps_nprovs; i++) {
13726 prov = help->dthps_provs[i];
13727
13728 if (prov->dthp_generation == gen)
13729 break;
13730 }
13731
13732 /*
13733 * If there were no matches, we're done.
13734 */
13735 if (i == help->dthps_nprovs)
13736 break;
13737
13738 /*
13739 * Move the last helper provider into this slot.
13740 */
13741 help->dthps_nprovs--;
13742 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
13743 help->dthps_provs[help->dthps_nprovs] = NULL;
13744
13745 mutex_exit(&dtrace_lock);
13746
13747 /*
13748 * If we have a meta provider, remove this helper provider.
13749 */
13750 mutex_enter(&dtrace_meta_lock);
13751 if (dtrace_meta_pid != NULL) {
13752 ASSERT(dtrace_deferred_pid == NULL);
13753 dtrace_helper_provider_remove(&prov->dthp_prov,
13754 p->p_pid);
13755 }
13756 mutex_exit(&dtrace_meta_lock);
13757
13758 dtrace_helper_provider_destroy(prov);
13759
13760 mutex_enter(&dtrace_lock);
13761 }
13762
13763 return (0);
13764}
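/*
 * The deletion idiom above, distilled: because the lock protecting the
 * array must be dropped for the expensive teardown, the scan restarts from
 * index 0 on every pass instead of trusting a saved position, and the
 * vacated slot is filled by swapping in the last element.  All 'example_*'
 * names are hypothetical.
 */
#if 0 /* illustrative only, not compiled */
typedef struct example_item {
	int	ei_gen;
} example_item_t;

typedef struct example_tab {
	example_item_t	**et_items;
	uint_t		et_nitems;
} example_tab_t;

static kmutex_t example_lock;
static void example_item_destroy(example_item_t *);

static void
example_purge(example_tab_t *tab, int gen)
{
	example_item_t *doomed;
	uint_t i;

	mutex_enter(&example_lock);

	for (;;) {
		for (i = 0; i < tab->et_nitems; i++) {
			if (tab->et_items[i]->ei_gen == gen)
				break;
		}

		if (i == tab->et_nitems)
			break;			/* nothing left to purge */

		doomed = tab->et_items[i];
		tab->et_nitems--;
		tab->et_items[i] = tab->et_items[tab->et_nitems];
		tab->et_items[tab->et_nitems] = NULL;

		mutex_exit(&example_lock);
		example_item_destroy(doomed);	/* expensive; lock dropped */
		mutex_enter(&example_lock);
	}

	mutex_exit(&example_lock);
}
#endif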
13765
13766static int
13767dtrace_helper_validate(dtrace_helper_action_t *helper)
13768{
13769 int err = 0, i;
13770 dtrace_difo_t *dp;
13771
13772 if ((dp = helper->dtha_predicate) != NULL)
13773 err += dtrace_difo_validate_helper(dp);
13774
13775 for (i = 0; i < helper->dtha_nactions; i++)
13776 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
13777
13778 return (err == 0);
13779}
13780
13781static int
13782dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
13783{
13784 dtrace_helpers_t *help;
13785 dtrace_helper_action_t *helper, *last;
13786 dtrace_actdesc_t *act;
13787 dtrace_vstate_t *vstate;
13788 dtrace_predicate_t *pred;
13789 int count = 0, nactions = 0, i;
13790
13791 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
13792 return (EINVAL);
13793
13794 help = curproc->p_dtrace_helpers;
13795 last = help->dthps_actions[which];
13796 vstate = &help->dthps_vstate;
13797
13798 for (count = 0; last != NULL; last = last->dtha_next) {
13799 count++;
13800 if (last->dtha_next == NULL)
13801 break;
13802 }
13803
13804 /*
13805 * If we already have dtrace_helper_actions_max helper actions for this
13806 * helper action type, we'll refuse to add a new one.
13807 */
13808 if (count >= dtrace_helper_actions_max)
13809 return (ENOSPC);
13810
13811 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
13812 helper->dtha_generation = help->dthps_generation;
13813
13814 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
13815 ASSERT(pred->dtp_difo != NULL);
13816 dtrace_difo_hold(pred->dtp_difo);
13817 helper->dtha_predicate = pred->dtp_difo;
13818 }
13819
13820 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
13821 if (act->dtad_kind != DTRACEACT_DIFEXPR)
13822 goto err;
13823
13824 if (act->dtad_difo == NULL)
13825 goto err;
13826
13827 nactions++;
13828 }
13829
13830 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
13831 (helper->dtha_nactions = nactions), KM_SLEEP);
13832
13833 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
13834 dtrace_difo_hold(act->dtad_difo);
13835 helper->dtha_actions[i++] = act->dtad_difo;
13836 }
13837
13838 if (!dtrace_helper_validate(helper))
13839 goto err;
13840
13841 if (last == NULL) {
13842 help->dthps_actions[which] = helper;
13843 } else {
13844 last->dtha_next = helper;
13845 }
13846
13847 if (vstate->dtvs_nlocals > VBDTCAST(int32_t)dtrace_helptrace_nlocals) {
13848 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
13849 dtrace_helptrace_next = 0;
13850 }
13851
13852 return (0);
13853err:
13854 dtrace_helper_action_destroy(helper, vstate);
13855 return (EINVAL);
13856}
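
/*
 * Illustrative sketch, not from the original sources: helper actions reach
 * dtrace_helper_action_add() from userland as compiled D whose probe
 * description is literally dtrace:helper:ustack (see the strcmp() checks
 * in dtrace_helper_slurp() below). A hypothetical ustack helper:
 *
 *	dtrace:helper:ustack:
 *	{
 *		this->annotation = "jit-frame";
 *	}
 *
 * Each clause may carry a predicate plus DIF expressions only; any action
 * kind other than DTRACEACT_DIFEXPR takes the goto err path above.
 */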
13857
13858static void
13859dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
13860 dof_helper_t *dofhp)
13861{
13862 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
13863
13864 mutex_enter(&dtrace_meta_lock);
13865 mutex_enter(&dtrace_lock);
13866
13867 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
13868 /*
13869 * If the dtrace module is loaded but not attached, or if
13870		 * there isn't a meta provider registered to deal with
13871 * these provider descriptions, we need to postpone creating
13872 * the actual providers until later.
13873 */
13874
13875 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
13876 dtrace_deferred_pid != help) {
13877 help->dthps_deferred = 1;
13878 help->dthps_pid = p->p_pid;
13879 help->dthps_next = dtrace_deferred_pid;
13880 help->dthps_prev = NULL;
13881 if (dtrace_deferred_pid != NULL)
13882 dtrace_deferred_pid->dthps_prev = help;
13883 dtrace_deferred_pid = help;
13884 }
13885
13886 mutex_exit(&dtrace_lock);
13887
13888 } else if (dofhp != NULL) {
13889 /*
13890 * If the dtrace module is loaded and we have a particular
13891 * helper provider description, pass that off to the
13892 * meta provider.
13893 */
13894
13895 mutex_exit(&dtrace_lock);
13896
13897 dtrace_helper_provide(dofhp, p->p_pid);
13898
13899 } else {
13900 /*
13901 * Otherwise, just pass all the helper provider descriptions
13902 * off to the meta provider.
13903 */
13904
13905 VBDTTYPE(uint_t,int) i;
13906 mutex_exit(&dtrace_lock);
13907
13908 for (i = 0; i < help->dthps_nprovs; i++) {
13909 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
13910 p->p_pid);
13911 }
13912 }
13913
13914 mutex_exit(&dtrace_meta_lock);
13915}
13916
13917static int
13918dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
13919{
13920 dtrace_helpers_t *help;
13921 dtrace_helper_provider_t *hprov, **tmp_provs;
13922 uint_t tmp_maxprovs, i;
13923
13924 ASSERT(MUTEX_HELD(&dtrace_lock));
13925
13926 help = curproc->p_dtrace_helpers;
13927 ASSERT(help != NULL);
13928
13929 /*
13930 * If we already have dtrace_helper_providers_max helper providers,
13931	 * we'll refuse to add a new one.
13932 */
13933 if (help->dthps_nprovs >= dtrace_helper_providers_max)
13934 return (ENOSPC);
13935
13936 /*
13937 * Check to make sure this isn't a duplicate.
13938 */
13939 for (i = 0; i < help->dthps_nprovs; i++) {
13940 if (dofhp->dofhp_addr ==
13941 help->dthps_provs[i]->dthp_prov.dofhp_addr)
13942 return (EALREADY);
13943 }
13944
13945 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
13946 hprov->dthp_prov = *dofhp;
13947 hprov->dthp_ref = 1;
13948 hprov->dthp_generation = gen;
13949
13950 /*
13951 * Allocate a bigger table for helper providers if it's already full.
13952 */
13953 if (help->dthps_maxprovs == help->dthps_nprovs) {
13954 tmp_maxprovs = help->dthps_maxprovs;
13955 tmp_provs = help->dthps_provs;
13956
13957 if (help->dthps_maxprovs == 0)
13958 help->dthps_maxprovs = 2;
13959 else
13960 help->dthps_maxprovs *= 2;
13961 if (help->dthps_maxprovs > dtrace_helper_providers_max)
13962 help->dthps_maxprovs = dtrace_helper_providers_max;
13963
13964 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
13965
13966 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
13967 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
13968
13969 if (tmp_provs != NULL) {
13970 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
13971 sizeof (dtrace_helper_provider_t *));
13972 kmem_free(tmp_provs, tmp_maxprovs *
13973 sizeof (dtrace_helper_provider_t *));
13974 }
13975 }
13976
13977 help->dthps_provs[help->dthps_nprovs] = hprov;
13978 help->dthps_nprovs++;
13979
13980 return (0);
13981}
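
/*
 * The table growth above is plain amortized doubling. Assuming the default
 * dtrace_helper_providers_max of 32 (an assumption; the tunable is defined
 * elsewhere in this file), repeated adds grow the capacity as:
 *
 *	0 -> 2 -> 4 -> 8 -> 16 -> 32 (capped; further adds fail with ENOSPC)
 */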
13982
13983static void
13984dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
13985{
13986 mutex_enter(&dtrace_lock);
13987
13988 if (--hprov->dthp_ref == 0) {
13989 dof_hdr_t *dof;
13990 mutex_exit(&dtrace_lock);
13991 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
13992 dtrace_dof_destroy(dof);
13993 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
13994 } else {
13995 mutex_exit(&dtrace_lock);
13996 }
13997}
13998
13999static int
14000dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
14001{
14002 uintptr_t daddr = (uintptr_t)dof;
14003 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
14004 dof_provider_t *provider;
14005 dof_probe_t *probe;
14006 uint8_t *arg;
14007 char *strtab, *typestr;
14008 dof_stridx_t typeidx;
14009 size_t typesz;
14010 uint_t nprobes, j, k;
14011
14012 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
14013
14014 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
14015 dtrace_dof_error(dof, "misaligned section offset");
14016 return (-1);
14017 }
14018
14019 /*
14020 * The section needs to be large enough to contain the DOF provider
14021 * structure appropriate for the given version.
14022 */
14023 if (sec->dofs_size <
14024 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
14025 offsetof(dof_provider_t, dofpv_prenoffs) :
14026 sizeof (dof_provider_t))) {
14027 dtrace_dof_error(dof, "provider section too small");
14028 return (-1);
14029 }
14030
14031 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
14032 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
14033 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
14034 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
14035 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
14036
14037 if (str_sec == NULL || prb_sec == NULL ||
14038 arg_sec == NULL || off_sec == NULL)
14039 return (-1);
14040
14041 enoff_sec = NULL;
14042
14043 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14044 provider->dofpv_prenoffs != DOF_SECT_NONE &&
14045 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
14046 provider->dofpv_prenoffs)) == NULL)
14047 return (-1);
14048
14049 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
14050
14051 if (provider->dofpv_name >= str_sec->dofs_size ||
14052 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
14053 dtrace_dof_error(dof, "invalid provider name");
14054 return (-1);
14055 }
14056
14057 if (prb_sec->dofs_entsize == 0 ||
14058 prb_sec->dofs_entsize > prb_sec->dofs_size) {
14059 dtrace_dof_error(dof, "invalid entry size");
14060 return (-1);
14061 }
14062
14063 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
14064 dtrace_dof_error(dof, "misaligned entry size");
14065 return (-1);
14066 }
14067
14068 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
14069 dtrace_dof_error(dof, "invalid entry size");
14070 return (-1);
14071 }
14072
14073 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
14074 dtrace_dof_error(dof, "misaligned section offset");
14075 return (-1);
14076 }
14077
14078 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
14079 dtrace_dof_error(dof, "invalid entry size");
14080 return (-1);
14081 }
14082
14083 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
14084
14085 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
14086
14087 /*
14088 * Take a pass through the probes to check for errors.
14089 */
14090 for (j = 0; j < nprobes; j++) {
14091 probe = (dof_probe_t *)(uintptr_t)(daddr +
14092 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
14093
14094 if (probe->dofpr_func >= str_sec->dofs_size) {
14095 dtrace_dof_error(dof, "invalid function name");
14096 return (-1);
14097 }
14098
14099 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
14100 dtrace_dof_error(dof, "function name too long");
14101 return (-1);
14102 }
14103
14104 if (probe->dofpr_name >= str_sec->dofs_size ||
14105 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
14106 dtrace_dof_error(dof, "invalid probe name");
14107 return (-1);
14108 }
14109
14110 /*
14111 * The offset count must not wrap the index, and the offsets
14112 * must also not overflow the section's data.
14113 */
14114 if (probe->dofpr_offidx + probe->dofpr_noffs <
14115 probe->dofpr_offidx ||
14116 (probe->dofpr_offidx + probe->dofpr_noffs) *
14117 off_sec->dofs_entsize > off_sec->dofs_size) {
14118 dtrace_dof_error(dof, "invalid probe offset");
14119 return (-1);
14120 }
14121
14122 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
14123 /*
14124 * If there's no is-enabled offset section, make sure
14125 * there aren't any is-enabled offsets. Otherwise
14126 * perform the same checks as for probe offsets
14127 * (immediately above).
14128 */
14129 if (enoff_sec == NULL) {
14130 if (probe->dofpr_enoffidx != 0 ||
14131 probe->dofpr_nenoffs != 0) {
14132 dtrace_dof_error(dof, "is-enabled "
14133 "offsets with null section");
14134 return (-1);
14135 }
14136 } else if (probe->dofpr_enoffidx +
14137 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
14138 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
14139 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
14140 dtrace_dof_error(dof, "invalid is-enabled "
14141 "offset");
14142 return (-1);
14143 }
14144
14145 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
14146 dtrace_dof_error(dof, "zero probe and "
14147 "is-enabled offsets");
14148 return (-1);
14149 }
14150 } else if (probe->dofpr_noffs == 0) {
14151 dtrace_dof_error(dof, "zero probe offsets");
14152 return (-1);
14153 }
14154
14155 if (probe->dofpr_argidx + probe->dofpr_xargc <
14156 probe->dofpr_argidx ||
14157 (probe->dofpr_argidx + probe->dofpr_xargc) *
14158 arg_sec->dofs_entsize > arg_sec->dofs_size) {
14159 dtrace_dof_error(dof, "invalid args");
14160 return (-1);
14161 }
14162
14163 typeidx = probe->dofpr_nargv;
14164 typestr = strtab + probe->dofpr_nargv;
14165 for (k = 0; k < probe->dofpr_nargc; k++) {
14166 if (typeidx >= str_sec->dofs_size) {
14167 dtrace_dof_error(dof, "bad "
14168 "native argument type");
14169 return (-1);
14170 }
14171
14172 typesz = strlen(typestr) + 1;
14173 if (typesz > DTRACE_ARGTYPELEN) {
14174 dtrace_dof_error(dof, "native "
14175 "argument type too long");
14176 return (-1);
14177 }
14178 typeidx += VBDTCAST(dof_stridx_t)typesz;
14179 typestr += typesz;
14180 }
14181
14182 typeidx = probe->dofpr_xargv;
14183 typestr = strtab + probe->dofpr_xargv;
14184 for (k = 0; k < probe->dofpr_xargc; k++) {
14185 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
14186 dtrace_dof_error(dof, "bad "
14187 "native argument index");
14188 return (-1);
14189 }
14190
14191 if (typeidx >= str_sec->dofs_size) {
14192 dtrace_dof_error(dof, "bad "
14193 "translated argument type");
14194 return (-1);
14195 }
14196
14197 typesz = strlen(typestr) + 1;
14198 if (typesz > DTRACE_ARGTYPELEN) {
14199 dtrace_dof_error(dof, "translated argument "
14200 "type too long");
14201 return (-1);
14202 }
14203
14204 typeidx += VBDTCAST(dof_stridx_t)typesz;
14205 typestr += typesz;
14206 }
14207 }
14208
14209 return (0);
14210}
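
/*
 * A worked example of the unsigned wrap check used repeatedly above
 * (illustrative, not from the original sources): assuming 32-bit unsigned
 * arithmetic, dofpr_offidx = 0xfffffff0 and dofpr_noffs = 0x20 give
 * 0xfffffff0 + 0x20 == 0x10, which is less than the original index, so
 * "idx + cnt < idx" rejects the wrap before the multiplication by
 * dofs_entsize could be fooled into passing the size comparison.
 */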
14211
14212static int
14213dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
14214{
14215 dtrace_helpers_t *help;
14216 dtrace_vstate_t *vstate;
14217 dtrace_enabling_t *enab = NULL;
14218 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
14219 uintptr_t daddr = (uintptr_t)dof;
14220
14221 ASSERT(MUTEX_HELD(&dtrace_lock));
14222
14223 if ((help = curproc->p_dtrace_helpers) == NULL)
14224 help = dtrace_helpers_create(curproc);
14225
14226 vstate = &help->dthps_vstate;
14227
14228 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
14229 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
14230 dtrace_dof_destroy(dof);
14231 return (rv);
14232 }
14233
14234 /*
14235 * Look for helper providers and validate their descriptions.
14236 */
14237 if (dhp != NULL) {
14238 for (i = 0; i < VBDTCAST(int)dof->dofh_secnum; i++) {
14239 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
14240 dof->dofh_secoff + i * dof->dofh_secsize);
14241
14242 if (sec->dofs_type != DOF_SECT_PROVIDER)
14243 continue;
14244
14245 if (dtrace_helper_provider_validate(dof, sec) != 0) {
14246 dtrace_enabling_destroy(enab);
14247 dtrace_dof_destroy(dof);
14248 return (-1);
14249 }
14250
14251 nprovs++;
14252 }
14253 }
14254
14255 /*
14256 * Now we need to walk through the ECB descriptions in the enabling.
14257 */
14258 for (i = 0; i < enab->dten_ndesc; i++) {
14259 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
14260 dtrace_probedesc_t *desc = &ep->dted_probe;
14261
14262 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
14263 continue;
14264
14265 if (strcmp(desc->dtpd_mod, "helper") != 0)
14266 continue;
14267
14268 if (strcmp(desc->dtpd_func, "ustack") != 0)
14269 continue;
14270
14271 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
14272 ep)) != 0) {
14273 /*
14274 * Adding this helper action failed -- we are now going
14275 * to rip out the entire generation and return failure.
14276 */
14277 (void) dtrace_helper_destroygen(help->dthps_generation);
14278 dtrace_enabling_destroy(enab);
14279 dtrace_dof_destroy(dof);
14280 return (-1);
14281 }
14282
14283 nhelpers++;
14284 }
14285
14286 if (nhelpers < enab->dten_ndesc)
14287 dtrace_dof_error(dof, "unmatched helpers");
14288
14289 gen = help->dthps_generation++;
14290 dtrace_enabling_destroy(enab);
14291
14292 if (dhp != NULL && nprovs > 0) {
14293 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
14294 if (dtrace_helper_provider_add(dhp, gen) == 0) {
14295 mutex_exit(&dtrace_lock);
14296 dtrace_helper_provider_register(curproc, help, dhp);
14297 mutex_enter(&dtrace_lock);
14298
14299 destroy = 0;
14300 }
14301 }
14302
14303 if (destroy)
14304 dtrace_dof_destroy(dof);
14305
14306 return (gen);
14307}
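
/*
 * Sketch of the helper lifecycle (illustrative, not from the original
 * sources): dtrace_helper_slurp() returns the generation number it
 * consumed, and the consumer later hands that number back to remove
 * exactly what it added:
 *
 *	gen = dtrace_helper_slurp(dof, dhp);	// via DTRACEHIOC_ADD[DOF]
 *	...
 *	dtrace_helper_destroygen(gen);		// via DTRACEHIOC_REMOVE
 *
 * A failure while walking the ECB descriptions above rips out the whole
 * in-flight generation, so a partially-added enabling never survives.
 */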
14308
14309static dtrace_helpers_t *
14310dtrace_helpers_create(proc_t *p)
14311{
14312 dtrace_helpers_t *help;
14313
14314 ASSERT(MUTEX_HELD(&dtrace_lock));
14315 ASSERT(p->p_dtrace_helpers == NULL);
14316
14317 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
14318 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
14319 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
14320
14321 p->p_dtrace_helpers = help;
14322 dtrace_helpers++;
14323
14324 return (help);
14325}
14326
14327static void
14328dtrace_helpers_destroy(void)
14329{
14330 dtrace_helpers_t *help;
14331 dtrace_vstate_t *vstate;
14332 proc_t *p = curproc;
14333 VBDTTYPE(uint_t, int) i;
14334
14335 mutex_enter(&dtrace_lock);
14336
14337 ASSERT(p->p_dtrace_helpers != NULL);
14338 ASSERT(dtrace_helpers > 0);
14339
14340 help = p->p_dtrace_helpers;
14341 vstate = &help->dthps_vstate;
14342
14343 /*
14344 * We're now going to lose the help from this process.
14345 */
14346 p->p_dtrace_helpers = NULL;
14347 dtrace_sync();
14348
14349 /*
14350	 * Destroy the helper actions.
14351 */
14352 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14353 dtrace_helper_action_t *h, *next;
14354
14355 for (h = help->dthps_actions[i]; h != NULL; h = next) {
14356 next = h->dtha_next;
14357 dtrace_helper_action_destroy(h, vstate);
14358 h = next;
14359 }
14360 }
14361
14362 mutex_exit(&dtrace_lock);
14363
14364 /*
14365 * Destroy the helper providers.
14366 */
14367 if (help->dthps_maxprovs > 0) {
14368 mutex_enter(&dtrace_meta_lock);
14369 if (dtrace_meta_pid != NULL) {
14370 ASSERT(dtrace_deferred_pid == NULL);
14371
14372 for (i = 0; i < help->dthps_nprovs; i++) {
14373 dtrace_helper_provider_remove(
14374 &help->dthps_provs[i]->dthp_prov, p->p_pid);
14375 }
14376 } else {
14377 mutex_enter(&dtrace_lock);
14378 ASSERT(help->dthps_deferred == 0 ||
14379 help->dthps_next != NULL ||
14380 help->dthps_prev != NULL ||
14381 help == dtrace_deferred_pid);
14382
14383 /*
14384 * Remove the helper from the deferred list.
14385 */
14386 if (help->dthps_next != NULL)
14387 help->dthps_next->dthps_prev = help->dthps_prev;
14388 if (help->dthps_prev != NULL)
14389 help->dthps_prev->dthps_next = help->dthps_next;
14390 if (dtrace_deferred_pid == help) {
14391 dtrace_deferred_pid = help->dthps_next;
14392 ASSERT(help->dthps_prev == NULL);
14393 }
14394
14395 mutex_exit(&dtrace_lock);
14396 }
14397
14398 mutex_exit(&dtrace_meta_lock);
14399
14400 for (i = 0; i < help->dthps_nprovs; i++) {
14401 dtrace_helper_provider_destroy(help->dthps_provs[i]);
14402 }
14403
14404 kmem_free(help->dthps_provs, help->dthps_maxprovs *
14405 sizeof (dtrace_helper_provider_t *));
14406 }
14407
14408 mutex_enter(&dtrace_lock);
14409
14410 dtrace_vstate_fini(&help->dthps_vstate);
14411 kmem_free(help->dthps_actions,
14412 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
14413 kmem_free(help, sizeof (dtrace_helpers_t));
14414
14415 --dtrace_helpers;
14416 mutex_exit(&dtrace_lock);
14417}
14418
14419static void
14420dtrace_helpers_duplicate(proc_t *from, proc_t *to)
14421{
14422 dtrace_helpers_t *help, *newhelp;
14423 dtrace_helper_action_t *helper, *new, *last;
14424 dtrace_difo_t *dp;
14425 dtrace_vstate_t *vstate;
14426 int i, j, sz, hasprovs = 0;
14427
14428 mutex_enter(&dtrace_lock);
14429 ASSERT(from->p_dtrace_helpers != NULL);
14430 ASSERT(dtrace_helpers > 0);
14431
14432 help = from->p_dtrace_helpers;
14433 newhelp = dtrace_helpers_create(to);
14434 ASSERT(to->p_dtrace_helpers != NULL);
14435
14436 newhelp->dthps_generation = help->dthps_generation;
14437 vstate = &newhelp->dthps_vstate;
14438
14439 /*
14440 * Duplicate the helper actions.
14441 */
14442 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
14443 if ((helper = help->dthps_actions[i]) == NULL)
14444 continue;
14445
14446 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
14447 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
14448 KM_SLEEP);
14449 new->dtha_generation = helper->dtha_generation;
14450
14451 if ((dp = helper->dtha_predicate) != NULL) {
14452 dp = dtrace_difo_duplicate(dp, vstate);
14453 new->dtha_predicate = dp;
14454 }
14455
14456 new->dtha_nactions = helper->dtha_nactions;
14457 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
14458 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
14459
14460 for (j = 0; j < new->dtha_nactions; j++) {
14461 dtrace_difo_t *dp2 = helper->dtha_actions[j];
14462
14463 ASSERT(dp2 != NULL);
14464 dp2 = dtrace_difo_duplicate(dp2, vstate);
14465 new->dtha_actions[j] = dp2;
14466 }
14467
14468 if (last != NULL) {
14469 last->dtha_next = new;
14470 } else {
14471 newhelp->dthps_actions[i] = new;
14472 }
14473
14474 last = new;
14475 }
14476 }
14477
14478 /*
14479 * Duplicate the helper providers and register them with the
14480 * DTrace framework.
14481 */
14482 if (help->dthps_nprovs > 0) {
14483 newhelp->dthps_nprovs = help->dthps_nprovs;
14484 newhelp->dthps_maxprovs = help->dthps_nprovs;
14485 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14486 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14487 for (i = 0; i < VBDTCAST(int)newhelp->dthps_nprovs; i++) {
14488 newhelp->dthps_provs[i] = help->dthps_provs[i];
14489 newhelp->dthps_provs[i]->dthp_ref++;
14490 }
14491
14492 hasprovs = 1;
14493 }
14494
14495 mutex_exit(&dtrace_lock);
14496
14497 if (hasprovs)
14498 dtrace_helper_provider_register(to, newhelp, NULL);
14499}
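
/*
 * Note on the copy semantics above (editorial, not from the original
 * sources): across fork(), helper actions are deep-copied -- every DIFO
 * is duplicated into the child's vstate via dtrace_difo_duplicate() --
 * while helper providers are shared by reference (dthp_ref++); the
 * underlying DOF is freed only when dtrace_helper_provider_destroy()
 * drops the final reference, in whichever process that happens.
 */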
14500
14501/*
14502 * DTrace Hook Functions
14503 */
14504static void
14505dtrace_module_loaded(struct modctl *ctl)
14506{
14507 dtrace_provider_t *prv;
14508
14509 mutex_enter(&dtrace_provider_lock);
14510 mutex_enter(&mod_lock);
14511
14512 ASSERT(ctl->mod_busy);
14513
14514 /*
14515	 * We're going to call each provider's per-module provide operation
14516 * specifying only this module.
14517 */
14518 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14519 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14520
14521 mutex_exit(&mod_lock);
14522 mutex_exit(&dtrace_provider_lock);
14523
14524 /*
14525 * If we have any retained enablings, we need to match against them.
14526 * Enabling probes requires that cpu_lock be held, and we cannot hold
14527 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14528 * module. (In particular, this happens when loading scheduling
14529 * classes.) So if we have any retained enablings, we need to dispatch
14530 * our task queue to do the match for us.
14531 */
14532 mutex_enter(&dtrace_lock);
14533
14534 if (dtrace_retained == NULL) {
14535 mutex_exit(&dtrace_lock);
14536 return;
14537 }
14538
14539 (void) taskq_dispatch(dtrace_taskq,
14540 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14541
14542 mutex_exit(&dtrace_lock);
14543
14544 /*
14545 * And now, for a little heuristic sleaze: in general, we want to
14546 * match modules as soon as they load. However, we cannot guarantee
14547 * this, because it would lead us to the lock ordering violation
14548 * outlined above. The common case, of course, is that cpu_lock is
14549 * _not_ held -- so we delay here for a clock tick, hoping that that's
14550 * long enough for the task queue to do its work. If it's not, it's
14551 * not a serious problem -- it just means that the module that we
14552 * just loaded may not be immediately instrumentable.
14553 */
14554 delay(1);
14555}
14556
14557static void
14558dtrace_module_unloaded(struct modctl *ctl)
14559{
14560 dtrace_probe_t template, *probe, *first, *next;
14561 dtrace_provider_t *prov;
14562
14563 template.dtpr_mod = ctl->mod_modname;
14564
14565 mutex_enter(&dtrace_provider_lock);
14566 mutex_enter(&mod_lock);
14567 mutex_enter(&dtrace_lock);
14568
14569 if (dtrace_bymod == NULL) {
14570 /*
14571 * The DTrace module is loaded (obviously) but not attached;
14572 * we don't have any work to do.
14573 */
14574 mutex_exit(&dtrace_provider_lock);
14575 mutex_exit(&mod_lock);
14576 mutex_exit(&dtrace_lock);
14577 return;
14578 }
14579
14580 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14581 probe != NULL; probe = probe->dtpr_nextmod) {
14582 if (probe->dtpr_ecb != NULL) {
14583 mutex_exit(&dtrace_provider_lock);
14584 mutex_exit(&mod_lock);
14585 mutex_exit(&dtrace_lock);
14586
14587 /*
14588 * This shouldn't _actually_ be possible -- we're
14589 * unloading a module that has an enabled probe in it.
14590 * (It's normally up to the provider to make sure that
14591 * this can't happen.) However, because dtps_enable()
14592 * doesn't have a failure mode, there can be an
14593 * enable/unload race. Upshot: we don't want to
14594 * assert, but we're not going to disable the
14595 * probe, either.
14596 */
14597 if (dtrace_err_verbose) {
14598 cmn_err(CE_WARN, "unloaded module '%s' had "
14599 "enabled probes", ctl->mod_modname);
14600 }
14601
14602 return;
14603 }
14604 }
14605
14606 probe = first;
14607
14608 for (first = NULL; probe != NULL; probe = next) {
14609 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14610
14611 dtrace_probes[probe->dtpr_id - 1] = NULL;
14612
14613 next = probe->dtpr_nextmod;
14614 dtrace_hash_remove(dtrace_bymod, probe);
14615 dtrace_hash_remove(dtrace_byfunc, probe);
14616 dtrace_hash_remove(dtrace_byname, probe);
14617
14618 if (first == NULL) {
14619 first = probe;
14620 probe->dtpr_nextmod = NULL;
14621 } else {
14622 probe->dtpr_nextmod = first;
14623 first = probe;
14624 }
14625 }
14626
14627 /*
14628 * We've removed all of the module's probes from the hash chains and
14629 * from the probe array. Now issue a dtrace_sync() to be sure that
14630 * everyone has cleared out from any probe array processing.
14631 */
14632 dtrace_sync();
14633
14634 for (probe = first; probe != NULL; probe = first) {
14635 first = probe->dtpr_nextmod;
14636 prov = probe->dtpr_provider;
14637 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
14638 probe->dtpr_arg);
14639 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
14640 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
14641 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
14642 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
14643 kmem_free(probe, sizeof (dtrace_probe_t));
14644 }
14645
14646 mutex_exit(&dtrace_lock);
14647 mutex_exit(&mod_lock);
14648 mutex_exit(&dtrace_provider_lock);
14649}
14650
14651#endif /* !VBOX */
14652
14653VBDTSTATIC void
14654dtrace_suspend(void)
14655{
14656 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
14657}
14658
14659VBDTSTATIC void
14660dtrace_resume(void)
14661{
14662 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
14663}
14664
14665#ifdef VBOX
14666typedef enum {
14667 CPU_INVALID,
14668 CPU_CONFIG,
14669 CPU_UNCONFIG
14670} cpu_setup_t;
14671#endif
14672
14673
14674static int
14675dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
14676{
14677 ASSERT(MUTEX_HELD(&cpu_lock));
14678 mutex_enter(&dtrace_lock);
14679
14680 switch (what) {
14681 case CPU_CONFIG: {
14682 dtrace_state_t *state;
14683 dtrace_optval_t *opt, rs, c;
14684
14685 /*
14686 * For now, we only allocate a new buffer for anonymous state.
14687 */
14688 if ((state = dtrace_anon.dta_state) == NULL)
14689 break;
14690
14691 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14692 break;
14693
14694 opt = state->dts_options;
14695 c = opt[DTRACEOPT_CPU];
14696
14697 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
14698 break;
14699
14700 /*
14701 * Regardless of what the actual policy is, we're going to
14702 * temporarily set our resize policy to be manual. We're
14703 * also going to temporarily set our CPU option to denote
14704 * the newly configured CPU.
14705 */
14706 rs = opt[DTRACEOPT_BUFRESIZE];
14707 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
14708 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
14709
14710 (void) dtrace_state_buffers(state);
14711
14712 opt[DTRACEOPT_BUFRESIZE] = rs;
14713 opt[DTRACEOPT_CPU] = c;
14714
14715 break;
14716 }
14717
14718 case CPU_UNCONFIG:
14719 /*
14720 * We don't free the buffer in the CPU_UNCONFIG case. (The
14721 * buffer will be freed when the consumer exits.)
14722 */
14723 break;
14724
14725 default:
14726 break;
14727 }
14728
14729 mutex_exit(&dtrace_lock);
14730 return (0);
14731}
14732
14733#ifndef VBOX
14734static void
14735dtrace_cpu_setup_initial(processorid_t cpu)
14736{
14737 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
14738}
14739#endif /* !VBOX */
14740
14741static void
14742dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
14743{
14744 if (dtrace_toxranges >= dtrace_toxranges_max) {
14745 int osize, nsize;
14746 dtrace_toxrange_t *range;
14747
14748 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14749
14750 if (osize == 0) {
14751 ASSERT(dtrace_toxrange == NULL);
14752 ASSERT(dtrace_toxranges_max == 0);
14753 dtrace_toxranges_max = 1;
14754 } else {
14755 dtrace_toxranges_max <<= 1;
14756 }
14757
14758 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14759 range = kmem_zalloc(nsize, KM_SLEEP);
14760
14761 if (dtrace_toxrange != NULL) {
14762 ASSERT(osize != 0);
14763 bcopy(dtrace_toxrange, range, osize);
14764 kmem_free(dtrace_toxrange, osize);
14765 }
14766
14767 dtrace_toxrange = range;
14768 }
14769
14770 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
14771 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
14772
14773 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
14774 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
14775 dtrace_toxranges++;
14776}
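
/*
 * Illustrative sketch, not from the original sources: a platform's
 * dtrace_toxic_ranges() implementation simply invokes the callback once
 * per virtual-address range that probe context must never touch, e.g.:
 *
 *	void
 *	dtrace_toxic_ranges(void (*func)(uintptr_t, uintptr_t))
 *	{
 *		func(hole_start, hole_end);	-- hypothetical VA hole
 *	}
 *
 * dtrace_attach() below passes dtrace_toxrange_add as that callback.
 */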
14777
14778/*
14779 * DTrace Driver Cookbook Functions
14780 */
14781#ifdef VBOX
14782int dtrace_attach(ddi_attach_cmd_t cmd)
14783#else
14784/*ARGSUSED*/
14785static int
14786dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
14787#endif
14788{
14789 dtrace_provider_id_t id;
14790 dtrace_state_t *state = NULL;
14791 dtrace_enabling_t *enab;
14792
14793#ifdef VBOX
14794 if ( VBoxDtMutexInit(&dtrace_lock)
14795 || VBoxDtMutexInit(&dtrace_provider_lock)
14796 || VBoxDtMutexInit(&dtrace_meta_lock)
14797# ifdef DEBUG
14798	    || VBoxDtMutexInit(&dtrace_errlock)
14799# endif
14800 )
14801 return (DDI_FAILURE);
14802#endif
14803
14804 mutex_enter(&cpu_lock);
14805 mutex_enter(&dtrace_provider_lock);
14806 mutex_enter(&dtrace_lock);
14807
14808#ifndef VBOX
14809 if (ddi_soft_state_init(&dtrace_softstate,
14810 sizeof (dtrace_state_t), 0) != 0) {
14811 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
14812 mutex_exit(&cpu_lock);
14813 mutex_exit(&dtrace_provider_lock);
14814 mutex_exit(&dtrace_lock);
14815 return (DDI_FAILURE);
14816 }
14817
14818 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
14819 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
14820 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
14821 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
14822 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
14823 ddi_remove_minor_node(devi, NULL);
14824 ddi_soft_state_fini(&dtrace_softstate);
14825 mutex_exit(&cpu_lock);
14826 mutex_exit(&dtrace_provider_lock);
14827 mutex_exit(&dtrace_lock);
14828 return (DDI_FAILURE);
14829 }
14830
14831 ddi_report_dev(devi);
14832 dtrace_devi = devi;
14833
14834 dtrace_modload = dtrace_module_loaded;
14835 dtrace_modunload = dtrace_module_unloaded;
14836 dtrace_cpu_init = dtrace_cpu_setup_initial;
14837 dtrace_helpers_cleanup = dtrace_helpers_destroy;
14838 dtrace_helpers_fork = dtrace_helpers_duplicate;
14839 dtrace_cpustart_init = dtrace_suspend;
14840 dtrace_cpustart_fini = dtrace_resume;
14841 dtrace_debugger_init = dtrace_suspend;
14842 dtrace_debugger_fini = dtrace_resume;
14843
14844 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
14845#else
14846	/** @todo some of these hooks need checking out! */
14847#endif
14848
14849 ASSERT(MUTEX_HELD(&cpu_lock));
14850
14851 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
14852 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14853#ifndef VBOX
14854 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
14855 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
14856 VM_SLEEP | VMC_IDENTIFIER);
14857 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
14858 1, INT_MAX, 0);
14859#endif
14860
14861 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
14862 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
14863 NULL, NULL, NULL, NULL, NULL, 0);
14864
14865 ASSERT(MUTEX_HELD(&cpu_lock));
14866 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
14867 offsetof(dtrace_probe_t, dtpr_nextmod),
14868 offsetof(dtrace_probe_t, dtpr_prevmod));
14869
14870 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
14871 offsetof(dtrace_probe_t, dtpr_nextfunc),
14872 offsetof(dtrace_probe_t, dtpr_prevfunc));
14873
14874 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
14875 offsetof(dtrace_probe_t, dtpr_nextname),
14876 offsetof(dtrace_probe_t, dtpr_prevname));
14877
14878 if (dtrace_retain_max < 1) {
14879 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
14880 "setting to 1", dtrace_retain_max);
14881 dtrace_retain_max = 1;
14882 }
14883
14884 /*
14885 * Now discover our toxic ranges.
14886 */
14887 dtrace_toxic_ranges(dtrace_toxrange_add);
14888
14889 /*
14890 * Before we register ourselves as a provider to our own framework,
14891 * we would like to assert that dtrace_provider is NULL -- but that's
14892 * not true if we were loaded as a dependency of a DTrace provider.
14893 * Once we've registered, we can assert that dtrace_provider is our
14894 * pseudo provider.
14895 */
14896 (void) dtrace_register("dtrace", &dtrace_provider_attr,
14897 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
14898
14899 ASSERT(dtrace_provider != NULL);
14900 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
14901
14902 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
14903 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
14904 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
14905 dtrace_provider, NULL, NULL, "END", 0, NULL);
14906 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
14907 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
14908
14909#ifndef VBOX
14910 dtrace_anon_property();
14911#endif
14912 mutex_exit(&cpu_lock);
14913
14914 /*
14915 * If DTrace helper tracing is enabled, we need to allocate the
14916 * trace buffer and initialize the values.
14917 */
14918 if (dtrace_helptrace_enabled) {
14919 ASSERT(dtrace_helptrace_buffer == NULL);
14920 dtrace_helptrace_buffer =
14921 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
14922 dtrace_helptrace_next = 0;
14923 }
14924
14925 /*
14926 * If there are already providers, we must ask them to provide their
14927 * probes, and then match any anonymous enabling against them. Note
14928 * that there should be no other retained enablings at this time:
14929 * the only retained enablings at this time should be the anonymous
14930	 * the only retained enabling should be the anonymous
14931 */
14932 if (dtrace_anon.dta_enabling != NULL) {
14933 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
14934
14935 dtrace_enabling_provide(NULL);
14936 state = dtrace_anon.dta_state;
14937
14938 /*
14939 * We couldn't hold cpu_lock across the above call to
14940 * dtrace_enabling_provide(), but we must hold it to actually
14941 * enable the probes. We have to drop all of our locks, pick
14942 * up cpu_lock, and regain our locks before matching the
14943 * retained anonymous enabling.
14944 */
14945 mutex_exit(&dtrace_lock);
14946 mutex_exit(&dtrace_provider_lock);
14947
14948 mutex_enter(&cpu_lock);
14949 mutex_enter(&dtrace_provider_lock);
14950 mutex_enter(&dtrace_lock);
14951
14952 if ((enab = dtrace_anon.dta_enabling) != NULL)
14953 (void) dtrace_enabling_match(enab, NULL);
14954
14955 mutex_exit(&cpu_lock);
14956 }
14957
14958 mutex_exit(&dtrace_lock);
14959 mutex_exit(&dtrace_provider_lock);
14960
14961 if (state != NULL) {
14962 /*
14963 * If we created any anonymous state, set it going now.
14964 */
14965 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
14966 }
14967
14968 return (DDI_SUCCESS);
14969}
14970
14971#ifdef VBOX
14972int dtrace_open(dtrace_state_t **ppState, cred_t *cred_p)
14973#else
14974/*ARGSUSED*/
14975static int
14976dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
14977#endif
14978{
14979 dtrace_state_t *state;
14980 uint32_t priv;
14981 uid_t uid;
14982 zoneid_t zoneid;
14983
14984#ifndef VBOX
14985 if (getminor(*devp) == DTRACEMNRN_HELPER)
14986 return (0);
14987
14988 /*
14989 * If this wasn't an open with the "helper" minor, then it must be
14990 * the "dtrace" minor.
14991 */
14992 if (getminor(*devp) != DTRACEMNRN_DTRACE)
14993 return (ENXIO);
14994#endif /* !VBOX */
14995
14996 /*
14997 * If no DTRACE_PRIV_* bits are set in the credential, then the
14998 * caller lacks sufficient permission to do anything with DTrace.
14999 */
15000 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
15001 if (priv == DTRACE_PRIV_NONE)
15002 return (EACCES);
15003
15004 /*
15005 * Ask all providers to provide all their probes.
15006 */
15007 mutex_enter(&dtrace_provider_lock);
15008 dtrace_probe_provide(NULL, NULL);
15009 mutex_exit(&dtrace_provider_lock);
15010
15011 mutex_enter(&cpu_lock);
15012 mutex_enter(&dtrace_lock);
15013 dtrace_opens++;
15014 dtrace_membar_producer();
15015
15016#ifndef VBOX
15017 /*
15018 * If the kernel debugger is active (that is, if the kernel debugger
15019 * modified text in some way), we won't allow the open.
15020 */
15021 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15022 dtrace_opens--;
15023 mutex_exit(&cpu_lock);
15024 mutex_exit(&dtrace_lock);
15025 return (EBUSY);
15026 }
15027#endif
15028
15029#ifndef VBOX
15030 state = dtrace_state_create(devp, cred_p);
15031#else
15032 state = dtrace_state_create(cred_p);
15033#endif
15034 mutex_exit(&cpu_lock);
15035
15036 if (state == NULL) {
15037#ifndef VBOX
15038 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15039 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15040#endif
15041 mutex_exit(&dtrace_lock);
15042 return (EAGAIN);
15043 }
15044
15045 mutex_exit(&dtrace_lock);
15046
15047#ifdef VBOX
15048 *ppState = state;
15049#endif
15050 return (0);
15051}
15052
15053#ifdef VBOX
15054int dtrace_close(dtrace_state_t *state)
15055#else
15056/*ARGSUSED*/
15057static int
15058dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
15059#endif
15060{
15061#ifndef VBOX
15062 minor_t minor = getminor(dev);
15063 dtrace_state_t *state;
15064
15065 if (minor == DTRACEMNRN_HELPER)
15066 return (0);
15067
15068 state = ddi_get_soft_state(dtrace_softstate, minor);
15069#endif
15070
15071 mutex_enter(&cpu_lock);
15072 mutex_enter(&dtrace_lock);
15073
15074 if (state->dts_anon) {
15075 /*
15076 * There is anonymous state. Destroy that first.
15077 */
15078 ASSERT(dtrace_anon.dta_state == NULL);
15079 dtrace_state_destroy(state->dts_anon);
15080 }
15081
15082 dtrace_state_destroy(state);
15083 ASSERT(dtrace_opens > 0);
15084
15085#ifndef VBOX
15086 /*
15087 * Only relinquish control of the kernel debugger interface when there
15088 * are no consumers and no anonymous enablings.
15089 */
15090 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
15091 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15092#endif
15093
15094 mutex_exit(&dtrace_lock);
15095 mutex_exit(&cpu_lock);
15096
15097 return (0);
15098}
15099
15100#ifndef VBOX
15101/*ARGSUSED*/
15102static int
15103dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
15104{
15105 int rval;
15106 dof_helper_t help, *dhp = NULL;
15107
15108 switch (cmd) {
15109 case DTRACEHIOC_ADDDOF:
15110 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
15111 dtrace_dof_error(NULL, "failed to copyin DOF helper");
15112 return (EFAULT);
15113 }
15114
15115 dhp = &help;
15116 arg = (intptr_t)help.dofhp_dof;
15117 /*FALLTHROUGH*/
15118
15119 case DTRACEHIOC_ADD: {
15120 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
15121
15122 if (dof == NULL)
15123 return (rval);
15124
15125 mutex_enter(&dtrace_lock);
15126
15127 /*
15128 * dtrace_helper_slurp() takes responsibility for the dof --
15129 * it may free it now or it may save it and free it later.
15130 */
15131 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
15132 *rv = rval;
15133 rval = 0;
15134 } else {
15135 rval = EINVAL;
15136 }
15137
15138 mutex_exit(&dtrace_lock);
15139 return (rval);
15140 }
15141
15142 case DTRACEHIOC_REMOVE: {
15143 mutex_enter(&dtrace_lock);
15144 rval = dtrace_helper_destroygen(arg);
15145 mutex_exit(&dtrace_lock);
15146
15147 return (rval);
15148 }
15149
15150 default:
15151 break;
15152 }
15153
15154 return (ENOTTY);
15155}
15156#endif /* !VBOX */
15157
15158#ifdef VBOX
15159int dtrace_ioctl(dtrace_state_t *state, int cmd, intptr_t arg, int32_t *rv)
15160#else
15161/*ARGSUSED*/
15162static int
15163dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
15164#endif
15165{
15166#ifndef VBOX
15167 minor_t minor = getminor(dev);
15168 dtrace_state_t *state;
15169#endif
15170 int rval;
15171
15172#ifndef VBOX
15173 if (minor == DTRACEMNRN_HELPER)
15174 return (dtrace_ioctl_helper(cmd, arg, rv));
15175
15176 state = ddi_get_soft_state(dtrace_softstate, minor);
15177#endif
15178
15179 if (state->dts_anon) {
15180 ASSERT(dtrace_anon.dta_state == NULL);
15181 state = state->dts_anon;
15182 }
15183
15184 switch (cmd) {
15185 case DTRACEIOC_PROVIDER: {
15186 dtrace_providerdesc_t pvd;
15187 dtrace_provider_t *pvp;
15188
15189 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
15190 return (EFAULT);
15191
15192 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
15193 mutex_enter(&dtrace_provider_lock);
15194
15195 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
15196 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
15197 break;
15198 }
15199
15200 mutex_exit(&dtrace_provider_lock);
15201
15202 if (pvp == NULL)
15203 return (ESRCH);
15204
15205 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
15206 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
15207 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
15208 return (EFAULT);
15209
15210 return (0);
15211 }
15212
15213 case DTRACEIOC_EPROBE: {
15214 dtrace_eprobedesc_t epdesc;
15215 dtrace_ecb_t *ecb;
15216 dtrace_action_t *act;
15217 void *buf;
15218 size_t size;
15219 uintptr_t dest;
15220 int nrecs;
15221
15222 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
15223 return (EFAULT);
15224
15225 mutex_enter(&dtrace_lock);
15226
15227 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
15228 mutex_exit(&dtrace_lock);
15229 return (EINVAL);
15230 }
15231
15232 if (ecb->dte_probe == NULL) {
15233 mutex_exit(&dtrace_lock);
15234 return (EINVAL);
15235 }
15236
15237 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
15238 epdesc.dtepd_uarg = ecb->dte_uarg;
15239 epdesc.dtepd_size = VBDTCAST(uint32_t)ecb->dte_size;
15240
15241 nrecs = epdesc.dtepd_nrecs;
15242 epdesc.dtepd_nrecs = 0;
15243 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15244 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15245 continue;
15246
15247 epdesc.dtepd_nrecs++;
15248 }
15249
15250 /*
15251 * Now that we have the size, we need to allocate a temporary
15252 * buffer in which to store the complete description. We need
15253 * the temporary buffer to be able to drop dtrace_lock()
15254 * across the copyout(), below.
15255 */
15256 size = sizeof (dtrace_eprobedesc_t) +
15257 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
15258
15259 buf = kmem_alloc(size, KM_SLEEP);
15260 dest = (uintptr_t)buf;
15261
15262 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
15263 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
15264
15265 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
15266 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
15267 continue;
15268
15269 if (nrecs-- == 0)
15270 break;
15271
15272 bcopy(&act->dta_rec, (void *)dest,
15273 sizeof (dtrace_recdesc_t));
15274 dest += sizeof (dtrace_recdesc_t);
15275 }
15276
15277 mutex_exit(&dtrace_lock);
15278
15279 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15280 kmem_free(buf, size);
15281 return (EFAULT);
15282 }
15283
15284 kmem_free(buf, size);
15285 return (0);
15286 }
15287
15288 case DTRACEIOC_AGGDESC: {
15289 dtrace_aggdesc_t aggdesc;
15290 dtrace_action_t *act;
15291 dtrace_aggregation_t *agg;
15292 int nrecs;
15293 uint32_t offs;
15294 dtrace_recdesc_t *lrec;
15295 void *buf;
15296 size_t size;
15297 uintptr_t dest;
15298
15299 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
15300 return (EFAULT);
15301
15302 mutex_enter(&dtrace_lock);
15303
15304 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
15305 mutex_exit(&dtrace_lock);
15306 return (EINVAL);
15307 }
15308
15309 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
15310
15311 nrecs = aggdesc.dtagd_nrecs;
15312 aggdesc.dtagd_nrecs = 0;
15313
15314 offs = agg->dtag_base;
15315 lrec = &agg->dtag_action.dta_rec;
15316 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
15317
15318 for (act = agg->dtag_first; ; act = act->dta_next) {
15319 ASSERT(act->dta_intuple ||
15320 DTRACEACT_ISAGG(act->dta_kind));
15321
15322 /*
15323 * If this action has a record size of zero, it
15324 * denotes an argument to the aggregating action.
15325 * Because the presence of this record doesn't (or
15326 * shouldn't) affect the way the data is interpreted,
15327 * we don't copy it out to save user-level the
15328 * confusion of dealing with a zero-length record.
15329 */
15330 if (act->dta_rec.dtrd_size == 0) {
15331 ASSERT(agg->dtag_hasarg);
15332 continue;
15333 }
15334
15335 aggdesc.dtagd_nrecs++;
15336
15337 if (act == &agg->dtag_action)
15338 break;
15339 }
15340
15341 /*
15342 * Now that we have the size, we need to allocate a temporary
15343 * buffer in which to store the complete description. We need
15344 * the temporary buffer to be able to drop dtrace_lock()
15345 * across the copyout(), below.
15346 */
15347 size = sizeof (dtrace_aggdesc_t) +
15348 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
15349
15350 buf = kmem_alloc(size, KM_SLEEP);
15351 dest = (uintptr_t)buf;
15352
15353 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
15354 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
15355
15356 for (act = agg->dtag_first; ; act = act->dta_next) {
15357 dtrace_recdesc_t rec = act->dta_rec;
15358
15359 /*
15360 * See the comment in the above loop for why we pass
15361 * over zero-length records.
15362 */
15363 if (rec.dtrd_size == 0) {
15364 ASSERT(agg->dtag_hasarg);
15365 continue;
15366 }
15367
15368 if (nrecs-- == 0)
15369 break;
15370
15371 rec.dtrd_offset -= offs;
15372 bcopy(&rec, (void *)dest, sizeof (rec));
15373 dest += sizeof (dtrace_recdesc_t);
15374
15375 if (act == &agg->dtag_action)
15376 break;
15377 }
15378
15379 mutex_exit(&dtrace_lock);
15380
15381 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
15382 kmem_free(buf, size);
15383 return (EFAULT);
15384 }
15385
15386 kmem_free(buf, size);
15387 return (0);
15388 }
15389
15390 case DTRACEIOC_ENABLE: {
15391 dof_hdr_t *dof;
15392 dtrace_enabling_t *enab = NULL;
15393 dtrace_vstate_t *vstate;
15394 int err = 0;
15395#ifdef VBOX
15396 cred_t *cr = CRED();
15397#endif
15398
15399 *rv = 0;
15400
15401 /*
15402 * If a NULL argument has been passed, we take this as our
15403 * cue to reevaluate our enablings.
15404 */
15405 if (arg == NULL) {
15406 dtrace_enabling_matchall();
15407
15408 return (0);
15409 }
15410
15411 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
15412 return (rval);
15413
15414 mutex_enter(&cpu_lock);
15415 mutex_enter(&dtrace_lock);
15416 vstate = &state->dts_vstate;
15417
15418 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
15419 mutex_exit(&dtrace_lock);
15420 mutex_exit(&cpu_lock);
15421 dtrace_dof_destroy(dof);
15422 return (EBUSY);
15423 }
15424
15425 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
15426 mutex_exit(&dtrace_lock);
15427 mutex_exit(&cpu_lock);
15428 dtrace_dof_destroy(dof);
15429 return (EINVAL);
15430 }
15431
15432 if ((rval = dtrace_dof_options(dof, state)) != 0) {
15433 dtrace_enabling_destroy(enab);
15434 mutex_exit(&dtrace_lock);
15435 mutex_exit(&cpu_lock);
15436 dtrace_dof_destroy(dof);
15437 return (rval);
15438 }
15439
15440 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
15441 err = dtrace_enabling_retain(enab);
15442 } else {
15443 dtrace_enabling_destroy(enab);
15444 }
15445
15446 mutex_exit(&cpu_lock);
15447 mutex_exit(&dtrace_lock);
15448 dtrace_dof_destroy(dof);
15449
15450 return (err);
15451 }
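
	/*
	 * Illustrative note, not from the original sources: DTRACEIOC_ENABLE
	 * is how a compiled D program lands in the kernel. A consumer such
	 * as dtrace(1M) has libdtrace generate DOF, then roughly does:
	 *
	 *	ioctl(fd, DTRACEIOC_ENABLE, dof);	// enable the program
	 *	ioctl(fd, DTRACEIOC_ENABLE, NULL);	// just re-match enablings
	 *
	 * the second form matching the arg == NULL shortcut at the top of
	 * the case above.
	 */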
15452
15453 case DTRACEIOC_REPLICATE: {
15454 dtrace_repldesc_t desc;
15455 dtrace_probedesc_t *match = &desc.dtrpd_match;
15456 dtrace_probedesc_t *create = &desc.dtrpd_create;
15457 int err;
15458
15459 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15460 return (EFAULT);
15461
15462 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15463 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15464 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15465 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15466
15467 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15468 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15469 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15470 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15471
15472 mutex_enter(&dtrace_lock);
15473 err = dtrace_enabling_replicate(state, match, create);
15474 mutex_exit(&dtrace_lock);
15475
15476 return (err);
15477 }
15478
15479 case DTRACEIOC_PROBEMATCH:
15480 case DTRACEIOC_PROBES: {
15481 dtrace_probe_t *probe = NULL;
15482 dtrace_probedesc_t desc;
15483 dtrace_probekey_t pkey;
15484 dtrace_id_t i;
15485 int m = 0;
15486 uint32_t priv;
15487 uid_t uid;
15488 zoneid_t zoneid;
15489#ifdef VBOX
15490 cred_t *cr = CRED();
15491#endif
15492
15493 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15494 return (EFAULT);
15495
15496 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
15497 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
15498 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
15499 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
15500
15501 /*
15502 * Before we attempt to match this probe, we want to give
15503 * all providers the opportunity to provide it.
15504 */
15505 if (desc.dtpd_id == DTRACE_IDNONE) {
15506 mutex_enter(&dtrace_provider_lock);
15507 dtrace_probe_provide(&desc, NULL);
15508 mutex_exit(&dtrace_provider_lock);
15509 desc.dtpd_id++;
15510 }
15511
15512 if (cmd == DTRACEIOC_PROBEMATCH) {
15513 dtrace_probekey(&desc, &pkey);
15514 pkey.dtpk_id = DTRACE_IDNONE;
15515 }
15516
15517 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
15518
15519 mutex_enter(&dtrace_lock);
15520
15521 if (cmd == DTRACEIOC_PROBEMATCH) {
15522 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15523 if ((probe = dtrace_probes[i - 1]) != NULL &&
15524 (m = dtrace_match_probe(probe, &pkey,
15525 priv, uid, zoneid)) != 0)
15526 break;
15527 }
15528
15529 if (m < 0) {
15530 mutex_exit(&dtrace_lock);
15531 return (EINVAL);
15532 }
15533
15534 } else {
15535 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15536 if ((probe = dtrace_probes[i - 1]) != NULL &&
15537 dtrace_match_priv(probe, priv, uid, zoneid))
15538 break;
15539 }
15540 }
15541
15542 if (probe == NULL) {
15543 mutex_exit(&dtrace_lock);
15544 return (ESRCH);
15545 }
15546
15547 dtrace_probe_description(probe, &desc);
15548 mutex_exit(&dtrace_lock);
15549
15550 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15551 return (EFAULT);
15552
15553 return (0);
15554 }
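
	/*
	 * Illustrative note, not from the original sources: probe listing
	 * (dtrace -l) drives this ioctl as a cursor, feeding each returned
	 * id back in, incremented, until ESRCH ends the walk:
	 *
	 *	desc.dtpd_id = DTRACE_IDNONE;
	 *	while (ioctl(fd, DTRACEIOC_PROBES, &desc) == 0) {
	 *		print_probe(&desc);
	 *		desc.dtpd_id++;		// resume after this match
	 *	}
	 */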
15555
15556 case DTRACEIOC_PROBEARG: {
15557 dtrace_argdesc_t desc;
15558 dtrace_probe_t *probe;
15559 dtrace_provider_t *prov;
15560
15561 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15562 return (EFAULT);
15563
15564 if (desc.dtargd_id == DTRACE_IDNONE)
15565 return (EINVAL);
15566
15567 if (desc.dtargd_ndx == DTRACE_ARGNONE)
15568 return (EINVAL);
15569
15570 mutex_enter(&dtrace_provider_lock);
15571 mutex_enter(&mod_lock);
15572 mutex_enter(&dtrace_lock);
15573
15574 if (desc.dtargd_id > dtrace_nprobes) {
15575 mutex_exit(&dtrace_lock);
15576 mutex_exit(&mod_lock);
15577 mutex_exit(&dtrace_provider_lock);
15578 return (EINVAL);
15579 }
15580
15581 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
15582 mutex_exit(&dtrace_lock);
15583 mutex_exit(&mod_lock);
15584 mutex_exit(&dtrace_provider_lock);
15585 return (EINVAL);
15586 }
15587
15588 mutex_exit(&dtrace_lock);
15589
15590 prov = probe->dtpr_provider;
15591
15592 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
15593 /*
15594 * There isn't any typed information for this probe.
15595 * Set the argument number to DTRACE_ARGNONE.
15596 */
15597 desc.dtargd_ndx = DTRACE_ARGNONE;
15598 } else {
15599 desc.dtargd_native[0] = '\0';
15600 desc.dtargd_xlate[0] = '\0';
15601 desc.dtargd_mapping = desc.dtargd_ndx;
15602
15603 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
15604 probe->dtpr_id, probe->dtpr_arg, &desc);
15605 }
15606
15607 mutex_exit(&mod_lock);
15608 mutex_exit(&dtrace_provider_lock);
15609
15610 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15611 return (EFAULT);
15612
15613 return (0);
15614 }
15615
15616 case DTRACEIOC_GO: {
15617 processorid_t cpuid;
15618 rval = dtrace_state_go(state, &cpuid);
15619
15620 if (rval != 0)
15621 return (rval);
15622
15623 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15624 return (EFAULT);
15625
15626 return (0);
15627 }
15628
15629 case DTRACEIOC_STOP: {
15630 processorid_t cpuid;
15631
15632 mutex_enter(&dtrace_lock);
15633 rval = dtrace_state_stop(state, &cpuid);
15634 mutex_exit(&dtrace_lock);
15635
15636 if (rval != 0)
15637 return (rval);
15638
15639 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15640 return (EFAULT);
15641
15642 return (0);
15643 }
15644
15645 case DTRACEIOC_DOFGET: {
15646 dof_hdr_t hdr, *dof;
15647 uint64_t len;
15648
15649 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
15650 return (EFAULT);
15651
15652 mutex_enter(&dtrace_lock);
15653 dof = dtrace_dof_create(state);
15654 mutex_exit(&dtrace_lock);
15655
15656 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
15657 rval = copyout(dof, (void *)arg, len);
15658 dtrace_dof_destroy(dof);
15659
15660 return (rval == 0 ? 0 : EFAULT);
15661 }
15662
15663 case DTRACEIOC_AGGSNAP:
15664 case DTRACEIOC_BUFSNAP: {
15665 dtrace_bufdesc_t desc;
15666 caddr_t cached;
15667 dtrace_buffer_t *buf;
15668
15669 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15670 return (EFAULT);
15671
15672		if (/*VBox value is unsigned: desc.dtbd_cpu < 0 ||*/ desc.dtbd_cpu >= NCPU)
15673 return (EINVAL);
15674
15675 mutex_enter(&dtrace_lock);
15676
15677 if (cmd == DTRACEIOC_BUFSNAP) {
15678 buf = &state->dts_buffer[desc.dtbd_cpu];
15679 } else {
15680 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
15681 }
15682
15683 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
15684 size_t sz = buf->dtb_offset;
15685
15686 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
15687 mutex_exit(&dtrace_lock);
15688 return (EBUSY);
15689 }
15690
15691 /*
15692 * If this buffer has already been consumed, we're
15693 * going to indicate that there's nothing left here
15694 * to consume.
15695 */
15696 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
15697 mutex_exit(&dtrace_lock);
15698
15699 desc.dtbd_size = 0;
15700 desc.dtbd_drops = 0;
15701 desc.dtbd_errors = 0;
15702 desc.dtbd_oldest = 0;
15703 sz = sizeof (desc);
15704
15705 if (copyout(&desc, (void *)arg, sz) != 0)
15706 return (EFAULT);
15707
15708 return (0);
15709 }
15710
15711 /*
15712 * If this is a ring buffer that has wrapped, we want
15713 * to copy the whole thing out.
15714 */
15715 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
15716 dtrace_buffer_polish(buf);
15717 sz = buf->dtb_size;
15718 }
15719
15720 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
15721 mutex_exit(&dtrace_lock);
15722 return (EFAULT);
15723 }
15724
15725 desc.dtbd_size = sz;
15726 desc.dtbd_drops = buf->dtb_drops;
15727 desc.dtbd_errors = buf->dtb_errors;
15728 desc.dtbd_oldest = buf->dtb_xamot_offset;
15729
15730 mutex_exit(&dtrace_lock);
15731
15732 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15733 return (EFAULT);
15734
15735 buf->dtb_flags |= DTRACEBUF_CONSUMED;
15736
15737 return (0);
15738 }
15739
        if (buf->dtb_tomax == NULL) {
            ASSERT(buf->dtb_xamot == NULL);
            mutex_exit(&dtrace_lock);
            return (ENOENT);
        }

        cached = buf->dtb_tomax;
        ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

#ifndef VBOX
        dtrace_xcall(desc.dtbd_cpu,
            (dtrace_xcall_t)dtrace_buffer_switch, buf);
#else
        if ((int32_t)desc.dtbd_cpu == DTRACE_CPUALL)
            RTMpOnAll(dtrace_buffer_switch_wrapper, buf, NULL);
        else
            RTMpOnSpecific(desc.dtbd_cpu, dtrace_buffer_switch_wrapper, buf, NULL);
#endif

        state->dts_errors += buf->dtb_xamot_errors;

        /*
         * If the buffers did not actually switch, then the cross call
         * did not take place -- presumably because the given CPU is
         * not in the ready set.  If this is the case, we'll return
         * ENOENT.
         */
        if (buf->dtb_tomax == cached) {
            ASSERT(buf->dtb_xamot != cached);
            mutex_exit(&dtrace_lock);
            return (ENOENT);
        }

        ASSERT(cached == buf->dtb_xamot);

        /*
         * We have our snapshot; now copy it out.
         */
        if (copyout(buf->dtb_xamot, desc.dtbd_data,
            buf->dtb_xamot_offset) != 0) {
            mutex_exit(&dtrace_lock);
            return (EFAULT);
        }

        desc.dtbd_size = buf->dtb_xamot_offset;
        desc.dtbd_drops = buf->dtb_xamot_drops;
        desc.dtbd_errors = buf->dtb_xamot_errors;
        desc.dtbd_oldest = 0;

        mutex_exit(&dtrace_lock);

        /*
         * Finally, copy out the buffer description.
         */
        if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
            return (EFAULT);

        return (0);
    }
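
    /*
     * Illustrative sketch (not part of the driver): a minimal userland
     * consumer driving DTRACEIOC_BUFSNAP, assuming a hypothetical "fd"
     * open on the DTrace device and a caller-owned "data" buffer at
     * least as large as the enabled per-CPU buffer size.  ENOENT means
     * no buffer on that CPU or that the CPU didn't take the cross call,
     * so a consumer typically just skips that CPU:
     *
     *    dtrace_bufdesc_t desc;
     *    bzero(&desc, sizeof (desc));
     *    for (cpu = 0; cpu < ncpu; cpu++) {
     *        desc.dtbd_cpu = cpu;
     *        desc.dtbd_data = data;          // caller-owned snapshot buffer
     *        if (ioctl(fd, DTRACEIOC_BUFSNAP, &desc) != 0) {
     *            if (errno == ENOENT)
     *                continue;               // no buffer / CPU not ready
     *            return (-1);
     *        }
     *        process(data, desc.dtbd_size);  // hypothetical consumer hook
     *    }
     */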

    case DTRACEIOC_CONF: {
        dtrace_conf_t conf;

        bzero(&conf, sizeof (conf));
        conf.dtc_difversion = DIF_VERSION;
        conf.dtc_difintregs = DIF_DIR_NREGS;
        conf.dtc_diftupregs = DIF_DTR_NREGS;
        conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

        if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
            return (EFAULT);

        return (0);
    }
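
    /*
     * Illustrative sketch (not part of the driver): DTRACEIOC_CONF is a
     * plain one-shot read, so a consumer can use it to check DIF
     * compatibility before sending any program down (again assuming a
     * hypothetical "fd" on the DTrace device):
     *
     *    dtrace_conf_t conf;
     *    if (ioctl(fd, DTRACEIOC_CONF, &conf) == 0 &&
     *        conf.dtc_difversion < DIF_VERSION)
     *        ...reject or downgrade the compiled program...
     */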

    case DTRACEIOC_STATUS: {
        dtrace_status_t stat;
        dtrace_dstate_t *dstate;
        int i, j;
        uint64_t nerrs;

        /*
         * See the comment in dtrace_state_deadman() for the reason
         * for setting dts_laststatus to INT64_MAX before setting
         * it to the correct value.
         */
        state->dts_laststatus = INT64_MAX;
        dtrace_membar_producer();
        state->dts_laststatus = dtrace_gethrtime();

        bzero(&stat, sizeof (stat));

        mutex_enter(&dtrace_lock);

        if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
            mutex_exit(&dtrace_lock);
            return (ENOENT);
        }

        if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
            stat.dtst_exiting = 1;

        nerrs = state->dts_errors;
        dstate = &state->dts_vstate.dtvs_dynvars;

        for (i = 0; i < NCPU; i++) {
            dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

            stat.dtst_dyndrops += dcpu->dtdsc_drops;
            stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
            stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

            if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
                stat.dtst_filled++;

            nerrs += state->dts_buffer[i].dtb_errors;

            for (j = 0; j < state->dts_nspeculations; j++) {
                dtrace_speculation_t *spec;
                dtrace_buffer_t *buf;

                spec = &state->dts_speculations[j];
                buf = &spec->dtsp_buffer[i];
                stat.dtst_specdrops += buf->dtb_xamot_drops;
            }
        }

        stat.dtst_specdrops_busy = state->dts_speculations_busy;
        stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
        stat.dtst_stkstroverflows = state->dts_stkstroverflows;
        stat.dtst_dblerrors = state->dts_dblerrors;
        stat.dtst_killed =
            (state->dts_activity == DTRACE_ACTIVITY_KILLED);
        stat.dtst_errors = nerrs;

        mutex_exit(&dtrace_lock);

        if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
            return (EFAULT);

        return (0);
    }
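
    /*
     * Illustrative sketch (not part of the driver): because the handler
     * above refreshes dts_laststatus, a consumer that polls status
     * periodically also feeds the deadman timer.  A minimal polling loop
     * might look like this (assuming a hypothetical "fd" and a polling
     * "interval" chosen by the consumer):
     *
     *    dtrace_status_t st;
     *    for (;;) {
     *        if (ioctl(fd, DTRACEIOC_STATUS, &st) != 0)
     *            break;                  // ENOENT: session not active
     *        if (st.dtst_exiting || st.dtst_killed)
     *            break;                  // draining or killed; wind down
     *        sleep(interval);
     *    }
     */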

    case DTRACEIOC_FORMAT: {
        dtrace_fmtdesc_t fmt;
        char *str;
        int len;

        if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
            return (EFAULT);

        mutex_enter(&dtrace_lock);

        if (fmt.dtfd_format == 0 ||
            fmt.dtfd_format > state->dts_nformats) {
            mutex_exit(&dtrace_lock);
            return (EINVAL);
        }

        /*
         * Format strings are allocated contiguously and they are
         * never freed; if a format index is less than the number
         * of formats, we can assert that the format map is non-NULL
         * and that the format for the specified index is non-NULL.
         */
        ASSERT(state->dts_formats != NULL);
        str = state->dts_formats[fmt.dtfd_format - 1];
        ASSERT(str != NULL);

        len = VBDTCAST(int)strlen(str) + 1;

        if (len > fmt.dtfd_length) {
            fmt.dtfd_length = len;

            if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
                mutex_exit(&dtrace_lock);
                return (EINVAL);
            }
        } else {
            if (copyout(str, fmt.dtfd_string, len) != 0) {
                mutex_exit(&dtrace_lock);
                return (EINVAL);
            }
        }

        mutex_exit(&dtrace_lock);
        return (0);
    }
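
    /*
     * Illustrative sketch (not part of the driver): the handler above is
     * a classic two-call protocol -- when the caller's buffer is too
     * short, the call still succeeds and returns the needed length in
     * dtfd_length.  A consumer-side sketch, assuming a hypothetical "fd"
     * and a 1-based format index "id" taken from a record description:
     *
     *    dtrace_fmtdesc_t fmt;
     *    bzero(&fmt, sizeof (fmt));
     *    fmt.dtfd_format = id;
     *    if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) != 0)
     *        return (-1);                  // invalid index, etc.
     *    fmt.dtfd_string = malloc(fmt.dtfd_length);
     *    if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) != 0)
     *        return (-1);                  // fmt.dtfd_string now valid
     */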

    default:
        break;
    }

    return (ENOTTY);
}

#ifdef VBOX
int dtrace_detach(ddi_detach_cmd_t cmd)
#else
/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
#endif
{
    dtrace_state_t *state;

    switch (cmd) {
    case DDI_DETACH:
        break;

    case DDI_SUSPEND:
        return (DDI_SUCCESS);

    default:
        return (DDI_FAILURE);
    }

    mutex_enter(&cpu_lock);
    mutex_enter(&dtrace_provider_lock);
    mutex_enter(&dtrace_lock);

    ASSERT(dtrace_opens == 0);

    if (dtrace_helpers > 0) {
        mutex_exit(&dtrace_provider_lock);
        mutex_exit(&dtrace_lock);
        mutex_exit(&cpu_lock);
        return (DDI_FAILURE);
    }

    if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
        mutex_exit(&dtrace_provider_lock);
        mutex_exit(&dtrace_lock);
        mutex_exit(&cpu_lock);
        return (DDI_FAILURE);
    }

    dtrace_provider = NULL;

    if ((state = dtrace_anon_grab()) != NULL) {
        /*
         * If there were ECBs on this state, the provider should not
         * have been allowed to detach; assert that there are none.
         */
        ASSERT(state->dts_necbs == 0);
        dtrace_state_destroy(state);

#ifndef VBOX
        /*
         * If we're being detached with anonymous state, we need to
         * indicate to the kernel debugger that DTrace is now inactive.
         */
        (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
#endif
    }

    bzero(&dtrace_anon, sizeof (dtrace_anon_t));
#ifndef VBOX /** @todo CPU hooks */
    unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
#endif
    dtrace_cpu_init = NULL;
    dtrace_helpers_cleanup = NULL;
    dtrace_helpers_fork = NULL;
    dtrace_cpustart_init = NULL;
    dtrace_cpustart_fini = NULL;
    dtrace_debugger_init = NULL;
    dtrace_debugger_fini = NULL;
    dtrace_modload = NULL;
    dtrace_modunload = NULL;

    mutex_exit(&cpu_lock);

    if (dtrace_helptrace_enabled) {
        kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
        dtrace_helptrace_buffer = NULL;
    }

    kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
    dtrace_probes = NULL;
    dtrace_nprobes = 0;

    dtrace_hash_destroy(dtrace_bymod);
    dtrace_hash_destroy(dtrace_byfunc);
    dtrace_hash_destroy(dtrace_byname);
    dtrace_bymod = NULL;
    dtrace_byfunc = NULL;
    dtrace_byname = NULL;

    kmem_cache_destroy(dtrace_state_cache);
#ifndef VBOX
    vmem_destroy(dtrace_minor);
#endif
    vmem_destroy(dtrace_arena);

    if (dtrace_toxrange != NULL) {
        kmem_free(dtrace_toxrange,
            dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
        dtrace_toxrange = NULL;
        dtrace_toxranges = 0;
        dtrace_toxranges_max = 0;
    }

#ifndef VBOX
    ddi_remove_minor_node(dtrace_devi, NULL);
    dtrace_devi = NULL;

    ddi_soft_state_fini(&dtrace_softstate);
#endif

    ASSERT(dtrace_vtime_references == 0);
    ASSERT(dtrace_opens == 0);
    ASSERT(dtrace_retained == NULL);

    mutex_exit(&dtrace_lock);
    mutex_exit(&dtrace_provider_lock);

    /*
     * We don't destroy the task queue until after we have dropped our
     * locks (taskq_destroy() may block on running tasks).  To prevent
     * attempting to do work after we have effectively detached but before
     * the task queue has been destroyed, all tasks dispatched via the
     * task queue must check that DTrace is still attached before
     * performing any operation.
     */
#ifndef VBOX
    taskq_destroy(dtrace_taskq);
    dtrace_taskq = NULL;
#endif

    return (DDI_SUCCESS);
}
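
/*
 * Illustrative sketch (not part of the driver): the taskq rule described
 * above -- a dispatched task must re-check that DTrace is still attached
 * before doing any work -- typically looks like the following, where the
 * dtrace_provider test is a stand-in for whatever attached-ness state a
 * real task would key off under dtrace_lock:
 *
 *    static void
 *    dtrace_example_task(void *arg)
 *    {
 *        mutex_enter(&dtrace_lock);
 *        if (dtrace_provider == NULL) {    // effectively detached; bail
 *            mutex_exit(&dtrace_lock);
 *            return;
 *        }
 *        ...do the real work...
 *        mutex_exit(&dtrace_lock);
 *    }
 */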

#ifndef VBOX
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
    int error;

    switch (infocmd) {
    case DDI_INFO_DEVT2DEVINFO:
        *result = (void *)dtrace_devi;
        error = DDI_SUCCESS;
        break;
    case DDI_INFO_DEVT2INSTANCE:
        *result = (void *)0;
        error = DDI_SUCCESS;
        break;
    default:
        error = DDI_FAILURE;
    }
    return (error);
}

static struct cb_ops dtrace_cb_ops = {
    dtrace_open,            /* open */
    dtrace_close,           /* close */
    nulldev,                /* strategy */
    nulldev,                /* print */
    nodev,                  /* dump */
    nodev,                  /* read */
    nodev,                  /* write */
    dtrace_ioctl,           /* ioctl */
    nodev,                  /* devmap */
    nodev,                  /* mmap */
    nodev,                  /* segmap */
    nochpoll,               /* poll */
    ddi_prop_op,            /* cb_prop_op */
    0,                      /* streamtab */
    D_NEW | D_MP            /* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
    DEVO_REV,               /* devo_rev */
    0,                      /* refcnt */
    dtrace_info,            /* get_dev_info */
    nulldev,                /* identify */
    nulldev,                /* probe */
    dtrace_attach,          /* attach */
    dtrace_detach,          /* detach */
    nodev,                  /* reset */
    &dtrace_cb_ops,         /* driver operations */
    NULL,                   /* bus operations */
    nodev,                  /* dev power */
    ddi_quiesce_not_needed, /* quiesce */
};

static struct modldrv modldrv = {
    &mod_driverops,         /* module type (this is a pseudo driver) */
    "Dynamic Tracing",      /* name of module */
    &dtrace_ops,            /* driver ops */
};

static struct modlinkage modlinkage = {
    MODREV_1,
    (void *)&modldrv,
    NULL
};

int
_init(void)
{
    return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
    return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
    return (mod_remove(&modlinkage));
}

#endif /* !VBOX */