VirtualBox

source: vbox/trunk/src/VBox/ExtPacks/VBoxDTrace/VBoxDTraceR0/VBoxDTraceR0.cpp@ 53670

Last change on this file since 53670 was 53670, checked in by vboxsync, 10 years ago

VBoxDTrace: init (r47)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 52.8 KB
Line 
1/* $Id: VBoxDTraceR0.cpp 53670 2015-01-02 12:33:41Z vboxsync $ */
2/** @file
3 * VBoxDTraceR0.
4 */
5
6/*
7 * Copyright (c) 2012 bird
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include <VBox/sup.h>
36#include <VBox/log.h>
37
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/assert.h>
40#include <iprt/ctype.h>
41#include <iprt/err.h>
42#include <iprt/mem.h>
43#include <iprt/mp.h>
44#include <iprt/process.h>
45#include <iprt/semaphore.h>
46#include <iprt/spinlock.h>
47#include <iprt/string.h>
48#include <iprt/thread.h>
49#include <iprt/time.h>
50
51#include <sys/dtrace_impl.h>
52
53#include <VBox/VBoxTpG.h>
54
55
56
57/*******************************************************************************
58* Structures and Typedefs *
59*******************************************************************************/
60
61/** Caller indicator. */
62typedef enum VBOXDTCALLER
63{
64 kVBoxDtCaller_Invalid = 0,
65 kVBoxDtCaller_Generic,
66 kVBoxDtCaller_ProbeFireUser,
67 kVBoxDtCaller_ProbeFireKernel
68} VBOXDTCALLER;
69
70/**
71 * Stack data used for thread structure and such.
72 *
73 * This is planted in every external entry point and used to emulate solaris
74 * curthread, CRED, curproc and similar. It is also used to get at the
75 * uncached probe arguments.
76 */
77typedef struct VBoxDtStackData
78{
79 /** Eyecatcher no. 1 (VBDT_STACK_DATA_MAGIC1). */
80 uint32_t u32Magic1;
81 /** Eyecatcher no. 2 (VBDT_STACK_DATA_MAGIC2). */
82 uint32_t u32Magic2;
83 /** The format of the caller specific data. */
84 VBOXDTCALLER enmCaller;
85 /** Caller specific data. */
86 union
87 {
88 /** kVBoxDtCaller_ProbeFireKernel. */
89 struct
90 {
91 /** The caller. */
92 uintptr_t uCaller;
93 /** Pointer to the stack arguments of a probe function call. */
94 uintptr_t *pauStackArgs;
95 } ProbeFireKernel;
96 /** kVBoxDtCaller_ProbeFireUser. */
97 struct
98 {
99 /** The user context. */
100 PCSUPDRVTRACERUSRCTX pCtx;
101 } ProbeFireUser;
102 } u;
103 /** Credentials allocated by VBoxDtGetCurrentCreds. */
104 struct VBoxDtCred *pCred;
105 /** Thread structure currently being held by this thread. */
106 struct VBoxDtThread *pThread;
107 /** Pointer to this structure.
108 * This is the final bit of integrity checking. */
109 struct VBoxDtStackData *pSelf;
110} VBDTSTACKDATA;
111/** Pointer to the on-stack thread specific data. */
112typedef VBDTSTACKDATA *PVBDTSTACKDATA;
113
114/** The first magic value. */
115#define VBDT_STACK_DATA_MAGIC1 RT_MAKE_U32_FROM_U8('V', 'B', 'o', 'x')
116/** The second magic value. */
117#define VBDT_STACK_DATA_MAGIC2 RT_MAKE_U32_FROM_U8('D', 'T', 'r', 'c')
118
119/** The alignment of the stack data.
120 * The data doesn't require more than sizeof(uintptr_t) alignment, but the
121 * greater the alignment, the quicker the lookup. */
122#define VBDT_STACK_DATA_ALIGN 32
123
124/** Plants the stack data. */
125#define VBDT_SETUP_STACK_DATA(a_enmCaller) \
126 uint8_t abBlob[sizeof(VBoxDtStackData) + VBDT_STACK_DATA_ALIGN - 1]; \
127 PVBDTSTACKDATA pStackData = (PVBDTSTACKDATA)( (uintptr_t)&abBlob[VBDT_STACK_DATA_ALIGN - 1] \
128 & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1)); \
129 pStackData->u32Magic1 = VBDT_STACK_DATA_MAGIC1; \
130 pStackData->u32Magic2 = VBDT_STACK_DATA_MAGIC2; \
131 pStackData->enmCaller = a_enmCaller; \
132 pStackData->pCred = NULL; \
133 pStackData->pThread = NULL; \
134 pStackData->pSelf = pStackData
135
136/** Pacifies the stack data and frees up resources held within it. */
137#define VBDT_CLEAR_STACK_DATA() \
138 do \
139 { \
140 pStackData->u32Magic1 = 0; \
141 pStackData->u32Magic2 = 0; \
142 pStackData->pSelf = NULL; \
143 if (pStackData->pCred) \
144 crfree(pStackData->pCred); \
145 if (pStackData->pThread) \
146 VBoxDtReleaseThread(pStackData->pThread); \
147 } while (0)
148
149
150/*******************************************************************************
151* Global Variables *
152*******************************************************************************/
153/** Per CPU information */
154cpucore_t g_aVBoxDtCpuCores[RTCPUSET_MAX_CPUS];
155/** Dummy mutex. */
156struct VBoxDtMutex g_DummyMtx;
157/** Pointer to the tracer helpers provided by VBoxDrv. */
158static PCSUPDRVTRACERHLP g_pVBoxDTraceHlp;
159
160dtrace_cacheid_t dtrace_predcache_id = DTRACE_CACHEIDNONE + 1;
161
162#if 0
163void (*dtrace_cpu_init)(processorid_t);
164void (*dtrace_modload)(struct modctl *);
165void (*dtrace_modunload)(struct modctl *);
166void (*dtrace_helpers_cleanup)(void);
167void (*dtrace_helpers_fork)(proc_t *, proc_t *);
168void (*dtrace_cpustart_init)(void);
169void (*dtrace_cpustart_fini)(void);
170void (*dtrace_cpc_fire)(uint64_t);
171void (*dtrace_debugger_init)(void);
172void (*dtrace_debugger_fini)(void);
173#endif
174
175
176/**
177 * Gets the stack data.
178 *
179 * @returns Pointer to the stack data. Never NULL.
180 */
181static PVBDTSTACKDATA vboxDtGetStackData(void)
182{
183 int volatile iDummy = 1; /* use this to get the stack address. */
184 PVBDTSTACKDATA pData = (PVBDTSTACKDATA)( ((uintptr_t)&iDummy + VBDT_STACK_DATA_ALIGN - 1)
185 & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1));
186 for (;;)
187 {
188 if ( pData->u32Magic1 == VBDT_STACK_DATA_MAGIC1
189 && pData->u32Magic2 == VBDT_STACK_DATA_MAGIC2
190 && pData->pSelf == pData)
191 return pData;
192 pData = (PVBDTSTACKDATA)((uintptr_t)pData + VBDT_STACK_DATA_ALIGN);
193 }
194}
195
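/* Illustrative sketch (not part of the original file): how the on-stack data is
   meant to be used. An external entry point plants the aligned blob with
   VBDT_SETUP_STACK_DATA(); anything called from the DTrace core afterwards can
   recover it by scanning up the stack in vboxDtGetStackData(), which is how the
   curthread/CRED emulation finds its per-call state without thread-local storage.
   The entry point below is hypothetical and only shows the pattern. */
#if 0
static DECLCALLBACK(void) vbdtExampleEntryPoint(void)
{
    VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);   /* plant magics + self pointer */

    /* ... call into the DTrace core; deep inside, curthread expands to ... */
    struct VBoxDtThread *pThread = VBoxDtGetCurrentThread(); /* uses vboxDtGetStackData() */
    NOREF(pThread);

    VBDT_CLEAR_STACK_DATA();                        /* releases creds / thread references */
}
#endif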
196
197void dtrace_toxic_ranges(void (*pfnAddOne)(uintptr_t uBase, uintptr_t cbRange))
198{
199 /** @todo ? */
200}
201
202
203
204/**
205 * Dummy callback used by dtrace_sync.
206 */
207static DECLCALLBACK(void) vboxDtSyncCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
208{
209 NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
210}
211
212
213/**
214 * Synchronize across all CPUs (expensive).
215 */
216void dtrace_sync(void)
217{
218 int rc = RTMpOnAll(vboxDtSyncCallback, NULL, NULL);
219 AssertRC(rc);
220}
221
222
223/**
224 * Fetch an 8-bit "word" from userland.
225 *
226 * @return The byte value.
227 * @param pvUserAddr The userland address.
228 */
229uint8_t dtrace_fuword8( void *pvUserAddr)
230{
231 uint8_t u8;
232 int rc = RTR0MemUserCopyFrom(&u8, (uintptr_t)pvUserAddr, sizeof(u8));
233 if (RT_FAILURE(rc))
234 {
235 RTCPUID iCpu = VBDT_GET_CPUID();
236 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
237 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
238 u8 = 0;
239 }
240 return u8;
241}
242
243
244/**
245 * Fetch a 16-bit word from userland.
246 *
247 * @return The word value.
248 * @param pvUserAddr The userland address.
249 */
250uint16_t dtrace_fuword16(void *pvUserAddr)
251{
252 uint16_t u16;
253 int rc = RTR0MemUserCopyFrom(&u16, (uintptr_t)pvUserAddr, sizeof(u16));
254 if (RT_FAILURE(rc))
255 {
256 RTCPUID iCpu = VBDT_GET_CPUID();
257 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
258 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
259 u16 = 0;
260 }
261 return u16;
262}
263
264
265/**
266 * Fetch a 32-bit word from userland.
267 *
268 * @return The dword value.
269 * @param pvUserAddr The userland address.
270 */
271uint32_t dtrace_fuword32(void *pvUserAddr)
272{
273 uint32_t u32;
274 int rc = RTR0MemUserCopyFrom(&u32, (uintptr_t)pvUserAddr, sizeof(u32));
275 if (RT_FAILURE(rc))
276 {
277 RTCPUID iCpu = VBDT_GET_CPUID();
278 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
279 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
280 u32 = 0;
281 }
282 return u32;
283}
284
285
286/**
287 * Fetch a 64-bit word from userland.
288 *
289 * @return The qword value.
290 * @param pvUserAddr The userland address.
291 */
292uint64_t dtrace_fuword64(void *pvUserAddr)
293{
294 uint64_t u64;
295 int rc = RTR0MemUserCopyFrom(&u64, (uintptr_t)pvUserAddr, sizeof(u64));
296 if (RT_FAILURE(rc))
297 {
298 RTCPUID iCpu = VBDT_GET_CPUID();
299 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
300 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
301 u64 = 0;
302 }
303 return u64;
304}
305
306
307/** copyin implementation */
308int VBoxDtCopyIn(void const *pvUser, void *pvDst, size_t cb)
309{
310 int rc = RTR0MemUserCopyFrom(pvDst, (uintptr_t)pvUser, cb);
311 return RT_SUCCESS(rc) ? 0 : -1;
312}
313
314
315/** copyout implementation */
316int VBoxDtCopyOut(void const *pvSrc, void *pvUser, size_t cb)
317{
318 int rc = RTR0MemUserCopyTo((uintptr_t)pvUser, pvSrc, cb);
319 return RT_SUCCESS(rc) ? 0 : -1;
320}
321
322
323/**
324 * Copy data from userland into the kernel.
325 *
326 * @param uUserAddr The userland address.
327 * @param uKrnlAddr The kernel buffer address.
328 * @param cb The number of bytes to copy.
329 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
330 */
331void dtrace_copyin( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cb, volatile uint16_t *pfFlags)
332{
333 int rc = RTR0MemUserCopyFrom((void *)uKrnlAddr, uUserAddr, cb);
334 if (RT_FAILURE(rc))
335 {
336 *pfFlags |= CPU_DTRACE_BADADDR;
337 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
338 }
339}
340
341
342/**
343 * Copy data from the kernel into userland.
344 *
345 * @param uKrnlAddr The kernel buffer address.
346 * @param uUserAddr The userland address.
347 * @param cb The number of bytes to copy.
348 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
349 */
350void dtrace_copyout( uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cb, volatile uint16_t *pfFlags)
351{
352 int rc = RTR0MemUserCopyTo(uUserAddr, (void const *)uKrnlAddr, cb);
353 if (RT_FAILURE(rc))
354 {
355 *pfFlags |= CPU_DTRACE_BADADDR;
356 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
357 }
358}
359
360
361/**
362 * Copy a string from userland into the kernel.
363 *
364 * @param uUserAddr The userland address.
365 * @param uKrnlAddr The kernel buffer address.
366 * @param cbMax The maximum number of bytes to copy. May stop
367 * earlier if a zero byte is encountered.
368 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
369 */
370void dtrace_copyinstr( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cbMax, volatile uint16_t *pfFlags)
371{
372 if (!cbMax)
373 return;
374
375 char *pszDst = (char *)uKrnlAddr;
376 int rc = RTR0MemUserCopyFrom(pszDst, uUserAddr, cbMax);
377 if (RT_FAILURE(rc))
378 {
379 /* Byte by byte - lazy bird! */
380 size_t off = 0;
381 while (off < cbMax)
382 {
383 rc = RTR0MemUserCopyFrom(&pszDst[off], uUserAddr + off, 1);
384 if (RT_FAILURE(rc))
385 {
386 *pfFlags |= CPU_DTRACE_BADADDR;
387 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
388 pszDst[off] = '\0';
389 return;
390 }
391 if (!pszDst[off])
392 return;
393 off++;
394 }
395 }
396
397 pszDst[cbMax - 1] = '\0';
398}
399
400
401/**
402 * Copy a string from the kernel and into user land.
403 *
404 * @param uKrnlAddr The kernel string address.
405 * @param uUserAddr The userland address.
406 * @param cbMax The maximum number of bytes to copy. Will stop
407 * earlier if a zero byte is encountered.
408 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
409 */
410void dtrace_copyoutstr(uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cbMax, volatile uint16_t *pfFlags)
411{
412 const char *pszSrc = (const char *)uKrnlAddr;
413 size_t cbActual = RTStrNLen(pszSrc, cbMax);
414 cbActual += cbActual < cbMax;
415 dtrace_copyout(uKrnlAddr, uUserAddr, cbActual, pfFlags);
416}
417
418
419/**
420 * Get the caller @a cCallFrames call frames up the stack.
421 *
422 * @returns The caller's return address or ~(uintptr_t)0.
423 * @param cCallFrames The number of frames.
424 */
425uintptr_t dtrace_caller(int cCallFrames)
426{
427 PVBDTSTACKDATA pData = vboxDtGetStackData();
428 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
429 return pData->u.ProbeFireKernel.uCaller;
430 return ~(uintptr_t)0;
431}
432
433
434/**
435 * Get argument number @a iArg @a cCallFrames call frames up the stack.
436 *
437 * @returns The argument value, or UINT64_MAX if it cannot be retrieved.
438 * @param iArg The argument to get.
439 * @param cCallFrames The number of frames.
440 */
441uint64_t dtrace_getarg(int iArg, int cCallFrames)
442{
443 PVBDTSTACKDATA pData = vboxDtGetStackData();
444 AssertReturn(iArg >= 5, UINT64_MAX);
445
446 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
447 return pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
448 return UINT64_MAX;
449}
450
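/* Illustrative note (not in the original): probe arguments 0..4 travel as the direct
   parameters of dtrace_probe() (see vbdt_ProbeFireKernel below), so the DTrace core
   never asks this function for them. Arguments 5 and up are read straight off the
   probe-fire stack via pauStackArgs, hence the iArg - 5 bias: for a hypothetical
   probe fired with seven arguments, iArg == 5 maps to pauStackArgs[0] and
   iArg == 6 to pauStackArgs[1]. */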
451
452/**
453 * Produce a traceback of the kernel stack.
454 *
455 * @param paPcStack Where to return the program counters.
456 * @param cMaxFrames The maximum number of PCs to return.
457 * @param cSkipFrames The number of artificial callstack frames to
458 * skip at the top.
459 * @param pIntr Not sure what this is...
460 */
461void dtrace_getpcstack(pc_t *paPcStack, int cMaxFrames, int cSkipFrames, uint32_t *pIntr)
462{
463 int iFrame = 0;
464 while (iFrame < cMaxFrames)
465 {
466 paPcStack[iFrame] = NULL;
467 iFrame++;
468 }
469}
470
471
472/**
473 * Get the number of call frames on the stack.
474 *
475 * @returns The stack depth.
476 * @param cSkipFrames The number of artificial callstack frames to
477 * skip at the top.
478 */
479int dtrace_getstackdepth(int cSkipFrames)
480{
481 return 1;
482}
483
484
485/**
486 * Produce a traceback of the userland stack.
487 *
488 * @param paPcStack Where to return the program counters.
489 * @param paFpStack Where to return the frame pointers.
490 * @param cMaxFrames The maximum number of frames to return.
491 */
492void dtrace_getufpstack(uint64_t *paPcStack, uint64_t *paFpStack, int cMaxFrames)
493{
494 int iFrame = 0;
495 while (iFrame < cMaxFrames)
496 {
497 paPcStack[iFrame] = 0;
498 paFpStack[iFrame] = 0;
499 iFrame++;
500 }
501}
502
503
504/**
505 * Produce a traceback of the userland stack.
506 *
507 * @param paPcStack Where to return the program counters.
508 * @param cMaxFrames The maximum number of frames to return.
509 */
510void dtrace_getupcstack(uint64_t *paPcStack, int cMaxFrames)
511{
512 int iFrame = 0;
513 while (iFrame < cMaxFrames)
514 {
515 paPcStack[iFrame] = 0;
516 iFrame++;
517 }
518}
519
520
521/**
522 * Computes the depth of the userland stack.
523 */
524int dtrace_getustackdepth(void)
525{
526 return 0;
527}
528
529
530/**
531 * Get the current IPL/IRQL.
532 *
533 * @returns Current level.
534 */
535int dtrace_getipl(void)
536{
537#ifdef RT_ARCH_AMD64
538 /* CR8 is normally the same as IRQL / IPL on AMD64. */
539 return ASMGetCR8();
540#else
541 /* Just fake it on x86. */
542 return !ASMIntAreEnabled();
543#endif
544}
545
546
547/**
548 * Get current monotonic timestamp.
549 *
550 * @returns Timestamp, nano seconds.
551 */
552hrtime_t dtrace_gethrtime(void)
553{
554 return RTTimeNanoTS();
555}
556
557
558/**
559 * Get current walltime.
560 *
561 * @returns Timestamp, nano seconds.
562 */
563hrtime_t dtrace_gethrestime(void)
564{
565 /** @todo try get better resolution here somehow ... */
566 RTTIMESPEC Now;
567 return RTTimeSpecGetNano(RTTimeNow(&Now));
568}
569
570
571/**
572 * DTrace panic routine.
573 *
574 * @param pszFormat Panic message.
575 * @param va Arguments to the panic message.
576 */
577void dtrace_vpanic(const char *pszFormat, va_list va)
578{
579 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
580 RTAssertMsg2WeakV(pszFormat, va);
581 RTR0AssertPanicSystem();
582 for (;;)
583 {
584 ASMBreakpoint();
585 volatile char *pchCrash = (volatile char *)~(uintptr_t)0;
586 *pchCrash = '\0';
587 }
588}
589
590
591/**
592 * DTrace panic routine.
593 *
594 * @param pszFormat Panic message.
595 * @param ... Arguments to the panic message.
596 */
597void VBoxDtPanic(const char *pszFormat, ...)
598{
599 va_list va;
600 va_start(va, pszFormat);
601 dtrace_vpanic(pszFormat, va);
602 va_end(va);
603}
604
605
606/**
607 * DTrace kernel message routine.
608 *
609 * @param pszFormat Kernel message.
610 * @param ... Arguments to the panic message.
611 */
612void VBoxDtCmnErr(int iLevel, const char *pszFormat, ...)
613{
614 va_list va;
615 va_start(va, pszFormat);
616 SUPR0Printf("%N", pszFormat, va);
617 va_end(va);
618}
619
620
621/** uprintf implementation */
622void VBoxDtUPrintf(const char *pszFormat, ...)
623{
624 va_list va;
625 va_start(va, pszFormat);
626 VBoxDtUPrintfV(pszFormat, va);
627 va_end(va);
628}
629
630
631/** vuprintf implementation */
632void VBoxDtUPrintfV(const char *pszFormat, va_list va)
633{
634 SUPR0Printf("%N", pszFormat, va);
635}
636
637
638/* CRED implementation. */
639cred_t *VBoxDtGetCurrentCreds(void)
640{
641 PVBDTSTACKDATA pData = vboxDtGetStackData();
642 if (!pData->pCred)
643 {
644 struct VBoxDtCred *pCred;
645 int rc = RTMemAllocEx(sizeof(*pCred), 0, RTMEMALLOCEX_FLAGS_ANY_CTX, (void **)&pCred);
646 AssertFatalRC(rc);
647 pCred->cr_refs = 1;
648 /** @todo get the right creds on unix systems. */
649 pCred->cr_uid = 0;
650 pCred->cr_ruid = 0;
651 pCred->cr_suid = 0;
652 pCred->cr_gid = 0;
653 pCred->cr_rgid = 0;
654 pCred->cr_sgid = 0;
655 pCred->cr_zone = 0;
656 pData->pCred = pCred;
657 }
658
659 return pData->pCred;
660}
661
662
663/* crhold implementation */
664void VBoxDtCredHold(struct VBoxDtCred *pCred)
665{
666 int32_t cRefs = ASMAtomicIncS32(&pCred->cr_refs);
667 Assert(cRefs > 1);
668}
669
670
671/* crfree implementation */
672void VBoxDtCredFree(struct VBoxDtCred *pCred)
673{
674 int32_t cRefs = ASMAtomicDecS32(&pCred->cr_refs);
675 Assert(cRefs >= 0);
676 if (!cRefs)
677 RTMemFree(pCred);
678}
679
680/** Spinlock protecting the thread structures. */
681static RTSPINLOCK g_hThreadSpinlock = NIL_RTSPINLOCK;
682/** List of threads by usage age. */
683static RTLISTANCHOR g_ThreadAgeList;
684/** Hash table for looking up thread structures. */
685static struct VBoxDtThread *g_apThreadsHash[16384];
686/** Fake kthread_t structures.
687 * The size of this array is making horrible ASSUMPTIONS about the number of
688 * threads in the system that will be subjected to DTracing. */
689static struct VBoxDtThread g_aThreads[8192];
690
691
692static int vboxDtInitThreadDb(void)
693{
694 int rc = RTSpinlockCreate(&g_hThreadSpinlock);
695 if (RT_FAILURE(rc))
696 return rc;
697
698 RTListInit(&g_ThreadAgeList);
699 for (uint32_t i = 0; i < RT_ELEMENTS(g_aThreads); i++)
700 {
701 g_aThreads[i].hNative = NIL_RTNATIVETHREAD;
702 g_aThreads[i].uPid = NIL_RTPROCESS;
703 RTListPrepend(&g_ThreadAgeList, &g_aThreads[i].AgeEntry);
704 }
705
706 return VINF_SUCCESS;
707}
708
709
710static void vboxDtTermThreadDb(void)
711{
712 RTSpinlockDestroy(g_hThreadSpinlock);
713 g_hThreadSpinlock = NIL_RTSPINLOCK;
714 RTListInit(&g_ThreadAgeList);
715}
716
717
718/* curthread implementation, providing a fake kthread_t. */
719struct VBoxDtThread *VBoxDtGetCurrentThread(void)
720{
721 /*
722 * Once we've retrieved a thread, we hold on to it until the thread exits
723 * the VBoxDTrace module.
724 */
725 PVBDTSTACKDATA pData = vboxDtGetStackData();
726 if (pData->pThread)
727 {
728 AssertPtr(pData->pThread);
729 Assert(pData->pThread->hNative == RTThreadNativeSelf());
730 Assert(pData->pThread->uPid == RTProcSelf());
731 Assert(RTListIsEmpty(&pData->pThread->AgeEntry));
732 return pData->pThread;
733 }
734
735 /*
736 * Lookup the thread in the hash table.
737 */
738 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
739 RTPROCESS uPid = RTProcSelf();
740 uintptr_t iHash = (hNativeSelf * 2654435761) % RT_ELEMENTS(g_apThreadsHash);
741 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
742
743 RTSpinlockAcquireNoInts(g_hThreadSpinlock, &Tmp);
744
745 struct VBoxDtThread *pThread = g_apThreadsHash[iHash];
746 while (pThread)
747 {
748 if (pThread->hNative == hNativeSelf)
749 {
750 if (pThread->uPid != uPid)
751 {
752 /* Re-initialize the reused thread. */
753 pThread->uPid = uPid;
754 pThread->t_dtrace_vtime = 0;
755 pThread->t_dtrace_start = 0;
756 pThread->t_dtrace_stop = 0;
757 pThread->t_dtrace_scrpc = 0;
758 pThread->t_dtrace_astpc = 0;
759 pThread->t_predcache = 0;
760 }
761
762 /* Hold the thread in the on-stack data, making sure it does not
763 get reused till the thread leaves VBoxDTrace. */
764 RTListNodeRemove(&pThread->AgeEntry);
765 pData->pThread = pThread;
766
767 RTSpinlockReleaseNoInts(g_hThreadSpinlock, &Tmp);
768 return pThread;
769 }
770
771 pThread = pThread->pNext;
772 }
773
774 /*
775 * Unknown thread. Allocate a new entry, recycling unused or old ones.
776 */
777 pThread = RTListGetLast(&g_ThreadAgeList, struct VBoxDtThread, AgeEntry);
778 AssertFatal(pThread);
779 RTListNodeRemove(&pThread->AgeEntry);
780 if (pThread->hNative != NIL_RTNATIVETHREAD)
781 {
782 uintptr_t iHash2 = (pThread->hNative * 2654435761) % RT_ELEMENTS(g_apThreadsHash);
783 if (g_apThreadsHash[iHash2] == pThread)
784 g_apThreadsHash[iHash2] = pThread->pNext;
785 else
786 {
787 for (struct VBoxDtThread *pPrev = g_apThreadsHash[iHash2]; ; pPrev = pPrev->pNext)
788 {
789 AssertPtr(pPrev);
790 if (pPrev->pNext == pThread)
791 {
792 pPrev->pNext = pThread->pNext;
793 break;
794 }
795 }
796 }
797 }
798
799 /*
800 * Initialize the data.
801 */
802 pThread->t_dtrace_vtime = 0;
803 pThread->t_dtrace_start = 0;
804 pThread->t_dtrace_stop = 0;
805 pThread->t_dtrace_scrpc = 0;
806 pThread->t_dtrace_astpc = 0;
807 pThread->t_predcache = 0;
808 pThread->hNative = hNativeSelf;
809 pThread->uPid = uPid;
810
811 /*
812 * Add it to the hash as well as the on-stack data.
813 */
814 pThread->pNext = g_apThreadsHash[iHash];
815 g_apThreadsHash[iHash] = pThread;
816
817 pData->pThread = pThread;
818
819 RTSpinlockReleaseNoInts(g_hThreadSpinlock, &Tmp);
820 return pThread;
821}
822
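/* Illustrative note (not in the original): 2654435761 is the classic Knuth
   multiplicative-hash constant (close to 2^32 divided by the golden ratio); multiplying
   the native thread handle by it before taking the modulo over the 16384 hash buckets
   spreads handles across the table even when they are closely spaced pointers or
   small integers. */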
823
824/**
825 * Called by the stack data destructor.
826 *
827 * @param pThread The thread to release.
828 *
829 */
830static void VBoxDtReleaseThread(struct VBoxDtThread *pThread)
831{
832 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
833 RTSpinlockAcquireNoInts(g_hThreadSpinlock, &Tmp);
834
835 RTListAppend(&g_ThreadAgeList, &pThread->AgeEntry);
836
837 RTSpinlockReleaseNoInts(g_hThreadSpinlock, &Tmp);
838}
839
840
841
842
843/*
844 *
845 * Virtual Memory / Resource Allocator.
846 * Virtual Memory / Resource Allocator.
847 * Virtual Memory / Resource Allocator.
848 *
849 */
850
851
852/** The number of bits per chunk.
853 * @remarks The 32 bytes are for heap headers and such like. */
854#define VBOXDTVMEMCHUNK_BITS ( ((_64K - 32 - sizeof(uint32_t) * 2) / sizeof(uint32_t)) * 32)
855
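/* Illustrative arithmetic (not in the original): with _64K == 65536 and
   sizeof(uint32_t) == 4, VBOXDTVMEMCHUNK_BITS works out to
   ((65536 - 32 - 8) / 4) * 32 = 16374 * 32 = 523968 bits. Each chunk therefore
   tracks roughly half a million resource IDs, and its bitmap (16374 uint32_t =
   65496 bytes) plus the two leading uint32_t fields comes to 65504 bytes, leaving
   exactly the 32 bytes of slack reserved for heap headers and the like. */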
856/**
857 * Resource allocator chunk.
858 */
859typedef struct VBoxDtVMemChunk
860{
861 /** The ordinal (unbased) of the first item. */
862 uint32_t iFirst;
863 /** The current number of free items in this chunk. */
864 uint32_t cCurFree;
865 /** The allocation bitmap. */
866 uint32_t bm[VBOXDTVMEMCHUNK_BITS / 32];
867} VBOXDTVMEMCHUNK;
868/** Pointer to a resource allocator chunk. */
869typedef VBOXDTVMEMCHUNK *PVBOXDTVMEMCHUNK;
870
871
872
873/**
874 * Resource allocator instance.
875 */
876typedef struct VBoxDtVMem
877{
878 /** Spinlock protecting the data. */
879 RTSPINLOCK hSpinlock;
880 /** Magic value. */
881 uint32_t u32Magic;
882 /** The current number of free items in the chunks. */
883 uint32_t cCurFree;
884 /** The current number of chunks that we have allocated. */
885 uint32_t cCurChunks;
886 /** The configured resource base. */
887 uint32_t uBase;
888 /** The configured resource end (base included). */
889 uint32_t uEnd;
890 /** The size of the apChunks array. */
891 uint32_t cMaxChunks;
892 /** Array of chunk pointers.
893 * (The size is determined at creation.) */
894 PVBOXDTVMEMCHUNK apChunks[1];
895} VBOXDTVMEM;
896/** Pointer to a resource allocator instance. */
897typedef VBOXDTVMEM *PVBOXDTVMEM;
898
899/** Magic value for the VBOXDTVMEM structure. */
900#define VBOXDTVMEM_MAGIC RT_MAKE_U32_FROM_U8('V', 'M', 'e', 'm')
901
902
903/* vmem_create implementation */
904struct VBoxDtVMem *VBoxDtVMemCreate(const char *pszName, void *pvBase, size_t cb, size_t cbUnit,
905 PFNRT pfnAlloc, PFNRT pfnFree, struct VBoxDtVMem *pSrc,
906 size_t cbQCacheMax, uint32_t fFlags)
907{
908 /*
909 * Assert preconditions of this implementation.
910 */
911 AssertMsgReturn((uintptr_t)pvBase <= UINT32_MAX, ("%p\n", pvBase), NULL);
912 AssertMsgReturn((uintptr_t)pvBase + cb - 1 <= UINT32_MAX, ("%p %zu\n", pvBase, cb), NULL);
913 AssertMsgReturn(cbUnit == 1, ("%zu\n", cbUnit), NULL);
914 AssertReturn(!pfnAlloc, NULL);
915 AssertReturn(!pfnFree, NULL);
916 AssertReturn(!pSrc, NULL);
917 AssertReturn(!cbQCacheMax, NULL);
918 AssertReturn(fFlags & VM_SLEEP, NULL);
919 AssertReturn(fFlags & VMC_IDENTIFIER, NULL);
920
921 /*
922 * Allocate the instance.
923 */
924 uint32_t cChunks = (uint32_t)(cb - (uintptr_t)pvBase);
925 cChunks += VBOXDTVMEMCHUNK_BITS - 1;
926 cChunks /= VBOXDTVMEMCHUNK_BITS;
927 PVBOXDTVMEM pThis = (PVBOXDTVMEM)RTMemAllocZ(RT_OFFSETOF(VBOXDTVMEM, apChunks[cChunks]));
928 if (!pThis)
929 return NULL;
930 int rc = RTSpinlockCreate(&pThis->hSpinlock);
931 if (RT_FAILURE(rc))
932 {
933 RTMemFree(pThis);
934 return NULL;
935 }
936 pThis->u32Magic = VBOXDTVMEM_MAGIC;
937 pThis->cCurFree = 0;
938 pThis->cCurChunks = 0;
939 pThis->uBase = (uint32_t)(uintptr_t)pvBase;
940 pThis->uEnd = (uint32_t)cb;
941 pThis->cMaxChunks = cChunks;
942
943 return pThis;
944}
945
946
947/* vmem_destroy implementation */
948void VBoxDtVMemDestroy(struct VBoxDtVMem *pThis)
949{
950 if (!pThis)
951 return;
952 AssertPtrReturnVoid(pThis);
953 AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);
954
955 /*
956 * Invalidate the instance.
957 */
958 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
959 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp); /* paranoia */
960 pThis->u32Magic = 0;
961 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
962 RTSpinlockDestroy(pThis->hSpinlock);
963
964 /*
965 * Free the chunks, then the instance.
966 */
967 uint32_t iChunk = pThis->cCurChunks;
968 while (iChunk-- > 0)
969 {
970 RTMemFree(pThis->apChunks[iChunk]);
971 pThis->apChunks[iChunk] = NULL;
972 }
973 RTMemFree(pThis);
974}
975
976
977/* vmem_alloc implementation */
978void *VBoxDtVMemAlloc(struct VBoxDtVMem *pThis, size_t cbMem, uint32_t fFlags)
979{
980 /*
981 * Validate input.
982 */
983 AssertReturn(fFlags & VM_BESTFIT, NULL);
984 AssertReturn(fFlags & VM_SLEEP, NULL);
985 AssertReturn(cbMem == 1, NULL);
986 AssertPtrReturn(pThis, NULL);
987 AssertReturn(pThis->u32Magic == VBOXDTVMEM_MAGIC, NULL);
988
989 /*
990 * Allocation loop.
991 */
992 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
993 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
994 for (;;)
995 {
996 PVBOXDTVMEMCHUNK pChunk;
997 uint32_t const cChunks = pThis->cCurChunks;
998
999 if (RT_LIKELY(pThis->cCurFree > 0))
1000 {
1001 for (uint32_t iChunk = 0; iChunk < cChunks; iChunk++)
1002 {
1003 pChunk = pThis->apChunks[iChunk];
1004 if (pChunk->cCurFree > 0)
1005 {
1006 int iBit = ASMBitFirstClear(pChunk->bm, VBOXDTVMEMCHUNK_BITS);
1007 AssertMsgReturnStmt(iBit >= 0 && (unsigned)iBit < VBOXDTVMEMCHUNK_BITS, ("%d\n", iBit),
1008 RTSpinlockRelease(pThis->hSpinlock, &Tmp),
1009 NULL);
1010
1011 ASMBitSet(pChunk->bm, iBit);
1012 pChunk->cCurFree--;
1013 pThis->cCurFree--;
1014
1015 uint32_t iRet = (uint32_t)iBit + pChunk->iFirst + pThis->uBase;
1016 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1017 return (void *)(uintptr_t)iRet;
1018 }
1019 }
1020 AssertFailedBreak();
1021 }
1022
1023 /* Out of resources? */
1024 if (cChunks >= pThis->cMaxChunks)
1025 break;
1026
1027 /*
1028 * Allocate another chunk.
1029 */
1030 uint32_t const iFirstBit = cChunks > 0 ? pThis->apChunks[cChunks - 1]->iFirst + VBOXDTVMEMCHUNK_BITS : 0;
1031 uint32_t const cFreeBits = cChunks + 1 == pThis->cMaxChunks
1032 ? pThis->uEnd - pThis->uBase - iFirstBit
1033 : VBOXDTVMEMCHUNK_BITS;
1034 Assert(cFreeBits <= VBOXDTVMEMCHUNK_BITS);
1035
1036 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1037
1038 pChunk = (PVBOXDTVMEMCHUNK)RTMemAllocZ(sizeof(*pChunk));
1039 if (!pChunk)
1040 return NULL;
1041
1042 pChunk->iFirst = iFirstBit;
1043 pChunk->cCurFree = cFreeBits;
1044 if (cFreeBits != VBOXDTVMEMCHUNK_BITS)
1045 {
1046 /* lazy bird. */
1047 uint32_t iBit = cFreeBits;
1048 while (iBit < VBOXDTVMEMCHUNK_BITS)
1049 {
1050 ASMBitSet(pChunk->bm, iBit);
1051 iBit++;
1052 }
1053 }
1054
1055 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1056
1057 /*
1058 * Insert the new chunk. If someone raced us here, we'll drop it to
1059 * avoid wasting resources.
1060 */
1061 if (pThis->cCurChunks == cChunks)
1062 {
1063 pThis->apChunks[cChunks] = pChunk;
1064 pThis->cCurFree += pChunk->cCurFree;
1065 pThis->cCurChunks += 1;
1066 }
1067 else
1068 {
1069 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1070 RTMemFree(pChunk);
1071 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1072 }
1073 }
1074 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1075
1076 return NULL;
1077}
1078
1079/* vmem_free implementation */
1080void VBoxDtVMemFree(struct VBoxDtVMem *pThis, void *pvMem, size_t cbMem)
1081{
1082 /*
1083 * Validate input.
1084 */
1085 AssertReturnVoid(cbMem == 1);
1086 AssertPtrReturnVoid(pThis);
1087 AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);
1088
1089 AssertReturnVoid((uintptr_t)pvMem < UINT32_MAX);
1090 uint32_t uMem = (uint32_t)(uintptr_t)pvMem;
1091 AssertReturnVoid(uMem >= pThis->uBase);
1092 AssertReturnVoid(uMem < pThis->uEnd);
1093
1094 uMem -= pThis->uBase;
1095
1096 /*
1097 * Free it.
1098 */
1099 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
1100 RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
1101 uint32_t const iChunk = uMem / VBOXDTVMEMCHUNK_BITS;
1102 if (iChunk < pThis->cCurChunks)
1103 {
1104 PVBOXDTVMEMCHUNK pChunk = pThis->apChunks[iChunk];
1105 uint32_t iBit = uMem - pChunk->iFirst;
1106 AssertReturnVoidStmt(iBit < VBOXDTVMEMCHUNK_BITS, RTSpinlockRelease(pThis->hSpinlock, &Tmp));
1107 AssertReturnVoidStmt(ASMBitTestAndClear(pChunk->bm, iBit), RTSpinlockRelease(pThis->hSpinlock, &Tmp));
1108
1109 pChunk->cCurFree++;
1110 pThis->cCurFree++;
1111 }
1112
1113 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
1114}
1115
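/* Illustrative sketch (not part of the original file): despite the vmem_* names this
   is purely an ID allocator (cbUnit == 1, allocations of size 1), which is all the
   DTrace core needs from vmem here. A hypothetical use, with made-up name and range: */
#if 0
static void vbdtExampleVMemUsage(void)
{
    /* Manage a range of about a million IDs starting at 1. VM_SLEEP and
       VMC_IDENTIFIER are required by the preconditions asserted in VBoxDtVMemCreate. */
    struct VBoxDtVMem *pArena = VBoxDtVMemCreate("example", (void *)1, 1000000, 1,
                                                 NULL /*pfnAlloc*/, NULL /*pfnFree*/,
                                                 NULL /*pSrc*/, 0 /*cbQCacheMax*/,
                                                 VM_SLEEP | VMC_IDENTIFIER);
    if (pArena)
    {
        void *pvId = VBoxDtVMemAlloc(pArena, 1, VM_BESTFIT | VM_SLEEP); /* an ID, not a pointer */
        if (pvId)
            VBoxDtVMemFree(pArena, pvId, 1);
        VBoxDtVMemDestroy(pArena);
    }
}
#endif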
1116
1117/*
1118 *
1119 * Memory Allocators.
1120 * Memory Allocators.
1121 * Memory Allocators.
1122 *
1123 */
1124
1125
1126/* kmem_alloc implementation */
1127void *VBoxDtKMemAlloc(size_t cbMem, uint32_t fFlags)
1128{
1129 void *pvMem;
1130 int rc = RTMemAllocEx(cbMem, 0, fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0, &pvMem);
1131 AssertRCReturn(rc, NULL);
1132 AssertPtr(pvMem);
1133 return pvMem;
1134}
1135
1136
1137/* kmem_zalloc implementation */
1138void *VBoxDtKMemAllocZ(size_t cbMem, uint32_t fFlags)
1139{
1140 void *pvMem;
1141 int rc = RTMemAllocEx(cbMem, 0,
1142 (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED,
1143 &pvMem);
1144 AssertRCReturn(rc, NULL);
1145 AssertPtr(pvMem);
1146 return pvMem;
1147}
1148
1149
1150/* kmem_free implementation */
1151void VBoxDtKMemFree(void *pvMem, size_t cbMem)
1152{
1153 RTMemFreeEx(pvMem, cbMem);
1154}
1155
1156
1157/**
1158 * Memory cache mockup structure.
1159 * No slab allocator here!
1160 */
1161struct VBoxDtMemCache
1162{
1163 uint32_t u32Magic;
1164 size_t cbBuf;
1165 size_t cbAlign;
1166};
1167
1168
1169/* Limited kmem_cache_create implementation. */
1170struct VBoxDtMemCache *VBoxDtKMemCacheCreate(const char *pszName, size_t cbBuf, size_t cbAlign,
1171 PFNRT pfnCtor, PFNRT pfnDtor, PFNRT pfnReclaim,
1172 void *pvUser, void *pvVM, uint32_t fFlags)
1173{
1174 /*
1175 * Check the input.
1176 */
1177 AssertReturn(cbBuf > 0 && cbBuf < _1G, NULL);
1178 AssertReturn(RT_IS_POWER_OF_TWO(cbAlign), NULL);
1179 AssertReturn(!pfnCtor, NULL);
1180 AssertReturn(!pfnDtor, NULL);
1181 AssertReturn(!pfnReclaim, NULL);
1182 AssertReturn(!pvUser, NULL);
1183 AssertReturn(!pvVM, NULL);
1184 AssertReturn(!fFlags, NULL);
1185
1186 /*
1187 * Create a parameter container. Don't bother with anything fancy here yet,
1188 * just get something working.
1189 */
1190 struct VBoxDtMemCache *pThis = (struct VBoxDtMemCache *)RTMemAlloc(sizeof(*pThis));
1191 if (!pThis)
1192 return NULL;
1193
1194 pThis->cbAlign = cbAlign;
1195 pThis->cbBuf = cbBuf;
1196 return pThis;
1197}
1198
1199
1200/* Limited kmem_cache_destroy implementation. */
1201void VBoxDtKMemCacheDestroy(struct VBoxDtMemCache *pThis)
1202{
1203 RTMemFree(pThis);
1204}
1205
1206
1207/* kmem_cache_alloc implementation. */
1208void *VBoxDtKMemCacheAlloc(struct VBoxDtMemCache *pThis, uint32_t fFlags)
1209{
1210 void *pvMem;
1211 int rc = RTMemAllocEx(pThis->cbBuf,
1212 pThis->cbAlign,
1213 (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED,
1214 &pvMem);
1215 AssertRCReturn(rc, NULL);
1216 AssertPtr(pvMem);
1217 return pvMem;
1218}
1219
1220
1221/* kmem_cache_free implementation. */
1222void VBoxDtKMemCacheFree(struct VBoxDtMemCache *pThis, void *pvMem)
1223{
1224 RTMemFreeEx(pvMem, pThis->cbBuf);
1225}
1226
1227
1228/*
1229 *
1230 * Mutex Semaphore Wrappers.
1231 *
1232 */
1233
1234
1235/** Initializes a mutex. */
1236int VBoxDtMutexInit(struct VBoxDtMutex *pMtx)
1237{
1238 AssertReturn(pMtx != &g_DummyMtx, -1);
1239 AssertPtr(pMtx);
1240
1241 pMtx->hOwner = NIL_RTNATIVETHREAD;
1242 pMtx->hMtx = NIL_RTSEMMUTEX;
1243 int rc = RTSemMutexCreate(&pMtx->hMtx);
1244 if (RT_SUCCESS(rc))
1245 return 0;
1246 return -1;
1247}
1248
1249
1250/** Deletes a mutex. */
1251void VBoxDtMutexDelete(struct VBoxDtMutex *pMtx)
1252{
1253 AssertReturnVoid(pMtx != &g_DummyMtx);
1254 AssertPtr(pMtx);
1255 if (pMtx->hMtx == NIL_RTSEMMUTEX || pMtx->hMtx == NULL)
1256 return;
1257
1258 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1259 int rc = RTSemMutexDestroy(pMtx->hMtx); AssertRC(rc);
1260 pMtx->hMtx = NIL_RTSEMMUTEX;
1261}
1262
1263
1264/* mutex_enter implementation */
1265void VBoxDtMutexEnter(struct VBoxDtMutex *pMtx)
1266{
1267 AssertPtr(pMtx);
1268 if (pMtx == &g_DummyMtx)
1269 return;
1270
1271 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
1272
1273 int rc = RTSemMutexRequest(pMtx->hMtx, RT_INDEFINITE_WAIT);
1274 AssertFatalRC(rc);
1275
1276 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1277 pMtx->hOwner = hSelf;
1278}
1279
1280
1281/* mutex_exit implementation */
1282void VBoxDtMutexExit(struct VBoxDtMutex *pMtx)
1283{
1284 AssertPtr(pMtx);
1285 if (pMtx == &g_DummyMtx)
1286 return;
1287
1288 Assert(pMtx->hOwner == RTThreadNativeSelf());
1289
1290 pMtx->hOwner = NIL_RTNATIVETHREAD;
1291 int rc = RTSemMutexRelease(pMtx->hMtx);
1292 AssertFatalRC(rc);
1293}
1294
1295
1296/* MUTEX_HELD implementation */
1297bool VBoxDtMutexIsOwner(struct VBoxDtMutex *pMtx)
1298{
1299 AssertPtrReturn(pMtx, false);
1300 if (pMtx == &g_DummyMtx)
1301 return true;
1302 return pMtx->hOwner == RTThreadNativeSelf();
1303}
1304
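/* Illustrative sketch (not part of the original file): the wrappers map the Solaris
   kmutex_t idiom onto an IPRT mutex semaphore and track the owner so MUTEX_HELD style
   assertions keep working. A hypothetical caller: */
#if 0
static int vbdtExampleMutexUsage(void)
{
    struct VBoxDtMutex Mtx;
    if (VBoxDtMutexInit(&Mtx) != 0)       /* returns 0 on success, -1 on failure */
        return -1;

    VBoxDtMutexEnter(&Mtx);
    Assert(VBoxDtMutexIsOwner(&Mtx));     /* MUTEX_HELD equivalent */
    VBoxDtMutexExit(&Mtx);

    VBoxDtMutexDelete(&Mtx);
    return 0;
}
#endif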
1305
1306
1307/*
1308 *
1309 * Helpers for handling VTG structures.
1310 * Helpers for handling VTG structures.
1311 * Helpers for handling VTG structures.
1312 *
1313 */
1314
1315
1316
1317/**
1318 * Converts an attribute from VTG description speak to DTrace.
1319 *
1320 * @param pDtAttr The DTrace attribute (dst).
1321 * @param pVtgAttr The VTG attribute descriptor (src).
1322 */
1323static void vboxDtVtgConvAttr(dtrace_attribute_t *pDtAttr, PCVTGDESCATTR pVtgAttr)
1324{
1325 pDtAttr->dtat_name = pVtgAttr->u8Code - 1;
1326 pDtAttr->dtat_data = pVtgAttr->u8Data - 1;
1327 pDtAttr->dtat_class = pVtgAttr->u8DataDep - 1;
1328}
1329
1330/**
1331 * Gets a string from the string table.
1332 *
1333 * @returns Pointer to the string.
1334 * @param pVtgHdr The VTG object header.
1335 * @param offStrTab The string table offset.
1336 */
1337static const char *vboxDtVtgGetString(PVTGOBJHDR pVtgHdr, uint32_t offStrTab)
1338{
1339 Assert(offStrTab < pVtgHdr->cbStrTab);
1340 return &pVtgHdr->pachStrTab[offStrTab];
1341}
1342
1343
1344
1345/*
1346 *
1347 * DTrace Provider Interface.
1348 * DTrace Provider Interface.
1349 * DTrace Provider Interface.
1350 *
1351 */
1352
1353
1354/**
1355 * @callback_method_impl{dtrace_pops_t,dtps_provide}
1356 */
1357static void vboxDtPOps_Provide(void *pvProv, const dtrace_probedesc_t *pDtProbeDesc)
1358{
1359 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1360 PVTGPROBELOC pProbeLoc = pProv->pHdr->paProbLocs;
1361 PVTGPROBELOC pProbeLocEnd = pProv->pHdr->paProbLocsEnd;
1362 dtrace_provider_id_t idProvider = pProv->TracerData.DTrace.idProvider;
1363 size_t const cbFnNmBuf = _4K + _1K;
1364 char *pszFnNmBuf;
1365 uint16_t idxProv;
1366
1367 if (pDtProbeDesc)
1368 return; /* We don't generate probes, so never mind these requests. */
1369
1370 if (pProv->TracerData.DTrace.fZombie)
1371 return;
1372
1373 if (pProv->TracerData.DTrace.cProvidedProbes >= pProbeLocEnd - pProbeLoc)
1374 return;
1375
1376 /* Need a buffer for extracting the function names and mangling them in
1377 case of collision. */
1378 pszFnNmBuf = (char *)RTMemAlloc(cbFnNmBuf);
1379 if (!pszFnNmBuf)
1380 return;
1381
1382 /*
1383 * Iterate the probe location list and register all probes related to
1384 * this provider.
1385 */
1386 idxProv = (uint16_t)(pProv->pDesc - &pProv->pHdr->paProviders[0]);
1387 while ((uintptr_t)pProbeLoc < (uintptr_t)pProbeLocEnd)
1388 {
1389 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1390 if ( pProbeDesc->idxProvider == idxProv
1391 && pProbeLoc->idProbe == UINT32_MAX)
1392 {
1393 /* The function name normally needs to be stripped since we're
1394 using C++ compilers for most of the code. ASSUMES nobody is
1395 brave/stupid enough to use function pointer returns without
1396 properly typedef'ing them. */
1397 const char *pszPrbName = vboxDtVtgGetString(pProv->pHdr, pProbeDesc->offName);
1398 const char *pszFunc = pProbeLoc->pszFunction;
1399 const char *psz = strchr(pProbeLoc->pszFunction, '(');
1400 size_t cch;
1401 if (psz)
1402 {
1403 /* skip blanks preceding the parameter parenthesis. */
1404 while ( (uintptr_t)psz > (uintptr_t)pProbeLoc->pszFunction
1405 && RT_C_IS_BLANK(psz[-1]))
1406 psz--;
1407
1408 /* Find the start of the function name. */
1409 pszFunc = psz - 1;
1410 while ((uintptr_t)pszFunc > (uintptr_t)pProbeLoc->pszFunction)
1411 {
1412 char ch = pszFunc[-1];
1413 if (!RT_C_IS_ALNUM(ch) && ch != '_' && ch != ':')
1414 break;
1415 pszFunc--;
1416 }
1417 cch = psz - pszFunc;
1418 }
1419 else
1420 cch = strlen(pszFunc);
1421 RTStrCopyEx(pszFnNmBuf, cbFnNmBuf, pszFunc, cch);
1422
1423 /* Look up the probe, if we have one in the same function, mangle
1424 the function name a little to avoid having to deal with having
1425 multiple location entries with the same probe ID. (lazy bird) */
1426 Assert(pProbeLoc->idProbe == UINT32_MAX);
1427 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1428 {
1429 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u", pProbeLoc->uLine);
1430 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1431 {
1432 unsigned iOrd = 2;
1433 while (iOrd < 128)
1434 {
1435 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u-%u", pProbeLoc->uLine, iOrd);
1436 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) == DTRACE_IDNONE)
1437 break;
1438 iOrd++;
1439 }
1440 if (iOrd >= 128)
1441 {
1442 LogRel(("VBoxDrv: More than 128 duplicate probe location instances in file %s at line %u, function %s [%s], probe %s\n",
1443 pProbeLoc->pszFile, pProbeLoc->uLine, pProbeLoc->pszFunction, pszFnNmBuf, pszPrbName));
1444 continue;
1445 }
1446 }
1447 }
1448
1449 /* Create the probe. */
1450 AssertCompile(sizeof(pProbeLoc->idProbe) == sizeof(dtrace_id_t));
1451 pProbeLoc->idProbe = dtrace_probe_create(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName,
1452 1 /*aframes*/, pProbeLoc);
1453 pProv->TracerData.DTrace.cProvidedProbes++;
1454 }
1455
1456 pProbeLoc++;
1457 }
1458
1459 RTMemFree(pszFnNmBuf);
1460}
1461
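/* Illustrative note (not in the original): the stripping above turns a hypothetical
   pszFunction such as
       "int MyNamespace::vmR3Example(PVM pVM, unsigned uArg)"
   into the probe function name "MyNamespace::vmR3Example". If the same function fires
   the same probe from several locations, later locations get mangled to
   "MyNamespace::vmR3Example-<line>" and, failing that,
   "MyNamespace::vmR3Example-<line>-<ordinal>", so every probe location keeps a unique
   DTrace probe identity. */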
1462
1463/**
1464 * @callback_method_impl{dtrace_pops_t,dtps_enable}
1465 */
1466static int vboxDtPOps_Enable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1467{
1468 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1469 if (!pProv->TracerData.DTrace.fZombie)
1470 {
1471 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1472 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1473
1474 if (!pProbeLoc->fEnabled)
1475 {
1476 pProbeLoc->fEnabled = 1;
1477 if (ASMAtomicIncU32(&pProbeDesc->u32User) == 1)
1478 pProv->pHdr->pafProbeEnabled[pProbeDesc->idxEnabled] = 1;
1479 }
1480 }
1481
1482 return 0;
1483}
1484
1485
1486/**
1487 * @callback_method_impl{dtrace_pops_t,dtps_disable}
1488 */
1489static void vboxDtPOps_Disable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1490{
1491 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1492 if (!pProv->TracerData.DTrace.fZombie)
1493 {
1494 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1495 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1496
1497 if (pProbeLoc->fEnabled)
1498 {
1499 pProbeLoc->fEnabled = 0;
1500 if (ASMAtomicDecU32(&pProbeDesc->u32User) == 0)
1501 pProv->pHdr->pafProbeEnabled[pProbeDesc->idxEnabled] = 0;
1502 }
1503 }
1504}
1505
1506
1507/**
1508 * @callback_method_impl{dtrace_pops_t,dtps_getargdesc}
1509 */
1510static void vboxDtPOps_GetArgDesc(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1511 dtrace_argdesc_t *pArgDesc)
1512{
1513 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1514 unsigned uArg = pArgDesc->dtargd_ndx;
1515
1516 if (!pProv->TracerData.DTrace.fZombie)
1517 {
1518 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1519 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1520 PVTGDESCARGLIST pArgList = (PVTGDESCARGLIST)((uintptr_t)pProv->pHdr->paArgLists + pProbeDesc->offArgList);
1521
1522 Assert(pProbeDesc->offArgList < pProv->pHdr->cbArgLists);
1523 if (pArgList->cArgs > uArg)
1524 {
1525 const char *pszType = vboxDtVtgGetString(pProv->pHdr, pArgList->aArgs[uArg].offType);
1526 size_t cchType = strlen(pszType);
1527 if (cchType < sizeof(pArgDesc->dtargd_native))
1528 {
1529 memcpy(pArgDesc->dtargd_native, pszType, cchType + 1);
1530 /** @todo mapping */
1531 return;
1532 }
1533 }
1534 }
1535
1536 pArgDesc->dtargd_ndx = DTRACE_ARGNONE;
1537}
1538
1539
1540/**
1541 * @callback_method_impl{dtrace_pops_t,dtps_getargval}
1542 */
1543static uint64_t vboxDtPOps_GetArgVal(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1544 int iArg, int cFrames)
1545{
1546 PVBDTSTACKDATA pData = vboxDtGetStackData();
1547 AssertReturn(iArg >= 5, UINT64_MAX);
1548
1549 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
1550 return pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
1551
1552 if (pData->enmCaller == kVBoxDtCaller_ProbeFireUser)
1553 {
1554 PCSUPDRVTRACERUSRCTX pCtx = pData->u.ProbeFireUser.pCtx;
1555 if (pCtx->cBits == 32)
1556 {
1557 if ((unsigned)iArg < RT_ELEMENTS(pCtx->u.X86.aArgs))
1558 return pCtx->u.X86.aArgs[iArg];
1559 }
1560 else if (pCtx->cBits == 64)
1561 {
1562 if ((unsigned)iArg < RT_ELEMENTS(pCtx->u.Amd64.aArgs))
1563 return pCtx->u.Amd64.aArgs[iArg];
1564 }
1565 else
1566 AssertFailed();
1567 }
1568
1569 return UINT64_MAX;
1570}
1571
1572
1573/**
1574 * @callback_method_impl{dtrace_pops_t,dtps_destroy}
1575 */
1576static void vboxDtPOps_Destroy(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1577{
1578 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1579 if (!pProv->TracerData.DTrace.fZombie)
1580 {
1581 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1582 Assert(!pProbeLoc->fEnabled);
1583 Assert(pProbeLoc->idProbe == idProbe); NOREF(idProbe);
1584 pProbeLoc->idProbe = UINT32_MAX;
1585 }
1586 pProv->TracerData.DTrace.cProvidedProbes--;
1587}
1588
1589
1590
1591/**
1592 * DTrace provider method table.
1593 */
1594static const dtrace_pops_t g_vboxDtVtgProvOps =
1595{
1596 /* .dtps_provide = */ vboxDtPOps_Provide,
1597 /* .dtps_provide_module = */ NULL,
1598 /* .dtps_enable = */ vboxDtPOps_Enable,
1599 /* .dtps_disable = */ vboxDtPOps_Disable,
1600 /* .dtps_suspend = */ NULL,
1601 /* .dtps_resume = */ NULL,
1602 /* .dtps_getargdesc = */ vboxDtPOps_GetArgDesc,
1603 /* .dtps_getargval = */ vboxDtPOps_GetArgVal,
1604 /* .dtps_usermode = */ NULL,
1605 /* .dtps_destroy = */ vboxDtPOps_Destroy
1606};
1607
1608
1609
1610
1611/*
1612 *
1613 * Support Driver Tracer Interface.
1614 * Support Driver Tracer Interface.
1615 * Support Driver Tracer Interface.
1616 *
1617 */
1618
1619
1620
1621/**
1622 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireKernel}
1623 */
1624static DECLCALLBACK(void) vbdt_ProbeFireKernel(struct VTGPROBELOC *pVtgProbeLoc, uintptr_t uArg0, uintptr_t uArg1, uintptr_t uArg2,
1625 uintptr_t uArg3, uintptr_t uArg4)
1626{
1627 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireKernel);
1628
1629 pStackData->u.ProbeFireKernel.uCaller = (uintptr_t)ASMReturnAddress();
1630 pStackData->u.ProbeFireKernel.pauStackArgs = &uArg4 + 1;
1631 dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
1632
1633 VBDT_CLEAR_STACK_DATA();
1634 return ;
1635}
1636
1637
1638/**
1639 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireUser}
1640 */
1641static DECLCALLBACK(void) vbdt_ProbeFireUser(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, PCSUPDRVTRACERUSRCTX pCtx)
1642{
1643 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireUser);
1644
1645 pStackData->u.ProbeFireUser.pCtx = pCtx;
1646 if (pCtx->cBits == 32)
1647 dtrace_probe(pCtx->idProbe,
1648 pCtx->u.X86.aArgs[0],
1649 pCtx->u.X86.aArgs[1],
1650 pCtx->u.X86.aArgs[2],
1651 pCtx->u.X86.aArgs[3],
1652 pCtx->u.X86.aArgs[4]);
1653 else if (pCtx->cBits == 64)
1654 dtrace_probe(pCtx->idProbe,
1655 pCtx->u.Amd64.aArgs[0],
1656 pCtx->u.Amd64.aArgs[1],
1657 pCtx->u.Amd64.aArgs[2],
1658 pCtx->u.Amd64.aArgs[3],
1659 pCtx->u.Amd64.aArgs[4]);
1660 else
1661 AssertFailed();
1662
1663 VBDT_CLEAR_STACK_DATA();
1664}
1665
1666
1667/**
1668 * interface_method_impl{SUPDRVTRACERREG,pfnTracerOpen}
1669 */
1670static DECLCALLBACK(int) vbdt_TracerOpen(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uint32_t uCookie, uintptr_t uArg,
1671 uintptr_t *puSessionData)
1672{
1673 if (uCookie != RT_MAKE_U32_FROM_U8('V', 'B', 'D', 'T'))
1674 return VERR_INVALID_MAGIC;
1675 if (uArg)
1676 return VERR_INVALID_PARAMETER;
1677
1678 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1679
1680 int rc = dtrace_open((dtrace_state_t **)puSessionData, VBoxDtGetCurrentCreds());
1681
1682 VBDT_CLEAR_STACK_DATA();
1683 return RTErrConvertFromErrno(rc);
1684}
1685
1686
1687/**
1688 * interface_method_impl{SUPDRVTRACERREG,pfnTracerIoCtl}
1689 */
1690static DECLCALLBACK(int) vbdt_TracerIoCtl(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData,
1691 uintptr_t uCmd, uintptr_t uArg, int32_t *piRetVal)
1692{
1693 AssertPtrReturn(uSessionData, VERR_INVALID_POINTER);
1694 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1695
1696 int rc = dtrace_ioctl((dtrace_state_t *)uSessionData, (intptr_t)uCmd, (intptr_t)uArg, piRetVal);
1697
1698 VBDT_CLEAR_STACK_DATA();
1699 return RTErrConvertFromErrno(rc);
1700}
1701
1702
1703/**
1704 * interface_method_impl{SUPDRVTRACERREG,pfnTracerClose}
1705 */
1706static DECLCALLBACK(void) vbdt_TracerClose(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData)
1707{
1708 AssertPtrReturnVoid(uSessionData);
1709 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1710
1711 dtrace_close((dtrace_state_t *)uSessionData);
1712
1713 VBDT_CLEAR_STACK_DATA();
1714}
1715
1716
1717/**
1718 * interface_method_impl{SUPDRVTRACERREG,pfnProviderRegister}
1719 */
1720static DECLCALLBACK(int) vbdt_ProviderRegister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
1721{
1722 AssertReturn(pCore->TracerData.DTrace.idProvider == UINT32_MAX || pCore->TracerData.DTrace.idProvider == 0,
1723 VERR_INTERNAL_ERROR_3);
1724 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1725
1726 PVTGDESCPROVIDER pDesc = pCore->pDesc;
1727 dtrace_pattr_t DtAttrs;
1728 vboxDtVtgConvAttr(&DtAttrs.dtpa_provider, &pDesc->AttrSelf);
1729 vboxDtVtgConvAttr(&DtAttrs.dtpa_mod, &pDesc->AttrModules);
1730 vboxDtVtgConvAttr(&DtAttrs.dtpa_func, &pDesc->AttrFunctions);
1731 vboxDtVtgConvAttr(&DtAttrs.dtpa_name, &pDesc->AttrNames);
1732 vboxDtVtgConvAttr(&DtAttrs.dtpa_args, &pDesc->AttrArguments);
1733
1734 dtrace_provider_id_t idProvider;
1735 int rc = dtrace_register(pCore->pszName,
1736 &DtAttrs,
1737 DTRACE_PRIV_KERNEL,
1738 NULL /* cred */,
1739 &g_vboxDtVtgProvOps,
1740 pCore,
1741 &idProvider);
1742 if (!rc)
1743 {
1744 Assert(idProvider != UINT32_MAX && idProvider != 0);
1745 pCore->TracerData.DTrace.idProvider = idProvider;
1746 Assert(pCore->TracerData.DTrace.idProvider == idProvider);
1747 rc = VINF_SUCCESS;
1748 }
1749 else
1750 rc = RTErrConvertFromErrno(rc);
1751
1752 VBDT_CLEAR_STACK_DATA();
1753 return rc;
1754}
1755
1756
1757/**
1758 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregister}
1759 */
1760static DECLCALLBACK(int) vbdt_ProviderDeregister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
1761{
1762 uint32_t idProvider = pCore->TracerData.DTrace.idProvider;
1763 AssertReturn(idProvider != UINT32_MAX && idProvider != 0, VERR_INTERNAL_ERROR_4);
1764 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1765
1766 dtrace_invalidate(idProvider);
1767 int rc = dtrace_unregister(idProvider);
1768 if (!rc)
1769 {
1770 pCore->TracerData.DTrace.idProvider = UINT32_MAX;
1771 rc = VINF_SUCCESS;
1772 }
1773 else
1774 {
1775 AssertMsg(rc == EBUSY, ("%d\n", rc));
1776 rc = VERR_TRY_AGAIN;
1777 }
1778
1779 VBDT_CLEAR_STACK_DATA();
1780 return rc;
1781}
1782
1783
1784/**
1785 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregisterZombie}
1786 */
1787static DECLCALLBACK(int) vbdt_ProviderDeregisterZombie(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
1788{
1789 uint32_t idProvider = pCore->TracerData.DTrace.idProvider;
1790 AssertReturn(idProvider != UINT32_MAX && idProvider != 0, VERR_INTERNAL_ERROR_4);
1791 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1792
1793 int rc = dtrace_unregister(idProvider);
1794 if (!rc)
1795 {
1796 pCore->TracerData.DTrace.idProvider = UINT32_MAX;
1797 rc = VINF_SUCCESS;
1798 }
1799 else
1800 {
1801 AssertMsg(rc == EBUSY, ("%d\n", rc));
1802 rc = VERR_TRY_AGAIN;
1803 }
1804
1805 VBDT_CLEAR_STACK_DATA();
1806 return rc;
1807}
1808
1809
1810
1811/**
1812 * The tracer registration record of the VBox DTrace implementation
1813 */
1814static SUPDRVTRACERREG g_VBoxDTraceReg =
1815{
1816 SUPDRVTRACERREG_MAGIC,
1817 SUPDRVTRACERREG_VERSION,
1818 vbdt_ProbeFireKernel,
1819 vbdt_ProbeFireUser,
1820 vbdt_TracerOpen,
1821 vbdt_TracerIoCtl,
1822 vbdt_TracerClose,
1823 vbdt_ProviderRegister,
1824 vbdt_ProviderDeregister,
1825 vbdt_ProviderDeregisterZombie,
1826 SUPDRVTRACERREG_MAGIC
1827};
1828
1829
1830
1831/**
1832 * Module termination code.
1833 *
1834 * @param hMod Opaque module handle.
1835 */
1836DECLEXPORT(void) ModuleTerm(void *hMod)
1837{
1838
1839}
1840
1841
1842/**
1843 * Module initialization code.
1844 *
1845 * @param hMod Opaque module handle.
1846 */
1847DECLEXPORT(int) ModuleInit(void *hMod)
1848{
1849 int rc = dtrace_attach();
1850 if (rc == DDI_SUCCESS)
1851 {
1852 rc = SUPR0TracerRegisterImpl(hMod, NULL, &g_VBoxDTraceReg, &g_pVBoxDTraceHlp);
1853 if (RT_SUCCESS(rc))
1854 {
1855 return rc;
1856 }
1857
1858 dtrace_detach();
1859 }
1860 else
1861 {
1862 SUPR0Printf("dtrace_attach -> %d\n", rc);
1863 rc = VERR_INTERNAL_ERROR_5;
1864 }
1865
1866 return rc;
1867}
1868