VirtualBox

source: vbox/trunk/src/VBox/ExtPacks/VBoxDTrace/VBoxDTraceR0/VBoxDTraceR0.cpp@53678

Last change on this file since 53678 was 53678, checked in by vboxsync, 10 years ago

VBoxDTrace: Adjusted to new spinlock API. (r56)

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 53.0 KB
1/* $Id: VBoxDTraceR0.cpp 53678 2015-01-02 12:34:53Z vboxsync $ */
2/** @file
3 * VBoxDTraceR0.
4 */
5
6/*
7 * Copyright (c) 2012 bird
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*******************************************************************************
33* Header Files *
34*******************************************************************************/
35#include <VBox/sup.h>
36#include <VBox/log.h>
37
38#include <iprt/asm-amd64-x86.h>
39#include <iprt/assert.h>
40#include <iprt/ctype.h>
41#include <iprt/err.h>
42#include <iprt/mem.h>
43#include <iprt/mp.h>
44#include <iprt/process.h>
45#include <iprt/semaphore.h>
46#include <iprt/spinlock.h>
47#include <iprt/string.h>
48#include <iprt/thread.h>
49#include <iprt/time.h>
50
51#include <sys/dtrace_impl.h>
52
53#include <VBox/VBoxTpG.h>
54
55
56
57/*******************************************************************************
58* Structures and Typedefs *
59*******************************************************************************/
60
61/** Caller indicator. */
62typedef enum VBOXDTCALLER
63{
64 kVBoxDtCaller_Invalid = 0,
65 kVBoxDtCaller_Generic,
66 kVBoxDtCaller_ProbeFireUser,
67 kVBoxDtCaller_ProbeFireKernel
68} VBOXDTCALLER;
69
70/**
71 * Stack data used for thread structure and such.
72 *
73 * This is planted in every external entry point and used to emulate Solaris
74 * curthread, CRED, curproc and similar. It is also used to get at the
75 * uncached probe arguments.
76 */
77typedef struct VBoxDtStackData
78{
79 /** Eyecatcher no. 1 (VBDT_STACK_DATA_MAGIC1). */
80 uint32_t u32Magic1;
81 /** Eyecatcher no. 2 (VBDT_STACK_DATA_MAGIC2). */
82 uint32_t u32Magic2;
83 /** The format of the caller specific data. */
84 VBOXDTCALLER enmCaller;
85 /** Caller specific data. */
86 union
87 {
88 /** kVBoxDtCaller_ProbeFireKernel. */
89 struct
90 {
91 /** The caller. */
92 uintptr_t uCaller;
93 /** Pointer to the stack arguments of a probe function call. */
94 uintptr_t *pauStackArgs;
95 } ProbeFireKernel;
96 /** kVBoxDtCaller_ProbeFireUser. */
97 struct
98 {
99 /** The user context. */
100 PCSUPDRVTRACERUSRCTX pCtx;
101 } ProbeFireUser;
102 } u;
103 /** Credentials allocated by VBoxDtGetCurrentCreds. */
104 struct VBoxDtCred *pCred;
105 /** Thread structure currently being held by this thread. */
106 struct VBoxDtThread *pThread;
107 /** Pointer to this structure.
108 * This is the final bit of integrity checking. */
109 struct VBoxDtStackData *pSelf;
110} VBDTSTACKDATA;
111/** Pointer to the on-stack thread specific data. */
112typedef VBDTSTACKDATA *PVBDTSTACKDATA;
113
114/** The first magic value. */
115#define VBDT_STACK_DATA_MAGIC1 RT_MAKE_U32_FROM_U8('V', 'B', 'o', 'x')
116/** The second magic value. */
117#define VBDT_STACK_DATA_MAGIC2 RT_MAKE_U32_FROM_U8('D', 'T', 'r', 'c')
118
119/** The alignment of the stack data.
120 * The data doesn't require more than sizeof(uintptr_t) alignment, but the
121 * greater the alignment, the quicker the lookup. */
122#define VBDT_STACK_DATA_ALIGN 32
123
124/** Plants the stack data. */
125#define VBDT_SETUP_STACK_DATA(a_enmCaller) \
126 uint8_t abBlob[sizeof(VBoxDtStackData) + VBDT_STACK_DATA_ALIGN - 1]; \
127 PVBDTSTACKDATA pStackData = (PVBDTSTACKDATA)( (uintptr_t)&abBlob[VBDT_STACK_DATA_ALIGN - 1] \
128 & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1)); \
129 pStackData->u32Magic1 = VBDT_STACK_DATA_MAGIC1; \
130 pStackData->u32Magic2 = VBDT_STACK_DATA_MAGIC2; \
131 pStackData->enmCaller = a_enmCaller; \
132 pStackData->pCred = NULL; \
133 pStackData->pThread = NULL; \
134 pStackData->pSelf = pStackData
135
136/** Pacifies the stack data and frees up the resources held within it. */
137#define VBDT_CLEAR_STACK_DATA() \
138 do \
139 { \
140 pStackData->u32Magic1 = 0; \
141 pStackData->u32Magic2 = 0; \
142 pStackData->pSelf = NULL; \
143 if (pStackData->pCred) \
144 crfree(pStackData->pCred); \
145 if (pStackData->pThread) \
146 VBoxDtReleaseThread(pStackData->pThread); \
147 } while (0)
148
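/* Editor's note: a minimal usage sketch (not part of the checked-in file) showing how the
   two macros above are meant to bracket an external entry point. The function name
   vboxDtExampleEntryPoint is hypothetical; the pattern mirrors the real entry points
   further down (e.g. vbdt_TracerOpen). */
#if 0 /* illustrative only */
static void vboxDtExampleEntryPoint(void)
{
    VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);   /* plants magic-tagged data on this stack */
    /* ... call into the common DTrace code; vboxDtGetStackData() can now locate pStackData ... */
    VBDT_CLEAR_STACK_DATA();                        /* invalidates the magics, releases creds/thread */
}
#endif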
149
150/*******************************************************************************
151* Global Variables *
152*******************************************************************************/
153/** Per CPU information */
154cpucore_t g_aVBoxDtCpuCores[RTCPUSET_MAX_CPUS];
155/** Dummy mutex. */
156struct VBoxDtMutex g_DummyMtx;
157/** Pointer to the tracer helpers provided by VBoxDrv. */
158static PCSUPDRVTRACERHLP g_pVBoxDTraceHlp;
159
160dtrace_cacheid_t dtrace_predcache_id = DTRACE_CACHEIDNONE + 1;
161
162#if 0
163void (*dtrace_cpu_init)(processorid_t);
164void (*dtrace_modload)(struct modctl *);
165void (*dtrace_modunload)(struct modctl *);
166void (*dtrace_helpers_cleanup)(void);
167void (*dtrace_helpers_fork)(proc_t *, proc_t *);
168void (*dtrace_cpustart_init)(void);
169void (*dtrace_cpustart_fini)(void);
170void (*dtrace_cpc_fire)(uint64_t);
171void (*dtrace_debugger_init)(void);
172void (*dtrace_debugger_fini)(void);
173#endif
174
175
176/**
177 * Gets the stack data.
178 *
179 * @returns Pointer to the stack data. Never NULL.
180 */
181static PVBDTSTACKDATA vboxDtGetStackData(void)
182{
183 int volatile iDummy = 1; /* use this to get the stack address. */
184 PVBDTSTACKDATA pData = (PVBDTSTACKDATA)( ((uintptr_t)&iDummy + VBDT_STACK_DATA_ALIGN - 1)
185 & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1));
186 for (;;)
187 {
188 if ( pData->u32Magic1 == VBDT_STACK_DATA_MAGIC1
189 && pData->u32Magic2 == VBDT_STACK_DATA_MAGIC2
190 && pData->pSelf == pData)
191 return pData;
192 pData = (PVBDTSTACKDATA)((uintptr_t)pData + VBDT_STACK_DATA_ALIGN);
193 }
194}
195
196
197void dtrace_toxic_ranges(void (*pfnAddOne)(uintptr_t uBase, uintptr_t cbRange))
198{
199 /** @todo ? */
200}
201
202
203
204/**
205 * Dummy callback used by dtrace_sync.
206 */
207static DECLCALLBACK(void) vboxDtSyncCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
208{
209 NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
210}
211
212
213/**
214 * Synchronize across all CPUs (expensive).
215 */
216void dtrace_sync(void)
217{
218 int rc = RTMpOnAll(vboxDtSyncCallback, NULL, NULL);
219 AssertRC(rc);
220}
221
222
223/**
224 * Fetch an 8-bit "word" from userland.
225 *
226 * @return The byte value.
227 * @param pvUserAddr The userland address.
228 */
229uint8_t dtrace_fuword8( void *pvUserAddr)
230{
231 uint8_t u8;
232 int rc = RTR0MemUserCopyFrom(&u8, (uintptr_t)pvUserAddr, sizeof(u8));
233 if (RT_FAILURE(rc))
234 {
235 RTCPUID iCpu = VBDT_GET_CPUID();
236 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
237 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
238 u8 = 0;
239 }
240 return u8;
241}
242
243
244/**
245 * Fetch a 16-bit word from userland.
246 *
247 * @return The word value.
248 * @param pvUserAddr The userland address.
249 */
250uint16_t dtrace_fuword16(void *pvUserAddr)
251{
252 uint16_t u16;
253 int rc = RTR0MemUserCopyFrom(&u16, (uintptr_t)pvUserAddr, sizeof(u16));
254 if (RT_FAILURE(rc))
255 {
256 RTCPUID iCpu = VBDT_GET_CPUID();
257 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
258 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
259 u16 = 0;
260 }
261 return u16;
262}
263
264
265/**
266 * Fetch a 32-bit word from userland.
267 *
268 * @return The dword value.
269 * @param pvUserAddr The userland address.
270 */
271uint32_t dtrace_fuword32(void *pvUserAddr)
272{
273 uint32_t u32;
274 int rc = RTR0MemUserCopyFrom(&u32, (uintptr_t)pvUserAddr, sizeof(u32));
275 if (RT_FAILURE(rc))
276 {
277 RTCPUID iCpu = VBDT_GET_CPUID();
278 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
279 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
280 u32 = 0;
281 }
282 return u32;
283}
284
285
286/**
287 * Fetch a 64-bit word from userland.
288 *
289 * @return The qword value.
290 * @param pvUserAddr The userland address.
291 */
292uint64_t dtrace_fuword64(void *pvUserAddr)
293{
294 uint64_t u64;
295 int rc = RTR0MemUserCopyFrom(&u64, (uintptr_t)pvUserAddr, sizeof(u64));
296 if (RT_FAILURE(rc))
297 {
298 RTCPUID iCpu = VBDT_GET_CPUID();
299 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
300 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
301 u64 = 0;
302 }
303 return u64;
304}
305
306
307/** copyin implementation */
308int VBoxDtCopyIn(void const *pvUser, void *pvDst, size_t cb)
309{
310 int rc = RTR0MemUserCopyFrom(pvDst, (uintptr_t)pvUser, cb);
311 return RT_SUCCESS(rc) ? 0 : -1;
312}
313
314
315/** copyout implementation */
316int VBoxDtCopyOut(void const *pvSrc, void *pvUser, size_t cb)
317{
318 int rc = RTR0MemUserCopyTo((uintptr_t)pvUser, pvSrc, cb);
319 return RT_SUCCESS(rc) ? 0 : -1;
320}
321
322
323/**
324 * Copy data from userland into the kernel.
325 *
326 * @param uUserAddr The userland address.
327 * @param uKrnlAddr The kernel buffer address.
328 * @param cb The number of bytes to copy.
329 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
330 */
331void dtrace_copyin( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cb, volatile uint16_t *pfFlags)
332{
333 int rc = RTR0MemUserCopyFrom((void *)uKrnlAddr, uUserAddr, cb);
334 if (RT_FAILURE(rc))
335 {
336 *pfFlags |= CPU_DTRACE_BADADDR;
337 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
338 }
339}
340
341
342/**
343 * Copy data from the kernel into userland.
344 *
345 * @param uKrnlAddr The kernel buffer address.
346 * @param uUserAddr The userland address.
347 * @param cb The number of bytes to copy.
348 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
349 */
350void dtrace_copyout( uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cb, volatile uint16_t *pfFlags)
351{
352 int rc = RTR0MemUserCopyTo(uUserAddr, (void const *)uKrnlAddr, cb);
353 if (RT_FAILURE(rc))
354 {
355 *pfFlags |= CPU_DTRACE_BADADDR;
356 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
357 }
358}
359
360
361/**
362 * Copy a string from userland into the kernel.
363 *
364 * @param uUserAddr The userland address.
365 * @param uKrnlAddr The kernel buffer address.
366 * @param cbMax The maximum number of bytes to copy. May stop
367 * earlier if a zero byte is encountered.
368 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
369 */
370void dtrace_copyinstr( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cbMax, volatile uint16_t *pfFlags)
371{
372 if (!cbMax)
373 return;
374
375 char *pszDst = (char *)uKrnlAddr;
376 int rc = RTR0MemUserCopyFrom(pszDst, uUserAddr, cbMax);
377 if (RT_FAILURE(rc))
378 {
379 /* Byte by byte - lazy bird! */
380 size_t off = 0;
381 while (off < cbMax)
382 {
383 rc = RTR0MemUserCopyFrom(&pszDst[off], uUserAddr + off, 1);
384 if (RT_FAILURE(rc))
385 {
386 *pfFlags |= CPU_DTRACE_BADADDR;
387 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
388 pszDst[off] = '\0';
389 return;
390 }
391 if (!pszDst[off])
392 return;
393 off++;
394 }
395 }
396
397 pszDst[cbMax - 1] = '\0';
398}
399
400
401/**
402 * Copy a string from the kernel and into user land.
403 *
404 * @param uKrnlAddr The kernel string address.
405 * @param uUserAddr The userland address.
406 * @param cbMax The maximum number of bytes to copy. Will stop
407 * earlier if a zero byte is encountered.
408 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
409 */
410void dtrace_copyoutstr(uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cbMax, volatile uint16_t *pfFlags)
411{
412 const char *pszSrc = (const char *)uKrnlAddr;
413 size_t cbActual = RTStrNLen(pszSrc, cbMax);
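    /* Editor's note: the next line includes the zero terminator in the copy when the
       string is shorter than cbMax. */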
414 cbActual += cbActual < cbMax;
415 dtrace_copyout(uKrnlAddr,uUserAddr, cbActual, pfFlags);
416}
417
418
419/**
420 * Get the caller @a cCallFrames call frames up the stack.
421 *
422 * @returns The caller's return address or ~(uintptr_t)0.
423 * @param cCallFrames The number of frames.
424 */
425uintptr_t dtrace_caller(int cCallFrames)
426{
427 PVBDTSTACKDATA pData = vboxDtGetStackData();
428 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
429 return pData->u.ProbeFireKernel.uCaller;
430 return ~(uintptr_t)0;
431}
432
433
434/**
435 * Get argument number @a iArg @a cCallFrames call frames up the stack.
436 *
437 * @returns The argument value, or UINT64_MAX if it cannot be retrieved.
438 * @param iArg The argument to get.
439 * @param cCallFrames The number of frames.
440 */
441uint64_t dtrace_getarg(int iArg, int cCallFrames)
442{
443 PVBDTSTACKDATA pData = vboxDtGetStackData();
444 AssertReturn(iArg >= 5, UINT64_MAX);
445
446 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
447 return pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
448 return UINT64_MAX;
449}
450
451
452/**
453 * Produce a traceback of the kernel stack.
454 *
455 * @param paPcStack Where to return the program counters.
456 * @param cMaxFrames The maximum number of PCs to return.
457 * @param cSkipFrames The number of artificial callstack frames to
458 * skip at the top.
459 * @param pIntr Not sure what this is...
460 */
461void dtrace_getpcstack(pc_t *paPcStack, int cMaxFrames, int cSkipFrames, uint32_t *pIntr)
462{
463 int iFrame = 0;
464 while (iFrame < cMaxFrames)
465 {
466 paPcStack[iFrame] = NULL;
467 iFrame++;
468 }
469}
470
471
472/**
473 * Get the number of call frames on the stack.
474 *
475 * @returns The stack depth.
476 * @param cSkipFrames The number of artificial callstack frames to
477 * skip at the top.
478 */
479int dtrace_getstackdepth(int cSkipFrames)
480{
481 return 1;
482}
483
484
485/**
486 * Produce a traceback of the userland stack.
487 *
488 * @param paPcStack Where to return the program counters.
489 * @param paFpStack Where to return the frame pointers.
490 * @param cMaxFrames The maximum number of frames to return.
491 */
492void dtrace_getufpstack(uint64_t *paPcStack, uint64_t *paFpStack, int cMaxFrames)
493{
494 int iFrame = 0;
495 while (iFrame < cMaxFrames)
496 {
497 paPcStack[iFrame] = 0;
498 paFpStack[iFrame] = 0;
499 iFrame++;
500 }
501}
502
503
504/**
505 * Produce a traceback of the userland stack.
506 *
507 * @param paPcStack Where to return the program counters.
508 * @param cMaxFrames The maximum number of frames to return.
509 */
510void dtrace_getupcstack(uint64_t *paPcStack, int cMaxFrames)
511{
512 int iFrame = 0;
513 while (iFrame < cMaxFrames)
514 {
515 paPcStack[iFrame] = 0;
516 iFrame++;
517 }
518}
519
520
521/**
522 * Computes the depth of the userland stack.
523 */
524int dtrace_getustackdepth(void)
525{
526 return 0;
527}
528
529
530/**
531 * Get the current IPL/IRQL.
532 *
533 * @returns Current level.
534 */
535int dtrace_getipl(void)
536{
537#ifdef RT_ARCH_AMD64
538 /* CR8 is normally the same as IRQL / IPL on AMD64. */
539 return ASMGetCR8();
540#else
541 /* Just fake it on x86. */
542 return !ASMIntAreEnabled();
543#endif
544}
545
546
547/**
548 * Get current monotonic timestamp.
549 *
550 * @returns Timestamp, nano seconds.
551 */
552hrtime_t dtrace_gethrtime(void)
553{
554 return RTTimeNanoTS();
555}
556
557
558/**
559 * Get current walltime.
560 *
561 * @returns Timestamp, nano seconds.
562 */
563hrtime_t dtrace_gethrestime(void)
564{
565 /** @todo try get better resolution here somehow ... */
566 RTTIMESPEC Now;
567 return RTTimeSpecGetNano(RTTimeNow(&Now));
568}
569
570
571/**
572 * DTrace panic routine.
573 *
574 * @param pszFormat Panic message.
575 * @param va Arguments to the panic message.
576 */
577void dtrace_vpanic(const char *pszFormat, va_list va)
578{
579 RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
580 RTAssertMsg2WeakV(pszFormat, va);
581 RTR0AssertPanicSystem();
582 for (;;)
583 {
584 ASMBreakpoint();
585 volatile char *pchCrash = (volatile char *)~(uintptr_t)0;
586 *pchCrash = '\0';
587 }
588}
589
590
591/**
592 * DTrace panic routine.
593 *
594 * @param pszFormat Panic message.
595 * @param ... Arguments to the panic message.
596 */
597void VBoxDtPanic(const char *pszFormat, ...)
598{
599 va_list va;
600 va_start(va, pszFormat);
601 dtrace_vpanic(pszFormat, va);
602 va_end(va);
603}
604
605
606/**
607 * DTrace kernel message routine.
608 *
609 * @param pszFormat The kernel message (format string).
610 * @param ... Arguments to the message.
611 */
612void VBoxDtCmnErr(int iLevel, const char *pszFormat, ...)
613{
614 va_list va;
615 va_start(va, pszFormat);
616 SUPR0Printf("%N", pszFormat, va);
617 va_end(va);
618}
619
620
621/** uprintf implementation */
622void VBoxDtUPrintf(const char *pszFormat, ...)
623{
624 va_list va;
625 va_start(va, pszFormat);
626 VBoxDtUPrintfV(pszFormat, va);
627 va_end(va);
628}
629
630
631/** vuprintf implementation */
632void VBoxDtUPrintfV(const char *pszFormat, va_list va)
633{
634 SUPR0Printf("%N", pszFormat, va);
635}
636
637
638/* CRED implementation. */
639cred_t *VBoxDtGetCurrentCreds(void)
640{
641 PVBDTSTACKDATA pData = vboxDtGetStackData();
642 if (!pData->pCred)
643 {
644 struct VBoxDtCred *pCred;
645 int rc = RTMemAllocEx(sizeof(*pCred), 0, RTMEMALLOCEX_FLAGS_ANY_CTX, (void **)&pCred);
646 AssertFatalRC(rc);
647 pCred->cr_refs = 1;
648 /** @todo get the right creds on unix systems. */
649 pCred->cr_uid = 0;
650 pCred->cr_ruid = 0;
651 pCred->cr_suid = 0;
652 pCred->cr_gid = 0;
653 pCred->cr_rgid = 0;
654 pCred->cr_sgid = 0;
655 pCred->cr_zone = 0;
656 pData->pCred = pCred;
657 }
658
659 return pData->pCred;
660}
661
662
663/* crhold implementation */
664void VBoxDtCredHold(struct VBoxDtCred *pCred)
665{
666 int32_t cRefs = ASMAtomicIncS32(&pCred->cr_refs);
667 Assert(cRefs > 1);
668}
669
670
671/* crfree implementation */
672void VBoxDtCredFree(struct VBoxDtCred *pCred)
673{
674 int32_t cRefs = ASMAtomicDecS32(&pCred->cr_refs);
675 Assert(cRefs >= 0);
676 if (!cRefs)
677 RTMemFree(pCred);
678}
679
680/** Spinlock protecting the thread structures. */
681static RTSPINLOCK g_hThreadSpinlock = NIL_RTSPINLOCK;
682/** List of threads by usage age. */
683static RTLISTANCHOR g_ThreadAgeList;
684/** Hash table for looking up thread structures. */
685static struct VBoxDtThread *g_apThreadsHash[16384];
686/** Fake kthread_t structures.
687 * The size of this array is making horrible ASSUMPTIONS about the number of
688 * threads in the system that will be subjected to DTracing. */
689static struct VBoxDtThread g_aThreads[8192];
690
691
692static int vboxDtInitThreadDb(void)
693{
694 int rc = RTSpinlockCreate(&g_hThreadSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDtThreadDb");
695 if (RT_FAILURE(rc))
696 return rc;
697
698 RTListInit(&g_ThreadAgeList);
699 for (uint32_t i = 0; i < RT_ELEMENTS(g_aThreads); i++)
700 {
701 g_aThreads[i].hNative = NIL_RTNATIVETHREAD;
702 g_aThreads[i].uPid = NIL_RTPROCESS;
703 RTListPrepend(&g_ThreadAgeList, &g_aThreads[i].AgeEntry);
704 }
705
706 return VINF_SUCCESS;
707}
708
709
710static void vboxDtTermThreadDb(void)
711{
712 RTSpinlockDestroy(g_hThreadSpinlock);
713 g_hThreadSpinlock = NIL_RTSPINLOCK;
714 RTListInit(&g_ThreadAgeList);
715}
716
717
718/* curthread implementation, providing a fake kthread_t. */
719struct VBoxDtThread *VBoxDtGetCurrentThread(void)
720{
721 /*
722 * Once we've retrieved a thread, we hold on to it until the thread exits
723 * the VBoxDTrace module.
724 */
725 PVBDTSTACKDATA pData = vboxDtGetStackData();
726 if (pData->pThread)
727 {
728 AssertPtr(pData->pThread);
729 Assert(pData->pThread->hNative == RTThreadNativeSelf());
730 Assert(pData->pThread->uPid == RTProcSelf());
731 Assert(RTListIsEmpty(&pData->pThread->AgeEntry));
732 return pData->pThread;
733 }
734
735 /*
736 * Lookup the thread in the hash table.
737 */
738 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
739 RTPROCESS uPid = RTProcSelf();
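    /* Editor's note: 2654435761 below is the usual Knuth-style multiplicative hash constant
       (a prime close to 2^32 divided by the golden ratio). */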
740 uintptr_t iHash = (hNativeSelf * 2654435761) % RT_ELEMENTS(g_apThreadsHash);
741
742 RTSpinlockAcquire(g_hThreadSpinlock);
743
744 struct VBoxDtThread *pThread = g_apThreadsHash[iHash];
745 while (pThread)
746 {
747 if (pThread->hNative == hNativeSelf)
748 {
749 if (pThread->uPid != uPid)
750 {
751 /* Re-initialize the reused thread. */
752 pThread->uPid = uPid;
753 pThread->t_dtrace_vtime = 0;
754 pThread->t_dtrace_start = 0;
755 pThread->t_dtrace_stop = 0;
756 pThread->t_dtrace_scrpc = 0;
757 pThread->t_dtrace_astpc = 0;
758 pThread->t_predcache = 0;
759 }
760
761 /* Hold the thread in the on-stack data, making sure it does not
762 get reused till the thread leaves VBoxDTrace. */
763 RTListNodeRemove(&pThread->AgeEntry);
764 pData->pThread = pThread;
765
766 RTSpinlockReleaseNoInts(g_hThreadSpinlock);
767 return pThread;
768 }
769
770 pThread = pThread->pNext;
771 }
772
773 /*
774 * Unknown thread. Allocate a new entry, recycling unused or old ones.
775 */
776 pThread = RTListGetLast(&g_ThreadAgeList, struct VBoxDtThread, AgeEntry);
777 AssertFatal(pThread);
778 RTListNodeRemove(&pThread->AgeEntry);
779 if (pThread->hNative != NIL_RTNATIVETHREAD)
780 {
781 uintptr_t iHash2 = (pThread->hNative * 2654435761) % RT_ELEMENTS(g_apThreadsHash);
782 if (g_apThreadsHash[iHash2] == pThread)
783 g_apThreadsHash[iHash2] = pThread->pNext;
784 else
785 {
786 for (struct VBoxDtThread *pPrev = g_apThreadsHash[iHash2]; ; pPrev = pPrev->pNext)
787 {
788 AssertPtr(pPrev);
789 if (pPrev->pNext == pThread)
790 {
791 pPrev->pNext = pThread->pNext;
792 break;
793 }
794 }
795 }
796 }
797
798 /*
799 * Initialize the data.
800 */
801 pThread->t_dtrace_vtime = 0;
802 pThread->t_dtrace_start = 0;
803 pThread->t_dtrace_stop = 0;
804 pThread->t_dtrace_scrpc = 0;
805 pThread->t_dtrace_astpc = 0;
806 pThread->t_predcache = 0;
807 pThread->hNative = hNativeSelf;
808 pThread->uPid = uPid;
809
810 /*
811 * Add it to the hash as well as the on-stack data.
812 */
813 pThread->pNext = g_apThreadsHash[iHash];
814 g_apThreadsHash[iHash] = pThread;
815
816 pData->pThread = pThread;
817
818 RTSpinlockReleaseNoInts(g_hThreadSpinlock);
819 return pThread;
820}
821
822
823/**
824 * Called by the stack data destructor.
825 *
826 * @param pThread The thread to release.
827 *
828 */
829static void VBoxDtReleaseThread(struct VBoxDtThread *pThread)
830{
831 RTSpinlockAcquire(g_hThreadSpinlock);
832
833 RTListAppend(&g_ThreadAgeList, &pThread->AgeEntry);
834
835 RTSpinlockReleaseNoInts(g_hThreadSpinlock);
836}
837
838
839
840
841/*
842 *
843 * Virtual Memory / Resource Allocator.
844 * Virtual Memory / Resource Allocator.
845 * Virtual Memory / Resource Allocator.
846 *
847 */
848
849
850/** The number of bits per chunk.
851 * @remarks The 32 bytes are for heap headers and such like. */
852#define VBOXDTVMEMCHUNK_BITS ( ((_64K - 32 - sizeof(uint32_t) * 2) / sizeof(uint32_t)) * 32)
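/* Editor's note: a quick sanity check of the formula above, assuming _64K == 65536 and a
   4-byte uint32_t: ((65536 - 32 - 8) / 4) * 32 = 16374 * 32 = 523968 bits per chunk, so the
   chunk structure plus heap overhead stays within a 64KB allocation. */
AssertCompile(VBOXDTVMEMCHUNK_BITS % 32 == 0);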
853
854/**
855 * Resource allocator chunk.
856 */
857typedef struct VBoxDtVMemChunk
858{
859 /** The ordinal (unbased) of the first item. */
860 uint32_t iFirst;
861 /** The current number of free items in this chunk. */
862 uint32_t cCurFree;
863 /** The allocation bitmap. */
864 uint32_t bm[VBOXDTVMEMCHUNK_BITS / 32];
865} VBOXDTVMEMCHUNK;
866/** Pointer to a resource allocator chunk. */
867typedef VBOXDTVMEMCHUNK *PVBOXDTVMEMCHUNK;
868
869
870
871/**
872 * Resource allocator instance.
873 */
874typedef struct VBoxDtVMem
875{
876 /** Spinlock protecting the data. */
877 RTSPINLOCK hSpinlock;
878 /** Magic value. */
879 uint32_t u32Magic;
880 /** The current number of free items in the chunks. */
881 uint32_t cCurFree;
882 /** The current number of chunks that we have allocated. */
883 uint32_t cCurChunks;
884 /** The configured resource base. */
885 uint32_t uBase;
886 /** The configured max number of items. */
887 uint32_t cMaxItems;
888 /** The size of the apChunks array. */
889 uint32_t cMaxChunks;
890 /** Array of chunk pointers.
891 * (The size is determined at creation.) */
892 PVBOXDTVMEMCHUNK apChunks[1];
893} VBOXDTVMEM;
894/** Pointer to a resource allocator instance. */
895typedef VBOXDTVMEM *PVBOXDTVMEM;
896
897/** Magic value for the VBOXDTVMEM structure. */
898#define VBOXDTVMEM_MAGIC RT_MAKE_U32_FROM_U8('V', 'M', 'e', 'm')
899
900
901/* vmem_create implementation */
902struct VBoxDtVMem *VBoxDtVMemCreate(const char *pszName, void *pvBase, size_t cb, size_t cbUnit,
903 PFNRT pfnAlloc, PFNRT pfnFree, struct VBoxDtVMem *pSrc,
904 size_t cbQCacheMax, uint32_t fFlags)
905{
906 /*
907 * Assert preconditions of this implementation.
908 */
909 AssertMsgReturn((uintptr_t)pvBase <= UINT32_MAX, ("%p\n", pvBase), NULL);
910 AssertMsgReturn(cb <= UINT32_MAX, ("%zu\n", cb), NULL);
911 AssertMsgReturn((uintptr_t)pvBase + cb - 1 <= UINT32_MAX, ("%p %zu\n", pvBase, cb), NULL);
912 AssertMsgReturn(cbUnit == 1, ("%zu\n", cbUnit), NULL);
913 AssertReturn(!pfnAlloc, NULL);
914 AssertReturn(!pfnFree, NULL);
915 AssertReturn(!pSrc, NULL);
916 AssertReturn(!cbQCacheMax, NULL);
917 AssertReturn(fFlags & VM_SLEEP, NULL);
918 AssertReturn(fFlags & VMC_IDENTIFIER, NULL);
919
920 /*
921 * Allocate the instance.
922 */
923 uint32_t cChunks = cb / VBOXDTVMEMCHUNK_BITS;
924 if (cb % VBOXDTVMEMCHUNK_BITS)
925 cChunks++;
926 PVBOXDTVMEM pThis = (PVBOXDTVMEM)RTMemAllocZ(RT_OFFSETOF(VBOXDTVMEM, apChunks[cChunks]));
927 if (!pThis)
928 return NULL;
929 int rc = RTSpinlockCreate(&pThis->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDtVMem");
930 if (RT_FAILURE(rc))
931 {
932 RTMemFree(pThis);
933 return NULL;
934 }
935 pThis->u32Magic = VBOXDTVMEM_MAGIC;
936 pThis->cCurFree = 0;
937 pThis->cCurChunks = 0;
938 pThis->uBase = (uint32_t)(uintptr_t)pvBase;
939 pThis->cMaxItems = (uint32_t)cb;
940 pThis->cMaxChunks = cChunks;
941
942 return pThis;
943}
944
945
946/* vmem_destroy implementation */
947void VBoxDtVMemDestroy(struct VBoxDtVMem *pThis)
948{
949 if (!pThis)
950 return;
951 AssertPtrReturnVoid(pThis);
952 AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);
953
954 /*
955 * Invalidate the instance.
956 */
957 RTSpinlockAcquire(pThis->hSpinlock); /* paranoia */
958 pThis->u32Magic = 0;
959 RTSpinlockRelease(pThis->hSpinlock);
960 RTSpinlockDestroy(pThis->hSpinlock);
961
962 /*
963 * Free the chunks, then the instance.
964 */
965 uint32_t iChunk = pThis->cCurChunks;
966 while (iChunk-- > 0)
967 {
968 RTMemFree(pThis->apChunks[iChunk]);
969 pThis->apChunks[iChunk] = NULL;
970 }
971 RTMemFree(pThis);
972}
973
974
975/* vmem_alloc implementation */
976void *VBoxDtVMemAlloc(struct VBoxDtVMem *pThis, size_t cbMem, uint32_t fFlags)
977{
978 /*
979 * Validate input.
980 */
981 AssertReturn(fFlags & VM_BESTFIT, NULL);
982 AssertReturn(fFlags & VM_SLEEP, NULL);
983 AssertReturn(cbMem == 1, NULL);
984 AssertPtrReturn(pThis, NULL);
985 AssertReturn(pThis->u32Magic == VBOXDTVMEM_MAGIC, NULL);
986
987 /*
988 * Allocation loop.
989 */
990 RTSpinlockAcquire(pThis->hSpinlock);
991 for (;;)
992 {
993 PVBOXDTVMEMCHUNK pChunk;
994 uint32_t const cChunks = pThis->cCurChunks;
995
996 if (RT_LIKELY(pThis->cCurFree > 0))
997 {
998 for (uint32_t iChunk = 0; iChunk < cChunks; iChunk++)
999 {
1000 pChunk = pThis->apChunks[iChunk];
1001 if (pChunk->cCurFree > 0)
1002 {
1003 int iBit = ASMBitFirstClear(pChunk->bm, VBOXDTVMEMCHUNK_BITS);
1004 AssertMsgReturnStmt(iBit >= 0 && (unsigned)iBit < VBOXDTVMEMCHUNK_BITS, ("%d\n", iBit),
1005 RTSpinlockRelease(pThis->hSpinlock),
1006 NULL);
1007
1008 ASMBitSet(pChunk->bm, iBit);
1009 pChunk->cCurFree--;
1010 pThis->cCurFree--;
1011
1012 uint32_t iRet = (uint32_t)iBit + pChunk->iFirst + pThis->uBase;
1013 RTSpinlockReleaseNoInts(pThis->hSpinlock);
1014 return (void *)(uintptr_t)iRet;
1015 }
1016 }
1017 AssertFailedBreak();
1018 }
1019
1020 /* Out of resources? */
1021 if (cChunks >= pThis->cMaxChunks)
1022 break;
1023
1024 /*
1025 * Allocate another chunk.
1026 */
1027 uint32_t const iFirstBit = cChunks > 0 ? pThis->apChunks[cChunks - 1]->iFirst + VBOXDTVMEMCHUNK_BITS : 0;
1028 uint32_t const cFreeBits = cChunks + 1 == pThis->cMaxChunks
1029 ? pThis->cMaxItems - (iFirstBit - pThis->uBase)
1030 : VBOXDTVMEMCHUNK_BITS;
1031 Assert(cFreeBits <= VBOXDTVMEMCHUNK_BITS);
1032
1033 RTSpinlockRelease(pThis->hSpinlock);
1034
1035 pChunk = (PVBOXDTVMEMCHUNK)RTMemAllocZ(sizeof(*pChunk));
1036 if (!pChunk)
1037 return NULL;
1038
1039 pChunk->iFirst = iFirstBit;
1040 pChunk->cCurFree = cFreeBits;
1041 if (cFreeBits != VBOXDTVMEMCHUNK_BITS)
1042 {
1043 /* lazy bird. */
1044 uint32_t iBit = cFreeBits;
1045 while (iBit < VBOXDTVMEMCHUNK_BITS)
1046 {
1047 ASMBitSet(pChunk->bm, iBit);
1048 iBit++;
1049 }
1050 }
1051
1052 RTSpinlockAcquire(pThis->hSpinlock);
1053
1054 /*
1055 * Insert the new chunk. If someone raced us here, we'll drop it to
1056 * avoid wasting resources.
1057 */
1058 if (pThis->cCurChunks == cChunks)
1059 {
1060 pThis->apChunks[cChunks] = pChunk;
1061 pThis->cCurFree += pChunk->cCurFree;
1062 pThis->cCurChunks += 1;
1063 }
1064 else
1065 {
1066 RTSpinlockRelease(pThis->hSpinlock);
1067 RTMemFree(pChunk);
1068 RTSpinlockAcquire(pThis->hSpinlock);
1069 }
1070 }
1071 RTSpinlockReleaseNoInts(pThis->hSpinlock);
1072
1073 return NULL;
1074}
1075
1076/* vmem_free implementation */
1077void VBoxDtVMemFree(struct VBoxDtVMem *pThis, void *pvMem, size_t cbMem)
1078{
1079 /*
1080 * Validate input.
1081 */
1082 AssertReturnVoid(cbMem == 1);
1083 AssertPtrReturnVoid(pThis);
1084 AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);
1085
1086 AssertReturnVoid((uintptr_t)pvMem < UINT32_MAX);
1087 uint32_t uMem = (uint32_t)(uintptr_t)pvMem;
1088 AssertReturnVoid(uMem >= pThis->uBase);
1089 uMem -= pThis->uBase;
1090 AssertReturnVoid(uMem < pThis->cMaxItems);
1091
1092
1093 /*
1094 * Free it.
1095 */
1096 RTSpinlockAcquire(pThis->hSpinlock);
1097 uint32_t const iChunk = uMem / VBOXDTVMEMCHUNK_BITS;
1098 if (iChunk < pThis->cCurChunks)
1099 {
1100 PVBOXDTVMEMCHUNK pChunk = pThis->apChunks[iChunk];
1101 uint32_t iBit = uMem - pChunk->iFirst;
1102 AssertReturnVoidStmt(iBit < VBOXDTVMEMCHUNK_BITS, RTSpinlockRelease(pThis->hSpinlock));
1103 AssertReturnVoidStmt(ASMBitTestAndClear(pChunk->bm, iBit), RTSpinlockRelease(pThis->hSpinlock));
1104
1105 pChunk->cCurFree++;
1106 pThis->cCurFree++;
1107 }
1108
1109 RTSpinlockRelease(pThis->hSpinlock);
1110}
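/* Editor's note: a hypothetical usage sketch (not in the original file) of the identifier
   arena implemented above, mirroring how DTrace uses vmem_create/vmem_alloc/vmem_free for
   32-bit IDs. The name, base and size below are made up for illustration. */
#if 0 /* illustrative only */
static void vboxDtExampleVMemUsage(void)
{
    struct VBoxDtVMem *pArena = VBoxDtVMemCreate("example-ids", (void *)(uintptr_t)1 /*base*/, _64K /*cb*/,
                                                 1 /*cbUnit*/, NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
    if (pArena)
    {
        void *pvId = VBoxDtVMemAlloc(pArena, 1, VM_BESTFIT | VM_SLEEP); /* returns an ID cast to a pointer */
        if (pvId)
            VBoxDtVMemFree(pArena, pvId, 1);
        VBoxDtVMemDestroy(pArena);
    }
}
#endif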
1111
1112
1113/*
1114 *
1115 * Memory Allocators.
1116 * Memory Allocators.
1117 * Memory Allocators.
1118 *
1119 */
1120
1121
1122/* kmem_alloc implementation */
1123void *VBoxDtKMemAlloc(size_t cbMem, uint32_t fFlags)
1124{
1125 void *pvMem;
1126 int rc = RTMemAllocEx(cbMem, 0, fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0, &pvMem);
1127 AssertRCReturn(rc, NULL);
1128 AssertPtr(pvMem);
1129 return pvMem;
1130}
1131
1132
1133/* kmem_zalloc implementation */
1134void *VBoxDtKMemAllocZ(size_t cbMem, uint32_t fFlags)
1135{
1136 void *pvMem;
1137 int rc = RTMemAllocEx(cbMem, 0,
1138 (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED,
1139 &pvMem);
1140 AssertRCReturn(rc, NULL);
1141 AssertPtr(pvMem);
1142 return pvMem;
1143}
1144
1145
1146/* kmem_free implementation */
1147void VBoxDtKMemFree(void *pvMem, size_t cbMem)
1148{
1149 RTMemFreeEx(pvMem, cbMem);
1150}
1151
1152
1153/**
1154 * Memory cache mockup structure.
1155 * No slab allocator here!
1156 */
1157struct VBoxDtMemCache
1158{
1159 uint32_t u32Magic;
1160 size_t cbBuf;
1161 size_t cbAlign;
1162};
1163
1164
1165/* Limited kmem_cache_create implementation. */
1166struct VBoxDtMemCache *VBoxDtKMemCacheCreate(const char *pszName, size_t cbBuf, size_t cbAlign,
1167 PFNRT pfnCtor, PFNRT pfnDtor, PFNRT pfnReclaim,
1168 void *pvUser, void *pvVM, uint32_t fFlags)
1169{
1170 /*
1171 * Check the input.
1172 */
1173 AssertReturn(cbBuf > 0 && cbBuf < _1G, NULL);
1174 AssertReturn(RT_IS_POWER_OF_TWO(cbAlign), NULL);
1175 AssertReturn(!pfnCtor, NULL);
1176 AssertReturn(!pfnDtor, NULL);
1177 AssertReturn(!pfnReclaim, NULL);
1178 AssertReturn(!pvUser, NULL);
1179 AssertReturn(!pvVM, NULL);
1180 AssertReturn(!fFlags, NULL);
1181
1182 /*
1183 * Create a parameter container. Don't bother with anything fancy here yet,
1184 * just get something working.
1185 */
1186 struct VBoxDtMemCache *pThis = (struct VBoxDtMemCache *)RTMemAlloc(sizeof(*pThis));
1187 if (!pThis)
1188 return NULL;
1189
1190 pThis->cbAlign = cbAlign;
1191 pThis->cbBuf = cbBuf;
1192 return pThis;
1193}
1194
1195
1196/* Limited kmem_cache_destroy implementation. */
1197void VBoxDtKMemCacheDestroy(struct VBoxDtMemCache *pThis)
1198{
1199 RTMemFree(pThis);
1200}
1201
1202
1203/* kmem_cache_alloc implementation. */
1204void *VBoxDtKMemCacheAlloc(struct VBoxDtMemCache *pThis, uint32_t fFlags)
1205{
1206 void *pvMem;
1207 int rc = RTMemAllocEx(pThis->cbBuf,
1208 pThis->cbAlign,
1209 (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED,
1210 &pvMem);
1211 AssertRCReturn(rc, NULL);
1212 AssertPtr(pvMem);
1213 return pvMem;
1214}
1215
1216
1217/* kmem_cache_free implementation. */
1218void VBoxDtKMemCacheFree(struct VBoxDtMemCache *pThis, void *pvMem)
1219{
1220 RTMemFreeEx(pvMem, pThis->cbBuf);
1221}
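/* Editor's note: a hypothetical usage sketch of the kmem_cache mockup above. Since there is
   no slab allocator here, every "cache" object is simply an aligned, zeroed RTMemAllocEx
   block of the size given at creation time; the sizes below are made up for illustration. */
#if 0 /* illustrative only */
static void vboxDtExampleKMemCacheUsage(void)
{
    struct VBoxDtMemCache *pCache = VBoxDtKMemCacheCreate("example-cache", 256 /*cbBuf*/, 64 /*cbAlign*/,
                                                          NULL, NULL, NULL, NULL, NULL, 0);
    if (pCache)
    {
        void *pvObj = VBoxDtKMemCacheAlloc(pCache, 0 /* 0 = may sleep, i.e. not KM_NOSLEEP */);
        if (pvObj)
            VBoxDtKMemCacheFree(pCache, pvObj);
        VBoxDtKMemCacheDestroy(pCache);
    }
}
#endif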
1222
1223
1224/*
1225 *
1226 * Mutex Semaphore Wrappers.
1227 *
1228 */
1229
1230
1231/** Initializes a mutex. */
1232int VBoxDtMutexInit(struct VBoxDtMutex *pMtx)
1233{
1234 AssertReturn(pMtx != &g_DummyMtx, -1);
1235 AssertPtr(pMtx);
1236
1237 pMtx->hOwner = NIL_RTNATIVETHREAD;
1238 pMtx->hMtx = NIL_RTSEMMUTEX;
1239 int rc = RTSemMutexCreate(&pMtx->hMtx);
1240 if (RT_SUCCESS(rc))
1241 return 0;
1242 return -1;
1243}
1244
1245
1246/** Deletes a mutex. */
1247void VBoxDtMutexDelete(struct VBoxDtMutex *pMtx)
1248{
1249 AssertReturnVoid(pMtx != &g_DummyMtx);
1250 AssertPtr(pMtx);
1251 if (pMtx->hMtx == NIL_RTSEMMUTEX || pMtx->hMtx == NULL)
1252 return;
1253
1254 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1255 int rc = RTSemMutexDestroy(pMtx->hMtx); AssertRC(rc);
1256 pMtx->hMtx = NIL_RTSEMMUTEX;
1257}
1258
1259
1260/* mutex_enter implementation */
1261void VBoxDtMutexEnter(struct VBoxDtMutex *pMtx)
1262{
1263 AssertPtr(pMtx);
1264 if (pMtx == &g_DummyMtx)
1265 return;
1266
1267 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
1268
1269 int rc = RTSemMutexRequest(pMtx->hMtx, RT_INDEFINITE_WAIT);
1270 AssertFatalRC(rc);
1271
1272 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1273 pMtx->hOwner = hSelf;
1274}
1275
1276
1277/* mutex_exit implementation */
1278void VBoxDtMutexExit(struct VBoxDtMutex *pMtx)
1279{
1280 AssertPtr(pMtx);
1281 if (pMtx == &g_DummyMtx)
1282 return;
1283
1284 Assert(pMtx->hOwner == RTThreadNativeSelf());
1285
1286 pMtx->hOwner = NIL_RTNATIVETHREAD;
1287 int rc = RTSemMutexRelease(pMtx->hMtx);
1288 AssertFatalRC(rc);
1289}
1290
1291
1292/* MUTEX_HELD implementation */
1293bool VBoxDtMutexIsOwner(struct VBoxDtMutex *pMtx)
1294{
1295 AssertPtrReturn(pMtx, false);
1296 if (pMtx == &g_DummyMtx)
1297 return true;
1298 return pMtx->hOwner == RTThreadNativeSelf();
1299}
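/* Editor's note: a hypothetical usage sketch of the mutex wrappers above (mutex_init,
   mutex_enter, MUTEX_HELD, mutex_exit and mutex_destroy in DTrace speak). */
#if 0 /* illustrative only */
static void vboxDtExampleMutexUsage(void)
{
    struct VBoxDtMutex Mtx;
    if (VBoxDtMutexInit(&Mtx) == 0)
    {
        VBoxDtMutexEnter(&Mtx);
        Assert(VBoxDtMutexIsOwner(&Mtx));
        VBoxDtMutexExit(&Mtx);
        VBoxDtMutexDelete(&Mtx);
    }
}
#endif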
1300
1301
1302
1303/*
1304 *
1305 * Helpers for handling VTG structures.
1306 * Helpers for handling VTG structures.
1307 * Helpers for handling VTG structures.
1308 *
1309 */
1310
1311
1312
1313/**
1314 * Converts an attribute from VTG description speak to DTrace.
1315 *
1316 * @param pDtAttr The DTrace attribute (dst).
1317 * @param pVtgAttr The VTG attribute descriptor (src).
1318 */
1319static void vboxDtVtgConvAttr(dtrace_attribute_t *pDtAttr, PCVTGDESCATTR pVtgAttr)
1320{
1321 pDtAttr->dtat_name = pVtgAttr->u8Code - 1;
1322 pDtAttr->dtat_data = pVtgAttr->u8Data - 1;
1323 pDtAttr->dtat_class = pVtgAttr->u8DataDep - 1;
1324}
1325
1326/**
1327 * Gets a string from the string table.
1328 *
1329 * @returns Pointer to the string.
1330 * @param pVtgHdr The VTG object header.
1331 * @param offStrTab The string table offset.
1332 */
1333static const char *vboxDtVtgGetString(PVTGOBJHDR pVtgHdr, uint32_t offStrTab)
1334{
1335 Assert(offStrTab < pVtgHdr->cbStrTab);
1336 return &pVtgHdr->pachStrTab[offStrTab];
1337}
1338
1339
1340
1341/*
1342 *
1343 * DTrace Provider Interface.
1344 * DTrace Provider Interface.
1345 * DTrace Provider Interface.
1346 *
1347 */
1348
1349
1350/**
1351 * @callback_method_impl{dtrace_pops_t,dtps_provide}
1352 */
1353static void vboxDtPOps_Provide(void *pvProv, const dtrace_probedesc_t *pDtProbeDesc)
1354{
1355 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1356 PVTGPROBELOC pProbeLoc = pProv->pHdr->paProbLocs;
1357 PVTGPROBELOC pProbeLocEnd = pProv->pHdr->paProbLocsEnd;
1358 dtrace_provider_id_t idProvider = pProv->TracerData.DTrace.idProvider;
1359 size_t const cbFnNmBuf = _4K + _1K;
1360 char *pszFnNmBuf;
1361 uint16_t idxProv;
1362
1363 if (pDtProbeDesc)
1364 return; /* We don't generate probes, so never mind these requests. */
1365
1366 if (pProv->TracerData.DTrace.fZombie)
1367 return;
1368
1369 if (pProv->TracerData.DTrace.cProvidedProbes >= pProbeLocEnd - pProbeLoc)
1370 return;
1371
1372 /* Need a buffer for extracting the function names and mangling them in
1373 case of collision. */
1374 pszFnNmBuf = (char *)RTMemAlloc(cbFnNmBuf);
1375 if (!pszFnNmBuf)
1376 return;
1377
1378 /*
1379 * Iterate the probe location list and register all probes related to
1380 * this provider.
1381 */
1382 idxProv = (uint16_t)(&pProv->pHdr->paProviders[0] - pProv->pDesc);
1383 while ((uintptr_t)pProbeLoc < (uintptr_t)pProbeLocEnd)
1384 {
1385 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1386 if ( pProbeDesc->idxProvider == idxProv
1387 && pProbeLoc->idProbe == UINT32_MAX)
1388 {
1389 /* The function name normally needs to be stripped since we're
1390 using C++ compilers for most of the code. ASSUMES nobody is
1391 brave/stupid enough to use function pointer return types without
1392 typedef'ing them properly. */
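            /* Editor's example (hypothetical): a pszFunction string such as
               "int MyNamespace::myProbeSite(int, void *)" yields the function
               component "MyNamespace::myProbeSite". */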
1393 const char *pszPrbName = vboxDtVtgGetString(pProv->pHdr, pProbeDesc->offName);
1394 const char *pszFunc = pProbeLoc->pszFunction;
1395 const char *psz = strchr(pProbeLoc->pszFunction, '(');
1396 size_t cch;
1397 if (psz)
1398 {
1399 /* skip blanks preceding the parameter parenthesis. */
1400 while ( (uintptr_t)psz > (uintptr_t)pProbeLoc->pszFunction
1401 && RT_C_IS_BLANK(psz[-1]))
1402 psz--;
1403
1404 /* Find the start of the function name. */
1405 pszFunc = psz - 1;
1406 while ((uintptr_t)pszFunc > (uintptr_t)pProbeLoc->pszFunction)
1407 {
1408 char ch = pszFunc[-1];
1409 if (!RT_C_IS_ALNUM(ch) && ch != '_' && ch != ':')
1410 break;
1411 pszFunc--;
1412 }
1413 cch = psz - pszFunc;
1414 }
1415 else
1416 cch = strlen(pszFunc);
1417 RTStrCopyEx(pszFnNmBuf, cbFnNmBuf, pszFunc, cch);
1418
1419 /* Look up the probe, if we have one in the same function, mangle
1420 the function name a little to avoid having to deal with having
1421 multiple location entries with the same probe ID. (lazy bird) */
1422 Assert(pProbeLoc->idProbe == UINT32_MAX);
1423 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1424 {
1425 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u", pProbeLoc->uLine);
1426 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1427 {
1428 unsigned iOrd = 2;
1429 while (iOrd < 128)
1430 {
1431 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u-%u", pProbeLoc->uLine, iOrd);
1432 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) == DTRACE_IDNONE)
1433 break;
1434 iOrd++;
1435 }
1436 if (iOrd >= 128)
1437 {
1438 LogRel(("VBoxDrv: More than 128 duplicate probe location instances in file %s at line %u, function %s [%s], probe %s\n",
1439 pProbeLoc->pszFile, pProbeLoc->uLine, pProbeLoc->pszFunction, pszFnNmBuf, pszPrbName));
1440 continue;
1441 }
1442 }
1443 }
1444
1445 /* Create the probe. */
1446 AssertCompile(sizeof(pProbeLoc->idProbe) == sizeof(dtrace_id_t));
1447 pProbeLoc->idProbe = dtrace_probe_create(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName,
1448 1 /*aframes*/, pProbeLoc);
1449 pProv->TracerData.DTrace.cProvidedProbes++;
1450 }
1451
1452 pProbeLoc++;
1453 }
1454
1455 RTMemFree(pszFnNmBuf);
1456}
1457
1458
1459/**
1460 * @callback_method_impl{dtrace_pops_t,dtps_enable}
1461 */
1462static int vboxDtPOps_Enable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1463{
1464 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1465 if (!pProv->TracerData.DTrace.fZombie)
1466 {
1467 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1468 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1469
1470 if (!pProbeLoc->fEnabled)
1471 {
1472 pProbeLoc->fEnabled = 1;
1473 if (ASMAtomicIncU32(&pProbeDesc->u32User) == 1)
1474 pProv->pHdr->pafProbeEnabled[pProbeDesc->idxEnabled] = 1;
1475 }
1476 }
1477
1478 return 0;
1479}
1480
1481
1482/**
1483 * @callback_method_impl{dtrace_pops_t,dtps_disable}
1484 */
1485static void vboxDtPOps_Disable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1486{
1487 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1488 if (!pProv->TracerData.DTrace.fZombie)
1489 {
1490 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1491 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1492
1493 if (pProbeLoc->fEnabled)
1494 {
1495 pProbeLoc->fEnabled = 0;
1496 if (ASMAtomicDecU32(&pProbeDesc->u32User) == 0)
1497 pProv->pHdr->pafProbeEnabled[pProbeDesc->idxEnabled] = 0;
1498 }
1499 }
1500}
1501
1502
1503/**
1504 * @callback_method_impl{dtrace_pops_t,dtps_getargdesc}
1505 */
1506static void vboxDtPOps_GetArgDesc(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1507 dtrace_argdesc_t *pArgDesc)
1508{
1509 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1510 unsigned uArg = pArgDesc->dtargd_ndx;
1511
1512 if (!pProv->TracerData.DTrace.fZombie)
1513 {
1514 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1515 PVTGDESCPROBE pProbeDesc = (PVTGDESCPROBE)pProbeLoc->pbProbe;
1516 PVTGDESCARGLIST pArgList = (PVTGDESCARGLIST)((uintptr_t)pProv->pHdr->paArgLists + pProbeDesc->offArgList);
1517
1518 Assert(pProbeDesc->offArgList < pProv->pHdr->cbArgLists);
1519 if (pArgList->cArgs > uArg)
1520 {
1521 const char *pszType = vboxDtVtgGetString(pProv->pHdr, pArgList->aArgs[uArg].offType);
1522 size_t cchType = strlen(pszType);
1523 if (cchType < sizeof(pArgDesc->dtargd_native))
1524 {
1525 memcpy(pArgDesc->dtargd_native, pszType, cchType + 1);
1526 /** @todo mapping */
1527 return;
1528 }
1529 }
1530 }
1531
1532 pArgDesc->dtargd_ndx = DTRACE_ARGNONE;
1533}
1534
1535
1536/**
1537 * @callback_method_impl{dtrace_pops_t,dtps_getargval}
1538 */
1539static uint64_t vboxDtPOps_GetArgVal(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1540 int iArg, int cFrames)
1541{
1542 PVBDTSTACKDATA pData = vboxDtGetStackData();
1543 AssertReturn(iArg >= 5, UINT64_MAX);
1544
1545 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
1546 return pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
1547
1548 if (pData->enmCaller == kVBoxDtCaller_ProbeFireUser)
1549 {
1550 PCSUPDRVTRACERUSRCTX pCtx = pData->u.ProbeFireUser.pCtx;
1551 if (pCtx->cBits == 32)
1552 {
1553 if ((unsigned)iArg < RT_ELEMENTS(pCtx->u.X86.aArgs))
1554 return pCtx->u.X86.aArgs[iArg];
1555 }
1556 else if (pCtx->cBits == 64)
1557 {
1558 if ((unsigned)iArg < RT_ELEMENTS(pCtx->u.Amd64.aArgs))
1559 return pCtx->u.Amd64.aArgs[iArg];
1560 }
1561 else
1562 AssertFailed();
1563 }
1564
1565 return UINT64_MAX;
1566}
1567
1568
1569/**
1570 * @callback_method_impl{dtrace_pops_t,dtps_destroy}
1571 */
1572static void vboxDtPOps_Destroy(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1573{
1574 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1575 if (!pProv->TracerData.DTrace.fZombie)
1576 {
1577 PVTGPROBELOC pProbeLoc = (PVTGPROBELOC)pvProbe;
1578 Assert(!pProbeLoc->fEnabled);
1579 Assert(pProbeLoc->idProbe == idProbe); NOREF(idProbe);
1580 pProbeLoc->idProbe = UINT32_MAX;
1581 }
1582 pProv->TracerData.DTrace.cProvidedProbes--;
1583}
1584
1585
1586
1587/**
1588 * DTrace provider method table.
1589 */
1590static const dtrace_pops_t g_vboxDtVtgProvOps =
1591{
1592 /* .dtps_provide = */ vboxDtPOps_Provide,
1593 /* .dtps_provide_module = */ NULL,
1594 /* .dtps_enable = */ vboxDtPOps_Enable,
1595 /* .dtps_disable = */ vboxDtPOps_Disable,
1596 /* .dtps_suspend = */ NULL,
1597 /* .dtps_resume = */ NULL,
1598 /* .dtps_getargdesc = */ vboxDtPOps_GetArgDesc,
1599 /* .dtps_getargval = */ vboxDtPOps_GetArgVal,
1600 /* .dtps_usermode = */ NULL,
1601 /* .dtps_destroy = */ vboxDtPOps_Destroy
1602};
1603
1604
1605
1606
1607/*
1608 *
1609 * Support Driver Tracer Interface.
1610 * Support Driver Tracer Interface.
1611 * Support Driver Tracer Interface.
1612 *
1613 */
1614
1615
1616
1617/**
1618 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireKernel}
1619 */
1620static DECLCALLBACK(void) vbdt_ProbeFireKernel(struct VTGPROBELOC *pVtgProbeLoc, uintptr_t uArg0, uintptr_t uArg1, uintptr_t uArg2,
1621 uintptr_t uArg3, uintptr_t uArg4)
1622{
1623 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireKernel);
1624
1625 pStackData->u.ProbeFireKernel.uCaller = (uintptr_t)ASMReturnAddress();
1626 pStackData->u.ProbeFireKernel.pauStackArgs = &uArg4 + 1;
1627 dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
1628
1629 VBDT_CLEAR_STACK_DATA();
1630 return ;
1631}
1632
1633
1634/**
1635 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireUser}
1636 */
1637static DECLCALLBACK(void) vbdt_ProbeFireUser(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, PCSUPDRVTRACERUSRCTX pCtx)
1638{
1639 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireUser);
1640
1641 pStackData->u.ProbeFireUser.pCtx = pCtx;
1642 if (pCtx->cBits == 32)
1643 dtrace_probe(pCtx->idProbe,
1644 pCtx->u.X86.aArgs[0],
1645 pCtx->u.X86.aArgs[1],
1646 pCtx->u.X86.aArgs[2],
1647 pCtx->u.X86.aArgs[3],
1648 pCtx->u.X86.aArgs[4]);
1649 else if (pCtx->cBits == 64)
1650 dtrace_probe(pCtx->idProbe,
1651 pCtx->u.Amd64.aArgs[0],
1652 pCtx->u.Amd64.aArgs[1],
1653 pCtx->u.Amd64.aArgs[2],
1654 pCtx->u.Amd64.aArgs[3],
1655 pCtx->u.Amd64.aArgs[4]);
1656 else
1657 AssertFailed();
1658
1659 VBDT_CLEAR_STACK_DATA();
1660}
1661
1662
1663/**
1664 * interface_method_impl{SUPDRVTRACERREG,pfnTracerOpen}
1665 */
1666static DECLCALLBACK(int) vbdt_TracerOpen(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uint32_t uCookie, uintptr_t uArg,
1667 uintptr_t *puSessionData)
1668{
1669 if (uCookie != RT_MAKE_U32_FROM_U8('V', 'B', 'D', 'T'))
1670 return VERR_INVALID_MAGIC;
1671 if (uArg)
1672 return VERR_INVALID_PARAMETER;
1673
1674 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1675
1676 int rc = dtrace_open((dtrace_state_t **)puSessionData, VBoxDtGetCurrentCreds());
1677
1678 VBDT_CLEAR_STACK_DATA();
1679 return RTErrConvertFromErrno(rc);
1680}
1681
1682
1683/**
1684 * interface_method_impl{SUPDRVTRACERREG,pfnTracerIoCtl}
1685 */
1686static DECLCALLBACK(int) vbdt_TracerIoCtl(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData,
1687 uintptr_t uCmd, uintptr_t uArg, int32_t *piRetVal)
1688{
1689 AssertPtrReturn(uSessionData, VERR_INVALID_POINTER);
1690 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1691
1692 int rc = dtrace_ioctl((dtrace_state_t *)uSessionData, (intptr_t)uCmd, (intptr_t)uArg, piRetVal);
1693
1694 VBDT_CLEAR_STACK_DATA();
1695 return RTErrConvertFromErrno(rc);
1696}
1697
1698
1699/**
1700 * interface_method_impl{SUPDRVTRACERREG,pfnTracerClose}
1701 */
1702static DECLCALLBACK(void) vbdt_TracerClose(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData)
1703{
1704 AssertPtrReturnVoid(uSessionData);
1705 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1706
1707 dtrace_close((dtrace_state_t *)uSessionData);
1708
1709 VBDT_CLEAR_STACK_DATA();
1710}
1711
1712
1713/**
1714 * interface_method_impl{SUPDRVTRACERREG,pfnProviderRegister}
1715 */
1716static DECLCALLBACK(int) vbdt_ProviderRegister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
1717{
1718 AssertReturn(pCore->TracerData.DTrace.idProvider == UINT32_MAX || pCore->TracerData.DTrace.idProvider == 0,
1719 VERR_INTERNAL_ERROR_3);
1720 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1721
1722 PVTGDESCPROVIDER pDesc = pCore->pDesc;
1723 dtrace_pattr_t DtAttrs;
1724 vboxDtVtgConvAttr(&DtAttrs.dtpa_provider, &pDesc->AttrSelf);
1725 vboxDtVtgConvAttr(&DtAttrs.dtpa_mod, &pDesc->AttrModules);
1726 vboxDtVtgConvAttr(&DtAttrs.dtpa_func, &pDesc->AttrFunctions);
1727 vboxDtVtgConvAttr(&DtAttrs.dtpa_name, &pDesc->AttrNames);
1728 vboxDtVtgConvAttr(&DtAttrs.dtpa_args, &pDesc->AttrArguments);
1729
1730 dtrace_provider_id_t idProvider;
1731 int rc = dtrace_register(pCore->pszName,
1732 &DtAttrs,
1733 DTRACE_PRIV_KERNEL,
1734 NULL /* cred */,
1735 &g_vboxDtVtgProvOps,
1736 pCore,
1737 &idProvider);
1738 if (!rc)
1739 {
1740 Assert(idProvider != UINT32_MAX && idProvider != 0);
1741 pCore->TracerData.DTrace.idProvider = idProvider;
1742 Assert(pCore->TracerData.DTrace.idProvider == idProvider);
1743 rc = VINF_SUCCESS;
1744 }
1745 else
1746 rc = RTErrConvertFromErrno(rc);
1747
1748 VBDT_CLEAR_STACK_DATA();
1749 return rc;
1750}
1751
1752
1753/**
1754 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregister}
1755 */
1756static DECLCALLBACK(int) vbdt_ProviderDeregister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
1757{
1758 uint32_t idProvider = pCore->TracerData.DTrace.idProvider;
1759 AssertReturn(idProvider != UINT32_MAX && idProvider != 0, VERR_INTERNAL_ERROR_4);
1760 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1761
1762 dtrace_invalidate(idProvider);
1763 int rc = dtrace_unregister(idProvider);
1764 if (!rc)
1765 {
1766 pCore->TracerData.DTrace.idProvider = UINT32_MAX;
1767 rc = VINF_SUCCESS;
1768 }
1769 else
1770 {
1771 AssertMsg(rc == EBUSY, ("%d\n", rc));
1772 rc = VERR_TRY_AGAIN;
1773 }
1774
1775 VBDT_CLEAR_STACK_DATA();
1776 return rc;
1777}
1778
1779
1780/**
1781 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregisterZombie}
1782 */
1783static DECLCALLBACK(int) vbdt_ProviderDeregisterZombie(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
1784{
1785 uint32_t idProvider = pCore->TracerData.DTrace.idProvider;
1786 AssertReturn(idProvider != UINT32_MAX && idProvider != 0, VERR_INTERNAL_ERROR_4);
1787 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1788
1789 int rc = dtrace_unregister(idProvider);
1790 if (!rc)
1791 {
1792 pCore->TracerData.DTrace.idProvider = UINT32_MAX;
1793 rc = VINF_SUCCESS;
1794 }
1795 else
1796 {
1797 AssertMsg(rc == EBUSY, ("%d\n", rc));
1798 rc = VERR_TRY_AGAIN;
1799 }
1800
1801 VBDT_CLEAR_STACK_DATA();
1802 return rc;
1803}
1804
1805
1806
1807/**
1808 * The tracer registration record of the VBox DTrace implementation
1809 */
1810static SUPDRVTRACERREG g_VBoxDTraceReg =
1811{
1812 SUPDRVTRACERREG_MAGIC,
1813 SUPDRVTRACERREG_VERSION,
1814 vbdt_ProbeFireKernel,
1815 vbdt_ProbeFireUser,
1816 vbdt_TracerOpen,
1817 vbdt_TracerIoCtl,
1818 vbdt_TracerClose,
1819 vbdt_ProviderRegister,
1820 vbdt_ProviderDeregister,
1821 vbdt_ProviderDeregisterZombie,
1822 SUPDRVTRACERREG_MAGIC
1823};
1824
1825
1826
1827/**
1828 * Module termination code.
1829 *
1830 * @param hMod Opaque module handle.
1831 */
1832DECLEXPORT(void) ModuleTerm(void *hMod)
1833{
1834SUPR0Printf("ModuleTerm: IF=%RTbool#1\n", ASMIntAreEnabled());
1835 SUPR0TracerDeregisterImpl(hMod, NULL);
1836SUPR0Printf("ModuleTerm: IF=%RTbool#2\n", ASMIntAreEnabled());
1837 dtrace_detach();
1838SUPR0Printf("ModuleTerm: IF=%RTbool#3\n", ASMIntAreEnabled());
1839}
1840
1841
1842/**
1843 * Module initialization code.
1844 *
1845 * @param hMod Opaque module handle.
1846 */
1847DECLEXPORT(int) ModuleInit(void *hMod)
1848{
1849SUPR0Printf("ModuleInit: IF=%RTbool#1\n", ASMIntAreEnabled());
1850
1851 int rc = dtrace_attach();
1852 if (rc == DDI_SUCCESS)
1853 {
1854SUPR0Printf("ModuleInit: IF=%RTbool #2\n", ASMIntAreEnabled());
1855 rc = SUPR0TracerRegisterImpl(hMod, NULL, &g_VBoxDTraceReg, &g_pVBoxDTraceHlp);
1856 if (RT_SUCCESS(rc))
1857 {
1858SUPR0Printf("ModuleInit: IF=%RTbool #3\n", ASMIntAreEnabled());
1859 return rc;
1860 }
1861
1862 dtrace_detach();
1863 }
1864 else
1865 {
1866 SUPR0Printf("dtrace_attach -> %d\n", rc);
1867 rc = VERR_INTERNAL_ERROR_5;
1868 }
1869
1870 return rc;
1871}
1872