VirtualBox

source: vbox/trunk/src/VBox/ExtPacks/VBoxDTrace/VBoxDTraceR0.cpp

Last change on this file was 106921, checked in by vboxsync, 2 months ago

VBoxDTrace,VMM,IPRT: Made the ring-0 component of the DTrace extension pack compile on win.arm64. jiraref:VBP-1447

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 65.8 KB
Line 
1/* $Id: VBoxDTraceR0.cpp 106921 2024-11-11 09:51:28Z vboxsync $ */
2/** @file
3 * VBoxDTraceR0.
4 *
5 * Contributed by: bird
6 */
7
8/*
9 * Copyright (C) 2012-2024 Oracle and/or its affiliates.
10 *
11 * This file is part of VirtualBox base platform packages, as
12 * available from http://www.virtualbox.org.
13 *
14 * The contents of this file are subject to the terms of the Common
15 * Development and Distribution License Version 1.0 (CDDL) only, as it
16 * comes in the "COPYING.CDDL" file of the VirtualBox distribution.
17 *
18 * SPDX-License-Identifier: CDDL-1.0
19 */
20
21
22/*********************************************************************************************************************************
23* Header Files *
24*********************************************************************************************************************************/
25#include <VBox/sup.h>
26#include <VBox/log.h>
27
28#if defined(RT_ARCH_AMD64) || defined(RT_ARCH_X86)
29# include <iprt/asm-amd64-x86.h>
30#endif
31#include <iprt/assert.h>
32#include <iprt/ctype.h>
33#include <iprt/err.h>
34#include <iprt/mem.h>
35#include <iprt/mp.h>
36#include <iprt/process.h>
37#include <iprt/semaphore.h>
38#include <iprt/spinlock.h>
39#include <iprt/string.h>
40#include <iprt/thread.h>
41#include <iprt/time.h>
42
43#include <sys/dtrace_impl.h>
44
45#include <VBox/VBoxTpG.h>
46
47
48/*********************************************************************************************************************************
49* Defined Constants And Macros *
50*********************************************************************************************************************************/
51//#if !defined(RT_OS_WINDOWS) && !defined(RT_OS_OS2)
52//# define HAVE_RTMEMALLOCEX_FEATURES
53//#endif
54
55
56/*********************************************************************************************************************************
57* Structures and Typedefs *
58*********************************************************************************************************************************/
59
/** Caller indicator.
 * Identifies which external entry point planted the on-stack VBDTSTACKDATA,
 * i.e. which member of the VBoxDtStackData::u union (if any) is valid. */
typedef enum VBOXDTCALLER
{
    kVBoxDtCaller_Invalid = 0,
    kVBoxDtCaller_Generic,
    kVBoxDtCaller_ProbeFireUser,
    kVBoxDtCaller_ProbeFireKernel
} VBOXDTCALLER;
68
/**
 * Stack data used for thread structure and such.
 *
 * This is planted in every external entry point and used to emulate solaris
 * curthread, CRED, curproc and similar.  It is also used to get at the
 * uncached probe arguments.
 */
typedef struct VBoxDtStackData
{
    /** Eyecatcher no. 1 (VBDT_STACK_DATA_MAGIC1). */
    uint32_t                u32Magic1;
    /** Eyecatcher no. 2 (VBDT_STACK_DATA_MAGIC2). */
    uint32_t                u32Magic2;
    /** The format of the caller specific data. */
    VBOXDTCALLER            enmCaller;
    /** Caller specific data.  Which member is valid depends on enmCaller. */
    union
    {
        /** kVBoxDtCaller_ProbeFireKernel. */
        struct
        {
            /** The caller. */
            uintptr_t               uCaller;
            /** Pointer to the stack arguments of a probe function call. */
            uintptr_t              *pauStackArgs;
        } ProbeFireKernel;
        /** kVBoxDtCaller_ProbeFireUser. */
        struct
        {
            /** The user context. */
            PCSUPDRVTRACERUSRCTX    pCtx;
            /** The argument displacement caused by 64-bit arguments passed directly to
             * dtrace_probe. */
            int                     offArg;
        } ProbeFireUser;
    } u;
    /** Credentials allocated by VBoxDtGetCurrentCreds. */
    struct VBoxDtCred      *pCred;
    /** Thread structure currently being held by this thread. */
    struct VBoxDtThread    *pThread;
    /** Pointer to this structure.
     * This is the final bit of integrity checking. */
    struct VBoxDtStackData *pSelf;
} VBDTSTACKDATA;
/** Pointer to the on-stack thread specific data. */
typedef VBDTSTACKDATA *PVBDTSTACKDATA;
115
/** The first magic value. */
#define VBDT_STACK_DATA_MAGIC1      RT_MAKE_U32_FROM_U8('V', 'B', 'o', 'x')
/** The second magic value. */
#define VBDT_STACK_DATA_MAGIC2      RT_MAKE_U32_FROM_U8('D', 'T', 'r', 'c')

/** The alignment of the stack data.
 * The data doesn't require more than sizeof(uintptr_t) alignment, but the
 * greater alignment the quicker lookup. */
#define VBDT_STACK_DATA_ALIGN       32

/** Plants the stack data.
 * Declares an over-sized byte blob on the caller's stack and points
 * pStackData at the first VBDT_STACK_DATA_ALIGN aligned address inside it,
 * then initializes the magics and caller indicator so that
 * vboxDtGetStackData() can find the structure again by scanning the stack. */
#define VBDT_SETUP_STACK_DATA(a_enmCaller) \
    uint8_t abBlob[sizeof(VBDTSTACKDATA) + VBDT_STACK_DATA_ALIGN - 1]; \
    PVBDTSTACKDATA pStackData = (PVBDTSTACKDATA)( (uintptr_t)&abBlob[VBDT_STACK_DATA_ALIGN - 1] \
                                                 & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1)); \
    pStackData->u32Magic1   = VBDT_STACK_DATA_MAGIC1; \
    pStackData->u32Magic2   = VBDT_STACK_DATA_MAGIC2; \
    pStackData->enmCaller   = a_enmCaller; \
    pStackData->pCred       = NULL; \
    pStackData->pThread     = NULL; \
    pStackData->pSelf       = pStackData
137
/** Pacifies the stack data and frees up resources held within it.
 * Zeroing the magics ensures a stale blob cannot be picked up by a later
 * vboxDtGetStackData() scan; held credentials and thread references are
 * released. */
#define VBDT_CLEAR_STACK_DATA() \
    do \
    { \
        pStackData->u32Magic1 = 0; \
        pStackData->u32Magic2 = 0; \
        pStackData->pSelf     = NULL; \
        if (pStackData->pCred) \
            crfree(pStackData->pCred); \
        if (pStackData->pThread) \
            VBoxDtReleaseThread(pStackData->pThread); \
    } while (0)
150
151
/** Simple SUPR0Printf-style logging, compiled out by default. */
#if 0 /*def DEBUG_bird*/
# define LOG_DTRACE(a) SUPR0Printf a
#else
# define LOG_DTRACE(a) do { } while (0)
#endif
158
159
160/*********************************************************************************************************************************
161* Global Variables *
162*********************************************************************************************************************************/
/** Per CPU information */
cpucore_t                   g_aVBoxDtCpuCores[RTCPUSET_MAX_CPUS];
/** Dummy mutex. */
struct VBoxDtMutex          g_DummyMtx;
/** Pointer to the tracer helpers provided by VBoxDrv. */
static PCSUPDRVTRACERHLP    g_pVBoxDTraceHlp;

/** Next predicate cache ID to hand out (DTRACE_CACHEIDNONE is reserved). */
dtrace_cacheid_t dtrace_predcache_id = DTRACE_CACHEIDNONE + 1;

#if 0 /* Hooks from the solaris port that are not used here. */
void           (*dtrace_cpu_init)(processorid_t);
void           (*dtrace_modload)(struct modctl *);
void           (*dtrace_modunload)(struct modctl *);
void           (*dtrace_helpers_cleanup)(void);
void           (*dtrace_helpers_fork)(proc_t *, proc_t *);
void           (*dtrace_cpustart_init)(void);
void           (*dtrace_cpustart_fini)(void);
void           (*dtrace_cpc_fire)(uint64_t);
void           (*dtrace_debugger_init)(void);
void           (*dtrace_debugger_fini)(void);
#endif
184
185
/**
 * Gets the stack data.
 *
 * Scans upwards from a local stack variable, one VBDT_STACK_DATA_ALIGN step
 * at a time, until the magics and self-pointer of a blob planted by
 * VBDT_SETUP_STACK_DATA() are found.  Relies on the entry point having
 * executed VBDT_SETUP_STACK_DATA(); if it did not, the loop walks off the
 * stack (no bounds check).
 *
 * @returns Pointer to the stack data.  Never NULL.
 */
static PVBDTSTACKDATA vboxDtGetStackData(void)
{
    int volatile iDummy = 1; /* use this to get the stack address. */
    PVBDTSTACKDATA pData = (PVBDTSTACKDATA)( ((uintptr_t)&iDummy + VBDT_STACK_DATA_ALIGN - 1)
                                            & ~(uintptr_t)(VBDT_STACK_DATA_ALIGN - 1));
    for (;;)
    {
        if (   pData->u32Magic1 == VBDT_STACK_DATA_MAGIC1
            && pData->u32Magic2 == VBDT_STACK_DATA_MAGIC2
            && pData->pSelf     == pData)
            return pData;
        pData = (PVBDTSTACKDATA)((uintptr_t)pData + VBDT_STACK_DATA_ALIGN);
    }
}
205
206
/**
 * Reports toxic (unsafe to probe) address ranges to the DTrace core.
 * This port reports none.
 *
 * @param   pfnAddOne   Callback for registering one range (unused).
 */
void dtrace_toxic_ranges(void (*pfnAddOne)(uintptr_t uBase, uintptr_t cbRange))
{
    /** @todo ? */
    (void)pfnAddOne;
}
212
213
214
/**
 * Dummy callback used by dtrace_sync.
 *
 * @param   idCpu       The CPU it is called on (ignored).
 * @param   pvUser1     Ignored.
 * @param   pvUser2     Ignored.
 */
static DECLCALLBACK(void) vboxDtSyncCallback(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
}
222
223
/**
 * Synchronize across all CPUs (expensive).
 *
 * Rendezvous with every online CPU by running a no-op callback on each.
 */
void dtrace_sync(void)
{
    int rc = RTMpOnAll(vboxDtSyncCallback, NULL, NULL);
    AssertRC(rc);
}
232
233
234/**
235 * Fetch a 8-bit "word" from userland.
236 *
237 * @return The byte value.
238 * @param pvUserAddr The userland address.
239 */
240uint8_t dtrace_fuword8( void *pvUserAddr)
241{
242 uint8_t u8;
243 int rc = RTR0MemUserCopyFrom(&u8, (uintptr_t)pvUserAddr, sizeof(u8));
244 if (RT_FAILURE(rc))
245 {
246 RTCPUID iCpu = VBDT_GET_CPUID();
247 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
248 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
249 u8 = 0;
250 }
251 return u8;
252}
253
254
255/**
256 * Fetch a 16-bit word from userland.
257 *
258 * @return The word value.
259 * @param pvUserAddr The userland address.
260 */
261uint16_t dtrace_fuword16(void *pvUserAddr)
262{
263 uint16_t u16;
264 int rc = RTR0MemUserCopyFrom(&u16, (uintptr_t)pvUserAddr, sizeof(u16));
265 if (RT_FAILURE(rc))
266 {
267 RTCPUID iCpu = VBDT_GET_CPUID();
268 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
269 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
270 u16 = 0;
271 }
272 return u16;
273}
274
275
276/**
277 * Fetch a 32-bit word from userland.
278 *
279 * @return The dword value.
280 * @param pvUserAddr The userland address.
281 */
282uint32_t dtrace_fuword32(void *pvUserAddr)
283{
284 uint32_t u32;
285 int rc = RTR0MemUserCopyFrom(&u32, (uintptr_t)pvUserAddr, sizeof(u32));
286 if (RT_FAILURE(rc))
287 {
288 RTCPUID iCpu = VBDT_GET_CPUID();
289 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
290 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
291 u32 = 0;
292 }
293 return u32;
294}
295
296
297/**
298 * Fetch a 64-bit word from userland.
299 *
300 * @return The qword value.
301 * @param pvUserAddr The userland address.
302 */
303uint64_t dtrace_fuword64(void *pvUserAddr)
304{
305 uint64_t u64;
306 int rc = RTR0MemUserCopyFrom(&u64, (uintptr_t)pvUserAddr, sizeof(u64));
307 if (RT_FAILURE(rc))
308 {
309 RTCPUID iCpu = VBDT_GET_CPUID();
310 cpu_core[iCpu].cpuc_dtrace_flags |= CPU_DTRACE_BADADDR;
311 cpu_core[iCpu].cpuc_dtrace_illval = (uintptr_t)pvUserAddr;
312 u64 = 0;
313 }
314 return u64;
315}
316
317
318/** copyin implementation */
319int VBoxDtCopyIn(void const *pvUser, void *pvDst, size_t cb)
320{
321 int rc = RTR0MemUserCopyFrom(pvDst, (uintptr_t)pvUser, cb);
322 return RT_SUCCESS(rc) ? 0 : -1;
323}
324
325
326/** copyout implementation */
327int VBoxDtCopyOut(void const *pvSrc, void *pvUser, size_t cb)
328{
329 int rc = RTR0MemUserCopyTo((uintptr_t)pvUser, pvSrc, cb);
330 return RT_SUCCESS(rc) ? 0 : -1;
331}
332
333
334/**
335 * Copy data from userland into the kernel.
336 *
337 * @param uUserAddr The userland address.
338 * @param uKrnlAddr The kernel buffer address.
339 * @param cb The number of bytes to copy.
340 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
341 */
342void dtrace_copyin( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cb, volatile uint16_t *pfFlags)
343{
344 int rc = RTR0MemUserCopyFrom((void *)uKrnlAddr, uUserAddr, cb);
345 if (RT_FAILURE(rc))
346 {
347 *pfFlags |= CPU_DTRACE_BADADDR;
348 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
349 }
350}
351
352
353/**
354 * Copy data from the kernel into userland.
355 *
356 * @param uKrnlAddr The kernel buffer address.
357 * @param uUserAddr The userland address.
358 * @param cb The number of bytes to copy.
359 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
360 */
361void dtrace_copyout( uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cb, volatile uint16_t *pfFlags)
362{
363 int rc = RTR0MemUserCopyTo(uUserAddr, (void const *)uKrnlAddr, cb);
364 if (RT_FAILURE(rc))
365 {
366 *pfFlags |= CPU_DTRACE_BADADDR;
367 cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
368 }
369}
370
371
/**
 * Copy a string from userland into the kernel.
 *
 * The result is always zero terminated (when cbMax > 0).  On a fault,
 * CPU_DTRACE_BADADDR is set, the offending address recorded, and whatever
 * was copied so far is kept (terminated at the failure point).
 *
 * @param   uUserAddr           The userland address.
 * @param   uKrnlAddr           The kernel buffer address.
 * @param   cbMax               The maximum number of bytes to copy. May stop
 *                              earlier if zero byte is encountered.
 * @param   pfFlags             Pointer to the relevant cpuc_dtrace_flags.
 */
void dtrace_copyinstr( uintptr_t uUserAddr, uintptr_t uKrnlAddr, size_t cbMax, volatile uint16_t *pfFlags)
{
    if (!cbMax)
        return;

    char *pszDst = (char *)uKrnlAddr;
    /* Fast path: grab the whole cbMax buffer in one go; this may read past
       the end of the user string. */
    int rc = RTR0MemUserCopyFrom(pszDst, uUserAddr, cbMax);
    if (RT_FAILURE(rc))
    {
        /* Byte by byte - lazy bird! */
        size_t off = 0;
        while (off < cbMax)
        {
            rc = RTR0MemUserCopyFrom(&pszDst[off], uUserAddr + off, 1);
            if (RT_FAILURE(rc))
            {
                *pfFlags |= CPU_DTRACE_BADADDR;
                cpu_core[VBDT_GET_CPUID()].cpuc_dtrace_illval = uUserAddr;
                pszDst[off] = '\0';
                return;
            }
            if (!pszDst[off])
                return;
            off++;
        }
    }

    /* Force termination (no-op if a terminator was already copied). */
    pszDst[cbMax - 1] = '\0';
}
410
411
412/**
413 * Copy a string from the kernel and into user land.
414 *
415 * @param uKrnlAddr The kernel string address.
416 * @param uUserAddr The userland address.
417 * @param cbMax The maximum number of bytes to copy. Will stop
418 * earlier if zero byte is encountered.
419 * @param pfFlags Pointer to the relevant cpuc_dtrace_flags.
420 */
421void dtrace_copyoutstr(uintptr_t uKrnlAddr, uintptr_t uUserAddr, size_t cbMax, volatile uint16_t *pfFlags)
422{
423 const char *pszSrc = (const char *)uKrnlAddr;
424 size_t cbActual = RTStrNLen(pszSrc, cbMax);
425 cbActual += cbActual < cbMax;
426 dtrace_copyout(uKrnlAddr,uUserAddr, cbActual, pfFlags);
427}
428
429
430/**
431 * Get the caller @a cCallFrames call frames up the stack.
432 *
433 * @returns The caller's return address or ~(uintptr_t)0.
434 * @param cCallFrames The number of frames.
435 */
436uintptr_t dtrace_caller(int cCallFrames)
437{
438 PVBDTSTACKDATA pData = vboxDtGetStackData();
439 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
440 return pData->u.ProbeFireKernel.uCaller;
441 RT_NOREF_PV(cCallFrames);
442 return ~(uintptr_t)0;
443}
444
445
446/**
447 * Get argument number @a iArg @a cCallFrames call frames up the stack.
448 *
449 * @returns The caller's return address or ~(uintptr_t)0.
450 * @param iArg The argument to get.
451 * @param cCallFrames The number of frames.
452 */
453uint64_t dtrace_getarg(int iArg, int cCallFrames)
454{
455 PVBDTSTACKDATA pData = vboxDtGetStackData();
456 AssertReturn(iArg >= 5, UINT64_MAX);
457
458 if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
459 return pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
460 RT_NOREF_PV(cCallFrames);
461 return UINT64_MAX;
462}
463
464
465/**
466 * Produce a traceback of the kernel stack.
467 *
468 * @param paPcStack Where to return the program counters.
469 * @param cMaxFrames The maximum number of PCs to return.
470 * @param cSkipFrames The number of artificial callstack frames to
471 * skip at the top.
472 * @param pIntr Not sure what this is...
473 */
474void dtrace_getpcstack(pc_t *paPcStack, int cMaxFrames, int cSkipFrames, uint32_t *pIntr)
475{
476 int iFrame = 0;
477 while (iFrame < cMaxFrames)
478 {
479 paPcStack[iFrame] = NULL;
480 iFrame++;
481 }
482 RT_NOREF_PV(pIntr);
483 RT_NOREF_PV(cSkipFrames);
484}
485
486
/**
 * Get the number of call frames on the stack.
 *
 * Not implemented in this port: always reports a single frame.
 *
 * @returns The stack depth (always 1).
 * @param   cSkipFrames     The number of artificial callstack frames to
 *                          skip at the top (unused).
 */
int dtrace_getstackdepth(int cSkipFrames)
{
    (void)cSkipFrames;
    return 1;
}
499
500
/**
 * Produce a traceback of the userland stack.
 *
 * Not implemented in this port: zeroes both output arrays.
 *
 * @param   paPcStack       Where to return the program counters.
 * @param   paFpStack       Where to return the frame pointers.
 * @param   cMaxFrames      The maximum number of frames to return.
 */
void dtrace_getufpstack(uint64_t *paPcStack, uint64_t *paFpStack, int cMaxFrames)
{
    for (int i = 0; i < cMaxFrames; i++)
    {
        paPcStack[i] = 0;
        paFpStack[i] = 0;
    }
}
518
519
/**
 * Produce a traceback of the userland stack.
 *
 * Not implemented in this port: zeroes the output array.
 *
 * @param   paPcStack       Where to return the program counters.
 * @param   cMaxFrames      The maximum number of frames to return.
 */
void dtrace_getupcstack(uint64_t *paPcStack, int cMaxFrames)
{
    for (int i = 0; i < cMaxFrames; i++)
        paPcStack[i] = 0;
}
535
536
/**
 * Computes the depth of the userland stack.
 *
 * Not implemented in this port.
 *
 * @returns Always 0.
 */
int dtrace_getustackdepth(void)
{
    return 0;
}
544
545
/**
 * Get the current IPL/IRQL.
 *
 * @returns Current level.
 */
int dtrace_getipl(void)
{
#ifdef RT_ARCH_AMD64
    /* CR8 is normally the same as IRQL / IPL on AMD64. */
    return ASMGetCR8();
#else
    /* Just fake it on x86. */
    /* NOTE(review): ASMIntAreEnabled lives in iprt/asm-amd64-x86.h, which is
     * only included for x86/AMD64 at the top of this file -- confirm how this
     * branch builds on arm64. */
    return !ASMIntAreEnabled();
#endif
}
561
562
563/**
564 * Get current monotonic timestamp.
565 *
566 * @returns Timestamp, nano seconds.
567 */
568hrtime_t dtrace_gethrtime(void)
569{
570 return RTTimeNanoTS();
571}
572
573
574/**
575 * Get current walltime.
576 *
577 * @returns Timestamp, nano seconds.
578 */
579hrtime_t dtrace_gethrestime(void)
580{
581 /** @todo try get better resolution here somehow ... */
582 RTTIMESPEC Now;
583 return RTTimeSpecGetNano(RTTimeNow(&Now));
584}
585
586
/**
 * DTrace panic routine.  Does not return.
 *
 * Logs the message via the IPRT assertion machinery and then forces a crash
 * by writing to an invalid address in an endless loop (in case the
 * breakpoint / panic hooks return).
 *
 * @param   pszFormat           Panic message.
 * @param   va                  Arguments to the panic message.
 */
void dtrace_vpanic(const char *pszFormat, va_list va)
{
    RTAssertMsg1(NULL, __LINE__, __FILE__, __FUNCTION__);
    RTAssertMsg2WeakV(pszFormat, va);
    RTR0AssertPanicSystem();
    for (;;)
    {
        ASMBreakpoint();
        /* Deliberate write to an invalid address to take the system down. */
        volatile char *pchCrash = (volatile char *)~(uintptr_t)0;
        *pchCrash = '\0';
    }
}
605
606
/**
 * DTrace panic routine.  Does not return.
 *
 * @param   pszFormat           Panic message.
 * @param   ...                 Arguments to the panic message.
 */
void VBoxDtPanic(const char *pszFormat, ...)
{
    va_list va;
    va_start(va, pszFormat);
    dtrace_vpanic(pszFormat, va);
    /*va_end(va); - unreachable */
}
620
621
622/**
623 * DTrace kernel message routine.
624 *
625 * @param pszFormat Kernel message.
626 * @param ... Arguments to the panic message.
627 */
628void VBoxDtCmnErr(int iLevel, const char *pszFormat, ...)
629{
630 va_list va;
631 va_start(va, pszFormat);
632 SUPR0Printf("%N", pszFormat, va);
633 va_end(va);
634 RT_NOREF_PV(iLevel);
635}
636
637
638/** uprintf implementation */
639void VBoxDtUPrintf(const char *pszFormat, ...)
640{
641 va_list va;
642 va_start(va, pszFormat);
643 VBoxDtUPrintfV(pszFormat, va);
644 va_end(va);
645}
646
647
648/** vuprintf implementation */
649void VBoxDtUPrintfV(const char *pszFormat, va_list va)
650{
651 SUPR0Printf("%N", pszFormat, va);
652}
653
654
/* CRED implementation.
 * Lazily allocates a credential structure for the current entry and caches
 * it in the on-stack data; it is freed again by VBDT_CLEAR_STACK_DATA()
 * via crfree when the thread leaves the module. */
cred_t *VBoxDtGetCurrentCreds(void)
{
    PVBDTSTACKDATA pData = vboxDtGetStackData();
    if (!pData->pCred)
    {
        struct VBoxDtCred *pCred;
#ifdef HAVE_RTMEMALLOCEX_FEATURES
        int rc = RTMemAllocEx(sizeof(*pCred), 0, RTMEMALLOCEX_FLAGS_ANY_CTX, (void **)&pCred);
#else
        int rc = RTMemAllocEx(sizeof(*pCred), 0, 0, (void **)&pCred);
#endif
        AssertFatalRC(rc);
        pCred->cr_refs  = 1;
        /** @todo get the right creds on unix systems. */
        pCred->cr_uid   = 0;
        pCred->cr_ruid  = 0;
        pCred->cr_suid  = 0;
        pCred->cr_gid   = 0;
        pCred->cr_rgid  = 0;
        pCred->cr_sgid  = 0;
        pCred->cr_zone  = 0;
        pData->pCred    = pCred;
    }

    return pData->pCred;
}
682
683
684/* crhold implementation */
685void VBoxDtCredHold(struct VBoxDtCred *pCred)
686{
687 int32_t cRefs = ASMAtomicIncS32(&pCred->cr_refs);
688 Assert(cRefs > 1); NOREF(cRefs);
689}
690
691
692/* crfree implementation */
693void VBoxDtCredFree(struct VBoxDtCred *pCred)
694{
695 int32_t cRefs = ASMAtomicDecS32(&pCred->cr_refs);
696 Assert(cRefs >= 0);
697 if (!cRefs)
698 RTMemFreeEx(pCred, sizeof(*pCred));
699}
700
/** Spinlock protecting the thread structures. */
static RTSPINLOCK           g_hThreadSpinlock = NIL_RTSPINLOCK;
/** List of threads by usage age (most recently used entries are removed from
 * the list while held; recycling picks from the tail). */
static RTLISTANCHOR         g_ThreadAgeList;
/** Hash table for looking up thread structures (keyed by native handle). */
static struct VBoxDtThread *g_apThreadsHash[16384];
/** Fake kthread_t structures.
 * The size of this array is making horrible ASSUMPTIONS about the number of
 * thread in the system that will be subjected to DTracing. */
static struct VBoxDtThread  g_aThreads[8192];
711
712
/**
 * Initializes the thread database: creates the protecting spinlock and puts
 * every (still unused) thread structure on the age list.
 *
 * @returns VINF_SUCCESS or spinlock creation status.
 */
static int vboxDtInitThreadDb(void)
{
    int rc = RTSpinlockCreate(&g_hThreadSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDtThreadDb");
    if (RT_FAILURE(rc))
        return rc;

    RTListInit(&g_ThreadAgeList);
    for (uint32_t i = 0; i < RT_ELEMENTS(g_aThreads); i++)
    {
        g_aThreads[i].hNative = NIL_RTNATIVETHREAD;
        g_aThreads[i].uPid    = NIL_RTPROCESS;
        RTListPrepend(&g_ThreadAgeList, &g_aThreads[i].AgeEntry);
    }

    return VINF_SUCCESS;
}
729
730
/**
 * Tears down the thread database; counterpart to vboxDtInitThreadDb.
 */
static void vboxDtTermThreadDb(void)
{
    RTSpinlockDestroy(g_hThreadSpinlock);
    g_hThreadSpinlock = NIL_RTSPINLOCK;
    RTListInit(&g_ThreadAgeList);
}
737
738
739/* curthread implementation, providing a fake kthread_t. */
740struct VBoxDtThread *VBoxDtGetCurrentThread(void)
741{
742 /*
743 * Once we've retrieved a thread, we hold on to it until the thread exits
744 * the VBoxDTrace module.
745 */
746 PVBDTSTACKDATA pData = vboxDtGetStackData();
747 if (pData->pThread)
748 {
749 AssertPtr(pData->pThread);
750 Assert(pData->pThread->hNative == RTThreadNativeSelf());
751 Assert(pData->pThread->uPid == RTProcSelf());
752 Assert(RTListIsEmpty(&pData->pThread->AgeEntry));
753 return pData->pThread;
754 }
755
756 /*
757 * Lookup the thread in the hash table.
758 */
759 RTNATIVETHREAD hNativeSelf = RTThreadNativeSelf();
760 RTPROCESS uPid = RTProcSelf();
761 uintptr_t iHash = (hNativeSelf * 2654435761U) % RT_ELEMENTS(g_apThreadsHash);
762
763 RTSpinlockAcquire(g_hThreadSpinlock);
764
765 struct VBoxDtThread *pThread = g_apThreadsHash[iHash];
766 while (pThread)
767 {
768 if (pThread->hNative == hNativeSelf)
769 {
770 if (pThread->uPid != uPid)
771 {
772 /* Re-initialize the reused thread. */
773 pThread->uPid = uPid;
774 pThread->t_dtrace_vtime = 0;
775 pThread->t_dtrace_start = 0;
776 pThread->t_dtrace_stop = 0;
777 pThread->t_dtrace_scrpc = 0;
778 pThread->t_dtrace_astpc = 0;
779 pThread->t_predcache = 0;
780 }
781
782 /* Hold the thread in the on-stack data, making sure it does not
783 get reused till the thread leaves VBoxDTrace. */
784 RTListNodeRemove(&pThread->AgeEntry);
785 pData->pThread = pThread;
786
787 RTSpinlockRelease(g_hThreadSpinlock);
788 return pThread;
789 }
790
791 pThread = pThread->pNext;
792 }
793
794 /*
795 * Unknown thread. Allocate a new entry, recycling unused or old ones.
796 */
797 pThread = RTListGetLast(&g_ThreadAgeList, struct VBoxDtThread, AgeEntry);
798 AssertFatal(pThread);
799 RTListNodeRemove(&pThread->AgeEntry);
800 if (pThread->hNative != NIL_RTNATIVETHREAD)
801 {
802 uintptr_t iHash2 = (pThread->hNative * 2654435761U) % RT_ELEMENTS(g_apThreadsHash);
803 if (g_apThreadsHash[iHash2] == pThread)
804 g_apThreadsHash[iHash2] = pThread->pNext;
805 else
806 {
807 for (struct VBoxDtThread *pPrev = g_apThreadsHash[iHash2]; ; pPrev = pPrev->pNext)
808 {
809 AssertPtr(pPrev);
810 if (pPrev->pNext == pThread)
811 {
812 pPrev->pNext = pThread->pNext;
813 break;
814 }
815 }
816 }
817 }
818
819 /*
820 * Initialize the data.
821 */
822 pThread->t_dtrace_vtime = 0;
823 pThread->t_dtrace_start = 0;
824 pThread->t_dtrace_stop = 0;
825 pThread->t_dtrace_scrpc = 0;
826 pThread->t_dtrace_astpc = 0;
827 pThread->t_predcache = 0;
828 pThread->hNative = hNativeSelf;
829 pThread->uPid = uPid;
830
831 /*
832 * Add it to the hash as well as the on-stack data.
833 */
834 pThread->pNext = g_apThreadsHash[iHash];
835 g_apThreadsHash[iHash] = pThread->pNext;
836
837 pData->pThread = pThread;
838
839 RTSpinlockRelease(g_hThreadSpinlock);
840 return pThread;
841}
842
843
844/**
845 * Called by the stack data destructor.
846 *
847 * @param pThread The thread to release.
848 *
849 */
850static void VBoxDtReleaseThread(struct VBoxDtThread *pThread)
851{
852 RTSpinlockAcquire(g_hThreadSpinlock);
853
854 RTListAppend(&g_ThreadAgeList, &pThread->AgeEntry);
855
856 RTSpinlockRelease(g_hThreadSpinlock);
857}
858
859
860
861
862/*
863 *
864 * Virtual Memory / Resource Allocator.
865 * Virtual Memory / Resource Allocator.
866 * Virtual Memory / Resource Allocator.
867 *
868 */
869
870
/** The number of bits per chunk.
 * @remarks The 32 bytes are for heap headers and such like. */
#define VBOXDTVMEMCHUNK_BITS    ( ((_64K - 32 - sizeof(uint32_t) * 2) / sizeof(uint32_t)) * 32)

/**
 * Resource allocator chunk.
 * One bit per allocatable item; a set bit means the item is taken.
 */
typedef struct VBoxDtVMemChunk
{
    /** The ordinal (unbased) of the first item. */
    uint32_t        iFirst;
    /** The current number of free items in this chunk. */
    uint32_t        cCurFree;
    /** The allocation bitmap. */
    uint32_t        bm[VBOXDTVMEMCHUNK_BITS / 32];
} VBOXDTVMEMCHUNK;
/** Pointer to a resource allocator chunk. */
typedef VBOXDTVMEMCHUNK *PVBOXDTVMEMCHUNK;



/**
 * Resource allocator instance.
 * A very limited vmem(9) replacement handing out 1-byte "units" (IDs) from
 * a fixed range, tracked by per-chunk bitmaps.
 */
typedef struct VBoxDtVMem
{
    /** Spinlock protecting the data (interrupt safe). */
    RTSPINLOCK      hSpinlock;
    /** Magic value (VBOXDTVMEM_MAGIC while valid). */
    uint32_t        u32Magic;
    /** The current number of free items in the chunks. */
    uint32_t        cCurFree;
    /** The current number of chunks that we have allocated. */
    uint32_t        cCurChunks;
    /** The configured resource base. */
    uint32_t        uBase;
    /** The configured max number of items. */
    uint32_t        cMaxItems;
    /** The size of the apChunks array. */
    uint32_t        cMaxChunks;
    /** Array of chunk pointers.
     * (The size is determined at creation.) */
    PVBOXDTVMEMCHUNK apChunks[1];
} VBOXDTVMEM;
/** Pointer to a resource allocator instance. */
typedef VBOXDTVMEM *PVBOXDTVMEM;

/** Magic value for the VBOXDTVMEM structure. */
#define VBOXDTVMEM_MAGIC        RT_MAKE_U32_FROM_U8('V', 'M', 'e', 'm')
920
921
/* vmem_create implementation.
 * Only the exact feature subset DTrace needs is supported (identifier arena,
 * unit size 1, 32-bit range, no source arena, no quantum caching); everything
 * else is rejected by assertion.  Chunks are allocated lazily by
 * VBoxDtVMemAlloc.  Returns NULL on failure. */
struct VBoxDtVMem *VBoxDtVMemCreate(const char *pszName, void *pvBase, size_t cb, size_t cbUnit,
                                    PFNRT pfnAlloc, PFNRT pfnFree, struct VBoxDtVMem *pSrc,
                                    size_t cbQCacheMax, uint32_t fFlags)
{
    /*
     * Assert preconditions of this implementation.
     */
    AssertMsgReturn((uintptr_t)pvBase <= UINT32_MAX, ("%p\n", pvBase), NULL);
    AssertMsgReturn(cb <= UINT32_MAX, ("%zu\n", cb), NULL);
    AssertMsgReturn((uintptr_t)pvBase + cb - 1 <= UINT32_MAX, ("%p %zu\n", pvBase, cb), NULL);
    AssertMsgReturn(cbUnit == 1, ("%zu\n", cbUnit), NULL);
    AssertReturn(!pfnAlloc, NULL);
    AssertReturn(!pfnFree, NULL);
    AssertReturn(!pSrc, NULL);
    AssertReturn(!cbQCacheMax, NULL);
    AssertReturn(fFlags & VM_SLEEP, NULL);
    AssertReturn(fFlags & VMC_IDENTIFIER, NULL);
    RT_NOREF_PV(pszName);

    /*
     * Allocate the instance.
     */
    uint32_t cChunks = (uint32_t)cb / VBOXDTVMEMCHUNK_BITS;
    if (cb % VBOXDTVMEMCHUNK_BITS)
        cChunks++;
    PVBOXDTVMEM pThis = (PVBOXDTVMEM)RTMemAllocZ(RT_UOFFSETOF_DYN(VBOXDTVMEM, apChunks[cChunks]));
    if (!pThis)
        return NULL;
    int rc = RTSpinlockCreate(&pThis->hSpinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "VBoxDtVMem");
    if (RT_FAILURE(rc))
    {
        RTMemFree(pThis);
        return NULL;
    }
    pThis->u32Magic   = VBOXDTVMEM_MAGIC;
    pThis->cCurFree   = 0;
    pThis->cCurChunks = 0;
    pThis->uBase      = (uint32_t)(uintptr_t)pvBase;
    pThis->cMaxItems  = (uint32_t)cb;
    pThis->cMaxChunks = cChunks;

    return pThis;
}
966
967
/* vmem_destroy implementation.
 * NULL is tolerated.  The spinlock round-trip before invalidating the magic
 * makes sure no allocator call is still inside the critical section. */
void VBoxDtVMemDestroy(struct VBoxDtVMem *pThis)
{
    if (!pThis)
        return;
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);

    /*
     * Invalidate the instance.
     */
    RTSpinlockAcquire(pThis->hSpinlock); /* paranoia */
    pThis->u32Magic = 0;
    RTSpinlockRelease(pThis->hSpinlock);
    RTSpinlockDestroy(pThis->hSpinlock);

    /*
     * Free the chunks, then the instance.
     */
    uint32_t iChunk = pThis->cCurChunks;
    while (iChunk-- > 0)
    {
        RTMemFree(pThis->apChunks[iChunk]);
        pThis->apChunks[iChunk] = NULL;
    }
    RTMemFree(pThis);
}
995
996
/* vmem_alloc implementation.
 * Hands out one identifier from the arena, returned as a pointer-sized
 * value (uBase + item ordinal).  Grows the arena chunk by chunk on demand;
 * the spinlock is dropped around chunk allocation and the insertion is
 * re-validated afterwards in case of races.  Returns NULL when the arena
 * is exhausted or a chunk allocation fails. */
void *VBoxDtVMemAlloc(struct VBoxDtVMem *pThis, size_t cbMem, uint32_t fFlags)
{
    /*
     * Validate input.
     */
    AssertReturn(fFlags & VM_BESTFIT, NULL);
    AssertReturn(fFlags & VM_SLEEP, NULL);
    AssertReturn(cbMem == 1, NULL);
    AssertPtrReturn(pThis, NULL);
    AssertReturn(pThis->u32Magic == VBOXDTVMEM_MAGIC, NULL);

    /*
     * Allocation loop.
     */
    RTSpinlockAcquire(pThis->hSpinlock);
    for (;;)
    {
        PVBOXDTVMEMCHUNK pChunk;
        uint32_t const   cChunks = pThis->cCurChunks;

        if (RT_LIKELY(pThis->cCurFree > 0))
        {
            /* Find the first chunk with a free item and claim the first
               clear bit in its bitmap. */
            for (uint32_t iChunk = 0; iChunk < cChunks; iChunk++)
            {
                pChunk = pThis->apChunks[iChunk];
                if (pChunk->cCurFree > 0)
                {
                    int iBit = ASMBitFirstClear(pChunk->bm, VBOXDTVMEMCHUNK_BITS);
                    AssertMsgReturnStmt(iBit >= 0 && (unsigned)iBit < VBOXDTVMEMCHUNK_BITS, ("%d\n", iBit),
                                        RTSpinlockRelease(pThis->hSpinlock),
                                        NULL);

                    ASMBitSet(pChunk->bm, iBit);
                    pChunk->cCurFree--;
                    pThis->cCurFree--;

                    uint32_t iRet = (uint32_t)iBit + pChunk->iFirst + pThis->uBase;
                    RTSpinlockRelease(pThis->hSpinlock);
                    return (void *)(uintptr_t)iRet;
                }
            }
            AssertFailedBreak(); /* cCurFree and the chunks disagree - corruption. */
        }

        /* Out of resources? */
        if (cChunks >= pThis->cMaxChunks)
            break;

        /*
         * Allocate another chunk.
         */
        uint32_t const  iFirstBit = cChunks > 0 ? pThis->apChunks[cChunks - 1]->iFirst + VBOXDTVMEMCHUNK_BITS : 0;
        uint32_t const  cFreeBits = cChunks + 1 == pThis->cMaxChunks
                                  ? pThis->cMaxItems - (iFirstBit - pThis->uBase)
                                  : VBOXDTVMEMCHUNK_BITS;
        Assert(cFreeBits <= VBOXDTVMEMCHUNK_BITS);

        /* Drop the lock while allocating - may be slow/sleepy. */
        RTSpinlockRelease(pThis->hSpinlock);

        pChunk = (PVBOXDTVMEMCHUNK)RTMemAllocZ(sizeof(*pChunk));
        if (!pChunk)
            return NULL;

        pChunk->iFirst   = iFirstBit;
        pChunk->cCurFree = cFreeBits;
        if (cFreeBits != VBOXDTVMEMCHUNK_BITS)
        {
            /* lazy bird: mark the tail bits beyond the arena limit as taken. */
            uint32_t iBit = cFreeBits;
            while (iBit < VBOXDTVMEMCHUNK_BITS)
            {
                ASMBitSet(pChunk->bm, iBit);
                iBit++;
            }
        }

        RTSpinlockAcquire(pThis->hSpinlock);

        /*
         * Insert the new chunk. If someone raced us here, we'll drop it to
         * avoid wasting resources.
         */
        if (pThis->cCurChunks == cChunks)
        {
            pThis->apChunks[cChunks] = pChunk;
            pThis->cCurFree   += pChunk->cCurFree;
            pThis->cCurChunks += 1;
        }
        else
        {
            RTSpinlockRelease(pThis->hSpinlock);
            RTMemFree(pChunk);
            RTSpinlockAcquire(pThis->hSpinlock);
        }
    }
    RTSpinlockRelease(pThis->hSpinlock);

    return NULL;
}
1097
/* vmem_free implementation.
 * Returns one identifier (previously handed out by VBoxDtVMemAlloc) to the
 * arena by clearing its bitmap bit.  Double frees and out-of-range values
 * are caught by assertions. */
void VBoxDtVMemFree(struct VBoxDtVMem *pThis, void *pvMem, size_t cbMem)
{
    /*
     * Validate input.
     */
    AssertReturnVoid(cbMem == 1);
    AssertPtrReturnVoid(pThis);
    AssertReturnVoid(pThis->u32Magic == VBOXDTVMEM_MAGIC);

    AssertReturnVoid((uintptr_t)pvMem < UINT32_MAX);
    uint32_t uMem = (uint32_t)(uintptr_t)pvMem;
    AssertReturnVoid(uMem >= pThis->uBase);
    uMem -= pThis->uBase; /* rebase to an item ordinal */
    AssertReturnVoid(uMem < pThis->cMaxItems);


    /*
     * Free it.
     */
    RTSpinlockAcquire(pThis->hSpinlock);
    uint32_t const iChunk = uMem / VBOXDTVMEMCHUNK_BITS;
    if (iChunk < pThis->cCurChunks)
    {
        PVBOXDTVMEMCHUNK pChunk = pThis->apChunks[iChunk];
        uint32_t iBit = uMem - pChunk->iFirst;
        AssertReturnVoidStmt(iBit < VBOXDTVMEMCHUNK_BITS, RTSpinlockRelease(pThis->hSpinlock));
        AssertReturnVoidStmt(ASMBitTestAndClear(pChunk->bm, iBit), RTSpinlockRelease(pThis->hSpinlock));

        pChunk->cCurFree++;
        pThis->cCurFree++;
    }

    RTSpinlockRelease(pThis->hSpinlock);
}
1133
1134
1135/*
1136 *
1137 * Memory Allocators.
1138 * Memory Allocators.
1139 * Memory Allocators.
1140 *
1141 */
1142
1143
1144/* kmem_alloc implementation */
1145void *VBoxDtKMemAlloc(size_t cbMem, uint32_t fFlags)
1146{
1147 void *pvMem;
1148#ifdef HAVE_RTMEMALLOCEX_FEATURES
1149 uint32_t fMemAllocFlags = fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0;
1150#else
1151 uint32_t fMemAllocFlags = 0;
1152 RT_NOREF_PV(fFlags);
1153#endif
1154 int rc = RTMemAllocEx(cbMem, 0, fMemAllocFlags, &pvMem);
1155 AssertRCReturn(rc, NULL);
1156 AssertPtr(pvMem);
1157 return pvMem;
1158}
1159
1160
1161/* kmem_zalloc implementation */
1162void *VBoxDtKMemAllocZ(size_t cbMem, uint32_t fFlags)
1163{
1164 void *pvMem;
1165#ifdef HAVE_RTMEMALLOCEX_FEATURES
1166 uint32_t fMemAllocFlags = (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED;
1167#else
1168 uint32_t fMemAllocFlags = RTMEMALLOCEX_FLAGS_ZEROED;
1169 RT_NOREF_PV(fFlags);
1170#endif
1171 int rc = RTMemAllocEx(cbMem, 0, fMemAllocFlags, &pvMem);
1172 AssertRCReturn(rc, NULL);
1173 AssertPtr(pvMem);
1174 return pvMem;
1175}
1176
1177
/**
 * kmem_free implementation.
 *
 * @param   pvMem   The allocation to free (from VBoxDtKMemAlloc/AllocZ).
 * @param   cbMem   The size of the allocation, required by RTMemFreeEx.
 */
void VBoxDtKMemFree(void *pvMem, size_t cbMem)
{
    RTMemFreeEx(pvMem, cbMem);
}
1183
1184
1185/**
1186 * Memory cache mockup structure.
1187 * No slab allocator here!
1188 */
1189struct VBoxDtMemCache
1190{
1191 uint32_t u32Magic;
1192 size_t cbBuf;
1193 size_t cbAlign;
1194};
1195
1196
1197/* Limited kmem_cache_create implementation. */
1198struct VBoxDtMemCache *VBoxDtKMemCacheCreate(const char *pszName, size_t cbBuf, size_t cbAlign,
1199 PFNRT pfnCtor, PFNRT pfnDtor, PFNRT pfnReclaim,
1200 void *pvUser, void *pvVM, uint32_t fFlags)
1201{
1202 /*
1203 * Check the input.
1204 */
1205 AssertReturn(cbBuf > 0 && cbBuf < _1G, NULL);
1206 AssertReturn(RT_IS_POWER_OF_TWO(cbAlign), NULL);
1207 AssertReturn(!pfnCtor, NULL);
1208 AssertReturn(!pfnDtor, NULL);
1209 AssertReturn(!pfnReclaim, NULL);
1210 AssertReturn(!pvUser, NULL);
1211 AssertReturn(!pvVM, NULL);
1212 AssertReturn(!fFlags, NULL);
1213 RT_NOREF_PV(pszName);
1214
1215 /*
1216 * Create a parameter container. Don't bother with anything fancy here yet,
1217 * just get something working.
1218 */
1219 struct VBoxDtMemCache *pThis = (struct VBoxDtMemCache *)RTMemAlloc(sizeof(*pThis));
1220 if (!pThis)
1221 return NULL;
1222
1223 pThis->cbAlign = cbAlign;
1224 pThis->cbBuf = cbBuf;
1225 return pThis;
1226}
1227
1228
/**
 * Limited kmem_cache_destroy implementation.
 *
 * Frees the cache descriptor itself; objects still allocated from the cache
 * are not tracked here and must be freed by their owners.
 *
 * @param   pThis   The cache to destroy (NULL is a no-op for RTMemFree).
 */
void VBoxDtKMemCacheDestroy(struct VBoxDtMemCache *pThis)
{
    RTMemFree(pThis);
}
1234
1235
1236/* kmem_cache_alloc implementation. */
1237void *VBoxDtKMemCacheAlloc(struct VBoxDtMemCache *pThis, uint32_t fFlags)
1238{
1239 void *pvMem;
1240#ifdef HAVE_RTMEMALLOCEX_FEATURES
1241 uint32_t fMemAllocFlags = (fFlags & KM_NOSLEEP ? RTMEMALLOCEX_FLAGS_ANY_CTX : 0) | RTMEMALLOCEX_FLAGS_ZEROED;
1242#else
1243 uint32_t fMemAllocFlags = RTMEMALLOCEX_FLAGS_ZEROED;
1244 RT_NOREF_PV(fFlags);
1245#endif
1246 int rc = RTMemAllocEx(pThis->cbBuf, /*pThis->cbAlign*/0, fMemAllocFlags, &pvMem);
1247 AssertRCReturn(rc, NULL);
1248 AssertPtr(pvMem);
1249 return pvMem;
1250}
1251
1252
/**
 * kmem_cache_free implementation.
 *
 * @param   pThis   The cache the object came from (supplies the object size).
 * @param   pvMem   The object to free.
 */
void VBoxDtKMemCacheFree(struct VBoxDtMemCache *pThis, void *pvMem)
{
    RTMemFreeEx(pvMem, pThis->cbBuf);
}
1258
1259
1260/*
1261 *
1262 * Mutex Semaphore Wrappers.
1263 *
1264 */
1265
1266
1267/** Initializes a mutex. */
1268int VBoxDtMutexInit(struct VBoxDtMutex *pMtx)
1269{
1270 AssertReturn(pMtx != &g_DummyMtx, -1);
1271 AssertPtr(pMtx);
1272
1273 pMtx->hOwner = NIL_RTNATIVETHREAD;
1274 pMtx->hMtx = NIL_RTSEMMUTEX;
1275 int rc = RTSemMutexCreate(&pMtx->hMtx);
1276 if (RT_SUCCESS(rc))
1277 return 0;
1278 return -1;
1279}
1280
1281
1282/** Deletes a mutex. */
1283void VBoxDtMutexDelete(struct VBoxDtMutex *pMtx)
1284{
1285 AssertReturnVoid(pMtx != &g_DummyMtx);
1286 AssertPtr(pMtx);
1287 if (pMtx->hMtx == NIL_RTSEMMUTEX)
1288 return;
1289
1290 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1291 int rc = RTSemMutexDestroy(pMtx->hMtx); AssertRC(rc);
1292 pMtx->hMtx = NIL_RTSEMMUTEX;
1293}
1294
1295
1296/* mutex_enter implementation */
1297void VBoxDtMutexEnter(struct VBoxDtMutex *pMtx)
1298{
1299 AssertPtr(pMtx);
1300 if (pMtx == &g_DummyMtx)
1301 return;
1302
1303 RTNATIVETHREAD hSelf = RTThreadNativeSelf();
1304
1305 int rc = RTSemMutexRequest(pMtx->hMtx, RT_INDEFINITE_WAIT);
1306 AssertFatalRC(rc);
1307
1308 Assert(pMtx->hOwner == NIL_RTNATIVETHREAD);
1309 pMtx->hOwner = hSelf;
1310}
1311
1312
1313/* mutex_exit implementation */
1314void VBoxDtMutexExit(struct VBoxDtMutex *pMtx)
1315{
1316 AssertPtr(pMtx);
1317 if (pMtx == &g_DummyMtx)
1318 return;
1319
1320 Assert(pMtx->hOwner == RTThreadNativeSelf());
1321
1322 pMtx->hOwner = NIL_RTNATIVETHREAD;
1323 int rc = RTSemMutexRelease(pMtx->hMtx);
1324 AssertFatalRC(rc);
1325}
1326
1327
1328/* MUTEX_HELD implementation */
1329bool VBoxDtMutexIsOwner(struct VBoxDtMutex *pMtx)
1330{
1331 AssertPtrReturn(pMtx, false);
1332 if (pMtx == &g_DummyMtx)
1333 return true;
1334 return pMtx->hOwner == RTThreadNativeSelf();
1335}
1336
1337
1338
1339/*
1340 *
1341 * Helpers for handling VTG structures.
1342 * Helpers for handling VTG structures.
1343 * Helpers for handling VTG structures.
1344 *
1345 */
1346
1347
1348
1349/**
1350 * Converts an attribute from VTG description speak to DTrace.
1351 *
1352 * @param pDtAttr The DTrace attribute (dst).
1353 * @param pVtgAttr The VTG attribute descriptor (src).
1354 */
1355static void vboxDtVtgConvAttr(dtrace_attribute_t *pDtAttr, PCVTGDESCATTR pVtgAttr)
1356{
1357 pDtAttr->dtat_name = pVtgAttr->u8Code - 1;
1358 pDtAttr->dtat_data = pVtgAttr->u8Data - 1;
1359 pDtAttr->dtat_class = pVtgAttr->u8DataDep - 1;
1360}
1361
1362/**
1363 * Gets a string from the string table.
1364 *
1365 * @returns Pointer to the string.
1366 * @param pVtgHdr The VTG object header.
1367 * @param offStrTab The string table offset.
1368 */
1369static const char *vboxDtVtgGetString(PVTGOBJHDR pVtgHdr, uint32_t offStrTab)
1370{
1371 Assert(offStrTab < pVtgHdr->cbStrTab);
1372 return (const char *)pVtgHdr + pVtgHdr->offStrTab + offStrTab;
1373}
1374
1375
1376
1377/*
1378 *
1379 * DTrace Provider Interface.
1380 * DTrace Provider Interface.
1381 * DTrace Provider Interface.
1382 *
1383 */
1384
1385
1386/**
1387 * @callback_method_impl{dtrace_pops_t,dtps_provide}
1388 */
1389static void vboxDtPOps_Provide(void *pvProv, const dtrace_probedesc_t *pDtProbeDesc)
1390{
1391 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1392 AssertPtrReturnVoid(pProv);
1393 LOG_DTRACE(("%s: %p / %p pDtProbeDesc=%p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, pDtProbeDesc));
1394
1395 if (pDtProbeDesc)
1396 return; /* We don't generate probes, so never mind these requests. */
1397
1398 if (pProv->TracerData.DTrace.fZombie)
1399 return;
1400
1401 dtrace_provider_id_t const idProvider = pProv->TracerData.DTrace.idProvider;
1402 AssertPtrReturnVoid(idProvider);
1403
1404 AssertPtrReturnVoid(pProv->pHdr);
1405 AssertReturnVoid(pProv->pHdr->offProbeLocs != 0);
1406 uint32_t const cProbeLocs = pProv->pHdr->cbProbeLocs / sizeof(VTGPROBELOC);
1407
1408 /* Need a buffer for extracting the function names and mangling them in
1409 case of collision. */
1410 size_t const cbFnNmBuf = _4K + _1K;
1411 char *pszFnNmBuf = (char *)RTMemAlloc(cbFnNmBuf);
1412 if (!pszFnNmBuf)
1413 return;
1414
1415 /*
1416 * Itereate the probe location list and register all probes related to
1417 * this provider.
1418 */
1419 uint16_t const idxProv = (uint16_t)((PVTGDESCPROVIDER)((uintptr_t)pProv->pHdr + pProv->pHdr->offProviders) - pProv->pDesc);
1420 for (uint32_t idxProbeLoc = 0; idxProbeLoc < cProbeLocs; idxProbeLoc++)
1421 {
1422 /* Skip probe location belonging to other providers or once that
1423 we've already reported. */
1424 PCVTGPROBELOC pProbeLocRO = &pProv->paProbeLocsRO[idxProbeLoc];
1425 PVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1426 if (pProbeDesc->idxProvider != idxProv)
1427 continue;
1428
1429 uint32_t *pidProbe;
1430 if (!pProv->fUmod)
1431 pidProbe = (uint32_t *)&pProbeLocRO->idProbe;
1432 else
1433 pidProbe = &pProv->paR0ProbeLocs[idxProbeLoc].idProbe;
1434 if (*pidProbe != 0)
1435 continue;
1436
1437 /* The function name may need to be stripped since we're using C++
1438 compilers for most of the code. ASSUMES nobody are brave/stupid
1439 enough to use function pointer returns without typedef'ing
1440 properly them (e.g. signal). */
1441 const char *pszPrbName = vboxDtVtgGetString(pProv->pHdr, pProbeDesc->offName);
1442 const char *pszFunc = pProbeLocRO->pszFunction;
1443 const char *psz = strchr(pProbeLocRO->pszFunction, '(');
1444 size_t cch;
1445 if (psz)
1446 {
1447 /* skip blanks preceeding the parameter parenthesis. */
1448 while ( (uintptr_t)psz > (uintptr_t)pProbeLocRO->pszFunction
1449 && RT_C_IS_BLANK(psz[-1]))
1450 psz--;
1451
1452 /* Find the start of the function name. */
1453 pszFunc = psz - 1;
1454 while ((uintptr_t)pszFunc > (uintptr_t)pProbeLocRO->pszFunction)
1455 {
1456 char ch = pszFunc[-1];
1457 if (!RT_C_IS_ALNUM(ch) && ch != '_' && ch != ':')
1458 break;
1459 pszFunc--;
1460 }
1461 cch = psz - pszFunc;
1462 }
1463 else
1464 cch = strlen(pszFunc);
1465 RTStrCopyEx(pszFnNmBuf, cbFnNmBuf, pszFunc, cch);
1466
1467 /* Look up the probe, if we have one in the same function, mangle
1468 the function name a little to avoid having to deal with having
1469 multiple location entries with the same probe ID. (lazy bird) */
1470 Assert(!*pidProbe);
1471 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1472 {
1473 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u", pProbeLocRO->uLine);
1474 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) != DTRACE_IDNONE)
1475 {
1476 unsigned iOrd = 2;
1477 while (iOrd < 128)
1478 {
1479 RTStrPrintf(pszFnNmBuf+cch, cbFnNmBuf - cch, "-%u-%u", pProbeLocRO->uLine, iOrd);
1480 if (dtrace_probe_lookup(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName) == DTRACE_IDNONE)
1481 break;
1482 iOrd++;
1483 }
1484 if (iOrd >= 128)
1485 {
1486 LogRel(("VBoxDrv: More than 128 duplicate probe location instances at line %u in function %s [%s], probe %s\n",
1487 pProbeLocRO->uLine, pProbeLocRO->pszFunction, pszFnNmBuf, pszPrbName));
1488 continue;
1489 }
1490 }
1491 }
1492
1493 /* Create the probe. */
1494 AssertCompile(sizeof(*pidProbe) == sizeof(dtrace_id_t));
1495 *pidProbe = dtrace_probe_create(idProvider, pProv->pszModName, pszFnNmBuf, pszPrbName,
1496 1 /*aframes*/, (void *)(uintptr_t)idxProbeLoc);
1497 pProv->TracerData.DTrace.cProvidedProbes++;
1498 }
1499
1500 RTMemFree(pszFnNmBuf);
1501 LOG_DTRACE(("%s: returns\n", __FUNCTION__));
1502}
1503
1504
1505/**
1506 * @callback_method_impl{dtrace_pops_t,dtps_enable}
1507 */
1508static int vboxDtPOps_Enable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1509{
1510 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1511 LOG_DTRACE(("%s: %p / %p - %#x / %p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe));
1512 AssertPtrReturn(pProv->TracerData.DTrace.idProvider, EINVAL);
1513 RT_NOREF_PV(idProbe);
1514
1515 if (!pProv->TracerData.DTrace.fZombie)
1516 {
1517 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1518 PVTGPROBELOC32 pProbeLocEn = (PVTGPROBELOC32)( (uintptr_t)pProv->pvProbeLocsEn + idxProbeLoc * pProv->cbProbeLocsEn);
1519 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1520 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1521 uint32_t const idxProbe = pProbeDesc->idxEnabled;
1522
1523 if (!pProv->fUmod)
1524 {
1525 if (!pProbeLocEn->fEnabled)
1526 {
1527 pProbeLocEn->fEnabled = 1;
1528 ASMAtomicIncU32(&pProv->pacProbeEnabled[idxProbe]);
1529 ASMAtomicIncU32(&pProv->pDesc->cProbesEnabled);
1530 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1531 }
1532 }
1533 else
1534 {
1535 /* Update kernel mode structure */
1536 if (!pProv->paR0ProbeLocs[idxProbeLoc].fEnabled)
1537 {
1538 pProv->paR0ProbeLocs[idxProbeLoc].fEnabled = 1;
1539 ASMAtomicIncU32(&pProv->paR0Probes[idxProbe].cEnabled);
1540 ASMAtomicIncU32(&pProv->pDesc->cProbesEnabled);
1541 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1542 }
1543
1544 /* Update user mode structure. */
1545 pProbeLocEn->fEnabled = 1;
1546 pProv->pacProbeEnabled[idxProbe] = pProv->paR0Probes[idxProbe].cEnabled;
1547 }
1548 }
1549
1550 return 0;
1551}
1552
1553
1554/**
1555 * @callback_method_impl{dtrace_pops_t,dtps_disable}
1556 */
1557static void vboxDtPOps_Disable(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1558{
1559 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1560 AssertPtrReturnVoid(pProv);
1561 LOG_DTRACE(("%s: %p / %p - %#x / %p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe));
1562 AssertPtrReturnVoid(pProv->TracerData.DTrace.idProvider);
1563 RT_NOREF_PV(idProbe);
1564
1565 if (!pProv->TracerData.DTrace.fZombie)
1566 {
1567 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1568 PVTGPROBELOC32 pProbeLocEn = (PVTGPROBELOC32)( (uintptr_t)pProv->pvProbeLocsEn + idxProbeLoc * pProv->cbProbeLocsEn);
1569 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1570 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1571 uint32_t const idxProbe = pProbeDesc->idxEnabled;
1572
1573 if (!pProv->fUmod)
1574 {
1575 if (pProbeLocEn->fEnabled)
1576 {
1577 pProbeLocEn->fEnabled = 0;
1578 ASMAtomicDecU32(&pProv->pacProbeEnabled[idxProbe]);
1579 ASMAtomicDecU32(&pProv->pDesc->cProbesEnabled);
1580 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1581 }
1582 }
1583 else
1584 {
1585 /* Update kernel mode structure */
1586 if (pProv->paR0ProbeLocs[idxProbeLoc].fEnabled)
1587 {
1588 pProv->paR0ProbeLocs[idxProbeLoc].fEnabled = 0;
1589 ASMAtomicDecU32(&pProv->paR0Probes[idxProbe].cEnabled);
1590 ASMAtomicDecU32(&pProv->pDesc->cProbesEnabled);
1591 ASMAtomicIncU32(&pProv->pDesc->uSettingsSerialNo);
1592 }
1593
1594 /* Update user mode structure. */
1595 pProbeLocEn->fEnabled = 0;
1596 pProv->pacProbeEnabled[idxProbe] = pProv->paR0Probes[idxProbe].cEnabled;
1597 }
1598 }
1599}
1600
1601
1602/**
1603 * @callback_method_impl{dtrace_pops_t,dtps_getargdesc}
1604 */
1605static void vboxDtPOps_GetArgDesc(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1606 dtrace_argdesc_t *pArgDesc)
1607{
1608 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1609 unsigned uArg = pArgDesc->dtargd_ndx;
1610 RT_NOREF_PV(idProbe);
1611
1612 pArgDesc->dtargd_ndx = DTRACE_ARGNONE;
1613 AssertPtrReturnVoid(pProv);
1614 LOG_DTRACE(("%s: %p / %p - %#x / %p uArg=%d\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe, uArg));
1615 AssertPtrReturnVoid(pProv->TracerData.DTrace.idProvider);
1616
1617 if (!pProv->TracerData.DTrace.fZombie)
1618 {
1619 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1620 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1621 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1622 PCVTGDESCARGLIST pArgList = (PCVTGDESCARGLIST)( (uintptr_t)pProv->pHdr
1623 + pProv->pHdr->offArgLists
1624 + pProbeDesc->offArgList);
1625 AssertReturnVoid(pProbeDesc->offArgList < pProv->pHdr->cbArgLists);
1626
1627 if (uArg < pArgList->cArgs)
1628 {
1629 const char *pszType = vboxDtVtgGetString(pProv->pHdr, pArgList->aArgs[uArg].offType);
1630 size_t cchType = strlen(pszType);
1631 if (cchType < sizeof(pArgDesc->dtargd_native))
1632 {
1633 memcpy(pArgDesc->dtargd_native, pszType, cchType + 1);
1634 /** @todo mapping? */
1635 pArgDesc->dtargd_ndx = uArg;
1636 LOG_DTRACE(("%s: returns dtargd_native = %s\n", __FUNCTION__, pArgDesc->dtargd_native));
1637 return;
1638 }
1639 }
1640 }
1641}
1642
1643
1644/**
1645 * @callback_method_impl{dtrace_pops_t,dtps_getargval}
1646 */
1647static uint64_t vboxDtPOps_GetArgVal(void *pvProv, dtrace_id_t idProbe, void *pvProbe,
1648 int iArg, int cFrames)
1649{
1650 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1651 AssertPtrReturn(pProv, UINT64_MAX);
1652 LOG_DTRACE(("%s: %p / %p - %#x / %p iArg=%d cFrames=%u\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe, iArg, cFrames));
1653 AssertReturn(iArg >= 5, UINT64_MAX);
1654 RT_NOREF_PV(idProbe); RT_NOREF_PV(cFrames);
1655
1656 if (pProv->TracerData.DTrace.fZombie)
1657 return UINT64_MAX;
1658
1659 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1660 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1661 PCVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1662 PCVTGDESCARGLIST pArgList = (PCVTGDESCARGLIST)( (uintptr_t)pProv->pHdr
1663 + pProv->pHdr->offArgLists
1664 + pProbeDesc->offArgList);
1665 AssertReturn(pProbeDesc->offArgList < pProv->pHdr->cbArgLists, UINT64_MAX);
1666
1667 PVBDTSTACKDATA pData = vboxDtGetStackData();
1668
1669 /*
1670 * Get the stack data. This is a wee bit complicated on 32-bit systems
1671 * since we want to support 64-bit integer arguments.
1672 */
1673 uint64_t u64Ret;
1674 if (iArg >= 20)
1675 u64Ret = UINT64_MAX;
1676 else if (pData->enmCaller == kVBoxDtCaller_ProbeFireKernel)
1677 {
1678#if ARCH_BITS == 64
1679 u64Ret = pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
1680#else
1681 if ( !pArgList->fHaveLargeArgs
1682 || iArg >= pArgList->cArgs)
1683 u64Ret = pData->u.ProbeFireKernel.pauStackArgs[iArg - 5];
1684 else
1685 {
1686 /* Similar to what we did for mac in when calling dtrace_probe(). */
1687 uint32_t offArg = 0;
1688 for (int i = 5; i < iArg; i++)
1689 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iArg].fType))
1690 offArg++;
1691 u64Ret = pData->u.ProbeFireKernel.pauStackArgs[iArg - 5 + offArg];
1692 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iArg].fType))
1693 u64Ret |= (uint64_t)pData->u.ProbeFireKernel.pauStackArgs[iArg - 5 + offArg + 1] << 32;
1694 }
1695#endif
1696 }
1697 else if (pData->enmCaller == kVBoxDtCaller_ProbeFireUser)
1698 {
1699 int offArg = pData->u.ProbeFireUser.offArg;
1700 PCSUPDRVTRACERUSRCTX pCtx = pData->u.ProbeFireUser.pCtx;
1701 AssertPtrReturn(pCtx, UINT64_MAX);
1702
1703 if (pCtx->cBits == 32)
1704 {
1705 if ( !pArgList->fHaveLargeArgs
1706 || iArg >= pArgList->cArgs)
1707 {
1708 if (iArg + offArg < (int)RT_ELEMENTS(pCtx->u.X86.aArgs))
1709 u64Ret = pCtx->u.X86.aArgs[iArg + offArg];
1710 else
1711 u64Ret = UINT64_MAX;
1712 }
1713 else
1714 {
1715 for (int i = 5; i < iArg; i++)
1716 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iArg].fType))
1717 offArg++;
1718 if (offArg + iArg < (int)RT_ELEMENTS(pCtx->u.X86.aArgs))
1719 {
1720 u64Ret = pCtx->u.X86.aArgs[iArg + offArg];
1721 if ( VTG_TYPE_IS_LARGE(pArgList->aArgs[iArg].fType)
1722 && offArg + iArg + 1 < (int)RT_ELEMENTS(pCtx->u.X86.aArgs))
1723 u64Ret |= (uint64_t)pCtx->u.X86.aArgs[iArg + offArg + 1] << 32;
1724 }
1725 else
1726 u64Ret = UINT64_MAX;
1727 }
1728 }
1729 else
1730 {
1731 if (iArg + offArg < (int)RT_ELEMENTS(pCtx->u.Amd64.aArgs))
1732 u64Ret = pCtx->u.Amd64.aArgs[iArg + offArg];
1733 else
1734 u64Ret = UINT64_MAX;
1735 }
1736 }
1737 else
1738 AssertFailedReturn(UINT64_MAX);
1739
1740 LOG_DTRACE(("%s: returns %#llx\n", __FUNCTION__, u64Ret));
1741 return u64Ret;
1742}
1743
1744
1745/**
1746 * @callback_method_impl{dtrace_pops_t,dtps_destroy}
1747 */
1748static void vboxDtPOps_Destroy(void *pvProv, dtrace_id_t idProbe, void *pvProbe)
1749{
1750 PSUPDRVVDTPROVIDERCORE pProv = (PSUPDRVVDTPROVIDERCORE)pvProv;
1751 AssertPtrReturnVoid(pProv);
1752 LOG_DTRACE(("%s: %p / %p - %#x / %p\n", __FUNCTION__, pProv, pProv->TracerData.DTrace.idProvider, idProbe, pvProbe));
1753 AssertReturnVoid(pProv->TracerData.DTrace.cProvidedProbes > 0);
1754 AssertPtrReturnVoid(pProv->TracerData.DTrace.idProvider);
1755
1756 if (!pProv->TracerData.DTrace.fZombie)
1757 {
1758 uint32_t idxProbeLoc = (uint32_t)(uintptr_t)pvProbe;
1759 PCVTGPROBELOC pProbeLocRO = (PVTGPROBELOC)&pProv->paProbeLocsRO[idxProbeLoc];
1760 uint32_t *pidProbe;
1761 if (!pProv->fUmod)
1762 {
1763 pidProbe = (uint32_t *)&pProbeLocRO->idProbe;
1764 Assert(!pProbeLocRO->fEnabled);
1765 Assert(*pidProbe == idProbe);
1766 }
1767 else
1768 {
1769 pidProbe = &pProv->paR0ProbeLocs[idxProbeLoc].idProbe;
1770 Assert(!pProv->paR0ProbeLocs[idxProbeLoc].fEnabled);
1771 Assert(*pidProbe == idProbe); NOREF(idProbe);
1772 }
1773 *pidProbe = 0;
1774 }
1775 pProv->TracerData.DTrace.cProvidedProbes--;
1776}
1777
1778
1779
1780/**
1781 * DTrace provider method table.
1782 */
1783static const dtrace_pops_t g_vboxDtVtgProvOps =
1784{
1785 /* .dtps_provide = */ vboxDtPOps_Provide,
1786 /* .dtps_provide_module = */ NULL,
1787 /* .dtps_enable = */ vboxDtPOps_Enable,
1788 /* .dtps_disable = */ vboxDtPOps_Disable,
1789 /* .dtps_suspend = */ NULL,
1790 /* .dtps_resume = */ NULL,
1791 /* .dtps_getargdesc = */ vboxDtPOps_GetArgDesc,
1792 /* .dtps_getargval = */ vboxDtPOps_GetArgVal,
1793 /* .dtps_usermode = */ NULL,
1794 /* .dtps_destroy = */ vboxDtPOps_Destroy
1795};
1796
1797
1798
1799
1800/*
1801 *
1802 * Support Driver Tracer Interface.
1803 * Support Driver Tracer Interface.
1804 * Support Driver Tracer Interface.
1805 *
1806 */
1807
1808
1809
1810/**
1811 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireKernel}
1812 */
1813static DECLCALLBACK(void) vboxDtTOps_ProbeFireKernel(struct VTGPROBELOC *pVtgProbeLoc, uintptr_t uArg0, uintptr_t uArg1, uintptr_t uArg2,
1814 uintptr_t uArg3, uintptr_t uArg4)
1815{
1816 AssertPtrReturnVoid(pVtgProbeLoc);
1817 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pVtgProbeLoc, pVtgProbeLoc->idProbe));
1818 AssertPtrReturnVoid(pVtgProbeLoc->pProbe);
1819 AssertPtrReturnVoid(pVtgProbeLoc->pszFunction);
1820
1821 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireKernel);
1822
1823 pStackData->u.ProbeFireKernel.pauStackArgs = &uArg4 + 1;
1824
1825#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
1826 /*
1827 * Convert arguments from uintptr_t to uint64_t.
1828 */
1829 PVTGDESCPROBE pProbe = pVtgProbeLoc->pProbe;
1830 AssertPtrReturnVoid(pProbe);
1831 PVTGOBJHDR pVtgHdr = (PVTGOBJHDR)((uintptr_t)pProbe + pProbe->offObjHdr);
1832 AssertPtrReturnVoid(pVtgHdr);
1833 PVTGDESCARGLIST pArgList = (PVTGDESCARGLIST)((uintptr_t)pVtgHdr + pVtgHdr->offArgLists + pProbe->offArgList);
1834 AssertPtrReturnVoid(pArgList);
1835 if (!pArgList->fHaveLargeArgs)
1836 dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
1837 else
1838 {
1839 uintptr_t *auSrcArgs = &uArg0;
1840 uint32_t iSrcArg = 0;
1841 uint32_t iDstArg = 0;
1842 uint64_t au64DstArgs[5];
1843
1844 while ( iDstArg < RT_ELEMENTS(au64DstArgs)
1845 && iSrcArg < pArgList->cArgs)
1846 {
1847 au64DstArgs[iDstArg] = auSrcArgs[iSrcArg];
1848 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iDstArg].fType))
1849 au64DstArgs[iDstArg] |= (uint64_t)auSrcArgs[++iSrcArg] << 32;
1850 iSrcArg++;
1851 iDstArg++;
1852 }
1853 while (iDstArg < RT_ELEMENTS(au64DstArgs))
1854 au64DstArgs[iDstArg++] = auSrcArgs[iSrcArg++];
1855
1856 pStackData->u.ProbeFireKernel.pauStackArgs = &auSrcArgs[iSrcArg];
1857 dtrace_probe(pVtgProbeLoc->idProbe, au64DstArgs[0], au64DstArgs[1], au64DstArgs[2], au64DstArgs[3], au64DstArgs[4]);
1858 }
1859#else
1860 dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
1861#endif
1862
1863 VBDT_CLEAR_STACK_DATA();
1864 LOG_DTRACE(("%s: returns\n", __FUNCTION__));
1865}
1866
1867
1868/**
1869 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireUser}
1870 */
1871static DECLCALLBACK(void) vboxDtTOps_ProbeFireUser(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, PCSUPDRVTRACERUSRCTX pCtx,
1872 PCVTGOBJHDR pVtgHdr, PCVTGPROBELOC pProbeLocRO)
1873{
1874 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pCtx, pCtx->idProbe));
1875 AssertPtrReturnVoid(pProbeLocRO);
1876 AssertPtrReturnVoid(pVtgHdr);
1877 RT_NOREF_PV(pThis);
1878 RT_NOREF_PV(pSession);
1879 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireUser);
1880
1881 if (pCtx->cBits == 32)
1882 {
1883 pStackData->u.ProbeFireUser.pCtx = pCtx;
1884 pStackData->u.ProbeFireUser.offArg = 0;
1885
1886#if ARCH_BITS == 64 || defined(RT_OS_DARWIN)
1887 /*
1888 * Combine two 32-bit arguments into one 64-bit argument where needed.
1889 */
1890 PVTGDESCPROBE pProbeDesc = pProbeLocRO->pProbe;
1891 AssertPtrReturnVoid(pProbeDesc);
1892 PVTGDESCARGLIST pArgList = (PVTGDESCARGLIST)((uintptr_t)pVtgHdr + pVtgHdr->offArgLists + pProbeDesc->offArgList);
1893 AssertPtrReturnVoid(pArgList);
1894
1895 if (!pArgList->fHaveLargeArgs)
1896 dtrace_probe(pCtx->idProbe,
1897 pCtx->u.X86.aArgs[0],
1898 pCtx->u.X86.aArgs[1],
1899 pCtx->u.X86.aArgs[2],
1900 pCtx->u.X86.aArgs[3],
1901 pCtx->u.X86.aArgs[4]);
1902 else
1903 {
1904 uint32_t const *auSrcArgs = &pCtx->u.X86.aArgs[0];
1905 uint32_t iSrcArg = 0;
1906 uint32_t iDstArg = 0;
1907 uint64_t au64DstArgs[5];
1908
1909 while ( iDstArg < RT_ELEMENTS(au64DstArgs)
1910 && iSrcArg < pArgList->cArgs)
1911 {
1912 au64DstArgs[iDstArg] = auSrcArgs[iSrcArg];
1913 if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iDstArg].fType))
1914 au64DstArgs[iDstArg] |= (uint64_t)auSrcArgs[++iSrcArg] << 32;
1915 iSrcArg++;
1916 iDstArg++;
1917 }
1918 while (iDstArg < RT_ELEMENTS(au64DstArgs))
1919 au64DstArgs[iDstArg++] = auSrcArgs[iSrcArg++];
1920
1921 pStackData->u.ProbeFireUser.offArg = iSrcArg - RT_ELEMENTS(au64DstArgs);
1922 dtrace_probe(pCtx->idProbe, au64DstArgs[0], au64DstArgs[1], au64DstArgs[2], au64DstArgs[3], au64DstArgs[4]);
1923 }
1924#else
1925 dtrace_probe(pCtx->idProbe,
1926 pCtx->u.X86.aArgs[0],
1927 pCtx->u.X86.aArgs[1],
1928 pCtx->u.X86.aArgs[2],
1929 pCtx->u.X86.aArgs[3],
1930 pCtx->u.X86.aArgs[4]);
1931#endif
1932 }
1933 else if (pCtx->cBits == 64)
1934 {
1935 pStackData->u.ProbeFireUser.pCtx = pCtx;
1936 pStackData->u.ProbeFireUser.offArg = 0;
1937 dtrace_probe(pCtx->idProbe,
1938 pCtx->u.Amd64.aArgs[0],
1939 pCtx->u.Amd64.aArgs[1],
1940 pCtx->u.Amd64.aArgs[2],
1941 pCtx->u.Amd64.aArgs[3],
1942 pCtx->u.Amd64.aArgs[4]);
1943 }
1944 else
1945 AssertFailed();
1946
1947 VBDT_CLEAR_STACK_DATA();
1948 LOG_DTRACE(("%s: returns\n", __FUNCTION__));
1949}
1950
1951
1952/**
1953 * interface_method_impl{SUPDRVTRACERREG,pfnTracerOpen}
1954 */
1955static DECLCALLBACK(int) vboxDtTOps_TracerOpen(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uint32_t uCookie,
1956 uintptr_t uArg, uintptr_t *puSessionData)
1957{
1958 if (uCookie != RT_MAKE_U32_FROM_U8('V', 'B', 'D', 'T'))
1959 return VERR_INVALID_MAGIC;
1960 if (uArg)
1961 return VERR_INVALID_PARAMETER;
1962 RT_NOREF_PV(pThis); RT_NOREF_PV(pSession);
1963 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1964
1965 int rc = dtrace_open((dtrace_state_t **)puSessionData, VBoxDtGetCurrentCreds());
1966
1967 VBDT_CLEAR_STACK_DATA();
1968 return RTErrConvertFromErrno(rc);
1969}
1970
1971
1972/**
1973 * interface_method_impl{SUPDRVTRACERREG,pfnTracerClose}
1974 */
1975static DECLCALLBACK(int) vboxDtTOps_TracerIoCtl(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData,
1976 uintptr_t uCmd, uintptr_t uArg, int32_t *piRetVal)
1977{
1978 AssertPtrReturn(uSessionData, VERR_INVALID_POINTER);
1979 RT_NOREF_PV(pThis); RT_NOREF_PV(pSession);
1980 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1981
1982 int rc = dtrace_ioctl((dtrace_state_t *)uSessionData, (intptr_t)uCmd, (intptr_t)uArg, piRetVal);
1983
1984 VBDT_CLEAR_STACK_DATA();
1985 return RTErrConvertFromErrno(rc);
1986}
1987
1988
1989/**
1990 * interface_method_impl{SUPDRVTRACERREG,pfnTracerClose}
1991 */
1992static DECLCALLBACK(void) vboxDtTOps_TracerClose(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, uintptr_t uSessionData)
1993{
1994 AssertPtrReturnVoid(uSessionData);
1995 RT_NOREF_PV(pThis); RT_NOREF_PV(pSession);
1996 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
1997
1998 dtrace_close((dtrace_state_t *)uSessionData);
1999
2000 VBDT_CLEAR_STACK_DATA();
2001}
2002
2003
2004/**
2005 * interface_method_impl{SUPDRVTRACERREG,pfnProviderRegister}
2006 */
2007static DECLCALLBACK(int) vboxDtTOps_ProviderRegister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
2008{
2009 LOG_DTRACE(("%s: %p %s/%s\n", __FUNCTION__, pThis, pCore->pszModName, pCore->pszName));
2010 AssertReturn(pCore->TracerData.DTrace.idProvider == 0, VERR_INTERNAL_ERROR_3);
2011 RT_NOREF_PV(pThis);
2012 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
2013
2014 PVTGDESCPROVIDER pDesc = pCore->pDesc;
2015 dtrace_pattr_t DtAttrs;
2016 vboxDtVtgConvAttr(&DtAttrs.dtpa_provider, &pDesc->AttrSelf);
2017 vboxDtVtgConvAttr(&DtAttrs.dtpa_mod, &pDesc->AttrModules);
2018 vboxDtVtgConvAttr(&DtAttrs.dtpa_func, &pDesc->AttrFunctions);
2019 vboxDtVtgConvAttr(&DtAttrs.dtpa_name, &pDesc->AttrNames);
2020 vboxDtVtgConvAttr(&DtAttrs.dtpa_args, &pDesc->AttrArguments);
2021
2022 /* Note! DTrace may call us back before dtrace_register returns, so we
2023 have to point it to pCore->TracerData.DTrace.idProvider. */
2024 AssertCompile(sizeof(dtrace_provider_id_t) == sizeof(pCore->TracerData.DTrace.idProvider));
2025 int rc = dtrace_register(pCore->pszName,
2026 &DtAttrs,
2027 DTRACE_PRIV_KERNEL,
2028 NULL /* cred */,
2029 &g_vboxDtVtgProvOps,
2030 pCore,
2031 &pCore->TracerData.DTrace.idProvider);
2032 if (!rc)
2033 {
2034 LOG_DTRACE(("%s: idProvider=%p\n", __FUNCTION__, pCore->TracerData.DTrace.idProvider));
2035 AssertPtr(pCore->TracerData.DTrace.idProvider);
2036 rc = VINF_SUCCESS;
2037 }
2038 else
2039 {
2040 pCore->TracerData.DTrace.idProvider = 0;
2041 rc = RTErrConvertFromErrno(rc);
2042 }
2043
2044 VBDT_CLEAR_STACK_DATA();
2045 LOG_DTRACE(("%s: returns %Rrc\n", __FUNCTION__, rc));
2046 return rc;
2047}
2048
2049
2050/**
2051 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregister}
2052 */
2053static DECLCALLBACK(int) vboxDtTOps_ProviderDeregister(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
2054{
2055 uintptr_t idProvider = pCore->TracerData.DTrace.idProvider;
2056 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pThis, idProvider));
2057 AssertPtrReturn(idProvider, VERR_INTERNAL_ERROR_3);
2058 RT_NOREF_PV(pThis);
2059 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
2060
2061 dtrace_invalidate(idProvider);
2062 int rc = dtrace_unregister(idProvider);
2063 if (!rc)
2064 {
2065 pCore->TracerData.DTrace.idProvider = 0;
2066 rc = VINF_SUCCESS;
2067 }
2068 else
2069 {
2070 AssertMsg(rc == EBUSY, ("%d\n", rc));
2071 pCore->TracerData.DTrace.fZombie = true;
2072 rc = VERR_TRY_AGAIN;
2073 }
2074
2075 VBDT_CLEAR_STACK_DATA();
2076 LOG_DTRACE(("%s: returns %Rrc\n", __FUNCTION__, rc));
2077 return rc;
2078}
2079
2080
2081/**
2082 * interface_method_impl{SUPDRVTRACERREG,pfnProviderDeregisterZombie}
2083 */
2084static DECLCALLBACK(int) vboxDtTOps_ProviderDeregisterZombie(PCSUPDRVTRACERREG pThis, PSUPDRVVDTPROVIDERCORE pCore)
2085{
2086 uintptr_t idProvider = pCore->TracerData.DTrace.idProvider;
2087 LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pThis, idProvider));
2088 AssertPtrReturn(idProvider, VERR_INTERNAL_ERROR_3);
2089 Assert(pCore->TracerData.DTrace.fZombie);
2090 RT_NOREF_PV(pThis);
2091 VBDT_SETUP_STACK_DATA(kVBoxDtCaller_Generic);
2092
2093 int rc = dtrace_unregister(idProvider);
2094 if (!rc)
2095 {
2096 pCore->TracerData.DTrace.idProvider = 0;
2097 rc = VINF_SUCCESS;
2098 }
2099 else
2100 {
2101 AssertMsg(rc == EBUSY, ("%d\n", rc));
2102 rc = VERR_TRY_AGAIN;
2103 }
2104
2105 VBDT_CLEAR_STACK_DATA();
2106 LOG_DTRACE(("%s: returns %Rrc\n", __FUNCTION__, rc));
2107 return rc;
2108}
2109
2110
2111
2112/**
2113 * The tracer registration record of the VBox DTrace implementation
2114 */
2115static SUPDRVTRACERREG g_VBoxDTraceReg =
2116{
2117 SUPDRVTRACERREG_MAGIC,
2118 SUPDRVTRACERREG_VERSION,
2119 vboxDtTOps_ProbeFireKernel,
2120 vboxDtTOps_ProbeFireUser,
2121 vboxDtTOps_TracerOpen,
2122 vboxDtTOps_TracerIoCtl,
2123 vboxDtTOps_TracerClose,
2124 vboxDtTOps_ProviderRegister,
2125 vboxDtTOps_ProviderDeregister,
2126 vboxDtTOps_ProviderDeregisterZombie,
2127 SUPDRVTRACERREG_MAGIC
2128};
2129
2130
2131
2132/**
2133 * Module termination code.
2134 *
2135 * @param hMod Opque module handle.
2136 */
2137DECLEXPORT(void) ModuleTerm(void *hMod)
2138{
2139 SUPR0TracerDeregisterImpl(hMod, NULL);
2140 dtrace_detach();
2141 vboxDtTermThreadDb();
2142}
2143
2144
2145/**
2146 * Module initialization code.
2147 *
2148 * @param hMod Opque module handle.
2149 */
2150DECLEXPORT(int) ModuleInit(void *hMod)
2151{
2152 int rc = vboxDtInitThreadDb();
2153 if (RT_SUCCESS(rc))
2154 {
2155 rc = dtrace_attach();
2156 if (rc == DDI_SUCCESS)
2157 {
2158 rc = SUPR0TracerRegisterImpl(hMod, NULL, &g_VBoxDTraceReg, &g_pVBoxDTraceHlp);
2159 if (RT_SUCCESS(rc))
2160 return rc;
2161
2162 dtrace_detach();
2163 }
2164 else
2165 {
2166 SUPR0Printf("dtrace_attach -> %d\n", rc);
2167 rc = VERR_INTERNAL_ERROR_5;
2168 }
2169 vboxDtTermThreadDb();
2170 }
2171 else
2172 SUPR0Printf("vboxDtInitThreadDb -> %d\n", rc);
2173
2174 return rc;
2175}
2176
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette