VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@ 15531

Last change on this file since 15531 was 15505, checked in by vboxsync, 16 years ago

SUPDrv,INTNet: Heads up! SupDrv version bumped. Added SUPR0ObjAddRefEx for dealing with the handle table callback which occurs while owning a spinlock. Normally SUPR0ObjAddRef[Ex] would always allocate a usage record, which means RTMemAlloc, but this is a bad idea when inside a spinlock. SUPR0ObjAddRefEx sports an additional parameter indicating whether it is allowed to block or not.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 181.0 KB
Line 
1/* $Revision: 15505 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/semaphore.h>
41#include <iprt/spinlock.h>
42#include <iprt/thread.h>
43#include <iprt/process.h>
44#include <iprt/mp.h>
45#include <iprt/power.h>
46#include <iprt/cpuset.h>
47#include <iprt/uuid.h>
48#include <VBox/param.h>
49#include <VBox/log.h>
50#include <VBox/err.h>
51#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
52# include <iprt/crc32.h>
53# include <iprt/net.h>
54#endif
55/* VBox/x86.h not compatible with the Linux kernel sources */
56#ifdef RT_OS_LINUX
57# define X86_CPUID_VENDOR_AMD_EBX 0x68747541
58# define X86_CPUID_VENDOR_AMD_ECX 0x444d4163
59# define X86_CPUID_VENDOR_AMD_EDX 0x69746e65
60#else
61# include <VBox/x86.h>
62#endif
63
64/*
65 * Logging assignments:
66 * Log - useful stuff, like failures.
67 * LogFlow - program flow, except the really noisy bits.
68 * Log2 - Cleanup.
69 * Log3 - Loader flow noise.
70 * Log4 - Call VMMR0 flow noise.
71 * Log5 - Native yet-to-be-defined noise.
72 * Log6 - Native ioctl flow noise.
73 *
74 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
75 * instanciation in log-vbox.c(pp).
76 */
77
78
79/*******************************************************************************
80* Defined Constants And Macros *
81*******************************************************************************/
82/* from x86.h - clashes with linux thus this duplication */
83#undef X86_CR0_PG
84#define X86_CR0_PG RT_BIT(31)
85#undef X86_CR0_PE
86#define X86_CR0_PE RT_BIT(0)
87#undef X86_CPUID_AMD_FEATURE_EDX_NX
88#define X86_CPUID_AMD_FEATURE_EDX_NX RT_BIT(20)
89#undef MSR_K6_EFER
90#define MSR_K6_EFER 0xc0000080
91#undef MSR_K6_EFER_NXE
92#define MSR_K6_EFER_NXE RT_BIT(11)
93#undef MSR_K6_EFER_LMA
94#define MSR_K6_EFER_LMA RT_BIT(10)
95#undef X86_CR4_PGE
96#define X86_CR4_PGE RT_BIT(7)
97#undef X86_CR4_PAE
98#define X86_CR4_PAE RT_BIT(5)
99#undef X86_CPUID_AMD_FEATURE_EDX_LONG_MODE
100#define X86_CPUID_AMD_FEATURE_EDX_LONG_MODE RT_BIT(29)
101
102
103/** The frequency by which we recalculate the u32UpdateHz and
104 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
105#define GIP_UPDATEHZ_RECALC_FREQ 0x800
106
107/**
108 * Validates a session pointer.
109 *
110 * @returns true/false accordingly.
111 * @param pSession The session.
112 */
113#define SUP_IS_SESSION_VALID(pSession) \
114 ( VALID_PTR(pSession) \
115 && pSession->u32Cookie == BIRD_INV)
116
117/** @def VBOX_SVN_REV
118 * The makefile should define this if it can. */
119#ifndef VBOX_SVN_REV
120# define VBOX_SVN_REV 0
121#endif
122
123/*******************************************************************************
124* Internal Functions *
125*******************************************************************************/
126static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
127static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
128static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
129static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
130static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
131static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
132static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
133static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
134static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
135static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
136static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
137static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
138static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt);
139#ifdef RT_OS_WINDOWS
140static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
141static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3);
142#endif /* RT_OS_WINDOWS */
143static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
144static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
145static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
146static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
147static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
148
149#ifdef RT_WITH_W64_UNWIND_HACK
150DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
151DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, unsigned idCpu, unsigned uOperation);
152DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
153DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
154DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
155DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
156DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
157
158DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
159DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
160DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
161DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
162DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
163DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRefEx)(void *pvObj, PSUPDRVSESSION pSession, bool fNoPreempt);
164DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
165DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
166DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
167DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
168DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
169DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
170DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
171DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
172DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
173DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
174DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
175DECLASM(int) UNWIND_WRAP(SUPR0PageAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages);
176DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
177//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
178DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
179DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
180DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
181DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
182DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
183DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
184DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
185DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
186DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
187DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
188DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
189DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
190DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
191DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, RTR0PROCESS R0Process);
192DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
193DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
194DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
195/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
196/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
197/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
198/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
199/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
200DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
201/* RTProcSelf - not necessary */
202/* RTR0ProcHandleSelf - not necessary */
203DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
204DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
205DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
206DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
207DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
208DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
209DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
210DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
211DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
212DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
213DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
214DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
215DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
216DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
217DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
218DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
219DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
220DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
221DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
222DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
223DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
224/* RTTimeNanoTS - not necessary */
225/* RTTimeMilliTS - not necessary */
226/* RTTimeSystemNanoTS - not necessary */
227/* RTTimeSystemMilliTS - not necessary */
228/* RTThreadNativeSelf - not necessary */
229DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
230DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
231#if 0
232/* RTThreadSelf - not necessary */
233DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
234 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
235DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
236DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
237DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
238DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
239DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
240DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
241DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
242DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
243DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
244DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
245#endif
246/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
247/* RTMpCpuId - not necessary */
248/* RTMpCpuIdFromSetIndex - not necessary */
249/* RTMpCpuIdToSetIndex - not necessary */
250/* RTMpIsCpuPossible - not necessary */
251/* RTMpGetCount - not necessary */
252/* RTMpGetMaxCpuId - not necessary */
253/* RTMpGetOnlineCount - not necessary */
254/* RTMpGetOnlineSet - not necessary */
255/* RTMpGetSet - not necessary */
256/* RTMpIsCpuOnline - not necessary */
257DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
258DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
259DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
260/* RTLogRelDefaultInstance - not necessary. */
261DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
262/* RTLogLogger - can't wrap this buster. */
263/* RTLogLoggerEx - can't wrap this buster. */
264DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
265/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
266DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
267DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
268/* AssertMsg2 - can't wrap this buster. */
269#endif /* RT_WITH_W64_UNWIND_HACK */
270
271
272/*******************************************************************************
273* Global Variables *
274*******************************************************************************/
/**
 * Array of the R0 SUP API.
 *
 * Name-to-function table resolved against loaded ring-0 images by the
 * loader code.  The first ten "Abs" entries do not hold real function
 * pointers but absolute values patched in at runtime by
 * supdrvInitDevExt, which indexes this array directly (g_aFunctions[0]
 * through g_aFunctions[9]) -- do NOT reorder or insert entries before
 * them.
 */
static SUPFUNC g_aFunctions[] =
{
    /* name                                     function */
    /* Entries with absolute addresses determined at runtime, fixup
       code makes ugly ASSUMPTIONS about the order here: */
    { "SUPR0AbsIs64bit",                        (void *)0 },
    { "SUPR0Abs64bitKernelCS",                  (void *)0 },
    { "SUPR0Abs64bitKernelSS",                  (void *)0 },
    { "SUPR0Abs64bitKernelDS",                  (void *)0 },
    { "SUPR0AbsKernelCS",                       (void *)0 },
    { "SUPR0AbsKernelSS",                       (void *)0 },
    { "SUPR0AbsKernelDS",                       (void *)0 },
    { "SUPR0AbsKernelES",                       (void *)0 },
    { "SUPR0AbsKernelFS",                       (void *)0 },
    { "SUPR0AbsKernelGS",                       (void *)0 },
    /* Normal function pointers: */
    { "SUPR0ComponentRegisterFactory",          (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
    { "SUPR0ComponentDeregisterFactory",        (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
    { "SUPR0ComponentQueryFactory",             (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
    { "SUPR0ObjRegister",                       (void *)UNWIND_WRAP(SUPR0ObjRegister) },
    { "SUPR0ObjAddRef",                         (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
    { "SUPR0ObjAddRefEx",                       (void *)UNWIND_WRAP(SUPR0ObjAddRefEx) },
    { "SUPR0ObjRelease",                        (void *)UNWIND_WRAP(SUPR0ObjRelease) },
    { "SUPR0ObjVerifyAccess",                   (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
    { "SUPR0LockMem",                           (void *)UNWIND_WRAP(SUPR0LockMem) },
    { "SUPR0UnlockMem",                         (void *)UNWIND_WRAP(SUPR0UnlockMem) },
    { "SUPR0ContAlloc",                         (void *)UNWIND_WRAP(SUPR0ContAlloc) },
    { "SUPR0ContFree",                          (void *)UNWIND_WRAP(SUPR0ContFree) },
    { "SUPR0LowAlloc",                          (void *)UNWIND_WRAP(SUPR0LowAlloc) },
    { "SUPR0LowFree",                           (void *)UNWIND_WRAP(SUPR0LowFree) },
    { "SUPR0MemAlloc",                          (void *)UNWIND_WRAP(SUPR0MemAlloc) },
    { "SUPR0MemGetPhys",                        (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
    { "SUPR0MemFree",                           (void *)UNWIND_WRAP(SUPR0MemFree) },
    { "SUPR0PageAlloc",                         (void *)UNWIND_WRAP(SUPR0PageAlloc) },
    { "SUPR0PageFree",                          (void *)UNWIND_WRAP(SUPR0PageFree) },
    { "SUPR0Printf",                            (void *)SUPR0Printf }, /** @todo needs wrapping? */
    { "SUPR0GetPagingMode",                     (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
    { "SUPR0EnableVTx",                         (void *)SUPR0EnableVTx },
    { "RTMemAlloc",                             (void *)UNWIND_WRAP(RTMemAlloc) },
    { "RTMemAllocZ",                            (void *)UNWIND_WRAP(RTMemAllocZ) },
    { "RTMemFree",                              (void *)UNWIND_WRAP(RTMemFree) },
    /*{ "RTMemDup",                             (void *)UNWIND_WRAP(RTMemDup) },
    { "RTMemDupEx",                             (void *)UNWIND_WRAP(RTMemDupEx) },*/
    { "RTMemRealloc",                           (void *)UNWIND_WRAP(RTMemRealloc) },
    { "RTR0MemObjAllocLow",                     (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
    { "RTR0MemObjAllocPage",                    (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
    { "RTR0MemObjAllocPhys",                    (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
    { "RTR0MemObjAllocPhysNC",                  (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
    { "RTR0MemObjAllocCont",                    (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
    { "RTR0MemObjEnterPhys",                    (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
    { "RTR0MemObjLockUser",                     (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
    { "RTR0MemObjMapKernel",                    (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
    { "RTR0MemObjMapKernelEx",                  (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
    { "RTR0MemObjMapUser",                      (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
    { "RTR0MemObjAddress",                      (void *)RTR0MemObjAddress },
    { "RTR0MemObjAddressR3",                    (void *)RTR0MemObjAddressR3 },
    { "RTR0MemObjSize",                         (void *)RTR0MemObjSize },
    { "RTR0MemObjIsMapping",                    (void *)RTR0MemObjIsMapping },
    { "RTR0MemObjGetPagePhysAddr",              (void *)RTR0MemObjGetPagePhysAddr },
    { "RTR0MemObjFree",                         (void *)UNWIND_WRAP(RTR0MemObjFree) },
/* These don't work yet on linux - use fast mutexes!
    { "RTSemMutexCreate",                       (void *)RTSemMutexCreate },
    { "RTSemMutexRequest",                      (void *)RTSemMutexRequest },
    { "RTSemMutexRelease",                      (void *)RTSemMutexRelease },
    { "RTSemMutexDestroy",                      (void *)RTSemMutexDestroy },
*/
    { "RTProcSelf",                             (void *)RTProcSelf },
    { "RTR0ProcHandleSelf",                     (void *)RTR0ProcHandleSelf },
    { "RTSemFastMutexCreate",                   (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
    { "RTSemFastMutexDestroy",                  (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
    { "RTSemFastMutexRequest",                  (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
    { "RTSemFastMutexRelease",                  (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
    { "RTSemEventCreate",                       (void *)UNWIND_WRAP(RTSemEventCreate) },
    { "RTSemEventSignal",                       (void *)UNWIND_WRAP(RTSemEventSignal) },
    { "RTSemEventWait",                         (void *)UNWIND_WRAP(RTSemEventWait) },
    { "RTSemEventWaitNoResume",                 (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
    { "RTSemEventDestroy",                      (void *)UNWIND_WRAP(RTSemEventDestroy) },
    { "RTSemEventMultiCreate",                  (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
    { "RTSemEventMultiSignal",                  (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
    { "RTSemEventMultiReset",                   (void *)UNWIND_WRAP(RTSemEventMultiReset) },
    { "RTSemEventMultiWait",                    (void *)UNWIND_WRAP(RTSemEventMultiWait) },
    { "RTSemEventMultiWaitNoResume",            (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
    { "RTSemEventMultiDestroy",                 (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
    { "RTSpinlockCreate",                       (void *)UNWIND_WRAP(RTSpinlockCreate) },
    { "RTSpinlockDestroy",                      (void *)UNWIND_WRAP(RTSpinlockDestroy) },
    { "RTSpinlockAcquire",                      (void *)UNWIND_WRAP(RTSpinlockAcquire) },
    { "RTSpinlockRelease",                      (void *)UNWIND_WRAP(RTSpinlockRelease) },
    { "RTSpinlockAcquireNoInts",                (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
    { "RTSpinlockReleaseNoInts",                (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
    { "RTTimeNanoTS",                           (void *)RTTimeNanoTS },
    /* NOTE(review): the exported names below are spelt "Millie" while they map
       to the RTTimeMilli* implementations -- presumably a legacy export name
       kept for client compatibility; confirm against clients before renaming. */
    { "RTTimeMillieTS",                         (void *)RTTimeMilliTS },
    { "RTTimeSystemNanoTS",                     (void *)RTTimeSystemNanoTS },
    { "RTTimeSystemMillieTS",                   (void *)RTTimeSystemMilliTS },
    { "RTThreadNativeSelf",                     (void *)RTThreadNativeSelf },
    { "RTThreadSleep",                          (void *)UNWIND_WRAP(RTThreadSleep) },
    { "RTThreadYield",                          (void *)UNWIND_WRAP(RTThreadYield) },
#if 0 /* Thread APIs, Part 2. */
    { "RTThreadSelf",                           (void *)UNWIND_WRAP(RTThreadSelf) },
    { "RTThreadCreate",                         (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
    { "RTThreadGetNative",                      (void *)UNWIND_WRAP(RTThreadGetNative) },
    { "RTThreadWait",                           (void *)UNWIND_WRAP(RTThreadWait) },
    { "RTThreadWaitNoResume",                   (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
    { "RTThreadGetName",                        (void *)UNWIND_WRAP(RTThreadGetName) },
    { "RTThreadSelfName",                       (void *)UNWIND_WRAP(RTThreadSelfName) },
    { "RTThreadGetType",                        (void *)UNWIND_WRAP(RTThreadGetType) },
    { "RTThreadUserSignal",                     (void *)UNWIND_WRAP(RTThreadUserSignal) },
    { "RTThreadUserReset",                      (void *)UNWIND_WRAP(RTThreadUserReset) },
    { "RTThreadUserWait",                       (void *)UNWIND_WRAP(RTThreadUserWait) },
    { "RTThreadUserWaitNoResume",               (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
#endif
    { "RTLogDefaultInstance",                   (void *)RTLogDefaultInstance },
    { "RTMpCpuId",                              (void *)RTMpCpuId },
    { "RTMpCpuIdFromSetIndex",                  (void *)RTMpCpuIdFromSetIndex },
    { "RTMpCpuIdToSetIndex",                    (void *)RTMpCpuIdToSetIndex },
    { "RTMpIsCpuPossible",                      (void *)RTMpIsCpuPossible },
    { "RTMpGetCount",                           (void *)RTMpGetCount },
    { "RTMpGetMaxCpuId",                        (void *)RTMpGetMaxCpuId },
    { "RTMpGetOnlineCount",                     (void *)RTMpGetOnlineCount },
    { "RTMpGetOnlineSet",                       (void *)RTMpGetOnlineSet },
    { "RTMpGetSet",                             (void *)RTMpGetSet },
    { "RTMpIsCpuOnline",                        (void *)RTMpIsCpuOnline },
    { "RTMpOnAll",                              (void *)UNWIND_WRAP(RTMpOnAll) },
    { "RTMpOnOthers",                           (void *)UNWIND_WRAP(RTMpOnOthers) },
    { "RTMpOnSpecific",                         (void *)UNWIND_WRAP(RTMpOnSpecific) },
    { "RTPowerNotificationRegister",            (void *)RTPowerNotificationRegister },
    { "RTPowerNotificationDeregister",          (void *)RTPowerNotificationDeregister },
    { "RTLogRelDefaultInstance",                (void *)RTLogRelDefaultInstance },
    { "RTLogSetDefaultInstanceThread",          (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
    { "RTLogLogger",                            (void *)RTLogLogger }, /** @todo remove this */
    { "RTLogLoggerEx",                          (void *)RTLogLoggerEx }, /** @todo remove this */
    { "RTLogLoggerExV",                         (void *)UNWIND_WRAP(RTLogLoggerExV) },
    { "RTLogPrintf",                            (void *)RTLogPrintf }, /** @todo remove this */
    { "RTLogPrintfV",                           (void *)UNWIND_WRAP(RTLogPrintfV) },
    { "AssertMsg1",                             (void *)UNWIND_WRAP(AssertMsg1) },
    { "AssertMsg2",                             (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
    { "RTR0AssertPanicSystem",                  (void *)RTR0AssertPanicSystem },
#endif
#if defined(RT_OS_DARWIN)
    { "RTAssertMsg1",                           (void *)RTAssertMsg1 },
    { "RTAssertMsg2",                           (void *)RTAssertMsg2 },
    { "RTAssertMsg2V",                          (void *)RTAssertMsg2V },
#endif
};
422
#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
/**
 * Drag in the rest of IPRT since we share it with the
 * rest of the kernel modules on darwin.
 *
 * The table exists only to create link-time references so these IPRT
 * symbols are pulled into this module; it is never iterated, hence the
 * NULL sentinel is purely conventional.
 */
PFNRT g_apfnVBoxDrvIPRTDeps[] =
{
    (PFNRT)RTCrc32,
    (PFNRT)RTErrConvertFromErrno,
    (PFNRT)RTNetIPv4IsHdrValid,
    (PFNRT)RTNetIPv4TCPChecksum,
    (PFNRT)RTNetIPv4UDPChecksum,
    (PFNRT)RTUuidCompare,
    (PFNRT)RTUuidCompareStr,
    (PFNRT)RTUuidFromStr,
    NULL
};
#endif /* RT_OS_DARWIN || RT_OS_SOLARIS */
441
442
443/**
444 * Initializes the device extentsion structure.
445 *
446 * @returns IPRT status code.
447 * @param pDevExt The device extension to initialize.
448 */
449int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
450{
451 int rc;
452
453#ifdef SUPDRV_WITH_RELEASE_LOGGER
454 /*
455 * Create the release log.
456 */
457 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
458 PRTLOGGER pRelLogger;
459 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
460 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
461 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
462 if (RT_SUCCESS(rc))
463 RTLogRelSetDefaultInstance(pRelLogger);
464#endif
465
466 /*
467 * Initialize it.
468 */
469 memset(pDevExt, 0, sizeof(*pDevExt));
470 rc = RTSpinlockCreate(&pDevExt->Spinlock);
471 if (!rc)
472 {
473 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
474 if (!rc)
475 {
476 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
477 if (!rc)
478 {
479 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
480 if (!rc)
481 {
482 rc = supdrvGipCreate(pDevExt);
483 if (RT_SUCCESS(rc))
484 {
485 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
486
487 /*
488 * Fixup the absolute symbols.
489 *
490 * Because of the table indexing assumptions we'll do #ifdef orgy here rather
491 * than distributing this to OS specific files. At least for now.
492 */
493#ifdef RT_OS_DARWIN
494 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
495 {
496 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
497 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
498 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
499 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
500 }
501 else
502 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
503 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
504 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
505 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
506 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
507#else
508# if ARCH_BITS == 64
509 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
510 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
511 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
512 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
513# elif ARCH_BITS == 32
514 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[4].pfn = (void *)0;
515# endif
516 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
517 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
518 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
519 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
520#endif
521 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
522 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
523 return VINF_SUCCESS;
524 }
525
526 RTSemFastMutexDestroy(pDevExt->mtxGip);
527 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
528 }
529 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
530 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
531 }
532 RTSemFastMutexDestroy(pDevExt->mtxLdr);
533 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
534 }
535 RTSpinlockDestroy(pDevExt->Spinlock);
536 pDevExt->Spinlock = NIL_RTSPINLOCK;
537 }
538#ifdef SUPDRV_WITH_RELEASE_LOGGER
539 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
540 RTLogDestroy(RTLogSetDefaultInstance(NULL));
541#endif
542
543 return rc;
544}
545
546
547/**
548 * Delete the device extension (e.g. cleanup members).
549 *
550 * @param pDevExt The device extension to delete.
551 */
552void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
553{
554 PSUPDRVOBJ pObj;
555 PSUPDRVUSAGE pUsage;
556
557 /*
558 * Kill mutexes and spinlocks.
559 */
560 RTSemFastMutexDestroy(pDevExt->mtxGip);
561 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
562 RTSemFastMutexDestroy(pDevExt->mtxLdr);
563 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
564 RTSpinlockDestroy(pDevExt->Spinlock);
565 pDevExt->Spinlock = NIL_RTSPINLOCK;
566 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
567 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
568
569 /*
570 * Free lists.
571 */
572 /* objects. */
573 pObj = pDevExt->pObjs;
574#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
575 Assert(!pObj); /* (can trigger on forced unloads) */
576#endif
577 pDevExt->pObjs = NULL;
578 while (pObj)
579 {
580 void *pvFree = pObj;
581 pObj = pObj->pNext;
582 RTMemFree(pvFree);
583 }
584
585 /* usage records. */
586 pUsage = pDevExt->pUsageFree;
587 pDevExt->pUsageFree = NULL;
588 while (pUsage)
589 {
590 void *pvFree = pUsage;
591 pUsage = pUsage->pNext;
592 RTMemFree(pvFree);
593 }
594
595 /* kill the GIP. */
596 supdrvGipDestroy(pDevExt);
597
598#ifdef SUPDRV_WITH_RELEASE_LOGGER
599 /* destroy the loggers. */
600 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
601 RTLogDestroy(RTLogSetDefaultInstance(NULL));
602#endif
603}
604
605
606/**
607 * Create session.
608 *
609 * @returns IPRT status code.
610 * @param pDevExt Device extension.
611 * @param fUser Flag indicating whether this is a user or kernel session.
612 * @param ppSession Where to store the pointer to the session data.
613 */
614int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
615{
616 /*
617 * Allocate memory for the session data.
618 */
619 int rc = VERR_NO_MEMORY;
620 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
621 if (pSession)
622 {
623 /* Initialize session data. */
624 rc = RTSpinlockCreate(&pSession->Spinlock);
625 if (!rc)
626 {
627 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
628 pSession->pDevExt = pDevExt;
629 pSession->u32Cookie = BIRD_INV;
630 /*pSession->pLdrUsage = NULL;
631 pSession->pVM = NULL;
632 pSession->pUsage = NULL;
633 pSession->pGip = NULL;
634 pSession->fGipReferenced = false;
635 pSession->Bundle.cUsed = 0; */
636 pSession->Uid = NIL_RTUID;
637 pSession->Gid = NIL_RTGID;
638 if (fUser)
639 {
640 pSession->Process = RTProcSelf();
641 pSession->R0Process = RTR0ProcHandleSelf();
642 }
643 else
644 {
645 pSession->Process = NIL_RTPROCESS;
646 pSession->R0Process = NIL_RTR0PROCESS;
647 }
648
649 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
650 return VINF_SUCCESS;
651 }
652
653 RTMemFree(pSession);
654 *ppSession = NULL;
655 Log(("Failed to create spinlock, rc=%d!\n", rc));
656 }
657
658 return rc;
659}
660
661
662/**
663 * Shared code for cleaning up a session.
664 *
665 * @param pDevExt Device extension.
666 * @param pSession Session data.
667 * This data will be freed by this routine.
668 */
669void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
670{
671 /*
672 * Cleanup the session first.
673 */
674 supdrvCleanupSession(pDevExt, pSession);
675
676 /*
677 * Free the rest of the session stuff.
678 */
679 RTSpinlockDestroy(pSession->Spinlock);
680 pSession->Spinlock = NIL_RTSPINLOCK;
681 pSession->pDevExt = NULL;
682 RTMemFree(pSession);
683 LogFlow(("supdrvCloseSession: returns\n"));
684}
685
686
687/**
688 * Shared code for cleaning up a session (but not quite freeing it).
689 *
690 * This is primarily intended for MAC OS X where we have to clean up the memory
691 * stuff before the file handle is closed.
692 *
693 * @param pDevExt Device extension.
694 * @param pSession Session data.
695 * This data will be freed by this routine.
696 */
697void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
698{
699 PSUPDRVBUNDLE pBundle;
700 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
701
702 /*
703 * Remove logger instances related to this session.
704 */
705 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
706
707 /*
708 * Release object references made in this session.
709 * In theory there should be noone racing us in this session.
710 */
711 Log2(("release objects - start\n"));
712 if (pSession->pUsage)
713 {
714 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
715 PSUPDRVUSAGE pUsage;
716 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
717
718 while ((pUsage = pSession->pUsage) != NULL)
719 {
720 PSUPDRVOBJ pObj = pUsage->pObj;
721 pSession->pUsage = pUsage->pNext;
722
723 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
724 if (pUsage->cUsage < pObj->cUsage)
725 {
726 pObj->cUsage -= pUsage->cUsage;
727 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
728 }
729 else
730 {
731 /* Destroy the object and free the record. */
732 if (pDevExt->pObjs == pObj)
733 pDevExt->pObjs = pObj->pNext;
734 else
735 {
736 PSUPDRVOBJ pObjPrev;
737 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
738 if (pObjPrev->pNext == pObj)
739 {
740 pObjPrev->pNext = pObj->pNext;
741 break;
742 }
743 Assert(pObjPrev);
744 }
745 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
746
747 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
748 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
749 if (pObj->pfnDestructor)
750#ifdef RT_WITH_W64_UNWIND_HACK
751 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
752#else
753 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
754#endif
755 RTMemFree(pObj);
756 }
757
758 /* free it and continue. */
759 RTMemFree(pUsage);
760
761 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
762 }
763
764 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
765 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during desturction!\n"));
766 }
767 Log2(("release objects - done\n"));
768
769 /*
770 * Release memory allocated in the session.
771 *
772 * We do not serialize this as we assume that the application will
773 * not allocated memory while closing the file handle object.
774 */
775 Log2(("freeing memory:\n"));
776 pBundle = &pSession->Bundle;
777 while (pBundle)
778 {
779 PSUPDRVBUNDLE pToFree;
780 unsigned i;
781
782 /*
783 * Check and unlock all entries in the bundle.
784 */
785 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
786 {
787 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
788 {
789 int rc;
790 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
791 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
792 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
793 {
794 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
795 AssertRC(rc); /** @todo figure out how to handle this. */
796 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
797 }
798 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
799 AssertRC(rc); /** @todo figure out how to handle this. */
800 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
801 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
802 }
803 }
804
805 /*
806 * Advance and free previous bundle.
807 */
808 pToFree = pBundle;
809 pBundle = pBundle->pNext;
810
811 pToFree->pNext = NULL;
812 pToFree->cUsed = 0;
813 if (pToFree != &pSession->Bundle)
814 RTMemFree(pToFree);
815 }
816 Log2(("freeing memory - done\n"));
817
818 /*
819 * Deregister component factories.
820 */
821 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
822 Log2(("deregistering component factories:\n"));
823 if (pDevExt->pComponentFactoryHead)
824 {
825 PSUPDRVFACTORYREG pPrev = NULL;
826 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
827 while (pCur)
828 {
829 if (pCur->pSession == pSession)
830 {
831 /* unlink it */
832 PSUPDRVFACTORYREG pNext = pCur->pNext;
833 if (pPrev)
834 pPrev->pNext = pNext;
835 else
836 pDevExt->pComponentFactoryHead = pNext;
837
838 /* free it */
839 pCur->pNext = NULL;
840 pCur->pSession = NULL;
841 pCur->pFactory = NULL;
842 RTMemFree(pCur);
843
844 /* next */
845 pCur = pNext;
846 }
847 else
848 {
849 /* next */
850 pPrev = pCur;
851 pCur = pCur->pNext;
852 }
853 }
854 }
855 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
856 Log2(("deregistering component factories - done\n"));
857
858 /*
859 * Loaded images needs to be dereferenced and possibly freed up.
860 */
861 RTSemFastMutexRequest(pDevExt->mtxLdr);
862 Log2(("freeing images:\n"));
863 if (pSession->pLdrUsage)
864 {
865 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
866 pSession->pLdrUsage = NULL;
867 while (pUsage)
868 {
869 void *pvFree = pUsage;
870 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
871 if (pImage->cUsage > pUsage->cUsage)
872 pImage->cUsage -= pUsage->cUsage;
873 else
874 supdrvLdrFree(pDevExt, pImage);
875 pUsage->pImage = NULL;
876 pUsage = pUsage->pNext;
877 RTMemFree(pvFree);
878 }
879 }
880 RTSemFastMutexRelease(pDevExt->mtxLdr);
881 Log2(("freeing images - done\n"));
882
883 /*
884 * Unmap the GIP.
885 */
886 Log2(("umapping GIP:\n"));
887 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
888 {
889 SUPR0GipUnmap(pSession);
890 pSession->fGipReferenced = 0;
891 }
892 Log2(("umapping GIP - done\n"));
893}
894
895
896/**
897 * Fast path I/O Control worker.
898 *
899 * @returns VBox status code that should be passed down to ring-3 unchanged.
900 * @param uIOCtl Function number.
901 * @param idCpu VMCPU id.
902 * @param pDevExt Device extention.
903 * @param pSession Session data.
904 */
905int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, unsigned idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
906{
907 /*
908 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
909 */
910 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
911 {
912 switch (uIOCtl)
913 {
914 case SUP_IOCTL_FAST_DO_RAW_RUN:
915#ifdef RT_WITH_W64_UNWIND_HACK
916 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
917#else
918 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
919#endif
920 break;
921 case SUP_IOCTL_FAST_DO_HWACC_RUN:
922#ifdef RT_WITH_W64_UNWIND_HACK
923 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
924#else
925 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
926#endif
927 break;
928 case SUP_IOCTL_FAST_DO_NOP:
929#ifdef RT_WITH_W64_UNWIND_HACK
930 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
931#else
932 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
933#endif
934 break;
935 default:
936 return VERR_INTERNAL_ERROR;
937 }
938 return VINF_SUCCESS;
939 }
940 return VERR_INTERNAL_ERROR;
941}
942
943
944/**
945 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
946 * We would use strpbrk here if this function would be contained in the RedHat kABI white
947 * list, see http://www.kerneldrivers.org/RHEL5.
948 *
949 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
950 * @param pszStr String to check
951 * @param pszChars Character set
952 */
953static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
954{
955 int chCur;
956 while ((chCur = *pszStr++) != '\0')
957 {
958 int ch;
959 const char *psz = pszChars;
960 while ((ch = *psz++) != '\0')
961 if (ch == chCur)
962 return 1;
963
964 }
965 return 0;
966}
967
968
969/**
970 * I/O Control worker.
971 *
972 * @returns 0 on success.
973 * @returns VERR_INVALID_PARAMETER if the request is invalid.
974 *
975 * @param uIOCtl Function number.
976 * @param pDevExt Device extention.
977 * @param pSession Session data.
978 * @param pReqHdr The request header.
979 */
980int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
981{
982 /*
983 * Validate the request.
984 */
985 /* this first check could probably be omitted as its also done by the OS specific code... */
986 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
987 || pReqHdr->cbIn < sizeof(*pReqHdr)
988 || pReqHdr->cbOut < sizeof(*pReqHdr)))
989 {
990 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
991 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
992 return VERR_INVALID_PARAMETER;
993 }
994 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
995 {
996 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
997 {
998 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
999 return VERR_INVALID_PARAMETER;
1000 }
1001 }
1002 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
1003 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
1004 {
1005 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
1006 return VERR_INVALID_PARAMETER;
1007 }
1008
1009/*
1010 * Validation macros
1011 */
1012#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1013 do { \
1014 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1015 { \
1016 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1017 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1018 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1019 } \
1020 } while (0)
1021
1022#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1023
1024#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1025 do { \
1026 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1027 { \
1028 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1029 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
1030 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1031 } \
1032 } while (0)
1033
1034#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1035 do { \
1036 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1037 { \
1038 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1039 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1040 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1041 } \
1042 } while (0)
1043
1044#define REQ_CHECK_EXPR(Name, expr) \
1045 do { \
1046 if (RT_UNLIKELY(!(expr))) \
1047 { \
1048 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1049 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1050 } \
1051 } while (0)
1052
1053#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1054 do { \
1055 if (RT_UNLIKELY(!(expr))) \
1056 { \
1057 OSDBGPRINT( fmt ); \
1058 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1059 } \
1060 } while (0)
1061
1062
1063 /*
1064 * The switch.
1065 */
1066 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1067 {
1068 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1069 {
1070 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1071 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1072 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1073 {
1074 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1075 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1076 return 0;
1077 }
1078
1079#if 0
1080 /*
1081 * Call out to the OS specific code and let it do permission checks on the
1082 * client process.
1083 */
1084 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1085 {
1086 pReq->u.Out.u32Cookie = 0xffffffff;
1087 pReq->u.Out.u32SessionCookie = 0xffffffff;
1088 pReq->u.Out.u32SessionVersion = 0xffffffff;
1089 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1090 pReq->u.Out.pSession = NULL;
1091 pReq->u.Out.cFunctions = 0;
1092 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1093 return 0;
1094 }
1095#endif
1096
1097 /*
1098 * Match the version.
1099 * The current logic is very simple, match the major interface version.
1100 */
1101 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1102 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1103 {
1104 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1105 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1106 pReq->u.Out.u32Cookie = 0xffffffff;
1107 pReq->u.Out.u32SessionCookie = 0xffffffff;
1108 pReq->u.Out.u32SessionVersion = 0xffffffff;
1109 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1110 pReq->u.Out.pSession = NULL;
1111 pReq->u.Out.cFunctions = 0;
1112 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1113 return 0;
1114 }
1115
1116 /*
1117 * Fill in return data and be gone.
1118 * N.B. The first one to change SUPDRV_IOC_VERSION shall makes sure that
1119 * u32SessionVersion <= u32ReqVersion!
1120 */
1121 /** @todo Somehow validate the client and negotiate a secure cookie... */
1122 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1123 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1124 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1125 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1126 pReq->u.Out.pSession = pSession;
1127 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1128 pReq->Hdr.rc = VINF_SUCCESS;
1129 return 0;
1130 }
1131
1132 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1133 {
1134 /* validate */
1135 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1136 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1137
1138 /* execute */
1139 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1140 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1141 pReq->Hdr.rc = VINF_SUCCESS;
1142 return 0;
1143 }
1144
1145 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_INSTALL):
1146 {
1147 /* validate */
1148 PSUPIDTINSTALL pReq = (PSUPIDTINSTALL)pReqHdr;
1149 REQ_CHECK_SIZES(SUP_IOCTL_IDT_INSTALL);
1150
1151 /* execute */
1152 pReq->u.Out.u8Idt = 3;
1153 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1154 return 0;
1155 }
1156
1157 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_IDT_REMOVE):
1158 {
1159 /* validate */
1160 PSUPIDTREMOVE pReq = (PSUPIDTREMOVE)pReqHdr;
1161 REQ_CHECK_SIZES(SUP_IOCTL_IDT_REMOVE);
1162
1163 /* execute */
1164 pReq->Hdr.rc = VERR_NOT_SUPPORTED;
1165 return 0;
1166 }
1167
1168 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1169 {
1170 /* validate */
1171 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1172 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1173 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1174 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1175 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1176
1177 /* execute */
1178 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1179 if (RT_FAILURE(pReq->Hdr.rc))
1180 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1181 return 0;
1182 }
1183
1184 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1185 {
1186 /* validate */
1187 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1188 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1189
1190 /* execute */
1191 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1192 return 0;
1193 }
1194
1195 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1196 {
1197 /* validate */
1198 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1199 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1200
1201 /* execute */
1202 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1203 if (RT_FAILURE(pReq->Hdr.rc))
1204 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1205 return 0;
1206 }
1207
1208 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1209 {
1210 /* validate */
1211 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1212 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1213
1214 /* execute */
1215 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1216 return 0;
1217 }
1218
1219 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1220 {
1221 /* validate */
1222 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1223 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1224 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
1225 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
1226 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1227 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1228 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1229
1230 /* execute */
1231 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1232 return 0;
1233 }
1234
1235 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1236 {
1237 /* validate */
1238 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1239 REQ_CHECK_EXPR(Name, pReq->Hdr.cbIn >= sizeof(*pReq));
1240 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1241 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1242 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1243 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
1244 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
1245 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
1246 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
1247 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1248 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
1249 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
1250 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
1251 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
1252 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
1253
1254 if (pReq->u.In.cSymbols)
1255 {
1256 uint32_t i;
1257 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1258 for (i = 0; i < pReq->u.In.cSymbols; i++)
1259 {
1260 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
1261 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
1262 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1263 ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1264 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1265 ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbImage));
1266 }
1267 }
1268
1269 /* execute */
1270 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1271 return 0;
1272 }
1273
1274 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1275 {
1276 /* validate */
1277 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1278 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1279
1280 /* execute */
1281 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1282 return 0;
1283 }
1284
1285 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1286 {
1287 /* validate */
1288 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1289 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1290 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1291
1292 /* execute */
1293 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1294 return 0;
1295 }
1296
1297 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1298 {
1299 /* validate */
1300 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1301 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1302 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1303
1304 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1305 {
1306 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1307
1308 /* execute */
1309 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1310#ifdef RT_WITH_W64_UNWIND_HACK
1311 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1312#else
1313 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1314#endif
1315 else
1316 pReq->Hdr.rc = VERR_WRONG_ORDER;
1317 }
1318 else
1319 {
1320 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1321 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1322 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1323 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1324 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1325
1326 /* execute */
1327 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1328#ifdef RT_WITH_W64_UNWIND_HACK
1329 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1330#else
1331 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1332#endif
1333 else
1334 pReq->Hdr.rc = VERR_WRONG_ORDER;
1335 }
1336
1337 if ( RT_FAILURE(pReq->Hdr.rc)
1338 && pReq->Hdr.rc != VERR_INTERRUPTED
1339 && pReq->Hdr.rc != VERR_TIMEOUT)
1340 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1341 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1342 else
1343 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1344 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1345 return 0;
1346 }
1347
1348 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1349 {
1350 /* validate */
1351 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1352 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1353
1354 /* execute */
1355 pReq->Hdr.rc = VINF_SUCCESS;
1356 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1357 return 0;
1358 }
1359
1360 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1361 {
1362 /* validate */
1363 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1364 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1365 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1366
1367 /* execute */
1368 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1369 if (RT_FAILURE(pReq->Hdr.rc))
1370 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1371 return 0;
1372 }
1373
1374 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1375 {
1376 /* validate */
1377 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1378 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1379
1380 /* execute */
1381 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1382 return 0;
1383 }
1384
1385 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1386 {
1387 /* validate */
1388 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1389 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1390
1391 /* execute */
1392 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1393 if (RT_SUCCESS(pReq->Hdr.rc))
1394 pReq->u.Out.pGipR0 = pDevExt->pGip;
1395 return 0;
1396 }
1397
1398 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1399 {
1400 /* validate */
1401 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1402 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1403
1404 /* execute */
1405 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1406 return 0;
1407 }
1408
1409 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1410 {
1411 /* validate */
1412 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1413 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1414 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1415 || ( VALID_PTR(pReq->u.In.pVMR0)
1416 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1417 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1418 /* execute */
1419 pSession->pVM = pReq->u.In.pVMR0;
1420 pReq->Hdr.rc = VINF_SUCCESS;
1421 return 0;
1422 }
1423
1424 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC):
1425 {
1426 /* validate */
1427 PSUPPAGEALLOC pReq = (PSUPPAGEALLOC)pReqHdr;
1428 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_SIZE_IN);
1429 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC, SUP_IOCTL_PAGE_ALLOC_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1430
1431 /* execute */
1432 pReq->Hdr.rc = SUPR0PageAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1433 if (RT_FAILURE(pReq->Hdr.rc))
1434 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1435 return 0;
1436 }
1437
1438 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1439 {
1440 /* validate */
1441 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1442 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1443 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1444 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1445 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1446 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1447 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1448 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1449 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1450
1451 /* execute */
1452 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1453 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1454 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1455 &pReq->u.Out.aPages[0]);
1456 if (RT_FAILURE(pReq->Hdr.rc))
1457 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1458 return 0;
1459 }
1460
1461 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1462 {
1463 /* validate */
1464 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1465 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1466 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1467 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1468 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1469 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1470
1471 /* execute */
1472 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1473 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1474 if (RT_FAILURE(pReq->Hdr.rc))
1475 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1476 return 0;
1477 }
1478
1479 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1480 {
1481 /* validate */
1482 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1483 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1484
1485 /* execute */
1486 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1487 return 0;
1488 }
1489
1490 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1491 {
1492 /* validate */
1493 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1494 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1495 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1496
1497 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1498 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1499 else
1500 {
1501 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1502 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1503 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1504 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1505 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1506 }
1507 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1508
1509 /* execute */
1510 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1511 return 0;
1512 }
1513
1514 default:
1515 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1516 break;
1517 }
1518 return SUPDRV_ERR_GENERAL_FAILURE;
1519}
1520
1521
1522/**
1523 * Inter-Driver Communcation (IDC) worker.
1524 *
1525 * @returns VBox status code.
1526 * @retval VINF_SUCCESS on success.
1527 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1528 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1529 *
1530 * @param uReq The request (function) code.
1531 * @param pDevExt Device extention.
1532 * @param pSession Session data.
1533 * @param pReqHdr The request header.
1534 */
1535int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1536{
1537 /*
1538 * The OS specific code has already validated the pSession
1539 * pointer, and the request size being greater or equal to
1540 * size of the header.
1541 *
1542 * So, just check that pSession is a kernel context session.
1543 */
1544 if (RT_UNLIKELY( pSession
1545 && pSession->R0Process != NIL_RTR0PROCESS))
1546 return VERR_INVALID_PARAMETER;
1547
1548/*
1549 * Validation macro.
1550 */
1551#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1552 do { \
1553 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1554 { \
1555 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1556 (long)pReqHdr->cb, (long)(cbExpect))); \
1557 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1558 } \
1559 } while (0)
1560
1561 switch (uReq)
1562 {
1563 case SUPDRV_IDC_REQ_CONNECT:
1564 {
1565 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1566 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1567
1568 /*
1569 * Validate the cookie and other input.
1570 */
1571 if (pReq->Hdr.pSession != NULL)
1572 {
1573 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1574 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1575 }
1576 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1577 {
1578 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1579 pReq->u.In.u32MagicCookie, SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1580 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1581 }
1582 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1583 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1584 {
1585 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1586 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1587 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1588 }
1589
1590 /*
1591 * Match the version.
1592 * The current logic is very simple, match the major interface version.
1593 */
1594 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1595 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1596 {
1597 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1598 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, SUPDRV_IDC_VERSION));
1599 pReq->u.Out.pSession = NULL;
1600 pReq->u.Out.uSessionVersion = 0xffffffff;
1601 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1602 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1603 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1604 return VINF_SUCCESS;
1605 }
1606
1607 pReq->u.Out.pSession = NULL;
1608 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1609 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1610 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1611
1612 /*
1613 * On NT we will already have a session associated with the
1614 * client, just like with the SUP_IOCTL_COOKIE request, while
1615 * the other doesn't.
1616 */
1617#ifdef RT_OS_WINDOWS
1618 pReq->Hdr.rc = VINF_SUCCESS;
1619#else
1620 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1621 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1622 if (RT_FAILURE(pReq->Hdr.rc))
1623 {
1624 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1625 return VINF_SUCCESS;
1626 }
1627#endif
1628
1629 pReq->u.Out.pSession = pSession;
1630 pReq->Hdr.pSession = pSession;
1631
1632 return VINF_SUCCESS;
1633 }
1634
1635 case SUPDRV_IDC_REQ_DISCONNECT:
1636 {
1637 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1638
1639#ifdef RT_OS_WINDOWS
1640 /* Windows will destroy the session when the file object is destroyed. */
1641#else
1642 supdrvCloseSession(pDevExt, pSession);
1643#endif
1644 return pReqHdr->rc = VINF_SUCCESS;
1645 }
1646
1647 case SUPDRV_IDC_REQ_GET_SYMBOL:
1648 {
1649 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1650 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1651
1652 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1653 return VINF_SUCCESS;
1654 }
1655
1656 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1657 {
1658 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1659 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1660
1661 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1662 return VINF_SUCCESS;
1663 }
1664
1665 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1666 {
1667 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1668 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1669
1670 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1671 return VINF_SUCCESS;
1672 }
1673
1674 default:
1675 Log(("Unknown IDC %#lx\n", (long)uReq));
1676 break;
1677 }
1678
1679#undef REQ_CHECK_IDC_SIZE
1680 return VERR_NOT_SUPPORTED;
1681}
1682
1683
1684/**
1685 * Register a object for reference counting.
1686 * The object is registered with one reference in the specified session.
1687 *
1688 * @returns Unique identifier on success (pointer).
1689 * All future reference must use this identifier.
1690 * @returns NULL on failure.
1691 * @param pfnDestructor The destructore function which will be called when the reference count reaches 0.
1692 * @param pvUser1 The first user argument.
1693 * @param pvUser2 The second user argument.
1694 */
1695SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1696{
1697 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1698 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1699 PSUPDRVOBJ pObj;
1700 PSUPDRVUSAGE pUsage;
1701
1702 /*
1703 * Validate the input.
1704 */
1705 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1706 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1707 AssertPtrReturn(pfnDestructor, NULL);
1708
1709 /*
1710 * Allocate and initialize the object.
1711 */
1712 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1713 if (!pObj)
1714 return NULL;
1715 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1716 pObj->enmType = enmType;
1717 pObj->pNext = NULL;
1718 pObj->cUsage = 1;
1719 pObj->pfnDestructor = pfnDestructor;
1720 pObj->pvUser1 = pvUser1;
1721 pObj->pvUser2 = pvUser2;
1722 pObj->CreatorUid = pSession->Uid;
1723 pObj->CreatorGid = pSession->Gid;
1724 pObj->CreatorProcess= pSession->Process;
1725 supdrvOSObjInitCreator(pObj, pSession);
1726
1727 /*
1728 * Allocate the usage record.
1729 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
1730 */
1731 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1732
1733 pUsage = pDevExt->pUsageFree;
1734 if (pUsage)
1735 pDevExt->pUsageFree = pUsage->pNext;
1736 else
1737 {
1738 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1739 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1740 if (!pUsage)
1741 {
1742 RTMemFree(pObj);
1743 return NULL;
1744 }
1745 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1746 }
1747
1748 /*
1749 * Insert the object and create the session usage record.
1750 */
1751 /* The object. */
1752 pObj->pNext = pDevExt->pObjs;
1753 pDevExt->pObjs = pObj;
1754
1755 /* The session record. */
1756 pUsage->cUsage = 1;
1757 pUsage->pObj = pObj;
1758 pUsage->pNext = pSession->pUsage;
1759 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1760 pSession->pUsage = pUsage;
1761
1762 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1763
1764 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1765 return pObj;
1766}
1767
1768
1769/**
1770 * Increment the reference counter for the object associating the reference
1771 * with the specified session.
1772 *
1773 * @returns IPRT status code.
1774 * @param pvObj The identifier returned by SUPR0ObjRegister().
1775 * @param pSession The session which is referencing the object.
1776 *
1777 * @remarks The caller should not own any spinlocks and must carefully protect
1778 * itself against potential race with the destructor so freed memory
1779 * isn't accessed here.
1780 */
1781SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1782{
1783 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
1784}
1785
1786
1787/**
1788 * Increment the reference counter for the object associating the reference
1789 * with the specified session.
1790 *
1791 * @returns IPRT status code.
1792 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
1793 * couldn't be allocated. (If you see this you're not doing the right
1794 * thing and it won't ever work reliably.)
1795 *
1796 * @param pvObj The identifier returned by SUPR0ObjRegister().
1797 * @param pSession The session which is referencing the object.
1798 * @param fNoBlocking Set if it's not OK to block. Never try to make the
1799 * first reference to an object in a session with this
1800 * argument set.
1801 *
1802 * @remarks The caller should not own any spinlocks and must carefully protect
1803 * itself against potential race with the destructor so freed memory
1804 * isn't accessed here.
1805 */
1806SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
1807{
1808 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1809 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1810 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
1811 int rc = VINF_SUCCESS;
1812 PSUPDRVUSAGE pUsagePre;
1813 PSUPDRVUSAGE pUsage;
1814
1815 /*
1816 * Validate the input.
1817 * Be ready for the destruction race (someone might be stuck in the
1818 * destructor waiting a lock we own).
1819 */
1820 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
1821 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
1822 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC + 1,
1823 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC + 1),
1824 VERR_INVALID_PARAMETER);
1825
1826 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1827
1828 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1829 {
1830 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1831
1832 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1833 return VERR_WRONG_ORDER;
1834 }
1835
1836 /*
1837 * Preallocate the usage record if we can.
1838 */
1839 pUsagePre = pDevExt->pUsageFree;
1840 if (pUsagePre)
1841 pDevExt->pUsageFree = pUsagePre->pNext;
1842 else if (!fNoBlocking)
1843 {
1844 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1845 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
1846 if (!pUsagePre)
1847 return VERR_NO_MEMORY;
1848
1849 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1850 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
1851 {
1852 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1853
1854 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
1855 return VERR_WRONG_ORDER;
1856 }
1857 }
1858
1859 /*
1860 * Reference the object.
1861 */
1862 pObj->cUsage++;
1863
1864 /*
1865 * Look for the session record.
1866 */
1867 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
1868 {
1869 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
1870 if (pUsage->pObj == pObj)
1871 break;
1872 }
1873 if (pUsage)
1874 pUsage->cUsage++;
1875 else if (pUsagePre)
1876 {
1877 /* create a new session record. */
1878 pUsagePre->cUsage = 1;
1879 pUsagePre->pObj = pObj;
1880 pUsagePre->pNext = pSession->pUsage;
1881 pSession->pUsage = pUsagePre;
1882 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
1883
1884 pUsagePre = NULL;
1885 }
1886 else
1887 {
1888 pObj->cUsage--;
1889 rc = VERR_TRY_AGAIN;
1890 }
1891
1892 /*
1893 * Put any unused usage record into the free list..
1894 */
1895 if (pUsagePre)
1896 {
1897 pUsagePre->pNext = pDevExt->pUsageFree;
1898 pDevExt->pUsageFree = pUsagePre;
1899 }
1900
1901 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1902
1903 return rc;
1904}
1905
1906
/**
 * Decrement / destroy a reference counter record for an object.
 *
 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS if the session had a usage record for the object.
 * @retval  VERR_INVALID_PARAMETER if no usage record was found, i.e. the
 *          session never referenced the object or has over-released it.
 *
 * @param   pvObj       The identifier returned by SUPR0ObjRegister().
 * @param   pSession    The session which is referencing the object.
 */
SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
{
    RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    PSUPDRVDEVEXT   pDevExt     = pSession->pDevExt;
    PSUPDRVOBJ      pObj        = (PSUPDRVOBJ)pvObj;
    bool            fDestroy    = false;   /* set when the last reference anywhere is dropped */
    PSUPDRVUSAGE    pUsage;
    PSUPDRVUSAGE    pUsagePrev;

    /*
     * Validate the input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
                    ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
                    VERR_INVALID_PARAMETER);

    /*
     * Acquire the spinlock and look for the usage record.
     */
    RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);

    for (pUsagePrev = NULL, pUsage = pSession->pUsage;
         pUsage;
         pUsagePrev = pUsage, pUsage = pUsage->pNext)
    {
        /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
        if (pUsage->pObj == pObj)
        {
            AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
            if (pUsage->cUsage > 1)
            {
                /* The session still holds other references; just drop one. */
                pObj->cUsage--;
                pUsage->cUsage--;
            }
            else
            {
                /*
                 * Free the session record.
                 * (Usage records are recycled on pDevExt->pUsageFree rather than freed.)
                 */
                if (pUsagePrev)
                    pUsagePrev->pNext = pUsage->pNext;
                else
                    pSession->pUsage = pUsage->pNext;
                pUsage->pNext = pDevExt->pUsageFree;
                pDevExt->pUsageFree = pUsage;

                /* What about the object? */
                if (pObj->cUsage > 1)
                    pObj->cUsage--;
                else
                {
                    /*
                     * Object is to be destroyed, unlink it.
                     * Bumping the magic to SUPDRVOBJ_MAGIC + 1 marks it as dying
                     * for racing SUPR0ObjAddRefEx() callers.
                     */
                    pObj->u32Magic = SUPDRVOBJ_MAGIC + 1;
                    fDestroy = true;
                    if (pDevExt->pObjs == pObj)
                        pDevExt->pObjs = pObj->pNext;
                    else
                    {
                        PSUPDRVOBJ pObjPrev;
                        for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
                            if (pObjPrev->pNext == pObj)
                            {
                                pObjPrev->pNext = pObj->pNext;
                                break;
                            }
                        Assert(pObjPrev);
                    }
                }
            }
            break;
        }
    }

    RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);

    /*
     * Call the destructor and free the object if required.
     * (Done after dropping the spinlock; the object is already unlinked.)
     */
    if (fDestroy)
    {
        Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
             pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
        if (pObj->pfnDestructor)
#ifdef RT_WITH_W64_UNWIND_HACK
            supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
#else
            pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
#endif
        RTMemFree(pObj);
    }

    AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
    return pUsage ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
}
2013
2014
2015/**
2016 * Verifies that the current process can access the specified object.
2017 *
2018 * @returns The following IPRT status code:
2019 * @retval VINF_SUCCESS if access was granted.
2020 * @retval VERR_PERMISSION_DENIED if denied access.
2021 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2022 *
2023 * @param pvObj The identifier returned by SUPR0ObjRegister().
2024 * @param pSession The session which wishes to access the object.
2025 * @param pszObjName Object string name. This is optional and depends on the object type.
2026 *
2027 * @remark The caller is responsible for making sure the object isn't removed while
2028 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2029 */
2030SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2031{
2032 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2033 int rc;
2034
2035 /*
2036 * Validate the input.
2037 */
2038 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2039 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2040 ("Invalid pvObj=%p magic=%#x (exepcted %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2041 VERR_INVALID_PARAMETER);
2042
2043 /*
2044 * Check access. (returns true if a decision has been made.)
2045 */
2046 rc = VERR_INTERNAL_ERROR;
2047 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2048 return rc;
2049
2050 /*
2051 * Default policy is to allow the user to access his own
2052 * stuff but nothing else.
2053 */
2054 if (pObj->CreatorUid == pSession->Uid)
2055 return VINF_SUCCESS;
2056 return VERR_PERMISSION_DENIED;
2057}
2058
2059
/**
 * Lock pages.
 *
 * @returns IPRT status code.
 * @param   pSession    Session to which the locked memory should be associated.
 * @param   pvR3        Start of the memory range to lock.
 *                      This must be page aligned.
 * @param   cPages      Number of pages in the range to lock.
 * @param   paPages     Where to return the physical address of each page in
 *                      the range (must have room for cPages entries).
 */
SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
{
    int             rc;
    SUPDRVMEMREF    Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
    const size_t    cb = (size_t)cPages << PAGE_SHIFT;
    LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));

    /*
     * Verify input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
    if (    RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
        ||  !pvR3)
    {
        Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
        return VERR_INVALID_PARAMETER;
    }

#ifdef RT_OS_WINDOWS /* A temporary hack for windows, will be removed once all ring-3 code has been cleaned up. */
    /* First check if we allocated it using SUPPageAlloc; if so then we don't need to lock it again */
    rc = supdrvPageGetPhys(pSession, pvR3, cPages, paPages);
    if (RT_SUCCESS(rc))
        return rc;
#endif

    /*
     * Let IPRT do the job.
     */
    Mem.eType = MEMREF_TYPE_LOCKED;
    rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTR0ProcHandleSelf());
    if (RT_SUCCESS(rc))
    {
        uint32_t iPage = cPages;
        AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
        AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));

        /* Fill the caller's array with the physical address of each locked page. */
        while (iPage-- > 0)
        {
            paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
            if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
            {
                AssertMsgFailed(("iPage=%d\n", iPage));
                rc = VERR_INTERNAL_ERROR;
                break;
            }
        }
        /* Track the lock on the session; on any failure undo the locking. */
        if (RT_SUCCESS(rc))
            rc = supdrvMemAdd(&Mem, pSession);
        if (RT_FAILURE(rc))
        {
            int rc2 = RTR0MemObjFree(Mem.MemObj, false);
            AssertRC(rc2);
        }
    }

    return rc;
}
2128
2129
/**
 * Unlocks the memory pointed to by pv.
 *
 * @returns IPRT status code.
 * @param   pSession    Session to which the memory was locked.
 * @param   pvR3        Memory to unlock (the ring-3 address passed to SUPR0LockMem).
 */
SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
{
    LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
#ifdef RT_OS_WINDOWS
    /*
     * Temporary hack for windows - SUPR0PageFree will unlock SUPR0PageAlloc
     * allocations; ignore this call.
     */
    if (supdrvPageWasLockedByPageAlloc(pSession, pvR3))
    {
        LogFlow(("Page will be unlocked in SUPR0PageFree -> ignore\n"));
        return VINF_SUCCESS;
    }
#endif
    /* Release the MEMREF_TYPE_LOCKED record established by SUPR0LockMem. */
    return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
}
2154
2155
/**
 * Allocates a chunk of page aligned memory with contiguous and fixed physical
 * backing.
 *
 * @returns IPRT status code.
 * @param   pSession    Session data.
 * @param   cPages      Number of pages to allocate (1-255).
 * @param   ppvR0       Where to put the address of Ring-0 mapping the allocated memory.
 * @param   ppvR3       Where to put the address of Ring-3 mapping the allocated memory.
 * @param   pHCPhys     Where to put the physical address of allocated memory.
 */
SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
{
    int             rc;
    SUPDRVMEMREF    Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
    LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    if (!ppvR3 || !ppvR0 || !pHCPhys)
    {
        Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
             pSession, ppvR0, ppvR3, pHCPhys));
        return VERR_INVALID_PARAMETER;

    }
    if (cPages < 1 || cPages >= 256)
    {
        Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
        return VERR_PAGE_COUNT_OUT_OF_RANGE;
    }

    /*
     * Let IPRT do the job: allocate the contiguous memory, map it into
     * ring-3, then record the allocation on the session.
     */
    rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
    if (RT_SUCCESS(rc))
    {
        int rc2;
        rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
                               RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
        if (RT_SUCCESS(rc))
        {
            Mem.eType = MEMREF_TYPE_CONT;
            rc = supdrvMemAdd(&Mem, pSession);
            if (!rc)
            {
                *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
                *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
                /* Contiguous backing, so the first page's address describes the whole block. */
                *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
                return 0;
            }

            /* Failure: undo the ring-3 mapping and fall through to free the memory object. */
            rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
            AssertRC(rc2);
        }
        rc2 = RTR0MemObjFree(Mem.MemObj, false);
        AssertRC(rc2);
    }

    return rc;
}
2220
2221
2222/**
2223 * Frees memory allocated using SUPR0ContAlloc().
2224 *
2225 * @returns IPRT status code.
2226 * @param pSession The session to which the memory was allocated.
2227 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2228 */
2229SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2230{
2231 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2232 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2233 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2234}
2235
2236
2237/**
2238 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2239 *
2240 * The memory isn't zeroed.
2241 *
2242 * @returns IPRT status code.
2243 * @param pSession Session data.
2244 * @param cPages Number of pages to allocate.
2245 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2246 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2247 * @param paPages Where to put the physical addresses of allocated memory.
2248 */
2249SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2250{
2251 unsigned iPage;
2252 int rc;
2253 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2254 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2255
2256 /*
2257 * Validate input.
2258 */
2259 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2260 if (!ppvR3 || !ppvR0 || !paPages)
2261 {
2262 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2263 pSession, ppvR3, ppvR0, paPages));
2264 return VERR_INVALID_PARAMETER;
2265
2266 }
2267 if (cPages < 1 || cPages >= 256)
2268 {
2269 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2270 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2271 }
2272
2273 /*
2274 * Let IPRT do the work.
2275 */
2276 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2277 if (RT_SUCCESS(rc))
2278 {
2279 int rc2;
2280 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2281 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2282 if (RT_SUCCESS(rc))
2283 {
2284 Mem.eType = MEMREF_TYPE_LOW;
2285 rc = supdrvMemAdd(&Mem, pSession);
2286 if (!rc)
2287 {
2288 for (iPage = 0; iPage < cPages; iPage++)
2289 {
2290 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2291 AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", paPages[iPage]));
2292 }
2293 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2294 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2295 return 0;
2296 }
2297
2298 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2299 AssertRC(rc2);
2300 }
2301
2302 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2303 AssertRC(rc2);
2304 }
2305
2306 return rc;
2307}
2308
2309
2310/**
2311 * Frees memory allocated using SUPR0LowAlloc().
2312 *
2313 * @returns IPRT status code.
2314 * @param pSession The session to which the memory was allocated.
2315 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2316 */
2317SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2318{
2319 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2320 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2321 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2322}
2323
2324
2325
/**
 * Allocates a chunk of memory with both R0 and R3 mappings.
 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
 *
 * @returns IPRT status code.
 * @param   pSession    The session to associated the allocation with.
 * @param   cb          Number of bytes to allocate (1 to just under 4MB).
 * @param   ppvR0       Where to store the address of the Ring-0 mapping.
 * @param   ppvR3       Where to store the address of the Ring-3 mapping.
 */
SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
{
    int             rc;
    SUPDRVMEMREF    Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
    LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
    AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
    if (cb < 1 || cb >= _4M)
    {
        Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Let IPRT do the work: allocate page memory, map it into ring-3 and
     * record the allocation on the session.
     */
    rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
    if (RT_SUCCESS(rc))
    {
        int rc2;
        rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
                               RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
        if (RT_SUCCESS(rc))
        {
            Mem.eType = MEMREF_TYPE_MEM;
            rc = supdrvMemAdd(&Mem, pSession);
            if (!rc)
            {
                *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
                *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
                return VINF_SUCCESS;
            }

            /* Failure: undo the ring-3 mapping and fall through to free the memory object. */
            rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
            AssertRC(rc2);
        }

        rc2 = RTR0MemObjFree(Mem.MemObj, false);
        AssertRC(rc2);
    }

    return rc;
}
2384
2385
/**
 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
 *
 * @returns IPRT status code.
 * @param   pSession    The session to which the memory was allocated.
 * @param   uPtr        The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
 * @param   paPages     Where to store the physical addresses; must have room
 *                      for one entry per page of the original allocation.
 */
SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));

    /*
     * Validate input.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrReturn(paPages, VERR_INVALID_POINTER);
    AssertReturn(uPtr, VERR_INVALID_PARAMETER);

    /*
     * Search for the address.
     * Walks the session's memory bundles looking for a MEMREF_TYPE_MEM record
     * whose ring-0 or ring-3 address matches uPtr; done under the session spinlock.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    pBundle->aMem[i].eType == MEMREF_TYPE_MEM
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                         || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                             && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
                        )
                   )
                {
                    /* Found it: report the physical address of every page. */
                    const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
                    size_t iPage;
                    for (iPage = 0; iPage < cPages; iPage++)
                    {
                        paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
                        paPages[iPage].uReserved = 0;
                    }
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    Log(("Failed to find %p!!!\n", (void *)uPtr));
    return VERR_INVALID_PARAMETER;
}
2443
2444
2445/**
2446 * Free memory allocated by SUPR0MemAlloc().
2447 *
2448 * @returns IPRT status code.
2449 * @param pSession The session owning the allocation.
2450 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2451 */
2452SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2453{
2454 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2455 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2456 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2457}
2458
2459
2460/**
2461 * Allocates a chunk of memory with only a R3 mappings.
2462 *
2463 * The memory is fixed and it's possible to query the physical addresses using
2464 * SUPR0MemGetPhys().
2465 *
2466 * @returns IPRT status code.
2467 * @param pSession The session to associated the allocation with.
2468 * @param cPages The number of pages to allocate.
2469 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2470 * @param paPages Where to store the addresses of the pages. Optional.
2471 */
2472SUPR0DECL(int) SUPR0PageAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2473{
2474 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2475 return SUPR0PageAllocEx(pSession, cPages, 0 /*fFlags*/, ppvR3, NULL, paPages);
2476}
2477
2478
2479/**
2480 * Allocates a chunk of memory with a kernel or/and a user mode mapping.
2481 *
2482 * The memory is fixed and it's possible to query the physical addresses using
2483 * SUPR0MemGetPhys().
2484 *
2485 * @returns IPRT status code.
2486 * @param pSession The session to associated the allocation with.
2487 * @param cPages The number of pages to allocate.
2488 * @param fFlags Flags, reserved for the future. Must be zero.
2489 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2490 * NULL if no ring-3 mapping.
 * @param   ppvR0       Where to store the address of the Ring-0 mapping.
2492 * NULL if no ring-0 mapping.
2493 * @param paPages Where to store the addresses of the pages. Optional.
2494 */
2495SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2496{
2497 int rc;
2498 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2499 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2500
2501 /*
2502 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2503 */
2504 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2505 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2506 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2507 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2508 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2509 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2510 {
2511 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than 128MB.\n", cPages));
2512 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2513 }
2514
2515 /*
2516 * Let IPRT do the work.
2517 */
2518 if (ppvR0)
2519 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2520 else
2521 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2522 if (RT_SUCCESS(rc))
2523 {
2524 int rc2;
2525 if (ppvR3)
2526 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2527 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2528 else
2529 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2530 if (RT_SUCCESS(rc))
2531 {
2532 Mem.eType = MEMREF_TYPE_PAGE;
2533 rc = supdrvMemAdd(&Mem, pSession);
2534 if (!rc)
2535 {
2536 if (ppvR3)
2537 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2538 if (ppvR0)
2539 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2540 if (paPages)
2541 {
2542 uint32_t iPage = cPages;
2543 while (iPage-- > 0)
2544 {
2545 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2546 Assert(paPages[iPage] != NIL_RTHCPHYS);
2547 }
2548 }
2549 return VINF_SUCCESS;
2550 }
2551
2552 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2553 AssertRC(rc2);
2554 }
2555
2556 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2557 AssertRC(rc2);
2558 }
2559 return rc;
2560}
2561
2562
/**
 * Creates a ring-0 mapping of an existing user-mode allocation belonging to
 * the calling session (a SUPR0PageAllocEx() allocation or locked memory).
 *
 * @returns IPRT status code.
 * @param pSession      The session owning the allocation.
 * @param pvR3          The ring-3 address of the allocation to map.
 * @param offSub        Byte offset into the allocation of the range to map.
 *                      Must be page aligned.
 * @param cbSub         Number of bytes to map. Must be page aligned and
 *                      non-zero.
 * @param fFlags        Flags, reserved for the future. Must be zero.
 * @param ppvR0         Where to store the address of the Ring-0 mapping.
 */
SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
                                  uint32_t fFlags, PRTR0PTR ppvR0)
{
    int             rc;
    PSUPDRVBUNDLE   pBundle;
    RTSPINLOCKTMP   SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    RTR0MEMOBJ      hMemObj = NIL_RTR0MEMOBJ;
    LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));

    /*
     * Validate input: offSub/cbSub must be page aligned and the range non-empty;
     * fFlags is reserved and must be zero.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
    AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
    AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
    AssertReturn(cbSub, VERR_INVALID_PARAMETER);

    /*
     * Find the memory object backing pvR3 in the session's bundles (under the
     * session spinlock).  Two kinds of entries qualify: page allocations, which
     * are matched via their ring-3 mapping object, and locked memory, which is
     * matched via the memory object itself.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    (   pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                    ||  (   pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
                         && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                         && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
                         && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
                {
                    hMemObj = pBundle->aMem[i].MemObj;
                    break;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    rc = VERR_INVALID_PARAMETER;
    if (hMemObj != NIL_RTR0MEMOBJ)
    {
        /*
         * Do some further input validations before calling IPRT.
         * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
         * The three checks together also guard against offSub + cbSub overflowing.
         */
        size_t cbMemObj = RTR0MemObjSize(hMemObj);
        if (    offSub < cbMemObj
            &&  cbSub <= cbMemObj
            &&  offSub + cbSub <= cbMemObj)
        {
            RTR0MEMOBJ hMapObj;
            rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
                                       RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
            if (RT_SUCCESS(rc))
                *ppvR0 = RTR0MemObjAddress(hMapObj);
        }
        else
            SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);

    }
    return rc;
}
2650
2651
2652
2653#ifdef RT_OS_WINDOWS
2654/**
2655 * Check if the pages were locked by SUPR0PageAlloc
2656 *
2657 * This function will be removed along with the lock/unlock hacks when
2658 * we've cleaned up the ring-3 code properly.
2659 *
2660 * @returns boolean
2661 * @param pSession The session to which the memory was allocated.
2662 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2663 */
2664static bool supdrvPageWasLockedByPageAlloc(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2665{
2666 PSUPDRVBUNDLE pBundle;
2667 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2668 LogFlow(("SUPR0PageIsLockedByPageAlloc: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2669
2670 /*
2671 * Search for the address.
2672 */
2673 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2674 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2675 {
2676 if (pBundle->cUsed > 0)
2677 {
2678 unsigned i;
2679 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2680 {
2681 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2682 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2683 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2684 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2685 {
2686 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2687 return true;
2688 }
2689 }
2690 }
2691 }
2692 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2693 return false;
2694}
2695
2696
2697/**
2698 * Get the physical addresses of memory allocated using SUPR0PageAllocEx().
2699 *
2700 * This function will be removed along with the lock/unlock hacks when
2701 * we've cleaned up the ring-3 code properly.
2702 *
2703 * @returns IPRT status code.
2704 * @param pSession The session to which the memory was allocated.
2705 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc().
2706 * @param cPages Number of pages in paPages
2707 * @param paPages Where to store the physical addresses.
2708 */
static int supdrvPageGetPhys(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
    LogFlow(("supdrvPageGetPhys: pSession=%p pvR3=%p cPages=%#lx paPages=%p\n", pSession, (void *)pvR3, (long)cPages, paPages));

    /*
     * Search the session's bundles (under the spinlock) for a page allocation
     * whose ring-3 mapping address matches pvR3.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                    &&  RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
                {
                    uint32_t iPage;
                    /* Clamp the request to the actual size of the allocation. */
                    size_t cMaxPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
                    cPages = (uint32_t)RT_MIN(cMaxPages, cPages);
                    for (iPage = 0; iPage < cPages; iPage++)
                        paPages[iPage] = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    return VERR_INVALID_PARAMETER;
}
2745#endif /* RT_OS_WINDOWS */
2746
2747
2748/**
2749 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
2750 *
2751 * @returns IPRT status code.
2752 * @param pSession The session owning the allocation.
2753 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
2754 * SUPR0PageAllocEx().
2755 */
2756SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2757{
2758 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2759 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2760 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
2761}
2762
2763
2764/**
2765 * Maps the GIP into userspace and/or get the physical address of the GIP.
2766 *
2767 * @returns IPRT status code.
2768 * @param pSession Session to which the GIP mapping should belong.
2769 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2770 * @param pHCPhysGip Where to store the physical address. (optional)
2771 *
 * @remark There is no reference counting on the mapping, so one call to this function
 *         counts globally as one reference. One call to SUPR0GipUnmap() will unmap the
 *         GIP and remove the session as a GIP user.
2775 */
2776SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2777{
2778 int rc = 0;
2779 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2780 RTR3PTR pGip = NIL_RTR3PTR;
2781 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2782 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2783
2784 /*
2785 * Validate
2786 */
2787 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2788 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2789 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2790
2791 RTSemFastMutexRequest(pDevExt->mtxGip);
2792 if (pDevExt->pGip)
2793 {
2794 /*
2795 * Map it?
2796 */
2797 if (ppGipR3)
2798 {
2799 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2800 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2801 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2802 if (RT_SUCCESS(rc))
2803 {
2804 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2805 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2806 }
2807 }
2808
2809 /*
2810 * Get physical address.
2811 */
2812 if (pHCPhysGip && !rc)
2813 HCPhys = pDevExt->HCPhysGip;
2814
2815 /*
2816 * Reference globally.
2817 */
2818 if (!pSession->fGipReferenced && !rc)
2819 {
2820 pSession->fGipReferenced = 1;
2821 pDevExt->cGipUsers++;
2822 if (pDevExt->cGipUsers == 1)
2823 {
2824 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2825 unsigned i;
2826
2827 LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
2828
2829 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2830 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2831 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2832
2833 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2834 AssertRC(rc); rc = VINF_SUCCESS;
2835 }
2836 }
2837 }
2838 else
2839 {
2840 rc = SUPDRV_ERR_GENERAL_FAILURE;
2841 Log(("SUPR0GipMap: GIP is not available!\n"));
2842 }
2843 RTSemFastMutexRelease(pDevExt->mtxGip);
2844
2845 /*
2846 * Write returns.
2847 */
2848 if (pHCPhysGip)
2849 *pHCPhysGip = HCPhys;
2850 if (ppGipR3)
2851 *ppGipR3 = pGip;
2852
2853#ifdef DEBUG_DARWIN_GIP
2854 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGip=%p GipMapObjR3\n", rc, (unsigned long)HCPhys, pGip, pSession->GipMapObjR3));
2855#else
2856 LogFlow(("SUPR0GipMap: returns %d *pHCPhysGip=%lx *ppGipR3=%p\n", rc, (unsigned long)HCPhys, (void *)(uintptr_t)pGip));
2857#endif
2858 return rc;
2859}
2860
2861
2862/**
2863 * Unmaps any user mapping of the GIP and terminates all GIP access
2864 * from this session.
2865 *
2866 * @returns IPRT status code.
2867 * @param pSession Session to which the GIP mapping should belong.
2868 */
SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
{
    int             rc = VINF_SUCCESS;
    PSUPDRVDEVEXT   pDevExt = pSession->pDevExt;
#ifdef DEBUG_DARWIN_GIP
    OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
                pSession,
                pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
                pSession->GipMapObjR3));
#else
    LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
#endif
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    RTSemFastMutexRequest(pDevExt->mtxGip);

    /*
     * Unmap anything?  The mapping object handle is only cleared when the
     * free succeeded.
     */
    if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
    {
        rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
        AssertRC(rc);
        if (RT_SUCCESS(rc))
            pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
    }

    /*
     * Dereference global GIP.  When the last user goes away the GIP update
     * timer is stopped.  (Skipped if the unmap above failed: !rc.)
     */
    if (pSession->fGipReferenced && !rc)
    {
        pSession->fGipReferenced = 0;
        if (    pDevExt->cGipUsers > 0
            &&  !--pDevExt->cGipUsers)
        {
            LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
            rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = 0;
        }
    }

    RTSemFastMutexRelease(pDevExt->mtxGip);

    return rc;
}
2914
2915
2916/**
2917 * Register a component factory with the support driver.
2918 *
2919 * This is currently restricted to kernel sessions only.
2920 *
2921 * @returns VBox status code.
2922 * @retval VINF_SUCCESS on success.
2923 * @retval VERR_NO_MEMORY if we're out of memory.
2924 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
2925 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
2926 * @retval VERR_INVALID_PARAMETER on invalid parameter.
2927 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
2928 *
2929 * @param pSession The SUPDRV session (must be a ring-0 session).
2930 * @param pFactory Pointer to the component factory registration structure.
2931 *
2932 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
2933 */
SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
{
    PSUPDRVFACTORYREG pNewReg;
    const char *psz;
    int rc;

    /*
     * Validate parameters.  Only kernel (ring-0) sessions may register, and
     * the factory name must be properly zero terminated within szName.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
    AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
    AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
    psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
    AssertReturn(psz, VERR_INVALID_PARAMETER);

    /*
     * Allocate and initialize a new registration structure.
     * cchName caches the name length for faster lookup later.
     */
    pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
    if (pNewReg)
    {
        pNewReg->pNext = NULL;
        pNewReg->pFactory = pFactory;
        pNewReg->pSession = pSession;
        pNewReg->cchName = psz - &pFactory->szName[0];

        /*
         * Add it to the tail of the list after checking for prior registration.
         * The duplicate check and the insert happen under the same mutex hold,
         * so no registration can sneak in between them.
         */
        rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
        if (RT_SUCCESS(rc))
        {
            PSUPDRVFACTORYREG pPrev = NULL;
            PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
            while (pCur && pCur->pFactory != pFactory)
            {
                pPrev = pCur;
                pCur = pCur->pNext;
            }
            if (!pCur)
            {
                if (pPrev)
                    pPrev->pNext = pNewReg;
                else
                    pSession->pDevExt->pComponentFactoryHead = pNewReg;
                rc = VINF_SUCCESS;
            }
            else
                rc = VERR_ALREADY_EXISTS;

            RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
        }

        /* Free the unused record on duplicate registration or mutex failure. */
        if (RT_FAILURE(rc))
            RTMemFree(pNewReg);
    }
    else
        rc = VERR_NO_MEMORY;
    return rc;
}
2995
2996
2997/**
2998 * Deregister a component factory.
2999 *
3000 * @returns VBox status code.
3001 * @retval VINF_SUCCESS on success.
3002 * @retval VERR_NOT_FOUND if the factory wasn't registered.
3003 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
3004 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3005 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3006 *
3007 * @param pSession The SUPDRV session (must be a ring-0 session).
3008 * @param pFactory Pointer to the component factory registration structure
3009 * previously passed SUPR0ComponentRegisterFactory().
3010 *
3011 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
3012 */
SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
{
    int rc;

    /*
     * Validate parameters.  Like registration, this is restricted to
     * kernel (ring-0) sessions.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
    AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
    AssertPtrReturn(pFactory, VERR_INVALID_POINTER);

    /*
     * Take the lock and look for the registration record.
     */
    rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVFACTORYREG pPrev = NULL;
        PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
        while (pCur && pCur->pFactory != pFactory)
        {
            pPrev = pCur;
            pCur = pCur->pNext;
        }
        if (pCur)
        {
            /* Unlink and scrub the record while still holding the mutex. */
            if (!pPrev)
                pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
            else
                pPrev->pNext = pCur->pNext;

            pCur->pNext = NULL;
            pCur->pFactory = NULL;
            pCur->pSession = NULL;
            rc = VINF_SUCCESS;
        }
        else
            rc = VERR_NOT_FOUND;

        RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);

        /* Free outside the lock; pCur is NULL in the not-found case and
           RTMemFree(NULL) is a no-op. */
        RTMemFree(pCur);
    }
    return rc;
}
3058
3059
3060/**
3061 * Queries a component factory.
3062 *
3063 * @returns VBox status code.
3064 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3065 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3066 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
3067 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
3068 *
3069 * @param pSession The SUPDRV session.
3070 * @param pszName The name of the component factory.
3071 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
3072 * @param ppvFactoryIf Where to store the factory interface.
3073 */
SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
{
    const char *pszEnd;
    size_t cchName;
    int rc;

    /*
     * Validate parameters.  Both strings must be zero terminated within their
     * respective maximum lengths.
     */
    AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);

    AssertPtrReturn(pszName, VERR_INVALID_POINTER);
    pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
    cchName = pszEnd - pszName;

    AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
    pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
    AssertReturn(pszEnd, VERR_INVALID_PARAMETER);

    AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
    *ppvFactoryIf = NULL;

    /*
     * Take the lock and try all factories by this name.  More than one factory
     * may carry the same name; the first one that supports the requested
     * interface wins.
     */
    rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
    if (RT_SUCCESS(rc))
    {
        PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
        rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
        while (pCur)
        {
            if (    pCur->cchName == cchName
                &&  !memcmp(pCur->pFactory->szName, pszName, cchName))
            {
#ifdef RT_WITH_W64_UNWIND_HACK
                void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
#else
                void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
#endif
                if (pvFactory)
                {
                    *ppvFactoryIf = pvFactory;
                    rc = VINF_SUCCESS;
                    break;
                }
                /* Name matched but the interface didn't; keep looking. */
                rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
            }

            /* next */
            pCur = pCur->pNext;
        }

        RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
    }
    return rc;
}
3132
3133
3134/**
3135 * Adds a memory object to the session.
3136 *
3137 * @returns IPRT status code.
3138 * @param pMem Memory tracking structure containing the
3139 * information to track.
3140 * @param pSession The session.
3141 */
static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Find a free entry in the existing bundles and record the allocation
     * there (under the session spinlock).
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
                {
                    pBundle->cUsed++;
                    pBundle->aMem[i] = *pMem;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
                    return VINF_SUCCESS;
                }
            }
            /* cUsed < capacity implied a free slot; not finding one means the
               bookkeeping is corrupt. */
            AssertFailed(); /* !!this can't be happening!!! */
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    /*
     * Need to allocate a new bundle.  The spinlock must be dropped for the
     * allocation (RTMemAllocZ may block), so the record goes straight into
     * the new bundle's last entry before it is linked in.
     * Insert into the last entry in the bundle.
     */
    pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
    if (!pBundle)
        return VERR_NO_MEMORY;

    /* take last entry. */
    pBundle->cUsed++;
    pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;

    /* insert into list (head insert after the embedded session bundle). */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    pBundle->pNext = pSession->Bundle.pNext;
    pSession->Bundle.pNext = pBundle;
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

    return VINF_SUCCESS;
}
3191
3192
3193/**
3194 * Releases a memory object referenced by pointer and type.
3195 *
3196 * @returns IPRT status code.
3197 * @param pSession Session data.
3198 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
3199 * @param eType Memory type.
3200 */
static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
{
    PSUPDRVBUNDLE pBundle;
    RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;

    /*
     * Validate input.
     */
    if (!uPtr)
    {
        Log(("Illegal address %p\n", (void *)uPtr));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Search the session's bundles for an entry of the right type whose ring-0
     * address or ring-3 mapping address matches uPtr.
     */
    RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
    for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
    {
        if (pBundle->cUsed > 0)
        {
            unsigned i;
            for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
            {
                if (    pBundle->aMem[i].eType == eType
                    &&  pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
                    &&  (   (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
                         || (   pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
                             && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
                    )
                {
                    /* Make a copy of it and release it outside the spinlock
                       (RTR0MemObjFree may block, which is illegal while the
                       spinlock is held). */
                    SUPDRVMEMREF Mem = pBundle->aMem[i];
                    pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
                    pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
                    pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
                    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);

                    if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MapObjR3, false);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    if (Mem.MemObj != NIL_RTR0MEMOBJ)
                    {
                        int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
                        AssertRC(rc); /** @todo figure out how to handle this. */
                    }
                    return VINF_SUCCESS;
                }
            }
        }
    }
    RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
    Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
    return VERR_INVALID_PARAMETER;
}
3259
3260
3261/**
3262 * Opens an image. If it's the first time it's opened the call must upload
3263 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3264 *
3265 * This is the 1st step of the loading.
3266 *
3267 * @returns IPRT status code.
3268 * @param pDevExt Device globals.
3269 * @param pSession Session data.
3270 * @param pReq The open request.
3271 */
static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
{
    PSUPDRVLDRIMAGE pImage;
    unsigned cb;
    void *pv;
    LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));

    /*
     * Check if we got an instance of the image already.  If so, just bump the
     * usage counts and tell the caller whether the bits still need uploading.
     * NOTE(review): szName and cbImage are presumably validated by the IOCtl
     * dispatcher before we get here - confirm against the caller.
     */
    RTSemFastMutexRequest(pDevExt->mtxLdr);
    for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
    {
        if (!strcmp(pImage->szName, pReq->u.In.szName))
        {
            pImage->cUsage++;
            pReq->u.Out.pvImageBase = pImage->pvImage;
            pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
            supdrvLdrAddUsage(pSession, pImage);
            RTSemFastMutexRelease(pDevExt->mtxLdr);
            return VINF_SUCCESS;
        }
    }
    /* (not found - add it!) */

    /*
     * Allocate memory.  Header + image bits in one executable block; the +31
     * leaves room for aligning the image portion to 32 bytes below.
     */
    cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
    pv = RTMemExecAlloc(cb);
    if (!pv)
    {
        RTSemFastMutexRelease(pDevExt->mtxLdr);
        Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
        return VERR_NO_MEMORY;
    }

    /*
     * Setup and link in the LDR stuff.  The image starts in the OPEN state;
     * supdrvIOCtl_LdrLoad moves it to LOAD once the bits are uploaded.
     */
    pImage = (PSUPDRVLDRIMAGE)pv;
    pImage->pvImage = RT_ALIGN_P(pImage + 1, 32);
    pImage->cbImage = pReq->u.In.cbImage;
    pImage->pfnModuleInit = NULL;
    pImage->pfnModuleTerm = NULL;
    pImage->pfnServiceReqHandler = NULL;
    pImage->uState = SUP_IOCTL_LDR_OPEN;
    pImage->cUsage = 1;
    strcpy(pImage->szName, pReq->u.In.szName);

    pImage->pNext = pDevExt->pLdrImages;
    pDevExt->pLdrImages = pImage;

    supdrvLdrAddUsage(pSession, pImage);

    pReq->u.Out.pvImageBase = pImage->pvImage;
    pReq->u.Out.fNeedsLoading = true;
    RTSemFastMutexRelease(pDevExt->mtxLdr);

#if defined(RT_OS_WINDOWS) && defined(DEBUG)
    SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
#endif
    return VINF_SUCCESS;
}
3336
3337
3338/**
3339 * Loads the image bits.
3340 *
3341 * This is the 2nd step of the loading.
3342 *
3343 * @returns IPRT status code.
3344 * @param pDevExt Device globals.
3345 * @param pSession Session data.
3346 * @param pReq The request.
3347 */
3348static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3349{
3350 PSUPDRVLDRUSAGE pUsage;
3351 PSUPDRVLDRIMAGE pImage;
3352 int rc;
3353 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
3354
3355 /*
3356 * Find the ldr image.
3357 */
3358 RTSemFastMutexRequest(pDevExt->mtxLdr);
3359 pUsage = pSession->pLdrUsage;
3360 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3361 pUsage = pUsage->pNext;
3362 if (!pUsage)
3363 {
3364 RTSemFastMutexRelease(pDevExt->mtxLdr);
3365 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3366 return VERR_INVALID_HANDLE;
3367 }
3368 pImage = pUsage->pImage;
3369 if (pImage->cbImage != pReq->u.In.cbImage)
3370 {
3371 RTSemFastMutexRelease(pDevExt->mtxLdr);
3372 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
3373 return VERR_INVALID_HANDLE;
3374 }
3375 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3376 {
3377 unsigned uState = pImage->uState;
3378 RTSemFastMutexRelease(pDevExt->mtxLdr);
3379 if (uState != SUP_IOCTL_LDR_LOAD)
3380 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3381 return SUPDRV_ERR_ALREADY_LOADED;
3382 }
3383 switch (pReq->u.In.eEPType)
3384 {
3385 case SUPLDRLOADEP_NOTHING:
3386 break;
3387
3388 case SUPLDRLOADEP_VMMR0:
3389 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
3390 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
3391 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
3392 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
3393 {
3394 RTSemFastMutexRelease(pDevExt->mtxLdr);
3395 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
3396 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3397 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3398 return VERR_INVALID_PARAMETER;
3399 }
3400 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
3401 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3402 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3403 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3404 {
3405 RTSemFastMutexRelease(pDevExt->mtxLdr);
3406 Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3407 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3408 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3409 return VERR_INVALID_PARAMETER;
3410 }
3411 break;
3412
3413 case SUPLDRLOADEP_SERVICE:
3414 if (!pReq->u.In.EP.Service.pfnServiceReq)
3415 {
3416 RTSemFastMutexRelease(pDevExt->mtxLdr);
3417 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
3418 return VERR_INVALID_PARAMETER;
3419 }
3420 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3421 {
3422 RTSemFastMutexRelease(pDevExt->mtxLdr);
3423 Log(("Out of range (%p LB %#x): pfnServiceReq=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p is NULL!\n",
3424 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
3425 return VERR_INVALID_PARAMETER;
3426 }
3427 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3428 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3429 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3430 {
3431 RTSemFastMutexRelease(pDevExt->mtxLdr);
3432 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3433 pImage->pvImage, pReq->u.In.cbImage,
3434 pReq->u.In.EP.Service.apvReserved[0],
3435 pReq->u.In.EP.Service.apvReserved[1],
3436 pReq->u.In.EP.Service.apvReserved[2]));
3437 return VERR_INVALID_PARAMETER;
3438 }
3439 break;
3440
3441 default:
3442 RTSemFastMutexRelease(pDevExt->mtxLdr);
3443 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3444 return VERR_INVALID_PARAMETER;
3445 }
3446 if ( pReq->u.In.pfnModuleInit
3447 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3448 {
3449 RTSemFastMutexRelease(pDevExt->mtxLdr);
3450 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3451 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
3452 return VERR_INVALID_PARAMETER;
3453 }
3454 if ( pReq->u.In.pfnModuleTerm
3455 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3456 {
3457 RTSemFastMutexRelease(pDevExt->mtxLdr);
3458 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3459 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
3460 return VERR_INVALID_PARAMETER;
3461 }
3462
3463 /*
3464 * Copy the memory.
3465 */
3466 /* no need to do try/except as this is a buffered request. */
3467 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
3468 pImage->uState = SUP_IOCTL_LDR_LOAD;
3469 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3470 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3471 pImage->offSymbols = pReq->u.In.offSymbols;
3472 pImage->cSymbols = pReq->u.In.cSymbols;
3473 pImage->offStrTab = pReq->u.In.offStrTab;
3474 pImage->cbStrTab = pReq->u.In.cbStrTab;
3475
3476 /*
3477 * Update any entry points.
3478 */
3479 switch (pReq->u.In.eEPType)
3480 {
3481 default:
3482 case SUPLDRLOADEP_NOTHING:
3483 rc = VINF_SUCCESS;
3484 break;
3485 case SUPLDRLOADEP_VMMR0:
3486 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3487 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3488 break;
3489 case SUPLDRLOADEP_SERVICE:
3490 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3491 rc = VINF_SUCCESS;
3492 break;
3493 }
3494
3495 /*
3496 * On success call the module initialization.
3497 */
3498 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3499 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3500 {
3501 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3502#ifdef RT_WITH_W64_UNWIND_HACK
3503 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
3504#else
3505 rc = pImage->pfnModuleInit();
3506#endif
3507 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3508 supdrvLdrUnsetVMMR0EPs(pDevExt);
3509 }
3510
3511 if (rc)
3512 pImage->uState = SUP_IOCTL_LDR_OPEN;
3513
3514 RTSemFastMutexRelease(pDevExt->mtxLdr);
3515 return rc;
3516}
3517
3518
3519/**
3520 * Frees a previously loaded (prep'ed) image.
3521 *
3522 * @returns IPRT status code.
3523 * @param pDevExt Device globals.
3524 * @param pSession Session data.
3525 * @param pReq The request.
3526 */
3527static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
3528{
3529 int rc;
3530 PSUPDRVLDRUSAGE pUsagePrev;
3531 PSUPDRVLDRUSAGE pUsage;
3532 PSUPDRVLDRIMAGE pImage;
3533 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
3534
3535 /*
3536 * Find the ldr image.
3537 */
3538 RTSemFastMutexRequest(pDevExt->mtxLdr);
3539 pUsagePrev = NULL;
3540 pUsage = pSession->pLdrUsage;
3541 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3542 {
3543 pUsagePrev = pUsage;
3544 pUsage = pUsage->pNext;
3545 }
3546 if (!pUsage)
3547 {
3548 RTSemFastMutexRelease(pDevExt->mtxLdr);
3549 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3550 return VERR_INVALID_HANDLE;
3551 }
3552
3553 /*
3554 * Check if we can remove anything.
3555 */
3556 rc = VINF_SUCCESS;
3557 pImage = pUsage->pImage;
3558 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3559 {
3560 /*
3561 * Check if there are any objects with destructors in the image, if
3562 * so leave it for the session cleanup routine so we get a chance to
3563 * clean things up in the right order and not leave them all dangling.
3564 */
3565 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3566 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3567 if (pImage->cUsage <= 1)
3568 {
3569 PSUPDRVOBJ pObj;
3570 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3571 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3572 {
3573 rc = VERR_SHARING_VIOLATION; /** @todo VERR_DANGLING_OBJECTS */
3574 break;
3575 }
3576 }
3577 else
3578 {
3579 PSUPDRVUSAGE pGenUsage;
3580 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
3581 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3582 {
3583 rc = VERR_SHARING_VIOLATION; /** @todo VERR_DANGLING_OBJECTS */
3584 break;
3585 }
3586 }
3587 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3588 if (rc == VINF_SUCCESS)
3589 {
3590 /* unlink it */
3591 if (pUsagePrev)
3592 pUsagePrev->pNext = pUsage->pNext;
3593 else
3594 pSession->pLdrUsage = pUsage->pNext;
3595
3596 /* free it */
3597 pUsage->pImage = NULL;
3598 pUsage->pNext = NULL;
3599 RTMemFree(pUsage);
3600
3601 /*
3602 * Derefrence the image.
3603 */
3604 if (pImage->cUsage <= 1)
3605 supdrvLdrFree(pDevExt, pImage);
3606 else
3607 pImage->cUsage--;
3608 }
3609 else
3610 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
3611 }
3612 else
3613 {
3614 /*
3615 * Dereference both image and usage.
3616 */
3617 pImage->cUsage--;
3618 pUsage->cUsage--;
3619 }
3620
3621 RTSemFastMutexRelease(pDevExt->mtxLdr);
3622 return VINF_SUCCESS;
3623}
3624
3625
3626/**
3627 * Gets the address of a symbol in an open image.
3628 *
3629 * @returns 0 on success.
3630 * @returns SUPDRV_ERR_* on failure.
3631 * @param pDevExt Device globals.
3632 * @param pSession Session data.
3633 * @param pReq The request buffer.
3634 */
3635static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3636{
3637 PSUPDRVLDRIMAGE pImage;
3638 PSUPDRVLDRUSAGE pUsage;
3639 uint32_t i;
3640 PSUPLDRSYM paSyms;
3641 const char *pchStrings;
3642 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3643 void *pvSymbol = NULL;
3644 int rc = VERR_GENERAL_FAILURE;
3645 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3646
3647 /*
3648 * Find the ldr image.
3649 */
3650 RTSemFastMutexRequest(pDevExt->mtxLdr);
3651 pUsage = pSession->pLdrUsage;
3652 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3653 pUsage = pUsage->pNext;
3654 if (!pUsage)
3655 {
3656 RTSemFastMutexRelease(pDevExt->mtxLdr);
3657 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3658 return VERR_INVALID_HANDLE;
3659 }
3660 pImage = pUsage->pImage;
3661 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3662 {
3663 unsigned uState = pImage->uState;
3664 RTSemFastMutexRelease(pDevExt->mtxLdr);
3665 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3666 return VERR_ALREADY_LOADED;
3667 }
3668
3669 /*
3670 * Search the symbol strings.
3671 */
3672 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3673 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3674 for (i = 0; i < pImage->cSymbols; i++)
3675 {
3676 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3677 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3678 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3679 {
3680 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3681 rc = VINF_SUCCESS;
3682 break;
3683 }
3684 }
3685 RTSemFastMutexRelease(pDevExt->mtxLdr);
3686 pReq->u.Out.pvSymbol = pvSymbol;
3687 return rc;
3688}
3689
3690
3691/**
3692 * Gets the address of a symbol in an open image or the support driver.
3693 *
3694 * @returns VINF_SUCCESS on success.
3695 * @returns
3696 * @param pDevExt Device globals.
3697 * @param pSession Session data.
3698 * @param pReq The request buffer.
3699 */
3700static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
3701{
3702 int rc = VINF_SUCCESS;
3703 const char *pszSymbol = pReq->u.In.pszSymbol;
3704 const char *pszModule = pReq->u.In.pszModule;
3705 size_t cbSymbol;
3706 char const *pszEnd;
3707 uint32_t i;
3708
3709 /*
3710 * Input validation.
3711 */
3712 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
3713 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
3714 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3715 cbSymbol = pszEnd - pszSymbol + 1;
3716
3717 if (pszModule)
3718 {
3719 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
3720 pszEnd = (char *)memchr(pszModule, '\0', 64);
3721 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3722 }
3723 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
3724
3725
3726 if ( !pszModule
3727 || !strcmp(pszModule, "SupDrv"))
3728 {
3729 /*
3730 * Search the support driver export table.
3731 */
3732 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
3733 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
3734 {
3735 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
3736 break;
3737 }
3738 }
3739 else
3740 {
3741 /*
3742 * Find the loader image.
3743 */
3744 PSUPDRVLDRIMAGE pImage;
3745
3746 RTSemFastMutexRequest(pDevExt->mtxLdr);
3747
3748 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3749 if (!strcmp(pImage->szName, pszModule))
3750 break;
3751 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
3752 {
3753 /*
3754 * Search the symbol strings.
3755 */
3756 const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3757 PCSUPLDRSYM paSyms = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3758 for (i = 0; i < pImage->cSymbols; i++)
3759 {
3760 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3761 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3762 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
3763 {
3764 /*
3765 * Found it! Calc the symbol address and add a reference to the module.
3766 */
3767 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
3768 rc = supdrvLdrAddUsage(pSession, pImage);
3769 break;
3770 }
3771 }
3772 }
3773 else
3774 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
3775
3776 RTSemFastMutexRelease(pDevExt->mtxLdr);
3777 }
3778 return rc;
3779}
3780
3781
3782/**
3783 * Updates the VMMR0 entry point pointers.
3784 *
3785 * @returns IPRT status code.
3786 * @param pDevExt Device globals.
3787 * @param pSession Session data.
3788 * @param pVMMR0 VMMR0 image handle.
3789 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3790 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3791 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3792 * @remark Caller must own the loader mutex.
3793 */
3794static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3795{
3796 int rc = VINF_SUCCESS;
3797 LogFlow(("supdrvLdrSetR0EP pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3798
3799
3800 /*
3801 * Check if not yet set.
3802 */
3803 if (!pDevExt->pvVMMR0)
3804 {
3805 pDevExt->pvVMMR0 = pvVMMR0;
3806 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3807 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3808 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3809 }
3810 else
3811 {
3812 /*
3813 * Return failure or success depending on whether the values match or not.
3814 */
3815 if ( pDevExt->pvVMMR0 != pvVMMR0
3816 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3817 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3818 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3819 {
3820 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3821 rc = VERR_INVALID_PARAMETER;
3822 }
3823 }
3824 return rc;
3825}
3826
3827
3828/**
3829 * Unsets the VMMR0 entry point installed by supdrvLdrSetR0EP.
3830 *
3831 * @param pDevExt Device globals.
3832 */
3833static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
3834{
3835 pDevExt->pvVMMR0 = NULL;
3836 pDevExt->pfnVMMR0EntryInt = NULL;
3837 pDevExt->pfnVMMR0EntryFast = NULL;
3838 pDevExt->pfnVMMR0EntryEx = NULL;
3839}
3840
3841
3842/**
3843 * Adds a usage reference in the specified session of an image.
3844 *
3845 * Called while owning the loader semaphore.
3846 *
3847 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
3848 * @param pSession Session in question.
3849 * @param pImage Image which the session is using.
3850 */
3851static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
3852{
3853 PSUPDRVLDRUSAGE pUsage;
3854 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
3855
3856 /*
3857 * Referenced it already?
3858 */
3859 pUsage = pSession->pLdrUsage;
3860 while (pUsage)
3861 {
3862 if (pUsage->pImage == pImage)
3863 {
3864 pUsage->cUsage++;
3865 return VINF_SUCCESS;
3866 }
3867 pUsage = pUsage->pNext;
3868 }
3869
3870 /*
3871 * Allocate new usage record.
3872 */
3873 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
3874 AssertReturn(pUsage, VERR_NO_MEMORY);
3875 pUsage->cUsage = 1;
3876 pUsage->pImage = pImage;
3877 pUsage->pNext = pSession->pLdrUsage;
3878 pSession->pLdrUsage = pUsage;
3879 return VINF_SUCCESS;
3880}
3881
3882
3883/**
3884 * Frees a load image.
3885 *
3886 * @param pDevExt Pointer to device extension.
3887 * @param pImage Pointer to the image we're gonna free.
3888 * This image must exit!
3889 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
3890 */
3891static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
3892{
3893 PSUPDRVLDRIMAGE pImagePrev;
3894 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
3895
3896 /* find it - arg. should've used doubly linked list. */
3897 Assert(pDevExt->pLdrImages);
3898 pImagePrev = NULL;
3899 if (pDevExt->pLdrImages != pImage)
3900 {
3901 pImagePrev = pDevExt->pLdrImages;
3902 while (pImagePrev->pNext != pImage)
3903 pImagePrev = pImagePrev->pNext;
3904 Assert(pImagePrev->pNext == pImage);
3905 }
3906
3907 /* unlink */
3908 if (pImagePrev)
3909 pImagePrev->pNext = pImage->pNext;
3910 else
3911 pDevExt->pLdrImages = pImage->pNext;
3912
3913 /* check if this is VMMR0.r0 unset its entry point pointers. */
3914 if (pDevExt->pvVMMR0 == pImage->pvImage)
3915 supdrvLdrUnsetVMMR0EPs(pDevExt);
3916
3917 /* check for objects with destructors in this image. (Shouldn't happen.) */
3918 if (pDevExt->pObjs)
3919 {
3920 unsigned cObjs = 0;
3921 PSUPDRVOBJ pObj;
3922 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3923 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3924 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3925 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3926 {
3927 pObj->pfnDestructor = NULL;
3928 cObjs++;
3929 }
3930 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3931 if (cObjs)
3932 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
3933 }
3934
3935 /* call termination function if fully loaded. */
3936 if ( pImage->pfnModuleTerm
3937 && pImage->uState == SUP_IOCTL_LDR_LOAD)
3938 {
3939 LogFlow(("supdrvIOCtl_LdrLoad: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
3940#ifdef RT_WITH_W64_UNWIND_HACK
3941 supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
3942#else
3943 pImage->pfnModuleTerm();
3944#endif
3945 }
3946
3947 /* free the image */
3948 pImage->cUsage = 0;
3949 pImage->pNext = 0;
3950 pImage->uState = SUP_IOCTL_LDR_FREE;
3951 RTMemExecFree(pImage);
3952}
3953
3954
3955/**
3956 * Implements the service call request.
3957 *
3958 * @returns VBox status code.
3959 * @param pDevExt The device extension.
3960 * @param pSession The calling session.
3961 * @param pReq The request packet, valid.
3962 */
3963static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
3964{
3965#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
3966 int rc;
3967
3968 /*
3969 * Find the module first in the module referenced by the calling session.
3970 */
3971 rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
3972 if (RT_SUCCESS(rc))
3973 {
3974 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
3975 PSUPDRVLDRUSAGE pUsage;
3976
3977 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
3978 if ( pUsage->pImage->pfnServiceReqHandler
3979 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
3980 {
3981 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
3982 break;
3983 }
3984 RTSemFastMutexRelease(pDevExt->mtxLdr);
3985
3986 if (pfnServiceReqHandler)
3987 {
3988 /*
3989 * Call it.
3990 */
3991 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
3992#ifdef RT_WITH_W64_UNWIND_HACK
3993 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
3994#else
3995 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
3996#endif
3997 else
3998#ifdef RT_WITH_W64_UNWIND_HACK
3999 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
4000 pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4001#else
4002 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4003#endif
4004 }
4005 else
4006 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
4007 }
4008
4009 /* log it */
4010 if ( RT_FAILURE(rc)
4011 && rc != VERR_INTERRUPTED
4012 && rc != VERR_TIMEOUT)
4013 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4014 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4015 else
4016 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4017 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4018 return rc;
4019#else /* RT_OS_WINDOWS && !DEBUG */
4020 return VERR_NOT_IMPLEMENTED;
4021#endif /* RT_OS_WINDOWS && !DEBUG */
4022}
4023
4024
4025/**
4026 * Gets the paging mode of the current CPU.
4027 *
4028 * @returns Paging mode, SUPPAGEINGMODE_INVALID on error.
4029 */
4030SUPR0DECL(SUPPAGINGMODE) SUPR0GetPagingMode(void)
4031{
4032 SUPPAGINGMODE enmMode;
4033
4034 RTR0UINTREG cr0 = ASMGetCR0();
4035 if ((cr0 & (X86_CR0_PG | X86_CR0_PE)) != (X86_CR0_PG | X86_CR0_PE))
4036 enmMode = SUPPAGINGMODE_INVALID;
4037 else
4038 {
4039 RTR0UINTREG cr4 = ASMGetCR4();
4040 uint32_t fNXEPlusLMA = 0;
4041 if (cr4 & X86_CR4_PAE)
4042 {
4043 uint32_t fAmdFeatures = ASMCpuId_EDX(0x80000001);
4044 if (fAmdFeatures & (X86_CPUID_AMD_FEATURE_EDX_NX | X86_CPUID_AMD_FEATURE_EDX_LONG_MODE))
4045 {
4046 uint64_t efer = ASMRdMsr(MSR_K6_EFER);
4047 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_NX) && (efer & MSR_K6_EFER_NXE))
4048 fNXEPlusLMA |= RT_BIT(0);
4049 if ((fAmdFeatures & X86_CPUID_AMD_FEATURE_EDX_LONG_MODE) && (efer & MSR_K6_EFER_LMA))
4050 fNXEPlusLMA |= RT_BIT(1);
4051 }
4052 }
4053
4054 switch ((cr4 & (X86_CR4_PAE | X86_CR4_PGE)) | fNXEPlusLMA)
4055 {
4056 case 0:
4057 enmMode = SUPPAGINGMODE_32_BIT;
4058 break;
4059
4060 case X86_CR4_PGE:
4061 enmMode = SUPPAGINGMODE_32_BIT_GLOBAL;
4062 break;
4063
4064 case X86_CR4_PAE:
4065 enmMode = SUPPAGINGMODE_PAE;
4066 break;
4067
4068 case X86_CR4_PAE | RT_BIT(0):
4069 enmMode = SUPPAGINGMODE_PAE_NX;
4070 break;
4071
4072 case X86_CR4_PAE | X86_CR4_PGE:
4073 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4074 break;
4075
4076 case X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4077 enmMode = SUPPAGINGMODE_PAE_GLOBAL;
4078 break;
4079
4080 case RT_BIT(1) | X86_CR4_PAE:
4081 enmMode = SUPPAGINGMODE_AMD64;
4082 break;
4083
4084 case RT_BIT(1) | X86_CR4_PAE | RT_BIT(0):
4085 enmMode = SUPPAGINGMODE_AMD64_NX;
4086 break;
4087
4088 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE:
4089 enmMode = SUPPAGINGMODE_AMD64_GLOBAL;
4090 break;
4091
4092 case RT_BIT(1) | X86_CR4_PAE | X86_CR4_PGE | RT_BIT(0):
4093 enmMode = SUPPAGINGMODE_AMD64_GLOBAL_NX;
4094 break;
4095
4096 default:
4097 AssertMsgFailed(("Cannot happen! cr4=%#x fNXEPlusLMA=%d\n", cr4, fNXEPlusLMA));
4098 enmMode = SUPPAGINGMODE_INVALID;
4099 break;
4100 }
4101 }
4102 return enmMode;
4103}
4104
4105
4106/**
4107 * Enables or disabled hardware virtualization extensions using native OS APIs.
4108 *
4109 * @returns VBox status code.
4110 * @retval VINF_SUCCESS on success.
4111 * @retval VERR_NOT_SUPPORTED if not supported by the native OS.
4112 *
4113 * @param fEnable Whether to enable or disable.
4114 */
4115SUPR0DECL(int) SUPR0EnableVTx(bool fEnable)
4116{
4117#ifdef RT_OS_DARWIN
4118 return supdrvOSEnableVTx(fEnable);
4119#else
4120 return VERR_NOT_SUPPORTED;
4121#endif
4122}
4123
4124
4125/**
4126 * Creates the GIP.
4127 *
4128 * @returns VBox status code.
4129 * @param pDevExt Instance data. GIP stuff may be updated.
4130 */
4131static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
4132{
4133 PSUPGLOBALINFOPAGE pGip;
4134 RTHCPHYS HCPhysGip;
4135 uint32_t u32SystemResolution;
4136 uint32_t u32Interval;
4137 int rc;
4138
4139 LogFlow(("supdrvGipCreate:\n"));
4140
4141 /* assert order */
4142 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
4143 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
4144 Assert(!pDevExt->pGipTimer);
4145
4146 /*
4147 * Allocate a suitable page with a default kernel mapping.
4148 */
4149 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
4150 if (RT_FAILURE(rc))
4151 {
4152 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
4153 return rc;
4154 }
4155 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
4156 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
4157
4158#if 0 /** @todo Disabled this as we didn't used to do it before and causes unnecessary stress on laptops.
4159 * It only applies to Windows and should probably revisited later, if possible made part of the
4160 * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
4161 /*
4162 * Try bump up the system timer resolution.
4163 * The more interrupts the better...
4164 */
4165 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 488281 /* 2048 HZ */, &u32SystemResolution))
4166 || RT_SUCCESS(RTTimerRequestSystemGranularity( 500000 /* 2000 HZ */, &u32SystemResolution))
4167 || RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
4168 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
4169 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /* 512 HZ */, &u32SystemResolution))
4170 || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /* 500 HZ */, &u32SystemResolution))
4171 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
4172 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
4173 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
4174 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
4175 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
4176 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
4177 )
4178 {
4179 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
4180 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
4181 }
4182#endif
4183
4184 /*
4185 * Find a reasonable update interval and initialize the structure.
4186 */
4187 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
4188 while (u32Interval < 10000000 /* 10 ms */)
4189 u32Interval += u32SystemResolution;
4190
4191 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
4192
4193 /*
4194 * Create the timer.
4195 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
4196 */
4197 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4198 {
4199 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
4200 if (rc == VERR_NOT_SUPPORTED)
4201 {
4202 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
4203 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
4204 }
4205 }
4206 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4207 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
4208 if (RT_SUCCESS(rc))
4209 {
4210 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4211 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
4212 if (RT_SUCCESS(rc))
4213 {
4214 /*
4215 * We're good.
4216 */
4217 dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
4218 return VINF_SUCCESS;
4219 }
4220
4221 OSDBGPRINT(("supdrvGipCreate: failed register MP event notfication. rc=%d\n", rc));
4222 }
4223 else
4224 {
4225 OSDBGPRINT(("supdrvGipCreate: failed create GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
4226 Assert(!pDevExt->pGipTimer);
4227 }
4228 supdrvGipDestroy(pDevExt);
4229 return rc;
4230}
4231
4232
4233/**
4234 * Terminates the GIP.
4235 *
4236 * @param pDevExt Instance data. GIP stuff may be updated.
4237 */
4238static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
4239{
4240 int rc;
4241#ifdef DEBUG_DARWIN_GIP
4242 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
4243 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
4244 pDevExt->pGipTimer, pDevExt->GipMemObj));
4245#endif
4246
4247 /*
4248 * Invalid the GIP data.
4249 */
4250 if (pDevExt->pGip)
4251 {
4252 supdrvGipTerm(pDevExt->pGip);
4253 pDevExt->pGip = NULL;
4254 }
4255
4256 /*
4257 * Destroy the timer and free the GIP memory object.
4258 */
4259 if (pDevExt->pGipTimer)
4260 {
4261 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4262 pDevExt->pGipTimer = NULL;
4263 }
4264
4265 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4266 {
4267 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4268 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4269 }
4270
4271 /*
4272 * Finally, release the system timer resolution request if one succeeded.
4273 */
4274 if (pDevExt->u32SystemTimerGranularityGrant)
4275 {
4276 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4277 pDevExt->u32SystemTimerGranularityGrant = 0;
4278 }
4279}
4280
4281
4282/**
4283 * Timer callback function sync GIP mode.
4284 * @param pTimer The timer.
4285 * @param pvUser The device extension.
4286 */
4287static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4288{
4289 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4290 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4291
4292 supdrvGipUpdate(pDevExt->pGip, RTTimeSystemNanoTS());
4293
4294 ASMSetFlags(fOldFlags);
4295}
4296
4297
4298/**
4299 * Timer callback function for async GIP mode.
4300 * @param pTimer The timer.
4301 * @param pvUser The device extension.
4302 */
4303static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4304{
4305 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4306 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4307 RTCPUID idCpu = RTMpCpuId();
4308 uint64_t NanoTS = RTTimeSystemNanoTS();
4309
4310 /** @todo reset the transaction number and whatnot when iTick == 1. */
4311 if (pDevExt->idGipMaster == idCpu)
4312 supdrvGipUpdate(pDevExt->pGip, NanoTS);
4313 else
4314 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, ASMGetApicId());
4315
4316 ASMSetFlags(fOldFlags);
4317}
4318
4319
4320/**
4321 * Multiprocessor event notification callback.
4322 *
4323 * This is used to make sue that the GIP master gets passed on to
4324 * another CPU.
4325 *
4326 * @param enmEvent The event.
4327 * @param idCpu The cpu it applies to.
4328 * @param pvUser Pointer to the device extension.
4329 */
4330static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
4331{
4332 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4333 if (enmEvent == RTMPEVENT_OFFLINE)
4334 {
4335 RTCPUID idGipMaster;
4336 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
4337 if (idGipMaster == idCpu)
4338 {
4339 /*
4340 * Find a new GIP master.
4341 */
4342 bool fIgnored;
4343 unsigned i;
4344 RTCPUID idNewGipMaster = NIL_RTCPUID;
4345 RTCPUSET OnlineCpus;
4346 RTMpGetOnlineSet(&OnlineCpus);
4347
4348 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
4349 {
4350 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
4351 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
4352 && idCurCpu != idGipMaster)
4353 {
4354 idNewGipMaster = idCurCpu;
4355 break;
4356 }
4357 }
4358
4359 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
4360 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
4361 NOREF(fIgnored);
4362 }
4363 }
4364}
4365
4366
4367/**
4368 * Initializes the GIP data.
4369 *
4370 * @returns IPRT status code.
4371 * @param pDevExt Pointer to the device instance data.
4372 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4373 * @param HCPhys The physical address of the GIP.
4374 * @param u64NanoTS The current nanosecond timestamp.
4375 * @param uUpdateHz The update freqence.
4376 */
4377int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4378{
4379 unsigned i;
4380#ifdef DEBUG_DARWIN_GIP
4381 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4382#else
4383 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4384#endif
4385
4386 /*
4387 * Initialize the structure.
4388 */
4389 memset(pGip, 0, PAGE_SIZE);
4390 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4391 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4392 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
4393 pGip->u32UpdateHz = uUpdateHz;
4394 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4395 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4396
4397 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4398 {
4399 pGip->aCPUs[i].u32TransactionId = 2;
4400 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4401 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4402
4403 /*
4404 * We don't know the following values until we've executed updates.
4405 * So, we'll just insert very high values.
4406 */
4407 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4408 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4409 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4410 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4411 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4412 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4413 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4414 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4415 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4416 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4417 }
4418
4419 /*
4420 * Link it to the device extension.
4421 */
4422 pDevExt->pGip = pGip;
4423 pDevExt->HCPhysGip = HCPhys;
4424 pDevExt->cGipUsers = 0;
4425
4426 return VINF_SUCCESS;
4427}
4428
4429
4430/**
4431 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
4432 *
4433 * @param idCpu Ignored.
4434 * @param pvUser1 Where to put the TSC.
4435 * @param pvUser2 Ignored.
4436 */
4437static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4438{
4439#if 1
4440 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4441#else
4442 *(uint64_t *)pvUser1 = ASMReadTSC();
4443#endif
4444}
4445
4446
4447/**
4448 * Determine if Async GIP mode is required because of TSC drift.
4449 *
4450 * When using the default/normal timer code it is essential that the time stamp counter
4451 * (TSC) runs never backwards, that is, a read operation to the counter should return
4452 * a bigger value than any previous read operation. This is guaranteed by the latest
4453 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
4454 * case we have to choose the asynchronous timer mode.
4455 *
4456 * @param poffMin Pointer to the determined difference between different cores.
4457 * @return false if the time stamp counters appear to be synchron, true otherwise.
4458 */
4459bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
4460{
4461 /*
4462 * Just iterate all the cpus 8 times and make sure that the TSC is
4463 * ever increasing. We don't bother taking TSC rollover into account.
4464 */
4465 RTCPUSET CpuSet;
4466 int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
4467 int iCpu;
4468 int cLoops = 8;
4469 bool fAsync = false;
4470 int rc = VINF_SUCCESS;
4471 uint64_t offMax = 0;
4472 uint64_t offMin = ~(uint64_t)0;
4473 uint64_t PrevTsc = ASMReadTSC();
4474
4475 while (cLoops-- > 0)
4476 {
4477 for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
4478 {
4479 uint64_t CurTsc;
4480 rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
4481 if (RT_SUCCESS(rc))
4482 {
4483 if (CurTsc <= PrevTsc)
4484 {
4485 fAsync = true;
4486 offMin = offMax = PrevTsc - CurTsc;
4487 dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
4488 iCpu, cLoops, CurTsc, PrevTsc));
4489 break;
4490 }
4491
4492 /* Gather statistics (except the first time). */
4493 if (iCpu != 0 || cLoops != 7)
4494 {
4495 uint64_t off = CurTsc - PrevTsc;
4496 if (off < offMin)
4497 offMin = off;
4498 if (off > offMax)
4499 offMax = off;
4500 dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
4501 }
4502
4503 /* Next */
4504 PrevTsc = CurTsc;
4505 }
4506 else if (rc == VERR_NOT_SUPPORTED)
4507 break;
4508 else
4509 AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
4510 }
4511
4512 /* broke out of the loop. */
4513 if (iCpu <= iLastCpu)
4514 break;
4515 }
4516
4517 *poffMin = offMin; /* Almost RTMpOnSpecific profiling. */
4518 dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
4519 fAsync, iLastCpu, rc, offMin, offMax));
4520#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
4521 OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
4522#endif
4523 return fAsync;
4524}
4525
4526
4527/**
4528 * Determin the GIP TSC mode.
4529 *
4530 * @returns The most suitable TSC mode.
4531 * @param pDevExt Pointer to the device instance data.
4532 */
4533static SUPGIPMODE supdrvGipDeterminTscMode(PSUPDRVDEVEXT pDevExt)
4534{
4535 /*
4536 * On SMP we're faced with two problems:
4537 * (1) There might be a skew between the CPU, so that cpu0
4538 * returns a TSC that is sligtly different from cpu1.
4539 * (2) Power management (and other things) may cause the TSC
4540 * to run at a non-constant speed, and cause the speed
4541 * to be different on the cpus. This will result in (1).
4542 *
4543 * So, on SMP systems we'll have to select the ASYNC update method
4544 * if there are symphoms of these problems.
4545 */
4546 if (RTMpGetCount() > 1)
4547 {
4548 uint32_t uEAX, uEBX, uECX, uEDX;
4549 uint64_t u64DiffCoresIgnored;
4550
4551 /* Permit the user and/or the OS specfic bits to force async mode. */
4552 if (supdrvOSGetForcedAsyncTscMode(pDevExt))
4553 return SUPGIPMODE_ASYNC_TSC;
4554
4555 /* Try check for current differences between the cpus. */
4556 if (supdrvDetermineAsyncTsc(&u64DiffCoresIgnored))
4557 return SUPGIPMODE_ASYNC_TSC;
4558
4559 /*
4560 * If the CPU supports power management and is an AMD one we
4561 * won't trust it unless it has the TscInvariant bit is set.
4562 */
4563 /* Check for "AuthenticAMD" */
4564 ASMCpuId(0, &uEAX, &uEBX, &uECX, &uEDX);
4565 if ( uEAX >= 1
4566 && uEBX == X86_CPUID_VENDOR_AMD_EBX
4567 && uECX == X86_CPUID_VENDOR_AMD_ECX
4568 && uEDX == X86_CPUID_VENDOR_AMD_EDX)
4569 {
4570 /* Check for APM support and that TscInvariant is cleared. */
4571 ASMCpuId(0x80000000, &uEAX, &uEBX, &uECX, &uEDX);
4572 if (uEAX >= 0x80000007)
4573 {
4574 ASMCpuId(0x80000007, &uEAX, &uEBX, &uECX, &uEDX);
4575 if ( !(uEDX & RT_BIT(8))/* TscInvariant */
4576 && (uEDX & 0x3e)) /* STC|TM|THERMTRIP|VID|FID. Ignore TS. */
4577 return SUPGIPMODE_ASYNC_TSC;
4578 }
4579 }
4580 }
4581 return SUPGIPMODE_SYNC_TSC;
4582}
4583
4584
4585/**
4586 * Invalidates the GIP data upon termination.
4587 *
4588 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4589 */
4590void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4591{
4592 unsigned i;
4593 pGip->u32Magic = 0;
4594 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4595 {
4596 pGip->aCPUs[i].u64NanoTS = 0;
4597 pGip->aCPUs[i].u64TSC = 0;
4598 pGip->aCPUs[i].iTSCHistoryHead = 0;
4599 }
4600}
4601
4602
4603/**
4604 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4605 * updates all the per cpu data except the transaction id.
4606 *
4607 * @param pGip The GIP.
4608 * @param pGipCpu Pointer to the per cpu data.
4609 * @param u64NanoTS The current time stamp.
4610 */
4611static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS)
4612{
4613 uint64_t u64TSC;
4614 uint64_t u64TSCDelta;
4615 uint32_t u32UpdateIntervalTSC;
4616 uint32_t u32UpdateIntervalTSCSlack;
4617 unsigned iTSCHistoryHead;
4618 uint64_t u64CpuHz;
4619
4620 /*
4621 * Update the NanoTS.
4622 */
4623 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4624
4625 /*
4626 * Calc TSC delta.
4627 */
4628 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4629 u64TSC = ASMReadTSC();
4630 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4631 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4632
4633 if (u64TSCDelta >> 32)
4634 {
4635 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4636 pGipCpu->cErrors++;
4637 }
4638
4639 /*
4640 * TSC History.
4641 */
4642 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4643
4644 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4645 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4646 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4647
4648 /*
4649 * UpdateIntervalTSC = average of last 8,2,1 intervals depending on update HZ.
4650 */
4651 if (pGip->u32UpdateHz >= 1000)
4652 {
4653 uint32_t u32;
4654 u32 = pGipCpu->au32TSCHistory[0];
4655 u32 += pGipCpu->au32TSCHistory[1];
4656 u32 += pGipCpu->au32TSCHistory[2];
4657 u32 += pGipCpu->au32TSCHistory[3];
4658 u32 >>= 2;
4659 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4660 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4661 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4662 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4663 u32UpdateIntervalTSC >>= 2;
4664 u32UpdateIntervalTSC += u32;
4665 u32UpdateIntervalTSC >>= 1;
4666
4667 /* Value choosen for a 2GHz Athlon64 running linux 2.6.10/11, . */
4668 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4669 }
4670 else if (pGip->u32UpdateHz >= 90)
4671 {
4672 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4673 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4674 u32UpdateIntervalTSC >>= 1;
4675
4676 /* value choosen on a 2GHz thinkpad running windows */
4677 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4678 }
4679 else
4680 {
4681 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4682
4683 /* This value hasn't be checked yet.. waiting for OS/2 and 33Hz timers.. :-) */
4684 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4685 }
4686 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4687
4688 /*
4689 * CpuHz.
4690 */
4691 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4692 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4693}
4694
4695
4696/**
4697 * Updates the GIP.
4698 *
4699 * @param pGip Pointer to the GIP.
4700 * @param u64NanoTS The current nanosecond timesamp.
4701 */
4702void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS)
4703{
4704 /*
4705 * Determin the relevant CPU data.
4706 */
4707 PSUPGIPCPU pGipCpu;
4708 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4709 pGipCpu = &pGip->aCPUs[0];
4710 else
4711 {
4712 unsigned iCpu = ASMGetApicId();
4713 if (RT_LIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4714 return;
4715 pGipCpu = &pGip->aCPUs[iCpu];
4716 }
4717
4718 /*
4719 * Start update transaction.
4720 */
4721 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4722 {
4723 /* this can happen on win32 if we're taking to long and there are more CPUs around. shouldn't happen though. */
4724 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4725 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4726 pGipCpu->cErrors++;
4727 return;
4728 }
4729
4730 /*
4731 * Recalc the update frequency every 0x800th time.
4732 */
4733 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4734 {
4735 if (pGip->u64NanoTSLastUpdateHz)
4736 {
4737#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4738 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4739 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4740 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4741 {
4742 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4743 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4744 }
4745#endif
4746 }
4747 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4748 }
4749
4750 /*
4751 * Update the data.
4752 */
4753 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4754
4755 /*
4756 * Complete transaction.
4757 */
4758 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4759}
4760
4761
4762/**
4763 * Updates the per cpu GIP data for the calling cpu.
4764 *
4765 * @param pGip Pointer to the GIP.
4766 * @param u64NanoTS The current nanosecond timesamp.
4767 * @param iCpu The CPU index.
4768 */
4769void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, unsigned iCpu)
4770{
4771 PSUPGIPCPU pGipCpu;
4772
4773 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4774 {
4775 pGipCpu = &pGip->aCPUs[iCpu];
4776
4777 /*
4778 * Start update transaction.
4779 */
4780 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4781 {
4782 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4783 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4784 pGipCpu->cErrors++;
4785 return;
4786 }
4787
4788 /*
4789 * Update the data.
4790 */
4791 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS);
4792
4793 /*
4794 * Complete transaction.
4795 */
4796 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4797 }
4798}
4799
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette