VirtualBox

source: vbox/trunk/src/VBox/HostDrivers/Support/SUPDrv.c@23728

Last change on this file was r23728, checked in by vboxsync, 15 years ago

SUPDrv: Moving more stuff over.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 191.6 KB
1/* $Revision: 23728 $ */
2/** @file
3 * VBoxDrv - The VirtualBox Support Driver - Common code.
4 */
5
6/*
7 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 *
26 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
27 * Clara, CA 95054 USA or visit http://www.sun.com if you need
28 * additional information or have any questions.
29 */
30
31/*******************************************************************************
32* Header Files *
33*******************************************************************************/
34#define LOG_GROUP LOG_GROUP_SUP_DRV
35#include "SUPDrvInternal.h"
36#ifndef PAGE_SHIFT
37# include <iprt/param.h>
38#endif
39#include <iprt/alloc.h>
40#include <iprt/cpuset.h>
41#include <iprt/handletable.h>
42#include <iprt/mp.h>
43#include <iprt/power.h>
44#include <iprt/process.h>
45#include <iprt/semaphore.h>
46#include <iprt/spinlock.h>
47#include <iprt/thread.h>
48#include <iprt/uuid.h>
49#include <VBox/param.h>
50#include <VBox/log.h>
51#include <VBox/err.h>
52#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
53# include <iprt/crc32.h>
54# include <iprt/net.h>
55# include <iprt/string.h>
56# include <iprt/rand.h>
57# include <iprt/path.h>
58#endif
59
60/*
61 * Logging assignments:
62 * Log - useful stuff, like failures.
63 * LogFlow - program flow, except the really noisy bits.
64 * Log2 - Cleanup.
65 * Log3 - Loader flow noise.
66 * Log4 - Call VMMR0 flow noise.
67 * Log5 - Native yet-to-be-defined noise.
68 * Log6 - Native ioctl flow noise.
69 *
70 * Logging requires BUILD_TYPE=debug and possibly changes to the logger
71 * instantiation in log-vbox.c(pp).
72 */
73
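/*
 * Illustration (hedged sketch, not part of the driver): how the level
 * assignments above map onto the Log* macros from VBox/log.h.  The function
 * name and the messages below are made up for the example only.
 */
#if 0 /* example only */
static void supdrvExampleLogUsage(int rc, void *pvObj)
{
    Log(("supdrvExample: something failed, rc=%d\n", rc));   /* Log     - useful stuff, like failures. */
    LogFlow(("supdrvExample: pvObj=%p\n", pvObj));            /* LogFlow - program flow. */
    Log2(("supdrvExample: cleaning up %p\n", pvObj));         /* Log2    - cleanup noise. */
    Log3(("supdrvExample: loader flow\n"));                   /* Log3    - loader flow noise. */
    Log4(("supdrvExample: calling into VMMR0\n"));            /* Log4    - VMMR0 call flow noise. */
}
#endif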
74
75/*******************************************************************************
76* Defined Constants And Macros *
77*******************************************************************************/
78/** The frequency at which we recalculate the u32UpdateHz and
79 * u32UpdateIntervalNS GIP members. The value must be a power of 2. */
80#define GIP_UPDATEHZ_RECALC_FREQ 0x800
81
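/*
 * Why the power-of-two requirement matters (hedged sketch, not the exact
 * expression used by the GIP code later in this file): "every Nth update"
 * can then be tested with a cheap mask instead of a division, e.g.
 *
 *     if (!(u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ - 1)))
 *         recalculate u32UpdateHz and u32UpdateIntervalNS;
 *
 * (u32TransactionId is only an illustrative counter name here.)
 */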
82/** @def VBOX_SVN_REV
83 * The makefile should define this if it can. */
84#ifndef VBOX_SVN_REV
85# define VBOX_SVN_REV 0
86#endif
87
88
89/*******************************************************************************
90* Internal Functions *
91*******************************************************************************/
92static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser);
93static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser);
94static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession);
95static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType);
96static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq);
97static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq);
98static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq);
99static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq);
100static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq);
101static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx);
102static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt);
103static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage);
104static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage);
105static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq);
106static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq);
107static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt);
108static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt);
109static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
110static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick);
111static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser);
112
113#ifdef RT_WITH_W64_UNWIND_HACK
114DECLASM(int) supdrvNtWrapVMMR0EntryEx(PFNRT pfnVMMR0EntryEx, PVM pVM, VMCPUID idCpu, unsigned uOperation, PSUPVMMR0REQHDR pReq, uint64_t u64Arg, PSUPDRVSESSION pSession);
115DECLASM(int) supdrvNtWrapVMMR0EntryFast(PFNRT pfnVMMR0EntryFast, PVM pVM, VMCPUID idCpu, unsigned uOperation);
116DECLASM(void) supdrvNtWrapObjDestructor(PFNRT pfnDestruction, void *pvObj, void *pvUser1, void *pvUser2);
117DECLASM(void *) supdrvNtWrapQueryFactoryInterface(PFNRT pfnQueryFactoryInterface, struct SUPDRVFACTORY const *pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid);
118DECLASM(int) supdrvNtWrapModuleInit(PFNRT pfnModuleInit);
119DECLASM(void) supdrvNtWrapModuleTerm(PFNRT pfnModuleTerm);
120DECLASM(int) supdrvNtWrapServiceReqHandler(PFNRT pfnServiceReqHandler, PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr);
121
122DECLASM(int) UNWIND_WRAP(SUPR0ComponentRegisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
123DECLASM(int) UNWIND_WRAP(SUPR0ComponentDeregisterFactory)(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory);
124DECLASM(int) UNWIND_WRAP(SUPR0ComponentQueryFactory)(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf);
125DECLASM(void *) UNWIND_WRAP(SUPR0ObjRegister)(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2);
126DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRef)(void *pvObj, PSUPDRVSESSION pSession);
127DECLASM(int) UNWIND_WRAP(SUPR0ObjAddRefEx)(void *pvObj, PSUPDRVSESSION pSession, bool fNoPreempt);
128DECLASM(int) UNWIND_WRAP(SUPR0ObjRelease)(void *pvObj, PSUPDRVSESSION pSession);
129DECLASM(int) UNWIND_WRAP(SUPR0ObjVerifyAccess)(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName);
130DECLASM(int) UNWIND_WRAP(SUPR0LockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages);
131DECLASM(int) UNWIND_WRAP(SUPR0UnlockMem)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
132DECLASM(int) UNWIND_WRAP(SUPR0ContAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys);
133DECLASM(int) UNWIND_WRAP(SUPR0ContFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
134DECLASM(int) UNWIND_WRAP(SUPR0LowAlloc)(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages);
135DECLASM(int) UNWIND_WRAP(SUPR0LowFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
136DECLASM(int) UNWIND_WRAP(SUPR0MemAlloc)(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3);
137DECLASM(int) UNWIND_WRAP(SUPR0MemGetPhys)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages);
138DECLASM(int) UNWIND_WRAP(SUPR0MemFree)(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr);
139DECLASM(int) UNWIND_WRAP(SUPR0PageAllocEx)(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages);
140DECLASM(int) UNWIND_WRAP(SUPR0PageFree)(PSUPDRVSESSION pSession, RTR3PTR pvR3);
141//DECLASM(int) UNWIND_WRAP(SUPR0Printf)(const char *pszFormat, ...);
142DECLASM(int) UNWIND_WRAP(SUPSemEventCreate)(PSUPDRVSESSION pSession, PSUPSEMEVENT phEvent);
143DECLASM(int) UNWIND_WRAP(SUPSemEventClose)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent);
144DECLASM(int) UNWIND_WRAP(SUPSemEventSignal)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent);
145DECLASM(int) UNWIND_WRAP(SUPSemEventWait)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent, uint32_t cMillies);
146DECLASM(int) UNWIND_WRAP(SUPSemEventWaitNoResume)(PSUPDRVSESSION pSession, SUPSEMEVENT hEvent, uint32_t cMillies);
147DECLASM(int) UNWIND_WRAP(SUPSemEventMultiCreate)(PSUPDRVSESSION pSession, PSUPSEMEVENTMULTI phEventMulti);
148DECLASM(int) UNWIND_WRAP(SUPSemEventMultiClose)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti);
149DECLASM(int) UNWIND_WRAP(SUPSemEventMultiSignal)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti);
150DECLASM(int) UNWIND_WRAP(SUPSemEventMultiReset)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti);
151DECLASM(int) UNWIND_WRAP(SUPSemEventMultiWait)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti, uint32_t cMillies);
152DECLASM(int) UNWIND_WRAP(SUPSemEventMultiWaitNoResume)(PSUPDRVSESSION pSession, SUPSEMEVENTMULTI hEventMulti, uint32_t cMillies);
153DECLASM(SUPPAGINGMODE) UNWIND_WRAP(SUPR0GetPagingMode)(void);
154DECLASM(void *) UNWIND_WRAP(RTMemAlloc)(size_t cb) RT_NO_THROW;
155DECLASM(void *) UNWIND_WRAP(RTMemAllocZ)(size_t cb) RT_NO_THROW;
156DECLASM(void) UNWIND_WRAP(RTMemFree)(void *pv) RT_NO_THROW;
157DECLASM(void *) UNWIND_WRAP(RTMemDup)(const void *pvSrc, size_t cb) RT_NO_THROW;
158DECLASM(void *) UNWIND_WRAP(RTMemDupEx)(const void *pvSrc, size_t cbSrc, size_t cbExtra) RT_NO_THROW;
159DECLASM(void *) UNWIND_WRAP(RTMemRealloc)(void *pvOld, size_t cbNew) RT_NO_THROW;
160DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocLow)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
161DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPage)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
162DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhys)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
163DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocPhysNC)(PRTR0MEMOBJ pMemObj, size_t cb, RTHCPHYS PhysHighest);
164DECLASM(int) UNWIND_WRAP(RTR0MemObjAllocCont)(PRTR0MEMOBJ pMemObj, size_t cb, bool fExecutable);
165DECLASM(int) UNWIND_WRAP(RTR0MemObjEnterPhys)(PRTR0MEMOBJ pMemObj, RTHCPHYS Phys, size_t cb);
166DECLASM(int) UNWIND_WRAP(RTR0MemObjLockUser)(PRTR0MEMOBJ pMemObj, RTR3PTR R3Ptr, size_t cb, uint32_t fFlags, RTR0PROCESS R0Process);
167DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernel)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt);
168DECLASM(int) UNWIND_WRAP(RTR0MemObjMapKernelEx)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, void *pvFixed, size_t uAlignment, unsigned fProt, size_t offSub, size_t cbSub);
169DECLASM(int) UNWIND_WRAP(RTR0MemObjMapUser)(PRTR0MEMOBJ pMemObj, RTR0MEMOBJ MemObjToMap, RTR3PTR R3PtrFixed, size_t uAlignment, unsigned fProt, RTR0PROCESS R0Process);
170DECLASM(int) UNWIND_WRAP(RTR0MemObjProtect)(RTR0MEMOBJ hMemObj, size_t offsub, size_t cbSub, uint32_t fProt);
171/*DECLASM(void *) UNWIND_WRAP(RTR0MemObjAddress)(RTR0MEMOBJ MemObj); - not necessary */
172/*DECLASM(RTR3PTR) UNWIND_WRAP(RTR0MemObjAddressR3)(RTR0MEMOBJ MemObj); - not necessary */
173/*DECLASM(size_t) UNWIND_WRAP(RTR0MemObjSize)(RTR0MEMOBJ MemObj); - not necessary */
174/*DECLASM(bool) UNWIND_WRAP(RTR0MemObjIsMapping)(RTR0MEMOBJ MemObj); - not necessary */
175/*DECLASM(RTHCPHYS) UNWIND_WRAP(RTR0MemObjGetPagePhysAddr)(RTR0MEMOBJ MemObj, size_t iPage); - not necessary */
176DECLASM(int) UNWIND_WRAP(RTR0MemObjFree)(RTR0MEMOBJ MemObj, bool fFreeMappings);
177DECLASM(int) UNWIND_WRAP(RTR0MemUserCopyFrom)(void *pvDst, RTR3PTR R3PtrSrc, size_t cb);
178DECLASM(int) UNWIND_WRAP(RTR0MemUserCopyTo)(RTR3PTR R3PtrDst, void const *pvSrc, size_t cb);
179/* RTR0MemUserIsValidAddr - not necessary */
180/* RTR0MemKernelIsValidAddr - not necessary */
181/* RTR0MemAreKrnlAndUsrDifferent - not necessary */
182/* RTProcSelf - not necessary */
183/* RTR0ProcHandleSelf - not necessary */
184DECLASM(int) UNWIND_WRAP(RTSemFastMutexCreate)(PRTSEMFASTMUTEX pMutexSem);
185DECLASM(int) UNWIND_WRAP(RTSemFastMutexDestroy)(RTSEMFASTMUTEX MutexSem);
186DECLASM(int) UNWIND_WRAP(RTSemFastMutexRequest)(RTSEMFASTMUTEX MutexSem);
187DECLASM(int) UNWIND_WRAP(RTSemFastMutexRelease)(RTSEMFASTMUTEX MutexSem);
188DECLASM(int) UNWIND_WRAP(RTSemEventCreate)(PRTSEMEVENT pEventSem);
189DECLASM(int) UNWIND_WRAP(RTSemEventSignal)(RTSEMEVENT EventSem);
190DECLASM(int) UNWIND_WRAP(RTSemEventWait)(RTSEMEVENT EventSem, unsigned cMillies);
191DECLASM(int) UNWIND_WRAP(RTSemEventWaitNoResume)(RTSEMEVENT EventSem, unsigned cMillies);
192DECLASM(int) UNWIND_WRAP(RTSemEventDestroy)(RTSEMEVENT EventSem);
193DECLASM(int) UNWIND_WRAP(RTSemEventMultiCreate)(PRTSEMEVENTMULTI pEventMultiSem);
194DECLASM(int) UNWIND_WRAP(RTSemEventMultiSignal)(RTSEMEVENTMULTI EventMultiSem);
195DECLASM(int) UNWIND_WRAP(RTSemEventMultiReset)(RTSEMEVENTMULTI EventMultiSem);
196DECLASM(int) UNWIND_WRAP(RTSemEventMultiWait)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
197DECLASM(int) UNWIND_WRAP(RTSemEventMultiWaitNoResume)(RTSEMEVENTMULTI EventMultiSem, unsigned cMillies);
198DECLASM(int) UNWIND_WRAP(RTSemEventMultiDestroy)(RTSEMEVENTMULTI EventMultiSem);
199DECLASM(int) UNWIND_WRAP(RTSpinlockCreate)(PRTSPINLOCK pSpinlock);
200DECLASM(int) UNWIND_WRAP(RTSpinlockDestroy)(RTSPINLOCK Spinlock);
201DECLASM(void) UNWIND_WRAP(RTSpinlockAcquire)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
202DECLASM(void) UNWIND_WRAP(RTSpinlockRelease)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
203DECLASM(void) UNWIND_WRAP(RTSpinlockAcquireNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
204DECLASM(void) UNWIND_WRAP(RTSpinlockReleaseNoInts)(RTSPINLOCK Spinlock, PRTSPINLOCKTMP pTmp);
205/* RTTimeNanoTS - not necessary */
206/* RTTimeMilliTS - not necessary */
207/* RTTimeSystemNanoTS - not necessary */
208/* RTTimeSystemMilliTS - not necessary */
209/* RTThreadNativeSelf - not necessary */
210DECLASM(int) UNWIND_WRAP(RTThreadSleep)(unsigned cMillies);
211DECLASM(bool) UNWIND_WRAP(RTThreadYield)(void);
212#if 0
213/* RTThreadSelf - not necessary */
214DECLASM(int) UNWIND_WRAP(RTThreadCreate)(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
215 RTTHREADTYPE enmType, unsigned fFlags, const char *pszName);
216DECLASM(RTNATIVETHREAD) UNWIND_WRAP(RTThreadGetNative)(RTTHREAD Thread);
217DECLASM(int) UNWIND_WRAP(RTThreadWait)(RTTHREAD Thread, unsigned cMillies, int *prc);
218DECLASM(int) UNWIND_WRAP(RTThreadWaitNoResume)(RTTHREAD Thread, unsigned cMillies, int *prc);
219DECLASM(const char *) UNWIND_WRAP(RTThreadGetName)(RTTHREAD Thread);
220DECLASM(const char *) UNWIND_WRAP(RTThreadSelfName)(void);
221DECLASM(RTTHREADTYPE) UNWIND_WRAP(RTThreadGetType)(RTTHREAD Thread);
222DECLASM(int) UNWIND_WRAP(RTThreadUserSignal)(RTTHREAD Thread);
223DECLASM(int) UNWIND_WRAP(RTThreadUserReset)(RTTHREAD Thread);
224DECLASM(int) UNWIND_WRAP(RTThreadUserWait)(RTTHREAD Thread, unsigned cMillies);
225DECLASM(int) UNWIND_WRAP(RTThreadUserWaitNoResume)(RTTHREAD Thread, unsigned cMillies);
226#endif
227/* RTThreadPreemptIsEnabled - not necessary */
228/* RTThreadPreemptIsPending - not necessary */
229/* RTThreadPreemptIsPendingTrusty - not necessary */
230/* RTThreadPreemptDisable - not necessary */
231DECLASM(void) UNWIND_WRAP(RTThreadPreemptRestore)(RTTHREADPREEMPTSTATE pState);
232/* RTLogDefaultInstance - a bit of a gamble, but we do not want the overhead! */
233/* RTMpCpuId - not necessary */
234/* RTMpCpuIdFromSetIndex - not necessary */
235/* RTMpCpuIdToSetIndex - not necessary */
236/* RTMpIsCpuPossible - not necessary */
237/* RTMpGetCount - not necessary */
238/* RTMpGetMaxCpuId - not necessary */
239/* RTMpGetOnlineCount - not necessary */
240/* RTMpGetOnlineSet - not necessary */
241/* RTMpGetSet - not necessary */
242/* RTMpIsCpuOnline - not necessary */
243DECLASM(int) UNWIND_WRAP(RTMpIsCpuWorkPending)(void);
244DECLASM(int) UNWIND_WRAP(RTMpOnAll)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
245DECLASM(int) UNWIND_WRAP(RTMpOnOthers)(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
246DECLASM(int) UNWIND_WRAP(RTMpOnSpecific)(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2);
247DECLASM(int) UNWIND_WRAP(RTMpPokeCpu)(RTCPUID idCpu);
248/* RTLogRelDefaultInstance - not necessary. */
249DECLASM(int) UNWIND_WRAP(RTLogSetDefaultInstanceThread)(PRTLOGGER pLogger, uintptr_t uKey);
250/* RTLogLogger - can't wrap this buster. */
251/* RTLogLoggerEx - can't wrap this buster. */
252DECLASM(void) UNWIND_WRAP(RTLogLoggerExV)(PRTLOGGER pLogger, unsigned fFlags, unsigned iGroup, const char *pszFormat, va_list args);
253/* RTLogPrintf - can't wrap this buster. */ /** @todo provide va_list log wrappers in RuntimeR0. */
254DECLASM(void) UNWIND_WRAP(RTLogPrintfV)(const char *pszFormat, va_list args);
255DECLASM(void) UNWIND_WRAP(AssertMsg1)(const char *pszExpr, unsigned uLine, const char *pszFile, const char *pszFunction);
256/* AssertMsg2 - can't wrap this buster. */
257#endif /* RT_WITH_W64_UNWIND_HACK */
258
259
260/*******************************************************************************
261* Global Variables *
262*******************************************************************************/
263/**
264 * Array of the R0 SUP API.
265 */
266static SUPFUNC g_aFunctions[] =
267{
268 /* name function */
269 /* Entries with absolute addresses determined at runtime, fixup
270 code makes ugly ASSUMPTIONS about the order here: */
271 { "SUPR0AbsIs64bit", (void *)0 },
272 { "SUPR0Abs64bitKernelCS", (void *)0 },
273 { "SUPR0Abs64bitKernelSS", (void *)0 },
274 { "SUPR0Abs64bitKernelDS", (void *)0 },
275 { "SUPR0AbsKernelCS", (void *)0 },
276 { "SUPR0AbsKernelSS", (void *)0 },
277 { "SUPR0AbsKernelDS", (void *)0 },
278 { "SUPR0AbsKernelES", (void *)0 },
279 { "SUPR0AbsKernelFS", (void *)0 },
280 { "SUPR0AbsKernelGS", (void *)0 },
281 /* Normal function pointers: */
282 { "SUPR0ComponentRegisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentRegisterFactory) },
283 { "SUPR0ComponentDeregisterFactory", (void *)UNWIND_WRAP(SUPR0ComponentDeregisterFactory) },
284 { "SUPR0ComponentQueryFactory", (void *)UNWIND_WRAP(SUPR0ComponentQueryFactory) },
285 { "SUPR0ObjRegister", (void *)UNWIND_WRAP(SUPR0ObjRegister) },
286 { "SUPR0ObjAddRef", (void *)UNWIND_WRAP(SUPR0ObjAddRef) },
287 { "SUPR0ObjAddRefEx", (void *)UNWIND_WRAP(SUPR0ObjAddRefEx) },
288 { "SUPR0ObjRelease", (void *)UNWIND_WRAP(SUPR0ObjRelease) },
289 { "SUPR0ObjVerifyAccess", (void *)UNWIND_WRAP(SUPR0ObjVerifyAccess) },
290 { "SUPR0LockMem", (void *)UNWIND_WRAP(SUPR0LockMem) },
291 { "SUPR0UnlockMem", (void *)UNWIND_WRAP(SUPR0UnlockMem) },
292 { "SUPR0ContAlloc", (void *)UNWIND_WRAP(SUPR0ContAlloc) },
293 { "SUPR0ContFree", (void *)UNWIND_WRAP(SUPR0ContFree) },
294 { "SUPR0LowAlloc", (void *)UNWIND_WRAP(SUPR0LowAlloc) },
295 { "SUPR0LowFree", (void *)UNWIND_WRAP(SUPR0LowFree) },
296 { "SUPR0MemAlloc", (void *)UNWIND_WRAP(SUPR0MemAlloc) },
297 { "SUPR0MemGetPhys", (void *)UNWIND_WRAP(SUPR0MemGetPhys) },
298 { "SUPR0MemFree", (void *)UNWIND_WRAP(SUPR0MemFree) },
299 { "SUPR0PageAllocEx", (void *)UNWIND_WRAP(SUPR0PageAllocEx) },
300 { "SUPR0PageFree", (void *)UNWIND_WRAP(SUPR0PageFree) },
301 { "SUPR0Printf", (void *)SUPR0Printf }, /** @todo needs wrapping? */
302 { "SUPSemEventCreate", (void *)UNWIND_WRAP(SUPSemEventCreate) },
303 { "SUPSemEventClose", (void *)UNWIND_WRAP(SUPSemEventClose) },
304 { "SUPSemEventSignal", (void *)UNWIND_WRAP(SUPSemEventSignal) },
305 { "SUPSemEventWait", (void *)UNWIND_WRAP(SUPSemEventWait) },
306 { "SUPSemEventWaitNoResume", (void *)UNWIND_WRAP(SUPSemEventWaitNoResume) },
307 { "SUPSemEventMultiCreate", (void *)UNWIND_WRAP(SUPSemEventMultiCreate) },
308 { "SUPSemEventMultiClose", (void *)UNWIND_WRAP(SUPSemEventMultiClose) },
309 { "SUPSemEventMultiSignal", (void *)UNWIND_WRAP(SUPSemEventMultiSignal) },
310 { "SUPSemEventMultiReset", (void *)UNWIND_WRAP(SUPSemEventMultiReset) },
311 { "SUPSemEventMultiWait", (void *)UNWIND_WRAP(SUPSemEventMultiWait) },
312 { "SUPSemEventMultiWaitNoResume", (void *)UNWIND_WRAP(SUPSemEventMultiWaitNoResume) },
313 { "SUPR0GetPagingMode", (void *)UNWIND_WRAP(SUPR0GetPagingMode) },
314 { "SUPR0EnableVTx", (void *)SUPR0EnableVTx },
315 { "RTMemAlloc", (void *)UNWIND_WRAP(RTMemAlloc) },
316 { "RTMemAllocZ", (void *)UNWIND_WRAP(RTMemAllocZ) },
317 { "RTMemFree", (void *)UNWIND_WRAP(RTMemFree) },
318 /*{ "RTMemDup", (void *)UNWIND_WRAP(RTMemDup) },
319 { "RTMemDupEx", (void *)UNWIND_WRAP(RTMemDupEx) },*/
320 { "RTMemRealloc", (void *)UNWIND_WRAP(RTMemRealloc) },
321 { "RTR0MemObjAllocLow", (void *)UNWIND_WRAP(RTR0MemObjAllocLow) },
322 { "RTR0MemObjAllocPage", (void *)UNWIND_WRAP(RTR0MemObjAllocPage) },
323 { "RTR0MemObjAllocPhys", (void *)UNWIND_WRAP(RTR0MemObjAllocPhys) },
324 { "RTR0MemObjAllocPhysNC", (void *)UNWIND_WRAP(RTR0MemObjAllocPhysNC) },
325 { "RTR0MemObjAllocCont", (void *)UNWIND_WRAP(RTR0MemObjAllocCont) },
326 { "RTR0MemObjEnterPhys", (void *)UNWIND_WRAP(RTR0MemObjEnterPhys) },
327 { "RTR0MemObjLockUser", (void *)UNWIND_WRAP(RTR0MemObjLockUser) },
328 { "RTR0MemObjMapKernel", (void *)UNWIND_WRAP(RTR0MemObjMapKernel) },
329 { "RTR0MemObjMapKernelEx", (void *)UNWIND_WRAP(RTR0MemObjMapKernelEx) },
330 { "RTR0MemObjMapUser", (void *)UNWIND_WRAP(RTR0MemObjMapUser) },
331 { "RTR0MemObjProtect", (void *)UNWIND_WRAP(RTR0MemObjProtect) },
332 { "RTR0MemObjAddress", (void *)RTR0MemObjAddress },
333 { "RTR0MemObjAddressR3", (void *)RTR0MemObjAddressR3 },
334 { "RTR0MemObjSize", (void *)RTR0MemObjSize },
335 { "RTR0MemObjIsMapping", (void *)RTR0MemObjIsMapping },
336 { "RTR0MemObjGetPagePhysAddr", (void *)RTR0MemObjGetPagePhysAddr },
337 { "RTR0MemObjFree", (void *)UNWIND_WRAP(RTR0MemObjFree) },
338 { "RTR0MemUserCopyFrom", (void *)UNWIND_WRAP(RTR0MemUserCopyFrom) },
339 { "RTR0MemUserCopyTo", (void *)UNWIND_WRAP(RTR0MemUserCopyTo) },
340 { "RTR0MemUserIsValidAddr", (void *)RTR0MemUserIsValidAddr },
341 { "RTR0MemKernelIsValidAddr", (void *)RTR0MemKernelIsValidAddr },
342 { "RTR0MemAreKrnlAndUsrDifferent", (void *)RTR0MemAreKrnlAndUsrDifferent },
343/* These don't work yet on linux - use fast mutexes!
344 { "RTSemMutexCreate", (void *)RTSemMutexCreate },
345 { "RTSemMutexRequest", (void *)RTSemMutexRequest },
346 { "RTSemMutexRelease", (void *)RTSemMutexRelease },
347 { "RTSemMutexDestroy", (void *)RTSemMutexDestroy },
348*/
349 { "RTProcSelf", (void *)RTProcSelf },
350 { "RTR0ProcHandleSelf", (void *)RTR0ProcHandleSelf },
351 { "RTSemFastMutexCreate", (void *)UNWIND_WRAP(RTSemFastMutexCreate) },
352 { "RTSemFastMutexDestroy", (void *)UNWIND_WRAP(RTSemFastMutexDestroy) },
353 { "RTSemFastMutexRequest", (void *)UNWIND_WRAP(RTSemFastMutexRequest) },
354 { "RTSemFastMutexRelease", (void *)UNWIND_WRAP(RTSemFastMutexRelease) },
355 { "RTSemEventCreate", (void *)UNWIND_WRAP(RTSemEventCreate) },
356 { "RTSemEventSignal", (void *)UNWIND_WRAP(RTSemEventSignal) },
357 { "RTSemEventWait", (void *)UNWIND_WRAP(RTSemEventWait) },
358 { "RTSemEventWaitNoResume", (void *)UNWIND_WRAP(RTSemEventWaitNoResume) },
359 { "RTSemEventDestroy", (void *)UNWIND_WRAP(RTSemEventDestroy) },
360 { "RTSemEventMultiCreate", (void *)UNWIND_WRAP(RTSemEventMultiCreate) },
361 { "RTSemEventMultiSignal", (void *)UNWIND_WRAP(RTSemEventMultiSignal) },
362 { "RTSemEventMultiReset", (void *)UNWIND_WRAP(RTSemEventMultiReset) },
363 { "RTSemEventMultiWait", (void *)UNWIND_WRAP(RTSemEventMultiWait) },
364 { "RTSemEventMultiWaitNoResume", (void *)UNWIND_WRAP(RTSemEventMultiWaitNoResume) },
365 { "RTSemEventMultiDestroy", (void *)UNWIND_WRAP(RTSemEventMultiDestroy) },
366 { "RTSpinlockCreate", (void *)UNWIND_WRAP(RTSpinlockCreate) },
367 { "RTSpinlockDestroy", (void *)UNWIND_WRAP(RTSpinlockDestroy) },
368 { "RTSpinlockAcquire", (void *)UNWIND_WRAP(RTSpinlockAcquire) },
369 { "RTSpinlockRelease", (void *)UNWIND_WRAP(RTSpinlockRelease) },
370 { "RTSpinlockAcquireNoInts", (void *)UNWIND_WRAP(RTSpinlockAcquireNoInts) },
371 { "RTSpinlockReleaseNoInts", (void *)UNWIND_WRAP(RTSpinlockReleaseNoInts) },
372 { "RTTimeNanoTS", (void *)RTTimeNanoTS },
373 { "RTTimeMilliTS", (void *)RTTimeMilliTS },
374 { "RTTimeSystemNanoTS", (void *)RTTimeSystemNanoTS },
375 { "RTTimeSystemMilliTS", (void *)RTTimeSystemMilliTS },
376 { "RTThreadNativeSelf", (void *)RTThreadNativeSelf },
377 { "RTThreadSleep", (void *)UNWIND_WRAP(RTThreadSleep) },
378 { "RTThreadYield", (void *)UNWIND_WRAP(RTThreadYield) },
379#if 0 /* Thread APIs, Part 2. */
380 { "RTThreadSelf", (void *)UNWIND_WRAP(RTThreadSelf) },
381 { "RTThreadCreate", (void *)UNWIND_WRAP(RTThreadCreate) }, /** @todo need to wrap the callback */
382 { "RTThreadGetNative", (void *)UNWIND_WRAP(RTThreadGetNative) },
383 { "RTThreadWait", (void *)UNWIND_WRAP(RTThreadWait) },
384 { "RTThreadWaitNoResume", (void *)UNWIND_WRAP(RTThreadWaitNoResume) },
385 { "RTThreadGetName", (void *)UNWIND_WRAP(RTThreadGetName) },
386 { "RTThreadSelfName", (void *)UNWIND_WRAP(RTThreadSelfName) },
387 { "RTThreadGetType", (void *)UNWIND_WRAP(RTThreadGetType) },
388 { "RTThreadUserSignal", (void *)UNWIND_WRAP(RTThreadUserSignal) },
389 { "RTThreadUserReset", (void *)UNWIND_WRAP(RTThreadUserReset) },
390 { "RTThreadUserWait", (void *)UNWIND_WRAP(RTThreadUserWait) },
391 { "RTThreadUserWaitNoResume", (void *)UNWIND_WRAP(RTThreadUserWaitNoResume) },
392#endif
393 { "RTThreadPreemptIsEnabled", (void *)RTThreadPreemptIsEnabled },
394 { "RTThreadPreemptIsPending", (void *)RTThreadPreemptIsPending },
395 { "RTThreadPreemptIsPendingTrusty", (void *)RTThreadPreemptIsPendingTrusty },
396 { "RTThreadPreemptIsPossible", (void *)RTThreadPreemptIsPossible },
397 { "RTThreadPreemptDisable", (void *)RTThreadPreemptDisable },
398 { "RTThreadPreemptRestore", (void *)UNWIND_WRAP(RTThreadPreemptRestore) },
399 { "RTThreadIsInInterrupt", (void *)RTThreadIsInInterrupt },
400
401 { "RTLogDefaultInstance", (void *)RTLogDefaultInstance },
402 { "RTMpCpuId", (void *)RTMpCpuId },
403 { "RTMpCpuIdFromSetIndex", (void *)RTMpCpuIdFromSetIndex },
404 { "RTMpCpuIdToSetIndex", (void *)RTMpCpuIdToSetIndex },
405 { "RTMpIsCpuPossible", (void *)RTMpIsCpuPossible },
406 { "RTMpGetCount", (void *)RTMpGetCount },
407 { "RTMpGetMaxCpuId", (void *)RTMpGetMaxCpuId },
408 { "RTMpGetOnlineCount", (void *)RTMpGetOnlineCount },
409 { "RTMpGetOnlineSet", (void *)RTMpGetOnlineSet },
410 { "RTMpGetSet", (void *)RTMpGetSet },
411 { "RTMpIsCpuOnline", (void *)RTMpIsCpuOnline },
412 { "RTMpIsCpuWorkPending", (void *)UNWIND_WRAP(RTMpIsCpuWorkPending) },
413 { "RTMpOnAll", (void *)UNWIND_WRAP(RTMpOnAll) },
414 { "RTMpOnOthers", (void *)UNWIND_WRAP(RTMpOnOthers) },
415 { "RTMpOnSpecific", (void *)UNWIND_WRAP(RTMpOnSpecific) },
416 { "RTMpPokeCpu", (void *)UNWIND_WRAP(RTMpPokeCpu) },
417 { "RTPowerNotificationRegister", (void *)RTPowerNotificationRegister },
418 { "RTPowerNotificationDeregister", (void *)RTPowerNotificationDeregister },
419 { "RTLogRelDefaultInstance", (void *)RTLogRelDefaultInstance },
420 { "RTLogSetDefaultInstanceThread", (void *)UNWIND_WRAP(RTLogSetDefaultInstanceThread) },
421 { "RTLogLogger", (void *)RTLogLogger }, /** @todo remove this */
422 { "RTLogLoggerEx", (void *)RTLogLoggerEx }, /** @todo remove this */
423 { "RTLogLoggerExV", (void *)UNWIND_WRAP(RTLogLoggerExV) },
424 { "RTLogPrintf", (void *)RTLogPrintf }, /** @todo remove this */
425 { "RTLogPrintfV", (void *)UNWIND_WRAP(RTLogPrintfV) },
426 { "AssertMsg1", (void *)UNWIND_WRAP(AssertMsg1) },
427 { "AssertMsg2", (void *)AssertMsg2 }, /** @todo replace this by RTAssertMsg2V */
428#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS)
429 { "RTR0AssertPanicSystem", (void *)RTR0AssertPanicSystem },
430#endif
431#if defined(RT_OS_DARWIN)
432 { "RTAssertMsg1", (void *)RTAssertMsg1 },
433 { "RTAssertMsg2", (void *)RTAssertMsg2 },
434 { "RTAssertMsg2V", (void *)RTAssertMsg2V },
435#endif
436};
437
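/*
 * Illustration (hypothetical helper, not part of the driver): resolving a
 * name against the export table above boils down to a linear search.  The
 * SUPFUNC member names szName/pfn are assumed from the fixup code and the
 * initializers above; the function itself and the use of strcmp are made up
 * for the example.
 */
#if 0 /* example only */
static void *supdrvExampleLookupExport(const char *pszSymbol)
{
    size_t i;
    for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
        if (!strcmp(g_aFunctions[i].szName, pszSymbol))
            return g_aFunctions[i].pfn;
    return NULL;
}
#endif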
438#if defined(RT_OS_DARWIN) || defined(RT_OS_SOLARIS) || defined(RT_OS_FREEBSD)
439/**
440 * Drag in the rest of IPRT since we share it with the
441 * rest of the kernel modules on Darwin.
442 */
443PFNRT g_apfnVBoxDrvIPRTDeps[] =
444{
445 /* VBoxNetFlt */
446 (PFNRT)RTCrc32,
447 (PFNRT)RTErrConvertFromErrno,
448 (PFNRT)RTNetIPv4IsHdrValid,
449 (PFNRT)RTNetIPv4TCPChecksum,
450 (PFNRT)RTNetIPv4UDPChecksum,
451 (PFNRT)RTUuidCompare,
452 (PFNRT)RTUuidCompareStr,
453 (PFNRT)RTUuidFromStr,
454 (PFNRT)RTStrDup,
455 (PFNRT)RTStrFree,
456 /* VBoxNetAdp */
457 (PFNRT)RTRandBytes,
458 /* VBoxUSB */
459 (PFNRT)RTPathStripFilename,
460 NULL
461};
462#endif /* RT_OS_DARWIN || RT_OS_SOLARIS || RT_OS_FREEBSD */
463
464
465/**
466 * Initializes the device extension structure.
467 *
468 * @returns IPRT status code.
469 * @param pDevExt The device extension to initialize.
470 */
471int VBOXCALL supdrvInitDevExt(PSUPDRVDEVEXT pDevExt)
472{
473 int rc;
474
475#ifdef SUPDRV_WITH_RELEASE_LOGGER
476 /*
477 * Create the release log.
478 */
479 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
480 PRTLOGGER pRelLogger;
481 rc = RTLogCreate(&pRelLogger, 0 /* fFlags */, "all",
482 "VBOX_RELEASE_LOG", RT_ELEMENTS(s_apszGroups), s_apszGroups,
483 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER, NULL);
484 if (RT_SUCCESS(rc))
485 RTLogRelSetDefaultInstance(pRelLogger);
486 /** @todo Add native hook for getting logger config parameters and setting
487 * them. On linux we should use the module parameter stuff... */
488#endif
489
490 /*
491 * Initialize it.
492 */
493 memset(pDevExt, 0, sizeof(*pDevExt));
494 rc = RTSpinlockCreate(&pDevExt->Spinlock);
495 if (!rc)
496 {
497 rc = RTSemFastMutexCreate(&pDevExt->mtxLdr);
498 if (!rc)
499 {
500 rc = RTSemFastMutexCreate(&pDevExt->mtxComponentFactory);
501 if (!rc)
502 {
503 rc = RTSemFastMutexCreate(&pDevExt->mtxGip);
504 if (!rc)
505 {
506 rc = supdrvGipCreate(pDevExt);
507 if (RT_SUCCESS(rc))
508 {
509 pDevExt->u32Cookie = BIRD; /** @todo make this random? */
510
511 /*
512 * Fixup the absolute symbols.
513 *
514 * Because of the table indexing assumptions we'll have a little #ifdef orgy
515 * here rather than distributing this to OS specific files. At least for now.
516 */
517#ifdef RT_OS_DARWIN
518# if ARCH_BITS == 32
519 if (SUPR0GetPagingMode() >= SUPPAGINGMODE_AMD64)
520 {
521 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
522 g_aFunctions[1].pfn = (void *)0x80; /* SUPR0Abs64bitKernelCS - KERNEL64_CS, seg.h */
523 g_aFunctions[2].pfn = (void *)0x88; /* SUPR0Abs64bitKernelSS - KERNEL64_SS, seg.h */
524 g_aFunctions[3].pfn = (void *)0x88; /* SUPR0Abs64bitKernelDS - KERNEL64_SS, seg.h */
525 }
526 else
527 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
528 g_aFunctions[4].pfn = (void *)0x08; /* SUPR0AbsKernelCS - KERNEL_CS, seg.h */
529 g_aFunctions[5].pfn = (void *)0x10; /* SUPR0AbsKernelSS - KERNEL_DS, seg.h */
530 g_aFunctions[6].pfn = (void *)0x10; /* SUPR0AbsKernelDS - KERNEL_DS, seg.h */
531 g_aFunctions[7].pfn = (void *)0x10; /* SUPR0AbsKernelES - KERNEL_DS, seg.h */
532 g_aFunctions[8].pfn = (void *)0x10; /* SUPR0AbsKernelFS - KERNEL_DS, seg.h */
533 g_aFunctions[9].pfn = (void *)0x48; /* SUPR0AbsKernelGS - CPU_DATA_GS, seg.h */
534# else /* 64-bit darwin: */
535 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
536 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
537 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
538 g_aFunctions[3].pfn = (void *)0; /* SUPR0Abs64bitKernelDS */
539 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
540 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
541 g_aFunctions[6].pfn = (void *)0; /* SUPR0AbsKernelDS */
542 g_aFunctions[7].pfn = (void *)0; /* SUPR0AbsKernelES */
543 g_aFunctions[8].pfn = (void *)0; /* SUPR0AbsKernelFS */
544 g_aFunctions[9].pfn = (void *)0; /* SUPR0AbsKernelGS */
545
546# endif
547#else /* !RT_OS_DARWIN */
548# if ARCH_BITS == 64
549 g_aFunctions[0].pfn = (void *)1; /* SUPR0AbsIs64bit */
550 g_aFunctions[1].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0Abs64bitKernelCS */
551 g_aFunctions[2].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0Abs64bitKernelSS */
552 g_aFunctions[3].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0Abs64bitKernelDS */
553# else
554 g_aFunctions[0].pfn = g_aFunctions[1].pfn = g_aFunctions[2].pfn = g_aFunctions[3].pfn = (void *)0;
555# endif
556 g_aFunctions[4].pfn = (void *)(uintptr_t)ASMGetCS(); /* SUPR0AbsKernelCS */
557 g_aFunctions[5].pfn = (void *)(uintptr_t)ASMGetSS(); /* SUPR0AbsKernelSS */
558 g_aFunctions[6].pfn = (void *)(uintptr_t)ASMGetDS(); /* SUPR0AbsKernelDS */
559 g_aFunctions[7].pfn = (void *)(uintptr_t)ASMGetES(); /* SUPR0AbsKernelES */
560 g_aFunctions[8].pfn = (void *)(uintptr_t)ASMGetFS(); /* SUPR0AbsKernelFS */
561 g_aFunctions[9].pfn = (void *)(uintptr_t)ASMGetGS(); /* SUPR0AbsKernelGS */
562#endif /* !RT_OS_DARWIN */
563 return VINF_SUCCESS;
564 }
565
566 RTSemFastMutexDestroy(pDevExt->mtxGip);
567 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
568 }
569 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
570 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
571 }
572 RTSemFastMutexDestroy(pDevExt->mtxLdr);
573 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
574 }
575 RTSpinlockDestroy(pDevExt->Spinlock);
576 pDevExt->Spinlock = NIL_RTSPINLOCK;
577 }
578#ifdef SUPDRV_WITH_RELEASE_LOGGER
579 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
580 RTLogDestroy(RTLogSetDefaultInstance(NULL));
581#endif
582
583 return rc;
584}
585
586
587/**
588 * Delete the device extension (i.e. clean up its members).
589 *
590 * @param pDevExt The device extension to delete.
591 */
592void VBOXCALL supdrvDeleteDevExt(PSUPDRVDEVEXT pDevExt)
593{
594 PSUPDRVOBJ pObj;
595 PSUPDRVUSAGE pUsage;
596
597 /*
598 * Kill mutexes and spinlocks.
599 */
600 RTSemFastMutexDestroy(pDevExt->mtxGip);
601 pDevExt->mtxGip = NIL_RTSEMFASTMUTEX;
602 RTSemFastMutexDestroy(pDevExt->mtxLdr);
603 pDevExt->mtxLdr = NIL_RTSEMFASTMUTEX;
604 RTSpinlockDestroy(pDevExt->Spinlock);
605 pDevExt->Spinlock = NIL_RTSPINLOCK;
606 RTSemFastMutexDestroy(pDevExt->mtxComponentFactory);
607 pDevExt->mtxComponentFactory = NIL_RTSEMFASTMUTEX;
608
609 /*
610 * Free lists.
611 */
612 /* objects. */
613 pObj = pDevExt->pObjs;
614#if !defined(DEBUG_bird) || !defined(RT_OS_LINUX) /* breaks unloading, temporary, remove me! */
615 Assert(!pObj); /* (can trigger on forced unloads) */
616#endif
617 pDevExt->pObjs = NULL;
618 while (pObj)
619 {
620 void *pvFree = pObj;
621 pObj = pObj->pNext;
622 RTMemFree(pvFree);
623 }
624
625 /* usage records. */
626 pUsage = pDevExt->pUsageFree;
627 pDevExt->pUsageFree = NULL;
628 while (pUsage)
629 {
630 void *pvFree = pUsage;
631 pUsage = pUsage->pNext;
632 RTMemFree(pvFree);
633 }
634
635 /* kill the GIP. */
636 supdrvGipDestroy(pDevExt);
637
638#ifdef SUPDRV_WITH_RELEASE_LOGGER
639 /* destroy the loggers. */
640 RTLogDestroy(RTLogRelSetDefaultInstance(NULL));
641 RTLogDestroy(RTLogSetDefaultInstance(NULL));
642#endif
643}
644
645
646/**
647 * Create session.
648 *
649 * @returns IPRT status code.
650 * @param pDevExt Device extension.
651 * @param fUser Flag indicating whether this is a user or kernel session.
652 * @param ppSession Where to store the pointer to the session data.
653 */
654int VBOXCALL supdrvCreateSession(PSUPDRVDEVEXT pDevExt, bool fUser, PSUPDRVSESSION *ppSession)
655{
656 /*
657 * Allocate memory for the session data.
658 */
659 int rc = VERR_NO_MEMORY;
660 PSUPDRVSESSION pSession = *ppSession = (PSUPDRVSESSION)RTMemAllocZ(sizeof(*pSession));
661 if (pSession)
662 {
663 /* Initialize session data. */
664 rc = RTSpinlockCreate(&pSession->Spinlock);
665 if (!rc)
666 {
667 rc = RTHandleTableCreateEx(&pSession->hHandleTable,
668 RTHANDLETABLE_FLAGS_LOCKED | RTHANDLETABLE_FLAGS_CONTEXT,
669 1 /*uBase*/, 32768 /*cMax*/, supdrvSessionObjHandleRetain, pSession);
670 if (RT_SUCCESS(rc))
671 {
672 Assert(pSession->Spinlock != NIL_RTSPINLOCK);
673 pSession->pDevExt = pDevExt;
674 pSession->u32Cookie = BIRD_INV;
675 /*pSession->pLdrUsage = NULL;
676 pSession->pVM = NULL;
677 pSession->pUsage = NULL;
678 pSession->pGip = NULL;
679 pSession->fGipReferenced = false;
680 pSession->Bundle.cUsed = 0; */
681 pSession->Uid = NIL_RTUID;
682 pSession->Gid = NIL_RTGID;
683 if (fUser)
684 {
685 pSession->Process = RTProcSelf();
686 pSession->R0Process = RTR0ProcHandleSelf();
687 }
688 else
689 {
690 pSession->Process = NIL_RTPROCESS;
691 pSession->R0Process = NIL_RTR0PROCESS;
692 }
693
694 LogFlow(("Created session %p initial cookie=%#x\n", pSession, pSession->u32Cookie));
695 return VINF_SUCCESS;
696 }
697
698 RTSpinlockDestroy(pSession->Spinlock);
699 }
700 RTMemFree(pSession);
701 *ppSession = NULL;
702 Log(("Failed to create spinlock, rc=%d!\n", rc));
703 }
704
705 return rc;
706}
707
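/*
 * Illustration (hypothetical caller, not part of this file): the per-OS
 * device-open code is what invokes supdrvCreateSession; a user open looks
 * roughly like this.  The function name is made up; the real callers live
 * in the SUPDrv-<os>.c sources.
 */
#if 0 /* example only */
static int supdrvExampleDeviceOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
{
    int rc = supdrvCreateSession(pDevExt, true /* fUser */, ppSession);
    if (RT_SUCCESS(rc))
        Log(("example: created session %p for process %RTproc\n", *ppSession, RTProcSelf()));
    return rc;
}
#endif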
708
709/**
710 * Shared code for cleaning up a session.
711 *
712 * @param pDevExt Device extension.
713 * @param pSession Session data.
714 * This data will be freed by this routine.
715 */
716void VBOXCALL supdrvCloseSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
717{
718 /*
719 * Cleanup the session first.
720 */
721 supdrvCleanupSession(pDevExt, pSession);
722
723 /*
724 * Free the rest of the session stuff.
725 */
726 RTSpinlockDestroy(pSession->Spinlock);
727 pSession->Spinlock = NIL_RTSPINLOCK;
728 pSession->pDevExt = NULL;
729 RTMemFree(pSession);
730 LogFlow(("supdrvCloseSession: returns\n"));
731}
732
733
734/**
735 * Shared code for cleaning up a session (but not quite freeing it).
736 *
737 * This is primarily intended for Mac OS X, where we have to clean up the memory
738 * stuff before the file handle is closed.
739 *
740 * @param pDevExt Device extension.
741 * @param pSession Session data.
742 * This data will be cleaned up by this routine, but not freed.
743 */
744void VBOXCALL supdrvCleanupSession(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
745{
746 int rc;
747 PSUPDRVBUNDLE pBundle;
748 LogFlow(("supdrvCleanupSession: pSession=%p\n", pSession));
749
750 /*
751 * Remove logger instances related to this session.
752 */
753 RTLogSetDefaultInstanceThread(NULL, (uintptr_t)pSession);
754
755 /*
756 * Destroy the handle table.
757 */
758 rc = RTHandleTableDestroy(pSession->hHandleTable, supdrvSessionObjHandleDelete, pSession);
759 AssertRC(rc);
760 pSession->hHandleTable = NIL_RTHANDLETABLE;
761
762 /*
763 * Release object references made in this session.
764 * In theory there should be no one racing us in this session.
765 */
766 Log2(("release objects - start\n"));
767 if (pSession->pUsage)
768 {
769 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
770 PSUPDRVUSAGE pUsage;
771 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
772
773 while ((pUsage = pSession->pUsage) != NULL)
774 {
775 PSUPDRVOBJ pObj = pUsage->pObj;
776 pSession->pUsage = pUsage->pNext;
777
778 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
779 if (pUsage->cUsage < pObj->cUsage)
780 {
781 pObj->cUsage -= pUsage->cUsage;
782 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
783 }
784 else
785 {
786 /* Destroy the object and free the record. */
787 if (pDevExt->pObjs == pObj)
788 pDevExt->pObjs = pObj->pNext;
789 else
790 {
791 PSUPDRVOBJ pObjPrev;
792 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
793 if (pObjPrev->pNext == pObj)
794 {
795 pObjPrev->pNext = pObj->pNext;
796 break;
797 }
798 Assert(pObjPrev);
799 }
800 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
801
802 Log(("supdrvCleanupSession: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
803 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
804 if (pObj->pfnDestructor)
805#ifdef RT_WITH_W64_UNWIND_HACK
806 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
807#else
808 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
809#endif
810 RTMemFree(pObj);
811 }
812
813 /* free it and continue. */
814 RTMemFree(pUsage);
815
816 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
817 }
818
819 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
820 AssertMsg(!pSession->pUsage, ("Some buster reregistered an object during destruction!\n"));
821 }
822 Log2(("release objects - done\n"));
823
824 /*
825 * Release memory allocated in the session.
826 *
827 * We do not serialize this as we assume that the application will
828 * not allocate memory while closing the file handle object.
829 */
830 Log2(("freeing memory:\n"));
831 pBundle = &pSession->Bundle;
832 while (pBundle)
833 {
834 PSUPDRVBUNDLE pToFree;
835 unsigned i;
836
837 /*
838 * Check and unlock all entries in the bundle.
839 */
840 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
841 {
842 if (pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ)
843 {
844 int rc;
845 Log2(("eType=%d pvR0=%p pvR3=%p cb=%ld\n", pBundle->aMem[i].eType, RTR0MemObjAddress(pBundle->aMem[i].MemObj),
846 (void *)RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3), (long)RTR0MemObjSize(pBundle->aMem[i].MemObj)));
847 if (pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ)
848 {
849 rc = RTR0MemObjFree(pBundle->aMem[i].MapObjR3, false);
850 AssertRC(rc); /** @todo figure out how to handle this. */
851 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
852 }
853 rc = RTR0MemObjFree(pBundle->aMem[i].MemObj, true /* fFreeMappings */);
854 AssertRC(rc); /** @todo figure out how to handle this. */
855 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
856 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
857 }
858 }
859
860 /*
861 * Advance and free previous bundle.
862 */
863 pToFree = pBundle;
864 pBundle = pBundle->pNext;
865
866 pToFree->pNext = NULL;
867 pToFree->cUsed = 0;
868 if (pToFree != &pSession->Bundle)
869 RTMemFree(pToFree);
870 }
871 Log2(("freeing memory - done\n"));
872
873 /*
874 * Deregister component factories.
875 */
876 RTSemFastMutexRequest(pDevExt->mtxComponentFactory);
877 Log2(("deregistering component factories:\n"));
878 if (pDevExt->pComponentFactoryHead)
879 {
880 PSUPDRVFACTORYREG pPrev = NULL;
881 PSUPDRVFACTORYREG pCur = pDevExt->pComponentFactoryHead;
882 while (pCur)
883 {
884 if (pCur->pSession == pSession)
885 {
886 /* unlink it */
887 PSUPDRVFACTORYREG pNext = pCur->pNext;
888 if (pPrev)
889 pPrev->pNext = pNext;
890 else
891 pDevExt->pComponentFactoryHead = pNext;
892
893 /* free it */
894 pCur->pNext = NULL;
895 pCur->pSession = NULL;
896 pCur->pFactory = NULL;
897 RTMemFree(pCur);
898
899 /* next */
900 pCur = pNext;
901 }
902 else
903 {
904 /* next */
905 pPrev = pCur;
906 pCur = pCur->pNext;
907 }
908 }
909 }
910 RTSemFastMutexRelease(pDevExt->mtxComponentFactory);
911 Log2(("deregistering component factories - done\n"));
912
913 /*
914 * Loaded images need to be dereferenced and possibly freed up.
915 */
916 RTSemFastMutexRequest(pDevExt->mtxLdr);
917 Log2(("freeing images:\n"));
918 if (pSession->pLdrUsage)
919 {
920 PSUPDRVLDRUSAGE pUsage = pSession->pLdrUsage;
921 pSession->pLdrUsage = NULL;
922 while (pUsage)
923 {
924 void *pvFree = pUsage;
925 PSUPDRVLDRIMAGE pImage = pUsage->pImage;
926 if (pImage->cUsage > pUsage->cUsage)
927 pImage->cUsage -= pUsage->cUsage;
928 else
929 supdrvLdrFree(pDevExt, pImage);
930 pUsage->pImage = NULL;
931 pUsage = pUsage->pNext;
932 RTMemFree(pvFree);
933 }
934 }
935 RTSemFastMutexRelease(pDevExt->mtxLdr);
936 Log2(("freeing images - done\n"));
937
938 /*
939 * Unmap the GIP.
940 */
941 Log2(("unmapping GIP:\n"));
942 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
943 {
944 SUPR0GipUnmap(pSession);
945 pSession->fGipReferenced = 0;
946 }
947 Log2(("unmapping GIP - done\n"));
948}
949
950
951/**
952 * RTHandleTableDestroy callback used by supdrvCleanupSession.
953 *
954 * @returns IPRT status code, see SUPR0ObjAddRef.
955 * @param hHandleTable The handle table handle. Ignored.
956 * @param pvObj The object pointer.
957 * @param pvCtx Context, the handle type. Ignored.
958 * @param pvUser Session pointer.
959 */
960static DECLCALLBACK(int) supdrvSessionObjHandleRetain(RTHANDLETABLE hHandleTable, void *pvObj, void *pvCtx, void *pvUser)
961{
962 NOREF(pvCtx);
963 NOREF(hHandleTable);
964 return SUPR0ObjAddRefEx(pvObj, (PSUPDRVSESSION)pvUser, true /*fNoBlocking*/);
965}
966
967
968/**
969 * RTHandleTableDestroy callback used by supdrvCleanupSession.
970 *
971 * @param hHandleTable The handle table handle. Ignored.
972 * @param h The handle value. Ignored.
973 * @param pvObj The object pointer.
974 * @param pvCtx Context, the handle type. Ignored.
975 * @param pvUser Session pointer.
976 */
977static DECLCALLBACK(void) supdrvSessionObjHandleDelete(RTHANDLETABLE hHandleTable, uint32_t h, void *pvObj, void *pvCtx, void *pvUser)
978{
979 NOREF(pvCtx);
980 NOREF(h);
981 NOREF(hHandleTable);
982 SUPR0ObjRelease(pvObj, (PSUPDRVSESSION)pvUser);
983}
984
985
986/**
987 * Fast path I/O Control worker.
988 *
989 * @returns VBox status code that should be passed down to ring-3 unchanged.
990 * @param uIOCtl Function number.
991 * @param idCpu VMCPU id.
992 * @param pDevExt Device extension.
993 * @param pSession Session data.
994 */
995int VBOXCALL supdrvIOCtlFast(uintptr_t uIOCtl, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession)
996{
997 /*
998 * We check the two prereqs after doing this only to allow the compiler to optimize things better.
999 */
1000 if (RT_LIKELY(pSession->pVM && pDevExt->pfnVMMR0EntryFast))
1001 {
1002 switch (uIOCtl)
1003 {
1004 case SUP_IOCTL_FAST_DO_RAW_RUN:
1005#ifdef RT_WITH_W64_UNWIND_HACK
1006 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
1007#else
1008 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_RAW_RUN);
1009#endif
1010 break;
1011 case SUP_IOCTL_FAST_DO_HWACC_RUN:
1012#ifdef RT_WITH_W64_UNWIND_HACK
1013 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
1014#else
1015 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_HWACC_RUN);
1016#endif
1017 break;
1018 case SUP_IOCTL_FAST_DO_NOP:
1019#ifdef RT_WITH_W64_UNWIND_HACK
1020 supdrvNtWrapVMMR0EntryFast((PFNRT)pDevExt->pfnVMMR0EntryFast, pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
1021#else
1022 pDevExt->pfnVMMR0EntryFast(pSession->pVM, idCpu, SUP_VMMR0_DO_NOP);
1023#endif
1024 break;
1025 default:
1026 return VERR_INTERNAL_ERROR;
1027 }
1028 return VINF_SUCCESS;
1029 }
1030 return VERR_INTERNAL_ERROR;
1031}
1032
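/*
 * Illustration (hedged sketch, not part of this file): the native ioctl
 * entry points route the three FAST codes straight here, without touching
 * any request buffer, and hand everything else to supdrvIOCtl below.  The
 * function name is made up; the real dispatch lives in the SUPDrv-<os>.c
 * sources.
 */
#if 0 /* example only */
static int supdrvExampleNativeIoCtl(uintptr_t uCmd, VMCPUID idCpu, PSUPDRVDEVEXT pDevExt,
                                    PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
{
    if (   uCmd == SUP_IOCTL_FAST_DO_RAW_RUN
        || uCmd == SUP_IOCTL_FAST_DO_HWACC_RUN
        || uCmd == SUP_IOCTL_FAST_DO_NOP)
        return supdrvIOCtlFast(uCmd, idCpu, pDevExt, pSession);
    return supdrvIOCtl(uCmd, pDevExt, pSession, pReqHdr);
}
#endif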
1033
1034/**
1035 * Helper for supdrvIOCtl. Check if pszStr contains any character of pszChars.
1036 * We would use strpbrk here if it were on the RedHat kABI whitelist,
1037 * see http://www.kerneldrivers.org/RHEL5.
1038 *
1039 * @return 1 if pszStr does contain any character of pszChars, 0 otherwise.
1040 * @param pszStr String to check
1041 * @param pszChars Character set
1042 */
1043static int supdrvCheckInvalidChar(const char *pszStr, const char *pszChars)
1044{
1045 int chCur;
1046 while ((chCur = *pszStr++) != '\0')
1047 {
1048 int ch;
1049 const char *psz = pszChars;
1050 while ((ch = *psz++) != '\0')
1051 if (ch == chCur)
1052 return 1;
1053
1054 }
1055 return 0;
1056}
1057
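/*
 * Return convention by example (illustration only, made-up inputs):
 *     supdrvCheckInvalidChar("VMMR0.r0", ";:()[]{}/\\|&*%#@!~`\"'") -> 0  (clean name)
 *     supdrvCheckInvalidChar("foo;bar",  ";:()[]{}/\\|&*%#@!~`\"'") -> 1  (contains ';')
 * SUP_IOCTL_LDR_OPEN below uses exactly this to reject suspicious image names.
 */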
1058
1059/**
1060 * I/O Control worker.
1061 *
1062 * @returns 0 on success.
1063 * @returns VERR_INVALID_PARAMETER if the request is invalid.
1064 *
1065 * @param uIOCtl Function number.
1066 * @param pDevExt Device extension.
1067 * @param pSession Session data.
1068 * @param pReqHdr The request header.
1069 */
1070int VBOXCALL supdrvIOCtl(uintptr_t uIOCtl, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPREQHDR pReqHdr)
1071{
1072 /*
1073 * Validate the request.
1074 */
1075 /* this first check could probably be omitted as it's also done by the OS-specific code... */
1076 if (RT_UNLIKELY( (pReqHdr->fFlags & SUPREQHDR_FLAGS_MAGIC_MASK) != SUPREQHDR_FLAGS_MAGIC
1077 || pReqHdr->cbIn < sizeof(*pReqHdr)
1078 || pReqHdr->cbOut < sizeof(*pReqHdr)))
1079 {
1080 OSDBGPRINT(("vboxdrv: Bad ioctl request header; cbIn=%#lx cbOut=%#lx fFlags=%#lx\n",
1081 (long)pReqHdr->cbIn, (long)pReqHdr->cbOut, (long)pReqHdr->fFlags));
1082 return VERR_INVALID_PARAMETER;
1083 }
1084 if (RT_UNLIKELY(uIOCtl == SUP_IOCTL_COOKIE))
1085 {
1086 if (pReqHdr->u32Cookie != SUPCOOKIE_INITIAL_COOKIE)
1087 {
1088 OSDBGPRINT(("SUP_IOCTL_COOKIE: bad cookie %#lx\n", (long)pReqHdr->u32Cookie));
1089 return VERR_INVALID_PARAMETER;
1090 }
1091 }
1092 else if (RT_UNLIKELY( pReqHdr->u32Cookie != pDevExt->u32Cookie
1093 || pReqHdr->u32SessionCookie != pSession->u32Cookie))
1094 {
1095 OSDBGPRINT(("vboxdrv: bad cookie %#lx / %#lx.\n", (long)pReqHdr->u32Cookie, (long)pReqHdr->u32SessionCookie));
1096 return VERR_INVALID_PARAMETER;
1097 }
1098
1099/*
1100 * Validation macros
1101 */
1102#define REQ_CHECK_SIZES_EX(Name, cbInExpect, cbOutExpect) \
1103 do { \
1104 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect) || pReqHdr->cbOut != (cbOutExpect))) \
1105 { \
1106 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld. cbOut=%ld expected %ld.\n", \
1107 (long)pReq->Hdr.cbIn, (long)(cbInExpect), (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1108 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1109 } \
1110 } while (0)
1111
1112#define REQ_CHECK_SIZES(Name) REQ_CHECK_SIZES_EX(Name, Name ## _SIZE_IN, Name ## _SIZE_OUT)
1113
1114#define REQ_CHECK_SIZE_IN(Name, cbInExpect) \
1115 do { \
1116 if (RT_UNLIKELY(pReqHdr->cbIn != (cbInExpect))) \
1117 { \
1118 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbIn=%ld expected %ld.\n", \
1119 (long)pReq->Hdr.cbIn, (long)(cbInExpect))); \
1120 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1121 } \
1122 } while (0)
1123
1124#define REQ_CHECK_SIZE_OUT(Name, cbOutExpect) \
1125 do { \
1126 if (RT_UNLIKELY(pReqHdr->cbOut != (cbOutExpect))) \
1127 { \
1128 OSDBGPRINT(( #Name ": Invalid input/output sizes. cbOut=%ld expected %ld.\n", \
1129 (long)pReq->Hdr.cbOut, (long)(cbOutExpect))); \
1130 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1131 } \
1132 } while (0)
1133
1134#define REQ_CHECK_EXPR(Name, expr) \
1135 do { \
1136 if (RT_UNLIKELY(!(expr))) \
1137 { \
1138 OSDBGPRINT(( #Name ": %s\n", #expr)); \
1139 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1140 } \
1141 } while (0)
1142
1143#define REQ_CHECK_EXPR_FMT(expr, fmt) \
1144 do { \
1145 if (RT_UNLIKELY(!(expr))) \
1146 { \
1147 OSDBGPRINT( fmt ); \
1148 return pReq->Hdr.rc = VERR_INVALID_PARAMETER; \
1149 } \
1150 } while (0)
1151
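/*
 * How the macros above get used (illustration, see the switch further down):
 *     REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE)
 * token-pastes into
 *     REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_FREE, SUP_IOCTL_LDR_FREE_SIZE_IN, SUP_IOCTL_LDR_FREE_SIZE_OUT)
 * i.e. it just compares pReqHdr->cbIn/cbOut against the per-ioctl size
 * constants and fails the request with VERR_INVALID_PARAMETER on a mismatch.
 */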
1152
1153 /*
1154 * The switch.
1155 */
1156 switch (SUP_CTL_CODE_NO_SIZE(uIOCtl))
1157 {
1158 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_COOKIE):
1159 {
1160 PSUPCOOKIE pReq = (PSUPCOOKIE)pReqHdr;
1161 REQ_CHECK_SIZES(SUP_IOCTL_COOKIE);
1162 if (strncmp(pReq->u.In.szMagic, SUPCOOKIE_MAGIC, sizeof(pReq->u.In.szMagic)))
1163 {
1164 OSDBGPRINT(("SUP_IOCTL_COOKIE: invalid magic %.16s\n", pReq->u.In.szMagic));
1165 pReq->Hdr.rc = VERR_INVALID_MAGIC;
1166 return 0;
1167 }
1168
1169#if 0
1170 /*
1171 * Call out to the OS specific code and let it do permission checks on the
1172 * client process.
1173 */
1174 if (!supdrvOSValidateClientProcess(pDevExt, pSession))
1175 {
1176 pReq->u.Out.u32Cookie = 0xffffffff;
1177 pReq->u.Out.u32SessionCookie = 0xffffffff;
1178 pReq->u.Out.u32SessionVersion = 0xffffffff;
1179 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1180 pReq->u.Out.pSession = NULL;
1181 pReq->u.Out.cFunctions = 0;
1182 pReq->Hdr.rc = VERR_PERMISSION_DENIED;
1183 return 0;
1184 }
1185#endif
1186
1187 /*
1188 * Match the version.
1189 * The current logic is very simple, match the major interface version.
1190 */
1191 if ( pReq->u.In.u32MinVersion > SUPDRV_IOC_VERSION
1192 || (pReq->u.In.u32MinVersion & 0xffff0000) != (SUPDRV_IOC_VERSION & 0xffff0000))
1193 {
1194 OSDBGPRINT(("SUP_IOCTL_COOKIE: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1195 pReq->u.In.u32ReqVersion, pReq->u.In.u32MinVersion, SUPDRV_IOC_VERSION));
1196 pReq->u.Out.u32Cookie = 0xffffffff;
1197 pReq->u.Out.u32SessionCookie = 0xffffffff;
1198 pReq->u.Out.u32SessionVersion = 0xffffffff;
1199 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1200 pReq->u.Out.pSession = NULL;
1201 pReq->u.Out.cFunctions = 0;
1202 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1203 return 0;
1204 }
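 /*
  * Worked example (hypothetical version values, for illustration only): with
  * SUPDRV_IOC_VERSION = 0x00070002, a client requesting a minimum of
  * 0x00070000 passes (same 0x0007 major half), while 0x00060000 or
  * 0x00080000 is rejected with VERR_VERSION_MISMATCH because the upper
  * 16 bits differ.
  */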
1205
1206 /*
1207 * Fill in return data and be gone.
1208 * N.B. The first one to change SUPDRV_IOC_VERSION shall make sure that
1209 * u32SessionVersion <= u32ReqVersion!
1210 */
1211 /** @todo Somehow validate the client and negotiate a secure cookie... */
1212 pReq->u.Out.u32Cookie = pDevExt->u32Cookie;
1213 pReq->u.Out.u32SessionCookie = pSession->u32Cookie;
1214 pReq->u.Out.u32SessionVersion = SUPDRV_IOC_VERSION;
1215 pReq->u.Out.u32DriverVersion = SUPDRV_IOC_VERSION;
1216 pReq->u.Out.pSession = pSession;
1217 pReq->u.Out.cFunctions = sizeof(g_aFunctions) / sizeof(g_aFunctions[0]);
1218 pReq->Hdr.rc = VINF_SUCCESS;
1219 return 0;
1220 }
1221
1222 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_QUERY_FUNCS(0)):
1223 {
1224 /* validate */
1225 PSUPQUERYFUNCS pReq = (PSUPQUERYFUNCS)pReqHdr;
1226 REQ_CHECK_SIZES_EX(SUP_IOCTL_QUERY_FUNCS, SUP_IOCTL_QUERY_FUNCS_SIZE_IN, SUP_IOCTL_QUERY_FUNCS_SIZE_OUT(RT_ELEMENTS(g_aFunctions)));
1227
1228 /* execute */
1229 pReq->u.Out.cFunctions = RT_ELEMENTS(g_aFunctions);
1230 memcpy(&pReq->u.Out.aFunctions[0], g_aFunctions, sizeof(g_aFunctions));
1231 pReq->Hdr.rc = VINF_SUCCESS;
1232 return 0;
1233 }
1234
1235 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_LOCK):
1236 {
1237 /* validate */
1238 PSUPPAGELOCK pReq = (PSUPPAGELOCK)pReqHdr;
1239 REQ_CHECK_SIZE_IN(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_IN);
1240 REQ_CHECK_SIZE_OUT(SUP_IOCTL_PAGE_LOCK, SUP_IOCTL_PAGE_LOCK_SIZE_OUT(pReq->u.In.cPages));
1241 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.cPages > 0);
1242 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_LOCK, pReq->u.In.pvR3 >= PAGE_SIZE);
1243
1244 /* execute */
1245 pReq->Hdr.rc = SUPR0LockMem(pSession, pReq->u.In.pvR3, pReq->u.In.cPages, &pReq->u.Out.aPages[0]);
1246 if (RT_FAILURE(pReq->Hdr.rc))
1247 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1248 return 0;
1249 }
1250
1251 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_UNLOCK):
1252 {
1253 /* validate */
1254 PSUPPAGEUNLOCK pReq = (PSUPPAGEUNLOCK)pReqHdr;
1255 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_UNLOCK);
1256
1257 /* execute */
1258 pReq->Hdr.rc = SUPR0UnlockMem(pSession, pReq->u.In.pvR3);
1259 return 0;
1260 }
1261
1262 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_ALLOC):
1263 {
1264 /* validate */
1265 PSUPCONTALLOC pReq = (PSUPCONTALLOC)pReqHdr;
1266 REQ_CHECK_SIZES(SUP_IOCTL_CONT_ALLOC);
1267
1268 /* execute */
1269 pReq->Hdr.rc = SUPR0ContAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.HCPhys);
1270 if (RT_FAILURE(pReq->Hdr.rc))
1271 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1272 return 0;
1273 }
1274
1275 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CONT_FREE):
1276 {
1277 /* validate */
1278 PSUPCONTFREE pReq = (PSUPCONTFREE)pReqHdr;
1279 REQ_CHECK_SIZES(SUP_IOCTL_CONT_FREE);
1280
1281 /* execute */
1282 pReq->Hdr.rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1283 return 0;
1284 }
1285
1286 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_OPEN):
1287 {
1288 /* validate */
1289 PSUPLDROPEN pReq = (PSUPLDROPEN)pReqHdr;
1290 REQ_CHECK_SIZES(SUP_IOCTL_LDR_OPEN);
1291 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage > 0);
1292 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.cbImage < _1M*16);
1293 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, pReq->u.In.szName[0]);
1294 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1295 REQ_CHECK_EXPR(SUP_IOCTL_LDR_OPEN, !supdrvCheckInvalidChar(pReq->u.In.szName, ";:()[]{}/\\|&*%#@!~`\"'"));
1296
1297 /* execute */
1298 pReq->Hdr.rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
1299 return 0;
1300 }
1301
1302 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_LOAD):
1303 {
1304 /* validate */
1305 PSUPLDRLOAD pReq = (PSUPLDRLOAD)pReqHdr;
1306 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->Hdr.cbIn >= sizeof(*pReq));
1307 REQ_CHECK_SIZES_EX(SUP_IOCTL_LDR_LOAD, SUP_IOCTL_LDR_LOAD_SIZE_IN(pReq->u.In.cbImage), SUP_IOCTL_LDR_LOAD_SIZE_OUT);
1308 REQ_CHECK_EXPR(SUP_IOCTL_LDR_LOAD, pReq->u.In.cSymbols <= 16384);
1309 REQ_CHECK_EXPR_FMT( !pReq->u.In.cSymbols
1310 || ( pReq->u.In.offSymbols < pReq->u.In.cbImage
1311 && pReq->u.In.offSymbols + pReq->u.In.cSymbols * sizeof(SUPLDRSYM) <= pReq->u.In.cbImage),
1312 ("SUP_IOCTL_LDR_LOAD: offSymbols=%#lx cSymbols=%#lx cbImage=%#lx\n", (long)pReq->u.In.offSymbols,
1313 (long)pReq->u.In.cSymbols, (long)pReq->u.In.cbImage));
1314 REQ_CHECK_EXPR_FMT( !pReq->u.In.cbStrTab
1315 || ( pReq->u.In.offStrTab < pReq->u.In.cbImage
1316 && pReq->u.In.offStrTab + pReq->u.In.cbStrTab <= pReq->u.In.cbImage
1317 && pReq->u.In.cbStrTab <= pReq->u.In.cbImage),
1318 ("SUP_IOCTL_LDR_LOAD: offStrTab=%#lx cbStrTab=%#lx cbImage=%#lx\n", (long)pReq->u.In.offStrTab,
1319 (long)pReq->u.In.cbStrTab, (long)pReq->u.In.cbImage));
1320
1321 if (pReq->u.In.cSymbols)
1322 {
1323 uint32_t i;
1324 PSUPLDRSYM paSyms = (PSUPLDRSYM)&pReq->u.In.achImage[pReq->u.In.offSymbols];
1325 for (i = 0; i < pReq->u.In.cSymbols; i++)
1326 {
1327 REQ_CHECK_EXPR_FMT(paSyms[i].offSymbol < pReq->u.In.cbImage,
1328 ("SUP_IOCTL_LDR_LOAD: sym #%ld: symb off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offSymbol, (long)pReq->u.In.cbImage));
1329 REQ_CHECK_EXPR_FMT(paSyms[i].offName < pReq->u.In.cbStrTab,
1330                                    ("SUP_IOCTL_LDR_LOAD: sym #%ld: name off %#lx (max=%#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1331 REQ_CHECK_EXPR_FMT(memchr(&pReq->u.In.achImage[pReq->u.In.offStrTab + paSyms[i].offName], '\0', pReq->u.In.cbStrTab - paSyms[i].offName),
1332                                    ("SUP_IOCTL_LDR_LOAD: sym #%ld: unterminated name! (%#lx / %#lx)\n", (long)i, (long)paSyms[i].offName, (long)pReq->u.In.cbStrTab));
1333 }
1334 }
1335
1336 /* execute */
1337 pReq->Hdr.rc = supdrvIOCtl_LdrLoad(pDevExt, pSession, pReq);
1338 return 0;
1339 }
1340
1341 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_FREE):
1342 {
1343 /* validate */
1344 PSUPLDRFREE pReq = (PSUPLDRFREE)pReqHdr;
1345 REQ_CHECK_SIZES(SUP_IOCTL_LDR_FREE);
1346
1347 /* execute */
1348 pReq->Hdr.rc = supdrvIOCtl_LdrFree(pDevExt, pSession, pReq);
1349 return 0;
1350 }
1351
1352 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LDR_GET_SYMBOL):
1353 {
1354 /* validate */
1355 PSUPLDRGETSYMBOL pReq = (PSUPLDRGETSYMBOL)pReqHdr;
1356 REQ_CHECK_SIZES(SUP_IOCTL_LDR_GET_SYMBOL);
1357 REQ_CHECK_EXPR(SUP_IOCTL_LDR_GET_SYMBOL, memchr(pReq->u.In.szSymbol, '\0', sizeof(pReq->u.In.szSymbol)));
1358
1359 /* execute */
1360 pReq->Hdr.rc = supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
1361 return 0;
1362 }
1363
1364 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_VMMR0(0)):
1365 {
1366 /* validate */
1367 PSUPCALLVMMR0 pReq = (PSUPCALLVMMR0)pReqHdr;
1368 Log4(("SUP_IOCTL_CALL_VMMR0: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1369 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1370
1371 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_VMMR0_SIZE(0))
1372 {
1373 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(0), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(0));
1374
1375 /* execute */
1376 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1377#ifdef RT_WITH_W64_UNWIND_HACK
1378 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1379#else
1380 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, NULL, pReq->u.In.u64Arg, pSession);
1381#endif
1382 else
1383 pReq->Hdr.rc = VERR_WRONG_ORDER;
1384 }
1385 else
1386 {
1387 PSUPVMMR0REQHDR pVMMReq = (PSUPVMMR0REQHDR)&pReq->abReqPkt[0];
1388 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR)),
1389 ("SUP_IOCTL_CALL_VMMR0: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_VMMR0_SIZE(sizeof(SUPVMMR0REQHDR))));
1390 REQ_CHECK_EXPR(SUP_IOCTL_CALL_VMMR0, pVMMReq->u32Magic == SUPVMMR0REQHDR_MAGIC);
1391 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_VMMR0, SUP_IOCTL_CALL_VMMR0_SIZE_IN(pVMMReq->cbReq), SUP_IOCTL_CALL_VMMR0_SIZE_OUT(pVMMReq->cbReq));
1392
1393 /* execute */
1394 if (RT_LIKELY(pDevExt->pfnVMMR0EntryEx))
1395#ifdef RT_WITH_W64_UNWIND_HACK
1396 pReq->Hdr.rc = supdrvNtWrapVMMR0EntryEx((PFNRT)pDevExt->pfnVMMR0EntryEx, pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1397#else
1398 pReq->Hdr.rc = pDevExt->pfnVMMR0EntryEx(pReq->u.In.pVMR0, pReq->u.In.idCpu, pReq->u.In.uOperation, pVMMReq, pReq->u.In.u64Arg, pSession);
1399#endif
1400 else
1401 pReq->Hdr.rc = VERR_WRONG_ORDER;
1402 }
1403
1404 if ( RT_FAILURE(pReq->Hdr.rc)
1405 && pReq->Hdr.rc != VERR_INTERRUPTED
1406 && pReq->Hdr.rc != VERR_TIMEOUT)
1407 Log(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1408 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1409 else
1410 Log4(("SUP_IOCTL_CALL_VMMR0: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1411 pReq->Hdr.rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1412 return 0;
1413 }
1414
1415 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GET_PAGING_MODE):
1416 {
1417 /* validate */
1418 PSUPGETPAGINGMODE pReq = (PSUPGETPAGINGMODE)pReqHdr;
1419 REQ_CHECK_SIZES(SUP_IOCTL_GET_PAGING_MODE);
1420
1421 /* execute */
1422 pReq->Hdr.rc = VINF_SUCCESS;
1423 pReq->u.Out.enmMode = SUPR0GetPagingMode();
1424 return 0;
1425 }
1426
1427 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_ALLOC):
1428 {
1429 /* validate */
1430 PSUPLOWALLOC pReq = (PSUPLOWALLOC)pReqHdr;
1431 REQ_CHECK_EXPR(SUP_IOCTL_LOW_ALLOC, pReq->Hdr.cbIn <= SUP_IOCTL_LOW_ALLOC_SIZE_IN);
1432 REQ_CHECK_SIZES_EX(SUP_IOCTL_LOW_ALLOC, SUP_IOCTL_LOW_ALLOC_SIZE_IN, SUP_IOCTL_LOW_ALLOC_SIZE_OUT(pReq->u.In.cPages));
1433
1434 /* execute */
1435 pReq->Hdr.rc = SUPR0LowAlloc(pSession, pReq->u.In.cPages, &pReq->u.Out.pvR0, &pReq->u.Out.pvR3, &pReq->u.Out.aPages[0]);
1436 if (RT_FAILURE(pReq->Hdr.rc))
1437 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1438 return 0;
1439 }
1440
1441 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOW_FREE):
1442 {
1443 /* validate */
1444 PSUPLOWFREE pReq = (PSUPLOWFREE)pReqHdr;
1445 REQ_CHECK_SIZES(SUP_IOCTL_LOW_FREE);
1446
1447 /* execute */
1448 pReq->Hdr.rc = SUPR0LowFree(pSession, (RTHCUINTPTR)pReq->u.In.pvR3);
1449 return 0;
1450 }
1451
1452 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_MAP):
1453 {
1454 /* validate */
1455 PSUPGIPMAP pReq = (PSUPGIPMAP)pReqHdr;
1456 REQ_CHECK_SIZES(SUP_IOCTL_GIP_MAP);
1457
1458 /* execute */
1459 pReq->Hdr.rc = SUPR0GipMap(pSession, &pReq->u.Out.pGipR3, &pReq->u.Out.HCPhysGip);
1460 if (RT_SUCCESS(pReq->Hdr.rc))
1461 pReq->u.Out.pGipR0 = pDevExt->pGip;
1462 return 0;
1463 }
1464
1465 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_GIP_UNMAP):
1466 {
1467 /* validate */
1468 PSUPGIPUNMAP pReq = (PSUPGIPUNMAP)pReqHdr;
1469 REQ_CHECK_SIZES(SUP_IOCTL_GIP_UNMAP);
1470
1471 /* execute */
1472 pReq->Hdr.rc = SUPR0GipUnmap(pSession);
1473 return 0;
1474 }
1475
1476 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SET_VM_FOR_FAST):
1477 {
1478 /* validate */
1479 PSUPSETVMFORFAST pReq = (PSUPSETVMFORFAST)pReqHdr;
1480 REQ_CHECK_SIZES(SUP_IOCTL_SET_VM_FOR_FAST);
1481 REQ_CHECK_EXPR_FMT( !pReq->u.In.pVMR0
1482 || ( VALID_PTR(pReq->u.In.pVMR0)
1483 && !((uintptr_t)pReq->u.In.pVMR0 & (PAGE_SIZE - 1))),
1484 ("SUP_IOCTL_SET_VM_FOR_FAST: pVMR0=%p!\n", pReq->u.In.pVMR0));
1485 /* execute */
1486 pSession->pVM = pReq->u.In.pVMR0;
1487 pReq->Hdr.rc = VINF_SUCCESS;
1488 return 0;
1489 }
1490
1491 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_ALLOC_EX):
1492 {
1493 /* validate */
1494 PSUPPAGEALLOCEX pReq = (PSUPPAGEALLOCEX)pReqHdr;
1495 REQ_CHECK_EXPR(SUP_IOCTL_PAGE_ALLOC_EX, pReq->Hdr.cbIn <= SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN);
1496 REQ_CHECK_SIZES_EX(SUP_IOCTL_PAGE_ALLOC_EX, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_IN, SUP_IOCTL_PAGE_ALLOC_EX_SIZE_OUT(pReq->u.In.cPages));
1497 REQ_CHECK_EXPR_FMT(pReq->u.In.fKernelMapping || pReq->u.In.fUserMapping,
1498 ("SUP_IOCTL_PAGE_ALLOC_EX: No mapping requested!\n"));
1499 REQ_CHECK_EXPR_FMT(pReq->u.In.fUserMapping,
1500 ("SUP_IOCTL_PAGE_ALLOC_EX: Must have user mapping!\n"));
1501 REQ_CHECK_EXPR_FMT(!pReq->u.In.fReserved0 && !pReq->u.In.fReserved1,
1502 ("SUP_IOCTL_PAGE_ALLOC_EX: fReserved0=%d fReserved1=%d\n", pReq->u.In.fReserved0, pReq->u.In.fReserved1));
1503
1504 /* execute */
1505 pReq->Hdr.rc = SUPR0PageAllocEx(pSession, pReq->u.In.cPages, 0 /* fFlags */,
1506 pReq->u.In.fUserMapping ? &pReq->u.Out.pvR3 : NULL,
1507 pReq->u.In.fKernelMapping ? &pReq->u.Out.pvR0 : NULL,
1508 &pReq->u.Out.aPages[0]);
1509 if (RT_FAILURE(pReq->Hdr.rc))
1510 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1511 return 0;
1512 }
1513
1514 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_MAP_KERNEL):
1515 {
1516 /* validate */
1517 PSUPPAGEMAPKERNEL pReq = (PSUPPAGEMAPKERNEL)pReqHdr;
1518 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_MAP_KERNEL);
1519 REQ_CHECK_EXPR_FMT(!pReq->u.In.fFlags, ("SUP_IOCTL_PAGE_MAP_KERNEL: fFlags=%#x! MBZ\n", pReq->u.In.fFlags));
1520 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_MAP_KERNEL: offSub=%#x\n", pReq->u.In.offSub));
1521 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1522 ("SUP_IOCTL_PAGE_MAP_KERNEL: cbSub=%#x\n", pReq->u.In.cbSub));
1523
1524 /* execute */
1525 pReq->Hdr.rc = SUPR0PageMapKernel(pSession, pReq->u.In.pvR3, pReq->u.In.offSub, pReq->u.In.cbSub,
1526 pReq->u.In.fFlags, &pReq->u.Out.pvR0);
1527 if (RT_FAILURE(pReq->Hdr.rc))
1528 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1529 return 0;
1530 }
1531
1532 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_PROTECT):
1533 {
1534 /* validate */
1535 PSUPPAGEPROTECT pReq = (PSUPPAGEPROTECT)pReqHdr;
1536 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_PROTECT);
1537 REQ_CHECK_EXPR_FMT(!(pReq->u.In.fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)),
1538 ("SUP_IOCTL_PAGE_PROTECT: fProt=%#x!\n", pReq->u.In.fProt));
1539 REQ_CHECK_EXPR_FMT(!(pReq->u.In.offSub & PAGE_OFFSET_MASK), ("SUP_IOCTL_PAGE_PROTECT: offSub=%#x\n", pReq->u.In.offSub));
1540 REQ_CHECK_EXPR_FMT(pReq->u.In.cbSub && !(pReq->u.In.cbSub & PAGE_OFFSET_MASK),
1541 ("SUP_IOCTL_PAGE_PROTECT: cbSub=%#x\n", pReq->u.In.cbSub));
1542
1543 /* execute */
1544 pReq->Hdr.rc = SUPR0PageProtect(pSession, pReq->u.In.pvR3, pReq->u.In.pvR0, pReq->u.In.offSub, pReq->u.In.cbSub, pReq->u.In.fProt);
1545 return 0;
1546 }
1547
1548 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_PAGE_FREE):
1549 {
1550 /* validate */
1551 PSUPPAGEFREE pReq = (PSUPPAGEFREE)pReqHdr;
1552 REQ_CHECK_SIZES(SUP_IOCTL_PAGE_FREE);
1553
1554 /* execute */
1555 pReq->Hdr.rc = SUPR0PageFree(pSession, pReq->u.In.pvR3);
1556 return 0;
1557 }
1558
1559 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_CALL_SERVICE(0)):
1560 {
1561 /* validate */
1562 PSUPCALLSERVICE pReq = (PSUPCALLSERVICE)pReqHdr;
1563 Log4(("SUP_IOCTL_CALL_SERVICE: op=%u in=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
1564 pReq->u.In.uOperation, pReq->Hdr.cbIn, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
1565
1566 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
1567 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(0), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(0));
1568 else
1569 {
1570 PSUPR0SERVICEREQHDR pSrvReq = (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0];
1571 REQ_CHECK_EXPR_FMT(pReq->Hdr.cbIn >= SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR)),
1572 ("SUP_IOCTL_CALL_SERVICE: cbIn=%#x < %#lx\n", pReq->Hdr.cbIn, SUP_IOCTL_CALL_SERVICE_SIZE(sizeof(SUPR0SERVICEREQHDR))));
1573 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, pSrvReq->u32Magic == SUPR0SERVICEREQHDR_MAGIC);
1574 REQ_CHECK_SIZES_EX(SUP_IOCTL_CALL_SERVICE, SUP_IOCTL_CALL_SERVICE_SIZE_IN(pSrvReq->cbReq), SUP_IOCTL_CALL_SERVICE_SIZE_OUT(pSrvReq->cbReq));
1575 }
1576 REQ_CHECK_EXPR(SUP_IOCTL_CALL_SERVICE, memchr(pReq->u.In.szName, '\0', sizeof(pReq->u.In.szName)));
1577
1578 /* execute */
1579 pReq->Hdr.rc = supdrvIOCtl_CallServiceModule(pDevExt, pSession, pReq);
1580 return 0;
1581 }
1582
1583 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_LOGGER_SETTINGS(0)):
1584 {
1585 /* validate */
1586 PSUPLOGGERSETTINGS pReq = (PSUPLOGGERSETTINGS)pReqHdr;
1587 size_t cbStrTab;
1588 REQ_CHECK_SIZE_OUT(SUP_IOCTL_LOGGER_SETTINGS, SUP_IOCTL_LOGGER_SETTINGS_SIZE_OUT);
1589 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->Hdr.cbIn >= SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(1));
1590 cbStrTab = pReq->Hdr.cbIn - SUP_IOCTL_LOGGER_SETTINGS_SIZE_IN(0);
1591 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offGroups < cbStrTab);
1592 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offFlags < cbStrTab);
1593 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.offDestination < cbStrTab);
1594 REQ_CHECK_EXPR_FMT(pReq->u.In.szStrings[cbStrTab - 1] == '\0',
1595 ("SUP_IOCTL_LOGGER_SETTINGS: cbIn=%#x cbStrTab=%#zx LastChar=%d\n",
1596 pReq->Hdr.cbIn, cbStrTab, pReq->u.In.szStrings[cbStrTab - 1]));
1597 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhich <= SUPLOGGERSETTINGS_WHICH_RELEASE);
1598 REQ_CHECK_EXPR(SUP_IOCTL_LOGGER_SETTINGS, pReq->u.In.fWhat <= SUPLOGGERSETTINGS_WHAT_DESTROY);
1599
1600 /* execute */
1601 pReq->Hdr.rc = supdrvIOCtl_LoggerSettings(pDevExt, pSession, pReq);
1602 return 0;
1603 }
1604
1605 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_CREATE):
1606 {
1607 /* validate */
1608 PSUPSEMCREATE pReq = (PSUPSEMCREATE)pReqHdr;
1609 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_CREATE, SUP_IOCTL_SEM_CREATE_SIZE_IN, SUP_IOCTL_SEM_CREATE_SIZE_OUT);
1610
1611 /* execute */
1612 switch (pReq->u.In.uType)
1613 {
1614 case SUP_SEM_TYPE_EVENT:
1615 {
1616 SUPSEMEVENT hEvent;
1617 pReq->Hdr.rc = SUPSemEventCreate(pSession, &hEvent);
1618 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEvent;
1619 break;
1620 }
1621
1622 case SUP_SEM_TYPE_EVENT_MULTI:
1623 {
1624 SUPSEMEVENTMULTI hEventMulti;
1625 pReq->Hdr.rc = SUPSemEventMultiCreate(pSession, &hEventMulti);
1626 pReq->u.Out.hSem = (uint32_t)(uintptr_t)hEventMulti;
1627 break;
1628 }
1629
1630 default:
1631 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
1632 break;
1633 }
1634 return 0;
1635 }
1636
1637 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_SEM_OP):
1638 {
1639 /* validate */
1640 PSUPSEMOP pReq = (PSUPSEMOP)pReqHdr;
1641 REQ_CHECK_SIZES_EX(SUP_IOCTL_SEM_OP, SUP_IOCTL_SEM_OP_SIZE_IN, SUP_IOCTL_SEM_OP_SIZE_OUT);
1642
1643 /* execute */
1644 switch (pReq->u.In.uType)
1645 {
1646 case SUP_SEM_TYPE_EVENT:
1647 {
1648 SUPSEMEVENT hEvent = (SUPSEMEVENT)(uintptr_t)pReq->u.In.hSem;
1649 switch (pReq->u.In.uOp)
1650 {
1651 case SUPSEMOP_WAIT:
1652 pReq->Hdr.rc = SUPSemEventWaitNoResume(pSession, hEvent, pReq->u.In.cMillies);
1653 break;
1654 case SUPSEMOP_SIGNAL:
1655 pReq->Hdr.rc = SUPSemEventSignal(pSession, hEvent);
1656 break;
1657 case SUPSEMOP_CLOSE:
1658 pReq->Hdr.rc = SUPSemEventClose(pSession, hEvent);
1659 break;
1660 case SUPSEMOP_RESET:
1661 default:
1662 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
1663 break;
1664 }
1665 break;
1666 }
1667
1668 case SUP_SEM_TYPE_EVENT_MULTI:
1669 {
1670 SUPSEMEVENTMULTI hEventMulti = (SUPSEMEVENTMULTI)(uintptr_t)pReq->u.In.hSem;
1671 switch (pReq->u.In.uOp)
1672 {
1673 case SUPSEMOP_WAIT:
1674 pReq->Hdr.rc = SUPSemEventMultiWaitNoResume(pSession, hEventMulti, pReq->u.In.cMillies);
1675 break;
1676 case SUPSEMOP_SIGNAL:
1677 pReq->Hdr.rc = SUPSemEventMultiSignal(pSession, hEventMulti);
1678 break;
1679 case SUPSEMOP_CLOSE:
1680 pReq->Hdr.rc = SUPSemEventMultiClose(pSession, hEventMulti);
1681 break;
1682 case SUPSEMOP_RESET:
1683 pReq->Hdr.rc = SUPSemEventMultiReset(pSession, hEventMulti);
1684 break;
1685 default:
1686 pReq->Hdr.rc = VERR_INVALID_FUNCTION;
1687 break;
1688 }
1689 break;
1690 }
1691
1692 default:
1693 pReq->Hdr.rc = VERR_INVALID_PARAMETER;
1694 break;
1695 }
1696 return 0;
1697 }
1698
1699 case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_VT_CAPS):
1700 {
1701 /* validate */
1702 PSUPVTCAPS pReq = (PSUPVTCAPS)pReqHdr;
1703 REQ_CHECK_SIZES(SUP_IOCTL_VT_CAPS);
1704 REQ_CHECK_EXPR(SUP_IOCTL_VT_CAPS, pReq->Hdr.cbIn <= SUP_IOCTL_VT_CAPS_SIZE_IN);
1705
1706 /* execute */
1707 pReq->Hdr.rc = SUPR0QueryVTCaps(pSession, &pReq->u.Out.Caps);
1708 if (RT_FAILURE(pReq->Hdr.rc))
1709 pReq->Hdr.cbOut = sizeof(pReq->Hdr);
1710 return 0;
1711 }
1712
1713 default:
1714 Log(("Unknown IOCTL %#lx\n", (long)uIOCtl));
1715 break;
1716 }
1717 return SUPDRV_ERR_GENERAL_FAILURE;
1718}
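
/*
 * Usage sketch (illustrative only, not compiled): every case above follows the
 * same validate-then-execute pattern.  SUP_IOCTL_EXAMPLE, SUPEXAMPLE and
 * supdrvIOCtl_Example() below are placeholders, not real requests; only the
 * REQ_CHECK_* macros and the header fields are taken from this file.
 */
#if 0
        case SUP_CTL_CODE_NO_SIZE(SUP_IOCTL_EXAMPLE):
        {
            /* validate: cast the common header and check the fixed sizes first. */
            PSUPEXAMPLE pReq = (PSUPEXAMPLE)pReqHdr;
            REQ_CHECK_SIZES(SUP_IOCTL_EXAMPLE);
            REQ_CHECK_EXPR(SUP_IOCTL_EXAMPLE, pReq->u.In.cItems > 0);

            /* execute: the worker's status goes into the header; returning 0 means the ioctl was handled. */
            pReq->Hdr.rc = supdrvIOCtl_Example(pDevExt, pSession, pReq);
            if (RT_FAILURE(pReq->Hdr.rc))
                pReq->Hdr.cbOut = sizeof(pReq->Hdr);
            return 0;
        }
#endif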
1719
1720
1721/**
1722 * Inter-Driver Communication (IDC) worker.
1723 *
1724 * @returns VBox status code.
1725 * @retval VINF_SUCCESS on success.
1726 * @retval VERR_INVALID_PARAMETER if the request is invalid.
1727 * @retval VERR_NOT_SUPPORTED if the request isn't supported.
1728 *
1729 * @param uReq The request (function) code.
1730 * @param   pDevExt     Device extension.
1731 * @param pSession Session data.
1732 * @param pReqHdr The request header.
1733 */
1734int VBOXCALL supdrvIDC(uintptr_t uReq, PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQHDR pReqHdr)
1735{
1736 /*
1737 * The OS specific code has already validated the pSession
1738     * pointer and that the request size is greater than or
1739     * equal to the size of the header.
1740 *
1741 * So, just check that pSession is a kernel context session.
1742 */
1743 if (RT_UNLIKELY( pSession
1744 && pSession->R0Process != NIL_RTR0PROCESS))
1745 return VERR_INVALID_PARAMETER;
1746
1747/*
1748 * Validation macro.
1749 */
1750#define REQ_CHECK_IDC_SIZE(Name, cbExpect) \
1751 do { \
1752 if (RT_UNLIKELY(pReqHdr->cb != (cbExpect))) \
1753 { \
1754 OSDBGPRINT(( #Name ": Invalid input/output sizes. cb=%ld expected %ld.\n", \
1755 (long)pReqHdr->cb, (long)(cbExpect))); \
1756 return pReqHdr->rc = VERR_INVALID_PARAMETER; \
1757 } \
1758 } while (0)
1759
1760 switch (uReq)
1761 {
1762 case SUPDRV_IDC_REQ_CONNECT:
1763 {
1764 PSUPDRVIDCREQCONNECT pReq = (PSUPDRVIDCREQCONNECT)pReqHdr;
1765 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_CONNECT, sizeof(*pReq));
1766
1767 /*
1768 * Validate the cookie and other input.
1769 */
1770 if (pReq->Hdr.pSession != NULL)
1771 {
1772 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: pSession=%p expected NULL!\n", pReq->Hdr.pSession));
1773 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1774 }
1775 if (pReq->u.In.u32MagicCookie != SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE)
1776 {
1777 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: u32MagicCookie=%#x expected %#x!\n",
1778 (unsigned)pReq->u.In.u32MagicCookie, (unsigned)SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE));
1779 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1780 }
1781 if ( pReq->u.In.uMinVersion > pReq->u.In.uReqVersion
1782 || (pReq->u.In.uMinVersion & UINT32_C(0xffff0000)) != (pReq->u.In.uReqVersion & UINT32_C(0xffff0000)))
1783 {
1784 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: uMinVersion=%#x uMaxVersion=%#x doesn't match!\n",
1785 pReq->u.In.uMinVersion, pReq->u.In.uReqVersion));
1786 return pReqHdr->rc = VERR_INVALID_PARAMETER;
1787 }
1788
1789 /*
1790 * Match the version.
1791 * The current logic is very simple, match the major interface version.
1792 */
1793 if ( pReq->u.In.uMinVersion > SUPDRV_IDC_VERSION
1794 || (pReq->u.In.uMinVersion & 0xffff0000) != (SUPDRV_IDC_VERSION & 0xffff0000))
1795 {
1796 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: Version mismatch. Requested: %#x Min: %#x Current: %#x\n",
1797 pReq->u.In.uReqVersion, pReq->u.In.uMinVersion, (unsigned)SUPDRV_IDC_VERSION));
1798 pReq->u.Out.pSession = NULL;
1799 pReq->u.Out.uSessionVersion = 0xffffffff;
1800 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1801 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1802 pReq->Hdr.rc = VERR_VERSION_MISMATCH;
1803 return VINF_SUCCESS;
1804 }
1805
1806 pReq->u.Out.pSession = NULL;
1807 pReq->u.Out.uSessionVersion = SUPDRV_IDC_VERSION;
1808 pReq->u.Out.uDriverVersion = SUPDRV_IDC_VERSION;
1809 pReq->u.Out.uDriverRevision = VBOX_SVN_REV;
1810
1811 /*
1812 * On NT we will already have a session associated with the
1813 * client, just like with the SUP_IOCTL_COOKIE request, while
1814 * the other doesn't.
1815             * the others don't.
1816#ifdef RT_OS_WINDOWS
1817 pReq->Hdr.rc = VINF_SUCCESS;
1818#else
1819 AssertReturn(!pSession, VERR_INTERNAL_ERROR);
1820 pReq->Hdr.rc = supdrvCreateSession(pDevExt, false /* fUser */, &pSession);
1821 if (RT_FAILURE(pReq->Hdr.rc))
1822 {
1823 OSDBGPRINT(("SUPDRV_IDC_REQ_CONNECT: failed to create session, rc=%d\n", pReq->Hdr.rc));
1824 return VINF_SUCCESS;
1825 }
1826#endif
1827
1828 pReq->u.Out.pSession = pSession;
1829 pReq->Hdr.pSession = pSession;
1830
1831 return VINF_SUCCESS;
1832 }
1833
1834 case SUPDRV_IDC_REQ_DISCONNECT:
1835 {
1836 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_DISCONNECT, sizeof(*pReqHdr));
1837
1838#ifdef RT_OS_WINDOWS
1839 /* Windows will destroy the session when the file object is destroyed. */
1840#else
1841 supdrvCloseSession(pDevExt, pSession);
1842#endif
1843 return pReqHdr->rc = VINF_SUCCESS;
1844 }
1845
1846 case SUPDRV_IDC_REQ_GET_SYMBOL:
1847 {
1848 PSUPDRVIDCREQGETSYM pReq = (PSUPDRVIDCREQGETSYM)pReqHdr;
1849 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_GET_SYMBOL, sizeof(*pReq));
1850
1851 pReq->Hdr.rc = supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
1852 return VINF_SUCCESS;
1853 }
1854
1855 case SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY:
1856 {
1857 PSUPDRVIDCREQCOMPREGFACTORY pReq = (PSUPDRVIDCREQCOMPREGFACTORY)pReqHdr;
1858 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_REGISTER_FACTORY, sizeof(*pReq));
1859
1860 pReq->Hdr.rc = SUPR0ComponentRegisterFactory(pSession, pReq->u.In.pFactory);
1861 return VINF_SUCCESS;
1862 }
1863
1864 case SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY:
1865 {
1866 PSUPDRVIDCREQCOMPDEREGFACTORY pReq = (PSUPDRVIDCREQCOMPDEREGFACTORY)pReqHdr;
1867 REQ_CHECK_IDC_SIZE(SUPDRV_IDC_REQ_COMPONENT_DEREGISTER_FACTORY, sizeof(*pReq));
1868
1869 pReq->Hdr.rc = SUPR0ComponentDeregisterFactory(pSession, pReq->u.In.pFactory);
1870 return VINF_SUCCESS;
1871 }
1872
1873 default:
1874 Log(("Unknown IDC %#lx\n", (long)uReq));
1875 break;
1876 }
1877
1878#undef REQ_CHECK_IDC_SIZE
1879 return VERR_NOT_SUPPORTED;
1880}
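
/*
 * Usage sketch (illustrative only, not compiled): how a kernel-side client
 * could fill in a connect request.  In reality the request travels through
 * the OS specific IDC entry point rather than supdrvIDC() directly, and the
 * helper name supdrvExampleIdcConnect() is a placeholder; the structures and
 * constants are the ones validated above.
 */
#if 0
static int supdrvExampleIdcConnect(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION *ppSession)
{
    SUPDRVIDCREQCONNECT Req;
    int rc;

    /* Header: size of the whole request, no session yet, status preset to failure. */
    Req.Hdr.cb       = sizeof(Req);
    Req.Hdr.rc       = VERR_INTERNAL_ERROR;
    Req.Hdr.pSession = NULL;

    /* Input: the magic cookie and the version range the client can work with. */
    Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE;
    Req.u.In.uMinVersion    = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000);
    Req.u.In.uReqVersion    = SUPDRV_IDC_VERSION;

    rc = supdrvIDC(SUPDRV_IDC_REQ_CONNECT, pDevExt, NULL /* pSession */, &Req.Hdr);
    if (RT_SUCCESS(rc) && RT_SUCCESS(Req.Hdr.rc))
        *ppSession = Req.u.Out.pSession;   /* kernel session used for all later requests */
    return RT_FAILURE(rc) ? rc : Req.Hdr.rc;
}
#endif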
1881
1882
1883/**
1884 * Register an object for reference counting.
1885 * The object is registered with one reference in the specified session.
1886 *
1887 * @returns Unique identifier on success (pointer).
1888 *          All future references must use this identifier.
1889 * @returns NULL on failure.
1890 * @param   pfnDestructor   The destructor function which will be called when the reference count reaches 0.
1891 * @param pvUser1 The first user argument.
1892 * @param pvUser2 The second user argument.
1893 */
1894SUPR0DECL(void *) SUPR0ObjRegister(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType, PFNSUPDRVDESTRUCTOR pfnDestructor, void *pvUser1, void *pvUser2)
1895{
1896 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
1897 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
1898 PSUPDRVOBJ pObj;
1899 PSUPDRVUSAGE pUsage;
1900
1901 /*
1902 * Validate the input.
1903 */
1904 AssertReturn(SUP_IS_SESSION_VALID(pSession), NULL);
1905 AssertReturn(enmType > SUPDRVOBJTYPE_INVALID && enmType < SUPDRVOBJTYPE_END, NULL);
1906 AssertPtrReturn(pfnDestructor, NULL);
1907
1908 /*
1909 * Allocate and initialize the object.
1910 */
1911 pObj = (PSUPDRVOBJ)RTMemAlloc(sizeof(*pObj));
1912 if (!pObj)
1913 return NULL;
1914 pObj->u32Magic = SUPDRVOBJ_MAGIC;
1915 pObj->enmType = enmType;
1916 pObj->pNext = NULL;
1917 pObj->cUsage = 1;
1918 pObj->pfnDestructor = pfnDestructor;
1919 pObj->pvUser1 = pvUser1;
1920 pObj->pvUser2 = pvUser2;
1921 pObj->CreatorUid = pSession->Uid;
1922 pObj->CreatorGid = pSession->Gid;
1923 pObj->CreatorProcess= pSession->Process;
1924 supdrvOSObjInitCreator(pObj, pSession);
1925
1926 /*
1927 * Allocate the usage record.
1928 * (We keep freed usage records around to simplify SUPR0ObjAddRefEx().)
1929 */
1930 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1931
1932 pUsage = pDevExt->pUsageFree;
1933 if (pUsage)
1934 pDevExt->pUsageFree = pUsage->pNext;
1935 else
1936 {
1937 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1938 pUsage = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsage));
1939 if (!pUsage)
1940 {
1941 RTMemFree(pObj);
1942 return NULL;
1943 }
1944 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
1945 }
1946
1947 /*
1948 * Insert the object and create the session usage record.
1949 */
1950 /* The object. */
1951 pObj->pNext = pDevExt->pObjs;
1952 pDevExt->pObjs = pObj;
1953
1954 /* The session record. */
1955 pUsage->cUsage = 1;
1956 pUsage->pObj = pObj;
1957 pUsage->pNext = pSession->pUsage;
1958 /* Log2(("SUPR0ObjRegister: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext)); */
1959 pSession->pUsage = pUsage;
1960
1961 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
1962
1963 Log(("SUPR0ObjRegister: returns %p (pvUser1=%p, pvUser=%p)\n", pObj, pvUser1, pvUser2));
1964 return pObj;
1965}
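
/*
 * Usage sketch (illustrative only, not compiled): registering a shareable
 * object with a destructor.  The destructor signature matches the invocation
 * in SUPR0ObjRelease() below; EXAMPLEPAYLOAD, the helper names and the use of
 * DECLCALLBACK are assumptions, while the SUPR0Obj* calls are from this file.
 */
#if 0
typedef struct EXAMPLEPAYLOAD { uint32_t u32Data; } EXAMPLEPAYLOAD;

static DECLCALLBACK(void) supdrvExampleObjDtor(void *pvObj, void *pvUser1, void *pvUser2)
{
    /* Called when the last reference is released; pvUser1/pvUser2 are ours. */
    RTMemFree(pvUser1);
    NOREF(pvObj); NOREF(pvUser2);
}

static void *supdrvExampleObjCreate(PSUPDRVSESSION pSession, SUPDRVOBJTYPE enmType)
{
    EXAMPLEPAYLOAD *pPayload = (EXAMPLEPAYLOAD *)RTMemAlloc(sizeof(*pPayload));
    void           *pvObj;
    if (!pPayload)
        return NULL;
    pPayload->u32Data = 42;

    /* The object starts out with one reference owned by pSession. */
    pvObj = SUPR0ObjRegister(pSession, enmType, supdrvExampleObjDtor, pPayload, NULL);
    if (!pvObj)
        RTMemFree(pPayload);
    return pvObj;
}
#endif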
1966
1967
1968/**
1969 * Increment the reference counter for the object associating the reference
1970 * with the specified session.
1971 *
1972 * @returns IPRT status code.
1973 * @param pvObj The identifier returned by SUPR0ObjRegister().
1974 * @param pSession The session which is referencing the object.
1975 *
1976 * @remarks The caller should not own any spinlocks and must carefully protect
1977 * itself against potential race with the destructor so freed memory
1978 * isn't accessed here.
1979 */
1980SUPR0DECL(int) SUPR0ObjAddRef(void *pvObj, PSUPDRVSESSION pSession)
1981{
1982 return SUPR0ObjAddRefEx(pvObj, pSession, false /* fNoBlocking */);
1983}
1984
1985
1986/**
1987 * Increment the reference counter for the object associating the reference
1988 * with the specified session.
1989 *
1990 * @returns IPRT status code.
1991 * @retval VERR_TRY_AGAIN if fNoBlocking was set and a new usage record
1992 * couldn't be allocated. (If you see this you're not doing the right
1993 * thing and it won't ever work reliably.)
1994 *
1995 * @param pvObj The identifier returned by SUPR0ObjRegister().
1996 * @param pSession The session which is referencing the object.
1997 * @param fNoBlocking Set if it's not OK to block. Never try to make the
1998 * first reference to an object in a session with this
1999 * argument set.
2000 *
2001 * @remarks The caller should not own any spinlocks and must carefully protect
2002 * itself against potential race with the destructor so freed memory
2003 * isn't accessed here.
2004 */
2005SUPR0DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
2006{
2007 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2008 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2009 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2010 int rc = VINF_SUCCESS;
2011 PSUPDRVUSAGE pUsagePre;
2012 PSUPDRVUSAGE pUsage;
2013
2014 /*
2015 * Validate the input.
2016 * Be ready for the destruction race (someone might be stuck in the
2017 * destructor waiting a lock we own).
2018 */
2019 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2020 AssertPtrReturn(pObj, VERR_INVALID_POINTER);
2021 AssertMsgReturn(pObj->u32Magic == SUPDRVOBJ_MAGIC || pObj->u32Magic == SUPDRVOBJ_MAGIC_DEAD,
2022 ("Invalid pvObj=%p magic=%#x (expected %#x or %#x)\n", pvObj, pObj->u32Magic, SUPDRVOBJ_MAGIC, SUPDRVOBJ_MAGIC_DEAD),
2023 VERR_INVALID_PARAMETER);
2024
2025 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
2026
2027 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2028 {
2029 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2030
2031 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2032 return VERR_WRONG_ORDER;
2033 }
2034
2035 /*
2036 * Preallocate the usage record if we can.
2037 */
2038 pUsagePre = pDevExt->pUsageFree;
2039 if (pUsagePre)
2040 pDevExt->pUsageFree = pUsagePre->pNext;
2041 else if (!fNoBlocking)
2042 {
2043 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2044 pUsagePre = (PSUPDRVUSAGE)RTMemAlloc(sizeof(*pUsagePre));
2045 if (!pUsagePre)
2046 return VERR_NO_MEMORY;
2047
2048 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
2049 if (RT_UNLIKELY(pObj->u32Magic != SUPDRVOBJ_MAGIC))
2050 {
2051 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2052
2053 AssertMsgFailed(("pvObj=%p magic=%#x\n", pvObj, pObj->u32Magic));
2054 return VERR_WRONG_ORDER;
2055 }
2056 }
2057
2058 /*
2059 * Reference the object.
2060 */
2061 pObj->cUsage++;
2062
2063 /*
2064 * Look for the session record.
2065 */
2066 for (pUsage = pSession->pUsage; pUsage; pUsage = pUsage->pNext)
2067 {
2068 /*Log(("SUPR0AddRef: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2069 if (pUsage->pObj == pObj)
2070 break;
2071 }
2072 if (pUsage)
2073 pUsage->cUsage++;
2074 else if (pUsagePre)
2075 {
2076 /* create a new session record. */
2077 pUsagePre->cUsage = 1;
2078 pUsagePre->pObj = pObj;
2079 pUsagePre->pNext = pSession->pUsage;
2080 pSession->pUsage = pUsagePre;
2081 /*Log(("SUPR0AddRef: pUsagePre=%p:{.pObj=%p, .pNext=%p}\n", pUsagePre, pUsagePre->pObj, pUsagePre->pNext));*/
2082
2083 pUsagePre = NULL;
2084 }
2085 else
2086 {
2087 pObj->cUsage--;
2088 rc = VERR_TRY_AGAIN;
2089 }
2090
2091 /*
2092     * Put any unused usage record into the free list.
2093 */
2094 if (pUsagePre)
2095 {
2096 pUsagePre->pNext = pDevExt->pUsageFree;
2097 pDevExt->pUsageFree = pUsagePre;
2098 }
2099
2100 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2101
2102 return rc;
2103}
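
/*
 * Usage sketch (illustrative only, not compiled): fNoBlocking=true is only
 * safe when this session already holds a reference to the object, so the
 * existing usage record can be reused without allocating; a first reference
 * must be taken with SUPR0ObjAddRef() (or fNoBlocking=false) instead.
 */
#if 0
static int supdrvExampleAddRefNoBlock(void *pvObj, PSUPDRVSESSION pSession)
{
    /* Safe only because this session took a blocking reference earlier. */
    int rc = SUPR0ObjAddRefEx(pvObj, pSession, true /* fNoBlocking */);
    AssertMsg(rc != VERR_TRY_AGAIN, ("first reference taken with fNoBlocking set!\n"));
    return rc;
}
#endif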
2104
2105
2106/**
2107 * Decrement / destroy a reference counter record for an object.
2108 *
2109 * The object is uniquely identified by pfnDestructor+pvUser1+pvUser2.
2110 *
2111 * @returns IPRT status code.
2112 * @retval VINF_SUCCESS if not destroyed.
2113 * @retval VINF_OBJECT_DESTROYED if it's destroyed by this release call.
2114 * @retval VERR_INVALID_PARAMETER if the object isn't valid. Will assert in
2115 *          strict builds.
2116 *
2117 * @param pvObj The identifier returned by SUPR0ObjRegister().
2118 * @param pSession The session which is referencing the object.
2119 */
2120SUPR0DECL(int) SUPR0ObjRelease(void *pvObj, PSUPDRVSESSION pSession)
2121{
2122 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2123 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2124 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2125 int rc = VERR_INVALID_PARAMETER;
2126 PSUPDRVUSAGE pUsage;
2127 PSUPDRVUSAGE pUsagePrev;
2128
2129 /*
2130 * Validate the input.
2131 */
2132 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2133 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2134                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2135 VERR_INVALID_PARAMETER);
2136
2137 /*
2138 * Acquire the spinlock and look for the usage record.
2139 */
2140 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
2141
2142 for (pUsagePrev = NULL, pUsage = pSession->pUsage;
2143 pUsage;
2144 pUsagePrev = pUsage, pUsage = pUsage->pNext)
2145 {
2146 /*Log2(("SUPR0ObjRelease: pUsage=%p:{.pObj=%p, .pNext=%p}\n", pUsage, pUsage->pObj, pUsage->pNext));*/
2147 if (pUsage->pObj == pObj)
2148 {
2149 rc = VINF_SUCCESS;
2150 AssertMsg(pUsage->cUsage >= 1 && pObj->cUsage >= pUsage->cUsage, ("glob %d; sess %d\n", pObj->cUsage, pUsage->cUsage));
2151 if (pUsage->cUsage > 1)
2152 {
2153 pObj->cUsage--;
2154 pUsage->cUsage--;
2155 }
2156 else
2157 {
2158 /*
2159 * Free the session record.
2160 */
2161 if (pUsagePrev)
2162 pUsagePrev->pNext = pUsage->pNext;
2163 else
2164 pSession->pUsage = pUsage->pNext;
2165 pUsage->pNext = pDevExt->pUsageFree;
2166 pDevExt->pUsageFree = pUsage;
2167
2168 /* What about the object? */
2169 if (pObj->cUsage > 1)
2170 pObj->cUsage--;
2171 else
2172 {
2173 /*
2174 * Object is to be destroyed, unlink it.
2175 */
2176 pObj->u32Magic = SUPDRVOBJ_MAGIC_DEAD;
2177 rc = VINF_OBJECT_DESTROYED;
2178 if (pDevExt->pObjs == pObj)
2179 pDevExt->pObjs = pObj->pNext;
2180 else
2181 {
2182 PSUPDRVOBJ pObjPrev;
2183 for (pObjPrev = pDevExt->pObjs; pObjPrev; pObjPrev = pObjPrev->pNext)
2184 if (pObjPrev->pNext == pObj)
2185 {
2186 pObjPrev->pNext = pObj->pNext;
2187 break;
2188 }
2189 Assert(pObjPrev);
2190 }
2191 }
2192 }
2193 break;
2194 }
2195 }
2196
2197 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
2198
2199 /*
2200 * Call the destructor and free the object if required.
2201 */
2202 if (rc == VINF_OBJECT_DESTROYED)
2203 {
2204 Log(("SUPR0ObjRelease: destroying %p/%d (%p/%p) cpid=%RTproc pid=%RTproc dtor=%p\n",
2205 pObj, pObj->enmType, pObj->pvUser1, pObj->pvUser2, pObj->CreatorProcess, RTProcSelf(), pObj->pfnDestructor));
2206 if (pObj->pfnDestructor)
2207#ifdef RT_WITH_W64_UNWIND_HACK
2208 supdrvNtWrapObjDestructor((PFNRT)pObj->pfnDestructor, pObj, pObj->pvUser1, pObj->pvUser2);
2209#else
2210 pObj->pfnDestructor(pObj, pObj->pvUser1, pObj->pvUser2);
2211#endif
2212 RTMemFree(pObj);
2213 }
2214
2215 AssertMsg(pUsage, ("pvObj=%p\n", pvObj));
2216 return rc;
2217}
2218
2219
2220/**
2221 * Verifies that the current process can access the specified object.
2222 *
2223 * @returns The following IPRT status code:
2224 * @retval VINF_SUCCESS if access was granted.
2225 * @retval VERR_PERMISSION_DENIED if denied access.
2226 * @retval VERR_INVALID_PARAMETER if invalid parameter.
2227 *
2228 * @param pvObj The identifier returned by SUPR0ObjRegister().
2229 * @param pSession The session which wishes to access the object.
2230 * @param pszObjName Object string name. This is optional and depends on the object type.
2231 *
2232 * @remark The caller is responsible for making sure the object isn't removed while
2233 * we're inside this function. If uncertain about this, just call AddRef before calling us.
2234 */
2235SUPR0DECL(int) SUPR0ObjVerifyAccess(void *pvObj, PSUPDRVSESSION pSession, const char *pszObjName)
2236{
2237 PSUPDRVOBJ pObj = (PSUPDRVOBJ)pvObj;
2238 int rc;
2239
2240 /*
2241 * Validate the input.
2242 */
2243 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2244 AssertMsgReturn(VALID_PTR(pObj) && pObj->u32Magic == SUPDRVOBJ_MAGIC,
2245                    ("Invalid pvObj=%p magic=%#x (expected %#x)\n", pvObj, pObj ? pObj->u32Magic : 0, SUPDRVOBJ_MAGIC),
2246 VERR_INVALID_PARAMETER);
2247
2248 /*
2249 * Check access. (returns true if a decision has been made.)
2250 */
2251 rc = VERR_INTERNAL_ERROR;
2252 if (supdrvOSObjCanAccess(pObj, pSession, pszObjName, &rc))
2253 return rc;
2254
2255 /*
2256 * Default policy is to allow the user to access his own
2257 * stuff but nothing else.
2258 */
2259 if (pObj->CreatorUid == pSession->Uid)
2260 return VINF_SUCCESS;
2261 return VERR_PERMISSION_DENIED;
2262}
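
/*
 * Usage sketch (illustrative only, not compiled): the typical consumer flow
 * for a shared object.  How pvObj is obtained (supdrvExampleLookup) is a
 * placeholder; only the SUPR0Obj* calls are from this file.
 */
#if 0
static int supdrvExampleUseSharedObject(PSUPDRVSESSION pSession, const char *pszName)
{
    void *pvObj = supdrvExampleLookup(pszName);   /* hypothetical lookup by name */
    int   rc;
    if (!pvObj)
        return VERR_NOT_FOUND;

    /* Reference the object for this session, then apply the access policy. */
    rc = SUPR0ObjAddRef(pvObj, pSession);
    if (RT_SUCCESS(rc))
    {
        rc = SUPR0ObjVerifyAccess(pvObj, pSession, pszName);
        if (RT_SUCCESS(rc))
        {
            /* ... use the object ... */
        }
        SUPR0ObjRelease(pvObj, pSession);         /* drop our reference again */
    }
    return rc;
}
#endif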
2263
2264
2265/**
2266 * Lock pages.
2267 *
2268 * @returns IPRT status code.
2269 * @param pSession Session to which the locked memory should be associated.
2270 * @param pvR3 Start of the memory range to lock.
2271 * This must be page aligned.
2272 * @param cPages Number of pages to lock.
2273 * @param paPages Where to put the physical addresses of locked memory.
2274 */
2275SUPR0DECL(int) SUPR0LockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages, PRTHCPHYS paPages)
2276{
2277 int rc;
2278 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2279 const size_t cb = (size_t)cPages << PAGE_SHIFT;
2280 LogFlow(("SUPR0LockMem: pSession=%p pvR3=%p cPages=%d paPages=%p\n", pSession, (void *)pvR3, cPages, paPages));
2281
2282 /*
2283 * Verify input.
2284 */
2285 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2286 AssertPtrReturn(paPages, VERR_INVALID_PARAMETER);
2287 if ( RT_ALIGN_R3PT(pvR3, PAGE_SIZE, RTR3PTR) != pvR3
2288 || !pvR3)
2289 {
2290 Log(("pvR3 (%p) must be page aligned and not NULL!\n", (void *)pvR3));
2291 return VERR_INVALID_PARAMETER;
2292 }
2293
2294 /*
2295 * Let IPRT do the job.
2296 */
2297 Mem.eType = MEMREF_TYPE_LOCKED;
2298 rc = RTR0MemObjLockUser(&Mem.MemObj, pvR3, cb, RTMEM_PROT_READ | RTMEM_PROT_WRITE, RTR0ProcHandleSelf());
2299 if (RT_SUCCESS(rc))
2300 {
2301 uint32_t iPage = cPages;
2302 AssertMsg(RTR0MemObjAddressR3(Mem.MemObj) == pvR3, ("%p == %p\n", RTR0MemObjAddressR3(Mem.MemObj), pvR3));
2303 AssertMsg(RTR0MemObjSize(Mem.MemObj) == cb, ("%x == %x\n", RTR0MemObjSize(Mem.MemObj), cb));
2304
2305 while (iPage-- > 0)
2306 {
2307 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2308 if (RT_UNLIKELY(paPages[iPage] == NIL_RTCCPHYS))
2309 {
2310 AssertMsgFailed(("iPage=%d\n", iPage));
2311 rc = VERR_INTERNAL_ERROR;
2312 break;
2313 }
2314 }
2315 if (RT_SUCCESS(rc))
2316 rc = supdrvMemAdd(&Mem, pSession);
2317 if (RT_FAILURE(rc))
2318 {
2319 int rc2 = RTR0MemObjFree(Mem.MemObj, false);
2320 AssertRC(rc2);
2321 }
2322 }
2323
2324 return rc;
2325}
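
/*
 * Usage sketch (illustrative only, not compiled): locking a page aligned
 * ring-3 buffer and collecting the host physical addresses of its pages.
 * The helper name is a placeholder; the buffer would normally arrive via
 * the SUP_IOCTL_PAGE_LOCK request handled above.
 */
#if 0
static int supdrvExampleLockUserBuffer(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t cPages)
{
    PRTHCPHYS paPages = (PRTHCPHYS)RTMemAlloc(cPages * sizeof(RTHCPHYS));
    int       rc;
    if (!paPages)
        return VERR_NO_MEMORY;

    rc = SUPR0LockMem(pSession, pvR3, cPages, paPages);
    if (RT_SUCCESS(rc))
    {
        /* paPages[0..cPages-1] now hold the physical address of each page. */
        /* ... */
        rc = SUPR0UnlockMem(pSession, pvR3);      /* unlock when done with it */
    }
    RTMemFree(paPages);
    return rc;
}
#endif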
2326
2327
2328/**
2329 * Unlocks the memory pointed to by pvR3.
2330 *
2331 * @returns IPRT status code.
2332 * @param pSession Session to which the memory was locked.
2333 * @param pvR3 Memory to unlock.
2334 */
2335SUPR0DECL(int) SUPR0UnlockMem(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2336{
2337 LogFlow(("SUPR0UnlockMem: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2338 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2339 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_LOCKED);
2340}
2341
2342
2343/**
2344 * Allocates a chunk of page aligned memory with contiguous and fixed physical
2345 * backing.
2346 *
2347 * @returns IPRT status code.
2348 * @param pSession Session data.
2349 * @param cPages Number of pages to allocate.
2350 * @param ppvR0 Where to put the address of Ring-0 mapping the allocated memory.
2351 * @param ppvR3 Where to put the address of Ring-3 mapping the allocated memory.
2352 * @param pHCPhys Where to put the physical address of allocated memory.
2353 */
2354SUPR0DECL(int) SUPR0ContAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS pHCPhys)
2355{
2356 int rc;
2357 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2358 LogFlow(("SUPR0ContAlloc: pSession=%p cPages=%d ppvR0=%p ppvR3=%p pHCPhys=%p\n", pSession, cPages, ppvR0, ppvR3, pHCPhys));
2359
2360 /*
2361 * Validate input.
2362 */
2363 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2364 if (!ppvR3 || !ppvR0 || !pHCPhys)
2365 {
2366 Log(("Null pointer. All of these should be set: pSession=%p ppvR0=%p ppvR3=%p pHCPhys=%p\n",
2367 pSession, ppvR0, ppvR3, pHCPhys));
2368 return VERR_INVALID_PARAMETER;
2369
2370 }
2371 if (cPages < 1 || cPages >= 256)
2372 {
2373 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2374 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2375 }
2376
2377 /*
2378 * Let IPRT do the job.
2379 */
2380 rc = RTR0MemObjAllocCont(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable R0 mapping */);
2381 if (RT_SUCCESS(rc))
2382 {
2383 int rc2;
2384 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2385 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2386 if (RT_SUCCESS(rc))
2387 {
2388 Mem.eType = MEMREF_TYPE_CONT;
2389 rc = supdrvMemAdd(&Mem, pSession);
2390 if (!rc)
2391 {
2392 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2393 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2394 *pHCPhys = RTR0MemObjGetPagePhysAddr(Mem.MemObj, 0);
2395 return 0;
2396 }
2397
2398 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2399 AssertRC(rc2);
2400 }
2401 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2402 AssertRC(rc2);
2403 }
2404
2405 return rc;
2406}
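
/*
 * Usage sketch (illustrative only, not compiled): allocating a physically
 * contiguous chunk (1..255 pages per the validation above), using the ring-0
 * mapping, and freeing it again by either of the two addresses.
 */
#if 0
static int supdrvExampleContAlloc(PSUPDRVSESSION pSession)
{
    RTR0PTR  pvR0;
    RTR3PTR  pvR3;
    RTHCPHYS HCPhys;
    int rc = SUPR0ContAlloc(pSession, 4 /* cPages */, &pvR0, &pvR3, &HCPhys);
    if (RT_SUCCESS(rc))
    {
        /* HCPhys is the physical address of the first page; the rest follow contiguously. */
        memset((void *)pvR0, 0, 4 * PAGE_SIZE);
        /* ... */
        rc = SUPR0ContFree(pSession, (RTHCUINTPTR)pvR3);  /* the ring-0 address works too */
    }
    return rc;
}
#endif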
2407
2408
2409/**
2410 * Frees memory allocated using SUPR0ContAlloc().
2411 *
2412 * @returns IPRT status code.
2413 * @param pSession The session to which the memory was allocated.
2414 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2415 */
2416SUPR0DECL(int) SUPR0ContFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2417{
2418 LogFlow(("SUPR0ContFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2419 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2420 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_CONT);
2421}
2422
2423
2424/**
2425 * Allocates a chunk of page aligned memory with fixed physical backing below 4GB.
2426 *
2427 * The memory isn't zeroed.
2428 *
2429 * @returns IPRT status code.
2430 * @param pSession Session data.
2431 * @param cPages Number of pages to allocate.
2432 * @param ppvR0 Where to put the address of Ring-0 mapping of the allocated memory.
2433 * @param ppvR3 Where to put the address of Ring-3 mapping of the allocated memory.
2434 * @param paPages Where to put the physical addresses of allocated memory.
2435 */
2436SUPR0DECL(int) SUPR0LowAlloc(PSUPDRVSESSION pSession, uint32_t cPages, PRTR0PTR ppvR0, PRTR3PTR ppvR3, PRTHCPHYS paPages)
2437{
2438 unsigned iPage;
2439 int rc;
2440 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2441 LogFlow(("SUPR0LowAlloc: pSession=%p cPages=%d ppvR3=%p ppvR0=%p paPages=%p\n", pSession, cPages, ppvR3, ppvR0, paPages));
2442
2443 /*
2444 * Validate input.
2445 */
2446 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2447 if (!ppvR3 || !ppvR0 || !paPages)
2448 {
2449 Log(("Null pointer. All of these should be set: pSession=%p ppvR3=%p ppvR0=%p paPages=%p\n",
2450 pSession, ppvR3, ppvR0, paPages));
2451 return VERR_INVALID_PARAMETER;
2452
2453 }
2454 if (cPages < 1 || cPages >= 256)
2455 {
2456 Log(("Illegal request cPages=%d, must be greater than 0 and smaller than 256.\n", cPages));
2457 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2458 }
2459
2460 /*
2461 * Let IPRT do the work.
2462 */
2463 rc = RTR0MemObjAllocLow(&Mem.MemObj, cPages << PAGE_SHIFT, true /* executable ring-0 mapping */);
2464 if (RT_SUCCESS(rc))
2465 {
2466 int rc2;
2467 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2468 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2469 if (RT_SUCCESS(rc))
2470 {
2471 Mem.eType = MEMREF_TYPE_LOW;
2472 rc = supdrvMemAdd(&Mem, pSession);
2473 if (!rc)
2474 {
2475 for (iPage = 0; iPage < cPages; iPage++)
2476 {
2477 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MemObj, iPage);
2478                    AssertMsg(!(paPages[iPage] & (PAGE_SIZE - 1)), ("iPage=%d Phys=%RHp\n", iPage, paPages[iPage]));
2479 }
2480 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2481 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2482 return 0;
2483 }
2484
2485 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2486 AssertRC(rc2);
2487 }
2488
2489 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2490 AssertRC(rc2);
2491 }
2492
2493 return rc;
2494}
2495
2496
2497/**
2498 * Frees memory allocated using SUPR0LowAlloc().
2499 *
2500 * @returns IPRT status code.
2501 * @param pSession The session to which the memory was allocated.
2502 * @param uPtr Pointer to the memory (ring-3 or ring-0).
2503 */
2504SUPR0DECL(int) SUPR0LowFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2505{
2506 LogFlow(("SUPR0LowFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2507 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2508 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_LOW);
2509}
2510
2511
2512
2513/**
2514 * Allocates a chunk of memory with both R0 and R3 mappings.
2515 * The memory is fixed and it's possible to query the physical addresses using SUPR0MemGetPhys().
2516 *
2517 * @returns IPRT status code.
2518 * @param pSession The session to associated the allocation with.
2519 * @param cb Number of bytes to allocate.
2520 * @param ppvR0 Where to store the address of the Ring-0 mapping.
2521 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2522 */
2523SUPR0DECL(int) SUPR0MemAlloc(PSUPDRVSESSION pSession, uint32_t cb, PRTR0PTR ppvR0, PRTR3PTR ppvR3)
2524{
2525 int rc;
2526 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2527 LogFlow(("SUPR0MemAlloc: pSession=%p cb=%d ppvR0=%p ppvR3=%p\n", pSession, cb, ppvR0, ppvR3));
2528
2529 /*
2530 * Validate input.
2531 */
2532 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2533 AssertPtrReturn(ppvR0, VERR_INVALID_POINTER);
2534 AssertPtrReturn(ppvR3, VERR_INVALID_POINTER);
2535 if (cb < 1 || cb >= _4M)
2536 {
2537 Log(("Illegal request cb=%u; must be greater than 0 and smaller than 4MB.\n", cb));
2538 return VERR_INVALID_PARAMETER;
2539 }
2540
2541 /*
2542 * Let IPRT do the work.
2543 */
2544 rc = RTR0MemObjAllocPage(&Mem.MemObj, cb, true /* executable ring-0 mapping */);
2545 if (RT_SUCCESS(rc))
2546 {
2547 int rc2;
2548 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2549 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2550 if (RT_SUCCESS(rc))
2551 {
2552 Mem.eType = MEMREF_TYPE_MEM;
2553 rc = supdrvMemAdd(&Mem, pSession);
2554 if (!rc)
2555 {
2556 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2557 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2558 return VINF_SUCCESS;
2559 }
2560
2561 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2562 AssertRC(rc2);
2563 }
2564
2565 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2566 AssertRC(rc2);
2567 }
2568
2569 return rc;
2570}
2571
2572
2573/**
2574 * Get the physical addresses of memory allocated using SUPR0MemAlloc().
2575 *
2576 * @returns IPRT status code.
2577 * @param pSession The session to which the memory was allocated.
2578 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2579 * @param paPages Where to store the physical addresses.
2580 */
2581SUPR0DECL(int) SUPR0MemGetPhys(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, PSUPPAGE paPages) /** @todo switch this bugger to RTHCPHYS */
2582{
2583 PSUPDRVBUNDLE pBundle;
2584 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2585 LogFlow(("SUPR0MemGetPhys: pSession=%p uPtr=%p paPages=%p\n", pSession, (void *)uPtr, paPages));
2586
2587 /*
2588 * Validate input.
2589 */
2590 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2591 AssertPtrReturn(paPages, VERR_INVALID_POINTER);
2592 AssertReturn(uPtr, VERR_INVALID_PARAMETER);
2593
2594 /*
2595 * Search for the address.
2596 */
2597 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2598 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2599 {
2600 if (pBundle->cUsed > 0)
2601 {
2602 unsigned i;
2603 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2604 {
2605 if ( pBundle->aMem[i].eType == MEMREF_TYPE_MEM
2606 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2607 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
2608 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2609 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr)
2610 )
2611 )
2612 {
2613 const size_t cPages = RTR0MemObjSize(pBundle->aMem[i].MemObj) >> PAGE_SHIFT;
2614 size_t iPage;
2615 for (iPage = 0; iPage < cPages; iPage++)
2616 {
2617 paPages[iPage].Phys = RTR0MemObjGetPagePhysAddr(pBundle->aMem[i].MemObj, iPage);
2618 paPages[iPage].uReserved = 0;
2619 }
2620 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2621 return VINF_SUCCESS;
2622 }
2623 }
2624 }
2625 }
2626 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2627 Log(("Failed to find %p!!!\n", (void *)uPtr));
2628 return VERR_INVALID_PARAMETER;
2629}
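
/*
 * Usage sketch (illustrative only, not compiled): pairing SUPR0MemAlloc()
 * with SUPR0MemGetPhys() and SUPR0MemFree().  The caller must supply one
 * SUPPAGE entry per page of the allocation; .Phys receives the address and
 * .uReserved is cleared, as seen above.
 */
#if 0
static int supdrvExampleMemAllocAndQuery(PSUPDRVSESSION pSession)
{
    RTR0PTR pvR0;
    RTR3PTR pvR3;
    SUPPAGE aPages[2];
    int rc = SUPR0MemAlloc(pSession, 2 * PAGE_SIZE, &pvR0, &pvR3);
    if (RT_SUCCESS(rc))
    {
        rc = SUPR0MemGetPhys(pSession, (RTHCUINTPTR)pvR0, &aPages[0]);
        if (RT_SUCCESS(rc))
        {
            /* ... aPages[0].Phys and aPages[1].Phys are the page addresses ... */
        }
        SUPR0MemFree(pSession, (RTHCUINTPTR)pvR0);
    }
    return rc;
}
#endif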
2630
2631
2632/**
2633 * Free memory allocated by SUPR0MemAlloc().
2634 *
2635 * @returns IPRT status code.
2636 * @param pSession The session owning the allocation.
2637 * @param uPtr The Ring-0 or Ring-3 address returned by SUPR0MemAlloc().
2638 */
2639SUPR0DECL(int) SUPR0MemFree(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr)
2640{
2641 LogFlow(("SUPR0MemFree: pSession=%p uPtr=%p\n", pSession, (void *)uPtr));
2642 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2643 return supdrvMemRelease(pSession, uPtr, MEMREF_TYPE_MEM);
2644}
2645
2646
2647/**
2648 * Allocates a chunk of memory with a kernel and/or a user mode mapping.
2649 *
2650 * The memory is fixed; the physical addresses of the pages are returned in the
2651 * optional paPages array.
2652 *
2653 * @returns IPRT status code.
2654 * @param pSession The session to associated the allocation with.
2655 * @param cPages The number of pages to allocate.
2656 * @param fFlags Flags, reserved for the future. Must be zero.
2657 * @param ppvR3 Where to store the address of the Ring-3 mapping.
2658 * NULL if no ring-3 mapping.
2659 * @param   ppvR0       Where to store the address of the Ring-0 mapping.
2660 * NULL if no ring-0 mapping.
2661 * @param paPages Where to store the addresses of the pages. Optional.
2662 */
2663SUPR0DECL(int) SUPR0PageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages, uint32_t fFlags, PRTR3PTR ppvR3, PRTR0PTR ppvR0, PRTHCPHYS paPages)
2664{
2665 int rc;
2666 SUPDRVMEMREF Mem = { NIL_RTR0MEMOBJ, NIL_RTR0MEMOBJ, MEMREF_TYPE_UNUSED };
2667 LogFlow(("SUPR0PageAlloc: pSession=%p cb=%d ppvR3=%p\n", pSession, cPages, ppvR3));
2668
2669 /*
2670 * Validate input. The allowed allocation size must be at least equal to the maximum guest VRAM size.
2671 */
2672 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2673 AssertPtrNullReturn(ppvR3, VERR_INVALID_POINTER);
2674 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2675 AssertReturn(ppvR3 || ppvR0, VERR_INVALID_PARAMETER);
2676 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2677 if (cPages < 1 || cPages > VBOX_MAX_ALLOC_PAGE_COUNT)
2678 {
2679 Log(("SUPR0PageAlloc: Illegal request cb=%u; must be greater than 0 and smaller than 128MB.\n", cPages));
2680 return VERR_PAGE_COUNT_OUT_OF_RANGE;
2681 }
2682
2683 /*
2684 * Let IPRT do the work.
2685 */
2686 if (ppvR0)
2687 rc = RTR0MemObjAllocPage(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, true /* fExecutable */);
2688 else
2689 rc = RTR0MemObjAllocPhysNC(&Mem.MemObj, (size_t)cPages * PAGE_SIZE, NIL_RTHCPHYS);
2690 if (RT_SUCCESS(rc))
2691 {
2692 int rc2;
2693 if (ppvR3)
2694 rc = RTR0MemObjMapUser(&Mem.MapObjR3, Mem.MemObj, (RTR3PTR)-1, 0,
2695 RTMEM_PROT_EXEC | RTMEM_PROT_WRITE | RTMEM_PROT_READ, RTR0ProcHandleSelf());
2696 else
2697 Mem.MapObjR3 = NIL_RTR0MEMOBJ;
2698 if (RT_SUCCESS(rc))
2699 {
2700 Mem.eType = MEMREF_TYPE_PAGE;
2701 rc = supdrvMemAdd(&Mem, pSession);
2702 if (!rc)
2703 {
2704 if (ppvR3)
2705 *ppvR3 = RTR0MemObjAddressR3(Mem.MapObjR3);
2706 if (ppvR0)
2707 *ppvR0 = RTR0MemObjAddress(Mem.MemObj);
2708 if (paPages)
2709 {
2710 uint32_t iPage = cPages;
2711 while (iPage-- > 0)
2712 {
2713 paPages[iPage] = RTR0MemObjGetPagePhysAddr(Mem.MapObjR3, iPage);
2714 Assert(paPages[iPage] != NIL_RTHCPHYS);
2715 }
2716 }
2717 return VINF_SUCCESS;
2718 }
2719
2720 rc2 = RTR0MemObjFree(Mem.MapObjR3, false);
2721 AssertRC(rc2);
2722 }
2723
2724 rc2 = RTR0MemObjFree(Mem.MemObj, false);
2725 AssertRC(rc2);
2726 }
2727 return rc;
2728}
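
/*
 * Usage sketch (illustrative only, not compiled): allocating pages with both
 * a ring-0 and a ring-3 mapping and collecting the page addresses.  fFlags
 * must be zero, as enforced above; the helper name is a placeholder.
 */
#if 0
static int supdrvExamplePageAllocEx(PSUPDRVSESSION pSession, uint32_t cPages)
{
    RTR3PTR   pvR3;
    RTR0PTR   pvR0;
    PRTHCPHYS paPages = (PRTHCPHYS)RTMemAlloc(cPages * sizeof(RTHCPHYS));
    int       rc;
    if (!paPages)
        return VERR_NO_MEMORY;

    rc = SUPR0PageAllocEx(pSession, cPages, 0 /* fFlags */, &pvR3, &pvR0, paPages);
    if (RT_SUCCESS(rc))
    {
        /* pvR0 and pvR3 map the same pages; paPages holds their physical addresses. */
        /* ... */
        rc = SUPR0PageFree(pSession, pvR3);       /* the allocation is keyed on pvR3 */
    }
    RTMemFree(paPages);
    return rc;
}
#endif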
2729
2730
2731/**
2732 * Maps a chunk of memory previously allocated by SUPR0PageAllocEx into kernel
2733 * space.
2734 *
2735 * @returns IPRT status code.
2736 * @param pSession The session to associated the allocation with.
2737 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
2738 * @param offSub Where to start mapping. Must be page aligned.
2739 * @param cbSub How much to map. Must be page aligned.
2740 * @param fFlags Flags, MBZ.
2741 * @param   ppvR0       Where to return the address of the ring-0 mapping on
2742 * success.
2743 */
2744SUPR0DECL(int) SUPR0PageMapKernel(PSUPDRVSESSION pSession, RTR3PTR pvR3, uint32_t offSub, uint32_t cbSub,
2745 uint32_t fFlags, PRTR0PTR ppvR0)
2746{
2747 int rc;
2748 PSUPDRVBUNDLE pBundle;
2749 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2750 RTR0MEMOBJ hMemObj = NIL_RTR0MEMOBJ;
2751 LogFlow(("SUPR0PageMapKernel: pSession=%p pvR3=%p offSub=%#x cbSub=%#x\n", pSession, pvR3, offSub, cbSub));
2752
2753 /*
2754     * Validate input.
2755 */
2756 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2757 AssertPtrNullReturn(ppvR0, VERR_INVALID_POINTER);
2758 AssertReturn(!fFlags, VERR_INVALID_PARAMETER);
2759 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2760 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2761 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2762
2763 /*
2764 * Find the memory object.
2765 */
2766 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2767 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2768 {
2769 if (pBundle->cUsed > 0)
2770 {
2771 unsigned i;
2772 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2773 {
2774 if ( ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2775 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2776 && pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2777 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3)
2778 || ( pBundle->aMem[i].eType == MEMREF_TYPE_LOCKED
2779 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2780 && pBundle->aMem[i].MapObjR3 == NIL_RTR0MEMOBJ
2781 && RTR0MemObjAddressR3(pBundle->aMem[i].MemObj) == pvR3))
2782 {
2783 hMemObj = pBundle->aMem[i].MemObj;
2784 break;
2785 }
2786 }
2787 }
2788 }
2789 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2790
2791 rc = VERR_INVALID_PARAMETER;
2792 if (hMemObj != NIL_RTR0MEMOBJ)
2793 {
2794 /*
2795         * Do some further input validations before calling IPRT.
2796 * (Cleanup is done indirectly by telling RTR0MemObjFree to include mappings.)
2797 */
2798 size_t cbMemObj = RTR0MemObjSize(hMemObj);
2799 if ( offSub < cbMemObj
2800 && cbSub <= cbMemObj
2801 && offSub + cbSub <= cbMemObj)
2802 {
2803 RTR0MEMOBJ hMapObj;
2804 rc = RTR0MemObjMapKernelEx(&hMapObj, hMemObj, (void *)-1, 0,
2805 RTMEM_PROT_READ | RTMEM_PROT_WRITE, offSub, cbSub);
2806 if (RT_SUCCESS(rc))
2807 *ppvR0 = RTR0MemObjAddress(hMapObj);
2808 }
2809 else
2810 SUPR0Printf("SUPR0PageMapKernel: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2811
2812 }
2813 return rc;
2814}
2815
2816
2817/**
2818 * Changes the page level protection of one or more pages previously allocated
2819 * by SUPR0PageAllocEx.
2820 *
2821 * @returns IPRT status code.
2822 * @param pSession The session to associated the allocation with.
2823 * @param pvR3 The ring-3 address returned by SUPR0PageAllocEx.
2824 * NIL_RTR3PTR if the ring-3 mapping should be unaffected.
2825 * @param pvR0 The ring-0 address returned by SUPR0PageAllocEx.
2826 * NIL_RTR0PTR if the ring-0 mapping should be unaffected.
2827 * @param offSub Where to start changing. Must be page aligned.
2828 * @param cbSub How much to change. Must be page aligned.
2829 * @param fProt The new page level protection, see RTMEM_PROT_*.
2830 */
2831SUPR0DECL(int) SUPR0PageProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0, uint32_t offSub, uint32_t cbSub, uint32_t fProt)
2832{
2833 int rc;
2834 PSUPDRVBUNDLE pBundle;
2835 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
2836 RTR0MEMOBJ hMemObjR0 = NIL_RTR0MEMOBJ;
2837 RTR0MEMOBJ hMemObjR3 = NIL_RTR0MEMOBJ;
2838 LogFlow(("SUPR0PageProtect: pSession=%p pvR3=%p pvR0=%p offSub=%#x cbSub=%#x fProt-%#x\n", pSession, pvR3, pvR0, offSub, cbSub, fProt));
2839
2840 /*
2841     * Validate input.
2842 */
2843 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2844 AssertReturn(!(fProt & ~(RTMEM_PROT_READ | RTMEM_PROT_WRITE | RTMEM_PROT_EXEC | RTMEM_PROT_NONE)), VERR_INVALID_PARAMETER);
2845 AssertReturn(!(offSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2846 AssertReturn(!(cbSub & PAGE_OFFSET_MASK), VERR_INVALID_PARAMETER);
2847 AssertReturn(cbSub, VERR_INVALID_PARAMETER);
2848
2849 /*
2850 * Find the memory object.
2851 */
2852 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
2853 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
2854 {
2855 if (pBundle->cUsed > 0)
2856 {
2857 unsigned i;
2858 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
2859 {
2860 if ( pBundle->aMem[i].eType == MEMREF_TYPE_PAGE
2861 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
2862 && ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
2863 || pvR3 == NIL_RTR3PTR)
2864 && ( pvR0 == NIL_RTR0PTR
2865 || RTR0MemObjAddress(pBundle->aMem[i].MemObj) == pvR0)
2866 && ( pvR3 == NIL_RTR3PTR
2867 || RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == pvR3))
2868 {
2869 if (pvR0 != NIL_RTR0PTR)
2870 hMemObjR0 = pBundle->aMem[i].MemObj;
2871 if (pvR3 != NIL_RTR3PTR)
2872 hMemObjR3 = pBundle->aMem[i].MapObjR3;
2873 break;
2874 }
2875 }
2876 }
2877 }
2878 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
2879
2880 rc = VERR_INVALID_PARAMETER;
2881 if ( hMemObjR0 != NIL_RTR0MEMOBJ
2882 || hMemObjR3 != NIL_RTR0MEMOBJ)
2883 {
2884 /*
2885         * Do some further input validations before calling IPRT.
2886 */
2887        size_t cbMemObj = hMemObjR0 != NIL_RTR0MEMOBJ ? RTR0MemObjSize(hMemObjR0) : RTR0MemObjSize(hMemObjR3);
2888 if ( offSub < cbMemObj
2889 && cbSub <= cbMemObj
2890 && offSub + cbSub <= cbMemObj)
2891 {
2892 rc = VINF_SUCCESS;
2893            if (hMemObjR3 != NIL_RTR0MEMOBJ)
2894                rc = RTR0MemObjProtect(hMemObjR3, offSub, cbSub, fProt);
2895            if (hMemObjR0 != NIL_RTR0MEMOBJ && RT_SUCCESS(rc))
2896 rc = RTR0MemObjProtect(hMemObjR0, offSub, cbSub, fProt);
2897 }
2898 else
2899            SUPR0Printf("SUPR0PageProtect: cbMemObj=%#x offSub=%#x cbSub=%#x\n", cbMemObj, offSub, cbSub);
2900
2901 }
2902 return rc;
2903
2904}
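
/*
 * Usage sketch (illustrative only, not compiled): mapping a page aligned
 * sub-range of a SUPR0PageAllocEx() allocation into kernel space and then
 * revoking write access to it.  pvR3 and pvR0 are the addresses returned by
 * SUPR0PageAllocEx(); the helper name is a placeholder.
 */
#if 0
static int supdrvExampleMapAndProtect(PSUPDRVSESSION pSession, RTR3PTR pvR3, RTR0PTR pvR0)
{
    RTR0PTR pvR0Sub;
    /* Map the second page into kernel space (offset and size must be page aligned). */
    int rc = SUPR0PageMapKernel(pSession, pvR3, PAGE_SIZE /* offSub */, PAGE_SIZE /* cbSub */,
                                0 /* fFlags */, &pvR0Sub);
    if (RT_SUCCESS(rc))
        /* Make that page read-only in the original ring-3 and ring-0 mappings. */
        rc = SUPR0PageProtect(pSession, pvR3, pvR0, PAGE_SIZE /* offSub */, PAGE_SIZE /* cbSub */,
                              RTMEM_PROT_READ);
    return rc;
}
#endif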
2905
2906
2907/**
2908 * Free memory allocated by SUPR0PageAlloc() and SUPR0PageAllocEx().
2909 *
2910 * @returns IPRT status code.
2911 * @param pSession The session owning the allocation.
2912 * @param pvR3 The Ring-3 address returned by SUPR0PageAlloc() or
2913 * SUPR0PageAllocEx().
2914 */
2915SUPR0DECL(int) SUPR0PageFree(PSUPDRVSESSION pSession, RTR3PTR pvR3)
2916{
2917 LogFlow(("SUPR0PageFree: pSession=%p pvR3=%p\n", pSession, (void *)pvR3));
2918 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2919 return supdrvMemRelease(pSession, (RTHCUINTPTR)pvR3, MEMREF_TYPE_PAGE);
2920}
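/*
 * Illustrative usage sketch, not part of the original source: releasing a page allocation
 * by its ring-3 address.  pvR3 is assumed to come from a prior SUPR0PageAlloc() or
 * SUPR0PageAllocEx() call on the same session.
 */
#if 0
static void exampleFreePages(PSUPDRVSESSION pSession, RTR3PTR pvR3)
{
    int rc = SUPR0PageFree(pSession, pvR3);
    AssertRC(rc);
}
#endif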
2921
2922
2923/**
2924 * Maps the GIP into userspace and/or gets the physical address of the GIP.
2925 *
2926 * @returns IPRT status code.
2927 * @param pSession Session to which the GIP mapping should belong.
2928 * @param ppGipR3 Where to store the address of the ring-3 mapping. (optional)
2929 * @param pHCPhysGip Where to store the physical address. (optional)
2930 *
2931 * @remark There is no reference counting on the mapping, so one call to this function
2932 * counts globally as one reference. One call to SUPR0GipUnmap() will unmap the GIP
2933 * and remove the session as a GIP user.
2934 */
2935SUPR0DECL(int) SUPR0GipMap(PSUPDRVSESSION pSession, PRTR3PTR ppGipR3, PRTHCPHYS pHCPhysGip)
2936{
2937 int rc = VINF_SUCCESS;
2938 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
2939 RTR3PTR pGip = NIL_RTR3PTR;
2940 RTHCPHYS HCPhys = NIL_RTHCPHYS;
2941 LogFlow(("SUPR0GipMap: pSession=%p ppGipR3=%p pHCPhysGip=%p\n", pSession, ppGipR3, pHCPhysGip));
2942
2943 /*
2944 * Validate
2945 */
2946 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
2947 AssertPtrNullReturn(ppGipR3, VERR_INVALID_POINTER);
2948 AssertPtrNullReturn(pHCPhysGip, VERR_INVALID_POINTER);
2949
2950 RTSemFastMutexRequest(pDevExt->mtxGip);
2951 if (pDevExt->pGip)
2952 {
2953 /*
2954 * Map it?
2955 */
2956 if (ppGipR3)
2957 {
2958 if (pSession->GipMapObjR3 == NIL_RTR0MEMOBJ)
2959 rc = RTR0MemObjMapUser(&pSession->GipMapObjR3, pDevExt->GipMemObj, (RTR3PTR)-1, 0,
2960 RTMEM_PROT_READ, RTR0ProcHandleSelf());
2961 if (RT_SUCCESS(rc))
2962 {
2963 pGip = RTR0MemObjAddressR3(pSession->GipMapObjR3);
2964 rc = VINF_SUCCESS; /** @todo remove this and replace the !rc below with RT_SUCCESS(rc). */
2965 }
2966 }
2967
2968 /*
2969 * Get physical address.
2970 */
2971 if (pHCPhysGip && !rc)
2972 HCPhys = pDevExt->HCPhysGip;
2973
2974 /*
2975 * Reference globally.
2976 */
2977 if (!pSession->fGipReferenced && !rc)
2978 {
2979 pSession->fGipReferenced = 1;
2980 pDevExt->cGipUsers++;
2981 if (pDevExt->cGipUsers == 1)
2982 {
2983 PSUPGLOBALINFOPAGE pGip = pDevExt->pGip;
2984 unsigned i;
2985
2986 LogFlow(("SUPR0GipMap: Resumes GIP updating\n"));
2987
2988 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
2989 ASMAtomicXchgU32(&pGip->aCPUs[i].u32TransactionId, pGip->aCPUs[i].u32TransactionId & ~(GIP_UPDATEHZ_RECALC_FREQ * 2 - 1));
2990 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, 0);
2991
2992 rc = RTTimerStart(pDevExt->pGipTimer, 0);
2993 AssertRC(rc); rc = VINF_SUCCESS;
2994 }
2995 }
2996 }
2997 else
2998 {
2999 rc = SUPDRV_ERR_GENERAL_FAILURE;
3000 Log(("SUPR0GipMap: GIP is not available!\n"));
3001 }
3002 RTSemFastMutexRelease(pDevExt->mtxGip);
3003
3004 /*
3005 * Write returns.
3006 */
3007 if (pHCPhysGip)
3008 *pHCPhysGip = HCPhys;
3009 if (ppGipR3)
3010 *ppGipR3 = pGip;
3011
3012#ifdef DEBUG_DARWIN_GIP
3013 OSDBGPRINT(("SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
3014#else
3015 LogFlow(( "SUPR0GipMap: returns %d *pHCPhysGip=%lx pGip=%p\n", rc, (unsigned long)HCPhys, (void *)pGip));
3016#endif
3017 return rc;
3018}
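/*
 * Illustrative usage sketch, not part of the original source: mapping the GIP for the
 * current session, picking up both outputs, and balancing the call with SUPR0GipUnmap.
 */
#if 0
static int exampleUseGip(PSUPDRVSESSION pSession)
{
    RTR3PTR  GipR3  = NIL_RTR3PTR;
    RTHCPHYS HCPhys = NIL_RTHCPHYS;
    int rc = SUPR0GipMap(pSession, &GipR3, &HCPhys);
    if (RT_SUCCESS(rc))
    {
        /* ... hand GipR3 / HCPhys to the client here ... */
        rc = SUPR0GipUnmap(pSession); /* one unmap per session, see the remark above */
    }
    return rc;
}
#endif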
3019
3020
3021/**
3022 * Unmaps any user mapping of the GIP and terminates all GIP access
3023 * from this session.
3024 *
3025 * @returns IPRT status code.
3026 * @param pSession Session to which the GIP mapping should belong.
3027 */
3028SUPR0DECL(int) SUPR0GipUnmap(PSUPDRVSESSION pSession)
3029{
3030 int rc = VINF_SUCCESS;
3031 PSUPDRVDEVEXT pDevExt = pSession->pDevExt;
3032#ifdef DEBUG_DARWIN_GIP
3033 OSDBGPRINT(("SUPR0GipUnmap: pSession=%p pGip=%p GipMapObjR3=%p\n",
3034 pSession,
3035 pSession->GipMapObjR3 != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pSession->GipMapObjR3) : NULL,
3036 pSession->GipMapObjR3));
3037#else
3038 LogFlow(("SUPR0GipUnmap: pSession=%p\n", pSession));
3039#endif
3040 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3041
3042 RTSemFastMutexRequest(pDevExt->mtxGip);
3043
3044 /*
3045 * Unmap anything?
3046 */
3047 if (pSession->GipMapObjR3 != NIL_RTR0MEMOBJ)
3048 {
3049 rc = RTR0MemObjFree(pSession->GipMapObjR3, false);
3050 AssertRC(rc);
3051 if (RT_SUCCESS(rc))
3052 pSession->GipMapObjR3 = NIL_RTR0MEMOBJ;
3053 }
3054
3055 /*
3056 * Dereference global GIP.
3057 */
3058 if (pSession->fGipReferenced && !rc)
3059 {
3060 pSession->fGipReferenced = 0;
3061 if ( pDevExt->cGipUsers > 0
3062 && !--pDevExt->cGipUsers)
3063 {
3064 LogFlow(("SUPR0GipUnmap: Suspends GIP updating\n"));
3065 rc = RTTimerStop(pDevExt->pGipTimer); AssertRC(rc); rc = VINF_SUCCESS;
3066 }
3067 }
3068
3069 RTSemFastMutexRelease(pDevExt->mtxGip);
3070
3071 return rc;
3072}
3073
3074
3075/**
3076 * Register a component factory with the support driver.
3077 *
3078 * This is currently restricted to kernel sessions only.
3079 *
3080 * @returns VBox status code.
3081 * @retval VINF_SUCCESS on success.
3082 * @retval VERR_NO_MEMORY if we're out of memory.
3083 * @retval VERR_ALREADY_EXISTS if the factory has already been registered.
3084 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
3085 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3086 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3087 *
3088 * @param pSession The SUPDRV session (must be a ring-0 session).
3089 * @param pFactory Pointer to the component factory registration structure.
3090 *
3091 * @remarks This interface is also available via SUPR0IdcComponentRegisterFactory.
3092 */
3093SUPR0DECL(int) SUPR0ComponentRegisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
3094{
3095 PSUPDRVFACTORYREG pNewReg;
3096 const char *psz;
3097 int rc;
3098
3099 /*
3100 * Validate parameters.
3101 */
3102 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3103 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
3104 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
3105 AssertPtrReturn(pFactory->pfnQueryFactoryInterface, VERR_INVALID_POINTER);
3106 psz = (const char *)memchr(pFactory->szName, '\0', sizeof(pFactory->szName));
3107 AssertReturn(psz, VERR_INVALID_PARAMETER);
3108
3109 /*
3110 * Allocate and initialize a new registration structure.
3111 */
3112 pNewReg = (PSUPDRVFACTORYREG)RTMemAlloc(sizeof(SUPDRVFACTORYREG));
3113 if (pNewReg)
3114 {
3115 pNewReg->pNext = NULL;
3116 pNewReg->pFactory = pFactory;
3117 pNewReg->pSession = pSession;
3118 pNewReg->cchName = psz - &pFactory->szName[0];
3119
3120 /*
3121 * Add it to the tail of the list after checking for prior registration.
3122 */
3123 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3124 if (RT_SUCCESS(rc))
3125 {
3126 PSUPDRVFACTORYREG pPrev = NULL;
3127 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3128 while (pCur && pCur->pFactory != pFactory)
3129 {
3130 pPrev = pCur;
3131 pCur = pCur->pNext;
3132 }
3133 if (!pCur)
3134 {
3135 if (pPrev)
3136 pPrev->pNext = pNewReg;
3137 else
3138 pSession->pDevExt->pComponentFactoryHead = pNewReg;
3139 rc = VINF_SUCCESS;
3140 }
3141 else
3142 rc = VERR_ALREADY_EXISTS;
3143
3144 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3145 }
3146
3147 if (RT_FAILURE(rc))
3148 RTMemFree(pNewReg);
3149 }
3150 else
3151 rc = VERR_NO_MEMORY;
3152 return rc;
3153}
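/*
 * Illustrative usage sketch, not part of the original source: a ring-0 component registering
 * a factory.  The callback signature is inferred from the way pfnQueryFactoryInterface is
 * invoked in SUPR0ComponentQueryFactory below; the component name is a placeholder.
 */
#if 0
static DECLCALLBACK(void *) exampleQueryFactoryInterface(PCSUPDRVFACTORY pFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid)
{
    /* Return the interface implementation when the UUID matches, NULL otherwise. */
    NOREF(pFactory); NOREF(pSession); NOREF(pszInterfaceUuid);
    return NULL;
}

static SUPDRVFACTORY g_ExampleFactory;

static int exampleRegisterFactory(PSUPDRVSESSION pSession)
{
    /* pSession must be a kernel session, see the VERR_ACCESS_DENIED check above. */
    memcpy(g_ExampleFactory.szName, "ExampleComponent", sizeof("ExampleComponent"));
    g_ExampleFactory.pfnQueryFactoryInterface = exampleQueryFactoryInterface;
    return SUPR0ComponentRegisterFactory(pSession, &g_ExampleFactory);
}
#endif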
3154
3155
3156/**
3157 * Deregister a component factory.
3158 *
3159 * @returns VBox status code.
3160 * @retval VINF_SUCCESS on success.
3161 * @retval VERR_NOT_FOUND if the factory wasn't registered.
3162 * @retval VERR_ACCESS_DENIED if it isn't a kernel session.
3163 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3164 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3165 *
3166 * @param pSession The SUPDRV session (must be a ring-0 session).
3167 * @param pFactory Pointer to the component factory registration structure
3168 * previously passed to SUPR0ComponentRegisterFactory().
3169 *
3170 * @remarks This interface is also available via SUPR0IdcComponentDeregisterFactory.
3171 */
3172SUPR0DECL(int) SUPR0ComponentDeregisterFactory(PSUPDRVSESSION pSession, PCSUPDRVFACTORY pFactory)
3173{
3174 int rc;
3175
3176 /*
3177 * Validate parameters.
3178 */
3179 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3180 AssertReturn(pSession->R0Process == NIL_RTR0PROCESS, VERR_ACCESS_DENIED);
3181 AssertPtrReturn(pFactory, VERR_INVALID_POINTER);
3182
3183 /*
3184 * Take the lock and look for the registration record.
3185 */
3186 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3187 if (RT_SUCCESS(rc))
3188 {
3189 PSUPDRVFACTORYREG pPrev = NULL;
3190 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3191 while (pCur && pCur->pFactory != pFactory)
3192 {
3193 pPrev = pCur;
3194 pCur = pCur->pNext;
3195 }
3196 if (pCur)
3197 {
3198 if (!pPrev)
3199 pSession->pDevExt->pComponentFactoryHead = pCur->pNext;
3200 else
3201 pPrev->pNext = pCur->pNext;
3202
3203 pCur->pNext = NULL;
3204 pCur->pFactory = NULL;
3205 pCur->pSession = NULL;
3206 rc = VINF_SUCCESS;
3207 }
3208 else
3209 rc = VERR_NOT_FOUND;
3210
3211 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3212
3213 RTMemFree(pCur);
3214 }
3215 return rc;
3216}
3217
3218
3219/**
3220 * Queries a component factory.
3221 *
3222 * @returns VBox status code.
3223 * @retval VERR_INVALID_PARAMETER on invalid parameter.
3224 * @retval VERR_INVALID_POINTER on invalid pointer parameter.
3225 * @retval VERR_SUPDRV_COMPONENT_NOT_FOUND if the component factory wasn't found.
3226 * @retval VERR_SUPDRV_INTERFACE_NOT_SUPPORTED if the interface wasn't supported.
3227 *
3228 * @param pSession The SUPDRV session.
3229 * @param pszName The name of the component factory.
3230 * @param pszInterfaceUuid The UUID of the factory interface (stringified).
3231 * @param ppvFactoryIf Where to store the factory interface.
3232 */
3233SUPR0DECL(int) SUPR0ComponentQueryFactory(PSUPDRVSESSION pSession, const char *pszName, const char *pszInterfaceUuid, void **ppvFactoryIf)
3234{
3235 const char *pszEnd;
3236 size_t cchName;
3237 int rc;
3238
3239 /*
3240 * Validate parameters.
3241 */
3242 AssertReturn(SUP_IS_SESSION_VALID(pSession), VERR_INVALID_PARAMETER);
3243
3244 AssertPtrReturn(pszName, VERR_INVALID_POINTER);
3245 pszEnd = memchr(pszName, '\0', RT_SIZEOFMEMB(SUPDRVFACTORY, szName));
3246 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3247 cchName = pszEnd - pszName;
3248
3249 AssertPtrReturn(pszInterfaceUuid, VERR_INVALID_POINTER);
3250 pszEnd = memchr(pszInterfaceUuid, '\0', RTUUID_STR_LENGTH);
3251 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3252
3253 AssertPtrReturn(ppvFactoryIf, VERR_INVALID_POINTER);
3254 *ppvFactoryIf = NULL;
3255
3256 /*
3257 * Take the lock and try all factories by this name.
3258 */
3259 rc = RTSemFastMutexRequest(pSession->pDevExt->mtxComponentFactory);
3260 if (RT_SUCCESS(rc))
3261 {
3262 PSUPDRVFACTORYREG pCur = pSession->pDevExt->pComponentFactoryHead;
3263 rc = VERR_SUPDRV_COMPONENT_NOT_FOUND;
3264 while (pCur)
3265 {
3266 if ( pCur->cchName == cchName
3267 && !memcmp(pCur->pFactory->szName, pszName, cchName))
3268 {
3269#ifdef RT_WITH_W64_UNWIND_HACK
3270 void *pvFactory = supdrvNtWrapQueryFactoryInterface((PFNRT)pCur->pFactory->pfnQueryFactoryInterface, pCur->pFactory, pSession, pszInterfaceUuid);
3271#else
3272 void *pvFactory = pCur->pFactory->pfnQueryFactoryInterface(pCur->pFactory, pSession, pszInterfaceUuid);
3273#endif
3274 if (pvFactory)
3275 {
3276 *ppvFactoryIf = pvFactory;
3277 rc = VINF_SUCCESS;
3278 break;
3279 }
3280 rc = VERR_SUPDRV_INTERFACE_NOT_SUPPORTED;
3281 }
3282
3283 /* next */
3284 pCur = pCur->pNext;
3285 }
3286
3287 RTSemFastMutexRelease(pSession->pDevExt->mtxComponentFactory);
3288 }
3289 return rc;
3290}
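/*
 * Illustrative usage sketch, not part of the original source: querying a factory interface
 * by name and UUID.  Both strings are placeholders; a real caller would use the component's
 * published name and the UUID of the interface it wants.
 */
#if 0
static int exampleQueryFactory(PSUPDRVSESSION pSession)
{
    void *pvIf = NULL;
    int rc = SUPR0ComponentQueryFactory(pSession, "ExampleComponent",
                                        "12345678-1234-1234-1234-123456789abc", &pvIf);
    if (RT_SUCCESS(rc))
    {
        /* pvIf now points to the component specific interface structure. */
    }
    return rc;
}
#endif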
3291
3292
3293/**
3294 * Adds a memory object to the session.
3295 *
3296 * @returns IPRT status code.
3297 * @param pMem Memory tracking structure containing the
3298 * information to track.
3299 * @param pSession The session.
3300 */
3301static int supdrvMemAdd(PSUPDRVMEMREF pMem, PSUPDRVSESSION pSession)
3302{
3303 PSUPDRVBUNDLE pBundle;
3304 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3305
3306 /*
3307 * Find free entry and record the allocation.
3308 */
3309 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3310 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3311 {
3312 if (pBundle->cUsed < RT_ELEMENTS(pBundle->aMem))
3313 {
3314 unsigned i;
3315 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3316 {
3317 if (pBundle->aMem[i].MemObj == NIL_RTR0MEMOBJ)
3318 {
3319 pBundle->cUsed++;
3320 pBundle->aMem[i] = *pMem;
3321 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3322 return VINF_SUCCESS;
3323 }
3324 }
3325 AssertFailed(); /* !!this can't be happening!!! */
3326 }
3327 }
3328 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3329
3330 /*
3331 * Need to allocate a new bundle.
3332 * Insert into the last entry in the bundle.
3333 */
3334 pBundle = (PSUPDRVBUNDLE)RTMemAllocZ(sizeof(*pBundle));
3335 if (!pBundle)
3336 return VERR_NO_MEMORY;
3337
3338 /* take last entry. */
3339 pBundle->cUsed++;
3340 pBundle->aMem[RT_ELEMENTS(pBundle->aMem) - 1] = *pMem;
3341
3342 /* insert into list. */
3343 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3344 pBundle->pNext = pSession->Bundle.pNext;
3345 pSession->Bundle.pNext = pBundle;
3346 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3347
3348 return VINF_SUCCESS;
3349}
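/*
 * Illustrative usage sketch, not part of the original source: the pattern the page and
 * memory allocators follow - fill in a SUPDRVMEMREF for freshly created memory objects
 * and hand it to supdrvMemAdd() for per-session tracking.  Only the members referenced
 * in this file are initialized here.
 */
#if 0
static int exampleTrackAllocation(PSUPDRVSESSION pSession, RTR0MEMOBJ hMemObj, RTR0MEMOBJ hMapObjR3)
{
    SUPDRVMEMREF Mem;
    Mem.MemObj   = hMemObj;
    Mem.MapObjR3 = hMapObjR3;
    Mem.eType    = MEMREF_TYPE_PAGE;
    return supdrvMemAdd(&Mem, pSession);
}
#endif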
3350
3351
3352/**
3353 * Releases a memory object referenced by pointer and type.
3354 *
3355 * @returns IPRT status code.
3356 * @param pSession Session data.
3357 * @param uPtr Pointer to memory. This is matched against both the R0 and R3 addresses.
3358 * @param eType Memory type.
3359 */
3360static int supdrvMemRelease(PSUPDRVSESSION pSession, RTHCUINTPTR uPtr, SUPDRVMEMREFTYPE eType)
3361{
3362 PSUPDRVBUNDLE pBundle;
3363 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3364
3365 /*
3366 * Validate input.
3367 */
3368 if (!uPtr)
3369 {
3370 Log(("Illegal address %p\n", (void *)uPtr));
3371 return VERR_INVALID_PARAMETER;
3372 }
3373
3374 /*
3375 * Search for the address.
3376 */
3377 RTSpinlockAcquire(pSession->Spinlock, &SpinlockTmp);
3378 for (pBundle = &pSession->Bundle; pBundle; pBundle = pBundle->pNext)
3379 {
3380 if (pBundle->cUsed > 0)
3381 {
3382 unsigned i;
3383 for (i = 0; i < RT_ELEMENTS(pBundle->aMem); i++)
3384 {
3385 if ( pBundle->aMem[i].eType == eType
3386 && pBundle->aMem[i].MemObj != NIL_RTR0MEMOBJ
3387 && ( (RTHCUINTPTR)RTR0MemObjAddress(pBundle->aMem[i].MemObj) == uPtr
3388 || ( pBundle->aMem[i].MapObjR3 != NIL_RTR0MEMOBJ
3389 && RTR0MemObjAddressR3(pBundle->aMem[i].MapObjR3) == uPtr))
3390 )
3391 {
3392 /* Make a copy of it and release it outside the spinlock. */
3393 SUPDRVMEMREF Mem = pBundle->aMem[i];
3394 pBundle->aMem[i].eType = MEMREF_TYPE_UNUSED;
3395 pBundle->aMem[i].MemObj = NIL_RTR0MEMOBJ;
3396 pBundle->aMem[i].MapObjR3 = NIL_RTR0MEMOBJ;
3397 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3398
3399 if (Mem.MapObjR3 != NIL_RTR0MEMOBJ)
3400 {
3401 int rc = RTR0MemObjFree(Mem.MapObjR3, false);
3402 AssertRC(rc); /** @todo figure out how to handle this. */
3403 }
3404 if (Mem.MemObj != NIL_RTR0MEMOBJ)
3405 {
3406 int rc = RTR0MemObjFree(Mem.MemObj, true /* fFreeMappings */);
3407 AssertRC(rc); /** @todo figure out how to handle this. */
3408 }
3409 return VINF_SUCCESS;
3410 }
3411 }
3412 }
3413 }
3414 RTSpinlockRelease(pSession->Spinlock, &SpinlockTmp);
3415 Log(("Failed to find %p!!! (eType=%d)\n", (void *)uPtr, eType));
3416 return VERR_INVALID_PARAMETER;
3417}
3418
3419
3420/**
3421 * Opens an image. If it's the first time it's opened the caller must upload
3422 * the bits using the supdrvIOCtl_LdrLoad() / SUPDRV_IOCTL_LDR_LOAD function.
3423 *
3424 * This is the 1st step of the loading.
3425 *
3426 * @returns IPRT status code.
3427 * @param pDevExt Device globals.
3428 * @param pSession Session data.
3429 * @param pReq The open request.
3430 */
3431static int supdrvIOCtl_LdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
3432{
3433 PSUPDRVLDRIMAGE pImage;
3434 unsigned cb;
3435 void *pv;
3436 size_t cchName = strlen(pReq->u.In.szName); /* (caller checked < 32). */
3437 LogFlow(("supdrvIOCtl_LdrOpen: szName=%s cbImage=%d\n", pReq->u.In.szName, pReq->u.In.cbImage));
3438
3439 /*
3440 * Check if we got an instance of the image already.
3441 */
3442 RTSemFastMutexRequest(pDevExt->mtxLdr);
3443 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3444 {
3445 if ( pImage->szName[cchName] == '\0'
3446 && !memcmp(pImage->szName, pReq->u.In.szName, cchName))
3447 {
3448 pImage->cUsage++;
3449 pReq->u.Out.pvImageBase = pImage->pvImage;
3450 pReq->u.Out.fNeedsLoading = pImage->uState == SUP_IOCTL_LDR_OPEN;
3451 supdrvLdrAddUsage(pSession, pImage);
3452 RTSemFastMutexRelease(pDevExt->mtxLdr);
3453 return VINF_SUCCESS;
3454 }
3455 }
3456 /* (not found - add it!) */
3457
3458 /*
3459 * Allocate memory.
3460 */
3461 cb = pReq->u.In.cbImage + sizeof(SUPDRVLDRIMAGE) + 31;
3462 pv = RTMemExecAlloc(cb);
3463 if (!pv)
3464 {
3465 RTSemFastMutexRelease(pDevExt->mtxLdr);
3466 Log(("supdrvIOCtl_LdrOpen: RTMemExecAlloc(%u) failed\n", cb));
3467 return VERR_NO_MEMORY;
3468 }
3469
3470 /*
3471 * Setup and link in the LDR stuff.
3472 */
3473 pImage = (PSUPDRVLDRIMAGE)pv;
3474 pImage->pvImage = RT_ALIGN_P(pImage + 1, 32);
3475 pImage->cbImage = pReq->u.In.cbImage;
3476 pImage->pfnModuleInit = NULL;
3477 pImage->pfnModuleTerm = NULL;
3478 pImage->pfnServiceReqHandler = NULL;
3479 pImage->uState = SUP_IOCTL_LDR_OPEN;
3480 pImage->cUsage = 1;
3481 memcpy(pImage->szName, pReq->u.In.szName, cchName + 1);
3482
3483 pImage->pNext = pDevExt->pLdrImages;
3484 pDevExt->pLdrImages = pImage;
3485
3486 supdrvLdrAddUsage(pSession, pImage);
3487
3488 pReq->u.Out.pvImageBase = pImage->pvImage;
3489 pReq->u.Out.fNeedsLoading = true;
3490 RTSemFastMutexRelease(pDevExt->mtxLdr);
3491
3492#if defined(RT_OS_WINDOWS) && defined(DEBUG)
3493 SUPR0Printf("VBoxDrv: windbg> .reload /f %s=%#p\n", pImage->szName, pImage->pvImage);
3494#endif
3495 return VINF_SUCCESS;
3496}
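/*
 * Illustrative sketch, not part of the original source: the SUPLDROPEN fields this worker
 * consumes and produces, as the ioctl dispatcher would see them.  Types and buffer sizes of
 * the request members are assumptions; only the fields used above are shown.
 */
#if 0
static int exampleLdrOpen(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDROPEN pReq)
{
    int rc;
    memcpy(pReq->u.In.szName, "VMMR0.r0", sizeof("VMMR0.r0")); /* image name, checked to be < 32 chars by the caller */
    pReq->u.In.cbImage = _1M;                                  /* placeholder image size */
    rc = supdrvIOCtl_LdrOpen(pDevExt, pSession, pReq);
    if (RT_SUCCESS(rc) && !pReq->u.Out.fNeedsLoading)
    {
        /* Already loaded by someone else: pReq->u.Out.pvImageBase is valid and LDR_LOAD can be skipped. */
    }
    return rc;
}
#endif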
3497
3498
3499/**
3500 * Loads the image bits.
3501 *
3502 * This is the 2nd step of the loading.
3503 *
3504 * @returns IPRT status code.
3505 * @param pDevExt Device globals.
3506 * @param pSession Session data.
3507 * @param pReq The request.
3508 */
3509static int supdrvIOCtl_LdrLoad(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRLOAD pReq)
3510{
3511 PSUPDRVLDRUSAGE pUsage;
3512 PSUPDRVLDRIMAGE pImage;
3513 int rc;
3514 LogFlow(("supdrvIOCtl_LdrLoad: pvImageBase=%p cbImage=%d\n", pReq->u.In.pvImageBase, pReq->u.In.cbImage));
3515
3516 /*
3517 * Find the ldr image.
3518 */
3519 RTSemFastMutexRequest(pDevExt->mtxLdr);
3520 pUsage = pSession->pLdrUsage;
3521 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3522 pUsage = pUsage->pNext;
3523 if (!pUsage)
3524 {
3525 RTSemFastMutexRelease(pDevExt->mtxLdr);
3526 Log(("SUP_IOCTL_LDR_LOAD: couldn't find image!\n"));
3527 return VERR_INVALID_HANDLE;
3528 }
3529 pImage = pUsage->pImage;
3530 if (pImage->cbImage != pReq->u.In.cbImage)
3531 {
3532 RTSemFastMutexRelease(pDevExt->mtxLdr);
3533 Log(("SUP_IOCTL_LDR_LOAD: image size mismatch!! %d(prep) != %d(load)\n", pImage->cbImage, pReq->u.In.cbImage));
3534 return VERR_INVALID_HANDLE;
3535 }
3536 if (pImage->uState != SUP_IOCTL_LDR_OPEN)
3537 {
3538 unsigned uState = pImage->uState;
3539 RTSemFastMutexRelease(pDevExt->mtxLdr);
3540 if (uState != SUP_IOCTL_LDR_LOAD)
3541 AssertMsgFailed(("SUP_IOCTL_LDR_LOAD: invalid image state %d (%#x)!\n", uState, uState));
3542 return SUPDRV_ERR_ALREADY_LOADED;
3543 }
3544 switch (pReq->u.In.eEPType)
3545 {
3546 case SUPLDRLOADEP_NOTHING:
3547 break;
3548
3549 case SUPLDRLOADEP_VMMR0:
3550 if ( !pReq->u.In.EP.VMMR0.pvVMMR0
3551 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryInt
3552 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryFast
3553 || !pReq->u.In.EP.VMMR0.pvVMMR0EntryEx)
3554 {
3555 RTSemFastMutexRelease(pDevExt->mtxLdr);
3556 Log(("NULL pointer: pvVMMR0=%p pvVMMR0EntryInt=%p pvVMMR0EntryFast=%p pvVMMR0EntryEx=%p!\n",
3557 pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3558 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3559 return VERR_INVALID_PARAMETER;
3560 }
3561 /** @todo validate pReq->u.In.EP.VMMR0.pvVMMR0 against pvImage! */
3562 if ( (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryInt - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3563 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryFast - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage
3564 || (uintptr_t)pReq->u.In.EP.VMMR0.pvVMMR0EntryEx - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3565 {
3566 RTSemFastMutexRelease(pDevExt->mtxLdr);
3567 Log(("Out of range (%p LB %#x): pvVMMR0EntryInt=%p, pvVMMR0EntryFast=%p or pvVMMR0EntryEx=%p!\n",
3568 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3569 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx));
3570 return VERR_INVALID_PARAMETER;
3571 }
3572 break;
3573
3574 case SUPLDRLOADEP_SERVICE:
3575 if (!pReq->u.In.EP.Service.pfnServiceReq)
3576 {
3577 RTSemFastMutexRelease(pDevExt->mtxLdr);
3578 Log(("NULL pointer: pfnServiceReq=%p!\n", pReq->u.In.EP.Service.pfnServiceReq));
3579 return VERR_INVALID_PARAMETER;
3580 }
3581 if ((uintptr_t)pReq->u.In.EP.Service.pfnServiceReq - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3582 {
3583 RTSemFastMutexRelease(pDevExt->mtxLdr);
3584 Log(("Out of range (%p LB %#x): pfnServiceReq=%p!\n",
3585 pImage->pvImage, pReq->u.In.cbImage, pReq->u.In.EP.Service.pfnServiceReq));
3586 return VERR_INVALID_PARAMETER;
3587 }
3588 if ( pReq->u.In.EP.Service.apvReserved[0] != NIL_RTR0PTR
3589 || pReq->u.In.EP.Service.apvReserved[1] != NIL_RTR0PTR
3590 || pReq->u.In.EP.Service.apvReserved[2] != NIL_RTR0PTR)
3591 {
3592 RTSemFastMutexRelease(pDevExt->mtxLdr);
3593 Log(("Out of range (%p LB %#x): apvReserved={%p,%p,%p} MBZ!\n",
3594 pImage->pvImage, pReq->u.In.cbImage,
3595 pReq->u.In.EP.Service.apvReserved[0],
3596 pReq->u.In.EP.Service.apvReserved[1],
3597 pReq->u.In.EP.Service.apvReserved[2]));
3598 return VERR_INVALID_PARAMETER;
3599 }
3600 break;
3601
3602 default:
3603 RTSemFastMutexRelease(pDevExt->mtxLdr);
3604 Log(("Invalid eEPType=%d\n", pReq->u.In.eEPType));
3605 return VERR_INVALID_PARAMETER;
3606 }
3607 if ( pReq->u.In.pfnModuleInit
3608 && (uintptr_t)pReq->u.In.pfnModuleInit - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3609 {
3610 RTSemFastMutexRelease(pDevExt->mtxLdr);
3611 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleInit=%p is outside the image (%p %d bytes)\n",
3612 pReq->u.In.pfnModuleInit, pImage->pvImage, pReq->u.In.cbImage));
3613 return VERR_INVALID_PARAMETER;
3614 }
3615 if ( pReq->u.In.pfnModuleTerm
3616 && (uintptr_t)pReq->u.In.pfnModuleTerm - (uintptr_t)pImage->pvImage >= pReq->u.In.cbImage)
3617 {
3618 RTSemFastMutexRelease(pDevExt->mtxLdr);
3619 Log(("SUP_IOCTL_LDR_LOAD: pfnModuleTerm=%p is outside the image (%p %d bytes)\n",
3620 pReq->u.In.pfnModuleTerm, pImage->pvImage, pReq->u.In.cbImage));
3621 return VERR_INVALID_PARAMETER;
3622 }
3623
3624 /*
3625 * Copy the memory.
3626 */
3627 /* no need to do try/except as this is a buffered request. */
3628 memcpy(pImage->pvImage, &pReq->u.In.achImage[0], pImage->cbImage);
3629 pImage->uState = SUP_IOCTL_LDR_LOAD;
3630 pImage->pfnModuleInit = pReq->u.In.pfnModuleInit;
3631 pImage->pfnModuleTerm = pReq->u.In.pfnModuleTerm;
3632 pImage->offSymbols = pReq->u.In.offSymbols;
3633 pImage->cSymbols = pReq->u.In.cSymbols;
3634 pImage->offStrTab = pReq->u.In.offStrTab;
3635 pImage->cbStrTab = pReq->u.In.cbStrTab;
3636
3637 /*
3638 * Update any entry points.
3639 */
3640 switch (pReq->u.In.eEPType)
3641 {
3642 default:
3643 case SUPLDRLOADEP_NOTHING:
3644 rc = VINF_SUCCESS;
3645 break;
3646 case SUPLDRLOADEP_VMMR0:
3647 rc = supdrvLdrSetVMMR0EPs(pDevExt, pReq->u.In.EP.VMMR0.pvVMMR0, pReq->u.In.EP.VMMR0.pvVMMR0EntryInt,
3648 pReq->u.In.EP.VMMR0.pvVMMR0EntryFast, pReq->u.In.EP.VMMR0.pvVMMR0EntryEx);
3649 break;
3650 case SUPLDRLOADEP_SERVICE:
3651 pImage->pfnServiceReqHandler = pReq->u.In.EP.Service.pfnServiceReq;
3652 rc = VINF_SUCCESS;
3653 break;
3654 }
3655
3656 /*
3657 * On success call the module initialization.
3658 */
3659 LogFlow(("supdrvIOCtl_LdrLoad: pfnModuleInit=%p\n", pImage->pfnModuleInit));
3660 if (RT_SUCCESS(rc) && pImage->pfnModuleInit)
3661 {
3662 Log(("supdrvIOCtl_LdrLoad: calling pfnModuleInit=%p\n", pImage->pfnModuleInit));
3663#ifdef RT_WITH_W64_UNWIND_HACK
3664 rc = supdrvNtWrapModuleInit((PFNRT)pImage->pfnModuleInit);
3665#else
3666 rc = pImage->pfnModuleInit();
3667#endif
3668 if (rc && pDevExt->pvVMMR0 == pImage->pvImage)
3669 supdrvLdrUnsetVMMR0EPs(pDevExt);
3670 }
3671
3672 if (rc)
3673 pImage->uState = SUP_IOCTL_LDR_OPEN;
3674
3675 RTSemFastMutexRelease(pDevExt->mtxLdr);
3676 return rc;
3677}
3678
3679
3680/**
3681 * Frees a previously loaded (prep'ed) image.
3682 *
3683 * @returns IPRT status code.
3684 * @param pDevExt Device globals.
3685 * @param pSession Session data.
3686 * @param pReq The request.
3687 */
3688static int supdrvIOCtl_LdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRFREE pReq)
3689{
3690 int rc;
3691 PSUPDRVLDRUSAGE pUsagePrev;
3692 PSUPDRVLDRUSAGE pUsage;
3693 PSUPDRVLDRIMAGE pImage;
3694 LogFlow(("supdrvIOCtl_LdrFree: pvImageBase=%p\n", pReq->u.In.pvImageBase));
3695
3696 /*
3697 * Find the ldr image.
3698 */
3699 RTSemFastMutexRequest(pDevExt->mtxLdr);
3700 pUsagePrev = NULL;
3701 pUsage = pSession->pLdrUsage;
3702 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3703 {
3704 pUsagePrev = pUsage;
3705 pUsage = pUsage->pNext;
3706 }
3707 if (!pUsage)
3708 {
3709 RTSemFastMutexRelease(pDevExt->mtxLdr);
3710 Log(("SUP_IOCTL_LDR_FREE: couldn't find image!\n"));
3711 return VERR_INVALID_HANDLE;
3712 }
3713
3714 /*
3715 * Check if we can remove anything.
3716 */
3717 rc = VINF_SUCCESS;
3718 pImage = pUsage->pImage;
3719 if (pImage->cUsage <= 1 || pUsage->cUsage <= 1)
3720 {
3721 /*
3722 * Check if there are any objects with destructors in the image, if
3723 * so leave it for the session cleanup routine so we get a chance to
3724 * clean things up in the right order and not leave them all dangling.
3725 */
3726 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
3727 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
3728 if (pImage->cUsage <= 1)
3729 {
3730 PSUPDRVOBJ pObj;
3731 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
3732 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3733 {
3734 rc = VERR_DANGLING_OBJECTS;
3735 break;
3736 }
3737 }
3738 else
3739 {
3740 PSUPDRVUSAGE pGenUsage;
3741 for (pGenUsage = pSession->pUsage; pGenUsage; pGenUsage = pGenUsage->pNext)
3742 if (RT_UNLIKELY((uintptr_t)pGenUsage->pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
3743 {
3744 rc = VERR_DANGLING_OBJECTS;
3745 break;
3746 }
3747 }
3748 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
3749 if (rc == VINF_SUCCESS)
3750 {
3751 /* unlink it */
3752 if (pUsagePrev)
3753 pUsagePrev->pNext = pUsage->pNext;
3754 else
3755 pSession->pLdrUsage = pUsage->pNext;
3756
3757 /* free it */
3758 pUsage->pImage = NULL;
3759 pUsage->pNext = NULL;
3760 RTMemFree(pUsage);
3761
3762 /*
3763 * Dereference the image.
3764 */
3765 if (pImage->cUsage <= 1)
3766 supdrvLdrFree(pDevExt, pImage);
3767 else
3768 pImage->cUsage--;
3769 }
3770 else
3771 {
3772 Log(("supdrvIOCtl_LdrFree: Dangling objects in %p/%s!\n", pImage->pvImage, pImage->szName));
3773 rc = VINF_SUCCESS; /** @todo BRANCH-2.1: remove this after branching. */
3774 }
3775 }
3776 else
3777 {
3778 /*
3779 * Dereference both image and usage.
3780 */
3781 pImage->cUsage--;
3782 pUsage->cUsage--;
3783 }
3784
3785 RTSemFastMutexRelease(pDevExt->mtxLdr);
3786 return rc;
3787}
3788
3789
3790/**
3791 * Gets the address of a symbol in an open image.
3792 *
3793 * @returns 0 on success.
3794 * @returns SUPDRV_ERR_* on failure.
3795 * @param pDevExt Device globals.
3796 * @param pSession Session data.
3797 * @param pReq The request buffer.
3798 */
3799static int supdrvIOCtl_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq)
3800{
3801 PSUPDRVLDRIMAGE pImage;
3802 PSUPDRVLDRUSAGE pUsage;
3803 uint32_t i;
3804 PSUPLDRSYM paSyms;
3805 const char *pchStrings;
3806 const size_t cbSymbol = strlen(pReq->u.In.szSymbol) + 1;
3807 void *pvSymbol = NULL;
3808 int rc = VERR_GENERAL_FAILURE;
3809 Log3(("supdrvIOCtl_LdrGetSymbol: pvImageBase=%p szSymbol=\"%s\"\n", pReq->u.In.pvImageBase, pReq->u.In.szSymbol));
3810
3811 /*
3812 * Find the ldr image.
3813 */
3814 RTSemFastMutexRequest(pDevExt->mtxLdr);
3815 pUsage = pSession->pLdrUsage;
3816 while (pUsage && pUsage->pImage->pvImage != pReq->u.In.pvImageBase)
3817 pUsage = pUsage->pNext;
3818 if (!pUsage)
3819 {
3820 RTSemFastMutexRelease(pDevExt->mtxLdr);
3821 Log(("SUP_IOCTL_LDR_GET_SYMBOL: couldn't find image!\n"));
3822 return VERR_INVALID_HANDLE;
3823 }
3824 pImage = pUsage->pImage;
3825 if (pImage->uState != SUP_IOCTL_LDR_LOAD)
3826 {
3827 unsigned uState = pImage->uState;
3828 RTSemFastMutexRelease(pDevExt->mtxLdr);
3829 Log(("SUP_IOCTL_LDR_GET_SYMBOL: invalid image state %d (%#x)!\n", uState, uState)); NOREF(uState);
3830 return VERR_ALREADY_LOADED;
3831 }
3832
3833 /*
3834 * Search the symbol strings.
3835 */
3836 pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3837 paSyms = (PSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3838 for (i = 0; i < pImage->cSymbols; i++)
3839 {
3840 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3841 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3842 && !memcmp(pchStrings + paSyms[i].offName, pReq->u.In.szSymbol, cbSymbol))
3843 {
3844 pvSymbol = (uint8_t *)pImage->pvImage + paSyms[i].offSymbol;
3845 rc = VINF_SUCCESS;
3846 break;
3847 }
3848 }
3849 RTSemFastMutexRelease(pDevExt->mtxLdr);
3850 pReq->u.Out.pvSymbol = pvSymbol;
3851 return rc;
3852}
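/*
 * Illustrative sketch, not part of the original source: the SUPLDRGETSYMBOL fields the
 * worker above consumes.  pvImageBase is assumed to be the value returned by a prior
 * LDR_OPEN request; the symbol name is a placeholder.
 */
#if 0
static int exampleGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLDRGETSYMBOL pReq, void *pvImageBase)
{
    pReq->u.In.pvImageBase = pvImageBase;
    memcpy(pReq->u.In.szSymbol, "SomeSymbol", sizeof("SomeSymbol")); /* must fit the fixed size buffer */
    pReq->u.Out.pvSymbol = NULL;
    return supdrvIOCtl_LdrGetSymbol(pDevExt, pSession, pReq);
}
#endif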
3853
3854
3855/**
3856 * Gets the address of a symbol in an open image or the support driver.
3857 *
3858 * @returns VINF_SUCCESS on success.
3859 * @returns VERR_* on failure.
3860 * @param pDevExt Device globals.
3861 * @param pSession Session data.
3862 * @param pReq The request buffer.
3863 */
3864static int supdrvIDC_LdrGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
3865{
3866 int rc = VINF_SUCCESS;
3867 const char *pszSymbol = pReq->u.In.pszSymbol;
3868 const char *pszModule = pReq->u.In.pszModule;
3869 size_t cbSymbol;
3870 char const *pszEnd;
3871 uint32_t i;
3872
3873 /*
3874 * Input validation.
3875 */
3876 AssertPtrReturn(pszSymbol, VERR_INVALID_POINTER);
3877 pszEnd = (char *)memchr(pszSymbol, '\0', 512);
3878 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3879 cbSymbol = pszEnd - pszSymbol + 1;
3880
3881 if (pszModule)
3882 {
3883 AssertPtrReturn(pszModule, VERR_INVALID_POINTER);
3884 pszEnd = (char *)memchr(pszModule, '\0', 64);
3885 AssertReturn(pszEnd, VERR_INVALID_PARAMETER);
3886 }
3887 Log3(("supdrvIDC_LdrGetSymbol: pszModule=%p:{%s} pszSymbol=%p:{%s}\n", pszModule, pszModule, pszSymbol, pszSymbol));
3888
3889
3890 if ( !pszModule
3891 || !strcmp(pszModule, "SupDrv"))
3892 {
3893 /*
3894 * Search the support driver export table.
3895 */
3896 for (i = 0; i < RT_ELEMENTS(g_aFunctions); i++)
3897 if (!strcmp(g_aFunctions[i].szName, pszSymbol))
3898 {
3899 pReq->u.Out.pfnSymbol = g_aFunctions[i].pfn;
3900 break;
3901 }
3902 }
3903 else
3904 {
3905 /*
3906 * Find the loader image.
3907 */
3908 PSUPDRVLDRIMAGE pImage;
3909
3910 RTSemFastMutexRequest(pDevExt->mtxLdr);
3911
3912 for (pImage = pDevExt->pLdrImages; pImage; pImage = pImage->pNext)
3913 if (!strcmp(pImage->szName, pszModule))
3914 break;
3915 if (pImage && pImage->uState == SUP_IOCTL_LDR_LOAD)
3916 {
3917 /*
3918 * Search the symbol strings.
3919 */
3920 const char *pchStrings = (const char *)((uint8_t *)pImage->pvImage + pImage->offStrTab);
3921 PCSUPLDRSYM paSyms = (PCSUPLDRSYM)((uint8_t *)pImage->pvImage + pImage->offSymbols);
3922 for (i = 0; i < pImage->cSymbols; i++)
3923 {
3924 if ( paSyms[i].offSymbol < pImage->cbImage /* paranoia */
3925 && paSyms[i].offName + cbSymbol <= pImage->cbStrTab
3926 && !memcmp(pchStrings + paSyms[i].offName, pszSymbol, cbSymbol))
3927 {
3928 /*
3929 * Found it! Calc the symbol address and add a reference to the module.
3930 */
3931 pReq->u.Out.pfnSymbol = (PFNRT)((uint8_t *)pImage->pvImage + paSyms[i].offSymbol);
3932 rc = supdrvLdrAddUsage(pSession, pImage);
3933 break;
3934 }
3935 }
3936 }
3937 else
3938 rc = pImage ? VERR_WRONG_ORDER : VERR_MODULE_NOT_FOUND;
3939
3940 RTSemFastMutexRelease(pDevExt->mtxLdr);
3941 }
3942 return rc;
3943}
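/*
 * Illustrative sketch, not part of the original source: how the IDC dispatcher might fill in
 * a SUPDRVIDCREQGETSYM request before calling the worker above.  The symbol name is a
 * placeholder; only the fields the worker reads and writes are shown.
 */
#if 0
static int exampleIdcGetSymbol(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPDRVIDCREQGETSYM pReq)
{
    pReq->u.In.pszModule  = NULL;               /* NULL or "SupDrv" searches the support driver export table */
    pReq->u.In.pszSymbol  = "SomeExportedName"; /* placeholder symbol name */
    pReq->u.Out.pfnSymbol = NULL;
    return supdrvIDC_LdrGetSymbol(pDevExt, pSession, pReq);
}
#endif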
3944
3945
3946/**
3947 * Updates the VMMR0 entry point pointers.
3948 *
3949 * @returns IPRT status code.
3950 * @param pDevExt Device globals.
3951 * @param pSession Session data.
3952 * @param pVMMR0 VMMR0 image handle.
3953 * @param pvVMMR0EntryInt VMMR0EntryInt address.
3954 * @param pvVMMR0EntryFast VMMR0EntryFast address.
3955 * @param pvVMMR0EntryEx VMMR0EntryEx address.
3956 * @remark Caller must own the loader mutex.
3957 */
3958static int supdrvLdrSetVMMR0EPs(PSUPDRVDEVEXT pDevExt, void *pvVMMR0, void *pvVMMR0EntryInt, void *pvVMMR0EntryFast, void *pvVMMR0EntryEx)
3959{
3960 int rc = VINF_SUCCESS;
3961 LogFlow(("supdrvLdrSetVMMR0EPs: pvVMMR0=%p pvVMMR0EntryInt=%p\n", pvVMMR0, pvVMMR0EntryInt));
3962
3963
3964 /*
3965 * Check if not yet set.
3966 */
3967 if (!pDevExt->pvVMMR0)
3968 {
3969 pDevExt->pvVMMR0 = pvVMMR0;
3970 pDevExt->pfnVMMR0EntryInt = pvVMMR0EntryInt;
3971 pDevExt->pfnVMMR0EntryFast = pvVMMR0EntryFast;
3972 pDevExt->pfnVMMR0EntryEx = pvVMMR0EntryEx;
3973 }
3974 else
3975 {
3976 /*
3977 * Return failure or success depending on whether the values match or not.
3978 */
3979 if ( pDevExt->pvVMMR0 != pvVMMR0
3980 || (void *)pDevExt->pfnVMMR0EntryInt != pvVMMR0EntryInt
3981 || (void *)pDevExt->pfnVMMR0EntryFast != pvVMMR0EntryFast
3982 || (void *)pDevExt->pfnVMMR0EntryEx != pvVMMR0EntryEx)
3983 {
3984 AssertMsgFailed(("SUP_IOCTL_LDR_SETR0EP: Already set pointing to a different module!\n"));
3985 rc = VERR_INVALID_PARAMETER;
3986 }
3987 }
3988 return rc;
3989}
3990
3991
3992/**
3993 * Unsets the VMMR0 entry points installed by supdrvLdrSetVMMR0EPs.
3994 *
3995 * @param pDevExt Device globals.
3996 */
3997static void supdrvLdrUnsetVMMR0EPs(PSUPDRVDEVEXT pDevExt)
3998{
3999 pDevExt->pvVMMR0 = NULL;
4000 pDevExt->pfnVMMR0EntryInt = NULL;
4001 pDevExt->pfnVMMR0EntryFast = NULL;
4002 pDevExt->pfnVMMR0EntryEx = NULL;
4003}
4004
4005
4006/**
4007 * Adds a usage reference in the specified session of an image.
4008 *
4009 * Called while owning the loader semaphore.
4010 *
4011 * @returns VINF_SUCCESS on success and VERR_NO_MEMORY on failure.
4012 * @param pSession Session in question.
4013 * @param pImage Image which the session is using.
4014 */
4015static int supdrvLdrAddUsage(PSUPDRVSESSION pSession, PSUPDRVLDRIMAGE pImage)
4016{
4017 PSUPDRVLDRUSAGE pUsage;
4018 LogFlow(("supdrvLdrAddUsage: pImage=%p\n", pImage));
4019
4020 /*
4021 * Referenced it already?
4022 */
4023 pUsage = pSession->pLdrUsage;
4024 while (pUsage)
4025 {
4026 if (pUsage->pImage == pImage)
4027 {
4028 pUsage->cUsage++;
4029 return VINF_SUCCESS;
4030 }
4031 pUsage = pUsage->pNext;
4032 }
4033
4034 /*
4035 * Allocate new usage record.
4036 */
4037 pUsage = (PSUPDRVLDRUSAGE)RTMemAlloc(sizeof(*pUsage));
4038 AssertReturn(pUsage, VERR_NO_MEMORY);
4039 pUsage->cUsage = 1;
4040 pUsage->pImage = pImage;
4041 pUsage->pNext = pSession->pLdrUsage;
4042 pSession->pLdrUsage = pUsage;
4043 return VINF_SUCCESS;
4044}
4045
4046
4047/**
4048 * Frees a load image.
4049 *
4050 * @param pDevExt Pointer to device extension.
4051 * @param pImage Pointer to the image we're gonna free.
4052 * This image must exist!
4053 * @remark The caller MUST own SUPDRVDEVEXT::mtxLdr!
4054 */
4055static void supdrvLdrFree(PSUPDRVDEVEXT pDevExt, PSUPDRVLDRIMAGE pImage)
4056{
4057 PSUPDRVLDRIMAGE pImagePrev;
4058 LogFlow(("supdrvLdrFree: pImage=%p\n", pImage));
4059
4060 /* find it - arg. should've used doubly linked list. */
4061 Assert(pDevExt->pLdrImages);
4062 pImagePrev = NULL;
4063 if (pDevExt->pLdrImages != pImage)
4064 {
4065 pImagePrev = pDevExt->pLdrImages;
4066 while (pImagePrev->pNext != pImage)
4067 pImagePrev = pImagePrev->pNext;
4068 Assert(pImagePrev->pNext == pImage);
4069 }
4070
4071 /* unlink */
4072 if (pImagePrev)
4073 pImagePrev->pNext = pImage->pNext;
4074 else
4075 pDevExt->pLdrImages = pImage->pNext;
4076
4077 /* check if this is VMMR0.r0 and unset its entry point pointers. */
4078 if (pDevExt->pvVMMR0 == pImage->pvImage)
4079 supdrvLdrUnsetVMMR0EPs(pDevExt);
4080
4081 /* check for objects with destructors in this image. (Shouldn't happen.) */
4082 if (pDevExt->pObjs)
4083 {
4084 unsigned cObjs = 0;
4085 PSUPDRVOBJ pObj;
4086 RTSPINLOCKTMP SpinlockTmp = RTSPINLOCKTMP_INITIALIZER;
4087 RTSpinlockAcquire(pDevExt->Spinlock, &SpinlockTmp);
4088 for (pObj = pDevExt->pObjs; pObj; pObj = pObj->pNext)
4089 if (RT_UNLIKELY((uintptr_t)pObj->pfnDestructor - (uintptr_t)pImage->pvImage < pImage->cbImage))
4090 {
4091 pObj->pfnDestructor = NULL;
4092 cObjs++;
4093 }
4094 RTSpinlockRelease(pDevExt->Spinlock, &SpinlockTmp);
4095 if (cObjs)
4096 OSDBGPRINT(("supdrvLdrFree: Image '%s' has %d dangling objects!\n", pImage->szName, cObjs));
4097 }
4098
4099 /* call termination function if fully loaded. */
4100 if ( pImage->pfnModuleTerm
4101 && pImage->uState == SUP_IOCTL_LDR_LOAD)
4102 {
4103 LogFlow(("supdrvLdrFree: calling pfnModuleTerm=%p\n", pImage->pfnModuleTerm));
4104#ifdef RT_WITH_W64_UNWIND_HACK
4105 supdrvNtWrapModuleTerm(pImage->pfnModuleTerm);
4106#else
4107 pImage->pfnModuleTerm();
4108#endif
4109 }
4110
4111 /* free the image */
4112 pImage->cUsage = 0;
4113 pImage->pNext = 0;
4114 pImage->uState = SUP_IOCTL_LDR_FREE;
4115 RTMemExecFree(pImage);
4116}
4117
4118
4119/**
4120 * Implements the service call request.
4121 *
4122 * @returns VBox status code.
4123 * @param pDevExt The device extension.
4124 * @param pSession The calling session.
4125 * @param pReq The request packet, valid.
4126 */
4127static int supdrvIOCtl_CallServiceModule(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPCALLSERVICE pReq)
4128{
4129#if !defined(RT_OS_WINDOWS) || defined(DEBUG)
4130 int rc;
4131
4132 /*
4133 * Find the module first in the module referenced by the calling session.
4134 */
4135 rc = RTSemFastMutexRequest(pDevExt->mtxLdr);
4136 if (RT_SUCCESS(rc))
4137 {
4138 PFNSUPR0SERVICEREQHANDLER pfnServiceReqHandler = NULL;
4139 PSUPDRVLDRUSAGE pUsage;
4140
4141 for (pUsage = pSession->pLdrUsage; pUsage; pUsage = pUsage->pNext)
4142 if ( pUsage->pImage->pfnServiceReqHandler
4143 && !strcmp(pUsage->pImage->szName, pReq->u.In.szName))
4144 {
4145 pfnServiceReqHandler = pUsage->pImage->pfnServiceReqHandler;
4146 break;
4147 }
4148 RTSemFastMutexRelease(pDevExt->mtxLdr);
4149
4150 if (pfnServiceReqHandler)
4151 {
4152 /*
4153 * Call it.
4154 */
4155 if (pReq->Hdr.cbIn == SUP_IOCTL_CALL_SERVICE_SIZE(0))
4156#ifdef RT_WITH_W64_UNWIND_HACK
4157 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4158#else
4159 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, NULL);
4160#endif
4161 else
4162#ifdef RT_WITH_W64_UNWIND_HACK
4163 rc = supdrvNtWrapServiceReqHandler((PFNRT)pfnServiceReqHandler, pSession, pReq->u.In.uOperation,
4164 pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4165#else
4166 rc = pfnServiceReqHandler(pSession, pReq->u.In.uOperation, pReq->u.In.u64Arg, (PSUPR0SERVICEREQHDR)&pReq->abReqPkt[0]);
4167#endif
4168 }
4169 else
4170 rc = VERR_SUPDRV_SERVICE_NOT_FOUND;
4171 }
4172
4173 /* log it */
4174 if ( RT_FAILURE(rc)
4175 && rc != VERR_INTERRUPTED
4176 && rc != VERR_TIMEOUT)
4177 Log(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4178 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4179 else
4180 Log4(("SUP_IOCTL_CALL_SERVICE: rc=%Rrc op=%u out=%u arg=%RX64 p/t=%RTproc/%RTthrd\n",
4181 rc, pReq->u.In.uOperation, pReq->Hdr.cbOut, pReq->u.In.u64Arg, RTProcSelf(), RTThreadNativeSelf()));
4182 return rc;
4183#else /* RT_OS_WINDOWS && !DEBUG */
4184 return VERR_NOT_IMPLEMENTED;
4185#endif /* RT_OS_WINDOWS && !DEBUG */
4186}
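/*
 * Illustrative sketch, not part of the original source: the shape of a service request
 * handler as a loadable ring-0 module might implement it.  The signature mirrors the way
 * pfnServiceReqHandler is invoked above; the operation numbers are made up.
 */
#if 0
static DECLCALLBACK(int) exampleServiceReqHandler(PSUPDRVSESSION pSession, uint32_t uOperation, uint64_t u64Arg, PSUPR0SERVICEREQHDR pReqHdr)
{
    NOREF(pSession); NOREF(u64Arg);
    switch (uOperation)
    {
        case 0: /* hypothetical "ping" operation, no request packet expected */
            return pReqHdr ? VERR_INVALID_PARAMETER : VINF_SUCCESS;
        default:
            return VERR_NOT_SUPPORTED;
    }
}
#endif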
4187
4188
4189/**
4190 * Implements the logger settings request.
4191 *
4192 * @returns VBox status code.
4193 * @param pDevExt The device extension.
4194 * @param pSession The caller's session.
4195 * @param pReq The request.
4196 */
4197static int supdrvIOCtl_LoggerSettings(PSUPDRVDEVEXT pDevExt, PSUPDRVSESSION pSession, PSUPLOGGERSETTINGS pReq)
4198{
4199 const char *pszGroup = &pReq->u.In.szStrings[pReq->u.In.offGroups];
4200 const char *pszFlags = &pReq->u.In.szStrings[pReq->u.In.offFlags];
4201 const char *pszDest = &pReq->u.In.szStrings[pReq->u.In.offDestination];
4202 PRTLOGGER pLogger = NULL;
4203 int rc;
4204
4205 /*
4206 * Some further validation.
4207 */
4208 switch (pReq->u.In.fWhat)
4209 {
4210 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4211 case SUPLOGGERSETTINGS_WHAT_CREATE:
4212 break;
4213
4214 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4215 if (*pszGroup || *pszFlags || *pszDest)
4216 return VERR_INVALID_PARAMETER;
4217 if (pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_RELEASE)
4218 return VERR_ACCESS_DENIED;
4219 break;
4220
4221 default:
4222 return VERR_INTERNAL_ERROR;
4223 }
4224
4225 /*
4226 * Get the logger.
4227 */
4228 switch (pReq->u.In.fWhich)
4229 {
4230 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4231 pLogger = RTLogGetDefaultInstance();
4232 break;
4233
4234 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4235 pLogger = RTLogRelDefaultInstance();
4236 break;
4237
4238 default:
4239 return VERR_INTERNAL_ERROR;
4240 }
4241
4242 /*
4243 * Do the job.
4244 */
4245 switch (pReq->u.In.fWhat)
4246 {
4247 case SUPLOGGERSETTINGS_WHAT_SETTINGS:
4248 if (pLogger)
4249 {
4250 rc = RTLogFlags(pLogger, pszFlags);
4251 if (RT_SUCCESS(rc))
4252 rc = RTLogGroupSettings(pLogger, pszGroup);
4253 NOREF(pszDest);
4254 }
4255 else
4256 rc = VERR_NOT_FOUND;
4257 break;
4258
4259 case SUPLOGGERSETTINGS_WHAT_CREATE:
4260 {
4261 if (pLogger)
4262 rc = VERR_ALREADY_EXISTS;
4263 else
4264 {
4265 static const char * const s_apszGroups[] = VBOX_LOGGROUP_NAMES;
4266
4267 rc = RTLogCreate(&pLogger,
4268 0 /* fFlags */,
4269 pszGroup,
4270 pReq->u.In.fWhich == SUPLOGGERSETTINGS_WHICH_DEBUG
4271 ? "VBOX_LOG"
4272 : "VBOX_RELEASE_LOG",
4273 RT_ELEMENTS(s_apszGroups),
4274 s_apszGroups,
4275 RTLOGDEST_STDOUT | RTLOGDEST_DEBUGGER,
4276 NULL);
4277 if (RT_SUCCESS(rc))
4278 {
4279 rc = RTLogFlags(pLogger, pszFlags);
4280 NOREF(pszDest);
4281 if (RT_SUCCESS(rc))
4282 {
4283 switch (pReq->u.In.fWhich)
4284 {
4285 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4286 pLogger = RTLogSetDefaultInstance(pLogger);
4287 break;
4288 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4289 pLogger = RTLogRelSetDefaultInstance(pLogger);
4290 break;
4291 }
4292 }
4293 RTLogDestroy(pLogger);
4294 }
4295 }
4296 break;
4297 }
4298
4299 case SUPLOGGERSETTINGS_WHAT_DESTROY:
4300 switch (pReq->u.In.fWhich)
4301 {
4302 case SUPLOGGERSETTINGS_WHICH_DEBUG:
4303 pLogger = RTLogSetDefaultInstance(NULL);
4304 break;
4305 case SUPLOGGERSETTINGS_WHICH_RELEASE:
4306 pLogger = RTLogRelSetDefaultInstance(NULL);
4307 break;
4308 }
4309 rc = RTLogDestroy(pLogger);
4310 break;
4311
4312 default:
4313 {
4314 rc = VERR_INTERNAL_ERROR;
4315 break;
4316 }
4317 }
4318
4319 return rc;
4320}
4321
4322
4323/**
4324 * Creates the GIP.
4325 *
4326 * @returns VBox status code.
4327 * @param pDevExt Instance data. GIP stuff may be updated.
4328 */
4329static int supdrvGipCreate(PSUPDRVDEVEXT pDevExt)
4330{
4331 PSUPGLOBALINFOPAGE pGip;
4332 RTHCPHYS HCPhysGip;
4333 uint32_t u32SystemResolution;
4334 uint32_t u32Interval;
4335 int rc;
4336
4337 LogFlow(("supdrvGipCreate:\n"));
4338
4339 /* assert order */
4340 Assert(pDevExt->u32SystemTimerGranularityGrant == 0);
4341 Assert(pDevExt->GipMemObj == NIL_RTR0MEMOBJ);
4342 Assert(!pDevExt->pGipTimer);
4343
4344 /*
4345 * Allocate a suitable page with a default kernel mapping.
4346 */
4347 rc = RTR0MemObjAllocLow(&pDevExt->GipMemObj, PAGE_SIZE, false);
4348 if (RT_FAILURE(rc))
4349 {
4350 OSDBGPRINT(("supdrvGipCreate: failed to allocate the GIP page. rc=%d\n", rc));
4351 return rc;
4352 }
4353 pGip = (PSUPGLOBALINFOPAGE)RTR0MemObjAddress(pDevExt->GipMemObj); AssertPtr(pGip);
4354 HCPhysGip = RTR0MemObjGetPagePhysAddr(pDevExt->GipMemObj, 0); Assert(HCPhysGip != NIL_RTHCPHYS);
4355
4356#if 0 /** @todo Disabled this as we didn't used to do it before and causes unnecessary stress on laptops.
4357 * It only applies to Windows and should probably revisited later, if possible made part of the
4358 * timer code (return min granularity in RTTimerGetSystemGranularity and set it in RTTimerStart). */
4359 /*
4360 * Try bump up the system timer resolution.
4361 * The more interrupts the better...
4362 */
4363 if ( RT_SUCCESS(RTTimerRequestSystemGranularity( 488281 /* 2048 HZ */, &u32SystemResolution))
4364 || RT_SUCCESS(RTTimerRequestSystemGranularity( 500000 /* 2000 HZ */, &u32SystemResolution))
4365 || RT_SUCCESS(RTTimerRequestSystemGranularity( 976563 /* 1024 HZ */, &u32SystemResolution))
4366 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1000000 /* 1000 HZ */, &u32SystemResolution))
4367 || RT_SUCCESS(RTTimerRequestSystemGranularity( 1953125 /* 512 HZ */, &u32SystemResolution))
4368 || RT_SUCCESS(RTTimerRequestSystemGranularity( 2000000 /* 500 HZ */, &u32SystemResolution))
4369 || RT_SUCCESS(RTTimerRequestSystemGranularity( 3906250 /* 256 HZ */, &u32SystemResolution))
4370 || RT_SUCCESS(RTTimerRequestSystemGranularity( 4000000 /* 250 HZ */, &u32SystemResolution))
4371 || RT_SUCCESS(RTTimerRequestSystemGranularity( 7812500 /* 128 HZ */, &u32SystemResolution))
4372 || RT_SUCCESS(RTTimerRequestSystemGranularity(10000000 /* 100 HZ */, &u32SystemResolution))
4373 || RT_SUCCESS(RTTimerRequestSystemGranularity(15625000 /* 64 HZ */, &u32SystemResolution))
4374 || RT_SUCCESS(RTTimerRequestSystemGranularity(31250000 /* 32 HZ */, &u32SystemResolution))
4375 )
4376 {
4377 Assert(RTTimerGetSystemGranularity() <= u32SystemResolution);
4378 pDevExt->u32SystemTimerGranularityGrant = u32SystemResolution;
4379 }
4380#endif
4381
4382 /*
4383 * Find a reasonable update interval and initialize the structure.
4384 */
4385 u32Interval = u32SystemResolution = RTTimerGetSystemGranularity();
4386 while (u32Interval < 10000000 /* 10 ms */)
4387 u32Interval += u32SystemResolution;
4388
4389 supdrvGipInit(pDevExt, pGip, HCPhysGip, RTTimeSystemNanoTS(), 1000000000 / u32Interval /*=Hz*/);
4390
4391 /*
4392 * Create the timer.
4393 * If CPU_ALL isn't supported we'll have to fall back to synchronous mode.
4394 */
4395 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4396 {
4397 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, RTTIMER_FLAGS_CPU_ALL, supdrvGipAsyncTimer, pDevExt);
4398 if (rc == VERR_NOT_SUPPORTED)
4399 {
4400 OSDBGPRINT(("supdrvGipCreate: omni timer not supported, falling back to synchronous mode\n"));
4401 pGip->u32Mode = SUPGIPMODE_SYNC_TSC;
4402 }
4403 }
4404 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4405 rc = RTTimerCreateEx(&pDevExt->pGipTimer, u32Interval, 0, supdrvGipSyncTimer, pDevExt);
4406 if (RT_SUCCESS(rc))
4407 {
4408 if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
4409 rc = RTMpNotificationRegister(supdrvGipMpEvent, pDevExt);
4410 if (RT_SUCCESS(rc))
4411 {
4412 /*
4413 * We're good.
4414 */
4415 dprintf(("supdrvGipCreate: %ld ns interval.\n", (long)u32Interval));
4416 return VINF_SUCCESS;
4417 }
4418
4419 OSDBGPRINT(("supdrvGipCreate: failed to register the MP event notification. rc=%d\n", rc));
4420 }
4421 else
4422 {
4423 OSDBGPRINT(("supdrvGipCreate: failed to create the GIP timer at %ld ns interval. rc=%d\n", (long)u32Interval, rc));
4424 Assert(!pDevExt->pGipTimer);
4425 }
4426 supdrvGipDestroy(pDevExt);
4427 return rc;
4428}
4429
4430
4431/**
4432 * Terminates the GIP.
4433 *
4434 * @param pDevExt Instance data. GIP stuff may be updated.
4435 */
4436static void supdrvGipDestroy(PSUPDRVDEVEXT pDevExt)
4437{
4438 int rc;
4439#ifdef DEBUG_DARWIN_GIP
4440 OSDBGPRINT(("supdrvGipDestroy: pDevExt=%p pGip=%p pGipTimer=%p GipMemObj=%p\n", pDevExt,
4441 pDevExt->GipMemObj != NIL_RTR0MEMOBJ ? RTR0MemObjAddress(pDevExt->GipMemObj) : NULL,
4442 pDevExt->pGipTimer, pDevExt->GipMemObj));
4443#endif
4444
4445 /*
4446 * Invalidate the GIP data.
4447 */
4448 if (pDevExt->pGip)
4449 {
4450 supdrvGipTerm(pDevExt->pGip);
4451 pDevExt->pGip = NULL;
4452 }
4453
4454 /*
4455 * Destroy the timer and free the GIP memory object.
4456 */
4457 if (pDevExt->pGipTimer)
4458 {
4459 rc = RTTimerDestroy(pDevExt->pGipTimer); AssertRC(rc);
4460 pDevExt->pGipTimer = NULL;
4461 }
4462
4463 if (pDevExt->GipMemObj != NIL_RTR0MEMOBJ)
4464 {
4465 rc = RTR0MemObjFree(pDevExt->GipMemObj, true /* free mappings */); AssertRC(rc);
4466 pDevExt->GipMemObj = NIL_RTR0MEMOBJ;
4467 }
4468
4469 /*
4470 * Finally, release the system timer resolution request if one succeeded.
4471 */
4472 if (pDevExt->u32SystemTimerGranularityGrant)
4473 {
4474 rc = RTTimerReleaseSystemGranularity(pDevExt->u32SystemTimerGranularityGrant); AssertRC(rc);
4475 pDevExt->u32SystemTimerGranularityGrant = 0;
4476 }
4477}
4478
4479
4480/**
4481 * Timer callback function for sync GIP mode.
4482 * @param pTimer The timer.
4483 * @param pvUser The device extension.
4484 */
4485static DECLCALLBACK(void) supdrvGipSyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4486{
4487 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4488 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4489 uint64_t u64TSC = ASMReadTSC();
4490 uint64_t NanoTS = RTTimeSystemNanoTS();
4491
4492 supdrvGipUpdate(pDevExt->pGip, NanoTS, u64TSC);
4493
4494 ASMSetFlags(fOldFlags);
4495}
4496
4497
4498/**
4499 * Timer callback function for async GIP mode.
4500 * @param pTimer The timer.
4501 * @param pvUser The device extension.
4502 */
4503static DECLCALLBACK(void) supdrvGipAsyncTimer(PRTTIMER pTimer, void *pvUser, uint64_t iTick)
4504{
4505 RTCCUINTREG fOldFlags = ASMIntDisableFlags(); /* No interruptions please (real problem on S10). */
4506 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4507 RTCPUID idCpu = RTMpCpuId();
4508 uint64_t u64TSC = ASMReadTSC();
4509 uint64_t NanoTS = RTTimeSystemNanoTS();
4510
4511 /** @todo reset the transaction number and whatnot when iTick == 1. */
4512 if (pDevExt->idGipMaster == idCpu)
4513 supdrvGipUpdate(pDevExt->pGip, NanoTS, u64TSC);
4514 else
4515 supdrvGipUpdatePerCpu(pDevExt->pGip, NanoTS, u64TSC, ASMGetApicId());
4516
4517 ASMSetFlags(fOldFlags);
4518}
4519
4520
4521/**
4522 * Multiprocessor event notification callback.
4523 *
4524 * This is used to make sure that the GIP master gets passed on to
4525 * another CPU.
4526 *
4527 * @param enmEvent The event.
4528 * @param idCpu The cpu it applies to.
4529 * @param pvUser Pointer to the device extension.
4530 */
4531static DECLCALLBACK(void) supdrvGipMpEvent(RTMPEVENT enmEvent, RTCPUID idCpu, void *pvUser)
4532{
4533 PSUPDRVDEVEXT pDevExt = (PSUPDRVDEVEXT)pvUser;
4534 if (enmEvent == RTMPEVENT_OFFLINE)
4535 {
4536 RTCPUID idGipMaster;
4537 ASMAtomicReadSize(&pDevExt->idGipMaster, &idGipMaster);
4538 if (idGipMaster == idCpu)
4539 {
4540 /*
4541 * Find a new GIP master.
4542 */
4543 bool fIgnored;
4544 unsigned i;
4545 RTCPUID idNewGipMaster = NIL_RTCPUID;
4546 RTCPUSET OnlineCpus;
4547 RTMpGetOnlineSet(&OnlineCpus);
4548
4549 for (i = 0; i < RTCPUSET_MAX_CPUS; i++)
4550 {
4551 RTCPUID idCurCpu = RTMpCpuIdFromSetIndex(i);
4552 if ( RTCpuSetIsMember(&OnlineCpus, idCurCpu)
4553 && idCurCpu != idGipMaster)
4554 {
4555 idNewGipMaster = idCurCpu;
4556 break;
4557 }
4558 }
4559
4560 dprintf(("supdrvGipMpEvent: Gip master %#lx -> %#lx\n", (long)idGipMaster, (long)idNewGipMaster));
4561 ASMAtomicCmpXchgSize(&pDevExt->idGipMaster, idNewGipMaster, idGipMaster, fIgnored);
4562 NOREF(fIgnored);
4563 }
4564 }
4565}
4566
4567
4568/**
4569 * Initializes the GIP data.
4570 *
4571 * @returns IPRT status code.
4572 * @param pDevExt Pointer to the device instance data.
4573 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4574 * @param HCPhys The physical address of the GIP.
4575 * @param u64NanoTS The current nanosecond timestamp.
4576 * @param uUpdateHz The update frequency.
4577 */
4578int VBOXCALL supdrvGipInit(PSUPDRVDEVEXT pDevExt, PSUPGLOBALINFOPAGE pGip, RTHCPHYS HCPhys, uint64_t u64NanoTS, unsigned uUpdateHz)
4579{
4580 unsigned i;
4581#ifdef DEBUG_DARWIN_GIP
4582 OSDBGPRINT(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4583#else
4584 LogFlow(("supdrvGipInit: pGip=%p HCPhys=%lx u64NanoTS=%llu uUpdateHz=%d\n", pGip, (long)HCPhys, u64NanoTS, uUpdateHz));
4585#endif
4586
4587 /*
4588 * Initialize the structure.
4589 */
4590 memset(pGip, 0, PAGE_SIZE);
4591 pGip->u32Magic = SUPGLOBALINFOPAGE_MAGIC;
4592 pGip->u32Version = SUPGLOBALINFOPAGE_VERSION;
4593 pGip->u32Mode = supdrvGipDeterminTscMode(pDevExt);
4594 pGip->u32UpdateHz = uUpdateHz;
4595 pGip->u32UpdateIntervalNS = 1000000000 / uUpdateHz;
4596 pGip->u64NanoTSLastUpdateHz = u64NanoTS;
4597
4598 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4599 {
4600 pGip->aCPUs[i].u32TransactionId = 2;
4601 pGip->aCPUs[i].u64NanoTS = u64NanoTS;
4602 pGip->aCPUs[i].u64TSC = ASMReadTSC();
4603
4604 /*
4605 * We don't know the following values until we've executed updates.
4606 * So, we'll just insert very high values.
4607 */
4608 pGip->aCPUs[i].u64CpuHz = _4G + 1;
4609 pGip->aCPUs[i].u32UpdateIntervalTSC = _2G / 4;
4610 pGip->aCPUs[i].au32TSCHistory[0] = _2G / 4;
4611 pGip->aCPUs[i].au32TSCHistory[1] = _2G / 4;
4612 pGip->aCPUs[i].au32TSCHistory[2] = _2G / 4;
4613 pGip->aCPUs[i].au32TSCHistory[3] = _2G / 4;
4614 pGip->aCPUs[i].au32TSCHistory[4] = _2G / 4;
4615 pGip->aCPUs[i].au32TSCHistory[5] = _2G / 4;
4616 pGip->aCPUs[i].au32TSCHistory[6] = _2G / 4;
4617 pGip->aCPUs[i].au32TSCHistory[7] = _2G / 4;
4618 }
4619
4620 /*
4621 * Link it to the device extension.
4622 */
4623 pDevExt->pGip = pGip;
4624 pDevExt->HCPhysGip = HCPhys;
4625 pDevExt->cGipUsers = 0;
4626
4627 return VINF_SUCCESS;
4628}
4629
4630
4631/**
4632 * Callback used by supdrvDetermineAsyncTSC to read the TSC on a CPU.
4633 *
4634 * @param idCpu Ignored.
4635 * @param pvUser1 Where to put the TSC.
4636 * @param pvUser2 Ignored.
4637 */
4638static DECLCALLBACK(void) supdrvDetermineAsyncTscWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
4639{
4640#if 1
4641 ASMAtomicWriteU64((uint64_t volatile *)pvUser1, ASMReadTSC());
4642#else
4643 *(uint64_t *)pvUser1 = ASMReadTSC();
4644#endif
4645}
4646
4647
4648/**
4649 * Determine if Async GIP mode is required because of TSC drift.
4650 *
4651 * When using the default/normal timer code it is essential that the time stamp counter
4652 * (TSC) never runs backwards, that is, a read of the counter should return
4653 * a bigger value than any previous read. This is guaranteed by the latest
4654 * AMD CPUs and by newer Intel CPUs which never enter the C2 state (P4). In any other
4655 * case we have to choose the asynchronous timer mode.
4656 *
4657 * @param poffMin Pointer to the determined difference between different cores.
4658 * @return false if the time stamp counters appear to be synchronized, true otherwise.
4659 */
4660bool VBOXCALL supdrvDetermineAsyncTsc(uint64_t *poffMin)
4661{
4662 /*
4663 * Just iterate all the CPUs 8 times and make sure that the TSC is
4664 * always increasing. We don't bother taking TSC rollover into account.
4665 */
4666 RTCPUSET CpuSet;
4667 int iLastCpu = RTCpuLastIndex(RTMpGetSet(&CpuSet));
4668 int iCpu;
4669 int cLoops = 8;
4670 bool fAsync = false;
4671 int rc = VINF_SUCCESS;
4672 uint64_t offMax = 0;
4673 uint64_t offMin = ~(uint64_t)0;
4674 uint64_t PrevTsc = ASMReadTSC();
4675
4676 while (cLoops-- > 0)
4677 {
4678 for (iCpu = 0; iCpu <= iLastCpu; iCpu++)
4679 {
4680 uint64_t CurTsc;
4681 rc = RTMpOnSpecific(RTMpCpuIdFromSetIndex(iCpu), supdrvDetermineAsyncTscWorker, &CurTsc, NULL);
4682 if (RT_SUCCESS(rc))
4683 {
4684 if (CurTsc <= PrevTsc)
4685 {
4686 fAsync = true;
4687 offMin = offMax = PrevTsc - CurTsc;
4688 dprintf(("supdrvDetermineAsyncTsc: iCpu=%d cLoops=%d CurTsc=%llx PrevTsc=%llx\n",
4689 iCpu, cLoops, CurTsc, PrevTsc));
4690 break;
4691 }
4692
4693 /* Gather statistics (except the first time). */
4694 if (iCpu != 0 || cLoops != 7)
4695 {
4696 uint64_t off = CurTsc - PrevTsc;
4697 if (off < offMin)
4698 offMin = off;
4699 if (off > offMax)
4700 offMax = off;
4701 dprintf2(("%d/%d: off=%llx\n", cLoops, iCpu, off));
4702 }
4703
4704 /* Next */
4705 PrevTsc = CurTsc;
4706 }
4707 else if (rc == VERR_NOT_SUPPORTED)
4708 break;
4709 else
4710 AssertMsg(rc == VERR_CPU_NOT_FOUND || rc == VERR_CPU_OFFLINE, ("%d\n", rc));
4711 }
4712
4713 /* Did we break out of the inner loop? */
4714 if (iCpu <= iLastCpu)
4715 break;
4716 }
4717
4718 *poffMin = offMin; /* This is almost a measurement of the RTMpOnSpecific overhead. */
4719 dprintf(("supdrvDetermineAsyncTsc: returns %d; iLastCpu=%d rc=%d offMin=%llx offMax=%llx\n",
4720 fAsync, iLastCpu, rc, offMin, offMax));
4721#if !defined(RT_OS_SOLARIS) && !defined(RT_OS_OS2) && !defined(RT_OS_WINDOWS)
4722 OSDBGPRINT(("vboxdrv: fAsync=%d offMin=%#lx offMax=%#lx\n", fAsync, (long)offMin, (long)offMax));
4723#endif
4724 return fAsync;
4725}
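/*
 * Usage sketch (illustration only, not the driver's actual GIP setup code,
 * which also looks at CPU features): how a caller might turn the result of
 * supdrvDetermineAsyncTsc() into a GIP mode.  The function name below is
 * hypothetical; SUPGIPMODE_SYNC_TSC / SUPGIPMODE_ASYNC_TSC are the real mode
 * values.  Kept in '#if 0' so it is never compiled.
 */
#if 0
static SUPGIPMODE supdrvExampleSelectGipMode(void)
{
    uint64_t offMin = 0;
    if (supdrvDetermineAsyncTsc(&offMin))
        return SUPGIPMODE_ASYNC_TSC;   /* drift detected: keep per-CPU TSC data */
    return SUPGIPMODE_SYNC_TSC;        /* TSCs look in sync: one shared view is enough */
}
#endif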
4726
4727
4728/**
4729 * Invalidates the GIP data upon termination.
4730 *
4731 * @param pGip Pointer to the read-write kernel mapping of the GIP.
4732 */
4733void VBOXCALL supdrvGipTerm(PSUPGLOBALINFOPAGE pGip)
4734{
4735 unsigned i;
4736 pGip->u32Magic = 0;
4737 for (i = 0; i < RT_ELEMENTS(pGip->aCPUs); i++)
4738 {
4739 pGip->aCPUs[i].u64NanoTS = 0;
4740 pGip->aCPUs[i].u64TSC = 0;
4741 pGip->aCPUs[i].iTSCHistoryHead = 0;
4742 }
4743}
4744
4745
4746/**
4747 * Worker routine for supdrvGipUpdate and supdrvGipUpdatePerCpu that
4748 * updates all the per cpu data except the transaction id.
4749 *
4750 * @param pGip The GIP.
4751 * @param pGipCpu Pointer to the per cpu data.
4752 * @param u64NanoTS The current time stamp.
4753 * @param u64TSC The current TSC.
4754 */
4755static void supdrvGipDoUpdateCpu(PSUPGLOBALINFOPAGE pGip, PSUPGIPCPU pGipCpu, uint64_t u64NanoTS, uint64_t u64TSC)
4756{
4757 uint64_t u64TSCDelta;
4758 uint32_t u32UpdateIntervalTSC;
4759 uint32_t u32UpdateIntervalTSCSlack;
4760 unsigned iTSCHistoryHead;
4761 uint64_t u64CpuHz;
4762
4763 /* Delta between this and the previous update. */
4764 pGipCpu->u32UpdateIntervalNS = (uint32_t)(u64NanoTS - pGipCpu->u64NanoTS);
4765
4766 /*
4767 * Update the NanoTS.
4768 */
4769 ASMAtomicXchgU64(&pGipCpu->u64NanoTS, u64NanoTS);
4770
4771 /*
4772 * Calc TSC delta.
4773 */
4774 /** @todo validate the NanoTS delta, don't trust the OS to call us when it should... */
4775 u64TSCDelta = u64TSC - pGipCpu->u64TSC;
4776 ASMAtomicXchgU64(&pGipCpu->u64TSC, u64TSC);
4777
4778 if (u64TSCDelta >> 32)
4779 {
4780 u64TSCDelta = pGipCpu->u32UpdateIntervalTSC;
4781 pGipCpu->cErrors++;
4782 }
4783
4784 /*
4785 * TSC History.
4786 */
4787 Assert(RT_ELEMENTS(pGipCpu->au32TSCHistory) == 8);
4788
4789 iTSCHistoryHead = (pGipCpu->iTSCHistoryHead + 1) & 7;
4790 ASMAtomicXchgU32(&pGipCpu->iTSCHistoryHead, iTSCHistoryHead);
4791 ASMAtomicXchgU32(&pGipCpu->au32TSCHistory[iTSCHistoryHead], (uint32_t)u64TSCDelta);
4792
4793 /*
4794 * UpdateIntervalTSC = average of the last 8, 2 or 1 intervals depending on the update Hz.
4795 */
4796 if (pGip->u32UpdateHz >= 1000)
4797 {
4798 uint32_t u32;
4799 u32 = pGipCpu->au32TSCHistory[0];
4800 u32 += pGipCpu->au32TSCHistory[1];
4801 u32 += pGipCpu->au32TSCHistory[2];
4802 u32 += pGipCpu->au32TSCHistory[3];
4803 u32 >>= 2;
4804 u32UpdateIntervalTSC = pGipCpu->au32TSCHistory[4];
4805 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[5];
4806 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[6];
4807 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[7];
4808 u32UpdateIntervalTSC >>= 2;
4809 u32UpdateIntervalTSC += u32;
4810 u32UpdateIntervalTSC >>= 1;
4811
4812 /* Value chosen for a 2GHz Athlon64 running Linux 2.6.10/11. */
4813 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 14;
4814 }
4815 else if (pGip->u32UpdateHz >= 90)
4816 {
4817 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4818 u32UpdateIntervalTSC += pGipCpu->au32TSCHistory[(iTSCHistoryHead - 1) & 7];
4819 u32UpdateIntervalTSC >>= 1;
4820
4821 /* Value chosen on a 2GHz ThinkPad running Windows. */
4822 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 7;
4823 }
4824 else
4825 {
4826 u32UpdateIntervalTSC = (uint32_t)u64TSCDelta;
4827
4828 /* This value hasn't been checked yet; waiting for OS/2 and 33Hz timers. :-) */
4829 u32UpdateIntervalTSCSlack = u32UpdateIntervalTSC >> 6;
4830 }
4831 ASMAtomicXchgU32(&pGipCpu->u32UpdateIntervalTSC, u32UpdateIntervalTSC + u32UpdateIntervalTSCSlack);
4832
4833 /*
4834 * CpuHz.
4835 */
4836 u64CpuHz = ASMMult2xU32RetU64(u32UpdateIntervalTSC, pGip->u32UpdateHz);
4837 ASMAtomicXchgU64(&pGipCpu->u64CpuHz, u64CpuHz);
4838}
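/*
 * Worked example (illustration only): for a 2 GHz CPU updated at 100 Hz each
 * interval covers roughly 20,000,000 TSC ticks, so the 90..999 Hz branch above
 * averages the current and previous interval and allows about 1/128th of slack.
 * The numbers below are assumptions picked for the arithmetic; kept in
 * '#if 0' so it is never compiled.
 */
#if 0
static void supdrvExampleUpdateIntervalMath(void)
{
    uint32_t const cTicksCur  = 20000000;                        /* current u64TSCDelta */
    uint32_t const cTicksPrev = 20000200;                        /* previous history entry */
    uint32_t const uInterval  = (cTicksCur + cTicksPrev) >> 1;   /* 20000100 */
    uint32_t const uSlack     = uInterval >> 7;                  /* 156250 */
    uint64_t const uCpuHz     = (uint64_t)uInterval * 100;       /* 2000010000, ~2.0 GHz */
    NOREF(uSlack); NOREF(uCpuHz);
}
#endif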
4839
4840
4841/**
4842 * Updates the GIP.
4843 *
4844 * @param pGip Pointer to the GIP.
4845 * @param u64NanoTS The current nanosecond timestamp.
4846 * @param u64TSC The current TSC timestamp.
4847 */
4848void VBOXCALL supdrvGipUpdate(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, uint64_t u64TSC)
4849{
4850 /*
4851 * Determine the relevant CPU data.
4852 */
4853 PSUPGIPCPU pGipCpu;
4854 if (pGip->u32Mode != SUPGIPMODE_ASYNC_TSC)
4855 pGipCpu = &pGip->aCPUs[0];
4856 else
4857 {
4858 unsigned iCpu = ASMGetApicId();
4859 if (RT_UNLIKELY(iCpu >= RT_ELEMENTS(pGip->aCPUs)))
4860 return;
4861 pGipCpu = &pGip->aCPUs[iCpu];
4862 }
4863
4864 /*
4865 * Start update transaction.
4866 */
4867 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4868 {
4869 /* This can happen on win32 if we're taking too long and there are more CPUs around. Shouldn't happen though. */
4870 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4871 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4872 pGipCpu->cErrors++;
4873 return;
4874 }
4875
4876 /*
4877 * Recalc the update frequency every 0x800th time.
4878 */
4879 if (!(pGipCpu->u32TransactionId & (GIP_UPDATEHZ_RECALC_FREQ * 2 - 2)))
4880 {
4881 if (pGip->u64NanoTSLastUpdateHz)
4882 {
4883#ifdef RT_ARCH_AMD64 /** @todo fix 64-bit div here to work on x86 linux. */
4884 uint64_t u64Delta = u64NanoTS - pGip->u64NanoTSLastUpdateHz;
4885 uint32_t u32UpdateHz = (uint32_t)((UINT64_C(1000000000) * GIP_UPDATEHZ_RECALC_FREQ) / u64Delta);
4886 if (u32UpdateHz <= 2000 && u32UpdateHz >= 30)
4887 {
4888 ASMAtomicXchgU32(&pGip->u32UpdateHz, u32UpdateHz);
4889 ASMAtomicXchgU32(&pGip->u32UpdateIntervalNS, 1000000000 / u32UpdateHz);
4890 }
4891#endif
4892 }
4893 ASMAtomicXchgU64(&pGip->u64NanoTSLastUpdateHz, u64NanoTS);
4894 }
4895
4896 /*
4897 * Update the data.
4898 */
4899 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS, u64TSC);
4900
4901 /*
4902 * Complete transaction.
4903 */
4904 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4905}
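/*
 * Reader-side sketch (illustration only): the odd/even u32TransactionId
 * protocol above lets GIP readers take a lock-free snapshot - the id is odd
 * while an update is in progress and changes across every update.  This is an
 * assumed reader loop for illustration, not the actual IPRT/ring-3 code.
 * Kept in '#if 0' so it is never compiled.
 */
#if 0
static uint64_t supdrvExampleReadCpuHz(PSUPGIPCPU pGipCpu)
{
    uint32_t u32TransactionId;
    uint64_t u64CpuHz;
    do
    {
        u32TransactionId = pGipCpu->u32TransactionId;
        ASMCompilerBarrier();
        u64CpuHz = pGipCpu->u64CpuHz;
        ASMCompilerBarrier();
    } while (   pGipCpu->u32TransactionId != u32TransactionId
             || (u32TransactionId & 1) /* odd: update in progress */);
    return u64CpuHz;
}
#endif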
4906
4907
4908/**
4909 * Updates the per cpu GIP data for the calling cpu.
4910 *
4911 * @param pGip Pointer to the GIP.
4912 * @param u64NanoTS The current nanosecond timestamp.
4913 * @param u64TSC The current TSC timestamp.
4914 * @param iCpu The CPU index.
4915 */
4916void VBOXCALL supdrvGipUpdatePerCpu(PSUPGLOBALINFOPAGE pGip, uint64_t u64NanoTS, uint64_t u64TSC, unsigned iCpu)
4917{
4918 PSUPGIPCPU pGipCpu;
4919
4920 if (RT_LIKELY(iCpu < RT_ELEMENTS(pGip->aCPUs)))
4921 {
4922 pGipCpu = &pGip->aCPUs[iCpu];
4923
4924 /*
4925 * Start update transaction.
4926 */
4927 if (!(ASMAtomicIncU32(&pGipCpu->u32TransactionId) & 1))
4928 {
4929 AssertMsgFailed(("Invalid transaction id, %#x, not odd!\n", pGipCpu->u32TransactionId));
4930 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4931 pGipCpu->cErrors++;
4932 return;
4933 }
4934
4935 /*
4936 * Update the data.
4937 */
4938 supdrvGipDoUpdateCpu(pGip, pGipCpu, u64NanoTS, u64TSC);
4939
4940 /*
4941 * Complete transaction.
4942 */
4943 ASMAtomicIncU32(&pGipCpu->u32TransactionId);
4944 }
4945}
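/*
 * Usage sketch (illustration only): in SUPGIPMODE_ASYNC_TSC mode the
 * OS-specific code arms one timer per CPU and passes that CPU's index
 * straight through.  The callback below and its parameters are assumptions
 * for illustration, not the real platform code.  Kept in '#if 0' so it is
 * never compiled.
 */
#if 0
static void supdrvExamplePerCpuTimerTick(PSUPGLOBALINFOPAGE pGip, unsigned iCpu)
{
    supdrvGipUpdatePerCpu(pGip, RTTimeSystemNanoTS(), ASMReadTSC(), iCpu);
}
#endif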
4946